|
From: <sv...@va...> - 2012-06-07 09:00:12
|
sewardj 2012-06-07 09:59:53 +0100 (Thu, 07 Jun 2012)
New Revision: 2376
Log:
Merge in a port for mips32-linux, by Petar Jovanovic and Dejan Jevtic,
mip...@rt..., Bug 270777.
VEX: new files for mips32.
Added files:
trunk/priv/guest_mips_defs.h
trunk/priv/guest_mips_helpers.c
trunk/priv/guest_mips_toIR.c
trunk/priv/host_mips_defs.c
trunk/priv/host_mips_defs.h
trunk/priv/host_mips_isel.c
trunk/pub/libvex_guest_mips32.h
Added: trunk/priv/host_mips_defs.h (+753 -0)
===================================================================
--- trunk/priv/host_mips_defs.h 2012-06-07 09:51:02 +01:00 (rev 2375)
+++ trunk/priv/host_mips_defs.h 2012-06-07 09:59:53 +01:00 (rev 2376)
@@ -0,0 +1,753 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin host_mips_defs.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2010-2012 RT-RK
+ mip...@rt...
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __VEX_HOST_MIPS_DEFS_H
+#define __VEX_HOST_MIPS_DEFS_H
+
+/* Num registers used for function calls */
+#define MIPS_N_REGPARMS 4
+
+/* --------- Registers. --------- */
+
+/* The usual HReg abstraction.
+ There are 32 general purpose regs.
+*/
+
+extern void ppHRegMIPS(HReg, Bool);
+
+extern HReg hregMIPS_GPR0(Bool mode64); // scratch reg / zero reg
+extern HReg hregMIPS_GPR1(Bool mode64); // reserved for trap handling
+extern HReg hregMIPS_GPR2(Bool mode64); // reserved for trap handling
+extern HReg hregMIPS_GPR3(Bool mode64);
+extern HReg hregMIPS_GPR4(Bool mode64);
+extern HReg hregMIPS_GPR5(Bool mode64);
+extern HReg hregMIPS_GPR6(Bool mode64);
+extern HReg hregMIPS_GPR7(Bool mode64);
+extern HReg hregMIPS_GPR8(Bool mode64);
+extern HReg hregMIPS_GPR9(Bool mode64);
+extern HReg hregMIPS_GPR10(Bool mode64);
+extern HReg hregMIPS_GPR11(Bool mode64);
+extern HReg hregMIPS_GPR12(Bool mode64);
+extern HReg hregMIPS_GPR13(Bool mode64);
+extern HReg hregMIPS_GPR14(Bool mode64);
+extern HReg hregMIPS_GPR15(Bool mode64);
+extern HReg hregMIPS_GPR16(Bool mode64);
+extern HReg hregMIPS_GPR17(Bool mode64);
+extern HReg hregMIPS_GPR18(Bool mode64);
+extern HReg hregMIPS_GPR19(Bool mode64);
+extern HReg hregMIPS_GPR20(Bool mode64);
+extern HReg hregMIPS_GPR21(Bool mode64);
+extern HReg hregMIPS_GPR22(Bool mode64);
+extern HReg hregMIPS_GPR23(Bool mode64); // GuestStatePtr? NOTE(review): GuestStatePointer macro below maps to GPR10, and GPR23 is handed to the allocator in 32-bit mode -- confirm which register really holds the guest state
+extern HReg hregMIPS_GPR24(Bool mode64); // reserved for dispatcher
+extern HReg hregMIPS_GPR25(Bool mode64);
+extern HReg hregMIPS_GPR26(Bool mode64);
+extern HReg hregMIPS_GPR27(Bool mode64);
+extern HReg hregMIPS_GPR28(Bool mode64);
+extern HReg hregMIPS_GPR29(Bool mode64);
+extern HReg hregMIPS_GPR30(Bool mode64);
+extern HReg hregMIPS_GPR31(Bool mode64);
+extern HReg hregMIPS_PC(Bool mode64);
+
+extern HReg hregMIPS_HI(Bool mode64);
+extern HReg hregMIPS_LO(Bool mode64);
+
+extern HReg hregMIPS_F0(Bool mode64);
+extern HReg hregMIPS_F1(Bool mode64);
+extern HReg hregMIPS_F2(Bool mode64);
+extern HReg hregMIPS_F3(Bool mode64);
+extern HReg hregMIPS_F4(Bool mode64);
+extern HReg hregMIPS_F5(Bool mode64);
+extern HReg hregMIPS_F6(Bool mode64);
+extern HReg hregMIPS_F7(Bool mode64);
+extern HReg hregMIPS_F8(Bool mode64);
+extern HReg hregMIPS_F9(Bool mode64);
+extern HReg hregMIPS_F10(Bool mode64);
+extern HReg hregMIPS_F11(Bool mode64);
+extern HReg hregMIPS_F12(Bool mode64);
+extern HReg hregMIPS_F13(Bool mode64);
+extern HReg hregMIPS_F14(Bool mode64);
+extern HReg hregMIPS_F15(Bool mode64);
+extern HReg hregMIPS_F16(Bool mode64);
+extern HReg hregMIPS_F17(Bool mode64);
+extern HReg hregMIPS_F18(Bool mode64);
+extern HReg hregMIPS_F19(Bool mode64);
+extern HReg hregMIPS_F20(Bool mode64);
+extern HReg hregMIPS_F21(Bool mode64);
+extern HReg hregMIPS_F22(Bool mode64);
+extern HReg hregMIPS_F23(Bool mode64);
+extern HReg hregMIPS_F24(Bool mode64);
+extern HReg hregMIPS_F25(Bool mode64);
+extern HReg hregMIPS_F26(Bool mode64);
+extern HReg hregMIPS_F27(Bool mode64);
+extern HReg hregMIPS_F28(Bool mode64);
+extern HReg hregMIPS_F29(Bool mode64);
+extern HReg hregMIPS_F30(Bool mode64);
+extern HReg hregMIPS_F31(Bool mode64);
+extern HReg hregMIPS_FIR(void);
+extern HReg hregMIPS_FCCR(void);
+extern HReg hregMIPS_FEXR(void);
+extern HReg hregMIPS_FENR(void);
+extern HReg hregMIPS_FCSR(void);
+extern HReg hregMIPS_COND(void);
+
+extern HReg hregMIPS_D0(void);
+extern HReg hregMIPS_D1(void);
+extern HReg hregMIPS_D2(void);
+extern HReg hregMIPS_D3(void);
+extern HReg hregMIPS_D4(void);
+extern HReg hregMIPS_D5(void);
+extern HReg hregMIPS_D6(void);
+extern HReg hregMIPS_D7(void);
+extern HReg hregMIPS_D8(void);
+extern HReg hregMIPS_D9(void);
+extern HReg hregMIPS_D10(void);
+extern HReg hregMIPS_D11(void);
+extern HReg hregMIPS_D12(void);
+extern HReg hregMIPS_D13(void);
+extern HReg hregMIPS_D14(void);
+extern HReg hregMIPS_D15(void);
+
+#define GuestStatePointer(_mode64) hregMIPS_GPR10(_mode64)
+
+#define StackFramePointer(_mode64) hregMIPS_GPR30(_mode64)
+#define LinkRegister(_mode64) hregMIPS_GPR31(_mode64)
+#define StackPointer(_mode64) hregMIPS_GPR29(_mode64)
+#define FCSR() hregMIPS_FCSR()
+#define COND() hregMIPS_COND()
+
+#define HIRegister(_mode64) hregMIPS_HI(_mode64)
+#define LORegister(_mode64) hregMIPS_LO(_mode64)
+
+/* a0, a1, a2, a3 */
+#define MIPS_N_ARGREGS 4
+
+/* --------- Condition codes, Intel encoding. --------- */
+typedef enum {
+ MIPScc_EQ = 0, /* equal */
+ MIPScc_NE = 1, /* not equal */
+
+ MIPScc_HS = 2, /* >=u (higher or same) */
+ MIPScc_LO = 3, /* <u (lower) */
+
+ MIPScc_MI = 4, /* minus (negative) */
+ MIPScc_PL = 5, /* plus (zero or +ve) */
+
+ MIPScc_VS = 6, /* overflow */
+ MIPScc_VC = 7, /* no overflow */
+
+ MIPScc_HI = 8, /* >u (higher) */
+ MIPScc_LS = 9, /* <=u (lower or same) */
+
+ MIPScc_GE = 10, /* >=s (signed greater or equal) */
+ MIPScc_LT = 11, /* <s (signed less than) */
+
+ MIPScc_GT = 12, /* >s (signed greater) */
+ MIPScc_LE = 13, /* <=s (signed less or equal) */
+
+ MIPScc_AL = 14, /* always (unconditional) */
+ MIPScc_NV = 15 /* never (unconditional): */
+} MIPSCondCode;
+
+extern HChar *showMIPSCondCode(MIPSCondCode);
+
+/* --------- Memory address expressions (amodes). --------- */
+typedef enum {
+ Mam_IR, /* Immediate (signed 16-bit) + Reg */
+ Mam_RR /* Reg1 + Reg2 */
+} MIPSAModeTag;
+
+typedef struct {
+ MIPSAModeTag tag;
+ union {
+ struct {
+ HReg base;
+ Int index;
+ } IR;
+ struct {
+ HReg base;
+ HReg index;
+ } RR;
+ } Mam;
+} MIPSAMode;
+
+extern MIPSAMode *MIPSAMode_IR(Int, HReg);
+extern MIPSAMode *MIPSAMode_RR(HReg, HReg);
+
+extern MIPSAMode *dopyMIPSAMode(MIPSAMode *);
+extern MIPSAMode *nextMIPSAModeFloat(MIPSAMode *);
+extern MIPSAMode *nextMIPSAModeInt(MIPSAMode *);
+
+extern void ppMIPSAMode(MIPSAMode *, Bool);
+
+/* --------- Operand, which can be a reg or a u16/s16. --------- */
+/* ("RH" == "Register or Halfword immediate") */
+typedef enum {
+ Mrh_Imm,
+ Mrh_Reg
+} MIPSRHTag;
+
+typedef struct {
+ MIPSRHTag tag;
+ union {
+ struct {
+ Bool syned;
+ UShort imm16;
+ } Imm;
+ struct {
+ HReg reg;
+ } Reg;
+ } Mrh;
+} MIPSRH;
+
+extern void ppMIPSRH(MIPSRH *, Bool);
+
+extern MIPSRH *MIPSRH_Imm(Bool, UShort);
+extern MIPSRH *MIPSRH_Reg(HReg);
+
+/* --- Addressing Mode suitable for VFP --- */
+typedef struct {
+ HReg reg;
+ Int simm11;
+} MIPSAModeV;
+
+extern MIPSAModeV *mkMIPSAModeV(HReg reg, Int simm11);
+
+extern void ppMIPSAModeV(MIPSAModeV *);
+
+/* --------- Reg or imm-8x4 operands --------- */
+/* a.k.a (a very restricted form of) Shifter Operand,
+ in the MIPS parlance. */
+
+typedef enum {
+ MIPSri84_I84 = 5, /* imm8 `ror` (2 * imm4) */
+ MIPSri84_R /* reg */
+} MIPSRI84Tag;
+
+typedef struct {
+ MIPSRI84Tag tag;
+ union {
+ struct {
+ UShort imm8;
+ UShort imm4;
+ } I84;
+ struct {
+ HReg reg;
+ } R;
+ } MIPSri84;
+} MIPSRI84;
+
+extern MIPSRI84 *MIPSRI84_I84(UShort imm8, UShort imm4);
+extern MIPSRI84 *MIPSRI84_R(HReg);
+
+extern void ppMIPSRI84(MIPSRI84 *);
+
+/* --------- Reg or imm5 operands --------- */
+typedef enum {
+ MIPSri5_I5 = 7, /* imm5, 1 .. 31 only (no zero!) */
+ MIPSri5_R /* reg */
+} MIPSRI5Tag;
+
+typedef struct {
+ MIPSRI5Tag tag;
+ union {
+ struct {
+ UInt imm5;
+ } I5;
+ struct {
+ HReg reg;
+ } R;
+ } MIPSri5;
+} MIPSRI5;
+
+extern MIPSRI5 *MIPSRI5_I5(UInt imm5);
+extern MIPSRI5 *MIPSRI5_R(HReg);
+
+extern void ppMIPSRI5(MIPSRI5 *);
+
+/* --------- Instructions. --------- */
+
+/*Tags for operations*/
+
+/* --------- */
+typedef enum {
+ Mun_CLO,
+ Mun_CLZ,
+ Mun_NOP,
+} MIPSUnaryOp;
+
+extern HChar *showMIPSUnaryOp(MIPSUnaryOp);
+/* --------- */
+
+/* --------- */
+
+typedef enum {
+ Malu_INVALID,
+ Malu_ADD, Malu_SUB,
+ Malu_AND, Malu_OR, Malu_NOR, Malu_XOR,
+} MIPSAluOp;
+
+extern HChar *showMIPSAluOp(MIPSAluOp,
+ Bool /* is the 2nd operand an immediate? */ );
+
+/* --------- */
+typedef enum {
+ Mshft_INVALID,
+ Mshft_SLL, Mshft_SRL,
+ Mshft_SRA
+} MIPSShftOp;
+
+extern HChar *showMIPSShftOp(MIPSShftOp,
+ Bool /* is the 2nd operand an immediate? */ ,
+ Bool /* is this a 32bit or 64bit op? */ );
+
+/* --------- */
+typedef enum {
+ Macc_ADD,
+ Macc_SUB
+} MIPSMaccOp;
+
+extern HChar *showMIPSMaccOp(MIPSMaccOp, Bool);
+/* --------- */
+
+/* ----- Instruction tags ----- */
+typedef enum {
+ Min_LI, /* load word (32/64-bit) immediate (fake insn) */
+ Min_Alu, /* word add/sub/and/or/xor/nor/others? */
+ Min_Shft, /* word sll/srl/sra */
+ Min_Unary, /* clo, clz, nop, neg */
+
+ Min_Cmp, /* word compare (fake insn) */
+
+ Min_Mul, /* widening/non-widening multiply */
+ Min_Div, /* div */
+
+ Min_Call, /* call to address in register */
+
+ /* The following 5 insns are mandated by translation chaining */
+ Min_XDirect, /* direct transfer to GA */
+ Min_XIndir, /* indirect transfer to GA */
+ Min_XAssisted, /* assisted transfer to GA */
+ Min_EvCheck, /* Event check */
+ Min_ProfInc, /* 64-bit profile counter increment */
+
+ Min_RdWrLR, /* Read/Write Link Register */
+ Min_Mthi, /* Move to HI from GP register */
+ Min_Mtlo, /* Move to LO from GP register */
+ Min_Mfhi, /* Move from HI to GP register */
+ Min_Mflo, /* Move from LO to GP register */
+ Min_Macc, /* Multiply and accumulate */
+
+ Min_Load, /* zero-extending load a 8|16|32 bit value from mem */
+ Min_Store, /* store a 8|16|32 bit value to mem */
+ Min_LoadL, /* mips Load Linked Word */
+ Min_StoreC, /* mips Store Conditional Word */
+
+ Min_FpUnary, /* FP unary op */
+ Min_FpBinary, /* FP binary op */
+ Min_FpConvert, /* FP conversion op */
+ Min_FpMulAcc, /* FP multiply-accumulate style op */
+ Min_FpLdSt, /* FP load/store */
+ Min_FpSTFIW, /* stfiwx */
+ Min_FpRSP, /* FP round IEEE754 double to IEEE754 single */
+ Min_FpCftI, /* fcfid/fctid/fctiw */
+ Min_FpCMov, /* FP floating point conditional move */
+ Min_MtFCSR, /* set FCSR register */
+ Min_MfFCSR, /* get FCSR register */
+ Min_FpCompare, /* FP compare, generating value into int reg */
+ Min_MovCond
+} MIPSInstrTag;
+
+/* --------- */
+typedef enum {
+ Mfp_INVALID,
+
+ /* Ternary */
+ Mfp_MADDD, Mfp_MSUBD,
+ Mfp_MADDS, Mfp_MSUBS,
+
+ /* Binary */
+ Mfp_ADDD, Mfp_SUBD, Mfp_MULD, Mfp_DIVD,
+ Mfp_ADDS, Mfp_SUBS, Mfp_MULS, Mfp_DIVS, Mfp_CVTSD, Mfp_CVTSW, Mfp_CVTWD,
+ Mfp_CVTWS, Mfp_TRULS, Mfp_TRULD, Mfp_TRUWS, Mfp_TRUWD, Mfp_FLOORWS,
+ Mfp_FLOORWD, Mfp_ROUNDWS, Mfp_ROUNDWD, Mfp_CVTDW, Mfp_CMP,
+ Mfp_CEILWS, Mfp_CEILWD, Mfp_CEILLS, Mfp_CEILLD,
+
+ /* Unary */
+ Mfp_SQRTS, Mfp_SQRTD, Mfp_RSQRTS, Mfp_RSQRTD, Mfp_RECIPS, Mfp_RECIPD,
+ Mfp_ABSS, Mfp_ABSD, Mfp_NEGS, Mfp_NEGD, Mfp_MOVS, Mfp_MOVD,
+ Mfp_RES, Mfp_RSQRTE, Mfp_FRIN, Mfp_FRIM, Mfp_FRIP, Mfp_FRIZ, Mfp_CVTD
+} MIPSFpOp;
+
+extern HChar *showMIPSFpOp(MIPSFpOp);
+
+/*--------- Structure for instructions ----------*/
+/* Destinations are on the LEFT (first operand) */
+
+typedef struct {
+ MIPSInstrTag tag;
+ union {
+ /* Get a 32/64-bit literal into a register.
+ May turn into a number of real insns. */
+ struct {
+ HReg dst;
+ ULong imm;
+ } LI;
+ /* Integer add/sub/and/or/xor. Limitations:
+ - For add, the immediate, if it exists, is a signed 16.
+ - For sub, the immediate, if it exists, is a signed 16
+ which may not be -32768, since no such instruction
+ exists, and so we have to emit addi with +32768, but
+ that is not possible.
+ - For and/or/xor, the immediate, if it exists,
+ is an unsigned 16.
+ */
+ struct {
+ MIPSAluOp op;
+ HReg dst;
+ HReg srcL;
+ MIPSRH *srcR;
+ } Alu;
+ /* Integer shl/shr/sar.
+ Limitations: the immediate, if it exists,
+ is a signed 5-bit value between 1 and 31 inclusive.
+ */
+ struct {
+ MIPSShftOp op;
+ Bool sz32; /* mode64 has both 32 and 64bit shft */
+ HReg dst;
+ HReg srcL;
+ MIPSRH *srcR;
+ } Shft;
+ /* Clz, Clo, nop */
+ struct {
+ MIPSUnaryOp op;
+ HReg dst;
+ HReg src;
+ } Unary;
+ /* Word compare. Fake instruction, used for basic block ending */
+ struct {
+ Bool syned;
+ Bool sz32;
+ HReg dst;
+ HReg srcL;
+ HReg srcR;
+
+ MIPSCondCode cond;
+ } Cmp;
+ struct {
+ Bool widening; //True => widening, False => non-widening
+ Bool syned; //signed/unsigned - meaningless if widening = False
+ Bool sz32;
+ HReg dst;
+ HReg srcL;
+ HReg srcR;
+ } Mul;
+ struct {
+ Bool syned; //signed/unsigned - meaningless if widening = False
+ Bool sz32;
+ HReg srcL;
+ HReg srcR;
+ } Div;
+ /* Pseudo-insn. Call target (an absolute address), on given
+ condition (which could be Mcc_ALWAYS). argiregs indicates
+ which argument registers (presumably $4 .. $7, a0 .. a3,
+ per MIPS_N_ARGREGS above -- the "r3 .. r10" range in the
+ original comment was a PPC leftover; confirm against
+ emit_MIPSInstr) carry argument values, as a bit mask.
+ If cond is != Mcc_ALWAYS, src is checked.
+ Otherwise, unconditional call */
+ struct {
+ MIPSCondCode cond;
+ Addr32 target;
+ UInt argiregs;
+ HReg src;
+ } Call;
+ /* Update the guest EIP value, then exit requesting to chain
+ to it. May be conditional. Urr, use of Addr32 implicitly
+ assumes that wordsize(guest) == wordsize(host). */
+ struct {
+ Addr32 dstGA; /* next guest address */
+ MIPSAMode* amPC; /* amode in guest state for PC */
+ MIPSCondCode cond; /* can be MIPScc_AL */
+ Bool toFastEP; /* chain to the slow or fast point? */
+ } XDirect;
+ /* Boring transfer to a guest address not known at JIT time.
+ Not chainable. May be conditional. */
+ struct {
+ HReg dstGA;
+ MIPSAMode* amPC;
+ MIPSCondCode cond; /* can be MIPScc_AL */
+ } XIndir;
+ /* Assisted transfer to a guest address, most general case.
+ Not chainable. May be conditional. */
+ struct {
+ HReg dstGA;
+ MIPSAMode* amPC;
+ MIPSCondCode cond; /* can be MIPScc_AL */
+ IRJumpKind jk;
+ } XAssisted;
+ /* Zero extending loads. Dst size is host word size */
+ struct {
+ UChar sz; /* 1|2|4|8 */
+ HReg dst;
+ MIPSAMode *src;
+ } Load;
+ /* 64/32/16/8 bit stores */
+ struct {
+ UChar sz; /* 1|2|4|8 */
+ MIPSAMode *dst;
+ HReg src;
+ } Store;
+ struct {
+ UChar sz; /* 4|8 */
+ HReg dst;
+ MIPSAMode *src;
+ } LoadL;
+ struct {
+ UChar sz; /* 4|8 */
+ MIPSAMode *dst;
+ HReg src;
+ } StoreC;
+ /* Move from HI/LO register to GP register. */
+ struct {
+ HReg dst;
+ } MfHL;
+
+ /* Move to HI/LO register from GP register. */
+ struct {
+ HReg src;
+ } MtHL;
+
+ /* Read/Write Link Register */
+ struct {
+ Bool wrLR;
+ HReg gpr;
+ } RdWrLR;
+
+ /* MIPS Multiply and accumulate instructions. */
+ struct {
+ MIPSMaccOp op;
+ Bool syned;
+
+ HReg srcL;
+ HReg srcR;
+ } Macc;
+
+ /* MIPS Floating point */
+ struct {
+ MIPSFpOp op;
+ HReg dst;
+ HReg src;
+ } FpUnary;
+ struct {
+ MIPSFpOp op;
+ HReg dst;
+ HReg srcL;
+ HReg srcR;
+ } FpBinary;
+ struct {
+ MIPSFpOp op;
+ HReg dst;
+ HReg srcML;
+ HReg srcMR;
+ HReg srcAcc;
+ } FpMulAcc;
+ struct {
+ Bool isLoad;
+ UChar sz; /* only 4 (IEEE single) or 8 (IEEE double) */
+ HReg reg;
+ MIPSAMode *addr;
+ } FpLdSt;
+
+ struct {
+ MIPSFpOp op;
+ HReg dst;
+ HReg src;
+ } FpConvert;
+ struct {
+ MIPSFpOp op;
+ HReg dst;
+ HReg srcL;
+ HReg srcR;
+ UChar cond1;
+ } FpCompare;
+ struct {
+ MIPSFpOp op;
+ HReg dst;
+ HReg srcL;
+ MIPSRH *srcR;
+ HReg condR;
+ MIPSCondCode cond;
+ } MovCond;
+ /* Move from GP register to FCSR register. */
+ struct {
+ HReg src;
+ } MtFCSR;
+ /* Move from FCSR register to GP register. */
+ struct {
+ HReg dst;
+ } MfFCSR;
+ struct {
+ MIPSAMode* amCounter;
+ MIPSAMode* amFailAddr;
+ } EvCheck;
+ struct {
+ /* No fields. The address of the counter to inc is
+ installed later, post-translation, by patching it in,
+ as it is not known at translation time. */
+ } ProfInc;
+
+ } Min;
+} MIPSInstr;
+
+extern MIPSInstr *MIPSInstr_LI(HReg, ULong);
+extern MIPSInstr *MIPSInstr_Alu(MIPSAluOp, HReg, HReg, MIPSRH *);
+extern MIPSInstr *MIPSInstr_Shft(MIPSShftOp, Bool sz32, HReg, HReg, MIPSRH *);
+extern MIPSInstr *MIPSInstr_Unary(MIPSUnaryOp op, HReg dst, HReg src);
+extern MIPSInstr *MIPSInstr_Cmp(Bool, Bool, HReg, HReg, HReg, MIPSCondCode);
+
+extern MIPSInstr *MIPSInstr_Mul(Bool syned, Bool hi32, Bool sz32, HReg,
+ HReg, HReg);
+extern MIPSInstr *MIPSInstr_Div(Bool syned, Bool sz32, HReg, HReg);
+extern MIPSInstr *MIPSInstr_Madd(Bool, HReg, HReg);
+extern MIPSInstr *MIPSInstr_Msub(Bool, HReg, HReg);
+
+extern MIPSInstr *MIPSInstr_Load(UChar sz, HReg dst, MIPSAMode * src,
+ Bool mode64);
+extern MIPSInstr *MIPSInstr_Store(UChar sz, MIPSAMode * dst, HReg src,
+ Bool mode64);
+
+extern MIPSInstr *MIPSInstr_LoadL(UChar sz, HReg dst, MIPSAMode * src,
+ Bool mode64);
+extern MIPSInstr *MIPSInstr_StoreC(UChar sz, MIPSAMode * dst, HReg src,
+ Bool mode64);
+
+extern MIPSInstr *MIPSInstr_Call(MIPSCondCode, Addr32, UInt, HReg);
+extern MIPSInstr *MIPSInstr_CallAlways(MIPSCondCode, Addr32, UInt);
+
+extern MIPSInstr *MIPSInstr_XDirect(Addr32 dstGA, MIPSAMode* amPC,
+ MIPSCondCode cond, Bool toFastEP);
+extern MIPSInstr *MIPSInstr_XIndir(HReg dstGA, MIPSAMode* amPC,
+ MIPSCondCode cond);
+extern MIPSInstr *MIPSInstr_XAssisted(HReg dstGA, MIPSAMode* amPC,
+ MIPSCondCode cond, IRJumpKind jk);
+
+extern MIPSInstr *MIPSInstr_FpUnary(MIPSFpOp op, HReg dst, HReg src);
+extern MIPSInstr *MIPSInstr_FpBinary(MIPSFpOp op, HReg dst, HReg srcL,
+ HReg srcR);
+extern MIPSInstr *MIPSInstr_FpConvert(MIPSFpOp op, HReg dst, HReg src);
+extern MIPSInstr *MIPSInstr_FpCompare(MIPSFpOp op, HReg dst, HReg srcL,
+ HReg srcR, UChar cond1);
+extern MIPSInstr *MIPSInstr_FpMulAcc(MIPSFpOp op, HReg dst, HReg srcML,
+ HReg srcMR, HReg srcAcc);
+extern MIPSInstr *MIPSInstr_FpLdSt(Bool isLoad, UChar sz, HReg, MIPSAMode *);
+extern MIPSInstr *MIPSInstr_FpSTFIW(HReg addr, HReg data);
+extern MIPSInstr *MIPSInstr_FpRSP(HReg dst, HReg src);
+extern MIPSInstr *MIPSInstr_FpCftI(Bool fromI, Bool int32, HReg dst, HReg src);
+extern MIPSInstr *MIPSInstr_FpCMov(MIPSCondCode, HReg dst, HReg src);
+extern MIPSInstr *MIPSInstr_MtFCSR(HReg src);
+extern MIPSInstr *MIPSInstr_MfFCSR(HReg dst);
+extern MIPSInstr *MIPSInstr_FpCmp(HReg dst, HReg srcL, HReg srcR);
+
+extern MIPSInstr *MIPSInstr_Mfhi(HReg dst);
+extern MIPSInstr *MIPSInstr_Mflo(HReg dst);
+extern MIPSInstr *MIPSInstr_Mthi(HReg src);
+extern MIPSInstr *MIPSInstr_Mtlo(HReg src);
+
+extern MIPSInstr *MIPSInstr_RdWrLR(Bool wrLR, HReg gpr);
+
+// srcL will be copied if !condR
+extern MIPSInstr *MIPSInstr_MovCond(HReg dst, HReg srcL, MIPSRH * src,
+ HReg condR, MIPSCondCode cond);
+
+extern MIPSInstr *MIPSInstr_EvCheck(MIPSAMode* amCounter,
+ MIPSAMode* amFailAddr );
+extern MIPSInstr *MIPSInstr_ProfInc( void );
+
+extern void ppMIPSInstr(MIPSInstr *, Bool mode64);
+
+/* Some functions that insulate the register allocator from details
+ of the underlying instruction set. */
+extern void getRegUsage_MIPSInstr (HRegUsage *, MIPSInstr *, Bool);
+extern void mapRegs_MIPSInstr (HRegRemap *, MIPSInstr *, Bool mode64);
+extern Bool isMove_MIPSInstr (MIPSInstr *, HReg *, HReg *);
+extern Int emit_MIPSInstr (/*MB_MOD*/Bool* is_profInc,
+ UChar* buf, Int nbuf, MIPSInstr* i,
+ Bool mode64,
+ void* disp_cp_chain_me_to_slowEP,
+ void* disp_cp_chain_me_to_fastEP,
+ void* disp_cp_xindir,
+ void* disp_cp_xassisted );
+
+extern void genSpill_MIPS ( /*OUT*/ HInstr ** i1, /*OUT*/ HInstr ** i2,
+ HReg rreg, Int offset, Bool);
+extern void genReload_MIPS( /*OUT*/ HInstr ** i1, /*OUT*/ HInstr ** i2,
+ HReg rreg, Int offset, Bool);
+
+extern void getAllocableRegs_MIPS (Int *, HReg **, Bool mode64);
+extern HInstrArray *iselSB_MIPS ( IRSB*,
+ VexArch,
+ VexArchInfo*,
+ VexAbiInfo*,
+ Int offs_Host_EvC_Counter,
+ Int offs_Host_EvC_FailAddr,
+ Bool chainingAllowed,
+ Bool addProfInc,
+ Addr64 max_ga );
+
+/* How big is an event check? This is kind of a kludge because it
+ depends on the offsets of host_EvC_FAILADDR and host_EvC_COUNTER,
+ and so assumes that they are both <= 128, and so can use the short
+ offset encoding. This is all checked with assertions, so in the
+ worst case we will merely assert at startup. */
+extern Int evCheckSzB_MIPS ( void );
+
+/* Perform a chaining and unchaining of an XDirect jump. */
+extern VexInvalRange chainXDirect_MIPS ( void* place_to_chain,
+ void* disp_cp_chain_me_EXPECTED,
+ void* place_to_jump_to,
+ Bool mode64 );
+
+extern VexInvalRange unchainXDirect_MIPS ( void* place_to_unchain,
+ void* place_to_jump_to_EXPECTED,
+ void* disp_cp_chain_me,
+ Bool mode64 );
+
+/* Patch the counter location into an existing ProfInc point. */
+extern VexInvalRange patchProfInc_MIPS ( void* place_to_patch,
+ ULong* location_of_counter,
+ Bool mode64 );
+
+#endif /* ndef __VEX_HOST_MIPS_DEFS_H */
+
+/*---------------------------------------------------------------*/
+/*--- end host_mips_defs.h ---*/
+/*---------------------------------------------------------------*/
Added: trunk/priv/host_mips_defs.c (+4048 -0)
===================================================================
--- trunk/priv/host_mips_defs.c 2012-06-07 09:51:02 +01:00 (rev 2375)
+++ trunk/priv/host_mips_defs.c 2012-06-07 09:59:53 +01:00 (rev 2376)
@@ -0,0 +1,4048 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin host_mips_defs.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2010-2012 RT-RK
+ mip...@rt...
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex.h"
+#include "libvex_trc_values.h"
+
+#include "main_util.h"
+#include "host_generic_regs.h"
+#include "host_mips_defs.h"
+
+/*---------------- Registers ----------------*/
+
+void ppHRegMIPS(HReg reg, Bool mode64)
+{
+ Int r;
+ static HChar *ireg32_names[35]
+ = { "$0", "$1", "$2", "$3", "$4", "$5", "$6", "$7",
+ "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15",
+ "$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23",
+ "$24", "$25", "$26", "$27", "$28", "$29", "$30", "$31",
+ "%32", "%33", "%34",
+ };
+
+ static HChar *freg32_names[32]
+ = { "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7",
+ "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15",
+ "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23",
+ "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "f30", "$f31"
+ };
+
+ static HChar *freg64_names[32] /* NOTE(review): only 16 initializers; indices 16..31 are NULL and reachable for HRcFlt64 regs >= 16 */
+ = { "$d0", "$d1", "$d2", "$d3", "$d4", "$d5", "$d6", "$d7",
+ "$d8", "$d9", "$d10", "$d11", "$d12", "$d13", "$d14", "$d15",
+ };
+
+ /* Be generic for all virtual regs. */
+ if (hregIsVirtual(reg)) {
+ ppHReg(reg);
+ return;
+ }
+
+ /* But specific for real regs. */
+ vassert(hregClass(reg) == HRcInt32 || hregClass(reg) == HRcInt64 ||
+ hregClass(reg) == HRcFlt32 || hregClass(reg) == HRcFlt64);
+
+ /* But specific for real regs. */
+ {
+ switch (hregClass(reg)) {
+ case HRcInt32:
+ r = hregNumber(reg);
+ vassert(r >= 0 && r < 32);
+ vex_printf("%s", ireg32_names[r]);
+ return;
+ case HRcFlt32:
+ r = hregNumber(reg);
+ vassert(r >= 0 && r < 32);
+ vex_printf("%s", freg32_names[r]);
+ return;
+ case HRcFlt64:
+ r = hregNumber(reg);
+ vassert(r >= 0 && r < 32);
+ vex_printf("%s", freg64_names[r]);
+ return;
+ default:
+ vpanic("ppHRegMIPS");
+ break;
+ }
+ }
+
+ return;
+}
+
+#define MkHRegGPR(_n, _mode64) \
+ mkHReg(_n, _mode64 ? HRcInt64 : HRcInt32, False)
+
+HReg hregMIPS_GPR0(Bool mode64)
+{
+ return MkHRegGPR(0, mode64);
+}
+
+HReg hregMIPS_GPR1(Bool mode64)
+{
+ return MkHRegGPR(1, mode64);
+}
+
+HReg hregMIPS_GPR2(Bool mode64)
+{
+ return MkHRegGPR(2, mode64);
+}
+
+HReg hregMIPS_GPR3(Bool mode64)
+{
+ return MkHRegGPR(3, mode64);
+}
+
+HReg hregMIPS_GPR4(Bool mode64)
+{
+ return MkHRegGPR(4, mode64);
+}
+
+HReg hregMIPS_GPR5(Bool mode64)
+{
+ return MkHRegGPR(5, mode64);
+}
+
+HReg hregMIPS_GPR6(Bool mode64)
+{
+ return MkHRegGPR(6, mode64);
+}
+
+HReg hregMIPS_GPR7(Bool mode64)
+{
+ return MkHRegGPR(7, mode64);
+}
+
+HReg hregMIPS_GPR8(Bool mode64)
+{
+ return MkHRegGPR(8, mode64);
+}
+
+HReg hregMIPS_GPR9(Bool mode64)
+{
+ return MkHRegGPR(9, mode64);
+}
+
+HReg hregMIPS_GPR10(Bool mode64)
+{
+ return MkHRegGPR(10, mode64);
+}
+
+HReg hregMIPS_GPR11(Bool mode64)
+{
+ return MkHRegGPR(11, mode64);
+}
+
+HReg hregMIPS_GPR12(Bool mode64)
+{
+ return MkHRegGPR(12, mode64);
+}
+
+HReg hregMIPS_GPR13(Bool mode64)
+{
+ return MkHRegGPR(13, mode64);
+}
+
+HReg hregMIPS_GPR14(Bool mode64)
+{
+ return MkHRegGPR(14, mode64);
+}
+
+HReg hregMIPS_GPR15(Bool mode64)
+{
+ return MkHRegGPR(15, mode64);
+}
+
+HReg hregMIPS_GPR16(Bool mode64)
+{
+ return MkHRegGPR(16, mode64);
+}
+
+HReg hregMIPS_GPR17(Bool mode64)
+{
+ return MkHRegGPR(17, mode64);
+}
+
+HReg hregMIPS_GPR18(Bool mode64)
+{
+ return MkHRegGPR(18, mode64);
+}
+
+HReg hregMIPS_GPR19(Bool mode64)
+{
+ return MkHRegGPR(19, mode64);
+}
+
+HReg hregMIPS_GPR20(Bool mode64)
+{
+ return MkHRegGPR(20, mode64);
+}
+
+HReg hregMIPS_GPR21(Bool mode64)
+{
+ return MkHRegGPR(21, mode64);
+}
+
+HReg hregMIPS_GPR22(Bool mode64)
+{
+ return MkHRegGPR(22, mode64);
+}
+
+HReg hregMIPS_GPR23(Bool mode64)
+{
+ return MkHRegGPR(23, mode64);
+}
+
+HReg hregMIPS_GPR24(Bool mode64)
+{
+ return MkHRegGPR(24, mode64);
+}
+
+HReg hregMIPS_GPR25(Bool mode64)
+{
+ return MkHRegGPR(25, mode64);
+}
+
+HReg hregMIPS_GPR26(Bool mode64)
+{
+ return MkHRegGPR(26, mode64);
+}
+
+HReg hregMIPS_GPR27(Bool mode64)
+{
+ return MkHRegGPR(27, mode64);
+}
+
+HReg hregMIPS_GPR28(Bool mode64)
+{
+ return MkHRegGPR(28, mode64);
+}
+
+HReg hregMIPS_GPR29(Bool mode64)
+{
+ return MkHRegGPR(29, mode64);
+}
+
+HReg hregMIPS_GPR30(Bool mode64)
+{
+ return MkHRegGPR(30, mode64);
+}
+
+HReg hregMIPS_GPR31(Bool mode64)
+{
+ return MkHRegGPR(31, mode64);
+}
+
+#define MkHRegFPR(_n, _mode64) \
+ mkHReg(_n, _mode64 ? HRcFlt64 : HRcFlt32, False)
+
+HReg hregMIPS_F0(Bool mode64)
+{
+ return MkHRegFPR(0, mode64);
+}
+
+HReg hregMIPS_F1(Bool mode64)
+{
+ return MkHRegFPR(1, mode64);
+}
+
+HReg hregMIPS_F2(Bool mode64)
+{
+ return MkHRegFPR(2, mode64);
+}
+
+HReg hregMIPS_F3(Bool mode64)
+{
+ return MkHRegFPR(3, mode64);
+}
+
+HReg hregMIPS_F4(Bool mode64)
+{
+ return MkHRegFPR(4, mode64);
+}
+
+HReg hregMIPS_F5(Bool mode64)
+{
+ return MkHRegFPR(5, mode64);
+}
+
+HReg hregMIPS_F6(Bool mode64)
+{
+ return MkHRegFPR(6, mode64);
+}
+
+HReg hregMIPS_F7(Bool mode64)
+{
+ return MkHRegFPR(7, mode64);
+}
+
+HReg hregMIPS_F8(Bool mode64)
+{
+ return MkHRegFPR(8, mode64);
+}
+
+HReg hregMIPS_F9(Bool mode64)
+{
+ return MkHRegFPR(9, mode64);
+}
+
+HReg hregMIPS_F10(Bool mode64)
+{
+ return MkHRegFPR(10, mode64);
+}
+
+HReg hregMIPS_F11(Bool mode64)
+{
+ return MkHRegFPR(11, mode64);
+}
+
+HReg hregMIPS_F12(Bool mode64)
+{
+ return MkHRegFPR(12, mode64);
+}
+
+HReg hregMIPS_F13(Bool mode64)
+{
+ return MkHRegFPR(13, mode64);
+}
+
+HReg hregMIPS_F14(Bool mode64)
+{
+ return MkHRegFPR(14, mode64);
+}
+
+HReg hregMIPS_F15(Bool mode64)
+{
+ return MkHRegFPR(15, mode64);
+}
+
+HReg hregMIPS_F16(Bool mode64)
+{
+ return MkHRegFPR(16, mode64);
+}
+
+HReg hregMIPS_F17(Bool mode64)
+{
+ return MkHRegFPR(17, mode64);
+}
+
+HReg hregMIPS_F18(Bool mode64)
+{
+ return MkHRegFPR(18, mode64);
+}
+
+HReg hregMIPS_F19(Bool mode64)
+{
+ return MkHRegFPR(19, mode64);
+}
+
+HReg hregMIPS_F20(Bool mode64)
+{
+ return MkHRegFPR(20, mode64);
+}
+
+HReg hregMIPS_F21(Bool mode64)
+{
+ return MkHRegFPR(21, mode64);
+}
+
+HReg hregMIPS_F22(Bool mode64)
+{
+ return MkHRegFPR(22, mode64);
+}
+
+HReg hregMIPS_F23(Bool mode64)
+{
+ return MkHRegFPR(23, mode64);
+}
+
+HReg hregMIPS_F24(Bool mode64)
+{
+ return MkHRegFPR(24, mode64);
+}
+
+HReg hregMIPS_F25(Bool mode64)
+{
+ return MkHRegFPR(25, mode64);
+}
+
+HReg hregMIPS_F26(Bool mode64)
+{
+ return MkHRegFPR(26, mode64);
+}
+
+HReg hregMIPS_F27(Bool mode64)
+{
+ return MkHRegFPR(27, mode64);
+}
+
+HReg hregMIPS_F28(Bool mode64)
+{
+ return MkHRegFPR(28, mode64);
+}
+
+HReg hregMIPS_F29(Bool mode64)
+{
+ return MkHRegFPR(29, mode64);
+}
+
+HReg hregMIPS_F30(Bool mode64)
+{
+ return MkHRegFPR(30, mode64);
+}
+
+HReg hregMIPS_F31(Bool mode64)
+{
+ return MkHRegFPR(31, mode64);
+}
+
+HReg hregMIPS_PC(Bool mode64)
+{
+ return mkHReg(32, mode64 ? HRcFlt64 : HRcFlt32, False);
+}
+
+HReg hregMIPS_HI(Bool mode64)
+{
+ return mkHReg(33, mode64 ? HRcFlt64 : HRcFlt32, False);
+}
+
+HReg hregMIPS_LO(Bool mode64)
+{
+ return mkHReg(34, mode64 ? HRcFlt64 : HRcFlt32, False);
+}
+
+HReg hregMIPS_D0(void)
+{
+ return mkHReg(0, HRcFlt64, False);
+}
+
+HReg hregMIPS_D1(void)
+{
+ return mkHReg(2, HRcFlt64, False);
+}
+
+HReg hregMIPS_D2(void)
+{
+ return mkHReg(4, HRcFlt64, False);
+}
+
+HReg hregMIPS_D3(void)
+{
+ return mkHReg(6, HRcFlt64, False);
+}
+
+HReg hregMIPS_D4(void)
+{
+ return mkHReg(8, HRcFlt64, False);
+}
+
+HReg hregMIPS_D5(void)
+{
+ return mkHReg(10, HRcFlt64, False);
+}
+
+HReg hregMIPS_D6(void)
+{
+ return mkHReg(12, HRcFlt64, False);
+}
+
+HReg hregMIPS_D7(void)
+{
+ return mkHReg(14, HRcFlt64, False);
+}
+
+HReg hregMIPS_D8(void)
+{
+ return mkHReg(16, HRcFlt64, False);
+}
+
+HReg hregMIPS_D9(void)
+{
+ return mkHReg(18, HRcFlt64, False);
+}
+
+HReg hregMIPS_D10(void)
+{
+ return mkHReg(20, HRcFlt64, False);
+}
+
+HReg hregMIPS_D11(void)
+{
+ return mkHReg(22, HRcFlt64, False);
+}
+
+HReg hregMIPS_D12(void)
+{
+ return mkHReg(24, HRcFlt64, False);
+}
+
+HReg hregMIPS_D13(void)
+{
+ return mkHReg(26, HRcFlt64, False);
+}
+
+HReg hregMIPS_D14(void)
+{
+ return mkHReg(28, HRcFlt64, False);
+}
+
+HReg hregMIPS_D15(void)
+{
+ return mkHReg(30, HRcFlt64, False);
+}
+
+HReg hregMIPS_FIR(void)
+{
+ return mkHReg(35, HRcInt32, False);
+}
+
+HReg hregMIPS_FCCR(void)
+{
+ return mkHReg(36, HRcInt32, False);
+}
+
+HReg hregMIPS_FEXR(void)
+{
+ return mkHReg(37, HRcInt32, False);
+}
+
+HReg hregMIPS_FENR(void)
+{
+ return mkHReg(38, HRcInt32, False);
+}
+
+HReg hregMIPS_FCSR(void)
+{
+ return mkHReg(39, HRcInt32, False);
+}
+
+HReg hregMIPS_COND(void)
+{
+ return mkHReg(47, HRcInt32, False);
+}
+
+void getAllocableRegs_MIPS(Int * nregs, HReg ** arr, Bool mode64)
+{
+ if (mode64)
+ *nregs = 27;
+ else
+ *nregs = 34;
+ UInt i = 0;
+ *arr = LibVEX_Alloc(*nregs * sizeof(HReg));
+
+ //ZERO = constant 0
+ //AT = assembler temporary
+ // callee saves ones are listed first, since we prefer them
+ // if they're available
+ (*arr)[i++] = hregMIPS_GPR16(mode64);
+ (*arr)[i++] = hregMIPS_GPR17(mode64);
+ (*arr)[i++] = hregMIPS_GPR18(mode64);
+ (*arr)[i++] = hregMIPS_GPR19(mode64);
+ (*arr)[i++] = hregMIPS_GPR20(mode64);
+ (*arr)[i++] = hregMIPS_GPR21(mode64);
+ (*arr)[i++] = hregMIPS_GPR22(mode64);
+ if (!mode64)
+ (*arr)[i++] = hregMIPS_GPR23(mode64);
+
+ // otherwise we'll have to slum it out with caller-saves ones
+ if (mode64) {
+ (*arr)[i++] = hregMIPS_GPR8(mode64);
+ (*arr)[i++] = hregMIPS_GPR9(mode64);
+ (*arr)[i++] = hregMIPS_GPR10(mode64);
+ (*arr)[i++] = hregMIPS_GPR11(mode64);
+ }
+ (*arr)[i++] = hregMIPS_GPR12(mode64);
+ (*arr)[i++] = hregMIPS_GPR13(mode64);
+ (*arr)[i++] = hregMIPS_GPR14(mode64);
+ (*arr)[i++] = hregMIPS_GPR15(mode64);
+ (*arr)[i++] = hregMIPS_GPR24(mode64);
+ /***********mips32********************/
+ // t0 (=dispatch_ctr)
+ // t1 spill reg temp
+ // t2 (=guest_state)
+ // t3 (=PC = next guest address)
+ // K0 and K1 are reserved for OS kernel
+ // GP = global pointer
+ // SP = stack pointer
+ // FP = frame pointer
+ // RA = link register
+ // + PC, HI and LO
+ (*arr)[i++] = hregMIPS_F20(mode64);
+ (*arr)[i++] = hregMIPS_F21(mode64);
+ (*arr)[i++] = hregMIPS_F22(mode64);
+ (*arr)[i++] = hregMIPS_F23(mode64);
+ (*arr)[i++] = hregMIPS_F24(mode64);
+ (*arr)[i++] = hregMIPS_F25(mode64);
+ (*arr)[i++] = hregMIPS_F26(mode64);
+ (*arr)[i++] = hregMIPS_F27(mode64);
+ (*arr)[i++] = hregMIPS_F28(mode64);
+ (*arr)[i++] = hregMIPS_F29(mode64);
+ (*arr)[i++] = hregMIPS_F30(mode64);
+ if (!mode64) {
+ /* Fake double floating point */
+ (*arr)[i++] = hregMIPS_D0();
+ (*arr)[i++] = hregMIPS_D1();
+ (*arr)[i++] = hregMIPS_D2();
+ (*arr)[i++] = hregMIPS_D3();
+ (*arr)[i++] = hregMIPS_D4();
+ (*arr)[i++] = hregMIPS_D5();
+ (*arr)[i++] = hregMIPS_D6();
+ (*arr)[i++] = hregMIPS_D7();
+ (*arr)[i++] = hregMIPS_D8();
+ (*arr)[i++] = hregMIPS_D9();
+ }
+ vassert(i == *nregs);
+
+}
+
+/*----------------- Condition Codes ----------------------*/
+
+HChar *showMIPSCondCode(MIPSCondCode cond)
+{
+ HChar* ret;
+ switch (cond) {
+ case MIPScc_EQ:
+ ret = "EQ"; /* equal */
+ break;
+ case MIPScc_NE:
+ ret = "NEQ"; /* not equal */
+ break;
+ case MIPScc_HS:
+ ret = "GE"; /* >=u (Greater Than or Equal) */
+ break;
+ case MIPScc_LO:
+ ret = "LT"; /* <u (lower) */
+ break;
+ case MIPScc_MI:
+ ret = "mi"; /* minus (negative) */
+ break;
+ case MIPScc_PL:
+ ret = "pl"; /* plus (zero or +ve) */
+ break;
+ case MIPScc_VS:
+ ret = "vs"; /* overflow */
+ break;
+ case MIPScc_VC:
+ ret = "vc"; /* no overflow */
+ break;
+ case MIPScc_HI:
+ ret = "hi"; /* >u (higher) */
+ break;
+ case MIPScc_LS:
+ ret = "ls"; /* <=u (lower or same) */
+ break;
+ case MIPScc_GE:
+ ret = "ge"; /* >=s (signed greater or equal) */
+ break;
+ case MIPScc_LT:
+ ret = "lt"; /* <s (signed less than) */
+ break;
+ case MIPScc_GT:
+ ret = "gt"; /* >s (signed greater) */
+ break;
+ case MIPScc_LE:
+ ret = "le"; /* <=s (signed less or equal) */
+ break;
+ case MIPScc_AL:
+ ret = "al"; /* always (unconditional) */
+ break;
+ case MIPScc_NV:
+ ret = "nv"; /* never (unconditional): */
+ break;
+ default:
+ vpanic("showMIPSCondCode");
+ break;
+ }
+ return ret;
+}
+
+HChar *showMIPSFpOp(MIPSFpOp op)
+{
+ HChar *ret;
+ switch (op) {
+ case Mfp_ADDD:
+ ret = "ADD.D";
+ break;
+ case Mfp_SUBD:
+ ret = "SUB.D";
+ break;
+ case Mfp_MULD:
+ ret = "MUL.D";
+ break;
+ case Mfp_DIVD:
+ ret = "DIV.D";
+ break;
+ case Mfp_MADDD:
+ ret = "MADD.D";
+ break;
+ case Mfp_MSUBD:
+ ret = "MSUB.D";
+ break;
+ case Mfp_MADDS:
+ ret = "MADD.S";
+ break;
+ case Mfp_MSUBS:
+ ret = "MSUB.S";
+ break;
+ case Mfp_ADDS:
+ ret = "ADD.S";
+ break;
+ case Mfp_SUBS:
+ ret = "SUB.S";
+ break;
+ case Mfp_MULS:
+ ret = "MUL.S";
+ break;
+ case Mfp_DIVS:
+ ret = "DIV.S";
+ break;
+ case Mfp_SQRTS:
+ ret = "SQRT.S";
+ break;
+ case Mfp_SQRTD:
+ ret = "SQRT.D";
+ break;
+ case Mfp_RSQRTS:
+ ret = "RSQRT.S";
+ break;
+ case Mfp_RSQRTD:
+ ret = "RSQRT.D";
+ break;
+ case Mfp_RECIPS:
+ ret = "RECIP.S";
+ break;
+ case Mfp_RECIPD:
+ ret = "RECIP.D";
+ break;
+ case Mfp_ABSS:
+ ret = "ABS.S";
+ break;
+ case Mfp_ABSD:
+ ret = "ABS.D";
+ break;
+ case Mfp_NEGS:
+ ret = "NEG.S";
+ break;
+ case Mfp_NEGD:
+ ret = "NEG.D";
+ break;
+ case Mfp_MOVS:
+ ret = "MOV.S";
+ break;
+ case Mfp_MOVD:
+ ret = "MOV.D";
+ break;
+ case Mfp_RES:
+ ret = "RES";
+ break;
+ case Mfp_ROUNDWS:
+ ret = "ROUND.W.S";
+ break;
+ case Mfp_ROUNDWD:
+ ret = "ROUND.W.D";
+ break;
+ case Mfp_FLOORWS:
+ ret = "FLOOR.W.S";
+ break;
+ case Mfp_FLOORWD:
+ ret = "FLOOR.W.D";
+ break;
+ case Mfp_RSQRTE:
+ ret = "frsqrte";
+ break;
+ case Mfp_CVTDW:
+ case Mfp_CVTD:
+ ret = "CVT.D";
+ break;
+ case Mfp_CVTSD:
+ case Mfp_CVTSW:
+ ret = "CVT.S";
+ break;
+ case Mfp_CVTWS:
+ case Mfp_CVTWD:
+ ret = "CVT.W";
+ break;
+ case Mfp_TRUWD:
+ case Mfp_TRUWS:
+ ret = "TRUNC.W";
+ break;
+ case Mfp_TRULD:
+ case Mfp_TRULS:
+ ret = "TRUNC.L";
+ break;
+ case Mfp_CEILWS:
+ case Mfp_CEILWD:
+ ret = "CEIL.W";
+ break;
+ case Mfp_CEILLS:
+ case Mfp_CEILLD:
+ ret = "CEIL.L";
+ break;
+ case Mfp_CMP:
+ ret = "C.cond.d";
+ break;
+ default:
+ vpanic("showMIPSFpOp");
+ break;
+ }
+ return ret;
+}
+
+/* --------- MIPSAMode: memory address expressions. --------- */
+
+MIPSAMode *MIPSAMode_IR(Int idx, HReg base)
+{
+ MIPSAMode *am = LibVEX_Alloc(sizeof(MIPSAMode));
+ am->tag = Mam_IR;
+ am->Mam.IR.base = base;
+ am->Mam.IR.index = idx;
+
+ return am;
+}
+
+MIPSAMode *MIPSAMode_RR(HReg idx, HReg base)
+{
+ MIPSAMode *am = LibVEX_Alloc(sizeof(MIPSAMode));
+ am->tag = Mam_RR;
+ am->Mam.RR.base = base;
+ am->Mam.RR.index = idx;
+
+ return am;
+}
+
+MIPSAMode *dopyMIPSAMode(MIPSAMode * am)
+{
+ MIPSAMode* ret;
+ switch (am->tag) {
+ case Mam_IR:
+ ret = MIPSAMode_IR(am->Mam.IR.index, am->Mam.IR.base);
+ break;
+ case Mam_RR:
+ ret = MIPSAMode_RR(am->Mam.RR.index, am->Mam.RR.base);
+ break;
+ default:
+ vpanic("dopyMIPSAMode");
+ break;
+ }
+ return ret;
+}
+
+MIPSAMode *nextMIPSAModeFloat(MIPSAMode * am)
+{
+ MIPSAMode* ret;
+ switch (am->tag) {
+ case Mam_IR:
+ ret = MIPSAMode_IR(am->Mam.IR.index + 8, am->Mam.IR.base);
+ break;
+ case Mam_RR:
+ ret = MIPSAMode_RR(am->Mam.RR.index + 1, am->Mam.RR.base);
+ break;
+ default:
+ vpanic("dopyMIPSAMode");
+ break;
+ }
+ return ret;
+}
+
+MIPSAMode *nextMIPSAModeInt(MIPSAMode * am)
+{
+ MIPSAMode* ret;
+ switch (am->tag) {
+ case Mam_IR:
+ ret = MIPSAMode_IR(am->Mam.IR.index + 4, am->Mam.IR.base);
+ break;
+ case Mam_RR:
+ ret = MIPSAMode_RR(am->Mam.RR.index + 1, am->Mam.RR.base);
+ break;
+ default:
+ vpanic("dopyMIPSAMode");
+ break;
+ }
+ return ret;
+}
+
+void ppMIPSAMode(MIPSAMode * am, Bool mode64)
+{
+ switch (am->tag) {
+ case Mam_IR:
+ if (am->Mam.IR.index == 0)
+ vex_printf("0(");
+ else
+ vex_printf("%d(", (Int) am->Mam.IR.index);
+ ppHRegMIPS(am->Mam.IR.base, mode64);
+ vex_printf(")");
+ return;
+ case Mam_RR:
+ ppHRegMIPS(am->Mam.RR.base, mode64);
+ vex_printf(", ");
+ ppHRegMIPS(am->Mam.RR.index, mode64);
+ return;
+ default:
+ vpanic("ppMIPSAMode");
+ break;
+ }
+}
+
+static void addRegUsage_MIPSAMode(HRegUsage * u, MIPSAMode * am)
+{
+ switch (am->tag) {
+ case Mam_IR:
+ addHRegUse(u, HRmRead, am->Mam.IR.base);
+ return;
+ case Mam_RR:
+ addHRegUse(u, HRmRead, am->Mam.RR.base);
+ addHRegUse(u, HRmRead, am->Mam.RR.index);
+ return;
+ default:
+ vpanic("addRegUsage_MIPSAMode");
+ break;
+ }
+}
+
+static void mapRegs_MIPSAMode(HRegRemap * m, MIPSAMode * am)
+{
+ switch (am->tag) {
+ case Mam_IR:
+ am->Mam.IR.base = lookupHRegRemap(m, am->Mam.IR.base);
+ return;
+ case Mam_RR:
+ am->Mam.RR.base = lookupHRegRemap(m, am->Mam.RR.base);
+ am->Mam.RR.index = lookupHRegRemap(m, am->Mam.RR.index);
+ return;
+ default:
+ vpanic("mapRegs_MIPSAMode");
+ break;
+ }
+}
+
+/* --------- Operand, which can be a reg or a u16/s16. --------- */
+
+MIPSRH *MIPSRH_Imm(Bool syned, UShort imm16)
+{
+ MIPSRH *op = LibVEX_Alloc(sizeof(MIPSRH));
+ op->tag = Mrh_Imm;
+ op->Mrh.Imm.syned = syned;
+ op->Mrh.Imm.imm16 = imm16;
+ /* If this is a signed value, ensure it's not -32768, so that we
+ are guaranteed always to be able to negate if needed. */
+ if (syned)
+ vassert(imm16 != 0x8000);
+ vassert(syned == True || syned == False);
+ return op;
+}
+
+MIPSRH *MIPSRH_Reg(HReg reg)
+{
+ MIPSRH *op = LibVEX_Alloc(sizeof(MIPSRH));
+ op->tag = Mrh_Reg;
+ op->Mrh.Reg.reg = reg;
+ return op;
+}
+
+void ppMIPSRH(MIPSRH * op, Bool mode64)
+{
+ MIPSRHTag tag = op->tag;
+ switch (tag) {
+ case Mrh_Imm:
+ if (op->Mrh.Imm.syned)
+ vex_printf("%d", (Int) (Short) op->Mrh.Imm.imm16);
+ else
+ vex_printf("%u", (UInt) (UShort) op->Mrh.Imm.imm16);
+ return;
+ case Mrh_Reg:
+ ppHRegMIPS(op->Mrh.Reg.reg, mode64);
+ return;
+ default:
+ vpanic("ppMIPSRH");
+ break;
+ }
+}
+
+/* An MIPSRH can only be used in a "read" context (what would it mean
+ to write or modify a literal?) and so we enumerate its registers
+ accordingly. */
+static void addRegUsage_MIPSRH(HRegUsage * u, MIPSRH * op)
+{
+ switch (op->tag) {
+ case Mrh_Imm:
+ return;
+ case Mrh_Reg:
+ addHRegUse(u, HRmRead, op->Mrh.Reg.reg);
+ return;
+ default:
+ vpanic("addRegUsage_MIPSRH");
+ break;
+ }
+}
+
+static void mapRegs_MIPSRH(HRegRemap * m, MIPSRH * op)
+{
+ switch (op->tag) {
+ case Mrh_Imm:
+ return;
+ case Mrh_Reg:
+ op->Mrh.Reg.reg = lookupHRegRemap(m, op->Mrh.Reg.reg);
+ return;
+ default:
+ vpanic("mapRegs_MIPSRH");
+ break;
+ }
+}
+
+/* --------- Instructions. --------- */
+
+HChar *showMIPSUnaryOp(MIPSUnaryOp op)
+{
+ HChar* ret;
+ switch (op) {
+ case Mun_CLO:
+ ret = "clo";
+ break;
+ case Mun_CLZ:
+ ret = "clz";
+ break;
+ case Mun_NOP:
+ ret = "nop";
+ break;
+ default:
+ vpanic("showMIPSUnaryOp");
+ break;
+ }
+ return ret;
+}
+
+HChar *showMIPSAluOp(MIPSAluOp op, Bool immR)
+{
+ HChar* ret;
+ switch (op) {
+ case Malu_ADD:
+ ret = immR ? "addiu" : "addu";
+ break;
+ case Malu_SUB:
+ ret = "subu";
+ break;
+ case Malu_AND:
+ ret = immR ? "andi" : "and";
+ break;
+ case Malu_OR:
+ ret = immR ? "ori" : "or";
+ break;
+ case Malu_NOR:
+ vassert(immR == False); /*there's no nor with an immediate operand!? */
+ ret = "nor";
+ break;
+ case Malu_XOR:
+ ret = immR ? "xori" : "xor";
+ break;
+ default:
+ vpanic("showMIPSAluOp");
+ break;
+ }
+ return ret;
+}
+
+HChar *showMIPSShftOp(MIPSShftOp op, Bool immR, Bool sz32)
+{
+ HChar *ret;
+ switch (op) {
+ case Mshft_SRA:
+ ret = immR ? (sz32 ? "sar" : "dsar") : (sz32 ? "sarv" : "dsrav");
+ break;
+ case Mshft_SLL:
+ ret = immR ? (sz32 ? "sll" : "dsll") : (sz32 ? "sllv" : "dsllv");
+ break;
+ case Mshft_SRL:
+ ret = immR ? (sz32 ? "srl" : "dsrl") : (sz32 ? "srlv" : "dsrlv");
+ break;
+ default:
+ vpanic("showMIPSShftOp");
+ break;
+ }
+ return ret;
+}
+
+HChar *showMIPSMaccOp(MIPSMaccOp op, Bool variable)
+{
+ HChar *ret;
+ switch (op) {
+ case Macc_ADD:
+ ret = variable ? "madd" : "maddu";
+ break;
+ case Macc_SUB:
+ ret = variable ? "msub" : "msubu";
+ break;
+ default:
+ vpanic("showMIPSAccOp");
+ break;
+ }
+ return ret;
+}
+
+MIPSInstr *MIPSInstr_LI(HReg dst, ULong imm)
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_LI;
+ i->Min.LI.dst = dst;
+ i->Min.LI.imm = imm;
+ return i;
+}
+
+MIPSInstr *MIPSInstr_Alu(MIPSAluOp op, HReg dst, HReg srcL, MIPSRH * srcR)
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_Alu;
+ i->Min.Alu.op = op;
+ i->Min.Alu.dst = dst;
+ i->Min.Alu.srcL = srcL;
+ i->Min.Alu.srcR = srcR;
+ return i;
+}
+
+MIPSInstr *MIPSInstr_Shft(MIPSShftOp op, Bool sz32, HReg dst, HReg srcL,
+ MIPSRH * srcR)
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_Shft;
+ i->Min.Shft.op = op;
+ i->Min.Shft.sz32 = sz32;
+ i->Min.Shft.dst = dst;
+ i->Min.Shft.srcL = srcL;
+ i->Min.Shft.srcR = srcR;
+ return i;
+}
+
+MIPSInstr *MIPSInstr_Unary(MIPSUnaryOp op, HReg dst, HReg src)
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_Unary;
+ i->Min.Unary.op = op;
+ i->Min.Unary.dst = dst;
+ i->Min.Unary.src = src;
+ return i;
+}
+
+MIPSInstr *MIPSInstr_Cmp(Bool syned, Bool sz32, HReg dst, HReg srcL, HReg srcR,
+ MIPSCondCode cond)
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_Cmp;
+ i->Min.Cmp.syned = syned;
+ i->Min.Cmp.sz32 = sz32;
+ i->Min.Cmp.dst = dst;
+ i->Min.Cmp.srcL = srcL;
+ i->Min.Cmp.srcR = srcR;
+ i->Min.Cmp.cond = cond;
+ return i;
+}
+
+/* multiply */
+MIPSInstr *MIPSInstr_Mul(Bool syned, Bool wid, Bool sz32, HReg dst, HReg srcL,
+ HReg srcR)
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_Mul;
+ i->Min.Mul.syned = syned;
+ i->Min.Mul.widening = wid; /* widen=True else False */
+ i->Min.Mul.sz32 = sz32; /* True = 32 bits */
+ i->Min.Mul.dst = dst;
+ i->Min.Mul.srcL = srcL;
+ i->Min.Mul.srcR = srcR;
+ return i;
+}
+
+/* msub */
+MIPSInstr *MIPSInstr_Msub(Bool syned, HReg srcL, HReg srcR)
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_Macc;
+
+ i->Min.Macc.op = Macc_SUB;
+ i->Min.Macc.syned = syned;
+ i->Min.Macc.srcL = srcL;
+ i->Min.Macc.srcR = srcR;
+ return i;
+}
+
+/* madd */
+MIPSInstr *MIPSInstr_Madd(Bool syned, HReg srcL, HReg srcR)
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_Macc;
+
+ i->Min.Macc.op = Macc_ADD;
+ i->Min.Macc.syned = syned;
+ i->Min.Macc.srcL = srcL;
+ i->Min.Macc.srcR = srcR;
+ return i;
+}
+
+/* div */
+MIPSInstr *MIPSInstr_Div(Bool syned, Bool sz32, HReg srcL, HReg srcR)
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_Div;
+ i->Min.Div.syned = syned;
+ i->Min.Div.sz32 = sz32; /* True = 32 bits */
+ i->Min.Div.srcL = srcL;
+ i->Min.Div.srcR = srcR;
+ return i;
+}
+
+MIPSInstr *MIPSInstr_Call(MIPSCondCode cond, Addr32 target, UInt argiregs,
+ HReg src)
+{
+ UInt mask;
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_Call;
+ i->Min.Call.cond = cond;
+ i->Min.Call.target = target;
+ i->Min.Call.argiregs = argiregs;
+ i->Min.Call.src = src;
+ /* Only r4 .. r7 inclusive may be used as arg regs. Hence: */
+ mask = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
+ vassert(0 == (argiregs & ~mask));
+ return i;
+}
+
+MIPSInstr *MIPSInstr_CallAlways(MIPSCondCode cond, Addr32 target, UInt argiregs)
+{
+ UInt mask;
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_Call;
+ i->Min.Call.cond = cond;
+ i->Min.Call.target = target;
+ i->Min.Call.argiregs = argiregs;
+ /* Only r4 .. r7 inclusive may be used as arg regs. Hence: */
+ mask = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
+ vassert(0 == (argiregs & ~mask));
+ return i;
+}
+
+MIPSInstr *MIPSInstr_XDirect ( Addr32 dstGA, MIPSAMode* amPC,
+ MIPSCondCode cond, Bool toFastEP ) {
+ MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_XDirect;
+ i->Min.XDirect.dstGA = dstGA;
+ i->Min.XDirect.amPC = amPC;
+ i->Min.XDirect.cond = cond;
+ i->Min.XDirect.toFastEP = toFastEP;
+ return i;
+}
+
+MIPSInstr *MIPSInstr_XIndir ( HReg dstGA, MIPSAMode* amPC,
+ MIPSCondCode cond ) {
+ MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_XIndir;
+ i->Min.XIndir.dstGA = dstGA;
+ i->Min.XIndir.amPC = amPC;
+ i->Min.XIndir.cond = cond;
+ return i;
+}
+
+MIPSInstr *MIPSInstr_XAssisted ( HReg dstGA, MIPSAMode* amPC,
+ MIPSCondCode cond, IRJumpKind jk ) {
+ MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_XAssisted;
+ i->Min.XAssisted.dstGA = dstGA;
+ i->Min.XAssisted.amPC = amPC;
+ i->Min.XAssisted.cond = cond;
+ i->Min.XAssisted.jk = jk;
+ return i;
+}
+
+MIPSInstr *MIPSInstr_Load(UChar sz, HReg dst, MIPSAMode * src, Bool mode64)
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_Load;
+ i->Min.Load.sz = sz;
+ i->Min.Load.src = src;
+ i->Min.Load.dst = dst;
+ vassert(sz == 1 || sz == 2 || sz == 4 || sz == 8);
+
+ if (sz == 8)
+ vassert(mode64);
+ return i;
+}
+
+MIPSInstr *MIPSInstr_Store(UChar sz, MIPSAMode * dst, HReg src, Bool mode64)
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_Store;
+ i->Min.Store.sz = sz;
+ i->Min.Store.src = src;
+ i->Min.Store.dst = dst;
+ vassert(sz == 1 || sz == 2 || sz == 4 || sz == 8);
+
+ if (sz == 8)
+ vassert(mode64);
+ return i;
+}
+
+MIPSInstr *MIPSInstr_LoadL(UChar sz, HReg dst, MIPSAMode * src, Bool mode64)
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_LoadL;
+ i->Min.LoadL.sz = sz;
+ i->Min.LoadL.src = src;
+ i->Min.LoadL.dst = dst;
+ vassert(sz == 4 || sz == 8);
+
+ if (sz == 8)
+ vassert(mode64);
+ return i;
+}
+
+MIPSInstr *MIPSInstr_StoreC(UChar sz, MIPSAMode * dst, HReg src, Bool mode64)
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_StoreC;
+ i->Min.StoreC.sz = sz;
+ i->Min.StoreC.src = src;
+ i->Min.StoreC.dst = dst;
+ vassert(sz == 4 || sz == 8);
+
+ if (sz == 8)
+ vassert(mode64);
+ return i;
+}
+
+MIPSInstr *MIPSInstr_Mthi(HReg src)
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_Mthi;
+ i->Min.MtHL.src = src;
+ return i;
+}
+
+MIPSInstr *MIPSInstr_Mtlo(HReg src)
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_Mtlo;
+ i->Min.MtHL.src = src;
+ return i;
+}
+
+MIPSInstr *MIPSInstr_Mfhi(HReg dst)
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_Mfhi;
+ i->Min.MfHL.dst = dst;
+ return i;
+}
+
+MIPSInstr *MIPSInstr_Mflo(HReg dst)
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_Mflo;
+ i->Min.MfHL.dst = dst;
+ return i;
+}
+
+/* Read/Write Link Register */
+MIPSInstr *MIPSInstr_RdWrLR(Bool wrLR, HReg gpr)
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_RdWrLR;
+ i->Min.RdWrLR.wrLR = wrLR;
+ i->Min.RdWrLR.gpr = gpr;
+ return i;
+}
+
+MIPSInstr *MIPSInstr_FpLdSt(Bool isLoad, UChar sz, HReg reg, MIPSAMode * addr)
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_FpLdSt;
+ i->Min.FpLdSt.isLoad = isLoad;
+ i->Min.FpLdSt.sz = sz;
+ i->Min.FpLdSt.reg = reg;
+ i->Min.FpLdSt.addr = addr;
+ vassert(sz == 4 || sz == 8);
+ return i;
+}
+
+MIPSInstr *MIPSInstr_FpUnary(MIPSFpOp op, HReg dst, HReg src)
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_FpUnary;
+ i->Min.FpUnary.op = op;
+ i->Min.FpUnary.dst = dst;
+ i->Min.FpUnary.src = src;
+ return i;
+}
+
+MIPSInstr *MIPSInstr_FpBinary(MIPSFpOp op, HReg dst, HReg srcL, HReg srcR)
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_FpBinary;
+ i->Min.FpBinary.op = op;
+ i->Min.FpBinary.dst = dst;
+ i->Min.FpBinary.srcL = srcL;
+ i->Min.FpBinary.srcR = srcR;
+ return i;
+}
+
+MIPSInstr *MIPSInstr_FpConvert(MIPSFpOp op, HReg dst, HReg src)
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_FpConvert;
+ i->Min.FpConvert.op = op;
+ i->Min.FpConvert.dst = dst;
+ i->Min.FpConvert.src = src;
+ return i;
+
+}
+
+MIPSInstr *MIPSInstr_FpCompare(MIPSFpOp op, HReg dst, HReg srcL, HReg srcR,
+ UChar cond1)
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_FpCompare;
+ i->Min.FpCompare.op = op;
+ i->Min.FpCompare.dst = dst;
+ i->Min.FpCompare.srcL = srcL;
+ i->Min.FpCompare.srcR = srcR;
+ i->Min.FpCompare.cond1 = cond1;
+ return i;
+}
+
+MIPSInstr *MIPSInstr_MovCond(HReg dst, HReg argL, MIPSRH * argR, HReg condR,
+ MIPSCondCode cond)
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_MovCond;
+ i->Min.MovCond.dst = dst;
+ i->Min.MovCond.srcL = argL;
+ i->Min.MovCond.srcR = argR;
+ i->Min.MovCond.condR = condR;
+ i->Min.MovCond.cond = cond;
+ return i;
+}
+
+MIPSInstr *MIPSInstr_MtFCSR(HReg src)
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_MtFCSR;
+ i->Min.MtFCSR.src = src;
+ return i;
+}
+
+MIPSInstr *MIPSInstr_MfFCSR(HReg dst)
+{
+ MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_MfFCSR;
+ i->Min.MfFCSR.dst = dst;
+ return i;
+}
+
+MIPSInstr *MIPSInstr_EvCheck ( MIPSAMode* amCounter,
+ MIPSAMode* amFailAddr ) {
+ MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_EvCheck;
+ i->Min.EvCheck.amCounter = amCounter;
+ i->Min.EvCheck.amFailAddr = amFailAddr;
+ return i;
+}
+
+MIPSInstr* MIPSInstr_ProfInc ( void ) {
+ MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr));
+ i->tag = Min_ProfInc;
+ return i;
+}
+
+/* -------- Pretty Print instructions ------------- */
+static void ppLoadImm(HReg dst, ULong imm, Bool mode64)
+{
+ vex_printf("li ");
+ ppHRegMIPS(dst, mode64);
+ vex_printf(",0x%016llx", imm);
+}
+
+void ppMIPSInstr(MIPSInstr * i, Bool mode64)
+{
+ switch (i->tag) {
+ case Min_LI:
+ ppLoadImm(i->Min.LI.dst, i->Min.LI.imm, mode64);
+ break;
+ case Min_Alu: {
+ HReg r_srcL = i->Min.Alu.srcL;
+ MIPSRH *rh_srcR = ...
[truncated message content] |