|
From: <sv...@va...> - 2014-08-15 09:29:48
|
Author: sewardj
Date: Fri Aug 15 09:29:36 2014
New Revision: 2925
Log:
No functional change. Remove commented-out code copied from the
arm32 port, which is never going to get used.
Modified:
trunk/priv/host_arm64_defs.c
trunk/priv/host_arm64_isel.c
Modified: trunk/priv/host_arm64_defs.c
==============================================================================
--- trunk/priv/host_arm64_defs.c (original)
+++ trunk/priv/host_arm64_defs.c Fri Aug 15 09:29:36 2014
@@ -89,7 +89,6 @@
HReg hregARM64_X5 ( void ) { return mkHReg(5, HRcInt64, False); }
HReg hregARM64_X6 ( void ) { return mkHReg(6, HRcInt64, False); }
HReg hregARM64_X7 ( void ) { return mkHReg(7, HRcInt64, False); }
-//ZZ HReg hregARM_R8 ( void ) { return mkHReg(8, HRcInt32, False); }
HReg hregARM64_X9 ( void ) { return mkHReg(9, HRcInt64, False); }
HReg hregARM64_X10 ( void ) { return mkHReg(10, HRcInt64, False); }
HReg hregARM64_X11 ( void ) { return mkHReg(11, HRcInt64, False); }
@@ -114,21 +113,11 @@
HReg hregARM64_D11 ( void ) { return mkHReg(11, HRcFlt64, False); }
HReg hregARM64_D12 ( void ) { return mkHReg(12, HRcFlt64, False); }
HReg hregARM64_D13 ( void ) { return mkHReg(13, HRcFlt64, False); }
-//ZZ HReg hregARM_S26 ( void ) { return mkHReg(26, HRcFlt32, False); }
-//ZZ HReg hregARM_S27 ( void ) { return mkHReg(27, HRcFlt32, False); }
-//ZZ HReg hregARM_S28 ( void ) { return mkHReg(28, HRcFlt32, False); }
-//ZZ HReg hregARM_S29 ( void ) { return mkHReg(29, HRcFlt32, False); }
-//ZZ HReg hregARM_S30 ( void ) { return mkHReg(30, HRcFlt32, False); }
HReg hregARM64_Q16 ( void ) { return mkHReg(16, HRcVec128, False); }
HReg hregARM64_Q17 ( void ) { return mkHReg(17, HRcVec128, False); }
HReg hregARM64_Q18 ( void ) { return mkHReg(18, HRcVec128, False); }
HReg hregARM64_Q19 ( void ) { return mkHReg(19, HRcVec128, False); }
HReg hregARM64_Q20 ( void ) { return mkHReg(20, HRcVec128, False); }
-//ZZ HReg hregARM_Q11 ( void ) { return mkHReg(11, HRcVec128, False); }
-//ZZ HReg hregARM_Q12 ( void ) { return mkHReg(12, HRcVec128, False); }
-//ZZ HReg hregARM_Q13 ( void ) { return mkHReg(13, HRcVec128, False); }
-//ZZ HReg hregARM_Q14 ( void ) { return mkHReg(14, HRcVec128, False); }
-//ZZ HReg hregARM_Q15 ( void ) { return mkHReg(15, HRcVec128, False); }
void getAllocableRegs_ARM64 ( Int* nregs, HReg** arr )
{
@@ -322,148 +311,6 @@
}
-//ZZ /* --------- Mem AModes: Addressing Mode 2 --------- */
-//ZZ
-//ZZ ARMAMode2* ARMAMode2_RI ( HReg reg, Int simm9 ) {
-//ZZ ARMAMode2* am = LibVEX_Alloc(sizeof(ARMAMode2));
-//ZZ am->tag = ARMam2_RI;
-//ZZ am->ARMam2.RI.reg = reg;
-//ZZ am->ARMam2.RI.simm9 = simm9;
-//ZZ vassert(-255 <= simm9 && simm9 <= 255);
-//ZZ return am;
-//ZZ }
-//ZZ ARMAMode2* ARMAMode2_RR ( HReg base, HReg index ) {
-//ZZ ARMAMode2* am = LibVEX_Alloc(sizeof(ARMAMode2));
-//ZZ am->tag = ARMam2_RR;
-//ZZ am->ARMam2.RR.base = base;
-//ZZ am->ARMam2.RR.index = index;
-//ZZ return am;
-//ZZ }
-//ZZ
-//ZZ void ppARMAMode2 ( ARMAMode2* am ) {
-//ZZ switch (am->tag) {
-//ZZ case ARMam2_RI:
-//ZZ vex_printf("%d(", am->ARMam2.RI.simm9);
-//ZZ ppHRegARM(am->ARMam2.RI.reg);
-//ZZ vex_printf(")");
-//ZZ break;
-//ZZ case ARMam2_RR:
-//ZZ vex_printf("(");
-//ZZ ppHRegARM(am->ARMam2.RR.base);
-//ZZ vex_printf(",");
-//ZZ ppHRegARM(am->ARMam2.RR.index);
-//ZZ vex_printf(")");
-//ZZ break;
-//ZZ default:
-//ZZ vassert(0);
-//ZZ }
-//ZZ }
-//ZZ
-//ZZ static void addRegUsage_ARMAMode2 ( HRegUsage* u, ARMAMode2* am ) {
-//ZZ switch (am->tag) {
-//ZZ case ARMam2_RI:
-//ZZ addHRegUse(u, HRmRead, am->ARMam2.RI.reg);
-//ZZ return;
-//ZZ case ARMam2_RR:
-//ZZ // addHRegUse(u, HRmRead, am->ARMam2.RR.base);
-//ZZ // addHRegUse(u, HRmRead, am->ARMam2.RR.index);
-//ZZ // return;
-//ZZ default:
-//ZZ vpanic("addRegUsage_ARMAmode2");
-//ZZ }
-//ZZ }
-//ZZ
-//ZZ static void mapRegs_ARMAMode2 ( HRegRemap* m, ARMAMode2* am ) {
-//ZZ switch (am->tag) {
-//ZZ case ARMam2_RI:
-//ZZ am->ARMam2.RI.reg = lookupHRegRemap(m, am->ARMam2.RI.reg);
-//ZZ return;
-//ZZ case ARMam2_RR:
-//ZZ //am->ARMam2.RR.base =lookupHRegRemap(m, am->ARMam2.RR.base);
-//ZZ //am->ARMam2.RR.index = lookupHRegRemap(m, am->ARMam2.RR.index);
-//ZZ //return;
-//ZZ default:
-//ZZ vpanic("mapRegs_ARMAmode2");
-//ZZ }
-//ZZ }
-//ZZ
-//ZZ
-//ZZ /* --------- Mem AModes: Addressing Mode VFP --------- */
-//ZZ
-//ZZ ARMAModeV* mkARMAModeV ( HReg reg, Int simm11 ) {
-//ZZ ARMAModeV* am = LibVEX_Alloc(sizeof(ARMAModeV));
-//ZZ vassert(simm11 >= -1020 && simm11 <= 1020);
-//ZZ vassert(0 == (simm11 & 3));
-//ZZ am->reg = reg;
-//ZZ am->simm11 = simm11;
-//ZZ return am;
-//ZZ }
-//ZZ
-//ZZ void ppARMAModeV ( ARMAModeV* am ) {
-//ZZ vex_printf("%d(", am->simm11);
-//ZZ ppHRegARM(am->reg);
-//ZZ vex_printf(")");
-//ZZ }
-//ZZ
-//ZZ static void addRegUsage_ARMAModeV ( HRegUsage* u, ARMAModeV* am ) {
-//ZZ addHRegUse(u, HRmRead, am->reg);
-//ZZ }
-//ZZ
-//ZZ static void mapRegs_ARMAModeV ( HRegRemap* m, ARMAModeV* am ) {
-//ZZ am->reg = lookupHRegRemap(m, am->reg);
-//ZZ }
-//ZZ
-//ZZ
-//ZZ /* --------- Mem AModes: Addressing Mode Neon ------- */
-//ZZ
-//ZZ ARMAModeN *mkARMAModeN_RR ( HReg rN, HReg rM ) {
-//ZZ ARMAModeN* am = LibVEX_Alloc(sizeof(ARMAModeN));
-//ZZ am->tag = ARMamN_RR;
-//ZZ am->ARMamN.RR.rN = rN;
-//ZZ am->ARMamN.RR.rM = rM;
-//ZZ return am;
-//ZZ }
-//ZZ
-//ZZ ARMAModeN *mkARMAModeN_R ( HReg rN ) {
-//ZZ ARMAModeN* am = LibVEX_Alloc(sizeof(ARMAModeN));
-//ZZ am->tag = ARMamN_R;
-//ZZ am->ARMamN.R.rN = rN;
-//ZZ return am;
-//ZZ }
-//ZZ
-//ZZ static void addRegUsage_ARMAModeN ( HRegUsage* u, ARMAModeN* am ) {
-//ZZ if (am->tag == ARMamN_R) {
-//ZZ addHRegUse(u, HRmRead, am->ARMamN.R.rN);
-//ZZ } else {
-//ZZ addHRegUse(u, HRmRead, am->ARMamN.RR.rN);
-//ZZ addHRegUse(u, HRmRead, am->ARMamN.RR.rM);
-//ZZ }
-//ZZ }
-//ZZ
-//ZZ static void mapRegs_ARMAModeN ( HRegRemap* m, ARMAModeN* am ) {
-//ZZ if (am->tag == ARMamN_R) {
-//ZZ am->ARMamN.R.rN = lookupHRegRemap(m, am->ARMamN.R.rN);
-//ZZ } else {
-//ZZ am->ARMamN.RR.rN = lookupHRegRemap(m, am->ARMamN.RR.rN);
-//ZZ am->ARMamN.RR.rM = lookupHRegRemap(m, am->ARMamN.RR.rM);
-//ZZ }
-//ZZ }
-//ZZ
-//ZZ void ppARMAModeN ( ARMAModeN* am ) {
-//ZZ vex_printf("[");
-//ZZ if (am->tag == ARMamN_R) {
-//ZZ ppHRegARM(am->ARMamN.R.rN);
-//ZZ } else {
-//ZZ ppHRegARM(am->ARMamN.RR.rN);
-//ZZ }
-//ZZ vex_printf("]");
-//ZZ if (am->tag == ARMamN_RR) {
-//ZZ vex_printf(", ");
-//ZZ ppHRegARM(am->ARMamN.RR.rM);
-//ZZ }
-//ZZ }
-
-
/* --------- Reg or uimm12<<{0,12} operands --------- */
ARM64RIA* ARM64RIA_I12 ( UShort imm12, UChar shift ) {
@@ -636,138 +483,6 @@
}
-//ZZ /* -------- Neon Immediate operatnd --------- */
-//ZZ
-//ZZ ARMNImm* ARMNImm_TI ( UInt type, UInt imm8 ) {
-//ZZ ARMNImm* i = LibVEX_Alloc(sizeof(ARMNImm));
-//ZZ i->type = type;
-//ZZ i->imm8 = imm8;
-//ZZ return i;
-//ZZ }
-//ZZ
-//ZZ ULong ARMNImm_to_Imm64 ( ARMNImm* imm ) {
-//ZZ int i, j;
-//ZZ ULong y, x = imm->imm8;
-//ZZ switch (imm->type) {
-//ZZ case 3:
-//ZZ x = x << 8; /* fallthrough */
-//ZZ case 2:
-//ZZ x = x << 8; /* fallthrough */
-//ZZ case 1:
-//ZZ x = x << 8; /* fallthrough */
-//ZZ case 0:
-//ZZ return (x << 32) | x;
-//ZZ case 5:
-//ZZ case 6:
-//ZZ if (imm->type == 5)
-//ZZ x = x << 8;
-//ZZ else
-//ZZ x = (x << 8) | x;
-//ZZ /* fallthrough */
-//ZZ case 4:
-//ZZ x = (x << 16) | x;
-//ZZ return (x << 32) | x;
-//ZZ case 8:
-//ZZ x = (x << 8) | 0xFF;
-//ZZ /* fallthrough */
-//ZZ case 7:
-//ZZ x = (x << 8) | 0xFF;
-//ZZ return (x << 32) | x;
-//ZZ case 9:
-//ZZ x = 0;
-//ZZ for (i = 7; i >= 0; i--) {
-//ZZ y = ((ULong)imm->imm8 >> i) & 1;
-//ZZ for (j = 0; j < 8; j++) {
-//ZZ x = (x << 1) | y;
-//ZZ }
-//ZZ }
-//ZZ return x;
-//ZZ case 10:
-//ZZ x |= (x & 0x80) << 5;
-//ZZ x |= (~x & 0x40) << 5;
-//ZZ x &= 0x187F; /* 0001 1000 0111 1111 */
-//ZZ x |= (x & 0x40) << 4;
-//ZZ x |= (x & 0x40) << 3;
-//ZZ x |= (x & 0x40) << 2;
-//ZZ x |= (x & 0x40) << 1;
-//ZZ x = x << 19;
-//ZZ x = (x << 32) | x;
-//ZZ return x;
-//ZZ default:
-//ZZ vpanic("ARMNImm_to_Imm64");
-//ZZ }
-//ZZ }
-//ZZ
-//ZZ ARMNImm* Imm64_to_ARMNImm ( ULong x ) {
-//ZZ ARMNImm tmp;
-//ZZ if ((x & 0xFFFFFFFF) == (x >> 32)) {
-//ZZ if ((x & 0xFFFFFF00) == 0)
-//ZZ return ARMNImm_TI(0, x & 0xFF);
-//ZZ if ((x & 0xFFFF00FF) == 0)
-//ZZ return ARMNImm_TI(1, (x >> 8) & 0xFF);
-//ZZ if ((x & 0xFF00FFFF) == 0)
-//ZZ return ARMNImm_TI(2, (x >> 16) & 0xFF);
-//ZZ if ((x & 0x00FFFFFF) == 0)
-//ZZ return ARMNImm_TI(3, (x >> 24) & 0xFF);
-//ZZ if ((x & 0xFFFF00FF) == 0xFF)
-//ZZ return ARMNImm_TI(7, (x >> 8) & 0xFF);
-//ZZ if ((x & 0xFF00FFFF) == 0xFFFF)
-//ZZ return ARMNImm_TI(8, (x >> 16) & 0xFF);
-//ZZ if ((x & 0xFFFF) == ((x >> 16) & 0xFFFF)) {
-//ZZ if ((x & 0xFF00) == 0)
-//ZZ return ARMNImm_TI(4, x & 0xFF);
-//ZZ if ((x & 0x00FF) == 0)
-//ZZ return ARMNImm_TI(5, (x >> 8) & 0xFF);
-//ZZ if ((x & 0xFF) == ((x >> 8) & 0xFF))
-//ZZ return ARMNImm_TI(6, x & 0xFF);
-//ZZ }
-//ZZ if ((x & 0x7FFFF) == 0) {
-//ZZ tmp.type = 10;
-//ZZ tmp.imm8 = ((x >> 19) & 0x7F) | ((x >> 24) & 0x80);
-//ZZ if (ARMNImm_to_Imm64(&tmp) == x)
-//ZZ return ARMNImm_TI(tmp.type, tmp.imm8);
-//ZZ }
-//ZZ } else {
-//ZZ /* This can only be type 9. */
-//ZZ tmp.imm8 = (((x >> 56) & 1) << 7)
-//ZZ | (((x >> 48) & 1) << 6)
-//ZZ | (((x >> 40) & 1) << 5)
-//ZZ | (((x >> 32) & 1) << 4)
-//ZZ | (((x >> 24) & 1) << 3)
-//ZZ | (((x >> 16) & 1) << 2)
-//ZZ | (((x >> 8) & 1) << 1)
-//ZZ | (((x >> 0) & 1) << 0);
-//ZZ tmp.type = 9;
-//ZZ if (ARMNImm_to_Imm64 (&tmp) == x)
-//ZZ return ARMNImm_TI(tmp.type, tmp.imm8);
-//ZZ }
-//ZZ return NULL;
-//ZZ }
-//ZZ
-//ZZ void ppARMNImm (ARMNImm* i) {
-//ZZ ULong x = ARMNImm_to_Imm64(i);
-//ZZ vex_printf("0x%llX%llX", x, x);
-//ZZ }
-//ZZ
-//ZZ /* -- Register or scalar operand --- */
-//ZZ
-//ZZ ARMNRS* mkARMNRS(ARMNRS_tag tag, HReg reg, UInt index)
-//ZZ {
-//ZZ ARMNRS *p = LibVEX_Alloc(sizeof(ARMNRS));
-//ZZ p->tag = tag;
-//ZZ p->reg = reg;
-//ZZ p->index = index;
-//ZZ return p;
-//ZZ }
-//ZZ
-//ZZ void ppARMNRS(ARMNRS *p)
-//ZZ {
-//ZZ ppHRegARM(p->reg);
-//ZZ if (p->tag == ARMNRS_Scalar) {
-//ZZ vex_printf("[%d]", p->index);
-//ZZ }
-//ZZ }
-
/* --------- Instructions. --------- */
static const HChar* showARM64LogicOp ( ARM64LogicOp op ) {
@@ -1063,363 +778,6 @@
}
}
-//ZZ const HChar* showARMNeonBinOp ( ARMNeonBinOp op ) {
-//ZZ switch (op) {
-//ZZ case ARMneon_VAND: return "vand";
-//ZZ case ARMneon_VORR: return "vorr";
-//ZZ case ARMneon_VXOR: return "veor";
-//ZZ case ARMneon_VADD: return "vadd";
-//ZZ case ARMneon_VRHADDS: return "vrhadd";
-//ZZ case ARMneon_VRHADDU: return "vrhadd";
-//ZZ case ARMneon_VADDFP: return "vadd";
-//ZZ case ARMneon_VPADDFP: return "vpadd";
-//ZZ case ARMneon_VABDFP: return "vabd";
-//ZZ case ARMneon_VSUB: return "vsub";
-//ZZ case ARMneon_VSUBFP: return "vsub";
-//ZZ case ARMneon_VMINU: return "vmin";
-//ZZ case ARMneon_VMINS: return "vmin";
-//ZZ case ARMneon_VMINF: return "vmin";
-//ZZ case ARMneon_VMAXU: return "vmax";
-//ZZ case ARMneon_VMAXS: return "vmax";
-//ZZ case ARMneon_VMAXF: return "vmax";
-//ZZ case ARMneon_VQADDU: return "vqadd";
-//ZZ case ARMneon_VQADDS: return "vqadd";
-//ZZ case ARMneon_VQSUBU: return "vqsub";
-//ZZ case ARMneon_VQSUBS: return "vqsub";
-//ZZ case ARMneon_VCGTU: return "vcgt";
-//ZZ case ARMneon_VCGTS: return "vcgt";
-//ZZ case ARMneon_VCGTF: return "vcgt";
-//ZZ case ARMneon_VCGEF: return "vcgt";
-//ZZ case ARMneon_VCGEU: return "vcge";
-//ZZ case ARMneon_VCGES: return "vcge";
-//ZZ case ARMneon_VCEQ: return "vceq";
-//ZZ case ARMneon_VCEQF: return "vceq";
-//ZZ case ARMneon_VPADD: return "vpadd";
-//ZZ case ARMneon_VPMINU: return "vpmin";
-//ZZ case ARMneon_VPMINS: return "vpmin";
-//ZZ case ARMneon_VPMINF: return "vpmin";
-//ZZ case ARMneon_VPMAXU: return "vpmax";
-//ZZ case ARMneon_VPMAXS: return "vpmax";
-//ZZ case ARMneon_VPMAXF: return "vpmax";
-//ZZ case ARMneon_VEXT: return "vext";
-//ZZ case ARMneon_VMUL: return "vmuli";
-//ZZ case ARMneon_VMULLU: return "vmull";
-//ZZ case ARMneon_VMULLS: return "vmull";
-//ZZ case ARMneon_VMULP: return "vmul";
-//ZZ case ARMneon_VMULFP: return "vmul";
-//ZZ case ARMneon_VMULLP: return "vmul";
-//ZZ case ARMneon_VQDMULH: return "vqdmulh";
-//ZZ case ARMneon_VQRDMULH: return "vqrdmulh";
-//ZZ case ARMneon_VQDMULL: return "vqdmull";
-//ZZ case ARMneon_VTBL: return "vtbl";
-//ZZ case ARMneon_VRECPS: return "vrecps";
-//ZZ case ARMneon_VRSQRTS: return "vrecps";
-//ZZ /* ... */
-//ZZ default: vpanic("showARMNeonBinOp");
-//ZZ }
-//ZZ }
-//ZZ
-//ZZ const HChar* showARMNeonBinOpDataType ( ARMNeonBinOp op ) {
-//ZZ switch (op) {
-//ZZ case ARMneon_VAND:
-//ZZ case ARMneon_VORR:
-//ZZ case ARMneon_VXOR:
-//ZZ return "";
-//ZZ case ARMneon_VADD:
-//ZZ case ARMneon_VSUB:
-//ZZ case ARMneon_VEXT:
-//ZZ case ARMneon_VMUL:
-//ZZ case ARMneon_VPADD:
-//ZZ case ARMneon_VTBL:
-//ZZ case ARMneon_VCEQ:
-//ZZ return ".i";
-//ZZ case ARMneon_VRHADDU:
-//ZZ case ARMneon_VMINU:
-//ZZ case ARMneon_VMAXU:
-//ZZ case ARMneon_VQADDU:
-//ZZ case ARMneon_VQSUBU:
-//ZZ case ARMneon_VCGTU:
-//ZZ case ARMneon_VCGEU:
-//ZZ case ARMneon_VMULLU:
-//ZZ case ARMneon_VPMINU:
-//ZZ case ARMneon_VPMAXU:
-//ZZ return ".u";
-//ZZ case ARMneon_VRHADDS:
-//ZZ case ARMneon_VMINS:
-//ZZ case ARMneon_VMAXS:
-//ZZ case ARMneon_VQADDS:
-//ZZ case ARMneon_VQSUBS:
-//ZZ case ARMneon_VCGTS:
-//ZZ case ARMneon_VCGES:
-//ZZ case ARMneon_VQDMULL:
-//ZZ case ARMneon_VMULLS:
-//ZZ case ARMneon_VPMINS:
-//ZZ case ARMneon_VPMAXS:
-//ZZ case ARMneon_VQDMULH:
-//ZZ case ARMneon_VQRDMULH:
-//ZZ return ".s";
-//ZZ case ARMneon_VMULP:
-//ZZ case ARMneon_VMULLP:
-//ZZ return ".p";
-//ZZ case ARMneon_VADDFP:
-//ZZ case ARMneon_VABDFP:
-//ZZ case ARMneon_VPADDFP:
-//ZZ case ARMneon_VSUBFP:
-//ZZ case ARMneon_VMULFP:
-//ZZ case ARMneon_VMINF:
-//ZZ case ARMneon_VMAXF:
-//ZZ case ARMneon_VPMINF:
-//ZZ case ARMneon_VPMAXF:
-//ZZ case ARMneon_VCGTF:
-//ZZ case ARMneon_VCGEF:
-//ZZ case ARMneon_VCEQF:
-//ZZ case ARMneon_VRECPS:
-//ZZ case ARMneon_VRSQRTS:
-//ZZ return ".f";
-//ZZ /* ... */
-//ZZ default: vpanic("showARMNeonBinOpDataType");
-//ZZ }
-//ZZ }
-//ZZ
-//ZZ const HChar* showARMNeonUnOp ( ARMNeonUnOp op ) {
-//ZZ switch (op) {
-//ZZ case ARMneon_COPY: return "vmov";
-//ZZ case ARMneon_COPYLS: return "vmov";
-//ZZ case ARMneon_COPYLU: return "vmov";
-//ZZ case ARMneon_COPYN: return "vmov";
-//ZZ case ARMneon_COPYQNSS: return "vqmovn";
-//ZZ case ARMneon_COPYQNUS: return "vqmovun";
-//ZZ case ARMneon_COPYQNUU: return "vqmovn";
-//ZZ case ARMneon_NOT: return "vmvn";
-//ZZ case ARMneon_EQZ: return "vceq";
-//ZZ case ARMneon_CNT: return "vcnt";
-//ZZ case ARMneon_CLS: return "vcls";
-//ZZ case ARMneon_CLZ: return "vclz";
-//ZZ case ARMneon_DUP: return "vdup";
-//ZZ case ARMneon_PADDLS: return "vpaddl";
-//ZZ case ARMneon_PADDLU: return "vpaddl";
-//ZZ case ARMneon_VQSHLNSS: return "vqshl";
-//ZZ case ARMneon_VQSHLNUU: return "vqshl";
-//ZZ case ARMneon_VQSHLNUS: return "vqshlu";
-//ZZ case ARMneon_REV16: return "vrev16";
-//ZZ case ARMneon_REV32: return "vrev32";
-//ZZ case ARMneon_REV64: return "vrev64";
-//ZZ case ARMneon_VCVTFtoU: return "vcvt";
-//ZZ case ARMneon_VCVTFtoS: return "vcvt";
-//ZZ case ARMneon_VCVTUtoF: return "vcvt";
-//ZZ case ARMneon_VCVTStoF: return "vcvt";
-//ZZ case ARMneon_VCVTFtoFixedU: return "vcvt";
-//ZZ case ARMneon_VCVTFtoFixedS: return "vcvt";
-//ZZ case ARMneon_VCVTFixedUtoF: return "vcvt";
-//ZZ case ARMneon_VCVTFixedStoF: return "vcvt";
-//ZZ case ARMneon_VCVTF32toF16: return "vcvt";
-//ZZ case ARMneon_VCVTF16toF32: return "vcvt";
-//ZZ case ARMneon_VRECIP: return "vrecip";
-//ZZ case ARMneon_VRECIPF: return "vrecipf";
-//ZZ case ARMneon_VNEGF: return "vneg";
-//ZZ case ARMneon_ABS: return "vabs";
-//ZZ case ARMneon_VABSFP: return "vabsfp";
-//ZZ case ARMneon_VRSQRTEFP: return "vrsqrtefp";
-//ZZ case ARMneon_VRSQRTE: return "vrsqrte";
-//ZZ /* ... */
-//ZZ default: vpanic("showARMNeonUnOp");
-//ZZ }
-//ZZ }
-//ZZ
-//ZZ const HChar* showARMNeonUnOpDataType ( ARMNeonUnOp op ) {
-//ZZ switch (op) {
-//ZZ case ARMneon_COPY:
-//ZZ case ARMneon_NOT:
-//ZZ return "";
-//ZZ case ARMneon_COPYN:
-//ZZ case ARMneon_EQZ:
-//ZZ case ARMneon_CNT:
-//ZZ case ARMneon_DUP:
-//ZZ case ARMneon_REV16:
-//ZZ case ARMneon_REV32:
-//ZZ case ARMneon_REV64:
-//ZZ return ".i";
-//ZZ case ARMneon_COPYLU:
-//ZZ case ARMneon_PADDLU:
-//ZZ case ARMneon_COPYQNUU:
-//ZZ case ARMneon_VQSHLNUU:
-//ZZ case ARMneon_VRECIP:
-//ZZ case ARMneon_VRSQRTE:
-//ZZ return ".u";
-//ZZ case ARMneon_CLS:
-//ZZ case ARMneon_CLZ:
-//ZZ case ARMneon_COPYLS:
-//ZZ case ARMneon_PADDLS:
-//ZZ case ARMneon_COPYQNSS:
-//ZZ case ARMneon_COPYQNUS:
-//ZZ case ARMneon_VQSHLNSS:
-//ZZ case ARMneon_VQSHLNUS:
-//ZZ case ARMneon_ABS:
-//ZZ return ".s";
-//ZZ case ARMneon_VRECIPF:
-//ZZ case ARMneon_VNEGF:
-//ZZ case ARMneon_VABSFP:
-//ZZ case ARMneon_VRSQRTEFP:
-//ZZ return ".f";
-//ZZ case ARMneon_VCVTFtoU: return ".u32.f32";
-//ZZ case ARMneon_VCVTFtoS: return ".s32.f32";
-//ZZ case ARMneon_VCVTUtoF: return ".f32.u32";
-//ZZ case ARMneon_VCVTStoF: return ".f32.s32";
-//ZZ case ARMneon_VCVTF16toF32: return ".f32.f16";
-//ZZ case ARMneon_VCVTF32toF16: return ".f16.f32";
-//ZZ case ARMneon_VCVTFtoFixedU: return ".u32.f32";
-//ZZ case ARMneon_VCVTFtoFixedS: return ".s32.f32";
-//ZZ case ARMneon_VCVTFixedUtoF: return ".f32.u32";
-//ZZ case ARMneon_VCVTFixedStoF: return ".f32.s32";
-//ZZ /* ... */
-//ZZ default: vpanic("showARMNeonUnOpDataType");
-//ZZ }
-//ZZ }
-//ZZ
-//ZZ const HChar* showARMNeonUnOpS ( ARMNeonUnOpS op ) {
-//ZZ switch (op) {
-//ZZ case ARMneon_SETELEM: return "vmov";
-//ZZ case ARMneon_GETELEMU: return "vmov";
-//ZZ case ARMneon_GETELEMS: return "vmov";
-//ZZ case ARMneon_VDUP: return "vdup";
-//ZZ /* ... */
-//ZZ default: vpanic("showARMNeonUnarySOp");
-//ZZ }
-//ZZ }
-//ZZ
-//ZZ const HChar* showARMNeonUnOpSDataType ( ARMNeonUnOpS op ) {
-//ZZ switch (op) {
-//ZZ case ARMneon_SETELEM:
-//ZZ case ARMneon_VDUP:
-//ZZ return ".i";
-//ZZ case ARMneon_GETELEMS:
-//ZZ return ".s";
-//ZZ case ARMneon_GETELEMU:
-//ZZ return ".u";
-//ZZ /* ... */
-//ZZ default: vpanic("showARMNeonUnarySOp");
-//ZZ }
-//ZZ }
-//ZZ
-//ZZ const HChar* showARMNeonShiftOp ( ARMNeonShiftOp op ) {
-//ZZ switch (op) {
-//ZZ case ARMneon_VSHL: return "vshl";
-//ZZ case ARMneon_VSAL: return "vshl";
-//ZZ case ARMneon_VQSHL: return "vqshl";
-//ZZ case ARMneon_VQSAL: return "vqshl";
-//ZZ /* ... */
-//ZZ default: vpanic("showARMNeonShiftOp");
-//ZZ }
-//ZZ }
-//ZZ
-//ZZ const HChar* showARMNeonShiftOpDataType ( ARMNeonShiftOp op ) {
-//ZZ switch (op) {
-//ZZ case ARMneon_VSHL:
-//ZZ case ARMneon_VQSHL:
-//ZZ return ".u";
-//ZZ case ARMneon_VSAL:
-//ZZ case ARMneon_VQSAL:
-//ZZ return ".s";
-//ZZ /* ... */
-//ZZ default: vpanic("showARMNeonShiftOpDataType");
-//ZZ }
-//ZZ }
-//ZZ
-//ZZ const HChar* showARMNeonDualOp ( ARMNeonDualOp op ) {
-//ZZ switch (op) {
-//ZZ case ARMneon_TRN: return "vtrn";
-//ZZ case ARMneon_ZIP: return "vzip";
-//ZZ case ARMneon_UZP: return "vuzp";
-//ZZ /* ... */
-//ZZ default: vpanic("showARMNeonDualOp");
-//ZZ }
-//ZZ }
-//ZZ
-//ZZ const HChar* showARMNeonDualOpDataType ( ARMNeonDualOp op ) {
-//ZZ switch (op) {
-//ZZ case ARMneon_TRN:
-//ZZ case ARMneon_ZIP:
-//ZZ case ARMneon_UZP:
-//ZZ return "i";
-//ZZ /* ... */
-//ZZ default: vpanic("showARMNeonDualOp");
-//ZZ }
-//ZZ }
-//ZZ
-//ZZ static const HChar* showARMNeonDataSize_wrk ( UInt size )
-//ZZ {
-//ZZ switch (size) {
-//ZZ case 0: return "8";
-//ZZ case 1: return "16";
-//ZZ case 2: return "32";
-//ZZ case 3: return "64";
-//ZZ default: vpanic("showARMNeonDataSize");
-//ZZ }
-//ZZ }
-//ZZ
-//ZZ static const HChar* showARMNeonDataSize ( ARMInstr* i )
-//ZZ {
-//ZZ switch (i->tag) {
-//ZZ case ARMin_NBinary:
-//ZZ if (i->ARMin.NBinary.op == ARMneon_VEXT)
-//ZZ return "8";
-//ZZ if (i->ARMin.NBinary.op == ARMneon_VAND ||
-//ZZ i->ARMin.NBinary.op == ARMneon_VORR ||
-//ZZ i->ARMin.NBinary.op == ARMneon_VXOR)
-//ZZ return "";
-//ZZ return showARMNeonDataSize_wrk(i->ARMin.NBinary.size);
-//ZZ case ARMin_NUnary:
-//ZZ if (i->ARMin.NUnary.op == ARMneon_COPY ||
-//ZZ i->ARMin.NUnary.op == ARMneon_NOT ||
-//ZZ i->ARMin.NUnary.op == ARMneon_VCVTF32toF16||
-//ZZ i->ARMin.NUnary.op == ARMneon_VCVTF16toF32||
-//ZZ i->ARMin.NUnary.op == ARMneon_VCVTFtoFixedS ||
-//ZZ i->ARMin.NUnary.op == ARMneon_VCVTFtoFixedU ||
-//ZZ i->ARMin.NUnary.op == ARMneon_VCVTFixedStoF ||
-//ZZ i->ARMin.NUnary.op == ARMneon_VCVTFixedUtoF ||
-//ZZ i->ARMin.NUnary.op == ARMneon_VCVTFtoS ||
-//ZZ i->ARMin.NUnary.op == ARMneon_VCVTFtoU ||
-//ZZ i->ARMin.NUnary.op == ARMneon_VCVTStoF ||
-//ZZ i->ARMin.NUnary.op == ARMneon_VCVTUtoF)
-//ZZ return "";
-//ZZ if (i->ARMin.NUnary.op == ARMneon_VQSHLNSS ||
-//ZZ i->ARMin.NUnary.op == ARMneon_VQSHLNUU ||
-//ZZ i->ARMin.NUnary.op == ARMneon_VQSHLNUS) {
-//ZZ UInt size;
-//ZZ size = i->ARMin.NUnary.size;
-//ZZ if (size & 0x40)
-//ZZ return "64";
-//ZZ if (size & 0x20)
-//ZZ return "32";
-//ZZ if (size & 0x10)
-//ZZ return "16";
-//ZZ if (size & 0x08)
-//ZZ return "8";
-//ZZ vpanic("showARMNeonDataSize");
-//ZZ }
-//ZZ return showARMNeonDataSize_wrk(i->ARMin.NUnary.size);
-//ZZ case ARMin_NUnaryS:
-//ZZ if (i->ARMin.NUnaryS.op == ARMneon_VDUP) {
-//ZZ int size;
-//ZZ size = i->ARMin.NUnaryS.size;
-//ZZ if ((size & 1) == 1)
-//ZZ return "8";
-//ZZ if ((size & 3) == 2)
-//ZZ return "16";
-//ZZ if ((size & 7) == 4)
-//ZZ return "32";
-//ZZ vpanic("showARMNeonDataSize");
-//ZZ }
-//ZZ return showARMNeonDataSize_wrk(i->ARMin.NUnaryS.size);
-//ZZ case ARMin_NShift:
-//ZZ return showARMNeonDataSize_wrk(i->ARMin.NShift.size);
-//ZZ case ARMin_NDual:
-//ZZ return showARMNeonDataSize_wrk(i->ARMin.NDual.size);
-//ZZ default:
-//ZZ vpanic("showARMNeonDataSize");
-//ZZ }
-//ZZ }
-
ARM64Instr* ARM64Instr_Arith ( HReg dst,
HReg argL, ARM64RIA* argR, Bool isAdd ) {
ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
@@ -1614,11 +972,6 @@
i->tag = ARM64in_MFence;
return i;
}
-//ZZ ARM64Instr* ARM64Instr_CLREX( void ) {
-//ZZ ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
-//ZZ i->tag = ARM64in_CLREX;
-//ZZ return i;
-//ZZ }
ARM64Instr* ARM64Instr_VLdStS ( Bool isLoad, HReg sD, HReg rN, UInt uimm12 ) {
ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
i->tag = ARM64in_VLdStS;
@@ -1834,119 +1187,6 @@
vassert(amtB >= 1 && amtB <= 15);
return i;
}
-//ZZ ARMInstr* ARMInstr_VAluS ( ARMVfpOp op, HReg dst, HReg argL, HReg argR ) {
-//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
-//ZZ i->tag = ARMin_VAluS;
-//ZZ i->ARMin.VAluS.op = op;
-//ZZ i->ARMin.VAluS.dst = dst;
-//ZZ i->ARMin.VAluS.argL = argL;
-//ZZ i->ARMin.VAluS.argR = argR;
-//ZZ return i;
-//ZZ }
-//ZZ ARMInstr* ARMInstr_VCMovD ( ARMCondCode cond, HReg dst, HReg src ) {
-//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
-//ZZ i->tag = ARMin_VCMovD;
-//ZZ i->ARMin.VCMovD.cond = cond;
-//ZZ i->ARMin.VCMovD.dst = dst;
-//ZZ i->ARMin.VCMovD.src = src;
-//ZZ vassert(cond != ARMcc_AL);
-//ZZ return i;
-//ZZ }
-//ZZ ARMInstr* ARMInstr_VCMovS ( ARMCondCode cond, HReg dst, HReg src ) {
-//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
-//ZZ i->tag = ARMin_VCMovS;
-//ZZ i->ARMin.VCMovS.cond = cond;
-//ZZ i->ARMin.VCMovS.dst = dst;
-//ZZ i->ARMin.VCMovS.src = src;
-//ZZ vassert(cond != ARMcc_AL);
-//ZZ return i;
-//ZZ }
-//ZZ ARMInstr* ARMInstr_VXferD ( Bool toD, HReg dD, HReg rHi, HReg rLo ) {
-//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
-//ZZ i->tag = ARMin_VXferD;
-//ZZ i->ARMin.VXferD.toD = toD;
-//ZZ i->ARMin.VXferD.dD = dD;
-//ZZ i->ARMin.VXferD.rHi = rHi;
-//ZZ i->ARMin.VXferD.rLo = rLo;
-//ZZ return i;
-//ZZ }
-//ZZ ARMInstr* ARMInstr_VXferS ( Bool toS, HReg fD, HReg rLo ) {
-//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
-//ZZ i->tag = ARMin_VXferS;
-//ZZ i->ARMin.VXferS.toS = toS;
-//ZZ i->ARMin.VXferS.fD = fD;
-//ZZ i->ARMin.VXferS.rLo = rLo;
-//ZZ return i;
-//ZZ }
-//ZZ ARMInstr* ARMInstr_VCvtID ( Bool iToD, Bool syned,
-//ZZ HReg dst, HReg src ) {
-//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
-//ZZ i->tag = ARMin_VCvtID;
-//ZZ i->ARMin.VCvtID.iToD = iToD;
-//ZZ i->ARMin.VCvtID.syned = syned;
-//ZZ i->ARMin.VCvtID.dst = dst;
-//ZZ i->ARMin.VCvtID.src = src;
-//ZZ return i;
-//ZZ }
-//ZZ ARMInstr* ARMInstr_NLdStD ( Bool isLoad, HReg dD, ARMAModeN *amode ) {
-//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
-//ZZ i->tag = ARMin_NLdStD;
-//ZZ i->ARMin.NLdStD.isLoad = isLoad;
-//ZZ i->ARMin.NLdStD.dD = dD;
-//ZZ i->ARMin.NLdStD.amode = amode;
-//ZZ return i;
-//ZZ }
-//ZZ
-//ZZ ARMInstr* ARMInstr_NUnary ( ARMNeonUnOp op, HReg dQ, HReg nQ,
-//ZZ UInt size, Bool Q ) {
-//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
-//ZZ i->tag = ARMin_NUnary;
-//ZZ i->ARMin.NUnary.op = op;
-//ZZ i->ARMin.NUnary.src = nQ;
-//ZZ i->ARMin.NUnary.dst = dQ;
-//ZZ i->ARMin.NUnary.size = size;
-//ZZ i->ARMin.NUnary.Q = Q;
-//ZZ return i;
-//ZZ }
-//ZZ
-//ZZ ARMInstr* ARMInstr_NUnaryS ( ARMNeonUnOpS op, ARMNRS* dst, ARMNRS* src,
-//ZZ UInt size, Bool Q ) {
-//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
-//ZZ i->tag = ARMin_NUnaryS;
-//ZZ i->ARMin.NUnaryS.op = op;
-//ZZ i->ARMin.NUnaryS.src = src;
-//ZZ i->ARMin.NUnaryS.dst = dst;
-//ZZ i->ARMin.NUnaryS.size = size;
-//ZZ i->ARMin.NUnaryS.Q = Q;
-//ZZ return i;
-//ZZ }
-//ZZ
-//ZZ ARMInstr* ARMInstr_NDual ( ARMNeonDualOp op, HReg nQ, HReg mQ,
-//ZZ UInt size, Bool Q ) {
-//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
-//ZZ i->tag = ARMin_NDual;
-//ZZ i->ARMin.NDual.op = op;
-//ZZ i->ARMin.NDual.arg1 = nQ;
-//ZZ i->ARMin.NDual.arg2 = mQ;
-//ZZ i->ARMin.NDual.size = size;
-//ZZ i->ARMin.NDual.Q = Q;
-//ZZ return i;
-//ZZ }
-//ZZ
-//ZZ ARMInstr* ARMInstr_NBinary ( ARMNeonBinOp op,
-//ZZ HReg dst, HReg argL, HReg argR,
-//ZZ UInt size, Bool Q ) {
-//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
-//ZZ i->tag = ARMin_NBinary;
-//ZZ i->ARMin.NBinary.op = op;
-//ZZ i->ARMin.NBinary.argL = argL;
-//ZZ i->ARMin.NBinary.argR = argR;
-//ZZ i->ARMin.NBinary.dst = dst;
-//ZZ i->ARMin.NBinary.size = size;
-//ZZ i->ARMin.NBinary.Q = Q;
-//ZZ return i;
-//ZZ }
-
ARM64Instr* ARM64Instr_VImmQ (HReg rQ, UShort imm) {
ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
i->tag = ARM64in_VImmQ;
@@ -2013,77 +1253,6 @@
}
return i;
}
-
-//ZZ ARMInstr* ARMInstr_NCMovQ ( ARMCondCode cond, HReg dst, HReg src ) {
-//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
-//ZZ i->tag = ARMin_NCMovQ;
-//ZZ i->ARMin.NCMovQ.cond = cond;
-//ZZ i->ARMin.NCMovQ.dst = dst;
-//ZZ i->ARMin.NCMovQ.src = src;
-//ZZ vassert(cond != ARMcc_AL);
-//ZZ return i;
-//ZZ }
-//ZZ
-//ZZ ARMInstr* ARMInstr_NShift ( ARMNeonShiftOp op,
-//ZZ HReg dst, HReg argL, HReg argR,
-//ZZ UInt size, Bool Q ) {
-//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
-//ZZ i->tag = ARMin_NShift;
-//ZZ i->ARMin.NShift.op = op;
-//ZZ i->ARMin.NShift.argL = argL;
-//ZZ i->ARMin.NShift.argR = argR;
-//ZZ i->ARMin.NShift.dst = dst;
-//ZZ i->ARMin.NShift.size = size;
-//ZZ i->ARMin.NShift.Q = Q;
-//ZZ return i;
-//ZZ }
-//ZZ
-//ZZ ARMInstr* ARMInstr_NShl64 ( HReg dst, HReg src, UInt amt )
-//ZZ {
-//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
-//ZZ i->tag = ARMin_NShl64;
-//ZZ i->ARMin.NShl64.dst = dst;
-//ZZ i->ARMin.NShl64.src = src;
-//ZZ i->ARMin.NShl64.amt = amt;
-//ZZ vassert(amt >= 1 && amt <= 63);
-//ZZ return i;
-//ZZ }
-//ZZ
-//ZZ /* Helper copy-pasted from isel.c */
-//ZZ static Bool fitsIn8x4 ( UInt* u8, UInt* u4, UInt u )
-//ZZ {
-//ZZ UInt i;
-//ZZ for (i = 0; i < 16; i++) {
-//ZZ if (0 == (u & 0xFFFFFF00)) {
-//ZZ *u8 = u;
-//ZZ *u4 = i;
-//ZZ return True;
-//ZZ }
-//ZZ u = ROR32(u, 30);
-//ZZ }
-//ZZ vassert(i == 16);
-//ZZ return False;
-//ZZ }
-//ZZ
-//ZZ ARMInstr* ARMInstr_Add32 ( HReg rD, HReg rN, UInt imm32 ) {
-//ZZ UInt u8, u4;
-//ZZ ARMInstr *i = LibVEX_Alloc(sizeof(ARMInstr));
-//ZZ /* Try to generate single ADD if possible */
-//ZZ if (fitsIn8x4(&u8, &u4, imm32)) {
-//ZZ i->tag = ARMin_Alu;
-//ZZ i->ARMin.Alu.op = ARMalu_ADD;
-//ZZ i->ARMin.Alu.dst = rD;
-//ZZ i->ARMin.Alu.argL = rN;
-//ZZ i->ARMin.Alu.argR = ARMRI84_I84(u8, u4);
-//ZZ } else {
-//ZZ i->tag = ARMin_Add32;
-//ZZ i->ARMin.Add32.rD = rD;
-//ZZ i->ARMin.Add32.rN = rN;
-//ZZ i->ARMin.Add32.imm32 = imm32;
-//ZZ }
-//ZZ return i;
-//ZZ }
-
ARM64Instr* ARM64Instr_EvCheck ( ARM64AMode* amCounter,
ARM64AMode* amFailAddr ) {
ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
@@ -2093,12 +1262,6 @@
return i;
}
-//ZZ ARMInstr* ARMInstr_ProfInc ( void ) {
-//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
-//ZZ i->tag = ARMin_ProfInc;
-//ZZ return i;
-//ZZ }
-
/* ... */
void ppARM64Instr ( ARM64Instr* i ) {
@@ -2308,9 +1471,6 @@
case ARM64in_MFence:
vex_printf("(mfence) dsb sy; dmb sy; isb");
return;
-//ZZ case ARM64in_CLREX:
-//ZZ vex_printf("clrex");
-//ZZ return;
case ARM64in_VLdStS:
if (i->ARM64in.VLdStS.isLoad) {
vex_printf("ldr ");
@@ -2507,156 +1667,6 @@
vex_printf(".16b, #%u", i->ARM64in.VExtV.amtB);
return;
}
-//ZZ case ARMin_VAluS:
-//ZZ vex_printf("f%-3ss ", showARMVfpOp(i->ARMin.VAluS.op));
-//ZZ ppHRegARM(i->ARMin.VAluS.dst);
-//ZZ vex_printf(", ");
-//ZZ ppHRegARM(i->ARMin.VAluS.argL);
-//ZZ vex_printf(", ");
-//ZZ ppHRegARM(i->ARMin.VAluS.argR);
-//ZZ return;
-//ZZ case ARMin_VCMovD:
-//ZZ vex_printf("fcpyd%s ", showARMCondCode(i->ARMin.VCMovD.cond));
-//ZZ ppHRegARM(i->ARMin.VCMovD.dst);
-//ZZ vex_printf(", ");
-//ZZ ppHRegARM(i->ARMin.VCMovD.src);
-//ZZ return;
-//ZZ case ARMin_VCMovS:
-//ZZ vex_printf("fcpys%s ", showARMCondCode(i->ARMin.VCMovS.cond));
-//ZZ ppHRegARM(i->ARMin.VCMovS.dst);
-//ZZ vex_printf(", ");
-//ZZ ppHRegARM(i->ARMin.VCMovS.src);
-//ZZ return;
-//ZZ case ARMin_VXferD:
-//ZZ vex_printf("vmov ");
-//ZZ if (i->ARMin.VXferD.toD) {
-//ZZ ppHRegARM(i->ARMin.VXferD.dD);
-//ZZ vex_printf(", ");
-//ZZ ppHRegARM(i->ARMin.VXferD.rLo);
-//ZZ vex_printf(", ");
-//ZZ ppHRegARM(i->ARMin.VXferD.rHi);
-//ZZ } else {
-//ZZ ppHRegARM(i->ARMin.VXferD.rLo);
-//ZZ vex_printf(", ");
-//ZZ ppHRegARM(i->ARMin.VXferD.rHi);
-//ZZ vex_printf(", ");
-//ZZ ppHRegARM(i->ARMin.VXferD.dD);
-//ZZ }
-//ZZ return;
-//ZZ case ARMin_VXferS:
-//ZZ vex_printf("vmov ");
-//ZZ if (i->ARMin.VXferS.toS) {
-//ZZ ppHRegARM(i->ARMin.VXferS.fD);
-//ZZ vex_printf(", ");
-//ZZ ppHRegARM(i->ARMin.VXferS.rLo);
-//ZZ } else {
-//ZZ ppHRegARM(i->ARMin.VXferS.rLo);
-//ZZ vex_printf(", ");
-//ZZ ppHRegARM(i->ARMin.VXferS.fD);
-//ZZ }
-//ZZ return;
-//ZZ case ARMin_VCvtID: {
-//ZZ const HChar* nm = "?";
-//ZZ if (i->ARMin.VCvtID.iToD) {
-//ZZ nm = i->ARMin.VCvtID.syned ? "fsitod" : "fuitod";
-//ZZ } else {
-//ZZ nm = i->ARMin.VCvtID.syned ? "ftosid" : "ftouid";
-//ZZ }
-//ZZ vex_printf("%s ", nm);
-//ZZ ppHRegARM(i->ARMin.VCvtID.dst);
-//ZZ vex_printf(", ");
-//ZZ ppHRegARM(i->ARMin.VCvtID.src);
-//ZZ return;
-//ZZ }
-//ZZ case ARMin_NLdStD:
-//ZZ if (i->ARMin.NLdStD.isLoad)
-//ZZ vex_printf("vld1.32 {");
-//ZZ else
-//ZZ vex_printf("vst1.32 {");
-//ZZ ppHRegARM(i->ARMin.NLdStD.dD);
-//ZZ vex_printf("} ");
-//ZZ ppARMAModeN(i->ARMin.NLdStD.amode);
-//ZZ return;
-//ZZ case ARMin_NUnary:
-//ZZ vex_printf("%s%s%s ",
-//ZZ showARMNeonUnOp(i->ARMin.NUnary.op),
-//ZZ showARMNeonUnOpDataType(i->ARMin.NUnary.op),
-//ZZ showARMNeonDataSize(i));
-//ZZ ppHRegARM(i->ARMin.NUnary.dst);
-//ZZ vex_printf(", ");
-//ZZ ppHRegARM(i->ARMin.NUnary.src);
-//ZZ if (i->ARMin.NUnary.op == ARMneon_EQZ)
-//ZZ vex_printf(", #0");
-//ZZ if (i->ARMin.NUnary.op == ARMneon_VCVTFtoFixedS ||
-//ZZ i->ARMin.NUnary.op == ARMneon_VCVTFtoFixedU ||
-//ZZ i->ARMin.NUnary.op == ARMneon_VCVTFixedStoF ||
-//ZZ i->ARMin.NUnary.op == ARMneon_VCVTFixedUtoF) {
-//ZZ vex_printf(", #%d", i->ARMin.NUnary.size);
-//ZZ }
-//ZZ if (i->ARMin.NUnary.op == ARMneon_VQSHLNSS ||
-//ZZ i->ARMin.NUnary.op == ARMneon_VQSHLNUU ||
-//ZZ i->ARMin.NUnary.op == ARMneon_VQSHLNUS) {
-//ZZ UInt size;
-//ZZ size = i->ARMin.NUnary.size;
-//ZZ if (size & 0x40) {
-//ZZ vex_printf(", #%d", size - 64);
-//ZZ } else if (size & 0x20) {
-//ZZ vex_printf(", #%d", size - 32);
-//ZZ } else if (size & 0x10) {
-//ZZ vex_printf(", #%d", size - 16);
-//ZZ } else if (size & 0x08) {
-//ZZ vex_printf(", #%d", size - 8);
-//ZZ }
-//ZZ }
-//ZZ return;
-//ZZ case ARMin_NUnaryS:
-//ZZ vex_printf("%s%s%s ",
-//ZZ showARMNeonUnOpS(i->ARMin.NUnaryS.op),
-//ZZ showARMNeonUnOpSDataType(i->ARMin.NUnaryS.op),
-//ZZ showARMNeonDataSize(i));
-//ZZ ppARMNRS(i->ARMin.NUnaryS.dst);
-//ZZ vex_printf(", ");
-//ZZ ppARMNRS(i->ARMin.NUnaryS.src);
-//ZZ return;
-//ZZ case ARMin_NShift:
-//ZZ vex_printf("%s%s%s ",
-//ZZ showARMNeonShiftOp(i->ARMin.NShift.op),
-//ZZ showARMNeonShiftOpDataType(i->ARMin.NShift.op),
-//ZZ showARMNeonDataSize(i));
-//ZZ ppHRegARM(i->ARMin.NShift.dst);
-//ZZ vex_printf(", ");
-//ZZ ppHRegARM(i->ARMin.NShift.argL);
-//ZZ vex_printf(", ");
-//ZZ ppHRegARM(i->ARMin.NShift.argR);
-//ZZ return;
-//ZZ case ARMin_NShl64:
-//ZZ vex_printf("vshl.i64 ");
-//ZZ ppHRegARM(i->ARMin.NShl64.dst);
-//ZZ vex_printf(", ");
-//ZZ ppHRegARM(i->ARMin.NShl64.src);
-//ZZ vex_printf(", #%u", i->ARMin.NShl64.amt);
-//ZZ return;
-//ZZ case ARMin_NDual:
-//ZZ vex_printf("%s%s%s ",
-//ZZ showARMNeonDualOp(i->ARMin.NDual.op),
-//ZZ showARMNeonDualOpDataType(i->ARMin.NDual.op),
-//ZZ showARMNeonDataSize(i));
-//ZZ ppHRegARM(i->ARMin.NDual.arg1);
-//ZZ vex_printf(", ");
-//ZZ ppHRegARM(i->ARMin.NDual.arg2);
-//ZZ return;
-//ZZ case ARMin_NBinary:
-//ZZ vex_printf("%s%s%s",
-//ZZ showARMNeonBinOp(i->ARMin.NBinary.op),
-//ZZ showARMNeonBinOpDataType(i->ARMin.NBinary.op),
-//ZZ showARMNeonDataSize(i));
-//ZZ vex_printf(" ");
-//ZZ ppHRegARM(i->ARMin.NBinary.dst);
-//ZZ vex_printf(", ");
-//ZZ ppHRegARM(i->ARMin.NBinary.argL);
-//ZZ vex_printf(", ");
-//ZZ ppHRegARM(i->ARMin.NBinary.argR);
-//ZZ return;
case ARM64in_VImmQ:
vex_printf("qimm ");
ppHRegARM64(i->ARM64in.VImmQ.rQ);
@@ -2710,20 +1720,6 @@
ppHRegARM64(i->ARM64in.VMov.src);
return;
}
-//ZZ case ARMin_NCMovQ:
-//ZZ vex_printf("vmov%s ", showARMCondCode(i->ARMin.NCMovQ.cond));
-//ZZ ppHRegARM(i->ARMin.NCMovQ.dst);
-//ZZ vex_printf(", ");
-//ZZ ppHRegARM(i->ARMin.NCMovQ.src);
-//ZZ return;
-//ZZ case ARMin_Add32:
-//ZZ vex_printf("add32 ");
-//ZZ ppHRegARM(i->ARMin.Add32.rD);
-//ZZ vex_printf(", ");
-//ZZ ppHRegARM(i->ARMin.Add32.rN);
-//ZZ vex_printf(", ");
-//ZZ vex_printf("%d", i->ARMin.Add32.imm32);
-//ZZ return;
case ARM64in_EvCheck:
vex_printf("(evCheck) ldr w9,");
ppARM64AMode(i->ARM64in.EvCheck.amCounter);
@@ -2910,8 +1906,6 @@
return;
case ARM64in_MFence:
return;
-//ZZ case ARMin_CLREX:
-//ZZ return;
case ARM64in_VLdStS:
addHRegUse(u, HRmRead, i->ARM64in.VLdStS.rN);
if (i->ARM64in.VLdStS.isLoad) {
@@ -3006,79 +2000,6 @@
addHRegUse(u, HRmWrite, i->ARM64in.VExtV.dst);
addHRegUse(u, HRmRead, i->ARM64in.VExtV.srcLo);
addHRegUse(u, HRmRead, i->ARM64in.VExtV.srcHi);
-//ZZ case ARMin_VAluS:
-//ZZ addHRegUse(u, HRmWrite, i->ARMin.VAluS.dst);
-//ZZ addHRegUse(u, HRmRead, i->ARMin.VAluS.argL);
-//ZZ addHRegUse(u, HRmRead, i->ARMin.VAluS.argR);
-//ZZ return;
-//ZZ case ARMin_VUnaryS:
-//ZZ addHRegUse(u, HRmWrite, i->ARMin.VUnaryS.dst);
-//ZZ addHRegUse(u, HRmRead, i->ARMin.VUnaryS.src);
-//ZZ return;
-//ZZ case ARMin_VCMovD:
-//ZZ addHRegUse(u, HRmWrite, i->ARMin.VCMovD.dst);
-//ZZ addHRegUse(u, HRmRead, i->ARMin.VCMovD.dst);
-//ZZ addHRegUse(u, HRmRead, i->ARMin.VCMovD.src);
-//ZZ return;
-//ZZ case ARMin_VCMovS:
-//ZZ addHRegUse(u, HRmWrite, i->ARMin.VCMovS.dst);
-//ZZ addHRegUse(u, HRmRead, i->ARMin.VCMovS.dst);
-//ZZ addHRegUse(u, HRmRead, i->ARMin.VCMovS.src);
-//ZZ return;
-//ZZ case ARMin_VXferD:
-//ZZ if (i->ARMin.VXferD.toD) {
-//ZZ addHRegUse(u, HRmWrite, i->ARMin.VXferD.dD);
-//ZZ addHRegUse(u, HRmRead, i->ARMin.VXferD.rHi);
-//ZZ addHRegUse(u, HRmRead, i->ARMin.VXferD.rLo);
-//ZZ } else {
-//ZZ addHRegUse(u, HRmRead, i->ARMin.VXferD.dD);
-//ZZ addHRegUse(u, HRmWrite, i->ARMin.VXferD.rHi);
-//ZZ addHRegUse(u, HRmWrite, i->ARMin.VXferD.rLo);
-//ZZ }
-//ZZ return;
-//ZZ case ARMin_VXferS:
-//ZZ if (i->ARMin.VXferS.toS) {
-//ZZ addHRegUse(u, HRmWrite, i->ARMin.VXferS.fD);
-//ZZ addHRegUse(u, HRmRead, i->ARMin.VXferS.rLo);
-//ZZ } else {
-//ZZ addHRegUse(u, HRmRead, i->ARMin.VXferS.fD);
-//ZZ addHRegUse(u, HRmWrite, i->ARMin.VXferS.rLo);
-//ZZ }
-//ZZ return;
-//ZZ case ARMin_VCvtID:
-//ZZ addHRegUse(u, HRmWrite, i->ARMin.VCvtID.dst);
-//ZZ addHRegUse(u, HRmRead, i->ARMin.VCvtID.src);
-//ZZ return;
-//ZZ case ARMin_NLdStD:
-//ZZ if (i->ARMin.NLdStD.isLoad)
-//ZZ addHRegUse(u, HRmWrite, i->ARMin.NLdStD.dD);
-//ZZ else
-//ZZ addHRegUse(u, HRmRead, i->ARMin.NLdStD.dD);
-//ZZ addRegUsage_ARMAModeN(u, i->ARMin.NLdStD.amode);
-//ZZ return;
-//ZZ case ARMin_NUnary:
-//ZZ addHRegUse(u, HRmWrite, i->ARMin.NUnary.dst);
-//ZZ addHRegUse(u, HRmRead, i->ARMin.NUnary.src);
-//ZZ return;
-//ZZ case ARMin_NUnaryS:
-//ZZ addHRegUse(u, HRmWrite, i->ARMin.NUnaryS.dst->reg);
-//ZZ addHRegUse(u, HRmRead, i->ARMin.NUnaryS.src->reg);
-//ZZ return;
-//ZZ case ARMin_NShift:
-//ZZ addHRegUse(u, HRmWrite, i->ARMin.NShift.dst);
-//ZZ addHRegUse(u, HRmRead, i->ARMin.NShift.argL);
-//ZZ addHRegUse(u, HRmRead, i->ARMin.NShift.argR);
-//ZZ return;
-//ZZ case ARMin_NShl64:
-//ZZ addHRegUse(u, HRmWrite, i->ARMin.NShl64.dst);
-//ZZ addHRegUse(u, HRmRead, i->ARMin.NShl64.src);
-//ZZ return;
-//ZZ case ARMin_NDual:
-//ZZ addHRegUse(u, HRmWrite, i->ARMin.NDual.arg1);
-//ZZ addHRegUse(u, HRmWrite, i->ARMin.NDual.arg2);
-//ZZ addHRegUse(u, HRmRead, i->ARMin.NDual.arg1);
-//ZZ addHRegUse(u, HRmRead, i->ARMin.NDual.arg2);
-//ZZ return;
case ARM64in_VImmQ:
addHRegUse(u, HRmWrite, i->ARM64in.VImmQ.rQ);
return;
@@ -3107,22 +2028,6 @@
addHRegUse(u, HRmWrite, i->ARM64in.VMov.dst);
addHRegUse(u, HRmRead, i->ARM64in.VMov.src);
return;
-//ZZ case ARMin_NBinary:
-//ZZ addHRegUse(u, HRmWrite, i->ARMin.NBinary.dst);
-//ZZ /* TODO: sometimes dst is also being read! */
-//ZZ // XXX fix this
-//ZZ addHRegUse(u, HRmRead, i->ARMin.NBinary.argL);
-//ZZ addHRegUse(u, HRmRead, i->ARMin.NBinary.argR);
-//ZZ return;
-//ZZ case ARMin_NCMovQ:
-//ZZ addHRegUse(u, HRmWrite, i->ARMin.NCMovQ.dst);
-//ZZ addHRegUse(u, HRmRead, i->ARMin.NCMovQ.dst);
-//ZZ addHRegUse(u, HRmRead, i->ARMin.NCMovQ.src);
-//ZZ return;
-//ZZ case ARMin_Add32:
-//ZZ addHRegUse(u, HRmWrite, i->ARMin.Add32.rD);
-//ZZ addHRegUse(u, HRmRead, i->ARMin.Add32.rN);
-//ZZ return;
case ARM64in_EvCheck:
/* We expect both amodes only to mention x21, so this is in
fact pointless, since x21 isn't allocatable, but
@@ -3232,8 +2137,6 @@
return;
case ARM64in_MFence:
return;
-//ZZ case ARMin_CLREX:
-//ZZ return;
case ARM64in_VLdStS:
i->ARM64in.VLdStS.sD = lookupHRegRemap(m, i->ARM64in.VLdStS.sD);
i->ARM64in.VLdStS.rN = lookupHRegRemap(m, i->ARM64in.VLdStS.rN);
@@ -3314,60 +2217,6 @@
i->ARM64in.VExtV.srcLo = lookupHRegRemap(m, i->ARM64in.VExtV.srcLo);
i->ARM64in.VExtV.srcHi = lookupHRegRemap(m, i->ARM64in.VExtV.srcHi);
return;
-
-//ZZ case ARMin_VAluS:
-//ZZ i->ARMin.VAluS.dst = lookupHRegRemap(m, i->ARMin.VAluS.dst);
-//ZZ i->ARMin.VAluS.argL = lookupHRegRemap(m, i->ARMin.VAluS.argL);
-//ZZ i->ARMin.VAluS.argR = lookupHRegRemap(m, i->ARMin.VAluS.argR);
-//ZZ return;
-//ZZ case ARMin_VCMovD:
-//ZZ i->ARMin.VCMovD.dst = lookupHRegRemap(m, i->ARMin.VCMovD.dst);
-//ZZ i->ARMin.VCMovD.src = lookupHRegRemap(m, i->ARMin.VCMovD.src);
-//ZZ return;
-//ZZ case ARMin_VCMovS:
-//ZZ i->ARMin.VCMovS.dst = lookupHRegRemap(m, i->ARMin.VCMovS.dst);
-//ZZ i->ARMin.VCMovS.src = lookupHRegRemap(m, i->ARMin.VCMovS.src);
-//ZZ return;
-//ZZ case ARMin_VXferD:
-//ZZ i->ARMin.VXferD.dD = lookupHRegRemap(m, i->ARMin.VXferD.dD);
-//ZZ i->ARMin.VXferD.rHi = lookupHRegRemap(m, i->ARMin.VXferD.rHi);
-//ZZ i->ARMin.VXferD.rLo = lookupHRegRemap(m, i->ARMin.VXferD.rLo);
-//ZZ return;
-//ZZ case ARMin_VXferS:
-//ZZ i->ARMin.VXferS.fD = lookupHRegRemap(m, i->ARMin.VXferS.fD);
-//ZZ i->ARMin.VXferS.rLo = lookupHRegRemap(m, i->ARMin.VXferS.rLo);
-//ZZ return;
-//ZZ case ARMin_VCvtID:
-//ZZ i->ARMin.VCvtID.dst = lookupHRegRemap(m, i->ARMin.VCvtID.dst);
-//ZZ i->ARMin.VCvtID.src = lookupHRegRemap(m, i->ARMin.VCvtID.src);
-//ZZ return;
-//ZZ case ARMin_NLdStD:
-//ZZ i->ARMin.NLdStD.dD = lookupHRegRemap(m, i->ARMin.NLdStD.dD);
-//ZZ mapRegs_ARMAModeN(m, i->ARMin.NLdStD.amode);
-//ZZ return;
-//ZZ case ARMin_NUnary:
-//ZZ i->ARMin.NUnary.src = lookupHRegRemap(m, i->ARMin.NUnary.src);
-//ZZ i->ARMin.NUnary.dst = lookupHRegRemap(m, i->ARMin.NUnary.dst);
-//ZZ return;
-//ZZ case ARMin_NUnaryS:
-//ZZ i->ARMin.NUnaryS.src->reg
-//ZZ = lookupHRegRemap(m, i->ARMin.NUnaryS.src->reg);
-//ZZ i->ARMin.NUnaryS.dst->reg
-//ZZ = lookupHRegRemap(m, i->ARMin.NUnaryS.dst->reg);
-//ZZ return;
-//ZZ case ARMin_NShift:
-//ZZ i->ARMin.NShift.dst = lookupHRegRemap(m, i->ARMin.NShift.dst);
-//ZZ i->ARMin.NShift.argL = lookupHRegRemap(m, i->ARMin.NShift.argL);
-//ZZ i->ARMin.NShift.argR = lookupHRegRemap(m, i->ARMin.NShift.argR);
-//ZZ return;
-//ZZ case ARMin_NShl64:
-//ZZ i->ARMin.NShl64.dst = lookupHRegRemap(m, i->ARMin.NShl64.dst);
-//ZZ i->ARMin.NShl64.src = lookupHRegRemap(m, i->ARMin.NShl64.src);
-//ZZ return;
-//ZZ case ARMin_NDual:
-//ZZ i->ARMin.NDual.arg1 = lookupHRegRemap(m, i->ARMin.NDual.arg1);
-//ZZ i->ARMin.NDual.arg2 = lookupHRegRemap(m, i->ARMin.NDual.arg2);
-//ZZ return;
case ARM64in_VImmQ:
i->ARM64in.VImmQ.rQ = lookupHRegRemap(m, i->ARM64in.VImmQ.rQ);
return;
@@ -3407,20 +2256,6 @@
i->ARM64in.VMov.dst = lookupHRegRemap(m, i->ARM64in.VMov.dst);
i->ARM64in.VMov.src = lookupHRegRemap(m, i->ARM64in.VMov.src);
return;
-
-//ZZ case ARMin_NBinary:
-//ZZ i->ARMin.NBinary.argL = lookupHRegRemap(m, i->ARMin.NBinary.argL);
-//ZZ i->ARMin.NBinary.argR = lookupHRegRemap(m, i->ARMin.NBinary.argR);
-//ZZ i->ARMin.NBinary.dst = lookupHRegRemap(m, i->ARMin.NBinary.dst);
-//ZZ return;
-//ZZ case ARMin_NCMovQ:
-//ZZ i->ARMin.NCMovQ.dst = lookupHRegRemap(m, i->ARMin.NCMovQ.dst);
-//ZZ i->ARMin.NCMovQ.src = lookupHRegRemap(m, i->ARMin.NCMovQ.src);
-//ZZ return;
-//ZZ case ARMin_Add32:
-//ZZ i->ARMin.Add32.rD = lookupHRegRemap(m, i->ARMin.Add32.rD);
-//ZZ i->ARMin.Add32.rN = lookupHRegRemap(m, i->ARMin.Add32.rN);
-//ZZ return;
case ARM64in_EvCheck:
/* We expect both amodes only to mention x21, so this is in
fact pointless, since x21 isn't allocatable, but
@@ -3902,42 +2737,6 @@
#define XX______(zzx7,zzx6) \
((((zzx7) & 0xF) << 28) | (((zzx6) & 0xF) << 24))
*/
-//ZZ /* Generate a skeletal insn that involves an a RI84 shifter operand.
-//ZZ Returns a word which is all zeroes apart from bits 25 and 11..0,
-//ZZ since it is those that encode the shifter operand (at least to the
-//ZZ extent that we care about it.) */
-//ZZ static UInt skeletal_RI84 ( ARMRI84* ri )
-//ZZ {
-//ZZ UInt instr;
-//ZZ if (ri->tag == ARMri84_I84) {
-//ZZ vassert(0 == (ri->ARMri84.I84.imm4 & ~0x0F));
-//ZZ vassert(0 == (ri->ARMri84.I84.imm8 & ~0xFF));
-//ZZ instr = 1 << 25;
-//ZZ instr |= (ri->ARMri84.I84.imm4 << 8);
-//ZZ instr |= ri->ARMri84.I84.imm8;
-//ZZ } else {
-//ZZ instr = 0 << 25;
-//ZZ instr |= iregNo(ri->ARMri84.R.reg);
-//ZZ }
-//ZZ return instr;
-//ZZ }
-//ZZ
-//ZZ /* Ditto for RI5. Resulting word is zeroes apart from bit 4 and bits
-//ZZ 11..7. */
-//ZZ static UInt skeletal_RI5 ( ARMRI5* ri )
-//ZZ {
-//ZZ UInt instr;
-//ZZ if (ri->tag == ARMri5_I5) {
-//ZZ UInt imm5 = ri->ARMri5.I5.imm5;
-//ZZ vassert(imm5 >= 1 && imm5 <= 31);
-//ZZ instr = 0 << 4;
-//ZZ instr |= imm5 << 7;
-//ZZ } else {
-//ZZ instr = 1 << 4;
-//ZZ instr |= iregNo(ri->ARMri5.R.reg) << 8;
-//ZZ }
-//ZZ return instr;
-//ZZ }
/* Get an immediate into a register, using only that register. */
@@ -4483,126 +3282,6 @@
i->ARM64in.LdSt8.amode );
goto done;
}
-//ZZ case ARMin_LdSt32:
-//ZZ case ARMin_LdSt8U: {
-//ZZ UInt bL, bB;
-//ZZ HReg rD;
-//ZZ ARMAMode1* am;
-//ZZ ARMCondCode cc;
-//ZZ if (i->tag == ARMin_LdSt32) {
-//ZZ bB = 0;
-//ZZ bL = i->ARMin.LdSt32.isLoad ? 1 : 0;
-//ZZ am = i->ARMin.LdSt32.amode;
-//ZZ rD = i->ARMin.LdSt32.rD;
-//ZZ cc = i->ARMin.LdSt32.cc;
-//ZZ } else {
-//ZZ bB = 1;
-//ZZ bL = i->ARMin.LdSt8U.isLoad ? 1 : 0;
-//ZZ am = i->ARMin.LdSt8U.amode;
-//ZZ rD = i->ARMin.LdSt8U.rD;
-//ZZ cc = i->ARMin.LdSt8U.cc;
-//ZZ }
-//ZZ vassert(cc != ARMcc_NV);
-//ZZ if (am->tag == ARMam1_RI) {
-//ZZ Int simm12;
-//ZZ UInt instr, bP;
-//ZZ if (am->ARMam1.RI.simm13 < 0) {
-//ZZ bP = 0;
-//ZZ simm12 = -am->ARMam1.RI.simm13;
-//ZZ } else {
-//ZZ bP = 1;
-//ZZ simm12 = am->ARMam1.RI.simm13;
-//ZZ }
-//ZZ vassert(simm12 >= 0 && simm12 <= 4095);
-//ZZ instr = XXXXX___(cc,X0101,BITS4(bP,bB,0,bL),
-//ZZ iregNo(am->ARMam1.RI.reg),
-//ZZ iregNo(rD));
-//ZZ instr |= simm12;
-//ZZ *p++ = instr;
-//ZZ goto done;
-//ZZ } else {
-//ZZ // RR case
-//ZZ goto bad;
-//ZZ }
-//ZZ }
-//ZZ case ARMin_LdSt16: {
-//ZZ HReg rD = i->ARMin.LdSt16.rD;
-//ZZ UInt bS = i->ARMin.LdSt16.signedLoad ? 1 : 0;
-//ZZ UInt bL = i->ARMin.LdSt16.isLoad ? 1 : 0;
-//ZZ ARMAMode2* am = i->ARMin.LdSt16.amode;
-//ZZ ARMCondCode cc = i->ARMin.LdSt16.cc;
-//ZZ vassert(cc != ARMcc_NV);
-//ZZ if (am->tag == ARMam2_RI) {
-//ZZ HReg rN = am->ARMam2.RI.reg;
-//ZZ Int simm8;
-//ZZ UInt bP, imm8hi, imm8lo, instr;
-//ZZ if (am->ARMam2.RI.simm9 < 0) {
-//ZZ bP = 0;
-//ZZ simm8 = -am->ARMam2.RI.simm9;
-//ZZ } else {
-//ZZ bP = 1;
-//ZZ simm8 = am->ARMam2.RI.simm9;
-//ZZ }
-//ZZ vassert(simm8 >= 0 && simm8 <= 255);
-//ZZ imm8hi = (simm8 >> 4) & 0xF;
-//ZZ imm8lo = simm8 & 0xF;
-//ZZ vassert(!(bL == 0 && bS == 1)); // "! signed store"
-//ZZ /**/ if (bL == 0 && bS == 0) {
-//ZZ // strh
-//ZZ instr = XXXXXXXX(cc,X0001, BITS4(bP,1,0,0), iregNo(rN),
-//ZZ iregNo(rD), imm8hi, X1011, imm8lo);
-//ZZ *p++ = instr;
-//ZZ goto done;
-//ZZ }
-//ZZ else if (bL == 1 && bS == 0) {
-//ZZ // ldrh
-//ZZ instr = XXXXXXXX(cc,X0001, BITS4(bP,1,0,1), iregNo(rN),
-//ZZ iregNo(rD), imm8hi, X1011, imm8lo);
-//ZZ *p++ = instr;
-//ZZ goto done;
-//ZZ }
-//ZZ else if (bL == 1 && bS == 1) {
-//ZZ // ldrsh
-//ZZ instr = XXXXXXXX(cc,X0001, BITS4(bP,1,0,1), iregNo(rN),
-//ZZ iregNo(rD), imm8hi, X1111, imm8lo);
-//ZZ *p++ = instr;
-//ZZ goto done;
-//ZZ }
-//ZZ else vassert(0); // ill-constructed insn
-//ZZ } else {
-//ZZ // RR case
-//ZZ goto bad;
-//ZZ }
-//ZZ }
-//ZZ case ARMin_Ld8S: {
-//ZZ HReg rD = i->ARMin.Ld8S.rD;
-//ZZ ARMAMode2* am = i->ARMin.Ld8S.amode;
-//ZZ ARMCondCode cc = i->ARMin.Ld8S.cc;
-//ZZ vassert(cc != ARMcc_NV);
-//ZZ if (am->tag == ARMam2_RI) {
-//ZZ HReg rN = am->ARMam2.RI.reg;
-//ZZ Int simm8;
-//ZZ UInt bP, imm8hi, imm8lo, instr;
-//ZZ if (am->ARMam2.RI.simm9 < 0) {
-//ZZ bP = 0;
-//ZZ simm8 = -am->ARMam2.RI.simm9;
-//ZZ } else {
-//ZZ bP = 1;
-//ZZ simm8 = am->ARMam2.RI.simm9;
-//ZZ }
-//ZZ vassert(simm8 >= 0 && simm8 <= 255);
-//ZZ imm8hi = (simm8 >> 4) & 0xF;
-//ZZ imm8lo = simm8 & 0xF;
-//ZZ // ldrsb
-//ZZ instr = XXXXXXXX(cc,X0001, BITS4(bP,1,0,1), iregNo(rN),
-//ZZ iregNo(rD), imm8hi, X1101, imm8lo);
-//ZZ *p++ = instr;
-//ZZ goto done;
-//ZZ } else {
-//ZZ // RR case
-//ZZ goto bad;
-//ZZ }
-//ZZ }
case ARM64in_XDirect: {
/* NB: what goes on here has to be very closely coordinated
@@ -6074,878 +4753,6 @@
X000000 | (imm4 << 1), vN, vD);
goto done;
}
-//ZZ case ARMin_VAluS: {
-//ZZ UInt dN = fregNo(i->ARMin.VAluS.argL);
-//ZZ UInt dD = fregNo(i->ARMin.VAluS.dst);
-//ZZ UInt dM = fregNo(i->ARMin.VAluS.argR);
-//ZZ UInt bN = dN & 1;
-//ZZ UInt bD = dD & 1;
-//ZZ UInt bM = dM & 1;
-//ZZ UInt pqrs = X1111; /* undefined */
-//ZZ switch (i->ARMin.VAluS.op) {
-//ZZ case ARMvfp_ADD: pqrs = X0110; break;
-//ZZ case ARMvfp_SUB: pqrs = X0111; break;
-//ZZ case ARMvfp_MUL: pqrs = X0100; break;
-//ZZ case ARMvfp_DIV: pqrs = X1000; break;
-//ZZ default: goto bad;
-//ZZ }
-//ZZ vassert(pqrs != X1111);
-//ZZ UInt bP = (pqrs >> 3) & 1;
-//ZZ UInt bQ = (pqrs >> 2) & 1;
-//ZZ UInt bR = (pqrs >> 1) & 1;
-//ZZ UInt bS = (pqrs >> 0) & 1;
-//ZZ UInt insn = XXXXXXXX(0xE, X1110, BITS4(bP,bD,bQ,bR),
-//ZZ (dN >> 1), (dD >> 1),
-//ZZ X1010, BITS4(bN,bS,bM,0), (dM >> 1));
-//ZZ *p++ = insn;
-//ZZ goto done;
-//ZZ }
-//ZZ case ARMin_VUnaryS: {
-//ZZ UInt fD = fregNo(i->ARMin.VUnaryS.dst);
-//ZZ UInt fM = fregNo(i->ARMin.VUnaryS.src);
-//ZZ UInt insn = 0;
-//ZZ switch (i->ARMin.VUnaryS.op) {
-//ZZ case ARMvfpu_COPY:
-//ZZ insn = XXXXXXXX(0xE, X1110, BITS4(1,(fD & 1),1,1), X0000,
-//ZZ (fD >> 1), X1010, BITS4(0,1,(fM & 1),0),
-//ZZ (fM >> 1));
-//ZZ break;
-//ZZ case ARMvfpu_ABS:
-//ZZ insn = XXXXXXXX(0xE, X1110, BITS4(1,(fD & 1),1,1), X0000,
-//ZZ (fD >> 1), X1010, BITS4(1,1,(fM & 1),0),
-//ZZ (fM >> 1));
-//ZZ break;
-//ZZ case ARMvfpu_NEG:
-//ZZ insn = XXXXXXXX(0xE, X1110, BITS4(1,(fD & 1),1,1), X0001,
-//ZZ (fD >> 1), X1010, BITS4(0,1,(fM & 1),0),
-//ZZ (fM >> 1));
-//ZZ break;
-//ZZ case ARMvfpu_SQRT:
-//ZZ insn = XXXXXXXX(0xE, X1110, BITS4(1,(fD & 1),1,1), X0001,
-//ZZ (fD >> 1), X1010, BITS4(1,1,(fM & 1),0),
-//ZZ (fM >> 1));
-//ZZ break;
-//ZZ default:
-//ZZ goto bad;
-//ZZ }
-//ZZ *p++ = insn;
-//ZZ goto done;
-//ZZ }
-//ZZ case ARMin_VCMovD: {
-//ZZ UInt cc = (UInt)i->ARMin.VCMovD.cond;
-//ZZ UInt dD = dregNo(i->ARMin.VCMovD.dst);
-//ZZ UInt dM = dregNo(i->ARMin.VCMovD.src);
-//ZZ vassert(cc < 16 && cc != ARMcc_AL);
-//ZZ UInt insn = XXXXXXXX(cc, X1110,X1011,X0000,dD,X1011,X0100,dM);
-//ZZ *p++ = insn;
-//ZZ goto done;
-//ZZ }
-//ZZ case ARMin_VCMovS: {
-//ZZ UInt cc = (UInt)i->ARMin.VCMovS.cond;
-//ZZ UInt fD = fregNo(i->ARMin.VCMovS.dst);
-//ZZ UInt fM = fregNo(i->ARMin.VCMovS.src);
-//ZZ vassert(cc < 16 && cc != ARMcc_AL);
-//ZZ UInt insn = XXXXXXXX(cc, X1110, BITS4(1,(fD & 1),1,1),
-//ZZ X0000,(fD >> 1),X1010,
-//ZZ BITS4(0,1,(fM & 1),0), (fM >> 1));
-//ZZ *p++ = insn;
-//ZZ goto done;
-//ZZ }
-//ZZ case ARMin_VXferD: {
-//ZZ UInt dD = dregNo(i->ARMin.VXferD.dD);
-//ZZ UInt rHi = iregNo(i->ARMin.VXferD.rHi);
-//ZZ UInt rLo = iregNo(i->ARMin.VXferD.rLo);
-//ZZ /* vmov dD, rLo, rHi is
-//ZZ E C 4 rHi rLo B (0,0,dD[4],1) dD[3:0]
-//ZZ vmov rLo, rHi, dD is
-//ZZ ...
[truncated message content] |