|
From: <sv...@va...> - 2005-06-28 21:00:09
|
Author: cerion
Date: 2005-06-28 21:59:18 +0100 (Tue, 28 Jun 2005)
New Revision: 1227
Log:
Reshuffled host-ppc32 AltiVec integer insns
Added some AltiVec fp insns and CMov
Modified:
trunk/priv/host-ppc32/hdefs.c
trunk/priv/host-ppc32/hdefs.h
Modified: trunk/priv/host-ppc32/hdefs.c
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- trunk/priv/host-ppc32/hdefs.c 2005-06-28 18:06:23 UTC (rev 1226)
+++ trunk/priv/host-ppc32/hdefs.c 2005-06-28 20:59:18 UTC (rev 1227)
@@ -513,119 +513,77 @@
=20
HChar* showPPC32AvOp ( PPC32AvOp op ) {
switch (op) {
- /* mov */
- case Pav_MOV: return "vmr";
+
+ /* Unary */
+ case Pav_MOV: return "vmr"; /* Mov */
=20
- /* Bitwise */
- case Pav_AND: return "vand";
+ case Pav_AND: return "vand"; /* Bitwise */
case Pav_OR: return "vor";
case Pav_XOR: return "vxor";
case Pav_NOT: return "vnot";
=20
+ case Pav_UNPCKH8S: return "vupkhsb"; /* Unpack */
+ case Pav_UNPCKH16S: return "vupkhsh";
+ case Pav_UNPCKL8S: return "vupklsb";
+ case Pav_UNPCKL16S: return "vupklsh";
+ case Pav_UNPCKHPIX: return "vupkhpx";
+ case Pav_UNPCKLPIX: return "vupklpx";
+
/* Integer binary */
- case Pav_ADD8UM: return "vaddubm";
- case Pav_ADD16UM: return "vadduhm";
- case Pav_ADD32UM: return "vadduwm";
- case Pav_ADD8US: return "vaddubs";
- case Pav_ADD16US: return "vadduhs";
- case Pav_ADD32US: return "vadduws";
- case Pav_ADD8SS: return "vaddsbs";
- case Pav_ADD16SS: return "vaddshs";
- case Pav_ADD32SS: return "vaddsws";
+ case Pav_ADDUM: return "vaddu_m"; // b,h,w
+ case Pav_ADDUS: return "vaddu_s"; // b,h,w
+ case Pav_ADDSS: return "vadds_s"; // b,h,w
=20
- case Pav_SUB8UM: return "vsububm";
- case Pav_SUB16UM: return "vsubuhm";
- case Pav_SUB32UM: return "vsubuwm";
- case Pav_SUB8US: return "vsububs";
- case Pav_SUB16US: return "vsubuhs";
- case Pav_SUB32US: return "vsubuws";
- case Pav_SUB8SS: return "vsubsbs";
- case Pav_SUB16SS: return "vsubshs";
- case Pav_SUB32SS: return "vsubsws";
+ case Pav_SUBUM: return "vsubu_m"; // b,h,w
+ case Pav_SUBUS: return "vsubu_s"; // b,h,w
+ case Pav_SUBSS: return "vsubs_s"; // b,h,w
=20
- case Pav_OMUL8U: return "vmuloub";
- case Pav_OMUL16U: return "vmulouh";
- case Pav_OMUL8S: return "vmulosb";
- case Pav_OMUL16S: return "vmulosh";
- case Pav_EMUL8U: return "vmuleub";
- case Pav_EMUL16U: return "vmuleuh";
- case Pav_EMUL8S: return "vmulesb";
- case Pav_EMUL16S: return "vmulesh";
+ case Pav_OMULU: return "vmulou"; // b,h
+ case Pav_OMULS: return "vmulos"; // b,h
+ case Pav_EMULU: return "vmuleu"; // b,h
+ case Pav_EMULS: return "vmules"; // b,h
=20
- case Pav_AVG8U: return "vavgub";
- case Pav_AVG16U: return "vavguh";
- case Pav_AVG32U: return "vavguw";
- case Pav_AVG8S: return "vavgsb";
- case Pav_AVG16S: return "vavgsh";
- case Pav_AVG32S: return "vavgsw";
+ case Pav_AVGU: return "vavgu"; // b,h,w
+ case Pav_AVGS: return "vavgs"; // b,h,w
=20
- case Pav_MAX8U: return "vmaxub";
- case Pav_MAX16U: return "vmaxuh";
- case Pav_MAX32U: return "vmaxuw";
- case Pav_MAX8S: return "vmaxsb";
- case Pav_MAX16S: return "vmaxsh";
- case Pav_MAX32S: return "vmaxsw";
+ case Pav_MAXU: return "vmaxu"; // b,h,w
+ case Pav_MAXS: return "vmaxs"; // b,h,w
=20
- case Pav_MIN8U: return "vminub";
- case Pav_MIN16U: return "vminuh";
- case Pav_MIN32U: return "vminuw";
- case Pav_MIN8S: return "vminsb";
- case Pav_MIN16S: return "vminsh";
- case Pav_MIN32S: return "vminsw";
+ case Pav_MINU: return "vminu"; // b,h,w
+ case Pav_MINS: return "vmins"; // b,h,w
=20
/* Compare (always affects CR field 6) */
- case Pav_CMPEQ8U: return "vcmpequb";
- case Pav_CMPEQ16U: return "vcmpequh";
- case Pav_CMPEQ32U: return "vcmpequw";
- case Pav_CMPGT8U: return "vcmpgtub";
- case Pav_CMPGT16U: return "vcmpgtuh";
- case Pav_CMPGT32U: return "vcmpgtuw";
- case Pav_CMPGT8S: return "vcmpgtsb";
- case Pav_CMPGT16S: return "vcmpgtsh";
- case Pav_CMPGT32S: return "vcmpgtsw";
+ case Pav_CMPEQU: return "vcmpequ"; // b,h,w
+ case Pav_CMPGTU: return "vcmpgtu"; // b,h,w
+ case Pav_CMPGTS: return "vcmpgts"; // b,h,w
=20
/* Shift */
- case Pav_SHL8: return "vslb";
- case Pav_SHL16: return "vslh";
- case Pav_SHL32: return "vslw";
- case Pav_SHL128: return "vsl";
- case Pav_SHR8: return "vsrb";
- case Pav_SHR16: return "vsrh";
- case Pav_SHR32: return "vsrw";
- case Pav_SHR128: return "vsr";
- case Pav_SAR8: return "vsrab";
- case Pav_SAR16: return "vsrah";
- case Pav_SAR32: return "vsraw";
- case Pav_ROTL8: return "vrlb";
- case Pav_ROTL16: return "vrlh";
- case Pav_ROTL32: return "vrlw";
- =20
+ case Pav_SHL: return "vsl"; // ' ',b,h,w
+ case Pav_SHR: return "vsr"; // ' ',b,h,w
+ case Pav_SAR: return "vsra"; // b,h,w
+ case Pav_ROTL: return "vrl"; // b,h,w
+
/* Pack */
- case Pav_PACKU16UM: return "vpkuhum";
- case Pav_PACKU32UM: return "vpkuwum";
- case Pav_PACKU16US: return "vpkuhus";
- case Pav_PACKU32US: return "vpkuwus";
- case Pav_PACKS16US: return "vpkshus";
- case Pav_PACKS32US: return "vpkswus";
- case Pav_PACKS16SS: return "vpkshss";
- case Pav_PACKS32SS: return "vpkswss";
+ case Pav_PACKUUM: return "vpku_um"; // h,w
+ case Pav_PACKUUS: return "vpku_us"; // h,w
+ case Pav_PACKSUS: return "vpks_us"; // h,w
+ case Pav_PACKSSS: return "vpks_ss"; // h,w
case Pav_PACKPXL: return "vpkpx";
=20
- /* Unpack (srcL ignored) */
- case Pav_UNPCKH8S: return "vupkhsb";
- case Pav_UNPCKH16S: return "vupkhsh";
- case Pav_UNPCKL8S: return "vupklsb";
- case Pav_UNPCKL16S: return "vupklsh";
- case Pav_UNPCKHPIX: return "vupkhpx";
- case Pav_UNPCKLPIX: return "vupklpx";
- =20
/* Merge */
- case Pav_MRG8HI: return "vmrghb";
- case Pav_MRG16HI: return "vmrghh";
- case Pav_MRG32HI: return "vmrghw";
- case Pav_MRG8LO: return "vmrglb";
- case Pav_MRG16LO: return "vmrglh";
- case Pav_MRG32LO: return "vmrglw";
+ case Pav_MRGHI: return "vmrgh"; // b,h,w
+ case Pav_MRGLO: return "vmrgl"; // b,h,w
+
+
+ /* Floating Point Binary */
+ case Pav_ADDF: return "vaddfp";
+ case Pav_SUBF: return "vsubfp";
+ case Pav_MULF: return "vmaddfp";
+ case Pav_MAXF: return "vmaxfp";
+ case Pav_MINF: return "vminfp";
+ case Pav_CMPEQF: return "vcmpeqfp";
+ case Pav_CMPGTF: return "vcmpgtfp";
+ case Pav_CMPGEF: return "vcmpgefp";
=20
default: vpanic("showPPC32AvOp");
}
@@ -873,6 +831,42 @@
i->Pin.AvBinary.srcR =3D srcR;
return i;
}
+PPC32Instr* PPC32Instr_AvBin8x16 ( PPC32AvOp op, HReg dst, HReg srcL, HR=
eg srcR ) {
+ PPC32Instr* i =3D LibVEX_Alloc(sizeof(PPC32Instr));
+ i->tag =3D Pin_AvBin8x16;
+ i->Pin.AvBin8x16.op =3D op;
+ i->Pin.AvBin8x16.dst =3D dst;
+ i->Pin.AvBin8x16.srcL =3D srcL;
+ i->Pin.AvBin8x16.srcR =3D srcR;
+ return i;
+}
+PPC32Instr* PPC32Instr_AvBin16x8 ( PPC32AvOp op, HReg dst, HReg srcL, HR=
eg srcR ) {
+ PPC32Instr* i =3D LibVEX_Alloc(sizeof(PPC32Instr));
+ i->tag =3D Pin_AvBin16x8;
+ i->Pin.AvBin16x8.op =3D op;
+ i->Pin.AvBin16x8.dst =3D dst;
+ i->Pin.AvBin16x8.srcL =3D srcL;
+ i->Pin.AvBin16x8.srcR =3D srcR;
+ return i;
+}
+PPC32Instr* PPC32Instr_AvBin32x4 ( PPC32AvOp op, HReg dst, HReg srcL, HR=
eg srcR ) {
+ PPC32Instr* i =3D LibVEX_Alloc(sizeof(PPC32Instr));
+ i->tag =3D Pin_AvBin32x4;
+ i->Pin.AvBin32x4.op =3D op;
+ i->Pin.AvBin32x4.dst =3D dst;
+ i->Pin.AvBin32x4.srcL =3D srcL;
+ i->Pin.AvBin32x4.srcR =3D srcR;
+ return i;
+}
+PPC32Instr* PPC32Instr_AvBin32Fx4 ( PPC32AvOp op, HReg dst, HReg srcL, H=
Reg srcR ) {
+ PPC32Instr* i =3D LibVEX_Alloc(sizeof(PPC32Instr));
+ i->tag =3D Pin_AvBin32Fx4;
+ i->Pin.AvBin32Fx4.op =3D op;
+ i->Pin.AvBin32Fx4.dst =3D dst;
+ i->Pin.AvBin32Fx4.srcL =3D srcL;
+ i->Pin.AvBin32Fx4.srcR =3D srcR;
+ return i;
+}
PPC32Instr* PPC32Instr_AvPerm ( HReg ctl, HReg dst, HReg srcL, HReg srcR=
) {
PPC32Instr* i =3D LibVEX_Alloc(sizeof(PPC32Instr));
i->tag =3D Pin_AvPerm;
@@ -908,6 +902,15 @@
i->Pin.AvSplat.src =3D src;
return i;
}
+PPC32Instr* PPC32Instr_AvCMov ( PPC32CondCode cond, HReg dst, HReg src )=
{
+ PPC32Instr* i =3D LibVEX_Alloc(sizeof(PPC32Instr));
+ i->tag =3D Pin_AvCMov;
+ i->Pin.AvCMov.cond =3D cond;
+ i->Pin.AvCMov.dst =3D dst;
+ i->Pin.AvCMov.src =3D src;
+ vassert(cond.test !=3D Pct_ALWAYS);
+ return i;
+}
PPC32Instr* PPC32Instr_AvLdVSCR ( HReg src ) {
PPC32Instr* i =3D LibVEX_Alloc(sizeof(PPC32Instr));
i->tag =3D Pin_AvLdVSCR;
@@ -1258,6 +1261,38 @@
vex_printf(",");
ppHRegPPC32(i->Pin.AvBinary.srcR);
return;
+ case Pin_AvBin8x16:
+ vex_printf("%s(b) ", showPPC32AvOp(i->Pin.AvBin8x16.op));
+ ppHRegPPC32(i->Pin.AvBin8x16.dst);
+ vex_printf(",");
+ ppHRegPPC32(i->Pin.AvBin8x16.srcL);
+ vex_printf(",");
+ ppHRegPPC32(i->Pin.AvBin8x16.srcR);
+ return;
+ case Pin_AvBin16x8:
+ vex_printf("%s(h) ", showPPC32AvOp(i->Pin.AvBin16x8.op));
+ ppHRegPPC32(i->Pin.AvBin16x8.dst);
+ vex_printf(",");
+ ppHRegPPC32(i->Pin.AvBin16x8.srcL);
+ vex_printf(",");
+ ppHRegPPC32(i->Pin.AvBin16x8.srcR);
+ return;
+ case Pin_AvBin32x4:
+ vex_printf("%s(w) ", showPPC32AvOp(i->Pin.AvBin32x4.op));
+ ppHRegPPC32(i->Pin.AvBin32x4.dst);
+ vex_printf(",");
+ ppHRegPPC32(i->Pin.AvBin32x4.srcL);
+ vex_printf(",");
+ ppHRegPPC32(i->Pin.AvBin32x4.srcR);
+ return;
+ case Pin_AvBin32Fx4:
+ vex_printf("%s ", showPPC32AvOp(i->Pin.AvBin32Fx4.op));
+ ppHRegPPC32(i->Pin.AvBin32Fx4.dst);
+ vex_printf(",");
+ ppHRegPPC32(i->Pin.AvBin32Fx4.srcL);
+ vex_printf(",");
+ ppHRegPPC32(i->Pin.AvBin32Fx4.srcR);
+ return;
case Pin_AvPerm:
vex_printf("vperm ");
ppHRegPPC32(i->Pin.AvPerm.dst);
@@ -1306,6 +1341,25 @@
return;
}
=20
+ case Pin_AvCMov:
+ vex_printf("avcmov (%s) ", showPPC32CondCode(i->Pin.AvCMov.cond));
+ ppHRegPPC32(i->Pin.AvCMov.dst);
+ vex_printf(",");
+ ppHRegPPC32(i->Pin.AvCMov.src);
+ vex_printf(": ");
+ vex_printf("if (v_dst !=3D v_src) { ");
+ if (i->Pin.AvCMov.cond.test !=3D Pct_ALWAYS) {
+ vex_printf("if (%%crf0.%s) { ", showPPC32CondCode(i->Pin.AvCMov=
.cond));
+ }
+ vex_printf("vmr ");
+ ppHRegPPC32(i->Pin.AvCMov.dst);
+ vex_printf(",");
+ ppHRegPPC32(i->Pin.AvCMov.src);
+      if (i->Pin.AvCMov.cond.test !=3D Pct_ALWAYS)
+ vex_printf(" }");
+ vex_printf(" }");
+ return;
+
case Pin_AvLdVSCR:
vex_printf("mtvscr ");
ppHRegPPC32(i->Pin.AvLdVSCR.src);
@@ -1482,6 +1536,28 @@
addHRegUse(u, HRmRead, i->Pin.AvBinary.srcL);
addHRegUse(u, HRmRead, i->Pin.AvBinary.srcR);
return;
+      case Pin_AvBin8x16:
+         addHRegUse(u, HRmWrite, i->Pin.AvBin8x16.dst);
+         addHRegUse(u, HRmRead, i->Pin.AvBin8x16.srcL);
+         addHRegUse(u, HRmRead, i->Pin.AvBin8x16.srcR);
+         return;
+      case Pin_AvBin16x8:
+         addHRegUse(u, HRmWrite, i->Pin.AvBin16x8.dst);
+         addHRegUse(u, HRmRead, i->Pin.AvBin16x8.srcL);
+         addHRegUse(u, HRmRead, i->Pin.AvBin16x8.srcR);
+         return;
+      case Pin_AvBin32x4:
+         addHRegUse(u, HRmWrite, i->Pin.AvBin32x4.dst);
+         addHRegUse(u, HRmRead, i->Pin.AvBin32x4.srcL);
+         addHRegUse(u, HRmRead, i->Pin.AvBin32x4.srcR);
+         return;
+      case Pin_AvBin32Fx4:
+         addHRegUse(u, HRmWrite, i->Pin.AvBin32Fx4.dst);
+         addHRegUse(u, HRmRead, i->Pin.AvBin32Fx4.srcL);
+         addHRegUse(u, HRmRead, i->Pin.AvBin32Fx4.srcR);
+         /* Pav_MULF is only emitted under Pin_AvBin32Fx4, and its
+            emit sequence uses reg 29 as a scratch; annotate the
+            clobber here rather than under Pin_AvBin32x4.
+            XXX the scratch is vector reg 29 (vB =3D 29 in the
+            emitter), not GPR29 -- confirm the right HReg helper. */
+         if (i->Pin.AvBin32Fx4.op =3D=3D Pav_MULF)
+            addHRegUse(u, HRmWrite, hregPPC32_GPR29());
+         return;
case Pin_AvPerm:
addHRegUse(u, HRmWrite, i->Pin.AvPerm.dst);
addHRegUse(u, HRmRead, i->Pin.AvPerm.ctl);
@@ -1503,6 +1579,10 @@
addHRegUse(u, HRmWrite, i->Pin.AvSplat.dst);
addRegUsage_PPC32RI(u, i->Pin.AvSplat.src);
return;
+ case Pin_AvCMov:
+ addHRegUse(u, HRmModify, i->Pin.AvCMov.dst);
+ addHRegUse(u, HRmRead, i->Pin.AvCMov.src);
+ return;
case Pin_AvLdVSCR:
addHRegUse(u, HRmRead, i->Pin.AvLdVSCR.src);
return;
@@ -1629,6 +1709,26 @@
mapReg(m, &i->Pin.AvBinary.srcL);
mapReg(m, &i->Pin.AvBinary.srcR);
return;
+ case Pin_AvBin8x16:
+ mapReg(m, &i->Pin.AvBin8x16.dst);
+ mapReg(m, &i->Pin.AvBin8x16.srcL);
+ mapReg(m, &i->Pin.AvBin8x16.srcR);
+ return;
+ case Pin_AvBin16x8:
+ mapReg(m, &i->Pin.AvBin16x8.dst);
+ mapReg(m, &i->Pin.AvBin16x8.srcL);
+ mapReg(m, &i->Pin.AvBin16x8.srcR);
+ return;
+ case Pin_AvBin32x4:
+ mapReg(m, &i->Pin.AvBin32x4.dst);
+ mapReg(m, &i->Pin.AvBin32x4.srcL);
+ mapReg(m, &i->Pin.AvBin32x4.srcR);
+ return;
+ case Pin_AvBin32Fx4:
+ mapReg(m, &i->Pin.AvBin32Fx4.dst);
+ mapReg(m, &i->Pin.AvBin32Fx4.srcL);
+ mapReg(m, &i->Pin.AvBin32Fx4.srcR);
+ return;
case Pin_AvPerm:
mapReg(m, &i->Pin.AvPerm.dst);
mapReg(m, &i->Pin.AvPerm.srcL);
@@ -1650,6 +1750,10 @@
mapReg(m, &i->Pin.AvSplat.dst);
mapRegs_PPC32RI(m, i->Pin.AvSplat.src);
return;
+ case Pin_AvCMov:
+ mapReg(m, &i->Pin.AvCMov.dst);
+ mapReg(m, &i->Pin.AvCMov.src);
+ return;
case Pin_AvLdVSCR:
mapReg(m, &i->Pin.AvLdVSCR.src);
return;
@@ -1997,10 +2101,24 @@
vassert(r2 < 0x20);
vassert(r3 < 0x20);
vassert(opc2 < 0x800);
- theInstr =3D ((opc1<<26) | (r1<<21) | (r2<<16) | (r3<<11) | (opc2<<1)=
);
+ theInstr =3D ((opc1<<26) | (r1<<21) | (r2<<16) | (r3<<11) | opc2);
return emit32(p, theInstr);
}
=20
+static UChar* mkFormVXR ( UChar* p, UInt opc1, UInt r1, UInt r2, UInt Rc=
,
+ UInt r3, UInt opc2 )
+{
+ UInt theInstr;
+ vassert(opc1 < 0x40);
+ vassert(r1 < 0x20);
+ vassert(r2 < 0x20);
+ vassert(r3 < 0x20);
+ vassert(Rc < 0x2);
+ vassert(opc2 < 0x400);
+ theInstr =3D ((opc1<<26) | (r1<<21) | (r2<<16) | (r3<<11) | (Rc<<10) =
| opc2);
+ return emit32(p, theInstr);
+}
+
static UChar* mkFormVA ( UChar* p, UInt opc1, UInt r1, UInt r2,
UInt r3, UInt r4, UInt opc2 )
{
@@ -2669,108 +2787,107 @@
case Pav_OR: opc2 =3D 1156; break; // vor
case Pav_XOR: opc2 =3D 1120; break; // vxor
=20
- /* Add */
- case Pav_ADD8UM: opc2 =3D 0; break; // vaddubm
- case Pav_ADD16UM: opc2 =3D 64; break; // vadduhm
- case Pav_ADD32UM: opc2 =3D 128; break; // vadduwm
- case Pav_ADD8US: opc2 =3D 512; break; // vaddubs
- case Pav_ADD16US: opc2 =3D 576; break; // vadduhs
- case Pav_ADD32US: opc2 =3D 640; break; // vadduws
- case Pav_ADD8SS: opc2 =3D 768; break; // vaddsbs
- case Pav_ADD16SS: opc2 =3D 832; break; // vaddshs
- case Pav_ADD32SS: opc2 =3D 896; break; // vaddsws
+ /* Shift */
+ case Pav_SHL: opc2 =3D 452; break; // vsl
+ case Pav_SHR: opc2 =3D 708; break; // vsr
=20
- /* Subtract */
- case Pav_SUB8UM: opc2 =3D 1024; break; // vsububm
- case Pav_SUB16UM: opc2 =3D 1088; break; // vsubuhm
- case Pav_SUB32UM: opc2 =3D 1152; break; // vsubuwm
- case Pav_SUB8US: opc2 =3D 1536; break; // vsububs
- case Pav_SUB16US: opc2 =3D 1600; break; // vsubuhs
- case Pav_SUB32US: opc2 =3D 1664; break; // vsubuws
- case Pav_SUB8SS: opc2 =3D 1792; break; // vsubsbs
- case Pav_SUB16SS: opc2 =3D 1856; break; // vsubshs
- case Pav_SUB32SS: opc2 =3D 1920; break; // vsubsws
+ default:
+ goto bad;
+ }
+ p =3D mkFormVX( p, 4, v_dst, v_srcL, v_srcR, opc2 );
+ goto done;
+ }
=20
- /* Multiply odd/even */
- case Pav_OMUL8U: opc2 =3D 8; break; // vmuloub
- case Pav_OMUL16U: opc2 =3D 72; break; // vmulouh
- case Pav_OMUL8S: opc2 =3D 264; break; // vmulosb
- case Pav_OMUL16S: opc2 =3D 328; break; // vmulosh
- case Pav_EMUL8U: opc2 =3D 520; break; // vmuleub
- case Pav_EMUL16U: opc2 =3D 584; break; // vmuleuh
- case Pav_EMUL8S: opc2 =3D 776; break; // vmulesb
- case Pav_EMUL16S: opc2 =3D 840; break; // vmulesh
+ case Pin_AvBin8x16: {
+ UInt v_dst =3D vregNo(i->Pin.AvBin8x16.dst);
+ UInt v_srcL =3D vregNo(i->Pin.AvBin8x16.srcL);
+ UInt v_srcR =3D vregNo(i->Pin.AvBin8x16.srcR);
+ UInt opc2;
+ switch (i->Pin.AvBin8x16.op) {
=20
- /* Average */
- case Pav_AVG8U: opc2 =3D 1026; break; // vavgub
- case Pav_AVG16U: opc2 =3D 1090; break; // vavguh
- case Pav_AVG32U: opc2 =3D 1154; break; // vavguw
- case Pav_AVG8S: opc2 =3D 1282; break; // vavgsb
- case Pav_AVG16S: opc2 =3D 1346; break; // vavgsh
- case Pav_AVG32S: opc2 =3D 1410; break; // vavgsw
+ case Pav_ADDUM: opc2 =3D 0; break; // vaddubm
+ case Pav_ADDUS: opc2 =3D 512; break; // vaddubs
+ case Pav_ADDSS: opc2 =3D 768; break; // vaddsbs
=20
- /* Maximum */
- case Pav_MAX8U: opc2 =3D 2; break; // vmaxub
- case Pav_MAX16U: opc2 =3D 66; break; // vmaxuh
- case Pav_MAX32U: opc2 =3D 130; break; // vmaxuw
- case Pav_MAX8S: opc2 =3D 258; break; // vmaxsb
- case Pav_MAX16S: opc2 =3D 322; break; // vmaxsh
- case Pav_MAX32S: opc2 =3D 386; break; // vmaxsw
+ case Pav_SUBUM: opc2 =3D 1024; break; // vsububm
+ case Pav_SUBUS: opc2 =3D 1536; break; // vsububs
+ case Pav_SUBSS: opc2 =3D 1792; break; // vsubsbs
=20
- /* Minimum */
- case Pav_MIN8U: opc2 =3D 514; break; // vminub
- case Pav_MIN16U: opc2 =3D 578; break; // vminuh
- case Pav_MIN32U: opc2 =3D 642; break; // vminuw
- case Pav_MIN8S: opc2 =3D 770; break; // vminsb
- case Pav_MIN16S: opc2 =3D 834; break; // vminsh
- case Pav_MIN32S: opc2 =3D 898; break; // vminsw
+ case Pav_OMULU: opc2 =3D 8; break; // vmuloub
+ case Pav_OMULS: opc2 =3D 264; break; // vmulosb
+ case Pav_EMULU: opc2 =3D 520; break; // vmuleub
+ case Pav_EMULS: opc2 =3D 776; break; // vmulesb
=20
- /* Compare (always affects CR field 6) */
- /* XXX: Actually VXR-Form, but Rc always 0, so keep life easy... *=
/
- case Pav_CMPEQ8U: opc2 =3D 6; break; // vcmpequb
- case Pav_CMPEQ16U: opc2 =3D 70; break; // vcmpequh
- case Pav_CMPEQ32U: opc2 =3D 134; break; // vcmpequw
- case Pav_CMPGT8U: opc2 =3D 518; break; // vcmpgtub
- case Pav_CMPGT16U: opc2 =3D 582; break; // vcmpgtuh
- case Pav_CMPGT32U: opc2 =3D 646; break; // vcmpgtuw
- case Pav_CMPGT8S: opc2 =3D 774; break; // vcmpgtsb
- case Pav_CMPGT16S: opc2 =3D 838; break; // vcmpgtsh
- case Pav_CMPGT32S: opc2 =3D 902; break; // vcmpgtsw
+ case Pav_AVGU: opc2 =3D 1026; break; // vavgub
+ case Pav_AVGS: opc2 =3D 1282; break; // vavgsb
+ case Pav_MAXU: opc2 =3D 2; break; // vmaxub
+ case Pav_MAXS: opc2 =3D 258; break; // vmaxsb
+ case Pav_MINU: opc2 =3D 514; break; // vminub
+ case Pav_MINS: opc2 =3D 770; break; // vminsb
=20
- /* Shift */
- case Pav_SHL8: opc2 =3D 260; break; // vslb
- case Pav_SHL16: opc2 =3D 324; break; // vslh
- case Pav_SHL32: opc2 =3D 388; break; // vslw
- case Pav_SHL128: opc2 =3D 452; break; // vsl
- case Pav_SHR8: opc2 =3D 516; break; // vsrb
- case Pav_SHR16: opc2 =3D 580; break; // vsrh
- case Pav_SHR32: opc2 =3D 644; break; // vsrw
- case Pav_SHR128: opc2 =3D 708; break; // vsr
- case Pav_SAR8: opc2 =3D 772; break; // vsrab
- case Pav_SAR16: opc2 =3D 836; break; // vsrah
- case Pav_SAR32: opc2 =3D 900; break; // vsraw
- case Pav_ROTL8: opc2 =3D 4; break; // vrlb
- case Pav_ROTL16: opc2 =3D 68; break; // vrlh
- case Pav_ROTL32: opc2 =3D 132; break; // vrlw
+ case Pav_CMPEQU: opc2 =3D 6; break; // vcmpequb
+ case Pav_CMPGTU: opc2 =3D 518; break; // vcmpgtub
+ case Pav_CMPGTS: opc2 =3D 774; break; // vcmpgtsb
=20
- /* Pack */
- case Pav_PACKU16UM: opc2 =3D 14; break; // vpkuhum
- case Pav_PACKU32UM: opc2 =3D 78; break; // vpkuwum
- case Pav_PACKU16US: opc2 =3D 142; break; // vpkuhus
- case Pav_PACKU32US: opc2 =3D 206; break; // vpkuwus
- case Pav_PACKS16US: opc2 =3D 270; break; // vpkshus
- case Pav_PACKS32US: opc2 =3D 334; break; // vpkswus
- case Pav_PACKS16SS: opc2 =3D 398; break; // vpkshss
- case Pav_PACKS32SS: opc2 =3D 462; break; // vpkswss
+ case Pav_SHL: opc2 =3D 260; break; // vslb
+ case Pav_SHR: opc2 =3D 516; break; // vsrb
+ case Pav_SAR: opc2 =3D 772; break; // vsrab
+ case Pav_ROTL: opc2 =3D 4; break; // vrlb
+
+ case Pav_MRGHI: opc2 =3D 12; break; // vmrghb
+ case Pav_MRGLO: opc2 =3D 268; break; // vmrglb
+
+ default:
+ goto bad;
+ }
+ p =3D mkFormVX( p, 4, v_dst, v_srcL, v_srcR, opc2 );
+ goto done;
+ }
+
+ case Pin_AvBin16x8: {
+ UInt v_dst =3D vregNo(i->Pin.AvBin16x8.dst);
+ UInt v_srcL =3D vregNo(i->Pin.AvBin16x8.srcL);
+ UInt v_srcR =3D vregNo(i->Pin.AvBin16x8.srcR);
+ UInt opc2;
+ switch (i->Pin.AvBin16x8.op) {
+
+ case Pav_ADDUM: opc2 =3D 64; break; // vadduhm
+ case Pav_ADDUS: opc2 =3D 576; break; // vadduhs
+ case Pav_ADDSS: opc2 =3D 832; break; // vaddshs
+
+ case Pav_SUBUM: opc2 =3D 1088; break; // vsubuhm
+ case Pav_SUBUS: opc2 =3D 1600; break; // vsubuhs
+ case Pav_SUBSS: opc2 =3D 1856; break; // vsubshs
+
+ case Pav_OMULU: opc2 =3D 72; break; // vmulouh
+ case Pav_OMULS: opc2 =3D 328; break; // vmulosh
+ case Pav_EMULU: opc2 =3D 584; break; // vmuleuh
+ case Pav_EMULS: opc2 =3D 840; break; // vmulesh
+
+ case Pav_AVGU: opc2 =3D 1090; break; // vavguh
+ case Pav_AVGS: opc2 =3D 1346; break; // vavgsh
+ case Pav_MAXU: opc2 =3D 66; break; // vmaxuh
+ case Pav_MAXS: opc2 =3D 322; break; // vmaxsh
+ case Pav_MINS: opc2 =3D 834; break; // vminsh
+ case Pav_MINU: opc2 =3D 578; break; // vminuh
+
+ case Pav_CMPEQU: opc2 =3D 70; break; // vcmpequh
+ case Pav_CMPGTU: opc2 =3D 582; break; // vcmpgtuh
+ case Pav_CMPGTS: opc2 =3D 838; break; // vcmpgtsh
+
+ case Pav_SHL: opc2 =3D 324; break; // vslh
+ case Pav_SHR: opc2 =3D 580; break; // vsrh
+ case Pav_SAR: opc2 =3D 836; break; // vsrah
+ case Pav_ROTL: opc2 =3D 68; break; // vrlh
+
+ case Pav_PACKUUM: opc2 =3D 14; break; // vpkuhum
+ case Pav_PACKUUS: opc2 =3D 142; break; // vpkuhus
+ case Pav_PACKSUS: opc2 =3D 270; break; // vpkshus
+ case Pav_PACKSSS: opc2 =3D 398; break; // vpkshss
case Pav_PACKPXL: opc2 =3D 782; break; // vpkpx
=20
- /* Merge */
- case Pav_MRG8HI: opc2 =3D 12; break; // vmrghb
- case Pav_MRG16HI: opc2 =3D 76; break; // vmrghh
- case Pav_MRG32HI: opc2 =3D 140; break; // vmrghw
- case Pav_MRG8LO: opc2 =3D 268; break; // vmrglb
- case Pav_MRG16LO: opc2 =3D 332; break; // vmrglh
- case Pav_MRG32LO: opc2 =3D 396; break; // vmrglw
+ case Pav_MRGHI: opc2 =3D 76; break; // vmrghh
+ case Pav_MRGLO: opc2 =3D 332; break; // vmrglh
=20
default:
goto bad;
@@ -2779,6 +2896,108 @@
goto done;
}
=20
+ case Pin_AvBin32x4: {
+ UInt v_dst =3D vregNo(i->Pin.AvBin32x4.dst);
+ UInt v_srcL =3D vregNo(i->Pin.AvBin32x4.srcL);
+ UInt v_srcR =3D vregNo(i->Pin.AvBin32x4.srcR);
+ UInt opc2;
+ switch (i->Pin.AvBin32x4.op) {
+
+ case Pav_ADDUM: opc2 =3D 128; break; // vadduwm
+ case Pav_ADDUS: opc2 =3D 640; break; // vadduws
+ case Pav_ADDSS: opc2 =3D 896; break; // vaddsws
+
+ case Pav_SUBUM: opc2 =3D 1152; break; // vsubuwm
+ case Pav_SUBUS: opc2 =3D 1664; break; // vsubuws
+ case Pav_SUBSS: opc2 =3D 1920; break; // vsubsws
+
+ case Pav_AVGU: opc2 =3D 1154; break; // vavguw
+ case Pav_AVGS: opc2 =3D 1410; break; // vavgsw
+
+ case Pav_MAXU: opc2 =3D 130; break; // vmaxuw
+ case Pav_MAXS: opc2 =3D 386; break; // vmaxsw
+
+ case Pav_MINS: opc2 =3D 898; break; // vminsw
+ case Pav_MINU: opc2 =3D 642; break; // vminuw
+
+ case Pav_CMPEQU: opc2 =3D 134; break; // vcmpequw
+ case Pav_CMPGTS: opc2 =3D 902; break; // vcmpgtsw
+ case Pav_CMPGTU: opc2 =3D 646; break; // vcmpgtuw
+
+ case Pav_SHL: opc2 =3D 388; break; // vslw
+ case Pav_SHR: opc2 =3D 644; break; // vsrw
+ case Pav_SAR: opc2 =3D 900; break; // vsraw
+ case Pav_ROTL: opc2 =3D 132; break; // vrlw
+
+ case Pav_PACKUUM: opc2 =3D 78; break; // vpkuwum
+ case Pav_PACKUUS: opc2 =3D 206; break; // vpkuwus
+ case Pav_PACKSUS: opc2 =3D 334; break; // vpkswus
+ case Pav_PACKSSS: opc2 =3D 462; break; // vpkswss
+
+ case Pav_MRGHI: opc2 =3D 140; break; // vmrghw
+ case Pav_MRGLO: opc2 =3D 396; break; // vmrglw
+
+ default:
+ goto bad;
+ }
+ p =3D mkFormVX( p, 4, v_dst, v_srcL, v_srcR, opc2 );
+ goto done;
+ }
+
+ case Pin_AvBin32Fx4: {
+ UInt v_dst =3D vregNo(i->Pin.AvBin32Fx4.dst);
+ UInt v_srcL =3D vregNo(i->Pin.AvBin32Fx4.srcL);
+ UInt v_srcR =3D vregNo(i->Pin.AvBin32Fx4.srcR);
+ switch (i->Pin.AvBin32Fx4.op) {
+
+ case Pav_ADDF:
+ p =3D mkFormVX( p, 4, v_dst, v_srcL, v_srcR, 10 ); // vaddfp
+ break;
+ case Pav_SUBF:
+ p =3D mkFormVX( p, 4, v_dst, v_srcL, v_srcR, 74 ); // vsubfp
+ break;
+ case Pav_MAXF:
+ p =3D mkFormVX( p, 4, v_dst, v_srcL, v_srcR, 1034 ); // vmaxfp
+ break;
+ case Pav_MINF:
+ p =3D mkFormVX( p, 4, v_dst, v_srcL, v_srcR, 1098 ); // vminfp
+ break;
+
+ case Pav_MULF: {
+ /* Make a vmulfp from a vmaddfp:
+ load -0.0 (0x8000_0000) to each 32-bit word of vB
+ this makes the add a noop.
+ */
+ UInt vB =3D 29; // XXX: Using r29 for temp
+ UInt zero_simm =3D 0x80000000;
+
+ // Better way to load zero_imm?
+ // vspltisw vB,0x1F (0x1F =3D> each word of vB)
+ p =3D mkFormVX( p, 4, vB, zero_simm, 0, 908 );
+
+ // vslw vB,vB,vB (each word of vB =3D (0x1F << 0x1F) =3D 0x800=
00000
+ p =3D mkFormVX( p, 4, vB, vB, vB, 388 );
+
+ // Finally, do the multiply:
+ p =3D mkFormVA( p, 4, v_dst, v_srcL, vB, v_srcR, 46 );
+ break;
+ }
+ case Pav_CMPEQF:
+ p =3D mkFormVXR( p, 4, v_dst, v_srcL, v_srcR, 0, 198 ); // vcmp=
eqfp
+ break;
+ case Pav_CMPGTF:
+ p =3D mkFormVXR( p, 4, v_dst, v_srcL, v_srcR, 1, 710 ); // vcmp=
gtfp
+ break;
+ case Pav_CMPGEF:
+ p =3D mkFormVXR( p, 4, v_dst, v_srcL, v_srcR, 1, 454 ); // vcmp=
gefp
+ break;
+
+ default:
+ goto bad;
+ }
+ goto done;
+ }
+
case Pin_AvPerm: { // vperm
UInt v_ctl =3D vregNo(i->Pin.AvPerm.ctl);
UInt v_dst =3D vregNo(i->Pin.AvPerm.dst);
@@ -2814,7 +3033,7 @@
vassert(sz =3D=3D 8 || sz =3D=3D 16 || sz =3D=3D 32);
=20
if (i->Pin.AvSplat.src->tag =3D=3D Pri_Imm) {
- opc2 =3D (sz =3D=3D 8) ? 780 : (sz =3D=3D 16) ? 844 : 908; // 8,16,32
+ opc2 =3D (sz =3D=3D 8) ? 780 : (sz =3D=3D 16) ? 844 : 908; //=
8,16,32
simm_src =3D i->Pin.AvSplat.src->Pri.Imm.imm32;
p =3D mkFormVX( p, 4, v_dst, simm_src, 0, opc2 );
} else { // Pri_Reg
@@ -2825,6 +3044,25 @@
goto done;
}
=20
+ case Pin_AvCMov: {
+ UInt v_dst =3D vregNo(i->Pin.AvCMov.dst);
+ UInt v_src =3D vregNo(i->Pin.AvCMov.src);
+ PPC32CondCode cc =3D i->Pin.AvCMov.cond;
+
+ if (v_dst =3D=3D v_src) goto done;
+ =20
+ vassert(cc.test !=3D Pct_ALWAYS);
+
+ /* jmp fwds 2 insns if !condition */
+ if (cc.test !=3D Pct_ALWAYS) {
+ /* bc !ct,cf,n_bytes>>2 */
+ p =3D mkFormB(p, invertCondTest(cc.test), cc.flag, 8>>2, 0, 0);
+ }
+ /* vmr */
+ p =3D mkFormVX( p, 4, v_dst, v_src, v_src, 1156 );
+ goto done;
+ }
+
case Pin_AvLdVSCR: { // mtvscr
UInt v_src =3D vregNo(i->Pin.AvLdVSCR.src);
p =3D mkFormVX( p, 4, 0, 0, v_src, 1604 );
Modified: trunk/priv/host-ppc32/hdefs.h
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=
=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D
--- trunk/priv/host-ppc32/hdefs.h 2005-06-28 18:06:23 UTC (rev 1226)
+++ trunk/priv/host-ppc32/hdefs.h 2005-06-28 20:59:18 UTC (rev 1227)
@@ -340,52 +340,34 @@
/* Integer Binary */
Pav_AND, Pav_OR, Pav_XOR, /* Bitwise */
=20
- Pav_ADD8UM, Pav_ADD16UM, Pav_ADD32UM,
- Pav_ADD8US, Pav_ADD16US, Pav_ADD32US,
- Pav_ADD8SS, Pav_ADD16SS, Pav_ADD32SS,
+ Pav_ADDUM, Pav_ADDUS,Pav_ADDSS,
=20
- Pav_SUB8UM, Pav_SUB16UM, Pav_SUB32UM,
- Pav_SUB8US, Pav_SUB16US, Pav_SUB32US,
- Pav_SUB8SS, Pav_SUB16SS, Pav_SUB32SS,
+ Pav_SUBUM, Pav_SUBUS, Pav_SUBSS,
=20
- Pav_OMUL8U, Pav_OMUL16U,
- Pav_OMUL8S, Pav_OMUL16S,
- Pav_EMUL8U, Pav_EMUL16U,
- Pav_EMUL8S, Pav_EMUL16S,
+ Pav_OMULU, Pav_OMULS, Pav_EMULU, Pav_EMULS,
=20
- Pav_AVG8U, Pav_AVG16U, Pav_AVG32U,
- Pav_AVG8S, Pav_AVG16S, Pav_AVG32S,
- Pav_MAX8U, Pav_MAX16U, Pav_MAX32U,
- Pav_MAX8S, Pav_MAX16S, Pav_MAX32S,
- Pav_MIN8U, Pav_MIN16U, Pav_MIN32U,
- Pav_MIN8S, Pav_MIN16S, Pav_MIN32S,
+ Pav_AVGU, Pav_AVGS,
+ Pav_MAXU, Pav_MAXS,
+ Pav_MINU, Pav_MINS,
=20
/* Compare (always affects CR field 6) */
- Pav_CMPEQ8U, Pav_CMPEQ16U, Pav_CMPEQ32U,
- Pav_CMPGT8U, Pav_CMPGT16U, Pav_CMPGT32U,
- Pav_CMPGT8S, Pav_CMPGT16S, Pav_CMPGT32S,
+ Pav_CMPEQU, Pav_CMPGTU, Pav_CMPGTS,
=20
/* Shift */
- Pav_SHL8, Pav_SHL16, Pav_SHL32, Pav_SHL128,
- Pav_SHR8, Pav_SHR16, Pav_SHR32, Pav_SHR128,
- Pav_SAR8, Pav_SAR16, Pav_SAR32,
- Pav_ROTL8, Pav_ROTL16, Pav_ROTL32,
+ Pav_SHL, Pav_SHR, Pav_SAR, Pav_ROTL,
=20
/* Pack */
- Pav_PACKU16UM, Pav_PACKU32UM,
- Pav_PACKU16US, Pav_PACKU32US,
- Pav_PACKS16US, Pav_PACKS32US,
- Pav_PACKS16SS, Pav_PACKS32SS, Pav_PACKPXL,
+ Pav_PACKUUM, Pav_PACKUUS, Pav_PACKSUS, Pav_PACKSSS,
+ Pav_PACKPXL,
=20
/* Merge */
- Pav_MRG8HI, Pav_MRG16HI, Pav_MRG32HI,
- Pav_MRG8LO, Pav_MRG16LO, Pav_MRG32LO,
+ Pav_MRGHI, Pav_MRGLO,
=20
-//.. /* Floating point binary */
-//.. Xsse_ADDF, Xsse_SUBF, Xsse_MULF, Xsse_DIVF,
-//.. Xsse_MAXF, Xsse_MINF,
-//.. Xsse_CMPEQF, Xsse_CMPLTF, Xsse_CMPLEF, Xsse_CMPUNF,
-//..=20
+ /* Floating point binary */
+ Pav_ADDF, Pav_SUBF, Pav_MULF,
+ Pav_MAXF, Pav_MINF,
+ Pav_CMPEQF, Pav_CMPGTF, Pav_CMPGEF,
+
//.. /* Floating point unary */
//.. Xsse_RCPF, Xsse_RSQRTF, Xsse_SQRTF,
}
@@ -425,20 +407,20 @@
// Pin_AvConst, /* Generate restricted AV literal */
Pin_AvLdSt, /* AV load/store (kludging for AMode_IR) */
Pin_AvUnary, /* AV unary general reg=3D>reg */
+
Pin_AvBinary, /* AV binary general reg,reg=3D>reg */
+      Pin_AvBin8x16, /* AV binary, 8x16 */
+      Pin_AvBin16x8, /* AV binary, 16x8 */
+ Pin_AvBin32x4, /* AV binary, 32x4 */
=20
+ Pin_AvBin32Fx4, /* AV FP binary, 32Fx4 */
+
Pin_AvPerm, /* AV permute (shuffle) */
Pin_AvSel, /* AV select */
Pin_AvShlDbl, /* AV shift-left double by imm */
Pin_AvSplat, /* One elem repeated throughout dst */
- Pin_AvLdVSCR /* mtvscr */
-
-//.. Xin_SseLdzLO, /* SSE load low 32/64 bits, zero remainder of =
reg */
-//.. Xin_Sse32Fx4, /* SSE binary, 32Fx4 */
-//.. Xin_Sse32FLo, /* SSE binary, 32F in lowest lane only */
-//.. Xin_Sse64Fx2, /* SSE binary, 64Fx2 */
-//.. Xin_Sse64FLo, /* SSE binary, 64F in lowest lane only */
-//.. Xin_SseCMov, /* SSE conditional move */
+ Pin_AvLdVSCR, /* mtvscr */
+ Pin_AvCMov /* AV conditional move */
}
PPC32InstrTag;
=20
@@ -612,6 +594,30 @@
HReg srcL;
HReg srcR;
} AvBinary;
+ struct {
+ PPC32AvOp op;
+ HReg dst;
+ HReg srcL;
+ HReg srcR;
+ } AvBin8x16;
+ struct {
+ PPC32AvOp op;
+ HReg dst;
+ HReg srcL;
+ HReg srcR;
+ } AvBin16x8;
+ struct {
+ PPC32AvOp op;
+ HReg dst;
+ HReg srcL;
+ HReg srcR;
+ } AvBin32x4;
+ struct {
+ PPC32AvOp op;
+ HReg dst;
+ HReg srcL;
+ HReg srcR;
+ } AvBin32Fx4;
/* Perm,Sel,SlDbl,Splat are all weird AV permutations */
struct {
HReg ctl;
@@ -636,6 +642,13 @@
HReg dst;
PPC32RI* src;
} AvSplat;
+ /* Mov src to dst on the given condition, which may not
+ be the bogus Xcc_ALWAYS. */
+ struct {
+ PPC32CondCode cond;
+ HReg dst;
+ HReg src;
+ } AvCMov;
/* Load AlitVec Status & Control Register */
struct {
HReg src;
@@ -675,10 +688,15 @@
extern PPC32Instr* PPC32Instr_AvLdSt ( Bool isLoad, UChar sz, HReg, =
PPC32AMode* );
extern PPC32Instr* PPC32Instr_AvUnary ( PPC32FpOp op, HReg dst, HReg =
src );
extern PPC32Instr* PPC32Instr_AvBinary ( PPC32FpOp op, HReg dst, HReg =
srcL, HReg srcR );
+extern PPC32Instr* PPC32Instr_AvBin8x16 ( PPC32AvOp op, HReg dst, HReg =
srcL, HReg srcR );
+extern PPC32Instr* PPC32Instr_AvBin16x8 ( PPC32AvOp op, HReg dst, HReg =
srcL, HReg srcR );
+extern PPC32Instr* PPC32Instr_AvBin32x4 ( PPC32AvOp op, HReg dst, HReg =
srcL, HReg srcR );
+extern PPC32Instr* PPC32Instr_AvBin32Fx4 ( PPC32AvOp op, HReg dst, HReg =
srcL, HReg srcR );
extern PPC32Instr* PPC32Instr_AvPerm ( HReg ctl, HReg dst, HReg srcL=
, HReg srcR );
extern PPC32Instr* PPC32Instr_AvSel ( HReg ctl, HReg dst, HReg srcL=
, HReg srcR );
extern PPC32Instr* PPC32Instr_AvShlDbl ( UChar shift, HReg dst, HReg s=
rcL, HReg srcR );
extern PPC32Instr* PPC32Instr_AvSplat ( UChar sz, HReg dst, PPC32RI* =
src );
+extern PPC32Instr* PPC32Instr_AvCMov ( PPC32CondCode, HReg dst, HReg=
src );
extern PPC32Instr* PPC32Instr_AvLdVSCR ( HReg src );
=20
extern void ppPPC32Instr ( PPC32Instr* );
|