|
From: <sv...@va...> - 2015-03-13 12:47:02
|
Author: florian
Date: Fri Mar 13 12:46:49 2015
New Revision: 3103
Log:
r2974 moved the inline definition of LibVEX_Alloc from libvex.h
to main_util.c because it caused linker problems with ICC.
See comments in BZ #339542.
This change re-enables inlining of that function by adding it
(renamed as LibVEX_Alloc_inline) to main_util.h.
500+ call sites changed accordingly.
Modified:
trunk/priv/host_amd64_defs.c
trunk/priv/host_amd64_isel.c
trunk/priv/host_arm64_defs.c
trunk/priv/host_arm64_isel.c
trunk/priv/host_arm_defs.c
trunk/priv/host_arm_isel.c
trunk/priv/host_generic_reg_alloc2.c
trunk/priv/host_generic_regs.c
trunk/priv/host_mips_defs.c
trunk/priv/host_mips_isel.c
trunk/priv/host_ppc_defs.c
trunk/priv/host_ppc_isel.c
trunk/priv/host_s390_defs.c
trunk/priv/host_s390_isel.c
trunk/priv/host_x86_defs.c
trunk/priv/host_x86_isel.c
trunk/priv/ir_defs.c
trunk/priv/ir_opt.c
trunk/priv/main_util.c
trunk/priv/main_util.h
Modified: trunk/priv/host_amd64_defs.c
==============================================================================
--- trunk/priv/host_amd64_defs.c (original)
+++ trunk/priv/host_amd64_defs.c Fri Mar 13 12:46:49 2015
@@ -136,7 +136,7 @@
{
#if 0
*nregs = 6;
- *arr = LibVEX_Alloc(*nregs * sizeof(HReg));
+ *arr = LibVEX_Alloc_inline(*nregs * sizeof(HReg));
(*arr)[ 0] = hregAMD64_RSI();
(*arr)[ 1] = hregAMD64_RDI();
(*arr)[ 2] = hregAMD64_RBX();
@@ -147,7 +147,7 @@
#endif
#if 1
*nregs = 20;
- *arr = LibVEX_Alloc(*nregs * sizeof(HReg));
+ *arr = LibVEX_Alloc_inline(*nregs * sizeof(HReg));
(*arr)[ 0] = hregAMD64_RSI();
(*arr)[ 1] = hregAMD64_RDI();
(*arr)[ 2] = hregAMD64_R8();
@@ -203,14 +203,14 @@
/* --------- AMD64AMode: memory address expressions. --------- */
AMD64AMode* AMD64AMode_IR ( UInt imm32, HReg reg ) {
- AMD64AMode* am = LibVEX_Alloc(sizeof(AMD64AMode));
+ AMD64AMode* am = LibVEX_Alloc_inline(sizeof(AMD64AMode));
am->tag = Aam_IR;
am->Aam.IR.imm = imm32;
am->Aam.IR.reg = reg;
return am;
}
AMD64AMode* AMD64AMode_IRRS ( UInt imm32, HReg base, HReg indEx, Int shift ) {
- AMD64AMode* am = LibVEX_Alloc(sizeof(AMD64AMode));
+ AMD64AMode* am = LibVEX_Alloc_inline(sizeof(AMD64AMode));
am->tag = Aam_IRRS;
am->Aam.IRRS.imm = imm32;
am->Aam.IRRS.base = base;
@@ -273,19 +273,19 @@
/* --------- Operand, which can be reg, immediate or memory. --------- */
AMD64RMI* AMD64RMI_Imm ( UInt imm32 ) {
- AMD64RMI* op = LibVEX_Alloc(sizeof(AMD64RMI));
+ AMD64RMI* op = LibVEX_Alloc_inline(sizeof(AMD64RMI));
op->tag = Armi_Imm;
op->Armi.Imm.imm32 = imm32;
return op;
}
AMD64RMI* AMD64RMI_Reg ( HReg reg ) {
- AMD64RMI* op = LibVEX_Alloc(sizeof(AMD64RMI));
+ AMD64RMI* op = LibVEX_Alloc_inline(sizeof(AMD64RMI));
op->tag = Armi_Reg;
op->Armi.Reg.reg = reg;
return op;
}
AMD64RMI* AMD64RMI_Mem ( AMD64AMode* am ) {
- AMD64RMI* op = LibVEX_Alloc(sizeof(AMD64RMI));
+ AMD64RMI* op = LibVEX_Alloc_inline(sizeof(AMD64RMI));
op->tag = Armi_Mem;
op->Armi.Mem.am = am;
return op;
@@ -353,13 +353,13 @@
/* --------- Operand, which can be reg or immediate only. --------- */
AMD64RI* AMD64RI_Imm ( UInt imm32 ) {
- AMD64RI* op = LibVEX_Alloc(sizeof(AMD64RI));
+ AMD64RI* op = LibVEX_Alloc_inline(sizeof(AMD64RI));
op->tag = Ari_Imm;
op->Ari.Imm.imm32 = imm32;
return op;
}
AMD64RI* AMD64RI_Reg ( HReg reg ) {
- AMD64RI* op = LibVEX_Alloc(sizeof(AMD64RI));
+ AMD64RI* op = LibVEX_Alloc_inline(sizeof(AMD64RI));
op->tag = Ari_Reg;
op->Ari.Reg.reg = reg;
return op;
@@ -409,13 +409,13 @@
/* --------- Operand, which can be reg or memory only. --------- */
AMD64RM* AMD64RM_Reg ( HReg reg ) {
- AMD64RM* op = LibVEX_Alloc(sizeof(AMD64RM));
+ AMD64RM* op = LibVEX_Alloc_inline(sizeof(AMD64RM));
op->tag = Arm_Reg;
op->Arm.Reg.reg = reg;
return op;
}
AMD64RM* AMD64RM_Mem ( AMD64AMode* am ) {
- AMD64RM* op = LibVEX_Alloc(sizeof(AMD64RM));
+ AMD64RM* op = LibVEX_Alloc_inline(sizeof(AMD64RM));
op->tag = Arm_Mem;
op->Arm.Mem.am = am;
return op;
@@ -606,14 +606,14 @@
}
AMD64Instr* AMD64Instr_Imm64 ( ULong imm64, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Imm64;
i->Ain.Imm64.imm64 = imm64;
i->Ain.Imm64.dst = dst;
return i;
}
AMD64Instr* AMD64Instr_Alu64R ( AMD64AluOp op, AMD64RMI* src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Alu64R;
i->Ain.Alu64R.op = op;
i->Ain.Alu64R.src = src;
@@ -621,7 +621,7 @@
return i;
}
AMD64Instr* AMD64Instr_Alu64M ( AMD64AluOp op, AMD64RI* src, AMD64AMode* dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Alu64M;
i->Ain.Alu64M.op = op;
i->Ain.Alu64M.src = src;
@@ -630,7 +630,7 @@
return i;
}
AMD64Instr* AMD64Instr_Sh64 ( AMD64ShiftOp op, UInt src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Sh64;
i->Ain.Sh64.op = op;
i->Ain.Sh64.src = src;
@@ -638,28 +638,28 @@
return i;
}
AMD64Instr* AMD64Instr_Test64 ( UInt imm32, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Test64;
i->Ain.Test64.imm32 = imm32;
i->Ain.Test64.dst = dst;
return i;
}
AMD64Instr* AMD64Instr_Unary64 ( AMD64UnaryOp op, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Unary64;
i->Ain.Unary64.op = op;
i->Ain.Unary64.dst = dst;
return i;
}
AMD64Instr* AMD64Instr_Lea64 ( AMD64AMode* am, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Lea64;
i->Ain.Lea64.am = am;
i->Ain.Lea64.dst = dst;
return i;
}
AMD64Instr* AMD64Instr_Alu32R ( AMD64AluOp op, AMD64RMI* src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Alu32R;
i->Ain.Alu32R.op = op;
i->Ain.Alu32R.src = src;
@@ -672,14 +672,14 @@
return i;
}
AMD64Instr* AMD64Instr_MulL ( Bool syned, AMD64RM* src ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_MulL;
i->Ain.MulL.syned = syned;
i->Ain.MulL.src = src;
return i;
}
AMD64Instr* AMD64Instr_Div ( Bool syned, Int sz, AMD64RM* src ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Div;
i->Ain.Div.syned = syned;
i->Ain.Div.sz = sz;
@@ -688,14 +688,14 @@
return i;
}
AMD64Instr* AMD64Instr_Push( AMD64RMI* src ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Push;
i->Ain.Push.src = src;
return i;
}
AMD64Instr* AMD64Instr_Call ( AMD64CondCode cond, Addr64 target, Int regparms,
RetLoc rloc ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Call;
i->Ain.Call.cond = cond;
i->Ain.Call.target = target;
@@ -708,7 +708,7 @@
AMD64Instr* AMD64Instr_XDirect ( Addr64 dstGA, AMD64AMode* amRIP,
AMD64CondCode cond, Bool toFastEP ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_XDirect;
i->Ain.XDirect.dstGA = dstGA;
i->Ain.XDirect.amRIP = amRIP;
@@ -718,7 +718,7 @@
}
AMD64Instr* AMD64Instr_XIndir ( HReg dstGA, AMD64AMode* amRIP,
AMD64CondCode cond ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_XIndir;
i->Ain.XIndir.dstGA = dstGA;
i->Ain.XIndir.amRIP = amRIP;
@@ -727,7 +727,7 @@
}
AMD64Instr* AMD64Instr_XAssisted ( HReg dstGA, AMD64AMode* amRIP,
AMD64CondCode cond, IRJumpKind jk ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_XAssisted;
i->Ain.XAssisted.dstGA = dstGA;
i->Ain.XAssisted.amRIP = amRIP;
@@ -737,7 +737,7 @@
}
AMD64Instr* AMD64Instr_CMov64 ( AMD64CondCode cond, HReg src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_CMov64;
i->Ain.CMov64.cond = cond;
i->Ain.CMov64.src = src;
@@ -747,7 +747,7 @@
}
AMD64Instr* AMD64Instr_CLoad ( AMD64CondCode cond, UChar szB,
AMD64AMode* addr, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_CLoad;
i->Ain.CLoad.cond = cond;
i->Ain.CLoad.szB = szB;
@@ -758,7 +758,7 @@
}
AMD64Instr* AMD64Instr_CStore ( AMD64CondCode cond, UChar szB,
HReg src, AMD64AMode* addr ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_CStore;
i->Ain.CStore.cond = cond;
i->Ain.CStore.szB = szB;
@@ -768,7 +768,7 @@
return i;
}
AMD64Instr* AMD64Instr_MovxLQ ( Bool syned, HReg src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_MovxLQ;
i->Ain.MovxLQ.syned = syned;
i->Ain.MovxLQ.src = src;
@@ -777,7 +777,7 @@
}
AMD64Instr* AMD64Instr_LoadEX ( UChar szSmall, Bool syned,
AMD64AMode* src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_LoadEX;
i->Ain.LoadEX.szSmall = szSmall;
i->Ain.LoadEX.syned = syned;
@@ -787,7 +787,7 @@
return i;
}
AMD64Instr* AMD64Instr_Store ( UChar sz, HReg src, AMD64AMode* dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Store;
i->Ain.Store.sz = sz;
i->Ain.Store.src = src;
@@ -796,14 +796,14 @@
return i;
}
AMD64Instr* AMD64Instr_Set64 ( AMD64CondCode cond, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Set64;
i->Ain.Set64.cond = cond;
i->Ain.Set64.dst = dst;
return i;
}
AMD64Instr* AMD64Instr_Bsfr64 ( Bool isFwds, HReg src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Bsfr64;
i->Ain.Bsfr64.isFwds = isFwds;
i->Ain.Bsfr64.src = src;
@@ -811,12 +811,12 @@
return i;
}
AMD64Instr* AMD64Instr_MFence ( void ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_MFence;
return i;
}
AMD64Instr* AMD64Instr_ACAS ( AMD64AMode* addr, UChar sz ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_ACAS;
i->Ain.ACAS.addr = addr;
i->Ain.ACAS.sz = sz;
@@ -824,7 +824,7 @@
return i;
}
AMD64Instr* AMD64Instr_DACAS ( AMD64AMode* addr, UChar sz ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_DACAS;
i->Ain.DACAS.addr = addr;
i->Ain.DACAS.sz = sz;
@@ -834,7 +834,7 @@
AMD64Instr* AMD64Instr_A87Free ( Int nregs )
{
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_A87Free;
i->Ain.A87Free.nregs = nregs;
vassert(nregs >= 1 && nregs <= 7);
@@ -842,7 +842,7 @@
}
AMD64Instr* AMD64Instr_A87PushPop ( AMD64AMode* addr, Bool isPush, UChar szB )
{
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_A87PushPop;
i->Ain.A87PushPop.addr = addr;
i->Ain.A87PushPop.isPush = isPush;
@@ -852,33 +852,33 @@
}
AMD64Instr* AMD64Instr_A87FpOp ( A87FpOp op )
{
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_A87FpOp;
i->Ain.A87FpOp.op = op;
return i;
}
AMD64Instr* AMD64Instr_A87LdCW ( AMD64AMode* addr )
{
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_A87LdCW;
i->Ain.A87LdCW.addr = addr;
return i;
}
AMD64Instr* AMD64Instr_A87StSW ( AMD64AMode* addr )
{
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_A87StSW;
i->Ain.A87StSW.addr = addr;
return i;
}
AMD64Instr* AMD64Instr_LdMXCSR ( AMD64AMode* addr ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_LdMXCSR;
i->Ain.LdMXCSR.addr = addr;
return i;
}
AMD64Instr* AMD64Instr_SseUComIS ( Int sz, HReg srcL, HReg srcR, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_SseUComIS;
i->Ain.SseUComIS.sz = toUChar(sz);
i->Ain.SseUComIS.srcL = srcL;
@@ -888,7 +888,7 @@
return i;
}
AMD64Instr* AMD64Instr_SseSI2SF ( Int szS, Int szD, HReg src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_SseSI2SF;
i->Ain.SseSI2SF.szS = toUChar(szS);
i->Ain.SseSI2SF.szD = toUChar(szD);
@@ -899,7 +899,7 @@
return i;
}
AMD64Instr* AMD64Instr_SseSF2SI ( Int szS, Int szD, HReg src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_SseSF2SI;
i->Ain.SseSF2SI.szS = toUChar(szS);
i->Ain.SseSF2SI.szD = toUChar(szD);
@@ -911,7 +911,7 @@
}
AMD64Instr* AMD64Instr_SseSDSS ( Bool from64, HReg src, HReg dst )
{
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_SseSDSS;
i->Ain.SseSDSS.from64 = from64;
i->Ain.SseSDSS.src = src;
@@ -920,7 +920,7 @@
}
AMD64Instr* AMD64Instr_SseLdSt ( Bool isLoad, Int sz,
HReg reg, AMD64AMode* addr ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_SseLdSt;
i->Ain.SseLdSt.isLoad = isLoad;
i->Ain.SseLdSt.sz = toUChar(sz);
@@ -931,7 +931,7 @@
}
AMD64Instr* AMD64Instr_SseLdzLO ( Int sz, HReg reg, AMD64AMode* addr )
{
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_SseLdzLO;
i->Ain.SseLdzLO.sz = sz;
i->Ain.SseLdzLO.reg = reg;
@@ -940,7 +940,7 @@
return i;
}
AMD64Instr* AMD64Instr_Sse32Fx4 ( AMD64SseOp op, HReg src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Sse32Fx4;
i->Ain.Sse32Fx4.op = op;
i->Ain.Sse32Fx4.src = src;
@@ -949,7 +949,7 @@
return i;
}
AMD64Instr* AMD64Instr_Sse32FLo ( AMD64SseOp op, HReg src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Sse32FLo;
i->Ain.Sse32FLo.op = op;
i->Ain.Sse32FLo.src = src;
@@ -958,7 +958,7 @@
return i;
}
AMD64Instr* AMD64Instr_Sse64Fx2 ( AMD64SseOp op, HReg src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Sse64Fx2;
i->Ain.Sse64Fx2.op = op;
i->Ain.Sse64Fx2.src = src;
@@ -967,7 +967,7 @@
return i;
}
AMD64Instr* AMD64Instr_Sse64FLo ( AMD64SseOp op, HReg src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_Sse64FLo;
i->Ain.Sse64FLo.op = op;
i->Ain.Sse64FLo.src = src;
@@ -976,7 +976,7 @@
return i;
}
AMD64Instr* AMD64Instr_SseReRg ( AMD64SseOp op, HReg re, HReg rg ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_SseReRg;
i->Ain.SseReRg.op = op;
i->Ain.SseReRg.src = re;
@@ -984,7 +984,7 @@
return i;
}
AMD64Instr* AMD64Instr_SseCMov ( AMD64CondCode cond, HReg src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_SseCMov;
i->Ain.SseCMov.cond = cond;
i->Ain.SseCMov.src = src;
@@ -993,7 +993,7 @@
return i;
}
AMD64Instr* AMD64Instr_SseShuf ( Int order, HReg src, HReg dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_SseShuf;
i->Ain.SseShuf.order = order;
i->Ain.SseShuf.src = src;
@@ -1003,7 +1003,7 @@
}
//uu AMD64Instr* AMD64Instr_AvxLdSt ( Bool isLoad,
//uu HReg reg, AMD64AMode* addr ) {
-//uu AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+//uu AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
//uu i->tag = Ain_AvxLdSt;
//uu i->Ain.AvxLdSt.isLoad = isLoad;
//uu i->Ain.AvxLdSt.reg = reg;
@@ -1011,7 +1011,7 @@
//uu return i;
//uu }
//uu AMD64Instr* AMD64Instr_AvxReRg ( AMD64SseOp op, HReg re, HReg rg ) {
-//uu AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+//uu AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
//uu i->tag = Ain_AvxReRg;
//uu i->Ain.AvxReRg.op = op;
//uu i->Ain.AvxReRg.src = re;
@@ -1020,14 +1020,14 @@
//uu }
AMD64Instr* AMD64Instr_EvCheck ( AMD64AMode* amCounter,
AMD64AMode* amFailAddr ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_EvCheck;
i->Ain.EvCheck.amCounter = amCounter;
i->Ain.EvCheck.amFailAddr = amFailAddr;
return i;
}
AMD64Instr* AMD64Instr_ProfInc ( void ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
i->tag = Ain_ProfInc;
return i;
}
Modified: trunk/priv/host_amd64_isel.c
==============================================================================
--- trunk/priv/host_amd64_isel.c (original)
+++ trunk/priv/host_amd64_isel.c Fri Mar 13 12:46:49 2015
@@ -4923,7 +4923,7 @@
vassert(archinfo_host->endness == VexEndnessLE);
/* Make up an initial environment to use. */
- env = LibVEX_Alloc(sizeof(ISelEnv));
+ env = LibVEX_Alloc_inline(sizeof(ISelEnv));
env->vreg_ctr = 0;
/* Set up output code array. */
@@ -4935,8 +4935,8 @@
/* Make up an IRTemp -> virtual HReg mapping. This doesn't
change as we go along. */
env->n_vregmap = bb->tyenv->types_used;
- env->vregmap = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
- env->vregmapHI = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
+ env->vregmap = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
+ env->vregmapHI = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
/* and finally ... */
env->chainingAllowed = chainingAllowed;
Modified: trunk/priv/host_arm64_defs.c
==============================================================================
--- trunk/priv/host_arm64_defs.c (original)
+++ trunk/priv/host_arm64_defs.c Fri Mar 13 12:46:49 2015
@@ -124,7 +124,7 @@
{
Int i = 0;
*nregs = 26;
- *arr = LibVEX_Alloc(*nregs * sizeof(HReg));
+ *arr = LibVEX_Alloc_inline(*nregs * sizeof(HReg));
// callee saves ones (22 to 28) are listed first, since we prefer
// them if they're available
@@ -222,7 +222,7 @@
/* --------- Memory address expressions (amodes). --------- */
ARM64AMode* ARM64AMode_RI9 ( HReg reg, Int simm9 ) {
- ARM64AMode* am = LibVEX_Alloc(sizeof(ARM64AMode));
+ ARM64AMode* am = LibVEX_Alloc_inline(sizeof(ARM64AMode));
am->tag = ARM64am_RI9;
am->ARM64am.RI9.reg = reg;
am->ARM64am.RI9.simm9 = simm9;
@@ -231,7 +231,7 @@
}
ARM64AMode* ARM64AMode_RI12 ( HReg reg, Int uimm12, UChar szB ) {
- ARM64AMode* am = LibVEX_Alloc(sizeof(ARM64AMode));
+ ARM64AMode* am = LibVEX_Alloc_inline(sizeof(ARM64AMode));
am->tag = ARM64am_RI12;
am->ARM64am.RI12.reg = reg;
am->ARM64am.RI12.uimm12 = uimm12;
@@ -245,7 +245,7 @@
}
ARM64AMode* ARM64AMode_RR ( HReg base, HReg index ) {
- ARM64AMode* am = LibVEX_Alloc(sizeof(ARM64AMode));
+ ARM64AMode* am = LibVEX_Alloc_inline(sizeof(ARM64AMode));
am->tag = ARM64am_RR;
am->ARM64am.RR.base = base;
am->ARM64am.RR.index = index;
@@ -315,7 +315,7 @@
/* --------- Reg or uimm12<<{0,12} operands --------- */
ARM64RIA* ARM64RIA_I12 ( UShort imm12, UChar shift ) {
- ARM64RIA* riA = LibVEX_Alloc(sizeof(ARM64RIA));
+ ARM64RIA* riA = LibVEX_Alloc_inline(sizeof(ARM64RIA));
riA->tag = ARM64riA_I12;
riA->ARM64riA.I12.imm12 = imm12;
riA->ARM64riA.I12.shift = shift;
@@ -324,7 +324,7 @@
return riA;
}
ARM64RIA* ARM64RIA_R ( HReg reg ) {
- ARM64RIA* riA = LibVEX_Alloc(sizeof(ARM64RIA));
+ ARM64RIA* riA = LibVEX_Alloc_inline(sizeof(ARM64RIA));
riA->tag = ARM64riA_R;
riA->ARM64riA.R.reg = reg;
return riA;
@@ -372,7 +372,7 @@
/* --------- Reg or "bitfield" (logic immediate) operands --------- */
ARM64RIL* ARM64RIL_I13 ( UChar bitN, UChar immR, UChar immS ) {
- ARM64RIL* riL = LibVEX_Alloc(sizeof(ARM64RIL));
+ ARM64RIL* riL = LibVEX_Alloc_inline(sizeof(ARM64RIL));
riL->tag = ARM64riL_I13;
riL->ARM64riL.I13.bitN = bitN;
riL->ARM64riL.I13.immR = immR;
@@ -383,7 +383,7 @@
return riL;
}
ARM64RIL* ARM64RIL_R ( HReg reg ) {
- ARM64RIL* riL = LibVEX_Alloc(sizeof(ARM64RIL));
+ ARM64RIL* riL = LibVEX_Alloc_inline(sizeof(ARM64RIL));
riL->tag = ARM64riL_R;
riL->ARM64riL.R.reg = reg;
return riL;
@@ -433,14 +433,14 @@
/* --------------- Reg or uimm6 operands --------------- */
ARM64RI6* ARM64RI6_I6 ( UInt imm6 ) {
- ARM64RI6* ri6 = LibVEX_Alloc(sizeof(ARM64RI6));
+ ARM64RI6* ri6 = LibVEX_Alloc_inline(sizeof(ARM64RI6));
ri6->tag = ARM64ri6_I6;
ri6->ARM64ri6.I6.imm6 = imm6;
vassert(imm6 > 0 && imm6 < 64);
return ri6;
}
ARM64RI6* ARM64RI6_R ( HReg reg ) {
- ARM64RI6* ri6 = LibVEX_Alloc(sizeof(ARM64RI6));
+ ARM64RI6* ri6 = LibVEX_Alloc_inline(sizeof(ARM64RI6));
ri6->tag = ARM64ri6_R;
ri6->ARM64ri6.R.reg = reg;
return ri6;
@@ -828,7 +828,7 @@
ARM64Instr* ARM64Instr_Arith ( HReg dst,
HReg argL, ARM64RIA* argR, Bool isAdd ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_Arith;
i->ARM64in.Arith.dst = dst;
i->ARM64in.Arith.argL = argL;
@@ -837,7 +837,7 @@
return i;
}
ARM64Instr* ARM64Instr_Cmp ( HReg argL, ARM64RIA* argR, Bool is64 ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_Cmp;
i->ARM64in.Cmp.argL = argL;
i->ARM64in.Cmp.argR = argR;
@@ -846,7 +846,7 @@
}
ARM64Instr* ARM64Instr_Logic ( HReg dst,
HReg argL, ARM64RIL* argR, ARM64LogicOp op ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_Logic;
i->ARM64in.Logic.dst = dst;
i->ARM64in.Logic.argL = argL;
@@ -855,7 +855,7 @@
return i;
}
ARM64Instr* ARM64Instr_Test ( HReg argL, ARM64RIL* argR ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_Test;
i->ARM64in.Test.argL = argL;
i->ARM64in.Test.argR = argR;
@@ -863,7 +863,7 @@
}
ARM64Instr* ARM64Instr_Shift ( HReg dst,
HReg argL, ARM64RI6* argR, ARM64ShiftOp op ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_Shift;
i->ARM64in.Shift.dst = dst;
i->ARM64in.Shift.argL = argL;
@@ -872,7 +872,7 @@
return i;
}
ARM64Instr* ARM64Instr_Unary ( HReg dst, HReg src, ARM64UnaryOp op ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_Unary;
i->ARM64in.Unary.dst = dst;
i->ARM64in.Unary.src = src;
@@ -880,7 +880,7 @@
return i;
}
ARM64Instr* ARM64Instr_MovI ( HReg dst, HReg src ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_MovI;
i->ARM64in.MovI.dst = dst;
i->ARM64in.MovI.src = src;
@@ -889,14 +889,14 @@
return i;
}
ARM64Instr* ARM64Instr_Imm64 ( HReg dst, ULong imm64 ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_Imm64;
i->ARM64in.Imm64.dst = dst;
i->ARM64in.Imm64.imm64 = imm64;
return i;
}
ARM64Instr* ARM64Instr_LdSt64 ( Bool isLoad, HReg rD, ARM64AMode* amode ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_LdSt64;
i->ARM64in.LdSt64.isLoad = isLoad;
i->ARM64in.LdSt64.rD = rD;
@@ -904,7 +904,7 @@
return i;
}
ARM64Instr* ARM64Instr_LdSt32 ( Bool isLoad, HReg rD, ARM64AMode* amode ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_LdSt32;
i->ARM64in.LdSt32.isLoad = isLoad;
i->ARM64in.LdSt32.rD = rD;
@@ -912,7 +912,7 @@
return i;
}
ARM64Instr* ARM64Instr_LdSt16 ( Bool isLoad, HReg rD, ARM64AMode* amode ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_LdSt16;
i->ARM64in.LdSt16.isLoad = isLoad;
i->ARM64in.LdSt16.rD = rD;
@@ -920,7 +920,7 @@
return i;
}
ARM64Instr* ARM64Instr_LdSt8 ( Bool isLoad, HReg rD, ARM64AMode* amode ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_LdSt8;
i->ARM64in.LdSt8.isLoad = isLoad;
i->ARM64in.LdSt8.rD = rD;
@@ -929,7 +929,7 @@
}
ARM64Instr* ARM64Instr_XDirect ( Addr64 dstGA, ARM64AMode* amPC,
ARM64CondCode cond, Bool toFastEP ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_XDirect;
i->ARM64in.XDirect.dstGA = dstGA;
i->ARM64in.XDirect.amPC = amPC;
@@ -939,7 +939,7 @@
}
ARM64Instr* ARM64Instr_XIndir ( HReg dstGA, ARM64AMode* amPC,
ARM64CondCode cond ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_XIndir;
i->ARM64in.XIndir.dstGA = dstGA;
i->ARM64in.XIndir.amPC = amPC;
@@ -948,7 +948,7 @@
}
ARM64Instr* ARM64Instr_XAssisted ( HReg dstGA, ARM64AMode* amPC,
ARM64CondCode cond, IRJumpKind jk ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_XAssisted;
i->ARM64in.XAssisted.dstGA = dstGA;
i->ARM64in.XAssisted.amPC = amPC;
@@ -958,7 +958,7 @@
}
ARM64Instr* ARM64Instr_CSel ( HReg dst, HReg argL, HReg argR,
ARM64CondCode cond ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_CSel;
i->ARM64in.CSel.dst = dst;
i->ARM64in.CSel.argL = argL;
@@ -968,7 +968,7 @@
}
ARM64Instr* ARM64Instr_Call ( ARM64CondCode cond, Addr64 target, Int nArgRegs,
RetLoc rloc ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_Call;
i->ARM64in.Call.cond = cond;
i->ARM64in.Call.target = target;
@@ -978,7 +978,7 @@
return i;
}
extern ARM64Instr* ARM64Instr_AddToSP ( Int simm ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_AddToSP;
i->ARM64in.AddToSP.simm = simm;
vassert(-4096 < simm && simm < 4096);
@@ -986,14 +986,14 @@
return i;
}
extern ARM64Instr* ARM64Instr_FromSP ( HReg dst ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_FromSP;
i->ARM64in.FromSP.dst = dst;
return i;
}
ARM64Instr* ARM64Instr_Mul ( HReg dst, HReg argL, HReg argR,
ARM64MulOp op ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_Mul;
i->ARM64in.Mul.dst = dst;
i->ARM64in.Mul.argL = argL;
@@ -1002,26 +1002,26 @@
return i;
}
ARM64Instr* ARM64Instr_LdrEX ( Int szB ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_LdrEX;
i->ARM64in.LdrEX.szB = szB;
vassert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
return i;
}
ARM64Instr* ARM64Instr_StrEX ( Int szB ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_StrEX;
i->ARM64in.StrEX.szB = szB;
vassert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
return i;
}
ARM64Instr* ARM64Instr_MFence ( void ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_MFence;
return i;
}
ARM64Instr* ARM64Instr_VLdStS ( Bool isLoad, HReg sD, HReg rN, UInt uimm12 ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VLdStS;
i->ARM64in.VLdStS.isLoad = isLoad;
i->ARM64in.VLdStS.sD = sD;
@@ -1031,7 +1031,7 @@
return i;
}
ARM64Instr* ARM64Instr_VLdStD ( Bool isLoad, HReg dD, HReg rN, UInt uimm12 ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VLdStD;
i->ARM64in.VLdStD.isLoad = isLoad;
i->ARM64in.VLdStD.dD = dD;
@@ -1041,7 +1041,7 @@
return i;
}
ARM64Instr* ARM64Instr_VLdStQ ( Bool isLoad, HReg rQ, HReg rN ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VLdStQ;
i->ARM64in.VLdStQ.isLoad = isLoad;
i->ARM64in.VLdStQ.rQ = rQ;
@@ -1049,7 +1049,7 @@
return i;
}
ARM64Instr* ARM64Instr_VCvtI2F ( ARM64CvtOp how, HReg rD, HReg rS ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VCvtI2F;
i->ARM64in.VCvtI2F.how = how;
i->ARM64in.VCvtI2F.rD = rD;
@@ -1058,7 +1058,7 @@
}
ARM64Instr* ARM64Instr_VCvtF2I ( ARM64CvtOp how, HReg rD, HReg rS,
UChar armRM ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VCvtF2I;
i->ARM64in.VCvtF2I.how = how;
i->ARM64in.VCvtF2I.rD = rD;
@@ -1068,7 +1068,7 @@
return i;
}
ARM64Instr* ARM64Instr_VCvtSD ( Bool sToD, HReg dst, HReg src ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VCvtSD;
i->ARM64in.VCvtSD.sToD = sToD;
i->ARM64in.VCvtSD.dst = dst;
@@ -1076,7 +1076,7 @@
return i;
}
ARM64Instr* ARM64Instr_VUnaryD ( ARM64FpUnaryOp op, HReg dst, HReg src ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VUnaryD;
i->ARM64in.VUnaryD.op = op;
i->ARM64in.VUnaryD.dst = dst;
@@ -1084,7 +1084,7 @@
return i;
}
ARM64Instr* ARM64Instr_VUnaryS ( ARM64FpUnaryOp op, HReg dst, HReg src ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VUnaryS;
i->ARM64in.VUnaryS.op = op;
i->ARM64in.VUnaryS.dst = dst;
@@ -1093,7 +1093,7 @@
}
ARM64Instr* ARM64Instr_VBinD ( ARM64FpBinOp op,
HReg dst, HReg argL, HReg argR ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VBinD;
i->ARM64in.VBinD.op = op;
i->ARM64in.VBinD.dst = dst;
@@ -1103,7 +1103,7 @@
}
ARM64Instr* ARM64Instr_VBinS ( ARM64FpBinOp op,
HReg dst, HReg argL, HReg argR ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VBinS;
i->ARM64in.VBinS.op = op;
i->ARM64in.VBinS.dst = dst;
@@ -1112,14 +1112,14 @@
return i;
}
ARM64Instr* ARM64Instr_VCmpD ( HReg argL, HReg argR ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VCmpD;
i->ARM64in.VCmpD.argL = argL;
i->ARM64in.VCmpD.argR = argR;
return i;
}
ARM64Instr* ARM64Instr_VCmpS ( HReg argL, HReg argR ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VCmpS;
i->ARM64in.VCmpS.argL = argL;
i->ARM64in.VCmpS.argR = argR;
@@ -1127,7 +1127,7 @@
}
ARM64Instr* ARM64Instr_VFCSel ( HReg dst, HReg argL, HReg argR,
ARM64CondCode cond, Bool isD ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VFCSel;
i->ARM64in.VFCSel.dst = dst;
i->ARM64in.VFCSel.argL = argL;
@@ -1137,14 +1137,14 @@
return i;
}
ARM64Instr* ARM64Instr_FPCR ( Bool toFPCR, HReg iReg ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_FPCR;
i->ARM64in.FPCR.toFPCR = toFPCR;
i->ARM64in.FPCR.iReg = iReg;
return i;
}
ARM64Instr* ARM64Instr_FPSR ( Bool toFPSR, HReg iReg ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_FPSR;
i->ARM64in.FPSR.toFPSR = toFPSR;
i->ARM64in.FPSR.iReg = iReg;
@@ -1152,7 +1152,7 @@
}
ARM64Instr* ARM64Instr_VBinV ( ARM64VecBinOp op,
HReg dst, HReg argL, HReg argR ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VBinV;
i->ARM64in.VBinV.op = op;
i->ARM64in.VBinV.dst = dst;
@@ -1161,7 +1161,7 @@
return i;
}
ARM64Instr* ARM64Instr_VModifyV ( ARM64VecModifyOp op, HReg mod, HReg arg ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VModifyV;
i->ARM64in.VModifyV.op = op;
i->ARM64in.VModifyV.mod = mod;
@@ -1169,7 +1169,7 @@
return i;
}
ARM64Instr* ARM64Instr_VUnaryV ( ARM64VecUnaryOp op, HReg dst, HReg arg ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VUnaryV;
i->ARM64in.VUnaryV.op = op;
i->ARM64in.VUnaryV.dst = dst;
@@ -1178,7 +1178,7 @@
}
ARM64Instr* ARM64Instr_VNarrowV ( ARM64VecNarrowOp op,
UInt dszBlg2, HReg dst, HReg src ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VNarrowV;
i->ARM64in.VNarrowV.op = op;
i->ARM64in.VNarrowV.dszBlg2 = dszBlg2;
@@ -1189,7 +1189,7 @@
}
ARM64Instr* ARM64Instr_VShiftImmV ( ARM64VecShiftImmOp op,
HReg dst, HReg src, UInt amt ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VShiftImmV;
i->ARM64in.VShiftImmV.op = op;
i->ARM64in.VShiftImmV.dst = dst;
@@ -1245,7 +1245,7 @@
return i;
}
ARM64Instr* ARM64Instr_VExtV ( HReg dst, HReg srcLo, HReg srcHi, UInt amtB ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VExtV;
i->ARM64in.VExtV.dst = dst;
i->ARM64in.VExtV.srcLo = srcLo;
@@ -1255,7 +1255,7 @@
return i;
}
ARM64Instr* ARM64Instr_VImmQ (HReg rQ, UShort imm) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VImmQ;
i->ARM64in.VImmQ.rQ = rQ;
i->ARM64in.VImmQ.imm = imm;
@@ -1270,21 +1270,21 @@
return i;
}
ARM64Instr* ARM64Instr_VDfromX ( HReg rD, HReg rX ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VDfromX;
i->ARM64in.VDfromX.rD = rD;
i->ARM64in.VDfromX.rX = rX;
return i;
}
ARM64Instr* ARM64Instr_VQfromX ( HReg rQ, HReg rXlo ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VQfromX;
i->ARM64in.VQfromX.rQ = rQ;
i->ARM64in.VQfromX.rXlo = rXlo;
return i;
}
ARM64Instr* ARM64Instr_VQfromXX ( HReg rQ, HReg rXhi, HReg rXlo ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VQfromXX;
i->ARM64in.VQfromXX.rQ = rQ;
i->ARM64in.VQfromXX.rXhi = rXhi;
@@ -1292,7 +1292,7 @@
return i;
}
ARM64Instr* ARM64Instr_VXfromQ ( HReg rX, HReg rQ, UInt laneNo ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VXfromQ;
i->ARM64in.VXfromQ.rX = rX;
i->ARM64in.VXfromQ.rQ = rQ;
@@ -1301,7 +1301,7 @@
return i;
}
ARM64Instr* ARM64Instr_VXfromDorS ( HReg rX, HReg rDorS, Bool fromD ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VXfromDorS;
i->ARM64in.VXfromDorS.rX = rX;
i->ARM64in.VXfromDorS.rDorS = rDorS;
@@ -1309,7 +1309,7 @@
return i;
}
ARM64Instr* ARM64Instr_VMov ( UInt szB, HReg dst, HReg src ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_VMov;
i->ARM64in.VMov.szB = szB;
i->ARM64in.VMov.dst = dst;
@@ -1330,14 +1330,14 @@
}
ARM64Instr* ARM64Instr_EvCheck ( ARM64AMode* amCounter,
ARM64AMode* amFailAddr ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_EvCheck;
i->ARM64in.EvCheck.amCounter = amCounter;
i->ARM64in.EvCheck.amFailAddr = amFailAddr;
return i;
}
ARM64Instr* ARM64Instr_ProfInc ( void ) {
- ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
+ ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
i->tag = ARM64in_ProfInc;
return i;
}
Modified: trunk/priv/host_arm64_isel.c
==============================================================================
--- trunk/priv/host_arm64_isel.c (original)
+++ trunk/priv/host_arm64_isel.c Fri Mar 13 12:46:49 2015
@@ -3930,7 +3930,7 @@
vassert(sizeof(ARM64Instr) <= 32);
/* Make up an initial environment to use. */
- env = LibVEX_Alloc(sizeof(ISelEnv));
+ env = LibVEX_Alloc_inline(sizeof(ISelEnv));
env->vreg_ctr = 0;
/* Set up output code array. */
@@ -3942,8 +3942,8 @@
/* Make up an IRTemp -> virtual HReg mapping. This doesn't
change as we go along. */
env->n_vregmap = bb->tyenv->types_used;
- env->vregmap = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
- env->vregmapHI = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
+ env->vregmap = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
+ env->vregmapHI = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
/* and finally ... */
env->chainingAllowed = chainingAllowed;
Modified: trunk/priv/host_arm_defs.c
==============================================================================
--- trunk/priv/host_arm_defs.c (original)
+++ trunk/priv/host_arm_defs.c Fri Mar 13 12:46:49 2015
@@ -123,7 +123,7 @@
{
Int i = 0;
*nregs = 26;
- *arr = LibVEX_Alloc(*nregs * sizeof(HReg));
+ *arr = LibVEX_Alloc_inline(*nregs * sizeof(HReg));
// callee saves ones are listed first, since we prefer them
// if they're available
(*arr)[i++] = hregARM_R4();
@@ -217,7 +217,7 @@
/* --------- Mem AModes: Addressing Mode 1 --------- */
ARMAMode1* ARMAMode1_RI ( HReg reg, Int simm13 ) {
- ARMAMode1* am = LibVEX_Alloc(sizeof(ARMAMode1));
+ ARMAMode1* am = LibVEX_Alloc_inline(sizeof(ARMAMode1));
am->tag = ARMam1_RI;
am->ARMam1.RI.reg = reg;
am->ARMam1.RI.simm13 = simm13;
@@ -225,7 +225,7 @@
return am;
}
ARMAMode1* ARMAMode1_RRS ( HReg base, HReg index, UInt shift ) {
- ARMAMode1* am = LibVEX_Alloc(sizeof(ARMAMode1));
+ ARMAMode1* am = LibVEX_Alloc_inline(sizeof(ARMAMode1));
am->tag = ARMam1_RRS;
am->ARMam1.RRS.base = base;
am->ARMam1.RRS.index = index;
@@ -285,7 +285,7 @@
/* --------- Mem AModes: Addressing Mode 2 --------- */
ARMAMode2* ARMAMode2_RI ( HReg reg, Int simm9 ) {
- ARMAMode2* am = LibVEX_Alloc(sizeof(ARMAMode2));
+ ARMAMode2* am = LibVEX_Alloc_inline(sizeof(ARMAMode2));
am->tag = ARMam2_RI;
am->ARMam2.RI.reg = reg;
am->ARMam2.RI.simm9 = simm9;
@@ -293,7 +293,7 @@
return am;
}
ARMAMode2* ARMAMode2_RR ( HReg base, HReg index ) {
- ARMAMode2* am = LibVEX_Alloc(sizeof(ARMAMode2));
+ ARMAMode2* am = LibVEX_Alloc_inline(sizeof(ARMAMode2));
am->tag = ARMam2_RR;
am->ARMam2.RR.base = base;
am->ARMam2.RR.index = index;
@@ -351,7 +351,7 @@
/* --------- Mem AModes: Addressing Mode VFP --------- */
ARMAModeV* mkARMAModeV ( HReg reg, Int simm11 ) {
- ARMAModeV* am = LibVEX_Alloc(sizeof(ARMAModeV));
+ ARMAModeV* am = LibVEX_Alloc_inline(sizeof(ARMAModeV));
vassert(simm11 >= -1020 && simm11 <= 1020);
vassert(0 == (simm11 & 3));
am->reg = reg;
@@ -377,7 +377,7 @@
/* --------- Mem AModes: Addressing Mode Neon ------- */
ARMAModeN *mkARMAModeN_RR ( HReg rN, HReg rM ) {
- ARMAModeN* am = LibVEX_Alloc(sizeof(ARMAModeN));
+ ARMAModeN* am = LibVEX_Alloc_inline(sizeof(ARMAModeN));
am->tag = ARMamN_RR;
am->ARMamN.RR.rN = rN;
am->ARMamN.RR.rM = rM;
@@ -385,7 +385,7 @@
}
ARMAModeN *mkARMAModeN_R ( HReg rN ) {
- ARMAModeN* am = LibVEX_Alloc(sizeof(ARMAModeN));
+ ARMAModeN* am = LibVEX_Alloc_inline(sizeof(ARMAModeN));
am->tag = ARMamN_R;
am->ARMamN.R.rN = rN;
return am;
@@ -435,7 +435,7 @@
}
ARMRI84* ARMRI84_I84 ( UShort imm8, UShort imm4 ) {
- ARMRI84* ri84 = LibVEX_Alloc(sizeof(ARMRI84));
+ ARMRI84* ri84 = LibVEX_Alloc_inline(sizeof(ARMRI84));
ri84->tag = ARMri84_I84;
ri84->ARMri84.I84.imm8 = imm8;
ri84->ARMri84.I84.imm4 = imm4;
@@ -444,7 +444,7 @@
return ri84;
}
ARMRI84* ARMRI84_R ( HReg reg ) {
- ARMRI84* ri84 = LibVEX_Alloc(sizeof(ARMRI84));
+ ARMRI84* ri84 = LibVEX_Alloc_inline(sizeof(ARMRI84));
ri84->tag = ARMri84_R;
ri84->ARMri84.R.reg = reg;
return ri84;
@@ -492,14 +492,14 @@
/* --------- Reg or imm5 operands --------- */
ARMRI5* ARMRI5_I5 ( UInt imm5 ) {
- ARMRI5* ri5 = LibVEX_Alloc(sizeof(ARMRI5));
+ ARMRI5* ri5 = LibVEX_Alloc_inline(sizeof(ARMRI5));
ri5->tag = ARMri5_I5;
ri5->ARMri5.I5.imm5 = imm5;
vassert(imm5 > 0 && imm5 <= 31); // zero is not allowed
return ri5;
}
ARMRI5* ARMRI5_R ( HReg reg ) {
- ARMRI5* ri5 = LibVEX_Alloc(sizeof(ARMRI5));
+ ARMRI5* ri5 = LibVEX_Alloc_inline(sizeof(ARMRI5));
ri5->tag = ARMri5_R;
ri5->ARMri5.R.reg = reg;
return ri5;
@@ -545,7 +545,7 @@
/* -------- Neon Immediate operatnd --------- */
ARMNImm* ARMNImm_TI ( UInt type, UInt imm8 ) {
- ARMNImm* i = LibVEX_Alloc(sizeof(ARMNImm));
+ ARMNImm* i = LibVEX_Alloc_inline(sizeof(ARMNImm));
i->type = type;
i->imm8 = imm8;
return i;
@@ -659,7 +659,7 @@
ARMNRS* mkARMNRS(ARMNRS_tag tag, HReg reg, UInt index)
{
- ARMNRS *p = LibVEX_Alloc(sizeof(ARMNRS));
+ ARMNRS *p = LibVEX_Alloc_inline(sizeof(ARMNRS));
p->tag = tag;
p->reg = reg;
p->index = index;
@@ -1099,7 +1099,7 @@
ARMInstr* ARMInstr_Alu ( ARMAluOp op,
HReg dst, HReg argL, ARMRI84* argR ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_Alu;
i->ARMin.Alu.op = op;
i->ARMin.Alu.dst = dst;
@@ -1109,7 +1109,7 @@
}
ARMInstr* ARMInstr_Shift ( ARMShiftOp op,
HReg dst, HReg argL, ARMRI5* argR ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_Shift;
i->ARMin.Shift.op = op;
i->ARMin.Shift.dst = dst;
@@ -1118,7 +1118,7 @@
return i;
}
ARMInstr* ARMInstr_Unary ( ARMUnaryOp op, HReg dst, HReg src ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_Unary;
i->ARMin.Unary.op = op;
i->ARMin.Unary.dst = dst;
@@ -1126,7 +1126,7 @@
return i;
}
ARMInstr* ARMInstr_CmpOrTst ( Bool isCmp, HReg argL, ARMRI84* argR ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_CmpOrTst;
i->ARMin.CmpOrTst.isCmp = isCmp;
i->ARMin.CmpOrTst.argL = argL;
@@ -1134,14 +1134,14 @@
return i;
}
ARMInstr* ARMInstr_Mov ( HReg dst, ARMRI84* src ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_Mov;
i->ARMin.Mov.dst = dst;
i->ARMin.Mov.src = src;
return i;
}
ARMInstr* ARMInstr_Imm32 ( HReg dst, UInt imm32 ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_Imm32;
i->ARMin.Imm32.dst = dst;
i->ARMin.Imm32.imm32 = imm32;
@@ -1149,7 +1149,7 @@
}
ARMInstr* ARMInstr_LdSt32 ( ARMCondCode cc,
Bool isLoad, HReg rD, ARMAMode1* amode ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_LdSt32;
i->ARMin.LdSt32.cc = cc;
i->ARMin.LdSt32.isLoad = isLoad;
@@ -1161,7 +1161,7 @@
ARMInstr* ARMInstr_LdSt16 ( ARMCondCode cc,
Bool isLoad, Bool signedLoad,
HReg rD, ARMAMode2* amode ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_LdSt16;
i->ARMin.LdSt16.cc = cc;
i->ARMin.LdSt16.isLoad = isLoad;
@@ -1173,7 +1173,7 @@
}
ARMInstr* ARMInstr_LdSt8U ( ARMCondCode cc,
Bool isLoad, HReg rD, ARMAMode1* amode ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_LdSt8U;
i->ARMin.LdSt8U.cc = cc;
i->ARMin.LdSt8U.isLoad = isLoad;
@@ -1183,7 +1183,7 @@
return i;
}
ARMInstr* ARMInstr_Ld8S ( ARMCondCode cc, HReg rD, ARMAMode2* amode ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_Ld8S;
i->ARMin.Ld8S.cc = cc;
i->ARMin.Ld8S.rD = rD;
@@ -1193,7 +1193,7 @@
}
ARMInstr* ARMInstr_XDirect ( Addr32 dstGA, ARMAMode1* amR15T,
ARMCondCode cond, Bool toFastEP ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_XDirect;
i->ARMin.XDirect.dstGA = dstGA;
i->ARMin.XDirect.amR15T = amR15T;
@@ -1203,7 +1203,7 @@
}
ARMInstr* ARMInstr_XIndir ( HReg dstGA, ARMAMode1* amR15T,
ARMCondCode cond ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_XIndir;
i->ARMin.XIndir.dstGA = dstGA;
i->ARMin.XIndir.amR15T = amR15T;
@@ -1212,7 +1212,7 @@
}
ARMInstr* ARMInstr_XAssisted ( HReg dstGA, ARMAMode1* amR15T,
ARMCondCode cond, IRJumpKind jk ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_XAssisted;
i->ARMin.XAssisted.dstGA = dstGA;
i->ARMin.XAssisted.amR15T = amR15T;
@@ -1221,7 +1221,7 @@
return i;
}
ARMInstr* ARMInstr_CMov ( ARMCondCode cond, HReg dst, ARMRI84* src ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_CMov;
i->ARMin.CMov.cond = cond;
i->ARMin.CMov.dst = dst;
@@ -1231,7 +1231,7 @@
}
ARMInstr* ARMInstr_Call ( ARMCondCode cond, Addr32 target, Int nArgRegs,
RetLoc rloc ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_Call;
i->ARMin.Call.cond = cond;
i->ARMin.Call.target = target;
@@ -1241,27 +1241,27 @@
return i;
}
ARMInstr* ARMInstr_Mul ( ARMMulOp op ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_Mul;
i->ARMin.Mul.op = op;
return i;
}
ARMInstr* ARMInstr_LdrEX ( Int szB ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_LdrEX;
i->ARMin.LdrEX.szB = szB;
vassert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
return i;
}
ARMInstr* ARMInstr_StrEX ( Int szB ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_StrEX;
i->ARMin.StrEX.szB = szB;
vassert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
return i;
}
ARMInstr* ARMInstr_VLdStD ( Bool isLoad, HReg dD, ARMAModeV* am ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_VLdStD;
i->ARMin.VLdStD.isLoad = isLoad;
i->ARMin.VLdStD.dD = dD;
@@ -1269,7 +1269,7 @@
return i;
}
ARMInstr* ARMInstr_VLdStS ( Bool isLoad, HReg fD, ARMAModeV* am ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_VLdStS;
i->ARMin.VLdStS.isLoad = isLoad;
i->ARMin.VLdStS.fD = fD;
@@ -1277,7 +1277,7 @@
return i;
}
ARMInstr* ARMInstr_VAluD ( ARMVfpOp op, HReg dst, HReg argL, HReg argR ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_VAluD;
i->ARMin.VAluD.op = op;
i->ARMin.VAluD.dst = dst;
@@ -1286,7 +1286,7 @@
return i;
}
ARMInstr* ARMInstr_VAluS ( ARMVfpOp op, HReg dst, HReg argL, HReg argR ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_VAluS;
i->ARMin.VAluS.op = op;
i->ARMin.VAluS.dst = dst;
@@ -1295,7 +1295,7 @@
return i;
}
ARMInstr* ARMInstr_VUnaryD ( ARMVfpUnaryOp op, HReg dst, HReg src ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_VUnaryD;
i->ARMin.VUnaryD.op = op;
i->ARMin.VUnaryD.dst = dst;
@@ -1303,7 +1303,7 @@
return i;
}
ARMInstr* ARMInstr_VUnaryS ( ARMVfpUnaryOp op, HReg dst, HReg src ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_VUnaryS;
i->ARMin.VUnaryS.op = op;
i->ARMin.VUnaryS.dst = dst;
@@ -1311,14 +1311,14 @@
return i;
}
ARMInstr* ARMInstr_VCmpD ( HReg argL, HReg argR ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_VCmpD;
i->ARMin.VCmpD.argL = argL;
i->ARMin.VCmpD.argR = argR;
return i;
}
ARMInstr* ARMInstr_VCMovD ( ARMCondCode cond, HReg dst, HReg src ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_VCMovD;
i->ARMin.VCMovD.cond = cond;
i->ARMin.VCMovD.dst = dst;
@@ -1327,7 +1327,7 @@
return i;
}
ARMInstr* ARMInstr_VCMovS ( ARMCondCode cond, HReg dst, HReg src ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_VCMovS;
i->ARMin.VCMovS.cond = cond;
i->ARMin.VCMovS.dst = dst;
@@ -1336,7 +1336,7 @@
return i;
}
ARMInstr* ARMInstr_VCvtSD ( Bool sToD, HReg dst, HReg src ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_VCvtSD;
i->ARMin.VCvtSD.sToD = sToD;
i->ARMin.VCvtSD.dst = dst;
@@ -1344,7 +1344,7 @@
return i;
}
ARMInstr* ARMInstr_VXferD ( Bool toD, HReg dD, HReg rHi, HReg rLo ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_VXferD;
i->ARMin.VXferD.toD = toD;
i->ARMin.VXferD.dD = dD;
@@ -1353,7 +1353,7 @@
return i;
}
ARMInstr* ARMInstr_VXferS ( Bool toS, HReg fD, HReg rLo ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_VXferS;
i->ARMin.VXferS.toS = toS;
i->ARMin.VXferS.fD = fD;
@@ -1362,7 +1362,7 @@
}
ARMInstr* ARMInstr_VCvtID ( Bool iToD, Bool syned,
HReg dst, HReg src ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_VCvtID;
i->ARMin.VCvtID.iToD = iToD;
i->ARMin.VCvtID.syned = syned;
@@ -1371,25 +1371,25 @@
return i;
}
ARMInstr* ARMInstr_FPSCR ( Bool toFPSCR, HReg iReg ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_FPSCR;
i->ARMin.FPSCR.toFPSCR = toFPSCR;
i->ARMin.FPSCR.iReg = iReg;
return i;
}
ARMInstr* ARMInstr_MFence ( void ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_MFence;
return i;
}
ARMInstr* ARMInstr_CLREX( void ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_CLREX;
return i;
}
ARMInstr* ARMInstr_NLdStQ ( Bool isLoad, HReg dQ, ARMAModeN *amode ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_NLdStQ;
i->ARMin.NLdStQ.isLoad = isLoad;
i->ARMin.NLdStQ.dQ = dQ;
@@ -1398,7 +1398,7 @@
}
ARMInstr* ARMInstr_NLdStD ( Bool isLoad, HReg dD, ARMAModeN *amode ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_NLdStD;
i->ARMin.NLdStD.isLoad = isLoad;
i->ARMin.NLdStD.dD = dD;
@@ -1408,7 +1408,7 @@
ARMInstr* ARMInstr_NUnary ( ARMNeonUnOp op, HReg dQ, HReg nQ,
UInt size, Bool Q ) {
- ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
+ ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
i->tag = ARMin_NUnary;
i->ARMin.NUnary.op = op;
i->ARMin.NUnary.src = nQ;
@@ -1420,7 +142...
[truncated message content] |