|
From: <sv...@va...> - 2013-01-15 22:30:49
|
sewardj 2013-01-15 22:30:39 +0000 (Tue, 15 Jan 2013)
New Revision: 2636
Log:
Fix up the amd64 back end to be in sync with infrastructural changes
w.r.t. conditional dirty helpers that return values. Does not
actually handle such cases since the amd64 front end does not generate
them.
Modified files:
branches/COMEM/priv/host_amd64_defs.c
branches/COMEM/priv/host_amd64_defs.h
branches/COMEM/priv/host_amd64_isel.c
Modified: branches/COMEM/priv/host_amd64_isel.c (+45 -14)
===================================================================
--- branches/COMEM/priv/host_amd64_isel.c 2013-01-15 17:26:33 +00:00 (rev 2635)
+++ branches/COMEM/priv/host_amd64_isel.c 2013-01-15 22:30:39 +00:00 (rev 2636)
@@ -407,7 +407,8 @@
static
void doHelperCall ( ISelEnv* env,
Bool passBBP,
- IRExpr* guard, IRCallee* cee, IRExpr** args )
+ IRExpr* guard, IRCallee* cee, IRExpr** args,
+ RetLoc rloc )
{
AMD64CondCode cc;
HReg argregs[6];
@@ -587,7 +588,7 @@
addInstr(env, AMD64Instr_Call(
cc,
Ptr_to_ULong(cee->addr),
- n_args + (passBBP ? 1 : 0)
+ n_args + (passBBP ? 1 : 0), rloc
)
);
}
@@ -1126,7 +1127,7 @@
addInstr(env, AMD64Instr_MovxLQ(False, argR, argR));
addInstr(env, mk_iMOVsd_RR(argL, hregAMD64_RDI()) );
addInstr(env, mk_iMOVsd_RR(argR, hregAMD64_RSI()) );
- addInstr(env, AMD64Instr_Call( Acc_ALWAYS, (ULong)fn, 2 ));
+ addInstr(env, AMD64Instr_Call( Acc_ALWAYS, (ULong)fn, 2, RetLocInt ));
addInstr(env, mk_iMOVsd_RR(hregAMD64_RAX(), dst));
return dst;
}
@@ -1595,7 +1596,8 @@
HReg arg = iselIntExpr_R(env, e->Iex.Unop.arg);
fn = (HWord)h_generic_calc_GetMSBs8x8;
addInstr(env, mk_iMOVsd_RR(arg, hregAMD64_RDI()) );
- addInstr(env, AMD64Instr_Call( Acc_ALWAYS, (ULong)fn, 1 ));
+ addInstr(env, AMD64Instr_Call( Acc_ALWAYS, (ULong)fn,
+ 1, RetLocInt ));
/* MovxLQ is not exactly the right thing here. We just
need to get the bottom 8 bits of RAX into dst, and zero
out everything else. Assuming that the helper returns
@@ -1629,7 +1631,7 @@
HReg dst = newVRegI(env);
HReg arg = iselIntExpr_R(env, e->Iex.Unop.arg);
addInstr(env, mk_iMOVsd_RR(arg, hregAMD64_RDI()) );
- addInstr(env, AMD64Instr_Call( Acc_ALWAYS, (ULong)fn, 1 ));
+ addInstr(env, AMD64Instr_Call( Acc_ALWAYS, (ULong)fn, 1, RetLocInt ));
addInstr(env, mk_iMOVsd_RR(hregAMD64_RAX(), dst));
return dst;
}
@@ -1683,13 +1685,15 @@
HReg dst = newVRegI(env);
vassert(ty == e->Iex.CCall.retty);
- /* be very restrictive for now. Only 64-bit ints allowed
- for args, and 64 or 32 bits for return type. */
+ /* be very restrictive for now. Only 64-bit ints allowed for
+ args, and 64 or 32 bits for return type. Don't forget to
+ change the RetLoc if more types are allowed in future. */
if (e->Iex.CCall.retty != Ity_I64 && e->Iex.CCall.retty != Ity_I32)
goto irreducible;
/* Marshal args, do the call. */
- doHelperCall( env, False, NULL, e->Iex.CCall.cee, e->Iex.CCall.args );
+ doHelperCall( env, False, NULL, e->Iex.CCall.cee, e->Iex.CCall.args,
+ RetLocInt );
/* Move to dst, and zero out the top 32 bits if the result type is
Ity_I32. Probably overkill, but still .. */
@@ -2226,7 +2230,8 @@
vassert(cal->Iex.CCall.retty == Ity_I64); /* else ill-typed IR */
vassert(con->Iex.Const.con->tag == Ico_U64);
/* Marshal args, do the call. */
- doHelperCall( env, False, NULL, cal->Iex.CCall.cee, cal->Iex.CCall.args );
+ doHelperCall( env, False, NULL, cal->Iex.CCall.cee, cal->Iex.CCall.args,
+ RetLocInt );
addInstr(env, AMD64Instr_Imm64(con->Iex.Const.con->Ico.U64, tmp));
addInstr(env, AMD64Instr_Alu64R(Aalu_CMP,
AMD64RMI_Reg(hregAMD64_RAX()), tmp));
@@ -3321,7 +3326,8 @@
addInstr(env, AMD64Instr_SseLdSt(False/*!isLoad*/, 16, argR,
AMD64AMode_IR(0, hregAMD64_RDX())));
/* call the helper */
- addInstr(env, AMD64Instr_Call( Acc_ALWAYS, (ULong)fn, 3 ));
+ addInstr(env, AMD64Instr_Call( Acc_ALWAYS, (ULong)fn,
+ 3, RetLocNone ));
/* fetch the result from memory, using %r_argp, which the
register allocator will keep alive across the call. */
addInstr(env, AMD64Instr_SseLdSt(True/*isLoad*/, 16, dst,
@@ -3369,7 +3375,8 @@
addInstr(env, mk_iMOVsd_RR(argR, hregAMD64_RDX()));
/* call the helper */
- addInstr(env, AMD64Instr_Call( Acc_ALWAYS, (ULong)fn, 3 ));
+ addInstr(env, AMD64Instr_Call( Acc_ALWAYS, (ULong)fn,
+ 3, RetLocNone ));
/* fetch the result from memory, using %r_argp, which the
register allocator will keep alive across the call. */
addInstr(env, AMD64Instr_SseLdSt(True/*isLoad*/, 16, dst,
@@ -3915,7 +3922,6 @@
/* --------- Call to DIRTY helper --------- */
case Ist_Dirty: {
- IRType retty;
IRDirty* d = stmt->Ist.Dirty.details;
Bool passBBP = False;
@@ -3924,15 +3930,37 @@
passBBP = toBool(d->nFxState > 0 && d->needsBBP);
+ /* Figure out the return type, if any. */
+ IRType retty = Ity_INVALID;
+ if (d->tmp != IRTemp_INVALID)
+ retty = typeOfIRTemp(env->type_env, d->tmp);
+
+ /* Marshal args, do the call, clear stack, set the return value
+ to 0x555..555 if this is a conditional call that returns a
+ value and the call is skipped. We need to set the ret-loc
+ correctly in order to implement the IRDirty semantics that
+ the return value is 0x555..555 if the call doesn't happen. */
+ RetLoc rloc = RetLocINVALID;
+ switch (retty) {
+ case Ity_INVALID: /* function doesn't return anything */
+ rloc = RetLocNone; break;
+ case Ity_I64:
+ case Ity_I32: case Ity_I16: case Ity_I8:
+ rloc = RetLocInt; break;
+ default:
+ break;
+ }
+ if (rloc == RetLocINVALID)
+ break; /* will go to stmt_fail: */
+
/* Marshal args, do the call, clear stack. */
- doHelperCall( env, passBBP, d->guard, d->cee, d->args );
+ doHelperCall( env, passBBP, d->guard, d->cee, d->args, rloc );
/* Now figure out what to do with the returned value, if any. */
if (d->tmp == IRTemp_INVALID)
/* No return value. Nothing to do. */
return;
- retty = typeOfIRTemp(env->type_env, d->tmp);
if (retty == Ity_I64 || retty == Ity_I32
|| retty == Ity_I16 || retty == Ity_I8) {
/* The returned value is in %rax. Park it in the register
@@ -3940,7 +3968,10 @@
HReg dst = lookupIRTemp(env, d->tmp);
addInstr(env, mk_iMOVsd_RR(hregAMD64_RAX(),dst) );
return;
+ } else {
+ vassert(0);
}
+
break;
}
Modified: branches/COMEM/priv/host_amd64_defs.h (+2 -1)
===================================================================
--- branches/COMEM/priv/host_amd64_defs.h 2013-01-15 17:26:33 +00:00 (rev 2635)
+++ branches/COMEM/priv/host_amd64_defs.h 2013-01-15 22:30:39 +00:00 (rev 2636)
@@ -473,6 +473,7 @@
AMD64CondCode cond;
Addr64 target;
Int regparms; /* 0 .. 6 */
+ RetLoc rloc; /* where the return value will be */
} Call;
/* Update the guest RIP value, then exit requesting to chain
to it. May be conditional. */
@@ -701,7 +702,7 @@
extern AMD64Instr* AMD64Instr_MulL ( Bool syned, AMD64RM* );
extern AMD64Instr* AMD64Instr_Div ( Bool syned, Int sz, AMD64RM* );
extern AMD64Instr* AMD64Instr_Push ( AMD64RMI* );
-extern AMD64Instr* AMD64Instr_Call ( AMD64CondCode, Addr64, Int );
+extern AMD64Instr* AMD64Instr_Call ( AMD64CondCode, Addr64, Int, RetLoc );
extern AMD64Instr* AMD64Instr_XDirect ( Addr64 dstGA, AMD64AMode* amRIP,
AMD64CondCode cond, Bool toFastEP );
extern AMD64Instr* AMD64Instr_XIndir ( HReg dstGA, AMD64AMode* amRIP,
Modified: branches/COMEM/priv/host_amd64_defs.c (+16 -3)
===================================================================
--- branches/COMEM/priv/host_amd64_defs.c 2013-01-15 17:26:33 +00:00 (rev 2635)
+++ branches/COMEM/priv/host_amd64_defs.c 2013-01-15 22:30:39 +00:00 (rev 2636)
@@ -693,13 +693,16 @@
i->Ain.Push.src = src;
return i;
}
-AMD64Instr* AMD64Instr_Call ( AMD64CondCode cond, Addr64 target, Int regparms ) {
+AMD64Instr* AMD64Instr_Call ( AMD64CondCode cond, Addr64 target, Int regparms,
+ RetLoc rloc ) {
AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
i->tag = Ain_Call;
i->Ain.Call.cond = cond;
i->Ain.Call.target = target;
i->Ain.Call.regparms = regparms;
+ i->Ain.Call.rloc = rloc;
vassert(regparms >= 0 && regparms <= 6);
+ vassert(rloc != RetLocINVALID);
return i;
}
@@ -1070,11 +1073,12 @@
ppAMD64RMI(i->Ain.Push.src);
return;
case Ain_Call:
- vex_printf("call%s[%d] ",
+ vex_printf("call%s[%d,",
i->Ain.Call.cond==Acc_ALWAYS
? "" : showAMD64CondCode(i->Ain.Call.cond),
i->Ain.Call.regparms );
- vex_printf("0x%llx", i->Ain.Call.target);
+ ppRetLoc(i->Ain.Call.rloc);
+ vex_printf("] 0x%llx", i->Ain.Call.target);
break;
case Ain_XDirect:
@@ -2663,6 +2667,15 @@
}
case Ain_Call: {
+ if (i->Ain.Call.cond != Acc_ALWAYS && i->Ain.Call.rloc != RetLocNone) {
+ /* The call might not happen (it isn't unconditional) and it
+ returns a result. In this case we will need to generate a
+ control flow diamond to put 0x555..555 in the return
+ register(s) in the case where the call doesn't happen. If
+ this ever becomes necessary, maybe copy code from the ARM
+ equivalent. Until that day, just give up. */
+ goto bad;
+ }
/* As per detailed comment for Ain_Call in
getRegUsage_AMD64Instr above, %r11 is used as an address
temporary. */
|