|
From: <sv...@va...> - 2012-04-25 16:48:06
|
sewardj 2012-04-25 17:47:53 +0100 (Wed, 25 Apr 2012)
New Revision: 2315
Log:
Try to fold out some of the lousy code generated by the amd64 front
end for 32 bit shifts.
Modified files:
trunk/priv/ir_opt.c
Modified: trunk/priv/ir_opt.c (+27 -1)
===================================================================
--- trunk/priv/ir_opt.c 2012-04-25 15:33:03 +01:00 (rev 2314)
+++ trunk/priv/ir_opt.c 2012-04-25 17:47:53 +01:00 (rev 2315)
@@ -4514,6 +4514,33 @@
/* 32Uto64( 16Uto32( x )) --> 16Uto64(x) */
if (is_Unop(aa, Iop_16Uto32))
return IRExpr_Unop(Iop_16Uto64, aa->Iex.Unop.arg);
+ /* 32Uto64(64to32( Shr64( 32Uto64(64to32(x)), sh )))
+ --> Shr64( 32Uto64(64to32(x)), sh ) */
+ if (is_Unop(aa, Iop_64to32)
+ && is_Binop(aa->Iex.Unop.arg, Iop_Shr64)
+ && is_Unop(aa->Iex.Unop.arg->Iex.Binop.arg1, Iop_32Uto64)
+ && is_Unop(aa->Iex.Unop.arg->Iex.Binop.arg1->Iex.Unop.arg,
+ Iop_64to32)) {
+ return aa->Iex.Unop.arg;
+ }
+ /* 32Uto64(64to32( Shl64( 32Uto64(64to32(x)), sh )))
+ --> 32Uto64(64to32( Shl64( x, sh ))) */
+ if (is_Unop(aa, Iop_64to32)
+ && is_Binop(aa->Iex.Unop.arg, Iop_Shl64)
+ && is_Unop(aa->Iex.Unop.arg->Iex.Binop.arg1, Iop_32Uto64)
+ && is_Unop(aa->Iex.Unop.arg->Iex.Binop.arg1->Iex.Unop.arg,
+ Iop_64to32)) {
+ return
+ IRExpr_Unop(
+ Iop_32Uto64,
+ IRExpr_Unop(
+ Iop_64to32,
+ IRExpr_Binop(
+ Iop_Shl64,
+ aa->Iex.Unop.arg->Iex.Binop.arg1->Iex.Unop.arg->Iex.Unop.arg,
+ aa->Iex.Unop.arg->Iex.Binop.arg2
+ )));
+ }
break;
case Iop_1Sto32:
@@ -4529,7 +4556,6 @@
}
break;
-
default:
break;
}
|