You can subscribe to this list here.
| 2002 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
|
Aug
|
Sep
(1) |
Oct
(122) |
Nov
(152) |
Dec
(69) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2003 |
Jan
(6) |
Feb
(25) |
Mar
(73) |
Apr
(82) |
May
(24) |
Jun
(25) |
Jul
(10) |
Aug
(11) |
Sep
(10) |
Oct
(54) |
Nov
(203) |
Dec
(182) |
| 2004 |
Jan
(307) |
Feb
(305) |
Mar
(430) |
Apr
(312) |
May
(187) |
Jun
(342) |
Jul
(487) |
Aug
(637) |
Sep
(336) |
Oct
(373) |
Nov
(441) |
Dec
(210) |
| 2005 |
Jan
(385) |
Feb
(480) |
Mar
(636) |
Apr
(544) |
May
(679) |
Jun
(625) |
Jul
(810) |
Aug
(838) |
Sep
(634) |
Oct
(521) |
Nov
(965) |
Dec
(543) |
| 2006 |
Jan
(494) |
Feb
(431) |
Mar
(546) |
Apr
(411) |
May
(406) |
Jun
(322) |
Jul
(256) |
Aug
(401) |
Sep
(345) |
Oct
(542) |
Nov
(308) |
Dec
(481) |
| 2007 |
Jan
(427) |
Feb
(326) |
Mar
(367) |
Apr
(255) |
May
(244) |
Jun
(204) |
Jul
(223) |
Aug
(231) |
Sep
(354) |
Oct
(374) |
Nov
(497) |
Dec
(362) |
| 2008 |
Jan
(322) |
Feb
(482) |
Mar
(658) |
Apr
(422) |
May
(476) |
Jun
(396) |
Jul
(455) |
Aug
(267) |
Sep
(280) |
Oct
(253) |
Nov
(232) |
Dec
(304) |
| 2009 |
Jan
(486) |
Feb
(470) |
Mar
(458) |
Apr
(423) |
May
(696) |
Jun
(461) |
Jul
(551) |
Aug
(575) |
Sep
(134) |
Oct
(110) |
Nov
(157) |
Dec
(102) |
| 2010 |
Jan
(226) |
Feb
(86) |
Mar
(147) |
Apr
(117) |
May
(107) |
Jun
(203) |
Jul
(193) |
Aug
(238) |
Sep
(300) |
Oct
(246) |
Nov
(23) |
Dec
(75) |
| 2011 |
Jan
(133) |
Feb
(195) |
Mar
(315) |
Apr
(200) |
May
(267) |
Jun
(293) |
Jul
(353) |
Aug
(237) |
Sep
(278) |
Oct
(611) |
Nov
(274) |
Dec
(260) |
| 2012 |
Jan
(303) |
Feb
(391) |
Mar
(417) |
Apr
(441) |
May
(488) |
Jun
(655) |
Jul
(590) |
Aug
(610) |
Sep
(526) |
Oct
(478) |
Nov
(359) |
Dec
(372) |
| 2013 |
Jan
(467) |
Feb
(226) |
Mar
(391) |
Apr
(281) |
May
(299) |
Jun
(252) |
Jul
(311) |
Aug
(352) |
Sep
(481) |
Oct
(571) |
Nov
(222) |
Dec
(231) |
| 2014 |
Jan
(185) |
Feb
(329) |
Mar
(245) |
Apr
(238) |
May
(281) |
Jun
(399) |
Jul
(382) |
Aug
(500) |
Sep
(579) |
Oct
(435) |
Nov
(487) |
Dec
(256) |
| 2015 |
Jan
(338) |
Feb
(357) |
Mar
(330) |
Apr
(294) |
May
(191) |
Jun
(108) |
Jul
(142) |
Aug
(261) |
Sep
(190) |
Oct
(54) |
Nov
(83) |
Dec
(22) |
| 2016 |
Jan
(49) |
Feb
(89) |
Mar
(33) |
Apr
(50) |
May
(27) |
Jun
(34) |
Jul
(53) |
Aug
(53) |
Sep
(98) |
Oct
(206) |
Nov
(93) |
Dec
(53) |
| 2017 |
Jan
(65) |
Feb
(82) |
Mar
(102) |
Apr
(86) |
May
(187) |
Jun
(67) |
Jul
(23) |
Aug
(93) |
Sep
(65) |
Oct
(45) |
Nov
(35) |
Dec
(17) |
| 2018 |
Jan
(26) |
Feb
(35) |
Mar
(38) |
Apr
(32) |
May
(8) |
Jun
(43) |
Jul
(27) |
Aug
(30) |
Sep
(43) |
Oct
(42) |
Nov
(38) |
Dec
(67) |
| 2019 |
Jan
(32) |
Feb
(37) |
Mar
(53) |
Apr
(64) |
May
(49) |
Jun
(18) |
Jul
(14) |
Aug
(53) |
Sep
(25) |
Oct
(30) |
Nov
(49) |
Dec
(31) |
| 2020 |
Jan
(87) |
Feb
(45) |
Mar
(37) |
Apr
(51) |
May
(99) |
Jun
(36) |
Jul
(11) |
Aug
(14) |
Sep
(20) |
Oct
(24) |
Nov
(40) |
Dec
(23) |
| 2021 |
Jan
(14) |
Feb
(53) |
Mar
(85) |
Apr
(15) |
May
(19) |
Jun
(3) |
Jul
(14) |
Aug
(1) |
Sep
(57) |
Oct
(73) |
Nov
(56) |
Dec
(22) |
| 2022 |
Jan
(3) |
Feb
(22) |
Mar
(6) |
Apr
(55) |
May
(46) |
Jun
(39) |
Jul
(15) |
Aug
(9) |
Sep
(11) |
Oct
(34) |
Nov
(20) |
Dec
(36) |
| 2023 |
Jan
(79) |
Feb
(41) |
Mar
(99) |
Apr
(169) |
May
(48) |
Jun
(16) |
Jul
(16) |
Aug
(57) |
Sep
(19) |
Oct
|
Nov
|
Dec
|
| S | M | T | W | T | F | S |
|---|---|---|---|---|---|---|
|
|
|
|
1
(30) |
2
(8) |
3
(5) |
4
(5) |
|
5
(3) |
6
(9) |
7
(5) |
8
(14) |
9
(17) |
10
(27) |
11
(10) |
|
12
(6) |
13
(10) |
14
(7) |
15
(16) |
16
(9) |
17
(14) |
18
(8) |
|
19
(5) |
20
(13) |
21
(21) |
22
(13) |
23
(4) |
24
(1) |
25
(4) |
|
26
(2) |
27
(7) |
28
(4) |
29
(5) |
30
(12) |
|
|
|
From: Zhi-Gang L. <zh...@gm...> - 2015-04-13 22:39:41
|
Philippe
Have you ever tried the patch I posted in this mail chain for the TileGX
evCheck issue?
If it works for you, I will commit it once I have SVN access.
Thanks
ZhiGang
On Apr 12, 2015 1:25 AM, "Zhigang Liu" <zl...@ez...> wrote:
>
>
> ________________________________________
> From: Philippe Waroquiers <phi...@sk...>
> Sent: Saturday, April 11, 2015 7:30 AM
> To: Zhigang Liu
> Cc: Valgrind Developers
> Subject: Re: [Valgrind-developers] I need access to a TILEGX :) :
> libvexmultiarch_test failing with TILEGX host
>
> On Sat, 2015-04-11 at 01:12 +0200, Philippe Waroquiers wrote:
> > On Sat, 2015-04-11 at 01:02 +0200, Philippe Waroquiers wrote:
> > > Julian,
> > > do you agree that the offB_HOST_* offsets are depending on the host
> > > architecture, and not on the guest architecture ?
> > Moving the offB_HOST_* to the arch_host switch makes
> > guest amd64/host tilegx
> > work ok.
> >
> > It looks to me that this is the right thing to do
> After an IRC discussion with Julian, it became clear that this
> is not the right thing to do, and that I misunderstood
> the somewhat misleading names offB_HOST_EvC_COUNTER and
> offB_HOST_EvC_FAILADDR.
>
> Here is what I understand now:
> These offB_HOST_* are really offset in the guest state,
> which give locations in the guest state that are used by the
> (generated) host code.
> Basically, a translation entry (generated host code) is doing
> if ((--guest_state->COUNTER) == 0) goto guest_state->FAILADDR
>
> So, COUNTER and FAILADDR are in the guest state.
> FAILADDR must be an host address
> (this is in fact wrongly defined in all 32-bit guest states.
> E.g. libvex_guest_x86.h and libvex_guest_ppc32.h define
> UInt host_EvC_FAILADDR;
> while it should be the size of a host address (or at least
> big enough to hold a 64-bit host address, if the host were
> 64 bits in a multiarch setup)).
>
>
> So, now I think the problem guest amd64/host tilegx
> is better solved in the host tilegx code, that should ensure to always
> generate the same nr of bytes for the evCheck instructions
> (this was suggested by Zhigang)
> (or maybe dynamically compute
> the needed nr of instructions for an eventcheck, depending
> on the offsets of the host_EvC_*, that changes the size of the
> instructions).
>
> Zhigang, does the above look reasonable to do in tilegx ?
>
> Yes, thank you for finding this issue. I have a simple patch for this;
> would you mind giving it a try?
> Thanks
> --- ZhiGang
>
> ******* Begin of the patch ******
> Index: priv/host_tilegx_defs.c
> ===================================================================
> --- priv/host_tilegx_defs.c (revision 3125)
> +++ priv/host_tilegx_defs.c (working copy)
> @@ -1348,11 +1348,10 @@
>
> static UChar *doAMode_IR ( UChar * p, UInt opc1, UInt rSD, TILEGXAMode *
> am )
> {
> - UInt rA; //, idx;
> + UInt rA;
> vassert(am->tag == GXam_IR);
>
> rA = iregNo(am->GXam.IR.base);
> - //idx = am->GXam.IR.index;
>
> if (opc1 == TILEGX_OPC_ST1 || opc1 == TILEGX_OPC_ST2 ||
> opc1 == TILEGX_OPC_ST4 || opc1 == TILEGX_OPC_ST) {
> @@ -1381,19 +1380,29 @@
> return p;
> }
>
> -/* Generate a machine-word sized load or store. Simplified version of
> - the GXin_Load and GXin_Store cases below. */
> +/* Generate a machine-word sized load or store using exact 2 bundles.
> + Simplified version of the GXin_Load and GXin_Store cases below. */
> static UChar* do_load_or_store_machine_word ( UChar* p, Bool isLoad, UInt
> reg,
> TILEGXAMode* am )
> {
> + UInt rA = iregNo(am->GXam.IR.base);
> +
> if (am->tag != GXam_IR)
> vpanic(__func__);
>
> - if (isLoad) /* load */
> - p = doAMode_IR(p, TILEGX_OPC_LD, reg, am);
> - else /* store */
> - p = doAMode_IR(p, TILEGX_OPC_ST, reg, am);
> -
> + if (isLoad) /* load */ {
> + /* r51 is reserved scratch registers. */
> + p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_ADDLI, 3,
> + 51, rA, am->GXam.IR.index));
> + /* load from address in r51 to rSD. */
> + p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_LD, 2, reg, 51));
> + } else /* store */ {
> + /* r51 is reserved scratch registers. */
> + p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_ADDLI, 3,
> + 51, rA, am->GXam.IR.index));
> + /* store rSD to address in r51 */
> + p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_ST, 2, 51, reg));
> + }
> return p;
> }
> ******* END of the patch ******
>
>
> (waiting for this to be done, I could always disable in the test
> using tilegx as a host)
>
> Thanks
>
> Philippe
>
>
>
>
> ------------------------------------------------------------------------------
> BPM Camp - Free Virtual Workshop May 6th at 10am PDT/1PM EDT
> Develop your own process in accordance with the BPMN 2 standard
> Learn Process modeling best practices with Bonita BPM through live
> exercises
> http://www.bonitasoft.com/be-part-of-it/events/bpm-camp-virtual-
> event?utm_
> source=Sourceforge_BPM_Camp_5_6_15&utm_medium=email&utm_campaign=VA_SF
> _______________________________________________
> Valgrind-developers mailing list
> Val...@li...
> https://lists.sourceforge.net/lists/listinfo/valgrind-developers
>
|
|
From: Kandi, J. <jag...@in...> - 2015-04-13 22:23:45
|
Hi, I work for Intel Corporation as KNL middleware and tools enabling manager. I am wondering whether Valgrind community has any plans to make changes to this tool to support KNL Processor. If yes, who is the owner / right person to talk to? Regards, Jagan.... Software and Services Group Intel Corporation Mobile: 480-205-5812 Jag...@in...<mailto:Jag...@in...> |
|
From: Peter B. <be...@vn...> - 2015-04-13 17:45:47
|
On Sat, 2015-04-11 at 16:36 -0700, John Reiser wrote: > > On MIPS (ASUS RT-N16): > > $ LD_SHOW_AUXV=1 /bin/true | grep AT_HWCAP > > ### empty output from grep: no AT_HWCAP at all > > ... because the C library is uClibc, not glibc. Yes, the above is a neat trick from glibc. On linux, the AUXV is exported from the kernel via /proc/<pid>/auxv or /proc/self/auxv and it is also placed on the stack above the top most frame. We (IBM) also created a libauxv library which can help with reading and querying the AUXV contents: https://github.com/Libauxv/libauxv > Some digging shows that the AUX vector is: > 0x00000010 AT_HWCAP 0x00000000 [snip] > Still, AT_HWCAP is 0, which omits information such as support for mips16 and dsp > that is shown in /proc/cpuinfo below. The Linux kernel is 2.6.24 (dd-wrt + optware.) That seems like a kernel bug to me. > From the viewpoint of the end user, a commandline override such as --cpu=... > has an advantage because it allows working around bugs in AT_HWCAP > and/or /proc/cpuinfo. You'll get no argument from me on it being potentially useful to override the automatically detected cpu value. Peter |
|
From: Rhys K. <rhy...@gm...> - 2015-04-13 11:57:57
|
Thanks Mark for the patch. On Monday, 13 April 2015, Julian Seward <js...@ac...> wrote: > On 13/04/15 00:50, Mark Pauley wrote: > > > a breaking change to the initimg interfaces without making sure > > to un-bust initimg-darwin.c > > Ach; that is my bad really, for failing to set up or arrange to have > set up, a nightly builder for MacOSX. > > > configure.am file is a bit too strict for Apple LLVM checking, > > Committed as r15087 and r15088. Thanks for the fixes. > > J > > > > ------------------------------------------------------------------------------ > BPM Camp - Free Virtual Workshop May 6th at 10am PDT/1PM EDT > Develop your own process in accordance with the BPMN 2 standard > Learn Process modeling best practices with Bonita BPM through live > exercises > http://www.bonitasoft.com/be-part-of-it/events/bpm-camp-virtual- > event?utm_ > source=Sourceforge_BPM_Camp_5_6_15&utm_medium=email&utm_campaign=VA_SF > _______________________________________________ > Valgrind-developers mailing list > Val...@li... <javascript:;> > https://lists.sourceforge.net/lists/listinfo/valgrind-developers > |
|
From: Julian S. <js...@ac...> - 2015-04-13 11:44:10
|
On 13/04/15 00:50, Mark Pauley wrote: > a breaking change to the initimg interfaces without making sure > to un-bust initimg-darwin.c Ach; that is my bad really, for failing to set up or arrange to have set up, a nightly builder for MacOSX. > configure.am file is a bit too strict for Apple LLVM checking, Committed as r15087 and r15088. Thanks for the fixes. J |
|
From: <sv...@va...> - 2015-04-13 11:41:37
|
Author: sewardj
Date: Mon Apr 13 12:41:30 2015
New Revision: 15088
Log:
Make the version checking for Apple LLVM a bit less strict.
Modified:
trunk/configure.ac
Modified: trunk/configure.ac
==============================================================================
--- trunk/configure.ac (original)
+++ trunk/configure.ac Mon Apr 13 12:41:30 2015
@@ -154,7 +154,7 @@
# Note: m4 arguments are quoted with [ and ] so square brackets in shell
# statements have to be quoted.
case "${is_clang}-${gcc_version}" in
- applellvm-5.1|applellvm-6.0*)
+ applellvm-5.1|applellvm-6.*)
AC_MSG_RESULT([ok (Apple LLVM version ${gcc_version})])
;;
icc-1[[3-9]].*)
|
|
From: <sv...@va...> - 2015-04-13 11:39:57
|
Author: sewardj
Date: Mon Apr 13 12:39:50 2015
New Revision: 15087
Log:
Un-break the Darwin build after r15078. Patch from Mark Pauley
(pa...@un...).
Modified:
trunk/coregrind/m_initimg/initimg-darwin.c
Modified: trunk/coregrind/m_initimg/initimg-darwin.c
==============================================================================
--- trunk/coregrind/m_initimg/initimg-darwin.c (original)
+++ trunk/coregrind/m_initimg/initimg-darwin.c Mon Apr 13 12:39:50 2015
@@ -312,7 +312,8 @@
HChar** orig_envp,
const ExeInfo* info,
Addr clstack_end,
- SizeT clstack_max_size )
+ SizeT clstack_max_size,
+ const VexArchInfo* vex_archinfo )
{
HChar **cpp;
HChar *strtab; /* string table */
@@ -508,7 +509,8 @@
/*====================================================================*/
/* Create the client's initial memory image. */
-IIFinaliseImageInfo VG_(ii_create_image)( IICreateImageInfo iicii )
+IIFinaliseImageInfo VG_(ii_create_image)( IICreateImageInfo iicii,
+ const VexArchInfo* vex_archinfo )
{
ExeInfo info;
VG_(memset)( &info, 0, sizeof(info) );
@@ -548,7 +550,8 @@
iifii.initial_client_SP =
setup_client_stack( iicii.argv - 1, env, &info,
- iicii.clstack_end, iifii.clstack_max_size );
+ iicii.clstack_end, iifii.clstack_max_size,
+ vex_archinfo );
VG_(free)(env);
|
|
From: <sv...@va...> - 2015-04-13 11:33:36
|
Author: sewardj
Date: Mon Apr 13 12:33:29 2015
New Revision: 3128
Log:
Remove unused function "lshift".
Modified:
trunk/priv/guest_tilegx_helpers.c
Modified: trunk/priv/guest_tilegx_helpers.c
==============================================================================
--- trunk/priv/guest_tilegx_helpers.c (original)
+++ trunk/priv/guest_tilegx_helpers.c Mon Apr 13 12:33:29 2015
@@ -46,15 +46,6 @@
{ offsetof(VexGuestTILEGXState, field), \
(sizeof ((VexGuestTILEGXState*)0)->field) }
-/* generalised left-shifter */
-static inline UInt lshift ( UInt x, Int n )
-{
- if (n >= 0)
- return x << n;
- else
- return x >> (-n);
-}
-
IRExpr *guest_tilegx_spechelper ( const HChar * function_name, IRExpr ** args,
IRStmt ** precedingStmts, Int n_precedingStmts)
{
|
|
From: <sv...@va...> - 2015-04-13 10:49:23
|
Author: sewardj
Date: Mon Apr 13 11:49:15 2015
New Revision: 15086
Log:
Add an NCode template for 8 bit loads on 64 bit targets.
Requires vex r3127.
Modified:
branches/NCODE/memcheck/mc_include.h
branches/NCODE/memcheck/mc_main.c
branches/NCODE/memcheck/mc_translate.c
Modified: branches/NCODE/memcheck/mc_include.h
==============================================================================
--- branches/NCODE/memcheck/mc_include.h (original)
+++ branches/NCODE/memcheck/mc_include.h Mon Apr 13 11:49:15 2015
@@ -602,6 +602,7 @@
extern NCodeTemplate* MC_(tmpl__LOADV64le_on_64);
extern NCodeTemplate* MC_(tmpl__LOADV32le_on_64);
+extern NCodeTemplate* MC_(tmpl__LOADV8_on_64);
/* Helper functions defined in mc_main.c */
Modified: branches/NCODE/memcheck/mc_main.c
==============================================================================
--- branches/NCODE/memcheck/mc_main.c (original)
+++ branches/NCODE/memcheck/mc_main.c Mon Apr 13 11:49:15 2015
@@ -4276,6 +4276,7 @@
VG_REGPARM(1) static ULong mc_LOADV64le_on_64_slow ( Addr a );
VG_REGPARM(1) static ULong mc_LOADV32le_on_64_slow ( Addr a );
+VG_REGPARM(1) static ULong mc_LOADV8_on_64_slow ( Addr a );
static void* ncode_alloc ( UInt n ) {
return VG_(malloc)("mc.ncode_alloc (NCode, permanent)", n);
@@ -4294,6 +4295,7 @@
NCodeTemplate* MC_(tmpl__LOADV64le_on_64) = NULL;
NCodeTemplate* MC_(tmpl__LOADV32le_on_64) = NULL;
+NCodeTemplate* MC_(tmpl__LOADV8_on_64) = NULL;
static NCodeTemplate* mk_tmpl__LOADV64le_on_64 ( NAlloc na )
{
@@ -4306,27 +4308,49 @@
NReg a0 = mkNReg(Nrr_Argument, 0);
NReg s0 = mkNReg(Nrr_Scratch, 0);
- hot[0] = NInstr_SetFlagsWri (na, Nsf_TEST, a0, MASK(8));
- hot[1] = NInstr_Branch (na, Ncc_NZ, mkNLabel(Nlz_Cold, 4));
- hot[2] = NInstr_ShiftWri (na, Nsh_SHR, s0, a0, 16);
- hot[3] = NInstr_LoadU (na, 8, s0, NEA_IRS(na, (HWord)&primary_map[0],
- s0, 3));
- hot[4] = NInstr_AluWri (na, Nalu_AND, r0, a0, 0xFFFF);
- hot[5] = NInstr_ShiftWri (na, Nsh_SHR, r0, r0, 3);
- hot[6] = NInstr_LoadU (na, 2, r0, NEA_RRS(na, s0, r0, 1));
- hot[7] = NInstr_SetFlagsWri (na, Nsf_CMP, r0, VA_BITS16_DEFINED);
- hot[8] = NInstr_Branch (na, Ncc_NZ, mkNLabel(Nlz_Cold, 0));
- hot[9] = NInstr_ImmW (na, r0, V_BITS64_DEFINED);
+ /* NCode [r0] = "LOADV64le_on_64" [a0] s0 {
+ hot:
+ 0 tst.w a0, #0xFFFFFFF000000007 misaligned-or-high?
+ 1 bnz cold.4 yes, goto slow path
+ 2 shr.w s0, a0, #16 s0 = pri-map-ix
+ 3 ld.64 s0, [&pri_map[0] + s0 << #3] s0 = sec-map
+ 4 and.w r0, a0, #0xFFFF r0 = sec-map-offB
+ 5 shr.w r0, r0, #3 r0 = sec-map-ix
+ 6 ld.16 r0, [s0 + r0 << #1] r0 = sec-map-VABITS16
+ 7 cmp.w r0, #0xAAAA r0 == VABITS16_DEFINED?
+ 8 bnz cold.0 no, goto cold.0
+ 9 imm.w r0, #0x0 VBITS64_DEFINED
+ 10 nop continue
+ cold:
+ 0 mov.w s0, r0 s0 = sec-map-VABITS16
+ 1 imm.w r0, #0xFFFFFFFFFFFFFFFF VBITS64_UNDEFINED
+ 2 cmp.w s0, #0x5555 s0 == VABITS16_UNDEFINED?
+ 3 bz hot.10 yes, continue
+ 4 call r0 = mc_LOADV64le_on_64_slow[..](a0) call helper
+ 5 b hot.10 continue
+ }
+ */
+ hot[0] = NInstr_SetFlagsWri (na, Nsf_TEST, a0, MASK(8));
+ hot[1] = NInstr_Branch (na, Ncc_NZ, mkNLabel(Nlz_Cold, 4));
+ hot[2] = NInstr_ShiftWri (na, Nsh_SHR, s0, a0, 16);
+ hot[3] = NInstr_LoadU (na, 8, s0, NEA_IRS(na, (HWord)&primary_map[0],
+ s0, 3));
+ hot[4] = NInstr_AluWri (na, Nalu_AND, r0, a0, 0xFFFF);
+ hot[5] = NInstr_ShiftWri (na, Nsh_SHR, r0, r0, 3);
+ hot[6] = NInstr_LoadU (na, 2, r0, NEA_RRS(na, s0, r0, 1));
+ hot[7] = NInstr_SetFlagsWri (na, Nsf_CMP, r0, VA_BITS16_DEFINED);
+ hot[8] = NInstr_Branch (na, Ncc_NZ, mkNLabel(Nlz_Cold, 0));
+ hot[9] = NInstr_ImmW (na, r0, V_BITS64_DEFINED);
hot[10] = NInstr_Nop (na);
- cold[0] = NInstr_MovW (na, s0, r0);
- cold[1] = NInstr_ImmW (na, r0, V_BITS64_UNDEFINED);
- cold[2] = NInstr_SetFlagsWri (na, Nsf_CMP, s0, VA_BITS16_UNDEFINED);
- cold[3] = NInstr_Branch (na, Ncc_Z, mkNLabel(Nlz_Hot, 10));
- cold[4] = NInstr_Call (na, rINVALID, r0, mkNRegVec1(na, a0),
- (void*)& mc_LOADV64le_on_64_slow,
- "mc_LOADV64le_on_64_slow");
- cold[5] = NInstr_Branch(na, Ncc_ALWAYS, mkNLabel(Nlz_Hot, 10));
+ cold[0] = NInstr_MovW (na, s0, r0);
+ cold[1] = NInstr_ImmW (na, r0, V_BITS64_UNDEFINED);
+ cold[2] = NInstr_SetFlagsWri (na, Nsf_CMP, s0, VA_BITS16_UNDEFINED);
+ cold[3] = NInstr_Branch (na, Ncc_Z, mkNLabel(Nlz_Hot, 10));
+ cold[4] = NInstr_Call (na, rINVALID, r0, mkNRegVec1(na, a0),
+ (void*)& mc_LOADV64le_on_64_slow,
+ "mc_LOADV64le_on_64_slow");
+ cold[5] = NInstr_Branch (na, Ncc_ALWAYS, mkNLabel(Nlz_Hot, 10));
hot[11] = cold[6] = NULL;
NCodeTemplate* tmpl
@@ -4346,29 +4370,51 @@
NReg a0 = mkNReg(Nrr_Argument, 0);
NReg s0 = mkNReg(Nrr_Scratch, 0);
- hot[0] = NInstr_SetFlagsWri (na, Nsf_TEST, a0, MASK(4));
- hot[1] = NInstr_Branch (na, Ncc_NZ, mkNLabel(Nlz_Cold, 4));
- hot[2] = NInstr_ShiftWri (na, Nsh_SHR, s0, a0, 16);
- hot[3] = NInstr_LoadU (na, 8, s0, NEA_IRS(na, (HWord)&primary_map[0],
- s0, 3));
- hot[4] = NInstr_AluWri (na, Nalu_AND, r0, a0, 0xFFFF);
- hot[5] = NInstr_ShiftWri (na, Nsh_SHR, r0, r0, 2);
- hot[6] = NInstr_LoadU (na, 1, r0, NEA_RRS(na, s0, r0, 0));
- hot[7] = NInstr_SetFlagsWri (na, Nsf_CMP, r0, VA_BITS8_DEFINED);
- hot[8] = NInstr_Branch (na, Ncc_NZ, mkNLabel(Nlz_Cold, 0));
- hot[9] = NInstr_ImmW (na, r0, 0xFFFFFFFF00000000ULL
- | (ULong)V_BITS32_DEFINED);
+ /* NCode [r0] = "LOADV32le_on_64" [a0] s0 {
+ hot:
+ 0 tst.w a0, #0xFFFFFFF000000003 misaligned-or-high?
+ 1 bnz cold.4 yes, goto slow path
+ 2 shr.w s0, a0, #16 pri-map-ix
+ 3 ld.64 s0, [&pri_map[0] + s0 << #3] sec-map
+ 4 and.w r0, a0, #0xFFFF sec-map-offB
+ 5 shr.w r0, r0, #2 sec-map-ix
+ 6 ld.8 r0, [s0 + r0 << #0] sec-map-VABITS8
+ 7 cmp.w r0, #0xAA == VABITS8_DEFINED ?
+ 8 bnz cold.0 no, goto cold.0
+ 9 imm.w r0, #0xFFFFFFFF00000000 VBITS32_DEFINED (sort of)
+ 10 nop continue
+ cold:
+ 0 mov.w s0, r0 sec-map-VABITS8
+ 1 imm.w r0, #0xFFFFFFFFFFFFFFFF VBITS32_UNDEFINED
+ 2 cmp.w s0, #0x55 s-m-VABITS8 == VABITS8_UNDEF?
+ 3 bz hot.10 yes, continue
+ 4 call r0 = mc_LOADV32le_on_64_slow[..](a0) call helper
+ 5 b hot.10 continue
+ }
+ */
+ hot[0] = NInstr_SetFlagsWri (na, Nsf_TEST, a0, MASK(4));
+ hot[1] = NInstr_Branch (na, Ncc_NZ, mkNLabel(Nlz_Cold, 4));
+ hot[2] = NInstr_ShiftWri (na, Nsh_SHR, s0, a0, 16);
+ hot[3] = NInstr_LoadU (na, 8, s0, NEA_IRS(na, (HWord)&primary_map[0],
+ s0, 3));
+ hot[4] = NInstr_AluWri (na, Nalu_AND, r0, a0, 0xFFFF);
+ hot[5] = NInstr_ShiftWri (na, Nsh_SHR, r0, r0, 2);
+ hot[6] = NInstr_LoadU (na, 1, r0, NEA_RRS(na, s0, r0, 0));
+ hot[7] = NInstr_SetFlagsWri (na, Nsf_CMP, r0, VA_BITS8_DEFINED);
+ hot[8] = NInstr_Branch (na, Ncc_NZ, mkNLabel(Nlz_Cold, 0));
+ hot[9] = NInstr_ImmW (na, r0, 0xFFFFFFFF00000000ULL
+ | (ULong)V_BITS32_DEFINED);
hot[10] = NInstr_Nop (na);
- cold[0] = NInstr_MovW (na, s0, r0);
- cold[1] = NInstr_ImmW (na, r0, 0xFFFFFFFF00000000ULL
- | (ULong)V_BITS32_UNDEFINED);
- cold[2] = NInstr_SetFlagsWri (na, Nsf_CMP, s0, VA_BITS8_UNDEFINED);
- cold[3] = NInstr_Branch (na, Ncc_Z, mkNLabel(Nlz_Hot, 10));
- cold[4] = NInstr_Call (na, rINVALID, r0, mkNRegVec1(na, a0),
- (void*)& mc_LOADV32le_on_64_slow,
- "mc_LOADV32le_on_64_slow");
- cold[5] = NInstr_Branch(na, Ncc_ALWAYS, mkNLabel(Nlz_Hot, 10));
+ cold[0] = NInstr_MovW (na, s0, r0);
+ cold[1] = NInstr_ImmW (na, r0, 0xFFFFFFFF00000000ULL
+ | (ULong)V_BITS32_UNDEFINED);
+ cold[2] = NInstr_SetFlagsWri (na, Nsf_CMP, s0, VA_BITS8_UNDEFINED);
+ cold[3] = NInstr_Branch (na, Ncc_Z, mkNLabel(Nlz_Hot, 10));
+ cold[4] = NInstr_Call (na, rINVALID, r0, mkNRegVec1(na, a0),
+ (void*)& mc_LOADV32le_on_64_slow,
+ "mc_LOADV32le_on_64_slow");
+ cold[5] = NInstr_Branch (na, Ncc_ALWAYS, mkNLabel(Nlz_Hot, 10));
hot[11] = cold[6] = NULL;
NCodeTemplate* tmpl
@@ -4377,12 +4423,107 @@
return tmpl;
}
+static NCodeTemplate* mk_tmpl__LOADV8_on_64 ( NAlloc na )
+{
+ NInstr** hot = na((11+1) * sizeof(NInstr*));
+ NInstr** cold = na((14+1) * sizeof(NInstr*));
+
+ NReg rINVALID = mkNRegINVALID();
+
+ NReg r0 = mkNReg(Nrr_Result, 0);
+ NReg a0 = mkNReg(Nrr_Argument, 0);
+ NReg s0 = mkNReg(Nrr_Scratch, 0);
+
+ /*
+ h0 tst.w a0, #0xFFFFFFF000000000 high?
+ h1 bnz cold.12 yes, goto slow path
+ h2 shr.w s0, a0, #16 s0 = pri-map-ix
+ h3 ld.64 s0, [&pri_map[0] + s0 << #3] s0 = sec-map
+ h4 and.w r0, a0, #0xFFFF r0 = sec-map-offB
+ h5 shr.w r0, r0, #2 r0 = sec-map-ix
+ h6 ld.8 r0, [s0 + r0 << #0] r0 = sec-map-VABITS8
+ h7 cmp.w r0, #0xAA r0 == VABITS8_DEFINED?
+ h8 bnz cold.0 no, goto cold.0
+ h9 imm.w r0, #0xFFFFFFFFFFFFFF00 VBITS8_DEFINED | top56safe
+ h10 nop continue
+
+ c0 cmp.w r0, #0x55 VABITS8_UNDEFINED
+ c1 bnz cold.4
+
+ c2 imm.w r0, #0xFFFFFFFFFFFFFFFF VBITS8_UNDEFINED | top56safe
+ c3 b hot.10
+
+ // r0 holds sec-map-VABITS8
+ // a0 holds the address. Extract the relevant 2 bits and inspect.
+ c4 and.w s0, a0, #3 // addr & 3
+ c5 add.w s0, s0, s0 // 2 * (addr & 3)
+ c6 shr.w r0, r0, s0 // sec-map-VABITS8 >> (2 * (addr & 3))
+ c7 and.w r0, r0, #3 // (sec-map-VABITS8 >> (2 * (addr & 3))) & 3
+
+ c8 cmp.w r0, #2 // VABITS2_DEFINED
+ c9 jz hot.9
+
+ c10 cmp.w r0, #1 // VABITS2_UNDEFINED
+ c11 jz cold.2
+
+ c12 call r0 = mc_LOADV8_on_64_slow(a0)
+ c13 b hot.10
+ */
+ hot[0] = NInstr_SetFlagsWri (na, Nsf_TEST, a0, MASK(1));
+ hot[1] = NInstr_Branch (na, Ncc_NZ, mkNLabel(Nlz_Cold, 12));
+ hot[2] = NInstr_ShiftWri (na, Nsh_SHR, s0, a0, 16);
+ hot[3] = NInstr_LoadU (na, 8, s0, NEA_IRS(na, (HWord)&primary_map[0],
+ s0, 3));
+ hot[4] = NInstr_AluWri (na, Nalu_AND, r0, a0, 0xFFFF);
+ hot[5] = NInstr_ShiftWri (na, Nsh_SHR, r0, r0, 2);
+ hot[6] = NInstr_LoadU (na, 1, r0, NEA_RRS(na, s0, r0, 0));
+ hot[7] = NInstr_SetFlagsWri (na, Nsf_CMP, r0, VA_BITS8_DEFINED);
+ hot[8] = NInstr_Branch (na, Ncc_NZ, mkNLabel(Nlz_Cold, 0));
+ hot[9] = NInstr_ImmW (na, r0, 0xFFFFFFFFFFFFFF00ULL
+ | (ULong)V_BITS8_DEFINED);
+ hot[10] = NInstr_Nop (na);
+
+ cold[0] = NInstr_SetFlagsWri (na, Nsf_CMP, r0, VA_BITS8_UNDEFINED);
+ cold[1] = NInstr_Branch (na, Ncc_NZ, mkNLabel(Nlz_Cold, 4));
+
+ cold[2] = NInstr_ImmW (na, r0, 0xFFFFFFFFFFFFFF00ULL
+ | (ULong)V_BITS8_UNDEFINED);
+ cold[3] = NInstr_Branch (na, Ncc_ALWAYS, mkNLabel(Nlz_Hot, 10));
+
+ // r0 holds sec-map-VABITS8
+ // a0 holds the address. Extract the relevant 2 bits and inspect.
+ cold[4] = NInstr_AluWri (na, Nalu_AND, s0, a0, 3);
+ cold[5] = NInstr_AluWrr (na, Nalu_ADD, s0, s0, s0);
+ cold[6] = NInstr_ShiftWrr (na, Nsh_SHR, r0, r0, s0);
+ cold[7] = NInstr_AluWri (na, Nalu_AND, r0, r0, 3);
+
+ cold[8] = NInstr_SetFlagsWri (na, Nsf_CMP, r0, 2);
+ cold[9] = NInstr_Branch (na, Ncc_Z, mkNLabel(Nlz_Hot, 9));
+
+ cold[10]= NInstr_SetFlagsWri (na, Nsf_CMP, r0, 1);
+ cold[11]= NInstr_Branch (na, Ncc_Z, mkNLabel(Nlz_Cold, 2));
+
+ cold[12]= NInstr_Call (na, rINVALID, r0, mkNRegVec1(na, a0),
+ (void*)& mc_LOADV8_on_64_slow,
+ "mc_LOADV8_on_64_slow");
+ cold[13]= NInstr_Branch (na, Ncc_ALWAYS, mkNLabel(Nlz_Hot, 10));
+
+ hot[11] = cold[14] = NULL;
+ NCodeTemplate* tmpl
+ = mkNCodeTemplate(na,"LOADV8_on_64",
+ /*res, parms, scratch*/1, 1, 1, hot, cold);
+ return tmpl;
+}
+
+
void MC_(create_ncode_templates) ( void )
{
tl_assert(MC_(tmpl__LOADV64le_on_64) == NULL);
tl_assert(MC_(tmpl__LOADV32le_on_64) == NULL);
+ tl_assert(MC_(tmpl__LOADV8_on_64) == NULL);
MC_(tmpl__LOADV64le_on_64) = mk_tmpl__LOADV64le_on_64(ncode_alloc);
MC_(tmpl__LOADV32le_on_64) = mk_tmpl__LOADV32le_on_64(ncode_alloc);
+ MC_(tmpl__LOADV8_on_64) = mk_tmpl__LOADV8_on_64(ncode_alloc);
}
@@ -4886,6 +5027,11 @@
#endif
}
+VG_REGPARM(1) static ULong mc_LOADV8_on_64_slow ( Addr a )
+{
+ return mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
+}
+
VG_REGPARM(2)
void MC_(helperc_STOREV8) ( Addr a, UWord vbits8 )
Modified: branches/NCODE/memcheck/mc_translate.c
==============================================================================
--- branches/NCODE/memcheck/mc_translate.c (original)
+++ branches/NCODE/memcheck/mc_translate.c Mon Apr 13 11:49:15 2015
@@ -4648,6 +4648,19 @@
= assignNew('V', mce, Ity_I32, unop(Iop_64to32, mkexpr(datavbits64)));
return datavbits32;
}
+ if (ty == Ity_I8) {
+ /* Unconditional LOAD8 on 64 bit host. Generate inline code. */
+ IRTemp datavbits64 = newTemp(mce, Ity_I64, VSh);
+ NCodeTemplate* tmpl = MC_(tmpl__LOADV8_on_64);
+ IRAtom** args = mkIRExprVec_1( addrAct );
+ IRTemp* ress = mkIRTempVec_1( datavbits64 );
+ /* The NCode block produces a 64 bit value, but we need to
+ truncate it to 8 bits. */
+ stmt( 'V', mce, IRStmt_NCode(tmpl, args, ress) );
+ IRAtom* datavbits8
+ = assignNew('V', mce, Ity_I8, unop(Iop_64to8, mkexpr(datavbits64)));
+ return datavbits8;
+ }
/* else fall through */
}
/* ------ END inline NCode ? ------ */
|
|
From: <sv...@va...> - 2015-04-13 10:47:22
|
Author: sewardj
Date: Mon Apr 13 11:47:13 2015
New Revision: 3127
Log:
Add new NInstrs: NIn_ShiftWrr, NIn_AluWrr for shifts and add/and where
both operands are registers.
amd64 back end: emit code for the above 2 new NInstrs
Modified:
branches/NCODE/priv/host_amd64_defs.c
branches/NCODE/priv/host_amd64_defs.h
branches/NCODE/priv/ir_defs.c
branches/NCODE/pub/libvex_ir.h
Modified: branches/NCODE/priv/host_amd64_defs.c
==============================================================================
--- branches/NCODE/priv/host_amd64_defs.c (original)
+++ branches/NCODE/priv/host_amd64_defs.c Mon Apr 13 11:47:13 2015
@@ -4218,23 +4218,69 @@
break;
}
+ case Nin_ShiftWrr: {
+ NShift how = ni->Nin.ShiftWrr.how;
+ HReg amt = mapNReg(nregMap, ni->Nin.ShiftWrr.amt);
+ HReg src = mapNReg(nregMap, ni->Nin.ShiftWrr.srcL);
+ HReg dst = mapNReg(nregMap, ni->Nin.ShiftWrr.dst);
+
+ AMD64ShiftOp shOp = Ash_INVALID;
+ switch (how) {
+ case Nsh_SHR: shOp = Ash_SHR; break;
+ default: break;
+ }
+ vassert(shOp != Ash_INVALID);
+
+ if (!sameHReg(src, dst)) {
+ HI( mk_iMOVsd_RR(src, dst) );
+ }
+ /* Now, we have the shift amount in register |amt|. Problem
+ is that it needs to be in %rcx, but we don't know whether
+ or not that is live. Rather than do this nicely, we can
+ take advantage of the fact that r11 is a guaranteed
+ available scratch temp, and temporarily store rcx in it.
+ Note that rcx could be live even through it isn't
+ allocatable, since the insn selector uses it to put
+ variable shift amounts in. So we can't safely trash it
+ here. */
+ HI( mk_iMOVsd_RR(hregAMD64_RCX(), hregAMD64_R11()) ); // save rcx
+ HI( mk_iMOVsd_RR(amt, hregAMD64_RCX()) ); // amt->rcx
+ HI( AMD64Instr_Sh64(shOp, 0/*meaning %cl*/, dst) );
+ HI( mk_iMOVsd_RR(hregAMD64_R11(), hregAMD64_RCX()) ); // restore rcx
+ break;
+ }
+
case Nin_AluWri: {
NAlu how = ni->Nin.AluWri.how;
HReg dstR = mapNReg(nregMap, ni->Nin.AluWri.dst);
HReg srcLR = mapNReg(nregMap, ni->Nin.AluWri.srcL);
HWord imm = ni->Nin.AluWri.srcR;
- // Verified correct, but currently unused
- //if (how == Nalu_AND && fitsIn32Bits((ULong)imm)) {
- // if (!sameHReg(srcLR, dstR)) {
- // HI( mk_iMOVsd_RR(srcLR, dstR) );
- // }
- // HI( AMD64Instr_Alu64R(Aalu_AND, AMD64RMI_Imm(imm), dstR) );
- // break;
- //}
if (how == Nalu_AND && imm == 0xFFFFULL) {
HI( AMD64Instr_MovxWQ(False/*!syned*/, srcLR, dstR) );
break;
}
+ if (how == Nalu_AND && fitsIn32Bits((ULong)imm)) {
+ if (!sameHReg(srcLR, dstR)) {
+ HI( mk_iMOVsd_RR(srcLR, dstR) );
+ }
+ HI( AMD64Instr_Alu64R(Aalu_AND, AMD64RMI_Imm(imm), dstR) );
+ break;
+ }
+ goto unhandled;
+ }
+
+ case Nin_AluWrr: {
+ NAlu how = ni->Nin.AluWrr.how;
+ HReg dstR = mapNReg(nregMap, ni->Nin.AluWrr.dst);
+ HReg srcLR = mapNReg(nregMap, ni->Nin.AluWrr.srcL);
+ HReg srcRR = mapNReg(nregMap, ni->Nin.AluWrr.srcR);
+ if (how == Nalu_ADD) {
+ if (!sameHReg(srcLR, dstR)) {
+ HI( mk_iMOVsd_RR(srcLR, dstR) );
+ }
+ HI( AMD64Instr_Alu64R(Aalu_ADD, AMD64RMI_Reg(srcRR), dstR) );
+ break;
+ }
goto unhandled;
}
@@ -4345,7 +4391,6 @@
const AMD64InstrNCode* hi_details = hi->Ain.NCode.details;
const NCodeTemplate* tmpl = hi_details->tmpl;
const RRegSet* rregsLiveAfter = hi_details->rrLiveAfter;
- const RRegUniverse* univ = RRegSet__getUniverse(rregsLiveAfter);
NRegMap nregMap;
nregMap.regsR = hi_details->regsR;
Modified: branches/NCODE/priv/host_amd64_defs.h
==============================================================================
--- branches/NCODE/priv/host_amd64_defs.h (original)
+++ branches/NCODE/priv/host_amd64_defs.h Mon Apr 13 11:47:13 2015
@@ -358,7 +358,7 @@
Ain_Imm64, /* Generate 64-bit literal to register */
Ain_Alu64R, /* 64-bit mov/arith/logical, dst=REG */
Ain_Alu64M, /* 64-bit mov/arith/logical, dst=MEM */
- Ain_Sh64, /* 64-bit shift/rotate, dst=REG or MEM */
+ Ain_Sh64, /* 64-bit shift, dst=REG, by imm or %cl */
Ain_Test64, /* 64-bit test (AND, set flags, discard result) */
Ain_Unary64, /* 64-bit not and neg */
Ain_Lea64, /* 64-bit compute EA into a reg */
Modified: branches/NCODE/priv/ir_defs.c
==============================================================================
--- branches/NCODE/priv/ir_defs.c (original)
+++ branches/NCODE/priv/ir_defs.c Mon Apr 13 11:47:13 2015
@@ -77,6 +77,7 @@
static const HChar* nameNAlu ( NAlu nal ) {
switch (nal) {
case Nalu_AND: return "and";
+ case Nalu_ADD: return "add";
default: return "nameNAlu???";
}
}
@@ -182,6 +183,14 @@
ppNReg(ni->Nin.ShiftWri.srcL);
vex_printf(", #%u", (UInt)ni->Nin.ShiftWri.amt);
break;
+ case Nin_ShiftWrr:
+ vex_printf("%s.w ", nameNShift(ni->Nin.ShiftWrr.how));
+ ppNReg(ni->Nin.ShiftWrr.dst);
+ vex_printf(", ");
+ ppNReg(ni->Nin.ShiftWrr.srcL);
+ vex_printf(", ");
+ ppNReg(ni->Nin.ShiftWrr.amt);
+ break;
case Nin_AluWri:
vex_printf("%s.w ", nameNAlu(ni->Nin.AluWri.how));
ppNReg(ni->Nin.AluWri.dst);
@@ -189,6 +198,14 @@
ppNReg(ni->Nin.AluWri.srcL);
vex_printf(", #0x%llx", (ULong)ni->Nin.AluWri.srcR);
break;
+ case Nin_AluWrr:
+ vex_printf("%s.w ", nameNAlu(ni->Nin.AluWrr.how));
+ ppNReg(ni->Nin.AluWrr.dst);
+ vex_printf(", ");
+ ppNReg(ni->Nin.AluWrr.srcL);
+ vex_printf(", ");
+ ppNReg(ni->Nin.AluWrr.srcR);
+ break;
case Nin_SetFlagsWri:
vex_printf("%s.w ", nameNSetFlags(ni->Nin.SetFlagsWri.how));
ppNReg(ni->Nin.SetFlagsWri.srcL);
@@ -2064,6 +2081,17 @@
in->Nin.ShiftWri.amt = amt;
return in;
}
+NInstr* NInstr_ShiftWrr ( NAlloc na,
+ NShift how, NReg dst, NReg srcL, NReg amt )
+{
+ NInstr* in = na(sizeof(NInstr));
+ in->tag = Nin_ShiftWrr;
+ in->Nin.ShiftWrr.how = how;
+ in->Nin.ShiftWrr.dst = dst;
+ in->Nin.ShiftWrr.srcL = srcL;
+ in->Nin.ShiftWrr.amt = amt;
+ return in;
+}
NInstr* NInstr_AluWri ( NAlloc na, NAlu how, NReg dst, NReg srcL, HWord srcR )
{
NInstr* in = na(sizeof(NInstr));
@@ -2074,6 +2102,16 @@
in->Nin.AluWri.srcR = srcR;
return in;
}
+NInstr* NInstr_AluWrr ( NAlloc na, NAlu how, NReg dst, NReg srcL, NReg srcR )
+{
+ NInstr* in = na(sizeof(NInstr));
+ in->tag = Nin_AluWrr;
+ in->Nin.AluWrr.how = how;
+ in->Nin.AluWrr.dst = dst;
+ in->Nin.AluWrr.srcL = srcL;
+ in->Nin.AluWrr.srcR = srcR;
+ return in;
+}
NInstr* NInstr_SetFlagsWri ( NAlloc na, NSetFlags how, NReg srcL, HWord srcR )
{
NInstr* in = na(sizeof(NInstr));
Modified: branches/NCODE/pub/libvex_ir.h
==============================================================================
--- branches/NCODE/pub/libvex_ir.h (original)
+++ branches/NCODE/pub/libvex_ir.h Mon Apr 13 11:47:13 2015
@@ -2673,7 +2673,8 @@
typedef
enum {
- Nalu_AND=0x1D30
+ Nalu_AND=0x1D30,
+ Nalu_ADD
}
NAlu;
@@ -2766,7 +2767,9 @@
Nin_Call,
Nin_ImmW,
Nin_ShiftWri,
+ Nin_ShiftWrr,
Nin_AluWri,
+ Nin_AluWrr,
Nin_SetFlagsWri,
Nin_MovW,
Nin_LoadU,
@@ -2802,12 +2805,24 @@
UInt amt; /* 1 .. host-word-size-1 only */
} ShiftWri;
struct {
+ NShift how;
+ NReg dst;
+ NReg srcL;
+ NReg amt; /* 0 .. host-word-size-1 only */
+ } ShiftWrr;
+ struct {
NAlu how;
NReg dst;
NReg srcL;
HWord srcR;
} AluWri;
struct {
+ NAlu how;
+ NReg dst;
+ NReg srcL;
+ NReg srcR;
+ } AluWrr;
+ struct {
NSetFlags how;
NReg srcL;
HWord srcR;
@@ -2839,8 +2854,12 @@
extern NInstr* NInstr_ImmW ( NAlloc na, NReg dst, HWord imm );
extern NInstr* NInstr_ShiftWri ( NAlloc na,
NShift how, NReg dst, NReg srcL, UInt amt );
+extern NInstr* NInstr_ShiftWrr ( NAlloc na,
+ NShift how, NReg dst, NReg srcL, NReg amt );
extern NInstr* NInstr_AluWri ( NAlloc na,
NAlu how, NReg dst, NReg srcL, HWord srcR );
+extern NInstr* NInstr_AluWrr ( NAlloc na,
+ NAlu how, NReg dst, NReg srcL, NReg srcR );
extern NInstr* NInstr_SetFlagsWri ( NAlloc na,
NSetFlags how, NReg srcL, HWord srcR );
extern NInstr* NInstr_MovW ( NAlloc na, NReg dst, NReg src );
|