You can subscribe to this list here.
| 2002 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
|
Aug
|
Sep
(1) |
Oct
(122) |
Nov
(152) |
Dec
(69) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2003 |
Jan
(6) |
Feb
(25) |
Mar
(73) |
Apr
(82) |
May
(24) |
Jun
(25) |
Jul
(10) |
Aug
(11) |
Sep
(10) |
Oct
(54) |
Nov
(203) |
Dec
(182) |
| 2004 |
Jan
(307) |
Feb
(305) |
Mar
(430) |
Apr
(312) |
May
(187) |
Jun
(342) |
Jul
(487) |
Aug
(637) |
Sep
(336) |
Oct
(373) |
Nov
(441) |
Dec
(210) |
| 2005 |
Jan
(385) |
Feb
(480) |
Mar
(636) |
Apr
(544) |
May
(679) |
Jun
(625) |
Jul
(810) |
Aug
(838) |
Sep
(634) |
Oct
(521) |
Nov
(965) |
Dec
(543) |
| 2006 |
Jan
(494) |
Feb
(431) |
Mar
(546) |
Apr
(411) |
May
(406) |
Jun
(322) |
Jul
(256) |
Aug
(401) |
Sep
(345) |
Oct
(542) |
Nov
(308) |
Dec
(481) |
| 2007 |
Jan
(427) |
Feb
(326) |
Mar
(367) |
Apr
(255) |
May
(244) |
Jun
(204) |
Jul
(223) |
Aug
(231) |
Sep
(354) |
Oct
(374) |
Nov
(497) |
Dec
(362) |
| 2008 |
Jan
(322) |
Feb
(482) |
Mar
(658) |
Apr
(422) |
May
(476) |
Jun
(396) |
Jul
(455) |
Aug
(267) |
Sep
(280) |
Oct
(253) |
Nov
(232) |
Dec
(304) |
| 2009 |
Jan
(486) |
Feb
(470) |
Mar
(458) |
Apr
(423) |
May
(696) |
Jun
(461) |
Jul
(551) |
Aug
(575) |
Sep
(134) |
Oct
(110) |
Nov
(157) |
Dec
(102) |
| 2010 |
Jan
(226) |
Feb
(86) |
Mar
(147) |
Apr
(117) |
May
(107) |
Jun
(203) |
Jul
(193) |
Aug
(238) |
Sep
(300) |
Oct
(246) |
Nov
(23) |
Dec
(75) |
| 2011 |
Jan
(133) |
Feb
(195) |
Mar
(315) |
Apr
(200) |
May
(267) |
Jun
(293) |
Jul
(353) |
Aug
(237) |
Sep
(278) |
Oct
(611) |
Nov
(274) |
Dec
(260) |
| 2012 |
Jan
(303) |
Feb
(391) |
Mar
(417) |
Apr
(441) |
May
(488) |
Jun
(655) |
Jul
(590) |
Aug
(610) |
Sep
(526) |
Oct
(478) |
Nov
(359) |
Dec
(372) |
| 2013 |
Jan
(467) |
Feb
(226) |
Mar
(391) |
Apr
(281) |
May
(299) |
Jun
(252) |
Jul
(311) |
Aug
(352) |
Sep
(481) |
Oct
(571) |
Nov
(222) |
Dec
(231) |
| 2014 |
Jan
(185) |
Feb
(329) |
Mar
(245) |
Apr
(238) |
May
(281) |
Jun
(399) |
Jul
(382) |
Aug
(500) |
Sep
(579) |
Oct
(435) |
Nov
(487) |
Dec
(256) |
| 2015 |
Jan
(338) |
Feb
(357) |
Mar
(330) |
Apr
(294) |
May
(191) |
Jun
(108) |
Jul
(142) |
Aug
(261) |
Sep
(190) |
Oct
(54) |
Nov
(83) |
Dec
(22) |
| 2016 |
Jan
(49) |
Feb
(89) |
Mar
(33) |
Apr
(50) |
May
(27) |
Jun
(34) |
Jul
(53) |
Aug
(53) |
Sep
(98) |
Oct
(206) |
Nov
(93) |
Dec
(53) |
| 2017 |
Jan
(65) |
Feb
(82) |
Mar
(102) |
Apr
(86) |
May
(187) |
Jun
(67) |
Jul
(23) |
Aug
(93) |
Sep
(65) |
Oct
(45) |
Nov
(35) |
Dec
(17) |
| 2018 |
Jan
(26) |
Feb
(35) |
Mar
(38) |
Apr
(32) |
May
(8) |
Jun
(43) |
Jul
(27) |
Aug
(30) |
Sep
(43) |
Oct
(42) |
Nov
(38) |
Dec
(67) |
| 2019 |
Jan
(32) |
Feb
(37) |
Mar
(53) |
Apr
(64) |
May
(49) |
Jun
(18) |
Jul
(14) |
Aug
(53) |
Sep
(25) |
Oct
(30) |
Nov
(49) |
Dec
(31) |
| 2020 |
Jan
(87) |
Feb
(45) |
Mar
(37) |
Apr
(51) |
May
(99) |
Jun
(36) |
Jul
(11) |
Aug
(14) |
Sep
(20) |
Oct
(24) |
Nov
(40) |
Dec
(23) |
| 2021 |
Jan
(14) |
Feb
(53) |
Mar
(85) |
Apr
(15) |
May
(19) |
Jun
(3) |
Jul
(14) |
Aug
(1) |
Sep
(57) |
Oct
(73) |
Nov
(56) |
Dec
(22) |
| 2022 |
Jan
(3) |
Feb
(22) |
Mar
(6) |
Apr
(55) |
May
(46) |
Jun
(39) |
Jul
(15) |
Aug
(9) |
Sep
(11) |
Oct
(34) |
Nov
(20) |
Dec
(36) |
| 2023 |
Jan
(79) |
Feb
(41) |
Mar
(99) |
Apr
(169) |
May
(48) |
Jun
(16) |
Jul
(16) |
Aug
(57) |
Sep
(19) |
Oct
|
Nov
|
Dec
|
| S | M | T | W | T | F | S |
|---|---|---|---|---|---|---|
|
|
|
1
(9) |
2
(11) |
3
(3) |
4
(7) |
5
(15) |
|
6
(10) |
7
(9) |
8
(12) |
9
(10) |
10
(6) |
11
(12) |
12
(12) |
|
13
(11) |
14
(14) |
15
(18) |
16
(15) |
17
(19) |
18
(15) |
19
(9) |
|
20
|
21
(5) |
22
(10) |
23
(15) |
24
(18) |
25
(20) |
26
(15) |
|
27
(14) |
28
(11) |
29
(17) |
30
(17) |
31
(23) |
|
|
|
From: Anmol P. <An...@fr...> - 2014-07-10 16:27:17
|
Hello, I am trying to build (Valgrind Revision: 14147, VEX Revision: 2897) on the ARM V8 Foundation Model (FM000-KT-00035-r0p8-52rel06.tgz from https://silver.arm.com/browse/FM00A), other details of the build environment being: -------------------------------------------------------------------------------- root@genericarmv8:/home/b07584/valgrind# uname -a Linux genericarmv8 3.15.0-1-linaro-vexpress64 #1ubuntu1~ci+140621065049 SMP PREEMPT Sat Jun 21 06:51:35 UTC 2014 aarch64 GNU/Linux -------------------------------------------------------------------------------- -------------------------------------------------------------------------------- root@genericarmv8:/home/b07584/valgrind# gcc --version gcc (Linaro GCC 4.8-2014.04) 4.8.3 20140401 (prerelease) -------------------------------------------------------------------------------- I have the following build error: -------------------------------------------------------------------------------- make[3]: Warning: File 'm_ume/.deps/libcoregrind_arm64_linux_a-script.Po' has modification time 2423282 s in the future aarch64-oe-linux-gcc -DHAVE_CONFIG_H -I. -I.. -I.. 
-I../include -I../VEX/pub -I../VEX/pub -DVGA_arm64=1 -DVGO_linux=1 -DVGP_arm64_linux=1 -DVGPV_arm64_linux_vanilla=1 -I../coregrind -DVG_LIBDIR="\"/home/b07584/valgrind/lib/valgrind"\" -DVG_PLATFORM="\"arm64-linux\"" -O2 -g -Wall -Wmissing-prototypes -Wshadow -Wpointer-arith -Wstrict-prototypes -Wmissing-declarations -Wno-format-zero-length -Wno-tautological-compare -fno-strict-aliasing -fno-builtin -Wno-long-long -Wwrite-strings -fno-stack-protector -MT vgdb-vgdb-invoker-ptrace.o -MD -MP -MF .deps/vgdb-vgdb-invoker-ptrace.Tpo -c -o vgdb-vgdb-invoker-ptrace.o `test -f 'vgdb-invoker-ptrace.c' || echo './'`vgdb-invoker-ptrace.c vgdb-invoker-ptrace.c: In function 'restore_and_detach': vgdb-invoker-ptrace.c:748:7: error: invalid use of undefined type 'struct user_pt_regs' if (!setregs(pid_of_save_regs, &user_save.regs, sizeof(user_save.regs))) { ^ vgdb-invoker-ptrace.c:748:7: error: invalid use of undefined type 'struct user_pt_regs' vgdb-invoker-ptrace.c: In function 'invoker_invoke_gdbserver': vgdb-invoker-ptrace.c:786:24: error: storage size of 'user_mod' isn't known struct user_pt_regs user_mod; ^ vgdb-invoker-ptrace.c:824:4: error: 'user_save' has an incomplete type user_save = user_mod; ^ vgdb-invoker-ptrace.c:786:24: warning: unused variable 'user_mod' [-Wunused-variable] struct user_pt_regs user_mod; ^ vgdb-invoker-ptrace.c: At top level: cc1: warning: unrecognized command line option "-Wno-tautological-compare" [enabled by default] Makefile:7224: recipe for target 'vgdb-vgdb-invoker-ptrace.o' failed make[3]: *** [vgdb-vgdb-invoker-ptrace.o] Error 1 make[3]: Leaving directory '/home/b07584/valgrind/coregrind' Makefile:7370: recipe for target 'check' failed make[2]: *** [check] Error 2 make[2]: Leaving directory '/home/b07584/valgrind/coregrind' Makefile:745: recipe for target 'check-recursive' failed make[1]: *** [check-recursive] Error 1 make[1]: Leaving directory '/home/b07584/valgrind' Makefile:1033: recipe for target 'check' failed make: *** [check] Error 2 
root@genericarmv8:/home/b07584/valgrind# -------------------------------------------------------------------------------- With the same revisions, on an x86_64, and: -------------------------------------------------------------------------------- b07584@atx-ub12-p64-42:~$ uname -a Linux atx-ub12-p64-42 3.13.0-27-generic #50-Ubuntu SMP Thu May 15 18:06:16 UTC 2014 x86_64 x86_64 x86_64 GNU/Linux -------------------------------------------------------------------------------- -------------------------------------------------------------------------------- b07584@atx-ub12-p64-42:~$ gcc --version gcc (Ubuntu 4.8.2-19ubuntu1) 4.8.2 -------------------------------------------------------------------------------- - everything builds and works just fine... Please let me know if you need any more information. Thank you, Anmol P. Paralkar |
|
From: <sv...@va...> - 2014-07-10 14:56:56
|
Author: tom
Date: Thu Jul 10 14:56:48 2014
New Revision: 14151
Log:
Move DRM ioctl handlers to the right places. Fixes r13486 breakage.
Modified:
trunk/coregrind/m_syswrap/syswrap-linux.c
Modified: trunk/coregrind/m_syswrap/syswrap-linux.c
==============================================================================
--- trunk/coregrind/m_syswrap/syswrap-linux.c (original)
+++ trunk/coregrind/m_syswrap/syswrap-linux.c Thu Jul 10 14:56:48 2014
@@ -5230,151 +5230,6 @@
PRE_MEM_WRITE("fcntl(F_GETOWN_EX)", ARG3, sizeof(struct vki_f_owner_ex));
break;
- case VKI_DRM_IOCTL_VERSION:
- if (ARG3) {
- struct vki_drm_version *data = (struct vki_drm_version *)ARG3;
- PRE_MEM_WRITE("ioctl(DRM_VERSION).version_major", (Addr)&data->version_major, sizeof(data->version_major));
- PRE_MEM_WRITE("ioctl(DRM_VERSION).version_minor", (Addr)&data->version_minor, sizeof(data->version_minor));
- PRE_MEM_WRITE("ioctl(DRM_VERSION).version_patchlevel", (Addr)&data->version_patchlevel, sizeof(data->version_patchlevel));
- PRE_MEM_READ("ioctl(DRM_VERSION).name_len", (Addr)&data->name_len, sizeof(data->name_len));
- PRE_MEM_READ("ioctl(DRM_VERSION).name", (Addr)&data->name, sizeof(data->name));
- PRE_MEM_WRITE("ioctl(DRM_VERSION).name", (Addr)data->name, data->name_len);
- PRE_MEM_READ("ioctl(DRM_VERSION).date_len", (Addr)&data->date_len, sizeof(data->date_len));
- PRE_MEM_READ("ioctl(DRM_VERSION).date", (Addr)&data->date, sizeof(data->date));
- PRE_MEM_WRITE("ioctl(DRM_VERSION).date", (Addr)data->date, data->date_len);
- PRE_MEM_READ("ioctl(DRM_VERSION).desc_len", (Addr)&data->desc_len, sizeof(data->desc_len));
- PRE_MEM_READ("ioctl(DRM_VERSION).desc", (Addr)&data->desc, sizeof(data->desc));
- PRE_MEM_WRITE("ioctl(DRM_VERSION).desc", (Addr)data->desc, data->desc_len);
- }
- break;
- case VKI_DRM_IOCTL_GET_UNIQUE:
- if (ARG3) {
- struct vki_drm_unique *data = (struct vki_drm_unique *)ARG3;
- PRE_MEM_READ("ioctl(DRM_GET_UNIQUE).unique_len", (Addr)&data->unique_len, sizeof(data->unique_len));
- PRE_MEM_READ("ioctl(DRM_GET_UNIQUE).unique", (Addr)&data->unique, sizeof(data->unique));
- PRE_MEM_WRITE("ioctl(DRM_GET_UNIQUE).unique", (Addr)data->unique, data->unique_len);
- }
- break;
- case VKI_DRM_IOCTL_GET_MAGIC:
- if (ARG3) {
- struct vki_drm_auth *data = (struct vki_drm_auth *)ARG3;
- PRE_MEM_WRITE("ioctl(DRM_GET_MAGIC).magic", (Addr)&data->magic, sizeof(data->magic));
- }
- break;
- case VKI_DRM_IOCTL_WAIT_VBLANK:
- if (ARG3) {
- union vki_drm_wait_vblank *data = (union vki_drm_wait_vblank *)ARG3;
- PRE_MEM_READ("ioctl(DRM_WAIT_VBLANK).request.type", (Addr)&data->request.type, sizeof(data->request.type));
- PRE_MEM_READ("ioctl(DRM_WAIT_VBLANK).request.sequence", (Addr)&data->request.sequence, sizeof(data->request.sequence));
- /* XXX: It seems request.signal isn't used */
- PRE_MEM_WRITE("ioctl(DRM_WAIT_VBLANK).reply", (Addr)&data->reply, sizeof(data->reply));
- }
- break;
- case VKI_DRM_IOCTL_GEM_CLOSE:
- if (ARG3) {
- struct vki_drm_gem_close *data = (struct vki_drm_gem_close *)ARG3;
- PRE_MEM_READ("ioctl(DRM_GEM_CLOSE).handle", (Addr)&data->handle, sizeof(data->handle));
- }
- break;
- case VKI_DRM_IOCTL_GEM_FLINK:
- if (ARG3) {
- struct vki_drm_gem_flink *data = (struct vki_drm_gem_flink *)ARG3;
- PRE_MEM_READ("ioctl(DRM_GEM_FLINK).handle", (Addr)&data->handle, sizeof(data->handle));
- PRE_MEM_WRITE("ioctl(DRM_GEM_FLINK).name", (Addr)&data->name, sizeof(data->name));
- }
- break;
- case VKI_DRM_IOCTL_GEM_OPEN:
- if (ARG3) {
- struct vki_drm_gem_open *data = (struct vki_drm_gem_open *)ARG3;
- PRE_MEM_READ("ioctl(DRM_GEM_OPEN).name", (Addr)&data->name, sizeof(data->name));
- PRE_MEM_WRITE("ioctl(DRM_GEM_OPEN).handle", (Addr)&data->handle, sizeof(data->handle));
- PRE_MEM_WRITE("ioctl(DRM_GEM_OPEN).size", (Addr)&data->size, sizeof(data->size));
- }
- break;
- case VKI_DRM_IOCTL_I915_GETPARAM:
- if (ARG3) {
- vki_drm_i915_getparam_t *data = (vki_drm_i915_getparam_t *)ARG3;
- PRE_MEM_READ("ioctl(DRM_I915_GETPARAM).param", (Addr)&data->param, sizeof(data->param));
- PRE_MEM_WRITE("ioctl(DRM_I915_GETPARAM).value", (Addr)data->value, sizeof(int));
- }
- break;
- case VKI_DRM_IOCTL_I915_GEM_BUSY:
- if (ARG3) {
- struct vki_drm_i915_gem_busy *data = (struct vki_drm_i915_gem_busy *)ARG3;
- PRE_MEM_READ("ioctl(DRM_I915_GEM_BUSY).handle", (Addr)&data->handle, sizeof(data->handle));
- PRE_MEM_WRITE("ioctl(DRM_I915_GEM_BUSY).busy", (Addr)&data->busy, sizeof(data->busy));
- }
- break;
- case VKI_DRM_IOCTL_I915_GEM_CREATE:
- if (ARG3) {
- struct vki_drm_i915_gem_create *data = (struct vki_drm_i915_gem_create *)ARG3;
- PRE_MEM_READ("ioctl(DRM_I915_GEM_CREATE).size", (Addr)&data->size, sizeof(data->size));
- PRE_MEM_WRITE("ioctl(DRM_I915_GEM_CREATE).handle", (Addr)&data->handle, sizeof(data->handle));
- }
- break;
- case VKI_DRM_IOCTL_I915_GEM_PREAD:
- if (ARG3) {
- struct vki_drm_i915_gem_pread *data = (struct vki_drm_i915_gem_pread *)ARG3;
- PRE_MEM_READ("ioctl(DRM_I915_GEM_PREAD).handle", (Addr)&data->handle, sizeof(data->handle));
- PRE_MEM_READ("ioctl(DRM_I915_GEM_PREAD).offset", (Addr)&data->offset, sizeof(data->offset));
- PRE_MEM_READ("ioctl(DRM_I915_GEM_PREAD).size", (Addr)&data->size, sizeof(data->size));
- PRE_MEM_READ("ioctl(DRM_I915_GEM_PREAD).data_ptr", (Addr)&data->data_ptr, sizeof(data->data_ptr));
- PRE_MEM_WRITE("ioctl(DRM_I915_GEM_PREAD).data_ptr", (Addr)data->data_ptr, data->size);
- }
- break;
- case VKI_DRM_IOCTL_I915_GEM_PWRITE:
- if (ARG3) {
- struct vki_drm_i915_gem_pwrite *data = (struct vki_drm_i915_gem_pwrite *)ARG3;
- PRE_MEM_READ("ioctl(DRM_I915_GEM_PWRITE).handle", (Addr)&data->handle, sizeof(data->handle));
- PRE_MEM_READ("ioctl(DRM_I915_GEM_PWRITE).offset", (Addr)&data->offset, sizeof(data->offset));
- PRE_MEM_READ("ioctl(DRM_I915_GEM_PWRITE).size", (Addr)&data->size, sizeof(data->size));
- PRE_MEM_READ("ioctl(DRM_I915_GEM_PWRITE).data_ptr", (Addr)&data->data_ptr, sizeof(data->data_ptr));
- /* PRE_MEM_READ("ioctl(DRM_I915_GEM_PWRITE).data_ptr", (Addr)data->data_ptr, data->size);
- * NB: the buffer is allowed to contain any amount of uninitialized data (e.g.
- * interleaved vertex attributes may have a wide stride with uninitialized data between
- * consecutive vertices) */
- }
- break;
- case VKI_DRM_IOCTL_I915_GEM_MMAP_GTT:
- if (ARG3) {
- struct vki_drm_i915_gem_mmap_gtt *data = (struct vki_drm_i915_gem_mmap_gtt *)ARG3;
- PRE_MEM_READ("ioctl(DRM_I915_GEM_MMAP_GTT).handle", (Addr)&data->handle, sizeof(data->handle));
- PRE_MEM_WRITE("ioctl(DRM_I915_GEM_MMAP_GTT).offset", (Addr)&data->offset, sizeof(data->offset));
- }
- break;
- case VKI_DRM_IOCTL_I915_GEM_SET_DOMAIN:
- if (ARG3) {
- struct vki_drm_i915_gem_set_domain *data = (struct vki_drm_i915_gem_set_domain *)ARG3;
- PRE_MEM_READ("ioctl(DRM_I915_GEM_SET_DOMAIN).handle", (Addr)&data->handle, sizeof(data->handle));
- PRE_MEM_READ("ioctl(DRM_I915_GEM_SET_DOMAIN).read_domains", (Addr)&data->read_domains, sizeof(data->read_domains));
- PRE_MEM_READ("ioctl(DRM_I915_GEM_SET_DOMAIN).write_domain", (Addr)&data->write_domain, sizeof(data->write_domain));
- }
- break;
- case VKI_DRM_IOCTL_I915_GEM_SET_TILING:
- if (ARG3) {
- struct vki_drm_i915_gem_set_tiling *data = (struct vki_drm_i915_gem_set_tiling *)ARG3;
- PRE_MEM_READ("ioctl(DRM_I915_GEM_SET_TILING).handle", (Addr)&data->handle, sizeof(data->handle));
- PRE_MEM_READ("ioctl(DRM_I915_GEM_SET_TILING).tiling_mode", (Addr)&data->tiling_mode, sizeof(data->tiling_mode));
- PRE_MEM_READ("ioctl(DRM_I915_GEM_SET_TILING).stride", (Addr)&data->stride, sizeof(data->stride));
- PRE_MEM_WRITE("ioctl(DRM_I915_GEM_SET_TILING).swizzle_mode", (Addr)&data->swizzle_mode, sizeof(data->swizzle_mode));
- }
- break;
- case VKI_DRM_IOCTL_I915_GEM_GET_TILING:
- if (ARG3) {
- struct vki_drm_i915_gem_get_tiling *data = (struct vki_drm_i915_gem_get_tiling *)ARG3;
- PRE_MEM_READ("ioctl(DRM_I915_GEM_GET_TILING).handle", (Addr)&data->handle, sizeof(data->handle));
- PRE_MEM_WRITE("ioctl(DRM_I915_GEM_GET_TILING).tiling_mode", (Addr)&data->tiling_mode, sizeof(data->tiling_mode));
- PRE_MEM_WRITE("ioctl(DRM_I915_GEM_GET_TILING).swizzle_mode", (Addr)&data->swizzle_mode, sizeof(data->swizzle_mode));
- }
- break;
- case VKI_DRM_IOCTL_I915_GEM_GET_APERTURE:
- if (ARG3) {
- struct vki_drm_i915_gem_get_aperture *data = (struct vki_drm_i915_gem_get_aperture *)ARG3;
- PRE_MEM_WRITE("ioctl(DRM_I915_GEM_GET_APERTURE).aper_size", (Addr)&data->aper_size, sizeof(data->aper_size));
- PRE_MEM_WRITE("ioctl(DRM_I915_GEM_GET_APERTURE).aper_available_size", (Addr)&data->aper_available_size, sizeof(data->aper_available_size));
- }
- break;
-
default:
PRINT("sys_fcntl[UNKNOWN] ( %ld, %ld, %ld )", ARG1,ARG2,ARG3);
I_die_here;
@@ -5570,104 +5425,6 @@
unsigned int, fd, unsigned int, request);
return;
- case VKI_DRM_IOCTL_VERSION:
- if (ARG3) {
- struct vki_drm_version *data = (struct vki_drm_version *)ARG3;
- POST_MEM_WRITE((Addr)&data->version_major, sizeof(data->version_major));
- POST_MEM_WRITE((Addr)&data->version_minor, sizeof(data->version_minor));
- POST_MEM_WRITE((Addr)&data->version_patchlevel, sizeof(data->version_patchlevel));
- POST_MEM_WRITE((Addr)&data->name_len, sizeof(data->name_len));
- POST_MEM_WRITE((Addr)data->name, data->name_len);
- POST_MEM_WRITE((Addr)&data->date_len, sizeof(data->date_len));
- POST_MEM_WRITE((Addr)data->date, data->date_len);
- POST_MEM_WRITE((Addr)&data->desc_len, sizeof(data->desc_len));
- POST_MEM_WRITE((Addr)data->desc, data->desc_len);
- }
- break;
- case VKI_DRM_IOCTL_GET_UNIQUE:
- if (ARG3) {
- struct vki_drm_unique *data = (struct vki_drm_unique *)ARG3;
- POST_MEM_WRITE((Addr)data->unique, sizeof(data->unique_len));
- }
- break;
- case VKI_DRM_IOCTL_GET_MAGIC:
- if (ARG3) {
- struct vki_drm_auth *data = (struct vki_drm_auth *)ARG3;
- POST_MEM_WRITE((Addr)&data->magic, sizeof(data->magic));
- }
- break;
- case VKI_DRM_IOCTL_WAIT_VBLANK:
- if (ARG3) {
- union vki_drm_wait_vblank *data = (union vki_drm_wait_vblank *)ARG3;
- POST_MEM_WRITE((Addr)&data->reply, sizeof(data->reply));
- }
- break;
- case VKI_DRM_IOCTL_GEM_FLINK:
- if (ARG3) {
- struct vki_drm_gem_flink *data = (struct vki_drm_gem_flink *)ARG3;
- POST_MEM_WRITE((Addr)&data->name, sizeof(data->name));
- }
- break;
- case VKI_DRM_IOCTL_GEM_OPEN:
- if (ARG3) {
- struct vki_drm_gem_open *data = (struct vki_drm_gem_open *)ARG3;
- POST_MEM_WRITE((Addr)&data->handle, sizeof(data->handle));
- POST_MEM_WRITE((Addr)&data->size, sizeof(data->size));
- }
- break;
- case VKI_DRM_IOCTL_I915_GETPARAM:
- if (ARG3) {
- vki_drm_i915_getparam_t *data = (vki_drm_i915_getparam_t *)ARG3;
- POST_MEM_WRITE((Addr)data->value, sizeof(int));
- }
- break;
- case VKI_DRM_IOCTL_I915_GEM_BUSY:
- if (ARG3) {
- struct vki_drm_i915_gem_busy *data = (struct vki_drm_i915_gem_busy *)ARG3;
- POST_MEM_WRITE((Addr)&data->busy, sizeof(data->busy));
- }
- break;
- case VKI_DRM_IOCTL_I915_GEM_CREATE:
- if (ARG3) {
- struct vki_drm_i915_gem_create *data = (struct vki_drm_i915_gem_create *)ARG3;
- POST_MEM_WRITE((Addr)&data->handle, sizeof(data->handle));
- }
- break;
- case VKI_DRM_IOCTL_I915_GEM_PREAD:
- if (ARG3) {
- struct vki_drm_i915_gem_pread *data = (struct vki_drm_i915_gem_pread *)ARG3;
- POST_MEM_WRITE((Addr)data->data_ptr, data->size);
- }
- break;
- case VKI_DRM_IOCTL_I915_GEM_MMAP_GTT:
- if (ARG3) {
- struct vki_drm_i915_gem_mmap_gtt *data = (struct vki_drm_i915_gem_mmap_gtt *)ARG3;
- POST_MEM_WRITE((Addr)&data->offset, sizeof(data->offset));
- }
- break;
- case VKI_DRM_IOCTL_I915_GEM_SET_TILING:
- if (ARG3) {
- struct vki_drm_i915_gem_set_tiling *data = (struct vki_drm_i915_gem_set_tiling *)ARG3;
- POST_MEM_WRITE((Addr)&data->tiling_mode, sizeof(data->tiling_mode));
- POST_MEM_WRITE((Addr)&data->stride, sizeof(data->stride));
- POST_MEM_WRITE((Addr)&data->swizzle_mode, sizeof(data->swizzle_mode));
- }
- break;
- case VKI_DRM_IOCTL_I915_GEM_GET_TILING:
- if (ARG3) {
- struct vki_drm_i915_gem_get_tiling *data = (struct vki_drm_i915_gem_get_tiling *)ARG3;
- POST_MEM_WRITE((Addr)&data->tiling_mode, sizeof(data->tiling_mode));
- POST_MEM_WRITE((Addr)&data->swizzle_mode, sizeof(data->swizzle_mode));
- }
- break;
- case VKI_DRM_IOCTL_I915_GEM_GET_APERTURE:
- if (ARG3) {
- struct vki_drm_i915_gem_get_aperture *data = (struct vki_drm_i915_gem_get_aperture *)ARG3;
- POST_MEM_WRITE((Addr)&data->aper_size, sizeof(data->aper_size));
- POST_MEM_WRITE((Addr)&data->aper_available_size, sizeof(data->aper_available_size));
- }
- break;
-
default:
PRINT("sys_ioctl ( %ld, 0x%lx, 0x%lx )",ARG1,ARG2,ARG3);
PRE_REG_READ3(long, "ioctl",
@@ -7089,6 +6846,151 @@
}
break;
+ case VKI_DRM_IOCTL_VERSION:
+ if (ARG3) {
+ struct vki_drm_version *data = (struct vki_drm_version *)ARG3;
+ PRE_MEM_WRITE("ioctl(DRM_VERSION).version_major", (Addr)&data->version_major, sizeof(data->version_major));
+ PRE_MEM_WRITE("ioctl(DRM_VERSION).version_minor", (Addr)&data->version_minor, sizeof(data->version_minor));
+ PRE_MEM_WRITE("ioctl(DRM_VERSION).version_patchlevel", (Addr)&data->version_patchlevel, sizeof(data->version_patchlevel));
+ PRE_MEM_READ("ioctl(DRM_VERSION).name_len", (Addr)&data->name_len, sizeof(data->name_len));
+ PRE_MEM_READ("ioctl(DRM_VERSION).name", (Addr)&data->name, sizeof(data->name));
+ PRE_MEM_WRITE("ioctl(DRM_VERSION).name", (Addr)data->name, data->name_len);
+ PRE_MEM_READ("ioctl(DRM_VERSION).date_len", (Addr)&data->date_len, sizeof(data->date_len));
+ PRE_MEM_READ("ioctl(DRM_VERSION).date", (Addr)&data->date, sizeof(data->date));
+ PRE_MEM_WRITE("ioctl(DRM_VERSION).date", (Addr)data->date, data->date_len);
+ PRE_MEM_READ("ioctl(DRM_VERSION).desc_len", (Addr)&data->desc_len, sizeof(data->desc_len));
+ PRE_MEM_READ("ioctl(DRM_VERSION).desc", (Addr)&data->desc, sizeof(data->desc));
+ PRE_MEM_WRITE("ioctl(DRM_VERSION).desc", (Addr)data->desc, data->desc_len);
+ }
+ break;
+ case VKI_DRM_IOCTL_GET_UNIQUE:
+ if (ARG3) {
+ struct vki_drm_unique *data = (struct vki_drm_unique *)ARG3;
+ PRE_MEM_READ("ioctl(DRM_GET_UNIQUE).unique_len", (Addr)&data->unique_len, sizeof(data->unique_len));
+ PRE_MEM_READ("ioctl(DRM_GET_UNIQUE).unique", (Addr)&data->unique, sizeof(data->unique));
+ PRE_MEM_WRITE("ioctl(DRM_GET_UNIQUE).unique", (Addr)data->unique, data->unique_len);
+ }
+ break;
+ case VKI_DRM_IOCTL_GET_MAGIC:
+ if (ARG3) {
+ struct vki_drm_auth *data = (struct vki_drm_auth *)ARG3;
+ PRE_MEM_WRITE("ioctl(DRM_GET_MAGIC).magic", (Addr)&data->magic, sizeof(data->magic));
+ }
+ break;
+ case VKI_DRM_IOCTL_WAIT_VBLANK:
+ if (ARG3) {
+ union vki_drm_wait_vblank *data = (union vki_drm_wait_vblank *)ARG3;
+ PRE_MEM_READ("ioctl(DRM_WAIT_VBLANK).request.type", (Addr)&data->request.type, sizeof(data->request.type));
+ PRE_MEM_READ("ioctl(DRM_WAIT_VBLANK).request.sequence", (Addr)&data->request.sequence, sizeof(data->request.sequence));
+ /* XXX: It seems request.signal isn't used */
+ PRE_MEM_WRITE("ioctl(DRM_WAIT_VBLANK).reply", (Addr)&data->reply, sizeof(data->reply));
+ }
+ break;
+ case VKI_DRM_IOCTL_GEM_CLOSE:
+ if (ARG3) {
+ struct vki_drm_gem_close *data = (struct vki_drm_gem_close *)ARG3;
+ PRE_MEM_READ("ioctl(DRM_GEM_CLOSE).handle", (Addr)&data->handle, sizeof(data->handle));
+ }
+ break;
+ case VKI_DRM_IOCTL_GEM_FLINK:
+ if (ARG3) {
+ struct vki_drm_gem_flink *data = (struct vki_drm_gem_flink *)ARG3;
+ PRE_MEM_READ("ioctl(DRM_GEM_FLINK).handle", (Addr)&data->handle, sizeof(data->handle));
+ PRE_MEM_WRITE("ioctl(DRM_GEM_FLINK).name", (Addr)&data->name, sizeof(data->name));
+ }
+ break;
+ case VKI_DRM_IOCTL_GEM_OPEN:
+ if (ARG3) {
+ struct vki_drm_gem_open *data = (struct vki_drm_gem_open *)ARG3;
+ PRE_MEM_READ("ioctl(DRM_GEM_OPEN).name", (Addr)&data->name, sizeof(data->name));
+ PRE_MEM_WRITE("ioctl(DRM_GEM_OPEN).handle", (Addr)&data->handle, sizeof(data->handle));
+ PRE_MEM_WRITE("ioctl(DRM_GEM_OPEN).size", (Addr)&data->size, sizeof(data->size));
+ }
+ break;
+ case VKI_DRM_IOCTL_I915_GETPARAM:
+ if (ARG3) {
+ vki_drm_i915_getparam_t *data = (vki_drm_i915_getparam_t *)ARG3;
+ PRE_MEM_READ("ioctl(DRM_I915_GETPARAM).param", (Addr)&data->param, sizeof(data->param));
+ PRE_MEM_WRITE("ioctl(DRM_I915_GETPARAM).value", (Addr)data->value, sizeof(int));
+ }
+ break;
+ case VKI_DRM_IOCTL_I915_GEM_BUSY:
+ if (ARG3) {
+ struct vki_drm_i915_gem_busy *data = (struct vki_drm_i915_gem_busy *)ARG3;
+ PRE_MEM_READ("ioctl(DRM_I915_GEM_BUSY).handle", (Addr)&data->handle, sizeof(data->handle));
+ PRE_MEM_WRITE("ioctl(DRM_I915_GEM_BUSY).busy", (Addr)&data->busy, sizeof(data->busy));
+ }
+ break;
+ case VKI_DRM_IOCTL_I915_GEM_CREATE:
+ if (ARG3) {
+ struct vki_drm_i915_gem_create *data = (struct vki_drm_i915_gem_create *)ARG3;
+ PRE_MEM_READ("ioctl(DRM_I915_GEM_CREATE).size", (Addr)&data->size, sizeof(data->size));
+ PRE_MEM_WRITE("ioctl(DRM_I915_GEM_CREATE).handle", (Addr)&data->handle, sizeof(data->handle));
+ }
+ break;
+ case VKI_DRM_IOCTL_I915_GEM_PREAD:
+ if (ARG3) {
+ struct vki_drm_i915_gem_pread *data = (struct vki_drm_i915_gem_pread *)ARG3;
+ PRE_MEM_READ("ioctl(DRM_I915_GEM_PREAD).handle", (Addr)&data->handle, sizeof(data->handle));
+ PRE_MEM_READ("ioctl(DRM_I915_GEM_PREAD).offset", (Addr)&data->offset, sizeof(data->offset));
+ PRE_MEM_READ("ioctl(DRM_I915_GEM_PREAD).size", (Addr)&data->size, sizeof(data->size));
+ PRE_MEM_READ("ioctl(DRM_I915_GEM_PREAD).data_ptr", (Addr)&data->data_ptr, sizeof(data->data_ptr));
+ PRE_MEM_WRITE("ioctl(DRM_I915_GEM_PREAD).data_ptr", (Addr)data->data_ptr, data->size);
+ }
+ break;
+ case VKI_DRM_IOCTL_I915_GEM_PWRITE:
+ if (ARG3) {
+ struct vki_drm_i915_gem_pwrite *data = (struct vki_drm_i915_gem_pwrite *)ARG3;
+ PRE_MEM_READ("ioctl(DRM_I915_GEM_PWRITE).handle", (Addr)&data->handle, sizeof(data->handle));
+ PRE_MEM_READ("ioctl(DRM_I915_GEM_PWRITE).offset", (Addr)&data->offset, sizeof(data->offset));
+ PRE_MEM_READ("ioctl(DRM_I915_GEM_PWRITE).size", (Addr)&data->size, sizeof(data->size));
+ PRE_MEM_READ("ioctl(DRM_I915_GEM_PWRITE).data_ptr", (Addr)&data->data_ptr, sizeof(data->data_ptr));
+ /* PRE_MEM_READ("ioctl(DRM_I915_GEM_PWRITE).data_ptr", (Addr)data->data_ptr, data->size);
+ * NB: the buffer is allowed to contain any amount of uninitialized data (e.g.
+ * interleaved vertex attributes may have a wide stride with uninitialized data between
+ * consecutive vertices) */
+ }
+ break;
+ case VKI_DRM_IOCTL_I915_GEM_MMAP_GTT:
+ if (ARG3) {
+ struct vki_drm_i915_gem_mmap_gtt *data = (struct vki_drm_i915_gem_mmap_gtt *)ARG3;
+ PRE_MEM_READ("ioctl(DRM_I915_GEM_MMAP_GTT).handle", (Addr)&data->handle, sizeof(data->handle));
+ PRE_MEM_WRITE("ioctl(DRM_I915_GEM_MMAP_GTT).offset", (Addr)&data->offset, sizeof(data->offset));
+ }
+ break;
+ case VKI_DRM_IOCTL_I915_GEM_SET_DOMAIN:
+ if (ARG3) {
+ struct vki_drm_i915_gem_set_domain *data = (struct vki_drm_i915_gem_set_domain *)ARG3;
+ PRE_MEM_READ("ioctl(DRM_I915_GEM_SET_DOMAIN).handle", (Addr)&data->handle, sizeof(data->handle));
+ PRE_MEM_READ("ioctl(DRM_I915_GEM_SET_DOMAIN).read_domains", (Addr)&data->read_domains, sizeof(data->read_domains));
+ PRE_MEM_READ("ioctl(DRM_I915_GEM_SET_DOMAIN).write_domain", (Addr)&data->write_domain, sizeof(data->write_domain));
+ }
+ break;
+ case VKI_DRM_IOCTL_I915_GEM_SET_TILING:
+ if (ARG3) {
+ struct vki_drm_i915_gem_set_tiling *data = (struct vki_drm_i915_gem_set_tiling *)ARG3;
+ PRE_MEM_READ("ioctl(DRM_I915_GEM_SET_TILING).handle", (Addr)&data->handle, sizeof(data->handle));
+ PRE_MEM_READ("ioctl(DRM_I915_GEM_SET_TILING).tiling_mode", (Addr)&data->tiling_mode, sizeof(data->tiling_mode));
+ PRE_MEM_READ("ioctl(DRM_I915_GEM_SET_TILING).stride", (Addr)&data->stride, sizeof(data->stride));
+ PRE_MEM_WRITE("ioctl(DRM_I915_GEM_SET_TILING).swizzle_mode", (Addr)&data->swizzle_mode, sizeof(data->swizzle_mode));
+ }
+ break;
+ case VKI_DRM_IOCTL_I915_GEM_GET_TILING:
+ if (ARG3) {
+ struct vki_drm_i915_gem_get_tiling *data = (struct vki_drm_i915_gem_get_tiling *)ARG3;
+ PRE_MEM_READ("ioctl(DRM_I915_GEM_GET_TILING).handle", (Addr)&data->handle, sizeof(data->handle));
+ PRE_MEM_WRITE("ioctl(DRM_I915_GEM_GET_TILING).tiling_mode", (Addr)&data->tiling_mode, sizeof(data->tiling_mode));
+ PRE_MEM_WRITE("ioctl(DRM_I915_GEM_GET_TILING).swizzle_mode", (Addr)&data->swizzle_mode, sizeof(data->swizzle_mode));
+ }
+ break;
+ case VKI_DRM_IOCTL_I915_GEM_GET_APERTURE:
+ if (ARG3) {
+ struct vki_drm_i915_gem_get_aperture *data = (struct vki_drm_i915_gem_get_aperture *)ARG3;
+ PRE_MEM_WRITE("ioctl(DRM_I915_GEM_GET_APERTURE).aper_size", (Addr)&data->aper_size, sizeof(data->aper_size));
+ PRE_MEM_WRITE("ioctl(DRM_I915_GEM_GET_APERTURE).aper_available_size", (Addr)&data->aper_available_size, sizeof(data->aper_available_size));
+ }
+ break;
+
/* KVM ioctls that check for a numeric value as parameter */
case VKI_KVM_GET_API_VERSION:
case VKI_KVM_CREATE_VM:
@@ -8304,6 +8206,104 @@
}
break;
+ case VKI_DRM_IOCTL_VERSION:
+ if (ARG3) {
+ struct vki_drm_version *data = (struct vki_drm_version *)ARG3;
+ POST_MEM_WRITE((Addr)&data->version_major, sizeof(data->version_major));
+ POST_MEM_WRITE((Addr)&data->version_minor, sizeof(data->version_minor));
+ POST_MEM_WRITE((Addr)&data->version_patchlevel, sizeof(data->version_patchlevel));
+ POST_MEM_WRITE((Addr)&data->name_len, sizeof(data->name_len));
+ POST_MEM_WRITE((Addr)data->name, data->name_len);
+ POST_MEM_WRITE((Addr)&data->date_len, sizeof(data->date_len));
+ POST_MEM_WRITE((Addr)data->date, data->date_len);
+ POST_MEM_WRITE((Addr)&data->desc_len, sizeof(data->desc_len));
+ POST_MEM_WRITE((Addr)data->desc, data->desc_len);
+ }
+ break;
+ case VKI_DRM_IOCTL_GET_UNIQUE:
+ if (ARG3) {
+ struct vki_drm_unique *data = (struct vki_drm_unique *)ARG3;
+ POST_MEM_WRITE((Addr)data->unique, sizeof(data->unique_len));
+ }
+ break;
+ case VKI_DRM_IOCTL_GET_MAGIC:
+ if (ARG3) {
+ struct vki_drm_auth *data = (struct vki_drm_auth *)ARG3;
+ POST_MEM_WRITE((Addr)&data->magic, sizeof(data->magic));
+ }
+ break;
+ case VKI_DRM_IOCTL_WAIT_VBLANK:
+ if (ARG3) {
+ union vki_drm_wait_vblank *data = (union vki_drm_wait_vblank *)ARG3;
+ POST_MEM_WRITE((Addr)&data->reply, sizeof(data->reply));
+ }
+ break;
+ case VKI_DRM_IOCTL_GEM_FLINK:
+ if (ARG3) {
+ struct vki_drm_gem_flink *data = (struct vki_drm_gem_flink *)ARG3;
+ POST_MEM_WRITE((Addr)&data->name, sizeof(data->name));
+ }
+ break;
+ case VKI_DRM_IOCTL_GEM_OPEN:
+ if (ARG3) {
+ struct vki_drm_gem_open *data = (struct vki_drm_gem_open *)ARG3;
+ POST_MEM_WRITE((Addr)&data->handle, sizeof(data->handle));
+ POST_MEM_WRITE((Addr)&data->size, sizeof(data->size));
+ }
+ break;
+ case VKI_DRM_IOCTL_I915_GETPARAM:
+ if (ARG3) {
+ vki_drm_i915_getparam_t *data = (vki_drm_i915_getparam_t *)ARG3;
+ POST_MEM_WRITE((Addr)data->value, sizeof(int));
+ }
+ break;
+ case VKI_DRM_IOCTL_I915_GEM_BUSY:
+ if (ARG3) {
+ struct vki_drm_i915_gem_busy *data = (struct vki_drm_i915_gem_busy *)ARG3;
+ POST_MEM_WRITE((Addr)&data->busy, sizeof(data->busy));
+ }
+ break;
+ case VKI_DRM_IOCTL_I915_GEM_CREATE:
+ if (ARG3) {
+ struct vki_drm_i915_gem_create *data = (struct vki_drm_i915_gem_create *)ARG3;
+ POST_MEM_WRITE((Addr)&data->handle, sizeof(data->handle));
+ }
+ break;
+ case VKI_DRM_IOCTL_I915_GEM_PREAD:
+ if (ARG3) {
+ struct vki_drm_i915_gem_pread *data = (struct vki_drm_i915_gem_pread *)ARG3;
+ POST_MEM_WRITE((Addr)data->data_ptr, data->size);
+ }
+ break;
+ case VKI_DRM_IOCTL_I915_GEM_MMAP_GTT:
+ if (ARG3) {
+ struct vki_drm_i915_gem_mmap_gtt *data = (struct vki_drm_i915_gem_mmap_gtt *)ARG3;
+ POST_MEM_WRITE((Addr)&data->offset, sizeof(data->offset));
+ }
+ break;
+ case VKI_DRM_IOCTL_I915_GEM_SET_TILING:
+ if (ARG3) {
+ struct vki_drm_i915_gem_set_tiling *data = (struct vki_drm_i915_gem_set_tiling *)ARG3;
+ POST_MEM_WRITE((Addr)&data->tiling_mode, sizeof(data->tiling_mode));
+ POST_MEM_WRITE((Addr)&data->stride, sizeof(data->stride));
+ POST_MEM_WRITE((Addr)&data->swizzle_mode, sizeof(data->swizzle_mode));
+ }
+ break;
+ case VKI_DRM_IOCTL_I915_GEM_GET_TILING:
+ if (ARG3) {
+ struct vki_drm_i915_gem_get_tiling *data = (struct vki_drm_i915_gem_get_tiling *)ARG3;
+ POST_MEM_WRITE((Addr)&data->tiling_mode, sizeof(data->tiling_mode));
+ POST_MEM_WRITE((Addr)&data->swizzle_mode, sizeof(data->swizzle_mode));
+ }
+ break;
+ case VKI_DRM_IOCTL_I915_GEM_GET_APERTURE:
+ if (ARG3) {
+ struct vki_drm_i915_gem_get_aperture *data = (struct vki_drm_i915_gem_get_aperture *)ARG3;
+ POST_MEM_WRITE((Addr)&data->aper_size, sizeof(data->aper_size));
+ POST_MEM_WRITE((Addr)&data->aper_available_size, sizeof(data->aper_available_size));
+ }
+ break;
+
/* KVM ioctls that only write the system call return value */
case VKI_KVM_GET_API_VERSION:
case VKI_KVM_CREATE_VM:
|
|
From: <sv...@va...> - 2014-07-10 14:50:12
|
Author: tom
Date: Thu Jul 10 14:50:06 2014
New Revision: 14150
Log:
Add support for the F_OFD_SETLK, F_OFD_SETLKW, and F_OFD_GETLK fcntl
commands. BZ#337285.
Modified:
trunk/coregrind/m_syswrap/syswrap-linux.c
Modified: trunk/coregrind/m_syswrap/syswrap-linux.c
==============================================================================
--- trunk/coregrind/m_syswrap/syswrap-linux.c (original)
+++ trunk/coregrind/m_syswrap/syswrap-linux.c Thu Jul 10 14:50:06 2014
@@ -5451,6 +5451,9 @@
case VKI_F_SETLK64:
case VKI_F_SETLKW64:
# endif
+ case VKI_F_OFD_GETLK:
+ case VKI_F_OFD_SETLK:
+ case VKI_F_OFD_SETLKW:
PRINT("sys_fcntl64[ARG3=='lock'] ( %ld, %ld, %#lx )", ARG1,ARG2,ARG3);
PRE_REG_READ3(long, "fcntl64",
unsigned int, fd, unsigned int, cmd,
|
|
From: <sv...@va...> - 2014-07-10 14:23:23
|
Author: sewardj
Date: Thu Jul 10 14:23:16 2014
New Revision: 14148
Log:
Enable test cases for: shll #imm, shrn #imm, rshrn #imm,
{smlal,umlal,smlsl,umlsl,smull,umull} (elem)
Modified:
trunk/none/tests/arm64/fp_and_simd.c
Modified: trunk/none/tests/arm64/fp_and_simd.c
==============================================================================
--- trunk/none/tests/arm64/fp_and_simd.c (original)
+++ trunk/none/tests/arm64/fp_and_simd.c Thu Jul 10 14:23:16 2014
@@ -3995,39 +3995,39 @@
if (1) test_uhsub_8b_8b_8b(TyB);
// shll{2} 8h_8b/16b_#8, 4s_4h/8h_#16, 2d_2s/4s_#32
- if (0) test_shll_8h_8b_8(TyB);
- if (0) test_shll2_8h_16b_8(TyB);
- if (0) test_shll_4s_4h_16(TyH);
- if (0) test_shll2_4s_8h_16(TyH);
- if (0) test_shll_2d_2s_32(TyS);
- if (0) test_shll2_2d_4s_32(TyS);
+ if (1) test_shll_8h_8b_8(TyB);
+ if (1) test_shll2_8h_16b_8(TyB);
+ if (1) test_shll_4s_4h_16(TyH);
+ if (1) test_shll2_4s_8h_16(TyH);
+ if (1) test_shll_2d_2s_32(TyS);
+ if (1) test_shll2_2d_4s_32(TyS);
// shrn{2} 2s/4s_2d, 8h/4h_4s, 8b/16b_8h, #imm in 1 .. elem_bits
// rshrn{2} 2s/4s_2d, 8h/4h_4s, 8b/16b_8h, #imm in 1 .. elem_bits
- if (0) test_shrn_2s_2d_1(TyD);
- if (0) test_shrn_2s_2d_32(TyD);
- if (0) test_shrn2_4s_2d_1(TyD);
- if (0) test_shrn2_4s_2d_32(TyD);
- if (0) test_shrn_4h_4s_1(TyS);
- if (0) test_shrn_4h_4s_16(TyS);
- if (0) test_shrn2_8h_4s_1(TyS);
- if (0) test_shrn2_8h_4s_16(TyS);
- if (0) test_shrn_8b_8h_1(TyH);
- if (0) test_shrn_8b_8h_8(TyH);
- if (0) test_shrn2_16b_8h_1(TyH);
- if (0) test_shrn2_16b_8h_8(TyH);
- if (0) test_rshrn_2s_2d_1(TyD);
- if (0) test_rshrn_2s_2d_32(TyD);
- if (0) test_rshrn2_4s_2d_1(TyD);
- if (0) test_rshrn2_4s_2d_32(TyD);
- if (0) test_rshrn_4h_4s_1(TyS);
- if (0) test_rshrn_4h_4s_16(TyS);
- if (0) test_rshrn2_8h_4s_1(TyS);
- if (0) test_rshrn2_8h_4s_16(TyS);
- if (0) test_rshrn_8b_8h_1(TyH);
- if (0) test_rshrn_8b_8h_8(TyH);
- if (0) test_rshrn2_16b_8h_1(TyH);
- if (0) test_rshrn2_16b_8h_8(TyH);
+ if (1) test_shrn_2s_2d_1(TyD);
+ if (1) test_shrn_2s_2d_32(TyD);
+ if (1) test_shrn2_4s_2d_1(TyD);
+ if (1) test_shrn2_4s_2d_32(TyD);
+ if (1) test_shrn_4h_4s_1(TyS);
+ if (1) test_shrn_4h_4s_16(TyS);
+ if (1) test_shrn2_8h_4s_1(TyS);
+ if (1) test_shrn2_8h_4s_16(TyS);
+ if (1) test_shrn_8b_8h_1(TyH);
+ if (1) test_shrn_8b_8h_8(TyH);
+ if (1) test_shrn2_16b_8h_1(TyH);
+ if (1) test_shrn2_16b_8h_8(TyH);
+ if (1) test_rshrn_2s_2d_1(TyD);
+ if (1) test_rshrn_2s_2d_32(TyD);
+ if (1) test_rshrn2_4s_2d_1(TyD);
+ if (1) test_rshrn2_4s_2d_32(TyD);
+ if (1) test_rshrn_4h_4s_1(TyS);
+ if (1) test_rshrn_4h_4s_16(TyS);
+ if (1) test_rshrn2_8h_4s_1(TyS);
+ if (1) test_rshrn2_8h_4s_16(TyS);
+ if (1) test_rshrn_8b_8h_1(TyH);
+ if (1) test_rshrn_8b_8h_8(TyH);
+ if (1) test_rshrn2_16b_8h_1(TyH);
+ if (1) test_rshrn2_16b_8h_8(TyH);
// sli d_#imm
// sri d_#imm
@@ -4154,56 +4154,56 @@
// umlal{2} 2d_2s/4s_s[], 4s_4h/8h_h[]
// smlsl{2} 2d_2s/4s_s[], 4s_4h/8h_h[]
// umlsl{2} 2d_2s/4s_s[], 4s_4h/8h_h[]
- // smull{2} 2d_2s/4s_s[]. 4s_4h/8h_h[]
- // umull{2} 2d_2s/4s_s[]. 4s_4h/8h_h[]
- if (0) test_smlal_2d_2s_s0(TyS);
- if (0) test_smlal_2d_2s_s3(TyS);
- if (0) test_smlal2_2d_4s_s1(TyS);
- if (0) test_smlal2_2d_4s_s2(TyS);
- if (0) test_smlal_4s_4h_h0(TyH);
- if (0) test_smlal_4s_4h_h7(TyH);
- if (0) test_smlal2_4s_8h_h1(TyH);
- if (0) test_smlal2_4s_8h_h4(TyH);
- if (0) test_umlal_2d_2s_s0(TyS);
- if (0) test_umlal_2d_2s_s3(TyS);
- if (0) test_umlal2_2d_4s_s1(TyS);
- if (0) test_umlal2_2d_4s_s2(TyS);
- if (0) test_umlal_4s_4h_h0(TyH);
- if (0) test_umlal_4s_4h_h7(TyH);
- if (0) test_umlal2_4s_8h_h1(TyH);
- if (0) test_umlal2_4s_8h_h4(TyH);
- if (0) test_smlsl_2d_2s_s0(TyS);
- if (0) test_smlsl_2d_2s_s3(TyS);
- if (0) test_smlsl2_2d_4s_s1(TyS);
- if (0) test_smlsl2_2d_4s_s2(TyS);
- if (0) test_smlsl_4s_4h_h0(TyH);
- if (0) test_smlsl_4s_4h_h7(TyH);
- if (0) test_smlsl2_4s_8h_h1(TyH);
- if (0) test_smlsl2_4s_8h_h4(TyH);
- if (0) test_umlsl_2d_2s_s0(TyS);
- if (0) test_umlsl_2d_2s_s3(TyS);
- if (0) test_umlsl2_2d_4s_s1(TyS);
- if (0) test_umlsl2_2d_4s_s2(TyS);
- if (0) test_umlsl_4s_4h_h0(TyH);
- if (0) test_umlsl_4s_4h_h7(TyH);
- if (0) test_umlsl2_4s_8h_h1(TyH);
- if (0) test_umlsl2_4s_8h_h4(TyH);
- if (0) test_smull_2d_2s_s0(TyS);
- if (0) test_smull_2d_2s_s3(TyS);
- if (0) test_smull2_2d_4s_s1(TyS);
- if (0) test_smull2_2d_4s_s2(TyS);
- if (0) test_smull_4s_4h_h0(TyH);
- if (0) test_smull_4s_4h_h7(TyH);
- if (0) test_smull2_4s_8h_h1(TyH);
- if (0) test_smull2_4s_8h_h4(TyH);
- if (0) test_umull_2d_2s_s0(TyS);
- if (0) test_umull_2d_2s_s3(TyS);
- if (0) test_umull2_2d_4s_s1(TyS);
- if (0) test_umull2_2d_4s_s2(TyS);
- if (0) test_umull_4s_4h_h0(TyH);
- if (0) test_umull_4s_4h_h7(TyH);
- if (0) test_umull2_4s_8h_h1(TyH);
- if (0) test_umull2_4s_8h_h4(TyH);
+ // smull{2} 2d_2s/4s_s[], 4s_4h/8h_h[]
+ // umull{2} 2d_2s/4s_s[], 4s_4h/8h_h[]
+ if (1) test_smlal_2d_2s_s0(TyS);
+ if (1) test_smlal_2d_2s_s3(TyS);
+ if (1) test_smlal2_2d_4s_s1(TyS);
+ if (1) test_smlal2_2d_4s_s2(TyS);
+ if (1) test_smlal_4s_4h_h0(TyH);
+ if (1) test_smlal_4s_4h_h7(TyH);
+ if (1) test_smlal2_4s_8h_h1(TyH);
+ if (1) test_smlal2_4s_8h_h4(TyH);
+ if (1) test_umlal_2d_2s_s0(TyS);
+ if (1) test_umlal_2d_2s_s3(TyS);
+ if (1) test_umlal2_2d_4s_s1(TyS);
+ if (1) test_umlal2_2d_4s_s2(TyS);
+ if (1) test_umlal_4s_4h_h0(TyH);
+ if (1) test_umlal_4s_4h_h7(TyH);
+ if (1) test_umlal2_4s_8h_h1(TyH);
+ if (1) test_umlal2_4s_8h_h4(TyH);
+ if (1) test_smlsl_2d_2s_s0(TyS);
+ if (1) test_smlsl_2d_2s_s3(TyS);
+ if (1) test_smlsl2_2d_4s_s1(TyS);
+ if (1) test_smlsl2_2d_4s_s2(TyS);
+ if (1) test_smlsl_4s_4h_h0(TyH);
+ if (1) test_smlsl_4s_4h_h7(TyH);
+ if (1) test_smlsl2_4s_8h_h1(TyH);
+ if (1) test_smlsl2_4s_8h_h4(TyH);
+ if (1) test_umlsl_2d_2s_s0(TyS);
+ if (1) test_umlsl_2d_2s_s3(TyS);
+ if (1) test_umlsl2_2d_4s_s1(TyS);
+ if (1) test_umlsl2_2d_4s_s2(TyS);
+ if (1) test_umlsl_4s_4h_h0(TyH);
+ if (1) test_umlsl_4s_4h_h7(TyH);
+ if (1) test_umlsl2_4s_8h_h1(TyH);
+ if (1) test_umlsl2_4s_8h_h4(TyH);
+ if (1) test_smull_2d_2s_s0(TyS);
+ if (1) test_smull_2d_2s_s3(TyS);
+ if (1) test_smull2_2d_4s_s1(TyS);
+ if (1) test_smull2_2d_4s_s2(TyS);
+ if (1) test_smull_4s_4h_h0(TyH);
+ if (1) test_smull_4s_4h_h7(TyH);
+ if (1) test_smull2_4s_8h_h1(TyH);
+ if (1) test_smull2_4s_8h_h4(TyH);
+ if (1) test_umull_2d_2s_s0(TyS);
+ if (1) test_umull_2d_2s_s3(TyS);
+ if (1) test_umull2_2d_4s_s1(TyS);
+ if (1) test_umull2_2d_4s_s2(TyS);
+ if (1) test_umull_4s_4h_h0(TyH);
+ if (1) test_umull_4s_4h_h7(TyH);
+ if (1) test_umull2_4s_8h_h1(TyH);
+ if (1) test_umull2_4s_8h_h4(TyH);
// smlal{2} 2d_(2s_2s)/(4s_4s), 4s_(4h_4h)/(8h_8h), 8h_(8b_8b)/(16b_16b)
// umlal{2} 2d_(2s_2s)/(4s_4s), 4s_(4h_4h)/(8h_8h), 8h_(8b_8b)/(16b_16b)
|
|
From: <sv...@va...> - 2014-07-10 14:22:55
|
Author: sewardj
Date: Thu Jul 10 14:22:45 2014
New Revision: 2898
Log:
arm64: implement: shll #imm, shrn #imm, rshrn #imm,
{smlal,umlal,smlsl,umlsl,smull,umull} (elem)
Modified:
trunk/priv/guest_arm64_toIR.c
trunk/pub/libvex_ir.h
Modified: trunk/priv/guest_arm64_toIR.c
==============================================================================
--- trunk/priv/guest_arm64_toIR.c (original)
+++ trunk/priv/guest_arm64_toIR.c Thu Jul 10 14:22:45 2014
@@ -618,6 +618,22 @@
return ops[size];
}
+static IROp mkVecINTERLEAVELO ( UInt size ) {
+ const IROp ops[4]
+ = { Iop_InterleaveLO8x16, Iop_InterleaveLO16x8,
+ Iop_InterleaveLO32x4, Iop_InterleaveLO64x2 };
+ vassert(size < 4);
+ return ops[size];
+}
+
+static IROp mkVecINTERLEAVEHI ( UInt size ) {
+ const IROp ops[4]
+ = { Iop_InterleaveHI8x16, Iop_InterleaveHI16x8,
+ Iop_InterleaveHI32x4, Iop_InterleaveHI64x2 };
+ vassert(size < 4);
+ return ops[size];
+}
+
static IROp mkVecMAXU ( UInt size ) {
const IROp ops[4]
= { Iop_Max8Ux16, Iop_Max16Ux8, Iop_Max32Ux4, Iop_Max64Ux2 };
@@ -646,6 +662,27 @@
return ops[size];
}
+static IROp mkVecMUL ( UInt size ) {
+ const IROp ops[4]
+ = { Iop_Mul8x16, Iop_Mul16x8, Iop_Mul32x4, Iop_INVALID };
+ vassert(size < 3);
+ return ops[size];
+}
+
+static IROp mkVecMULLU ( UInt sizeNarrow ) {
+ const IROp ops[4]
+ = { Iop_Mull8Ux8, Iop_Mull16Ux4, Iop_Mull32Ux2, Iop_INVALID };
+ vassert(sizeNarrow < 3);
+ return ops[sizeNarrow];
+}
+
+static IROp mkVecMULLS ( UInt sizeNarrow ) {
+ const IROp ops[4]
+ = { Iop_Mull8Sx8, Iop_Mull16Sx4, Iop_Mull32Sx2, Iop_INVALID };
+ vassert(sizeNarrow < 3);
+ return ops[sizeNarrow];
+}
+
static IRExpr* mkU ( IRType ty, ULong imm ) {
switch (ty) {
case Ity_I32: return mkU32((UInt)(imm & 0xFFFFFFFFULL));
@@ -5899,6 +5936,130 @@
}
+/* Return a temp which holds the vector dup of the lane of width
+ (1 << size) obtained from src[laneNo]. */
+static
+IRTemp math_DUP_VEC_ELEM ( IRExpr* src, UInt size, UInt laneNo )
+{
+ vassert(size <= 3);
+ /* Normalise |laneNo| so it is of the form
+ x000 for D, xx00 for S, xxx0 for H, and xxxx for B.
+ This puts the bits we want to inspect at constant offsets
+ regardless of the value of |size|.
+ */
+ UInt ix = laneNo << size;
+ vassert(ix <= 15);
+ IROp ops[4] = { Iop_INVALID, Iop_INVALID, Iop_INVALID, Iop_INVALID };
+ switch (size) {
+ case 0: /* B */
+ ops[0] = (ix & 1) ? Iop_CatOddLanes8x16 : Iop_CatEvenLanes8x16;
+ /* fallthrough */
+ case 1: /* H */
+ ops[1] = (ix & 2) ? Iop_CatOddLanes16x8 : Iop_CatEvenLanes16x8;
+ /* fallthrough */
+ case 2: /* S */
+ ops[2] = (ix & 4) ? Iop_CatOddLanes32x4 : Iop_CatEvenLanes32x4;
+ /* fallthrough */
+ case 3: /* D */
+ ops[3] = (ix & 8) ? Iop_InterleaveHI64x2 : Iop_InterleaveLO64x2;
+ break;
+ default:
+ vassert(0);
+ }
+ IRTemp res = newTemp(Ity_V128);
+ assign(res, src);
+ Int i;
+ for (i = 3; i >= 0; i--) {
+ if (ops[i] == Iop_INVALID)
+ break;
+ IRTemp tmp = newTemp(Ity_V128);
+ assign(tmp, binop(ops[i], mkexpr(res), mkexpr(res)));
+ res = tmp;
+ }
+ return res;
+}
+
+
+/* Let |srcV| be a V128 value, and let |imm5| be a lane-and-size
+ selector encoded as shown below. Return a new V128 holding the
+ selected lane from |srcV| dup'd out to V128, and also return the
+ lane number, log2 of the lane size in bytes, and width-character via
+ *laneNo, *laneSzLg2 and *laneCh respectively. It may be that imm5
+ is an invalid selector, in which case return
+ IRTemp_INVALID, 0, 0 and '?' respectively.
+
+ imm5 = xxxx1 signifies .b[xxxx]
+ = xxx10 .h[xxx]
+ = xx100 .s[xx]
+ = x1000 .d[x]
+ otherwise invalid
+*/
+static
+IRTemp handle_DUP_VEC_ELEM ( /*OUT*/UInt* laneNo,
+ /*OUT*/UInt* laneSzLg2, /*OUT*/HChar* laneCh,
+ IRExpr* srcV, UInt imm5 )
+{
+ *laneNo = 0;
+ *laneSzLg2 = 0;
+ *laneCh = '?';
+
+ if (imm5 & 1) {
+ *laneNo = (imm5 >> 1) & 15;
+ *laneSzLg2 = 0;
+ *laneCh = 'b';
+ }
+ else if (imm5 & 2) {
+ *laneNo = (imm5 >> 2) & 7;
+ *laneSzLg2 = 1;
+ *laneCh = 'h';
+ }
+ else if (imm5 & 4) {
+ *laneNo = (imm5 >> 3) & 3;
+ *laneSzLg2 = 2;
+ *laneCh = 's';
+ }
+ else if (imm5 & 8) {
+ *laneNo = (imm5 >> 4) & 1;
+ *laneSzLg2 = 3;
+ *laneCh = 'd';
+ }
+ else {
+ /* invalid */
+ return IRTemp_INVALID;
+ }
+
+ return math_DUP_VEC_ELEM(srcV, *laneSzLg2, *laneNo);
+}
+
+
+/* Clone |imm| to every lane of a V128, with lane size log2 of |size|. */
+static
+IRTemp math_VEC_DUP_IMM ( UInt size, ULong imm )
+{
+ IRType ty = Ity_INVALID;
+ IRTemp rcS = IRTemp_INVALID;
+ switch (size) {
+ case X01:
+ vassert(imm <= 0xFFFFULL);
+ ty = Ity_I16;
+ rcS = newTemp(ty); assign(rcS, mkU16( (UShort)imm ));
+ break;
+ case X10:
+ vassert(imm <= 0xFFFFFFFFULL);
+ ty = Ity_I32;
+ rcS = newTemp(ty); assign(rcS, mkU32( (UInt)imm ));
+ break;
+ case X11:
+ ty = Ity_I64;
+ rcS = newTemp(ty); assign(rcS, mkU64(imm)); break;
+ default:
+ vassert(0);
+ }
+ IRTemp rcV = math_DUP_TO_V128(rcS, ty);
+ return rcV;
+}
+
+
/* Let |new64| be a V128 in which only the lower 64 bits are interesting,
and the upper can contain any value -- it is ignored. If |is2| is False,
generate IR to put |new64| in the lower half of vector reg |dd| and zero
@@ -6222,55 +6383,20 @@
0q0 01110000 imm5 000001 n d DUP Vd.T, Vn.Ts[index]
*/
if (bitOP == 0 && imm4 == BITS4(0,0,0,0)) {
- Bool isQ = bitQ == 1;
- IRTemp w0 = newTemp(Ity_I64);
- const HChar* arT = "??";
- const HChar* arTs = "??";
- IRType laneTy = Ity_INVALID;
- UInt laneNo = 16; /* invalid */
- if (imm5 & 1) {
- arT = isQ ? "16b" : "8b";
- arTs = "b";
- laneNo = (imm5 >> 1) & 15;
- laneTy = Ity_I8;
- assign(w0, unop(Iop_8Uto64, getQRegLane(nn, laneNo, laneTy)));
- }
- else if (imm5 & 2) {
- arT = isQ ? "8h" : "4h";
- arTs = "h";
- laneNo = (imm5 >> 2) & 7;
- laneTy = Ity_I16;
- assign(w0, unop(Iop_16Uto64, getQRegLane(nn, laneNo, laneTy)));
- }
- else if (imm5 & 4) {
- arT = isQ ? "4s" : "2s";
- arTs = "s";
- laneNo = (imm5 >> 3) & 3;
- laneTy = Ity_I32;
- assign(w0, unop(Iop_32Uto64, getQRegLane(nn, laneNo, laneTy)));
- }
- else if ((imm5 & 8) && isQ) {
- arT = "2d";
- arTs = "d";
- laneNo = (imm5 >> 4) & 1;
- laneTy = Ity_I64;
- assign(w0, getQRegLane(nn, laneNo, laneTy));
- }
- else {
- /* invalid; leave laneTy unchanged. */
- }
- /* */
- if (laneTy != Ity_INVALID) {
- vassert(laneNo < 16);
- IRTemp w1 = math_DUP_TO_64(w0, laneTy);
- putQReg128(dd, binop(Iop_64HLtoV128,
- isQ ? mkexpr(w1) : mkU64(0), mkexpr(w1)));
- DIP("dup %s.%s, %s.%s[%u]\n",
- nameQReg128(dd), arT, nameQReg128(nn), arTs, laneNo);
- return True;
- }
- /* invalid */
- return False;
+ UInt laneNo = 0;
+ UInt laneSzLg2 = 0;
+ HChar laneCh = '?';
+ IRTemp res = handle_DUP_VEC_ELEM(&laneNo, &laneSzLg2, &laneCh,
+ getQReg128(nn), imm5);
+ if (res == IRTemp_INVALID)
+ return False;
+ if (bitQ == 0 && laneSzLg2 == X11)
+ return False; /* .1d case */
+ putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+ const HChar* arT = nameArr_Q_SZ(bitQ, laneSzLg2);
+ DIP("dup %s.%s, %s.%c[%u]\n",
+ nameQReg128(dd), arT, nameQReg128(nn), laneCh, laneNo);
+ return True;
}
/* -------- x,0,0001: DUP (general, vector) -------- */
@@ -7060,10 +7186,6 @@
1xxx:xxx -> D, SHR:64-xxxxxx
other -> invalid
*/
- const IROp opsSHRN[4]
- = { Iop_ShrN8x16, Iop_ShrN16x8, Iop_ShrN32x4, Iop_ShrN64x2 };
- const IROp opsSARN[4]
- = { Iop_SarN8x16, Iop_SarN16x8, Iop_SarN32x4, Iop_SarN64x2 };
UInt size = 0;
UInt shift = 0;
Bool isQ = bitQ == 1;
@@ -7072,7 +7194,7 @@
vassert(size >= 0 && size <= 3);
if (ok && size < 4 && shift > 0 && shift < (8 << size)
&& !(size == 3/*64bit*/ && !isQ)) {
- IROp op = isU ? opsSHRN[size] : opsSARN[size];
+ IROp op = isU ? mkVecSHRN(size) : mkVecSARN(size);
IRExpr* src = getQReg128(nn);
IRTemp res = newTemp(Ity_V128);
assign(res, binop(op, src, mkU8(shift)));
@@ -7125,6 +7247,36 @@
return False;
}
+ if (bitU == 0
+ && (opcode == BITS5(1,0,0,0,0) || opcode == BITS5(1,0,0,0,1))) {
+ /* -------- 0,10000 SHRN{,2} #imm -------- */
+ /* -------- 0,10001 RSHRN{,2} #imm -------- */
+ /* Narrows, and size is the narrow size. */
+ UInt size = 0;
+ UInt shift = 0;
+ Bool is2 = bitQ == 1;
+ Bool isR = opcode == BITS5(1,0,0,0,1);
+ Bool ok = getLaneInfo_IMMH_IMMB(&shift, &size, immh, immb);
+ if (!ok || size == X11) return False;
+ vassert(shift >= 1);
+ IRTemp t1 = newTemp(Ity_V128);
+ IRTemp t2 = newTemp(Ity_V128);
+ IRTemp t3 = newTemp(Ity_V128);
+ assign(t1, getQReg128(nn));
+ assign(t2, isR ? binop(mkVecADD(size+1),
+ mkexpr(t1),
+ mkexpr(math_VEC_DUP_IMM(size+1, 1ULL<<(shift-1))))
+ : mkexpr(t1));
+ assign(t3, binop(mkVecSHRN(size+1), mkexpr(t2), mkU8(shift)));
+ IRTemp t4 = math_NARROW_LANES(t3, t3, size);
+ putLO64andZUorPutHI64(is2, dd, t4);
+ const HChar* arrNarrow = nameArr_Q_SZ(bitQ, size);
+ const HChar* arrWide = nameArr_Q_SZ(1, size+1);
+ DIP("%s %s.%s, %s.%s, #%u\n", isR ? "rshrn" : "shrn",
+ nameQReg128(dd), arrNarrow, nameQReg128(nn), arrWide, shift);
+ return True;
+ }
+
if (opcode == BITS5(1,0,1,0,0)) {
/* -------- 0,10100 SSHLL{,2} #imm -------- */
/* -------- 1,10100 USHLL{,2} #imm -------- */
@@ -7284,40 +7436,26 @@
/* Narrows, and size refers to the narrowed lanes. */
if (size == X11) return False;
vassert(size <= 2);
- const IROp opADD[3] = { Iop_Add16x8, Iop_Add32x4, Iop_Add64x2 };
- const IROp opSUB[3] = { Iop_Sub16x8, Iop_Sub32x4, Iop_Sub64x2 };
- const IROp opSHR[3] = { Iop_ShrN16x8, Iop_ShrN32x4, Iop_ShrN64x2 };
- const UInt shift[3] = { 8, 16, 32 };
- const IROp opCAT[3] = { Iop_CatEvenLanes8x16, Iop_CatEvenLanes16x8,
- Iop_CatEvenLanes32x4 };
+ const UInt shift[3] = { 8, 16, 32 };
Bool isADD = opcode == BITS4(0,1,0,0);
Bool isR = bitU == 1;
/* Combined elements in wide lanes */
IRTemp wide = newTemp(Ity_V128);
- IRExpr* wideE = binop(isADD ? opADD[size] : opSUB[size],
+ IRExpr* wideE = binop(isADD ? mkVecADD(size+1) : mkVecSUB(size+1),
getQReg128(nn), getQReg128(mm));
if (isR) {
- IRType ty = Ity_INVALID;
- IRTemp rcS = IRTemp_INVALID;
- switch (size) {
- case X00: ty = Ity_I16;
- rcS = newTemp(ty); assign(rcS, mkU16(0x80)); break;
- case X01: ty = Ity_I32;
- rcS = newTemp(ty); assign(rcS, mkU32(0x8000)); break;
- case X10: ty = Ity_I64;
- rcS = newTemp(ty); assign(rcS, mkU64(0x80000000)); break;
- default: vassert(0);
- }
- IRTemp rcV = math_DUP_TO_V128(rcS, ty);
- wideE = binop(opADD[size], wideE, mkexpr(rcV));
+ wideE = binop(mkVecADD(size+1),
+ wideE,
+ mkexpr(math_VEC_DUP_IMM(size+1,
+ 1ULL << (shift[size]-1))));
}
assign(wide, wideE);
/* Top halves of elements, still in wide lanes */
IRTemp shrd = newTemp(Ity_V128);
- assign(shrd, binop(opSHR[size], mkexpr(wide), mkU8(shift[size])));
+ assign(shrd, binop(mkVecSHRN(size+1), mkexpr(wide), mkU8(shift[size])));
/* Elements now compacted into lower 64 bits */
IRTemp new64 = newTemp(Ity_V128);
- assign(new64, binop(opCAT[size], mkexpr(shrd), mkexpr(shrd)));
+ assign(new64, binop(mkVecCATEVENLANES(size), mkexpr(shrd), mkexpr(shrd)));
putLO64andZUorPutHI64(is2, dd, new64);
const HChar* arrNarrow = nameArr_Q_SZ(bitQ, size);
const HChar* arrWide = nameArr_Q_SZ(1, size+1);
@@ -7359,40 +7497,36 @@
if (opcode == BITS4(1,1,0,0)
|| opcode == BITS4(1,0,0,0) || opcode == BITS4(1,0,1,0)) {
- /* -------- 0,1100 SMULL{2} -------- */ // 0 (ix)
+ /* -------- 0,1100 SMULL{2} -------- */ // 0 (ks)
/* -------- 1,1100 UMULL{2} -------- */ // 0
/* -------- 0,1000 SMLAL{2} -------- */ // 1
/* -------- 1,1000 UMLAL{2} -------- */ // 1
/* -------- 0,1010 SMLSL{2} -------- */ // 2
/* -------- 1,1010 UMLSL{2} -------- */ // 2
/* Widens, and size refers to the narrowed lanes. */
- UInt ix = 3;
+ UInt ks = 3;
switch (opcode) {
- case BITS4(1,1,0,0): ix = 0; break;
- case BITS4(1,0,0,0): ix = 1; break;
- case BITS4(1,0,1,0): ix = 2; break;
+ case BITS4(1,1,0,0): ks = 0; break;
+ case BITS4(1,0,0,0): ks = 1; break;
+ case BITS4(1,0,1,0): ks = 2; break;
default: vassert(0);
}
- vassert(ix >= 0 && ix <= 2);
- const IROp opMULLU[3] = { Iop_Mull8Ux8, Iop_Mull16Ux4, Iop_Mull32Ux2 };
- const IROp opMULLS[3] = { Iop_Mull8Sx8, Iop_Mull16Sx4, Iop_Mull32Sx2 };
- const IROp opADD[3] = { Iop_Add16x8, Iop_Add32x4, Iop_Add64x2 };
- const IROp opSUB[3] = { Iop_Sub16x8, Iop_Sub32x4, Iop_Sub64x2 };
+ vassert(ks >= 0 && ks <= 2);
if (size == X11) return False;
vassert(size <= 2);
Bool isU = bitU == 1;
- IROp mulOp = isU ? opMULLU[size] : opMULLS[size];
- IROp accOp = (ix == 1) ? opADD[size]
- : (ix == 2 ? opSUB[size] : Iop_INVALID);
+ IROp mulOp = isU ? mkVecMULLU(size) : mkVecMULLS(size);
+ IROp accOp = (ks == 1) ? mkVecADD(size+1)
+ : (ks == 2 ? mkVecSUB(size+1) : Iop_INVALID);
IRTemp mul = math_BINARY_WIDENING_V128(is2, mulOp,
getQReg128(nn), getQReg128(mm));
IRTemp res = newTemp(Ity_V128);
- assign(res, ix == 0 ? mkexpr(mul)
+ assign(res, ks == 0 ? mkexpr(mul)
: binop(accOp, getQReg128(dd), mkexpr(mul)));
putQReg128(dd, mkexpr(res));
const HChar* arrNarrow = nameArr_Q_SZ(bitQ, size);
const HChar* arrWide = nameArr_Q_SZ(1, size+1);
- const HChar* nm = ix == 0 ? "mull" : (ix == 1 ? "mlal" : "mlsl");
+ const HChar* nm = ks == 0 ? "mull" : (ks == 1 ? "mlal" : "mlsl");
DIP("%c%s%s %s.%s, %s.%s, %s.%s\n", isU ? 'u' : 's', nm, is2 ? "2" : "",
nameQReg128(dd), arrWide,
nameQReg128(nn), arrNarrow, nameQReg128(mm), arrNarrow);
@@ -8259,6 +8393,26 @@
return False;
}
+ if (bitU == 1 && opcode == BITS5(1,0,0,1,1)) {
+ /* -------- 1,xx,10011 SHLL{2} #lane-width -------- */
+ /* Widens, and size is the narrow size. */
+ if (size == X11) return False;
+ Bool is2 = bitQ == 1;
+ IROp opINT = is2 ? mkVecINTERLEAVEHI(size) : mkVecINTERLEAVELO(size);
+ IROp opSHL = mkVecSHLN(size+1);
+ IRTemp src = newTemp(Ity_V128);
+ IRTemp res = newTemp(Ity_V128);
+ assign(src, getQReg128(nn));
+ assign(res, binop(opSHL, binop(opINT, mkexpr(src), mkexpr(src)),
+ mkU8(8 << size)));
+ putQReg128(dd, mkexpr(res));
+ const HChar* arrNarrow = nameArr_Q_SZ(bitQ, size);
+ const HChar* arrWide = nameArr_Q_SZ(1, size+1);
+ DIP("shll%s %s.%s, %s.%s, #%u\n", is2 ? "2" : "",
+ nameQReg128(dd), arrWide, nameQReg128(nn), arrNarrow, 8 << size);
+ return True;
+ }
+
if (bitU == 0 && size == X01 && opcode == BITS5(1,0,1,1,0)) {
/* -------- 0,01,10110: FCVTN 2s/4s_2d -------- */
IRTemp rm = mk_get_IR_rounding_mode();
@@ -8391,14 +8545,11 @@
vassert(0);
}
vassert(mm < 32 && ix < 16);
- IROp opMUL = size == X01 ? Iop_Mul16x8 : Iop_Mul32x4;
- IROp opADD = size == X01 ? Iop_Add16x8 : Iop_Add32x4;
- IROp opSUB = size == X01 ? Iop_Sub16x8 : Iop_Sub32x4;
- IRType ity = size == X01 ? Ity_I16 : Ity_I32;
+ IROp opMUL = mkVecMUL(size);
+ IROp opADD = mkVecADD(size);
+ IROp opSUB = mkVecSUB(size);
HChar ch = size == X01 ? 'h' : 's';
- IRTemp elemM = newTemp(ity);
- assign(elemM, getQRegLane(mm, ix, ity));
- IRTemp vecM = math_DUP_TO_V128(elemM, ity);
+ IRTemp vecM = math_DUP_VEC_ELEM(getQReg128(mm), size, ix);
IRTemp vecD = newTemp(Ity_V128);
IRTemp vecN = newTemp(Ity_V128);
IRTemp res = newTemp(Ity_V128);
@@ -8419,6 +8570,65 @@
return True;
}
+ if (opcode == BITS4(1,0,1,0)
+ || opcode == BITS4(0,0,1,0) || opcode == BITS4(0,1,1,0)) {
+ /* -------- 0,xx,1010 SMULL s/h variants only -------- */ // 0 (ks)
+ /* -------- 1,xx,1010 UMULL s/h variants only -------- */ // 0
+ /* -------- 0,xx,0010 SMLAL s/h variants only -------- */ // 1
+ /* -------- 1,xx,0010 UMLAL s/h variants only -------- */ // 1
+ /* -------- 0,xx,0110 SMLSL s/h variants only -------- */ // 2
+ /* -------- 1,xx,0110 UMLSL s/h variants only -------- */ // 2
+ /* Widens, and size refers to the narrowed lanes. */
+ UInt ks = 3;
+ switch (opcode) {
+ case BITS4(1,0,1,0): ks = 0; break;
+ case BITS4(0,0,1,0): ks = 1; break;
+ case BITS4(0,1,1,0): ks = 2; break;
+ default: vassert(0);
+ }
+ vassert(ks >= 0 && ks <= 2);
+ Bool isU = bitU == 1;
+ Bool is2 = bitQ == 1;
+ UInt mm = 32; // invalid
+ UInt ix = 16; // invalid
+ switch (size) {
+ case X00:
+ return False; // h_b_b[] case is not allowed
+ case X01:
+ mm = mmLO4; ix = (bitH << 2) | (bitL << 1) | (bitM << 0); break;
+ case X10:
+ mm = (bitM << 4) | mmLO4; ix = (bitH << 1) | (bitL << 0); break;
+ case X11:
+ return False; // q_d_d[] case is not allowed
+ default:
+ vassert(0);
+ }
+ vassert(mm < 32 && ix < 16);
+ IROp mulOp = isU ? mkVecMULLU(size) : mkVecMULLS(size);
+ IROp accOp = (ks == 1) ? mkVecADD(size+1)
+ : (ks == 2 ? mkVecSUB(size+1) : Iop_INVALID);
+ IRTemp vecM = math_DUP_VEC_ELEM(getQReg128(mm), size, ix);
+ IRTemp vecD = newTemp(Ity_V128);
+ IRTemp vecN = newTemp(Ity_V128);
+ assign(vecD, getQReg128(dd));
+ assign(vecN, getQReg128(nn));
+ IRTemp mul = math_BINARY_WIDENING_V128(is2, mulOp,
+ mkexpr(vecN), mkexpr(vecM));
+ IRTemp res = newTemp(Ity_V128);
+ assign(res, ks == 0 ? mkexpr(mul)
+ : binop(accOp, getQReg128(dd), mkexpr(mul)));
+ putQReg128(dd, mkexpr(res));
+ const HChar* nm = ks == 0 ? "mull" : (ks == 1 ? "mlal" : "mlsl");
+ const HChar* arrNarrow = nameArr_Q_SZ(bitQ, size);
+ const HChar* arrWide = nameArr_Q_SZ(1, size+1);
+ HChar ch = size == X01 ? 'h' : 's';
+ DIP("%c%s%s %s.%s, %s.%s, %s.%c[%u]\n",
+ isU ? 'u' : 's', nm, is2 ? "2" : "",
+ nameQReg128(dd), arrWide,
+ nameQReg128(nn), arrNarrow, nameQReg128(dd), ch, ix);
+ return True;
+ }
+
return False;
# undef INSN
}
Modified: trunk/pub/libvex_ir.h
==============================================================================
--- trunk/pub/libvex_ir.h (original)
+++ trunk/pub/libvex_ir.h Thu Jul 10 14:22:45 2014
@@ -1572,7 +1572,9 @@
Iop_InterleaveOddLanes32x4, Iop_InterleaveEvenLanes32x4,
/* CONCATENATION -- build a new value by concatenating either
- the even or odd lanes of both operands. */
+ the even or odd lanes of both operands. Note that
+ Cat{Odd,Even}Lanes64x2 are identical to Interleave{HI,LO}64x2
+ and so are omitted. */
Iop_CatOddLanes8x16, Iop_CatOddLanes16x8, Iop_CatOddLanes32x4,
Iop_CatEvenLanes8x16, Iop_CatEvenLanes16x8, Iop_CatEvenLanes32x4,
|