|
From: <sv...@va...> - 2014-01-12 12:54:12
|
Author: sewardj
Date: Sun Jan 12 12:54:00 2014
New Revision: 13770
Log:
Add support for ARMv8 AArch64 (the 64 bit ARM instruction set).
Added:
trunk/ARM64_TIDYUPS.txt
trunk/coregrind/m_dispatch/dispatch-arm64-linux.S
trunk/coregrind/m_gdbserver/valgrind-low-arm64.c
trunk/coregrind/m_sigframe/sigframe-arm64-linux.c
trunk/coregrind/m_syswrap/syscall-arm64-linux.S
trunk/coregrind/m_syswrap/syswrap-arm64-linux.c
trunk/include/vki/vki-arm64-linux.h
trunk/include/vki/vki-posixtypes-arm64-linux.h
trunk/include/vki/vki-scnums-arm64-linux.h
Modified:
trunk/Makefile.all.am
trunk/Makefile.tool.am
trunk/Makefile.vex.am
trunk/cachegrind/cg_arch.c
trunk/cachegrind/cg_branchpred.c
trunk/configure.ac
trunk/coregrind/Makefile.am
trunk/coregrind/launcher-linux.c
trunk/coregrind/m_aspacemgr/aspacemgr-common.c
trunk/coregrind/m_cache.c
trunk/coregrind/m_coredump/coredump-elf.c
trunk/coregrind/m_debugger.c
trunk/coregrind/m_debuginfo/d3basics.c
trunk/coregrind/m_debuginfo/debuginfo.c
trunk/coregrind/m_debuginfo/priv_storage.h
trunk/coregrind/m_debuginfo/readdwarf.c
trunk/coregrind/m_debuginfo/readelf.c
trunk/coregrind/m_debuginfo/storage.c
trunk/coregrind/m_debuglog.c
trunk/coregrind/m_gdbserver/target.c
trunk/coregrind/m_gdbserver/valgrind_low.h
trunk/coregrind/m_initimg/initimg-linux.c
trunk/coregrind/m_libcassert.c
trunk/coregrind/m_libcfile.c
trunk/coregrind/m_libcproc.c
trunk/coregrind/m_machine.c
trunk/coregrind/m_main.c
trunk/coregrind/m_options.c
trunk/coregrind/m_redir.c
trunk/coregrind/m_scheduler/scheduler.c
trunk/coregrind/m_signals.c
trunk/coregrind/m_stacktrace.c
trunk/coregrind/m_syscall.c
trunk/coregrind/m_syswrap/priv_types_n_macros.h
trunk/coregrind/m_syswrap/syscall-arm-linux.S
trunk/coregrind/m_syswrap/syswrap-linux.c
trunk/coregrind/m_syswrap/syswrap-main.c
trunk/coregrind/m_trampoline.S
trunk/coregrind/m_vki.c
trunk/coregrind/pub_core_aspacemgr.h
trunk/coregrind/pub_core_basics.h
trunk/coregrind/pub_core_debuginfo.h
trunk/coregrind/pub_core_machine.h
trunk/coregrind/pub_core_mallocfree.h
trunk/coregrind/pub_core_syscall.h
trunk/coregrind/pub_core_threadstate.h
trunk/coregrind/pub_core_trampoline.h
trunk/coregrind/pub_core_transtab_asm.h
trunk/docs/internals/register-uses.txt
trunk/drd/drd_bitmap.h
trunk/drd/drd_load_store.c
trunk/include/pub_tool_basics.h
trunk/include/pub_tool_machine.h
trunk/include/pub_tool_vkiscnums_asm.h
trunk/include/valgrind.h
trunk/include/vki/vki-linux.h
trunk/include/vki/vki-ppc32-linux.h
trunk/include/vki/vki-ppc64-linux.h
trunk/memcheck/mc_machine.c
Added: trunk/ARM64_TIDYUPS.txt
==============================================================================
--- trunk/ARM64_TIDYUPS.txt (added)
+++ trunk/ARM64_TIDYUPS.txt Sun Jan 12 12:54:00 2014
@@ -0,0 +1,178 @@
+
+## HOW TO Cross-CONFIGURE
+
+export CC=aarch64-linux-gnu-gcc
+export LD=aarch64-linux-gnu-ld
+export AR=aarch64-linux-gnu-ar
+
+./autogen.sh
+./configure --prefix=`pwd`/Inst --host=aarch64-unknown-linux --enable-only64bit
+
+##############################################################
+
+UnwindStartRegs -- what should that contain?
+
+
+
+vki-arm64-linux.h: vki_sigaction_base
+
+I really don't think that __vki_sigrestore_t sa_restorer
+should be present. Adding it surely puts sa_mask at a wrong
+offset compared to (kernel) reality. But not having it causes
+compilation of m_signals.c to fail in hard to understand ways,
+so adding it temporarily.
+
+
+m_trampoline.S: what's the unexecutable-insn value? 0xFFFFFFFF
+is there at the moment, but 0x00000000 is probably what it should be.
+Also, fix indentation/tab-vs-space stuff
+
+
+./include/vki/vki-arm64-linux.h: uses __uint128_t. Should change
+it to __vki_uint128_t, but what's the defn of that?
+
+
+
+m_debuginfo/priv_storage.h: need proper defn of DiCfSI
+
+
+readdwarf.c: is this correct?
+#elif defined(VGP_arm64_linux)
+# define FP_REG 29 //???
+# define SP_REG 31 //???
+# define RA_REG_DEFAULT 30 //???
+
+
+vki-arm64-linux.h:
+re linux-3.10.5/include/uapi/asm-generic/sembuf.h
+I'd say the amd64 version has padding it shouldn't have. Check?
+
+
+
+syswrap-linux.c run_a_thread_NORETURN assembly sections
+seems like tst->os_state.exitcode has word type
+in which case the ppc64_linux use of lwz to read it, is wrong
+
+
+
+syswrap-linux.c ML_(do_fork_clone)
+assuming that VGP_arm64_linux is the same as VGP_arm_linux here
+
+
+
+dispatch-arm64-linux.S: FIXME: set up FP control state before
+entering generated code. Also fix screwy indentation.
+
+dispatcher-ery general: what's a good (predictor-friendly) way to
+branch to a register?
+
+
+
+in vki-scnums-arm64-linux.h
+//#if __BITS_PER_LONG == 64 && !defined(__SYSCALL_COMPAT)
+Probably want to reenable that and clean up accordingly
+
+
+
+putIRegXXorZR: figure out a way that the computed value is actually
+used, so as to keep any memory reads that might generate it, alive.
+(else the simulation can lose exceptions). At least, for writes to
+the zero register generated by loads .. or .. can any other
+integer instructions that write to a register cause exceptions?
+
+
+
+loads/stores: generate stack alignment checks as necessary
+
+
+
+fix barrier insns: ISB, DMB
+
+
+
+fix atomic loads/stores
+
+
+
+FMADD/FMSUB/FNMADD/FNMSUB: generate and use the relevant fused
+IROps so as to avoid double rounding
+
+
+
+ARM64Instr_Call getRegUsage: re-check relative to what
+getAllocableRegs_ARM64 makes available
+
+
+
+Make dispatch-arm64-linux.S save any callee-saved Q regs
+I think what is required is to save D8-D15 and nothing more than that.
+
+
+
+wrapper for __NR3264_fstat -- correct?
+
+
+
+PRE(sys_clone): get rid of references to vki_modify_ldt_t
+and the definition of it in vki-arm64-linux.h. Ditto for
+32 bit arm.
+
+
+
+sigframe-arm64-linux.c: build_sigframe: references to nonexistent
+siguc->uc_mcontext.trap_no, siguc->uc_mcontext.error_code have been
+replaced by zero. Also in synth_ucontext.
+
+
+
+m_debugger.c:
+uregs.pstate = LibVEX_GuestARM64_get_nzcv(vex); /* is this correct? */
+Is that remotely correct?
+
+
+
+host_arm64_defs.c: emit_ARM64Instr:
+ARM64in_VDfromX and ARM64in_VQfromXX: use simple top-half zeroing
+MOVs to vector registers instead of INS Vd.D[0], Xreg, to avoid false
+dependencies on the top half of the register. (Or at least check
+the semantics of INS Vd.D[0] to see if it zeroes out the top.)
+
+
+
+preferredVectorSubTypeFromSize: review perf effects and decide
+on a types-for-subparts policy
+
+
+
+fold_IRExpr_Unop: add a reduction rule for this
+1Sto64(CmpNEZ64( Or64(GET:I64(1192),GET:I64(1184)) ))
+viz. 1Sto64(CmpNEZ64(x)) --> CmpwNEZ64(x)
+
+
+
+check insn selection for memcheck-only primops:
+Left64 CmpwNEZ64 V128to64 V128HIto64 1Sto64 CmpNEZ64 CmpNEZ32
+widen_z_8_to_64 1Sto32 Left32 32HLto64 CmpwNEZ32 CmpNEZ8
+
+
+
+isel: get rid of various cases where zero is put into a register
+and just use xzr instead. Especially for CmpNEZ64/32. And for
+writing zeroes into the CC thunk fields.
+
+
+
+/* Keep this list in sync with that in iselNext below */
+/* Keep this list in sync with that for Ist_Exit above */
+uh .. they are not in sync
+
+
+
+very stupid:
+imm64 x23, 0xFFFFFFFFFFFFFFA0
+17 F4 9F D2 F7 FF BF F2 F7 FF DF F2 F7 FF FF F2
+
+
+
+valgrind.h: fix VALGRIND_ALIGN_STACK/VALGRIND_RESTORE_STACK,
+also add CFI annotations
Modified: trunk/Makefile.all.am
==============================================================================
--- trunk/Makefile.all.am (original)
+++ trunk/Makefile.all.am Sun Jan 12 12:54:00 2014
@@ -156,6 +156,10 @@
AM_CCASFLAGS_ARM_LINUX = @FLAG_M32@ \
-marm -mcpu=cortex-a8 -g
+AM_FLAG_M3264_ARM64_LINUX = @FLAG_M64@
+AM_CFLAGS_ARM64_LINUX = @FLAG_M64@ $(AM_CFLAGS_BASE)
+AM_CCASFLAGS_ARM64_LINUX = @FLAG_M64@ -g
+
AM_FLAG_M3264_X86_DARWIN = -arch i386
AM_CFLAGS_X86_DARWIN = $(WERROR) -arch i386 $(AM_CFLAGS_BASE) \
-mmacosx-version-min=10.5 \
@@ -213,6 +217,7 @@
PRELOAD_LDFLAGS_PPC32_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M32@
PRELOAD_LDFLAGS_PPC64_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M64@
PRELOAD_LDFLAGS_ARM_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M32@
+PRELOAD_LDFLAGS_ARM64_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M64@
PRELOAD_LDFLAGS_X86_DARWIN = $(PRELOAD_LDFLAGS_COMMON_DARWIN) -arch i386
PRELOAD_LDFLAGS_AMD64_DARWIN = $(PRELOAD_LDFLAGS_COMMON_DARWIN) -arch x86_64
PRELOAD_LDFLAGS_S390X_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M64@
Modified: trunk/Makefile.tool.am
==============================================================================
--- trunk/Makefile.tool.am (original)
+++ trunk/Makefile.tool.am Sun Jan 12 12:54:00 2014
@@ -52,6 +52,9 @@
TOOL_LDFLAGS_ARM_LINUX = \
$(TOOL_LDFLAGS_COMMON_LINUX) @FLAG_M32@
+TOOL_LDFLAGS_ARM64_LINUX = \
+ $(TOOL_LDFLAGS_COMMON_LINUX) @FLAG_M64@
+
TOOL_LDFLAGS_S390X_LINUX = \
$(TOOL_LDFLAGS_COMMON_LINUX) @FLAG_M64@
@@ -105,6 +108,9 @@
LIBREPLACEMALLOC_ARM_LINUX = \
$(top_builddir)/coregrind/libreplacemalloc_toolpreload-arm-linux.a
+LIBREPLACEMALLOC_ARM64_LINUX = \
+ $(top_builddir)/coregrind/libreplacemalloc_toolpreload-arm64-linux.a
+
LIBREPLACEMALLOC_X86_DARWIN = \
$(top_builddir)/coregrind/libreplacemalloc_toolpreload-x86-darwin.a
@@ -145,6 +151,11 @@
$(LIBREPLACEMALLOC_ARM_LINUX) \
-Wl,--no-whole-archive
+LIBREPLACEMALLOC_LDFLAGS_ARM64_LINUX = \
+ -Wl,--whole-archive \
+ $(LIBREPLACEMALLOC_ARM64_LINUX) \
+ -Wl,--no-whole-archive
+
LIBREPLACEMALLOC_LDFLAGS_X86_DARWIN = \
$(LIBREPLACEMALLOC_X86_DARWIN)
Modified: trunk/Makefile.vex.am
==============================================================================
--- trunk/Makefile.vex.am (original)
+++ trunk/Makefile.vex.am Sun Jan 12 12:54:00 2014
@@ -24,6 +24,7 @@
pub/libvex_guest_ppc32.h \
pub/libvex_guest_ppc64.h \
pub/libvex_guest_arm.h \
+ pub/libvex_guest_arm64.h \
pub/libvex_guest_s390x.h \
pub/libvex_guest_mips32.h \
pub/libvex_guest_mips64.h \
@@ -42,6 +43,7 @@
priv/guest_amd64_defs.h \
priv/guest_ppc_defs.h \
priv/guest_arm_defs.h \
+ priv/guest_arm64_defs.h \
priv/guest_s390_defs.h \
priv/guest_mips_defs.h \
priv/host_generic_regs.h \
@@ -53,6 +55,7 @@
priv/host_amd64_defs.h \
priv/host_ppc_defs.h \
priv/host_arm_defs.h \
+ priv/host_arm64_defs.h \
priv/host_s390_defs.h \
priv/s390_disasm.h \
priv/s390_defs.h \
@@ -71,6 +74,7 @@
pub/libvex_guest_ppc32.h \
pub/libvex_guest_ppc64.h \
pub/libvex_guest_arm.h \
+ pub/libvex_guest_arm64.h \
pub/libvex_guest_s390x.h \
pub/libvex_guest_mips32.h \
pub/libvex_guest_mips64.h
@@ -114,6 +118,8 @@
priv/guest_ppc_toIR.c \
priv/guest_arm_helpers.c \
priv/guest_arm_toIR.c \
+ priv/guest_arm64_helpers.c \
+ priv/guest_arm64_toIR.c \
priv/guest_s390_helpers.c \
priv/guest_s390_toIR.c \
priv/guest_mips_helpers.c \
@@ -132,6 +138,8 @@
priv/host_ppc_isel.c \
priv/host_arm_defs.c \
priv/host_arm_isel.c \
+ priv/host_arm64_defs.c \
+ priv/host_arm64_isel.c \
priv/host_s390_defs.c \
priv/host_s390_isel.c \
priv/s390_disasm.c \
Modified: trunk/cachegrind/cg_arch.c
==============================================================================
--- trunk/cachegrind/cg_arch.c (original)
+++ trunk/cachegrind/cg_arch.c Sun Jan 12 12:54:00 2014
@@ -367,6 +367,14 @@
*D1c = (cache_t) { 16384, 4, 64 };
*LLc = (cache_t) { 262144, 8, 64 };
+#elif defined(VGA_arm64)
+
+ // Copy the 32-bit ARM version until such time as we have
+ // some real hardware to run on
+ *I1c = (cache_t) { 16384, 4, 64 };
+ *D1c = (cache_t) { 16384, 4, 64 };
+ *LLc = (cache_t) { 262144, 8, 64 };
+
#elif defined(VGA_s390x)
//
// Here is the cache data from older machine models:
Modified: trunk/cachegrind/cg_branchpred.c
==============================================================================
--- trunk/cachegrind/cg_branchpred.c (original)
+++ trunk/cachegrind/cg_branchpred.c Sun Jan 12 12:54:00 2014
@@ -44,12 +44,12 @@
/* How many bits at the bottom of an instruction address are
guaranteed to be zero? */
-#if defined(VGA_ppc32) || defined(VGA_ppc64) || defined(VGA_arm) \
- || defined(VGA_mips32) || defined(VGA_mips64)
+#if defined(VGA_ppc32) || defined(VGA_ppc64) \
+ || defined(VGA_mips32) || defined(VGA_mips64) || defined(VGA_arm64)
# define N_IADDR_LO_ZERO_BITS 2
#elif defined(VGA_x86) || defined(VGA_amd64)
# define N_IADDR_LO_ZERO_BITS 0
-#elif defined(VGA_s390x)
+#elif defined(VGA_s390x) || defined(VGA_arm)
# define N_IADDR_LO_ZERO_BITS 1
#else
# error "Unsupported architecture"
Modified: trunk/configure.ac
==============================================================================
--- trunk/configure.ac (original)
+++ trunk/configure.ac Sun Jan 12 12:54:00 2014
@@ -195,6 +195,11 @@
ARCH_MAX="arm"
;;
+ aarch64*)
+ AC_MSG_RESULT([ok (${host_cpu})])
+ ARCH_MAX="arm64"
+ ;;
+
mips)
AC_MSG_RESULT([ok (${host_cpu})])
ARCH_MAX="mips32"
@@ -559,6 +564,35 @@
valt_load_address_sec_inner="0xUNSET"
AC_MSG_RESULT([ok (${host_cpu}-${host_os})])
;;
+ arm64-linux)
+ valt_load_address_sec_norml="0xUNSET"
+ valt_load_address_sec_inner="0xUNSET"
+ if test x$vg_cv_only64bit = xyes; then
+ VGCONF_ARCH_PRI="arm64"
+ VGCONF_ARCH_SEC=""
+ VGCONF_PLATFORM_PRI_CAPS="ARM64_LINUX"
+ VGCONF_PLATFORM_SEC_CAPS=""
+ valt_load_address_pri_norml="0x38000000"
+ valt_load_address_pri_inner="0x28000000"
+ elif test x$vg_cv_only32bit = xyes; then
+ VGCONF_ARCH_PRI="arm"
+ VGCONF_ARCH_SEC=""
+ VGCONF_PLATFORM_PRI_CAPS="ARM_LINUX"
+ VGCONF_PLATFORM_SEC_CAPS=""
+ valt_load_address_pri_norml="0x38000000"
+ valt_load_address_pri_inner="0x28000000"
+ else
+ VGCONF_ARCH_PRI="arm64"
+ VGCONF_ARCH_SEC="arm"
+ VGCONF_PLATFORM_PRI_CAPS="ARM64_LINUX"
+ VGCONF_PLATFORM_SEC_CAPS="ARM_LINUX"
+ valt_load_address_pri_norml="0x38000000"
+ valt_load_address_pri_inner="0x28000000"
+ valt_load_address_sec_norml="0x38000000"
+ valt_load_address_sec_inner="0x28000000"
+ fi
+ AC_MSG_RESULT([ok (${ARCH_MAX}-${VGCONF_OS})])
+ ;;
s390x-linux)
VGCONF_ARCH_PRI="s390x"
VGCONF_ARCH_SEC=""
@@ -624,7 +658,10 @@
AM_CONDITIONAL(VGCONF_ARCHS_INCLUDE_PPC64,
test x$VGCONF_PLATFORM_PRI_CAPS = xPPC64_LINUX )
AM_CONDITIONAL(VGCONF_ARCHS_INCLUDE_ARM,
- test x$VGCONF_PLATFORM_PRI_CAPS = xARM_LINUX )
+ test x$VGCONF_PLATFORM_PRI_CAPS = xARM_LINUX \
+ -o x$VGCONF_PLATFORM_SEC_CAPS = xARM_LINUX )
+AM_CONDITIONAL(VGCONF_ARCHS_INCLUDE_ARM64,
+ test x$VGCONF_PLATFORM_PRI_CAPS = xARM64_LINUX )
AM_CONDITIONAL(VGCONF_ARCHS_INCLUDE_S390X,
test x$VGCONF_PLATFORM_PRI_CAPS = xS390X_LINUX )
AM_CONDITIONAL(VGCONF_ARCHS_INCLUDE_MIPS32,
@@ -645,7 +682,10 @@
AM_CONDITIONAL(VGCONF_PLATFORMS_INCLUDE_PPC64_LINUX,
test x$VGCONF_PLATFORM_PRI_CAPS = xPPC64_LINUX)
AM_CONDITIONAL(VGCONF_PLATFORMS_INCLUDE_ARM_LINUX,
- test x$VGCONF_PLATFORM_PRI_CAPS = xARM_LINUX)
+ test x$VGCONF_PLATFORM_PRI_CAPS = xARM_LINUX \
+ -o x$VGCONF_PLATFORM_SEC_CAPS = xARM_LINUX)
+AM_CONDITIONAL(VGCONF_PLATFORMS_INCLUDE_ARM64_LINUX,
+ test x$VGCONF_PLATFORM_PRI_CAPS = xARM64_LINUX)
AM_CONDITIONAL(VGCONF_PLATFORMS_INCLUDE_S390X_LINUX,
test x$VGCONF_PLATFORM_PRI_CAPS = xS390X_LINUX \
-o x$VGCONF_PLATFORM_SEC_CAPS = xS390X_LINUX)
@@ -653,7 +693,6 @@
test x$VGCONF_PLATFORM_PRI_CAPS = xMIPS32_LINUX)
AM_CONDITIONAL(VGCONF_PLATFORMS_INCLUDE_MIPS64_LINUX,
test x$VGCONF_PLATFORM_PRI_CAPS = xMIPS64_LINUX)
-
AM_CONDITIONAL(VGCONF_PLATFORMS_INCLUDE_X86_DARWIN,
test x$VGCONF_PLATFORM_PRI_CAPS = xX86_DARWIN \
-o x$VGCONF_PLATFORM_SEC_CAPS = xX86_DARWIN)
@@ -670,6 +709,7 @@
-o x$VGCONF_PLATFORM_PRI_CAPS = xPPC32_LINUX \
-o x$VGCONF_PLATFORM_PRI_CAPS = xPPC64_LINUX \
-o x$VGCONF_PLATFORM_PRI_CAPS = xARM_LINUX \
+ -o x$VGCONF_PLATFORM_PRI_CAPS = xARM64_LINUX \
-o x$VGCONF_PLATFORM_PRI_CAPS = xS390X_LINUX \
-o x$VGCONF_PLATFORM_PRI_CAPS = xMIPS32_LINUX \
-o x$VGCONF_PLATFORM_PRI_CAPS = xMIPS64_LINUX)
@@ -2406,6 +2446,7 @@
mflag_primary=$FLAG_M32
elif test x$VGCONF_PLATFORM_PRI_CAPS = xAMD64_LINUX \
-o x$VGCONF_PLATFORM_PRI_CAPS = xPPC64_LINUX \
+ -o x$VGCONF_PLATFORM_PRI_CAPS = xARM64_LINUX \
-o x$VGCONF_PLATFORM_PRI_CAPS = xS390X_LINUX ; then
mflag_primary=$FLAG_M64
elif test x$VGCONF_PLATFORM_PRI_CAPS = xX86_DARWIN ; then
Modified: trunk/coregrind/Makefile.am
==============================================================================
--- trunk/coregrind/Makefile.am (original)
+++ trunk/coregrind/Makefile.am Sun Jan 12 12:54:00 2014
@@ -61,6 +61,12 @@
vgdb_SOURCES = vgdb.c
+if VGCONF_PLATFORMS_INCLUDE_ARM64_LINUX
+# vgdb-invoker-ptrace.c isn't buildable on arm64-linux yet
+# so skip it. Unfortunately this also causes it to be skipped
+# for 32-bit ARM builds which are part of a bi-arch ARM build.
+vgdb_SOURCES += vgdb-invoker-none.c
+else
if VGCONF_OS_IS_LINUX
if VGCONF_PLATVARIANT_IS_ANDROID
vgdb_SOURCES += vgdb-invoker-none.c
@@ -68,6 +74,7 @@
vgdb_SOURCES += vgdb-invoker-ptrace.c
endif
endif
+endif
if VGCONF_OS_IS_DARWIN
# Some darwin specific stuff is needed as ptrace is not
# fully supported on MacOS. Till we find someone courageous
@@ -329,6 +336,7 @@
m_dispatch/dispatch-ppc32-linux.S \
m_dispatch/dispatch-ppc64-linux.S \
m_dispatch/dispatch-arm-linux.S \
+ m_dispatch/dispatch-arm64-linux.S \
m_dispatch/dispatch-s390x-linux.S \
m_dispatch/dispatch-mips32-linux.S \
m_dispatch/dispatch-mips64-linux.S \
@@ -345,6 +353,7 @@
m_gdbserver/valgrind-low-x86.c \
m_gdbserver/valgrind-low-amd64.c \
m_gdbserver/valgrind-low-arm.c \
+ m_gdbserver/valgrind-low-arm64.c \
m_gdbserver/valgrind-low-ppc32.c \
m_gdbserver/valgrind-low-ppc64.c \
m_gdbserver/valgrind-low-s390x.c \
@@ -368,6 +377,7 @@
m_sigframe/sigframe-ppc32-linux.c \
m_sigframe/sigframe-ppc64-linux.c \
m_sigframe/sigframe-arm-linux.c \
+ m_sigframe/sigframe-arm64-linux.c \
m_sigframe/sigframe-s390x-linux.c \
m_sigframe/sigframe-mips32-linux.c \
m_sigframe/sigframe-mips64-linux.c \
@@ -378,6 +388,7 @@
m_syswrap/syscall-ppc32-linux.S \
m_syswrap/syscall-ppc64-linux.S \
m_syswrap/syscall-arm-linux.S \
+ m_syswrap/syscall-arm64-linux.S \
m_syswrap/syscall-s390x-linux.S \
m_syswrap/syscall-mips32-linux.S \
m_syswrap/syscall-mips64-linux.S \
@@ -393,6 +404,7 @@
m_syswrap/syswrap-ppc32-linux.c \
m_syswrap/syswrap-ppc64-linux.c \
m_syswrap/syswrap-arm-linux.c \
+ m_syswrap/syswrap-arm64-linux.c \
m_syswrap/syswrap-s390x-linux.c \
m_syswrap/syswrap-mips32-linux.c \
m_syswrap/syswrap-mips64-linux.c \
Modified: trunk/coregrind/launcher-linux.c
==============================================================================
--- trunk/coregrind/launcher-linux.c (original)
+++ trunk/coregrind/launcher-linux.c Sun Jan 12 12:54:00 2014
@@ -57,6 +57,10 @@
#define EM_X86_64 62 // elf.h doesn't define this on some older systems
#endif
+#ifndef EM_AARCH64
+#define EM_AARCH64 183 // ditto
+#endif
+
/* Report fatal errors */
__attribute__((noreturn))
static void barf ( const char *format, ... )
@@ -220,6 +224,10 @@
(ehdr->e_ident[EI_OSABI] == ELFOSABI_SYSV ||
ehdr->e_ident[EI_OSABI] == ELFOSABI_LINUX)) {
platform = "mips64-linux";
+ } else if (ehdr->e_machine == EM_AARCH64 &&
+ (ehdr->e_ident[EI_OSABI] == ELFOSABI_SYSV ||
+ ehdr->e_ident[EI_OSABI] == ELFOSABI_LINUX)) {
+ platform = "arm64-linux";
}
} else if (header[EI_DATA] == ELFDATA2MSB) {
# if !defined(VGPV_arm_linux_android) \
@@ -309,12 +317,13 @@
target, because on most ppc64-linux setups, the basic /bin,
/usr/bin, etc, stuff is built in 32-bit mode, not 64-bit
mode. */
- if ((0==strcmp(VG_PLATFORM,"x86-linux")) ||
- (0==strcmp(VG_PLATFORM,"amd64-linux")) ||
- (0==strcmp(VG_PLATFORM,"ppc32-linux")) ||
- (0==strcmp(VG_PLATFORM,"ppc64-linux")) ||
- (0==strcmp(VG_PLATFORM,"arm-linux")) ||
- (0==strcmp(VG_PLATFORM,"s390x-linux")) ||
+ if ((0==strcmp(VG_PLATFORM,"x86-linux")) ||
+ (0==strcmp(VG_PLATFORM,"amd64-linux")) ||
+ (0==strcmp(VG_PLATFORM,"ppc32-linux")) ||
+ (0==strcmp(VG_PLATFORM,"ppc64-linux")) ||
+ (0==strcmp(VG_PLATFORM,"arm-linux")) ||
+ (0==strcmp(VG_PLATFORM,"arm64-linux")) ||
+ (0==strcmp(VG_PLATFORM,"s390x-linux")) ||
(0==strcmp(VG_PLATFORM,"mips32-linux")) ||
(0==strcmp(VG_PLATFORM,"mips64-linux")))
default_platform = VG_PLATFORM;
Modified: trunk/coregrind/m_aspacemgr/aspacemgr-common.c
==============================================================================
--- trunk/coregrind/m_aspacemgr/aspacemgr-common.c (original)
+++ trunk/coregrind/m_aspacemgr/aspacemgr-common.c Sun Jan 12 12:54:00 2014
@@ -152,15 +152,19 @@
{
SysRes res;
aspacem_assert(VG_IS_PAGE_ALIGNED(offset));
-# if defined(VGP_x86_linux) || defined(VGP_ppc32_linux) \
- || defined(VGP_arm_linux)
+
+# if defined(VGP_arm64_linux)
+ res = VG_(do_syscall6)(__NR3264_mmap, (UWord)start, length,
+ prot, flags, fd, offset);
+# elif defined(VGP_x86_linux) || defined(VGP_ppc32_linux) \
+ || defined(VGP_arm_linux)
/* mmap2 uses 4096 chunks even if actual page size is bigger. */
aspacem_assert((offset % 4096) == 0);
res = VG_(do_syscall6)(__NR_mmap2, (UWord)start, length,
prot, flags, fd, offset / 4096);
# elif defined(VGP_amd64_linux) || defined(VGP_ppc64_linux) \
|| defined(VGP_s390x_linux) || defined(VGP_mips32_linux) \
- || defined(VGP_mips64_linux)
+ || defined(VGP_mips64_linux) || defined(VGP_arm64_linux)
res = VG_(do_syscall6)(__NR_mmap, (UWord)start, length,
prot, flags, fd, offset);
# elif defined(VGP_x86_darwin)
@@ -242,8 +246,14 @@
/* --- Pertaining to files --- */
SysRes ML_(am_open) ( const HChar* pathname, Int flags, Int mode )
-{
+{
+# if defined(VGP_arm64_linux)
+ /* ARM64 wants to use __NR_openat rather than __NR_open. */
+ SysRes res = VG_(do_syscall4)(__NR_openat,
+ VKI_AT_FDCWD, (UWord)pathname, flags, mode);
+# else
SysRes res = VG_(do_syscall3)(__NR_open, (UWord)pathname, flags, mode);
+# endif
return res;
}
@@ -261,7 +271,12 @@
Int ML_(am_readlink)(HChar* path, HChar* buf, UInt bufsiz)
{
SysRes res;
+# if defined(VGP_arm64_linux)
+ res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD,
+ (UWord)path, (UWord)buf, bufsiz);
+# else
res = VG_(do_syscall3)(__NR_readlink, (UWord)path, (UWord)buf, bufsiz);
+# endif
return sr_isError(res) ? -1 : sr_Res(res);
}
Modified: trunk/coregrind/m_cache.c
==============================================================================
--- trunk/coregrind/m_cache.c (original)
+++ trunk/coregrind/m_cache.c Sun Jan 12 12:54:00 2014
@@ -539,7 +539,7 @@
}
#elif defined(VGA_arm) || defined(VGA_ppc32) || defined(VGA_ppc64) || \
- defined(VGA_mips32) || defined(VGA_mips64)
+ defined(VGA_mips32) || defined(VGA_mips64) || defined(VGA_arm64)
static Bool
get_cache_info(VexArchInfo *vai)
Modified: trunk/coregrind/m_coredump/coredump-elf.c
==============================================================================
--- trunk/coregrind/m_coredump/coredump-elf.c (original)
+++ trunk/coregrind/m_coredump/coredump-elf.c Sun Jan 12 12:54:00 2014
@@ -233,7 +233,7 @@
}
static void fill_prstatus(const ThreadState *tst,
- struct vki_elf_prstatus *prs,
+ /*OUT*/struct vki_elf_prstatus *prs,
const vki_siginfo_t *si)
{
struct vki_user_regs_struct *regs;
@@ -252,12 +252,11 @@
prs->pr_pgrp = VG_(getpgrp)();
prs->pr_sid = VG_(getpgrp)();
-#ifdef VGP_s390x_linux
+#if defined(VGP_s390x_linux)
/* prs->pr_reg has struct type. Need to take address. */
regs = (struct vki_user_regs_struct *)&(prs->pr_reg);
#else
regs = (struct vki_user_regs_struct *)prs->pr_reg;
-
vg_assert(sizeof(*regs) == sizeof(prs->pr_reg));
#endif
@@ -302,10 +301,6 @@
regs->r14 = arch->vex.guest_R14;
regs->r15 = arch->vex.guest_R15;
-//:: regs->cs = arch->vex.guest_CS;
-//:: regs->fs = arch->vex.guest_FS;
-//:: regs->gs = arch->vex.guest_GS;
-
#elif defined(VGP_ppc32_linux)
# define DO(n) regs->gpr[n] = arch->vex.guest_GPR##n
DO(0); DO(1); DO(2); DO(3); DO(4); DO(5); DO(6); DO(7);
@@ -367,6 +362,10 @@
regs->ARM_pc = arch->vex.guest_R15T;
regs->ARM_cpsr = LibVEX_GuestARM_get_cpsr( &arch->vex );
+#elif defined(VGP_arm64_linux)
+ (void)arch;
+ I_die_here;
+
#elif defined(VGP_s390x_linux)
# define DO(n) regs->gprs[n] = arch->vex.guest_r##n
DO(0); DO(1); DO(2); DO(3); DO(4); DO(5); DO(6); DO(7);
@@ -377,6 +376,7 @@
DO(8); DO(9); DO(10); DO(11); DO(12); DO(13); DO(14); DO(15);
# undef DO
regs->orig_gpr2 = arch->vex.guest_r2;
+
#elif defined(VGP_mips32_linux)
# define DO(n) regs->MIPS_r##n = arch->vex.guest_r##n
DO(0); DO(1); DO(2); DO(3); DO(4); DO(5); DO(6); DO(7);
@@ -386,6 +386,7 @@
# undef DO
regs->MIPS_hi = arch->vex.guest_HI;
regs->MIPS_lo = arch->vex.guest_LO;
+
#elif defined(VGP_mips64_linux)
# define DO(n) regs->MIPS_r##n = arch->vex.guest_r##n
DO(0); DO(1); DO(2); DO(3); DO(4); DO(5); DO(6); DO(7);
@@ -395,6 +396,7 @@
# undef DO
regs->MIPS_hi = arch->vex.guest_HI;
regs->MIPS_lo = arch->vex.guest_LO;
+
#else
# error Unknown ELF platform
#endif
@@ -470,6 +472,9 @@
#elif defined(VGP_arm_linux)
// umm ...
+#elif defined(VGP_arm64_linux)
+ I_die_here;
+
#elif defined(VGP_s390x_linux)
# define DO(n) fpu->fprs[n].ui = arch->vex.guest_f##n
DO(0); DO(1); DO(2); DO(3); DO(4); DO(5); DO(6); DO(7);
@@ -606,16 +611,13 @@
if (VG_(threads)[i].status == VgTs_Empty)
continue;
-# if defined(VGP_x86_linux)
-# if !defined(VGPV_arm_linux_android) && !defined(VGPV_x86_linux_android) \
- && !defined(VGPV_mips32_linux_android)
+# if defined(VGP_x86_linux) && !defined(VGPV_x86_linux_android)
{
vki_elf_fpxregset_t xfpu;
fill_xfpu(&VG_(threads)[i], &xfpu);
add_note(¬elist, "LINUX", NT_PRXFPREG, &xfpu, sizeof(xfpu));
}
# endif
-# endif
fill_fpu(&VG_(threads)[i], &fpu);
# if !defined(VGPV_arm_linux_android) && !defined(VGPV_x86_linux_android) \
Modified: trunk/coregrind/m_debugger.c
==============================================================================
--- trunk/coregrind/m_debugger.c (original)
+++ trunk/coregrind/m_debugger.c Sun Jan 12 12:54:00 2014
@@ -232,6 +232,47 @@
uregs.ARM_cpsr = LibVEX_GuestARM_get_cpsr(vex);
return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &uregs);
+#elif defined(VGP_arm64_linux)
+ I_die_here;
+ //ATC
+ struct vki_user_pt_regs uregs;
+ VG_(memset)(&uregs, 0, sizeof(uregs));
+ uregs.regs[0] = vex->guest_X0;
+ uregs.regs[1] = vex->guest_X1;
+ uregs.regs[2] = vex->guest_X2;
+ uregs.regs[3] = vex->guest_X3;
+ uregs.regs[4] = vex->guest_X4;
+ uregs.regs[5] = vex->guest_X5;
+ uregs.regs[6] = vex->guest_X6;
+ uregs.regs[7] = vex->guest_X7;
+ uregs.regs[8] = vex->guest_X8;
+ uregs.regs[9] = vex->guest_X9;
+ uregs.regs[10] = vex->guest_X10;
+ uregs.regs[11] = vex->guest_X11;
+ uregs.regs[12] = vex->guest_X12;
+ uregs.regs[13] = vex->guest_X13;
+ uregs.regs[14] = vex->guest_X14;
+ uregs.regs[15] = vex->guest_X15;
+ uregs.regs[16] = vex->guest_X16;
+ uregs.regs[17] = vex->guest_X17;
+ uregs.regs[18] = vex->guest_X18;
+ uregs.regs[19] = vex->guest_X19;
+ uregs.regs[20] = vex->guest_X20;
+ uregs.regs[21] = vex->guest_X21;
+ uregs.regs[22] = vex->guest_X22;
+ uregs.regs[23] = vex->guest_X23;
+ uregs.regs[24] = vex->guest_X24;
+ uregs.regs[25] = vex->guest_X25;
+ uregs.regs[26] = vex->guest_X26;
+ uregs.regs[27] = vex->guest_X27;
+ uregs.regs[28] = vex->guest_X28;
+ uregs.regs[29] = vex->guest_X29;
+ uregs.regs[30] = vex->guest_X30;
+ uregs.sp = vex->guest_SP;
+ uregs.pc = vex->guest_PC;
+ uregs.pstate = LibVEX_GuestARM64_get_nzcv(vex); /* is this correct? */
+ return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &uregs);
+
#elif defined(VGP_x86_darwin)
I_die_here;
Modified: trunk/coregrind/m_debuginfo/d3basics.c
==============================================================================
--- trunk/coregrind/m_debuginfo/d3basics.c (original)
+++ trunk/coregrind/m_debuginfo/d3basics.c Sun Jan 12 12:54:00 2014
@@ -420,6 +420,8 @@
# elif defined(VGP_mips64_linux)
if (regno == 29) { *a = regs->sp; return True; }
if (regno == 30) { *a = regs->fp; return True; }
+# elif defined(VGP_arm64_linux)
+ I_die_here;
# else
# error "Unknown platform"
# endif
Modified: trunk/coregrind/m_debuginfo/debuginfo.c
==============================================================================
--- trunk/coregrind/m_debuginfo/debuginfo.c (original)
+++ trunk/coregrind/m_debuginfo/debuginfo.c Sun Jan 12 12:54:00 2014
@@ -824,7 +824,8 @@
|| defined(VGA_mips64)
is_rx_map = seg->hasR && seg->hasX;
is_rw_map = seg->hasR && seg->hasW;
-# elif defined(VGA_amd64) || defined(VGA_ppc64) || defined(VGA_arm)
+# elif defined(VGA_amd64) || defined(VGA_ppc64) || defined(VGA_arm) \
+ || defined(VGA_arm64)
is_rx_map = seg->hasR && seg->hasX && !seg->hasW;
is_rw_map = seg->hasR && seg->hasW && !seg->hasX;
# elif defined(VGP_s390x_linux)
@@ -2113,6 +2114,8 @@
case Creg_IA_BP: return eec->uregs->fp;
case Creg_MIPS_RA: return eec->uregs->ra;
# elif defined(VGA_ppc32) || defined(VGA_ppc64)
+# elif defined(VGP_arm64_linux)
+ I_die_here;
# else
# error "Unsupported arch"
# endif
@@ -2357,6 +2360,8 @@
cfa = cfsi->cfa_off + uregs->fp;
break;
# elif defined(VGA_ppc32) || defined(VGA_ppc64)
+# elif defined(VGP_arm64_linux)
+ I_die_here;
# else
# error "Unsupported arch"
# endif
@@ -2453,6 +2458,8 @@
# elif defined(VGA_mips32) || defined(VGA_mips64)
ipHere = uregsHere->pc;
# elif defined(VGA_ppc32) || defined(VGA_ppc64)
+# elif defined(VGP_arm64_linux)
+ I_die_here;
# else
# error "Unknown arch"
# endif
@@ -2533,6 +2540,8 @@
COMPUTE(uregsPrev.sp, uregsHere->sp, cfsi->sp_how, cfsi->sp_off);
COMPUTE(uregsPrev.fp, uregsHere->fp, cfsi->fp_how, cfsi->fp_off);
# elif defined(VGA_ppc32) || defined(VGA_ppc64)
+# elif defined(VGP_arm64_linux)
+ I_die_here;
# else
# error "Unknown arch"
# endif
Modified: trunk/coregrind/m_debuginfo/priv_storage.h
==============================================================================
--- trunk/coregrind/m_debuginfo/priv_storage.h (original)
+++ trunk/coregrind/m_debuginfo/priv_storage.h Sun Jan 12 12:54:00 2014
@@ -277,6 +277,18 @@
Int fp_off;
}
DiCfSI;
+#elif defined(VGA_arm64)
+/* Be generic until we know more about what's needed. */
+typedef
+ struct {
+ Addr base;
+ UInt len;
+ UChar cfa_how; /* a CFIC_ value */
+ UChar ra_how; /* a CFIR_ value */
+ Int cfa_off;
+ Int ra_off;
+ }
+ DiCfSI;
#else
# error "Unknown arch"
#endif
Modified: trunk/coregrind/m_debuginfo/readdwarf.c
==============================================================================
--- trunk/coregrind/m_debuginfo/readdwarf.c (original)
+++ trunk/coregrind/m_debuginfo/readdwarf.c Sun Jan 12 12:54:00 2014
@@ -1841,6 +1841,10 @@
# define FP_REG 12
# define SP_REG 13
# define RA_REG_DEFAULT 14 //???
+#elif defined(VGP_arm64_linux)
+# define FP_REG 29 //???
+# define SP_REG 31 //???
+# define RA_REG_DEFAULT 30 //???
#elif defined(VGP_x86_darwin)
# define FP_REG 5
# define SP_REG 4
@@ -2179,6 +2183,8 @@
si->cfa_how = CFIC_IA_SPREL;
# elif defined(VGA_arm)
si->cfa_how = CFIC_ARM_R13REL;
+# elif defined(VGA_arm64)
+ I_die_here;
# else
si->cfa_how = 0; /* invalid */
# endif
@@ -2206,6 +2212,8 @@
si->cfa_how = CFIC_ARM_R7REL;
si->cfa_off = ctxs->cfa_off;
}
+# elif defined(VGA_arm64)
+ if (1) { I_die_here; } // do we need any arm64 specifics here?
# endif
else {
why = 1;
@@ -2249,6 +2257,7 @@
why = 2; goto failed; /* otherwise give up */ \
}
+
# if defined(VGA_x86) || defined(VGA_amd64)
/* --- entire tail of this fn specialised for x86/amd64 --- */
@@ -2339,9 +2348,10 @@
return True;
-
# elif defined(VGA_s390x)
+ /* --- entire tail of this fn specialised for s390 --- */
+
SUMMARISE_HOW(si->ra_how, si->ra_off,
ctxs->reg[ctx->ra_reg] );
SUMMARISE_HOW(si->fp_how, si->fp_off,
@@ -2387,7 +2397,6 @@
return True;
-
# elif defined(VGA_mips32) || defined(VGA_mips64)
/* --- entire tail of this fn specialised for mips --- */
@@ -2431,9 +2440,12 @@
return True;
-
+# elif defined(VGA_arm64)
+ I_die_here;
# elif defined(VGA_ppc32) || defined(VGA_ppc64)
+ /* These don't use CFI based unwinding (is that really true?) */
+
# else
# error "Unknown arch"
# endif
@@ -2521,6 +2533,8 @@
return ML_(CfiExpr_CfiReg)( dstxa, Creg_IA_BP );
if (dwreg == srcuc->ra_reg)
return ML_(CfiExpr_CfiReg)( dstxa, Creg_IA_IP );
+# elif defined(VGA_arm64)
+ I_die_here;
# elif defined(VGA_ppc32) || defined(VGA_ppc64)
# else
# error "Unknown arch"
Modified: trunk/coregrind/m_debuginfo/readelf.c
==============================================================================
--- trunk/coregrind/m_debuginfo/readelf.c (original)
+++ trunk/coregrind/m_debuginfo/readelf.c Sun Jan 12 12:54:00 2014
@@ -2088,7 +2088,8 @@
/* PLT is different on different platforms, it seems. */
# if defined(VGP_x86_linux) || defined(VGP_amd64_linux) \
|| defined(VGP_arm_linux) || defined (VGP_s390x_linux) \
- || defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
+ || defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
+ || defined(VGP_arm64_linux)
/* Accept .plt where mapped as rx (code) */
if (0 == VG_(strcmp)(name, ".plt")) {
if (inrx && !di->plt_present) {
Modified: trunk/coregrind/m_debuginfo/storage.c
==============================================================================
--- trunk/coregrind/m_debuginfo/storage.c (original)
+++ trunk/coregrind/m_debuginfo/storage.c Sun Jan 12 12:54:00 2014
@@ -195,6 +195,8 @@
SHOW_HOW(si->sp_how, si->sp_off);
VG_(printf)(" FP=");
SHOW_HOW(si->fp_how, si->fp_off);
+# elif defined(VGA_arm64)
+ I_die_here;
# else
# error "Unknown arch"
# endif
Modified: trunk/coregrind/m_debuglog.c
==============================================================================
--- trunk/coregrind/m_debuglog.c (original)
+++ trunk/coregrind/m_debuglog.c Sun Jan 12 12:54:00 2014
@@ -103,6 +103,7 @@
}
#elif defined(VGP_amd64_linux)
+
__attribute__((noinline))
static UInt local_sys_write_stderr ( const HChar* buf, Int n )
{
@@ -267,6 +268,42 @@
return __res;
}
+#elif defined(VGP_arm64_linux)
+
+/* Write n bytes of buf to stderr via a raw write() syscall.
+   Returns the byte count written, or (UInt)-1 on error. */
+static UInt local_sys_write_stderr ( const HChar* buf, Int n )
+{
+   volatile ULong block[2];
+   block[0] = (ULong)buf;
+   block[1] = (ULong)n;
+   __asm__ volatile (
+     "mov x0, #2\n\t"        /* stderr */
+     "ldr x1, [%0]\n\t"      /* buf */
+     "ldr x2, [%0, #8]\n\t"  /* n */
+     "mov x8, #"VG_STRINGIFY(__NR_write)"\n\t"
+     "svc 0x0\n"             /* write() */
+     "str x0, [%0]\n\t"      /* stash result back in block[0] */
+     :
+     : "r" (block)
+     : "x0","x1","x2","x8","memory" /* x8 carries the syscall #, not x7 */
+   );
+   if (((Long)block[0]) < 0)  /* block[0] is unsigned; cast before sign test */
+      block[0] = (ULong)-1LL;
+   return (UInt)block[0];
+}
+
+static UInt local_sys_getpid ( void )
+{
+ UInt __res;
+ __asm__ volatile (
+ "mov x8, #"VG_STRINGIFY(__NR_getpid)"\n" /* arm64: syscall # goes in x8 */
+ "svc 0x0\n" /* getpid() */
+ "mov %0, x0\n" /* result is returned in x0 */
+ : "=r" (__res)
+ :
+ : "x0", "x8" );
+ return (UInt)__res;
+}
+
#elif defined(VGP_x86_darwin)
/* We would use VG_DARWIN_SYSNO_TO_KERNEL instead of VG_DARWIN_SYSNO_INDEX
@@ -350,6 +387,7 @@
}
#elif defined(VGP_s390x_linux)
+
static UInt local_sys_write_stderr ( const HChar* buf, Int n )
{
register Int r2 asm("2") = 2; /* file descriptor STDERR */
@@ -391,6 +429,7 @@
}
#elif defined(VGP_mips32_linux)
+
static UInt local_sys_write_stderr ( const HChar* buf, Int n )
{
volatile Int block[2];
@@ -428,6 +467,7 @@
}
#elif defined(VGP_mips64_linux)
+
static UInt local_sys_write_stderr ( const HChar* buf, Int n )
{
volatile Long block[2];
Added: trunk/coregrind/m_dispatch/dispatch-arm64-linux.S
==============================================================================
--- trunk/coregrind/m_dispatch/dispatch-arm64-linux.S (added)
+++ trunk/coregrind/m_dispatch/dispatch-arm64-linux.S Sun Jan 12 12:54:00 2014
@@ -0,0 +1,241 @@
+
+/*--------------------------------------------------------------------*/
+/*--- The core dispatch loop, for jumping to a code address. ---*/
+/*--- dispatch-arm64-linux.S ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2013-2013 OpenWorks
+ in...@op...
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#if defined(VGP_arm64_linux)
+
+#include "pub_core_basics_asm.h"
+#include "pub_core_dispatch_asm.h"
+#include "pub_core_transtab_asm.h"
+#include "libvex_guest_offsets.h" /* for OFFSET_arm64_* */
+
+
+/*------------------------------------------------------------*/
+/*--- ---*/
+/*--- The dispatch loop. VG_(disp_run_translations) is ---*/
+/*--- used to run all translations, ---*/
+/*--- including no-redir ones. ---*/
+/*--- ---*/
+/*------------------------------------------------------------*/
+
+/*----------------------------------------------------*/
+/*--- Entry and preamble (set everything up) ---*/
+/*----------------------------------------------------*/
+
+/* signature:
+void VG_(disp_run_translations)( UWord* two_words,
+ void* guest_state,
+ Addr host_addr );
+*/
+.text
+.global VG_(disp_run_translations)
+VG_(disp_run_translations):
+ /* x0 holds two_words
+ x1 holds guest_state
+ x2 holds host_addr
+ */
+ /* Push the callee-saved registers. Per AAPCS64, x19..x28 are
+ callee-saved and x29/x30 are FP/LR. Note this sequence
+ maintains 16-alignment of sp. Also save x0 since it will
+ be needed in the postamble. */
+ stp x29, x30, [sp, #-16]!
+ stp x27, x28, [sp, #-16]!
+ stp x25, x26, [sp, #-16]!
+ stp x23, x24, [sp, #-16]!
+ stp x21, x22, [sp, #-16]!
+ stp x19, x20, [sp, #-16]!
+ stp x0, xzr, [sp, #-16]!
+
+ /* set FPCR (arm64's FPSCR analogue) to vex-required default value */
+ // FIXME
+ // mov r4, #0
+ // fmxr fpscr, r4
+
+ /* Set up the guest state pointer */
+ mov x21, x1
+
+ /* and jump into the code cache. Chained translations in
+ the code cache run, until for whatever reason, they can't
+ continue. When that happens, the translation in question
+ will jump (or call) to one of the continuation points
+ VG_(cp_...) below. */
+ br x2
+ /* NOTREACHED */
+
+/*----------------------------------------------------*/
+/*--- Postamble and exit. ---*/
+/*----------------------------------------------------*/
+
+postamble:
+ /* At this point, x1 and x2 contain two
+ words to be returned to the caller. x1
+ holds a TRC value, and x2 optionally may
+ hold another word (for CHAIN_ME exits, the
+ address of the place to patch.) */
+
+ /* We're leaving. Check that nobody messed with
+ FPCR/FPSR in ways we don't expect. */
+ // FIXME: port this arm32 FPSCR check to arm64 FPCR/FPSR
+ // fmrx r4, fpscr
+ // bic r4, #0xF8000000 /* mask out NZCV and QC */
+ // bic r4, #0x0000009F /* mask out IDC,IXC,UFC,OFC,DZC,IOC */
+ // cmp r4, #0
+ // beq remove_frame /* we're OK */
+ /* otherwise we have an invariant violation */
+ // movw r1, #VG_TRC_INVARIANT_FAILED
+ // movw r2, #0
+ /* fall through */
+
+remove_frame:
+ /* Restore int regs, including importantly x0 (two_words),
+ but not x1 */
+ ldp x0, xzr, [sp], #16
+ ldp x19, x20, [sp], #16
+ ldp x21, x22, [sp], #16
+ ldp x23, x24, [sp], #16
+ ldp x25, x26, [sp], #16
+ ldp x27, x28, [sp], #16
+ ldp x29, x30, [sp], #16
+
+ /* Stash return values (x1,x2) into the two_words array */
+ str x1, [x0, #0]
+ str x2, [x0, #8]
+ ret
+
+/*----------------------------------------------------*/
+/*--- Continuation points ---*/
+/*----------------------------------------------------*/
+
+/* ------ Chain me to slow entry point ------ */
+.global VG_(disp_cp_chain_me_to_slowEP)
+VG_(disp_cp_chain_me_to_slowEP):
+ /* We got called. The return address indicates
+ where the patching needs to happen. Collect
+ the return address and exit back to C land,
+ handing the caller the pair (Chain_me_S, RA) */
+ mov x1, #VG_TRC_CHAIN_ME_TO_SLOW_EP
+ mov x2, x30 // 30 == LR
+ /* 4 = movz x9, disp_cp_chain_me_to_slowEP[15:0]
+ 4 = movk x9, disp_cp_chain_me_to_slowEP[31:16], lsl 16
+ 4 = movk x9, disp_cp_chain_me_to_slowEP[47:32], lsl 32
+ 4 = movk x9, disp_cp_chain_me_to_slowEP[63:48], lsl 48
+ 4 = blr x9
+ */
+ sub x2, x2, #4+4+4+4+4
+ b postamble
+
+/* ------ Chain me to fast entry point ------ */
+.global VG_(disp_cp_chain_me_to_fastEP)
+VG_(disp_cp_chain_me_to_fastEP):
+ /* We got called. The return address indicates
+ where the patching needs to happen. Collect
+ the return address and exit back to C land,
+ handing the caller the pair (Chain_me_F, RA) */
+ mov x1, #VG_TRC_CHAIN_ME_TO_FAST_EP
+ mov x2, x30 // 30 == LR
+ /* 4 = movz x9, disp_cp_chain_me_to_fastEP[15:0]
+ 4 = movk x9, disp_cp_chain_me_to_fastEP[31:16], lsl 16
+ 4 = movk x9, disp_cp_chain_me_to_fastEP[47:32], lsl 32
+ 4 = movk x9, disp_cp_chain_me_to_fastEP[63:48], lsl 48
+ 4 = blr x9
+ */
+ sub x2, x2, #4+4+4+4+4
+ b postamble
+
+/* ------ Indirect but boring jump ------ */
+.global VG_(disp_cp_xindir)
+VG_(disp_cp_xindir):
+ /* Where are we going? */
+ ldr x0, [x21, #OFFSET_arm64_PC]
+
+ /* stats only */
+ adrp x1, VG_(stats__n_xindirs_32)
+ add x1, x1, :lo12:VG_(stats__n_xindirs_32)
+ ldr w2, [x1, #0]
+ add w2, w2, #1
+ str w2, [x1, #0]
+
+ /* try a fast lookup in the translation cache */
+ // x0 = next guest, x1,x2,x3,x4 scratch
+ mov x1, #VG_TT_FAST_MASK // x1 = VG_TT_FAST_MASK
+ and x2, x1, x0, LSR #2 // x2 = entry # = (x1 & (x0 >> 2))
+
+ adrp x4, VG_(tt_fast)
+ add x4, x4, :lo12:VG_(tt_fast) // x4 = &VG_(tt_fast)
+
+ add x1, x4, x2, LSL #4 // x1 = &tt_fast[entry#]
+
+ ldp x4, x5, [x1, #0] // x4 = .guest, x5 = .host
+
+ cmp x4, x0
+
+ // branch away if the lookup failed; else jump to host
+ bne fast_lookup_failed
+ br x5
+ /*NOTREACHED*/
+
+fast_lookup_failed:
+ /* stats only -- to be removed eventually */
+ adrp x1, VG_(stats__n_xindir_misses_32)
+ add x1, x1, :lo12:VG_(stats__n_xindir_misses_32)
+ ldr w2, [x1, #0]
+ add w2, w2, #1
+ str w2, [x1, #0]
+
+ mov x1, #VG_TRC_INNER_FASTMISS
+ mov x2, #0
+ b postamble
+
+/* ------ Assisted jump ------ */
+.global VG_(disp_cp_xassisted)
+VG_(disp_cp_xassisted):
+ /* x21 contains the TRC */
+ mov x1, x21
+ mov x2, #0
+ b postamble
+
+/* ------ Event check failed ------ */
+.global VG_(disp_cp_evcheck_fail)
+VG_(disp_cp_evcheck_fail):
+ mov x1, #VG_TRC_INNER_COUNTERZERO
+ mov x2, #0
+ b postamble
+
+
+.size VG_(disp_run_translations), .-VG_(disp_run_translations)
+
+/* Let the linker know we don't need an executable stack */
+.section .note.GNU-stack,"",%progbits
+
+#endif // defined(VGP_arm64_linux)
+
+/*--------------------------------------------------------------------*/
+/*--- end dispatch-arm64-linux.S ---*/
+/*--------------------------------------------------------------------*/
Modified: trunk/coregrind/m_gdbserver/target.c
==============================================================================
--- trunk/coregrind/m_gdbserver/target.c (original)
+++ trunk/coregrind/m_gdbserver/target.c Sun Jan 12 12:54:00 2014
@@ -641,6 +641,8 @@
amd64_init_architecture(&the_low_target);
#elif defined(VGA_arm)
arm_init_architecture(&the_low_target);
+#elif defined(VGA_arm64)
+ arm64_init_architecture(&the_low_target); /* not arm_: arm64 low target is in valgrind-low-arm64.c */
#elif defined(VGA_ppc32)
ppc32_init_architecture(&the_low_target);
#elif defined(VGA_ppc64)
@@ -652,6 +654,6 @@
#elif defined(VGA_mips64)
mips64_init_architecture(&the_low_target);
#else
- architecture missing in target.c valgrind_initialize_target
+ #error "architecture missing in target.c valgrind_initialize_target"
#endif
}
Added: trunk/coregrind/m_gdbserver/valgrind-low-arm64.c
==============================================================================
--- trunk/coregrind/m_gdbserver/valgrind-low-arm64.c (added)
+++ trunk/coregrind/m_gdbserver/valgrind-low-arm64.c Sun Jan 12 12:54:00 2014
@@ -0,0 +1,307 @@
+/* Low level interface to valgrind, for the remote server for GDB integrated
+ in valgrind.
+ Copyright (C) 2011
+ Free Software Foundation, Inc.
+
+ This file is part of VALGRIND.
+ It has been inspired from a file from gdbserver in gdb 6.6.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+#include "server.h"
+#include "target.h"
+#include "regdef.h"
+#include "regcache.h"
+
+#include "pub_core_aspacemgr.h"
+#include "pub_tool_machine.h"
+#include "pub_core_threadstate.h"
+#include "pub_core_transtab.h"
+#include "pub_core_gdbserver.h"
+#include "pub_core_debuginfo.h"
+
+#include "valgrind_low.h"
+
+#include "libvex_guest_arm64.h"
+
+//ZZ static struct reg regs[] = {
+//ZZ { "r0", 0, 32 },
+//ZZ { "r1", 32, 32 },
+//ZZ { "r2", 64, 32 },
+//ZZ { "r3", 96, 32 },
+//ZZ { "r4", 128, 32 },
+//ZZ { "r5", 160, 32 },
+//ZZ { "r6", 192, 32 },
+//ZZ { "r7", 224, 32 },
+//ZZ { "r8", 256, 32 },
+//ZZ { "r9", 288, 32 },
+//ZZ { "r10", 320, 32 },
+//ZZ { "r11", 352, 32 },
+//ZZ { "r12", 384, 32 },
+//ZZ { "sp", 416, 32 },
+//ZZ { "lr", 448, 32 },
+//ZZ { "pc", 480, 32 },
+//ZZ { "", 512, 0 }, // It seems these entries are needed
+//ZZ { "", 512, 0 }, // as previous versions of arm <-> gdb placed
+//ZZ { "", 512, 0 }, // some floating point registers here. So, cpsr
+//ZZ { "", 512, 0 }, // must be register 25.
+//ZZ { "", 512, 0 },
+//ZZ { "", 512, 0 },
+//ZZ { "", 512, 0 },
+//ZZ { "", 512, 0 },
+//ZZ { "", 512, 0 },
+//ZZ { "cpsr", 512, 32 },
+//ZZ { "d0", 544, 64 },
+//ZZ { "d1", 608, 64 },
+//ZZ { "d2", 672, 64 },
+//ZZ { "d3", 736, 64 },
+//ZZ { "d4", 800, 64 },
+//ZZ { "d5", 864, 64 },
+//ZZ { "d6", 928, 64 },
+//ZZ { "d7", 992, 64 },
+//ZZ { "d8", 1056, 64 },
+//ZZ { "d9", 1120, 64 },
+//ZZ { "d10", 1184, 64 },
+//ZZ { "d11", 1248, 64 },
+//ZZ { "d12", 1312, 64 },
+//ZZ { "d13", 1376, 64 },
+//ZZ { "d14", 1440, 64 },
+//ZZ { "d15", 1504, 64 },
+//ZZ { "d16", 1568, 64 },
+//ZZ { "d17", 1632, 64 },
+//ZZ { "d18", 1696, 64 },
+//ZZ { "d19", 1760, 64 },
+//ZZ { "d20", 1824, 64 },
+//ZZ { "d21", 1888, 64 },
+//ZZ { "d22", 1952, 64 },
+//ZZ { "d23", 2016, 64 },
+//ZZ { "d24", 2080, 64 },
+//ZZ { "d25", 2144, 64 },
+//ZZ { "d26", 2208, 64 },
+//ZZ { "d27", 2272, 64 },
+//ZZ { "d28", 2336, 64 },
+//ZZ { "d29", 2400, 64 },
+//ZZ { "d30", 2464, 64 },
+//ZZ { "d31", 2528, 64 },
+//ZZ { "fpscr", 2592, 32 }
+//ZZ };
+//ZZ static const char *expedite_regs[] = { "r11", "sp", "pc", 0 };
+//ZZ #define num_regs (sizeof (regs) / sizeof (regs[0]))
+//ZZ
+//ZZ static
+//ZZ CORE_ADDR get_pc (void)
+//ZZ {
+//ZZ unsigned long pc;
+//ZZ
+//ZZ collect_register_by_name ("pc", &pc);
+//ZZ
+//ZZ dlog(1, "stop pc is %p\n", (void *) pc);
+//ZZ return pc;
+//ZZ }
+//ZZ
+//ZZ static
+//ZZ void set_pc (CORE_ADDR newpc)
+//ZZ {
+//ZZ Bool mod;
+//ZZ supply_register_by_name ("pc", &newpc, &mod);
+//ZZ if (mod)
+//ZZ dlog(1, "set pc to %p\n", C2v (newpc));
+//ZZ else
+//ZZ dlog(1, "set pc not changed %p\n", C2v (newpc));
+//ZZ }
+//ZZ
+//ZZ Addr thumb_pc (Addr pc)
+//ZZ {
+//ZZ // If the thumb bit (bit 0) is already set, we trust it.
+//ZZ if (pc & 1) {
+//ZZ dlog (1, "%p = thumb (bit0 is set)\n", C2v (pc));
+//ZZ return pc;
+//ZZ }
+//ZZ
+//ZZ // Here, bit 0 is not set.
+//ZZ // For a pc aligned on 4 bytes, we have to use the debug
+//ZZ // info to determine the thumb-ness.
+//ZZ // else (aligned on 2 bytes), we trust this is a thumb
+//ZZ // address and we set the thumb bit.
+//ZZ
+//ZZ if (pc & 2) {
+//ZZ dlog (1, "bit0 not set, bit1 set => %p = thumb\n", C2v (pc));
+//ZZ return pc | 1;
+//ZZ }
+//ZZ
+//ZZ // pc aligned on 4 bytes. We need to use debug info.
+//ZZ {
+//ZZ HChar fnname[200]; // ??? max size
+//ZZ Addr entrypoint;
+//ZZ Addr ptoc; // unused but needed.
+//ZZ // If this is a thumb instruction, we need to ask
+//ZZ // the debug info with the bit0 set
+//ZZ // (why can't debug info do that for us ???)
+//ZZ // (why if this is a 4 bytes thumb instruction ???)
+//ZZ if (VG_(get_fnname_raw) (pc | 1, fnname, 200)) {
+//ZZ if (VG_(lookup_symbol_SLOW)( "*", fnname, &entrypoint, &ptoc )) {
+//ZZ dlog (1, "fnname %s lookupsym %p => %p %s.\n",
+//ZZ fnname, C2v(entrypoint), C2v(pc),
+//ZZ (entrypoint & 1 ? "thumb" : "arm"));
+//ZZ if (entrypoint & 1)
+//ZZ return pc | 1;
+//ZZ else
+//ZZ return pc;
+//ZZ
+//ZZ } else {
+//ZZ dlog (1, "%p fnname %s lookupsym failed?. Assume arm\n",
+//ZZ C2v (pc), fnname);
+//ZZ return pc;
+//ZZ }
+//ZZ } else {
+//ZZ // Can't find function name. We assume this is arm
+//ZZ dlog (1, "%p unknown fnname?. Assume arm\n", C2v (pc));
+//ZZ return pc;
+//ZZ }
+//ZZ }
+//ZZ }
+//ZZ
+//ZZ /* store registers in the guest state (gdbserver_to_valgrind)
+//ZZ or fetch register from the guest state (valgrind_to_gdbserver). */
+//ZZ static
+//ZZ void transfer_register (ThreadId tid, int abs_regno, void * buf,
+//ZZ transfer_direction dir, int size, Bool *mod)
+//ZZ {
+//ZZ ThreadState* tst = VG_(get_ThreadState)(tid);
+//ZZ int set = abs_regno / num_regs;
+//ZZ int regno = abs_regno % num_regs;
+//ZZ *mod = False;
+//ZZ
+//ZZ VexGuestARMState* arm = (VexGuestARMState*) get_arch (set, tst);
+//ZZ
+//ZZ switch (regno) {
+//ZZ // numbers here have to match the order of regs above
+//ZZ // Attention: gdb order does not match valgrind order.
+//ZZ case 0: VG_(transfer) (&arm->guest_R0, buf, dir, size, mod); break;
+//ZZ case 1: VG_(transfer) (&arm->guest_R1, buf, dir, size, mod); break;
+//ZZ case 2: VG_(transfer) (&arm->guest_R2, buf, dir, size, mod); break;
+//ZZ case 3: VG_(transfer) (&arm->guest_R3, buf, dir, size, mod); break;
+//ZZ case 4: VG_(transfer) (&arm->guest_R4, buf, dir, size, mod); break;
+//ZZ case 5: VG_(transfer) (&arm->guest_R5, buf, dir, size, mod); break;
+//ZZ case 6: VG_(transfer) (&arm->guest_R6, buf, dir, size, mod); break;
+//ZZ case 7: VG_(transfer) (&arm->guest_R7, buf, dir, size, mod); break;
+//ZZ case 8: VG_(transfer) (&arm->guest_R8, buf, dir, size, mod); break;
+//ZZ case 9: VG_(transfer) (&arm->guest_R9, buf, dir, size, mod); break;
+//ZZ case 10: VG_(transfer) (&arm->guest_R10, buf, dir, size, mod); break;
+//ZZ case 11: VG_(transfer) (&arm->guest_R11, buf, dir, size, mod); break;
+//ZZ case 12: VG_(transfer) (&arm->guest_R12, buf, dir, size, mod); break;
+//ZZ case 13: VG_(transfer) (&arm->guest_R13, buf, dir, size, mod); break;
+//ZZ case 14: VG_(transfer) (&arm->guest_R14, buf, dir, size, mod); break;
+//ZZ case 15: {
+//ZZ VG_(transfer) (&arm->guest_R15T, buf, dir, size, mod);
+//ZZ if (dir == gdbserver_to_valgrind && *mod) {
+//ZZ // If gdb is changing the PC, we have to set the thumb bit
+//ZZ // if needed.
+//ZZ arm->guest_R15T = thumb_pc(arm->guest_R15T);
+//ZZ }
+//ZZ break;
+//ZZ }
+//ZZ case 16:
+//ZZ case 17:
+//ZZ case 18:
+//ZZ case 19:
+//ZZ case 20: /* 9 "empty registers". See struct reg regs above. */
+//ZZ case 21:
+//ZZ case 22:
+//ZZ case 23:
+//ZZ case 24: *mod = False; break;
+//ZZ case 25: {
+//ZZ UInt cpsr = LibVEX_GuestARM_get_cpsr (arm);
+//ZZ if (dir == valgrind_to_gdbserver) {
+//ZZ VG_(transfer) (&cpsr, buf, dir, size, mod);
+//ZZ } else {
+//ZZ # if 0
+//ZZ UInt newcpsr;
+//ZZ VG_(transfer) (&newcpsr, buf, dir, size, mod);
+//ZZ *mod = newcpsr != cpsr;
+//ZZ // GDBTD ???? see FIXME in guest_arm_helpers.c
+//ZZ LibVEX_GuestARM_put_flags (newcpsr, arm);
+//ZZ # else
+//ZZ *mod = False;
+//ZZ # endif
+//ZZ }
+//ZZ break;
+//ZZ }
+//ZZ case 26: VG_(transfer) (&arm->guest_D0, buf, dir, size, mod); break;
+//ZZ case 27: VG_(transfer) (&arm->guest_D1, buf, dir, size, mod); break;
+//ZZ case 28: VG_(transfer) (&arm->guest_D2, buf, dir, size, mod); break;
+//ZZ case 29: VG_(transfer) (&arm->guest_D3, buf, dir, size, mod); break;
+//ZZ case 30: VG_(transfer) (&arm->guest_D4, buf, dir, size, mod); break;
+//ZZ case 31: VG_(transfer) (&arm->guest_D5, buf, dir, size, mod); break;
+//ZZ case 32: VG_(transfer) (&arm->guest_D6, buf, dir, size, mod); break;
+//ZZ case 33: VG_(transfer) (&arm->guest_D7, buf, dir, size, mod); break;
+//ZZ case 34: VG_(transfer) (&arm->guest_D8, buf, dir, size, mod); break;
+//ZZ case 35: VG_(transfer) (&arm->guest_D9, buf, dir, size, mod); break;
+//ZZ case 36: VG_(transfer) (&arm->guest_D10, buf, dir, size, mod); break;
+//ZZ case 37: VG_(transfer) (&arm->guest_D11, buf, dir, size, mod); break;
+//ZZ case 38: VG_(transfer) (&arm->guest_D12, buf, dir, size, mod); break;
+//ZZ case 39: VG_(transfer) (&arm->guest_D13, buf, dir, size, mod); break;
+//ZZ case 40: VG_(transfer) (&arm->guest_D14, buf, dir, size, mod); break;
+//ZZ case 41: VG_(transfer) (&arm->guest_D15, buf, dir, size, mod); break;
+//ZZ case 42: VG_(transfer) (&arm->guest_D16, buf, dir, size, mod); break;
+//ZZ case 43: VG_(transfer) (&arm->guest_D17, buf, dir, size, mod); break;
+//ZZ case 44: VG_(transfer) (&arm->guest_D18, buf, dir, size, mod); break;
+//ZZ case 45: VG_(transfer) (&arm->guest_D19, buf, dir, size, mod); break;
+//ZZ case 46: VG_(transfer) (&arm->guest_D20, buf, dir, size, mod); break;
+//ZZ case 47: VG_(transfer) (&arm->guest_D21, buf, dir, size, mod); break;
+//ZZ case 48: VG_(transfer) (&arm->guest_D22, buf, dir, size, mod); break;
+//ZZ case 49: VG_(transfer) (&arm->guest_D23, buf, dir, size, mod); break;
+//ZZ case 50: VG_(transfer) (&arm->guest_D24, buf, dir, size, mod); break;
+//ZZ case 51: VG_(transfer) (&arm->guest_D25, buf, dir, size, mod); break;
+//ZZ case 52: VG_(transfer) (&arm->guest_D26, buf, dir, size, mod); break;
+//ZZ case 53: VG_(transfer) (&arm->guest_D27, buf, dir, size, mod); break;
+//ZZ case 54: VG_(transfer) (&arm->guest_D28, buf, dir, size, mod); break;
+//ZZ case 55: VG_(transfer) (&arm->guest_D29, buf, dir, size, mod); break;
+//ZZ case 56: VG_(transfer) (&arm->guest_D30, buf, dir, size, mod); break;
+//ZZ case 57: VG_(transfer) (&arm->guest_D31, buf, dir, size, mod); break;
+//ZZ case 58: VG_(transfer) (&arm->guest_FPSCR, buf, dir, size, mod); break;
+//ZZ default: vg_assert(0);
+//ZZ }
+//ZZ }
+//ZZ
+//ZZ static
+//ZZ const char* target_xml (Bool shadow_mode)
+//ZZ {
+//ZZ if (shadow_mode) {
+//ZZ return "arm-with-vfpv3-valgrind.xml";
+//ZZ } else {
+//ZZ return "arm-with-vfpv3.xml";
+//ZZ }
+//ZZ }
+//ZZ
+//ZZ static struct valgrind_target_ops low_target = {
+//ZZ num_regs,
+//ZZ regs,
+//ZZ 13, //SP
+//ZZ transfer_register,
+//ZZ get_pc,
+//ZZ set_pc,
+//ZZ "arm",
+//ZZ target_xml
+//ZZ };
+
+void arm64_init_architecture (struct valgrind_target_ops *target)
+{
+ vg_assert(0); // IMPLEMENT ME: arm64 gdbserver support not written yet; see the //ZZ arm32 code above for the intended shape
+ //ZZ *target = low_target;
+ //ZZ set_register_cache (regs, num_regs);
+ //ZZ gdbserver_expedite_regs = expedite_regs;
+}
Modified: trunk/coregrind/m_gdbserver/valgrind_low.h
==============================================================================
--- trunk/coregrind/m_gdbserver/valgrind_low.h (original)
+++ trunk/coregrind/m_gdbserver/valgrind_low.h Sun Jan 12 12:54:00 2014
@@ -73,6 +73,7 @@
extern void x86_init_architecture (struct valgrind_target_ops *target);
extern void amd64_init_architecture (struct valgrind_target_ops *target);
extern void arm_init_architecture (struct valgrind_target_ops *target);
+extern void arm64_...
[truncated message content] |