|
From: Aaron C. <aar...@do...> - 2015-11-06 19:39:57
|
I am doing some work on an application that uses the latest Xen version (4.6).
When I ran valgrind on my application I noticed that it didn't support the
latest Xen HYPERCALL interface versions. I have created the following patch
based off of valgrind-3.11.0.
I have only tested it with my own applications, so it is entirely possible that
I have missed something.
I've never sent a message to this list before, so I hope this is the proper way
to include a patch.
Aaron Cornelius
==========================================================
diff -aur valgrind-3.11.0/coregrind/m_syswrap/syswrap-xen.c valgrind-3.11.0_xen4.6/coregrind/m_syswrap/syswrap-xen.c
--- valgrind-3.11.0/coregrind/m_syswrap/syswrap-xen.c	2015-09-08 09:23:26.000000000 -0400
+++ valgrind-3.11.0_xen4.6/coregrind/m_syswrap/syswrap-xen.c	2015-11-05 10:36:03.268521000 -0500
@@ -584,6 +584,7 @@
case 0x00000009:
case 0x0000000a:
case 0x0000000b:
+ case 0x0000000c:
break;
default:
bad_intf_version(tid, layout, arrghs, status, flags,
@@ -626,6 +627,7 @@
break;
case 0x0000000a:
case 0x0000000b:
+ case 0x0000000c:
PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, first_domain);
PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, max_domains);
PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, buffer);
@@ -676,6 +678,35 @@
break;
+ case VKI_XEN_SYSCTL_scheduler_op:
+ PRE_XEN_SYSCTL_READ(scheduler_op, cpupool_id);
+ PRE_XEN_SYSCTL_READ(scheduler_op, sched_id);
+ PRE_XEN_SYSCTL_READ(scheduler_op, cmd);
+
+ if ( sysctl->u.scheduler_op.cmd == VKI_XEN_SYSCTL_SCHEDOP_putinfo ) {
+ switch (sysctl->u.scheduler_op.sched_id) {
+ case VKI_XEN_SCHEDULER_CREDIT:
+ PRE_XEN_SYSCTL_READ(scheduler_op, u.sched_credit.tslice_ms);
+ PRE_XEN_SYSCTL_READ(scheduler_op, u.sched_credit.ratelimit_us);
+ break;
+
+ case VKI_XEN_SCHEDULER_ARINC653:
+ PRE_MEM_READ("XEN_SYSCTL_scheduler_op *u.scheduler_op.u.sched_arinc653.schedule.p",
+ (Addr)sysctl->u.scheduler_op.u.sched_arinc653.schedule.p,
+ sizeof(struct vki_xen_sysctl_arinc653_schedule));
+ break;
+
+ default:
+ VG_(dmsg)("WARNING: XEN_SYSCTL_scheduler_OP for sched_id type "
+ "%u not implemented yet\n",
+ sysctl->u.scheduler_op.sched_id);
+ SET_STATUS_Failure(VKI_EINVAL);
+ return;
+ }
+ }
+
+ break;
+
case VKI_XEN_SYSCTL_physinfo:
/* No input params */
break;
@@ -728,6 +759,7 @@
case 0x00000008:
case 0x00000009:
case 0x0000000a:
+ case 0x0000000b:
break;
default:
bad_intf_version(tid, layout, arrghs, status, flags,
@@ -889,6 +921,7 @@
__PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity_00000009,
cpumap.nr_bits);
break;
case 0x0000000a:
+ case 0x0000000b:
__PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity_0000000a, vcpu);
if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_HARD)
__PRE_XEN_DOMCTL_READ(
@@ -968,6 +1001,8 @@
break;
case 0x00000009:
+ case 0x0000000a:
+ case 0x0000000b:
__PRE_XEN_DOMCTL_READ(get_ext_vcpucontext, ext_vcpucontext_00000009,
vcpu);
break;
@@ -1008,6 +1043,8 @@
break;
case 0x00000009:
+ case 0x0000000a:
+ case 0x0000000b:
__PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
vcpu);
__PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
size);
#if defined(__i386__) || defined(__x86_64__)
@@ -1534,6 +1571,7 @@
case 0x00000009:
case 0x0000000a:
case 0x0000000b:
+ case 0x0000000c:
break;
default:
return;
@@ -1568,6 +1606,7 @@
break;
case 0x0000000a:
case 0x0000000b:
+ case 0x0000000c:
POST_XEN_SYSCTL_WRITE(getdomaininfolist_0000000a, num_domains);
POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_0000000a.buffer.p,
sizeof(*sysctl->u.getdomaininfolist_0000000a.buffer.p)
@@ -1593,6 +1632,30 @@
POST_XEN_SYSCTL_WRITE(cpupool_op, cpumap);
break;
+ case VKI_XEN_SYSCTL_scheduler_op:
+ if ( sysctl->u.scheduler_op.cmd == VKI_XEN_SYSCTL_SCHEDOP_getinfo ) {
+ switch (sysctl->u.scheduler_op.sched_id) {
+ case VKI_XEN_SCHEDULER_CREDIT:
+ POST_XEN_SYSCTL_WRITE(scheduler_op, u.sched_credit.tslice_ms);
+ POST_XEN_SYSCTL_WRITE(scheduler_op, u.sched_credit.ratelimit_us);
+ break;
+
+ case VKI_XEN_SCHEDULER_ARINC653:
+ POST_MEM_WRITE((Addr)sysctl->u.scheduler_op.u.sched_arinc653.schedule.p,
+ sizeof(struct vki_xen_sysctl_arinc653_schedule));
+ break;
+
+ default:
+ VG_(dmsg)("WARNING: XEN_SYSCTL_scheduler_OP for sched_id type "
+ "%u not supported\n",
+ sysctl->u.scheduler_op.sched_id);
+ SET_STATUS_Failure(VKI_EINVAL);
+ return;
+ }
+ }
+
+ break;
+
case VKI_XEN_SYSCTL_physinfo:
switch (sysctl->interface_version)
{
@@ -1613,6 +1676,7 @@
break;
case 0x0000000a:
case 0x0000000b:
+ case 0x0000000c:
POST_XEN_SYSCTL_WRITE(physinfo_0000000a, threads_per_core);
POST_XEN_SYSCTL_WRITE(physinfo_0000000a, cores_per_socket);
POST_XEN_SYSCTL_WRITE(physinfo_0000000a, nr_cpus);
@@ -1669,6 +1733,7 @@
case 0x00000008:
case 0x00000009:
case 0x0000000a:
+ case 0x0000000b:
break;
default:
return;
@@ -1791,6 +1856,7 @@
domctl->u.vcpuaffinity_00000009.cpumap.nr_bits / 8);
break;
case 0x0000000a:
+ case 0x0000000b:
if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_HARD)
POST_MEM_WRITE(
(Addr)domctl->u.vcpuaffinity_0000000a.cpumap_hard.bitmap.p,
@@ -1840,6 +1906,7 @@
break;
case 0x00000009:
case 0x0000000a:
+ case 0x0000000b:
POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, domain);
POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, flags);
POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, tot_pages);
@@ -1892,6 +1959,8 @@
break;
case 0x00000009:
+ case 0x0000000a:
+ case 0x0000000b:
__POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext,
ext_vcpucontext_00000009, size);
#if defined(__i386__) || defined(__x86_64__)
__POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
diff -aur valgrind-3.11.0/include/vki/vki-xen-domctl.h valgrind-3.11.0_xen4.6/include/vki/vki-xen-domctl.h
--- valgrind-3.11.0/include/vki/vki-xen-domctl.h	2015-09-08 09:23:26.000000000 -0400
+++ valgrind-3.11.0_xen4.6/include/vki/vki-xen-domctl.h	2015-11-05 08:54:42.764521000 -0500
@@ -36,6 +36,7 @@
* - 0x00000008: Xen 4.2
* - 0x00000009: Xen 4.3 & 4.4
* - 0x0000000a: Xen 4.5
+ * - 0x0000000b: Xen 4.6
*
* When adding a new subop be sure to include the variants used by all
* of the above, both here and in syswrap-xen.c
@@ -116,6 +117,11 @@
#define VKI_XEN_DOMCTL_cacheflush 71
#define VKI_XEN_DOMCTL_get_vcpu_msrs 72
#define VKI_XEN_DOMCTL_set_vcpu_msrs 73
+#define VKI_XEN_DOMCTL_setvnumainfo 74
+#define VKI_XEN_DOMCTL_psr_cmt_op 75
+#define VKI_XEN_DOMCTL_monitor_op 77
+#define VKI_XEN_DOMCTL_psr_cat_op 78
+#define VKI_XEN_DOMCTL_soft_reset 79
#define VKI_XEN_DOMCTL_gdbsx_guestmemio 1000
#define VKI_XEN_DOMCTL_gdbsx_pausevcpu 1001
#define VKI_XEN_DOMCTL_gdbsx_unpausevcpu 1002
@@ -187,6 +193,7 @@
DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_domctl_getdomaininfo_00000009_t);
/* vki_xen_domctl_getdomaininfo_0000000a is the same as 00000009 */
+/* vki_xen_domctl_getdomaininfo_0000000b is the same as 00000009 */
/* Get/set the NUMA node(s) with which the guest has affinity with. */
/* XEN_DOMCTL_setnodeaffinity */
@@ -216,6 +223,8 @@
struct vki_xenctl_bitmap cpumap_soft; /* IN/OUT */
};
+/* vki_xen_domctl_vcpuaffinity_0000000b is the same as 0000000a */
+
struct vki_xen_domctl_shadow_op_stats {
vki_uint32_t fault_count;
vki_uint32_t dirty_count;
@@ -413,6 +422,7 @@
};
/* vki_xen_domctl_ext_vcpucontext_0000000a is the same as 00000009 */
+/* vki_xen_domctl_ext_vcpucontext_0000000b is the same as 00000009 */
struct vki_xen_domctl_vcpuextstate {
vki_uint32_t vcpu;
@@ -532,6 +542,10 @@
struct vki_xen_domctl_cacheflush cacheflush;
//struct vki_xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
//struct vki_xen_domctl_gdbsx_domstatus gdbsx_domstatus;
+ //struct vki_xen_domctl_vnuma vnuma;
+ //struct vki_xen_domctl_psr_cmt_op psr_cmt_op;
+ //struct vki_xen_domctl_monitor_op monitor_op;
+ //struct vki_xen_domctl_psr_cat_op psr_cat_op;
vki_uint8_t pad[128];
} u;
};
diff -aur valgrind-3.11.0/include/vki/vki-xen-sysctl.h valgrind-3.11.0_xen4.6/include/vki/vki-xen-sysctl.h
--- valgrind-3.11.0/include/vki/vki-xen-sysctl.h	2015-09-08 09:23:26.000000000 -0400
+++ valgrind-3.11.0_xen4.6/include/vki/vki-xen-sysctl.h	2015-11-05 10:30:26.820521000 -0500
@@ -9,6 +9,7 @@
* - 0x00000009: Xen 4.2
* - 0x0000000a: Xen 4.3 & 4.4
* - 0x0000000b: Xen 4.5
+ * - 0x0000000c: Xen 4.6
*
* When adding a new subop be sure to include the variants used by all
* of the above, both here and in syswrap-xen.c
@@ -38,6 +39,10 @@
#define VKI_XEN_SYSCTL_cpupool_op 18
#define VKI_XEN_SYSCTL_scheduler_op 19
#define VKI_XEN_SYSCTL_coverage_op 20
+#define VKI_XEN_SYSCTL_psr_cmt_op 21
+#define VKI_XEN_SYSCTL_pcitopoinfo 22
+#define VKI_XEN_SYSCTL_psr_cat_op 23
+#define VKI_XEN_SYSCTL_tmem_op 24
struct vki_xen_sysctl_readconsole {
/* IN */
@@ -88,6 +93,7 @@
};
/* vki_xen_sysctl_getdomaininfolist_0000000b is the same as 0000000a */
+/* vki_xen_sysctl_getdomaininfolist_0000000c is the same as 0000000a */
#define VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE 1 /* C */
#define VKI_XEN_SYSCTL_CPUPOOL_OP_DESTROY 2 /* D */
@@ -107,6 +113,42 @@
struct vki_xenctl_bitmap cpumap; /* OUT: IF */
};
+struct vki_xen_sysctl_arinc653_schedule {
+ vki_xen_uint64_aligned_t major_frame;
+ vki_uint8_t num_sched_entries;
+ struct {
+ vki_xen_domain_handle_t dom_handle;
+ vki_uint32_t vcpu_id;
+ vki_xen_uint64_aligned_t runtime;
+ } sched_entries[64];
+};
+typedef struct vki_xen_sysctl_arinc653_schedule vki_xen_sysctl_arinc653_schedule_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_sysctl_arinc653_schedule_t);
+
+struct vki_xen_sysctl_credit_schedule {
+ vki_uint32_t tslice_ms;
+ vki_uint32_t ratelimit_us;
+};
+typedef struct vki_xen_sysctl_credit_schedule vki_xen_sysctl_credit_schedule_t;
+DEFINE_VKI_XEN_GUEST_HANDLE(vki_xen_sysctl_credit_schedule_t);
+
+#define VKI_XEN_SYSCTL_SCHEDOP_putinfo 0
+#define VKI_XEN_SYSCTL_SCHEDOP_getinfo 1
+struct vki_xen_sysctl_scheduler_op {
+ vki_uint32_t cpupool_id; /* IN */
+ /* Only VKI_XEN_SCHEDULER_CREDIT supported */
+ vki_uint32_t sched_id; /* IN */
+ vki_uint32_t cmd; /* IN */
+
+ /* The schedule fields point to user memory locations */
+ union {
+ struct vki_xen_sysctl_sched_arinc653 {
+ VKI_XEN_GUEST_HANDLE_64(vki_xen_sysctl_arinc653_schedule_t) schedule;
+ } sched_arinc653;
+ struct vki_xen_sysctl_credit_schedule sched_credit;
+ } u;
+};
+
struct vki_xen_sysctl_debug_keys {
/* IN variables. */
VKI_XEN_GUEST_HANDLE_64(char) keys;
@@ -160,6 +202,7 @@
};
/* vki_xen_sysctl_physinfo_0000000b is the same as 0000000a */
+/* vki_xen_sysctl_physinfo_0000000c is the same as 0000000a */
struct vki_xen_sysctl_sched_id {
/* OUT variable. */
@@ -190,9 +233,11 @@
//struct vki_xen_sysctl_page_offline_op page_offline;
//struct vki_xen_sysctl_lockprof_op lockprof_op;
struct vki_xen_sysctl_cpupool_op cpupool_op;
- //struct vki_xen_sysctl_scheduler_op scheduler_op;
+ struct vki_xen_sysctl_scheduler_op scheduler_op;
//struct vki_xen_sysctl_coverage_op coverage_op;
-
+ //struct vki_xen_sysctl_psr_cmt_op psr_cmt_op;
+ //struct vki_xen_sysctl_psr_cat_op psr_cat_op;
+ //struct vki_xen_sysctl_tmem_op tmem_op;
vki_uint8_t pad[128];
} u;
};
|