|
From: <sv...@va...> - 2006-05-17 20:18:14
|
Author: tom
Date: 2006-05-17 15:24:12 +0100 (Wed, 17 May 2006)
New Revision: 5905
Log:
Implement the set_robust_list and get_robust_list system calls.
Modified:
trunk/coregrind/m_syswrap/priv_syswrap-linux.h
trunk/coregrind/m_syswrap/syswrap-amd64-linux.c
trunk/coregrind/m_syswrap/syswrap-linux.c
trunk/coregrind/m_syswrap/syswrap-x86-linux.c
trunk/include/vki-linux.h
/usr/local/etc/subversion//commit-email.pl: `/usr/local/bin/svnlook diff /home/svn/repos/valgrind -r 5905' failed with this output:
Modified: trunk/coregrind/m_syswrap/priv_syswrap-linux.h
===================================================================
--- trunk/coregrind/m_syswrap/priv_syswrap-linux.h 2006-05-17 13:53:23 UTC (rev 5904)
+++ trunk/coregrind/m_syswrap/priv_syswrap-linux.h 2006-05-17 14:24:12 UTC (rev 5905)
@@ -74,6 +74,8 @@
DECL_TEMPLATE(linux, sys_sendfile);
DECL_TEMPLATE(linux, sys_sendfile64);
DECL_TEMPLATE(linux, sys_futex);
+DECL_TEMPLATE(linux, sys_set_robust_list);
+DECL_TEMPLATE(linux, sys_get_robust_list);
DECL_TEMPLATE(linux, sys_pselect6);
DECL_TEMPLATE(linux, sys_ppoll);
 
Modified: trunk/coregrind/m_syswrap/syswrap-amd64-linux.c
===================================================================
--- trunk/coregrind/m_syswrap/syswrap-amd64-linux.c 2006-05-17 13:53:23 UTC (rev 5904)
+++ trunk/coregrind/m_syswrap/syswrap-amd64-linux.c 2006-05-17 14:24:12 UTC (rev 5905)
@@ -1328,6 +1328,8 @@
// LINX_(__NR_pselect6, sys_ni_syscall), // 270
// LINXY(__NR_ppoll, sys_ni_syscall), // 271
// LINX_(__NR_unshare, sys_unshare), // 272
+ LINX_(__NR_set_robust_list, sys_set_robust_list), // 273
+ LINXY(__NR_get_robust_list, sys_get_robust_list), // 274
};
 
const UInt ML_(syscall_table_size) = 
Modified: trunk/coregrind/m_syswrap/syswrap-linux.c
===================================================================
--- trunk/coregrind/m_syswrap/syswrap-linux.c 2006-05-17 13:53:23 UTC (rev 5904)
+++ trunk/coregrind/m_syswrap/syswrap-linux.c 2006-05-17 14:24:12 UTC (rev 5905)
@@ -799,6 +799,37 @@
}
}
 
+PRE(sys_set_robust_list)
+{
+ PRINT("sys_set_robust_list ( %p, %d )", ARG1,ARG2);
+ PRE_REG_READ2(long, "set_robust_list", 
+ struct vki_robust_list_head *, head, vki_size_t, len);
+
+ /* Just check the robust_list_head structure is readable - don't
+ try and chase the list as the kernel will only read it when
+ the thread exits so the current contents is irrelevant. */
+ if (ARG1 != 0)
+ PRE_MEM_READ("set_robust_list(head)", ARG1, ARG2);
+}
+
+PRE(sys_get_robust_list)
+{
+ PRINT("sys_get_robust_list ( %d, %p, %d )", ARG1,ARG2,ARG3);
+ PRE_REG_READ3(long, "get_robust_list",
+ int, pid,
+ struct vki_robust_list_head **, head_ptr,
+ vki_size_t *, len_ptr);
+ PRE_MEM_WRITE("get_robust_list(head_ptr)",
+ ARG2, sizeof(struct vki_robust_list_head *));
+ PRE_MEM_WRITE("get_robust_list(len_ptr)",
+ ARG3, sizeof(struct vki_size_t *));
+}
+POST(sys_get_robust_list)
+{
+ POST_MEM_WRITE(ARG2, sizeof(struct vki_robust_list_head *));
+ POST_MEM_WRITE(ARG3, sizeof(struct vki_size_t *));
+}
+
PRE(sys_pselect6)
{
*flags |= SfMayBlock;
Modified: trunk/coregrind/m_syswrap/syswrap-x86-linux.c
===================================================================
--- trunk/coregrind/m_syswrap/syswrap-x86-linux.c 2006-05-17 13:53:23 UTC (rev 5904)
+++ trunk/coregrind/m_syswrap/syswrap-x86-linux.c 2006-05-17 14:24:12 UTC (rev 5905)
@@ -2172,6 +2172,8 @@
LINXY(__NR_ppoll, sys_ppoll), // 309
 
// LINX_(__NR_unshare, sys_unshare), // 310
+ LINX_(__NR_set_robust_list, sys_set_robust_list), // 311
+ LINXY(__NR_get_robust_list, sys_get_robust_list), // 312
};
 
const UInt ML_(syscall_table_size) = 
Modified: trunk/include/vki-linux.h
===================================================================
--- trunk/include/vki-linux.h 2006-05-17 13:53:23 UTC (rev 5904)
+++ trunk/include/vki-linux.h 2006-05-17 14:24:12 UTC (rev 5905)
@@ -1126,6 +1126,37 @@
#define VKI_FUTEX_REQUEUE (3)
#define VKI_FUTEX_CMP_REQUEUE (4)
 
+struct vki_robust_list {
+ struct vki_robust_list __user *next;
+};
+
+struct vki_robust_list_head {
+ /*
+ * The head of the list. Points back to itself if empty:
+ */
+ struct vki_robust_list list;
+
+ /*
+ * This relative offset is set by user-space, it gives the kernel
+ * the relative position of the futex field to examine. This way
+ * we keep userspace flexible, to freely shape its data-structure,
+ * without hardcoding any particular offset into the kernel:
+ */
+ long futex_offset;
+
+ /*
+ * The death of the thread may race with userspace setting
+ * up a lock's links. So to handle this race, userspace first
+ * sets this field to the address of the to-be-taken lock,
+ * then does the lock acquire, and then adds itself to the
+ * list, and then clears this field. Hence the kernel will
+ * always have full knowledge of all locks that the thread
+ * _might_ have taken. We check the owner TID in any case,
+ * so only truly owned locks will be handled.
+ */
+ struct vki_robust_list __user *list_op_pending;
+};
+
//----------------------------------------------------------------------
// From linux-2.6.8.1/include/linux/errno.h
//----------------------------------------------------------------------
svnlook: Can't open directory '/tmp/svnlook.8': Not a directory
|