|
From: Jeremy F. <je...@go...> - 2005-02-04 01:08:33
|
CVS commit by fitzhardinge:
Basic machinery to wrap functions. Not actually useful in this
checkin, since there's no way to activate it.
M +22 -0 core.h 1.79
M +39 -22 vg_from_ucode.c 1.91
M +199 -2 vg_redir.c 1.2
M +17 -0 x86/core_arch.h 1.21
M +41 -0 x86/dispatch.S 1.6
--- valgrind/coregrind/core.h #1.78:1.79
@@ -1103,4 +1103,26 @@ extern void VG_(resolve_seg_redirs)(SegI
extern Bool VG_(resolve_redir)(CodeRedirect *redir, const SegInfo *si);
+/* Wrapping machinery */
+
+/* How a wrapped call terminated; passed as the second argument to
+   FuncWrapper.after(). */
+enum return_type {
+   RT_RETURN,   /* function returned normally; retval is valid */
+   RT_LONGJMP,  /* frame abandoned without returning; retval is 0 */
+   RT_EXIT,     /* NOTE(review): apparently for thread/process exit -
+                   not generated anywhere in this checkin; confirm */
+};
+
+/* A pair of hooks wrapped around a call to some function.  before()
+   runs at the function's entrypoint and may return an opaque nonce;
+   after() runs at the return address and receives that nonce, how the
+   call ended, and the return value. */
+typedef struct _FuncWrapper FuncWrapper;
+struct _FuncWrapper {
+   void *(*before)(ThreadState *tst);
+   void (*after) (void *nonce, enum return_type, Word retval);
+};
+
+/* Attach/query wrappers by code address (implemented in vg_redir.c). */
+extern void VG_(wrap_function)(Addr eip, const FuncWrapper *wrapper);
+extern const FuncWrapper *VG_(is_wrapped)(Addr eip);
+extern Bool VG_(is_wrapper_return)(Addr eip);
+
+/* Primary interface for adding wrappers for client-side functions. */
+extern void VG_(add_wrapper)(const Char *from_lib, const Char *from_sym,
+                             const FuncWrapper *wrapper);
+
+
/* ---------------------------------------------------------------------
Exports of vg_main.c
--- valgrind/coregrind/vg_redir.c #1.1:1.2
@@ -54,5 +54,5 @@ struct _CodeRedirect {
};
-static Int addrcmp(const void *ap, const void *bp)
+static Int cmp_addrp(const void *ap, const void *bp)
{
Addr a = *(Addr *)ap;
@@ -78,5 +78,5 @@ static Char *straddr(void *p)
static SkipList sk_resolved_redir = SKIPLIST_INIT(CodeRedirect, from_addr,
- addrcmp, straddr, VG_AR_SYMTAB);
+ cmp_addrp, straddr, VG_AR_SYMTAB);
static CodeRedirect *unresolved_redir = NULL;
@@ -322,2 +322,199 @@ void VG_(setup_code_redirect_table) ( vo
}
+/*------------------------------------------------------------*/
+/*--- General function wrapping. ---*/
+/*------------------------------------------------------------*/
+
+/*
+ TODO:
+ - hook into the symtab machinery
+ - client-side wrappers?
+ - better interfaces for before() functions to get to arguments
+ - handle munmap of code (dlclose())
+ - handle thread exit
+ - handle longjmp
+ */
+/* Key identifying one outstanding wrapped call: a frame is named by
+   the calling thread plus the stack position of its arguments and its
+   return address.  Comparison order matches keycmp() below. */
+struct callkey {
+   ThreadId tid;   /* calling thread */
+   Addr esp;       /* address of args on stack */
+   Addr eip;       /* return address */
+};
+
+/* One live invocation of a wrapped function (an element of the
+   wrapped_frames skiplist): everything wrap_after() needs when the
+   call eventually returns. */
+struct call_instance {
+   struct callkey key;
+
+   const FuncWrapper *wrapper;  /* wrapper in effect for this call */
+   void *nonce;                 /* value from wrapper->before(), handed
+                                   back to wrapper->after() */
+};
+
+/* Three-way comparator for addresses: <0, 0, >0 as a<b, a==b, a>b.
+   The return type must be signed Int - it was previously declared as
+   (unsigned) Addr, so the -1 result silently wrapped to a huge
+   positive value and only survived because callers happened to assign
+   it back into a signed Int.  Now consistent with cmp() below. */
+static inline Int addrcmp(Addr a, Addr b)
+{
+   if (a < b)
+      return -1;
+   else if (a > b)
+      return 1;
+   else
+      return 0;
+}
+
+/* Three-way compare of two unsigned ints: -1, 0 or 1. */
+static inline Int cmp(UInt a, UInt b)
+{
+   return (a < b) ? -1 : ((a > b) ? 1 : 0);
+}
+
+/* SkipList comparator for struct callkey: orders by thread id first,
+   then by argument-stack address, then by return address. */
+static Int keycmp(const void *pa, const void *pb)
+{
+   const struct callkey *a = (const struct callkey *)pa;
+   const struct callkey *b = (const struct callkey *)pb;
+   Int r;
+
+   r = cmp(a->tid, b->tid);
+   if (r == 0)
+      r = addrcmp(a->esp, b->esp);
+   if (r == 0)
+      r = addrcmp(a->eip, b->eip);
+
+   return r;
+}
+
+/* List of wrapped call invocations which are currently active */
+static SkipList wrapped_frames = SKIPLIST_INIT(struct call_instance, key, keycmp,
+                                               NULL, VG_AR_SYMTAB);
+
+/* Look up the live call_instance for the frame identified by
+   (tid, argsp, retaddr), or NULL if none is outstanding.  Note the
+   initializer order {tid, argsp, retaddr} matches struct callkey's
+   field order {tid, esp, eip}. */
+static struct call_instance *find_call(Addr retaddr, Addr argsp, ThreadId tid)
+{
+   struct callkey key = { tid, argsp, retaddr };
+
+   return VG_(SkipList_Find_Exact)(&wrapped_frames, &key);
+}
+
+static void wrapper_return(Addr retaddr);
+
+/* Called from generated code via helper */
+/* Entry hook for a wrapped function, reached through
+   VG_(helper_wrapper_before) in x86/dispatch.S.  Runs the wrapper's
+   before() hook and, if there is an after() hook, records the call in
+   wrapped_frames keyed on (tid, arg pointer, return address) so that
+   VG_(wrap_after) fires when execution reaches the return address. */
+void VG_(wrap_before)(ThreadState *tst, const FuncWrapper *wrapper)
+{
+   Addr retaddr = ARCH_RETADDR(tst->arch);
+   Addr argp = (Addr)&ARCH_FUNC_ARG(tst->arch, 0);
+   void *nonce = NULL;
+
+   if (wrapper->before)
+      nonce = (*wrapper->before)(tst);
+
+   if (wrapper->after) {
+      /* If there's an after function, make sure it gets called */
+      struct call_instance *call;
+
+      call = find_call(retaddr, argp, tst->tid);
+
+      if (call != NULL) {
+         /* Found a stale outstanding call; clean it up and recycle
+            the structure.  A frame with the same key can only still
+            exist if the earlier call never returned normally (cf. the
+            longjmp TODO above), so tell its after() it was abandoned,
+            with no return value. */
+         if (call->wrapper->after)
+            (*call->wrapper->after)(call->nonce, RT_LONGJMP, 0);
+         /* No wrapper_return() needed here: the key matched, so
+            retaddr was already registered when this node was first
+            inserted. */
+      } else {
+         call = VG_(SkipNode_Alloc)(&wrapped_frames);
+
+         call->key.tid = tst->tid;
+         call->key.esp = argp;
+         call->key.eip = retaddr;
+
+         VG_(SkipList_Insert)(&wrapped_frames, call);
+
+         /* Register retaddr so the BB there gets (re)translated with
+            a call to the return helper. */
+         wrapper_return(retaddr);
+      }
+
+      call->wrapper = wrapper;
+      call->nonce = nonce;
+   } else
+      /* With no after() to consume it, a nonce would leak silently. */
+      vg_assert(nonce == NULL);
+}
+
+/* Called from generated code via helper */
+/* Return-side hook, reached through VG_(helper_wrapper_return) in
+   x86/dispatch.S when execution arrives at a registered return
+   address.  Looks up the frame recorded by VG_(wrap_before); if one
+   matches, fires after(RT_RETURN, retval) and retires the frame.  If
+   nothing matches (the address was reached some way other than the
+   wrapped call returning), this is deliberately a no-op. */
+void VG_(wrap_after)(ThreadState *tst)
+{
+   Addr EIP = ARCH_INSTR_PTR(tst->arch);   /* instruction after call */
+   Addr ESP = ARCH_STACK_PTR(tst->arch);   /* pointer to args */
+   Word ret = ARCH_RETVAL(tst->arch);      /* return value */
+
+   struct call_instance *call = find_call(EIP, ESP, tst->tid);
+
+   if (call != NULL) {
+      if (call->wrapper->after)
+         (*call->wrapper->after)(call->nonce, RT_RETURN, ret);
+
+      VG_(SkipList_Remove)(&wrapped_frames, &call->key);
+      VG_(SkipNode_Free)(&wrapped_frames, call);
+   }
+}
+
+
+/* A function with a wrapper attached, keyed by entrypoint address. */
+struct wrapped_function {
+   Addr eip;   /* eip of function entrypoint */
+   const FuncWrapper *wrapper;
+};
+
+/* Element of the wrapper_returns set below.  (The tag shares its name
+   with the wrapper_return() function further down; C keeps struct
+   tags in a separate namespace, so this is legal, if confusing.) */
+struct wrapper_return {
+   Addr eip;   /* return address */
+};
+
+/* A mapping from eip of wrapped function entrypoints to actual wrappers */
+static SkipList wrapped_functions = SKIPLIST_INIT(struct wrapped_function, eip, cmp_addrp,
+                                                  NULL, VG_AR_SYMTAB);
+
+/* A set of EIPs which are return addresses for wrapped functions */
+static SkipList wrapper_returns = SKIPLIST_INIT(struct wrapper_return, eip, cmp_addrp,
+                                                NULL, VG_AR_SYMTAB);
+
+/* Wrap function starting at eip: attach (or replace) its wrapper.
+   The existing translation at eip is always flushed - the emitted
+   entry code pushes the FuncWrapper pointer as an immediate (see
+   VG_(emit_code)), so a re-wrap that only swapped func->wrapper
+   without invalidating would leave the old wrapper baked into any
+   existing translation.  Previously the invalidation happened only on
+   first wrapping. */
+void VG_(wrap_function)(Addr eip, const FuncWrapper *wrapper)
+{
+   struct wrapped_function *func;
+
+   func = VG_(SkipList_Find_Exact)(&wrapped_functions, &eip);
+
+   if (func == NULL) {
+      func = VG_(SkipNode_Alloc)(&wrapped_functions);
+
+      func->eip = eip;
+      VG_(SkipList_Insert)(&wrapped_functions, func);
+   }
+
+   /* Force retranslation of the entry BB with the new wrapper. */
+   VG_(invalidate_translations)(eip, 1, True);
+
+   func->wrapper = wrapper;
+}
+
+/* Return the wrapper attached to the function entrypoint eip, or
+   NULL if that address has not been wrapped. */
+const FuncWrapper *VG_(is_wrapped)(Addr eip)
+{
+   struct wrapped_function *func = VG_(SkipList_Find_Exact)(&wrapped_functions, &eip);
+
+   return func ? func->wrapper : NULL;
+}
+
+/* Is eip a registered return address for some wrapped call? */
+Bool VG_(is_wrapper_return)(Addr eip)
+{
+   return VG_(SkipList_Find_Exact)(&wrapper_returns, &eip) != NULL;
+}
+
+/* Register eip as a return address for a wrapped call, so that
+   VG_(is_wrapper_return)() matches it and VG_(emit_code) generates a
+   call to the return helper there.  Idempotent; flushes any existing
+   translation at eip so it is regenerated.  Defined "static" to match
+   the forward declaration above - the definition previously omitted
+   the storage-class specifier, which made the linkage specification
+   inconsistent between declaration and definition. */
+static void wrapper_return(Addr eip)
+{
+   struct wrapper_return *ret;
+
+   if (VG_(is_wrapper_return)(eip))
+      return;
+
+   VG_(invalidate_translations)(eip, 1, True);
+
+   ret = VG_(SkipNode_Alloc)(&wrapper_returns);
+   ret->eip = eip;
+
+   VG_(SkipList_Insert)(&wrapper_returns, ret);
+}
--- valgrind/coregrind/vg_from_ucode.c #1.90:1.91
@@ -2274,9 +2274,10 @@ static void emit_call_patchme( void )
maybe_emit_put_eflags(); /* save flags before end of BB */
- VG_(new_emit)(False, FlagsEmpty, FlagsEmpty);
if (jumpidx >= VG_MAX_JUMPS) {
/* If there too many jumps in this basic block, fall back to
dispatch loop. */
+ VG_(new_emit)(False, FlagsEmpty, FlagsEmpty);
+
VG_(emitB) ( 0xC3 ); /* ret */
@@ -4420,9 +4421,23 @@ UChar* VG_(emit_code) ( UCodeBlock* cb,
Addr orig_eip, curr_eip;
Int tgt;
+ const FuncWrapper *wrapper;
reset_state();
+ /* Set up running state. */
+ sselive = False;
+ orig_eip = cb->orig_eip; /* we know EIP is up to date on BB entry */
+ curr_eip = cb->orig_eip;
+ vg_assert(curr_eip != 0); /* otherwise the incremental updating
+ algorithm gets messed up. */
+
if (dis) VG_(printf)("Generated x86 code:\n");
+ if (VG_(is_wrapper_return)(orig_eip)) {
+   /* If this is the return address for a wrapped function, call
+ wrap_return_helper. This also deals with managing the
+ dispatch_ctr, so there's no need to do that too. */
+ emit_call_abs(False, (Addr)VG_(helper_wrapper_return), FlagsEmpty, FlagsEmpty);
+ } else {
/* Generate subl $1, dispatch_ctr(%ebp) and drop into dispatch if
we hit zero. We have to do this regardless of whether we're
@@ -4443,11 +4458,13 @@ UChar* VG_(emit_code) ( UCodeBlock* cb,
emit_ret();
VG_(target_forward)(&tgt);
+ }
+
+ wrapper = VG_(is_wrapped)(orig_eip);
+ if (wrapper != NULL) {
+ /* This is the start of a wrapped function */
+ VG_(emit_pushl_lit32)((UInt)wrapper);
+ emit_call_abs(False, (Addr)VG_(helper_wrapper_before), FlagsEmpty, FlagsEmpty);
+ }
- /* Set up running state. */
- sselive = False;
- orig_eip = cb->orig_eip; /* we know EIP is up to date on BB entry */
- curr_eip = cb->orig_eip;
- vg_assert(curr_eip != 0); /* otherwise the incremental updating
- algorithm gets messed up. */
/* for each uinstr ... */
for (i = 0; i < cb->used; i++) {
--- valgrind/coregrind/x86/dispatch.S #1.5:1.6
@@ -190,4 +190,45 @@
1: addl $4, %esp /* remove our call address */
ret /* return into main dispatch loop above */
+
+/* This is called from generated code when we're about to
+   start running a wrapped function. When called, the only live
+   registers are %eax and %ebp, so they're the only ones we preserve.
+
+   We are called with FuncWrapper * on the stack
+
+   This calls VG_(wrap_before)(ThreadState *tst, FuncWrapper *wrapper)
+
+   NOTE(review): %ebp is passed as the ThreadState* argument, and it
+   is also the base register for VGOFF_DISPATCH_CTR in
+   helper_wrapper_return below - confirm those two views of %ebp
+   agree with the dispatcher's register conventions.
+*/
+.globl VG_(helper_wrapper_before)
+VG_(helper_wrapper_before):
+	pushl %eax		/* save %eax */
+	pushl %ebp		/* save %ebp */
+
+	/* After the two saves the stack is
+	   [ebp][eax][retaddr][FuncWrapper*], so the caller-pushed
+	   FuncWrapper* is at 12(%esp). */
+	pushl 12(%esp)		/* pass FuncWrapper * */
+	pushl %ebp		/* pass %ebp */
+	call VG_(wrap_before)
+	add $8, %esp		/* remove args */
+
+	popl %ebp		/* restore %ebp */
+	popl %eax		/* restore %eax */
+	ret $4			/* remove argument (the FuncWrapper*) */
+
+/* This is called from generated code when we're returning from a wrapped function.
+   Again, the only live registers are %eax and %ebp.
+   This is inserted instead of the normal dispatch_ctr management code, so we need
+   to duplicate that functionality too.
+
+   NOTE(review): no explicit argument is pushed for VG_(wrap_after);
+   after "pushl %eax; pushl %ebp" the saved %ebp happens to sit in the
+   first-argument slot, doubling as the ThreadState* - fragile trick,
+   confirm it is intended.
+   NOTE(review): on the counter-zero path we bail to the dispatch loop
+   WITHOUT calling VG_(wrap_after) - presumably the BB is re-entered
+   later and this helper re-run; confirm wrap_after cannot be skipped
+   permanently.
+*/
+.globl VG_(helper_wrapper_return)
+VG_(helper_wrapper_return):
+	subl $1, VGOFF_DISPATCH_CTR(%ebp)	/* duplicated BB-counter work */
+	jz 1f					/* counter exhausted */
+	pushl %eax		/* save %eax */
+	pushl %ebp		/* save %ebp; also wrap_after's arg (see NOTE) */
+	call VG_(wrap_after)
+	popl %ebp
+	popl %eax
+	ret
+1:	/* counter zero */
+	add $4, %esp		/* remove return addr */
+	ret			/* return back into dispatch loop */
.data
--- valgrind/coregrind/x86/core_arch.h #1.20:1.21
@@ -44,4 +44,18 @@
#define ARCH_FRAME_PTR(regs) ((regs).m_ebp)
+/* Gets the return address of a function call. Assumes the VCPU is in
+   a state where a return would work (ie just before a return
+   instruction, or at the start of a called function before any local
+   frame has been set up). */
+#define ARCH_RETADDR(regs) (*(Addr *)((regs).m_esp))
+
+/* Gets argument N for a function. Makes the same assumption as ARCH_RETADDR.
+
+   XXX Assumes all args are simple words on the stack; it should
+   really be a stdargs-like stateful interface. */
+#define ARCH_FUNC_ARG(regs, N) ((((Word *)((regs).m_esp))[(N)+1]))
+
+/* Integer return value of a just-returned function (x86: %eax). */
+#define ARCH_RETVAL(regs) ((regs).m_eax)
+
#define ARCH_CLREQ_ARGS(regs) ((regs).m_eax)
#define ARCH_PTHREQ_RET(regs) ((regs).m_edx)
@@ -85,4 +99,7 @@
------------------------------------------------------------------ */
+extern const Char VG_(helper_wrapper_before)[]; /* in dispatch.S */
+extern const Char VG_(helper_wrapper_return)[]; /* in dispatch.S */
+
extern const Char VG_(helper_undefined_instruction)[];
|