Mirror of https://github.com/Zenithsiz/ftmemsim-valgrind.git (synced 2026-02-04 02:18:37 +00:00)
Mega-merge of my last two weeks of hacking. This basically does the
groundwork for pthread_* support. Major changes:
* Valgrind now contains a (skeletal!) user-space pthreads
  implementation. The exciting bits are in the new file vg_scheduler.c.
  This contains thread management and scheduling, including the nasty
  crud needed to make some syscalls (read, write, nanosleep)
  nonblocking. Also implementations of the pthread_* functions: create,
  join, mutex_{create,destroy,lock,unlock} and cancel.
* As a side effect of the above, major improvements to signal handling
  and to the client-request machinery. The latter is now used to
  intercept malloc/free etc. too; the hacky way this was done before is
  gone. Another side effect is that vg_dispatch.S is greatly
  simplified. Also, the horrible hacks to do with delivering signals to
  threads blocked in syscalls are gone, since the new mechanisms cover
  this case easily.
git-svn-id: svn://svn.valgrind.org/valgrind/trunk@52
parent 1271ae8557
commit 7a36f60133
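To make the commit message concrete: under the new client-request machinery, a pthread_* entry point in the client simply packs a request code and its arguments into a block and traps to the scheduler, which runs the real logic in vg_scheduler.c. The following is a minimal sketch of that idea, not code from this commit; the request code is real (see the vg_include.h hunk below), while `do_client_request` and `my_pthread_mutex_lock` are invented names, and 32-bit x86 is assumed as in the original.

```c
/* Illustrative sketch only -- not part of this commit. */
typedef unsigned int UInt;

#define VG_USERREQ__PTHREAD_MUTEX_LOCK 0x3006   /* from vg_include.h below */

/* Stub standing in for the magic transfer that lands in
   VG_(handle_client_request) ( UInt* arg_block ) on the real CPU. */
static UInt do_client_request(UInt *arg_block) { (void)arg_block; return 0; }

int my_pthread_mutex_lock(void *mutex) {
    UInt args[2] = { VG_USERREQ__PTHREAD_MUTEX_LOCK, (UInt)mutex };
    /* The scheduler may mark this thread VgTs_WaitMX and run another
       runnable thread until the mutex is released. */
    return (int)do_client_request(args);
}
```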
@@ -29,6 +29,7 @@ val_PROGRAMS = valgrind.so valgrinq.so
valgrinq_so_SOURCES = vg_valgrinq_dummy.c

valgrind_so_SOURCES = \
   vg_scheduler.c \
   vg_clientmalloc.c \
   vg_clientperms.c \
   vg_demangle.c \
@@ -70,10 +71,10 @@ noinst_HEADERS = \
   vg_unsafe.h


vg_memory.o: vg_memory.c
vg_memory.o: vg_memory.c $(noinst_HEADERS)
	$(COMPILE) -O2 @PREFERRED_STACK_BOUNDARY@ -c $<

vg_clientmalloc.o: vg_clientmalloc.c
vg_clientmalloc.o: vg_clientmalloc.c $(noinst_HEADERS)
	$(COMPILE) -fno-omit-frame-pointer -c $<
@@ -1,7 +1,7 @@
# Process this file with autoconf to produce a configure script.
AC_INIT(vg_clientmalloc.c)
AM_CONFIG_HEADER(config.h)
AM_INIT_AUTOMAKE(valgrind, 20020329)
AM_INIT_AUTOMAKE(valgrind, 20020412)

AM_MAINTAINER_MODE
@@ -41,10 +41,6 @@
# m_state_static, and back afterwards.

VG_(do_syscall):
        cmpl    $2, VG_(syscall_depth)
        jz      do_syscall_DEPTH_2

        # depth 1 copy follows ...
        # Save all the int registers of the real machines state on the
        # simulators stack.
        pushal
@@ -104,76 +100,6 @@ VG_(do_syscall):

        ret


do_syscall_DEPTH_2:

        # depth 2 copy follows ...
        # Save all the int registers of the real machines state on the
        # simulators stack.
        pushal

        # and save the real FPU state too
        fwait
        fnsave  VG_(real_fpu_state_saved_over_syscall_d2)
        frstor  VG_(real_fpu_state_saved_over_syscall_d2)

        # remember what the simulators stack pointer is
        movl    %esp, VG_(esp_saved_over_syscall_d2)

        # Now copy the simulated machines state into the real one
        # esp still refers to the simulators stack
        frstor  VG_(m_state_static)+40
        movl    VG_(m_state_static)+32, %eax
        pushl   %eax
        popfl
        movl    VG_(m_state_static)+0, %eax
        movl    VG_(m_state_static)+4, %ecx
        movl    VG_(m_state_static)+8, %edx
        movl    VG_(m_state_static)+12, %ebx
        movl    VG_(m_state_static)+16, %esp
        movl    VG_(m_state_static)+20, %ebp
        movl    VG_(m_state_static)+24, %esi
        movl    VG_(m_state_static)+28, %edi

        # esp now refers to the simulatees stack
        # Do the actual system call
        int     $0x80

        # restore stack as soon as possible
        # esp refers to simulatees stack
        movl    %esp, VG_(m_state_static)+16
        movl    VG_(esp_saved_over_syscall_d2), %esp
        # esp refers to simulators stack

        # ... and undo everything else.
        # Copy real state back to simulated state.
        movl    %eax, VG_(m_state_static)+0
        movl    %ecx, VG_(m_state_static)+4
        movl    %edx, VG_(m_state_static)+8
        movl    %ebx, VG_(m_state_static)+12
        movl    %ebp, VG_(m_state_static)+20
        movl    %esi, VG_(m_state_static)+24
        movl    %edi, VG_(m_state_static)+28
        pushfl
        popl    %eax
        movl    %eax, VG_(m_state_static)+32
        fwait
        fnsave  VG_(m_state_static)+40
        frstor  VG_(m_state_static)+40

        # Restore the state of the simulator
        frstor  VG_(real_fpu_state_saved_over_syscall_d2)
        popal

        ret

##--------------------------------------------------------------------##
##--- end vg_syscall.S                                             ---##
##--------------------------------------------------------------------##
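The depth-2 syscall copy deleted above is no longer needed because, per the commit message, blocking syscalls are now handled by the scheduler: it can mark the fd nonblocking, attempt the call, and if it would block, park the thread as VgTs_WaitFD and select() on the fd later. The sketch below illustrates that pattern with plain POSIX calls; it is not the code vg_scheduler.c actually contains, and the helper name is invented.

```c
/* Illustrative sketch, standard POSIX only -- not from this commit. */
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

/* Returns 1 if the read completed (result in *res), 0 if the caller
   should park the thread (VgTs_WaitFD) and retry once select() says
   the fd is ready. */
int try_nonblocking_read(int fd, void *buf, size_t n, ssize_t *res) {
    int flags = fcntl(fd, F_GETFL, 0);
    fcntl(fd, F_SETFL, flags | O_NONBLOCK);  /* make the attempt nonblocking */
    *res = read(fd, buf, n);
    fcntl(fd, F_SETFL, flags);               /* restore the original flags   */
    if (*res >= 0) return 1;                 /* completed immediately        */
    return (errno == EAGAIN) ? 0 : 1;        /* 0 means: would have blocked  */
}
```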
@@ -164,8 +164,10 @@ fi

VG_ARGS="$VALGRIND_OPTS $vgsupp $vgopts"
export VG_ARGS
LD_PRELOAD=$VALGRIND/valgrind.so:$LD_PRELOAD
LD_LIBRARY_PATH=$VALGRIND:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH
LD_PRELOAD=valgrind.so:$LD_PRELOAD
export LD_PRELOAD
#LD_DEBUG=files
#export LD_DEBUG
exec $argopts
@@ -250,10 +250,9 @@ static ShadowChunk* client_malloc_shadow ( UInt align, UInt size,
/* Allocate memory, noticing whether or not we are doing the full
   instrumentation thing. */

void* VG_(client_malloc) ( UInt size, UInt raw_alloc_kind )
void* VG_(client_malloc) ( UInt size, VgAllocKind kind )
{
   ShadowChunk* sc;
   VgAllocKind  kind;

   VGP_PUSHCC(VgpCliMalloc);
   client_malloc_init();
@@ -263,21 +262,15 @@ void* VG_(client_malloc) ( UInt size, UInt raw_alloc_kind )
               count_freelist(), vg_freed_list_volume,
               size, raw_alloc_kind );
#  endif

   vg_cmalloc_n_mallocs ++;
   vg_cmalloc_bs_mallocd += size;

   if (!VG_(clo_instrument)) {
      VGP_POPCC;
      return VG_(malloc) ( VG_AR_CLIENT, size );
   }
   switch (raw_alloc_kind) {
      case 0x4002: kind = Vg_AllocNewVec; break;
      case 0x4001: kind = Vg_AllocNew; break;
      case 0x4000: /* malloc */
      case 6666:   /* calloc */
         kind = Vg_AllocMalloc; break;
      default: /* should not happen */
         /* therefore we make sure it doesn't -- JRS */
         VG_(panic)("VG_(client_malloc): raw_alloc_kind");
         break; /*NOTREACHED*/
   }

   sc = client_malloc_shadow ( 0, size, kind );
   VGP_POPCC;
   return (void*)(sc->data);
@@ -295,6 +288,10 @@ void* VG_(client_memalign) ( UInt align, UInt size )
               count_freelist(), vg_freed_list_volume,
               align, size );
#  endif

   vg_cmalloc_n_mallocs ++;
   vg_cmalloc_bs_mallocd += size;

   if (!VG_(clo_instrument)) {
      VGP_POPCC;
      return VG_(malloc_aligned) ( VG_AR_CLIENT, align, size );
@@ -305,11 +302,10 @@ void* VG_(client_memalign) ( UInt align, UInt size )
}


void VG_(client_free) ( void* ptrV, UInt raw_alloc_kind )
void VG_(client_free) ( void* ptrV, VgAllocKind kind )
{
   ShadowChunk* sc;
   UInt         ml_no;
   VgAllocKind  kind;

   VGP_PUSHCC(VgpCliMalloc);
   client_malloc_init();
@@ -319,6 +315,9 @@ void VG_(client_free) ( void* ptrV, UInt raw_alloc_kind )
               count_freelist(), vg_freed_list_volume,
               ptrV, raw_alloc_kind );
#  endif

   vg_cmalloc_n_frees ++;

   if (!VG_(clo_instrument)) {
      VGP_POPCC;
      VG_(free) ( VG_AR_CLIENT, ptrV );
@@ -340,16 +339,6 @@ void VG_(client_free) ( void* ptrV, UInt raw_alloc_kind )
      return;
   }

   switch (raw_alloc_kind) {
      case 0x5002: kind = Vg_AllocNewVec; break;
      case 0x5001: kind = Vg_AllocNew; break;
      case 0x5000:
      default:
         kind = Vg_AllocMalloc;
         /* should only happen if bug in client code */
         break;
   }

   /* check if its a matching free() / delete / delete [] */
   if (kind != sc->allockind)
      VG_(record_freemismatch_error) ( (Addr) ptrV );
@@ -386,6 +375,9 @@ void* VG_(client_calloc) ( UInt nmemb, UInt size1 )
               nmemb, size1 );
#  endif

   vg_cmalloc_n_mallocs ++;
   vg_cmalloc_bs_mallocd += nmemb * size1;

   if (!VG_(clo_instrument)) {
      VGP_POPCC;
      return VG_(calloc) ( VG_AR_CLIENT, nmemb, size1 );
@@ -430,6 +422,10 @@ void* VG_(client_realloc) ( void* ptrV, UInt size_new )
               ptrV, size_new );
#  endif

   vg_cmalloc_n_frees ++;
   vg_cmalloc_n_mallocs ++;
   vg_cmalloc_bs_mallocd += size_new;

   if (!VG_(clo_instrument)) {
      vg_assert(ptrV != NULL && size_new != 0);
      VGP_POPCC;
@@ -573,364 +569,6 @@ void VG_(describe_addr) ( Addr a, AddrInfo* ai )
   return;
}

/*------------------------------------------------------------*/
/*--- Replace the C library versions with our own.  Hairy. ---*/
/*------------------------------------------------------------*/

/* Below are new versions of malloc, __builtin_new, free,
   __builtin_delete, calloc and realloc.

   malloc, __builtin_new, free, __builtin_delete, calloc and realloc
   can be entered either on the real CPU or the simulated one.  If on
   the real one, this is because the dynamic linker is running the
   static initialisers for C++, before starting up Valgrind itself.
   In this case it is safe to route calls through to
   VG_(malloc)/vg_free, since that is self-initialising.

   Once Valgrind is initialised, vg_running_on_simd_CPU becomes True.
   The call needs to be transferred from the simulated CPU back to the
   real one and routed to the vg_client_* functions.  To do that, the
   args are passed to vg_trap_here, which the simulator detects.  The
   bogus epilogue fn call is to guarantee that gcc doesn't tailcall
   vg_trap_here, since that would cause the simulator's detection to
   fail -- it only checks the targets of call transfers, not jumps.
   And of course we have to be sure gcc won't inline either the
   vg_trap_here or vg_bogus_epilogue.  Ha ha ha.  What a mess.
*/

/* Place afterwards to guarantee it won't get inlined ... */
static UInt vg_trap_here_WRAPPER ( UInt arg1, UInt arg2, UInt what_to_do );
static void vg_bogus_epilogue ( void );

/* ALL calls to malloc wind up here. */
void* malloc ( UInt n )
{
   if (VG_(clo_trace_malloc))
      VG_(printf)("malloc[simd=%d](%d)",
                  (UInt)VG_(running_on_simd_CPU), n );

   if (VG_(clo_sloppy_malloc)) { while ((n % 4) > 0) n++; }

   vg_cmalloc_n_mallocs ++;
   vg_cmalloc_bs_mallocd += n;

   if (VG_(running_on_simd_CPU)) {
      UInt v = vg_trap_here_WRAPPER ( 0, n, 0x4000 );
      vg_bogus_epilogue();
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return (void*)v;
   } else {
      void* v = VG_(malloc)(VG_AR_CLIENT, n);
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return (void*)v;
   }
}

void* __builtin_new ( UInt n )
{
   if (VG_(clo_trace_malloc))
      VG_(printf)("__builtin_new[simd=%d](%d)",
                  (UInt)VG_(running_on_simd_CPU), n );

   if (VG_(clo_sloppy_malloc)) { while ((n % 4) > 0) n++; }

   vg_cmalloc_n_mallocs++;
   vg_cmalloc_bs_mallocd += n;

   if (VG_(running_on_simd_CPU)) {
      UInt v = vg_trap_here_WRAPPER ( 0, n, 0x4001 );
      vg_bogus_epilogue();
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return (void*)v;
   } else {
      void* v = VG_(malloc)(VG_AR_CLIENT, n);
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return v;
   }
}

void* __builtin_vec_new ( Int n )
{
   if (VG_(clo_trace_malloc))
      VG_(printf)("__builtin_vec_new[simd=%d](%d)",
                  (UInt)VG_(running_on_simd_CPU), n );

   if (VG_(clo_sloppy_malloc)) { while ((n % 4) > 0) n++; }

   vg_cmalloc_n_mallocs++;
   vg_cmalloc_bs_mallocd += n;

   if (VG_(running_on_simd_CPU)) {
      UInt v = vg_trap_here_WRAPPER ( 0, n, 0x4002 );
      vg_bogus_epilogue();
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return (void*)v;
   } else {
      void* v = VG_(malloc)(VG_AR_CLIENT, n);
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return v;
   }
}

void free ( void* p )
{
   if (VG_(clo_trace_malloc))
      VG_(printf)("free[simd=%d](%p)\n",
                  (UInt)VG_(running_on_simd_CPU), p );
   vg_cmalloc_n_frees ++;

   if (p == NULL)
      return;
   if (VG_(running_on_simd_CPU)) {
      (void)vg_trap_here_WRAPPER ( 0, (UInt)p, 0x5000 );
      vg_bogus_epilogue();
   } else {
      VG_(free)(VG_AR_CLIENT, p);
   }
}

void __builtin_delete ( void* p )
{
   if (VG_(clo_trace_malloc))
      VG_(printf)("__builtin_delete[simd=%d](%p)\n",
                  (UInt)VG_(running_on_simd_CPU), p );
   vg_cmalloc_n_frees ++;

   if (p == NULL)
      return;
   if (VG_(running_on_simd_CPU)) {
      (void)vg_trap_here_WRAPPER ( 0, (UInt)p, 0x5001 );
      vg_bogus_epilogue();
   } else {
      VG_(free)(VG_AR_CLIENT, p);
   }
}

void __builtin_vec_delete ( void* p )
{
   if (VG_(clo_trace_malloc))
      VG_(printf)("__builtin_vec_delete[simd=%d](%p)\n",
                  (UInt)VG_(running_on_simd_CPU), p );
   vg_cmalloc_n_frees ++;

   if (p == NULL)
      return;
   if (VG_(running_on_simd_CPU)) {
      (void)vg_trap_here_WRAPPER ( 0, (UInt)p, 0x5002 );
      vg_bogus_epilogue();
   } else {
      VG_(free)(VG_AR_CLIENT, p);
   }
}

void* calloc ( UInt nmemb, UInt size )
{
   if (VG_(clo_trace_malloc))
      VG_(printf)("calloc[simd=%d](%d,%d)",
                  (UInt)VG_(running_on_simd_CPU), nmemb, size );
   vg_cmalloc_n_mallocs ++;
   vg_cmalloc_bs_mallocd += size * nmemb;

   if (VG_(running_on_simd_CPU)) {
      UInt v = vg_trap_here_WRAPPER ( nmemb, size, 6666 );
      vg_bogus_epilogue();
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return (void*)v;
   } else {
      void* v = VG_(calloc)(VG_AR_CLIENT, nmemb, size);
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return v;
   }
}

void* realloc ( void* ptrV, UInt new_size )
{
   if (VG_(clo_trace_malloc))
      VG_(printf)("realloc[simd=%d](%p,%d)",
                  (UInt)VG_(running_on_simd_CPU), ptrV, new_size );

   if (VG_(clo_sloppy_malloc))
      { while ((new_size % 4) > 0) new_size++; }

   vg_cmalloc_n_frees ++;
   vg_cmalloc_n_mallocs ++;
   vg_cmalloc_bs_mallocd += new_size;

   if (ptrV == NULL)
      return malloc(new_size);
   if (new_size == 0) {
      free(ptrV);
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = 0\n" );
      return NULL;
   }
   if (VG_(running_on_simd_CPU)) {
      UInt v = vg_trap_here_WRAPPER ( (UInt)ptrV, new_size, 7777 );
      vg_bogus_epilogue();
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return (void*)v;
   } else {
      void* v = VG_(realloc)(VG_AR_CLIENT, ptrV, new_size);
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return v;
   }
}

void* memalign ( Int alignment, Int n )
{
   if (VG_(clo_trace_malloc))
      VG_(printf)("memalign[simd=%d](al %d, size %d)",
                  (UInt)VG_(running_on_simd_CPU), alignment, n );

   if (VG_(clo_sloppy_malloc)) { while ((n % 4) > 0) n++; }

   vg_cmalloc_n_mallocs ++;
   vg_cmalloc_bs_mallocd += n;

   if (VG_(running_on_simd_CPU)) {
      UInt v = vg_trap_here_WRAPPER ( alignment, n, 8888 );
      vg_bogus_epilogue();
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return (void*)v;
   } else {
      void* v = VG_(malloc_aligned)(VG_AR_CLIENT, alignment, n);
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return (void*)v;
   }
}

void* valloc ( Int size )
{
   return memalign(VKI_BYTES_PER_PAGE, size);
}


/* Various compatibility wrapper functions, for glibc and libstdc++. */
void cfree ( void* p )
{
   free ( p );
}

void* mallinfo ( void )
{
   VG_(message)(Vg_UserMsg,
                "Warning: incorrectly-handled call to mallinfo()");
   return NULL;
}


int mallopt ( int cmd, int value )
{
   /* In glibc-2.2.4, 1 denoted a successful return value for mallopt */
   return 1;
}


/* Bomb out if we get any of these. */
void pvalloc ( void )
{ VG_(panic)("call to pvalloc\n"); }

void malloc_stats ( void )
{ VG_(panic)("call to malloc_stats\n"); }
void malloc_usable_size ( void )
{ VG_(panic)("call to malloc_usable_size\n"); }
void malloc_trim ( void )
{ VG_(panic)("call to malloc_trim\n"); }
void malloc_get_state ( void )
{ VG_(panic)("call to malloc_get_state\n"); }
void malloc_set_state ( void )
{ VG_(panic)("call to malloc_set_state\n"); }


int __posix_memalign ( void **memptr, UInt alignment, UInt size )
{
   void *mem;

   /* Test whether the SIZE argument is valid.  It must be a power of
      two multiple of sizeof (void *). */
   if (size % sizeof (void *) != 0 || (size & (size - 1)) != 0)
      return 22 /*EINVAL*/;

   mem = memalign (alignment, size);

   if (mem != NULL) {
      *memptr = mem;
      return 0;
   }

   return 12 /*ENOMEM*/;
}


/*------------------------------------------------------------*/
/*--- Magic supporting hacks.                              ---*/
/*------------------------------------------------------------*/

extern UInt VG_(trap_here) ( UInt arg1, UInt arg2, UInt what_to_do );

static
UInt vg_trap_here_WRAPPER ( UInt arg1, UInt arg2, UInt what_to_do )
{
   /* The point of this idiocy is to make a plain, ordinary call to
      vg_trap_here which vg_dispatch_when_CALL can spot.  Left to
      itself, with -fpic, gcc generates "call vg_trap_here@PLT" which
      doesn't get spotted, for whatever reason.  I guess I could check
      _all_ control flow transfers, but that would be an undesirable
      performance overhead.

      If you compile without -fpic, gcc generates the obvious call
      insn, so the wrappers below will work if they just call
      vg_trap_here.  But I don't want to rule out building with -fpic,
      hence this hack.  Sigh.
   */
   UInt v;

#  define WHERE_TO  VG_(trap_here)
#  define STRINGIFY(xx) __STRING(xx)

   asm("# call to vg_trap_here\n"
       "\t pushl %3\n"
       "\t pushl %2\n"
       "\t pushl %1\n"
       "\t call " STRINGIFY(WHERE_TO) "\n"
       "\t addl $12, %%esp\n"
       "\t movl %%eax, %0\n"
       : "=r" (v)
       : "r" (arg1), "r" (arg2), "r" (what_to_do)
       : "eax", "esp", "cc", "memory");
   return v;

#  undef WHERE_TO
#  undef STRINGIFY
}

/* Last, but not least ... */
void vg_bogus_epilogue ( void )
{
   /* Runs on simulated CPU only. */
}

UInt VG_(trap_here) ( UInt arg1, UInt arg2, UInt what_to_do )
{
   /* Calls to this fn are detected in vg_dispatch.S and are handled
      specially.  So this fn should never be entered.  */
   VG_(panic)("vg_trap_here called!");
   return 0; /*NOTREACHED*/
}


/*--------------------------------------------------------------------*/
/*--- end                                        vg_clientmalloc.c ---*/
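The entire vg_trap_here block deleted above is what the commit message calls "the hacky way this was done before". In the new scheme the replacement allocator issues a client request instead; the sketch below shows the direction of that change. VG_USERREQ__MALLOC is real (defined in the vg_include.h hunk further down); `do_client_request` and `my_malloc` are invented stand-ins, and 32-bit x86 pointers are assumed as in the original.

```c
/* Illustrative sketch only -- not from this commit. */
typedef unsigned int UInt;

#define VG_USERREQ__MALLOC 0x2001   /* from vg_include.h below */

/* Stub standing in for the magic transfer that reaches
   VG_(handle_client_request) ( UInt* arg_block ) on the real CPU. */
static UInt do_client_request(UInt *arg_block) { (void)arg_block; return 0; }

void *my_malloc(UInt n) {
    UInt args[2] = { VG_USERREQ__MALLOC, n };
    /* The scheduler routes this to VG_(client_malloc)(n, Vg_AllocMalloc),
       so no magic 0x4000/6666 codes and no bogus-epilogue tricks. */
    return (void *)do_client_request(args);
}
```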
@@ -50,23 +50,18 @@
#define VGP_(str)    VGAPPEND(vgProf_,str)
#define VGOFF_(str)  VGAPPEND(vgOff_,str)

/* Reasons why the inner simulation loop might stop (i.e. why has
   vg_dispatch_ctr reached zero? */
#define VG_Y_SIGCHECK   0   /* signal check due */
#define VG_Y_SMC        1   /* write to code detected */
#define VG_Y_EXIT       2   /* natural or debug end to simulation */
#define VG_Y_TRANSLATE  3   /* translation of vg_m_eip needed */

/* Check for pending signals every this-many jumps.  Since this
   happens in the region of once per millisecond, we also take the
   opportunity do do a bit of quick sanity checking at the same time.
   Look at the call sites of VG_(deliver_signals). */
#define VG_SIGCHECK_INTERVAL 1000

/* A ,agic values that %ebp might be set to when returning to the
/* Magic values that %ebp might be set to when returning to the
   dispatcher.  The only other legitimate value is to point to the
   start of VG_(baseBlock). */
#define VG_EBP_DISPATCH_CHECKED 17
   start of VG_(baseBlock).  These also are return values from
   VG_(run_innerloop) to the scheduler. */
#define VG_TRC_EBP_JMP_SPECIAL    17
#define VG_TRC_EBP_JMP_SYSCALL    19
#define VG_TRC_EBP_JMP_CLIENTREQ  23

#define VG_TRC_INNER_COUNTERZERO  29  /* ebp can't have this; sched return only */
#define VG_TRC_INNER_FASTMISS     31  /* ditto.  Means fast-cache miss. */
#define VG_TRC_UNRESUMABLE_SIGNAL 37  /* ditto; got sigsegv/sigbus */

/* Debugging hack for assembly code ... sigh. */
#if 0
@@ -75,12 +70,13 @@
#define OYNK(nnn)
#endif

#if 1
#if 0
#define OYNNK(nnn) pushal; pushl $nnn; call VG_(oynk) ; addl $4,%esp; popal
#else
#define OYNNK(nnn)
#endif


/* Constants for the fast translation lookup cache. */
#define VG_TT_FAST_BITS 15
#define VG_TT_FAST_SIZE (1 << VG_TT_FAST_BITS)
@@ -88,6 +84,7 @@

/* Constants for the fast original-code-write check cache. */


/* Usually you want this to be zero. */
#define VG_SMC_FASTCHECK_IN_C 0
@@ -61,8 +61,15 @@
.globl VG_(run_innerloop)
VG_(run_innerloop):
        #OYNK(1000)

        # ----- entry point to VG_(run_innerloop) -----
        pushal
        pushl   %ebx
        pushl   %ecx
        pushl   %edx
        pushl   %esi
        pushl   %edi
        pushl   %ebp

        # Set up the baseBlock pointer
        movl    $VG_(baseBlock), %ebp

@@ -70,19 +77,19 @@ VG_(run_innerloop):
        movl    VGOFF_(m_eip), %esi
        movl    (%ebp, %esi, 4), %eax

        # fall thru to vg_dispatch
        # Start off dispatching paranoically, since we no longer have
        # any indication whether or not this might be a special call/ret
        # transfer.
        jmp     dispatch_callret_maybe

.globl VG_(dispatch)
VG_(dispatch):
        # %eax holds destination (original) address
        # To signal any kind of interruption, set vg_dispatch_ctr
        # to 1, and vg_interrupt_reason to the appropriate value
        # before jumping here.


dispatch_main:
        # Jump here to do a new dispatch.
        # %eax holds destination (original) address.
        # %ebp indicates further details of the control transfer
        # requested to the address in %eax.  The idea is that we
        # want to check all jump targets to see if they are either
        # VG_(signalreturn_bogusRA) or VG_(trap_here), both of which
        # VG_(signalreturn_bogusRA) or VG_(shutdown), both of which
        # require special treatment.  However, testing all branch
        # targets is expensive, and anyway in most cases JITter knows
        # that a jump cannot be to either of these two.  We therefore
@@ -92,37 +99,33 @@ VG_(dispatch):
        # this is a jump for which the JITter knows no check need be
        # made.
        #
        # If it is ebp == VG_EBP_DISPATCH_CHECKED, we had better make
        # If ebp == VG_EBP_JMP_CALLRET, we had better make
        # the check.
        #
        # If ebp == VG_EBP_JMP_SYSCALL, do a system call before
        # continuing at eax.
        #
        # If ebp == VG_EBP_JMP_CLIENTREQ, do a client request before
        # continuing at eax.
        #
        # If %ebp has any other value, we panic.
        #
        # What the JITter assumes is that VG_(signalreturn_bogusRA) can
        # only be arrived at from an x86 ret insn, and dually that
        # VG_(trap_here) can only be arrived at from an x86 call insn.
        # VG_(shutdown) can only be arrived at from an x86 call insn.
        # The net effect is that all call and return targets are checked
        # but straightforward jumps are not.
        #
        # Thinks ... is this safe if the client happens to tailcall
        # VG_(trap_here) ?  I dont think that can happen -- if it did
        # it would be a problem.
        #

        cmpl    $VG_(baseBlock), %ebp
        jnz     dispatch_checked_maybe
        jnz     dispatch_exceptional

dispatch_unchecked:
dispatch_boring:
        # save the jump address at VG_(baseBlock)[VGOFF_(m_eip)],
        # so that if this block takes a fault, we later know where we were.
        movl    VGOFF_(m_eip), %esi
        movl    %eax, (%ebp, %esi, 4)

        # do we require attention?
        # this check has to be after the call/ret transfer checks, because
        # we have to ensure that any control transfer following a syscall
        # return is an ordinary transfer.  By the time we get here, we have
        # established that the next transfer, which might get delayed till
        # after a syscall return, is an ordinary one.
        # All a bit subtle ...
        # do a timeslice check.
        # are we out of timeslice?  If yes, defer to scheduler.
        #OYNK(1001)
        decl    VG_(dispatch_ctr)
        jz      counter_is_zero
@@ -136,243 +139,102 @@ dispatch_unchecked:
        # ebx points at a tt entry
        # now compare target with the tte.orig_addr field (+0)
        cmpl    %eax, (%ebx)
        jnz     full_search
        jnz     fast_lookup_failed

        # Found a match.  Set the tte.mru_epoch field (+8)
        # and call the tte.trans_addr field (+4)
        movl    VG_(current_epoch), %ecx
        movl    %ecx, 8(%ebx)
        call    *4(%ebx)
        jmp     VG_(dispatch)
        jmp     dispatch_main

full_search:
        #no luck?  try the full table search
        pushl   %eax
        call    VG_(search_transtab)
        addl    $4, %esp
fast_lookup_failed:
        # %EIP is up to date here since dispatch_boring dominates
        movl    $VG_TRC_INNER_FASTMISS, %eax
        jmp     run_innerloop_exit

        # %eax has trans addr or zero
        cmpl    $0, %eax
        jz      need_translation
        # full table search also zeroes the tte.last_use field,
        # so we dont have to do so here.
        call    *%eax
        jmp     VG_(dispatch)

need_translation:
        OYNK(1003)
        movl    $VG_Y_TRANSLATE, VG_(interrupt_reason)
counter_is_zero:
        OYNK(1004)
        popal
        # ----- (the only) exit point from VG_(run_innerloop) -----
        # ----- unless of course vg_oursignalhandler longjmp()s
        # ----- back through it, due to an unmanagable signal
        ret


/* The normal way to get back to the translation loop is to put
   the address of the next (original) address and return.
   However, simulation of a RET insn requires a check as to whether
   the next address is vg_signalreturn_bogusRA.  If so, a signal
   handler is returning, so we need to invoke our own mechanism to
   deal with that, by calling vg_signal_returns().  This restores
   the simulated machine state from the VgSigContext structure on
   the stack, including the (simulated, of course) %eip saved when
   the signal was delivered.  We then arrange to jump to the
   restored %eip.
*/
dispatch_checked_maybe:
        # Possibly a checked dispatch.  Sanity check ...
        cmpl    $VG_EBP_DISPATCH_CHECKED, %ebp
        jz      dispatch_checked
        # ebp has an invalid value ... crap out.
        pushl   $panic_msg_ebp
        call    VG_(panic)
        # (never returns)

dispatch_checked:
        OYNK(2000)
        # first off, restore %ebp -- since it is currently wrong
        movl    $VG_(baseBlock), %ebp

        # see if we need to mess with stack blocks
        pushl   %ebp
        pushl   %eax
        call    VG_(delete_client_stack_blocks_following_ESP_change)
        popl    %eax
        popl    %ebp

        # is this a signal return?
        cmpl    $VG_(signalreturn_bogusRA), %eax
        jz      dispatch_to_signalreturn_bogusRA
        # should we intercept this call?
        cmpl    $VG_(trap_here), %eax
        jz      dispatch_to_trap_here
        # ok, its not interesting.  Handle the normal way.
        jmp     dispatch_unchecked

dispatch_to_signalreturn_bogusRA:
        OYNK(2001)
        pushal
        call    VG_(signal_returns)
        popal
        # %EIP will now point to the insn which should have followed
        # the signal delivery.  Jump to it.  Since we no longer have any
        # hint from the JITter about whether or not it is checkable,
        # go via the conservative route.
        movl    VGOFF_(m_eip), %esi
        movl    (%ebp, %esi, 4), %eax
        jmp     dispatch_checked


/* Similarly, check CALL targets to see if it is the ultra-magical
   vg_trap_here(), and, if so, act accordingly.  See vg_clientmalloc.c.
   Be careful not to get the real and simulated CPUs,
   stacks and regs mixed up ...
*/
dispatch_to_trap_here:
        OYNK(111)
        /* Considering the params to vg_trap_here(), we should have:
           12(%ESP) is what_to_do
           8(%ESP)  is arg2
           4(%ESP)  is arg1
           0(%ESP)  is return address
        */
        movl    VGOFF_(m_esp), %esi
        movl    (%ebp, %esi, 4), %ebx
        # %ebx now holds simulated %ESP
        cmpl    $0x4000, 12(%ebx)
        jz      handle_malloc
        cmpl    $0x4001, 12(%ebx)
        jz      handle_malloc
        cmpl    $0x4002, 12(%ebx)
        jz      handle_malloc
        cmpl    $0x5000, 12(%ebx)
        jz      handle_free
        cmpl    $0x5001, 12(%ebx)
        jz      handle_free
        cmpl    $0x5002, 12(%ebx)
        jz      handle_free
        cmpl    $6666, 12(%ebx)
        jz      handle_calloc
        cmpl    $7777, 12(%ebx)
        jz      handle_realloc
        cmpl    $8888, 12(%ebx)
        jz      handle_memalign
        push    $panic_msg_trap
        call    VG_(panic)
        # vg_panic never returns

handle_malloc:
        # %ESP is in %ebx
        pushl   12(%ebx)
        pushl   8(%ebx)
        call    VG_(client_malloc)
        addl    $8, %esp
        # returned value is in %eax
        jmp     save_eax_and_simulate_RET

handle_free:
        # %ESP is in %ebx
        pushl   12(%ebx)
        pushl   8(%ebx)
        call    VG_(client_free)
        addl    $8, %esp
        jmp     simulate_RET

handle_calloc:
        # %ESP is in %ebx
        pushl   8(%ebx)
        pushl   4(%ebx)
        call    VG_(client_calloc)
        addl    $8, %esp
        # returned value is in %eax
        jmp     save_eax_and_simulate_RET

handle_realloc:
        # %ESP is in %ebx
        pushl   8(%ebx)
        pushl   4(%ebx)
        call    VG_(client_realloc)
        addl    $8, %esp
        # returned value is in %eax
        jmp     save_eax_and_simulate_RET

handle_memalign:
        # %ESP is in %ebx
        pushl   8(%ebx)
        pushl   4(%ebx)
        call    VG_(client_memalign)
        addl    $8, %esp
        # returned value is in %eax
        jmp     save_eax_and_simulate_RET

save_eax_and_simulate_RET:
        movl    VGOFF_(m_eax), %esi
        movl    %eax, (%ebp, %esi, 4)   # %eax -> %EAX
        # set %EAX bits to VALID
        movl    VGOFF_(sh_eax), %esi
        movl    $0x0 /* All 32 bits VALID */, (%ebp, %esi, 4)
        # fall thru ...
simulate_RET:
        # standard return
        movl    VGOFF_(m_esp), %esi
        movl    (%ebp, %esi, 4), %ebx   # %ESP -> %ebx
        movl    0(%ebx), %eax           # RA -> %eax
        addl    $4, %ebx                # %ESP += 4
        movl    %ebx, (%ebp, %esi, 4)   # %ebx -> %ESP
        jmp     dispatch_checked        # jump to %eax

.data
panic_msg_trap:
.ascii  "dispatch_to_trap_here: unknown what_to_do"
.byte   0
panic_msg_ebp:
.ascii  "vg_dispatch: %ebp has invalid value!"
.byte   0
.text


/*------------------------------------------------------------*/
/*--- A helper for delivering signals when the client is   ---*/
/*--- (presumably) blocked in a system call.               ---*/
/*------------------------------------------------------------*/

/* Returns, in %eax, the next orig_addr to run.
   The caller needs to decide whether the returned orig_addr
   requires special handling.

   extern Addr VG_(run_singleton_translation) ( Addr trans_addr )
*/

/* should we take care to save the FPU state here? */

.globl VG_(run_singleton_translation)
VG_(run_singleton_translation):
        movl    4(%esp), %eax   # eax = trans_addr
        pushl   %ebx
        pushl   %ecx
        pushl   %edx
        pushl   %esi
        pushl   %edi
        pushl   %ebp

        # set up ebp correctly for translations
        movl    $VG_(baseBlock), %ebp

        # run the translation
        call    *%eax

        # next orig_addr is correctly in %eax already
        # %EIP is up to date here since dispatch_boring dominates
        movl    $VG_TRC_INNER_COUNTERZERO, %eax
        jmp     run_innerloop_exit

run_innerloop_exit:
        popl    %ebp
        popl    %edi
        popl    %esi
        popl    %edx
        popl    %ecx
        popl    %ebx
        ret


/* Other ways of getting out of the inner loop.  Placed out-of-line to
   make it look cleaner.
*/
dispatch_exceptional:
        # this is jumped to only, not fallen-through from above
        cmpl    $VG_TRC_EBP_JMP_SPECIAL, %ebp
        jz      dispatch_callret_maybe
        cmpl    $VG_TRC_EBP_JMP_SYSCALL, %ebp
        jz      dispatch_syscall
        cmpl    $VG_TRC_EBP_JMP_CLIENTREQ, %ebp
        jz      dispatch_clientreq

        # ebp has an invalid value ... crap out.
        pushl   $panic_msg_ebp
        call    VG_(panic)
        # (never returns)

dispatch_syscall:
        # save %eax in %EIP and defer to sched
        movl    $VG_(baseBlock), %ebp
        movl    VGOFF_(m_eip), %esi
        movl    %eax, (%ebp, %esi, 4)
        movl    $VG_TRC_EBP_JMP_SYSCALL, %eax
        jmp     run_innerloop_exit

        ret
dispatch_clientreq:
        # save %eax in %EIP and defer to sched
        movl    $VG_(baseBlock), %ebp
        movl    VGOFF_(m_eip), %esi
        movl    %eax, (%ebp, %esi, 4)
        movl    $VG_TRC_EBP_JMP_CLIENTREQ, %eax
        jmp     run_innerloop_exit

dispatch_callret_maybe:
        # save %eax in %EIP
        movl    $VG_(baseBlock), %ebp
        movl    VGOFF_(m_eip), %esi
        movl    %eax, (%ebp, %esi, 4)

        # see if we need to mess with stack blocks
        pushl   %eax
        call    VG_(delete_client_stack_blocks_following_ESP_change)
        popl    %eax
        movl    $VG_(baseBlock), %ebp

        # is this a call/return which we need to mess with
        cmpl    $VG_(signalreturn_bogusRA), %eax
        jz      dispatch_callret
        cmpl    $VG_(shutdown), %eax
        jz      dispatch_callret

        # ok, its not interesting.  Handle the normal way.
        jmp     dispatch_boring

dispatch_callret:
        # %EIP is up to date here since dispatch_callret_maybe dominates
        movl    $VG_TRC_EBP_JMP_SPECIAL, %eax
        jmp     run_innerloop_exit


.data
panic_msg_ebp:
.ascii  "vg_dispatch: %ebp has invalid value!"
.byte   0
.text


##--------------------------------------------------------------------##
##--- end vg_dispatch.S                                            ---##
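The simplified dispatcher above gives VG_(run_innerloop) a clean contract: it returns one of the VG_TRC_* codes, and the scheduler decides what to do before re-entering the loop. The C outline below is an illustration of that contract only, not the actual vg_scheduler.c; the constant values come from the header hunk earlier, the function is a stub, and the name `scheduler_outline` is invented.

```c
/* Illustrative sketch only -- not from this commit. */
typedef unsigned int UInt;

#define VG_TRC_EBP_JMP_SPECIAL    17
#define VG_TRC_EBP_JMP_SYSCALL    19
#define VG_TRC_EBP_JMP_CLIENTREQ  23
#define VG_TRC_INNER_COUNTERZERO  29
#define VG_TRC_INNER_FASTMISS     31

/* Stub standing in for the assembly VG_(run_innerloop) above. */
static UInt run_innerloop(void) { return VG_TRC_INNER_COUNTERZERO; }

void scheduler_outline(void) {
    for (;;) {
        UInt trc = run_innerloop();
        switch (trc) {
        case VG_TRC_INNER_FASTMISS:    /* translate %EIP, then rerun      */ break;
        case VG_TRC_INNER_COUNTERZERO: /* timeslice over: pick new thread */ break;
        case VG_TRC_EBP_JMP_SYSCALL:   /* run the (maybe blocking) syscall*/ break;
        case VG_TRC_EBP_JMP_CLIENTREQ: /* malloc/free or pthread request  */ break;
        case VG_TRC_EBP_JMP_SPECIAL:   /* signal return / shutdown check  */ break;
        default:                       return; /* unexpected: bail out    */
        }
    }
}
```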
@@ -1069,44 +1069,48 @@ static void synth_call_baseBlock_method ( Bool ensure_shortform,
}


/* Jump to the next translation, by loading its original addr into
   %eax and returning to the scheduler.  Or, if is a RET transfer,
   don't return; instead jump to vg_dispatch_when_RET, which checks
   whether this is a signal handler returning, and takes suitable
   evasive action.
*/
static void synth_jmp_reg ( Int reg,
                            Bool is_ret_dispatch,
                            Bool is_call_dispatch )
static void load_ebp_from_JmpKind ( JmpKind jmpkind )
{
   switch (jmpkind) {
      case JmpBoring:
         break;
      case JmpCall:
      case JmpRet:
         emit_movv_lit_reg ( 4, VG_TRC_EBP_JMP_SPECIAL, R_EBP );
         break;
      case JmpSyscall:
         emit_movv_lit_reg ( 4, VG_TRC_EBP_JMP_SYSCALL, R_EBP );
         break;
      case JmpClientReq:
         emit_movv_lit_reg ( 4, VG_TRC_EBP_JMP_CLIENTREQ, R_EBP );
         break;
      default:
         VG_(panic)("load_ebp_from_JmpKind");
   }
}

/* Jump to the next translation, by loading its original addr into
   %eax and returning to the scheduler.  Signal special requirements
   by loading a special value into %ebp first.
*/
static void synth_jmp_reg ( Int reg, JmpKind jmpkind )
{
   load_ebp_from_JmpKind ( jmpkind );
   if (reg != R_EAX)
      emit_movv_reg_reg ( 4, reg, R_EAX );
   if (is_ret_dispatch || is_call_dispatch) {
      /* The (hopefully) rare case. */
      vg_assert(!(is_ret_dispatch && is_call_dispatch));
      emit_movv_lit_reg ( 4, VG_EBP_DISPATCH_CHECKED, R_EBP );
   }
   emit_ret();
}


/* Same deal as synth_jmp_reg. */
static void synth_jmp_lit ( Addr addr )
static void synth_jmp_lit ( Addr addr, JmpKind jmpkind )
{
   load_ebp_from_JmpKind ( jmpkind );
   emit_movv_lit_reg ( 4, addr, R_EAX );
   emit_ret();
}


/* Dispatch, but with a call-target check. */
static void synth_jmp_lit_call_dispatch ( Addr addr )
{
   emit_movv_lit_reg ( 4, addr, R_EAX );
   emit_movv_lit_reg ( 4, VG_EBP_DISPATCH_CHECKED, R_EBP );
   emit_ret();
}


static void synth_jcond_lit ( Condcode cond, Addr addr )
{
   /* Do the following:
@@ -1124,7 +1128,7 @@ static void synth_jcond_lit ( Condcode cond, Addr addr )
   */
   emit_get_eflags();
   emit_jcondshort_delta ( invertCondition(cond), 5+1 );
   synth_jmp_lit ( addr );
   synth_jmp_lit ( addr, JmpBoring );
}


@@ -1138,7 +1142,7 @@ static void synth_jmp_ifzero_reg_lit ( Int reg, Addr addr )
   */
   emit_cmpl_zero_reg ( reg );
   emit_jcondshort_delta ( CondNZ, 5+1 );
   synth_jmp_lit ( addr );
   synth_jmp_lit ( addr, JmpBoring );
}


@@ -2472,25 +2476,29 @@ static void emitUInstr ( Int i, UInstr* u )
         vg_assert(u->tag2 == NoValue);
         vg_assert(u->tag1 == RealReg || u->tag1 == Literal);
         if (u->cond == CondAlways) {
            if (u->tag1 == RealReg) {
               synth_jmp_reg ( u->val1, u->ret_dispatch, u->call_dispatch );
            } else {
               vg_assert(!u->ret_dispatch);
               if (u->call_dispatch)
                  synth_jmp_lit_call_dispatch (
                     u->tag1==Literal ? u->lit32 : u->val1 );
               else
                  synth_jmp_lit (
                     u->tag1==Literal ? u->lit32 : u->val1 );
            switch (u->tag1) {
               case RealReg:
                  synth_jmp_reg ( u->val1, u->jmpkind );
                  break;
               case Literal:
                  synth_jmp_lit ( u->lit32, u->jmpkind );
                  break;
               default:
                  VG_(panic)("emitUInstr(JMP, unconditional, default)");
                  break;
            }
         } else {
            if (u->tag1 == RealReg) {
               VG_(panic)("emitUInstr: conditional jump to reg");
            } else {
               vg_assert(!u->ret_dispatch);
               vg_assert(!u->call_dispatch);
               synth_jcond_lit ( u->cond,
                                 u->tag1==Literal ? u->lit32 : u->val1 );
            switch (u->tag1) {
               case RealReg:
                  VG_(panic)("emitUInstr(JMP, conditional, RealReg)");
                  break;
               case Literal:
                  vg_assert(u->jmpkind == JmpBoring);
                  synth_jcond_lit ( u->cond, u->lit32 );
                  break;
               default:
                  VG_(panic)("emitUInstr(JMP, conditional, default)");
                  break;
            }
         }
         break;
@@ -48,45 +48,6 @@
*/


/*
   On entry:
      %ECX value
      %EBX value
      %EAX value -- also the result
      RA <- %esp -- after pushal+pushfl is 36(%esp)
*/
.global VG_(helper_do_client_request)
VG_(helper_do_client_request):
        pushal
        pushfl

        movl    48(%esp), %eax
        pushl   %eax
        movl    48(%esp), %eax
        pushl   %eax
        movl    48(%esp), %eax
        pushl   %eax

        call    VG_(handle_client_request)
        movl    %eax, 52(%esp)

        addl    $12, %esp

        popfl
        popal
        ret


.global VG_(helper_do_syscall)
VG_(helper_do_syscall):
        pushal
        call    VG_(wrap_syscall)
        popal
        # movl  $VG_(baseBlock), %ebp
        ret



.global VG_(helper_value_check0_fail)
VG_(helper_value_check0_fail):
        pushal
@@ -116,21 +77,6 @@ VG_(helper_value_check4_fail):
        ret


/* Set things up so the dispatch loop exits normally.  Used when it is
   detected that the program wants to finish, ie it has called
   vg_shutdown.
*/
.global VG_(helper_request_normal_exit)
VG_(helper_request_normal_exit):
        pushl   %eax
        movl    VG_(dispatch_ctr), %eax
        movl    %eax, VG_(dispatch_ctr_SAVED)
        movl    $1, VG_(dispatch_ctr)
        movl    $VG_Y_EXIT, VG_(interrupt_reason)
        popl    %eax
        ret


/* Do a original-code-write check for the address in %ebp. */
.global VG_(helper_smc_check4)
VG_(helper_smc_check4):
@ -117,6 +117,27 @@
|
||||
prime. */
|
||||
#define VG_N_EC_LISTS /*997*/ 4999
|
||||
|
||||
/* Defines the thread-scheduling timeslice, in terms of the number of
|
||||
basic blocks we attempt to run each thread for. Smaller values
|
||||
give finer interleaving but much increased scheduling overheads. */
|
||||
#define VG_SCHEDULING_QUANTUM 10000
|
||||
|
||||
/* The maximum number of pthreads that we support. This is
|
||||
deliberately not very high since our implementation of some of the
|
||||
scheduler algorithms is surely O(N^2) in the number of threads,
|
||||
since that's simple, at least. And (in practice) we hope that most
|
||||
programs do not need many threads. */
|
||||
#define VG_N_THREADS 20
|
||||
|
||||
/* Number of file descriptors that can simultaneously be waited on for
|
||||
I/O to complete. Perhaps this should be the same as VG_N_THREADS
|
||||
(surely a thread can't wait on more than one fd at once?. Who
|
||||
knows.) */
|
||||
#define VG_N_WAITING_FDS 10
|
||||
|
||||
/* Maximum number of mutexes allowed. */
|
||||
#define VG_N_MUTEXES 10
|
||||
|
||||
|
||||
/* ---------------------------------------------------------------------
|
||||
Basic types
|
||||
@ -352,31 +373,220 @@ extern Bool VG_(is_empty_arena) ( ArenaId aid );
|
||||
(VG_AR_CLIENT_REDZONE_SZW * VKI_BYTES_PER_WORD)
|
||||
|
||||
|
||||
/* ---------------------------------------------------------------------
|
||||
Exports of vg_clientfuns.c
|
||||
------------------------------------------------------------------ */
|
||||
|
||||
/* This doesn't export code or data that valgrind.so needs to link
|
||||
against. However, the scheduler does need to know the following
|
||||
request codes. A few, publically-visible, request codes are also
|
||||
defined in valgrind.h. */
|
||||
|
||||
#define VG_USERREQ__MALLOC 0x2001
|
||||
#define VG_USERREQ__BUILTIN_NEW 0x2002
|
||||
#define VG_USERREQ__BUILTIN_VEC_NEW 0x2003
|
||||
|
||||
#define VG_USERREQ__FREE 0x2004
|
||||
#define VG_USERREQ__BUILTIN_DELETE 0x2005
|
||||
#define VG_USERREQ__BUILTIN_VEC_DELETE 0x2006
|
||||
|
||||
#define VG_USERREQ__CALLOC 0x2007
|
||||
#define VG_USERREQ__REALLOC 0x2008
|
||||
#define VG_USERREQ__MEMALIGN 0x2009
|
||||
|
||||
|
||||
#define VG_USERREQ__PTHREAD_CREATE 0x3001
|
||||
#define VG_USERREQ__PTHREAD_CREATE_BOGUSRA 0x3002
|
||||
#define VG_USERREQ__PTHREAD_JOIN 0x3003
|
||||
#define VG_USERREQ__PTHREAD_GET_THREADID 0x3004
|
||||
#define VG_USERREQ__PTHREAD_MUTEX_INIT 0x3005
|
||||
#define VG_USERREQ__PTHREAD_MUTEX_LOCK 0x3006
|
||||
#define VG_USERREQ__PTHREAD_MUTEX_UNLOCK 0x3007
|
||||
#define VG_USERREQ__PTHREAD_MUTEX_DESTROY 0x3008
|
||||
#define VG_USERREQ__PTHREAD_CANCEL 0x3009
|
||||
|
||||
/* ---------------------------------------------------------------------
|
||||
Constants pertaining to the simulated CPU state, VG_(baseBlock),
|
||||
which need to go here to avoid ugly circularities.
|
||||
------------------------------------------------------------------ */
|
||||
|
||||
/* How big is the saved FPU state? */
|
||||
#define VG_SIZE_OF_FPUSTATE 108
|
||||
/* ... and in words ... */
|
||||
#define VG_SIZE_OF_FPUSTATE_W ((VG_SIZE_OF_FPUSTATE+3)/4)
|
||||
|
||||
|
||||
/* ---------------------------------------------------------------------
|
||||
Exports of vg_scheduler.c
|
||||
------------------------------------------------------------------ */
|
||||
|
||||
/* ThreadIds are simply indices into the vg_threads[] array. */
|
||||
typedef
|
||||
UInt
|
||||
ThreadId;
|
||||
|
||||
/* MutexIds are simply indices into the vg_mutexes[] array. */
|
||||
typedef
|
||||
UInt
|
||||
MutexId;
|
||||
|
||||
|
||||
#define VG_INVALID_THREADID ((ThreadId)(-1))
|
||||
|
||||
typedef
|
||||
enum {
|
||||
VgTs_Empty, /* this slot is not in use */
|
||||
VgTs_Runnable, /* waiting to be scheduled */
|
||||
VgTs_WaitJoiner, /* waiting for someone to do join on me */
|
||||
VgTs_WaitJoinee, /* waiting for the thread I did join on */
|
||||
VgTs_WaitFD, /* waiting for I/O completion on a fd */
|
||||
VgTs_WaitMX, /* waiting on a mutex */
|
||||
VgTs_Sleeping /* sleeping for a while */
|
||||
}
|
||||
ThreadStatus;
|
||||
|
||||
typedef
|
||||
struct {
|
||||
/* The thread identity is simply the index in vg_threads[].
|
||||
ThreadId == 0 is the root thread and has the special property
|
||||
that we don't try and allocate or deallocate its stack. */
|
||||
|
||||
/* Current scheduling status. */
|
||||
ThreadStatus status;
|
||||
|
||||
/* Identity of joiner (thread who called join on me), or
|
||||
VG_INVALID_THREADID if no one asked to join yet. */
|
||||
ThreadId joiner;
|
||||
|
||||
/* Identity of mutex we are waiting on, if .status == WaitMX. */
|
||||
MutexId waited_on_mid;
|
||||
|
||||
/* If VgTs_Sleeping, this is when we should wake up. */
|
||||
ULong awaken_at;
|
||||
|
||||
/* return value */
|
||||
void* retval;
|
||||
|
||||
/* Stacks. When a thread slot is freed, we don't deallocate its
|
||||
stack; we just leave it lying around for the next use of the
|
||||
slot. If the next use of the slot requires a larger stack,
|
||||
only then is the old one deallocated and a new one
|
||||
allocated.
|
||||
|
||||
For the main thread (threadid == 0), this mechanism doesn't
|
||||
apply. We don't know the size of the stack since we didn't
|
||||
allocate it, and furthermore we never reallocate it. */
|
||||
|
||||
/* The allocated size of this thread's stack (permanently zero
|
||||
if this is ThreadId == 0, since we didn't allocate its stack) */
|
||||
UInt stack_size;
|
||||
|
||||
/* Address of the lowest word in this thread's stack. NULL means
|
||||
not allocated yet.
|
||||
*/
|
||||
Addr stack_base;
|
||||
|
||||
/* Saved machine context. */
|
||||
UInt m_eax;
|
||||
UInt m_ebx;
|
||||
UInt m_ecx;
|
||||
UInt m_edx;
|
||||
UInt m_esi;
|
||||
UInt m_edi;
|
||||
UInt m_ebp;
|
||||
UInt m_esp;
|
||||
UInt m_eflags;
|
||||
UInt m_eip;
|
||||
UInt m_fpu[VG_SIZE_OF_FPUSTATE_W];
|
||||
|
||||
UInt sh_eax;
|
||||
UInt sh_ebx;
|
||||
UInt sh_ecx;
|
||||
UInt sh_edx;
|
||||
UInt sh_esi;
|
||||
UInt sh_edi;
|
||||
UInt sh_ebp;
|
||||
UInt sh_esp;
|
||||
UInt sh_eflags;
|
||||
}
|
||||
ThreadState;
|
||||
|
||||
|
||||
/* Copy the specified thread's state into VG_(baseBlock) in
|
||||
preparation for running it. */
|
||||
extern void VG_(load_thread_state)( ThreadId );
|
||||
|
||||
/* Save the specified thread's state back in VG_(baseBlock), and fill
|
||||
VG_(baseBlock) with junk, for sanity-check reasons. */
|
||||
extern void VG_(save_thread_state)( ThreadId );
|
||||
|
||||
/* Get the thread state block for the specified thread. */
|
||||
extern ThreadState* VG_(get_thread_state)( ThreadId );
|
||||
|
||||
|
||||
/* Create, and add to TT/TC, the translation of a client basic
|
||||
block. */
|
||||
extern void VG_(create_translation_for) ( Addr orig_addr );
|
||||
|
||||
/* Return codes from the scheduler. */
|
||||
typedef
|
||||
enum { VgSrc_Deadlock, VgSrc_Shutdown, VgSrc_BbsDone }
|
||||
VgSchedReturnCode;
|
||||
|
||||
/* The scheduler. */
|
||||
extern VgSchedReturnCode VG_(scheduler) ( void );
|
||||
|
||||
extern void VG_(scheduler_init) ( void );
|
||||
|
||||
|
||||
/* vg_oursignalhandler() might longjmp(). Here's the jmp_buf. */
|
||||
extern jmp_buf VG_(scheduler_jmpbuf);
|
||||
/* ... and if so, here's the signal which caused it to do so. */
|
||||
extern Int VG_(longjmpd_on_signal);
|
||||
|
||||
|
||||
/* We check that the initial stack, which we can't move, is allocated
|
||||
here. VG_(scheduler_init) checks this.
|
||||
*/
|
||||
#define VG_STARTUP_STACK_MASK (Addr)0xBFFF8000
|
||||
|
||||
|
||||
/* The red-zone size which we put at the bottom (highest address) of
|
||||
thread stacks, for paranoia reasons. This can be arbitrary, and
|
||||
doesn't really need to be set at compile time. */
|
||||
#define VG_AR_CLIENT_STACKBASE_REDZONE_SZW 4
|
||||
|
||||
#define VG_AR_CLIENT_STACKBASE_REDZONE_SZB \
|
||||
(VG_AR_CLIENT_STACKBASE_REDZONE_SZW * VKI_BYTES_PER_WORD)
|
||||
|
||||
|
||||
|
||||
/* ---------------------------------------------------------------------
|
||||
Exports of vg_signals.c
|
||||
------------------------------------------------------------------ */
|
||||
|
||||
/* The maximum number of basic blocks that we're prepared to run in a
|
||||
signal handler which is called when the client is stuck in a
|
||||
blocking system call. The purpose of this is to check that such a
|
||||
signal handler doesn't merely do a longjmp() and keep going
|
||||
forever; it should return instead. NOTE that this doesn't apply to
|
||||
signals delivered under normal conditions, only when they are
|
||||
delivered and the client is already blocked in a system call. */
|
||||
#define VG_MAX_BBS_IN_IMMEDIATE_SIGNAL 50000
|
||||
|
||||
extern void VG_(sigstartup_actions) ( void );
|
||||
|
||||
extern void VG_(deliver_signals) ( void );
|
||||
extern void VG_(deliver_signals) ( ThreadId );
|
||||
extern void VG_(unblock_host_signal) ( Int sigNo );
|
||||
|
||||
|
||||
/* Fake system calls for signal handling. */
|
||||
extern void VG_(do__NR_sigaction) ( void );
|
||||
extern void VG_(do__NR_sigaction) ( ThreadId tid );
|
||||
extern void VG_(do__NR_sigprocmask) ( Int how, vki_ksigset_t* set );
|
||||
|
||||
/* Bogus return address for signal handlers. Is never executed. */
|
||||
extern void VG_(signalreturn_bogusRA) ( void );
|
||||
|
||||
/* Modify the current thread's state once we have detected it is
|
||||
returning from a signal handler. */
|
||||
extern void VG_(signal_returns) ( ThreadId );
|
||||
|
||||
/* Handy utilities to block/restore all host signals. */
|
||||
extern void VG_(block_all_host_signals)
|
||||
( /* OUT */ vki_ksigset_t* saved_mask );
|
||||
extern void VG_(restore_host_signals)
|
||||
( /* IN */ vki_ksigset_t* saved_mask );
|
||||
|
||||
/* ---------------------------------------------------------------------
Exports of vg_mylibc.c
@@ -420,6 +630,7 @@ extern Char* VG_(strdup) ( ArenaId aid, const Char* s);

extern Char* VG_(getenv) ( Char* name );
extern Int VG_(getpid) ( void );
extern ULong VG_(read_microsecond_timer)( void );


extern Char VG_(toupper) ( Char c );
@@ -444,19 +655,28 @@ extern void VG_(assert_fail) ( Char* expr, Char* file,
Int line, Char* fn )
__attribute__ ((__noreturn__));

/* Later ... extern void vg_restore_SIGABRT ( void ); */

/* Reading files. */
extern Int VG_(open_read) ( Char* pathname );
extern void VG_(close) ( Int fd );
extern Int VG_(read) ( Int fd, void* buf, Int count);
extern Int VG_(write) ( Int fd, void* buf, Int count);

extern Int VG_(fcntl) ( Int fd, Int cmd, Int arg );

extern Int VG_(select)( Int n,
   vki_fd_set* readfds,
   vki_fd_set* writefds,
   vki_fd_set* exceptfds,
   struct vki_timeval * timeout );
extern Int VG_(nanosleep)( const struct vki_timespec *req,
   struct vki_timespec *rem );


/* mmap-ery ... */
extern void* VG_(mmap)( void* start, UInt length,
   UInt prot, UInt flags, UInt fd, UInt offset );

extern Int VG_(munmap)( void* start, Int length );


/* Print a (panic) message, and abort. */
@@ -594,6 +814,18 @@ typedef
Condcode;


/* Descriptions of additional properties of *unconditional* jumps. */
typedef
enum {
JmpBoring=0, /* boring unconditional jump */
JmpCall=1, /* jump due to an x86 call insn */
JmpRet=2, /* jump due to an x86 ret insn */
JmpSyscall=3, /* do a system call, then jump */
JmpClientReq=4 /* do a client request, then jump */
}
JmpKind;


/* Flags. User-level code can only read/write O(verflow), S(ign),
Z(ero), A(ux-carry), C(arry), P(arity), and may also write
D(irection). That's a total of 7 flags. A FlagSet is a bitset,
@@ -662,8 +894,7 @@ typedef
UChar cond; /* condition, for jumps */
Bool smc_check:1; /* do a smc test, if writes memory. */
Bool signed_widen:1; /* signed or unsigned WIDEN ? */
Bool ret_dispatch:1; /* Is this jump as a result of RET ? */
Bool call_dispatch:1; /* Is this jump as a result of CALL ? */
JmpKind jmpkind:3; /* additional properties of unconditional JMP */
}
UInstr;

@@ -845,7 +1076,7 @@ typedef

extern Bool VG_(client_perm_maybe_describe)( Addr a, AddrInfo* ai );

extern UInt VG_(handle_client_request) ( UInt code, Addr aa, UInt nn );
extern UInt VG_(handle_client_request) ( UInt* arg_block );

extern void VG_(delete_client_stack_blocks_following_ESP_change) ( void );

@@ -886,13 +1117,10 @@ extern void VG_(symtab_notify_munmap) ( Addr start, UInt length );
Exports of vg_clientmalloc.c
------------------------------------------------------------------ */

/* these numbers are not arbitrary. if you change them,
adjust vg_dispatch.S as well */

typedef
enum {
Vg_AllocMalloc = 0,
Vg_AllocNew = 1,
Vg_AllocNewVec = 2
}
VgAllocKind;
@@ -912,20 +1140,19 @@ extern void VG_(clientmalloc_done) ( void );
extern void VG_(describe_addr) ( Addr a, AddrInfo* ai );
extern ShadowChunk** VG_(get_malloc_shadows) ( /*OUT*/ UInt* n_shadows );

/* This should never be called; if it is, something's seriously
wrong. */
extern UInt VG_(trap_here) ( UInt arg1, UInt arg2, UInt what_to_do );
/* These are called from the scheduler, when it intercepts a user
request. */
extern void* VG_(client_malloc) ( UInt size, VgAllocKind kind );
extern void* VG_(client_memalign) ( UInt align, UInt size );
extern void VG_(client_free) ( void* ptrV, VgAllocKind kind );
extern void* VG_(client_calloc) ( UInt nmemb, UInt size1 );
extern void* VG_(client_realloc) ( void* ptrV, UInt size_new );


/* ---------------------------------------------------------------------
Exports of vg_main.c
------------------------------------------------------------------ */

/* How big is the saved FPU state? */
#define VG_SIZE_OF_FPUSTATE 108
/* ... and in words ... */
#define VG_SIZE_OF_FPUSTATE_W ((VG_SIZE_OF_FPUSTATE+3)/4)

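(With the byte size above, that works out to (108+3)/4 = 27 four-byte words; the +3 simply makes the integer division round up.)
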
/* A structure used as an intermediary when passing the simulated
CPU's state to some assembly fragments, particularly system calls.
Stuff is copied from baseBlock to here, the assembly magic runs,
@@ -941,10 +1168,6 @@ extern UInt VG_(m_state_static) [8 /* int regs, in Intel order */
extern void VG_(copy_baseBlock_to_m_state_static) ( void );
extern void VG_(copy_m_state_static_to_baseBlock) ( void );

/* Create, and add to TT/TC, the translation of a client basic
block. */
extern void VG_(create_translation_for) ( Addr orig_addr );

/* Called when some unhandleable client behaviour is detected.
Prints a msg and aborts. */
extern void VG_(unimplemented) ( Char* msg );
@@ -960,12 +1183,6 @@ extern UInt VG_(stack)[10000];
vg_deliver_signal_immediately(). */
extern UInt VG_(sigstack)[10000];


/* vg_oursignalhandler() might longjmp(). Here's the jmp_buf. */
extern jmp_buf VG_(toploop_jmpbuf);
/* ... and if so, here's the signal which caused it to do so. */
extern Int VG_(longjmpd_on_signal);

/* Holds client's %esp at the point we gained control. From this the
client's argc, argv and envp are deduced. */
extern Addr VG_(esp_at_startup);
@@ -994,13 +1211,6 @@ extern ULong VG_(bbs_to_go);
/* Counts downwards in vg_run_innerloop. */
extern UInt VG_(dispatch_ctr);

/* If vg_dispatch_ctr is set to 1 to force a stop, its
previous value is saved here. */
extern UInt VG_(dispatch_ctr_SAVED);

/* This is why vg_run_innerloop() exited. */
extern UInt VG_(interrupt_reason);

/* Is the client running on the simulated CPU or the real one? */
extern Bool VG_(running_on_simd_CPU); /* Initially False */

@@ -1068,6 +1278,10 @@ extern UInt VG_(smc_fancy_passed);
extern UInt VG_(sanity_fast_count);
extern UInt VG_(sanity_slow_count);

/* Counts pertaining to the scheduler. */
extern UInt VG_(num_scheduling_events_MINOR);
extern UInt VG_(num_scheduling_events_MAJOR);


/* ---------------------------------------------------------------------
Exports of vg_memory.c
@@ -1095,7 +1309,7 @@ extern Bool VGM_(check_readable_asciiz) ( Addr a, Addr* bad_addr );

/* Sanity checks which may be done at any time. Doing them at
signal-delivery time turns out to be convenient. */
extern void VG_(do_sanity_checks) ( Bool force_expensive );
extern void VG_(do_sanity_checks) ( ThreadId tid, Bool force_expensive );
/* Very cheap ... */
extern Bool VG_(first_and_last_secondaries_look_plausible) ( void );

@@ -1134,22 +1348,21 @@ extern Bool VG_(is_plausible_stack_addr) ( Addr );
Exports of vg_syscall_mem.c
------------------------------------------------------------------ */

/* Counts the depth of nested syscalls. Is used in
VG_(deliver_signals) to discover whether or not the client is in a
syscall (presumably _blocked_ in a syscall) when a signal is
delivered. If so, the signal delivery mechanism needs to behave
differently from normal. */
extern Int VG_(syscall_depth);
extern void VG_(perform_assumed_nonblocking_syscall) ( ThreadId tid );

extern void VG_(wrap_syscall) ( void );
extern void VG_(check_known_blocking_syscall) ( ThreadId tid,
   Int syscallno,
   Int* /*IN*/ res );

extern Bool VG_(is_kerror) ( Int res );

#define KERNEL_DO_SYSCALL(result_lvalue) \
#define KERNEL_DO_SYSCALL(thread_id, result_lvalue) \
VG_(load_thread_state)(thread_id); \
VG_(copy_baseBlock_to_m_state_static)(); \
VG_(do_syscall)(); \
VG_(copy_m_state_static_to_baseBlock)(); \
result_lvalue = VG_(baseBlock)[VGOFF_(m_eax)];
VG_(save_thread_state)(thread_id); \
result_lvalue = VG_(get_thread_state)(thread_id)->m_eax;

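The macro expands to a plain statement sequence, so a caller supplies a thread and an lvalue for the result; a minimal sketch of a call site, mirroring the use in VG_(do__NR_sigaction) further down:

   Int res;
   KERNEL_DO_SYSCALL(tid, res);   /* tid: some valid ThreadId */
   if (VG_(is_kerror)(res))
      /* the syscall failed; res holds a -errno value */ ;
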
/* ---------------------------------------------------------------------
@@ -1242,20 +1455,15 @@ extern void VG_(swizzle_esp_then_start_GDB) ( void );
Exports of vg_dispatch.S
------------------------------------------------------------------ */

extern void VG_(dispatch);
extern void VG_(run_innerloop) ( void );

/* Returns the next orig_addr to run. */
extern Addr VG_(run_singleton_translation) ( Addr trans_addr );
/* Run a thread for a (very short) while, until some event happens
which means we need to defer to the scheduler. */
extern UInt VG_(run_innerloop) ( void );


/* ---------------------------------------------------------------------
Exports of vg_helpers.S
------------------------------------------------------------------ */

/* For doing exits ... */
extern void VG_(helper_request_normal_exit);

/* SMC fast checks. */
extern void VG_(helper_smc_check4);

@@ -1304,9 +1512,6 @@ extern void VG_(helper_value_check2_fail);
extern void VG_(helper_value_check1_fail);
extern void VG_(helper_value_check0_fail);

extern void VG_(helper_do_syscall);
extern void VG_(helper_do_client_request);


/* ---------------------------------------------------------------------
The state of the simulated CPU.
@@ -1434,9 +1639,6 @@ extern Int VGOFF_(helper_value_check2_fail);
extern Int VGOFF_(helper_value_check1_fail);
extern Int VGOFF_(helper_value_check0_fail);

extern Int VGOFF_(helper_do_syscall);
extern Int VGOFF_(helper_do_client_request);

extern Int VGOFF_(helperc_STOREV4); /* :: UInt -> Addr -> void */
extern Int VGOFF_(helperc_STOREV2); /* :: UInt -> Addr -> void */
extern Int VGOFF_(helperc_STOREV1); /* :: UInt -> Addr -> void */
@@ -1449,8 +1651,6 @@ extern Int VGOFF_(handle_esp_assignment); /* :: Addr -> void */
extern Int VGOFF_(fpu_write_check); /* :: Addr -> Int -> void */
extern Int VGOFF_(fpu_read_check); /* :: Addr -> Int -> void */

extern Int VGOFF_(helper_request_normal_exit);



#endif /* ndef __VG_INCLUDE_H */

@@ -135,6 +135,10 @@ typedef
/* Copied from /usr/src/linux-2.4.9-13/include/asm/errno.h */

#define VKI_EINVAL 22 /* Invalid argument */
#define VKI_ENOMEM 12 /* Out of memory */

#define VKI_EWOULDBLOCK VKI_EAGAIN /* Operation would block */
#define VKI_EAGAIN 11 /* Try again */


/* Gawd ... hack ... */
@@ -166,6 +170,108 @@ typedef struct vki__user_cap_data_struct {
#define VKI_SIZEOF_STRUCT_TERMIO 17


/* File descriptor sets, for doing select(). Copied from
/usr/src/linux-2.4.9-31/include/linux/posix_types.h
*/
/*
* This allows for 1024 file descriptors: if NR_OPEN is ever grown
* beyond that you'll have to change this too. But 1024 fd's seem to be
* enough even for such "real" unices like OSF/1, so hopefully this is
* one limit that doesn't have to be changed [again].
*
* Note that POSIX wants the FD_CLEAR(fd,fdsetp) defines to be in
* <sys/time.h> (and thus <linux/time.h>) - but this is a more logical
* place for them. Solved by having dummy defines in <sys/time.h>.
*/

/*
* Those macros may have been defined in <gnu/types.h>. But we always
* use the ones here.
*/
#undef VKI_NFDBITS
#define VKI_NFDBITS (8 * sizeof(unsigned long))

#undef VKI_FD_SETSIZE
#define VKI_FD_SETSIZE 1024

#undef VKI_FDSET_LONGS
#define VKI_FDSET_LONGS (VKI_FD_SETSIZE/VKI_NFDBITS)

#undef VKI_FDELT
#define VKI_FDELT(d) ((d) / VKI_NFDBITS)

#undef VKI_FDMASK
#define VKI_FDMASK(d) (1UL << ((d) % VKI_NFDBITS))

typedef struct {
unsigned long vki_fds_bits [VKI_FDSET_LONGS];
} vki_fd_set;


/* Gawd ...
Copied from /usr/src/linux-2.4.9-31/./include/asm-i386/posix_types.h
*/
#undef VKI_FD_SET
#define VKI_FD_SET(fd,fdsetp) \
__asm__ __volatile__("btsl %1,%0": \
"=m" (*(vki_fd_set *) (fdsetp)):"r" ((int) (fd)))

#undef VKI_FD_CLR
#define VKI_FD_CLR(fd,fdsetp) \
__asm__ __volatile__("btrl %1,%0": \
"=m" (*(vki_fd_set *) (fdsetp)):"r" ((int) (fd)))

#undef VKI_FD_ISSET
#define VKI_FD_ISSET(fd,fdsetp) (__extension__ ({ \
unsigned char __result; \
__asm__ __volatile__("btl %1,%2 ; setb %0" \
:"=q" (__result) :"r" ((int) (fd)), \
"m" (*(vki_fd_set *) (fdsetp))); \
__result; }))

#undef VKI_FD_ZERO
#define VKI_FD_ZERO(fdsetp) \
do { \
int __d0, __d1; \
__asm__ __volatile__("cld ; rep ; stosl" \
:"=m" (*(vki_fd_set *) (fdsetp)), \
"=&c" (__d0), "=&D" (__d1) \
:"a" (0), "1" (VKI_FDSET_LONGS), \
"2" ((vki_fd_set *) (fdsetp)) : "memory"); \
} while (0)

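Together these give a kernel-compatible fd_set. A short sketch of how they combine with the VG_(select) wrapper declared in vg_include.h (fd here is a hypothetical descriptor):

   vki_fd_set readfds;
   struct vki_timeval tv;
   Int fd = 0;                     /* hypothetical */
   VKI_FD_ZERO(&readfds);
   VKI_FD_SET(fd, &readfds);
   tv.tv_sec  = 0;
   tv.tv_usec = 0;                 /* zero timeout == poll */
   if (VG_(select)(fd+1, &readfds, NULL, NULL, &tv) > 0
       && VKI_FD_ISSET(fd, &readfds)) {
      /* fd is readable without blocking */
   }
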

/*
./include/asm-i386/posix_types.h:typedef long __kernel_suseconds_t;
./include/linux/types.h:typedef __kernel_suseconds_t suseconds_t;

./include/asm-i386/posix_types.h:typedef long __kernel_time_t;
./include/linux/types.h:typedef __kernel_time_t time_t;
*/

struct vki_timeval {
/* time_t */ long tv_sec; /* seconds */
/* suseconds_t */ long tv_usec; /* microseconds */
};


/* For fcntl on fds ..
from ./include/asm-i386/fcntl.h */
#define VKI_F_GETFL 3 /* get file->f_flags */
#define VKI_F_SETFL 4 /* set file->f_flags */

#define VKI_O_NONBLOCK 04000

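These are the pieces needed to put a descriptor into non-blocking mode; a plausible sketch using the VG_(fcntl) wrapper (an assumption about how the scheduler uses them, not the literal scheduler code; fd is hypothetical):

   Int flags = VG_(fcntl)(fd, VKI_F_GETFL, 0);
   if (flags != -1)
      (void)VG_(fcntl)(fd, VKI_F_SETFL, flags | VKI_O_NONBLOCK);
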
/* For nanosleep ...
from ./include/linux/time.h */
struct vki_timespec {
/* time_t */ long tv_sec; /* seconds */
long tv_nsec; /* nanoseconds */
};

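A small sketch of using this with the VG_(nanosleep) wrapper declared earlier, e.g. to idle for one millisecond:

   struct vki_timespec req, rem;
   req.tv_sec  = 0;
   req.tv_nsec = 1000000;             /* 1 ms */
   (void)VG_(nanosleep)(&req, &rem);  /* 0 if ok or interrupted */
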

#endif /* ndef __VG_KERNELIFACE_H */

/*--------------------------------------------------------------------*/

@@ -99,8 +99,6 @@ Int VGOFF_(helper_value_check4_fail) = INVALID_OFFSET;
Int VGOFF_(helper_value_check2_fail) = INVALID_OFFSET;
Int VGOFF_(helper_value_check1_fail) = INVALID_OFFSET;
Int VGOFF_(helper_value_check0_fail) = INVALID_OFFSET;
Int VGOFF_(helper_do_syscall) = INVALID_OFFSET;
Int VGOFF_(helper_do_client_request) = INVALID_OFFSET;
Int VGOFF_(helperc_LOADV4) = INVALID_OFFSET;
Int VGOFF_(helperc_LOADV2) = INVALID_OFFSET;
Int VGOFF_(helperc_LOADV1) = INVALID_OFFSET;
@@ -110,7 +108,6 @@ Int VGOFF_(helperc_STOREV1) = INVALID_OFFSET;
Int VGOFF_(handle_esp_assignment) = INVALID_OFFSET;
Int VGOFF_(fpu_write_check) = INVALID_OFFSET;
Int VGOFF_(fpu_read_check) = INVALID_OFFSET;
Int VGOFF_(helper_request_normal_exit) = INVALID_OFFSET;


/* This is the actual defn of baseblock. */
@@ -305,14 +302,6 @@ static void vg_init_baseBlock ( void )
= alloc_BaB_1_set( (Addr) & VG_(helper_DAS) );
VGOFF_(helper_DAA)
= alloc_BaB_1_set( (Addr) & VG_(helper_DAA) );

VGOFF_(helper_request_normal_exit)
= alloc_BaB_1_set( (Addr) & VG_(helper_request_normal_exit) );

VGOFF_(helper_do_syscall)
= alloc_BaB_1_set( (Addr) & VG_(helper_do_syscall) );
VGOFF_(helper_do_client_request)
= alloc_BaB_1_set( (Addr) & VG_(helper_do_client_request) );
}


@@ -336,17 +325,6 @@ Addr VG_(esp_saved_over_syscall_d2);
/* Counts downwards in vg_run_innerloop. */
UInt VG_(dispatch_ctr);

/* If vg_dispatch_ctr is set to 1 to force a stop, its
previous value is saved here. */
UInt VG_(dispatch_ctr_SAVED);

/* This is why vg_run_innerloop() exited. */
UInt VG_(interrupt_reason);

/* vg_oursignalhandler() might longjmp(). Here's the jmp_buf. */
jmp_buf VG_(toploop_jmpbuf);
/* ... and if so, here's the signal which caused it to do so. */
Int VG_(longjmpd_on_signal);

/* 64-bit counter for the number of basic blocks done. */
ULong VG_(bbs_done);
@@ -423,10 +401,12 @@ UInt VG_(smc_discard_count) = 0;


/* Counts pertaining to internal sanity checking. */

UInt VG_(sanity_fast_count) = 0;
UInt VG_(sanity_slow_count) = 0;

/* Counts pertaining to the scheduler. */
UInt VG_(num_scheduling_events_MINOR) = 0;
UInt VG_(num_scheduling_events_MAJOR) = 0;


/* ---------------------------------------------------------------------
@@ -481,176 +461,6 @@ Char** VG_(client_envp);
static Char vg_cmdline_copy[M_VG_CMDLINE_STRLEN];


/* ---------------------------------------------------------------------
Top level simulation loop.
------------------------------------------------------------------ */

/* Create a translation of the client basic block beginning at
orig_addr, and add it to the translation cache & translation table.
This probably doesn't really belong here, but, hey ... */
void VG_(create_translation_for) ( Addr orig_addr )
{
Addr trans_addr;
TTEntry tte;
Int orig_size, trans_size;
/* Ensure there is space to hold a translation. */
VG_(maybe_do_lru_pass)();
VG_(translate)( orig_addr, &orig_size, &trans_addr, &trans_size );
/* Copy data at trans_addr into the translation cache.
Returned pointer is to the code, not to the 4-byte
header. */
/* Since the .orig_size and .trans_size fields are
UShort, be paranoid. */
vg_assert(orig_size > 0 && orig_size < 65536);
vg_assert(trans_size > 0 && trans_size < 65536);
tte.orig_size = orig_size;
tte.orig_addr = orig_addr;
tte.trans_size = trans_size;
tte.trans_addr = VG_(copy_to_transcache)
( trans_addr, trans_size );
tte.mru_epoch = VG_(current_epoch);
/* Free the intermediary -- was allocated by VG_(emit_code). */
VG_(jitfree)( (void*)trans_addr );
/* Add to trans tab and set back pointer. */
VG_(add_to_trans_tab) ( &tte );
/* Update stats. */
VG_(this_epoch_in_count) ++;
VG_(this_epoch_in_osize) += orig_size;
VG_(this_epoch_in_tsize) += trans_size;
VG_(overall_in_count) ++;
VG_(overall_in_osize) += orig_size;
VG_(overall_in_tsize) += trans_size;
/* Record translated area for SMC detection. */
VG_(smc_mark_original) (
VG_(baseBlock)[VGOFF_(m_eip)], orig_size );
}

/* Runs the client program from %EIP (baseBlock[off_eip]) until it
asks to exit, or until vg_bbs_to_go jumps have happened (the latter
case is for debugging). */

void VG_(toploop) ( void )
{
volatile UInt dispatch_ctr_SAVED;
volatile Int done_this_time;

/* For the LRU structures, records when the epoch began. */
volatile ULong epoch_started_at = 0;

while (True) {
next_outer_loop:

/* Age the LRU structures if an epoch has been completed. */
if (VG_(bbs_done) - epoch_started_at >= VG_BBS_PER_EPOCH) {
VG_(current_epoch)++;
epoch_started_at = VG_(bbs_done);
if (VG_(clo_verbosity) > 2) {
UInt tt_used, tc_used;
VG_(get_tt_tc_used) ( &tt_used, &tc_used );
VG_(message)(Vg_UserMsg,
"%lu bbs, in: %d (%d -> %d), out %d (%d -> %d), TT %d, TC %d",
VG_(bbs_done),
VG_(this_epoch_in_count),
VG_(this_epoch_in_osize),
VG_(this_epoch_in_tsize),
VG_(this_epoch_out_count),
VG_(this_epoch_out_osize),
VG_(this_epoch_out_tsize),
tt_used, tc_used
);
}
VG_(this_epoch_in_count) = 0;
VG_(this_epoch_in_osize) = 0;
VG_(this_epoch_in_tsize) = 0;
VG_(this_epoch_out_count) = 0;
VG_(this_epoch_out_osize) = 0;
VG_(this_epoch_out_tsize) = 0;
}

/* Figure out how many bbs to ask vg_run_innerloop to do. */
if (VG_(bbs_to_go) >= VG_SIGCHECK_INTERVAL)
VG_(dispatch_ctr) = 1 + VG_SIGCHECK_INTERVAL;
else
VG_(dispatch_ctr) = 1 + (UInt)VG_(bbs_to_go);

/* ... and remember what we asked for. */
dispatch_ctr_SAVED = VG_(dispatch_ctr);

/* Now have a go at doing them. */
VG_(interrupt_reason) = VG_Y_SIGCHECK;
if (__builtin_setjmp(VG_(toploop_jmpbuf)) == 0) {
/* try this ... */
VG_(run_innerloop)();
/* We get here if the client didn't take a fault. */
switch (VG_(interrupt_reason)) {
case VG_Y_SIGCHECK:
/* The counter fell to zero and no other situation has
been detected. */
vg_assert(VG_(dispatch_ctr) == 0);
done_this_time = dispatch_ctr_SAVED - 1;
VG_(bbs_to_go) -= (ULong)done_this_time;
VG_(bbs_done) += (ULong)done_this_time;
/* Exit if the debug run has ended. */
if (VG_(bbs_to_go) == 0) goto debug_stop;
VG_(deliver_signals)();
VG_(do_sanity_checks)(False);
goto next_outer_loop;
case VG_Y_EXIT:
/* The target program tried to exit. */
done_this_time = dispatch_ctr_SAVED - VG_(dispatch_ctr_SAVED);
done_this_time --;
VG_(bbs_to_go) -= (ULong)done_this_time;
VG_(bbs_done) += (ULong)done_this_time;
return;
case VG_Y_SMC:
/* A write to original code was detected. */
done_this_time = dispatch_ctr_SAVED - VG_(dispatch_ctr_SAVED);
VG_(bbs_to_go) -= (ULong)done_this_time;
VG_(bbs_done) += (ULong)done_this_time;
VG_(flush_transtab)();
goto next_outer_loop;
case VG_Y_TRANSLATE: {
/* Need to provide a translation of code at vg_m_eip. */
done_this_time = dispatch_ctr_SAVED - VG_(dispatch_ctr);
vg_assert(done_this_time > 0);
done_this_time --;
VG_(bbs_to_go) -= (ULong)done_this_time;
VG_(bbs_done) += (ULong)done_this_time;
VG_(create_translation_for)(VG_(baseBlock)[VGOFF_(m_eip)]);
goto next_outer_loop;
}
default:
VG_(panic)("vg_toploop: invalid interrupt reason");
}
} else {
/* We get here if the client took a fault, which caused our
signal handler to longjmp. */
done_this_time = dispatch_ctr_SAVED - VG_(dispatch_ctr);
VG_(bbs_to_go) -= (ULong)done_this_time;
VG_(bbs_done) += (ULong)done_this_time;
if (VG_(interrupt_reason) == VG_Y_EXIT) return;
VG_(deliver_signals)();
VG_(do_sanity_checks)(False);
VG_(unblock_host_signal)(VG_(longjmpd_on_signal));
}
}

/* NOTREACHED */

debug_stop:
/* If we exited because of a debug stop, print the translation
of the last block executed -- by translating it again, and
throwing away the result. */
VG_(printf)(
"======vvvvvvvv====== LAST TRANSLATION ======vvvvvvvv======\n");
VG_(translate)( VG_(baseBlock)[VGOFF_(m_eip)], NULL, NULL, NULL );
VG_(printf)("\n");
VG_(printf)(
"======^^^^^^^^====== LAST TRANSLATION ======^^^^^^^^======\n");
}


/* ---------------------------------------------------------------------
Processing of command-line options.
------------------------------------------------------------------ */
@@ -705,7 +515,7 @@ static void process_cmd_line_options ( void )
VG_(clo_optimise) = True;
VG_(clo_instrument) = True;
VG_(clo_cleanup) = True;
VG_(clo_client_perms) = False;
VG_(clo_client_perms) = True;
VG_(clo_smc_check) = /* VG_CLO_SMC_SOME */ VG_CLO_SMC_NONE;
VG_(clo_trace_syscalls) = False;
VG_(clo_trace_signals) = False;
@@ -1014,6 +824,7 @@ static void process_cmd_line_options ( void )
bad_option("--gdb-attach=yes and --trace-children=yes");
}

#if 0
if (VG_(clo_client_perms) && !VG_(clo_instrument)) {
VG_(message)(Vg_UserMsg, "");
VG_(message)(Vg_UserMsg,
@@ -1023,6 +834,7 @@ static void process_cmd_line_options ( void )

if (VG_(clo_client_perms))
vg_assert(VG_(clo_instrument));
#endif

VG_(clo_logfile_fd) = eventually_logfile_fd;

@@ -1106,8 +918,9 @@ void VG_(copy_m_state_static_to_baseBlock) ( void )
static void vg_show_counts ( void )
{
VG_(message)(Vg_DebugMsg,
" dispatch: %lu basic blocks, %d tt_fast misses.",
VG_(bbs_done), VG_(tt_fast_misses));
" lru: %d epochs, %d clearings.",
VG_(current_epoch),
VG_(number_of_lrus) );
VG_(message)(Vg_DebugMsg,
"translate: new %d (%d -> %d), discard %d (%d -> %d).",
VG_(overall_in_count),
@@ -1117,9 +930,10 @@ static void vg_show_counts ( void )
VG_(overall_out_osize),
VG_(overall_out_tsize) );
VG_(message)(Vg_DebugMsg,
" lru: %d epochs, %d clearings.",
VG_(current_epoch),
VG_(number_of_lrus) );
" dispatch: %lu basic blocks, %d/%d sched events, %d tt_fast misses.",
VG_(bbs_done), VG_(num_scheduling_events_MAJOR),
VG_(num_scheduling_events_MINOR),
VG_(tt_fast_misses));
VG_(message)(Vg_DebugMsg,
"reg-alloc: %d t-req-spill, "
"%d+%d orig+spill uis, %d total-reg-r.",
@@ -1150,7 +964,8 @@ static void vg_show_counts ( void )

void VG_(main) ( void )
{
Int i;
VgSchedReturnCode src;

/* Set up our stack sanity-check words. */
for (i = 0; i < 10; i++) {
@@ -1211,11 +1026,18 @@ void VG_(main) ( void )
VG_(message)(Vg_UserMsg, "");

VG_(bbs_to_go) = VG_(clo_stop_after);
VG_(toploop)();

VG_(scheduler_init)();
src = VG_(scheduler)();

if (VG_(clo_verbosity) > 0)
VG_(message)(Vg_UserMsg, "");

if (src == VgSrc_Deadlock) {
VG_(message)(Vg_UserMsg,
"Warning: pthread scheduler exited due to deadlock");
}

if (VG_(clo_instrument)) {
VG_(show_all_errors)();
VG_(clientmalloc_done)();
@@ -1226,8 +1048,9 @@ void VG_(main) ( void )
if (VG_(clo_leak_check)) VG_(detect_memory_leaks)();
}
VG_(running_on_simd_CPU) = False;

VG_(do_sanity_checks)(True /*include expensive checks*/ );

VG_(do_sanity_checks)( 0 /* root thread */,
True /*include expensive checks*/ );

if (VG_(clo_verbosity) > 1)
vg_show_counts();
@@ -1262,6 +1085,7 @@ void VG_(main) ( void )
}

/* Prepare to restore state to the real CPU. */
VG_(load_thread_state)(0);
VG_(copy_baseBlock_to_m_state_static)();

/* This pushes a return address on the simulator's stack, which
@@ -1349,116 +1173,6 @@ extern void VG_(unimplemented) ( Char* msg )
}


/*-------------------------------------------------------------*/
/*--- Replace some C lib things with equivs which don't get ---*/
/*--- spurious value warnings. THEY RUN ON SIMD CPU! ---*/
/*-------------------------------------------------------------*/

char* strrchr ( const char* s, int c )
{
UChar ch = (UChar)((UInt)c);
UChar* p = (UChar*)s;
UChar* last = NULL;
while (True) {
if (*p == ch) last = p;
if (*p == 0) return last;
p++;
}
}

char* strchr ( const char* s, int c )
{
UChar ch = (UChar)((UInt)c);
UChar* p = (UChar*)s;
while (True) {
if (*p == ch) return p;
if (*p == 0) return NULL;
p++;
}
}

char* strcat ( char* dest, const char* src )
{
Char* dest_orig = dest;
while (*dest) dest++;
while (*src) *dest++ = *src++;
*dest = 0;
return dest_orig;
}

unsigned int strlen ( const char* str )
{
UInt i = 0;
while (str[i] != 0) i++;
return i;
}

char* strcpy ( char* dest, const char* src )
{
Char* dest_orig = dest;
while (*src) *dest++ = *src++;
*dest = 0;
return dest_orig;
}

int strncmp ( const char* s1, const char* s2, unsigned int nmax )
{
unsigned int n = 0;
while (True) {
if (n >= nmax) return 0;
if (*s1 == 0 && *s2 == 0) return 0;
if (*s1 == 0) return -1;
if (*s2 == 0) return 1;

if (*(UChar*)s1 < *(UChar*)s2) return -1;
if (*(UChar*)s1 > *(UChar*)s2) return 1;

s1++; s2++; n++;
}
}

int strcmp ( const char* s1, const char* s2 )
{
while (True) {
if (*s1 == 0 && *s2 == 0) return 0;
if (*s1 == 0) return -1;
if (*s2 == 0) return 1;

if (*(char*)s1 < *(char*)s2) return -1;
if (*(char*)s1 > *(char*)s2) return 1;

s1++; s2++;
}
}

void* memchr(const void *s, int c, unsigned int n)
{
unsigned int i;
UChar c0 = (UChar)c;
UChar* p = (UChar*)s;
for (i = 0; i < n; i++)
if (p[i] == c0) return (void*)(&p[i]);
return NULL;
}

void* memcpy( void *dst, const void *src, unsigned int len )
{
register char *d;
register char *s;
if ( dst > src ) {
d = (char *)dst + len - 1;
s = (char *)src + len - 1;
while ( len-- )
*d-- = *s--;
} else if ( dst < src ) {
d = (char *)dst;
s = (char *)src;
while ( len-- )
*d++ = *s++;
}
return dst;
}

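Note in passing that this memcpy, unlike the ISO C one, picks its copy direction from the pointer order, so it is also safe on overlapping buffers; that is, it has memmove semantics.
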
/*--------------------------------------------------------------------*/
/*--- end vg_main.c ---*/
/*--------------------------------------------------------------------*/

@@ -2122,10 +2122,11 @@ Bool VG_(first_and_last_secondaries_look_plausible) ( void )
/* A fast sanity check -- suitable for calling circa once per
millisecond. */

void VG_(do_sanity_checks) ( Bool force_expensive )
void VG_(do_sanity_checks) ( ThreadId tid, Bool force_expensive )
{
Int i;
Bool do_expensive_checks;
ThreadState* tst;

if (VG_(sanity_level) < 1) return;

@@ -2133,6 +2134,9 @@ void VG_(do_sanity_checks) ( Bool force_expensive )

VG_(sanity_fast_count)++;

tst = VG_(get_thread_state)(tid);
vg_assert(tst != NULL && tst->status != VgTs_Empty);

/* Check that we haven't overrun our private stack. */
for (i = 0; i < 10; i++) {
vg_assert(VG_(stack)[i]
@@ -2146,7 +2150,7 @@ void VG_(do_sanity_checks) ( Bool force_expensive )
if (VG_(clo_instrument)) {

/* Check that the eflags tag is as expected. */
UInt vv = VG_(baseBlock)[VGOFF_(sh_eflags)];
UInt vv = tst->sh_eflags;
vg_assert(vv == VGM_EFLAGS_VALID || vv == VGM_EFLAGS_INVALID);

/* Check that nobody has spuriously claimed that the first or
@@ -2154,12 +2158,6 @@ void VG_(do_sanity_checks) ( Bool force_expensive )
vg_assert(VG_(first_and_last_secondaries_look_plausible)());
}

# if 0
if ( (VG_(baseBlock)[VGOFF_(sh_eflags)] & 1) == 1)
VG_(printf)("UNDEF\n") ; else
VG_(printf)("def\n") ;
# endif

/* --- Now some more expensive checks. ---*/

/* Once every 25 times, check some more expensive stuff. */
@@ -2233,6 +2231,9 @@ static void uint_to_bits ( UInt x, Char* str )
vg_assert(w == 36);
}

/* Caution! Not vthread-safe; looks in VG_(baseBlock), not the thread
state table. */

void VG_(show_reg_tags) ( void )
{
Char buf1[36];

@@ -232,7 +232,7 @@ Int VG_(ksignal)(Int signum, void (*sighandler)(Int))


/* ---------------------------------------------------------------------
mmap/munmap, exit
mmap/munmap, exit, fcntl
------------------------------------------------------------------ */

/* Returns -1 on failure. */
@@ -266,6 +266,43 @@ void VG_(exit)( Int status )
vg_assert(2+2 == 5);
}

/* Returns -1 on error. */
Int VG_(fcntl) ( Int fd, Int cmd, Int arg )
{
Int res = vg_do_syscall3(__NR_fcntl, fd, cmd, arg);
return VG_(is_kerror)(res) ? -1 : res;
}

/* Returns -1 on error. */
Int VG_(select)( Int n,
vki_fd_set* readfds,
vki_fd_set* writefds,
vki_fd_set* exceptfds,
struct vki_timeval * timeout )
{
Int res;
UInt args[5];
args[0] = n;
args[1] = (UInt)readfds;
args[2] = (UInt)writefds;
args[3] = (UInt)exceptfds;
args[4] = (UInt)timeout;
res = vg_do_syscall1(__NR_select, (UInt)(&(args[0])) );
return VG_(is_kerror)(res) ? -1 : res;
return res;
}

/* Returns -1 on error, but 0 if ok or interrupted. */
Int VG_(nanosleep)( const struct vki_timespec *req,
struct vki_timespec *rem )
{
Int res;
res = vg_do_syscall2(__NR_nanosleep, (UInt)req, (UInt)rem);
if (res == -VKI_EINVAL) return -1;
return 0;
}


/* ---------------------------------------------------------------------
printf implementation. The key function, vg_vprintf(), emits chars
into a caller-supplied function. Distantly derived from:
@@ -809,7 +846,6 @@ void VG_(assert_fail) ( Char* expr, Char* file, Int line, Char* fn )
"valgrind", file, line, fn, expr );
VG_(printf)("Please report this bug to me at: %s\n\n", EMAIL_ADDR);
VG_(shutdown_logging)();
/* vg_restore_SIGABRT(); */
VG_(exit)(1);
}

@@ -819,7 +855,6 @@ void VG_(panic) ( Char* str )
VG_(printf)("Basic block ctr is approximately %llu\n", VG_(bbs_done) );
VG_(printf)("Please report this bug to me at: %s\n\n", EMAIL_ADDR);
VG_(shutdown_logging)();
/* vg_restore_SIGABRT(); */
VG_(exit)(1);
}

@@ -900,6 +935,16 @@ Int VG_(getpid) ( void )
return res;
}

/* Read a notional elapsed (wallclock-time) timer, giving a 64-bit
microseconds count. */
ULong VG_(read_microsecond_timer)( void )
{
Int res;
struct vki_timeval tv;
res = vg_do_syscall2(__NR_gettimeofday, (UInt)&tv, (UInt)NULL);
vg_assert(!VG_(is_kerror)(res));
return (1000000ULL * (ULong)(tv.tv_sec)) + (ULong)(tv.tv_usec);
}

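A sketch of the intended use, timing an interval in microseconds:

   ULong t0 = VG_(read_microsecond_timer)();
   /* ... do work ... */
   ULong elapsed_us = VG_(read_microsecond_timer)() - t0;
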
/* ---------------------------------------------------------------------
Primitive support for bagging memory via mmap.

@@ -36,13 +36,6 @@
#include "vg_unsafe.h"


/* ---------------------------------------------------------------------
An implementation of signal sets and other grunge, identical to
that in the target kernels (Linux 2.2.X and 2.4.X).
------------------------------------------------------------------ */



/* ---------------------------------------------------------------------
Signal state for this process.
------------------------------------------------------------------ */
@@ -64,8 +57,29 @@ void* VG_(sighandler)[VKI_KNSIG];

void* VG_(sigpending)[VKI_KNSIG];

/* See decl in vg_include.h for explanation. */
Int VG_(syscall_depth) = 0;

/* ---------------------------------------------------------------------
Handy utilities to block/restore all host signals.
------------------------------------------------------------------ */

/* Block all host signals, dumping the old mask in *saved_mask. */
void VG_(block_all_host_signals) ( /* OUT */ vki_ksigset_t* saved_mask )
{
Int ret;
vki_ksigset_t block_procmask;
VG_(ksigfillset)(&block_procmask);
ret = VG_(ksigprocmask)
(VKI_SIG_SETMASK, &block_procmask, saved_mask);
vg_assert(ret == 0);
}

/* Restore the blocking mask using the supplied saved one. */
void VG_(restore_host_signals) ( /* IN */ vki_ksigset_t* saved_mask )
{
Int ret;
ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, saved_mask, NULL);
vg_assert(ret == 0);
}


/* ---------------------------------------------------------------------
@@ -78,9 +92,14 @@ Int VG_(syscall_depth) = 0;

typedef
struct {
/* These are parameters to the signal handler. */
UInt retaddr; /* Sig handler's (bogus) return address */
Int sigNo; /* The arg to the sig handler. */
Addr psigInfo; /* ptr to siginfo_t; NULL for now. */
Addr puContext; /* ptr to ucontext; NULL for now. */
/* Sanity check word. */
UInt magicPI;
/* Saved processor state. */
UInt fpustate[VG_SIZE_OF_FPUSTATE_W];
UInt eax;
UInt ecx;
@@ -92,9 +111,14 @@ typedef
UInt edi;
Addr eip;
UInt eflags;
/* Scheduler-private stuff: what was the thread's status prior to
delivering this signal? */
ThreadStatus status;
/* Sanity check word. Is the highest-addressed word; do not
move!*/
UInt magicE;
}
VgSigContext;
VgSigFrame;

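(The two sanity words are recognisable constants: the hex digits 31415927 and 27182818 are the leading digits of pi and e respectively, which makes a trashed frame easy to spot in a debugger.)
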

@@ -113,35 +137,52 @@ void VG_(signalreturn_bogusRA) ( void )
handler. This includes the signal number and a bogus return
address. */
static
void vg_push_signal_frame ( int sigNo )
void vg_push_signal_frame ( ThreadId tid, int sigNo )
{
Int i;
UInt esp;
VgSigContext sigctx;
Addr esp;
VgSigFrame* frame;
ThreadState* tst;

tst = VG_(get_thread_state)(tid);
esp = tst->m_esp;

esp -= sizeof(VgSigFrame);
frame = (VgSigFrame*)esp;
/* Assert that the frame is placed correctly. */
vg_assert( (sizeof(VgSigFrame) & 0x3) == 0 );
vg_assert( ((Char*)(&frame->magicE)) + sizeof(UInt)
== ((Char*)(tst->m_esp)) );

frame->retaddr = (UInt)(&VG_(signalreturn_bogusRA));
frame->sigNo = sigNo;
frame->psigInfo = (Addr)NULL;
frame->puContext = (Addr)NULL;
frame->magicPI = 0x31415927;

for (i = 0; i < VG_SIZE_OF_FPUSTATE_W; i++)
sigctx.fpustate[i] = VG_(baseBlock)[VGOFF_(m_fpustate) + i];
frame->fpustate[i] = tst->m_fpu[i];

sigctx.magicPI = 0x31415927;
sigctx.magicE = 0x27182818;
sigctx.eax = VG_(baseBlock)[VGOFF_(m_eax)];
sigctx.ecx = VG_(baseBlock)[VGOFF_(m_ecx)];
sigctx.edx = VG_(baseBlock)[VGOFF_(m_edx)];
sigctx.ebx = VG_(baseBlock)[VGOFF_(m_ebx)];
sigctx.ebp = VG_(baseBlock)[VGOFF_(m_ebp)];
sigctx.esp = VG_(baseBlock)[VGOFF_(m_esp)];
sigctx.esi = VG_(baseBlock)[VGOFF_(m_esi)];
sigctx.edi = VG_(baseBlock)[VGOFF_(m_edi)];
sigctx.eflags = VG_(baseBlock)[VGOFF_(m_eflags)];
sigctx.eip = VG_(baseBlock)[VGOFF_(m_eip)];
sigctx.retaddr = (UInt)(&VG_(signalreturn_bogusRA));
sigctx.sigNo = sigNo;
frame->eax = tst->m_eax;
frame->ecx = tst->m_ecx;
frame->edx = tst->m_edx;
frame->ebx = tst->m_ebx;
frame->ebp = tst->m_ebp;
frame->esp = tst->m_esp;
frame->esi = tst->m_esi;
frame->edi = tst->m_edi;
frame->eip = tst->m_eip;
frame->eflags = tst->m_eflags;

esp = VG_(baseBlock)[VGOFF_(m_esp)];
vg_assert((sizeof(VgSigContext) & 0x3) == 0);
frame->status = tst->status;

esp -= sizeof(VgSigContext);
for (i = 0; i < sizeof(VgSigContext)/4; i++)
((UInt*)esp)[i] = ((UInt*)(&sigctx))[i];
frame->magicE = 0x27182818;

/* Set the thread so it will next run the handler. */
tst->m_esp = esp;
tst->m_eip = (Addr)VG_(sigpending)[sigNo];
/* This thread needs to be marked runnable, but we leave that for the
caller to do. */

/* Make sigNo and retaddr fields readable -- at 0(%ESP) and 4(%ESP) */
if (VG_(clo_instrument)) {
@@ -149,11 +190,9 @@ void vg_push_signal_frame ( int sigNo )
VGM_(make_readable) ( ((Addr)esp)+4 ,4 );
}

VG_(baseBlock)[VGOFF_(m_esp)] = esp;
VG_(baseBlock)[VGOFF_(m_eip)] = (Addr)VG_(sigpending)[sigNo];
/*
VG_(printf)("pushed signal frame; %%ESP now = %p, next %%EBP = %p\n",
esp, VG_(baseBlock)[VGOFF_(m_eip)]);
esp, tst->m_eip);
*/
}

@@ -162,43 +201,56 @@ void vg_push_signal_frame ( int sigNo )
simulated machine state, and return the signal number that the
frame was for. */
static
Int vg_pop_signal_frame ( void )
Int vg_pop_signal_frame ( ThreadId tid )
{
UInt esp;
Addr esp;
Int sigNo, i;
VgSigContext* sigctx;
/* esp is now pointing at the magicPI word on the stack, viz,
eight bytes above the bottom of the vg_sigcontext.
*/
esp = VG_(baseBlock)[VGOFF_(m_esp)];
sigctx = (VgSigContext*)(esp-4);
VgSigFrame* frame;
ThreadState* tst;

vg_assert(sigctx->magicPI == 0x31415927);
vg_assert(sigctx->magicE == 0x27182818);
tst = VG_(get_thread_state)(tid);

/* esp is now pointing at the sigNo field in the signal frame. */
esp = tst->m_esp;
frame = (VgSigFrame*)(esp-4);

vg_assert(frame->magicPI == 0x31415927);
vg_assert(frame->magicE == 0x27182818);
if (VG_(clo_trace_signals))
VG_(message)(Vg_DebugMsg, "vg_pop_signal_frame: valid magic");

/* restore machine state */
for (i = 0; i < VG_SIZE_OF_FPUSTATE_W; i++)
VG_(baseBlock)[VGOFF_(m_fpustate) + i] = sigctx->fpustate[i];
tst->m_fpu[i] = frame->fpustate[i];

/* Mark the sigctx structure as nonaccessible. Has to happen
_before_ vg_m_state.m_esp is given a new value.*/
if (VG_(clo_instrument))
VGM_(handle_esp_assignment) ( sigctx->esp );
/* Mark the frame structure as nonaccessible. Has to happen
_before_ vg_m_state.m_esp is given a new value.
handle_esp_assignment reads %ESP from baseBlock, so we park it
there first. Re-place the junk there afterwards. */
if (VG_(clo_instrument)) {
vg_assert(VG_(baseBlock)[VGOFF_(m_esp)] == 0xDEADBEEF);
VG_(baseBlock)[VGOFF_(m_esp)] = tst->m_esp;
VGM_(handle_esp_assignment) ( frame->esp );
VG_(baseBlock)[VGOFF_(m_esp)] = 0xDEADBEEF;
}

/* Restore machine state from the saved context. */
VG_(baseBlock)[VGOFF_(m_eax)] = sigctx->eax;
VG_(baseBlock)[VGOFF_(m_ecx)] = sigctx->ecx;
VG_(baseBlock)[VGOFF_(m_edx)] = sigctx->edx;
VG_(baseBlock)[VGOFF_(m_ebx)] = sigctx->ebx;
VG_(baseBlock)[VGOFF_(m_ebp)] = sigctx->ebp;
VG_(baseBlock)[VGOFF_(m_esp)] = sigctx->esp;
VG_(baseBlock)[VGOFF_(m_esi)] = sigctx->esi;
VG_(baseBlock)[VGOFF_(m_edi)] = sigctx->edi;
VG_(baseBlock)[VGOFF_(m_eflags)] = sigctx->eflags;
VG_(baseBlock)[VGOFF_(m_eip)] = sigctx->eip;
sigNo = sigctx->sigNo;
tst->m_eax = frame->eax;
tst->m_ecx = frame->ecx;
tst->m_edx = frame->edx;
tst->m_ebx = frame->ebx;
tst->m_ebp = frame->ebp;
tst->m_esp = frame->esp;
tst->m_esi = frame->esi;
tst->m_edi = frame->edi;
tst->m_eflags = frame->eflags;
tst->m_eip = frame->eip;
sigNo = frame->sigNo;

/* And restore the thread's status to what it was before the signal
was delivered. */
tst->status = frame->status;

return sigNo;
}

@@ -207,18 +259,17 @@ Int vg_pop_signal_frame ( void )
VgSigContext and continue with whatever was going on before the
handler ran. */

void VG_(signal_returns) ( void )
void VG_(signal_returns) ( ThreadId tid )
{
Int sigNo, ret;
vki_ksigset_t block_procmask;
Int sigNo;
vki_ksigset_t saved_procmask;

/* Block host signals ... */
VG_(ksigfillset)(&block_procmask);
ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &block_procmask, &saved_procmask);
vg_assert(ret == 0);
VG_(block_all_host_signals)( &saved_procmask );

sigNo = vg_pop_signal_frame();
/* Pop the signal frame and restore tid's status to what it was
before the signal was delivered. */
sigNo = vg_pop_signal_frame(tid);

/* You would have thought that the following assertion made sense
here:
@@ -242,40 +293,18 @@ void VG_(signal_returns) ( void )
VG_(sigpending)[sigNo] = VG_SIGIDLE;

/* Unlock and return. */
ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &saved_procmask, NULL);
vg_assert(ret == 0);
VG_(restore_host_signals)( &saved_procmask );

/* The main dispatch loop now continues at vg_m_eip. */
}


/* Restore the default host behaviour of SIGABRT, and unblock it,
so we can exit the simulator cleanly by doing exit/abort/assert fail.
*/
void VG_(restore_SIGABRT) ( void )
{
vki_ksigset_t set;
vki_ksigaction act;
act.ksa_flags = VKI_SA_RESTART;
act.ksa_handler = VKI_SIG_DFL;
VG_(ksigemptyset)(&act.ksa_mask);

VG_(ksigemptyset)(&set);
VG_(ksigaddset)(&set,VKI_SIGABRT);

/* If this doesn't work, tough. Don't check return code. */
VG_(ksigaction)(VKI_SIGABRT, &act, NULL);
VG_(ksigprocmask)(VKI_SIG_UNBLOCK, &set, NULL);
/* Scheduler now can resume this thread, or perhaps some other. */
}


/* Deliver all pending signals, by building stack frames for their
handlers. */
void VG_(deliver_signals) ( void )
void VG_(deliver_signals) ( ThreadId tid )
{
vki_ksigset_t block_procmask;
vki_ksigset_t saved_procmask;
Int ret, sigNo;
Int sigNo;
Bool found;

/* A cheap check. We don't need to have exclusive access
@@ -295,10 +324,9 @@ void VG_(deliver_signals) ( void )
blocking all the host's signals. That means vg_oursignalhandler
can't run whilst we are messing with stuff.
*/
VG_(ksigfillset)(&block_procmask);
ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &block_procmask, &saved_procmask);
vg_assert(ret == 0);
VG_(block_all_host_signals)( &saved_procmask );

/* Look for signals to deliver ... */
for (sigNo = 1; sigNo < VKI_KNSIG; sigNo++) {
if (VG_(sigpending)[sigNo] == VG_SIGIDLE ||
VG_(sigpending)[sigNo] == VG_SIGRUNNING) continue;
@@ -310,94 +338,19 @@ void VG_(deliver_signals) ( void )
%EIP so that when execution continues, we will enter the
signal handler with the frame on top of the client's stack,
as it expects. */
vg_push_signal_frame ( sigNo );

vg_push_signal_frame ( tid, sigNo );
VG_(get_thread_state)(tid)->status = VgTs_Runnable;

/* Signify that the signal has been delivered. */
VG_(sigpending)[sigNo] = VG_SIGRUNNING;
}

/* Unlock and return. */
ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &saved_procmask, NULL);
vg_assert(ret == 0);
VG_(restore_host_signals)( &saved_procmask );
return;
}


/* ----------- HACK ALERT ----------- */
/* Note carefully that this runs with all host signals disabled! */
static
void vg_deliver_signal_immediately ( Int sigNo )
{
Int n_bbs_done;
Int sigNo2;
Addr next_orig_addr;
Addr next_trans_addr;

if (VG_(clo_verbosity) > 0
&& (True || VG_(clo_trace_signals)))
VG_(message)(Vg_DebugExtraMsg,
"deliver signal %d immediately: BEGIN", sigNo );
/* VG_(printf)("resumption addr is %p\n",
VG_(baseBlock)[VGOFF_(m_eip)]); */

vg_push_signal_frame ( sigNo );
n_bbs_done = 0;

/* Single-step the client (ie, run the handler) until it jumps to
VG_(signalreturn_bogusRA) */

while (True) {

if (n_bbs_done >= VG_MAX_BBS_IN_IMMEDIATE_SIGNAL)
VG_(unimplemented)(
"handling signal whilst client blocked in syscall: "
"handler runs too long"
);

next_orig_addr = VG_(baseBlock)[VGOFF_(m_eip)];

if (next_orig_addr == (Addr)(&VG_(trap_here)))
VG_(unimplemented)(
"handling signal whilst client blocked in syscall: "
"handler calls malloc (et al)"
);

/* VG_(printf)("next orig addr = %p\n", next_orig_addr); */
if (next_orig_addr == (Addr)(&VG_(signalreturn_bogusRA)))
break;

next_trans_addr = VG_(search_transtab) ( next_orig_addr );
if (next_trans_addr == (Addr)NULL) {
VG_(create_translation_for) ( next_orig_addr );
next_trans_addr = VG_(search_transtab) ( next_orig_addr );
}

vg_assert(next_trans_addr != (Addr)NULL);
next_orig_addr = VG_(run_singleton_translation)(next_trans_addr);
VG_(baseBlock)[VGOFF_(m_eip)] = next_orig_addr;
n_bbs_done++;
}

sigNo2 = vg_pop_signal_frame();
vg_assert(sigNo2 == sigNo);

if (VG_(clo_verbosity) > 0
&& (True || VG_(clo_trace_signals)))
VG_(message)(Vg_DebugExtraMsg,
"deliver signal %d immediately: END, %d bbs done",
sigNo, n_bbs_done );

/* Invalidate the tt_fast cache. We've been (potentially) adding
translations and even possibly doing LRUs without keeping it up
to date, so we'd better nuke it before going any further, to
avoid inconsistencies with the main TT/TC structure. */
VG_(invalidate_tt_fast)();
}


/* ----------- end of HACK ALERT ----------- */


/* Receive a signal from the host, and either discard it or park it in
the queue of pending signals. All other signals will be blocked
when this handler runs. Runs with all host signals blocked, so as
@@ -405,8 +358,7 @@ void vg_deliver_signal_immediately ( Int sigNo )

static void VG_(oursignalhandler) ( Int sigNo )
{
Int ret;
vki_ksigset_t block_procmask;
Int dummy_local;
vki_ksigset_t saved_procmask;

if (VG_(clo_trace_signals)) {
@@ -418,20 +370,24 @@ static void VG_(oursignalhandler) ( Int sigNo )
/* Sanity check. Ensure we're really running on the signal stack
we asked for. */
if ( !(
((Char*)(&(VG_(sigstack)[0])) <= (Char*)(&ret))
((Char*)(&(VG_(sigstack)[0])) <= (Char*)(&dummy_local))
&&
((Char*)(&ret) < (Char*)(&(VG_(sigstack)[10000])))
((Char*)(&dummy_local) < (Char*)(&(VG_(sigstack)[10000])))
)
) {
VG_(message)(Vg_DebugMsg, "FATAL: signal delivered on the wrong stack?!");
VG_(message)(Vg_DebugMsg, "A possible workaround follows. Please tell me");
VG_(message)(Vg_DebugMsg, "(jseward@acm.org) if the suggested workaround doesn't help.");
VG_(message)(Vg_DebugMsg,
"FATAL: signal delivered on the wrong stack?!");
VG_(message)(Vg_DebugMsg,
"A possible workaround follows. Please tell me");
VG_(message)(Vg_DebugMsg,
"(jseward@acm.org) if the suggested workaround doesn't help.");
VG_(unimplemented)
("support for progs compiled with -p/-pg; rebuild your prog without -p/-pg");
("support for progs compiled with -p/-pg; "
"rebuild your prog without -p/-pg");
}

vg_assert((Char*)(&(VG_(sigstack)[0])) <= (Char*)(&ret));
vg_assert((Char*)(&ret) < (Char*)(&(VG_(sigstack)[10000])));
vg_assert((Char*)(&(VG_(sigstack)[0])) <= (Char*)(&dummy_local));
vg_assert((Char*)(&dummy_local) < (Char*)(&(VG_(sigstack)[10000])));

if (sigNo == VKI_SIGABRT && VG_(sighandler)[sigNo] == NULL) {
/* We get here if SIGABRT is delivered and the client hasn't
@@ -442,21 +398,19 @@ static void VG_(oursignalhandler) ( Int sigNo )
VG_(end_msg)();
}
VG_(ksignal)(VKI_SIGABRT, VKI_SIG_DFL);
VG_(interrupt_reason) = VG_Y_EXIT;
VG_(longjmpd_on_signal) = VKI_SIGABRT;
__builtin_longjmp(VG_(toploop_jmpbuf),1);
__builtin_longjmp(VG_(scheduler_jmpbuf),1);
}

/* Block all host signals. */
VG_(ksigfillset)(&block_procmask);
ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &block_procmask, &saved_procmask);
vg_assert(ret == 0);
VG_(block_all_host_signals)( &saved_procmask );

if (VG_(sighandler)[sigNo] == NULL) {
if (VG_(clo_trace_signals)) {
VG_(add_to_msg)("unexpected!");
VG_(end_msg)();
}
/* Note: we panic with all signals blocked here. Don't think
that matters. */
VG_(panic)("vg_oursignalhandler: unexpected signal");
}

@@ -478,47 +432,26 @@ static void VG_(oursignalhandler) ( Int sigNo )
}
}
else {
/* Ok, we'd better deliver it to the client, one way or another. */
/* Ok, we'd better deliver it to the client. */
vg_assert(VG_(sigpending)[sigNo] == VG_SIGIDLE);

if (VG_(syscall_depth) == 0) {
/* The usual case; delivering a signal to the client, and the
client is not currently in a syscall. Queue it up for
delivery at some point in the future. */
VG_(sigpending)[sigNo] = VG_(sighandler)[sigNo];
if (VG_(clo_trace_signals)) {
VG_(add_to_msg)("queued" );
VG_(end_msg)();
}
} else {
/* The nasty case, which was causing kmail to freeze up: the
client is (presumably blocked) in a syscall. We have to
deliver the signal right now, because it may be that
running the sighandler is the only way that the syscall
will be able to return. In which case, if we don't do
that, the client will deadlock. */
if (VG_(clo_trace_signals)) {
VG_(add_to_msg)("delivering immediately" );
VG_(end_msg)();
}
/* Note that this runs with all host signals blocked. */
VG_(sigpending)[sigNo] = VG_(sighandler)[sigNo];
vg_deliver_signal_immediately(sigNo);
VG_(sigpending)[sigNo] = VG_SIGIDLE;
/* VG_(printf)("resuming at %p\n", VG_(baseBlock)[VGOFF_(m_eip)]); */
/* Queue it up for delivery at some point in the future. */
VG_(sigpending)[sigNo] = VG_(sighandler)[sigNo];
if (VG_(clo_trace_signals)) {
VG_(add_to_msg)("queued" );
VG_(end_msg)();
}
}

/* We've finished messing with the queue, so re-enable host signals. */
ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &saved_procmask, NULL);
/* We've finished messing with the queue, so re-enable host
signals. */
VG_(restore_host_signals)( &saved_procmask );

vg_assert(ret == 0);
if (sigNo == VKI_SIGSEGV || sigNo == VKI_SIGBUS
|| sigNo == VKI_SIGFPE || sigNo == VKI_SIGILL) {
/* Can't continue; must longjmp and thus enter the sighandler
immediately. */
/* Can't continue; must longjmp back to the scheduler and thus
enter the sighandler immediately. */
VG_(longjmpd_on_signal) = sigNo;
__builtin_longjmp(VG_(toploop_jmpbuf),1);
__builtin_longjmp(VG_(scheduler_jmpbuf),1);
}
}

@@ -559,17 +492,14 @@ void VG_(sigstartup_actions) ( void )
{
Int i, ret;

vki_ksigset_t block_procmask;
vki_ksigset_t saved_procmask;
vki_kstack_t altstack_info;
vki_ksigaction sa;

/* VG_(printf)("SIGSTARTUP\n"); */
/* Block all signals.
saved_procmask remembers the previous mask. */
VG_(ksigfillset)(&block_procmask);
ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &block_procmask, &saved_procmask);
vg_assert(ret == 0);
VG_(block_all_host_signals)( &saved_procmask );

/* Register an alternative stack for our own signal handler to run
on. */
@@ -615,8 +545,7 @@ void VG_(sigstartup_actions) ( void )
VG_(ksignal)(VKI_SIGABRT, &VG_(oursignalhandler));

/* Finally, restore the blocking mask. */
ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &saved_procmask, NULL);
vg_assert(ret == 0);
VG_(restore_host_signals)( &saved_procmask );
}


@@ -635,14 +564,10 @@ void VG_(sigshutdown_actions) ( void )
{
Int i, ret;

vki_ksigset_t block_procmask;
vki_ksigset_t saved_procmask;
vki_ksigaction sa;

/* Block all signals. */
VG_(ksigfillset)(&block_procmask);
ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &block_procmask, &saved_procmask);
vg_assert(ret == 0);
VG_(block_all_host_signals)( &saved_procmask );

/* copy the sim signal actions to the real ones. */
for (i = 1; i < VKI_KNSIG; i++) {
@@ -654,9 +579,7 @@ void VG_(sigshutdown_actions) ( void )
ret = VG_(ksigaction)(i, &sa, NULL);
}

/* Finally, copy the simulated process mask to the real one. */
ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &saved_procmask, NULL);
vg_assert(ret == 0);
VG_(restore_host_signals)( &saved_procmask );
}


@ -665,18 +588,16 @@ void VG_(sigshutdown_actions) ( void )
|
||||
------------------------------------------------------------------ */
|
||||
|
||||
/* Do more error checking? */
|
||||
void VG_(do__NR_sigaction) ( void )
|
||||
void VG_(do__NR_sigaction) ( ThreadId tid )
|
||||
{
|
||||
UInt res;
|
||||
void* our_old_handler;
|
||||
vki_ksigaction* new_action;
|
||||
vki_ksigaction* old_action;
|
||||
UInt param1
|
||||
= VG_(baseBlock)[VGOFF_(m_ebx)]; /* int sigNo */
|
||||
UInt param2
|
||||
= VG_(baseBlock)[VGOFF_(m_ecx)]; /* k_sigaction* new_action */
|
||||
UInt param3
|
||||
= VG_(baseBlock)[VGOFF_(m_edx)]; /* k_sigaction* old_action */
|
||||
ThreadState* tst = VG_(get_thread_state)( tid );
|
||||
UInt param1 = tst->m_ebx; /* int sigNo */
|
||||
UInt param2 = tst->m_ecx; /* k_sigaction* new_action */
|
||||
UInt param3 = tst->m_edx; /* k_sigaction* old_action */
|
||||
new_action = (vki_ksigaction*)param2;
|
||||
old_action = (vki_ksigaction*)param3;
|
||||
|
||||
@ -722,7 +643,7 @@ void VG_(do__NR_sigaction) ( void )
|
||||
}
|
||||
}
|
||||
|
||||
KERNEL_DO_SYSCALL(res);
|
||||
KERNEL_DO_SYSCALL(tid,res);
|
||||
/* VG_(printf)("RES = %d\n", res); */
|
||||
|
||||
/* If the client asks for the old handler, maintain our fiction
|
||||
@ -750,7 +671,7 @@ void VG_(do__NR_sigaction) ( void )
|
||||
goto good;
|
||||
|
||||
good:
|
||||
VG_(baseBlock)[VGOFF_(m_eax)] = (UInt)0;
|
||||
tst->m_eax = (UInt)0;
|
||||
return;
|
||||
|
||||
bad_signo:
|
||||
|
||||
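For reference, a minimal sketch of the client-side call that VG_(do__NR_sigaction) ends up servicing -- ordinary POSIX sigaction usage, nothing Valgrind-specific; the handler and function names are illustrative:

#include <signal.h>

static void on_int ( int signo ) { (void)signo; /* respond to SIGINT */ }

int install_handler ( void )
{
   struct sigaction sa, old;
   sa.sa_handler = on_int;
   sigemptyset(&sa.sa_mask);
   sa.sa_flags = 0;
   /* In the __NR_sigaction syscall this becomes sigNo in %ebx,
      new_action in %ecx and old_action in %edx, which the code above
      now reads from the calling thread's state (tst->m_ebx etc.)
      rather than from the global baseBlock. */
   return sigaction(SIGINT, &sa, &old);
}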
@@ -41,10 +41,6 @@
# m_state_static, and back afterwards.

VG_(do_syscall):
	cmpl	$2, VG_(syscall_depth)
	jz	do_syscall_DEPTH_2

	# depth 1 copy follows ...
	# Save all the int registers of the real machines state on the
	# simulators stack.
	pushal
@@ -104,76 +100,6 @@ VG_(do_syscall):

	ret



do_syscall_DEPTH_2:

	# depth 2 copy follows ...
	# Save all the int registers of the real machines state on the
	# simulators stack.
	pushal

	# and save the real FPU state too
	fwait
	fnsave	VG_(real_fpu_state_saved_over_syscall_d2)
	frstor	VG_(real_fpu_state_saved_over_syscall_d2)

	# remember what the simulators stack pointer is
	movl	%esp, VG_(esp_saved_over_syscall_d2)

	# Now copy the simulated machines state into the real one
	# esp still refers to the simulators stack
	frstor	VG_(m_state_static)+40
	movl	VG_(m_state_static)+32, %eax
	pushl	%eax
	popfl
	movl	VG_(m_state_static)+0, %eax
	movl	VG_(m_state_static)+4, %ecx
	movl	VG_(m_state_static)+8, %edx
	movl	VG_(m_state_static)+12, %ebx
	movl	VG_(m_state_static)+16, %esp
	movl	VG_(m_state_static)+20, %ebp
	movl	VG_(m_state_static)+24, %esi
	movl	VG_(m_state_static)+28, %edi

	# esp now refers to the simulatees stack
	# Do the actual system call
	int	$0x80

	# restore stack as soon as possible
	# esp refers to simulatees stack
	movl	%esp, VG_(m_state_static)+16
	movl	VG_(esp_saved_over_syscall_d2), %esp
	# esp refers to simulators stack

	# ... and undo everything else.
	# Copy real state back to simulated state.
	movl	%eax, VG_(m_state_static)+0
	movl	%ecx, VG_(m_state_static)+4
	movl	%edx, VG_(m_state_static)+8
	movl	%ebx, VG_(m_state_static)+12
	movl	%ebp, VG_(m_state_static)+20
	movl	%esi, VG_(m_state_static)+24
	movl	%edi, VG_(m_state_static)+28
	pushfl
	popl	%eax
	movl	%eax, VG_(m_state_static)+32
	fwait
	fnsave	VG_(m_state_static)+40
	frstor	VG_(m_state_static)+40

	# Restore the state of the simulator
	frstor	VG_(real_fpu_state_saved_over_syscall_d2)
	popal

	ret


##--------------------------------------------------------------------##
##--- end vg_syscall.S                                             ---##
##--------------------------------------------------------------------##
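As a reading aid, here is the VG_(m_state_static) layout implied by the byte offsets used in the assembly above. This is a sketch reconstructed from the code, not the authoritative declaration; the +36 slot in particular is a guess, since this path never touches it:

typedef unsigned int UInt;

typedef struct {
   UInt eax;      /* +0  */
   UInt ecx;      /* +4  */
   UInt edx;      /* +8  */
   UInt ebx;      /* +12 */
   UInt esp;      /* +16; written back after the int $0x80 */
   UInt ebp;      /* +20 */
   UInt esi;      /* +24 */
   UInt edi;      /* +28 */
   UInt eflags;   /* +32; moved via pushl/popfl and pushfl/popl */
   UInt eip;      /* +36; not touched by this code path (assumption) */
   unsigned char fpu[108];  /* +40; an x87 fnsave/frstor image is 108 bytes */
} MStateStaticView;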
@@ -1607,7 +1607,7 @@ Addr dis_Grp5 ( UCodeBlock* cb, Int sz, Addr eip, Bool* isEnd )
            SMC_IF_ALL(cb);
            uInstr1(cb, JMP, 0, TempReg, t1);
            uCond(cb, CondAlways);
            LAST_UINSTR(cb).call_dispatch = True;
            LAST_UINSTR(cb).jmpkind = JmpCall;
            *isEnd = True;
            break;
         case 4: /* jmp Ev */
@@ -1654,7 +1654,7 @@ Addr dis_Grp5 ( UCodeBlock* cb, Int sz, Addr eip, Bool* isEnd )
            SMC_IF_ALL(cb);
            uInstr1(cb, JMP, 0, TempReg, t1);
            uCond(cb, CondAlways);
            LAST_UINSTR(cb).call_dispatch = True;
            LAST_UINSTR(cb).jmpkind = JmpCall;
            *isEnd = True;
            break;
         case 4: /* JMP Ev */
@@ -2859,32 +2859,6 @@ Addr dis_xadd_G_E ( UCodeBlock* cb,
}


/* Push %ECX, %EBX and %EAX, call helper_do_client_request, and put
   the resulting %EAX value back. */
static
void dis_ClientRequest ( UCodeBlock* cb )
{
   Int tmpc = newTemp(cb);
   Int tmpb = newTemp(cb);
   Int tmpa = newTemp(cb);
   uInstr2(cb, GET, 4, ArchReg, R_ECX, TempReg, tmpc);
   uInstr2(cb, GET, 4, ArchReg, R_EBX, TempReg, tmpb);
   uInstr2(cb, GET, 4, ArchReg, R_EAX, TempReg, tmpa);
   uInstr0(cb, CALLM_S, 0);
   uInstr1(cb, PUSH, 4, TempReg, tmpc);
   uInstr1(cb, PUSH, 4, TempReg, tmpb);
   uInstr1(cb, PUSH, 4, TempReg, tmpa);
   uInstr1(cb, CALLM, 0, Lit16, VGOFF_(helper_do_client_request));
   uFlagsRWU(cb, FlagsEmpty, FlagsEmpty, FlagsEmpty);
   uInstr1(cb, POP, 4, TempReg, tmpa);
   uInstr1(cb, CLEAR, 0, Lit16, 8);
   uInstr0(cb, CALLM_E, 0);
   uInstr2(cb, PUT, 4, TempReg, tmpa, ArchReg, R_EAX);
   if (dis)
      VG_(printf)("%%eax = client_request ( %%eax, %%ebx, %%ecx )\n");
}


/*------------------------------------------------------------*/
/*--- Disassembling entire basic blocks                    ---*/
/*------------------------------------------------------------*/
@@ -2909,21 +2883,31 @@ static Addr disInstr ( UCodeBlock* cb, Addr eip, Bool* isEnd )
   if (dis) VG_(printf)("\t0x%x:  ", eip);

   /* Spot the client-request magic sequence, if required. */
   if (VG_(clo_client_perms)) {
   if (1 /*VG_(clo_client_perms)*/) {
      UChar* myeip = (UChar*)eip;
      /* Spot this:
         C1C01D   roll $29, %eax
         C1C003   roll $3,  %eax
         C1C01B   roll $27, %eax
         C1C005   roll $5,  %eax
         C1C81B   rorl $27, %eax
         C1C805   rorl $5,  %eax
         C1C00D   roll $13, %eax
         C1C013   roll $19, %eax
      */
      if (myeip[0] == 0xC1 && myeip[1] == 0xC0 && myeip[2] == 0x1D &&
          myeip[3] == 0xC1 && myeip[4] == 0xC0 && myeip[5] == 0x03 &&
          myeip[6] == 0xC1 && myeip[7] == 0xC0 && myeip[8] == 0x1B &&
          myeip[9] == 0xC1 && myeip[10] == 0xC0 && myeip[11] == 0x05) {
         vg_assert(VG_(clo_instrument));
         dis_ClientRequest(cb);
         eip += 12;
      if (myeip[ 0] == 0xC1 && myeip[ 1] == 0xC0 && myeip[ 2] == 0x1D &&
          myeip[ 3] == 0xC1 && myeip[ 4] == 0xC0 && myeip[ 5] == 0x03 &&
          myeip[ 6] == 0xC1 && myeip[ 7] == 0xC8 && myeip[ 8] == 0x1B &&
          myeip[ 9] == 0xC1 && myeip[10] == 0xC8 && myeip[11] == 0x05 &&
          myeip[12] == 0xC1 && myeip[13] == 0xC0 && myeip[14] == 0x0D &&
          myeip[15] == 0xC1 && myeip[16] == 0xC0 && myeip[17] == 0x13
         ) {
         eip += 18;
         uInstr1(cb, JMP, 0, Literal, 0);
         uLiteral(cb, eip);
         uCond(cb, CondAlways);
         LAST_UINSTR(cb).jmpkind = JmpClientReq;
         *isEnd = True;
         if (dis)
            VG_(printf)("%%edx = client_request ( %%eax )\n");
         return eip;
      }
   }
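The new 18-byte magic sequence is harmless on a real CPU: the six rotates amount to a net left rotation of %eax by 29+3-27-5+13+19 = 32 bits, i.e. the identity. A small standalone check of that arithmetic (this program is an illustration, not part of the tree):

#include <stdio.h>

/* Rotate helpers; n is always 1..31 here, so the shifts are well defined. */
static unsigned int rol ( unsigned int x, int n )
   { return (x << n) | (x >> (32 - n)); }
static unsigned int ror ( unsigned int x, int n )
   { return (x >> n) | (x << (32 - n)); }

int main ( void )
{
   unsigned int x = 0xDEADBEEFu, y = x;
   y = rol(y,29); y = rol(y,3);    /* roll $29 ; roll $3  */
   y = ror(y,27); y = ror(y,5);    /* rorl $27 ; rorl $5  */
   y = rol(y,13); y = rol(y,19);   /* roll $13 ; roll $19 */
   printf(y == x ? "identity\n" : "changed\n");  /* prints "identity" */
   return 0;
}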
@@ -2978,9 +2962,7 @@ static Addr disInstr ( UCodeBlock* cb, Addr eip, Bool* isEnd )
         uInstr2(cb, PUT, 4, TempReg, t1, ArchReg, R_ESP);
         uInstr1(cb, JMP, 0, TempReg, t2);
         uCond(cb, CondAlways);

         if (d32 == 0)
            LAST_UINSTR(cb).ret_dispatch = True;
         LAST_UINSTR(cb).jmpkind = JmpRet;

         *isEnd = True;
         if (dis) {
@@ -2992,22 +2974,6 @@ static Addr disInstr ( UCodeBlock* cb, Addr eip, Bool* isEnd )
      case 0xE8: /* CALL J4 */
         d32 = getUDisp32(eip); eip += 4;
         d32 += eip; /* eip now holds return-to addr, d32 is call-to addr */
         if (d32 == (Addr)&VG_(shutdown)) {
            /* Set vg_dispatch_ctr to 1, vg_interrupt_reason to VG_Y_EXIT,
               and get back to the dispatch loop.  We ask for a jump to this
               CALL insn because vg_dispatch will ultimately transfer control
               to the real CPU, and we want this call to be the first insn
               it does. */
            uInstr0(cb, CALLM_S, 0);
            uInstr1(cb, CALLM, 0, Lit16, VGOFF_(helper_request_normal_exit));
            uFlagsRWU(cb, FlagsEmpty, FlagsEmpty, FlagsEmpty);
            uInstr0(cb, CALLM_E, 0);
            uInstr1(cb, JMP, 0, Literal, 0);
            uLiteral(cb, eip-5);
            uCond(cb, CondAlways);
            *isEnd = True;
            if (dis) VG_(printf)("call 0x%x\n",d32);
         } else
         if (d32 == eip && getUChar(eip) >= 0x58
                        && getUChar(eip) <= 0x5F) {
            /* Specially treat the position-independent-code idiom
@@ -3040,7 +3006,7 @@ static Addr disInstr ( UCodeBlock* cb, Addr eip, Bool* isEnd )
            uInstr1(cb, JMP, 0, Literal, 0);
            uLiteral(cb, d32);
            uCond(cb, CondAlways);
            LAST_UINSTR(cb).call_dispatch = True;
            LAST_UINSTR(cb).jmpkind = JmpCall;
            *isEnd = True;
            if (dis) VG_(printf)("call 0x%x\n",d32);
         }
@@ -3179,14 +3145,10 @@ static Addr disInstr ( UCodeBlock* cb, Addr eip, Bool* isEnd )
         /* It's important that all ArchRegs carry their up-to-date value
            at this point.  So we declare an end-of-block here, which
            forces any TempRegs caching ArchRegs to be flushed. */
         t1 = newTemp(cb);
         uInstr0(cb, CALLM_S, 0);
         uInstr1(cb, CALLM, 0, Lit16, VGOFF_(helper_do_syscall) );
         uFlagsRWU(cb, FlagsEmpty, FlagsEmpty, FlagsEmpty);
         uInstr0(cb, CALLM_E, 0);
         uInstr1(cb, JMP, 0, Literal, 0);
         uLiteral(cb, eip);
         uCond(cb, CondAlways);
         LAST_UINSTR(cb).jmpkind = JmpSyscall;
         *isEnd = True;
         if (dis) VG_(printf)("int $0x80\n");
         break;
@@ -153,8 +153,8 @@ void VG_(emptyUInstr) ( UInstr* u )
   u->val1 = u->val2 = u->val3 = 0;
   u->tag1 = u->tag2 = u->tag3 = NoValue;
   u->flags_r = u->flags_w = FlagsEmpty;
   u->call_dispatch = False;
   u->smc_check = u->signed_widen = u->ret_dispatch = False;
   u->jmpkind = JmpBoring;
   u->smc_check = u->signed_widen = False;
   u->lit32 = 0;
   u->opcode = 0;
   u->size = 0;
@@ -259,8 +259,7 @@ void copyAuxInfoFromTo ( UInstr* src, UInstr* dst )
   dst->extra4b       = src->extra4b;
   dst->smc_check     = src->smc_check;
   dst->signed_widen  = src->signed_widen;
   dst->ret_dispatch  = src->ret_dispatch;
   dst->call_dispatch = src->call_dispatch;
   dst->jmpkind       = src->jmpkind;
   dst->flags_r       = src->flags_r;
   dst->flags_w       = src->flags_w;
}
@@ -917,10 +916,15 @@ void VG_(ppUInstr) ( Int instrNo, UInstr* u )

      case JMP: case CC2VAL:
      case PUSH: case POP: case CLEAR: case CALLM:
         if (u->opcode == JMP && u->ret_dispatch)
            VG_(printf)("-r");
         if (u->opcode == JMP && u->call_dispatch)
            VG_(printf)("-c");
         if (u->opcode == JMP) {
            switch (u->jmpkind) {
               case JmpCall:      VG_(printf)("-c");   break;
               case JmpRet:       VG_(printf)("-r");   break;
               case JmpSyscall:   VG_(printf)("-sys"); break;
               case JmpClientReq: VG_(printf)("-cli"); break;
               default: break;
            }
         }
         VG_(printf)("\t");
         ppUOperand(u, 1, u->size, False);
         break;
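The two ad-hoc Bools are thus folded into a single jump classification. A sketch of the enum this implies, reconstructed from the names used above (the real declaration lives elsewhere in the tree and may differ in order or members):

typedef enum {
   JmpBoring,      /* ordinary jump; no special handling            */
   JmpCall,        /* was call_dispatch == True                     */
   JmpRet,         /* was ret_dispatch == True                      */
   JmpSyscall,     /* block ends at an int $0x80                    */
   JmpClientReq    /* block ends at the client-request sequence     */
} JmpKind;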
@@ -533,9 +533,9 @@ void VG_(smc_check4) ( Addr a )

   /* Force an exit before the next basic block, so the translation
      cache can be flushed appropriately. */
   VG_(dispatch_ctr_SAVED) = VG_(dispatch_ctr);
   VG_(dispatch_ctr) = 1;
   VG_(interrupt_reason) = VG_Y_SMC;
   // VG_(dispatch_ctr_SAVED) = VG_(dispatch_ctr);
   //VG_(dispatch_ctr) = 1;
   //VG_(interrupt_reason) = VG_Y_SMC;
}


@@ -29,6 +29,7 @@ val_PROGRAMS = valgrind.so valgrinq.so
valgrinq_so_SOURCES = vg_valgrinq_dummy.c

valgrind_so_SOURCES = \
	vg_scheduler.c \
	vg_clientmalloc.c \
	vg_clientperms.c \
	vg_demangle.c \
@@ -70,10 +71,10 @@ noinst_HEADERS = \
	vg_unsafe.h


vg_memory.o: vg_memory.c
vg_memory.o: vg_memory.c $(noinst_HEADERS)
	$(COMPILE) -O2 @PREFERRED_STACK_BOUNDARY@ -c $<

vg_clientmalloc.o: vg_clientmalloc.c
vg_clientmalloc.o: vg_clientmalloc.c $(noinst_HEADERS)
	$(COMPILE) -fno-omit-frame-pointer -c $<


@@ -48,19 +48,55 @@

/* This defines the magic code sequence which the JITter spots and
   handles magically.  Don't look too closely at this; it will rot
   your brain.
   your brain.  Valgrind dumps the result value in %EDX, so we first
   copy the default value there, so that it is returned when not
   running on Valgrind.  Since %EAX points to a block of mem
   containing the args, you can pass as many args as you want like
   this.  Currently this is set up to deal with 4 args since that's
   the max that we appear to need (pthread_create).
*/
#define VALGRIND_MAGIC_SEQUENCE(_zzq_res,_zzq_code,_zzq_addr,_zzq_len)  \
  asm volatile("movl %1, %%eax\n\t"                                     \
               "movl %2, %%ebx\n\t"                                     \
               "movl %3, %%ecx\n\t"                                     \
               "roll $29, %%eax ; roll $3, %%eax\n\t"                   \
               "roll $27, %%eax ; roll $5, %%eax\n\t"                   \
               "movl %%eax, %0\t"                                       \
               : "=r" (_zzq_res)                                        \
               : "r" (_zzq_code), "r" (_zzq_addr), "r" (_zzq_len)       \
               : "eax", "ebx", "ecx", "cc", "memory"                    \
              );
#define VALGRIND_MAGIC_SEQUENCE(                                        \
        _zzq_rlval,   /* result lvalue */                               \
        _zzq_default, /* result returned when running on real CPU */    \
        _zzq_request, /* request code */                                \
        _zzq_arg1,    /* request first param */                         \
        _zzq_arg2,    /* request second param */                        \
        _zzq_arg3,    /* request third param */                         \
        _zzq_arg4     /* request fourth param */ )                      \
                                                                        \
  { volatile unsigned int _zzq_args[5];                                 \
    _zzq_args[0] = (volatile unsigned int)_zzq_request;                 \
    _zzq_args[1] = (volatile unsigned int)_zzq_arg1;                    \
    _zzq_args[2] = (volatile unsigned int)_zzq_arg2;                    \
    _zzq_args[3] = (volatile unsigned int)_zzq_arg3;                    \
    _zzq_args[4] = (volatile unsigned int)_zzq_arg4;                    \
    asm volatile("movl %1, %%eax\n\t"                                   \
                 "movl %2, %%edx\n\t"                                   \
                 "roll $29, %%eax ; roll $3, %%eax\n\t"                 \
                 "rorl $27, %%eax ; rorl $5, %%eax\n\t"                 \
                 "roll $13, %%eax ; roll $19, %%eax\n\t"                \
                 "movl %%edx, %0\t"                                     \
                 : "=r" (_zzq_rlval)                                    \
                 : "r" (&_zzq_args[0]), "r" (_zzq_default)              \
                 : "eax", "edx", "cc", "memory"                         \
                );                                                      \
  }


/* Some request codes.  There are many more of these, but most are not
   exposed to end-user view.  These are the public ones, all of the
   form 0x1000 + small_number.
*/

#define VG_USERREQ__MAKE_NOACCESS        0x1001
#define VG_USERREQ__MAKE_WRITABLE        0x1002
#define VG_USERREQ__MAKE_READABLE        0x1003
#define VG_USERREQ__DISCARD              0x1004
#define VG_USERREQ__CHECK_WRITABLE       0x1005
#define VG_USERREQ__CHECK_READABLE       0x1006
#define VG_USERREQ__MAKE_NOACCESS_STACK  0x1007
#define VG_USERREQ__RUNNING_ON_VALGRIND  0x1008
#define VG_USERREQ__DO_LEAK_CHECK        0x1009 /* unimplemented */


@@ -71,7 +107,9 @@
   descriptions Valgrind will use in subsequent error messages. */
#define VALGRIND_MAKE_NOACCESS(_qzz_addr,_qzz_len)                \
   ({unsigned int _qzz_res;                                       \
     VALGRIND_MAGIC_SEQUENCE(_qzz_res,1001,_qzz_addr,_qzz_len);   \
     VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0 /* default return */,    \
                             VG_USERREQ__MAKE_NOACCESS,           \
                             _qzz_addr, _qzz_len, 0, 0);          \
     _qzz_res;                                                    \
   })

@@ -79,7 +117,9 @@
   for _qzz_len bytes. */
#define VALGRIND_MAKE_WRITABLE(_qzz_addr,_qzz_len)                \
   ({unsigned int _qzz_res;                                       \
     VALGRIND_MAGIC_SEQUENCE(_qzz_res,1002,_qzz_addr,_qzz_len);   \
     VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0 /* default return */,    \
                             VG_USERREQ__MAKE_WRITABLE,           \
                             _qzz_addr, _qzz_len, 0, 0);          \
     _qzz_res;                                                    \
   })

@@ -87,7 +127,9 @@
   for _qzz_len bytes. */
#define VALGRIND_MAKE_READABLE(_qzz_addr,_qzz_len)                \
   ({unsigned int _qzz_res;                                       \
     VALGRIND_MAGIC_SEQUENCE(_qzz_res,1003,_qzz_addr,_qzz_len);   \
     VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0 /* default return */,    \
                             VG_USERREQ__MAKE_READABLE,           \
                             _qzz_addr, _qzz_len, 0, 0);          \
     _qzz_res;                                                    \
   })

@@ -99,7 +141,9 @@
   handle. */
#define VALGRIND_DISCARD(_qzz_blkindex)                           \
   ({unsigned int _qzz_res;                                       \
     VALGRIND_MAGIC_SEQUENCE(_qzz_res,2004,0,_qzz_blkindex);      \
     VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0 /* default return */,    \
                             VG_USERREQ__DISCARD,                 \
                             0, _qzz_blkindex, 0, 0);             \
     _qzz_res;                                                    \
   })

@@ -111,20 +155,24 @@
   If suitable addressibility is not established, Valgrind prints an
   error message and returns the address of the first offending byte.
   Otherwise it returns zero. */
#define VALGRIND_CHECK_WRITABLE(_qzz_addr,_qzz_len)               \
   ({unsigned int _qzz_res;                                       \
     VALGRIND_MAGIC_SEQUENCE(_qzz_res,2002,_qzz_addr,_qzz_len);   \
     _qzz_res;                                                    \
#define VALGRIND_CHECK_WRITABLE(_qzz_addr,_qzz_len)               \
   ({unsigned int _qzz_res;                                       \
     VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0,                         \
                             VG_USERREQ__CHECK_WRITABLE,          \
                             _qzz_addr, _qzz_len, 0, 0);          \
     _qzz_res;                                                    \
   })

/* Check that memory at _qzz_addr is addressible and defined for
   _qzz_len bytes.  If suitable addressibility and definedness are not
   established, Valgrind prints an error message and returns the
   address of the first offending byte.  Otherwise it returns zero. */
#define VALGRIND_CHECK_READABLE(_qzz_addr,_qzz_len)               \
   ({unsigned int _qzz_res;                                       \
     VALGRIND_MAGIC_SEQUENCE(_qzz_res,2003,_qzz_addr,_qzz_len);   \
     _qzz_res;                                                    \
#define VALGRIND_CHECK_READABLE(_qzz_addr,_qzz_len)               \
   ({unsigned int _qzz_res;                                       \
     VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0,                         \
                             VG_USERREQ__CHECK_READABLE,          \
                             _qzz_addr, _qzz_len, 0, 0);          \
     _qzz_res;                                                    \
   })


@@ -133,10 +181,10 @@
   are not established, Valgrind prints an error message and returns
   the address of the first offending byte.  Otherwise it returns
   zero. */
#define VALGRIND_CHECK_DEFINED(__lvalue)                          \
   (void)                                                         \
   VALGRIND_CHECK_READABLE(                                       \
      (volatile unsigned char *)&(__lvalue),                      \
#define VALGRIND_CHECK_DEFINED(__lvalue)                          \
   (void)                                                         \
   VALGRIND_CHECK_READABLE(                                       \
      (volatile unsigned char *)&(__lvalue),                      \
      (unsigned int)(sizeof (__lvalue)))


@@ -146,11 +194,38 @@
   value.  The record associated with this setting will be
   automatically removed by Valgrind when the containing routine
   exits. */
#define VALGRIND_MAKE_NOACCESS_STACK(_qzz_addr,_qzz_len)          \
   ({unsigned int _qzz_res;                                       \
     VALGRIND_MAGIC_SEQUENCE(_qzz_res,3001,_qzz_addr,_qzz_len);   \
     _qzz_res;                                                    \
#define VALGRIND_MAKE_NOACCESS_STACK(_qzz_addr,_qzz_len)          \
   {unsigned int _qzz_res;                                        \
    VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0,                          \
                            VG_USERREQ__MAKE_NOACCESS_STACK,      \
                            _qzz_addr, _qzz_len, 0, 0);           \
   }


/* Returns 1 if running on Valgrind, 0 if running on the real CPU.
   Currently implemented but untested. */
#define RUNNING_ON_VALGRIND                                       \
   ({unsigned int _qzz_res;                                       \
     VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0 /* returned if not */,   \
                             VG_USERREQ__RUNNING_ON_VALGRIND,     \
                             0, 0, 0, 0);                         \
     _qzz_res;                                                    \
   })


/* Mark memory, intended to be on the client's stack, at _qzz_addr as
   unaddressible and undefined for _qzz_len bytes.  Does not return a
   value.  The record associated with this setting will be
   automatically removed by Valgrind when the containing routine
   exits.

   Currently implemented but untested.
*/
#define VALGRIND_DO_LEAK_CHECK                                    \
   {unsigned int _qzz_res;                                        \
    VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0,                          \
                            VG_USERREQ__DO_LEAK_CHECK,            \
                            0, 0, 0, 0);                          \
   }

#endif
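A minimal sketch of hypothetical client code using the reworked request macros; the buffer and its size are illustrative, and GCC statement expressions are assumed (as the macros themselves already require):

#include "valgrind.h"

static char buf[64];

void example ( void )
{
   if (RUNNING_ON_VALGRIND) {
      /* Returns a block handle; any access to buf is now reported. */
      unsigned int id = VALGRIND_MAKE_NOACCESS(buf, sizeof buf);
      /* ... later, drop the block description again ... */
      VALGRIND_DISCARD(id);
   }
}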
@@ -10,7 +10,7 @@ void the_sighandler ( int signo )
{
   int nw;
   // assert(signo == SIGUSR1);
   // printf("sighandler running; should unblock now\n");
   printf("sighandler running; should unblock now\n");
   nw = write(fds[1], "zzz", 1);
   // assert(nw == 1);
}
@@ -164,8 +164,10 @@ fi

VG_ARGS="$VALGRIND_OPTS $vgsupp $vgopts"
export VG_ARGS
LD_PRELOAD=$VALGRIND/valgrind.so:$LD_PRELOAD
LD_LIBRARY_PATH=$VALGRIND:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH
LD_PRELOAD=valgrind.so:$LD_PRELOAD
export LD_PRELOAD
#LD_DEBUG=files
#export LD_DEBUG
exec $argopts
@@ -250,10 +250,9 @@ static ShadowChunk* client_malloc_shadow ( UInt align, UInt size,
/* Allocate memory, noticing whether or not we are doing the full
   instrumentation thing. */

void* VG_(client_malloc) ( UInt size, UInt raw_alloc_kind )
void* VG_(client_malloc) ( UInt size, VgAllocKind kind )
{
   ShadowChunk* sc;
   VgAllocKind  kind;

   VGP_PUSHCC(VgpCliMalloc);
   client_malloc_init();
@@ -263,21 +262,15 @@ void* VG_(client_malloc) ( UInt size, UInt raw_alloc_kind )
                count_freelist(), vg_freed_list_volume,
                size, raw_alloc_kind );
#  endif

   vg_cmalloc_n_mallocs ++;
   vg_cmalloc_bs_mallocd += size;

   if (!VG_(clo_instrument)) {
      VGP_POPCC;
      return VG_(malloc) ( VG_AR_CLIENT, size );
   }
   switch (raw_alloc_kind) {
      case 0x4002: kind = Vg_AllocNewVec; break;
      case 0x4001: kind = Vg_AllocNew;    break;
      case 0x4000: /* malloc */
      case 6666:   /* calloc */
         kind = Vg_AllocMalloc; break;
      default: /* should not happen */
         /* therefore we make sure it doesn't -- JRS */
         VG_(panic)("VG_(client_malloc): raw_alloc_kind");
         break; /*NOTREACHED*/
   }

   sc = client_malloc_shadow ( 0, size, kind );
   VGP_POPCC;
   return (void*)(sc->data);
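With the raw codes no longer decoded here, the caller passes the kind directly. A sketch of the VgAllocKind mapping the old switch implied (the real enum is declared elsewhere in the tree and may carry more members):

typedef enum {
   Vg_AllocMalloc,   /* malloc/calloc/realloc (was raw 0x4000 and 6666) */
   Vg_AllocNew,      /* C++ operator new      (was raw 0x4001)          */
   Vg_AllocNewVec    /* C++ operator new[]    (was raw 0x4002)          */
} VgAllocKind;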
@@ -295,6 +288,10 @@ void* VG_(client_memalign) ( UInt align, UInt size )
                count_freelist(), vg_freed_list_volume,
                align, size );
#  endif

   vg_cmalloc_n_mallocs ++;
   vg_cmalloc_bs_mallocd += size;

   if (!VG_(clo_instrument)) {
      VGP_POPCC;
      return VG_(malloc_aligned) ( VG_AR_CLIENT, align, size );
@@ -305,11 +302,10 @@ void* VG_(client_memalign) ( UInt align, UInt size )
}


void VG_(client_free) ( void* ptrV, UInt raw_alloc_kind )
void VG_(client_free) ( void* ptrV, VgAllocKind kind )
{
   ShadowChunk* sc;
   UInt         ml_no;
   VgAllocKind  kind;

   VGP_PUSHCC(VgpCliMalloc);
   client_malloc_init();
@@ -319,6 +315,9 @@ void VG_(client_free) ( void* ptrV, UInt raw_alloc_kind )
                count_freelist(), vg_freed_list_volume,
                ptrV, raw_alloc_kind );
#  endif

   vg_cmalloc_n_frees ++;

   if (!VG_(clo_instrument)) {
      VGP_POPCC;
      VG_(free) ( VG_AR_CLIENT, ptrV );
@@ -340,16 +339,6 @@ void VG_(client_free) ( void* ptrV, UInt raw_alloc_kind )
      return;
   }

   switch (raw_alloc_kind) {
      case 0x5002: kind = Vg_AllocNewVec; break;
      case 0x5001: kind = Vg_AllocNew;    break;
      case 0x5000:
      default:
         kind = Vg_AllocMalloc;
         /* should only happen if bug in client code */
         break;
   }

   /* check if its a matching free() / delete / delete [] */
   if (kind != sc->allockind)
      VG_(record_freemismatch_error) ( (Addr) ptrV );
@@ -386,6 +375,9 @@ void* VG_(client_calloc) ( UInt nmemb, UInt size1 )
                nmemb, size1 );
#  endif

   vg_cmalloc_n_mallocs ++;
   vg_cmalloc_bs_mallocd += nmemb * size1;

   if (!VG_(clo_instrument)) {
      VGP_POPCC;
      return VG_(calloc) ( VG_AR_CLIENT, nmemb, size1 );
@@ -430,6 +422,10 @@ void* VG_(client_realloc) ( void* ptrV, UInt size_new )
                ptrV, size_new );
#  endif

   vg_cmalloc_n_frees ++;
   vg_cmalloc_n_mallocs ++;
   vg_cmalloc_bs_mallocd += size_new;

   if (!VG_(clo_instrument)) {
      vg_assert(ptrV != NULL && size_new != 0);
      VGP_POPCC;
@@ -573,364 +569,6 @@ void VG_(describe_addr) ( Addr a, AddrInfo* ai )
   return;
}

/*------------------------------------------------------------*/
/*--- Replace the C library versions with our own.  Hairy. ---*/
/*------------------------------------------------------------*/

/* Below are new versions of malloc, __builtin_new, free,
   __builtin_delete, calloc and realloc.

   malloc, __builtin_new, free, __builtin_delete, calloc and realloc
   can be entered either on the real CPU or the simulated one.  If on
   the real one, this is because the dynamic linker is running the
   static initialisers for C++, before starting up Valgrind itself.
   In this case it is safe to route calls through to
   VG_(malloc)/vg_free, since that is self-initialising.

   Once Valgrind is initialised, vg_running_on_simd_CPU becomes True.
   The call needs to be transferred from the simulated CPU back to the
   real one and routed to the vg_client_* functions.  To do that, the
   args are passed to vg_trap_here, which the simulator detects.  The
   bogus epilogue fn call is to guarantee that gcc doesn't tailcall
   vg_trap_here, since that would cause the simulator's detection to
   fail -- it only checks the targets of call transfers, not jumps.
   And of course we have to be sure gcc won't inline either the
   vg_trap_here or vg_bogus_epilogue.  Ha ha ha.  What a mess.
*/

/* Place afterwards to guarantee it won't get inlined ... */
static UInt vg_trap_here_WRAPPER ( UInt arg1, UInt arg2, UInt what_to_do );
static void vg_bogus_epilogue ( void );

/* ALL calls to malloc wind up here. */
void* malloc ( UInt n )
{
   if (VG_(clo_trace_malloc))
      VG_(printf)("malloc[simd=%d](%d)",
                  (UInt)VG_(running_on_simd_CPU), n );

   if (VG_(clo_sloppy_malloc)) { while ((n % 4) > 0) n++; }

   vg_cmalloc_n_mallocs ++;
   vg_cmalloc_bs_mallocd += n;

   if (VG_(running_on_simd_CPU)) {
      UInt v = vg_trap_here_WRAPPER ( 0, n, 0x4000 );
      vg_bogus_epilogue();
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return (void*)v;
   } else {
      void* v = VG_(malloc)(VG_AR_CLIENT, n);
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return (void*)v;
   }
}

void* __builtin_new ( UInt n )
{
   if (VG_(clo_trace_malloc))
      VG_(printf)("__builtin_new[simd=%d](%d)",
                  (UInt)VG_(running_on_simd_CPU), n );

   if (VG_(clo_sloppy_malloc)) { while ((n % 4) > 0) n++; }

   vg_cmalloc_n_mallocs++;
   vg_cmalloc_bs_mallocd += n;

   if (VG_(running_on_simd_CPU)) {
      UInt v = vg_trap_here_WRAPPER ( 0, n, 0x4001 );
      vg_bogus_epilogue();
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return (void*)v;
   } else {
      void* v = VG_(malloc)(VG_AR_CLIENT, n);
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return v;
   }
}

void* __builtin_vec_new ( Int n )
{
   if (VG_(clo_trace_malloc))
      VG_(printf)("__builtin_vec_new[simd=%d](%d)",
                  (UInt)VG_(running_on_simd_CPU), n );

   if (VG_(clo_sloppy_malloc)) { while ((n % 4) > 0) n++; }

   vg_cmalloc_n_mallocs++;
   vg_cmalloc_bs_mallocd += n;

   if (VG_(running_on_simd_CPU)) {
      UInt v = vg_trap_here_WRAPPER ( 0, n, 0x4002 );
      vg_bogus_epilogue();
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return (void*)v;
   } else {
      void* v = VG_(malloc)(VG_AR_CLIENT, n);
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return v;
   }
}

void free ( void* p )
{
   if (VG_(clo_trace_malloc))
      VG_(printf)("free[simd=%d](%p)\n",
                  (UInt)VG_(running_on_simd_CPU), p );
   vg_cmalloc_n_frees ++;

   if (p == NULL)
      return;
   if (VG_(running_on_simd_CPU)) {
      (void)vg_trap_here_WRAPPER ( 0, (UInt)p, 0x5000 );
      vg_bogus_epilogue();
   } else {
      VG_(free)(VG_AR_CLIENT, p);
   }
}

void __builtin_delete ( void* p )
{
   if (VG_(clo_trace_malloc))
      VG_(printf)("__builtin_delete[simd=%d](%p)\n",
                  (UInt)VG_(running_on_simd_CPU), p );
   vg_cmalloc_n_frees ++;

   if (p == NULL)
      return;
   if (VG_(running_on_simd_CPU)) {
      (void)vg_trap_here_WRAPPER ( 0, (UInt)p, 0x5001 );
      vg_bogus_epilogue();
   } else {
      VG_(free)(VG_AR_CLIENT, p);
   }
}

void __builtin_vec_delete ( void* p )
{
   if (VG_(clo_trace_malloc))
      VG_(printf)("__builtin_vec_delete[simd=%d](%p)\n",
                  (UInt)VG_(running_on_simd_CPU), p );
   vg_cmalloc_n_frees ++;

   if (p == NULL)
      return;
   if (VG_(running_on_simd_CPU)) {
      (void)vg_trap_here_WRAPPER ( 0, (UInt)p, 0x5002 );
      vg_bogus_epilogue();
   } else {
      VG_(free)(VG_AR_CLIENT, p);
   }
}

void* calloc ( UInt nmemb, UInt size )
{
   if (VG_(clo_trace_malloc))
      VG_(printf)("calloc[simd=%d](%d,%d)",
                  (UInt)VG_(running_on_simd_CPU), nmemb, size );
   vg_cmalloc_n_mallocs ++;
   vg_cmalloc_bs_mallocd += size * nmemb;

   if (VG_(running_on_simd_CPU)) {
      UInt v = vg_trap_here_WRAPPER ( nmemb, size, 6666 );
      vg_bogus_epilogue();
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return (void*)v;
   } else {
      void* v = VG_(calloc)(VG_AR_CLIENT, nmemb, size);
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return v;
   }
}

void* realloc ( void* ptrV, UInt new_size )
{
   if (VG_(clo_trace_malloc))
      VG_(printf)("realloc[simd=%d](%p,%d)",
                  (UInt)VG_(running_on_simd_CPU), ptrV, new_size );

   if (VG_(clo_sloppy_malloc))
      { while ((new_size % 4) > 0) new_size++; }

   vg_cmalloc_n_frees ++;
   vg_cmalloc_n_mallocs ++;
   vg_cmalloc_bs_mallocd += new_size;

   if (ptrV == NULL)
      return malloc(new_size);
   if (new_size == 0) {
      free(ptrV);
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = 0\n" );
      return NULL;
   }
   if (VG_(running_on_simd_CPU)) {
      UInt v = vg_trap_here_WRAPPER ( (UInt)ptrV, new_size, 7777 );
      vg_bogus_epilogue();
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return (void*)v;
   } else {
      void* v = VG_(realloc)(VG_AR_CLIENT, ptrV, new_size);
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return v;
   }
}

void* memalign ( Int alignment, Int n )
{
   if (VG_(clo_trace_malloc))
      VG_(printf)("memalign[simd=%d](al %d, size %d)",
                  (UInt)VG_(running_on_simd_CPU), alignment, n );

   if (VG_(clo_sloppy_malloc)) { while ((n % 4) > 0) n++; }

   vg_cmalloc_n_mallocs ++;
   vg_cmalloc_bs_mallocd += n;

   if (VG_(running_on_simd_CPU)) {
      UInt v = vg_trap_here_WRAPPER ( alignment, n, 8888 );
      vg_bogus_epilogue();
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return (void*)v;
   } else {
      void* v = VG_(malloc_aligned)(VG_AR_CLIENT, alignment, n);
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return (void*)v;
   }
}

void* valloc ( Int size )
{
   return memalign(VKI_BYTES_PER_PAGE, size);
}


/* Various compatibility wrapper functions, for glibc and libstdc++. */
void cfree ( void* p )
{
   free ( p );
}

void* mallinfo ( void )
{
   VG_(message)(Vg_UserMsg,
      "Warning: incorrectly-handled call to mallinfo()");
   return NULL;
}


int mallopt ( int cmd, int value )
{
   /* In glibc-2.2.4, 1 denoted a successful return value for mallopt */
   return 1;
}


/* Bomb out if we get any of these. */
void pvalloc ( void )
{ VG_(panic)("call to pvalloc\n"); }

void malloc_stats ( void )
{ VG_(panic)("call to malloc_stats\n"); }
void malloc_usable_size ( void )
{ VG_(panic)("call to malloc_usable_size\n"); }
void malloc_trim ( void )
{ VG_(panic)("call to malloc_trim\n"); }
void malloc_get_state ( void )
{ VG_(panic)("call to malloc_get_state\n"); }
void malloc_set_state ( void )
{ VG_(panic)("call to malloc_set_state\n"); }


int __posix_memalign ( void **memptr, UInt alignment, UInt size )
{
   void *mem;

   /* Test whether the SIZE argument is valid.  It must be a power of
      two multiple of sizeof (void *).  */
   if (size % sizeof (void *) != 0 || (size & (size - 1)) != 0)
      return 22 /*EINVAL*/;

   mem = memalign (alignment, size);

   if (mem != NULL) {
      *memptr = mem;
      return 0;
   }

   return 12 /*ENOMEM*/;
}


/*------------------------------------------------------------*/
/*--- Magic supporting hacks.                              ---*/
/*------------------------------------------------------------*/

extern UInt VG_(trap_here) ( UInt arg1, UInt arg2, UInt what_to_do );

static
UInt vg_trap_here_WRAPPER ( UInt arg1, UInt arg2, UInt what_to_do )
{
   /* The point of this idiocy is to make a plain, ordinary call to
      vg_trap_here which vg_dispatch_when_CALL can spot.  Left to
      itself, with -fpic, gcc generates "call vg_trap_here@PLT" which
      doesn't get spotted, for whatever reason.  I guess I could check
      _all_ control flow transfers, but that would be an undesirable
      performance overhead.

      If you compile without -fpic, gcc generates the obvious call
      insn, so the wrappers below will work if they just call
      vg_trap_here.  But I don't want to rule out building with -fpic,
      hence this hack.  Sigh.
   */
   UInt v;

#  define WHERE_TO      VG_(trap_here)
#  define STRINGIFY(xx) __STRING(xx)

   asm("# call to vg_trap_here\n"
       "\t pushl %3\n"
       "\t pushl %2\n"
       "\t pushl %1\n"
       "\t call " STRINGIFY(WHERE_TO) "\n"
       "\t addl $12, %%esp\n"
       "\t movl %%eax, %0\n"
       : "=r" (v)
       : "r" (arg1), "r" (arg2), "r" (what_to_do)
       : "eax", "esp", "cc", "memory");
   return v;

#  undef WHERE_TO
#  undef STRINGIFY
}

/* Last, but not least ... */
void vg_bogus_epilogue ( void )
{
   /* Runs on simulated CPU only. */
}

UInt VG_(trap_here) ( UInt arg1, UInt arg2, UInt what_to_do )
{
   /* Calls to this fn are detected in vg_dispatch.S and are handled
      specially.  So this fn should never be entered. */
   VG_(panic)("vg_trap_here called!");
   return 0; /*NOTREACHED*/
}


/*--------------------------------------------------------------------*/
/*--- end vg_clientmalloc.c                                        ---*/

@@ -33,6 +33,8 @@
#include "vg_include.h"
#include "vg_constants.h"

#include "valgrind.h"   /* for VG_USERREQ__* */


/*------------------------------------------------------------*/
/*--- General client block management.                     ---*/
@@ -287,73 +289,81 @@ void VG_(delete_client_stack_blocks_following_ESP_change) ( void )
}


UInt VG_(handle_client_request) ( UInt code, Addr aa, UInt nn )
UInt VG_(handle_client_request) ( UInt* arg_block )
{
   Int   i;
   Bool  ok;
   Addr  bad_addr;
   Int   i;
   Bool  ok;
   Addr  bad_addr;
   UInt* arg = arg_block;

   if (VG_(clo_verbosity) > 2)
      VG_(printf)("client request: code %d, addr %p, len %d\n",
                  code, aa, nn );
                  arg[0], arg[1], arg[2] );

   vg_assert(VG_(clo_client_perms));
   vg_assert(VG_(clo_instrument));

   switch (code) {
      case 1001: /* make no access */
   switch (arg[0]) {
      case VG_USERREQ__MAKE_NOACCESS: /* make no access */
         i = vg_alloc_client_block();
         /* VG_(printf)("allocated %d %p\n", i, vg_cgbs); */
         vg_cgbs[i].kind  = CG_NoAccess;
         vg_cgbs[i].start = aa;
         vg_cgbs[i].size  = nn;
         vg_cgbs[i].start = arg[1];
         vg_cgbs[i].size  = arg[2];
         vg_cgbs[i].where = VG_(get_ExeContext) ( False );
         VGM_(make_noaccess) ( aa, nn );
         VGM_(make_noaccess) ( arg[1], arg[2] );
         return i;
      case 1002: /* make writable */
      case VG_USERREQ__MAKE_WRITABLE: /* make writable */
         i = vg_alloc_client_block();
         vg_cgbs[i].kind  = CG_Writable;
         vg_cgbs[i].start = aa;
         vg_cgbs[i].size  = nn;
         vg_cgbs[i].start = arg[1];
         vg_cgbs[i].size  = arg[2];
         vg_cgbs[i].where = VG_(get_ExeContext) ( False );
         VGM_(make_writable) ( aa, nn );
         VGM_(make_writable) ( arg[1], arg[2] );
         return i;
      case 1003: /* make readable */
      case VG_USERREQ__MAKE_READABLE: /* make readable */
         i = vg_alloc_client_block();
         vg_cgbs[i].kind  = CG_Readable;
         vg_cgbs[i].start = aa;
         vg_cgbs[i].size  = nn;
         vg_cgbs[i].start = arg[1];
         vg_cgbs[i].size  = arg[2];
         vg_cgbs[i].where = VG_(get_ExeContext) ( False );
         VGM_(make_readable) ( aa, nn );
         VGM_(make_readable) ( arg[1], arg[2] );
         return i;

      case 2002: /* check writable */
         ok = VGM_(check_writable) ( aa, nn, &bad_addr );
      case VG_USERREQ__CHECK_WRITABLE: /* check writable */
         ok = VGM_(check_writable) ( arg[1], arg[2], &bad_addr );
         if (!ok)
            VG_(record_user_err) ( bad_addr, True );
         return ok ? (UInt)NULL : bad_addr;
      case 2003: /* check readable */
         ok = VGM_(check_readable) ( aa, nn, &bad_addr );
      case VG_USERREQ__CHECK_READABLE: /* check readable */
         ok = VGM_(check_readable) ( arg[1], arg[2], &bad_addr );
         if (!ok)
            VG_(record_user_err) ( bad_addr, False );
         return ok ? (UInt)NULL : bad_addr;

      case 2004: /* discard */
      case VG_USERREQ__DISCARD: /* discard */
         if (vg_cgbs == NULL
             || nn >= vg_cgb_used || vg_cgbs[nn].kind == CG_NotInUse)
             || arg[2] >= vg_cgb_used || vg_cgbs[arg[2]].kind == CG_NotInUse)
            return 1;
         vg_assert(nn >= 0 && nn < vg_cgb_used);
         vg_cgbs[nn].kind = CG_NotInUse;
         vg_assert(arg[2] >= 0 && arg[2] < vg_cgb_used);
         vg_cgbs[arg[2]].kind = CG_NotInUse;
         vg_cgb_discards++;
         return 0;

      case 3001: /* make noaccess stack block */
         vg_add_client_stack_block ( aa, nn );
      case VG_USERREQ__MAKE_NOACCESS_STACK: /* make noaccess stack block */
         vg_add_client_stack_block ( arg[1], arg[2] );
         return 0;

      case VG_USERREQ__RUNNING_ON_VALGRIND:
         return 1;

      case VG_USERREQ__DO_LEAK_CHECK:
         VG_(detect_memory_leaks)();
         return 0; /* return value is meaningless */

      default:
         VG_(message)(Vg_UserMsg,
                      "Warning: unknown client request code %d", code);
                      "Warning: unknown client request code %d", arg[0]);
         return 1;
   }
}
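The arg_block pointer is just the five-word _zzq_args array that VALGRIND_MAGIC_SEQUENCE builds on the client side. A self-contained sketch of the correspondence (the function and operand names are illustrative; the request code mirrors the header value):

typedef unsigned int UInt;
#define VG_USERREQ__MAKE_NOACCESS 0x1001  /* as in valgrind.h */

/* What the client side lays out before issuing the magic sequence. */
static UInt fill_request_block ( UInt addr, UInt len, UInt* args )
{
   args[0] = VG_USERREQ__MAKE_NOACCESS;  /* switched on as arg[0] above */
   args[1] = addr;                       /* first parameter:  arg[1]    */
   args[2] = len;                        /* second parameter: arg[2]    */
   args[3] = 0;                          /* unused by this request      */
   args[4] = 0;
   /* The magic sequence passes &args[0] in %eax; the same pointer is
      eventually handed to VG_(handle_client_request). */
   return args[0];
}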
@@ -50,23 +50,18 @@
#define VGP_(str)   VGAPPEND(vgProf_,str)
#define VGOFF_(str) VGAPPEND(vgOff_,str)

/* Reasons why the inner simulation loop might stop (i.e. why has
   vg_dispatch_ctr reached zero? */
#define VG_Y_SIGCHECK  0   /* signal check due */
#define VG_Y_SMC       1   /* write to code detected */
#define VG_Y_EXIT      2   /* natural or debug end to simulation */
#define VG_Y_TRANSLATE 3   /* translation of vg_m_eip needed */

/* Check for pending signals every this-many jumps.  Since this
   happens in the region of once per millisecond, we also take the
   opportunity do do a bit of quick sanity checking at the same time.
   Look at the call sites of VG_(deliver_signals). */
#define VG_SIGCHECK_INTERVAL 1000

/* A ,agic values that %ebp might be set to when returning to the
/* Magic values that %ebp might be set to when returning to the
   dispatcher.  The only other legitimate value is to point to the
   start of VG_(baseBlock). */
#define VG_EBP_DISPATCH_CHECKED 17
   start of VG_(baseBlock).  These also are return values from
   VG_(run_innerloop) to the scheduler. */
#define VG_TRC_EBP_JMP_SPECIAL    17
#define VG_TRC_EBP_JMP_SYSCALL    19
#define VG_TRC_EBP_JMP_CLIENTREQ  23

#define VG_TRC_INNER_COUNTERZERO  29  /* ebp can't have this; sched return only */
#define VG_TRC_INNER_FASTMISS     31  /* ditto.  Means fast-cache miss. */
#define VG_TRC_UNRESUMABLE_SIGNAL 37  /* ditto; got sigsegv/sigbus */

/* Debugging hack for assembly code ... sigh. */
#if 0
@@ -75,12 +70,13 @@
#define OYNK(nnn)
#endif

#if 1
#if 0
#define OYNNK(nnn) pushal; pushl $nnn; call VG_(oynk) ; addl $4,%esp; popal
#else
#define OYNNK(nnn)
#endif


/* Constants for the fast translation lookup cache. */
#define VG_TT_FAST_BITS 15
#define VG_TT_FAST_SIZE (1 << VG_TT_FAST_BITS)
@@ -88,6 +84,7 @@

/* Constants for the fast original-code-write check cache. */


/* Usually you want this to be zero. */
#define VG_SMC_FASTCHECK_IN_C 0
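A rough sketch of how a scheduler loop might act on these return codes (an assumed shape only; the actual loop lives in vg_scheduler.c and certainly differs in detail). It presumes vg_include.h is in scope for the VG_ name-mangling macro and the VG_TRC_* constants:

extern unsigned int VG_(run_innerloop) ( void );

void scheduler_sketch ( void )
{
   for (;;) {
      unsigned int trc = VG_(run_innerloop)();
      switch (trc) {
         case VG_TRC_INNER_COUNTERZERO:  /* timeslice used up: pick next thread */
            break;
         case VG_TRC_INNER_FASTMISS:     /* no translation yet: make one, rerun */
            break;
         case VG_TRC_EBP_JMP_SYSCALL:    /* perform the syscall for this thread */
            break;
         case VG_TRC_EBP_JMP_CLIENTREQ:  /* service the client request          */
            break;
         case VG_TRC_UNRESUMABLE_SIGNAL: /* got SIGSEGV/SIGBUS; give up         */
            return;
      }
   }
}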
358
vg_dispatch.S
358
vg_dispatch.S
@ -61,8 +61,15 @@
|
||||
.globl VG_(run_innerloop)
|
||||
VG_(run_innerloop):
|
||||
#OYNK(1000)
|
||||
|
||||
# ----- entry point to VG_(run_innerloop) -----
|
||||
pushal
|
||||
pushl %ebx
|
||||
pushl %ecx
|
||||
pushl %edx
|
||||
pushl %esi
|
||||
pushl %edi
|
||||
pushl %ebp
|
||||
|
||||
# Set up the baseBlock pointer
|
||||
movl $VG_(baseBlock), %ebp
|
||||
|
||||
@ -70,19 +77,19 @@ VG_(run_innerloop):
|
||||
movl VGOFF_(m_eip), %esi
|
||||
movl (%ebp, %esi, 4), %eax
|
||||
|
||||
# fall thru to vg_dispatch
|
||||
# Start off dispatching paranoically, since we no longer have
|
||||
# any indication whether or not this might be a special call/ret
|
||||
# transfer.
|
||||
jmp dispatch_callret_maybe
|
||||
|
||||
.globl VG_(dispatch)
|
||||
VG_(dispatch):
|
||||
# %eax holds destination (original) address
|
||||
# To signal any kind of interruption, set vg_dispatch_ctr
|
||||
# to 1, and vg_interrupt_reason to the appropriate value
|
||||
# before jumping here.
|
||||
|
||||
|
||||
dispatch_main:
|
||||
# Jump here to do a new dispatch.
|
||||
# %eax holds destination (original) address.
|
||||
# %ebp indicates further details of the control transfer
|
||||
# requested to the address in %eax. The idea is that we
|
||||
# want to check all jump targets to see if they are either
|
||||
# VG_(signalreturn_bogusRA) or VG_(trap_here), both of which
|
||||
# VG_(signalreturn_bogusRA) or VG_(shutdown), both of which
|
||||
# require special treatment. However, testing all branch
|
||||
# targets is expensive, and anyway in most cases JITter knows
|
||||
# that a jump cannot be to either of these two. We therefore
|
||||
@ -92,37 +99,33 @@ VG_(dispatch):
|
||||
# this is a jump for which the JITter knows no check need be
|
||||
# made.
|
||||
#
|
||||
# If it is ebp == VG_EBP_DISPATCH_CHECKED, we had better make
|
||||
# If ebp == VG_EBP_JMP_CALLRET, we had better make
|
||||
# the check.
|
||||
#
|
||||
# If ebp == VG_EBP_JMP_SYSCALL, do a system call before
|
||||
# continuing at eax.
|
||||
#
|
||||
# If ebp == VG_EBP_JMP_CLIENTREQ, do a client request before
|
||||
# continuing at eax.
|
||||
#
|
||||
# If %ebp has any other value, we panic.
|
||||
#
|
||||
# What the JITter assumes is that VG_(signalreturn_bogusRA) can
|
||||
# only be arrived at from an x86 ret insn, and dually that
|
||||
# VG_(trap_here) can only be arrived at from an x86 call insn.
|
||||
# VG_(shutdown) can only be arrived at from an x86 call insn.
|
||||
# The net effect is that all call and return targets are checked
|
||||
# but straightforward jumps are not.
|
||||
#
|
||||
# Thinks ... is this safe if the client happens to tailcall
|
||||
# VG_(trap_here) ? I dont think that can happen -- if it did
|
||||
# it would be a problem.
#

cmpl $VG_(baseBlock), %ebp
jnz dispatch_checked_maybe
jnz dispatch_exceptional

dispatch_unchecked:
dispatch_boring:
# save the jump address at VG_(baseBlock)[VGOFF_(m_eip)],
# so that if this block takes a fault, we later know where we were.
movl VGOFF_(m_eip), %esi
movl %eax, (%ebp, %esi, 4)

# do we require attention?
# this check has to be after the call/ret transfer checks, because
# we have to ensure that any control transfer following a syscall
# return is an ordinary transfer. By the time we get here, we have
# established that the next transfer, which might get delayed till
# after a syscall return, is an ordinary one.
# All a bit subtle ...
# do a timeslice check.
# are we out of timeslice? If yes, defer to scheduler.
#OYNK(1001)
decl VG_(dispatch_ctr)
jz counter_is_zero
@ -136,243 +139,102 @@ dispatch_unchecked:
# ebx points at a tt entry
# now compare target with the tte.orig_addr field (+0)
cmpl %eax, (%ebx)
jnz full_search
jnz fast_lookup_failed

# Found a match. Set the tte.mru_epoch field (+8)
# and call the tte.trans_addr field (+4)
movl VG_(current_epoch), %ecx
movl %ecx, 8(%ebx)
call *4(%ebx)
jmp VG_(dispatch)
jmp dispatch_main

full_search:
#no luck? try the full table search
pushl %eax
call VG_(search_transtab)
addl $4, %esp
fast_lookup_failed:
# %EIP is up to date here since dispatch_boring dominates
movl $VG_TRC_INNER_FASTMISS, %eax
jmp run_innerloop_exit

# %eax has trans addr or zero
cmpl $0, %eax
jz need_translation
# full table search also zeroes the tte.last_use field,
# so we don't have to do so here.
call *%eax
jmp VG_(dispatch)

need_translation:
OYNK(1003)
movl $VG_Y_TRANSLATE, VG_(interrupt_reason)
counter_is_zero:
OYNK(1004)
popal
# ----- (the only) exit point from VG_(run_innerloop) -----
# ----- unless of course vg_oursignalhandler longjmp()s
# ----- back through it, due to an unmanageable signal
ret


/* The normal way to get back to the translation loop is to load
the next (original) address into %eax and return.
However, simulation of a RET insn requires a check as to whether
the next address is vg_signalreturn_bogusRA. If so, a signal
handler is returning, so we need to invoke our own mechanism to
deal with that, by calling vg_signal_returns(). This restores
the simulated machine state from the VgSigContext structure on
the stack, including the (simulated, of course) %eip saved when
the signal was delivered. We then arrange to jump to the
restored %eip.
*/
dispatch_checked_maybe:
# Possibly a checked dispatch. Sanity check ...
cmpl $VG_EBP_DISPATCH_CHECKED, %ebp
jz dispatch_checked
# ebp has an invalid value ... crap out.
pushl $panic_msg_ebp
call VG_(panic)
# (never returns)

dispatch_checked:
OYNK(2000)
# first off, restore %ebp -- since it is currently wrong
movl $VG_(baseBlock), %ebp

# see if we need to mess with stack blocks
pushl %ebp
pushl %eax
call VG_(delete_client_stack_blocks_following_ESP_change)
popl %eax
popl %ebp

# is this a signal return?
cmpl $VG_(signalreturn_bogusRA), %eax
jz dispatch_to_signalreturn_bogusRA
# should we intercept this call?
cmpl $VG_(trap_here), %eax
jz dispatch_to_trap_here
# ok, it's not interesting. Handle the normal way.
jmp dispatch_unchecked

dispatch_to_signalreturn_bogusRA:
OYNK(2001)
pushal
call VG_(signal_returns)
popal
# %EIP will now point to the insn which should have followed
# the signal delivery. Jump to it. Since we no longer have any
# hint from the JITter about whether or not it is checkable,
# go via the conservative route.
movl VGOFF_(m_eip), %esi
movl (%ebp, %esi, 4), %eax
jmp dispatch_checked


/* Similarly, check CALL targets to see if it is the ultra-magical
vg_trap_here(), and, if so, act accordingly. See vg_clientmalloc.c.
Be careful not to get the real and simulated CPUs,
stacks and regs mixed up ...
*/
dispatch_to_trap_here:
OYNK(111)
/* Considering the params to vg_trap_here(), we should have:
12(%ESP) is what_to_do
8(%ESP) is arg2
4(%ESP) is arg1
0(%ESP) is return address
*/
movl VGOFF_(m_esp), %esi
movl (%ebp, %esi, 4), %ebx
# %ebx now holds simulated %ESP
cmpl $0x4000, 12(%ebx)
jz handle_malloc
cmpl $0x4001, 12(%ebx)
jz handle_malloc
cmpl $0x4002, 12(%ebx)
jz handle_malloc
cmpl $0x5000, 12(%ebx)
jz handle_free
cmpl $0x5001, 12(%ebx)
jz handle_free
cmpl $0x5002, 12(%ebx)
jz handle_free
cmpl $6666, 12(%ebx)
jz handle_calloc
cmpl $7777, 12(%ebx)
jz handle_realloc
cmpl $8888, 12(%ebx)
jz handle_memalign
push $panic_msg_trap
call VG_(panic)
# vg_panic never returns

handle_malloc:
# %ESP is in %ebx
pushl 12(%ebx)
pushl 8(%ebx)
call VG_(client_malloc)
addl $8, %esp
# returned value is in %eax
jmp save_eax_and_simulate_RET

handle_free:
# %ESP is in %ebx
pushl 12(%ebx)
pushl 8(%ebx)
call VG_(client_free)
addl $8, %esp
jmp simulate_RET

handle_calloc:
# %ESP is in %ebx
pushl 8(%ebx)
pushl 4(%ebx)
call VG_(client_calloc)
addl $8, %esp
# returned value is in %eax
jmp save_eax_and_simulate_RET

handle_realloc:
# %ESP is in %ebx
pushl 8(%ebx)
pushl 4(%ebx)
call VG_(client_realloc)
addl $8, %esp
# returned value is in %eax
jmp save_eax_and_simulate_RET

handle_memalign:
# %ESP is in %ebx
pushl 8(%ebx)
pushl 4(%ebx)
call VG_(client_memalign)
addl $8, %esp
# returned value is in %eax
jmp save_eax_and_simulate_RET

save_eax_and_simulate_RET:
movl VGOFF_(m_eax), %esi
movl %eax, (%ebp, %esi, 4) # %eax -> %EAX
# set %EAX bits to VALID
movl VGOFF_(sh_eax), %esi
movl $0x0 /* All 32 bits VALID */, (%ebp, %esi, 4)
# fall thru ...
simulate_RET:
# standard return
movl VGOFF_(m_esp), %esi
movl (%ebp, %esi, 4), %ebx # %ESP -> %ebx
movl 0(%ebx), %eax # RA -> %eax
addl $4, %ebx # %ESP += 4
movl %ebx, (%ebp, %esi, 4) # %ebx -> %ESP
jmp dispatch_checked # jump to %eax
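(For clarity: simulate_RET above performs, on the simulated registers, exactly what an x86 ret does on real ones. A minimal C rendering of the same steps, illustrative only:

   void simulate_ret_sketch ( UInt* m_esp, UInt* m_eip )
   {
      *m_eip = * (UInt*) (*m_esp);   /* pop return address into %EIP */
      *m_esp += 4;                   /* bump simulated %ESP past it  */
      /* ... then dispatch to the popped address, with checking */
   }
)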

.data
panic_msg_trap:
.ascii "dispatch_to_trap_here: unknown what_to_do"
.byte 0
panic_msg_ebp:
.ascii "vg_dispatch: %ebp has invalid value!"
.byte 0
.text


/*------------------------------------------------------------*/
/*--- A helper for delivering signals when the client is ---*/
/*--- (presumably) blocked in a system call. ---*/
/*------------------------------------------------------------*/

/* Returns, in %eax, the next orig_addr to run.
The caller needs to decide whether the returned orig_addr
requires special handling.

extern Addr VG_(run_singleton_translation) ( Addr trans_addr )
*/

/* should we take care to save the FPU state here? */

.globl VG_(run_singleton_translation)
VG_(run_singleton_translation):
movl 4(%esp), %eax # eax = trans_addr
pushl %ebx
pushl %ecx
pushl %edx
pushl %esi
pushl %edi
pushl %ebp

# set up ebp correctly for translations
movl $VG_(baseBlock), %ebp

# run the translation
call *%eax

# next orig_addr is correctly in %eax already
# %EIP is up to date here since dispatch_boring dominates
movl $VG_TRC_INNER_COUNTERZERO, %eax
jmp run_innerloop_exit

run_innerloop_exit:
popl %ebp
popl %edi
popl %esi
popl %edx
popl %ecx
popl %ebx
ret



/* Other ways of getting out of the inner loop. Placed out-of-line to
make it look cleaner.
*/
dispatch_exceptional:
# this is jumped to only, not fallen-through from above
cmpl $VG_TRC_EBP_JMP_SPECIAL, %ebp
jz dispatch_callret_maybe
cmpl $VG_TRC_EBP_JMP_SYSCALL, %ebp
jz dispatch_syscall
cmpl $VG_TRC_EBP_JMP_CLIENTREQ, %ebp
jz dispatch_clientreq

# ebp has an invalid value ... crap out.
pushl $panic_msg_ebp
call VG_(panic)
# (never returns)

dispatch_syscall:
# save %eax in %EIP and defer to sched
movl $VG_(baseBlock), %ebp
movl VGOFF_(m_eip), %esi
movl %eax, (%ebp, %esi, 4)
movl $VG_TRC_EBP_JMP_SYSCALL, %eax
jmp run_innerloop_exit

ret
dispatch_clientreq:
# save %eax in %EIP and defer to sched
movl $VG_(baseBlock), %ebp
movl VGOFF_(m_eip), %esi
movl %eax, (%ebp, %esi, 4)
movl $VG_TRC_EBP_JMP_CLIENTREQ, %eax
jmp run_innerloop_exit

dispatch_callret_maybe:
# save %eax in %EIP
movl $VG_(baseBlock), %ebp
movl VGOFF_(m_eip), %esi
movl %eax, (%ebp, %esi, 4)

# see if we need to mess with stack blocks
pushl %eax
call VG_(delete_client_stack_blocks_following_ESP_change)
popl %eax
movl $VG_(baseBlock), %ebp

# is this a call/return which we need to mess with
cmpl $VG_(signalreturn_bogusRA), %eax
jz dispatch_callret
cmpl $VG_(shutdown), %eax
jz dispatch_callret

# ok, it's not interesting. Handle the normal way.
jmp dispatch_boring

dispatch_callret:
# %EIP is up to date here since dispatch_callret_maybe dominates
movl $VG_TRC_EBP_JMP_SPECIAL, %eax
jmp run_innerloop_exit


.data
panic_msg_ebp:
.ascii "vg_dispatch: %ebp has invalid value!"
.byte 0
.text


##--------------------------------------------------------------------##
##--- end vg_dispatch.S ---##

@ -1069,44 +1069,48 @@ static void synth_call_baseBlock_method ( Bool ensure_shortform,
}


/* Jump to the next translation, by loading its original addr into
%eax and returning to the scheduler. Or, if it is a RET transfer,
don't return; instead jump to vg_dispatch_when_RET, which checks
whether this is a signal handler returning, and takes suitable
evasive action.
*/
static void synth_jmp_reg ( Int reg,
Bool is_ret_dispatch,
Bool is_call_dispatch )
static void load_ebp_from_JmpKind ( JmpKind jmpkind )
{
switch (jmpkind) {
case JmpBoring:
break;
case JmpCall:
case JmpRet:
emit_movv_lit_reg ( 4, VG_TRC_EBP_JMP_SPECIAL, R_EBP );
break;
case JmpSyscall:
emit_movv_lit_reg ( 4, VG_TRC_EBP_JMP_SYSCALL, R_EBP );
break;
case JmpClientReq:
emit_movv_lit_reg ( 4, VG_TRC_EBP_JMP_CLIENTREQ, R_EBP );
break;
default:
VG_(panic)("load_ebp_from_JmpKind");
}
}

/* Jump to the next translation, by loading its original addr into
%eax and returning to the scheduler. Signal special requirements
by loading a special value into %ebp first.
*/
static void synth_jmp_reg ( Int reg, JmpKind jmpkind )
{
load_ebp_from_JmpKind ( jmpkind );
if (reg != R_EAX)
emit_movv_reg_reg ( 4, reg, R_EAX );
if (is_ret_dispatch || is_call_dispatch) {
/* The (hopefully) rare case. */
vg_assert(!(is_ret_dispatch && is_call_dispatch));
emit_movv_lit_reg ( 4, VG_EBP_DISPATCH_CHECKED, R_EBP );
}
emit_ret();
}


/* Same deal as synth_jmp_reg. */
static void synth_jmp_lit ( Addr addr )
static void synth_jmp_lit ( Addr addr, JmpKind jmpkind )
{
load_ebp_from_JmpKind ( jmpkind );
emit_movv_lit_reg ( 4, addr, R_EAX );
emit_ret();
}


/* Dispatch, but with a call-target check. */
static void synth_jmp_lit_call_dispatch ( Addr addr )
{
emit_movv_lit_reg ( 4, addr, R_EAX );
emit_movv_lit_reg ( 4, VG_EBP_DISPATCH_CHECKED, R_EBP );
emit_ret();
}


static void synth_jcond_lit ( Condcode cond, Addr addr )
{
/* Do the following:
@ -1124,7 +1128,7 @@ static void synth_jcond_lit ( Condcode cond, Addr addr )
*/
emit_get_eflags();
emit_jcondshort_delta ( invertCondition(cond), 5+1 );
synth_jmp_lit ( addr );
synth_jmp_lit ( addr, JmpBoring );
}


@ -1138,7 +1142,7 @@ static void synth_jmp_ifzero_reg_lit ( Int reg, Addr addr )
*/
emit_cmpl_zero_reg ( reg );
emit_jcondshort_delta ( CondNZ, 5+1 );
synth_jmp_lit ( addr );
synth_jmp_lit ( addr, JmpBoring );
}


@ -2472,25 +2476,29 @@ static void emitUInstr ( Int i, UInstr* u )
vg_assert(u->tag2 == NoValue);
vg_assert(u->tag1 == RealReg || u->tag1 == Literal);
if (u->cond == CondAlways) {
if (u->tag1 == RealReg) {
synth_jmp_reg ( u->val1, u->ret_dispatch, u->call_dispatch );
} else {
vg_assert(!u->ret_dispatch);
if (u->call_dispatch)
synth_jmp_lit_call_dispatch (
u->tag1==Literal ? u->lit32 : u->val1 );
else
synth_jmp_lit (
u->tag1==Literal ? u->lit32 : u->val1 );
switch (u->tag1) {
case RealReg:
synth_jmp_reg ( u->val1, u->jmpkind );
break;
case Literal:
synth_jmp_lit ( u->lit32, u->jmpkind );
break;
default:
VG_(panic)("emitUInstr(JMP, unconditional, default)");
break;
}
} else {
if (u->tag1 == RealReg) {
VG_(panic)("emitUInstr: conditional jump to reg");
} else {
vg_assert(!u->ret_dispatch);
vg_assert(!u->call_dispatch);
synth_jcond_lit ( u->cond,
u->tag1==Literal ? u->lit32 : u->val1 );
switch (u->tag1) {
case RealReg:
VG_(panic)("emitUInstr(JMP, conditional, RealReg)");
break;
case Literal:
vg_assert(u->jmpkind == JmpBoring);
synth_jcond_lit ( u->cond, u->lit32 );
break;
default:
VG_(panic)("emitUInstr(JMP, conditional, default)");
break;
}
}
break;

54
vg_helpers.S
@ -48,45 +48,6 @@
*/


/*
On entry:
%ECX value
%EBX value
%EAX value -- also the result
RA <- %esp -- after pushal+pushfl is 36(%esp)
*/
.global VG_(helper_do_client_request)
VG_(helper_do_client_request):
pushal
pushfl

movl 48(%esp), %eax
pushl %eax
movl 48(%esp), %eax
pushl %eax
movl 48(%esp), %eax
pushl %eax

call VG_(handle_client_request)
movl %eax, 52(%esp)

addl $12, %esp

popfl
popal
ret


.global VG_(helper_do_syscall)
VG_(helper_do_syscall):
pushal
call VG_(wrap_syscall)
popal
# movl $VG_(baseBlock), %ebp
ret



.global VG_(helper_value_check0_fail)
VG_(helper_value_check0_fail):
pushal
@ -116,21 +77,6 @@ VG_(helper_value_check4_fail):
ret


/* Set things up so the dispatch loop exits normally. Used when it is
detected that the program wants to finish, ie it has called
vg_shutdown.
*/
.global VG_(helper_request_normal_exit)
VG_(helper_request_normal_exit):
pushl %eax
movl VG_(dispatch_ctr), %eax
movl %eax, VG_(dispatch_ctr_SAVED)
movl $1, VG_(dispatch_ctr)
movl $VG_Y_EXIT, VG_(interrupt_reason)
popl %eax
ret


/* Do an original-code-write check for the address in %ebp. */
.global VG_(helper_smc_check4)
VG_(helper_smc_check4):

344
vg_include.h
@ -117,6 +117,27 @@
prime. */
#define VG_N_EC_LISTS /*997*/ 4999

/* Defines the thread-scheduling timeslice, in terms of the number of
basic blocks we attempt to run each thread for. Smaller values
give finer interleaving but much increased scheduling overheads. */
#define VG_SCHEDULING_QUANTUM 10000
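(How the quantum is consumed, roughly: the scheduler loads the dispatch counter, runs the inner loop, and treats counter exhaustion as a minor scheduling event. A sketch, assuming the return-code protocol declared for VG_(run_innerloop) elsewhere in this commit:

   UInt trc;
   VG_(dispatch_ctr) = 1 + VG_SCHEDULING_QUANTUM;
   trc = VG_(run_innerloop)();   /* decrements dispatch_ctr per bb */
   if (trc == VG_TRC_INNER_COUNTERZERO) {
      /* quantum expired: pick the next runnable thread */
      VG_(num_scheduling_events_MINOR)++;
   }
)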

/* The maximum number of pthreads that we support. This is
deliberately not very high since our implementation of some of the
scheduler algorithms is surely O(N^2) in the number of threads,
since that's simple, at least. And (in practice) we hope that most
programs do not need many threads. */
#define VG_N_THREADS 20

/* Number of file descriptors that can simultaneously be waited on for
I/O to complete. Perhaps this should be the same as VG_N_THREADS
(surely a thread can't wait on more than one fd at once? Who
knows.) */
#define VG_N_WAITING_FDS 10

/* Maximum number of mutexes allowed. */
#define VG_N_MUTEXES 10


/* ---------------------------------------------------------------------
Basic types
@ -352,31 +373,220 @@ extern Bool VG_(is_empty_arena) ( ArenaId aid );
(VG_AR_CLIENT_REDZONE_SZW * VKI_BYTES_PER_WORD)


/* ---------------------------------------------------------------------
Exports of vg_clientfuns.c
------------------------------------------------------------------ */

/* This doesn't export code or data that valgrind.so needs to link
against. However, the scheduler does need to know the following
request codes. A few, publicly visible, request codes are also
defined in valgrind.h. */

#define VG_USERREQ__MALLOC 0x2001
#define VG_USERREQ__BUILTIN_NEW 0x2002
#define VG_USERREQ__BUILTIN_VEC_NEW 0x2003

#define VG_USERREQ__FREE 0x2004
#define VG_USERREQ__BUILTIN_DELETE 0x2005
#define VG_USERREQ__BUILTIN_VEC_DELETE 0x2006

#define VG_USERREQ__CALLOC 0x2007
#define VG_USERREQ__REALLOC 0x2008
#define VG_USERREQ__MEMALIGN 0x2009


#define VG_USERREQ__PTHREAD_CREATE 0x3001
#define VG_USERREQ__PTHREAD_CREATE_BOGUSRA 0x3002
#define VG_USERREQ__PTHREAD_JOIN 0x3003
#define VG_USERREQ__PTHREAD_GET_THREADID 0x3004
#define VG_USERREQ__PTHREAD_MUTEX_INIT 0x3005
#define VG_USERREQ__PTHREAD_MUTEX_LOCK 0x3006
#define VG_USERREQ__PTHREAD_MUTEX_UNLOCK 0x3007
#define VG_USERREQ__PTHREAD_MUTEX_DESTROY 0x3008
#define VG_USERREQ__PTHREAD_CANCEL 0x3009
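(A sketch of how the scheduler side might decode one of these requests; the arg-block layout assumed here, code in arg[0] and operands following, is an illustration, not taken from this commit:

   void do_client_request_sketch ( ThreadId tid, UInt* arg )
   {
      switch (arg[0]) {
         case VG_USERREQ__PTHREAD_MUTEX_LOCK:
            /* arg[1]: the mutex; mark tid VgTs_WaitMX if it is held */
            break;
         case VG_USERREQ__MALLOC:
            /* arg[1]: size; reply goes back in the thread's %EAX */
            break;
         default:
            break;
      }
   }
)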

/* ---------------------------------------------------------------------
Constants pertaining to the simulated CPU state, VG_(baseBlock),
which need to go here to avoid ugly circularities.
------------------------------------------------------------------ */

/* How big is the saved FPU state? */
#define VG_SIZE_OF_FPUSTATE 108
/* ... and in words ... */
#define VG_SIZE_OF_FPUSTATE_W ((VG_SIZE_OF_FPUSTATE+3)/4)


/* ---------------------------------------------------------------------
Exports of vg_scheduler.c
------------------------------------------------------------------ */

/* ThreadIds are simply indices into the vg_threads[] array. */
typedef
UInt
ThreadId;

/* MutexIds are simply indices into the vg_mutexes[] array. */
typedef
UInt
MutexId;


#define VG_INVALID_THREADID ((ThreadId)(-1))

typedef
enum {
VgTs_Empty, /* this slot is not in use */
VgTs_Runnable, /* waiting to be scheduled */
VgTs_WaitJoiner, /* waiting for someone to do join on me */
VgTs_WaitJoinee, /* waiting for the thread I did join on */
VgTs_WaitFD, /* waiting for I/O completion on a fd */
VgTs_WaitMX, /* waiting on a mutex */
VgTs_Sleeping /* sleeping for a while */
}
ThreadStatus;

typedef
struct {
/* The thread identity is simply the index in vg_threads[].
ThreadId == 0 is the root thread and has the special property
that we don't try and allocate or deallocate its stack. */

/* Current scheduling status. */
ThreadStatus status;

/* Identity of joiner (thread who called join on me), or
VG_INVALID_THREADID if no one asked to join yet. */
ThreadId joiner;

/* Identity of mutex we are waiting on, if .status == WaitMX. */
MutexId waited_on_mid;

/* If VgTs_Sleeping, this is when we should wake up. */
ULong awaken_at;

/* return value */
void* retval;

/* Stacks. When a thread slot is freed, we don't deallocate its
stack; we just leave it lying around for the next use of the
slot. If the next use of the slot requires a larger stack,
only then is the old one deallocated and a new one
allocated.

For the main thread (threadid == 0), this mechanism doesn't
apply. We don't know the size of the stack since we didn't
allocate it, and furthermore we never reallocate it. */

/* The allocated size of this thread's stack (permanently zero
if this is ThreadId == 0, since we didn't allocate its stack) */
UInt stack_size;

/* Address of the lowest word in this thread's stack. NULL means
not allocated yet.
*/
Addr stack_base;

/* Saved machine context. */
UInt m_eax;
UInt m_ebx;
UInt m_ecx;
UInt m_edx;
UInt m_esi;
UInt m_edi;
UInt m_ebp;
UInt m_esp;
UInt m_eflags;
UInt m_eip;
UInt m_fpu[VG_SIZE_OF_FPUSTATE_W];

UInt sh_eax;
UInt sh_ebx;
UInt sh_ecx;
UInt sh_edx;
UInt sh_esi;
UInt sh_edi;
UInt sh_ebp;
UInt sh_esp;
UInt sh_eflags;
}
ThreadState;


/* Copy the specified thread's state into VG_(baseBlock) in
preparation for running it. */
extern void VG_(load_thread_state)( ThreadId );

/* Save the specified thread's state back in VG_(baseBlock), and fill
VG_(baseBlock) with junk, for sanity-check reasons. */
extern void VG_(save_thread_state)( ThreadId );

/* Get the thread state block for the specified thread. */
extern ThreadState* VG_(get_thread_state)( ThreadId );
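(The declarations above imply a plain copy between a ThreadState record and VG_(baseBlock). A minimal sketch of the load direction, assuming the VGOFF_-style offsets used elsewhere in this commit:

   void load_thread_state_sketch ( ThreadState* tst )
   {
      VG_(baseBlock)[VGOFF_(m_eax)] = tst->m_eax;
      VG_(baseBlock)[VGOFF_(m_esp)] = tst->m_esp;
      VG_(baseBlock)[VGOFF_(m_eip)] = tst->m_eip;
      /* ... likewise for the other integer registers, %eflags,
         the FPU image, and the sh_* shadow registers ... */
   }
)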


/* Create, and add to TT/TC, the translation of a client basic
block. */
extern void VG_(create_translation_for) ( Addr orig_addr );

/* Return codes from the scheduler. */
typedef
enum { VgSrc_Deadlock, VgSrc_Shutdown, VgSrc_BbsDone }
VgSchedReturnCode;

/* The scheduler. */
extern VgSchedReturnCode VG_(scheduler) ( void );

extern void VG_(scheduler_init) ( void );


/* vg_oursignalhandler() might longjmp(). Here's the jmp_buf. */
extern jmp_buf VG_(scheduler_jmpbuf);
/* ... and if so, here's the signal which caused it to do so. */
extern Int VG_(longjmpd_on_signal);


/* We check that the initial stack, which we can't move, is allocated
here. VG_(scheduler_init) checks this.
*/
#define VG_STARTUP_STACK_MASK (Addr)0xBFFF8000


/* The red-zone size which we put at the bottom (highest address) of
thread stacks, for paranoia reasons. This can be arbitrary, and
doesn't really need to be set at compile time. */
#define VG_AR_CLIENT_STACKBASE_REDZONE_SZW 4

#define VG_AR_CLIENT_STACKBASE_REDZONE_SZB \
(VG_AR_CLIENT_STACKBASE_REDZONE_SZW * VKI_BYTES_PER_WORD)



/* ---------------------------------------------------------------------
Exports of vg_signals.c
------------------------------------------------------------------ */

/* The maximum number of basic blocks that we're prepared to run in a
signal handler which is called when the client is stuck in a
blocking system call. The purpose of this is to check that such a
signal handler doesn't merely do a longjmp() and keep going
forever; it should return instead. NOTE that this doesn't apply to
signals delivered under normal conditions, only when they are
delivered and the client is already blocked in a system call. */
#define VG_MAX_BBS_IN_IMMEDIATE_SIGNAL 50000

extern void VG_(sigstartup_actions) ( void );

extern void VG_(deliver_signals) ( void );
extern void VG_(deliver_signals) ( ThreadId );
extern void VG_(unblock_host_signal) ( Int sigNo );


/* Fake system calls for signal handling. */
extern void VG_(do__NR_sigaction) ( void );
extern void VG_(do__NR_sigaction) ( ThreadId tid );
extern void VG_(do__NR_sigprocmask) ( Int how, vki_ksigset_t* set );

/* Bogus return address for signal handlers. Is never executed. */
extern void VG_(signalreturn_bogusRA) ( void );

/* Modify the current thread's state once we have detected it is
returning from a signal handler. */
extern void VG_(signal_returns) ( ThreadId );

/* Handy utilities to block/restore all host signals. */
extern void VG_(block_all_host_signals)
( /* OUT */ vki_ksigset_t* saved_mask );
extern void VG_(restore_host_signals)
( /* IN */ vki_ksigset_t* saved_mask );

/* ---------------------------------------------------------------------
Exports of vg_mylibc.c
@ -420,6 +630,7 @@ extern Char* VG_(strdup) ( ArenaId aid, const Char* s);

extern Char* VG_(getenv) ( Char* name );
extern Int VG_(getpid) ( void );
extern ULong VG_(read_microsecond_timer)( void );


extern Char VG_(toupper) ( Char c );
@ -444,19 +655,28 @@ extern void VG_(assert_fail) ( Char* expr, Char* file,
Int line, Char* fn )
__attribute__ ((__noreturn__));

/* Later ... extern void vg_restore_SIGABRT ( void ); */

/* Reading files. */
extern Int VG_(open_read) ( Char* pathname );
extern void VG_(close) ( Int fd );
extern Int VG_(read) ( Int fd, void* buf, Int count);
extern Int VG_(write) ( Int fd, void* buf, Int count);

extern Int VG_(fcntl) ( Int fd, Int cmd, Int arg );

extern Int VG_(select)( Int n,
vki_fd_set* readfds,
vki_fd_set* writefds,
vki_fd_set* exceptfds,
struct vki_timeval * timeout );
extern Int VG_(nanosleep)( const struct vki_timespec *req,
struct vki_timespec *rem );


/* mmap-ery ... */
extern void* VG_(mmap)( void* start, UInt length,
UInt prot, UInt flags, UInt fd, UInt offset );

extern Int VG_(munmap)( void* start, Int length );


/* Print a (panic) message, and abort. */
@ -594,6 +814,18 @@ typedef
Condcode;


/* Descriptions of additional properties of *unconditional* jumps. */
typedef
enum {
JmpBoring=0, /* boring unconditional jump */
JmpCall=1, /* jump due to an x86 call insn */
JmpRet=2, /* jump due to an x86 ret insn */
JmpSyscall=3, /* do a system call, then jump */
JmpClientReq=4 /* do a client request, then jump */
}
JmpKind;
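(Roughly how the disassembler might tag the jump it emits for each x86 transfer. The opcodes and the function name here are illustrative, not from this commit:

   JmpKind jmpkind_of_insn_sketch ( UChar opc )
   {
      switch (opc) {
         case 0xC3: return JmpRet;      /* ret              */
         case 0xE8: return JmpCall;     /* call rel32       */
         case 0xCD: return JmpSyscall;  /* int $0x80        */
         default:   return JmpBoring;   /* plain jmp et al. */
      }
   }
)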


/* Flags. User-level code can only read/write O(verflow), S(ign),
Z(ero), A(ux-carry), C(arry), P(arity), and may also write
D(irection). That's a total of 7 flags. A FlagSet is a bitset,
@ -662,8 +894,7 @@ typedef
UChar cond; /* condition, for jumps */
Bool smc_check:1; /* do a smc test, if writes memory. */
Bool signed_widen:1; /* signed or unsigned WIDEN ? */
Bool ret_dispatch:1; /* Is this jump as a result of RET ? */
Bool call_dispatch:1; /* Is this jump as a result of CALL ? */
JmpKind jmpkind:3; /* additional properties of unconditional JMP */
}
UInstr;

@ -845,7 +1076,7 @@ typedef

extern Bool VG_(client_perm_maybe_describe)( Addr a, AddrInfo* ai );

extern UInt VG_(handle_client_request) ( UInt code, Addr aa, UInt nn );
extern UInt VG_(handle_client_request) ( UInt* arg_block );

extern void VG_(delete_client_stack_blocks_following_ESP_change) ( void );

@ -886,13 +1117,10 @@ extern void VG_(symtab_notify_munmap) ( Addr start, UInt length );
Exports of vg_clientmalloc.c
------------------------------------------------------------------ */

/* these numbers are not arbitrary. if you change them,
adjust vg_dispatch.S as well */

typedef
enum {
Vg_AllocMalloc = 0,
Vg_AllocNew = 1,
Vg_AllocNewVec = 2
}
VgAllocKind;
@ -912,20 +1140,19 @@ extern void VG_(clientmalloc_done) ( void );
extern void VG_(describe_addr) ( Addr a, AddrInfo* ai );
extern ShadowChunk** VG_(get_malloc_shadows) ( /*OUT*/ UInt* n_shadows );

/* This should never be called; if it is, something's seriously
wrong. */
extern UInt VG_(trap_here) ( UInt arg1, UInt arg2, UInt what_to_do );
/* These are called from the scheduler, when it intercepts a user
request. */
extern void* VG_(client_malloc) ( UInt size, VgAllocKind kind );
extern void* VG_(client_memalign) ( UInt align, UInt size );
extern void VG_(client_free) ( void* ptrV, VgAllocKind kind );
extern void* VG_(client_calloc) ( UInt nmemb, UInt size1 );
extern void* VG_(client_realloc) ( void* ptrV, UInt size_new );


/* ---------------------------------------------------------------------
Exports of vg_main.c
------------------------------------------------------------------ */

/* How big is the saved FPU state? */
#define VG_SIZE_OF_FPUSTATE 108
/* ... and in words ... */
#define VG_SIZE_OF_FPUSTATE_W ((VG_SIZE_OF_FPUSTATE+3)/4)

/* A structure used as an intermediary when passing the simulated
CPU's state to some assembly fragments, particularly system calls.
Stuff is copied from baseBlock to here, the assembly magic runs,
@ -941,10 +1168,6 @@ extern UInt VG_(m_state_static) [8 /* int regs, in Intel order */

extern void VG_(copy_baseBlock_to_m_state_static) ( void );
extern void VG_(copy_m_state_static_to_baseBlock) ( void );

/* Create, and add to TT/TC, the translation of a client basic
block. */
extern void VG_(create_translation_for) ( Addr orig_addr );

/* Called when some unhandleable client behaviour is detected.
Prints a msg and aborts. */
extern void VG_(unimplemented) ( Char* msg );
@ -960,12 +1183,6 @@ extern UInt VG_(stack)[10000];
vg_deliver_signal_immediately(). */
extern UInt VG_(sigstack)[10000];


/* vg_oursignalhandler() might longjmp(). Here's the jmp_buf. */
extern jmp_buf VG_(toploop_jmpbuf);
/* ... and if so, here's the signal which caused it to do so. */
extern Int VG_(longjmpd_on_signal);

/* Holds client's %esp at the point we gained control. From this the
client's argc, argv and envp are deduced. */
extern Addr VG_(esp_at_startup);
@ -994,13 +1211,6 @@ extern ULong VG_(bbs_to_go);
/* Counts downwards in vg_run_innerloop. */
extern UInt VG_(dispatch_ctr);

/* If vg_dispatch_ctr is set to 1 to force a stop, its
previous value is saved here. */
extern UInt VG_(dispatch_ctr_SAVED);

/* This is why vg_run_innerloop() exited. */
extern UInt VG_(interrupt_reason);

/* Is the client running on the simulated CPU or the real one? */
extern Bool VG_(running_on_simd_CPU); /* Initially False */

@ -1068,6 +1278,10 @@ extern UInt VG_(smc_fancy_passed);
extern UInt VG_(sanity_fast_count);
extern UInt VG_(sanity_slow_count);

/* Counts pertaining to the scheduler. */
extern UInt VG_(num_scheduling_events_MINOR);
extern UInt VG_(num_scheduling_events_MAJOR);


/* ---------------------------------------------------------------------
Exports of vg_memory.c
@ -1095,7 +1309,7 @@ extern Bool VGM_(check_readable_asciiz) ( Addr a, Addr* bad_addr );

/* Sanity checks which may be done at any time. Doing them at
signal-delivery time turns out to be convenient. */
extern void VG_(do_sanity_checks) ( Bool force_expensive );
extern void VG_(do_sanity_checks) ( ThreadId tid, Bool force_expensive );
/* Very cheap ... */
extern Bool VG_(first_and_last_secondaries_look_plausible) ( void );

@ -1134,22 +1348,21 @@ extern Bool VG_(is_plausible_stack_addr) ( Addr );
Exports of vg_syscall_mem.c
------------------------------------------------------------------ */

/* Counts the depth of nested syscalls. Is used in
VG_(deliver_signals) to discover whether or not the client is in a
syscall (presumably _blocked_ in a syscall) when a signal is
delivered. If so, the signal delivery mechanism needs to behave
differently from normal. */
extern Int VG_(syscall_depth);
extern void VG_(perform_assumed_nonblocking_syscall) ( ThreadId tid );

extern void VG_(wrap_syscall) ( void );
extern void VG_(check_known_blocking_syscall) ( ThreadId tid,
Int syscallno,
Int* /*IN*/ res );

extern Bool VG_(is_kerror) ( Int res );

#define KERNEL_DO_SYSCALL(result_lvalue) \
#define KERNEL_DO_SYSCALL(thread_id, result_lvalue) \
VG_(load_thread_state)(thread_id); \
VG_(copy_baseBlock_to_m_state_static)(); \
VG_(do_syscall)(); \
VG_(copy_m_state_static_to_baseBlock)(); \
result_lvalue = VG_(baseBlock)[VGOFF_(m_eax)];
VG_(save_thread_state)(thread_id); \
result_lvalue = VG_(get_thread_state)(thread_id)->m_eax;
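(Hypothetical use, as the scheduler might run a syscall on behalf of thread tid and collect its result; the error check is an assumed pattern, not lifted from vg_scheduler.c:

   Int res;
   KERNEL_DO_SYSCALL(tid, res);   /* run on the real CPU with tid's regs */
   if (VG_(is_kerror)(res)) {
      /* kernel error code: reflect it back to the client's %EAX */
   }
)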


/* ---------------------------------------------------------------------
@ -1242,20 +1455,15 @@ extern void VG_(swizzle_esp_then_start_GDB) ( void );
Exports of vg_dispatch.S
------------------------------------------------------------------ */

extern void VG_(dispatch);
extern void VG_(run_innerloop) ( void );

/* Returns the next orig_addr to run. */
extern Addr VG_(run_singleton_translation) ( Addr trans_addr );
/* Run a thread for a (very short) while, until some event happens
which means we need to defer to the scheduler. */
extern UInt VG_(run_innerloop) ( void );


/* ---------------------------------------------------------------------
Exports of vg_helpers.S
------------------------------------------------------------------ */

/* For doing exits ... */
extern void VG_(helper_request_normal_exit);

/* SMC fast checks. */
extern void VG_(helper_smc_check4);

@ -1304,9 +1512,6 @@ extern void VG_(helper_value_check2_fail);
extern void VG_(helper_value_check1_fail);
extern void VG_(helper_value_check0_fail);

extern void VG_(helper_do_syscall);
extern void VG_(helper_do_client_request);


/* ---------------------------------------------------------------------
The state of the simulated CPU.
@ -1434,9 +1639,6 @@ extern Int VGOFF_(helper_value_check2_fail);
extern Int VGOFF_(helper_value_check1_fail);
extern Int VGOFF_(helper_value_check0_fail);

extern Int VGOFF_(helper_do_syscall);
extern Int VGOFF_(helper_do_client_request);

extern Int VGOFF_(helperc_STOREV4); /* :: UInt -> Addr -> void */
extern Int VGOFF_(helperc_STOREV2); /* :: UInt -> Addr -> void */
extern Int VGOFF_(helperc_STOREV1); /* :: UInt -> Addr -> void */
@ -1449,8 +1651,6 @@ extern Int VGOFF_(handle_esp_assignment); /* :: Addr -> void */
extern Int VGOFF_(fpu_write_check); /* :: Addr -> Int -> void */
extern Int VGOFF_(fpu_read_check); /* :: Addr -> Int -> void */

extern Int VGOFF_(helper_request_normal_exit);



#endif /* ndef __VG_INCLUDE_H */

106
vg_kerneliface.h
@ -135,6 +135,10 @@ typedef
/* Copied from /usr/src/linux-2.4.9-13/include/asm/errno.h */

#define VKI_EINVAL 22 /* Invalid argument */
#define VKI_ENOMEM 12 /* Out of memory */

#define VKI_EWOULDBLOCK VKI_EAGAIN /* Operation would block */
#define VKI_EAGAIN 11 /* Try again */


/* Gawd ... hack ... */
@ -166,6 +170,108 @@ typedef struct vki__user_cap_data_struct {
#define VKI_SIZEOF_STRUCT_TERMIO 17


/* File descriptor sets, for doing select(). Copied from
/usr/src/linux-2.4.9-31/include/linux/posix_types.h
*/
/*
* This allows for 1024 file descriptors: if NR_OPEN is ever grown
* beyond that you'll have to change this too. But 1024 fd's seem to be
* enough even for such "real" unices like OSF/1, so hopefully this is
* one limit that doesn't have to be changed [again].
*
* Note that POSIX wants the FD_CLEAR(fd,fdsetp) defines to be in
* <sys/time.h> (and thus <linux/time.h>) - but this is a more logical
* place for them. Solved by having dummy defines in <sys/time.h>.
*/

/*
* Those macros may have been defined in <gnu/types.h>. But we always
* use the ones here.
*/
#undef VKI_NFDBITS
#define VKI_NFDBITS (8 * sizeof(unsigned long))

#undef VKI_FD_SETSIZE
#define VKI_FD_SETSIZE 1024

#undef VKI_FDSET_LONGS
#define VKI_FDSET_LONGS (VKI_FD_SETSIZE/VKI_NFDBITS)

#undef VKI_FDELT
#define VKI_FDELT(d) ((d) / VKI_NFDBITS)

#undef VKI_FDMASK
#define VKI_FDMASK(d) (1UL << ((d) % VKI_NFDBITS))

typedef struct {
unsigned long vki_fds_bits [VKI_FDSET_LONGS];
} vki_fd_set;


/* Gawd ...
Copied from /usr/src/linux-2.4.9-31/./include/asm-i386/posix_types.h
*/
#undef VKI_FD_SET
#define VKI_FD_SET(fd,fdsetp) \
__asm__ __volatile__("btsl %1,%0": \
"=m" (*(vki_fd_set *) (fdsetp)):"r" ((int) (fd)))

#undef VKI_FD_CLR
#define VKI_FD_CLR(fd,fdsetp) \
__asm__ __volatile__("btrl %1,%0": \
"=m" (*(vki_fd_set *) (fdsetp)):"r" ((int) (fd)))

#undef VKI_FD_ISSET
#define VKI_FD_ISSET(fd,fdsetp) (__extension__ ({ \
unsigned char __result; \
__asm__ __volatile__("btl %1,%2 ; setb %0" \
:"=q" (__result) :"r" ((int) (fd)), \
"m" (*(vki_fd_set *) (fdsetp))); \
__result; }))

#undef VKI_FD_ZERO
#define VKI_FD_ZERO(fdsetp) \
do { \
int __d0, __d1; \
__asm__ __volatile__("cld ; rep ; stosl" \
:"=m" (*(vki_fd_set *) (fdsetp)), \
"=&c" (__d0), "=&D" (__d1) \
:"a" (0), "1" (VKI_FDSET_LONGS), \
"2" ((vki_fd_set *) (fdsetp)) : "memory"); \
} while (0)
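(A sketch of how the scheduler's fd polling might use these definitions; the zero-timeout poll is an assumption about vg_scheduler.c, not code from it:

   vki_fd_set readfds;
   struct vki_timeval tv;
   Int fd = 3;                     /* hypothetical fd a thread waits on */

   VKI_FD_ZERO(&readfds);
   VKI_FD_SET(fd, &readfds);
   tv.tv_sec = 0; tv.tv_usec = 0;  /* poll, do not block the whole system */
   if (VG_(select)(fd+1, &readfds, NULL, NULL, &tv) > 0
       && VKI_FD_ISSET(fd, &readfds)) {
      /* fd ready: the VgTs_WaitFD thread can be made VgTs_Runnable */
   }
)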



/*
./include/asm-i386/posix_types.h:typedef long __kernel_suseconds_t;
./include/linux/types.h:typedef __kernel_suseconds_t suseconds_t;

./include/asm-i386/posix_types.h:typedef long __kernel_time_t;
./include/linux/types.h:typedef __kernel_time_t time_t;
*/

struct vki_timeval {
/* time_t */ long tv_sec; /* seconds */
/* suseconds_t */ long tv_usec; /* microseconds */
};



/* For fcntl on fds ..
from ./include/asm-i386/fcntl.h */
#define VKI_F_GETFL 3 /* get file->f_flags */
#define VKI_F_SETFL 4 /* set file->f_flags */

#define VKI_O_NONBLOCK 04000

/* For nanosleep ...
from ./include/linux/time.h */
struct vki_timespec {
/* time_t */ long tv_sec; /* seconds */
long tv_nsec; /* nanoseconds */
};


#endif /* ndef __VG_KERNELIFACE_H */

/*--------------------------------------------------------------------*/

340
vg_main.c
@ -99,8 +99,6 @@ Int VGOFF_(helper_value_check4_fail) = INVALID_OFFSET;
Int VGOFF_(helper_value_check2_fail) = INVALID_OFFSET;
Int VGOFF_(helper_value_check1_fail) = INVALID_OFFSET;
Int VGOFF_(helper_value_check0_fail) = INVALID_OFFSET;
Int VGOFF_(helper_do_syscall) = INVALID_OFFSET;
Int VGOFF_(helper_do_client_request) = INVALID_OFFSET;
Int VGOFF_(helperc_LOADV4) = INVALID_OFFSET;
Int VGOFF_(helperc_LOADV2) = INVALID_OFFSET;
Int VGOFF_(helperc_LOADV1) = INVALID_OFFSET;
@ -110,7 +108,6 @@ Int VGOFF_(helperc_STOREV1) = INVALID_OFFSET;
Int VGOFF_(handle_esp_assignment) = INVALID_OFFSET;
Int VGOFF_(fpu_write_check) = INVALID_OFFSET;
Int VGOFF_(fpu_read_check) = INVALID_OFFSET;
Int VGOFF_(helper_request_normal_exit) = INVALID_OFFSET;


/* This is the actual defn of baseblock. */
@ -305,14 +302,6 @@ static void vg_init_baseBlock ( void )
= alloc_BaB_1_set( (Addr) & VG_(helper_DAS) );
VGOFF_(helper_DAA)
= alloc_BaB_1_set( (Addr) & VG_(helper_DAA) );

VGOFF_(helper_request_normal_exit)
= alloc_BaB_1_set( (Addr) & VG_(helper_request_normal_exit) );

VGOFF_(helper_do_syscall)
= alloc_BaB_1_set( (Addr) & VG_(helper_do_syscall) );
VGOFF_(helper_do_client_request)
= alloc_BaB_1_set( (Addr) & VG_(helper_do_client_request) );
}


@ -336,17 +325,6 @@ Addr VG_(esp_saved_over_syscall_d2);
/* Counts downwards in vg_run_innerloop. */
UInt VG_(dispatch_ctr);

/* If vg_dispatch_ctr is set to 1 to force a stop, its
previous value is saved here. */
UInt VG_(dispatch_ctr_SAVED);

/* This is why vg_run_innerloop() exited. */
UInt VG_(interrupt_reason);

/* vg_oursignalhandler() might longjmp(). Here's the jmp_buf. */
jmp_buf VG_(toploop_jmpbuf);
/* ... and if so, here's the signal which caused it to do so. */
Int VG_(longjmpd_on_signal);

/* 64-bit counter for the number of basic blocks done. */
ULong VG_(bbs_done);
@ -423,10 +401,12 @@ UInt VG_(smc_discard_count) = 0;


/* Counts pertaining to internal sanity checking. */

UInt VG_(sanity_fast_count) = 0;
UInt VG_(sanity_slow_count) = 0;

/* Counts pertaining to the scheduler. */
UInt VG_(num_scheduling_events_MINOR) = 0;
UInt VG_(num_scheduling_events_MAJOR) = 0;


/* ---------------------------------------------------------------------
@ -481,176 +461,6 @@ Char** VG_(client_envp);
static Char vg_cmdline_copy[M_VG_CMDLINE_STRLEN];


/* ---------------------------------------------------------------------
Top level simulation loop.
------------------------------------------------------------------ */

/* Create a translation of the client basic block beginning at
orig_addr, and add it to the translation cache & translation table.
This probably doesn't really belong here, but, hey ... */
void VG_(create_translation_for) ( Addr orig_addr )
{
Addr trans_addr;
TTEntry tte;
Int orig_size, trans_size;
/* Ensure there is space to hold a translation. */
VG_(maybe_do_lru_pass)();
VG_(translate)( orig_addr, &orig_size, &trans_addr, &trans_size );
/* Copy data at trans_addr into the translation cache.
Returned pointer is to the code, not to the 4-byte
header. */
/* Since the .orig_size and .trans_size fields are
UShort, be paranoid. */
vg_assert(orig_size > 0 && orig_size < 65536);
vg_assert(trans_size > 0 && trans_size < 65536);
tte.orig_size = orig_size;
tte.orig_addr = orig_addr;
tte.trans_size = trans_size;
tte.trans_addr = VG_(copy_to_transcache)
( trans_addr, trans_size );
tte.mru_epoch = VG_(current_epoch);
/* Free the intermediary -- was allocated by VG_(emit_code). */
VG_(jitfree)( (void*)trans_addr );
/* Add to trans tab and set back pointer. */
VG_(add_to_trans_tab) ( &tte );
/* Update stats. */
VG_(this_epoch_in_count) ++;
VG_(this_epoch_in_osize) += orig_size;
VG_(this_epoch_in_tsize) += trans_size;
VG_(overall_in_count) ++;
VG_(overall_in_osize) += orig_size;
VG_(overall_in_tsize) += trans_size;
/* Record translated area for SMC detection. */
VG_(smc_mark_original) (
VG_(baseBlock)[VGOFF_(m_eip)], orig_size );
}


/* Runs the client program from %EIP (baseBlock[off_eip]) until it
asks to exit, or until vg_bbs_to_go jumps have happened (the latter
case is for debugging). */

void VG_(toploop) ( void )
{
volatile UInt dispatch_ctr_SAVED;
volatile Int done_this_time;

/* For the LRU structures, records when the epoch began. */
volatile ULong epoch_started_at = 0;

while (True) {
next_outer_loop:

/* Age the LRU structures if an epoch has been completed. */
if (VG_(bbs_done) - epoch_started_at >= VG_BBS_PER_EPOCH) {
VG_(current_epoch)++;
epoch_started_at = VG_(bbs_done);
if (VG_(clo_verbosity) > 2) {
UInt tt_used, tc_used;
VG_(get_tt_tc_used) ( &tt_used, &tc_used );
VG_(message)(Vg_UserMsg,
"%lu bbs, in: %d (%d -> %d), out %d (%d -> %d), TT %d, TC %d",
VG_(bbs_done),
VG_(this_epoch_in_count),
VG_(this_epoch_in_osize),
VG_(this_epoch_in_tsize),
VG_(this_epoch_out_count),
VG_(this_epoch_out_osize),
VG_(this_epoch_out_tsize),
tt_used, tc_used
);
}
VG_(this_epoch_in_count) = 0;
VG_(this_epoch_in_osize) = 0;
VG_(this_epoch_in_tsize) = 0;
VG_(this_epoch_out_count) = 0;
VG_(this_epoch_out_osize) = 0;
VG_(this_epoch_out_tsize) = 0;
}

/* Figure out how many bbs to ask vg_run_innerloop to do. */
if (VG_(bbs_to_go) >= VG_SIGCHECK_INTERVAL)
VG_(dispatch_ctr) = 1 + VG_SIGCHECK_INTERVAL;
else
VG_(dispatch_ctr) = 1 + (UInt)VG_(bbs_to_go);

/* ... and remember what we asked for. */
dispatch_ctr_SAVED = VG_(dispatch_ctr);

/* Now have a go at doing them. */
VG_(interrupt_reason) = VG_Y_SIGCHECK;
if (__builtin_setjmp(VG_(toploop_jmpbuf)) == 0) {
/* try this ... */
VG_(run_innerloop)();
/* We get here if the client didn't take a fault. */
switch (VG_(interrupt_reason)) {
case VG_Y_SIGCHECK:
/* The counter fell to zero and no other situation has
been detected. */
vg_assert(VG_(dispatch_ctr) == 0);
done_this_time = dispatch_ctr_SAVED - 1;
VG_(bbs_to_go) -= (ULong)done_this_time;
VG_(bbs_done) += (ULong)done_this_time;
/* Exit if the debug run has ended. */
if (VG_(bbs_to_go) == 0) goto debug_stop;
VG_(deliver_signals)();
VG_(do_sanity_checks)(False);
goto next_outer_loop;
case VG_Y_EXIT:
/* The target program tried to exit. */
done_this_time = dispatch_ctr_SAVED - VG_(dispatch_ctr_SAVED);
done_this_time --;
VG_(bbs_to_go) -= (ULong)done_this_time;
VG_(bbs_done) += (ULong)done_this_time;
return;
case VG_Y_SMC:
/* A write to original code was detected. */
done_this_time = dispatch_ctr_SAVED - VG_(dispatch_ctr_SAVED);
VG_(bbs_to_go) -= (ULong)done_this_time;
VG_(bbs_done) += (ULong)done_this_time;
VG_(flush_transtab)();
goto next_outer_loop;
case VG_Y_TRANSLATE: {
/* Need to provide a translation of code at vg_m_eip. */
done_this_time = dispatch_ctr_SAVED - VG_(dispatch_ctr);
vg_assert(done_this_time > 0);
done_this_time --;
VG_(bbs_to_go) -= (ULong)done_this_time;
VG_(bbs_done) += (ULong)done_this_time;
VG_(create_translation_for)(VG_(baseBlock)[VGOFF_(m_eip)]);
goto next_outer_loop;
}
default:
VG_(panic)("vg_toploop: invalid interrupt reason");
}
} else {
/* We get here if the client took a fault, which caused our
signal handler to longjmp. */
done_this_time = dispatch_ctr_SAVED - VG_(dispatch_ctr);
VG_(bbs_to_go) -= (ULong)done_this_time;
VG_(bbs_done) += (ULong)done_this_time;
if (VG_(interrupt_reason) == VG_Y_EXIT) return;
VG_(deliver_signals)();
VG_(do_sanity_checks)(False);
VG_(unblock_host_signal)(VG_(longjmpd_on_signal));
}
}

/* NOTREACHED */

debug_stop:
/* If we exited because of a debug stop, print the translation
of the last block executed -- by translating it again, and
throwing away the result. */
VG_(printf)(
"======vvvvvvvv====== LAST TRANSLATION ======vvvvvvvv======\n");
VG_(translate)( VG_(baseBlock)[VGOFF_(m_eip)], NULL, NULL, NULL );
VG_(printf)("\n");
VG_(printf)(
"======^^^^^^^^====== LAST TRANSLATION ======^^^^^^^^======\n");
}


/* ---------------------------------------------------------------------
Processing of command-line options.
------------------------------------------------------------------ */
@ -705,7 +515,7 @@ static void process_cmd_line_options ( void )
VG_(clo_optimise) = True;
VG_(clo_instrument) = True;
VG_(clo_cleanup) = True;
VG_(clo_client_perms) = False;
VG_(clo_client_perms) = True;
VG_(clo_smc_check) = /* VG_CLO_SMC_SOME */ VG_CLO_SMC_NONE;
VG_(clo_trace_syscalls) = False;
VG_(clo_trace_signals) = False;
@ -1014,6 +824,7 @@ static void process_cmd_line_options ( void )
bad_option("--gdb-attach=yes and --trace-children=yes");
}

#if 0
if (VG_(clo_client_perms) && !VG_(clo_instrument)) {
VG_(message)(Vg_UserMsg, "");
VG_(message)(Vg_UserMsg,
@ -1023,6 +834,7 @@ static void process_cmd_line_options ( void )

if (VG_(clo_client_perms))
vg_assert(VG_(clo_instrument));
#endif

VG_(clo_logfile_fd) = eventually_logfile_fd;

@ -1106,8 +918,9 @@ void VG_(copy_m_state_static_to_baseBlock) ( void )
static void vg_show_counts ( void )
{
VG_(message)(Vg_DebugMsg,
" dispatch: %lu basic blocks, %d tt_fast misses.",
VG_(bbs_done), VG_(tt_fast_misses));
" lru: %d epochs, %d clearings.",
VG_(current_epoch),
VG_(number_of_lrus) );
VG_(message)(Vg_DebugMsg,
"translate: new %d (%d -> %d), discard %d (%d -> %d).",
VG_(overall_in_count),
@ -1117,9 +930,10 @@ static void vg_show_counts ( void )
VG_(overall_out_osize),
VG_(overall_out_tsize) );
VG_(message)(Vg_DebugMsg,
" lru: %d epochs, %d clearings.",
VG_(current_epoch),
VG_(number_of_lrus) );
" dispatch: %lu basic blocks, %d/%d sched events, %d tt_fast misses.",
VG_(bbs_done), VG_(num_scheduling_events_MAJOR),
VG_(num_scheduling_events_MINOR),
VG_(tt_fast_misses));
VG_(message)(Vg_DebugMsg,
"reg-alloc: %d t-req-spill, "
"%d+%d orig+spill uis, %d total-reg-r.",
@ -1150,7 +964,8 @@ static void vg_show_counts ( void )

void VG_(main) ( void )
{
Int i;
VgSchedReturnCode src;
|
||||
|
||||
/* Set up our stack sanity-check words. */
|
||||
for (i = 0; i < 10; i++) {
|
||||
@ -1211,11 +1026,18 @@ void VG_(main) ( void )
|
||||
VG_(message)(Vg_UserMsg, "");
|
||||
|
||||
VG_(bbs_to_go) = VG_(clo_stop_after);
|
||||
VG_(toploop)();
|
||||
|
||||
VG_(scheduler_init)();
|
||||
src = VG_(scheduler)();
|
||||
|
||||
if (VG_(clo_verbosity) > 0)
|
||||
VG_(message)(Vg_UserMsg, "");
|
||||
|
||||
if (src == VgSrc_Deadlock) {
|
||||
VG_(message)(Vg_UserMsg,
|
||||
"Warning: pthread scheduler exited due to deadlock");
|
||||
}
|
||||
|
||||
if (VG_(clo_instrument)) {
|
||||
VG_(show_all_errors)();
|
||||
VG_(clientmalloc_done)();
|
||||
@ -1226,8 +1048,9 @@ void VG_(main) ( void )
|
||||
if (VG_(clo_leak_check)) VG_(detect_memory_leaks)();
|
||||
}
|
||||
VG_(running_on_simd_CPU) = False;
|
||||
|
||||
VG_(do_sanity_checks)(True /*include expensive checks*/ );
|
||||
|
||||
VG_(do_sanity_checks)( 0 /* root thread */,
|
||||
True /*include expensive checks*/ );
|
||||
|
||||
if (VG_(clo_verbosity) > 1)
|
||||
vg_show_counts();
|
||||
@ -1262,6 +1085,7 @@ void VG_(main) ( void )
|
||||
}
|
||||
|
||||
/* Prepare to restore state to the real CPU. */
|
||||
VG_(load_thread_state)(0);
|
||||
VG_(copy_baseBlock_to_m_state_static)();
|
||||
|
||||
/* This pushes a return address on the simulator's stack, which
|
||||
@ -1349,116 +1173,6 @@ extern void VG_(unimplemented) ( Char* msg )
|
||||
}
|
||||
|
||||
|
||||
/*-------------------------------------------------------------*/
|
||||
/*--- Replace some C lib things with equivs which don't get ---*/
|
||||
/*--- spurious value warnings. THEY RUN ON SIMD CPU! ---*/
|
||||
/*-------------------------------------------------------------*/
|
||||
|
||||
char* strrchr ( const char* s, int c )
|
||||
{
|
||||
UChar ch = (UChar)((UInt)c);
|
||||
UChar* p = (UChar*)s;
|
||||
UChar* last = NULL;
|
||||
while (True) {
|
||||
if (*p == ch) last = p;
|
||||
if (*p == 0) return last;
|
||||
p++;
|
||||
}
|
||||
}
|
||||
|
||||
char* strchr ( const char* s, int c )
|
||||
{
|
||||
UChar ch = (UChar)((UInt)c);
|
||||
UChar* p = (UChar*)s;
|
||||
while (True) {
|
||||
if (*p == ch) return p;
|
||||
if (*p == 0) return NULL;
|
||||
p++;
|
||||
}
|
||||
}
|
||||
|
||||
char* strcat ( char* dest, const char* src )
|
||||
{
|
||||
Char* dest_orig = dest;
|
||||
while (*dest) dest++;
|
||||
while (*src) *dest++ = *src++;
|
||||
*dest = 0;
|
||||
return dest_orig;
|
||||
}
|
||||
|
||||
unsigned int strlen ( const char* str )
|
||||
{
|
||||
UInt i = 0;
|
||||
while (str[i] != 0) i++;
|
||||
return i;
|
||||
}
|
||||
|
||||
char* strcpy ( char* dest, const char* src )
|
||||
{
|
||||
Char* dest_orig = dest;
|
||||
while (*src) *dest++ = *src++;
|
||||
*dest = 0;
|
||||
return dest_orig;
|
||||
}
|
||||
|
||||
int strncmp ( const char* s1, const char* s2, unsigned int nmax )
|
||||
{
|
||||
unsigned int n = 0;
|
||||
while (True) {
|
||||
if (n >= nmax) return 0;
|
||||
if (*s1 == 0 && *s2 == 0) return 0;
|
||||
if (*s1 == 0) return -1;
|
||||
if (*s2 == 0) return 1;
|
||||
|
||||
if (*(UChar*)s1 < *(UChar*)s2) return -1;
|
||||
if (*(UChar*)s1 > *(UChar*)s2) return 1;
|
||||
|
||||
s1++; s2++; n++;
|
||||
}
|
||||
}
|
||||
|
||||
int strcmp ( const char* s1, const char* s2 )
|
||||
{
|
||||
while (True) {
|
||||
if (*s1 == 0 && *s2 == 0) return 0;
|
||||
if (*s1 == 0) return -1;
|
||||
if (*s2 == 0) return 1;
|
||||
|
||||
if (*(char*)s1 < *(char*)s2) return -1;
|
||||
if (*(char*)s1 > *(char*)s2) return 1;
|
||||
|
||||
s1++; s2++;
|
||||
}
|
||||
}
|
||||
|
||||
void* memchr(const void *s, int c, unsigned int n)
|
||||
{
|
||||
unsigned int i;
|
||||
UChar c0 = (UChar)c;
|
||||
UChar* p = (UChar*)s;
|
||||
for (i = 0; i < n; i++)
|
||||
if (p[i] == c0) return (void*)(&p[i]);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void* memcpy( void *dst, const void *src, unsigned int len )
|
||||
{
|
||||
register char *d;
|
||||
register char *s;
|
||||
if ( dst > src ) {
|
||||
d = (char *)dst + len - 1;
|
||||
s = (char *)src + len - 1;
|
||||
while ( len-- )
|
||||
*d-- = *s--;
|
||||
} else if ( dst < src ) {
|
||||
d = (char *)dst;
|
||||
s = (char *)src;
|
||||
while ( len-- )
|
||||
*d++ = *s++;
|
||||
}
|
||||
return dst;
|
||||
}
|
||||
|
||||
/*--------------------------------------------------------------------*/
|
||||
/*--- end vg_main.c ---*/
|
||||
/*--------------------------------------------------------------------*/
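(Editor's note: the memcpy above picks its copy direction by comparing dst and src, so it tolerates overlapping buffers, i.e. it has memmove semantics. The following is an illustrative, standalone sketch, not part of the commit, using plain libc types rather than the UChar/Char types above, showing the same direction-picking logic in action.)

/* Standalone demo of the overlap-safe copy used by the memcpy above. */
#include <stdio.h>
#include <string.h>

static void* overlap_safe_copy ( void *dst, const void *src, unsigned int len )
{
   char       *d;
   const char *s;
   if ( dst > src ) {                  /* copy backwards: high to low */
      d = (char *)dst + len - 1;
      s = (const char *)src + len - 1;
      while ( len-- ) *d-- = *s--;
   } else if ( dst < src ) {           /* copy forwards: low to high */
      d = (char *)dst;
      s = (const char *)src;
      while ( len-- ) *d++ = *s++;
   }
   return dst;
}

int main ( void )
{
   char buf[16];
   strcpy(buf, "abcdef");
   /* Shift "abcdef" right by two, into overlapping storage. */
   overlap_safe_copy(buf + 2, buf, 7);   /* 7 = 6 chars + NUL */
   printf("%s\n", buf);                  /* prints "ababcdef" */
   return 0;
}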

21 vg_memory.c
@ -2122,10 +2122,11 @@ Bool VG_(first_and_last_secondaries_look_plausible) ( void )
/* A fast sanity check -- suitable for calling circa once per
   millisecond. */

void VG_(do_sanity_checks) ( Bool force_expensive )
void VG_(do_sanity_checks) ( ThreadId tid, Bool force_expensive )
{
   Int          i;
   Bool         do_expensive_checks;
   Int          i;
   Bool         do_expensive_checks;
   ThreadState* tst;

   if (VG_(sanity_level) < 1) return;

@ -2133,6 +2134,9 @@ void VG_(do_sanity_checks) ( Bool force_expensive )

   VG_(sanity_fast_count)++;

   tst = VG_(get_thread_state)(tid);
   vg_assert(tst != NULL && tst->status != VgTs_Empty);

   /* Check that we haven't overrun our private stack. */
   for (i = 0; i < 10; i++) {
      vg_assert(VG_(stack)[i]
@ -2146,7 +2150,7 @@ void VG_(do_sanity_checks) ( Bool force_expensive )
   if (VG_(clo_instrument)) {

      /* Check that the eflags tag is as expected. */
      UInt vv = VG_(baseBlock)[VGOFF_(sh_eflags)];
      UInt vv = tst->sh_eflags;
      vg_assert(vv == VGM_EFLAGS_VALID || vv == VGM_EFLAGS_INVALID);

      /* Check that nobody has spuriously claimed that the first or
@ -2154,12 +2158,6 @@ void VG_(do_sanity_checks) ( Bool force_expensive )
      vg_assert(VG_(first_and_last_secondaries_look_plausible)());
   }

#  if 0
   if ( (VG_(baseBlock)[VGOFF_(sh_eflags)] & 1) == 1)
      VG_(printf)("UNDEF\n") ; else
      VG_(printf)("def\n") ;
#  endif

   /* --- Now some more expensive checks. ---*/

   /* Once every 25 times, check some more expensive stuff. */
@ -2233,6 +2231,9 @@ static void uint_to_bits ( UInt x, Char* str )
   vg_assert(w == 36);
}

/* Caution!  Not vthread-safe; looks in VG_(baseBlock), not the thread
   state table. */

void VG_(show_reg_tags) ( void )
{
   Char buf1[36];

51 vg_mylibc.c
@ -232,7 +232,7 @@ Int VG_(ksignal)(Int signum, void (*sighandler)(Int))


/* ---------------------------------------------------------------------
   mmap/munmap, exit
   mmap/munmap, exit, fcntl
   ------------------------------------------------------------------ */

/* Returns -1 on failure. */
@ -266,6 +266,43 @@ void VG_(exit)( Int status )
   vg_assert(2+2 == 5);
}

/* Returns -1 on error. */
Int VG_(fcntl) ( Int fd, Int cmd, Int arg )
{
   Int res = vg_do_syscall3(__NR_fcntl, fd, cmd, arg);
   return VG_(is_kerror)(res) ? -1 : res;
}

/* Returns -1 on error. */
Int VG_(select)( Int n,
                 vki_fd_set* readfds,
                 vki_fd_set* writefds,
                 vki_fd_set* exceptfds,
                 struct vki_timeval * timeout )
{
   Int  res;
   UInt args[5];
   args[0] = n;
   args[1] = (UInt)readfds;
   args[2] = (UInt)writefds;
   args[3] = (UInt)exceptfds;
   args[4] = (UInt)timeout;
   res = vg_do_syscall1(__NR_select, (UInt)(&(args[0])) );
   return VG_(is_kerror)(res) ? -1 : res;
}

/* Returns -1 on error, but 0 if ok or interrupted. */
Int VG_(nanosleep)( const struct vki_timespec *req,
                    struct vki_timespec *rem )
{
   Int res;
   res = vg_do_syscall2(__NR_nanosleep, (UInt)req, (UInt)rem);
   if (res == -VKI_EINVAL) return -1;
   return 0;
}
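(Editor's note: a hedged sketch, not from the commit, of how VG_(select) might be used with a zero timeout to poll a descriptor without blocking. The old i386 Linux select convention packs all five arguments into one word array, as the wrapper above shows. VKI_FD_ZERO and VKI_FD_SET are assumed to exist with the usual fd_set semantics for the vki_fd_set type.)

static Bool fd_is_readable ( Int fd )
{
   vki_fd_set         rfds;
   struct vki_timeval tv;
   Int                res;
   VKI_FD_ZERO(&rfds);
   VKI_FD_SET(fd, &rfds);
   tv.tv_sec  = 0;            /* zero timeout: return immediately */
   tv.tv_usec = 0;
   res = VG_(select)(fd + 1, &rfds, NULL, NULL, &tv);
   return res > 0;            /* > 0 means fd is ready for reading */
}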


/* ---------------------------------------------------------------------
   printf implementation.  The key function, vg_vprintf(), emits chars
   into a caller-supplied function.  Distantly derived from:
@ -809,7 +846,6 @@ void VG_(assert_fail) ( Char* expr, Char* file, Int line, Char* fn )
      "valgrind", file, line, fn, expr );
   VG_(printf)("Please report this bug to me at: %s\n\n", EMAIL_ADDR);
   VG_(shutdown_logging)();
   /* vg_restore_SIGABRT(); */
   VG_(exit)(1);
}

@ -819,7 +855,6 @@ void VG_(panic) ( Char* str )
   VG_(printf)("Basic block ctr is approximately %llu\n", VG_(bbs_done) );
   VG_(printf)("Please report this bug to me at: %s\n\n", EMAIL_ADDR);
   VG_(shutdown_logging)();
   /* vg_restore_SIGABRT(); */
   VG_(exit)(1);
}

@ -900,6 +935,16 @@ Int VG_(getpid) ( void )
   return res;
}

/* Read a notional elapsed (wallclock-time) timer, giving a 64-bit
   microseconds count. */
ULong VG_(read_microsecond_timer)( void )
{
   Int res;
   struct vki_timeval tv;
   res = vg_do_syscall2(__NR_gettimeofday, (UInt)&tv, (UInt)NULL);
   vg_assert(!VG_(is_kerror)(res));
   return (1000000ULL * (ULong)(tv.tv_sec)) + (ULong)(tv.tv_usec);
}
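(Editor's note: illustrative only, not part of the commit. Since the timer above is built on gettimeofday it has no defined epoch, so only the difference between two readings is meaningful, e.g. for deciding how long a sleeping thread has left. A minimal sketch:)

static void time_something ( void (*fn)(void) )
{
   ULong t0, t1;
   t0 = VG_(read_microsecond_timer)();
   fn();                                /* the work being measured */
   t1 = VG_(read_microsecond_timer)();
   VG_(message)(Vg_DebugMsg, "took %llu microseconds", t1 - t0);
}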

/* ---------------------------------------------------------------------
   Primitive support for bagging memory via mmap.

425 vg_signals.c
@ -36,13 +36,6 @@
#include "vg_unsafe.h"


/* ---------------------------------------------------------------------
   An implementation of signal sets and other grunge, identical to
   that in the target kernels (Linux 2.2.X and 2.4.X).
   ------------------------------------------------------------------ */


/* ---------------------------------------------------------------------
   Signal state for this process.
   ------------------------------------------------------------------ */
@ -64,8 +57,29 @@ void* VG_(sighandler)[VKI_KNSIG];

void* VG_(sigpending)[VKI_KNSIG];

/* See decl in vg_include.h for explanation. */
Int VG_(syscall_depth) = 0;

/* ---------------------------------------------------------------------
   Handy utilities to block/restore all host signals.
   ------------------------------------------------------------------ */

/* Block all host signals, dumping the old mask in *saved_mask. */
void VG_(block_all_host_signals) ( /* OUT */ vki_ksigset_t* saved_mask )
{
   Int ret;
   vki_ksigset_t block_procmask;
   VG_(ksigfillset)(&block_procmask);
   ret = VG_(ksigprocmask)
            (VKI_SIG_SETMASK, &block_procmask, saved_mask);
   vg_assert(ret == 0);
}

/* Restore the blocking mask using the supplied saved one. */
void VG_(restore_host_signals) ( /* IN */ vki_ksigset_t* saved_mask )
{
   Int ret;
   ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, saved_mask, NULL);
   vg_assert(ret == 0);
}
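(Editor's note: a sketch of the intended usage pattern for the two helpers above; this exact pairing replaces the open-coded ksigprocmask calls throughout the rest of this diff.)

static void example_critical_section ( void )
{
   vki_ksigset_t saved_procmask;
   VG_(block_all_host_signals)( &saved_procmask );
   /* ... safely inspect/modify VG_(sigpending), VG_(sighandler), etc.;
      VG_(oursignalhandler) cannot run during this window ... */
   VG_(restore_host_signals)( &saved_procmask );
}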


/* ---------------------------------------------------------------------
@ -78,9 +92,14 @@ Int VG_(syscall_depth) = 0;

typedef
   struct {
      UInt retaddr;   /* Sig handler's (bogus) return address */
      Int  sigNo;     /* The arg to the sig handler. */
      /* These are parameters to the signal handler. */
      UInt retaddr;   /* Sig handler's (bogus) return address */
      Int  sigNo;     /* The arg to the sig handler. */
      Addr psigInfo;  /* ptr to siginfo_t; NULL for now. */
      Addr puContext; /* ptr to ucontext; NULL for now. */
      /* Sanity check word. */
      UInt magicPI;
      /* Saved processor state. */
      UInt fpustate[VG_SIZE_OF_FPUSTATE_W];
      UInt eax;
      UInt ecx;
@ -92,9 +111,14 @@ typedef
      UInt edi;
      Addr eip;
      UInt eflags;
      /* Scheduler-private stuff: what was the thread's status prior to
         delivering this signal? */
      ThreadStatus status;
      /* Sanity check word.  Is the highest-addressed word; do not
         move! */
      UInt magicE;
   }
   VgSigContext;
   VgSigFrame;



@ -113,35 +137,52 @@ void VG_(signalreturn_bogusRA) ( void )
   handler.  This includes the signal number and a bogus return
   address. */
static
void vg_push_signal_frame ( int sigNo )
void vg_push_signal_frame ( ThreadId tid, int sigNo )
{
   Int          i;
   UInt         esp;
   VgSigContext sigctx;
   Addr         esp;
   VgSigFrame*  frame;
   ThreadState* tst;

   tst = VG_(get_thread_state)(tid);
   esp = tst->m_esp;

   esp -= sizeof(VgSigFrame);
   frame = (VgSigFrame*)esp;
   /* Assert that the frame is placed correctly. */
   vg_assert( (sizeof(VgSigFrame) & 0x3) == 0 );
   vg_assert( ((Char*)(&frame->magicE)) + sizeof(UInt)
              == ((Char*)(tst->m_esp)) );

   frame->retaddr   = (UInt)(&VG_(signalreturn_bogusRA));
   frame->sigNo     = sigNo;
   frame->psigInfo  = (Addr)NULL;
   frame->puContext = (Addr)NULL;
   frame->magicPI   = 0x31415927;

   for (i = 0; i < VG_SIZE_OF_FPUSTATE_W; i++)
      sigctx.fpustate[i] = VG_(baseBlock)[VGOFF_(m_fpustate) + i];
      frame->fpustate[i] = tst->m_fpu[i];

   sigctx.magicPI = 0x31415927;
   sigctx.magicE  = 0x27182818;
   sigctx.eax     = VG_(baseBlock)[VGOFF_(m_eax)];
   sigctx.ecx     = VG_(baseBlock)[VGOFF_(m_ecx)];
   sigctx.edx     = VG_(baseBlock)[VGOFF_(m_edx)];
   sigctx.ebx     = VG_(baseBlock)[VGOFF_(m_ebx)];
   sigctx.ebp     = VG_(baseBlock)[VGOFF_(m_ebp)];
   sigctx.esp     = VG_(baseBlock)[VGOFF_(m_esp)];
   sigctx.esi     = VG_(baseBlock)[VGOFF_(m_esi)];
   sigctx.edi     = VG_(baseBlock)[VGOFF_(m_edi)];
   sigctx.eflags  = VG_(baseBlock)[VGOFF_(m_eflags)];
   sigctx.eip     = VG_(baseBlock)[VGOFF_(m_eip)];
   sigctx.retaddr = (UInt)(&VG_(signalreturn_bogusRA));
   sigctx.sigNo   = sigNo;
   frame->eax    = tst->m_eax;
   frame->ecx    = tst->m_ecx;
   frame->edx    = tst->m_edx;
   frame->ebx    = tst->m_ebx;
   frame->ebp    = tst->m_ebp;
   frame->esp    = tst->m_esp;
   frame->esi    = tst->m_esi;
   frame->edi    = tst->m_edi;
   frame->eip    = tst->m_eip;
   frame->eflags = tst->m_eflags;

   esp = VG_(baseBlock)[VGOFF_(m_esp)];
   vg_assert((sizeof(VgSigContext) & 0x3) == 0);
   frame->status = tst->status;

   esp -= sizeof(VgSigContext);
   for (i = 0; i < sizeof(VgSigContext)/4; i++)
      ((UInt*)esp)[i] = ((UInt*)(&sigctx))[i];
   frame->magicE = 0x27182818;

   /* Set the thread so it will next run the handler. */
   tst->m_esp = esp;
   tst->m_eip = (Addr)VG_(sigpending)[sigNo];
   /* This thread needs to be marked runnable, but we leave that for
      the caller to do. */

   /* Make sigNo and retaddr fields readable -- at 0(%ESP) and 4(%ESP) */
   if (VG_(clo_instrument)) {
@ -149,11 +190,9 @@ void vg_push_signal_frame ( int sigNo )
      VGM_(make_readable) ( ((Addr)esp)+4 ,4 );
   }

   VG_(baseBlock)[VGOFF_(m_esp)] = esp;
   VG_(baseBlock)[VGOFF_(m_eip)] = (Addr)VG_(sigpending)[sigNo];
   /*
   VG_(printf)("pushed signal frame; %%ESP now = %p, next %%EBP = %p\n",
               esp, VG_(baseBlock)[VGOFF_(m_eip)]);
               esp, tst->m_eip);
   */
}
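(Editor's note: an illustrative, non-committed check of the layout the assertions above rely on: the frame sits immediately below the old %ESP, with magicE as the highest-addressed word and retaddr/sigNo as the lowest two, where the handler expects them at 0(%ESP) and 4(%ESP). The <stddef.h> inclusion is an assumption of this sketch.)

#include <stddef.h>   /* for offsetof, used by this sketch only */

static void check_frame_layout ( void )
{
   vg_assert( offsetof(VgSigFrame, retaddr) == 0 );
   vg_assert( offsetof(VgSigFrame, sigNo)   == sizeof(UInt) );
   /* magicE must be the last word, with no trailing padding. */
   vg_assert( offsetof(VgSigFrame, magicE)
              == sizeof(VgSigFrame) - sizeof(UInt) );
}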

@ -162,43 +201,56 @@ void vg_push_signal_frame ( int sigNo )
   simulated machine state, and return the signal number that the
   frame was for. */
static
Int vg_pop_signal_frame ( void )
Int vg_pop_signal_frame ( ThreadId tid )
{
   UInt          esp;
   Addr          esp;
   Int           sigNo, i;
   VgSigContext* sigctx;
   /* esp is now pointing at the magicPI word on the stack, viz,
      eight bytes above the bottom of the vg_sigcontext.
   */
   esp    = VG_(baseBlock)[VGOFF_(m_esp)];
   sigctx = (VgSigContext*)(esp-4);
   VgSigFrame*   frame;
   ThreadState*  tst;

   vg_assert(sigctx->magicPI == 0x31415927);
   vg_assert(sigctx->magicE  == 0x27182818);
   tst = VG_(get_thread_state)(tid);

   /* esp is now pointing at the sigNo field in the signal frame. */
   esp   = tst->m_esp;
   frame = (VgSigFrame*)(esp-4);

   vg_assert(frame->magicPI == 0x31415927);
   vg_assert(frame->magicE  == 0x27182818);
   if (VG_(clo_trace_signals))
      VG_(message)(Vg_DebugMsg, "vg_pop_signal_frame: valid magic");

   /* restore machine state */
   for (i = 0; i < VG_SIZE_OF_FPUSTATE_W; i++)
      VG_(baseBlock)[VGOFF_(m_fpustate) + i] = sigctx->fpustate[i];
      tst->m_fpu[i] = frame->fpustate[i];

   /* Mark the sigctx structure as nonaccessible.  Has to happen
      _before_ vg_m_state.m_esp is given a new value. */
   if (VG_(clo_instrument))
      VGM_(handle_esp_assignment) ( sigctx->esp );
   /* Mark the frame structure as nonaccessible.  Has to happen
      _before_ vg_m_state.m_esp is given a new value.
      handle_esp_assignment reads %ESP from baseBlock, so we park it
      there first.  Re-place the junk there afterwards. */
   if (VG_(clo_instrument)) {
      vg_assert(VG_(baseBlock)[VGOFF_(m_esp)] == 0xDEADBEEF);
      VG_(baseBlock)[VGOFF_(m_esp)] = tst->m_esp;
      VGM_(handle_esp_assignment) ( frame->esp );
      VG_(baseBlock)[VGOFF_(m_esp)] = 0xDEADBEEF;
   }

   /* Restore machine state from the saved context. */
   VG_(baseBlock)[VGOFF_(m_eax)]    = sigctx->eax;
   VG_(baseBlock)[VGOFF_(m_ecx)]    = sigctx->ecx;
   VG_(baseBlock)[VGOFF_(m_edx)]    = sigctx->edx;
   VG_(baseBlock)[VGOFF_(m_ebx)]    = sigctx->ebx;
   VG_(baseBlock)[VGOFF_(m_ebp)]    = sigctx->ebp;
   VG_(baseBlock)[VGOFF_(m_esp)]    = sigctx->esp;
   VG_(baseBlock)[VGOFF_(m_esi)]    = sigctx->esi;
   VG_(baseBlock)[VGOFF_(m_edi)]    = sigctx->edi;
   VG_(baseBlock)[VGOFF_(m_eflags)] = sigctx->eflags;
   VG_(baseBlock)[VGOFF_(m_eip)]    = sigctx->eip;
   sigNo = sigctx->sigNo;
   tst->m_eax    = frame->eax;
   tst->m_ecx    = frame->ecx;
   tst->m_edx    = frame->edx;
   tst->m_ebx    = frame->ebx;
   tst->m_ebp    = frame->ebp;
   tst->m_esp    = frame->esp;
   tst->m_esi    = frame->esi;
   tst->m_edi    = frame->edi;
   tst->m_eflags = frame->eflags;
   tst->m_eip    = frame->eip;
   sigNo = frame->sigNo;

   /* And restore the thread's status to what it was before the signal
      was delivered. */
   tst->status = frame->status;

   return sigNo;
}

@ -207,18 +259,17 @@ Int vg_pop_signal_frame ( void )
   VgSigContext and continue with whatever was going on before the
   handler ran. */

void VG_(signal_returns) ( void )
void VG_(signal_returns) ( ThreadId tid )
{
   Int            sigNo, ret;
   vki_ksigset_t  block_procmask;
   Int            sigNo;
   vki_ksigset_t  saved_procmask;

   /* Block host signals ... */
   VG_(ksigfillset)(&block_procmask);
   ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &block_procmask, &saved_procmask);
   vg_assert(ret == 0);
   VG_(block_all_host_signals)( &saved_procmask );

   sigNo = vg_pop_signal_frame();
   /* Pop the signal frame and restore tid's status to what it was
      before the signal was delivered. */
   sigNo = vg_pop_signal_frame(tid);

   /* You would have thought that the following assertion made sense
      here:
@ -242,40 +293,18 @@ void VG_(signal_returns) ( void )
   VG_(sigpending)[sigNo] = VG_SIGIDLE;

   /* Unlock and return. */
   ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &saved_procmask, NULL);
   vg_assert(ret == 0);
   VG_(restore_host_signals)( &saved_procmask );

   /* The main dispatch loop now continues at vg_m_eip. */
}


/* Restore the default host behaviour of SIGABRT, and unblock it,
   so we can exit the simulator cleanly by doing exit/abort/assert fail.
*/
void VG_(restore_SIGABRT) ( void )
{
   vki_ksigset_t  set;
   vki_ksigaction act;
   act.ksa_flags   = VKI_SA_RESTART;
   act.ksa_handler = VKI_SIG_DFL;
   VG_(ksigemptyset)(&act.ksa_mask);

   VG_(ksigemptyset)(&set);
   VG_(ksigaddset)(&set,VKI_SIGABRT);

   /* If this doesn't work, tough.  Don't check return code. */
   VG_(ksigaction)(VKI_SIGABRT, &act, NULL);
   VG_(ksigprocmask)(VKI_SIG_UNBLOCK, &set, NULL);
   /* Scheduler now can resume this thread, or perhaps some other. */
}


/* Deliver all pending signals, by building stack frames for their
   handlers. */
void VG_(deliver_signals) ( void )
void VG_(deliver_signals) ( ThreadId tid )
{
   vki_ksigset_t  block_procmask;
   vki_ksigset_t  saved_procmask;
   Int            ret, sigNo;
   Int            sigNo;
   Bool           found;

   /* A cheap check.  We don't need to have exclusive access
@ -295,10 +324,9 @@ void VG_(deliver_signals) ( void )
      blocking all the host's signals.  That means vg_oursignalhandler
      can't run whilst we are messing with stuff.
   */
   VG_(ksigfillset)(&block_procmask);
   ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &block_procmask, &saved_procmask);
   vg_assert(ret == 0);
   VG_(block_all_host_signals)( &saved_procmask );

   /* Look for signals to deliver ... */
   for (sigNo = 1; sigNo < VKI_KNSIG; sigNo++) {
      if (VG_(sigpending)[sigNo] == VG_SIGIDLE ||
          VG_(sigpending)[sigNo] == VG_SIGRUNNING) continue;
@ -310,94 +338,19 @@ void VG_(deliver_signals) ( void )
         %EIP so that when execution continues, we will enter the
         signal handler with the frame on top of the client's stack,
         as it expects. */
      vg_push_signal_frame ( sigNo );

      vg_push_signal_frame ( tid, sigNo );
      VG_(get_thread_state)(tid)->status = VgTs_Runnable;

      /* Signify that the signal has been delivered. */
      VG_(sigpending)[sigNo] = VG_SIGRUNNING;
   }

   /* Unlock and return. */
   ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &saved_procmask, NULL);
   vg_assert(ret == 0);
   VG_(restore_host_signals)( &saved_procmask );
   return;
}


/* ----------- HACK ALERT ----------- */
/* Note carefully that this runs with all host signals disabled! */
static
void vg_deliver_signal_immediately ( Int sigNo )
{
   Int  n_bbs_done;
   Int  sigNo2;
   Addr next_orig_addr;
   Addr next_trans_addr;

   if (VG_(clo_verbosity) > 0
       && (True || VG_(clo_trace_signals)))
      VG_(message)(Vg_DebugExtraMsg,
                   "deliver signal %d immediately: BEGIN", sigNo );
   /* VG_(printf)("resumption addr is %p\n",
                  VG_(baseBlock)[VGOFF_(m_eip)]); */

   vg_push_signal_frame ( sigNo );
   n_bbs_done = 0;

   /* Single-step the client (ie, run the handler) until it jumps to
      VG_(signalreturn_bogusRA) */

   while (True) {

      if (n_bbs_done >= VG_MAX_BBS_IN_IMMEDIATE_SIGNAL)
         VG_(unimplemented)(
            "handling signal whilst client blocked in syscall: "
            "handler runs too long"
         );

      next_orig_addr = VG_(baseBlock)[VGOFF_(m_eip)];

      if (next_orig_addr == (Addr)(&VG_(trap_here)))
         VG_(unimplemented)(
            "handling signal whilst client blocked in syscall: "
            "handler calls malloc (et al)"
         );

      /* VG_(printf)("next orig addr = %p\n", next_orig_addr); */
      if (next_orig_addr == (Addr)(&VG_(signalreturn_bogusRA)))
         break;

      next_trans_addr = VG_(search_transtab) ( next_orig_addr );
      if (next_trans_addr == (Addr)NULL) {
         VG_(create_translation_for) ( next_orig_addr );
         next_trans_addr = VG_(search_transtab) ( next_orig_addr );
      }

      vg_assert(next_trans_addr != (Addr)NULL);
      next_orig_addr = VG_(run_singleton_translation)(next_trans_addr);
      VG_(baseBlock)[VGOFF_(m_eip)] = next_orig_addr;
      n_bbs_done++;
   }

   sigNo2 = vg_pop_signal_frame();
   vg_assert(sigNo2 == sigNo);

   if (VG_(clo_verbosity) > 0
       && (True || VG_(clo_trace_signals)))
      VG_(message)(Vg_DebugExtraMsg,
                   "deliver signal %d immediately: END, %d bbs done",
                   sigNo, n_bbs_done );

   /* Invalidate the tt_fast cache.  We've been (potentially) adding
      translations and even possibly doing LRUs without keeping it up
      to date, so we'd better nuke it before going any further, to
      avoid inconsistencies with the main TT/TC structure. */
   VG_(invalidate_tt_fast)();
}


/* ----------- end of HACK ALERT ----------- */


/* Receive a signal from the host, and either discard it or park it in
   the queue of pending signals.  All other signals will be blocked
   when this handler runs.  Runs with all host signals blocked, so as
@ -405,8 +358,7 @@ void vg_deliver_signal_immediately ( Int sigNo )

static void VG_(oursignalhandler) ( Int sigNo )
{
   Int           ret;
   vki_ksigset_t block_procmask;
   Int           dummy_local;
   vki_ksigset_t saved_procmask;

   if (VG_(clo_trace_signals)) {
@ -418,20 +370,24 @@ static void VG_(oursignalhandler) ( Int sigNo )
   /* Sanity check.  Ensure we're really running on the signal stack
      we asked for. */
   if ( !(
           ((Char*)(&(VG_(sigstack)[0])) <= (Char*)(&ret))
           ((Char*)(&(VG_(sigstack)[0])) <= (Char*)(&dummy_local))
           &&
           ((Char*)(&ret) < (Char*)(&(VG_(sigstack)[10000])))
           ((Char*)(&dummy_local) < (Char*)(&(VG_(sigstack)[10000])))
         )
      ) {
      VG_(message)(Vg_DebugMsg, "FATAL: signal delivered on the wrong stack?!");
      VG_(message)(Vg_DebugMsg, "A possible workaround follows.  Please tell me");
      VG_(message)(Vg_DebugMsg, "(jseward@acm.org) if the suggested workaround doesn't help.");
      VG_(message)(Vg_DebugMsg,
                   "FATAL: signal delivered on the wrong stack?!");
      VG_(message)(Vg_DebugMsg,
                   "A possible workaround follows.  Please tell me");
      VG_(message)(Vg_DebugMsg,
                   "(jseward@acm.org) if the suggested workaround doesn't help.");
      VG_(unimplemented)
         ("support for progs compiled with -p/-pg; rebuild your prog without -p/-pg");
         ("support for progs compiled with -p/-pg; "
          "rebuild your prog without -p/-pg");
   }

   vg_assert((Char*)(&(VG_(sigstack)[0])) <= (Char*)(&ret));
   vg_assert((Char*)(&ret) < (Char*)(&(VG_(sigstack)[10000])));
   vg_assert((Char*)(&(VG_(sigstack)[0])) <= (Char*)(&dummy_local));
   vg_assert((Char*)(&dummy_local) < (Char*)(&(VG_(sigstack)[10000])));

   if (sigNo == VKI_SIGABRT && VG_(sighandler)[sigNo] == NULL) {
      /* We get here if SIGABRT is delivered and the client hasn't
@ -442,21 +398,19 @@ static void VG_(oursignalhandler) ( Int sigNo )
         VG_(end_msg)();
      }
      VG_(ksignal)(VKI_SIGABRT, VKI_SIG_DFL);
      VG_(interrupt_reason)   = VG_Y_EXIT;
      VG_(longjmpd_on_signal) = VKI_SIGABRT;
      __builtin_longjmp(VG_(toploop_jmpbuf),1);
      __builtin_longjmp(VG_(scheduler_jmpbuf),1);
   }

   /* Block all host signals. */
   VG_(ksigfillset)(&block_procmask);
   ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &block_procmask, &saved_procmask);
   vg_assert(ret == 0);
   VG_(block_all_host_signals)( &saved_procmask );

   if (VG_(sighandler)[sigNo] == NULL) {
      if (VG_(clo_trace_signals)) {
         VG_(add_to_msg)("unexpected!");
         VG_(end_msg)();
      }
      /* Note: we panic with all signals blocked here.  Don't think
         that matters. */
      VG_(panic)("vg_oursignalhandler: unexpected signal");
   }

@ -478,47 +432,26 @@ static void VG_(oursignalhandler) ( Int sigNo )
      }
   }
   else {
      /* Ok, we'd better deliver it to the client, one way or another. */
      /* Ok, we'd better deliver it to the client. */
      vg_assert(VG_(sigpending)[sigNo] == VG_SIGIDLE);

      if (VG_(syscall_depth) == 0) {
         /* The usual case; delivering a signal to the client, and the
            client is not currently in a syscall.  Queue it up for
            delivery at some point in the future. */
         VG_(sigpending)[sigNo] = VG_(sighandler)[sigNo];
         if (VG_(clo_trace_signals)) {
            VG_(add_to_msg)("queued" );
            VG_(end_msg)();
         }
      } else {
         /* The nasty case, which was causing kmail to freeze up: the
            client is (presumably blocked) in a syscall.  We have to
            deliver the signal right now, because it may be that
            running the sighandler is the only way that the syscall
            will be able to return.  In which case, if we don't do
            that, the client will deadlock. */
         if (VG_(clo_trace_signals)) {
            VG_(add_to_msg)("delivering immediately" );
            VG_(end_msg)();
         }
         /* Note that this runs with all host signals blocked. */
         VG_(sigpending)[sigNo] = VG_(sighandler)[sigNo];
         vg_deliver_signal_immediately(sigNo);
         VG_(sigpending)[sigNo] = VG_SIGIDLE;
         /* VG_(printf)("resuming at %p\n", VG_(baseBlock)[VGOFF_(m_eip)]); */
      /* Queue it up for delivery at some point in the future. */
      VG_(sigpending)[sigNo] = VG_(sighandler)[sigNo];
      if (VG_(clo_trace_signals)) {
         VG_(add_to_msg)("queued" );
         VG_(end_msg)();
      }
   }

   /* We've finished messing with the queue, so re-enable host signals. */
   ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &saved_procmask, NULL);
   /* We've finished messing with the queue, so re-enable host
      signals. */
   VG_(restore_host_signals)( &saved_procmask );

   vg_assert(ret == 0);
   if (sigNo == VKI_SIGSEGV || sigNo == VKI_SIGBUS
       || sigNo == VKI_SIGFPE || sigNo == VKI_SIGILL) {
      /* Can't continue; must longjmp and thus enter the sighandler
         immediately. */
      /* Can't continue; must longjmp back to the scheduler and thus
         enter the sighandler immediately. */
      VG_(longjmpd_on_signal) = sigNo;
      __builtin_longjmp(VG_(toploop_jmpbuf),1);
      __builtin_longjmp(VG_(scheduler_jmpbuf),1);
   }
}

@ -559,17 +492,14 @@ void VG_(sigstartup_actions) ( void )
{
   Int i, ret;

   vki_ksigset_t  block_procmask;
   vki_ksigset_t  saved_procmask;
   vki_kstack_t   altstack_info;
   vki_ksigaction sa;

   /* VG_(printf)("SIGSTARTUP\n"); */
   /* VG_(printf)("SIGSTARTUP\n"); */
   /* Block all signals.
      saved_procmask remembers the previous mask. */
   VG_(ksigfillset)(&block_procmask);
   ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &block_procmask, &saved_procmask);
   vg_assert(ret == 0);
   VG_(block_all_host_signals)( &saved_procmask );

   /* Register an alternative stack for our own signal handler to run
      on. */
@ -615,8 +545,7 @@ void VG_(sigstartup_actions) ( void )
   VG_(ksignal)(VKI_SIGABRT, &VG_(oursignalhandler));

   /* Finally, restore the blocking mask. */
   ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &saved_procmask, NULL);
   vg_assert(ret == 0);
   VG_(restore_host_signals)( &saved_procmask );
}


@ -635,14 +564,10 @@ void VG_(sigshutdown_actions) ( void )
{
   Int i, ret;

   vki_ksigset_t  block_procmask;
   vki_ksigset_t  saved_procmask;
   vki_ksigaction sa;

   /* Block all signals. */
   VG_(ksigfillset)(&block_procmask);
   ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &block_procmask, &saved_procmask);
   vg_assert(ret == 0);
   VG_(block_all_host_signals)( &saved_procmask );

   /* copy the sim signal actions to the real ones. */
   for (i = 1; i < VKI_KNSIG; i++) {
@ -654,9 +579,7 @@ void VG_(sigshutdown_actions) ( void )
      ret = VG_(ksigaction)(i, &sa, NULL);
   }

   /* Finally, copy the simulated process mask to the real one. */
   ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &saved_procmask, NULL);
   vg_assert(ret == 0);
   VG_(restore_host_signals)( &saved_procmask );
}


@ -665,18 +588,16 @@ void VG_(sigshutdown_actions) ( void )
   ------------------------------------------------------------------ */

/* Do more error checking? */
void VG_(do__NR_sigaction) ( void )
void VG_(do__NR_sigaction) ( ThreadId tid )
{
   UInt res;
   void* our_old_handler;
   vki_ksigaction* new_action;
   vki_ksigaction* old_action;
   UInt param1
      = VG_(baseBlock)[VGOFF_(m_ebx)]; /* int sigNo */
   UInt param2
      = VG_(baseBlock)[VGOFF_(m_ecx)]; /* k_sigaction* new_action */
   UInt param3
      = VG_(baseBlock)[VGOFF_(m_edx)]; /* k_sigaction* old_action */
   ThreadState* tst = VG_(get_thread_state)( tid );
   UInt param1 = tst->m_ebx; /* int sigNo */
   UInt param2 = tst->m_ecx; /* k_sigaction* new_action */
   UInt param3 = tst->m_edx; /* k_sigaction* old_action */
   new_action = (vki_ksigaction*)param2;
   old_action = (vki_ksigaction*)param3;

@ -722,7 +643,7 @@ void VG_(do__NR_sigaction) ( void )
      }
   }

   KERNEL_DO_SYSCALL(res);
   KERNEL_DO_SYSCALL(tid,res);
   /* VG_(printf)("RES = %d\n", res); */

   /* If the client asks for the old handler, maintain our fiction
@ -750,7 +671,7 @@ void VG_(do__NR_sigaction) ( void )
   goto good;

  good:
   VG_(baseBlock)[VGOFF_(m_eax)] = (UInt)0;
   tst->m_eax = (UInt)0;
   return;

  bad_signo:

74 vg_syscall.S
@ -41,10 +41,6 @@
# m_state_static, and back afterwards.

VG_(do_syscall):
	cmpl	$2, VG_(syscall_depth)
	jz	do_syscall_DEPTH_2

	# depth 1 copy follows ...
	# Save all the int registers of the real machines state on the
	# simulators stack.
	pushal
@ -104,76 +100,6 @@ VG_(do_syscall):

	ret


do_syscall_DEPTH_2:

	# depth 2 copy follows ...
	# Save all the int registers of the real machines state on the
	# simulators stack.
	pushal

	# and save the real FPU state too
	fwait
	fnsave	VG_(real_fpu_state_saved_over_syscall_d2)
	frstor	VG_(real_fpu_state_saved_over_syscall_d2)

	# remember what the simulators stack pointer is
	movl	%esp, VG_(esp_saved_over_syscall_d2)

	# Now copy the simulated machines state into the real one
	# esp still refers to the simulators stack
	frstor	VG_(m_state_static)+40
	movl	VG_(m_state_static)+32, %eax
	pushl	%eax
	popfl
	movl	VG_(m_state_static)+0, %eax
	movl	VG_(m_state_static)+4, %ecx
	movl	VG_(m_state_static)+8, %edx
	movl	VG_(m_state_static)+12, %ebx
	movl	VG_(m_state_static)+16, %esp
	movl	VG_(m_state_static)+20, %ebp
	movl	VG_(m_state_static)+24, %esi
	movl	VG_(m_state_static)+28, %edi

	# esp now refers to the simulatees stack
	# Do the actual system call
	int	$0x80

	# restore stack as soon as possible
	# esp refers to simulatees stack
	movl	%esp, VG_(m_state_static)+16
	movl	VG_(esp_saved_over_syscall_d2), %esp
	# esp refers to simulators stack

	# ... and undo everything else.
	# Copy real state back to simulated state.
	movl	%eax, VG_(m_state_static)+0
	movl	%ecx, VG_(m_state_static)+4
	movl	%edx, VG_(m_state_static)+8
	movl	%ebx, VG_(m_state_static)+12
	movl	%ebp, VG_(m_state_static)+20
	movl	%esi, VG_(m_state_static)+24
	movl	%edi, VG_(m_state_static)+28
	pushfl
	popl	%eax
	movl	%eax, VG_(m_state_static)+32
	fwait
	fnsave	VG_(m_state_static)+40
	frstor	VG_(m_state_static)+40

	# Restore the state of the simulator
	frstor	VG_(real_fpu_state_saved_over_syscall_d2)
	popal

	ret


##--------------------------------------------------------------------##
##--- end vg_syscall.S                                             ---##
##--------------------------------------------------------------------##

574 vg_syscall_mem.c (file diff suppressed because it is too large)

@ -1607,7 +1607,7 @@ Addr dis_Grp5 ( UCodeBlock* cb, Int sz, Addr eip, Bool* isEnd )
            SMC_IF_ALL(cb);
            uInstr1(cb, JMP, 0, TempReg, t1);
            uCond(cb, CondAlways);
            LAST_UINSTR(cb).call_dispatch = True;
            LAST_UINSTR(cb).jmpkind = JmpCall;
            *isEnd = True;
            break;
         case 4: /* jmp Ev */
@ -1654,7 +1654,7 @@ Addr dis_Grp5 ( UCodeBlock* cb, Int sz, Addr eip, Bool* isEnd )
            SMC_IF_ALL(cb);
            uInstr1(cb, JMP, 0, TempReg, t1);
            uCond(cb, CondAlways);
            LAST_UINSTR(cb).call_dispatch = True;
            LAST_UINSTR(cb).jmpkind = JmpCall;
            *isEnd = True;
            break;
         case 4: /* JMP Ev */
@ -2859,32 +2859,6 @@ Addr dis_xadd_G_E ( UCodeBlock* cb,
}


/* Push %ECX, %EBX and %EAX, call helper_do_client_request, and put
   the resulting %EAX value back. */
static
void dis_ClientRequest ( UCodeBlock* cb )
{
   Int tmpc = newTemp(cb);
   Int tmpb = newTemp(cb);
   Int tmpa = newTemp(cb);
   uInstr2(cb, GET, 4, ArchReg, R_ECX, TempReg, tmpc);
   uInstr2(cb, GET, 4, ArchReg, R_EBX, TempReg, tmpb);
   uInstr2(cb, GET, 4, ArchReg, R_EAX, TempReg, tmpa);
   uInstr0(cb, CALLM_S, 0);
   uInstr1(cb, PUSH, 4, TempReg, tmpc);
   uInstr1(cb, PUSH, 4, TempReg, tmpb);
   uInstr1(cb, PUSH, 4, TempReg, tmpa);
   uInstr1(cb, CALLM, 0, Lit16, VGOFF_(helper_do_client_request));
   uFlagsRWU(cb, FlagsEmpty, FlagsEmpty, FlagsEmpty);
   uInstr1(cb, POP, 4, TempReg, tmpa);
   uInstr1(cb, CLEAR, 0, Lit16, 8);
   uInstr0(cb, CALLM_E, 0);
   uInstr2(cb, PUT, 4, TempReg, tmpa, ArchReg, R_EAX);
   if (dis)
      VG_(printf)("%%eax = client_request ( %%eax, %%ebx, %%ecx )\n");
}


/*------------------------------------------------------------*/
/*--- Disassembling entire basic blocks                    ---*/
/*------------------------------------------------------------*/
@ -2909,21 +2883,31 @@ static Addr disInstr ( UCodeBlock* cb, Addr eip, Bool* isEnd )
   if (dis) VG_(printf)("\t0x%x: ", eip);

   /* Spot the client-request magic sequence, if required. */
   if (VG_(clo_client_perms)) {
   if (1 /*VG_(clo_client_perms)*/) {
      UChar* myeip = (UChar*)eip;
      /* Spot this:
         C1C01D   roll $29, %eax
         C1C003   roll $3,  %eax
         C1C01B   roll $27, %eax
         C1C005   roll $5,  %eax
         C1C81B   rorl $27, %eax
         C1C805   rorl $5,  %eax
         C1C00D   roll $13, %eax
         C1C013   roll $19, %eax
      */
      if (myeip[0] == 0xC1 && myeip[1] == 0xC0 && myeip[2] == 0x1D &&
          myeip[3] == 0xC1 && myeip[4] == 0xC0 && myeip[5] == 0x03 &&
          myeip[6] == 0xC1 && myeip[7] == 0xC0 && myeip[8] == 0x1B &&
          myeip[9] == 0xC1 && myeip[10] == 0xC0 && myeip[11] == 0x05) {
         vg_assert(VG_(clo_instrument));
         dis_ClientRequest(cb);
         eip += 12;
      if (myeip[ 0] == 0xC1 && myeip[ 1] == 0xC0 && myeip[ 2] == 0x1D &&
          myeip[ 3] == 0xC1 && myeip[ 4] == 0xC0 && myeip[ 5] == 0x03 &&
          myeip[ 6] == 0xC1 && myeip[ 7] == 0xC8 && myeip[ 8] == 0x1B &&
          myeip[ 9] == 0xC1 && myeip[10] == 0xC8 && myeip[11] == 0x05 &&
          myeip[12] == 0xC1 && myeip[13] == 0xC0 && myeip[14] == 0x0D &&
          myeip[15] == 0xC1 && myeip[16] == 0xC0 && myeip[17] == 0x13
         ) {
         eip += 18;
         uInstr1(cb, JMP, 0, Literal, 0);
         uLiteral(cb, eip);
         uCond(cb, CondAlways);
         LAST_UINSTR(cb).jmpkind = JmpClientReq;
         *isEnd = True;
         if (dis)
            VG_(printf)("%%edx = client_request ( %%eax )\n");
         return eip;
      }
   }
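(Editor's note: a hedged sketch, not shown in the commit, of what the client-side emitter of this magic sequence might look like. The six rotate amounts sum to zero mod 32, so %eax is unchanged when the program runs natively; %edx is preset to a default so the request degrades gracefully outside the simulator. Macro name and parameters are hypothetical.)

/* Emits the exact 18-byte roll/ror sequence the decoder above spots. */
#define MAGIC_CLIENT_REQUEST(_result, _default, _arg)               \
   __asm__ __volatile__(                                            \
      "roll $29, %%eax ; roll $3,  %%eax\n\t"                       \
      "rorl $27, %%eax ; rorl $5,  %%eax\n\t"                       \
      "roll $13, %%eax ; roll $19, %%eax"                           \
      : "=d" (_result)                /* result arrives in %edx  */ \
      : "a" (_arg),                   /* request word in %eax    */ \
        "0" (_default)                /* %edx preset to default  */ \
      : "cc" )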

@ -2978,9 +2962,7 @@ static Addr disInstr ( UCodeBlock* cb, Addr eip, Bool* isEnd )
         uInstr2(cb, PUT, 4, TempReg, t1, ArchReg, R_ESP);
         uInstr1(cb, JMP, 0, TempReg, t2);
         uCond(cb, CondAlways);

         if (d32 == 0)
            LAST_UINSTR(cb).ret_dispatch = True;
         LAST_UINSTR(cb).jmpkind = JmpRet;

         *isEnd = True;
         if (dis) {
@ -2992,22 +2974,6 @@ static Addr disInstr ( UCodeBlock* cb, Addr eip, Bool* isEnd )
      case 0xE8: /* CALL J4 */
         d32 = getUDisp32(eip); eip += 4;
         d32 += eip; /* eip now holds return-to addr, d32 is call-to addr */
         if (d32 == (Addr)&VG_(shutdown)) {
            /* Set vg_dispatch_ctr to 1, vg_interrupt_reason to VG_Y_EXIT,
               and get back to the dispatch loop.  We ask for a jump to this
               CALL insn because vg_dispatch will ultimately transfer control
               to the real CPU, and we want this call to be the first insn
               it does. */
            uInstr0(cb, CALLM_S, 0);
            uInstr1(cb, CALLM, 0, Lit16, VGOFF_(helper_request_normal_exit));
            uFlagsRWU(cb, FlagsEmpty, FlagsEmpty, FlagsEmpty);
            uInstr0(cb, CALLM_E, 0);
            uInstr1(cb, JMP, 0, Literal, 0);
            uLiteral(cb, eip-5);
            uCond(cb, CondAlways);
            *isEnd = True;
            if (dis) VG_(printf)("call 0x%x\n",d32);
         } else
         if (d32 == eip && getUChar(eip) >= 0x58
                        && getUChar(eip) <= 0x5F) {
            /* Specially treat the position-independent-code idiom
@ -3040,7 +3006,7 @@ static Addr disInstr ( UCodeBlock* cb, Addr eip, Bool* isEnd )
            uInstr1(cb, JMP, 0, Literal, 0);
            uLiteral(cb, d32);
            uCond(cb, CondAlways);
            LAST_UINSTR(cb).call_dispatch = True;
            LAST_UINSTR(cb).jmpkind = JmpCall;
            *isEnd = True;
            if (dis) VG_(printf)("call 0x%x\n",d32);
         }
@ -3179,14 +3145,10 @@ static Addr disInstr ( UCodeBlock* cb, Addr eip, Bool* isEnd )
         /* It's important that all ArchRegs carry their up-to-date value
            at this point.  So we declare an end-of-block here, which
            forces any TempRegs caching ArchRegs to be flushed. */
         t1 = newTemp(cb);
         uInstr0(cb, CALLM_S, 0);
         uInstr1(cb, CALLM, 0, Lit16, VGOFF_(helper_do_syscall) );
         uFlagsRWU(cb, FlagsEmpty, FlagsEmpty, FlagsEmpty);
         uInstr0(cb, CALLM_E, 0);
         uInstr1(cb, JMP, 0, Literal, 0);
         uLiteral(cb, eip);
         uCond(cb, CondAlways);
         LAST_UINSTR(cb).jmpkind = JmpSyscall;
         *isEnd = True;
         if (dis) VG_(printf)("int $0x80\n");
         break;

@ -153,8 +153,8 @@ void VG_(emptyUInstr) ( UInstr* u )
   u->val1 = u->val2 = u->val3 = 0;
   u->tag1 = u->tag2 = u->tag3 = NoValue;
   u->flags_r = u->flags_w = FlagsEmpty;
   u->call_dispatch = False;
   u->smc_check = u->signed_widen = u->ret_dispatch = False;
   u->jmpkind = JmpBoring;
   u->smc_check = u->signed_widen = False;
   u->lit32    = 0;
   u->opcode   = 0;
   u->size     = 0;
@ -259,8 +259,7 @@ void copyAuxInfoFromTo ( UInstr* src, UInstr* dst )
   dst->extra4b       = src->extra4b;
   dst->smc_check     = src->smc_check;
   dst->signed_widen  = src->signed_widen;
   dst->ret_dispatch  = src->ret_dispatch;
   dst->call_dispatch = src->call_dispatch;
   dst->jmpkind       = src->jmpkind;
   dst->flags_r       = src->flags_r;
   dst->flags_w       = src->flags_w;
}
@ -917,10 +916,15 @@ void VG_(ppUInstr) ( Int instrNo, UInstr* u )

      case JMP: case CC2VAL:
      case PUSH: case POP: case CLEAR: case CALLM:
         if (u->opcode == JMP && u->ret_dispatch)
            VG_(printf)("-r");
         if (u->opcode == JMP && u->call_dispatch)
            VG_(printf)("-c");
         if (u->opcode == JMP) {
            switch (u->jmpkind) {
               case JmpCall:      VG_(printf)("-c");   break;
               case JmpRet:       VG_(printf)("-r");   break;
               case JmpSyscall:   VG_(printf)("-sys"); break;
               case JmpClientReq: VG_(printf)("-cli"); break;
               default: break;
            }
         }
         VG_(printf)("\t");
         ppUOperand(u, 1, u->size, False);
         break;

@ -533,9 +533,9 @@ void VG_(smc_check4) ( Addr a )

   /* Force an exit before the next basic block, so the translation
      cache can be flushed appropriately. */
   VG_(dispatch_ctr_SAVED) = VG_(dispatch_ctr);
   VG_(dispatch_ctr)       = 1;
   VG_(interrupt_reason)   = VG_Y_SMC;
   // VG_(dispatch_ctr_SAVED) = VG_(dispatch_ctr);
   // VG_(dispatch_ctr)       = 1;
   // VG_(interrupt_reason)   = VG_Y_SMC;
}