A big commit size-wise, but small concept-wise: removed the ThreadState type
from the skins' view, replacing all instances with ThreadId.  Much cleaner.
Had to change the way VG_(get_ExeContext)() works a little.  Bumped the
core/skin interface major version, since this change breaks the old
interface.  Also fixed a few minor related things here and there.


git-svn-id: svn://svn.valgrind.org/valgrind/trunk@1782
Nicholas Nethercote 2003-07-24 08:45:32 +00:00
parent 53220a1cbb
commit 0f871c249c
22 changed files with 653 additions and 757 deletions
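
The shape of the change, in miniature: every skin-visible function that used
to take a ThreadState* now takes a ThreadId, and the core looks the thread
state up on demand.  A before/after sketch assembled from the hunks below
(the signatures are from this commit; the side-by-side pairing is
illustrative):

   /* Before (core/skin interface major version 2):
      skins handled ThreadState* directly. */
   void*       SK_(malloc)         ( ThreadState* tst, Int size );
   ExeContext* VG_(get_ExeContext) ( ThreadState* tst );

   /* After (major version 3): skins see only ThreadId. */
   void*       SK_(malloc)         ( Int size );
   ExeContext* VG_(get_ExeContext) ( ThreadId tid );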

@@ -561,7 +561,7 @@ Bool ac_check_readable_asciiz ( Addr a, Addr* bad_addr )
/*------------------------------------------------------------*/
static __inline__
void ac_check_is_accessible ( CorePart part, ThreadState* tst,
void ac_check_is_accessible ( CorePart part, ThreadId tid,
Char* s, Addr base, UInt size, Bool isWrite )
{
Bool ok;
@@ -573,21 +573,21 @@ void ac_check_is_accessible ( CorePart part, ThreadState* tst,
if (!ok) {
switch (part) {
case Vg_CoreSysCall:
MAC_(record_param_error) ( tst, bad_addr, isWrite, s );
MAC_(record_param_error) ( tid, bad_addr, isWrite, s );
break;
case Vg_CoreSignal:
sk_assert(isWrite); /* Should only happen with isWrite case */
/* fall through */
case Vg_CorePThread:
MAC_(record_core_mem_error)( tst, isWrite, s );
MAC_(record_core_mem_error)( tid, isWrite, s );
break;
/* If we're being asked to jump to a silly address, record an error
message before potentially crashing the entire system. */
case Vg_CoreTranslate:
sk_assert(!isWrite); /* Should only happen with !isWrite case */
MAC_(record_jump_error)( tst, bad_addr );
MAC_(record_jump_error)( tid, bad_addr );
break;
default:
@@ -599,21 +599,21 @@ void ac_check_is_accessible ( CorePart part, ThreadState* tst,
}
static
void ac_check_is_writable ( CorePart part, ThreadState* tst,
void ac_check_is_writable ( CorePart part, ThreadId tid,
Char* s, Addr base, UInt size )
{
ac_check_is_accessible ( part, tst, s, base, size, /*isWrite*/True );
ac_check_is_accessible ( part, tid, s, base, size, /*isWrite*/True );
}
static
void ac_check_is_readable ( CorePart part, ThreadState* tst,
void ac_check_is_readable ( CorePart part, ThreadId tid,
Char* s, Addr base, UInt size )
{
ac_check_is_accessible ( part, tst, s, base, size, /*isWrite*/False );
ac_check_is_accessible ( part, tid, s, base, size, /*isWrite*/False );
}
static
void ac_check_is_readable_asciiz ( CorePart part, ThreadState* tst,
void ac_check_is_readable_asciiz ( CorePart part, ThreadId tid,
Char* s, Addr str )
{
Bool ok = True;
@@ -625,7 +625,7 @@ void ac_check_is_readable_asciiz ( CorePart part, ThreadState* tst,
sk_assert(part == Vg_CoreSysCall);
ok = ac_check_readable_asciiz ( (Addr)str, &bad_addr );
if (!ok) {
MAC_(record_param_error) ( tst, bad_addr, /*is_writable =*/False, s );
MAC_(record_param_error) ( tid, bad_addr, /*is_writable =*/False, s );
}
VGP_POPCC(VgpCheckMem);
@@ -781,7 +781,7 @@ static void ac_ACCESS4_SLOWLY ( Addr a )
if (!MAC_(clo_partial_loads_ok)
|| ((a & 3) != 0)
|| (!a0ok && !a1ok && !a2ok && !a3ok)) {
MAC_(record_address_error)( /*tst*/NULL, a, 4, False );
MAC_(record_address_error)( VG_(get_current_tid)(), a, 4, False );
return;
}
@@ -808,7 +808,7 @@ static void ac_ACCESS2_SLOWLY ( Addr a )
/* If an address error has happened, report it. */
if (aerr) {
MAC_(record_address_error)( /*tst*/NULL, a, 2, False );
MAC_(record_address_error)( VG_(get_current_tid)(), a, 2, False );
}
}
@@ -822,7 +822,7 @@ static void ac_ACCESS1_SLOWLY ( Addr a )
/* If an address error has happened, report it. */
if (aerr) {
MAC_(record_address_error)( /*tst*/NULL, a, 1, False );
MAC_(record_address_error)( VG_(get_current_tid)(), a, 1, False );
}
}
@@ -927,7 +927,7 @@ void ac_fpu_ACCESS_check_SLOWLY ( Addr addr, Int size )
}
if (aerr) {
MAC_(record_address_error)( /*tst*/NULL, addr, size, False );
MAC_(record_address_error)( VG_(get_current_tid)(), addr, size, False );
}
}
@@ -1147,7 +1147,7 @@ Bool SK_(expensive_sanity_check) ( void )
/*--- Client requests ---*/
/*------------------------------------------------------------*/
Bool SK_(handle_client_request) ( ThreadState* tst, UInt* arg_block, UInt *ret )
Bool SK_(handle_client_request) ( ThreadId tid, UInt* arg_block, UInt *ret )
{
#define IGNORE(what) \
do { \
@@ -1193,7 +1193,7 @@ Bool SK_(handle_client_request) ( ThreadState* tst, UInt* arg_block, UInt *ret )
return False;
default:
if (MAC_(handle_common_client_requests)(tst, arg_block, ret )) {
if (MAC_(handle_common_client_requests)(tid, arg_block, ret )) {
return True;
} else {
VG_(message)(Vg_UserMsg,

@@ -131,7 +131,7 @@ NON_FUND( void SK_(print_usage)(void) );
NON_FUND( void SK_(print_debug_usage)(void) );
/* Client request template function */
NON_FUND( Bool SK_(handle_client_request)(ThreadState* tst, UInt* arg_block,
NON_FUND( Bool SK_(handle_client_request)(ThreadId tid, UInt* arg_block,
UInt *ret) );
/* UCode extension */
@@ -170,7 +170,7 @@ Bool VG_(sk_malloc_called_by_scheduler) = False;
malloc()-replacing skin cannot forget to implement SK_(malloc)() or
SK_(free)(). */
__attribute__ ((weak))
void* SK_(malloc)( ThreadState* tst, Int size )
void* SK_(malloc)( Int size )
{
if (VG_(sk_malloc_called_by_scheduler))
return VG_(cli_malloc)(4, size);
@@ -179,7 +179,7 @@ void* SK_(malloc)( ThreadState* tst, Int size )
}
__attribute__ ((weak))
void SK_(free)( ThreadState* tst, void* p )
void SK_(free)( void* p )
{
/* see comment for SK_(malloc)() above */
if (VG_(sk_malloc_called_by_scheduler))
@@ -188,15 +188,14 @@ void SK_(free)( ThreadState* tst, void* p )
malloc_panic(__PRETTY_FUNCTION__);
}
MALLOC( void* SK_(__builtin_new) ( ThreadState* tst, Int size ) );
MALLOC( void* SK_(__builtin_vec_new)( ThreadState* tst, Int size ) );
MALLOC( void* SK_(memalign) ( ThreadState* tst, Int align, Int size ) );
MALLOC( void* SK_(calloc) ( ThreadState* tst, Int nmemb, Int size ) );
MALLOC( void* SK_(__builtin_new) ( Int size ) );
MALLOC( void* SK_(__builtin_vec_new)( Int size ) );
MALLOC( void* SK_(memalign) ( Int align, Int size ) );
MALLOC( void* SK_(calloc) ( Int nmemb, Int size ) );
MALLOC( void SK_(__builtin_delete) ( ThreadState* tst, void* p ) );
MALLOC( void SK_(__builtin_vec_delete) ( ThreadState* tst, void* p ) );
MALLOC( void* SK_(realloc) ( ThreadState* tst, void* p,
Int new_size ) );
MALLOC( void SK_(__builtin_delete) ( void* p ) );
MALLOC( void SK_(__builtin_vec_delete) ( void* p ) );
MALLOC( void* SK_(realloc) ( void* p, Int new_size ) );
/*--------------------------------------------------------------------*/
/*--- end vg_defaults.c ---*/
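
A skin that really does replace malloc() et al overrides these weak defaults
simply by defining the functions with the new ThreadState-free signatures.
A minimal sketch (record_chunk() is a hypothetical skin-local helper; the
VG_ calls are the ones used in this commit):

   void* SK_(malloc) ( Int size )
   {
      Addr p = (Addr)VG_(cli_malloc)( VG_(clo_alignment), size );
      /* hypothetical: remember which thread allocated this block */
      record_chunk( VG_(get_current_or_recent_tid)(), p, size );
      return (void*)p;
   }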

@@ -166,32 +166,21 @@ Bool VG_(is_action_requested) ( Char* action, Bool* clo )
stored thread state, not from VG_(baseBlock).
*/
static __inline__
void construct_error ( Error* err, ThreadState* tst, ErrorKind ekind, Addr a,
Char* s, void* extra, ExeContext* where,
/*out*/Addr* m_eip, /*out*/Addr* m_esp,
/*out*/Addr* m_ebp )
void construct_error ( Error* err, ThreadId tid, ErrorKind ekind, Addr a,
Char* s, void* extra, ExeContext* where )
{
sk_assert(tid < VG_N_THREADS);
/* Core-only parts */
err->next = NULL;
err->supp = NULL;
err->count = 1;
err->tid = tid;
if (NULL == where)
err->where = VG_(get_ExeContext)( tst );
err->where = VG_(get_ExeContext)( tid );
else
err->where = where;
if (NULL == tst) {
err->tid = VG_(get_current_tid)();
*m_eip = VG_(baseBlock)[VGOFF_(m_eip)];
*m_esp = VG_(baseBlock)[VGOFF_(m_esp)];
*m_ebp = VG_(baseBlock)[VGOFF_(m_ebp)];
} else {
err->tid = tst->tid;
*m_eip = tst->m_eip;
*m_esp = tst->m_esp;
*m_ebp = tst->m_ebp;
}
/* Skin-relevant parts */
err->ekind = ekind;
err->addr = a;
@@ -199,7 +188,7 @@ void construct_error ( Error* err, ThreadState* tst, ErrorKind ekind, Addr a,
err->extra = extra;
/* sanity... */
vg_assert(err->tid >= 0 && err->tid < VG_N_THREADS);
vg_assert( tid < VG_N_THREADS );
}
void VG_(gen_suppression)(Error* err)
@@ -251,13 +240,24 @@ void VG_(gen_suppression)(Error* err)
}
static
void do_actions_on_error(Error* err, Bool allow_GDB_attach,
Addr m_eip, Addr m_esp, Addr m_ebp )
void do_actions_on_error(Error* err, Bool allow_GDB_attach)
{
/* Perhaps we want a GDB attach at this point? */
if (allow_GDB_attach &&
VG_(is_action_requested)( "Attach to GDB", & VG_(clo_GDB_attach) ))
{
Addr m_eip, m_esp, m_ebp;
if (VG_(is_running_thread)( err->tid )) {
m_eip = VG_(baseBlock)[VGOFF_(m_eip)];
m_esp = VG_(baseBlock)[VGOFF_(m_esp)];
m_ebp = VG_(baseBlock)[VGOFF_(m_ebp)];
} else {
ThreadState* tst = & VG_(threads)[ err->tid ];
m_eip = tst->m_eip;
m_esp = tst->m_esp;
m_ebp = tst->m_ebp;
}
VG_(swizzle_esp_then_start_GDB)( m_eip, m_esp, m_ebp );
}
/* Or maybe we want to generate the error's suppression? */
@@ -274,10 +274,9 @@ static Bool is_first_shown_context = True;
/* Top-level entry point to the error management subsystem.
All detected errors are notified here; this routine decides if/when the
user should see the error. */
void VG_(maybe_record_error) ( ThreadState* tst,
void VG_(maybe_record_error) ( ThreadId tid,
ErrorKind ekind, Addr a, Char* s, void* extra )
{
Addr m_eip, m_esp, m_ebp;
Error err;
Error* p;
Error* p_prev;
@@ -342,8 +341,7 @@ void VG_(maybe_record_error) ( ThreadState* tst,
}
/* Build ourselves the error */
construct_error ( &err, tst, ekind, a, s, extra, NULL,
&m_eip, &m_esp, &m_ebp );
construct_error ( &err, tid, ekind, a, s, extra, NULL );
/* First, see if we've got an error record matching this one. */
p = vg_errors;
@@ -416,7 +414,7 @@ void VG_(maybe_record_error) ( ThreadState* tst,
pp_Error(p, False);
is_first_shown_context = False;
vg_n_errs_shown++;
do_actions_on_error(p, /*allow_GDB_attach*/True, m_eip, m_esp, m_ebp );
do_actions_on_error(p, /*allow_GDB_attach*/True);
} else {
vg_n_errs_suppressed++;
p->supp->count++;
@@ -430,16 +428,14 @@ void VG_(maybe_record_error) ( ThreadState* tst,
suppressed. Bool `print_error' dictates whether to print the error.
Bool `count_error' dictates whether to count the error in VG_(n_errs_found)
*/
Bool VG_(unique_error) ( ThreadState* tst, ErrorKind ekind, Addr a, Char* s,
Bool VG_(unique_error) ( ThreadId tid, ErrorKind ekind, Addr a, Char* s,
void* extra, ExeContext* where, Bool print_error,
Bool allow_GDB_attach, Bool count_error )
{
Error err;
Addr m_eip, m_esp, m_ebp;
/* Build ourselves the error */
construct_error ( &err, tst, ekind, a, s, extra, where,
&m_eip, &m_esp, &m_ebp );
construct_error ( &err, tid, ekind, a, s, extra, where );
/* Unless it's suppressed, we're going to show it. Don't need to make
a copy, because it's only temporary anyway.
@@ -459,7 +455,7 @@ Bool VG_(unique_error) ( ThreadState* tst, ErrorKind ekind, Addr a, Char* s,
pp_Error(&err, False);
is_first_shown_context = False;
}
do_actions_on_error(&err, allow_GDB_attach, m_eip, m_esp, m_ebp);
do_actions_on_error(&err, allow_GDB_attach);
return False;
@@ -479,8 +475,7 @@ Bool VG_(unique_error) ( ThreadState* tst, ErrorKind ekind, Addr a, Char* s,
void VG_(record_pthread_error) ( ThreadId tid, Char* msg )
{
if (! VG_(needs).core_errors) return;
VG_(maybe_record_error)( &VG_(threads)[tid], PThreadErr, /*addr*/0, msg,
/*extra*/NULL );
VG_(maybe_record_error)( tid, PThreadErr, /*addr*/0, msg, /*extra*/NULL );
}
/*------------------------------*/

@@ -283,33 +283,33 @@ ExeContext* VG_(get_ExeContext2) ( Addr eip, Addr ebp,
return new_ec;
}
ExeContext* VG_(get_ExeContext) ( ThreadState *tst )
ExeContext* VG_(get_ExeContext) ( ThreadId tid )
{
ExeContext *ec;
if (tst == NULL) {
if (VG_(is_running_thread)(tid)) {
/* thread currently in baseblock */
ThreadId tid = VG_(get_current_tid)();
ec = VG_(get_ExeContext2)( VG_(baseBlock)[VGOFF_(m_eip)],
VG_(baseBlock)[VGOFF_(m_ebp)],
VG_(baseBlock)[VGOFF_(m_esp)],
VG_(threads)[tid].stack_highest_word);
} else {
/* thread in thread table */
ThreadState* tst = & VG_(threads)[ tid ];
ec = VG_(get_ExeContext2)( tst->m_eip, tst->m_ebp, tst->m_esp,
tst->stack_highest_word );
}
return ec;
}
Addr VG_(get_EIP) ( ThreadState *tst )
Addr VG_(get_EIP) ( ThreadId tid )
{
Addr ret;
if (tst == NULL)
if (VG_(is_running_thread)(tid))
ret = VG_(baseBlock)[VGOFF_(m_eip)];
else
ret = tst->m_eip;
ret = VG_(threads)[ tid ].m_eip;
return ret;
}

@@ -344,11 +344,11 @@ typedef
void (*ban_mem_stack) ( Addr a, UInt len );
void (*pre_mem_read) ( CorePart part, ThreadState* tst,
void (*pre_mem_read) ( CorePart part, ThreadId tid,
Char* s, Addr a, UInt size );
void (*pre_mem_read_asciiz) ( CorePart part, ThreadState* tst,
void (*pre_mem_read_asciiz) ( CorePart part, ThreadId tid,
Char* s, Addr a );
void (*pre_mem_write) ( CorePart part, ThreadState* tst,
void (*pre_mem_write) ( CorePart part, ThreadId tid,
Char* s, Addr a, UInt size );
/* Not implemented yet -- have to add in lots of places, which is a
pain. Won't bother unless/until there's a need. */
@@ -469,15 +469,6 @@ extern Bool VG_(is_inside_segment_mmapd_by_low_level_MM)( Addr aa );
#define VG_USERREQ__MALLOC 0x2001
#define VG_USERREQ__FREE 0x2002
/*
In vg_skin.h, so skins can use it.
Call an arbitrary function with ThreadState as the first arg.
#define VG_USERREQ__CLIENT_tstCALL0 0x2101
#define VG_USERREQ__CLIENT_tstCALL1 0x2102
#define VG_USERREQ__CLIENT_tstCALL2 0x2103
#define VG_USERREQ__CLIENT_tstCALL3 0x2104
*/
/* (Fn, Arg): Create a new thread and run Fn applied to Arg in it. Fn
MUST NOT return -- ever. Eventually it will do either __QUIT or
__WAIT_JOINER. */
@@ -711,7 +702,8 @@ typedef
ForkHandlerEntry;
struct _ThreadState {
typedef
struct _ThreadState {
/* ThreadId == 0 (and hence vg_threads[0]) is NEVER USED.
The thread identity is simply the index in vg_threads[].
ThreadId == 1 is the root thread and has the special property
@@ -872,7 +864,8 @@ struct _ThreadState {
UInt sh_ebp;
UInt sh_esp;
UInt sh_eflags;
};
}
ThreadState;
/* The thread table. */
@@ -884,6 +877,10 @@ extern Bool VG_(is_valid_tid) ( ThreadId tid );
/* Check that tid is in range. */
extern Bool VG_(is_valid_or_empty_tid) ( ThreadId tid );
/* Determine if 'tid' is that of the current running thread (Nb: returns
False if no thread is currently running). */
extern Bool VG_(is_running_thread)(ThreadId tid);
/* Copy the specified thread's state into VG_(baseBlock) in
preparation for running it. */
extern void VG_(load_thread_state)( ThreadId );
@@ -1123,7 +1120,7 @@ struct _UCodeBlock {
extern UCodeBlock* VG_(alloc_UCodeBlock) ( void );
extern void VG_(translate) ( ThreadState* tst,
extern void VG_(translate) ( ThreadId tid,
Addr orig_addr,
UInt* orig_size,
Addr* trans_addr,

@@ -247,10 +247,10 @@ TRACK(die_mem_stack, Addr a, UInt len)
TRACK(ban_mem_stack, Addr a, UInt len)
TRACK(pre_mem_read, CorePart part, ThreadState* tst, Char* s, Addr a,
TRACK(pre_mem_read, CorePart part, ThreadId tid, Char* s, Addr a,
UInt size)
TRACK(pre_mem_read_asciiz, CorePart part, ThreadState* tst, Char* s, Addr a)
TRACK(pre_mem_write, CorePart part, ThreadState* tst, Char* s, Addr a,
TRACK(pre_mem_read_asciiz, CorePart part, ThreadId tid, Char* s, Addr a)
TRACK(pre_mem_write, CorePart part, ThreadId tid, Char* s, Addr a,
UInt size)
TRACK(post_mem_write, Addr a, UInt size)

@@ -45,7 +45,7 @@
/* Sidestep the normal check which disallows using valgrind.h directly. */
#define __VALGRIND_SOMESKIN_H
#include "valgrind.h" /* for VALGRIND_NON_SIMD_tstCALL[12] */
#include "valgrind.h" /* for VALGRIND_NON_SIMD_CALL[12] */
/*------------------------------------------------------------*/
/*--- Command line options ---*/
@@ -150,7 +150,7 @@ void* malloc ( Int n )
MAYBE_SLOPPIFY(n);
if (VG_(is_running_on_simd_CPU)()) {
v = (void*)VALGRIND_NON_SIMD_tstCALL1( SK_(malloc), n );
v = (void*)VALGRIND_NON_SIMD_CALL1( SK_(malloc), n );
} else if (VG_(clo_alignment) != 4) {
v = VG_(arena_malloc_aligned)(VG_AR_CLIENT, VG_(clo_alignment), n);
} else {
@@ -169,7 +169,7 @@ void* __builtin_new ( Int n )
MAYBE_SLOPPIFY(n);
if (VG_(is_running_on_simd_CPU)()) {
v = (void*)VALGRIND_NON_SIMD_tstCALL1( SK_(__builtin_new), n );
v = (void*)VALGRIND_NON_SIMD_CALL1( SK_(__builtin_new), n );
} else if (VG_(clo_alignment) != 4) {
v = VG_(arena_malloc_aligned)(VG_AR_CLIENT, VG_(clo_alignment), n);
} else {
@@ -194,7 +194,7 @@ void* __builtin_vec_new ( Int n )
MAYBE_SLOPPIFY(n);
if (VG_(is_running_on_simd_CPU)()) {
v = (void*)VALGRIND_NON_SIMD_tstCALL1( SK_(__builtin_vec_new), n );
v = (void*)VALGRIND_NON_SIMD_CALL1( SK_(__builtin_vec_new), n );
} else if (VG_(clo_alignment) != 4) {
v = VG_(arena_malloc_aligned)(VG_AR_CLIENT, VG_(clo_alignment), n);
} else {
@@ -217,7 +217,7 @@ void free ( void* p )
if (p == NULL)
return;
if (VG_(is_running_on_simd_CPU)()) {
(void)VALGRIND_NON_SIMD_tstCALL1( SK_(free), p );
(void)VALGRIND_NON_SIMD_CALL1( SK_(free), p );
} else {
VG_(arena_free)(VG_AR_CLIENT, p);
}
@@ -230,7 +230,7 @@ void __builtin_delete ( void* p )
if (p == NULL)
return;
if (VG_(is_running_on_simd_CPU)()) {
(void)VALGRIND_NON_SIMD_tstCALL1( SK_(__builtin_delete), p );
(void)VALGRIND_NON_SIMD_CALL1( SK_(__builtin_delete), p );
} else {
VG_(arena_free)(VG_AR_CLIENT, p);
}
@@ -249,7 +249,7 @@ void __builtin_vec_delete ( void* p )
if (p == NULL)
return;
if (VG_(is_running_on_simd_CPU)()) {
(void)VALGRIND_NON_SIMD_tstCALL1( SK_(__builtin_vec_delete), p );
(void)VALGRIND_NON_SIMD_CALL1( SK_(__builtin_vec_delete), p );
} else {
VG_(arena_free)(VG_AR_CLIENT, p);
}
@@ -270,7 +270,7 @@ void* calloc ( UInt nmemb, UInt size )
MAYBE_SLOPPIFY(size);
if (VG_(is_running_on_simd_CPU)()) {
v = (void*)VALGRIND_NON_SIMD_tstCALL2( SK_(calloc), nmemb, size );
v = (void*)VALGRIND_NON_SIMD_CALL2( SK_(calloc), nmemb, size );
} else {
v = VG_(arena_calloc)(VG_AR_CLIENT, VG_(clo_alignment), nmemb, size);
}
@@ -296,7 +296,7 @@ void* realloc ( void* ptrV, Int new_size )
return NULL;
}
if (VG_(is_running_on_simd_CPU)()) {
v = (void*)VALGRIND_NON_SIMD_tstCALL2( SK_(realloc), ptrV, new_size );
v = (void*)VALGRIND_NON_SIMD_CALL2( SK_(realloc), ptrV, new_size );
} else {
v = VG_(arena_realloc)(VG_AR_CLIENT, ptrV, VG_(clo_alignment), new_size);
}
@@ -314,7 +314,7 @@ void* memalign ( Int alignment, Int n )
MAYBE_SLOPPIFY(n);
if (VG_(is_running_on_simd_CPU)()) {
v = (void*)VALGRIND_NON_SIMD_tstCALL2( SK_(memalign), alignment, n );
v = (void*)VALGRIND_NON_SIMD_CALL2( SK_(memalign), alignment, n );
} else {
v = VG_(arena_malloc_aligned)(VG_AR_CLIENT, alignment, n);
}

@@ -332,8 +332,8 @@ void create_translation_for ( ThreadId tid, Addr orig_addr )
jumps[i] = (UShort)-1;
/* Make a translation, into temporary storage. */
VG_(translate)( &VG_(threads)[tid],
orig_addr, &orig_size, &trans_addr, &trans_size, jumps );
VG_(translate)( tid, orig_addr, /* in */
&orig_size, &trans_addr, &trans_size, jumps ); /* out */
/* Copy data at trans_addr into the translation cache. */
/* Since the .orig_size and .trans_size fields are
@@ -363,19 +363,12 @@ ThreadId vg_alloc_ThreadState ( void )
/*NOTREACHED*/
}
ThreadState* VG_(get_ThreadState)( ThreadId tid )
Bool VG_(is_running_thread)(ThreadId tid)
{
vg_assert(tid >= 0 && tid < VG_N_THREADS);
return & VG_(threads)[tid];
ThreadId curr = VG_(get_current_tid)();
return (curr == tid && VG_INVALID_THREADID != tid);
}
ThreadState* VG_(get_current_thread_state) ( void )
{
vg_assert(VG_(is_valid_tid)(vg_tid_currently_in_baseBlock));
return & VG_(threads)[vg_tid_currently_in_baseBlock];
}
ThreadId VG_(get_current_tid) ( void )
{
if (!VG_(is_valid_tid)(vg_tid_currently_in_baseBlock))
@@ -392,12 +385,6 @@ ThreadId VG_(get_current_or_recent_tid) ( void )
return vg_tid_last_in_baseBlock;
}
ThreadId VG_(get_tid_from_ThreadState) (ThreadState* tst)
{
vg_assert(tst >= &VG_(threads)[1] && tst < &VG_(threads)[VG_N_THREADS]);
return tst->tid;
}
/* Copy the saved state of a thread into VG_(baseBlock), ready for it
to be run. */
void VG_(load_thread_state) ( ThreadId tid )
@@ -1626,7 +1613,7 @@ VgSchedReturnCode VG_(scheduler) ( void )
throwing away the result. */
VG_(printf)(
"======vvvvvvvv====== LAST TRANSLATION ======vvvvvvvv======\n");
VG_(translate)( &VG_(threads)[tid],
VG_(translate)( tid,
VG_(threads)[tid].m_eip, NULL, NULL, NULL, NULL );
VG_(printf)("\n");
VG_(printf)(
@@ -1766,7 +1753,7 @@ void maybe_rendezvous_joiners_and_joinees ( void )
thread_return = VG_(threads)[jnr].joiner_thread_return;
if (thread_return != NULL) {
/* CHECK thread_return writable */
VG_TRACK( pre_mem_write, Vg_CorePThread, &VG_(threads)[jnr],
VG_TRACK( pre_mem_write, Vg_CorePThread, jnr,
"pthread_join: thread_return",
(Addr)thread_return, sizeof(void*));
@@ -1859,7 +1846,7 @@ void do__cleanup_pop ( ThreadId tid, CleanupEntry* cu )
return;
}
sp--;
VG_TRACK( pre_mem_write, Vg_CorePThread, & VG_(threads)[tid],
VG_TRACK( pre_mem_write, Vg_CorePThread, tid,
"cleanup pop", (Addr)cu, sizeof(CleanupEntry) );
*cu = VG_(threads)[tid].custack[sp];
VG_TRACK( post_mem_write, (Addr)cu, sizeof(CleanupEntry) );
@@ -2257,8 +2244,7 @@ void do__apply_in_new_thread ( ThreadId parent_tid,
SET_PTHREQ_ESP(tid, VG_(threads)[tid].m_esp - 8);
VG_TRACK ( new_mem_stack, (Addr)VG_(threads)[tid].m_esp, 2 * 4 );
VG_TRACK ( pre_mem_write, Vg_CorePThread, & VG_(threads)[tid],
"new thread: stack",
VG_TRACK ( pre_mem_write, Vg_CorePThread, tid, "new thread: stack",
(Addr)VG_(threads)[tid].m_esp, 2 * 4 );
/* push arg and (bogus) return address */
@@ -2918,8 +2904,7 @@ void do_pthread_key_create ( ThreadId tid,
vg_thread_keys[i].destructor = destructor;
/* check key for addressibility */
VG_TRACK( pre_mem_write, Vg_CorePThread, &VG_(threads)[tid],
"pthread_key_create: key",
VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "pthread_key_create: key",
(Addr)key, sizeof(pthread_key_t));
*key = i;
VG_TRACK( post_mem_write, (Addr)key, sizeof(pthread_key_t) );
@@ -3024,15 +3009,14 @@ void do__get_key_destr_and_spec ( ThreadId tid,
SET_PTHREQ_RETVAL(tid, -1);
return;
}
VG_TRACK( pre_mem_write, Vg_CorePThread, & VG_(threads)[tid],
"get_key_destr_and_spec: cu", (Addr)cu,
sizeof(CleanupEntry) );
VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "get_key_destr_and_spec: cu",
(Addr)cu, sizeof(CleanupEntry) );
cu->fn = vg_thread_keys[key].destructor;
if (VG_(threads)[tid].specifics_ptr == NULL) {
cu->arg = NULL;
} else {
VG_TRACK( pre_mem_read, Vg_CorePThread, & VG_(threads)[tid],
VG_TRACK( pre_mem_read, Vg_CorePThread, tid,
"get_key_destr_and_spec: key",
(Addr)(&VG_(threads)[tid].specifics_ptr[key]),
sizeof(void*) );
@@ -3071,12 +3055,10 @@ void do_pthread_sigmask ( ThreadId tid,
&& VG_(threads)[tid].status == VgTs_Runnable);
if (newmask)
VG_TRACK( pre_mem_read, Vg_CorePThread, &VG_(threads)[tid],
"pthread_sigmask: newmask",
VG_TRACK( pre_mem_read, Vg_CorePThread, tid, "pthread_sigmask: newmask",
(Addr)newmask, sizeof(vki_ksigset_t));
if (oldmask)
VG_TRACK( pre_mem_write, Vg_CorePThread, &VG_(threads)[tid],
"pthread_sigmask: oldmask",
VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "pthread_sigmask: oldmask",
(Addr)oldmask, sizeof(vki_ksigset_t));
VG_(do_pthread_sigmask_SCSS_upd) ( tid, vki_how, newmask, oldmask );
@@ -3204,7 +3186,7 @@ void do__set_fhstack_entry ( ThreadId tid, Int n, ForkHandlerEntry* fh )
vg_assert(VG_(is_valid_tid)(tid)
&& VG_(threads)[tid].status == VgTs_Runnable);
VG_TRACK( pre_mem_read, Vg_CorePThread, &VG_(threads)[tid],
VG_TRACK( pre_mem_read, Vg_CorePThread, tid,
"pthread_atfork: prepare/parent/child",
(Addr)fh, sizeof(ForkHandlerEntry));
@@ -3230,8 +3212,7 @@ void do__get_fhstack_entry ( ThreadId tid, Int n, /*OUT*/
vg_assert(VG_(is_valid_tid)(tid)
&& VG_(threads)[tid].status == VgTs_Runnable);
VG_TRACK( pre_mem_write, Vg_CorePThread, &VG_(threads)[tid],
"fork: prepare/parent/child",
VG_TRACK( pre_mem_write, Vg_CorePThread, tid, "fork: prepare/parent/child",
(Addr)fh, sizeof(ForkHandlerEntry));
if (n < 0 || n >= VG_N_FORKHANDLERSTACK) {
@@ -3271,7 +3252,6 @@ UInt VG_(get_exit_status_shadow) ( void )
static
void do_client_request ( ThreadId tid )
{
ThreadState* tst = &VG_(threads)[tid];
UInt* arg = (UInt*)(VG_(threads)[tid].m_eax);
UInt req_no = arg[0];
@@ -3299,27 +3279,6 @@ void do_client_request ( ThreadId tid )
break;
}
case VG_USERREQ__CLIENT_tstCALL0: {
UInt (*f)(ThreadState*) = (void*)arg[1];
SET_CLCALL_RETVAL(tid, f ( tst ), (Addr)f );
break;
}
case VG_USERREQ__CLIENT_tstCALL1: {
UInt (*f)(ThreadState*, UInt) = (void*)arg[1];
SET_CLCALL_RETVAL(tid, f ( tst, arg[2] ), (Addr)f );
break;
}
case VG_USERREQ__CLIENT_tstCALL2: {
UInt (*f)(ThreadState*, UInt, UInt) = (void*)arg[1];
SET_CLCALL_RETVAL(tid, f ( tst, arg[2], arg[3] ), (Addr)f );
break;
}
case VG_USERREQ__CLIENT_tstCALL3: {
UInt (*f)(ThreadState*, UInt, UInt, UInt) = (void*)arg[1];
SET_CLCALL_RETVAL(tid, f ( tst, arg[2], arg[3], arg[4] ), (Addr)f );
break;
}
/* Note: for skins that replace malloc() et al, we want to call
the replacement versions. For those that don't, we want to call
VG_(cli_malloc)() et al. We do this by calling SK_(malloc)(), which
@@ -3331,14 +3290,14 @@ void do_client_request ( ThreadId tid )
case VG_USERREQ__MALLOC:
VG_(sk_malloc_called_by_scheduler) = True;
SET_PTHREQ_RETVAL(
tid, (UInt)SK_(malloc) ( tst, arg[1] )
tid, (UInt)SK_(malloc) ( arg[1] )
);
VG_(sk_malloc_called_by_scheduler) = False;
break;
case VG_USERREQ__FREE:
VG_(sk_malloc_called_by_scheduler) = True;
SK_(free) ( tst, (void*)arg[1] );
SK_(free) ( (void*)arg[1] );
VG_(sk_malloc_called_by_scheduler) = False;
SET_PTHREQ_RETVAL(tid, 0); /* irrelevant */
break;
@@ -3561,7 +3520,7 @@ void do_client_request ( ThreadId tid )
VG_(printf)("client request: code %d, addr %p, len %d\n",
arg[0], (void*)arg[1], arg[2] );
if (SK_(handle_client_request) ( &VG_(threads)[tid], arg, &ret ))
if (SK_(handle_client_request) ( tid, arg, &ret ))
SET_CLREQ_RETVAL(tid, ret);
} else {
static Bool whined = False;

@@ -991,7 +991,7 @@ void vg_push_signal_frame ( ThreadId tid, int sigNo )
== ((Char*)(esp_top_of_frame)) );
/* retaddr, sigNo, psigInfo, puContext fields are to be written */
VG_TRACK( pre_mem_write, Vg_CoreSignal, tst, "signal handler frame",
VG_TRACK( pre_mem_write, Vg_CoreSignal, tid, "signal handler frame",
(Addr)esp, 16 );
frame->retaddr = (UInt)(&VG_(signalreturn_bogusRA));
frame->sigNo = sigNo;

File diff suppressed because it is too large.

@@ -2307,7 +2307,7 @@ static void vg_realreg_liveness_analysis ( UCodeBlock* cb )
'tst' is the identity of the thread needing this block.
*/
void VG_(translate) ( /*IN*/ ThreadState* tst,
void VG_(translate) ( /*IN*/ ThreadId tid,
/*IN*/ Addr orig_addr,
/*OUT*/ UInt* orig_size,
/*OUT*/ Addr* trans_addr,
@@ -2349,7 +2349,7 @@ void VG_(translate) ( /*IN*/ ThreadState* tst,
= VG_(overall_in_count) > notrace_until_limit;
if (!debugging_translation)
VG_TRACK( pre_mem_read, Vg_CoreTranslate, tst, "", orig_addr, 1 );
VG_TRACK( pre_mem_read, Vg_CoreTranslate, tid, "", orig_addr, 1 );
cb = VG_(alloc_UCodeBlock)();
cb->orig_eip = orig_addr;

@@ -1306,8 +1306,7 @@ static void record_lockgraph_error(ThreadId tid, Mutex *mutex,
const LockSet *lockset_holding,
const LockSet *lockset_prev);
static void set_mutex_state(Mutex *mutex, MutexState state,
ThreadId tid, ThreadState *tst);
static void set_mutex_state(Mutex *mutex, MutexState state, ThreadId tid);
#define M_MUTEX_HASHSZ 1021
@@ -1447,8 +1446,7 @@ static Bool check_cycle(const Mutex *start, const LockSet* lockset)
/* test to see if a mutex state change would be problematic; this
makes no changes to the mutex state. This should be called before
the locking thread has actually blocked. */
static void test_mutex_state(Mutex *mutex, MutexState state,
ThreadId tid, ThreadState *tst)
static void test_mutex_state(Mutex *mutex, MutexState state, ThreadId tid)
{
static const Bool debug = False;
@@ -1518,8 +1516,7 @@ static void test_mutex_state(Mutex *mutex, MutexState state,
a result of any thread freeing memory; in this case set_mutex_state
does all the error reporting as well.
*/
static void set_mutex_state(Mutex *mutex, MutexState state,
ThreadId tid, ThreadState *tst)
static void set_mutex_state(Mutex *mutex, MutexState state, ThreadId tid)
{
static const Bool debug = False;
@@ -1578,7 +1575,7 @@ static void set_mutex_state(Mutex *mutex, MutexState state,
break;
}
mutex->location = VG_(get_ExeContext)(tst);
mutex->location = VG_(get_ExeContext)(tid);
mutex->state = state;
}
@@ -1701,8 +1698,8 @@ static void copy_address_range_state(Addr src, Addr dst, UInt len)
}
// SSS: put these somewhere better
static void eraser_mem_read (Addr a, UInt data_size, ThreadState *tst);
static void eraser_mem_write(Addr a, UInt data_size, ThreadState *tst);
static void eraser_mem_read (Addr a, UInt data_size, ThreadId tid);
static void eraser_mem_write(Addr a, UInt data_size, ThreadId tid);
#define REGPARM(x) __attribute__((regparm (x)))
@@ -1720,24 +1717,25 @@ static void bus_lock(void);
static void bus_unlock(void);
static
void eraser_pre_mem_read(CorePart part, ThreadState* tst,
void eraser_pre_mem_read(CorePart part, ThreadId tid,
Char* s, UInt base, UInt size )
{
eraser_mem_read(base, size, tst);
if (tid > 50) { VG_(printf)("pid = %d, s = `%s`, part = %d\n", tid, s, part); VG_(skin_panic)("a");}
eraser_mem_read(base, size, tid);
}
static
void eraser_pre_mem_read_asciiz(CorePart part, ThreadState* tst,
void eraser_pre_mem_read_asciiz(CorePart part, ThreadId tid,
Char* s, UInt base )
{
eraser_mem_read(base, VG_(strlen)((Char*)base), tst);
eraser_mem_read(base, VG_(strlen)((Char*)base), tid);
}
static
void eraser_pre_mem_write(CorePart part, ThreadState* tst,
void eraser_pre_mem_write(CorePart part, ThreadId tid,
Char* s, UInt base, UInt size )
{
eraser_mem_write(base, size, tst);
eraser_mem_write(base, size, tid);
}
@@ -1819,77 +1817,75 @@ UInt VG_(vg_malloc_redzone_szB) = 4;
shadow chunk on the appropriate list, and set all memory
protections correctly. */
static void add_HG_Chunk ( ThreadState* tst, Addr p, UInt size )
static void add_HG_Chunk ( ThreadId tid, Addr p, UInt size )
{
HG_Chunk* hc;
hc = VG_(malloc)(sizeof(HG_Chunk));
hc->data = p;
hc->size = size;
hc->where = VG_(get_ExeContext)(tst);
hc->tid = VG_(get_tid_from_ThreadState)(tst);
hc->where = VG_(get_ExeContext)(tid);
hc->tid = tid;
VG_(HT_add_node)( hg_malloc_list, (VgHashNode*)hc );
}
/* Allocate memory and note change in memory available */
static __inline__
void* alloc_and_new_mem ( ThreadState* tst, UInt size, UInt alignment,
Bool is_zeroed )
void* alloc_and_new_mem ( UInt size, UInt alignment, Bool is_zeroed )
{
Addr p;
p = (Addr)VG_(cli_malloc)(alignment, size);
add_HG_Chunk ( tst, p, size );
add_HG_Chunk ( VG_(get_current_or_recent_tid)(), p, size );
eraser_new_mem_heap( p, size, is_zeroed );
return (void*)p;
}
void* SK_(malloc) ( ThreadState* tst, Int n )
void* SK_(malloc) ( Int n )
{
return alloc_and_new_mem ( tst, n, VG_(clo_alignment), /*is_zeroed*/False );
return alloc_and_new_mem ( n, VG_(clo_alignment), /*is_zeroed*/False );
}
void* SK_(__builtin_new) ( ThreadState* tst, Int n )
void* SK_(__builtin_new) ( Int n )
{
return alloc_and_new_mem ( tst, n, VG_(clo_alignment), /*is_zeroed*/False );
return alloc_and_new_mem ( n, VG_(clo_alignment), /*is_zeroed*/False );
}
void* SK_(__builtin_vec_new) ( ThreadState* tst, Int n )
void* SK_(__builtin_vec_new) ( Int n )
{
return alloc_and_new_mem ( tst, n, VG_(clo_alignment), /*is_zeroed*/False );
return alloc_and_new_mem ( n, VG_(clo_alignment), /*is_zeroed*/False );
}
void* SK_(memalign) ( ThreadState* tst, Int align, Int n )
void* SK_(memalign) ( Int align, Int n )
{
return alloc_and_new_mem ( tst, n, align, /*is_zeroed*/False );
return alloc_and_new_mem ( n, align, /*is_zeroed*/False );
}
void* SK_(calloc) ( ThreadState* tst, Int nmemb, Int size1 )
void* SK_(calloc) ( Int nmemb, Int size1 )
{
void* p;
Int size, i;
size = nmemb * size1;
p = alloc_and_new_mem ( tst, size, VG_(clo_alignment), /*is_zeroed*/True );
p = alloc_and_new_mem ( size, VG_(clo_alignment), /*is_zeroed*/True );
for (i = 0; i < size; i++) /* calloc() is zeroed */
((UChar*)p)[i] = 0;
return p;
}
static
void die_and_free_mem ( ThreadState* tst, HG_Chunk* hc,
void die_and_free_mem ( ThreadId tid, HG_Chunk* hc,
HG_Chunk** prev_chunks_next_ptr )
{
ThreadId tid = VG_(get_tid_from_ThreadState)(tst);
Addr start = hc->data;
Addr end = start + hc->size;
Addr start = hc->data;
Addr end = start + hc->size;
Bool deadmx(Mutex *mx) {
if (mx->state != MxDead)
set_mutex_state(mx, MxDead, tid, tst);
set_mutex_state(mx, MxDead, tid);
return False;
}
@@ -1901,7 +1897,7 @@ void die_and_free_mem ( ThreadState* tst, HG_Chunk* hc,
*prev_chunks_next_ptr = hc->next;
/* Record where freed */
hc->where = VG_(get_ExeContext) ( tst );
hc->where = VG_(get_ExeContext) ( tid );
/* maintain a small window so that the error reporting machinery
knows about this memory */
@@ -1923,7 +1919,7 @@ void die_and_free_mem ( ThreadState* tst, HG_Chunk* hc,
static __inline__
void handle_free ( ThreadState* tst, void* p )
void handle_free ( void* p )
{
HG_Chunk* hc;
HG_Chunk** prev_chunks_next_ptr;
@@ -1933,29 +1929,31 @@ void handle_free ( ThreadState* tst, void* p )
if (hc == NULL) {
return;
}
die_and_free_mem ( tst, hc, prev_chunks_next_ptr );
die_and_free_mem ( VG_(get_current_or_recent_tid)(),
hc, prev_chunks_next_ptr );
}
void SK_(free) ( ThreadState* tst, void* p )
void SK_(free) ( void* p )
{
handle_free(tst, p);
handle_free(p);
}
void SK_(__builtin_delete) ( ThreadState* tst, void* p )
void SK_(__builtin_delete) ( void* p )
{
handle_free(tst, p);
handle_free(p);
}
void SK_(__builtin_vec_delete) ( ThreadState* tst, void* p )
void SK_(__builtin_vec_delete) ( void* p )
{
handle_free(tst, p);
handle_free(p);
}
void* SK_(realloc) ( ThreadState* tst, void* p, Int new_size )
void* SK_(realloc) ( void* p, Int new_size )
{
HG_Chunk *hc;
HG_Chunk **prev_chunks_next_ptr;
Int i;
ThreadId tid = VG_(get_current_or_recent_tid)();
/* First try and find the block. */
hc = (HG_Chunk*)VG_(HT_get_node) ( hg_malloc_list, (UInt)p,
@@ -1991,12 +1989,12 @@ void* SK_(realloc) ( ThreadState* tst, void* p, Int new_size )
((UChar*)p_new)[i] = ((UChar*)p)[i];
/* Free old memory */
die_and_free_mem ( tst, hc, prev_chunks_next_ptr );
die_and_free_mem ( tid, hc, prev_chunks_next_ptr );
/* this has to be after die_and_free_mem, otherwise the
former succeeds in shorting out the new block, not the
old, in the case when both are on the same list. */
add_HG_Chunk ( tst, p_new, new_size );
add_HG_Chunk ( tid, p_new, new_size );
return (void*)p_new;
}
@@ -2433,7 +2431,7 @@ UInt SK_(update_extra)(Error* err)
return sizeof(HelgrindError);
}
static void record_eraser_error ( ThreadState *tst, Addr a, Bool is_write,
static void record_eraser_error ( ThreadId tid, Addr a, Bool is_write,
shadow_word prevstate )
{
shadow_word *sw;
@@ -2447,7 +2445,7 @@ static void record_eraser_error ( ThreadState *tst, Addr a, Bool is_write,
err_extra.prevstate = prevstate;
if (clo_execontext)
err_extra.lasttouched = getExeContext(a);
VG_(maybe_record_error)( tst, EraserErr, a,
VG_(maybe_record_error)( tid, EraserErr, a,
(is_write ? "writing" : "reading"),
&err_extra);
@@ -2471,7 +2469,7 @@ static void record_mutex_error(ThreadId tid, Mutex *mutex,
err_extra.lasttouched = EC(ec, virgin_sword, thread_seg[tid]);
err_extra.lasttid = tid;
VG_(maybe_record_error)(VG_(get_ThreadState)(tid), MutexErr,
VG_(maybe_record_error)(tid, MutexErr,
(Addr)mutex->mutexp, str, &err_extra);
}
@@ -2491,8 +2489,7 @@ static void record_lockgraph_error(ThreadId tid, Mutex *mutex,
err_extra.held_lockset = lockset_holding;
err_extra.prev_lockset = lockset_prev;
VG_(maybe_record_error)(VG_(get_ThreadState)(tid), LockGraphErr,
mutex->mutexp, "", &err_extra);
VG_(maybe_record_error)(tid, LockGraphErr, mutex->mutexp, "", &err_extra);
}
Bool SK_(eq_SkinError) ( VgRes not_used, Error* e1, Error* e2 )
@@ -2771,7 +2768,7 @@ static void eraser_pre_mutex_lock(ThreadId tid, void* void_mutex)
{
Mutex *mutex = get_mutex((Addr)void_mutex);
test_mutex_state(mutex, MxLocked, tid, VG_(get_ThreadState)(tid));
test_mutex_state(mutex, MxLocked, tid);
}
static void eraser_post_mutex_lock(ThreadId tid, void* void_mutex)
@@ -2780,7 +2777,7 @@ static void eraser_post_mutex_lock(ThreadId tid, void* void_mutex)
Mutex *mutex = get_mutex((Addr)void_mutex);
const LockSet* ls;
set_mutex_state(mutex, MxLocked, tid, VG_(get_ThreadState)(tid));
set_mutex_state(mutex, MxLocked, tid);
# if DEBUG_LOCKS
VG_(printf)("lock (%u, %p)\n", tid, mutex->mutexp);
@@ -2815,8 +2812,8 @@ static void eraser_post_mutex_unlock(ThreadId tid, void* void_mutex)
Mutex *mutex = get_mutex((Addr)void_mutex);
const LockSet *ls;
test_mutex_state(mutex, MxUnlocked, tid, VG_(get_ThreadState)(tid));
set_mutex_state(mutex, MxUnlocked, tid, VG_(get_ThreadState)(tid));
test_mutex_state(mutex, MxUnlocked, tid);
set_mutex_state(mutex, MxUnlocked, tid);
if (!ismember(thread_locks[tid], mutex))
return;
@@ -2883,7 +2880,7 @@ void dump_around_a(Addr a)
#define DEBUG_STATE(args...)
#endif
static void eraser_mem_read_word(Addr a, ThreadId tid, ThreadState *tst)
static void eraser_mem_read_word(Addr a, ThreadId tid)
{
shadow_word* sword /* egcs-2.91.66 complains uninit */ = NULL;
shadow_word prevstate;
@@ -2968,7 +2965,7 @@ static void eraser_mem_read_word(Addr a, ThreadId tid, ThreadState *tst)
statechange = sword->other != prevstate.other;
if (isempty(ls)) {
record_eraser_error(tst, a, False /* !is_write */, prevstate);
record_eraser_error(tid, a, False /* !is_write */, prevstate);
}
goto done;
@@ -2977,32 +2974,25 @@ static void eraser_mem_read_word(Addr a, ThreadId tid, ThreadState *tst)
EC_EIP eceip;
if (clo_execontext == EC_Some)
eceip = EIP(VG_(get_EIP)(tst), prevstate, tls);
eceip = EIP(VG_(get_EIP)(tid), prevstate, tls);
else
eceip = EC(VG_(get_ExeContext)(tst), prevstate, tls);
eceip = EC(VG_(get_ExeContext)(tid), prevstate, tls);
setExeContext(a, eceip);
}
}
static void eraser_mem_read(Addr a, UInt size, ThreadState *tst)
static void eraser_mem_read(Addr a, UInt size, ThreadId tid)
{
ThreadId tid;
Addr end;
Addr end;
end = ROUNDUP(a+size, 4);
a = ROUNDDN(a, 4);
if (tst == NULL)
tid = VG_(get_current_tid)();
else
tid = VG_(get_tid_from_ThreadState)(tst);
for ( ; a < end; a += 4)
eraser_mem_read_word(a, tid, tst);
eraser_mem_read_word(a, tid);
}
static void eraser_mem_write_word(Addr a, ThreadId tid, ThreadState *tst)
static void eraser_mem_write_word(Addr a, ThreadId tid)
{
ThreadLifeSeg *tls;
shadow_word* sword /* egcs-2.91.66 complains uninit */ = NULL;
@@ -3080,7 +3070,7 @@ static void eraser_mem_write_word(Addr a, ThreadId tid, ThreadState *tst)
SHARED_MODIFIED:
if (isempty(unpackLockSet(sword->other))) {
record_eraser_error(tst, a, True /* is_write */, prevstate);
record_eraser_error(tid, a, True /* is_write */, prevstate);
}
goto done;
@@ -3089,70 +3079,64 @@ static void eraser_mem_write_word(Addr a, ThreadId tid, ThreadState *tst)
EC_EIP eceip;
if (clo_execontext == EC_Some)
eceip = EIP(VG_(get_EIP)(tst), prevstate, tls);
eceip = EIP(VG_(get_EIP)(tid), prevstate, tls);
else
eceip = EC(VG_(get_ExeContext)(tst), prevstate, tls);
eceip = EC(VG_(get_ExeContext)(tid), prevstate, tls);
setExeContext(a, eceip);
}
}
static void eraser_mem_write(Addr a, UInt size, ThreadState *tst)
static void eraser_mem_write(Addr a, UInt size, ThreadId tid)
{
ThreadId tid;
Addr end;
end = ROUNDUP(a+size, 4);
a = ROUNDDN(a, 4);
if (tst == NULL)
tid = VG_(get_current_tid)();
else
tid = VG_(get_tid_from_ThreadState)(tst);
for ( ; a < end; a += 4)
eraser_mem_write_word(a, tid, tst);
eraser_mem_write_word(a, tid);
}
#undef DEBUG_STATE
static void eraser_mem_help_read_1(Addr a)
{
eraser_mem_read(a, 1, NULL);
eraser_mem_read(a, 1, VG_(get_current_tid)());
}
static void eraser_mem_help_read_2(Addr a)
{
eraser_mem_read(a, 2, NULL);
eraser_mem_read(a, 2, VG_(get_current_tid)());
}
static void eraser_mem_help_read_4(Addr a)
{
eraser_mem_read(a, 4, NULL);
eraser_mem_read(a, 4, VG_(get_current_tid)());
}
static void eraser_mem_help_read_N(Addr a, UInt size)
{
eraser_mem_read(a, size, NULL);
eraser_mem_read(a, size, VG_(get_current_tid)());
}
static void eraser_mem_help_write_1(Addr a, UInt val)
{
if (*(UChar *)a != val)
eraser_mem_write(a, 1, NULL);
eraser_mem_write(a, 1, VG_(get_current_tid)());
}
static void eraser_mem_help_write_2(Addr a, UInt val)
{
if (*(UShort *)a != val)
eraser_mem_write(a, 2, NULL);
eraser_mem_write(a, 2, VG_(get_current_tid)());
}
static void eraser_mem_help_write_4(Addr a, UInt val)
{
if (*(UInt *)a != val)
eraser_mem_write(a, 4, NULL);
eraser_mem_write(a, 4, VG_(get_current_tid)());
}
static void eraser_mem_help_write_N(Addr a, UInt size)
{
eraser_mem_write(a, size, NULL);
eraser_mem_write(a, size, VG_(get_current_tid)());
}
static void hg_thread_create(ThreadId parent, ThreadId child)
@@ -3196,7 +3180,7 @@ static void bus_unlock(void)
/*--- Client requests ---*/
/*--------------------------------------------------------------------*/
Bool SK_(handle_client_request)(ThreadState *tst, UInt *args, UInt *ret)
Bool SK_(handle_client_request)(ThreadId tid, UInt *args, UInt *ret)
{
if (!VG_IS_SKIN_USERREQ('H','G',args[0]))
return False;

@@ -116,7 +116,7 @@ typedef unsigned char Bool;
interface; if the core and skin major versions don't match, Valgrind
will abort. The minor version indicates binary-compatible changes.
*/
#define VG_CORE_INTERFACE_MAJOR_VERSION 2
#define VG_CORE_INTERFACE_MAJOR_VERSION 3
#define VG_CORE_INTERFACE_MINOR_VERSION 0
extern const Int VG_(skin_interface_major_version);
@@ -275,29 +275,35 @@ extern Bool VG_(within_m_state_static_OR_threads)(Addr a);
pthread_mutex_t.__m_owner and pthread_cond_t.__c_waiting. */
#define VG_INVALID_THREADID ((ThreadId)(0))
/* ThreadIds are simply indices into the vg_threads[] array. */
/* ThreadIds are simply indices into the VG_(threads)[] array. */
typedef
UInt
ThreadId;
/* struct _ThreadState defined elsewhere; ThreadState is abstract as its
definition is not important for skins. */
typedef
struct _ThreadState
ThreadState;
/* When looking for the current ThreadId, this is the safe option and
probably the one you want.
Details: Use this one from non-generated code, eg. from functions called
on events like 'new_mem_heap'. In such a case, the "current" thread is
temporarily suspended as Valgrind's dispatcher is running. This function
is also suitable to be called from generated code (ie. from UCode, or a C
function called directly from UCode).
If you use VG_(get_current_tid)() from non-generated code, it will return
0 signifying the invalid thread, which is probably not what you want. */
extern ThreadId VG_(get_current_or_recent_tid) ( void );
/* Use this one from generated code */
extern ThreadId VG_(get_current_tid) ( void );
/* Use this one from non-generated code -- if you use VG_(get_current_tid)(),
it will return 0 for the invalid thread, which is not what you want. */
extern ThreadId VG_(get_current_or_recent_tid) ( void );
extern ThreadId VG_(get_tid_from_ThreadState) ( ThreadState* );
extern ThreadState* VG_(get_ThreadState) ( ThreadId tid );
/* When looking for the current ThreadId, only use this one if you know what
you are doing.
Details: Use this one from generated code, eg. from C functions called
from UCode. (VG_(get_current_or_recent_tid)() is also suitable in that
case.) If you use this function from non-generated code, it will return
0 signifying the invalid thread, which is probably not what you want. */
extern ThreadId VG_(get_current_tid) ( void );
/* Searches through all thread's stacks to see if any match. Returns
* VG_INVALID_THREADID if none match. */
VG_INVALID_THREADID if none match. */
extern ThreadId VG_(first_matching_thread_stack)
( Bool (*p) ( Addr stack_min, Addr stack_max ));
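
To make the distinction above concrete, a hedged sketch of the two lookup
styles (the surrounding function names are hypothetical):

   /* In a core-event callback (non-generated code), the client thread is
      suspended, so ask for the current-or-recent ThreadId: */
   static void my_new_mem_heap ( Addr a, UInt len, Bool is_zeroed )
   {
      ThreadId tid = VG_(get_current_or_recent_tid)();
      /* ... use tid ... */
   }

   /* In a C helper called directly from UCode (generated code): */
   static void my_helper ( Addr a )
   {
      ThreadId tid = VG_(get_current_tid)();
      /* ... use tid ... */
   }
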
@@ -1249,16 +1255,17 @@ extern void VG_(pp_ExeContext) ( ExeContext* );
ExeContexts to see if we already have it, and if not, allocate a
new one. Either way, return a pointer to the context.
If called from generated code, `tst' can be NULL and it will use the
ThreadState of the current thread. If called from elsewhere, `tst'
should not be NULL.
If called from generated code, use VG_(get_current_tid)() to get the
current ThreadId. If called from non-generated code, the current
ThreadId should be passed in by the core.
*/
extern ExeContext* VG_(get_ExeContext) ( ThreadState *tst );
extern ExeContext* VG_(get_ExeContext) ( ThreadId tid );
/* Just grab the client's EIP, as a much smaller and cheaper
indication of where they are. ThreadState should be NULL if it's called
from within generated code. */
extern Addr VG_(get_EIP)( ThreadState *tst );
indication of where they are. Use is basically same as for
VG_(get_ExeContext)() above.
*/
extern Addr VG_(get_EIP)( ThreadId tid );
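
Typical skin-side use under the new convention, as seen elsewhere in this
commit:

   hc->where = VG_(get_ExeContext)( tid );  /* full stack context */
   Addr eip  = VG_(get_EIP)( tid );         /* cheaper: just the EIP */

From generated code, pass VG_(get_current_tid)() as the tid.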
/*====================================================================*/
@@ -1330,14 +1337,13 @@ void* VG_(get_error_extra) ( Error* err );
seen before. If it has, the existing error record will have its count
incremented.
If the error occurs in generated code, 'tst' should be NULL. If the
error occurs in non-generated code, 'tst' should be non-NULL. The
`extra' field can be stack-allocated; it will be copied by the core
if needed. But it won't be copied if it's NULL.
'tid' can be found as for VG_(get_ExeContext)(). The `extra' field can
be stack-allocated; it will be copied by the core if needed (but it
won't be copied if it's NULL).
If no 'a', 's' or 'extra' of interest needs to be recorded, just use
NULL for them. */
extern void VG_(maybe_record_error) ( ThreadState* tst, ErrorKind ekind,
extern void VG_(maybe_record_error) ( ThreadId tid, ErrorKind ekind,
Addr a, Char* s, void* extra );
/* Similar to VG_(maybe_record_error)(), except this one doesn't record the
@@ -1347,7 +1353,7 @@ extern void VG_(maybe_record_error) ( ThreadState* tst, ErrorKind ekind,
hack that's useful sometimes if you just want to know if the error would
be suppressed without possibly printing it. `count_error' dictates
whether to add the error in the error total count (another mild hack). */
extern Bool VG_(unique_error) ( ThreadState* tst, ErrorKind ekind,
extern Bool VG_(unique_error) ( ThreadId tid, ErrorKind ekind,
Addr a, Char* s, void* extra,
ExeContext* where, Bool print_error,
Bool allow_GDB_attach, Bool count_error );
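
A sketch of a skin call under the new signatures (MyErr and err_extra are
hypothetical; the parameter list matches the declaration just above):

   is_suppressed = VG_(unique_error)( tid, MyErr, a, /*s*/NULL, &err_extra,
                                      /*where*/NULL, /*print_error*/True,
                                      /*allow_GDB_attach*/False,
                                      /*count_error*/True );
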
@@ -1420,55 +1426,6 @@ typedef
extern VgSectKind VG_(seg_sect_kind)(Addr);
/*====================================================================*/
/*=== Calling functions from the sim'd CPU ===*/
/*====================================================================*/
#define VG_USERREQ__CLIENT_tstCALL0 0x2101
#define VG_USERREQ__CLIENT_tstCALL1 0x2102
#define VG_USERREQ__CLIENT_tstCALL2 0x2103
#define VG_USERREQ__CLIENT_tstCALL3 0x2104
/* These requests are like VALGRIND_NON_SIMD_CALL[0123] in valgrind.h,
except they insert the current ThreadState as the first argument to the
called function. */
#define VALGRIND_NON_SIMD_tstCALL0(_qyy_fn) \
({unsigned int _qyy_res; \
VALGRIND_MAGIC_SEQUENCE(_qyy_res, 0 /* default return */, \
VG_USERREQ__CLIENT_tstCALL0, \
_qyy_fn, \
0, 0, 0); \
_qyy_res; \
})
#define VALGRIND_NON_SIMD_tstCALL1(_qyy_fn, _qyy_arg1) \
({unsigned int _qyy_res; \
VALGRIND_MAGIC_SEQUENCE(_qyy_res, 0 /* default return */, \
VG_USERREQ__CLIENT_tstCALL1, \
_qyy_fn, \
_qyy_arg1, 0, 0); \
_qyy_res; \
})
#define VALGRIND_NON_SIMD_tstCALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \
({unsigned int _qyy_res; \
VALGRIND_MAGIC_SEQUENCE(_qyy_res, 0 /* default return */, \
VG_USERREQ__CLIENT_tstCALL2, \
_qyy_fn, \
_qyy_arg1, _qyy_arg2, 0); \
_qyy_res; \
})
#define VALGRIND_NON_SIMD_tstCALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \
({unsigned int _qyy_res; \
VALGRIND_MAGIC_SEQUENCE(_qyy_res, 0 /* default return */, \
VG_USERREQ__CLIENT_tstCALL3, \
_qyy_fn, \
_qyy_arg1, _qyy_arg2, _qyy_arg3); \
_qyy_res; \
})
/*====================================================================*/
/*=== Generic hash table ===*/
/*====================================================================*/
@@ -1581,15 +1538,15 @@ extern UInt VG_(vg_malloc_redzone_szB);
/* If a skin links with vg_replace_malloc.c, the following functions will be
called appropriately when malloc() et al are called. */
extern void* SK_(malloc) ( ThreadState* tst, Int n );
extern void* SK_(__builtin_new) ( ThreadState* tst, Int n );
extern void* SK_(__builtin_vec_new) ( ThreadState* tst, Int n );
extern void* SK_(memalign) ( ThreadState* tst, Int align, Int n );
extern void* SK_(calloc) ( ThreadState* tst, Int nmemb, Int n );
extern void SK_(free) ( ThreadState* tst, void* p );
extern void SK_(__builtin_delete) ( ThreadState* tst, void* p );
extern void SK_(__builtin_vec_delete) ( ThreadState* tst, void* p );
extern void* SK_(realloc) ( ThreadState* tst, void* p, Int size );
extern void* SK_(malloc) ( Int n );
extern void* SK_(__builtin_new) ( Int n );
extern void* SK_(__builtin_vec_new) ( Int n );
extern void* SK_(memalign) ( Int align, Int n );
extern void* SK_(calloc) ( Int nmemb, Int n );
extern void SK_(free) ( void* p );
extern void SK_(__builtin_delete) ( void* p );
extern void SK_(__builtin_vec_delete) ( void* p );
extern void* SK_(realloc) ( void* p, Int size );
/* Can be called from SK_(malloc) et al to do the actual alloc/freeing. */
extern void* VG_(cli_malloc) ( UInt align, Int nbytes );
@@ -1713,7 +1670,14 @@ typedef
/* Events happening in core to track. To be notified, pass a callback
function to the appropriate function. To ignore an event, don't do
anything (default is for events to be ignored). */
anything (default is for events to be ignored).
Note that most events aren't passed a ThreadId. To find out the ThreadId
of the affected thread, use VG_(get_current_or_recent_tid)(). For the
ones passed a ThreadId, use that instead, since
VG_(get_current_or_recent_tid)() might not give the right ThreadId in
that case.
*/
/* Memory events (Nb: to track heap allocation/freeing, a skin must replace
@@ -1762,15 +1726,15 @@ EV VG_(track_die_mem_stack) ( void (*f)(Addr a, UInt len) );
EV VG_(track_ban_mem_stack) ( void (*f)(Addr a, UInt len) );
/* These ones occur around syscalls, signal handling, etc */
EV VG_(track_pre_mem_read) ( void (*f)(CorePart part, ThreadState* tst,
EV VG_(track_pre_mem_read) ( void (*f)(CorePart part, ThreadId tid,
Char* s, Addr a, UInt size) );
EV VG_(track_pre_mem_read_asciiz) ( void (*f)(CorePart part, ThreadState* tst,
EV VG_(track_pre_mem_read_asciiz) ( void (*f)(CorePart part, ThreadId tid,
Char* s, Addr a) );
EV VG_(track_pre_mem_write) ( void (*f)(CorePart part, ThreadState* tst,
EV VG_(track_pre_mem_write) ( void (*f)(CorePart part, ThreadId tid,
Char* s, Addr a, UInt size) );
/* Not implemented yet -- have to add in lots of places, which is a
pain. Won't bother unless/until there's a need. */
/* EV VG_(track_post_mem_read) ( void (*f)(ThreadState* tst, Char* s,
/* EV VG_(track_post_mem_read) ( void (*f)(ThreadId tid, Char* s,
Addr a, UInt size) ); */
EV VG_(track_post_mem_write) ( void (*f)(Addr a, UInt size) );
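
Registration is unchanged; only the callback types moved to ThreadId.
A sketch (the callback name is hypothetical):

   static void my_pre_mem_read ( CorePart part, ThreadId tid,
                                 Char* s, Addr a, UInt size )
   {
      /* 'tid' is supplied with this event -- no lookup needed */
   }

   /* ... then, during the skin's initialisation: */
   VG_(track_pre_mem_read)( &my_pre_mem_read );
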
@@ -1988,7 +1952,7 @@ extern void SK_(print_debug_usage) ( void );
not recognised. arg_block[0] holds the request number, any further args
from the request are in arg_block[1..]. 'ret' is for the return value...
it should probably be filled, if only with 0. */
extern Bool SK_(handle_client_request) ( ThreadState* tst, UInt* arg_block,
extern Bool SK_(handle_client_request) ( ThreadId tid, UInt* arg_block,
UInt *ret );
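
A minimal handler under the new signature ('X','X' stands for a hypothetical
skin code; VG_IS_SKIN_USERREQ is used the same way in hg_main.c above):

   Bool SK_(handle_client_request) ( ThreadId tid, UInt* arg_block, UInt* ret )
   {
      if (!VG_IS_SKIN_USERREQ('X','X', arg_block[0]))
         return False;
      /* ... dispatch on arg_block[0], using tid where needed ... */
      *ret = 0;
      return True;
   }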

@@ -482,7 +482,7 @@ void MAC_(do_detect_memory_leaks) (
print_record = ( MAC_(clo_show_reachable) || Proper != p_min->loss_mode );
is_suppressed =
VG_(unique_error) ( /*tst*/NULL, LeakErr, (UInt)i+1,
VG_(unique_error) ( VG_(get_current_tid)(), LeakErr, (UInt)i+1,
(Char*)n_lossrecords, (void*) p_min,
p_min->allocated_at, print_record,
/*allow_GDB_attach*/False, /*count_error*/False );

@@ -127,7 +127,7 @@ MAC_Chunk* MAC_(first_matching_freed_MAC_Chunk) ( Bool (*p)(MAC_Chunk*) )
/* Allocate its shadow chunk, put it on the appropriate list. */
static
void add_MAC_Chunk ( ThreadState* tst, Addr p, UInt size, MAC_AllocKind kind )
void add_MAC_Chunk ( Addr p, UInt size, MAC_AllocKind kind )
{
MAC_Chunk* mc;
@@ -135,7 +135,7 @@ void add_MAC_Chunk ( ThreadState* tst, Addr p, UInt size, MAC_AllocKind kind )
mc->data = p;
mc->size = size;
mc->allockind = kind;
mc->where = VG_(get_ExeContext)(tst);
mc->where = VG_(get_ExeContext)(VG_(get_current_or_recent_tid)());
/* Paranoia ... ensure this area is off-limits to the client, so
the mc->data field isn't visible to the leak checker. If memory
@@ -155,15 +155,15 @@ void add_MAC_Chunk ( ThreadState* tst, Addr p, UInt size, MAC_AllocKind kind )
/* Allocate memory and note change in memory available */
__inline__
void MAC_(new_block) ( ThreadState* tst, Addr p, UInt size,
UInt rzB, Bool is_zeroed, MAC_AllocKind kind )
void MAC_(new_block) ( Addr p, UInt size,
UInt rzB, Bool is_zeroed, MAC_AllocKind kind )
{
VGP_PUSHCC(VgpCliMalloc);
cmalloc_n_mallocs ++;
cmalloc_bs_mallocd += size;
add_MAC_Chunk( tst, p, size, kind );
add_MAC_Chunk( p, size, kind );
MAC_(ban_mem_heap)( p-rzB, rzB );
MAC_(new_mem_heap)( p, size, is_zeroed );
@@ -172,33 +172,33 @@ void MAC_(new_block) ( ThreadState* tst, Addr p, UInt size,
VGP_POPCC(VgpCliMalloc);
}
void* SK_(malloc) ( ThreadState* tst, Int n )
void* SK_(malloc) ( Int n )
{
if (n < 0) {
VG_(message)(Vg_UserMsg, "Warning: silly arg (%d) to malloc()", n );
return NULL;
} else {
Addr p = (Addr)VG_(cli_malloc)( VG_(clo_alignment), n );
MAC_(new_block) ( tst, p, n, VG_(vg_malloc_redzone_szB),
MAC_(new_block) ( p, n, VG_(vg_malloc_redzone_szB),
/*is_zeroed*/False, MAC_AllocMalloc );
return (void*)p;
}
}
void* SK_(__builtin_new) ( ThreadState* tst, Int n )
void* SK_(__builtin_new) ( Int n )
{
if (n < 0) {
VG_(message)(Vg_UserMsg, "Warning: silly arg (%d) to __builtin_new()", n);
return NULL;
} else {
Addr p = (Addr)VG_(cli_malloc)( VG_(clo_alignment), n );
MAC_(new_block) ( tst, p, n, VG_(vg_malloc_redzone_szB),
MAC_(new_block) ( p, n, VG_(vg_malloc_redzone_szB),
/*is_zeroed*/False, MAC_AllocNew );
return (void*)p;
}
}
void* SK_(__builtin_vec_new) ( ThreadState* tst, Int n )
void* SK_(__builtin_vec_new) ( Int n )
{
if (n < 0) {
VG_(message)(Vg_UserMsg,
@@ -206,26 +206,26 @@ void* SK_(__builtin_vec_new) ( ThreadState* tst, Int n )
return NULL;
} else {
Addr p = (Addr)VG_(cli_malloc)( VG_(clo_alignment), n );
MAC_(new_block) ( tst, p, n, VG_(vg_malloc_redzone_szB),
MAC_(new_block) ( p, n, VG_(vg_malloc_redzone_szB),
/*is_zeroed*/False, MAC_AllocNewVec );
return (void*)p;
}
}
void* SK_(memalign) ( ThreadState* tst, Int align, Int n )
void* SK_(memalign) ( Int align, Int n )
{
if (n < 0) {
VG_(message)(Vg_UserMsg, "Warning: silly arg (%d) to memalign()", n);
return NULL;
} else {
Addr p = (Addr)VG_(cli_malloc)( align, n );
MAC_(new_block) ( tst, p, n, VG_(vg_malloc_redzone_szB),
MAC_(new_block) ( p, n, VG_(vg_malloc_redzone_szB),
/*is_zeroed*/False, MAC_AllocMalloc );
return (void*)p;
}
}
void* SK_(calloc) ( ThreadState* tst, Int nmemb, Int size1 )
void* SK_(calloc) ( Int nmemb, Int size1 )
{
Int n, i;
@@ -237,7 +237,7 @@ void* SK_(calloc) ( ThreadState* tst, Int nmemb, Int size1 )
return NULL;
} else {
Addr p = (Addr)VG_(cli_malloc)( VG_(clo_alignment), n );
MAC_(new_block) ( tst, p, n, VG_(vg_malloc_redzone_szB),
MAC_(new_block) ( p, n, VG_(vg_malloc_redzone_szB),
/*is_zeroed*/True, MAC_AllocMalloc );
for (i = 0; i < n; i++)
((UChar*)p)[i] = 0;
@@ -246,7 +246,7 @@ void* SK_(calloc) ( ThreadState* tst, Int nmemb, Int size1 )
}
static
void die_and_free_mem ( ThreadState* tst, MAC_Chunk* mc,
void die_and_free_mem ( MAC_Chunk* mc,
MAC_Chunk** prev_chunks_next_ptr, UInt rzB )
{
/* Note: ban redzones again -- just in case user de-banned them
@@ -262,7 +262,7 @@ void die_and_free_mem ( ThreadState* tst, MAC_Chunk* mc,
*prev_chunks_next_ptr = mc->next;
/* Record where freed */
mc->where = VG_(get_ExeContext) ( tst );
mc->where = VG_(get_ExeContext) ( VG_(get_current_or_recent_tid)() );
/* Put it out of harm's way for a while, if not from a client request */
if (MAC_AllocCustom != mc->allockind)
@@ -273,11 +273,11 @@ void die_and_free_mem ( ThreadState* tst, MAC_Chunk* mc,
__inline__
void MAC_(handle_free) ( ThreadState* tst, Addr p, UInt rzB,
MAC_AllocKind kind )
void MAC_(handle_free) ( Addr p, UInt rzB, MAC_AllocKind kind )
{
MAC_Chunk* mc;
MAC_Chunk** prev_chunks_next_ptr;
ThreadId tid = VG_(get_current_or_recent_tid)();
VGP_PUSHCC(VgpCliMalloc);
@@ -286,40 +286,41 @@ void MAC_(handle_free) ( ThreadState* tst, Addr p, UInt rzB,
mc = (MAC_Chunk*)VG_(HT_get_node) ( MAC_(malloc_list), (UInt)p,
(VgHashNode***)&prev_chunks_next_ptr );
if (mc == NULL) {
MAC_(record_free_error) ( tst, p );
MAC_(record_free_error) ( tid, p );
VGP_POPCC(VgpCliMalloc);
return;
}
/* check if its a matching free() / delete / delete [] */
if (kind != mc->allockind) {
MAC_(record_freemismatch_error) ( tst, p );
MAC_(record_freemismatch_error) ( tid, p );
}
die_and_free_mem ( tst, mc, prev_chunks_next_ptr, rzB );
die_and_free_mem ( mc, prev_chunks_next_ptr, rzB );
VGP_POPCC(VgpCliMalloc);
}
void SK_(free) ( ThreadState* tst, void* p )
void SK_(free) ( void* p )
{
MAC_(handle_free)(tst, (Addr)p, VG_(vg_malloc_redzone_szB), MAC_AllocMalloc);
MAC_(handle_free)((Addr)p, VG_(vg_malloc_redzone_szB), MAC_AllocMalloc);
}
void SK_(__builtin_delete) ( ThreadState* tst, void* p )
void SK_(__builtin_delete) ( void* p )
{
MAC_(handle_free)(tst, (Addr)p, VG_(vg_malloc_redzone_szB), MAC_AllocNew);
MAC_(handle_free)((Addr)p, VG_(vg_malloc_redzone_szB), MAC_AllocNew);
}
void SK_(__builtin_vec_delete) ( ThreadState* tst, void* p )
void SK_(__builtin_vec_delete) ( void* p )
{
MAC_(handle_free)(tst, (Addr)p, VG_(vg_malloc_redzone_szB), MAC_AllocNewVec);
MAC_(handle_free)((Addr)p, VG_(vg_malloc_redzone_szB), MAC_AllocNewVec);
}
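The free-side wrappers become one-liners because MAC_(handle_free) now fetches the ThreadId itself via VG_(get_current_or_recent_tid)(). A hedged sketch of how a skin-specific deallocation hook would look under this scheme (my_free_custom is a hypothetical name, not part of this patch):

   /* Hypothetical helper: releasing a client-request-allocated block.
      MAC_(handle_free) grabs the ThreadId on its own, exactly as in
      the definition above. */
   static void my_free_custom ( Addr p, UInt rzB )
   {
      MAC_(handle_free) ( p, rzB, MAC_AllocCustom );
   }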
void* SK_(realloc) ( ThreadState* tst, void* p, Int new_size )
void* SK_(realloc) ( void* p, Int new_size )
{
MAC_Chunk *mc;
MAC_Chunk **prev_chunks_next_ptr;
UInt i;
ThreadId tid = VG_(get_current_or_recent_tid)();
VGP_PUSHCC(VgpCliMalloc);
@ -338,7 +339,7 @@ void* SK_(realloc) ( ThreadState* tst, void* p, Int new_size )
(VgHashNode***)&prev_chunks_next_ptr );
if (mc == NULL) {
MAC_(record_free_error) ( tst, (Addr)p );
MAC_(record_free_error) ( tid, (Addr)p );
/* Perhaps we should return to the program regardless. */
VGP_POPCC(VgpCliMalloc);
return NULL;
@ -347,7 +348,7 @@ void* SK_(realloc) ( ThreadState* tst, void* p, Int new_size )
/* check if it's a matching free() / delete / delete [] */
if (MAC_AllocMalloc != mc->allockind) {
/* cannot realloc a range that was allocated with new or new [] */
MAC_(record_freemismatch_error) ( tst, (Addr)p );
MAC_(record_freemismatch_error) ( tid, (Addr)p );
/* but keep going anyway */
}
@ -383,13 +384,13 @@ void* SK_(realloc) ( ThreadState* tst, void* p, Int new_size )
((UChar*)p_new)[i] = ((UChar*)p)[i];
/* Free old memory */
die_and_free_mem ( tst, mc, prev_chunks_next_ptr,
die_and_free_mem ( mc, prev_chunks_next_ptr,
VG_(vg_malloc_redzone_szB) );
/* this has to be after die_and_free_mem, otherwise the
former succeeds in shorting out the new block, not the
old, in the case when both are on the same list. */
add_MAC_Chunk ( tst, p_new, new_size, MAC_AllocMalloc );
add_MAC_Chunk ( p_new, new_size, MAC_AllocMalloc );
VGP_POPCC(VgpCliMalloc);
return (void*)p_new;

View File

@ -391,7 +391,7 @@ static Bool is_just_below_ESP( Addr esp, Addr aa )
/* This one called from generated code and non-generated code. */
void MAC_(record_address_error) ( ThreadState* tst, Addr a, Int size,
void MAC_(record_address_error) ( ThreadId tid, Addr a, Int size,
Bool isWrite )
{
MAC_Error err_extra;
@ -409,75 +409,74 @@ void MAC_(record_address_error) ( ThreadState* tst, Addr a, Int size,
err_extra.size = size;
err_extra.addrinfo.akind = Undescribed;
err_extra.addrinfo.maybe_gcc = just_below_esp;
VG_(maybe_record_error)( tst, AddrErr, a, /*s*/NULL, &err_extra );
VG_(maybe_record_error)( tid, AddrErr, a, /*s*/NULL, &err_extra );
}
/* These ones are called from non-generated code */
/* This is for memory errors in pthread functions, as opposed to pthread API
errors which are found by the core. */
void MAC_(record_core_mem_error) ( ThreadState* tst, Bool isWrite, Char* msg )
void MAC_(record_core_mem_error) ( ThreadId tid, Bool isWrite, Char* msg )
{
MAC_Error err_extra;
MAC_(clear_MAC_Error)( &err_extra );
err_extra.isWrite = isWrite;
VG_(maybe_record_error)( tst, CoreMemErr, /*addr*/0, msg, &err_extra );
VG_(maybe_record_error)( tid, CoreMemErr, /*addr*/0, msg, &err_extra );
}
void MAC_(record_param_error) ( ThreadState* tst, Addr a, Bool isWrite,
void MAC_(record_param_error) ( ThreadId tid, Addr a, Bool isWrite,
Char* msg )
{
MAC_Error err_extra;
sk_assert(NULL != tst);
sk_assert(VG_INVALID_THREADID != tid);
MAC_(clear_MAC_Error)( &err_extra );
err_extra.addrinfo.akind = Undescribed;
err_extra.isWrite = isWrite;
VG_(maybe_record_error)( tst, ParamErr, a, msg, &err_extra );
VG_(maybe_record_error)( tid, ParamErr, a, msg, &err_extra );
}
void MAC_(record_jump_error) ( ThreadState* tst, Addr a )
void MAC_(record_jump_error) ( ThreadId tid, Addr a )
{
MAC_Error err_extra;
sk_assert(NULL != tst);
sk_assert(VG_INVALID_THREADID != tid);
MAC_(clear_MAC_Error)( &err_extra );
err_extra.axskind = ExecAxs;
err_extra.addrinfo.akind = Undescribed;
VG_(maybe_record_error)( tst, AddrErr, a, /*s*/NULL, &err_extra );
VG_(maybe_record_error)( tid, AddrErr, a, /*s*/NULL, &err_extra );
}
void MAC_(record_free_error) ( ThreadState* tst, Addr a )
void MAC_(record_free_error) ( ThreadId tid, Addr a )
{
MAC_Error err_extra;
sk_assert(NULL != tst);
sk_assert(VG_INVALID_THREADID != tid);
MAC_(clear_MAC_Error)( &err_extra );
err_extra.addrinfo.akind = Undescribed;
VG_(maybe_record_error)( tst, FreeErr, a, /*s*/NULL, &err_extra );
VG_(maybe_record_error)( tid, FreeErr, a, /*s*/NULL, &err_extra );
}
void MAC_(record_freemismatch_error) ( ThreadState* tst, Addr a )
void MAC_(record_freemismatch_error) ( ThreadId tid, Addr a )
{
MAC_Error err_extra;
sk_assert(NULL != tst);
sk_assert(VG_INVALID_THREADID != tid);
MAC_(clear_MAC_Error)( &err_extra );
err_extra.addrinfo.akind = Undescribed;
VG_(maybe_record_error)( tst, FreeMismatchErr, a, /*s*/NULL, &err_extra );
VG_(maybe_record_error)( tid, FreeMismatchErr, a, /*s*/NULL, &err_extra );
}
void MAC_(record_overlap_error) ( ThreadState* tst, Char* function )
// This one is not passed a ThreadId, so it grabs it itself.
void MAC_(record_overlap_error) ( Char* function )
{
MAC_Error err_extra;
MAC_(clear_MAC_Error)( &err_extra );
VG_(maybe_record_error)( tst, OverlapErr, /*addr*/0, function, &err_extra );
VG_(maybe_record_error)( VG_(get_current_or_recent_tid)(),
OverlapErr, /*addr*/0, function, &err_extra );
}
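The error recorders reached only from non-generated code all follow one convention: they take a ThreadId and assert it is valid, the ThreadId analogue of the old sk_assert(NULL != tst). A sketch of the pattern for some further recorder (my_record_error and the error kind MyErr are hypothetical, for illustration only):

   /* Sketch of the convention: callers from non-generated code must
      pass a real ThreadId, which is checked before recording. */
   static void my_record_error ( ThreadId tid, Addr a )
   {
      MAC_Error err_extra;
      sk_assert(VG_INVALID_THREADID != tid);  /* was: NULL != tst */
      MAC_(clear_MAC_Error)( &err_extra );
      err_extra.addrinfo.akind = Undescribed;
      VG_(maybe_record_error)( tid, MyErr, a, /*s*/NULL, &err_extra );
   }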
@ -793,10 +792,14 @@ void MAC_(common_fini)(void (*leak_check)(void))
/*--- Common client request handling ---*/
/*------------------------------------------------------------*/
Bool MAC_(handle_common_client_requests)(ThreadState* tst, UInt* arg,
UInt* ret )
Bool MAC_(handle_common_client_requests)(ThreadId tid, UInt* arg, UInt* ret )
{
UInt* argv = (UInt*)arg;
// Not using 'tid' here because MAC_(new_block)() and MAC_(handle_free)()
// grab it themselves. But what they grab should match 'tid'; assert
// that it does.
sk_assert(tid == VG_(get_current_or_recent_tid)());
switch (arg[0]) {
case VG_USERREQ__COUNT_LEAKS: { /* count leaked bytes */
@ -816,14 +819,14 @@ Bool MAC_(handle_common_client_requests)(ThreadState* tst, UInt* arg,
UInt rzB = argv[3];
Bool is_zeroed = (Bool)argv[4];
MAC_(new_block) ( tst, p, sizeB, rzB, is_zeroed, MAC_AllocCustom );
MAC_(new_block) ( p, sizeB, rzB, is_zeroed, MAC_AllocCustom );
return True;
}
case VG_USERREQ__FREELIKE_BLOCK: {
Addr p = (Addr)argv[1];
UInt rzB = argv[2];
MAC_(handle_free) ( tst, p, rzB, MAC_AllocCustom );
MAC_(handle_free) ( p, rzB, MAC_AllocCustom );
return True;
}
default:

View File

@ -84,7 +84,7 @@ void complain2 ( Char* s, char* dst, const char* src )
{
Char buf[256];
snprintf(buf, 100, "%s(%p, %p)", s, dst, src );
VALGRIND_NON_SIMD_tstCALL1( MAC_(record_overlap_error), buf );
VALGRIND_NON_SIMD_CALL1( MAC_(record_overlap_error), buf );
}
static __inline__
@ -92,7 +92,7 @@ void complain3 ( Char* s, void* dst, const void* src, int n )
{
Char buf[256];
snprintf(buf, 100, "%s(%p, %p, %d)", s, dst, src, n );
VALGRIND_NON_SIMD_tstCALL1( MAC_(record_overlap_error), buf );
VALGRIND_NON_SIMD_CALL1( MAC_(record_overlap_error), buf );
}
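complain2/complain3 run on the simulated CPU inside the replacement string functions, so they hand the formatted message back to the core with VALGRIND_NON_SIMD_CALL1 (the tst-flavoured macro is gone); MAC_(record_overlap_error) then grabs the ThreadId itself. A hedged sketch of the kind of overlap test that would feed complain3 (the check is illustrative, not taken from this patch):

   /* Hypothetical overlap test: complain3 (above) is invoked when the
      n-byte ranges at dst and src intersect. */
   static __inline__
   void check_overlap ( Char* s, char* dst, const char* src, int n )
   {
      if ( (dst <= src && src < dst + n) ||
           (src <= dst && dst < src + n) )
         complain3 ( s, dst, src, n );
   }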
char* strrchr ( const char* s, int c )

View File

@ -289,22 +289,20 @@ extern void MAC_(clear_MAC_Error) ( MAC_Error* err_extra );
extern Bool MAC_(shared_recognised_suppression) ( Char* name, Supp* su );
extern void MAC_(new_block) ( ThreadState* tst, Addr p, UInt size,
UInt rzB, Bool is_zeroed,
MAC_AllocKind kind );
extern void MAC_(handle_free) ( ThreadState* tst, Addr p, UInt rzB,
MAC_AllocKind kind );
extern void MAC_(new_block) ( Addr p, UInt size, UInt rzB,
Bool is_zeroed, MAC_AllocKind kind );
extern void MAC_(handle_free) ( Addr p, UInt rzB, MAC_AllocKind kind );
extern void MAC_(record_address_error) ( ThreadState* tst, Addr a,
extern void MAC_(record_address_error) ( ThreadId tid, Addr a,
Int size, Bool isWrite );
extern void MAC_(record_core_mem_error) ( ThreadState* tst, Bool isWrite,
extern void MAC_(record_core_mem_error) ( ThreadId tid, Bool isWrite,
Char* s );
extern void MAC_(record_param_error) ( ThreadState* tst, Addr a,
extern void MAC_(record_param_error) ( ThreadId tid, Addr a,
Bool isWriteLack, Char* msg );
extern void MAC_(record_jump_error) ( ThreadState* tst, Addr a );
extern void MAC_(record_free_error) ( ThreadState* tst, Addr a );
extern void MAC_(record_freemismatch_error)( ThreadState* tst, Addr a );
extern void MAC_(record_overlap_error) ( ThreadState* tst, Char* function );
extern void MAC_(record_jump_error) ( ThreadId tid, Addr a );
extern void MAC_(record_free_error) ( ThreadId tid, Addr a );
extern void MAC_(record_freemismatch_error)( ThreadId tid, Addr a );
extern void MAC_(record_overlap_error) ( Char* function );
extern void MAC_(pp_shared_SkinError) ( Error* err);
@ -313,8 +311,8 @@ extern MAC_Chunk* MAC_(first_matching_freed_MAC_Chunk)( Bool (*p)(MAC_Chunk*) );
extern void MAC_(common_pre_clo_init) ( void );
extern void MAC_(common_fini) ( void (*leak_check)(void) );
extern Bool MAC_(handle_common_client_requests)
( ThreadState* tst, UInt* arg_block, UInt* ret );
extern Bool MAC_(handle_common_client_requests) ( ThreadId tid,
UInt* arg_block, UInt* ret );
extern void MAC_(print_malloc_stats) ( void );
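With the header now declaring ThreadId throughout, a skin built on the shared MAC_ code simply forwards the tid the core hands to its SK_ entry points. A minimal sketch under that assumption (check_syscall_buf is a hypothetical helper name):

   /* Hypothetical use of the declarations above: report an
      unaddressable syscall parameter for the thread the core gave us. */
   static void check_syscall_buf ( ThreadId tid, Char* s, Addr bad_addr )
   {
      MAC_(record_param_error) ( tid, bad_addr, /*isWriteLack*/True, s );
   }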

View File

@ -144,7 +144,7 @@ Bool MC_(client_perm_maybe_describe)( Addr a, AddrInfo* ai )
return False;
}
Bool SK_(handle_client_request) ( ThreadState* tst, UInt* arg, UInt* ret )
Bool SK_(handle_client_request) ( ThreadId tid, UInt* arg, UInt* ret )
{
Int i;
Bool ok;
@ -157,14 +157,14 @@ Bool SK_(handle_client_request) ( ThreadState* tst, UInt* arg, UInt* ret )
case VG_USERREQ__CHECK_WRITABLE: /* check writable */
ok = MC_(check_writable) ( arg[1], arg[2], &bad_addr );
if (!ok)
MC_(record_user_error) ( tst, bad_addr, True );
MC_(record_user_error) ( tid, bad_addr, True );
*ret = ok ? (UInt)NULL : bad_addr;
break;
case VG_USERREQ__CHECK_READABLE: /* check readable */
ok = MC_(check_readable) ( arg[1], arg[2], &bad_addr );
if (!ok)
MC_(record_user_error) ( tst, bad_addr, False );
MC_(record_user_error) ( tid, bad_addr, False );
*ret = ok ? (UInt)NULL : bad_addr;
break;
@ -179,7 +179,7 @@ Bool SK_(handle_client_request) ( ThreadState* tst, UInt* arg, UInt* ret )
vg_cgbs[i].kind = CG_NoAccess;
vg_cgbs[i].start = arg[1];
vg_cgbs[i].size = arg[2];
vg_cgbs[i].where = VG_(get_ExeContext) ( tst );
vg_cgbs[i].where = VG_(get_ExeContext) ( tid );
MC_(make_noaccess) ( arg[1], arg[2] );
*ret = i;
break;
@ -189,7 +189,7 @@ Bool SK_(handle_client_request) ( ThreadState* tst, UInt* arg, UInt* ret )
vg_cgbs[i].kind = CG_Writable;
vg_cgbs[i].start = arg[1];
vg_cgbs[i].size = arg[2];
vg_cgbs[i].where = VG_(get_ExeContext) ( tst );
vg_cgbs[i].where = VG_(get_ExeContext) ( tid );
MC_(make_writable) ( arg[1], arg[2] );
*ret = i;
break;
@ -199,7 +199,7 @@ Bool SK_(handle_client_request) ( ThreadState* tst, UInt* arg, UInt* ret )
vg_cgbs[i].kind = CG_Readable;
vg_cgbs[i].start = arg[1];
vg_cgbs[i].size = arg[2];
vg_cgbs[i].where = VG_(get_ExeContext) ( tst );
vg_cgbs[i].where = VG_(get_ExeContext) ( tid );
MC_(make_readable) ( arg[1], arg[2] );
*ret = i;
break;
@ -219,7 +219,7 @@ Bool SK_(handle_client_request) ( ThreadState* tst, UInt* arg, UInt* ret )
error. */
/* VG_(printf)("get_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
*ret = MC_(get_or_set_vbits_for_client)
( tst, arg[1], arg[2], arg[3], False /* get them */ );
( tid, arg[1], arg[2], arg[3], False /* get them */ );
break;
case VG_USERREQ__SET_VBITS:
@ -227,11 +227,11 @@ Bool SK_(handle_client_request) ( ThreadState* tst, UInt* arg, UInt* ret )
error. */
/* VG_(printf)("set_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
*ret = MC_(get_or_set_vbits_for_client)
( tst, arg[1], arg[2], arg[3], True /* set them */ );
( tid, arg[1], arg[2], arg[3], True /* set them */ );
break;
default:
if (MAC_(handle_common_client_requests)(tst, arg, ret )) {
if (MAC_(handle_common_client_requests)(tid, arg, ret )) {
return True;
} else {
VG_(message)(Vg_UserMsg,

View File

@ -127,27 +127,26 @@ void SK_(pp_SkinError) ( Error* err )
/* Creates a copy of the `extra' part, updates the copy with address info if
necessary, and returns the copy. */
/* This one called from generated code and non-generated code. */
void MC_(record_value_error) ( ThreadState* tst, Int size )
void MC_(record_value_error) ( ThreadId tid, Int size )
{
MAC_Error err_extra;
MAC_(clear_MAC_Error)( &err_extra );
err_extra.size = size;
VG_(maybe_record_error)( tst, ValueErr, /*addr*/0, /*s*/NULL, &err_extra );
VG_(maybe_record_error)( tid, ValueErr, /*addr*/0, /*s*/NULL, &err_extra );
}
/* This called from non-generated code */
void MC_(record_user_error) ( ThreadState* tst, Addr a, Bool isWrite )
void MC_(record_user_error) ( ThreadId tid, Addr a, Bool isWrite )
{
MAC_Error err_extra;
sk_assert(NULL != tst);
sk_assert(VG_INVALID_THREADID != tid);
MAC_(clear_MAC_Error)( &err_extra );
err_extra.addrinfo.akind = Undescribed;
err_extra.isWrite = isWrite;
VG_(maybe_record_error)( tst, UserErr, a, /*s*/NULL, &err_extra );
VG_(maybe_record_error)( tid, UserErr, a, /*s*/NULL, &err_extra );
}
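Of these two, MC_(record_value_error) is reachable from generated code and so does no tid assertion, while MC_(record_user_error) is only reached from the client-request path and insists on a valid ThreadId. A sketch of a caller on that path (my_check_writable is hypothetical; compare the VG_USERREQ__CHECK_WRITABLE case earlier):

   /* Hypothetical wrapper mirroring the CHECK_WRITABLE client request:
      the tid is the one the core passed to SK_(handle_client_request). */
   static void my_check_writable ( ThreadId tid, Addr base, UInt len )
   {
      Addr bad_addr;
      Bool ok = MC_(check_writable) ( base, len, &bad_addr );
      if (!ok)
         MC_(record_user_error) ( tid, bad_addr, /*isWrite*/True );
   }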
/*------------------------------------------------------------*/

View File

@ -144,7 +144,7 @@ extern Bool MC_(check_readable) ( Addr a, UInt len, Addr* bad_addr );
extern void MC_(detect_memory_leaks) ( void );
extern Int MC_(get_or_set_vbits_for_client) (
ThreadState* tst,
ThreadId tid,
Addr dataV,
Addr vbitsV,
UInt size,
@ -157,8 +157,8 @@ extern void MC_(show_client_block_stats) ( void );
/* Functions defined in mc_errcontext.c */
extern void MC_(record_value_error) ( ThreadState* tst, Int size );
extern void MC_(record_user_error) ( ThreadState* tst, Addr a, Bool isWrite );
extern void MC_(record_value_error) ( ThreadId tid, Int size );
extern void MC_(record_user_error) ( ThreadId tid, Addr a, Bool isWrite );
#endif

View File

@ -608,8 +608,8 @@ static Bool mc_check_readable_asciiz ( Addr a, Addr* bad_addr )
/*------------------------------------------------------------*/
static
void mc_check_is_writable ( CorePart part, ThreadState* tst,
Char* s, Addr base, UInt size )
void mc_check_is_writable ( CorePart part, ThreadId tid, Char* s,
Addr base, UInt size )
{
Bool ok;
Addr bad_addr;
@ -622,12 +622,12 @@ void mc_check_is_writable ( CorePart part, ThreadState* tst,
if (!ok) {
switch (part) {
case Vg_CoreSysCall:
MAC_(record_param_error) ( tst, bad_addr, /*isWrite =*/True, s );
MAC_(record_param_error) ( tid, bad_addr, /*isWrite =*/True, s );
break;
case Vg_CorePThread:
case Vg_CoreSignal:
MAC_(record_core_mem_error)( tst, /*isWrite=*/True, s );
MAC_(record_core_mem_error)( tid, /*isWrite=*/True, s );
break;
default:
@ -639,8 +639,8 @@ void mc_check_is_writable ( CorePart part, ThreadState* tst,
}
static
void mc_check_is_readable ( CorePart part, ThreadState* tst,
Char* s, Addr base, UInt size )
void mc_check_is_readable ( CorePart part, ThreadId tid, Char* s,
Addr base, UInt size )
{
Bool ok;
Addr bad_addr;
@ -653,17 +653,17 @@ void mc_check_is_readable ( CorePart part, ThreadState* tst,
if (!ok) {
switch (part) {
case Vg_CoreSysCall:
MAC_(record_param_error) ( tst, bad_addr, /*isWrite =*/False, s );
MAC_(record_param_error) ( tid, bad_addr, /*isWrite =*/False, s );
break;
case Vg_CorePThread:
MAC_(record_core_mem_error)( tst, /*isWrite=*/False, s );
MAC_(record_core_mem_error)( tid, /*isWrite=*/False, s );
break;
/* If we're being asked to jump to a silly address, record an error
message before potentially crashing the entire system. */
case Vg_CoreTranslate:
MAC_(record_jump_error)( tst, bad_addr );
MAC_(record_jump_error)( tid, bad_addr );
break;
default:
@ -674,7 +674,7 @@ void mc_check_is_readable ( CorePart part, ThreadState* tst,
}
static
void mc_check_is_readable_asciiz ( CorePart part, ThreadState* tst,
void mc_check_is_readable_asciiz ( CorePart part, ThreadId tid,
Char* s, Addr str )
{
Bool ok = True;
@ -686,7 +686,7 @@ void mc_check_is_readable_asciiz ( CorePart part, ThreadState* tst,
sk_assert(part == Vg_CoreSysCall);
ok = mc_check_readable_asciiz ( (Addr)str, &bad_addr );
if (!ok) {
MAC_(record_param_error) ( tst, bad_addr, /*is_writable =*/False, s );
MAC_(record_param_error) ( tid, bad_addr, /*is_writable =*/False, s );
}
VGP_POPCC(VgpCheckMem);
@ -959,7 +959,7 @@ static UInt mc_rd_V4_SLOWLY ( Addr a )
if (!MAC_(clo_partial_loads_ok)
|| ((a & 3) != 0)
|| (!a0ok && !a1ok && !a2ok && !a3ok)) {
MAC_(record_address_error)( /*tst*/NULL, a, 4, False );
MAC_(record_address_error)( VG_(get_current_tid)(), a, 4, False );
return (VGM_BYTE_VALID << 24) | (VGM_BYTE_VALID << 16)
| (VGM_BYTE_VALID << 8) | VGM_BYTE_VALID;
}
@ -1002,7 +1002,7 @@ static void mc_wr_V4_SLOWLY ( Addr a, UInt vbytes )
/* If an address error has happened, report it. */
if (aerr)
MAC_(record_address_error)( /*tst*/NULL, a, 4, True );
MAC_(record_address_error)( VG_(get_current_tid)(), a, 4, True );
}
static UInt mc_rd_V2_SLOWLY ( Addr a )
@ -1021,7 +1021,7 @@ static UInt mc_rd_V2_SLOWLY ( Addr a )
/* If an address error has happened, report it. */
if (aerr) {
MAC_(record_address_error)( /*tst*/NULL, a, 2, False );
MAC_(record_address_error)( VG_(get_current_tid)(), a, 2, False );
vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
| (VGM_BYTE_VALID << 8) | (VGM_BYTE_VALID);
}
@ -1043,7 +1043,7 @@ static void mc_wr_V2_SLOWLY ( Addr a, UInt vbytes )
/* If an address error has happened, report it. */
if (aerr)
MAC_(record_address_error)( /*tst*/NULL, a, 2, True );
MAC_(record_address_error)( VG_(get_current_tid)(), a, 2, True );
}
static UInt mc_rd_V1_SLOWLY ( Addr a )
@ -1060,7 +1060,7 @@ static UInt mc_rd_V1_SLOWLY ( Addr a )
/* If an address error has happened, report it. */
if (aerr) {
MAC_(record_address_error)( /*tst*/NULL, a, 1, False );
MAC_(record_address_error)( VG_(get_current_tid)(), a, 1, False );
vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
| (VGM_BYTE_INVALID << 8) | (VGM_BYTE_VALID);
}
@ -1079,7 +1079,7 @@ static void mc_wr_V1_SLOWLY ( Addr a, UInt vbytes )
/* If an address error has happened, report it. */
if (aerr)
MAC_(record_address_error)( /*tst*/NULL, a, 1, True );
MAC_(record_address_error)( VG_(get_current_tid)(), a, 1, True );
}
@ -1090,22 +1090,22 @@ static void mc_wr_V1_SLOWLY ( Addr a, UInt vbytes )
void MC_(helperc_value_check0_fail) ( void )
{
MC_(record_value_error) ( /*tst*/NULL, 0 );
MC_(record_value_error) ( VG_(get_current_tid)(), 0 );
}
void MC_(helperc_value_check1_fail) ( void )
{
MC_(record_value_error) ( /*tst*/NULL, 1 );
MC_(record_value_error) ( VG_(get_current_tid)(), 1 );
}
void MC_(helperc_value_check2_fail) ( void )
{
MC_(record_value_error) ( /*tst*/NULL, 2 );
MC_(record_value_error) ( VG_(get_current_tid)(), 2 );
}
void MC_(helperc_value_check4_fail) ( void )
{
MC_(record_value_error) ( /*tst*/NULL, 4 );
MC_(record_value_error) ( VG_(get_current_tid)(), 4 );
}
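Helpers invoked straight from generated code used to pass /*tst*/NULL; they now ask the core for the running thread with VG_(get_current_tid)() (VG_(get_current_or_recent_tid)(), by contrast, appears on paths that may run when no client thread is live, judging by the name). The pattern, sketched as a hypothetical extra width (there is no 8-byte variant in this patch):

   /* Sketch only: what a further value-check helper would look like. */
   void MC_(helperc_value_check8_fail) ( void )
   {
      MC_(record_value_error) ( VG_(get_current_tid)(), 8 );
   }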
@ -1311,10 +1311,10 @@ void mc_fpu_read_check_SLOWLY ( Addr addr, Int size )
}
if (aerr) {
MAC_(record_address_error)( /*tst*/NULL, addr, size, False );
MAC_(record_address_error)( VG_(get_current_tid)(), addr, size, False );
} else {
if (verr)
MC_(record_value_error)( /*tst*/NULL, size );
MC_(record_value_error)( VG_(get_current_tid)(), size );
}
}
@ -1341,7 +1341,7 @@ void mc_fpu_write_check_SLOWLY ( Addr addr, Int size )
}
}
if (aerr) {
MAC_(record_address_error)( /*tst*/NULL, addr, size, True );
MAC_(record_address_error)( VG_(get_current_tid)(), addr, size, True );
}
}
@ -1353,7 +1353,7 @@ void mc_fpu_write_check_SLOWLY ( Addr addr, Int size )
/* Copy Vbits for src into vbits. Returns: 1 == OK, 2 == alignment
error, 3 == addressing error. */
Int MC_(get_or_set_vbits_for_client) (
ThreadState* tst,
ThreadId tid,
Addr dataV,
Addr vbitsV,
UInt size,
@ -1389,12 +1389,12 @@ Int MC_(get_or_set_vbits_for_client) (
}
}
if (!addressibleD) {
MAC_(record_address_error)( tst, (Addr)dataP, 4,
MAC_(record_address_error)( tid, (Addr)dataP, 4,
setting ? True : False );
return 3;
}
if (!addressibleV) {
MAC_(record_address_error)( tst, (Addr)vbitsP, 4,
MAC_(record_address_error)( tid, (Addr)vbitsP, 4,
setting ? False : True );
return 3;
}
@ -1404,7 +1404,7 @@ Int MC_(get_or_set_vbits_for_client) (
/* setting */
for (i = 0; i < szW; i++) {
if (get_vbytes4_ALIGNED( (Addr)&vbits[i] ) != VGM_WORD_VALID)
MC_(record_value_error)(tst, 4);
MC_(record_value_error)(tid, 4);
set_vbytes4_ALIGNED( (Addr)&data[i], vbits[i] );
}
} else {