Merge in branches/DCAS.

This branch adds proper support for atomic instructions, proper in the
sense that the atomicity is preserved through the compilation
pipeline, and thus in the instrumented code.

These changes track the IR changes added by vex r1901.  They primarily
update the instrumentation functions in all tools to handle the new
IR; the exception is exp-ptrcheck, which needs some further work
before it can run threaded code.
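
Most tools pick up the new Ist_CAS statement with the same boilerplate,
which recurs throughout the diff below.  As a minimal sketch of that
shared pattern (assuming the VEX IR API as modified by vex r1901;
emit_read/emit_write are hypothetical stand-ins for each tool's own
event helpers):

   case Ist_CAS: {
      /* Treat the CAS as a read and a write of the location. */
      Int    dataSize;
      IRCAS* cas = st->Ist.CAS.details;
      tl_assert(cas->addr   != NULL);
      tl_assert(cas->dataLo != NULL);
      dataSize = sizeofIRType(typeOfIRExpr(tyenv, cas->dataLo));
      if (cas->dataHi != NULL)
         dataSize *= 2;                  /* doubleword-CAS: two words */
      emit_read ( cas->addr, dataSize );    /* hypothetical helper */
      emit_write( cas->addr, dataSize );    /* hypothetical helper */
      break;
   }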



git-svn-id: svn://svn.valgrind.org/valgrind/trunk@10392
Julian Seward 2009-07-01 08:10:49 +00:00
parent c5fba35961
commit 3109865279
20 changed files with 1312 additions and 322 deletions

View File

@ -1032,6 +1032,27 @@ IRSB* cg_instrument ( VgCallbackClosure* closure,
break;
}
case Ist_CAS: {
/* We treat it as a read and a write of the location. I
think that is the same behaviour as it was before IRCAS
was introduced, since prior to that point, the Vex
front ends would translate a lock-prefixed instruction
into a (normal) read followed by a (normal) write. */
Int dataSize;
IRCAS* cas = st->Ist.CAS.details;
tl_assert(cas->addr != NULL);
tl_assert(cas->dataLo != NULL);
dataSize = sizeofIRType(typeOfIRExpr(tyenv, cas->dataLo));
if (cas->dataHi != NULL)
dataSize *= 2; /* since it's a doubleword-CAS */
/* I don't think this can ever happen, but play safe. */
if (dataSize > MIN_LINE_SIZE)
dataSize = MIN_LINE_SIZE;
addEvent_Dr( &cgs, curr_inode, dataSize, cas->addr );
addEvent_Dw( &cgs, curr_inode, dataSize, cas->addr );
break;
}
case Ist_Exit: {
/* Stuff to widen the guard expression to a host word, so
we can pass it to the branch predictor simulation

View File

@ -657,8 +657,14 @@ void CLG_(collectBlockInfo)(IRSB* sbIn,
static
void addConstMemStoreStmt( IRSB* bbOut, UWord addr, UInt val, IRType hWordTy)
{
/* JRS 2009june01: re IRTemp_INVALID, am assuming that this
function is used only to create instrumentation, and not to
copy/reconstruct IRStmt_Stores that were in the incoming IR
superblock. If that is not a correct assumption, then things
will break badly on PowerPC, esp w/ threaded apps. */
addStmtToIRSB( bbOut,
IRStmt_Store(CLGEndness,
IRTemp_INVALID,
IRExpr_Const(hWordTy == Ity_I32 ?
IRConst_U32( addr ) :
IRConst_U64( addr )),
@ -841,6 +847,24 @@ IRSB* CLG_(instrument)( VgCallbackClosure* closure,
break;
}
case Ist_CAS: {
/* We treat it as a read and a write of the location. I
think that is the same behaviour as it was before IRCAS
was introduced, since prior to that point, the Vex
front ends would translate a lock-prefixed instruction
into a (normal) read followed by a (normal) write. */
Int dataSize;
IRCAS* cas = st->Ist.CAS.details;
CLG_ASSERT(cas->addr && isIRAtom(cas->addr));
CLG_ASSERT(cas->dataLo);
dataSize = sizeofIRType(typeOfIRExpr(sbIn->tyenv, cas->dataLo));
if (cas->dataHi != NULL)
dataSize *= 2; /* since this is a doubleword-CAS */
addEvent_Dr( &clgs, curr_inode, dataSize, cas->addr );
addEvent_Dw( &clgs, curr_inode, dataSize, cas->addr );
break;
}
case Ist_Exit: {
UInt jmps_passed;
@ -1101,7 +1125,8 @@ UInt syscalltime[VG_N_THREADS];
#endif
static
void CLG_(pre_syscalltime)(ThreadId tid, UInt syscallno)
void CLG_(pre_syscalltime)(ThreadId tid, UInt syscallno,
UWord* args, UInt nArgs)
{
if (CLG_(clo).collect_systime) {
#if CLG_MICROSYSTIME
@ -1115,7 +1140,8 @@ void CLG_(pre_syscalltime)(ThreadId tid, UInt syscallno)
}
static
void CLG_(post_syscalltime)(ThreadId tid, UInt syscallno, SysRes res)
void CLG_(post_syscalltime)(ThreadId tid, UInt syscallno,
UWord* args, UInt nArgs, SysRes res)
{
if (CLG_(clo).collect_systime &&
CLG_(current_state).bbcc) {

View File

@ -350,7 +350,7 @@ Bool VG_(machine_get_hwcaps)( void )
LibVEX_default_VexArchInfo(&vai);
#if defined(VGA_x86)
{ Bool have_sse1, have_sse2;
{ Bool have_sse1, have_sse2, have_cx8;
UInt eax, ebx, ecx, edx;
if (!VG_(has_cpuid)())
@ -368,6 +368,13 @@ Bool VG_(machine_get_hwcaps)( void )
have_sse1 = (edx & (1<<25)) != 0; /* True => have sse insns */
have_sse2 = (edx & (1<<26)) != 0; /* True => have sse2 insns */
/* cmpxchg8b is a minimum requirement now; if we don't have it we
must simply give up. But all CPUs since Pentium-I have it, so
that doesn't seem like much of a restriction. */
have_cx8 = (edx & (1<<8)) != 0; /* True => have cmpxchg8b */
if (!have_cx8)
return False;
if (have_sse2 && have_sse1) {
va = VexArchX86;
vai.hwcaps = VEX_HWCAPS_X86_SSE1;
@ -390,10 +397,40 @@ Bool VG_(machine_get_hwcaps)( void )
}
#elif defined(VGA_amd64)
vg_assert(VG_(has_cpuid)());
va = VexArchAMD64;
vai.hwcaps = 0; /*baseline - SSE2 */
return True;
{ Bool have_sse1, have_sse2, have_sse3, have_cx8, have_cx16;
UInt eax, ebx, ecx, edx;
if (!VG_(has_cpuid)())
/* we can't do cpuid at all. Give up. */
return False;
VG_(cpuid)(0, &eax, &ebx, &ecx, &edx);
if (eax < 1)
/* we can't ask for cpuid(x) for x > 0. Give up. */
return False;
/* get capabilities bits into edx */
VG_(cpuid)(1, &eax, &ebx, &ecx, &edx);
have_sse1 = (edx & (1<<25)) != 0; /* True => have sse insns */
have_sse2 = (edx & (1<<26)) != 0; /* True => have sse2 insns */
have_sse3 = (ecx & (1<<9)) != 0; /* True => have sse3 insns */
/* cmpxchg8b is a minimum requirement now; if we don't have it we
must simply give up. But all CPUs since Pentium-I have it, so
that doesn't seem like much of a restriction. */
have_cx8 = (edx & (1<<8)) != 0; /* True => have cmpxchg8b */
if (!have_cx8)
return False;
/* on amd64 we tolerate older cpus, which don't have cmpxchg16b */
have_cx16 = (ecx & (1<<13)) != 0; /* True => have cmpxchg16b */
va = VexArchAMD64;
vai.hwcaps = (have_sse3 ? VEX_HWCAPS_AMD64_SSE3 : 0)
| (have_cx16 ? VEX_HWCAPS_AMD64_CX16 : 0);
return True;
}
#elif defined(VGA_ppc32)
{
@ -549,7 +586,6 @@ Bool VG_(machine_get_hwcaps)( void )
VG_(sigaction)(VKI_SIGFPE, NULL, &saved_sigfpe_act);
tmp_sigfpe_act = saved_sigfpe_act;
/* NODEFER: signal handler does not return (from the kernel's point of
view), hence if it is to successfully catch a signal more than once,
we need the NODEFER flag. */
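
For reference, the CX8 and CX16 bits tested above are CPUID.01H:EDX
bit 8 (cmpxchg8b) and CPUID.01H:ECX bit 13 (cmpxchg16b).  A standalone
analogue of the check, as a sketch using GCC/Clang's cpuid.h rather
than Valgrind's VG_(cpuid) wrapper:

   #include <cpuid.h>
   #include <stdio.h>

   int main ( void )
   {
      unsigned int eax, ebx, ecx, edx;
      if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
         return 1;   /* cpuid(1) unavailable: give up, as above */
      printf("cmpxchg8b:  %s\n", (edx & (1u << 8))  ? "yes" : "no");
      printf("cmpxchg16b: %s\n", (ecx & (1u << 13)) ? "yes" : "no");
      return 0;
   }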

View File

@ -679,22 +679,6 @@ static UInt run_thread_for_a_while ( ThreadId tid )
trc = 0;
dispatch_ctr_SAVED = VG_(dispatch_ctr);
# if defined(VGA_ppc32) || defined(VGA_ppc64)
/* This is necessary due to the hacky way vex models reservations
on ppc. It's really quite incorrect for each thread to have its
own reservation flag/address, since it's really something that
all threads share (that's the whole point). But having shared
guest state is something we can't model with Vex. However, as
per PaulM's 2.4.0ppc, the reservation is modelled using a
reservation flag which is cleared at each context switch. So it
is indeed possible to get away with a per thread-reservation if
the thread's reservation is cleared before running it.
*/
/* Clear any existing reservation that this thread might have made
last time it was running. */
VG_(threads)[tid].arch.vex.guest_RESVN = 0;
# endif
# if defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
/* On AIX, we need to get a plausible value for SPRG3 for this
thread, since it's used I think as a thread-state pointer. It
@ -1169,6 +1153,10 @@ VgSchedReturnCode VG_(scheduler) ( ThreadId tid )
VG_(synth_fault)(tid);
break;
case VEX_TRC_JMP_SIGBUS:
VG_(synth_sigbus)(tid);
break;
case VEX_TRC_JMP_NODECODE:
VG_(message)(Vg_UserMsg,
"valgrind: Unrecognised instruction at address %#lx.",

View File

@ -1766,6 +1766,27 @@ void VG_(synth_sigill)(ThreadId tid, Addr addr)
deliver_signal(tid, &info, NULL);
}
// Synthesise a SIGBUS.
void VG_(synth_sigbus)(ThreadId tid)
{
vki_siginfo_t info;
vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
VG_(memset)(&info, 0, sizeof(info));
info.si_signo = VKI_SIGBUS;
/* There are several meanings to SIGBUS (as per POSIX, presumably),
but the most widely understood is "invalid address alignment",
so let's use that. */
info.si_code = VKI_BUS_ADRALN;
/* If we knew the invalid address in question, we could put it
in .si_addr. Oh well. */
/* info.VKI_SIGINFO_si_addr = (void*)addr; */
resume_scheduler(tid);
deliver_signal(tid, &info, NULL);
}
// Synthesise a SIGTRAP.
void VG_(synth_sigtrap)(ThreadId tid)
{
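
The synthesised signal reaches the client just like a kernel-delivered
SIGBUS, so a client handler sees si_code == BUS_ADRALN.  A sketch of
such an observer in plain POSIX C (nothing Valgrind-specific assumed):

   #include <signal.h>
   #include <stdio.h>
   #include <stdlib.h>

   static void on_sigbus ( int sig, siginfo_t* si, void* uc )
   {
      /* BUS_ADRALN == "invalid address alignment", as set above */
      if (si->si_code == BUS_ADRALN)
         fprintf(stderr, "SIGBUS: invalid address alignment\n");
      _exit(1);
   }

   int main ( void )
   {
      struct sigaction sa;
      sa.sa_sigaction = on_sigbus;
      sigemptyset(&sa.sa_mask);
      sa.sa_flags = SA_SIGINFO;
      sigaction(SIGBUS, &sa, NULL);
      /* ... run code that may fault, e.g. a misaligned atomic ... */
      return 0;
   }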

View File

@ -431,7 +431,6 @@ void getSyscallArgsFromGuestState ( /*OUT*/SyscallArgs* canonical,
canonical->arg7 = 0;
canonical->arg8 = 0;
#elif defined(VGP_ppc32_linux)
VexGuestPPC32State* gst = (VexGuestPPC32State*)gst_vanilla;
canonical->sysno = gst->guest_GPR0;
@ -444,7 +443,6 @@ void getSyscallArgsFromGuestState ( /*OUT*/SyscallArgs* canonical,
canonical->arg7 = 0;
canonical->arg8 = 0;
#elif defined(VGP_ppc64_linux)
VexGuestPPC64State* gst = (VexGuestPPC64State*)gst_vanilla;
canonical->sysno = gst->guest_GPR0;
@ -457,7 +455,6 @@ void getSyscallArgsFromGuestState ( /*OUT*/SyscallArgs* canonical,
canonical->arg7 = 0;
canonical->arg8 = 0;
#elif defined(VGP_ppc32_aix5)
VexGuestPPC32State* gst = (VexGuestPPC32State*)gst_vanilla;
canonical->sysno = gst->guest_GPR2;
@ -1377,7 +1374,17 @@ void VG_(client_syscall) ( ThreadId tid, UInt trc )
/* Do any pre-syscall actions */
if (VG_(needs).syscall_wrapper) {
VG_TDICT_CALL(tool_pre_syscall, tid, sysno);
UWord tmpv[8];
tmpv[0] = sci->orig_args.arg1;
tmpv[1] = sci->orig_args.arg2;
tmpv[2] = sci->orig_args.arg3;
tmpv[3] = sci->orig_args.arg4;
tmpv[4] = sci->orig_args.arg5;
tmpv[5] = sci->orig_args.arg6;
tmpv[6] = sci->orig_args.arg7;
tmpv[7] = sci->orig_args.arg8;
VG_TDICT_CALL(tool_pre_syscall, tid, sysno,
&tmpv[0], sizeof(tmpv)/sizeof(tmpv[0]));
}
vg_assert(ent);
@ -1655,8 +1662,21 @@ void VG_(post_syscall) (ThreadId tid)
putSyscallStatusIntoGuestState( tid, &sci->status, &tst->arch.vex );
/* Do any post-syscall actions required by the tool. */
if (VG_(needs).syscall_wrapper)
VG_TDICT_CALL(tool_post_syscall, tid, sysno, sci->status.sres);
if (VG_(needs).syscall_wrapper) {
UWord tmpv[8];
tmpv[0] = sci->orig_args.arg1;
tmpv[1] = sci->orig_args.arg2;
tmpv[2] = sci->orig_args.arg3;
tmpv[3] = sci->orig_args.arg4;
tmpv[4] = sci->orig_args.arg5;
tmpv[5] = sci->orig_args.arg6;
tmpv[6] = sci->orig_args.arg7;
tmpv[7] = sci->orig_args.arg8;
VG_TDICT_CALL(tool_post_syscall, tid,
sysno,
&tmpv[0], sizeof(tmpv)/sizeof(tmpv[0]),
sci->status.sres);
}
/* The syscall is done. */
vg_assert(sci->status.what == SsComplete);

View File

@ -269,8 +269,8 @@ void VG_(needs_client_requests)(
}
void VG_(needs_syscall_wrapper)(
void(*pre) (ThreadId, UInt),
void(*post)(ThreadId, UInt, SysRes res)
void(*pre) (ThreadId, UInt, UWord*, UInt),
void(*post)(ThreadId, UInt, UWord*, UInt, SysRes res)
)
{
VG_(needs).syscall_wrapper = True;

View File

@ -73,6 +73,7 @@ extern void VG_(synth_fault_mapping)(ThreadId tid, Addr addr);
extern void VG_(synth_fault_perms) (ThreadId tid, Addr addr);
extern void VG_(synth_sigill) (ThreadId tid, Addr addr);
extern void VG_(synth_sigtrap) (ThreadId tid);
extern void VG_(synth_sigbus) (ThreadId tid);
/* Extend the stack to cover addr, if possible */
extern Bool VG_(extend_stack)(Addr addr, UInt maxsize);

View File

@ -138,8 +138,8 @@ typedef struct {
Bool (*tool_handle_client_request)(ThreadId, UWord*, UWord*);
// VG_(needs).syscall_wrapper
void (*tool_pre_syscall) (ThreadId, UInt);
void (*tool_post_syscall)(ThreadId, UInt, SysRes);
void (*tool_pre_syscall) (ThreadId, UInt, UWord*, UInt);
void (*tool_post_syscall)(ThreadId, UInt, UWord*, UInt, SysRes);
// VG_(needs).sanity_checks
Bool (*tool_cheap_sanity_check)(void);

View File

@ -450,7 +450,6 @@ IRSB* DRD_(instrument)(VgCallbackClosure* const closure,
IRSB* bb;
IRExpr** argv;
Bool instrument = True;
Bool bus_locked = False;
/* Set up BB */
bb = emptyIRSB();
@ -484,16 +483,6 @@ IRSB* DRD_(instrument)(VgCallbackClosure* const closure,
{
case Imbe_Fence:
break; /* not interesting */
case Imbe_BusLock:
case Imbe_SnoopedStoreBegin:
tl_assert(! bus_locked);
bus_locked = True;
break;
case Imbe_BusUnlock:
case Imbe_SnoopedStoreEnd:
tl_assert(bus_locked);
bus_locked = False;
break;
default:
tl_assert(0);
}
@ -501,7 +490,8 @@ IRSB* DRD_(instrument)(VgCallbackClosure* const closure,
break;
case Ist_Store:
if (instrument && ! bus_locked)
if (instrument && /* ignore stores resulting from st{d,w}cx. */
st->Ist.Store.resSC == IRTemp_INVALID)
{
instrument_store(bb,
st->Ist.Store.addr,
@ -547,8 +537,7 @@ IRSB* DRD_(instrument)(VgCallbackClosure* const closure,
argv);
addStmtToIRSB(bb, IRStmt_Dirty(di));
}
if ((mFx == Ifx_Write || mFx == Ifx_Modify)
&& ! bus_locked)
if (mFx == Ifx_Write || mFx == Ifx_Modify)
{
di = unsafeIRDirty_0_N(
/*regparms*/2,
@ -565,14 +554,32 @@ IRSB* DRD_(instrument)(VgCallbackClosure* const closure,
addStmtToIRSB(bb, st);
break;
case Ist_CAS:
if (instrument)
{
/* Just treat this as a read of the location. I believe
this is equivalent to the previous logic, which
observed bus-lock/unlock Ist_MBEs, and ignored all
writes within sections bracketed by bus-lock and
bus-unlock annotations. */
Int dataSize;
IRCAS* cas = st->Ist.CAS.details;
tl_assert(cas->addr != NULL);
tl_assert(cas->dataLo != NULL);
dataSize = sizeofIRType(typeOfIRExpr(bb->tyenv, cas->dataLo));
if (cas->dataHi != NULL)
dataSize *= 2; /* since it's a doubleword-CAS */
instrument_load(bb, cas->addr, dataSize);
}
addStmtToIRSB(bb, st);
break;
default:
addStmtToIRSB(bb, st);
break;
}
}
tl_assert(! bus_locked);
return bb;
}

View File

@ -1536,7 +1536,6 @@ static void get_IntRegInfo ( /*OUT*/IntRegInfo* iii, Int offset, Int szB )
if (o == GOF(CTR) && is4) goto exactly1;
if (o == GOF(CIA) && is4) goto none;
if (o == GOF(IP_AT_SYSCALL) && is4) goto none;
if (o == GOF(RESVN) && is4) goto none;
if (o == GOF(TISTART) && is4) goto none;
if (o == GOF(TILEN) && is4) goto none;
if (o == GOF(REDIR_SP) && is4) goto none;
@ -1700,7 +1699,6 @@ static void get_IntRegInfo ( /*OUT*/IntRegInfo* iii, Int offset, Int szB )
if (o == GOF(CTR) && is8) goto exactly1;
if (o == GOF(CIA) && is8) goto none;
if (o == GOF(IP_AT_SYSCALL) && is8) goto none;
if (o == GOF(RESVN) && is8) goto none;
if (o == GOF(TISTART) && is8) goto none;
if (o == GOF(TILEN) && is8) goto none;
if (o == GOF(REDIR_SP) && is8) goto none;
@ -2115,7 +2113,8 @@ void h_post_reg_write_clientcall(ThreadId tid, PtrdiffT guest_state_offset,
/*--- System calls ---*/
/*--------------------------------------------------------------------*/
void h_pre_syscall ( ThreadId tid, UInt sysno )
void h_pre_syscall ( ThreadId tid, UInt sysno,
UWord* args, UInt nArgs )
{
/* we don't do anything at the pre-syscall point */
}
@ -2415,6 +2414,9 @@ static void setup_post_syscall_table ( void )
# if defined(__NR_shmget)
ADD(1, __NR_shmget);
# endif
# if defined(__NR_ipc) && defined(VKI_SHMAT)
ADD(1, __NR_ipc); /* ppc{32,64}-linux horrors */
# endif
/* --------------- AIX5 --------------- */
@ -2473,7 +2475,8 @@ static void setup_post_syscall_table ( void )
}
void h_post_syscall ( ThreadId tid, UInt sysno, SysRes res )
void h_post_syscall ( ThreadId tid, UInt sysno,
UWord* args, UInt nArgs, SysRes res )
{
Word i, n;
UWordPair* pair;
@ -2517,14 +2520,9 @@ void h_post_syscall ( ThreadId tid, UInt sysno, SysRes res )
/* Deal with the common case */
pair = VG_(indexXA)( post_syscall_table, i );
if (pair->uw2 == 0) {
/* the common case */
VG_(set_syscall_return_shadows)(
tid, /* retval */ (UWord)NONPTR, 0,
/* error */ (UWord)NONPTR, 0
);
return;
}
if (pair->uw2 == 0)
/* the common case */
goto res_NONPTR_err_NONPTR;
/* Special handling for all remaining cases */
tl_assert(pair->uw2 == 1);
@ -2537,24 +2535,15 @@ void h_post_syscall ( ThreadId tid, UInt sysno, SysRes res )
syscall completes. */
post_reg_write_nonptr_or_unknown( tid, PC_OFF_FS_ZERO,
PC_SZB_FS_ZERO );
VG_(set_syscall_return_shadows)(
tid, /* retval */ (UWord)NONPTR, 0,
/* error */ (UWord)NONPTR, 0
);
return;
goto res_NONPTR_err_NONPTR;
}
# endif
# if defined(__NR_brk)
// With brk(), result (of kernel syscall, not glibc wrapper) is a heap
// pointer. Make the shadow UNKNOWN.
if (sysno == __NR_brk) {
VG_(set_syscall_return_shadows)(
tid, /* retval */ (UWord)UNKNOWN, 0,
/* error */ (UWord)NONPTR, 0
);
return;
}
if (sysno == __NR_brk)
goto res_UNKNOWN_err_NONPTR;
# endif
// With mmap, new_mem_mmap() has already been called and added the
@ -2573,13 +2562,9 @@ void h_post_syscall ( ThreadId tid, UInt sysno, SysRes res )
) {
if (sr_isError(res)) {
// mmap() had an error, return value is a small negative integer
VG_(set_syscall_return_shadows)( tid, /*val*/ (UWord)NONPTR, 0,
/*err*/ (UWord)NONPTR, 0 );
if (0) VG_(printf)("ZZZZZZZ mmap res -> NONPTR\n");
goto res_NONPTR_err_NONPTR;
} else {
VG_(set_syscall_return_shadows)( tid, /*val*/ (UWord)UNKNOWN, 0,
/*err*/ (UWord)NONPTR, 0 );
if (0) VG_(printf)("ZZZZZZZ mmap res -> UNKNOWN\n");
goto res_UNKNOWN_err_NONPTR;
}
return;
}
@ -2589,24 +2574,40 @@ void h_post_syscall ( ThreadId tid, UInt sysno, SysRes res )
# if defined(__NR_shmat)
if (sysno == __NR_shmat) {
if (sr_isError(res)) {
VG_(set_syscall_return_shadows)( tid, /*val*/ (UWord)NONPTR, 0,
/*err*/ (UWord)NONPTR, 0 );
if (0) VG_(printf)("ZZZZZZZ shmat res -> NONPTR\n");
goto res_NONPTR_err_NONPTR;
} else {
VG_(set_syscall_return_shadows)( tid, /*val*/ (UWord)UNKNOWN, 0,
/*err*/ (UWord)NONPTR, 0 );
if (0) VG_(printf)("ZZZZZZZ shmat res -> UNKNOWN\n");
goto res_UNKNOWN_err_NONPTR;
}
return;
}
# endif
# if defined(__NR_shmget)
if (sysno == __NR_shmget) {
if (sysno == __NR_shmget)
// FIXME: is this correct?
VG_(set_syscall_return_shadows)( tid, /*val*/ (UWord)UNKNOWN, 0,
/*err*/ (UWord)NONPTR, 0 );
return;
goto res_UNKNOWN_err_NONPTR;
# endif
# if defined(__NR_ipc) && defined(VKI_SHMAT)
/* perhaps this should be further conditionalised with
&& (defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux)
Note, this just copies the behaviour of __NR_shmget above.
JRS 2009 June 02: it seems that the return value from
sys_ipc(VKI_SHMAT, ...) doesn't have much relationship to the
result returned by the originating user-level shmat call. It's
different (and much lower) by a large but integral number of
pages. I don't have time to chase this right now. Observed on
ppc{32,64}-linux. Result appears to be false errors from apps
using shmat. Confusion though -- shouldn't be related to the
actual numeric values returned by the syscall, though, should
it? Confused. Maybe some bad interaction with a
nonpointer-or-unknown heuristic? */
if (sysno == __NR_ipc) {
if (args[0] == VKI_SHMAT) {
goto res_UNKNOWN_err_NONPTR;
} else {
goto res_NONPTR_err_NONPTR;
}
}
# endif
@ -2614,6 +2615,16 @@ void h_post_syscall ( ThreadId tid, UInt sysno, SysRes res )
post_syscall_table has .w2 == 1, which in turn implies there
should be special-case code for it above. */
tl_assert(0);
res_NONPTR_err_NONPTR:
VG_(set_syscall_return_shadows)( tid, /* retval */ (UWord)NONPTR, 0,
/* error */ (UWord)NONPTR, 0 );
return;
res_UNKNOWN_err_NONPTR:
VG_(set_syscall_return_shadows)( tid, /* retval */ (UWord)UNKNOWN, 0,
/* error */ (UWord)NONPTR, 0 );
return;
}
@ -2916,25 +2927,73 @@ void check_load1(Addr m, Seg* mptr_vseg)
// ------------------ Store handlers ------------------ //
/* On 32 bit targets, we will use:
check_store1 check_store2 check_store4_P
check_store1 check_store2 check_store4_P check_store4C_P
check_store4 (for 32-bit nonpointer stores)
check_store8_ms4B_ls4B (for 64-bit stores)
check_store16_ms4B_4B_4B_ls4B (for xmm/altivec stores)
On 64 bit targets, we will use:
check_store1 check_store2 check_store4 check_store8_P
check_store1 check_store2 check_store4 check_store4C
check_store8_P check_store8C_P
check_store8_all8B (for 64-bit nonpointer stores)
check_store16_ms8B_ls8B (for xmm/altivec stores)
A "_P" handler writes a pointer to memory, and so has an extra
argument -- the pointer's shadow value. That implies that
check_store4_P is only to be called on a 32 bit host and
check_store8_P is only to be called on a 64 bit host. For all
check_store4{,C}_P is only to be called on a 32 bit host and
check_store8{,C}_P is only to be called on a 64 bit host. For all
other cases, and for the misaligned _P cases, the strategy is to
let the store go through, and then snoop around with
nonptr_or_unknown to fix up the shadow values of any affected
words. */
/* Helpers for store-conditionals. Ugly kludge :-(
They all return 1 if the SC was successful and 0 if it failed. */
static inline UWord do_store_conditional_32( Addr m/*dst*/, UInt t/*val*/ )
{
# if defined(VGA_ppc32) || defined(VGA_ppc64)
UWord success;
/* If this assertion fails, the underlying IR is (semantically) ill-formed
as per the IR spec for IRStmt_Store. */
tl_assert(VG_IS_4_ALIGNED(m));
__asm__ __volatile__(
"stwcx. %2,0,%1" "\n\t" /* data,0,addr */
"mfcr %0" "\n\t"
"srwi %0,%0,29" "\n\t" /* move relevant CR bit to LSB */
: /*out*/"=b"(success)
: /*in*/ "b"(m), "b"( (UWord)t )
: /*trash*/ "memory", "cc"
/* Note: srwi is OK even on a 64-bit host because we're
after bit 29 (normal numbering) and we mask off all the
other junk just below. */
);
return success & (UWord)1;
# else
tl_assert(0); /* not implemented on other platforms */
# endif
}
static inline UWord do_store_conditional_64( Addr m/*dst*/, ULong t/*val*/ )
{
# if defined(VGA_ppc64)
UWord success;
/* If this assertion fails, the underlying IR is (semantically) ill-formed
as per the IR spec for IRStmt_Store. */
tl_assert(VG_IS_8_ALIGNED(m));
__asm__ __volatile__(
"stdcx. %2,0,%1" "\n\t" /* data,0,addr */
"mfcr %0" "\n\t"
"srdi %0,%0,29" "\n\t" /* move relevant CR bit to LSB */
: /*out*/"=b"(success)
: /*in*/ "b"(m), "b"( (UWord)t )
: /*trash*/ "memory", "cc"
);
return success & (UWord)1;
# else
tl_assert(0); /* not implemented on other platforms */
# endif
}
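
For context: these helpers replay only the store-conditional half; the
matching load-linked (lwarx/ldarx) has already been executed by the
translated guest code by the time the helper runs.  The guest-side
idiom they slot into is the usual reservation loop, sketched here for
ppc32 (illustration only, not part of this commit):

   static inline UInt atomic_inc32 ( UInt* p )
   {
      UInt old, tmp;
      __asm__ __volatile__(
         "1: lwarx  %0,0,%2  \n\t"  /* load-linked; takes reservation */
         "   addi   %1,%0,1  \n\t"
         "   stwcx. %1,0,%2  \n\t"  /* store-conditional */
         "   bne-   1b       \n\t"  /* reservation lost: retry */
         : /*out*/ "=&b"(old), "=&b"(tmp)
         : /*in*/  "b"(p)
         : /*trash*/ "memory", "cc" );
      return old;   /* value before the increment */
   }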
/* Apply nonptr_or_unknown to all the words intersecting
[a, a+len). */
static VG_REGPARM(2)
@ -3066,6 +3125,29 @@ void check_store8_P(Addr m, Seg* mptr_vseg, UWord t, Seg* t_vseg)
}
}
// This handles 64 bit store-conditionals on 64 bit targets. It must
// not be called on 32 bit targets.
static VG_REGPARM(3)
UWord check_store8C_P(Addr m, Seg* mptr_vseg, UWord t, Seg* t_vseg)
{
UWord success;
tl_assert(sizeof(UWord) == 8); /* DO NOT REMOVE */
# if SC_SEGS
checkSeg(t_vseg);
checkSeg(mptr_vseg);
# endif
check_load_or_store(/*is_write*/True, m, 8, mptr_vseg);
// Actually *do* the STORE here
success = do_store_conditional_64( m, t );
if (VG_IS_8_ALIGNED(m)) {
set_mem_vseg( m, t_vseg );
} else {
// straddling two words
nonptr_or_unknown_range(m, 8);
}
return success;
}
// This handles 32 bit stores on 32 bit targets. It must
// not be called on 64 bit targets.
static VG_REGPARM(3)
@ -3087,6 +3169,29 @@ void check_store4_P(Addr m, Seg* mptr_vseg, UWord t, Seg* t_vseg)
}
}
// This handles 32 bit store-conditionals on 32 bit targets. It must
// not be called on 64 bit targets.
static VG_REGPARM(3)
UWord check_store4C_P(Addr m, Seg* mptr_vseg, UWord t, Seg* t_vseg)
{
UWord success;
tl_assert(sizeof(UWord) == 4); /* DO NOT REMOVE */
# if SC_SEGS
checkSeg(t_vseg);
checkSeg(mptr_vseg);
# endif
check_load_or_store(/*is_write*/True, m, 4, mptr_vseg);
// Actually *do* the STORE here
success = do_store_conditional_32( m, t );
if (VG_IS_4_ALIGNED(m)) {
set_mem_vseg( m, t_vseg );
} else {
// straddling two words
nonptr_or_unknown_range(m, 4);
}
return success;
}
// Used for both 32 bit and 64 bit targets.
static VG_REGPARM(3)
void check_store4(Addr m, Seg* mptr_vseg, UWord t)
@ -3100,6 +3205,23 @@ void check_store4(Addr m, Seg* mptr_vseg, UWord t)
nonptr_or_unknown_range(m, 4);
}
// Used for 32-bit store-conditionals on 64 bit targets only. It must
// not be called on 32 bit targets.
static VG_REGPARM(3)
UWord check_store4C(Addr m, Seg* mptr_vseg, UWord t)
{
UWord success;
tl_assert(sizeof(UWord) == 8); /* DO NOT REMOVE */
# if SC_SEGS
checkSeg(mptr_vseg);
# endif
check_load_or_store(/*is_write*/True, m, 4, mptr_vseg);
// Actually *do* the STORE here
success = do_store_conditional_32( m, t );
nonptr_or_unknown_range(m, 4);
return success;
}
// Used for both 32 bit and 64 bit targets.
static VG_REGPARM(3)
void check_store2(Addr m, Seg* mptr_vseg, UWord t)
@ -4084,8 +4206,8 @@ static void gen_nonptr_or_unknown_for_III( PCEnv* pce, IntRegInfo* iii )
}
}
/* Generate into 'ane', instrumentation for 'st'. Also copy 'st'
itself into 'ane' (the caller does not do so). This is somewhat
/* Generate into 'pce', instrumentation for 'st'. Also copy 'st'
itself into 'pce' (the caller does not do so). This is somewhat
complex and relies heavily on the assumption that the incoming IR
is in flat form.
@ -4243,20 +4365,54 @@ static void schemeS ( PCEnv* pce, IRStmt* st )
the post-hoc ugly hack of inspecting and "improving" the
shadow data after the store, in the case where it isn't an
aligned word store.
Only word-sized values are shadowed. If this is a
store-conditional, .resSC will denote a non-word-typed
temp, and so we don't need to shadow it. Assert about the
type, tho. However, since we're not re-emitting the
original IRStmt_Store, but rather doing it as part of the
helper function, we need to actually do a SC in the
helper, and assign the result bit to .resSC. Ugly.
*/
IRExpr* data = st->Ist.Store.data;
IRExpr* addr = st->Ist.Store.addr;
IRType d_ty = typeOfIRExpr(pce->bb->tyenv, data);
IRExpr* addrv = schemeEw_Atom( pce, addr );
IRTemp resSC = st->Ist.Store.resSC;
if (resSC != IRTemp_INVALID) {
tl_assert(typeOfIRTemp(pce->bb->tyenv, resSC) == Ity_I1);
/* viz, not something we want to shadow */
/* also, throw out all store-conditional cases that
we can't handle */
if (pce->gWordTy == Ity_I32 && d_ty != Ity_I32)
goto unhandled;
if (pce->gWordTy == Ity_I64 && d_ty != Ity_I32 && d_ty != Ity_I64)
goto unhandled;
}
if (pce->gWordTy == Ity_I32) {
/* ------ 32 bit host/guest (cough, cough) ------ */
switch (d_ty) {
/* Integer word case */
case Ity_I32: {
IRExpr* datav = schemeEw_Atom( pce, data );
gen_dirty_v_WWWW( pce,
&check_store4_P, "check_store4_P",
addr, addrv, data, datav );
if (resSC == IRTemp_INVALID) {
/* "normal" store */
gen_dirty_v_WWWW( pce,
&check_store4_P, "check_store4_P",
addr, addrv, data, datav );
} else {
/* store-conditional; need to snarf the success bit */
IRTemp resSC32
= gen_dirty_W_WWWW( pce,
&check_store4C_P,
"check_store4C_P",
addr, addrv, data, datav );
/* presumably resSC32 will really be Ity_I32. In
any case we'll get jumped by the IR sanity
checker if it's not, when it sees the
following statement. */
assign( 'I', pce, resSC, unop(Iop_32to1, mkexpr(resSC32)) );
}
break;
}
/* Integer subword cases */
@ -4345,17 +4501,39 @@ static void schemeS ( PCEnv* pce, IRStmt* st )
/* Integer word case */
case Ity_I64: {
IRExpr* datav = schemeEw_Atom( pce, data );
gen_dirty_v_WWWW( pce,
&check_store8_P, "check_store8_P",
addr, addrv, data, datav );
if (resSC == IRTemp_INVALID) {
/* "normal" store */
gen_dirty_v_WWWW( pce,
&check_store8_P, "check_store8_P",
addr, addrv, data, datav );
} else {
IRTemp resSC64
= gen_dirty_W_WWWW( pce,
&check_store8C_P,
"check_store8C_P",
addr, addrv, data, datav );
assign( 'I', pce, resSC, unop(Iop_64to1, mkexpr(resSC64)) );
}
break;
}
/* Integer subword cases */
case Ity_I32:
gen_dirty_v_WWW( pce,
&check_store4, "check_store4",
addr, addrv,
uwiden_to_host_word( pce, data ));
if (resSC == IRTemp_INVALID) {
/* "normal" store */
gen_dirty_v_WWW( pce,
&check_store4, "check_store4",
addr, addrv,
uwiden_to_host_word( pce, data ));
} else {
/* store-conditional; need to snarf the success bit */
IRTemp resSC64
= gen_dirty_W_WWW( pce,
&check_store4C,
"check_store4C",
addr, addrv,
uwiden_to_host_word( pce, data ));
assign( 'I', pce, resSC, unop(Iop_64to1, mkexpr(resSC64)) );
}
break;
case Ity_I16:
gen_dirty_v_WWW( pce,

View File

@ -82,8 +82,10 @@ void h_post_reg_write_demux ( CorePart part, ThreadId tid,
void h_post_reg_write_clientcall(ThreadId tid, PtrdiffT guest_state_offset,
SizeT size, Addr f );
void h_pre_syscall ( ThreadId tid, UInt syscallno );
void h_post_syscall ( ThreadId tid, UInt syscallno, SysRes res );
void h_pre_syscall ( ThreadId tid, UInt syscallno,
UWord* args, UInt nArgs );
void h_post_syscall ( ThreadId tid, UInt syscallno,
UWord* args, UInt nArgs, SysRes res );
/* Note that this also does the sg_ instrumentation. */
IRSB* h_instrument ( VgCallbackClosure* closure,

View File

@ -2226,6 +2226,33 @@ void sg_instrument_IRStmt ( /*MOD*/struct _SGEnv * env,
break;
}
case Ist_CAS: {
/* We treat it as a read and a write of the location. I
think that is the same behaviour as it was before IRCAS
was introduced, since prior to that point, the Vex front
ends would translate a lock-prefixed instruction into a
(normal) read followed by a (normal) write. */
if (env->firstRef) {
Int dataSize;
IRCAS* cas = st->Ist.CAS.details;
tl_assert(cas->addr != NULL);
tl_assert(cas->dataLo != NULL);
dataSize = sizeofIRType(typeOfIRExpr(sbOut->tyenv, cas->dataLo));
if (cas->dataHi != NULL)
dataSize *= 2; /* since it's a doubleword-CAS */
instrument_mem_access(
sbOut, cas->addr, dataSize, False/*!isStore*/,
sizeofIRType(hWordTy), env->curr_IP, layout
);
instrument_mem_access(
sbOut, cas->addr, dataSize, True/*isStore*/,
sizeofIRType(hWordTy), env->curr_IP, layout
);
env->firstRef = False;
}
break;
}
default:
tl_assert(0);

View File

@ -3603,40 +3603,6 @@ static void instrument_mem_access ( IRSB* bbOut,
}
//static void instrument_memory_bus_event ( IRSB* bbOut, IRMBusEvent event )
//{
// switch (event) {
// case Imbe_SnoopedStoreBegin:
// case Imbe_SnoopedStoreEnd:
// /* These arise from ppc stwcx. insns. They should perhaps be
// handled better. */
// break;
// case Imbe_Fence:
// break; /* not interesting */
// case Imbe_BusLock:
// case Imbe_BusUnlock:
// addStmtToIRSB(
// bbOut,
// IRStmt_Dirty(
// unsafeIRDirty_0_N(
// 0/*regparms*/,
// event == Imbe_BusLock ? "evh__bus_lock"
// : "evh__bus_unlock",
// VG_(fnptr_to_fnentry)(
// event == Imbe_BusLock ? &evh__bus_lock
// : &evh__bus_unlock
// ),
// mkIRExprVec_0()
// )
// )
// );
// break;
// default:
// tl_assert(0);
// }
//}
static
IRSB* hg_instrument ( VgCallbackClosure* closure,
IRSB* bbIn,
@ -3644,10 +3610,10 @@ IRSB* hg_instrument ( VgCallbackClosure* closure,
VexGuestExtents* vge,
IRType gWordTy, IRType hWordTy )
{
Int i;
IRSB* bbOut;
Bool x86busLocked = False;
Bool isSnoopedStore = False;
Int i;
IRSB* bbOut;
Addr64 cia; /* address of current insn */
IRStmt* st;
if (gWordTy != hWordTy) {
/* We don't currently support this case. */
@ -3667,8 +3633,16 @@ IRSB* hg_instrument ( VgCallbackClosure* closure,
i++;
}
// Get the first statement, and initial cia from it
tl_assert(bbIn->stmts_used > 0);
tl_assert(i < bbIn->stmts_used);
st = bbIn->stmts[i];
tl_assert(Ist_IMark == st->tag);
cia = st->Ist.IMark.addr;
st = NULL;
for (/*use current i*/; i < bbIn->stmts_used; i++) {
IRStmt* st = bbIn->stmts[i];
st = bbIn->stmts[i];
tl_assert(st);
tl_assert(isFlatIRStmt(st));
switch (st->tag) {
@ -3676,43 +3650,45 @@ IRSB* hg_instrument ( VgCallbackClosure* closure,
case Ist_AbiHint:
case Ist_Put:
case Ist_PutI:
case Ist_IMark:
case Ist_Exit:
/* None of these can contain any memory references. */
break;
case Ist_IMark:
/* no mem refs, but note the insn address. */
cia = st->Ist.IMark.addr;
break;
case Ist_MBE:
//instrument_memory_bus_event( bbOut, st->Ist.MBE.event );
switch (st->Ist.MBE.event) {
case Imbe_Fence:
break; /* not interesting */
/* Imbe_Bus{Lock,Unlock} arise from x86/amd64 LOCK
prefixed instructions. */
case Imbe_BusLock:
tl_assert(x86busLocked == False);
x86busLocked = True;
break;
case Imbe_BusUnlock:
tl_assert(x86busLocked == True);
x86busLocked = False;
break;
/* Imbe_SnoopedStore{Begin,End} arise from ppc
stwcx. instructions. */
case Imbe_SnoopedStoreBegin:
tl_assert(isSnoopedStore == False);
isSnoopedStore = True;
break;
case Imbe_SnoopedStoreEnd:
tl_assert(isSnoopedStore == True);
isSnoopedStore = False;
break;
default:
goto unhandled;
}
break;
case Ist_CAS: {
/* Atomic read-modify-write cycle. Just pretend it's a
read. */
IRCAS* cas = st->Ist.CAS.details;
Bool isDCAS = cas->dataHi != NULL;
instrument_mem_access(
bbOut,
cas->addr,
(isDCAS ? 2 : 1)
* sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
False/*!isStore*/,
sizeofIRType(hWordTy)
);
break;
}
case Ist_Store:
if (!x86busLocked && !isSnoopedStore)
/* It seems we pretend that store-conditionals don't
exist, viz, just ignore them ... */
if (st->Ist.Store.resSC == IRTemp_INVALID) {
instrument_mem_access(
bbOut,
st->Ist.Store.addr,
@ -3720,9 +3696,12 @@ IRSB* hg_instrument ( VgCallbackClosure* closure,
True/*isStore*/,
sizeofIRType(hWordTy)
);
}
break;
case Ist_WrTmp: {
/* ... whereas here we don't care whether a load is a
vanilla one or a load-linked. */
IRExpr* data = st->Ist.WrTmp.data;
if (data->tag == Iex_Load) {
instrument_mem_access(
@ -3751,11 +3730,6 @@ IRSB* hg_instrument ( VgCallbackClosure* closure,
sizeofIRType(hWordTy)
);
}
/* This isn't really correct. Really the
instrumentation should be only added when
(!x86busLocked && !isSnoopedStore), just like with
Ist_Store. Still, I don't think this is
particularly important. */
if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
instrument_mem_access(
bbOut, d->mAddr, dataSize, True/*isStore*/,

View File

@ -2393,7 +2393,7 @@ static POrd VtsID__getOrdering_WRK ( VtsID vi1, VtsID vi2 ) {
return ord;
}
static inline POrd VtsID__getOrdering ( VtsID vi1, VtsID vi2 ) {
return vi1 == vi2 ? POrd_EQ : VtsID__getOrdering_WRK(vi1, vi2);
return LIKELY(vi1 == vi2) ? POrd_EQ : VtsID__getOrdering_WRK(vi1, vi2);
}
/* compute binary join */
@ -2424,7 +2424,7 @@ static VtsID VtsID__join2_WRK ( VtsID vi1, VtsID vi2 ) {
return res;
}
static inline VtsID VtsID__join2 ( VtsID vi1, VtsID vi2 ) {
return vi1 == vi2 ? vi1 : VtsID__join2_WRK(vi1, vi2);
return LIKELY(vi1 == vi2) ? vi1 : VtsID__join2_WRK(vi1, vi2);
}
/* create a singleton VTS, namely [thr:1] */
@ -3653,7 +3653,7 @@ static inline SVal msm_read ( SVal svOld,
tl_assert(is_sane_SVal_C(svOld));
}
if (SVal__isC(svOld)) {
if (LIKELY(SVal__isC(svOld))) {
POrd ord;
VtsID tviR = acc_thr->viR;
VtsID tviW = acc_thr->viW;
@ -3661,7 +3661,7 @@ static inline SVal msm_read ( SVal svOld,
VtsID wmini = SVal__unC_Wmin(svOld);
ord = VtsID__getOrdering(rmini,tviR);
if (ord == POrd_EQ || ord == POrd_LT) {
if (LIKELY(ord == POrd_EQ || ord == POrd_LT)) {
/* no race */
/* Note: RWLOCK subtlety: use tviW, not tviR */
svNew = SVal__mkC( rmini, VtsID__join2(wmini, tviW) );
@ -3708,9 +3708,10 @@ static inline SVal msm_read ( SVal svOld,
if (CHECK_MSM) {
tl_assert(is_sane_SVal_C(svNew));
}
tl_assert(svNew != SVal_INVALID);
if (svNew != svOld && HG_(clo_show_conflicts)) {
if (SVal__isC(svOld) && SVal__isC(svNew)) {
if (UNLIKELY(svNew != svOld)) {
tl_assert(svNew != SVal_INVALID);
if (HG_(clo_show_conflicts)
&& SVal__isC(svOld) && SVal__isC(svNew)) {
event_map_bind( acc_addr, szB, False/*!isWrite*/, acc_thr );
stats__msm_read_change++;
}
@ -3734,13 +3735,13 @@ static inline SVal msm_write ( SVal svOld,
tl_assert(is_sane_SVal_C(svOld));
}
if (SVal__isC(svOld)) {
if (LIKELY(SVal__isC(svOld))) {
POrd ord;
VtsID tviW = acc_thr->viW;
VtsID wmini = SVal__unC_Wmin(svOld);
ord = VtsID__getOrdering(wmini,tviW);
if (ord == POrd_EQ || ord == POrd_LT) {
if (LIKELY(ord == POrd_EQ || ord == POrd_LT)) {
/* no race */
svNew = SVal__mkC( tviW, tviW );
goto out;
@ -3807,9 +3808,10 @@ static inline SVal msm_write ( SVal svOld,
if (CHECK_MSM) {
tl_assert(is_sane_SVal_C(svNew));
}
tl_assert(svNew != SVal_INVALID);
if (svNew != svOld && HG_(clo_show_conflicts)) {
if (SVal__isC(svOld) && SVal__isC(svNew)) {
if (UNLIKELY(svNew != svOld)) {
tl_assert(svNew != SVal_INVALID);
if (HG_(clo_show_conflicts)
&& SVal__isC(svOld) && SVal__isC(svNew)) {
event_map_bind( acc_addr, szB, True/*isWrite*/, acc_thr );
stats__msm_write_change++;
}
@ -3845,7 +3847,8 @@ void zsm_apply8___msm_read ( Thr* thr, Addr a ) {
}
svOld = cl->svals[cloff];
svNew = msm_read( svOld, thr,a,1 );
tl_assert(svNew != SVal_INVALID);
if (CHECK_ZSM)
tl_assert(svNew != SVal_INVALID);
cl->svals[cloff] = svNew;
}
@ -3868,7 +3871,8 @@ void zsm_apply8___msm_write ( Thr* thr, Addr a ) {
}
svOld = cl->svals[cloff];
svNew = msm_write( svOld, thr,a,1 );
tl_assert(svNew != SVal_INVALID);
if (CHECK_ZSM)
tl_assert(svNew != SVal_INVALID);
cl->svals[cloff] = svNew;
}
@ -3898,7 +3902,8 @@ void zsm_apply16___msm_read ( Thr* thr, Addr a ) {
}
svOld = cl->svals[cloff];
svNew = msm_read( svOld, thr,a,2 );
tl_assert(svNew != SVal_INVALID);
if (CHECK_ZSM)
tl_assert(svNew != SVal_INVALID);
cl->svals[cloff] = svNew;
return;
slowcase: /* misaligned, or must go further down the tree */
@ -3931,7 +3936,8 @@ void zsm_apply16___msm_write ( Thr* thr, Addr a ) {
}
svOld = cl->svals[cloff];
svNew = msm_write( svOld, thr,a,2 );
tl_assert(svNew != SVal_INVALID);
if (CHECK_ZSM)
tl_assert(svNew != SVal_INVALID);
cl->svals[cloff] = svNew;
return;
slowcase: /* misaligned, or must go further down the tree */
@ -3965,7 +3971,8 @@ void zsm_apply32___msm_read ( Thr* thr, Addr a ) {
}
svOld = cl->svals[cloff];
svNew = msm_read( svOld, thr,a,4 );
tl_assert(svNew != SVal_INVALID);
if (CHECK_ZSM)
tl_assert(svNew != SVal_INVALID);
cl->svals[cloff] = svNew;
return;
slowcase: /* misaligned, or must go further down the tree */
@ -3997,7 +4004,8 @@ void zsm_apply32___msm_write ( Thr* thr, Addr a ) {
}
svOld = cl->svals[cloff];
svNew = msm_write( svOld, thr,a,4 );
tl_assert(svNew != SVal_INVALID);
if (CHECK_ZSM)
tl_assert(svNew != SVal_INVALID);
cl->svals[cloff] = svNew;
return;
slowcase: /* misaligned, or must go further down the tree */
@ -4026,7 +4034,8 @@ void zsm_apply64___msm_read ( Thr* thr, Addr a ) {
}
svOld = cl->svals[cloff];
svNew = msm_read( svOld, thr,a,8 );
tl_assert(svNew != SVal_INVALID);
if (CHECK_ZSM)
tl_assert(svNew != SVal_INVALID);
cl->svals[cloff] = svNew;
return;
slowcase: /* misaligned, or must go further down the tree */
@ -4053,7 +4062,8 @@ void zsm_apply64___msm_write ( Thr* thr, Addr a ) {
}
svOld = cl->svals[cloff];
svNew = msm_write( svOld, thr,a,8 );
tl_assert(svNew != SVal_INVALID);
if (CHECK_ZSM)
tl_assert(svNew != SVal_INVALID);
cl->svals[cloff] = svNew;
return;
slowcase: /* misaligned, or must go further down the tree */
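
The LIKELY/UNLIKELY hints added throughout this file compile down to
__builtin_expect.  Valgrind's own definitions live in
pub_tool_basics.h; they are essentially (sketch):

   #define LIKELY(cond)   __builtin_expect(!!(cond), 1)
   #define UNLIKELY(cond) __builtin_expect(!!(cond), 0)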

View File

@ -386,9 +386,19 @@ extern void VG_(needs_client_requests) (
/* Tool does stuff before and/or after system calls? */
// Nb: If either of the pre_ functions malloc() something to return, the
// corresponding post_ function had better free() it!
// Also, the args are the 'original args' -- that is, it may be
// that the syscall pre-wrapper will modify the args before the
// syscall happens, so these are the original, unmodified args.
// Finally, nArgs merely indicates the length of args[..]; it does
// not indicate how many of those values are actually relevant to
// the syscall. args[0 .. nArgs-1] is guaranteed to be defined and
// to contain all the args for this syscall, possibly including
// some trailing zeroes.
extern void VG_(needs_syscall_wrapper) (
void (* pre_syscall)(ThreadId tid, UInt syscallno),
void (*post_syscall)(ThreadId tid, UInt syscallno, SysRes res)
void (* pre_syscall)(ThreadId tid, UInt syscallno,
UWord* args, UInt nArgs),
void (*post_syscall)(ThreadId tid, UInt syscallno,
UWord* args, UInt nArgs, SysRes res)
);
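
A tool-side pair matching the new hook shapes might look like this
(sketch; my_pre_syscall and my_post_syscall are hypothetical names,
and registration happens in the tool's pre_clo_init):

   static void my_pre_syscall ( ThreadId tid, UInt syscallno,
                                UWord* args, UInt nArgs )
   {
      /* args[0 .. nArgs-1] hold the original, unmodified args */
   }

   static void my_post_syscall ( ThreadId tid, UInt syscallno,
                                 UWord* args, UInt nArgs, SysRes res )
   {
      /* inspect res; free anything my_pre_syscall allocated */
   }

   /* in the tool's pre_clo_init: */
   VG_(needs_syscall_wrapper)( my_pre_syscall, my_post_syscall );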
/* Are tool-state sanity checks performed? */

View File

@ -784,6 +784,27 @@ IRSB* lk_instrument ( VgCallbackClosure* closure,
break;
}
case Ist_CAS: {
/* We treat it as a read and a write of the location. I
think that is the same behaviour as it was before IRCAS
was introduced, since prior to that point, the Vex
front ends would translate a lock-prefixed instruction
into a (normal) read followed by a (normal) write. */
if (clo_trace_mem) {
Int dataSize;
IRCAS* cas = st->Ist.CAS.details;
tl_assert(cas->addr != NULL);
tl_assert(cas->dataLo != NULL);
dataSize = sizeofIRType(typeOfIRExpr(tyenv, cas->dataLo));
if (cas->dataHi != NULL)
dataSize *= 2; /* since it's a doubleword-CAS */
addEvent_Dr( sbOut, cas->addr, dataSize );
addEvent_Dw( sbOut, cas->addr, dataSize );
}
addStmtToIRSB( sbOut, st );
break;
}
case Ist_Exit:
if (clo_basic_counts) {
// The condition of a branch was inverted by VEX if a taken

View File

@ -1899,12 +1899,14 @@ static void add_counter_update(IRSB* sbOut, Int n)
IRTemp t2 = newIRTemp(sbOut->tyenv, Ity_I64);
IRExpr* counter_addr = mkIRExpr_HWord( (HWord)&guest_instrs_executed );
IRStmt* st1 = IRStmt_WrTmp(t1, IRExpr_Load(END, Ity_I64, counter_addr));
IRStmt* st1 = IRStmt_WrTmp(t1, IRExpr_Load(False/*!isLL*/,
END, Ity_I64, counter_addr));
IRStmt* st2 =
IRStmt_WrTmp(t2,
IRExpr_Binop(Iop_Add64, IRExpr_RdTmp(t1),
IRExpr_Const(IRConst_U64(n))));
IRStmt* st3 = IRStmt_Store(END, counter_addr, IRExpr_RdTmp(t2));
IRStmt* st3 = IRStmt_Store(END, IRTemp_INVALID/*"not store-conditional"*/,
counter_addr, IRExpr_RdTmp(t2));
addStmtToIRSB( sbOut, st1 );
addStmtToIRSB( sbOut, st2 );

View File

@ -182,7 +182,6 @@ static Int get_otrack_shadow_offset_wrk ( Int offset, Int szB )
if (o == GOF(CIA) && sz == 8) return -1;
if (o == GOF(IP_AT_SYSCALL) && sz == 8) return -1; /* slot unused */
if (o == GOF(RESVN) && sz == 8) return -1;
if (o == GOF(FPROUND) && sz == 4) return -1;
if (o == GOF(EMWARN) && sz == 4) return -1;
if (o == GOF(TISTART) && sz == 8) return -1;
@ -341,7 +340,6 @@ static Int get_otrack_shadow_offset_wrk ( Int offset, Int szB )
if (o == GOF(CIA) && sz == 4) return -1;
if (o == GOF(IP_AT_SYSCALL) && sz == 4) return -1; /* slot unused */
if (o == GOF(RESVN) && sz == 4) return -1;
if (o == GOF(FPROUND) && sz == 4) return -1;
if (o == GOF(VRSAVE) && sz == 4) return -1;
if (o == GOF(EMWARN) && sz == 4) return -1;

File diff suppressed because it is too large.