Renamed sk_assert() to tl_assert().

git-svn-id: svn://svn.valgrind.org/valgrind/trunk@3055
Nicholas Nethercote 2004-11-22 17:18:48 +00:00
parent 40de233989
commit cf9cf2a220
20 changed files with 299 additions and 299 deletions
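
The change is mechanical: every assertion in tool ("skin") code now goes
through tl_assert() instead of sk_assert(), and the macro's expansion is
untouched apart from the name (the tool-header hunk further down renames the
#define itself, which expands to a VG_(skin_assert_fail) call). In the hunks
that follow, each old sk_assert line appears directly above its tl_assert
replacement. The following is a minimal, self-contained sketch of what the
rename means at a call site; tool_assert_fail() and TL__STRING() are
hypothetical stand-ins for the real VG_(skin_assert_fail) and VG__STRING
helpers, not code from this commit.

#include <stdio.h>
#include <stdlib.h>

#define TL__STRING(s) #s   /* stand-in for VG__STRING */

/* Stand-in for VG_(skin_assert_fail): report the failed expression, then stop. */
static void tool_assert_fail(const char* expr, const char* file, int line)
{
   fprintf(stderr, "Assertion '%s' failed at %s:%d\n", expr, file, line);
   abort();
}

/* Before this commit the tool-side assertion macro was spelled sk_assert;
   after it, tl_assert.  The expansion is the same in both cases. */
#define tl_assert(expr) \
   ((void)((expr) ? 0 \
                  : (tool_assert_fail(TL__STRING(expr), __FILE__, __LINE__), 0)))

int main(void)
{
   int regparms_n = 2;
   tl_assert(regparms_n <= 3);   /* previously: sk_assert(regparms_n <= 3); */
   return 0;
}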


@ -233,7 +233,7 @@ static __inline__ UChar get_abits4_ALIGNED ( Addr a )
UChar abits8;
PROF_EVENT(24);
# ifdef VG_DEBUG_MEMORY
sk_assert(IS_ALIGNED4_ADDR(a));
tl_assert(IS_ALIGNED4_ADDR(a));
# endif
sm = primary_map[a >> 16];
sm_off = a & 0xFFFF;
@ -274,10 +274,10 @@ void set_address_range_perms ( Addr a, SizeT len, UInt example_a_bit )
indicate bugs in our machinery. 30,000,000 is arbitrary, but so
far all legitimate requests have fallen beneath that size. */
/* 4 Mar 02: this is just stupid; get rid of it. */
/* sk_assert(len < 30000000); */
/* tl_assert(len < 30000000); */
/* Check the permissions make sense. */
sk_assert(example_a_bit == VGM_BIT_VALID
tl_assert(example_a_bit == VGM_BIT_VALID
|| example_a_bit == VGM_BIT_INVALID);
/* In order that we can charge through the address space at 8
@ -317,7 +317,7 @@ void set_address_range_perms ( Addr a, SizeT len, UInt example_a_bit )
VGP_POPCC(VgpSetMem);
return;
}
sk_assert((a % 8) == 0 && len > 0);
tl_assert((a % 8) == 0 && len > 0);
/* Once aligned, go fast. */
while (True) {
@ -335,7 +335,7 @@ void set_address_range_perms ( Addr a, SizeT len, UInt example_a_bit )
VGP_POPCC(VgpSetMem);
return;
}
sk_assert((a % 8) == 0 && len > 0 && len < 8);
tl_assert((a % 8) == 0 && len > 0 && len < 8);
/* Finish the upper fragment. */
while (True) {
@ -350,7 +350,7 @@ void set_address_range_perms ( Addr a, SizeT len, UInt example_a_bit )
/* Check that zero page and highest page have not been written to
-- this could happen with buggy syscall wrappers. Today
(2001-04-26) had precisely such a problem with __NR_setitimer. */
sk_assert(SK_(cheap_sanity_check)());
tl_assert(SK_(cheap_sanity_check)());
VGP_POPCC(VgpSetMem);
}
@ -550,7 +550,7 @@ void ac_check_is_accessible ( CorePart part, ThreadId tid,
break;
case Vg_CoreSignal:
sk_assert(isWrite); /* Should only happen with isWrite case */
tl_assert(isWrite); /* Should only happen with isWrite case */
/* fall through */
case Vg_CorePThread:
MAC_(record_core_mem_error)( tid, /*isUnaddr*/True, s );
@ -559,7 +559,7 @@ void ac_check_is_accessible ( CorePart part, ThreadId tid,
/* If we're being asked to jump to a silly address, record an error
message before potentially crashing the entire system. */
case Vg_CoreTranslate:
sk_assert(!isWrite); /* Should only happen with !isWrite case */
tl_assert(!isWrite); /* Should only happen with !isWrite case */
MAC_(record_jump_error)( tid, bad_addr );
break;
@ -594,7 +594,7 @@ void ac_check_is_readable_asciiz ( CorePart part, ThreadId tid,
VGP_PUSHCC(VgpCheckMem);
sk_assert(part == Vg_CoreSysCall);
tl_assert(part == Vg_CoreSysCall);
ok = ac_check_readable_asciiz ( (Addr)str, &bad_addr );
if (!ok) {
MAC_(record_param_error) ( tid, bad_addr, /*IsReg*/False,
@ -793,7 +793,7 @@ static void ac_ACCESS4_SLOWLY ( Addr a, Bool isWrite )
(which is the default), and the address is 4-aligned.
If not, Case 2 will have applied.
*/
sk_assert(MAC_(clo_partial_loads_ok));
tl_assert(MAC_(clo_partial_loads_ok));
{
return;
}
@ -1001,11 +1001,11 @@ UCodeBlock* SK_(instrument)(UCodeBlock* cb_in, Addr orig_addr)
break;
case SSE3ag_MemRd_RegWr:
sk_assert(u_in->size == 4 || u_in->size == 8);
tl_assert(u_in->size == 4 || u_in->size == 8);
helper = (Addr)ac_fpu_READ_check;
goto do_Access_ARG1;
do_Access_ARG1:
sk_assert(u_in->tag1 == TempReg);
tl_assert(u_in->tag1 == TempReg);
t_addr = u_in->val1;
t_size = newTemp(cb);
uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
@ -1016,11 +1016,11 @@ UCodeBlock* SK_(instrument)(UCodeBlock* cb_in, Addr orig_addr)
break;
case MMX2_MemRd:
sk_assert(u_in->size == 4 || u_in->size == 8);
tl_assert(u_in->size == 4 || u_in->size == 8);
helper = (Addr)ac_fpu_READ_check;
goto do_Access_ARG2;
case MMX2_MemWr:
sk_assert(u_in->size == 4 || u_in->size == 8);
tl_assert(u_in->size == 4 || u_in->size == 8);
helper = (Addr)ac_fpu_WRITE_check;
goto do_Access_ARG2;
case FPU_R:
@ -1030,7 +1030,7 @@ UCodeBlock* SK_(instrument)(UCodeBlock* cb_in, Addr orig_addr)
helper = (Addr)ac_fpu_WRITE_check;
goto do_Access_ARG2;
do_Access_ARG2:
sk_assert(u_in->tag2 == TempReg);
tl_assert(u_in->tag2 == TempReg);
t_addr = u_in->val2;
t_size = newTemp(cb);
uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
@ -1052,9 +1052,9 @@ UCodeBlock* SK_(instrument)(UCodeBlock* cb_in, Addr orig_addr)
helper = (Addr)ac_fpu_WRITE_check;
goto do_Access_ARG3;
do_Access_ARG3:
sk_assert(u_in->size == 4 || u_in->size == 8
tl_assert(u_in->size == 4 || u_in->size == 8
|| u_in->size == 16 || u_in->size == 512);
sk_assert(u_in->tag3 == TempReg);
tl_assert(u_in->tag3 == TempReg);
t_addr = u_in->val3;
t_size = newTemp(cb);
uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
@ -1094,7 +1094,7 @@ UCodeBlock* SK_(instrument)(UCodeBlock* cb_in, Addr orig_addr)
static
Bool ac_is_valid_64k_chunk ( UInt chunk_number )
{
sk_assert(chunk_number >= 0 && chunk_number < 65536);
tl_assert(chunk_number >= 0 && chunk_number < 65536);
if (IS_DISTINGUISHED_SM(primary_map[chunk_number])) {
/* Definitely not in use. */
return False;
@ -1110,7 +1110,7 @@ static
Bool ac_is_valid_address ( Addr a )
{
UChar abits;
sk_assert(IS_ALIGNED4_ADDR(a));
tl_assert(IS_ALIGNED4_ADDR(a));
abits = get_abits4_ALIGNED(a);
if (abits == VGM_NIBBLE_VALID) {
return True;


@ -370,7 +370,7 @@ BB_info* get_BB_info(UCodeBlock* cb_in, Addr orig_addr, Bool* bb_seen_before)
*bb_seen_before = ( NULL == bb_info ? False : True );
if (*bb_seen_before) {
// BB must have been translated before, but flushed from the TT
sk_assert(bb_info->n_instrs == n_instrs );
tl_assert(bb_info->n_instrs == n_instrs );
BB_retranslations++;
} else {
// BB never translated before (at this address, at least; could have
@ -391,9 +391,9 @@ void do_details( instr_info* n, Bool bb_seen_before,
{
lineCC* parent = get_lineCC(instr_addr);
if (bb_seen_before) {
sk_assert( n->instr_addr == instr_addr );
sk_assert( n->instr_size == instr_size );
sk_assert( n->data_size == data_size );
tl_assert( n->instr_addr == instr_addr );
tl_assert( n->instr_size == instr_size );
tl_assert( n->data_size == data_size );
// Don't assert that (n->parent == parent)... it's conceivable that
// the debug info might change; the other asserts should be enough to
// detect anything strange.
@ -424,7 +424,7 @@ void end_of_x86_instr(UCodeBlock* cb, instr_info* i_node, Bool bb_seen_before,
t_data_addr1 = INVALID_TEMPREG,
t_data_addr2 = INVALID_TEMPREG;
sk_assert(instr_size >= MIN_INSTR_SIZE &&
tl_assert(instr_size >= MIN_INSTR_SIZE &&
instr_size <= MAX_INSTR_SIZE);
#define IS_(X) (INVALID_TEMPREG != t_##X##_addr)
@ -432,29 +432,29 @@ void end_of_x86_instr(UCodeBlock* cb, instr_info* i_node, Bool bb_seen_before,
// Work out what kind of x86 instruction it is
if (!IS_(read) && !IS_(write)) {
sk_assert( 0 == data_size );
sk_assert(INV(t_read) && INV(t_write));
tl_assert( 0 == data_size );
tl_assert(INV(t_read) && INV(t_write));
helper = (Addr) & log_1I_0D_cache_access;
argc = 1;
} else if (IS_(read) && !IS_(write)) {
sk_assert( is_valid_data_size(data_size) );
sk_assert(!INV(t_read) && INV(t_write));
tl_assert( is_valid_data_size(data_size) );
tl_assert(!INV(t_read) && INV(t_write));
helper = (Addr) & log_1I_1Dr_cache_access;
argc = 2;
t_data_addr1 = t_read_addr;
} else if (!IS_(read) && IS_(write)) {
sk_assert( is_valid_data_size(data_size) );
sk_assert(INV(t_read) && !INV(t_write));
tl_assert( is_valid_data_size(data_size) );
tl_assert(INV(t_read) && !INV(t_write));
helper = (Addr) & log_1I_1Dw_cache_access;
argc = 2;
t_data_addr1 = t_write_addr;
} else {
sk_assert(IS_(read) && IS_(write));
sk_assert( is_valid_data_size(data_size) );
sk_assert(!INV(t_read) && !INV(t_write));
tl_assert(IS_(read) && IS_(write));
tl_assert( is_valid_data_size(data_size) );
tl_assert(!INV(t_read) && !INV(t_write));
if (t_read == t_write) {
helper = (Addr) & log_1I_1Dr_cache_access;
argc = 2;
@ -530,7 +530,7 @@ UCodeBlock* SK_(instrument)(UCodeBlock* cb_in, Addr orig_addr)
// x86 instruction sizes are obtained from INCEIPs (for case 1) or
// from .extra4b field of the final JMP (for case 2 & 3).
if (instrumented_Jcc) sk_assert(u_in->opcode == JMP);
if (instrumented_Jcc) tl_assert(u_in->opcode == JMP);
switch (u_in->opcode) {
@ -599,18 +599,18 @@ UCodeBlock* SK_(instrument)(UCodeBlock* cb_in, Addr orig_addr)
// JMP: insert instrumentation if the first JMP
case JMP:
if (instrumented_Jcc) {
sk_assert(CondAlways == u_in->cond);
sk_assert(i+1 == VG_(get_num_instrs)(cb_in));
tl_assert(CondAlways == u_in->cond);
tl_assert(i+1 == VG_(get_num_instrs)(cb_in));
VG_(copy_UInstr)(cb, u_in);
instrumented_Jcc = False; // reset
break;
} else {
// The first JMP... instrument.
if (CondAlways != u_in->cond) {
sk_assert(i+2 == VG_(get_num_instrs)(cb_in));
tl_assert(i+2 == VG_(get_num_instrs)(cb_in));
instrumented_Jcc = True;
} else {
sk_assert(i+1 == VG_(get_num_instrs)(cb_in));
tl_assert(i+1 == VG_(get_num_instrs)(cb_in));
}
// Get x86 instr size from final JMP.
x86_instr_size = VG_(get_last_instr)(cb_in)->extra4b;
@ -646,8 +646,8 @@ UCodeBlock* SK_(instrument)(UCodeBlock* cb_in, Addr orig_addr)
}
// BB address should be the same as the first instruction's address.
sk_assert(bb_info->BB_addr == bb_info->instrs[0].instr_addr );
sk_assert(bb_info_i == bb_info->n_instrs);
tl_assert(bb_info->BB_addr == bb_info->instrs[0].instr_addr );
tl_assert(bb_info_i == bb_info->n_instrs);
VG_(free_UCodeBlock)(cb_in);
return cb;
@ -1033,7 +1033,7 @@ void SK_(discard_basic_block_info) ( Addr a, SizeT size )
// Get BB info, remove from table, free BB info. Simple!
bb_info = VG_(HT_get_node)(instr_info_table, a, &prev_next_ptr);
sk_assert(NULL != bb_info);
tl_assert(NULL != bb_info);
*prev_next_ptr = bb_info->next;
VG_(free)(bb_info);
}
@ -1130,7 +1130,7 @@ void SK_(pre_clo_init)(void)
VG_(register_compact_helper)((Addr) & log_1I_2D_cache_access);
/* Get working directory */
sk_assert( VG_(getcwd_alloc)(&base_dir) );
tl_assert( VG_(getcwd_alloc)(&base_dir) );
/* Block is big enough for dir name + cachegrind.out.<pid> */
cachegrind_out_file = VG_(malloc)((VG_(strlen)(base_dir) + 32)*sizeof(Char));


@ -265,10 +265,10 @@ Int get_caches_from_CPUID(cache_t* I1c, cache_t* D1c, cache_t* L2c)
sigill_new.sa_flags = 0;
sigill_new.sa_restorer = NULL;
res = VG_(sigemptyset)( &sigill_new.sa_mask );
sk_assert(res == 0);
tl_assert(res == 0);
res = VG_(sigaction)( VKI_SIGILL, &sigill_new, &sigill_saved );
sk_assert(res == 0);
tl_assert(res == 0);
/* Trap for illegal instruction, in case it's a really old processor that
* doesn't support CPUID. */
@ -279,14 +279,14 @@ Int get_caches_from_CPUID(cache_t* I1c, cache_t* D1c, cache_t* L2c)
/* Restore old SIGILL handler */
res = VG_(sigaction)( VKI_SIGILL, &sigill_saved, NULL );
sk_assert(res == 0);
tl_assert(res == 0);
} else {
VG_(message)(Vg_DebugMsg, "CPUID instruction not supported");
/* Restore old SIGILL handler */
res = VG_(sigaction)( VKI_SIGILL, &sigill_saved, NULL );
sk_assert(res == 0);
tl_assert(res == 0);
return -1;
}


@ -307,7 +307,7 @@ static __inline__
void construct_error ( Error* err, ThreadId tid, ErrorKind ekind, Addr a,
Char* s, void* extra, ExeContext* where )
{
sk_assert(tid < VG_N_THREADS);
tl_assert(tid < VG_N_THREADS);
/* Core-only parts */
err->next = NULL;


@ -60,7 +60,7 @@ void VG_(ccall_0_0)(UCodeBlock* cb, Addr f)
// f(reg)
void VG_(ccall_R_0)(UCodeBlock* cb, Addr f, UInt t1, UInt regparms_n)
{
sk_assert(regparms_n <= 1);
tl_assert(regparms_n <= 1);
uInstr1(cb, CCALL, 0, TempReg, t1);
uCCall(cb, f, 1, regparms_n, /*retval*/False);
}
@ -76,8 +76,8 @@ void VG_(ccall_L_0)(UCodeBlock* cb, Addr f, UInt lit1, UInt regparms_n)
void VG_(ccall_R_R)(UCodeBlock* cb, Addr f, UInt t1, UInt t_ret,
UInt regparms_n)
{
sk_assert(regparms_n <= 1);
sk_assert(t1 < VG_(get_num_temps)(cb)); // help catch lits accidentally passed in
tl_assert(regparms_n <= 1);
tl_assert(t1 < VG_(get_num_temps)(cb)); // help catch lits accidentally passed in
uInstr3(cb, CCALL, 0, TempReg, t1, NoValue, 0, TempReg, t_ret);
uCCall(cb, f, 1, regparms_n, /*retval*/True);
}
@ -93,9 +93,9 @@ void VG_(ccall_L_R)(UCodeBlock* cb, Addr f, UInt lit1, UInt t_ret,
// f(reg, reg)
void VG_(ccall_RR_0)(UCodeBlock* cb, Addr f, UInt t1, UInt t2, UInt regparms_n)
{
sk_assert(regparms_n <= 2);
sk_assert(t1 < VG_(get_num_temps)(cb));
sk_assert(t2 < VG_(get_num_temps)(cb));
tl_assert(regparms_n <= 2);
tl_assert(t1 < VG_(get_num_temps)(cb));
tl_assert(t2 < VG_(get_num_temps)(cb));
uInstr2(cb, CCALL, 0, TempReg, t1, TempReg, t2);
uCCall(cb, f, 2, regparms_n, /*retval*/False);
}
@ -129,9 +129,9 @@ void VG_(ccall_LL_0)(UCodeBlock* cb, Addr f, UInt lit1, UInt lit2,
void VG_(ccall_RR_R)(UCodeBlock* cb, Addr f, UInt t1, UInt t2, UInt t_ret,
UInt regparms_n)
{
sk_assert(regparms_n <= 2);
sk_assert(t1 < VG_(get_num_temps)(cb));
sk_assert(t2 < VG_(get_num_temps)(cb));
tl_assert(regparms_n <= 2);
tl_assert(t1 < VG_(get_num_temps)(cb));
tl_assert(t2 < VG_(get_num_temps)(cb));
uInstr3(cb, CCALL, 0, TempReg, t1, TempReg, t2, TempReg, t_ret);
uCCall(cb, f, 2, regparms_n, /*retval*/True);
}
@ -165,10 +165,10 @@ void VG_(ccall_LL_R)(UCodeBlock* cb, Addr f, UInt lit1, UInt lit2, UInt t_ret,
void VG_(ccall_RRR_0)(UCodeBlock* cb, Addr f, UInt t1, UInt t2,
UInt t3, UInt regparms_n)
{
sk_assert(regparms_n <= 3);
sk_assert(t1 < VG_(get_num_temps)(cb));
sk_assert(t2 < VG_(get_num_temps)(cb));
sk_assert(t3 < VG_(get_num_temps)(cb));
tl_assert(regparms_n <= 3);
tl_assert(t1 < VG_(get_num_temps)(cb));
tl_assert(t2 < VG_(get_num_temps)(cb));
tl_assert(t3 < VG_(get_num_temps)(cb));
uInstr3(cb, CCALL, 0, TempReg, t1, TempReg, t2, TempReg, t3);
uCCall(cb, f, 3, regparms_n, /*retval*/False);
}


@ -396,7 +396,7 @@ void ensure_mm_init ( void )
}
/* No particular reason for this figure, it's just smallish */
sk_assert(VG_(vg_malloc_redzone_szB) < 128);
tl_assert(VG_(vg_malloc_redzone_szB) < 128);
client_rz_szB = VG_(vg_malloc_redzone_szB);
/* Use checked red zones (of various sizes) for our internal stuff,


@ -742,8 +742,8 @@ Addr VG_(client_alloc)(Addr addr, SizeT len, UInt prot, UInt sf_flags)
{
len = PGROUNDUP(len);
sk_assert(!(sf_flags & SF_FIXED));
sk_assert(0 == addr);
tl_assert(!(sf_flags & SF_FIXED));
tl_assert(0 == addr);
addr = (Addr)VG_(mmap)((void *)addr, len, prot,
VKI_MAP_PRIVATE | VKI_MAP_ANONYMOUS | VKI_MAP_CLIENT,


@ -152,7 +152,7 @@ void VG_(parse_procselfmaps) (
UInt ino;
UWord foffset, maj, min;
sk_assert( '\0' != procmap_buf[0] && 0 != buf_n_tot);
tl_assert( '\0' != procmap_buf[0] && 0 != buf_n_tot);
if (0)
VG_(message)(Vg_DebugMsg, "raw:\n%s", procmap_buf );


@ -53,7 +53,7 @@ static UInt n_lockorder_warnings = 0;
#define DEBUG_VIRGIN_READS 0 /* Dump around address on VIRGIN reads */
#if SLOW_ASSERTS
#define SK_ASSERT(x) sk_assert(x)
#define SK_ASSERT(x) tl_assert(x)
#else
#define SK_ASSERT(x)
#endif
@ -113,7 +113,7 @@ void VGE_(done_prof_mem) ( void )
}
#define PROF_EVENT(ev) \
do { sk_assert((ev) >= 0 && (ev) < N_PROF_EVENTS); \
do { tl_assert((ev) >= 0 && (ev) < N_PROF_EVENTS); \
event_ctr[ev]++; \
} while (False);
@ -412,13 +412,13 @@ static void addPriorTLS(ThreadId tid, ThreadId prior)
VG_(printf)("making TLS %p(%u) prior to TLS %p(%u)\n",
thread_seg[prior], prior, tls, tid);
sk_assert(thread_seg[tid] != NULL);
sk_assert(thread_seg[prior] != NULL);
tl_assert(thread_seg[tid] != NULL);
tl_assert(thread_seg[prior] != NULL);
if (tls->prior[0] == NULL)
tls->prior[0] = thread_seg[prior];
else {
sk_assert(tls->prior[1] == NULL);
tl_assert(tls->prior[1] == NULL);
tls->prior[1] = thread_seg[prior];
}
}
@ -514,7 +514,7 @@ void set_sword ( Addr a, shadow_word sword )
/* Use bits 31..16 for primary, 15..2 for secondary lookup */
sm = primary_map[a >> 16];
sk_assert(sm != &distinguished_secondary_map);
tl_assert(sm != &distinguished_secondary_map);
oldsw = &sm->swords[(a & 0xFFFC) >> 2];
if (oldsw->state == Vge_Excl && oldsw->other != TLSP_INDICATING_ALL) {
ThreadLifeSeg *tls = unpackTLS(oldsw->other);
@ -579,7 +579,7 @@ void init_nonvirgin_sword(Addr a)
ThreadId tid = VG_(get_current_or_recent_tid)();
ThreadLifeSeg *tls;
sk_assert(tid != VG_INVALID_THREADID);
tl_assert(tid != VG_INVALID_THREADID);
tls = thread_seg[tid];
sword = SW(Vge_Excl, packTLS(tls));
@ -596,7 +596,7 @@ void init_magically_inited_sword(Addr a)
{
shadow_word sword;
sk_assert(VG_INVALID_THREADID == VG_(get_current_tid)());
tl_assert(VG_INVALID_THREADID == VG_(get_current_tid)());
sword = SW(Vge_Virgin, TID_INDICATING_NONVIRGIN);
@ -695,7 +695,7 @@ static UInt hash_LockSet_w_wo(const LockSet *ls,
Int i;
UInt hash = ls->setsize + (with != NULL) - (without != NULL);
sk_assert(with == NULL || with != without);
tl_assert(with == NULL || with != without);
for(i = 0; with != NULL || i < ls->setsize; i++) {
const Mutex *mx = i >= ls->setsize ? NULL : ls->mutex[i];
@ -817,7 +817,7 @@ weird_LockSet_equals(const LockSet* a, const LockSet* b,
print_LockSet(" 2: b", b);
}
sk_assert(ia == a->setsize || mutex_cmp(a->mutex[ia], missing_mutex) >= 0);
tl_assert(ia == a->setsize || mutex_cmp(a->mutex[ia], missing_mutex) >= 0);
if (ib == b->setsize || mutex_cmp(missing_mutex, b->mutex[ib]) != 0)
return False;
@ -885,7 +885,7 @@ static void insert_LockSet(LockSet *set)
set->hash = hash;
sk_assert(lookup_LockSet(set) == NULL);
tl_assert(lookup_LockSet(set) == NULL);
set->next = lockset_hash[hash];
lockset_hash[hash] = set;
@ -1014,7 +1014,7 @@ LockSet *add_LockSet(const LockSet *ls, const Mutex *mx)
if (debug || LOCKSET_SANITY)
sanity_check_locksets("add-IN");
sk_assert(!ismember(ls, mx));
tl_assert(!ismember(ls, mx));
ret = alloc_LockSet(ls->setsize+1);
@ -1033,7 +1033,7 @@ LockSet *add_LockSet(const LockSet *ls, const Mutex *mx)
if (mx)
ret->mutex[j++] = mx;
sk_assert(j == ret->setsize);
tl_assert(j == ret->setsize);
if (debug || LOCKSET_SANITY) {
print_LockSet("add-OUT", ret);
@ -1060,7 +1060,7 @@ LockSet *remove_LockSet ( const LockSet *ls, const Mutex *mx )
if (debug || LOCKSET_SANITY)
sanity_check_locksets("remove-IN");
sk_assert(ismember(ls, mx));
tl_assert(ismember(ls, mx));
ret = alloc_LockSet(ls->setsize-1);
@ -1070,7 +1070,7 @@ LockSet *remove_LockSet ( const LockSet *ls, const Mutex *mx )
ret->mutex[j++] = ls->mutex[i];
}
sk_assert(j == ret->setsize);
tl_assert(j == ret->setsize);
if (debug || LOCKSET_SANITY) {
print_LockSet("remove-OUT", ret);
@ -1110,7 +1110,7 @@ static const LockSet *_intersect(const LockSet *a, const LockSet *b)
} else if (mutex_cmp(a->mutex[ia], b->mutex[ib]) < 0) {
ia++;
} else {
sk_assert(mutex_cmp(a->mutex[ia], b->mutex[ib]) > 0);
tl_assert(mutex_cmp(a->mutex[ia], b->mutex[ib]) > 0);
ib++;
}
}
@ -1119,14 +1119,14 @@ static const LockSet *_intersect(const LockSet *a, const LockSet *b)
ret = alloc_LockSet(size);
for (iret = ia = ib = 0; ia < a->setsize && ib < b->setsize; ) {
if (mutex_cmp(a->mutex[ia], b->mutex[ib]) == 0) {
sk_assert(iret < ret->setsize);
tl_assert(iret < ret->setsize);
ret->mutex[iret++] = a->mutex[ia];
ia++;
ib++;
} else if (mutex_cmp(a->mutex[ia], b->mutex[ib]) < 0) {
ia++;
} else {
sk_assert(mutex_cmp(a->mutex[ia], b->mutex[ib]) > 0);
tl_assert(mutex_cmp(a->mutex[ia], b->mutex[ib]) > 0);
ib++;
}
}
@ -1229,7 +1229,7 @@ static const LockSet *ls_union(const LockSet *a, const LockSet *b)
size++;
ia++;
} else {
sk_assert(cmp > 0);
tl_assert(cmp > 0);
size++;
ib++;
}
@ -1239,7 +1239,7 @@ static const LockSet *ls_union(const LockSet *a, const LockSet *b)
ret = alloc_LockSet(size);
for (iret = ia = ib = 0; (ia < a->setsize) || (ib < b->setsize); ) {
Int cmp;
sk_assert(iret < ret->setsize);
tl_assert(iret < ret->setsize);
if ((ia < a->setsize) && (ib < b->setsize))
cmp = mutex_cmp(a->mutex[ia], b->mutex[ib]);
@ -1256,13 +1256,13 @@ static const LockSet *ls_union(const LockSet *a, const LockSet *b)
ret->mutex[iret++] = a->mutex[ia];
ia++;
} else {
sk_assert(cmp > 0);
tl_assert(cmp > 0);
ret->mutex[iret++] = b->mutex[ib];
ib++;
}
}
sk_assert(iret == ret->setsize);
tl_assert(iret == ret->setsize);
ret->hash = hash_LockSet(ret);
@ -1461,7 +1461,7 @@ static void test_mutex_state(Mutex *mutex, MutexState state, ThreadId tid)
switch(state) {
case MxLocked:
sk_assert(!check_cycle(mutex, mutex->lockdep));
tl_assert(!check_cycle(mutex, mutex->lockdep));
if (debug)
print_LockSet("thread holding", thread_locks[tid]);
@ -1539,7 +1539,7 @@ static void set_mutex_state(Mutex *mutex, MutexState state, ThreadId tid)
break;
}
sk_assert(!check_cycle(mutex, mutex->lockdep));
tl_assert(!check_cycle(mutex, mutex->lockdep));
mutex->tid = tid;
break;
@ -1557,7 +1557,7 @@ static void set_mutex_state(Mutex *mutex, MutexState state, ThreadId tid)
case MxDead:
if (mutex->state == MxLocked) {
/* forcably remove offending lock from thread's lockset */
sk_assert(ismember(thread_locks[mutex->tid], mutex));
tl_assert(ismember(thread_locks[mutex->tid], mutex));
thread_locks[mutex->tid] = remove_LockSet(thread_locks[mutex->tid], mutex);
mutex->tid = VG_INVALID_THREADID;
@ -1655,7 +1655,7 @@ void set_address_range_state ( Addr a, SizeT len /* in bytes */,
-- this could happen with buggy syscall wrappers. Today
(2001-04-26) had precisely such a problem with
__NR_setitimer. */
sk_assert(SK_(cheap_sanity_check)());
tl_assert(SK_(cheap_sanity_check)());
VGP_POPCC(VgpSARP);
}
@ -2074,9 +2074,9 @@ UCodeBlock* SK_(instrument) ( UCodeBlock* cb_in, Addr not_used )
break;
case GET:
sk_assert(u_in->tag1 == ArchReg);
sk_assert(u_in->tag2 == TempReg);
sk_assert(u_in->val2 < ntemps);
tl_assert(u_in->tag1 == ArchReg);
tl_assert(u_in->tag2 == TempReg);
tl_assert(u_in->val2 < ntemps);
stackref[u_in->val2] = (u_in->size == 4 &&
(u_in->val1 == R_STACK_PTR ||
@ -2086,7 +2086,7 @@ UCodeBlock* SK_(instrument) ( UCodeBlock* cb_in, Addr not_used )
case MOV:
if (u_in->size == 4 && u_in->tag1 == TempReg) {
sk_assert(u_in->tag2 == TempReg);
tl_assert(u_in->tag2 == TempReg);
stackref[u_in->val2] = stackref[u_in->val1];
}
VG_(copy_UInstr)(cb, u_in);
@ -2095,7 +2095,7 @@ UCodeBlock* SK_(instrument) ( UCodeBlock* cb_in, Addr not_used )
case LEA1:
case ADD: case SUB:
if (u_in->size == 4 && u_in->tag1 == TempReg) {
sk_assert(u_in->tag2 == TempReg);
tl_assert(u_in->tag2 == TempReg);
stackref[u_in->val2] |= stackref[u_in->val1];
}
VG_(copy_UInstr)(cb, u_in);
@ -2103,8 +2103,8 @@ UCodeBlock* SK_(instrument) ( UCodeBlock* cb_in, Addr not_used )
case LOAD: {
void (*help)(Addr);
sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size);
sk_assert(u_in->tag1 == TempReg);
tl_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size);
tl_assert(u_in->tag1 == TempReg);
if (!clo_priv_stacks || !stackref[u_in->val1]) {
nonstk_ld++;
@ -2131,7 +2131,7 @@ UCodeBlock* SK_(instrument) ( UCodeBlock* cb_in, Addr not_used )
case MMX2_MemRd:
case FPU_R: {
sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size ||
tl_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size ||
8 == u_in->size || 10 == u_in->size || 108 == u_in->size);
t_size = newTemp(cb);
@ -2149,7 +2149,7 @@ UCodeBlock* SK_(instrument) ( UCodeBlock* cb_in, Addr not_used )
}
case MMX2a1_MemRd: {
sk_assert(8 == u_in->size);
tl_assert(8 == u_in->size);
t_size = newTemp(cb);
uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
@ -2172,7 +2172,7 @@ UCodeBlock* SK_(instrument) ( UCodeBlock* cb_in, Addr not_used )
case SSE3ag_MemRd_RegWr: {
Int addr = (u_in->opcode == SSE3ag_MemRd_RegWr) ? u_in->val1 : u_in->val3;
sk_assert(u_in->size == 4 || u_in->size == 8 || u_in->size == 16 || u_in->size == 512);
tl_assert(u_in->size == 4 || u_in->size == 8 || u_in->size == 16 || u_in->size == 512);
t_size = newTemp(cb);
uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
@ -2188,8 +2188,8 @@ UCodeBlock* SK_(instrument) ( UCodeBlock* cb_in, Addr not_used )
case STORE: {
void (*help)(Addr, UInt);
sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size);
sk_assert(u_in->tag2 == TempReg);
tl_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size);
tl_assert(u_in->tag2 == TempReg);
if (!clo_priv_stacks || !stackref[u_in->val2]) {
nonstk_st++;
@ -2216,7 +2216,7 @@ UCodeBlock* SK_(instrument) ( UCodeBlock* cb_in, Addr not_used )
case MMX2_MemWr:
case FPU_W: {
sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size ||
tl_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size ||
8 == u_in->size || 10 == u_in->size || 108 == u_in->size);
t_size = newTemp(cb);
@ -2234,7 +2234,7 @@ UCodeBlock* SK_(instrument) ( UCodeBlock* cb_in, Addr not_used )
case SSE2a_MemWr:
case SSE3a_MemWr: {
sk_assert(4 == u_in->size || 8 == u_in->size || 16 == u_in->size ||
tl_assert(4 == u_in->size || 8 == u_in->size || 16 == u_in->size ||
512 == u_in->size);
t_size = newTemp(cb);
@ -2540,7 +2540,7 @@ Bool SK_(eq_SkinError) ( VgRes not_used, Error* e1, Error* e2 )
{
Char *e1s, *e2s;
sk_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));
tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));
switch (VG_(get_error_kind)(e1)) {
case EraserErr:
@ -2672,7 +2672,7 @@ void SK_(pp_SkinError) ( Error* err )
case Vge_Excl: {
ThreadLifeSeg *tls = unpackTLS(extra->prevstate.other);
sk_assert(tls != unpackTLS(TLSP_INDICATING_ALL));
tl_assert(tls != unpackTLS(TLSP_INDICATING_ALL));
VG_(sprintf)(buf, "exclusively owned by thread %u", tls->tid);
break;
}
@ -2798,7 +2798,7 @@ Bool SK_(read_extra_suppression_info) ( Int fd, Char* buf, Int nBuf, Supp* su )
Bool SK_(error_matches_suppression)(Error* err, Supp* su)
{
sk_assert(VG_(get_supp_kind)(su) == EraserSupp);
tl_assert(VG_(get_supp_kind)(su) == EraserSupp);
return (VG_(get_error_kind)(err) == EraserErr);
}
@ -2949,7 +2949,7 @@ static void eraser_mem_read_word(Addr a, ThreadId tid)
};
tls = thread_seg[tid];
sk_assert(tls != NULL && tls->tid == tid);
tl_assert(tls != NULL && tls->tid == tid);
sword = get_sword_addr(a);
if (sword == SEC_MAP_ACCESS) {
@ -3059,7 +3059,7 @@ static void eraser_mem_write_word(Addr a, ThreadId tid)
};
tls = thread_seg[tid];
sk_assert(tls != NULL && tls->tid == tid);
tl_assert(tls != NULL && tls->tid == tid);
sword = get_sword_addr(a);
if (sword == SEC_MAP_ACCESS) {


@ -475,7 +475,7 @@ extern Bool VG_(getcwd_alloc) ( Char** cwd );
/* Asserts permanently enabled -- no turning off with NDEBUG. Hurrah! */
#define VG__STRING(__str) #__str
#define sk_assert(expr) \
#define tl_assert(expr) \
((void) ((expr) ? 0 : \
(VG_(skin_assert_fail) (VG__STRING(expr), \
__FILE__, __LINE__, \
@ -1797,7 +1797,7 @@ extern void VG_(details_copyright_author) ( Char* copyright_author );
setting is optional. */
extern void VG_(details_avg_translation_sizeB) ( UInt size );
/* String printed if an `sk_assert' assertion fails or VG_(skin_panic)
/* String printed if an `tl_assert' assertion fails or VG_(skin_panic)
is called. Should probably be an email address. */
extern void VG_(details_bug_reports_to) ( Char* bug_reports_to );


@ -89,7 +89,7 @@ void VGP_(tick) ( int sigNo )
Int cc;
vgp_nticks++;
cc = vgp_stack[vgp_sp];
sk_assert(cc >= 0 && cc < VGP_MAX_CCS);
tl_assert(cc >= 0 && cc < VGP_MAX_CCS);
vgp_counts[ cc ]++;
}
@ -101,7 +101,7 @@ void VGP_(init_profiling) ( void )
/* Register core events... tricky macro definition causes
VGP_(register_profile_event)() to be called once for each core event
in VGP_CORE_LIST. */
sk_assert(VgpUnc == 0);
tl_assert(VgpUnc == 0);
# define VGP_PAIR(n,name) VGP_(register_profile_event)(n,name)
VGP_CORE_LIST;
# undef VGP_PAIR


@ -375,7 +375,7 @@ static XPt* new_XPt(Addr eip, XPt* parent, Bool is_bottom)
xpt->parent = parent;
// Check parent is not a bottom-XPt
sk_assert(parent == NULL || 0 != parent->max_children);
tl_assert(parent == NULL || 0 != parent->max_children);
xpt->n_children = 0;
@ -447,7 +447,7 @@ static XPt* get_XCon( ThreadId tid, Bool custom_malloc )
// Must be at least one alloc function, unless client used
// MALLOCLIKE_BLOCK
if (!custom_malloc) sk_assert(L > 0);
if (!custom_malloc) tl_assert(L > 0);
// Should be at least one non-alloc function. If not, try again.
if (L == n_eips) {
@ -482,8 +482,8 @@ static XPt* get_XCon( ThreadId tid, Bool custom_malloc )
while (True) {
if (nC == xpt->n_children) {
// not found, insert new XPt
sk_assert(xpt->max_children != 0);
sk_assert(xpt->n_children <= xpt->max_children);
tl_assert(xpt->max_children != 0);
tl_assert(xpt->n_children <= xpt->max_children);
// Expand 'children' if necessary
if (xpt->n_children == xpt->max_children) {
xpt->max_children *= 2;
@ -503,7 +503,7 @@ static XPt* get_XCon( ThreadId tid, Bool custom_malloc )
// Return found/built bottom-XPt.
if (reached_bottom) {
sk_assert(0 == xpt->children[nC]->n_children); // Must be bottom-XPt
tl_assert(0 == xpt->children[nC]->n_children); // Must be bottom-XPt
VGP_POPCC(VgpGetXPt);
return xpt->children[nC];
}
@ -519,17 +519,17 @@ static void update_XCon(XPt* xpt, Int space_delta)
{
VGP_PUSHCC(VgpUpdateXCon);
sk_assert(True == clo_heap);
sk_assert(0 != space_delta);
sk_assert(NULL != xpt);
sk_assert(0 == xpt->n_children); // must be bottom-XPt
tl_assert(True == clo_heap);
tl_assert(0 != space_delta);
tl_assert(NULL != xpt);
tl_assert(0 == xpt->n_children); // must be bottom-XPt
while (xpt != alloc_xpt) {
if (space_delta < 0) sk_assert(xpt->curr_space >= -space_delta);
if (space_delta < 0) tl_assert(xpt->curr_space >= -space_delta);
xpt->curr_space += space_delta;
xpt = xpt->parent;
}
if (space_delta < 0) sk_assert(alloc_xpt->curr_space >= -space_delta);
if (space_delta < 0) tl_assert(alloc_xpt->curr_space >= -space_delta);
alloc_xpt->curr_space += space_delta;
VGP_POPCC(VgpUpdateXCon);
@ -602,7 +602,7 @@ static void adjust(Queue* q)
{
void** old_elems;
sk_assert(q->tail == q->max_elems);
tl_assert(q->tail == q->max_elems);
if (q->head < 10) {
old_elems = q->elems;
q->max_elems *= 2;
@ -655,7 +655,7 @@ HP_Chunk* get_HP_Chunk(void* p, HP_Chunk*** prev_chunks_next_ptr)
static __inline__
void remove_HP_Chunk(HP_Chunk* hc, HP_Chunk** prev_chunks_next_ptr)
{
sk_assert(n_heap_blocks > 0);
tl_assert(n_heap_blocks > 0);
n_heap_blocks--;
*prev_chunks_next_ptr = hc->next;
}
@ -719,7 +719,7 @@ void die_block ( void* p, Bool custom_free )
hc = get_HP_Chunk( p, &remove_handle );
if (hc == NULL)
return; // must have been a bogus free(), or p==NULL
sk_assert(hc->data == (Addr)p);
tl_assert(hc->data == (Addr)p);
remove_HP_Chunk(hc, remove_handle);
if (clo_heap && hc->size != 0)
@ -796,7 +796,7 @@ void* SK_(realloc) ( void* p_old, SizeT new_size )
return NULL; // must have been a bogus free()
}
sk_assert(hc->data == (Addr)p_old);
tl_assert(hc->data == (Addr)p_old);
old_size = hc->size;
if (new_size <= old_size) {
@ -933,7 +933,7 @@ static void halve_censi(void)
FIND_CENSUS(j+1, jn);
while (jn < MAX_N_CENSI) {
Int timespan = censi[jn].ms_time - censi[jp].ms_time;
sk_assert(timespan >= 0);
tl_assert(timespan >= 0);
if (timespan < min_span) {
min_span = timespan;
min_j = j;
@ -1053,7 +1053,7 @@ static void hp_census(void)
VGP_PUSHCC(VgpCensusSnapshot);
xtree_size2 = do_space_snapshot(alloc_xpt->children[i],
census->xtree_snapshots[i], 0);
sk_assert(xtree_size == xtree_size2);
tl_assert(xtree_size == xtree_size2);
VGP_POPCC(VgpCensusSnapshot);
}
// VG_(printf)("\n\n");
@ -1120,7 +1120,7 @@ static void new_mem_stack_signal(Addr a, SizeT len)
static void die_mem_stack_signal(Addr a, SizeT len)
{
sk_assert(sigstacks_space >= len);
tl_assert(sigstacks_space >= len);
sigstacks_space -= len;
}
@ -1137,7 +1137,7 @@ Bool SK_(handle_client_request) ( ThreadId tid, UWord* argv, UWord* ret )
SizeT sizeB = argv[2];
*ret = 0;
res = new_block( p, sizeB, /*align -- ignored*/0, /*is_zeroed*/False );
sk_assert(res == p);
tl_assert(res == p);
return True;
}
case VG_USERREQ__FREELIKE_BLOCK: {
@ -1196,7 +1196,7 @@ void SK_(pre_clo_init)()
// Dummy node at top of the context structure.
alloc_xpt = new_XPt(0, NULL, /*is_bottom*/False);
sk_assert( VG_(getcwd_alloc)(&base_dir) );
tl_assert( VG_(getcwd_alloc)(&base_dir) );
}
void SK_(post_clo_init)(void)
@ -1454,7 +1454,7 @@ static void write_hp_file(void)
}
// Close file
sk_assert(fd >= 0);
tl_assert(fd >= 0);
VG_(close)(fd);
// Attempt to convert file using hp2ps
@ -1506,7 +1506,7 @@ static Char* make_perc(ULong spacetime, ULong total_spacetime)
static Char mbuf[32];
UInt p = 10;
sk_assert(0 != total_spacetime);
tl_assert(0 != total_spacetime);
percentify(spacetime * 100 * p / total_spacetime, p, 5, mbuf);
return mbuf;
}
@ -1522,7 +1522,7 @@ static UInt pp_XCon(Int fd, XPt* xpt)
Char* maybe_br = ( is_HTML ? "<br>" : "" );
Char* maybe_indent = ( is_HTML ? "&nbsp;&nbsp;" : "" );
sk_assert(NULL != xpt);
tl_assert(NULL != xpt);
while (True) {
rev_eips[i] = xpt->eip;
@ -1579,23 +1579,23 @@ static void pp_all_XPts2(Int fd, Queue* q, ULong heap_spacetime,
while (NULL != (xpt = (XPt*)dequeue(q))) {
// Check that non-top-level XPts have a zero .approx_ST field.
if (xpt->parent != alloc_xpt) sk_assert( 0 == xpt->approx_ST );
if (xpt->parent != alloc_xpt) tl_assert( 0 == xpt->approx_ST );
// Check that the sum of all children .exact_ST_dbld fields equals
// parent's (unless alloc_xpt, when it should == 0).
if (alloc_xpt == xpt) {
sk_assert(0 == xpt->exact_ST_dbld);
tl_assert(0 == xpt->exact_ST_dbld);
} else {
sum = 0;
for (i = 0; i < xpt->n_children; i++) {
sum += xpt->children[i]->exact_ST_dbld;
}
//sk_assert(sum == xpt->exact_ST_dbld);
//tl_assert(sum == xpt->exact_ST_dbld);
// It's possible that not all the children were included in the
// exact_ST_dbld calculations. Hopefully almost all of them were, and
// all the important ones.
// sk_assert(sum <= xpt->exact_ST_dbld);
// sk_assert(sum * 1.05 > xpt->exact_ST_dbld );
// tl_assert(sum <= xpt->exact_ST_dbld);
// tl_assert(sum * 1.05 > xpt->exact_ST_dbld );
// if (sum != xpt->exact_ST_dbld) {
// VG_(printf)("%ld, %ld\n", sum, xpt->exact_ST_dbld);
// }
@ -1618,7 +1618,7 @@ static void pp_all_XPts2(Int fd, Queue* q, ULong heap_spacetime,
perc);
}
n = pp_XCon(fd, xpt);
sk_assert(n == L);
tl_assert(n == L);
}
// Sort children by exact_ST_dbld
@ -1734,7 +1734,7 @@ write_text_file(ULong total_ST, ULong heap_ST)
if (clo_heap)
pp_all_XPts(fd, alloc_xpt, heap_ST, total_ST);
sk_assert(fd >= 0);
tl_assert(fd >= 0);
VG_(close)(fd);
VGP_POPCC(VgpPrintXPts);
@ -1762,17 +1762,17 @@ print_summary(ULong total_ST, ULong heap_ST, ULong heap_admin_ST,
( 0 == total_ST ? (Char*)"(n/a)"
: make_perc(heap_admin_ST, total_ST) ) );
sk_assert( VG_(HT_count_nodes)(malloc_list) == n_heap_blocks );
tl_assert( VG_(HT_count_nodes)(malloc_list) == n_heap_blocks );
// Stack(s) ----------------------------------------------------------
if (clo_stacks) {
sk_assert(0 != total_ST);
tl_assert(0 != total_ST);
VG_(message)(Vg_UserMsg, "stack(s): %s",
make_perc(stack_ST, total_ST) );
}
if (VG_(clo_verbosity) > 1) {
sk_assert(n_xpts > 0); // always have alloc_xpt
tl_assert(n_xpts > 0); // always have alloc_xpt
VG_(message)(Vg_DebugMsg, " allocs: %u", n_allocs);
VG_(message)(Vg_DebugMsg, "zeroallocs: %u (%d%%)", n_zero_allocs,
n_zero_allocs * 100 / n_allocs );


@ -93,33 +93,33 @@ UInt vg_scan_all_valid_memory ( Bool is_valid_64k_chunk ( UInt ),
sigbus_new.sa_flags = VKI_SA_ONSTACK | VKI_SA_RESTART;
sigbus_new.sa_restorer = NULL;
res = VG_(sigemptyset)( &sigbus_new.sa_mask );
sk_assert(res == 0);
tl_assert(res == 0);
sigsegv_new.ksa_handler = vg_scan_all_valid_memory_sighandler;
sigsegv_new.sa_flags = VKI_SA_ONSTACK | VKI_SA_RESTART;
sigsegv_new.sa_restorer = NULL;
res = VG_(sigemptyset)( &sigsegv_new.sa_mask );
sk_assert(res == 0+0);
tl_assert(res == 0+0);
res = VG_(sigemptyset)( &unblockmask_new );
res |= VG_(sigaddset)( &unblockmask_new, VKI_SIGBUS );
res |= VG_(sigaddset)( &unblockmask_new, VKI_SIGSEGV );
res |= VG_(sigaddset)( &unblockmask_new, VKI_SIGTERM );
sk_assert(res == 0+0+0);
tl_assert(res == 0+0+0);
res = VG_(sigaction)( VKI_SIGBUS, &sigbus_new, &sigbus_saved );
sk_assert(res == 0+0+0+0);
tl_assert(res == 0+0+0+0);
res = VG_(sigaction)( VKI_SIGSEGV, &sigsegv_new, &sigsegv_saved );
sk_assert(res == 0+0+0+0+0);
tl_assert(res == 0+0+0+0+0);
res = VG_(sigprocmask)( VKI_SIG_UNBLOCK, &unblockmask_new, &blockmask_saved );
sk_assert(res == 0+0+0+0+0+0);
tl_assert(res == 0+0+0+0+0+0);
/* The signal handlers are installed. Actually do the memory scan. */
numPages = 1 << (32-VKI_PAGE_SHIFT);
sk_assert(numPages == 1048576);
sk_assert(4096 == (1 << VKI_PAGE_SHIFT));
tl_assert(numPages == 1048576);
tl_assert(4096 == (1 << VKI_PAGE_SHIFT));
nWordsNotified = 0;
@ -178,13 +178,13 @@ UInt vg_scan_all_valid_memory ( Bool is_valid_64k_chunk ( UInt ),
/* Restore signal state to whatever it was before. */
res = VG_(sigaction)( VKI_SIGBUS, &sigbus_saved, NULL );
sk_assert(res == 0 +0);
tl_assert(res == 0 +0);
res = VG_(sigaction)( VKI_SIGSEGV, &sigsegv_saved, NULL );
sk_assert(res == 0 +0 +0);
tl_assert(res == 0 +0 +0);
res = VG_(sigprocmask)( VKI_SIG_SETMASK, &blockmask_saved, NULL );
sk_assert(res == 0 +0 +0 +0);
tl_assert(res == 0 +0 +0 +0);
return nWordsNotified;
}
@ -271,13 +271,13 @@ Int find_shadow_for ( Addr ptr,
lo = mid+1;
continue;
}
sk_assert(ptr >= a_mid_lo && ptr <= a_mid_hi);
tl_assert(ptr >= a_mid_lo && ptr <= a_mid_hi);
retVal = mid;
break;
}
# ifdef VG_DEBUG_LEAKCHECK
sk_assert(retVal == find_shadow_for_OLD ( ptr, shadows, n_shadows ));
tl_assert(retVal == find_shadow_for_OLD ( ptr, shadows, n_shadows ));
# endif
/* VG_(printf)("%d\n", retVal); */
return retVal;
@ -321,8 +321,8 @@ void vg_detect_memory_leaks_notify_addr ( Addr a, UInt word_at_a )
sh_no = find_shadow_for ( ptr, lc_shadows, lc_n_shadows );
if (sh_no != -1) {
/* Found a block at/into which ptr points. */
sk_assert(sh_no >= 0 && sh_no < lc_n_shadows);
sk_assert(ptr < lc_shadows[sh_no]->data + lc_shadows[sh_no]->size);
tl_assert(sh_no >= 0 && sh_no < lc_n_shadows);
tl_assert(ptr < lc_shadows[sh_no]->data + lc_shadows[sh_no]->size);
/* Decide whether Proper-ly or Interior-ly reached. */
if (ptr == lc_shadows[sh_no]->data) {
if (0) VG_(printf)("pointer at %p to %p\n", a, word_at_a );
@ -400,17 +400,17 @@ void MAC_(do_detect_memory_leaks) (
/* Sanity check; assert that the blocks are now in order */
for (i = 0; i < lc_n_shadows-1; i++) {
sk_assert( lc_shadows[i]->data <= lc_shadows[i+1]->data);
tl_assert( lc_shadows[i]->data <= lc_shadows[i+1]->data);
}
/* Sanity check -- make sure they don't overlap */
for (i = 0; i < lc_n_shadows-1; i++) {
sk_assert( lc_shadows[i]->data + lc_shadows[i]->size
tl_assert( lc_shadows[i]->data + lc_shadows[i]->size
< lc_shadows[i+1]->data );
}
if (lc_n_shadows == 0) {
sk_assert(lc_shadows == NULL);
tl_assert(lc_shadows == NULL);
if (VG_(clo_verbosity) >= 1) {
VG_(message)(Vg_UserMsg,
"No malloc'd blocks -- no leaks are possible.");
@ -489,7 +489,7 @@ void MAC_(do_detect_memory_leaks) (
p_min = p;
}
}
sk_assert(p_min != NULL);
tl_assert(p_min != NULL);
/* Ok to have tst==NULL; it's only used if --gdb-attach=yes, and
we disallow that when --leak-check=yes.


@ -79,11 +79,11 @@ static void add_to_freed_queue ( MAC_Chunk* mc )
/* Put it at the end of the freed list */
if (freed_list_end == NULL) {
sk_assert(freed_list_start == NULL);
tl_assert(freed_list_start == NULL);
freed_list_end = freed_list_start = mc;
freed_list_volume = mc->size;
} else {
sk_assert(freed_list_end->next == NULL);
tl_assert(freed_list_end->next == NULL);
freed_list_end->next = mc;
freed_list_end = mc;
freed_list_volume += mc->size;
@ -94,13 +94,13 @@ static void add_to_freed_queue ( MAC_Chunk* mc )
volume below vg_clo_freelist_vol. */
while (freed_list_volume > MAC_(clo_freelist_vol)) {
sk_assert(freed_list_start != NULL);
sk_assert(freed_list_end != NULL);
tl_assert(freed_list_start != NULL);
tl_assert(freed_list_end != NULL);
sc1 = freed_list_start;
freed_list_volume -= sc1->size;
/* VG_(printf)("volume now %d\n", freed_list_volume); */
sk_assert(freed_list_volume >= 0);
tl_assert(freed_list_volume >= 0);
if (freed_list_start == freed_list_end) {
freed_list_start = freed_list_end = NULL;
@ -191,9 +191,9 @@ void* MAC_(new_block) ( Addr p, SizeT size, SizeT align, UInt rzB,
// Allocate and zero if necessary
if (p) {
sk_assert(MAC_AllocCustom == kind);
tl_assert(MAC_AllocCustom == kind);
} else {
sk_assert(MAC_AllocCustom != kind);
tl_assert(MAC_AllocCustom != kind);
p = (Addr)VG_(cli_malloc)( align, size );
if (!p) {
VGP_POPCC(VgpCliMalloc);


@ -143,7 +143,7 @@ Bool SK_(eq_SkinError) ( VgRes res, Error* e1, Error* e2 )
MAC_Error* e2_extra = VG_(get_error_extra)(e2);
/* Guaranteed by calling function */
sk_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));
tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));
switch (VG_(get_error_kind)(e1)) {
case CoreMemErr: {
@ -257,7 +257,7 @@ void MAC_(pp_AddrInfo) ( Addr a, AddrInfo* ai )
}
case Register:
// print nothing
sk_assert(0 == a);
tl_assert(0 == a);
break;
default:
VG_(skin_panic)("MAC_(pp_AddrInfo)");
@ -463,7 +463,7 @@ void MAC_(record_param_error) ( ThreadId tid, Addr a, Bool isReg,
{
MAC_Error err_extra;
sk_assert(VG_INVALID_THREADID != tid);
tl_assert(VG_INVALID_THREADID != tid);
MAC_(clear_MAC_Error)( &err_extra );
err_extra.addrinfo.akind = ( isReg ? Register : Undescribed );
err_extra.isUnaddr = isUnaddr;
@ -474,7 +474,7 @@ void MAC_(record_jump_error) ( ThreadId tid, Addr a )
{
MAC_Error err_extra;
sk_assert(VG_INVALID_THREADID != tid);
tl_assert(VG_INVALID_THREADID != tid);
MAC_(clear_MAC_Error)( &err_extra );
err_extra.axskind = ExecAxs;
err_extra.size = 1; // size only used for suppressions
@ -486,7 +486,7 @@ void MAC_(record_free_error) ( ThreadId tid, Addr a )
{
MAC_Error err_extra;
sk_assert(VG_INVALID_THREADID != tid);
tl_assert(VG_INVALID_THREADID != tid);
MAC_(clear_MAC_Error)( &err_extra );
err_extra.addrinfo.akind = Undescribed;
VG_(maybe_record_error)( tid, FreeErr, a, /*s*/NULL, &err_extra );
@ -496,7 +496,7 @@ void MAC_(record_illegal_mempool_error) ( ThreadId tid, Addr a )
{
MAC_Error err_extra;
sk_assert(VG_INVALID_THREADID != tid);
tl_assert(VG_INVALID_THREADID != tid);
MAC_(clear_MAC_Error)( &err_extra );
err_extra.addrinfo.akind = Undescribed;
VG_(maybe_record_error)( tid, IllegalMempoolErr, a, /*s*/NULL, &err_extra );
@ -506,7 +506,7 @@ void MAC_(record_freemismatch_error) ( ThreadId tid, Addr a )
{
MAC_Error err_extra;
sk_assert(VG_INVALID_THREADID != tid);
tl_assert(VG_INVALID_THREADID != tid);
MAC_(clear_MAC_Error)( &err_extra );
err_extra.addrinfo.akind = Undescribed;
VG_(maybe_record_error)( tid, FreeMismatchErr, a, /*s*/NULL, &err_extra );
@ -859,7 +859,7 @@ Bool MAC_(handle_common_client_requests)(ThreadId tid, UWord* arg, UWord* ret )
// Not using 'tid' here because MAC_(new_block)() and MAC_(handle_free)()
// grab it themselves. But what they grab should match 'tid', check
// this.
sk_assert(tid == VG_(get_current_or_recent_tid)());
tl_assert(tid == VG_(get_current_or_recent_tid)());
switch (arg[0]) {
case VG_USERREQ__COUNT_LEAKS: { /* count leaked bytes */


@ -178,7 +178,7 @@ typedef
extern UInt MAC_(event_ctr)[N_PROF_EVENTS];
# define PROF_EVENT(ev) \
do { sk_assert((ev) >= 0 && (ev) < N_PROF_EVENTS); \
do { tl_assert((ev) >= 0 && (ev) < N_PROF_EVENTS); \
MAC_(event_ctr)[ev]++; \
} while (False);


@ -65,7 +65,7 @@ void SK_(pp_SkinError) ( Error* err )
Bool isReg = ( Register == err_extra->addrinfo.akind );
Char* s1 = ( isReg ? "contains" : "points to" );
Char* s2 = ( err_extra->isUnaddr ? "unaddressable" : "uninitialised" );
if (isReg) sk_assert(!err_extra->isUnaddr);
if (isReg) tl_assert(!err_extra->isUnaddr);
VG_(message)(Vg_UserMsg, "Syscall param %s %s %s byte(s)",
VG_(get_error_string)(err), s1, s2);
@ -114,7 +114,7 @@ void MC_(record_user_error) ( ThreadId tid, Addr a, Bool isWrite,
{
MAC_Error err_extra;
sk_assert(VG_INVALID_THREADID != tid);
tl_assert(VG_INVALID_THREADID != tid);
MAC_(clear_MAC_Error)( &err_extra );
err_extra.addrinfo.akind = Undescribed;
err_extra.isUnaddr = isUnaddr;


@ -55,7 +55,7 @@ static void emit_testv_lit_reg ( Int sz, UInt lit, Int reg )
if (sz == 2) {
VG_(emitB) ( 0x66 );
} else {
sk_assert(sz == 4);
tl_assert(sz == 4);
}
VG_(emitB) ( 0xF7 ); /* Grp3 Ev */
VG_(emit_amode_ereg_greg) ( reg, 0 /* Grp3 subopcode for TEST */ );
@ -71,7 +71,7 @@ static void emit_testv_lit_offregmem ( Int sz, UInt lit, Int off, Int reg )
if (sz == 2) {
VG_(emitB) ( 0x66 );
} else {
sk_assert(sz == 4);
tl_assert(sz == 4);
}
VG_(emitB) ( 0xF7 ); /* Grp3 Ev */
VG_(emit_amode_offregmem_reg) ( off, reg, 0 /* Grp3 subopcode for TEST */ );
@ -138,7 +138,7 @@ static void synth_STOREV ( Int sz, Int tv_tag, Int tv_val, Int a_reg,
UInt argv[] = { a_reg, tv_val };
Tag tagv[] = { RealReg, tv_tag };
sk_assert(tv_tag == RealReg || tv_tag == Literal);
tl_assert(tv_tag == RealReg || tv_tag == Literal);
switch (sz) {
case 4: helper = (Addr) MC_(helperc_STOREV4); break;
case 2: helper = (Addr) MC_(helperc_STOREV2); break;
@ -173,11 +173,11 @@ static void synth_TESTV ( Int sz, Int tag, Int val )
the codegen scheme used below. Since there are a shortage of
compact helper slots, and since the size==1 case is never
actually used, we assert against it. */
sk_assert(sz == 0 || sz == 2 || sz == 4);
tl_assert(sz == 0 || sz == 2 || sz == 4);
VG_(init_target)(&tgt);
sk_assert(tag == ArchReg || tag == RealReg);
tl_assert(tag == ArchReg || tag == RealReg);
if (tag == ArchReg) {
switch (sz) {
case 4:
@ -282,17 +282,17 @@ static void synth_PUTV ( Int sz, Int srcTag, UInt lit_or_reg, Int arch )
UInt lit = lit_or_reg;
switch (sz) {
case 4:
sk_assert(lit == 0x00000000);
tl_assert(lit == 0x00000000);
VG_(emit_movv_lit_offregmem) ( 4, 0x00000000,
VG_(shadow_reg_offset)(arch), R_EBP );
break;
case 2:
sk_assert(lit == 0xFFFF0000);
tl_assert(lit == 0xFFFF0000);
VG_(emit_movv_lit_offregmem) ( 2, 0x0000,
VG_(shadow_reg_offset)(arch), R_EBP );
break;
case 1:
sk_assert(lit == 0xFFFFFF00);
tl_assert(lit == 0xFFFFFF00);
if (arch < 4) {
VG_(emit_movb_lit_offregmem) ( 0x00,
VG_(shadow_reg_offset)(arch), R_EBP );
@ -309,7 +309,7 @@ static void synth_PUTV ( Int sz, Int srcTag, UInt lit_or_reg, Int arch )
} else {
UInt reg;
sk_assert(srcTag == RealReg);
tl_assert(srcTag == RealReg);
if (sz == 1 && lit_or_reg >= 4) {
VG_(emit_swapl_reg_EAX) ( lit_or_reg );
@ -318,7 +318,7 @@ static void synth_PUTV ( Int sz, Int srcTag, UInt lit_or_reg, Int arch )
reg = lit_or_reg;
}
if (sz == 1) sk_assert(reg < 4);
if (sz == 1) tl_assert(reg < 4);
switch (sz) {
case 4:
@ -588,13 +588,13 @@ void SK_(emit_XUInstr) ( UInstr* u, RRegSet regs_live_before )
switch (u->opcode) {
case SETV:
sk_assert(u->tag1 == RealReg);
tl_assert(u->tag1 == RealReg);
synth_SETV ( u->size, u->val1 );
break;
case STOREV:
sk_assert(u->tag1 == RealReg || u->tag1 == Literal);
sk_assert(u->tag2 == RealReg);
tl_assert(u->tag1 == RealReg || u->tag1 == Literal);
tl_assert(u->tag2 == RealReg);
synth_STOREV ( u->size, u->tag1,
u->tag1==Literal ? u->lit32 : u->val1,
u->val2,
@ -602,8 +602,8 @@ void SK_(emit_XUInstr) ( UInstr* u, RRegSet regs_live_before )
break;
case LOADV:
sk_assert(u->tag1 == RealReg);
sk_assert(u->tag2 == RealReg);
tl_assert(u->tag1 == RealReg);
tl_assert(u->tag2 == RealReg);
if (0)
VG_(emit_AMD_prefetch_reg) ( u->val1 );
synth_LOADV ( u->size, u->val1, u->val2,
@ -611,33 +611,33 @@ void SK_(emit_XUInstr) ( UInstr* u, RRegSet regs_live_before )
break;
case TESTV:
sk_assert(u->tag1 == RealReg || u->tag1 == ArchReg);
tl_assert(u->tag1 == RealReg || u->tag1 == ArchReg);
synth_TESTV(u->size, u->tag1, u->val1);
break;
case GETV:
sk_assert(u->tag1 == ArchReg);
sk_assert(u->tag2 == RealReg);
tl_assert(u->tag1 == ArchReg);
tl_assert(u->tag2 == RealReg);
synth_GETV(u->size, u->val1, u->val2);
break;
case GETVF:
sk_assert(u->tag1 == RealReg);
sk_assert(u->size == 0);
tl_assert(u->tag1 == RealReg);
tl_assert(u->size == 0);
synth_GETVF(u->val1);
break;
case PUTV:
sk_assert(u->tag1 == RealReg || u->tag1 == Literal);
sk_assert(u->tag2 == ArchReg);
tl_assert(u->tag1 == RealReg || u->tag1 == Literal);
tl_assert(u->tag2 == ArchReg);
synth_PUTV(u->size, u->tag1,
u->tag1==Literal ? u->lit32 : u->val1,
u->val2 );
break;
case PUTVF:
sk_assert(u->tag1 == RealReg);
sk_assert(u->size == 0);
tl_assert(u->tag1 == RealReg);
tl_assert(u->size == 0);
synth_PUTVF(u->val1);
break;


@ -225,7 +225,7 @@ static __inline__ UChar get_abits4_ALIGNED ( Addr a )
UChar abits8;
PROF_EVENT(24);
# ifdef VG_DEBUG_MEMORY
sk_assert(IS_ALIGNED4_ADDR(a));
tl_assert(IS_ALIGNED4_ADDR(a));
# endif
sm = primary_map[a >> 16];
sm_off = a & 0xFFFF;
@ -241,7 +241,7 @@ static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
UInt sm_off = a & 0xFFFF;
PROF_EVENT(25);
# ifdef VG_DEBUG_MEMORY
sk_assert(IS_ALIGNED4_ADDR(a));
tl_assert(IS_ALIGNED4_ADDR(a));
# endif
return ((UInt*)(sm->vbyte))[sm_off >> 2];
}
@ -256,7 +256,7 @@ static void __inline__ set_vbytes4_ALIGNED ( Addr a, UInt vbytes )
sm_off = a & 0xFFFF;
PROF_EVENT(23);
# ifdef VG_DEBUG_MEMORY
sk_assert(IS_ALIGNED4_ADDR(a));
tl_assert(IS_ALIGNED4_ADDR(a));
# endif
((UInt*)(sm->vbyte))[sm_off >> 2] = vbytes;
}
@ -294,15 +294,15 @@ static void set_address_range_perms ( Addr a, SizeT len,
indicate bugs in our machinery. 30,000,000 is arbitrary, but so
far all legitimate requests have fallen beneath that size. */
/* 4 Mar 02: this is just stupid; get rid of it. */
/* sk_assert(len < 30000000); */
/* tl_assert(len < 30000000); */
/* Check the permissions make sense. */
sk_assert(example_a_bit == VGM_BIT_VALID
tl_assert(example_a_bit == VGM_BIT_VALID
|| example_a_bit == VGM_BIT_INVALID);
sk_assert(example_v_bit == VGM_BIT_VALID
tl_assert(example_v_bit == VGM_BIT_VALID
|| example_v_bit == VGM_BIT_INVALID);
if (example_a_bit == VGM_BIT_INVALID)
sk_assert(example_v_bit == VGM_BIT_INVALID);
tl_assert(example_v_bit == VGM_BIT_INVALID);
/* The validity bits to write. */
vbyte = example_v_bit==VGM_BIT_VALID
@ -347,7 +347,7 @@ static void set_address_range_perms ( Addr a, SizeT len,
VGP_POPCC(VgpSetMem);
return;
}
sk_assert((a % 8) == 0 && len > 0);
tl_assert((a % 8) == 0 && len > 0);
/* Once aligned, go fast. */
while (True) {
@ -367,7 +367,7 @@ static void set_address_range_perms ( Addr a, SizeT len,
VGP_POPCC(VgpSetMem);
return;
}
sk_assert((a % 8) == 0 && len > 0 && len < 8);
tl_assert((a % 8) == 0 && len > 0 && len < 8);
/* Finish the upper fragment. */
while (True) {
@ -383,7 +383,7 @@ static void set_address_range_perms ( Addr a, SizeT len,
/* Check that zero page and highest page have not been written to
-- this could happen with buggy syscall wrappers. Today
(2001-04-26) had precisely such a problem with __NR_setitimer. */
sk_assert(SK_(cheap_sanity_check)());
tl_assert(SK_(cheap_sanity_check)());
VGP_POPCC(VgpSetMem);
}
@ -699,7 +699,7 @@ void mc_check_is_readable_asciiz ( CorePart part, ThreadId tid,
VGP_PUSHCC(VgpCheckMem);
sk_assert(part == Vg_CoreSysCall);
tl_assert(part == Vg_CoreSysCall);
res = mc_check_readable_asciiz ( (Addr)str, &bad_addr );
if (MC_Ok != res) {
Bool isUnaddr = ( MC_AddrErr == res ? True : False );
@ -768,7 +768,7 @@ static void mc_pre_reg_read(CorePart part, ThreadId tid, Char* s, UInt reg,
UWord mask;
// XXX: the only one at the moment
sk_assert(Vg_CoreSysCall == part);
tl_assert(Vg_CoreSysCall == part);
switch (size) {
case 4: mask = 0xffffffff; break;
@ -1009,7 +1009,7 @@ static UInt mc_rd_V4_SLOWLY ( Addr a )
(which is the default), and the address is 4-aligned.
If not, Case 2 will have applied.
*/
sk_assert(MAC_(clo_partial_loads_ok));
tl_assert(MAC_(clo_partial_loads_ok));
{
UInt vw = VGM_WORD_INVALID;
vw <<= 8; vw |= (a3ok ? vb3 : VGM_BYTE_INVALID);
@ -1467,7 +1467,7 @@ static Int mc_get_or_set_vbits_for_client (
static
Bool mc_is_valid_64k_chunk ( UInt chunk_number )
{
sk_assert(chunk_number >= 0 && chunk_number < 65536);
tl_assert(chunk_number >= 0 && chunk_number < 65536);
if (IS_DISTINGUISHED_SM(primary_map[chunk_number])) {
/* Definitely not in use. */
return False;
@ -1484,7 +1484,7 @@ Bool mc_is_valid_address ( Addr a )
{
UInt vbytes;
UChar abits;
sk_assert(IS_ALIGNED4_ADDR(a));
tl_assert(IS_ALIGNED4_ADDR(a));
abits = get_abits4_ALIGNED(a);
vbytes = get_vbytes4_ALIGNED(a);
if (abits == VGM_NIBBLE_VALID && vbytes == VGM_WORD_VALID) {
@ -1554,7 +1554,7 @@ static void uint_to_bits ( UInt x, Char* str )
str[w++] = ' ';
}
str[w++] = 0;
sk_assert(w == 36);
tl_assert(w == 36);
}
/* Caution! Not vthread-safe; looks in VG_(baseBlock), not the thread
@ -1723,7 +1723,7 @@ Int vg_alloc_client_block ( void )
}
/* Ok, we have to allocate a new one. */
sk_assert(vg_cgb_used == vg_cgb_size);
tl_assert(vg_cgb_used == vg_cgb_size);
sz_new = (vg_cgbs == NULL) ? 10 : (2 * vg_cgb_size);
cgbs_new = VG_(malloc)( sz_new * sizeof(CGenBlock) );
@ -1880,7 +1880,7 @@ Bool SK_(handle_client_request) ( ThreadId tid, UWord* arg, UWord* ret )
if (vg_cgbs == NULL
|| arg[2] >= vg_cgb_used || vg_cgbs[arg[2]].kind == CG_NotInUse)
return 1;
sk_assert(arg[2] >= 0 && arg[2] < vg_cgb_used);
tl_assert(arg[2] >= 0 && arg[2] < vg_cgb_used);
vg_cgbs[arg[2]].kind = CG_NotInUse;
vg_cgb_discards++;
*ret = 0;


@ -509,7 +509,7 @@ static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
goto literal;
break;
case LEA1:
sk_assert(u_in->size == 4);
tl_assert(u_in->size == 4);
goto literal;
default:
break;
@ -550,14 +550,14 @@ static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
all this is that instrumentation of USESEG is a no-op! */
case PUTSEG:
sk_assert(u_in->tag1 == TempReg);
tl_assert(u_in->tag1 == TempReg);
uInstr1(cb, TESTV, 2, TempReg, SHADOW(u_in->val1));
uInstr1(cb, SETV, 2, TempReg, SHADOW(u_in->val1));
VG_(copy_UInstr)(cb, u_in);
break;
case GETSEG:
sk_assert(u_in->tag2 == TempReg);
tl_assert(u_in->tag2 == TempReg);
uInstr1(cb, SETV, 2, TempReg, SHADOW(u_in->val2));
VG_(copy_UInstr)(cb, u_in);
break;
@ -647,7 +647,7 @@ static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
Therefore: lea1#(qa) = left(qa)
*/
case LEA1:
sk_assert(u_in->size == 4 && !VG_(any_flag_use)(u_in));
tl_assert(u_in->size == 4 && !VG_(any_flag_use)(u_in));
qs = SHADOW(u_in->val1);
qd = SHADOW(u_in->val2);
uInstr2(cb, MOV, 4, TempReg, qs, TempReg, qd);
@ -665,7 +665,7 @@ static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
*/
case LEA2: {
Int shift;
sk_assert(u_in->size == 4 && !VG_(any_flag_use)(u_in));
tl_assert(u_in->size == 4 && !VG_(any_flag_use)(u_in));
switch (u_in->extra4b) {
case 1: shift = 0; break;
case 2: shift = 1; break;
@ -706,7 +706,7 @@ static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
the eflags.
*/
case RCL: case RCR:
sk_assert(u_in->flags_r != FlagsEmpty);
tl_assert(u_in->flags_r != FlagsEmpty);
/* The following assertion looks like it makes sense, but is
actually wrong. Consider this:
rcll %eax
@ -715,7 +715,7 @@ static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
write of the rcll is annulled by the prior improvement pass.
Noticed by Kevin Ryde <user42@zip.com.au>
*/
/* sk_assert(u_in->flags_w != FlagsEmpty); */
/* tl_assert(u_in->flags_w != FlagsEmpty); */
qs = getOperandShadow(cb, 1, u_in->tag1, u_in->val1);
/* We can safely modify qs; cast it to 0-size. */
create_PCast(cb, 1, 0, qs);
@ -753,8 +753,8 @@ static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
case SHR: case SAR:
case ROL: case ROR: {
Int t_amount = INVALID_TEMPREG;
sk_assert(u_in->tag1 == TempReg || u_in->tag1 == Literal);
sk_assert(u_in->tag2 == TempReg);
tl_assert(u_in->tag1 == TempReg || u_in->tag1 == Literal);
tl_assert(u_in->tag2 == TempReg);
qd = SHADOW(u_in->val2);
/* Make qs hold shift-count# and make
@ -784,7 +784,7 @@ static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
/* One simple tag operation. */
case WIDEN:
sk_assert(u_in->tag1 == TempReg);
tl_assert(u_in->tag1 == TempReg);
create_Widen(cb, u_in->signed_widen, u_in->extra4b, u_in->size,
SHADOW(u_in->val1));
VG_(copy_UInstr)(cb, u_in);
@ -792,21 +792,21 @@ static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
/* not#(x) = x (since bitwise independent) */
case NOT:
sk_assert(u_in->tag1 == TempReg);
tl_assert(u_in->tag1 == TempReg);
VG_(copy_UInstr)(cb, u_in);
break;
/* neg#(x) = left(x) (derivable from case for SUB) */
case NEG:
sk_assert(u_in->tag1 == TempReg);
tl_assert(u_in->tag1 == TempReg);
create_Left(cb, u_in->size, SHADOW(u_in->val1));
VG_(copy_UInstr)(cb, u_in);
break;
/* bswap#(x) = bswap(x) */
case BSWAP:
sk_assert(u_in->tag1 == TempReg);
sk_assert(u_in->size == 4);
tl_assert(u_in->tag1 == TempReg);
tl_assert(u_in->size == 4);
qd = SHADOW(u_in->val1);
uInstr1(cb, BSWAP, 4, TempReg, qd);
VG_(copy_UInstr)(cb, u_in);
@ -814,8 +814,8 @@ static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
/* cc2val#(qd) = pcast-0-to-size(eflags#) */
case CC2VAL:
sk_assert(u_in->tag1 == TempReg);
sk_assert(u_in->flags_r != FlagsEmpty);
tl_assert(u_in->tag1 == TempReg);
tl_assert(u_in->flags_r != FlagsEmpty);
qt = create_GETVF(cb, u_in->size);
uInstr2(cb, MOV, 4, TempReg, qt, TempReg, SHADOW(u_in->val1));
VG_(copy_UInstr)(cb, u_in);
@ -827,11 +827,11 @@ static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
validity of the flags.
*/
case CMOV:
sk_assert(u_in->size == 4);
sk_assert(u_in->tag1 == TempReg);
sk_assert(u_in->tag2 == TempReg);
sk_assert(u_in->flags_r != FlagsEmpty);
sk_assert(u_in->flags_w == FlagsEmpty);
tl_assert(u_in->size == 4);
tl_assert(u_in->tag1 == TempReg);
tl_assert(u_in->tag2 == TempReg);
tl_assert(u_in->flags_r != FlagsEmpty);
tl_assert(u_in->flags_w == FlagsEmpty);
qs = SHADOW(u_in->val1);
qd = SHADOW(u_in->val2);
qt = create_GETVF(cb, 0);
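/* Illustrative sketch (not part of this change): a conservative shadow rule
   for CMOV of the kind the code above appears to build.  At instrumentation
   time we do not know whether the move will happen, so the destination's
   definedness is the worst case of both operands, further degraded by the
   definedness of the condition flags:
      qd# = (qs# UifU qd#) UifU pcast(eflags#)
   UifU is bitwise OR of shadows (undefined if either is); pcast is the
   pessimising cast sketched earlier.  0 = defined, 1 = undefined; helper
   invented for illustration. */
static unsigned int sketch_cmov_shadow ( unsigned int qs, unsigned int qd,
                                         unsigned int eflags_vbits )
{
   unsigned int pcast_flags = (eflags_vbits == 0) ? 0u : ~0u;
   return qs | qd | pcast_flags;
}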
@ -864,7 +864,7 @@ static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
create_UifU(cb, u_in->size, qs, qd);
create_Left(cb, u_in->size, qd);
if (u_in->opcode == ADC || u_in->opcode == SBB) {
sk_assert(u_in->flags_r != FlagsEmpty);
tl_assert(u_in->flags_r != FlagsEmpty);
qt = create_GETVF(cb, u_in->size);
create_UifU(cb, u_in->size, qt, qd);
}
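/* Illustrative sketch (not part of this change) of the add/subtract rule the
   code above implements.  The operand shadows are UifU'd (bitwise OR'd)
   together, then left()-smeared because carries/borrows let an undefined low
   bit corrupt every higher result bit; for ADC/SBB the flag shadow obtained
   via GETVF (already cast to the operand size) is UifU'd in as well, since
   the carry-in also feeds the sum.  0 = defined, 1 = undefined; helper and
   parameter names invented for illustration. */
static unsigned int sketch_add_shadow ( unsigned int qs, unsigned int qd,
                                        unsigned int carryflag_shadow,
                                        int uses_carry /* ADC or SBB */ )
{
   unsigned int q = qs | qd;           /* UifU of the operand shadows      */
   q = q | (0u - q);                   /* left(): smear upwards            */
   if (uses_carry)
      q |= carryflag_shadow;           /* UifU with the carry-flag shadow  */
   return q;
}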
@ -898,8 +898,8 @@ static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
qd = qt `DifD` qd
*/
case AND: case OR:
sk_assert(u_in->tag1 == TempReg);
sk_assert(u_in->tag2 == TempReg);
tl_assert(u_in->tag1 == TempReg);
tl_assert(u_in->tag2 == TempReg);
qd = SHADOW(u_in->val2);
qs = SHADOW(u_in->val1);
qt = newShadow(cb);
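/* Illustrative sketch (not part of this change): the net effect of the
   AND/OR improvement rule that the UifU/DifD/Improve steps above build up,
   written out as flat bit operations.  For AND, a result bit is defined if
   both inputs are defined, or if either input is a *defined 0* (a defined 0
   forces the result regardless of the other bit); dually, for OR a defined 1
   forces the result.  d1/d2 are the real data values, v1/v2 their shadows
   (0 = defined, 1 = undefined).  The flattened formulas and helper names are
   reconstructions for illustration, not code from this file. */
static unsigned int sketch_and_shadow ( unsigned int d1, unsigned int v1,
                                        unsigned int d2, unsigned int v2 )
{
   return (v1 & v2) | (v1 & d2) | (d1 & v2);
}
static unsigned int sketch_or_shadow ( unsigned int d1, unsigned int v1,
                                       unsigned int d2, unsigned int v2 )
{
   return (v1 & v2) | (v1 & ~d2) | (~d1 & v2);
}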
@ -1022,10 +1022,10 @@ static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
uInstr1(cb, TESTV, 4, TempReg, SHADOW(u_in->val1));
uInstr1(cb, SETV, 4, TempReg, SHADOW(u_in->val1));
} else {
sk_assert(u_in->tag1 == Literal);
tl_assert(u_in->tag1 == Literal);
}
if (u_in->cond != CondAlways) {
sk_assert(u_in->flags_r != FlagsEmpty);
tl_assert(u_in->flags_r != FlagsEmpty);
qt = create_GETVF(cb, 0);
if (/* HACK */ bogusLiterals) {
if (0)
@ -1054,10 +1054,10 @@ static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
Bool is_load;
if (u_in->opcode == MMX2_MemRd || u_in->opcode == MMX2_MemWr)
sk_assert(u_in->size == 4 || u_in->size == 8);
tl_assert(u_in->size == 4 || u_in->size == 8);
is_load = u_in->opcode==FPU_R || u_in->opcode==MMX2_MemRd;
sk_assert(u_in->tag2 == TempReg);
tl_assert(u_in->tag2 == TempReg);
uInstr1(cb, TESTV, 4, TempReg, SHADOW(u_in->val2));
uInstr1(cb, SETV, 4, TempReg, SHADOW(u_in->val2));
@ -1076,9 +1076,9 @@ static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
case MMX2a1_MemRd: {
Int t_size = INVALID_TEMPREG;
sk_assert(u_in->size == 8);
tl_assert(u_in->size == 8);
sk_assert(u_in->tag3 == TempReg);
tl_assert(u_in->tag3 == TempReg);
uInstr1(cb, TESTV, 4, TempReg, SHADOW(u_in->val3));
uInstr1(cb, SETV, 4, TempReg, SHADOW(u_in->val3));
@ -1101,12 +1101,12 @@ static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
case SSE3e_RegWr:
case SSE3g1_RegWr:
case SSE3e1_RegRd:
sk_assert(u_in->tag3 == TempReg);
tl_assert(u_in->tag3 == TempReg);
if (u_in->opcode == SSE2e1_RegRd || u_in->opcode == SSE3e1_RegRd) {
sk_assert(u_in->size == 2);
tl_assert(u_in->size == 2);
} else {
sk_assert(u_in->size == 4);
tl_assert(u_in->size == 4);
}
/* Is it a read ? Better check the V bits right now. */
@ -1134,7 +1134,7 @@ static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
Bool is_load;
Int t_size;
sk_assert(u_in->size == 4 || u_in->size == 8
tl_assert(u_in->size == 4 || u_in->size == 8
|| u_in->size == 16 || u_in->size == 512);
t_size = INVALID_TEMPREG;
@ -1143,7 +1143,7 @@ static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
|| u_in->opcode==SSE2a1_MemRd
|| u_in->opcode==SSE3a1_MemRd;
sk_assert(u_in->tag3 == TempReg);
tl_assert(u_in->tag3 == TempReg);
uInstr1(cb, TESTV, 4, TempReg, SHADOW(u_in->val3));
uInstr1(cb, SETV, 4, TempReg, SHADOW(u_in->val3));
@ -1163,8 +1163,8 @@ static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
{
Int t_size;
sk_assert(u_in->size == 4 || u_in->size == 8);
sk_assert(u_in->tag1 == TempReg);
tl_assert(u_in->size == 4 || u_in->size == 8);
tl_assert(u_in->tag1 == TempReg);
uInstr1(cb, TESTV, 4, TempReg, SHADOW(u_in->val1));
uInstr1(cb, SETV, 4, TempReg, SHADOW(u_in->val1));
t_size = newTemp(cb);
@ -1203,8 +1203,8 @@ static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
MMX state, we'd better check that the (int) reg being
read here is defined. */
case MMX2_ERegRd:
sk_assert(u_in->tag2 == TempReg);
sk_assert(u_in->size == 4);
tl_assert(u_in->tag2 == TempReg);
tl_assert(u_in->size == 4);
uInstr1(cb, TESTV, 4, TempReg, SHADOW(u_in->val2));
uInstr1(cb, SETV, 4, TempReg, SHADOW(u_in->val2));
VG_(copy_UInstr)(cb, u_in);
@ -1213,8 +1213,8 @@ static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
/* The MMX register is assumed to be fully defined, so
that's what this register becomes. */
case MMX2_ERegWr:
sk_assert(u_in->tag2 == TempReg);
sk_assert(u_in->size == 4);
tl_assert(u_in->tag2 == TempReg);
tl_assert(u_in->size == 4);
uInstr1(cb, SETV, 4, TempReg, SHADOW(u_in->val2));
VG_(copy_UInstr)(cb, u_in);
break;
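/* Illustrative sketch (not part of this change) of the TESTV/SETV pairing
   used in the read case above and throughout this function: TESTV checks the
   temp's shadow at run time and reports an error if it is not fully defined;
   the SETV that follows marks the temp defined, so one bad value yields one
   report rather than a cascade.  The write case (MMX2_ERegWr) needs no check,
   only the lone SETV, since the register simply becomes defined.  Names are
   invented for illustration. */
static int sketch_testv_then_setv ( unsigned int* shadow_of_temp )
{
   int complain = (*shadow_of_temp != 0);  /* TESTV: would report an error  */
   *shadow_of_temp = 0;                    /* SETV: defined from now on     */
   return complain;
}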
@ -1293,7 +1293,7 @@ static void vg_delete_redundant_SETVs ( UCodeBlock* cb )
if (u->opcode == GETV && VGC_IS_SHADOW(u->val2)
&& next_is_write[u->val2]) {
sk_assert(u->val2 < n_temps);
tl_assert(u->val2 < n_temps);
VG_(new_NOP)(u);
if (dis)
VG_(printf)(" at %2d: delete GETV\n", i);
@ -1301,7 +1301,7 @@ static void vg_delete_redundant_SETVs ( UCodeBlock* cb )
if (u->opcode == TAG1 && VGC_IS_SHADOW(u->val1)
&& next_is_write[u->val1]) {
sk_assert(u->val1 < n_temps);
tl_assert(u->val1 < n_temps);
VG_(new_NOP)(u);
if (dis)
VG_(printf)(" at %2d: delete TAG1\n", i);
@ -1309,7 +1309,7 @@ static void vg_delete_redundant_SETVs ( UCodeBlock* cb )
if (u->opcode == TAG2 && VGC_IS_SHADOW(u->val2)
&& next_is_write[u->val2]) {
sk_assert(u->val2 < n_temps);
tl_assert(u->val2 < n_temps);
VG_(new_NOP)(u);
if (dis)
VG_(printf)(" at %2d: delete TAG2\n", i);
@ -1329,7 +1329,7 @@ static void vg_delete_redundant_SETVs ( UCodeBlock* cb )
if (u->opcode == SETV) {
if (u->tag1 == TempReg) {
sk_assert(VGC_IS_SHADOW(u->val1));
tl_assert(VGC_IS_SHADOW(u->val1));
if (next_is_write[u->val1]) {
/* This write is pointless, so annul it. */
VG_(new_NOP)(u);
@ -1346,7 +1346,7 @@ static void vg_delete_redundant_SETVs ( UCodeBlock* cb )
} else {
/* Find out what this insn does to the temps. */
k = VG_(get_reg_usage)(u, TempReg, &tempUse[0], &isWrites[0]);
sk_assert(0 <= k && k <= VG_MAX_REGS_USED);
tl_assert(0 <= k && k <= VG_MAX_REGS_USED);
for (j = k-1; j >= 0; j--) {
next_is_write[ tempUse[j] ] = isWrites[j];
}
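/* Illustrative sketch (not part of this change) of the backward pass this
   loop performs.  Walking the block from last uinstr to first, it keeps, per
   shadow temp, a flag saying "the next thing that happens to this temp, later
   in the block, is a write".  Any GETV/TAG1/TAG2/SETV whose result temp has
   that flag set is dead -- its value can never be read -- and is turned into
   a NOP.  Writes set the flag, reads clear it, which is what the
   get_reg_usage loop above maintains.  A stripped-down version over invented
   per-insn records might look like: */
typedef struct { int writes_temp; int reads_temp; int is_shadow_write; int nopped; }
        SketchInsn;
static void sketch_delete_dead_shadow_writes ( SketchInsn* code, int n,
                                               int n_temps,
                                               unsigned char* next_is_write )
{
   int i, t;
   for (t = 0; t < n_temps; t++)
      next_is_write[t] = 1;                     /* nothing is read after the end */
   for (i = n-1; i >= 0; i--) {
      if (code[i].is_shadow_write && code[i].writes_temp >= 0
          && next_is_write[code[i].writes_temp]) {
         code[i].nopped = 1;                    /* stand-in for VG_(new_NOP)     */
         continue;                              /* a NOP touches no temps        */
      }
      /* Record the read last so that, for a temp both read and written here,
         earlier insns see "next event is a read" -- mirroring the reverse
         get_reg_usage walk above. */
      if (code[i].writes_temp >= 0) next_is_write[code[i].writes_temp] = 1;
      if (code[i].reads_temp  >= 0) next_is_write[code[i].reads_temp]  = 0;
   }
}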
@ -1401,15 +1401,15 @@ static void vg_propagate_definedness ( UCodeBlock* cb )
/* Make a tag defined. */
case SETV:
sk_assert(u->tag1 == TempReg && VGC_IS_SHADOW(u->val1));
tl_assert(u->tag1 == TempReg && VGC_IS_SHADOW(u->val1));
def[u->val1] = u->size;
break;
/* Check definedness of a tag. */
case TESTV:
sk_assert(u->tag1 == TempReg && VGC_IS_SHADOW(u->val1));
tl_assert(u->tag1 == TempReg && VGC_IS_SHADOW(u->val1));
if (def[u->val1] <= 4) {
sk_assert(def[u->val1] == u->size);
tl_assert(def[u->val1] == u->size);
NOP_no_msg(u);
if (dis)
VG_(printf)(" at %2d: delete TESTV on defd arg\n", i);
@ -1420,19 +1420,19 @@ static void vg_propagate_definedness ( UCodeBlock* cb )
property through copies. Note that this isn't optional;
we *have* to do this to keep def[] correct. */
case MOV:
sk_assert(u->tag2 == TempReg);
tl_assert(u->tag2 == TempReg);
if (u->tag1 == TempReg) {
if (VGC_IS_SHADOW(u->val1)) {
sk_assert(VGC_IS_SHADOW(u->val2));
tl_assert(VGC_IS_SHADOW(u->val2));
def[u->val2] = def[u->val1];
}
}
break;
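/* Illustrative sketch (not part of this change) of the def[] lattice this
   pass maintains.  For each shadow temp, def[t] <= 4 means "known at
   instrumentation time to be all zeroes, i.e. fully defined, with size
   def[t]"; any larger value means "unknown".  SETV makes a temp known-defined
   at its size, TESTV on a known-defined temp is redundant and can be deleted,
   and a MOV between shadow temps must copy the fact, as the case above shows,
   or def[] would claim knowledge it no longer has.  Names and the sentinel
   value are invented for illustration. */
enum { SKETCH_UNKNOWN = 100 };                                /* anything > 4  */
static void sketch_setv ( int* def, int t, int size )        { def[t] = size; }
static int  sketch_testv_redundant ( const int* def, int t ) { return def[t] <= 4; }
static void sketch_mov  ( int* def, int s, int d )           { def[d] = def[s]; }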
case PUTV:
sk_assert(u->tag1 == TempReg && VGC_IS_SHADOW(u->val1));
tl_assert(u->tag1 == TempReg && VGC_IS_SHADOW(u->val1));
if (def[u->val1] <= 4) {
sk_assert(def[u->val1] == u->size);
tl_assert(def[u->val1] == u->size);
u->tag1 = Literal;
u->val1 = 0;
switch (u->size) {
@ -1448,9 +1448,9 @@ static void vg_propagate_definedness ( UCodeBlock* cb )
break;
case STOREV:
sk_assert(u->tag1 == TempReg && VGC_IS_SHADOW(u->val1));
tl_assert(u->tag1 == TempReg && VGC_IS_SHADOW(u->val1));
if (def[u->val1] <= 4) {
sk_assert(def[u->val1] == u->size);
tl_assert(def[u->val1] == u->size);
u->tag1 = Literal;
u->val1 = 0;
switch (u->size) {
@ -1471,17 +1471,17 @@ static void vg_propagate_definedness ( UCodeBlock* cb )
/* Tag handling operations. */
case TAG2:
sk_assert(u->tag2 == TempReg && VGC_IS_SHADOW(u->val2));
sk_assert(u->tag3 == Lit16);
tl_assert(u->tag2 == TempReg && VGC_IS_SHADOW(u->val2));
tl_assert(u->tag3 == Lit16);
/* Ultra-paranoid "type" checking. */
switch (u->val3) {
case Tag_ImproveAND4_TQ: case Tag_ImproveAND2_TQ:
case Tag_ImproveAND1_TQ: case Tag_ImproveOR4_TQ:
case Tag_ImproveOR2_TQ: case Tag_ImproveOR1_TQ:
sk_assert(u->tag1 == TempReg && !VGC_IS_SHADOW(u->val1));
tl_assert(u->tag1 == TempReg && !VGC_IS_SHADOW(u->val1));
break;
default:
sk_assert(u->tag1 == TempReg && VGC_IS_SHADOW(u->val1));
tl_assert(u->tag1 == TempReg && VGC_IS_SHADOW(u->val1));
break;
}
switch (u->val3) {
@ -1495,12 +1495,12 @@ static void vg_propagate_definedness ( UCodeBlock* cb )
case Tag_UifU0:
sz = 0; goto do_UifU;
do_UifU:
sk_assert(u->tag1 == TempReg && VGC_IS_SHADOW(u->val1));
sk_assert(u->tag2 == TempReg && VGC_IS_SHADOW(u->val2));
tl_assert(u->tag1 == TempReg && VGC_IS_SHADOW(u->val1));
tl_assert(u->tag2 == TempReg && VGC_IS_SHADOW(u->val2));
if (def[u->val1] <= 4) {
/* UifU. The first arg is defined, so result is
simply second arg. Delete this operation. */
sk_assert(def[u->val1] == sz);
tl_assert(def[u->val1] == sz);
NOP_no_msg(u);
if (dis)
VG_(printf)(
@ -1511,7 +1511,7 @@ static void vg_propagate_definedness ( UCodeBlock* cb )
if (def[u->val2] <= 4) {
/* UifU. The second arg is defined, so result is
simply first arg. Copy to second. */
sk_assert(def[u->val2] == sz);
tl_assert(def[u->val2] == sz);
u->opcode = MOV;
u->size = 4;
u->tag3 = NoValue;
@ -1531,7 +1531,7 @@ static void vg_propagate_definedness ( UCodeBlock* cb )
/* Implements Q = T AND Q. So if Q is entirely defined,
ie all 0s, we get MOV T, Q. */
if (def[u->val2] <= 4) {
sk_assert(def[u->val2] == sz);
tl_assert(def[u->val2] == sz);
u->size = 4; /* Regardless of sz */
u->opcode = MOV;
u->tag3 = NoValue;
@ -1549,7 +1549,7 @@ static void vg_propagate_definedness ( UCodeBlock* cb )
break;
case TAG1:
sk_assert(u->tag1 == TempReg && VGC_IS_SHADOW(u->val1));
tl_assert(u->tag1 == TempReg && VGC_IS_SHADOW(u->val1));
if (def[u->val1] > 4) break;
/* We now know that the arg to the op is entirely defined.
If the op changes the size of the arg, we must replace
@ -1558,36 +1558,36 @@ static void vg_propagate_definedness ( UCodeBlock* cb )
switch (u->val3) {
/* Maintain the same size ... */
case Tag_Left4:
sk_assert(def[u->val1] == 4);
tl_assert(def[u->val1] == 4);
NOP_tag1_op(u);
break;
case Tag_PCast11:
sk_assert(def[u->val1] == 1);
tl_assert(def[u->val1] == 1);
NOP_tag1_op(u);
break;
/* Change size ... */
case Tag_PCast40:
sk_assert(def[u->val1] == 4);
tl_assert(def[u->val1] == 4);
SETV_tag1_op(u,0);
def[u->val1] = 0;
break;
case Tag_PCast14:
sk_assert(def[u->val1] == 1);
tl_assert(def[u->val1] == 1);
SETV_tag1_op(u,4);
def[u->val1] = 4;
break;
case Tag_PCast12:
sk_assert(def[u->val1] == 1);
tl_assert(def[u->val1] == 1);
SETV_tag1_op(u,2);
def[u->val1] = 2;
break;
case Tag_PCast10:
sk_assert(def[u->val1] == 1);
tl_assert(def[u->val1] == 1);
SETV_tag1_op(u,0);
def[u->val1] = 0;
break;
case Tag_PCast02:
sk_assert(def[u->val1] == 0);
tl_assert(def[u->val1] == 0);
SETV_tag1_op(u,2);
def[u->val1] = 2;
break;
@ -1605,10 +1605,10 @@ static void vg_propagate_definedness ( UCodeBlock* cb )
/* We don't know how to handle this uinstr. Be safe, and
set to VGC_VALUE or VGC_UNDEF all temps written by it. */
k = VG_(get_reg_usage)(u, TempReg, &tempUse[0], &isWrites[0]);
sk_assert(0 <= k && k <= VG_MAX_REGS_USED);
tl_assert(0 <= k && k <= VG_MAX_REGS_USED);
for (j = 0; j < k; j++) {
t = tempUse[j];
sk_assert(t >= 0 && t < n_temps);
tl_assert(t >= 0 && t < n_temps);
if (!isWrites[j]) {
/* t is read; ignore it. */
if (0&& VGC_IS_SHADOW(t) && def[t] <= 4)