Something I realised recently: in C, iterators are much better than
higher-order functions for traversing data structures.  The higher-order
approach is too clumsy due to the lack of polymorphism and closures; you
have to use void* too much, and it is more verbose than it should be.
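
To see why, here is a minimal sketch (illustrative only, not Valgrind code;
every name in it is made up) of the two traversal styles for a toy
singly-linked list:

#include <stddef.h>

typedef struct Node { int val; struct Node* next; } Node;

/* Higher-order style: C has no closures, so any state the predicate needs
   must be smuggled through an untyped void* and cast back inside it. */
static Node* list_first_match ( Node* head,
                                int (*pred)(Node* n, void* arg), void* arg )
{
   Node* n;
   for (n = head; n != NULL; n = n->next)
      if (pred(n, arg))
         return n;
   return NULL;
}

static int val_equals ( Node* n, void* arg )
{
   return n->val == *(int*)arg;      /* the unavoidable void* cast */
}

/* Iterator style: the loop body stays at the call site, so it can read
   local variables directly -- no callback, no void*, no casts. */
typedef struct { Node* cur; } ListIter;

static void  list_iter_init ( ListIter* it, Node* head ) { it->cur = head; }
static Node* list_iter_next ( ListIter* it )
{
   Node* n = it->cur;
   if (n != NULL) it->cur = n->next;
   return n;
}

static Node* find_with_iter ( Node* head, int target )
{
   ListIter it;
   Node*    n;
   list_iter_init(&it, head);
   while ( (n = list_iter_next(&it)) != NULL )
      if (n->val == target)          /* 'target' is an ordinary local */
         return n;
   return NULL;
}

The callback version of the same search is
list_first_match(head, val_equals, &target): two extra functions and a cast
for what the iterator expresses as a three-line loop.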

Hence, I replaced all uses of HT_first_match() and HT_apply_to_all_nodes()
with equivalent uses of the hashtable iterator.  I also replaced the
higher-order traversal functions for Memcheck's freed-list and the thread
stacks with iterators.  That last change alters the core/tool interface,
so I've bumped the interface version number.
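
For reference, the call-site pattern for the new iterators is roughly the
following (a condensed sketch of the call sites in the hunks below; the
loop bodies are placeholders):

/* Hashtable traversal: visit every MAC_Chunk in a table. */
MAC_Chunk* mc;
VG_(HT_ResetIter)(MAC_(malloc_list));
while ( (mc = VG_(HT_Next)(MAC_(malloc_list))) ) {
   /* ... examine mc ... */
}

/* Thread stacks: visit each live thread's stack bounds. */
ThreadId tid;
Addr stack_min, stack_max;
VG_(thread_stack_reset_iter)();
while ( VG_(thread_stack_next)(&tid, &stack_min, &stack_max) ) {
   /* ... use [stack_min, stack_max] for thread tid ... */
}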







git-svn-id: svn://svn.valgrind.org/valgrind/trunk@4415
Nicholas Nethercote 2005-08-15 01:52:02 +00:00
parent 20502a617e
commit fcdc60441e
8 changed files with 102 additions and 141 deletions


@@ -178,24 +178,27 @@ void VG_(apply_to_GP_regs)(void (*f)(UWord))
}
}
// Try and identify a thread whose stack satisfies the predicate p, or
// return VG_INVALID_THREADID if none do.
ThreadId VG_(first_matching_thread_stack)
( Bool (*p) ( Addr stack_min, Addr stack_max, void* d ),
void* d )
static ThreadId thread_stack_iter = VG_INVALID_THREADID;
void VG_(thread_stack_reset_iter)(void)
{
ThreadId tid;
for (tid = 1; tid < VG_N_THREADS; tid++) {
if (VG_(threads)[tid].status == VgTs_Empty) continue;
if ( p ( VG_(get_SP)(tid),
VG_(threads)[tid].client_stack_highest_word, d ) )
return tid;
}
return VG_INVALID_THREADID;
thread_stack_iter = 1;
}
Bool VG_(thread_stack_next)(ThreadId* tid, Addr* stack_min, Addr* stack_max)
{
ThreadId i;
for (i = thread_stack_iter; i < VG_N_THREADS; i++) {
if (VG_(threads)[i].status != VgTs_Empty) {
*tid = i;
*stack_min = VG_(get_SP)(i);
*stack_max = VG_(threads)[i].client_stack_highest_word;
thread_stack_iter = i + 1;
return True;
}
}
return False;
}
//////////////////////////////////////////////////////////////////
// Architecture specifics


@@ -70,11 +70,11 @@ extern void VG_(set_shadow_regs_area) ( ThreadId tid, OffT guest_state_offset,
// doing leak checking.
extern void VG_(apply_to_GP_regs)(void (*f)(UWord val));
// Searches through all thread stacks to see if any match. Returns
// VG_INVALID_THREADID if none match.
extern ThreadId VG_(first_matching_thread_stack)
( Bool (*p) ( Addr stack_min, Addr stack_max, void* d ),
void* d );
// This iterator lets you inspect each live thread's stack bounds. The
// params are all 'out' params. Returns False at the end.
extern void VG_(thread_stack_reset_iter) ( void );
extern Bool VG_(thread_stack_next) ( ThreadId* tid, Addr* stack_min,
Addr* stack_max );
#endif // __PUB_TOOL_MACHINE_H


@@ -40,7 +40,7 @@
/* The version number indicates binary-incompatible changes to the
interface; if the core and tool versions don't match, Valgrind
will abort. */
#define VG_CORE_INTERFACE_VERSION 8
#define VG_CORE_INTERFACE_VERSION 9
typedef struct _ToolInfo {
Int sizeof_ToolInfo;


@@ -835,13 +835,6 @@ static void* ms_realloc ( ThreadId tid, void* p_old, SizeT new_size )
static Census censi[MAX_N_CENSI];
static UInt curr_census = 0;
// Must return False so that all stacks are traversed
static Bool count_stack_size( Addr stack_min, Addr stack_max, void *cp )
{
*(UInt *)cp += (stack_max - stack_min);
return False;
}
static UInt get_xtree_size(XPt* xpt, UInt ix)
{
UInt i;
@@ -1065,9 +1058,13 @@ static void hp_census(void)
// Stack(s) ---------------------------------------------------------
if (clo_stacks) {
ThreadId tid;
Addr stack_min, stack_max;
census->stacks_space = sigstacks_space;
// slightly abusing this function
VG_(first_matching_thread_stack)( count_stack_size, &census->stacks_space );
VG_(thread_stack_reset_iter)();
while ( VG_(thread_stack_next)(&tid, &stack_min, &stack_max) ) {
census->stacks_space += (stack_max - stack_min);
}
}
// Finish, update interval if necessary -----------------------------


@@ -124,19 +124,9 @@ static void add_to_freed_queue ( MAC_Chunk* mc )
}
}
/* Return the first shadow chunk satisfying the predicate p. */
MAC_Chunk* MAC_(first_matching_freed_MAC_Chunk) ( Bool (*p)(MAC_Chunk*, void*),
void* d )
MAC_Chunk* MAC_(get_freed_list_head)(void)
{
MAC_Chunk* mc;
/* No point looking through freed blocks if we're not keeping
them around for a while... */
for (mc = freed_list_start; mc != NULL; mc = mc->next)
if (p(mc, d))
return mc;
return NULL;
return freed_list_start;
}
/* Allocate its shadow chunk, put it on the appropriate list. */
@@ -447,20 +437,9 @@ void MAC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed)
}
static void destroy_mempool_nuke_chunk(VgHashNode *node, void *d)
{
MAC_Chunk *mc = (MAC_Chunk *)node;
MAC_Mempool *mp = (MAC_Mempool *)d;
/* Note: ban redzones again -- just in case user de-banned them
with a client request... */
MAC_(ban_mem_heap)(mc->data-mp->rzB, mp->rzB );
MAC_(die_mem_heap)(mc->data, mc->size );
MAC_(ban_mem_heap)(mc->data+mc->size, mp->rzB );
}
void MAC_(destroy_mempool)(Addr pool)
{
MAC_Chunk* mc;
MAC_Mempool* mp;
mp = VG_(HT_remove) ( MAC_(mempool_list), (UWord)pool );
@@ -471,7 +450,16 @@ void MAC_(destroy_mempool)(Addr pool)
return;
}
VG_(HT_apply_to_all_nodes)(mp->chunks, destroy_mempool_nuke_chunk, mp);
// Clean up the chunks, one by one
VG_(HT_ResetIter)(mp->chunks);
while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
/* Note: ban redzones again -- just in case user de-banned them
with a client request... */
MAC_(ban_mem_heap)(mc->data-mp->rzB, mp->rzB );
MAC_(die_mem_heap)(mc->data, mc->size );
MAC_(ban_mem_heap)(mc->data+mc->size, mp->rzB );
}
// Destroy the chunk table
VG_(HT_destruct)(mp->chunks);
VG_(free)(mp);
@@ -517,28 +505,11 @@ void MAC_(mempool_free)(Addr pool, Addr addr)
/*--- Statistics printing ---*/
/*------------------------------------------------------------*/
typedef
struct {
UInt nblocks;
SizeT nbytes;
}
MallocStats;
static void malloc_stats_count_chunk(VgHashNode* node, void* d)
{
MAC_Chunk* mc = (MAC_Chunk*)node;
MallocStats *ms = (MallocStats *)d;
ms->nblocks ++;
ms->nbytes += mc->size;
}
void MAC_(print_malloc_stats) ( void )
{
MallocStats ms;
ms.nblocks = 0;
ms.nbytes = 0;
MAC_Chunk* mc;
UInt nblocks = 0;
SizeT nbytes = 0;
if (VG_(clo_verbosity) == 0)
return;
@@ -546,11 +517,15 @@ void MAC_(print_malloc_stats) ( void )
return;
/* Count memory still in use. */
VG_(HT_apply_to_all_nodes)(MAC_(malloc_list), malloc_stats_count_chunk, &ms);
VG_(HT_ResetIter)(MAC_(malloc_list));
while ( (mc = VG_(HT_Next)(MAC_(malloc_list))) ) {
nblocks++;
nbytes += mc->size;
}
VG_(message)(Vg_UserMsg,
"malloc/free: in use at exit: %d bytes in %d blocks.",
ms.nbytes, ms.nblocks);
nbytes, nblocks);
VG_(message)(Vg_UserMsg,
"malloc/free: %d allocs, %d frees, %u bytes allocated.",
cmalloc_n_mallocs,


@@ -410,35 +410,20 @@ void MAC_(pp_shared_Error) ( Error* err )
MemCheck for user blocks, which Addrcheck doesn't support. */
Bool (*MAC_(describe_addr_supp)) ( Addr a, AddrInfo* ai ) = NULL;
/* Callback for searching thread stacks */
static Bool addr_is_in_bounds(Addr stack_min, Addr stack_max, void *ap)
/* Function used when searching MAC_Chunk lists */
static Bool addr_is_in_MAC_Chunk(MAC_Chunk* mc, Addr a)
{
Addr a = *(Addr *)ap;
return (stack_min <= a && a <= stack_max);
}
/* Callback for searching free'd list */
static Bool addr_is_in_MAC_Chunk(MAC_Chunk* mc, void *ap)
{
Addr a = *(Addr *)ap;
return VG_(addr_is_in_block)( a, mc->data, mc->size,
MAC_MALLOC_REDZONE_SZB );
}
/* Callback for searching malloc'd lists */
static Bool addr_is_in_HashNode(VgHashNode* sh_ch, void *ap)
{
return addr_is_in_MAC_Chunk( (MAC_Chunk*)sh_ch, ap );
}
/* Describe an address as best you can, for error messages,
putting the result in ai. */
static void describe_addr ( Addr a, AddrInfo* ai )
{
MAC_Chunk* mc;
ThreadId tid;
Addr stack_min, stack_max;
/* Perhaps it's a user-def'd block ? (only check if requested, though) */
if (NULL != MAC_(describe_addr_supp)) {
@@ -446,29 +431,36 @@ static void describe_addr ( Addr a, AddrInfo* ai )
return;
}
/* Perhaps it's on a thread's stack? */
tid = VG_(first_matching_thread_stack)(addr_is_in_bounds, &a);
if (tid != VG_INVALID_THREADID) {
ai->akind = Stack;
ai->stack_tid = tid;
return;
VG_(thread_stack_reset_iter)();
while ( VG_(thread_stack_next)(&tid, &stack_min, &stack_max) ) {
if (stack_min <= a && a <= stack_max) {
ai->akind = Stack;
ai->stack_tid = tid;
return;
}
}
/* Search for a recently freed block which might bracket it. */
mc = MAC_(first_matching_freed_MAC_Chunk)(addr_is_in_MAC_Chunk, &a);
if (NULL != mc) {
ai->akind = Freed;
ai->blksize = mc->size;
ai->rwoffset = (Int)a - (Int)mc->data;
ai->lastchange = mc->where;
return;
mc = MAC_(get_freed_list_head)();
while (mc) {
if (addr_is_in_MAC_Chunk(mc, a)) {
ai->akind = Freed;
ai->blksize = mc->size;
ai->rwoffset = (Int)a - (Int)mc->data;
ai->lastchange = mc->where;
return;
}
mc = mc->next;
}
/* Search for a currently malloc'd block which might bracket it. */
mc = (MAC_Chunk*)VG_(HT_first_match)(MAC_(malloc_list), addr_is_in_HashNode, &a);
if (NULL != mc) {
ai->akind = Mallocd;
ai->blksize = mc->size;
ai->rwoffset = (Int)(a) - (Int)mc->data;
ai->lastchange = mc->where;
return;
VG_(HT_ResetIter)(MAC_(malloc_list));
while ( (mc = VG_(HT_Next)(MAC_(malloc_list))) ) {
if (addr_is_in_MAC_Chunk(mc, a)) {
ai->akind = Mallocd;
ai->blksize = mc->size;
ai->rwoffset = (Int)(a) - (Int)mc->data;
ai->lastchange = mc->where;
return;
}
}
/* Clueless ... */
ai->akind = Unknown;


@@ -402,7 +402,7 @@ extern void MAC_(record_illegal_mempool_error) ( ThreadId tid, Addr pool );
extern void MAC_(pp_shared_Error) ( Error* err);
extern MAC_Chunk* MAC_(first_matching_freed_MAC_Chunk)( Bool (*p)(MAC_Chunk*, void*), void* d );
extern MAC_Chunk* MAC_(get_freed_list_head)( void );
extern void MAC_(common_pre_clo_init) ( void );
extern void MAC_(common_fini) ( void (*leak_check)(ThreadId tid,


@@ -2308,14 +2308,6 @@ static void show_client_block_stats ( void )
);
}
static Bool find_addr(VgHashNode* sh_ch, void* ap)
{
MAC_Chunk *m = (MAC_Chunk*)sh_ch;
Addr a = *(Addr*)ap;
return VG_(addr_is_in_block)(a, m->data, m->size, MAC_MALLOC_REDZONE_SZB);
}
static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai )
{
UInt i;
@@ -2330,30 +2322,32 @@ static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai )
/* OK - maybe it's a mempool, too? */
MAC_Mempool* mp = VG_(HT_lookup)(MAC_(mempool_list),
(UWord)cgbs[i].start);
if(mp != NULL) {
if(mp->chunks != NULL) {
MAC_Chunk *mc;
mc = (MAC_Chunk*)VG_(HT_first_match)(mp->chunks, find_addr, &a);
if(mc != NULL) {
ai->akind = UserG;
ai->blksize = mc->size;
ai->rwoffset = (Int)(a) - (Int)mc->data;
ai->lastchange = mc->where;
return True;
if (mp != NULL) {
if (mp->chunks != NULL) {
MAC_Chunk* mc;
VG_(HT_ResetIter)(mp->chunks);
while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
if (VG_(addr_is_in_block)(a, mc->data, mc->size,
MAC_MALLOC_REDZONE_SZB)) {
ai->akind = UserG;
ai->blksize = mc->size;
ai->rwoffset = (Int)(a) - (Int)mc->data;
ai->lastchange = mc->where;
return True;
}
}
}
ai->akind = Mempool;
ai->blksize = cgbs[i].size;
ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
ai->akind = Mempool;
ai->blksize = cgbs[i].size;
ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
ai->lastchange = cgbs[i].where;
return True;
}
ai->akind = UserG;
ai->blksize = cgbs[i].size;
ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
ai->akind = UserG;
ai->blksize = cgbs[i].size;
ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
ai->lastchange = cgbs[i].where;
ai->desc = cgbs[i].desc;
ai->desc = cgbs[i].desc;
return True;
}
}