Added meta mempool support to memcheck for describing a custom allocator which:

- Auto-frees all chunks, assuming that destroying a pool destroys all
  objects in the pool
- Uses itself to allocate other memory blocks

Unit tests included.
Fixes BZ#367995
Patch by: Ruurd Beerstra <ruurd.beerstra@infor.com>



git-svn-id: svn://svn.valgrind.org/valgrind/trunk@15984
Commit: f2b34df721 (parent cd2843e06d)
Author: Ivo Raisr
Date: 2016-09-24 21:15:44 +00:00
23 changed files with 597 additions and 26 deletions

NEWS

@@ -10,6 +10,11 @@ Release 3.12.0 is under development, not yet released.
* Memcheck:
  - Added meta mempool support for describing a custom allocator which:
    - Auto-frees all chunks assuming that destroying a pool destroys all
      objects in the pool
    - Uses itself to allocate other memory blocks
* Helgrind:
* Callgrind:
@@ -165,6 +170,7 @@ where XXXXXX is the bug number as listed below.
366138  Fix configure errors out when using Xcode 8 (clang 8.0.0)
366344  Multiple unhandled instruction for Aarch64
        (0x0EE0E020, 0x1AC15800, 0x4E284801, 0x5E040023, 0x5E056060)
367995  Integration of memcheck with custom memory allocator
368412  False positive result for altivec capability check
368461  mmapunmap test fails on ppc64
368416  Add tc06_two_races_xml.exp output for ppc64

include/valgrind.h

@@ -7009,6 +7009,22 @@ VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
    VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CREATE_MEMPOOL,   \
                                    pool, rzB, is_zeroed, 0, 0)
/* Create a memory pool with special flags.  When VALGRIND_MEMPOOL_AUTO_FREE
   is passed, a MEMPOOL_DELETE will auto-free all chunks (so they are not
   reported as leaks), for allocators that assume that destroying a pool
   destroys all objects in the pool.  When VALGRIND_MEMPOOL_METAPOOL is
   passed, the custom allocator uses the pool blocks as superblocks to dole
   out MALLOC_LIKE blocks.  The resulting behaviour would normally be
   classified as overlapping blocks and cause assertion errors in valgrind.
   These two MEMPOOL flags can be OR-ed together into the "flags" argument.
   When flags is zero, the behaviour is identical to VALGRIND_CREATE_MEMPOOL.
*/
#define VALGRIND_MEMPOOL_AUTO_FREE  1
#define VALGRIND_MEMPOOL_METAPOOL   2
#define VALGRIND_CREATE_META_MEMPOOL(pool, rzB, is_zeroed, flags)  \
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CREATE_MEMPOOL,     \
                                   pool, rzB, is_zeroed, flags, 0)
/* Destroy a memory pool. */
#define VALGRIND_DESTROY_MEMPOOL(pool)                            \
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DESTROY_MEMPOOL,   \
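
For orientation, a minimal sketch of how a custom allocator might use the new
request (not part of this commit; the helper names, the single static
superblock and its 4096-byte size are invented for illustration):

   #include <stddef.h>
   #include <valgrind/valgrind.h>

   #define SUPERBLOCK_SIZE 4096

   static char   superblock[SUPERBLOCK_SIZE];  /* backing store for the pool */
   static size_t used;                         /* bump-allocator cursor      */

   /* Register the pool and its first superblock with Memcheck. */
   void pool_init(void *pool)
   {
      VALGRIND_CREATE_META_MEMPOOL(pool, /*rzB*/0, /*is_zeroed*/0,
                                   VALGRIND_MEMPOOL_AUTO_FREE |
                                   VALGRIND_MEMPOOL_METAPOOL);
      VALGRIND_MEMPOOL_ALLOC(pool, superblock, SUPERBLOCK_SIZE);
   }

   /* Carve an application block out of the superblock.  The MALLOCLIKE
      block overlaps the superblock; METAPOOL makes that acceptable. */
   void *pool_alloc(size_t n)
   {
      void *p = superblock + used;
      used += n;
      VALGRIND_MALLOCLIKE_BLOCK(p, n, /*rzB*/0, /*is_zeroed*/0);
      return p;
   }

   /* Release the superblock.  AUTO_FREE implicitly frees the MALLOCLIKE
      blocks inside it, so they are not reported as leaks. */
   void pool_destroy(void *pool)
   {
      VALGRIND_MEMPOOL_FREE(pool, superblock);
      VALGRIND_DESTROY_MEMPOOL(pool);
   }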

memcheck/docs/mc-manual.xml

@@ -2319,6 +2319,40 @@ inform Memcheck about changes to the state of a mempool:</para>
  </para>
 </listitem>
 <listitem>
  <para>
   <varname>VALGRIND_CREATE_META_MEMPOOL(pool, rzB, is_zeroed, flags)</varname>:
   This does the same as <varname>VALGRIND_CREATE_MEMPOOL</varname>,
   but allows you to specify two seldom-used options for custom
   allocators (OR-ed together) in the <varname>flags</varname>
   argument:</para>
  <itemizedlist>
   <listitem>
    <para>
     <varname>VALGRIND_MEMPOOL_AUTO_FREE</varname>.
     This indicates that items allocated from this memory pool are
     automatically freed when <varname>VALGRIND_MEMPOOL_FREE</varname>
     is used on the block containing them.  This allows a custom
     allocator to delete (part of) a memory pool without explicitly
     deleting all allocated items.  Without this option, such an action
     causes all items still in the pool to be reported as memory leaks.
    </para>
   </listitem>
   <listitem>
    <para>
     <varname>VALGRIND_MEMPOOL_METAPOOL</varname>.
     This indicates that memory marked as allocated with
     <varname>VALGRIND_MALLOCLIKE_BLOCK</varname> is used by a custom
     allocator to pass out memory to an application (again marked
     with <varname>VALGRIND_MALLOCLIKE_BLOCK</varname>).
     Without this option, such overlapping memory blocks may trigger
     a fatal error message in Memcheck.
    </para>
   </listitem>
  </itemizedlist>
 </listitem>
 <listitem>
  <para><varname>VALGRIND_DESTROY_MEMPOOL(pool)</varname>:
  This request tells Memcheck that a pool is being torn down.  Memcheck

memcheck/mc_errors.c

@@ -925,6 +925,30 @@ void MC_(record_user_error) ( ThreadId tid, Addr a,
   VG_(maybe_record_error)( tid, Err_User, a, /*s*/NULL, &extra );
}

Bool MC_(is_mempool_block)(MC_Chunk* mc_search)
{
   MC_Mempool* mp;

   if (!MC_(mempool_list))
      return False;

   // A chunk can only come from a mempool if a custom allocator
   // is used. No search required for other kinds.
   if (mc_search->allockind == MC_AllocCustom) {
      VG_(HT_ResetIter)( MC_(mempool_list) );
      while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
         MC_Chunk* mc;
         VG_(HT_ResetIter)(mp->chunks);
         while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
            if (mc == mc_search)
               return True;
         }
      }
   }

   return False;
}

/*------------------------------------------------------------*/
/*--- Other error operations                               ---*/
/*------------------------------------------------------------*/

@@ -1016,7 +1040,8 @@ Bool addr_is_in_MC_Chunk_with_REDZONE_SZB(MC_Chunk* mc, Addr a, SizeT rzB)
// Forward declarations
static Bool client_block_maybe_describe( Addr a, AddrInfo* ai );
static Bool mempool_block_maybe_describe( Addr a, Bool is_metapool,
                                          AddrInfo* ai );
/* Describe an address as best you can, for error messages, /* Describe an address as best you can, for error messages,
@@ -1031,10 +1056,12 @@ static void describe_addr ( Addr a, /*OUT*/AddrInfo* ai )
   if (client_block_maybe_describe( a, ai )) {
      return;
   }

   /* -- Perhaps it's in mempool block (non-meta)? -- */
   if (mempool_block_maybe_describe( a, /*is_metapool*/ False, ai )) {
      return;
   }

   /* Blocks allocated by memcheck malloc functions are either
      on the recently freed list or on the malloc-ed list.
      Custom blocks can be on both : a recently freed block might

@@ -1046,7 +1073,8 @@ static void describe_addr ( Addr a, /*OUT*/AddrInfo* ai )
   /* -- Search for a currently malloc'd block which might bracket it. -- */
   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      if (!MC_(is_mempool_block)(mc) &&
          addr_is_in_MC_Chunk_default_REDZONE_SZB(mc, a)) {
         ai->tag = Addr_Block;
         ai->Addr.Block.block_kind = Block_Mallocd;
         if (MC_(get_freed_block_bracketting)( a ))

@@ -1063,7 +1091,7 @@ static void describe_addr ( Addr a, /*OUT*/AddrInfo* ai )
   }

   /* -- Search for a recently freed block which might bracket it. -- */
   mc = MC_(get_freed_block_bracketting)( a );
   if (mc && !MC_(is_mempool_block)(mc)) {
      ai->tag = Addr_Block;
      ai->Addr.Block.block_kind = Block_Freed;
      ai->Addr.Block.block_desc = "block";

@@ -1075,6 +1103,16 @@ static void describe_addr ( Addr a, /*OUT*/AddrInfo* ai )
      return;
   }

   /* -- Perhaps it's in a meta mempool block? -- */
   /* This test is done last, because metapool blocks overlap with blocks
      handed out to the application.  That makes every heap address part of
      a metapool block, so the interesting cases are handled first.
      This final search is a last-ditch attempt.  When found, it is probably
      an error in the custom allocator itself. */
   if (mempool_block_maybe_describe( a, /*is_metapool*/ True, ai )) {
      return;
   }

   /* No block found. Search a non-heap block description. */
   VG_(describe_addr) (a, ai);
}

@@ -1215,7 +1253,7 @@ static Bool client_block_maybe_describe( Addr a,
}

static Bool mempool_block_maybe_describe( Addr a, Bool is_metapool,
                                          /*OUT*/AddrInfo* ai )
{
   MC_Mempool* mp;

@@ -1223,7 +1261,7 @@ static Bool mempool_block_maybe_describe( Addr a,
   VG_(HT_ResetIter)( MC_(mempool_list) );
   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
      if (mp->chunks != NULL && mp->metapool == is_metapool) {
         MC_Chunk* mc;
         VG_(HT_ResetIter)(mp->chunks);
         while ( (mc = VG_(HT_Next)(mp->chunks)) ) {

memcheck/mc_include.h

@@ -93,6 +93,9 @@ typedef
      Addr         pool;       // pool identifier
      SizeT        rzB;        // pool red-zone size
      Bool         is_zeroed;  // allocations from this pool are zeroed
      Bool         auto_free;  // De-alloc block frees all chunks in block
      Bool         metapool;   // These chunks are VALGRIND_MALLOC_LIKE
                               // memory, and used as pool.
      VgHashTable *chunks;     // chunks associated with this pool
   }
   MC_Mempool;

@@ -105,7 +108,8 @@ void* MC_(new_block) ( ThreadId tid,
void MC_(handle_free)      ( ThreadId tid,
                             Addr p, UInt rzB, MC_AllocKind kind );

void MC_(create_mempool)   ( Addr pool, UInt rzB, Bool is_zeroed,
                             Bool auto_free, Bool metapool );
void MC_(destroy_mempool)  ( Addr pool );
void MC_(mempool_alloc)    ( ThreadId tid, Addr pool,
                             Addr addr, SizeT size );

@@ -114,6 +118,7 @@ void MC_(mempool_trim) ( Addr pool, Addr addr, SizeT size );
void MC_(move_mempool)     ( Addr poolA, Addr poolB );
void MC_(mempool_change)   ( Addr pool, Addr addrA, Addr addrB, SizeT size );
Bool MC_(mempool_exists)   ( Addr pool );
Bool MC_(is_mempool_block) ( MC_Chunk* mc_search );

/* Searches for a recently freed block which might bracket Addr a.
   Return the MC_Chunk* for this block or NULL if no bracketting block

memcheck/mc_leakcheck.c

@@ -1760,6 +1760,25 @@ static void scan_memory_root_set(Addr searched, SizeT szB)
   VG_(free)(seg_starts);
}

static MC_Mempool *find_mp_of_chunk (MC_Chunk* mc_search)
{
   MC_Mempool* mp;

   tl_assert( MC_(mempool_list) );

   VG_(HT_ResetIter)( MC_(mempool_list) );
   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
      MC_Chunk* mc;
      VG_(HT_ResetIter)(mp->chunks);
      while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
         if (mc == mc_search)
            return mp;
      }
   }

   return NULL;
}

/*------------------------------------------------------------*/
/*--- Top-level entry point.                               ---*/
/*------------------------------------------------------------*/
@@ -1816,7 +1835,7 @@ void MC_(detect_memory_leaks) ( ThreadId tid, LeakCheckParams* lcp)
      tl_assert( lc_chunks[i]->data <= lc_chunks[i+1]->data);
   }

   // Sanity check -- make sure they don't overlap.  One exception is that
   // we allow a MALLOCLIKE block to sit entirely within a malloc() block.
   // This is for bug 100628.  If this occurs, we ignore the malloc() block
   // for leak-checking purposes.  This is a hack and probably should be done

@@ -1825,6 +1844,9 @@ void MC_(detect_memory_leaks) ( ThreadId tid, LeakCheckParams* lcp)
   // for mempool chunks, but if custom-allocated blocks are put in a separate
   // table from normal heap blocks it makes free-mismatch checking more
   // difficult.
   // Another exception: metapool memory blocks overlap by definition.  The
   // meta-block is allocated (by a custom allocator), and chunks of that
   // block are allocated again for use by the application: not an error.
   //
   // If this check fails, it probably means that the application
   // has done something stupid with VALGRIND_MALLOCLIKE_BLOCK client
@@ -1867,17 +1889,50 @@ void MC_(detect_memory_leaks) ( ThreadId tid, LeakCheckParams* lcp)
         lc_n_chunks--;

      } else {
         // Overlap is allowed ONLY when one of the two candidates is a block
         // from a memory pool that has the metapool attribute set.
         // All other mixtures trigger the error + assert.
         MC_Mempool* mp;
         Bool ch1_is_meta = False, ch2_is_meta = False;
         Bool Inappropriate = False;

         if (MC_(is_mempool_block)(ch1)) {
            mp = find_mp_of_chunk(ch1);
            if (mp && mp->metapool) {
               ch1_is_meta = True;
            }
         }
         if (MC_(is_mempool_block)(ch2)) {
            mp = find_mp_of_chunk(ch2);
            if (mp && mp->metapool) {
               ch2_is_meta = True;
            }
         }

         // If one of the blocks is a meta block, the other must be entirely
         // within that meta block, or something is really wrong with the
         // custom allocator.
         if (ch1_is_meta != ch2_is_meta) {
            if ( (ch1_is_meta && (start2 < start1 || end2 > end1)) ||
                 (ch2_is_meta && (start1 < start2 || end1 > end2)) ) {
               Inappropriate = True;
            }
         }

         if (ch1_is_meta == ch2_is_meta || Inappropriate) {
            VG_(umsg)("Block 0x%lx..0x%lx overlaps with block 0x%lx..0x%lx\n",
                      start1, end1, start2, end2);
            VG_(umsg)("Blocks allocation contexts:\n"),
            VG_(pp_ExeContext)( MC_(allocated_at)(ch1));
            VG_(umsg)("\n"),
            VG_(pp_ExeContext)( MC_(allocated_at)(ch2));
            VG_(umsg)("This is usually caused by using ");
            VG_(umsg)("VALGRIND_MALLOCLIKE_BLOCK in an inappropriate way.\n");
            tl_assert (0);
         }
      }
   }

   // Initialise lc_extras.
   if (lc_extras) {
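
To make the containment rule concrete, here is a standalone sketch (not from
the patch; the addresses are invented): overlap between two blocks is
tolerated only when exactly one block is from a METAPOOL pool and the other
lies entirely inside it.

   #include <stdio.h>

   int main(void)
   {
      /* Hypothetical address ranges. */
      unsigned long start1 = 0x1000, end1 = 0x1fff;  /* metapool superblock  */
      unsigned long start2 = 0x1010, end2 = 0x102f;  /* block carved from it */

      /* Mirrors the leak checker's test, with block 1 as the meta block:
         the non-meta block must be fully contained in the meta block. */
      if (start2 < start1 || end2 > end1)
         printf("Inappropriate overlap: fatal error path\n");
      else
         printf("Contained overlap: allowed for metapools\n");
      return 0;
   }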

memcheck/mc_main.c

@@ -7032,8 +7032,13 @@ static Bool mc_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
      Addr pool      = (Addr)arg[1];
      UInt rzB       =       arg[2];
      Bool is_zeroed = (Bool)arg[3];
      UInt flags     =       arg[4];

      // The create_mempool function does not know these mempool flags,
      // pass as booleans.
      MC_(create_mempool) ( pool, rzB, is_zeroed,
                            (flags & VALGRIND_MEMPOOL_AUTO_FREE),
                            (flags & VALGRIND_MEMPOOL_METAPOOL) );
      return True;
   }

memcheck/mc_malloc_wrappers.c

@@ -338,7 +338,8 @@ UInt MC_(n_where_pointers) (void)
/* Allocate memory and note change in memory available */
void* MC_(new_block) ( ThreadId tid,
                       Addr p, SizeT szB, SizeT alignB,
                       Bool is_zeroed, MC_AllocKind kind,
                       VgHashTable *table)
{
   MC_Chunk* mc;
@@ -674,14 +675,52 @@ void MC_(handle_resizeInPlace)(ThreadId tid, Addr p,
static void check_mempool_sane(MC_Mempool* mp); /*forward*/

static void free_mallocs_in_mempool_block (MC_Mempool* mp,
                                           Addr StartAddr,
                                           Addr EndAddr)
{
   MC_Chunk *mc;
   ThreadId tid;
   Bool found;

   tl_assert(mp->auto_free);

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
                   "free_mallocs_in_mempool_block: Start 0x%lx size %lu\n",
                   StartAddr, (SizeT) (EndAddr - StartAddr));
   }

   tid = VG_(get_running_tid)();

   do {
      found = False;
      VG_(HT_ResetIter)(MC_(malloc_list));
      while (!found && (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
         if (mc->data >= StartAddr && mc->data + mc->szB < EndAddr) {
            if (VG_(clo_verbosity) > 2) {
               VG_(message)(Vg_UserMsg, "Auto-free of 0x%lx size=%lu\n",
                            mc->data, (mc->szB + 0UL));
            }
            // After removing a chunk, restart the scan rather than
            // continuing the hash-table iteration.
            mc = VG_(HT_remove) ( MC_(malloc_list), (UWord) mc->data);
            die_and_free_mem(tid, mc, mp->rzB);
            found = True;
         }
      }
   } while (found);
}

void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed,
                         Bool auto_free, Bool metapool)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
                   "create_mempool(0x%lx, rzB=%u, zeroed=%d, autofree=%d, metapool=%d)\n",
                   pool, rzB, is_zeroed, auto_free, metapool);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }
@@ -695,6 +734,8 @@ void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed)
   mp->pool      = pool;
   mp->rzB       = rzB;
   mp->is_zeroed = is_zeroed;
   mp->auto_free = auto_free;
   mp->metapool  = metapool;
   mp->chunks    = VG_(HT_construct)( "MC_(create_mempool)" );
   check_mempool_sane(mp);

@@ -882,6 +923,10 @@ void MC_(mempool_free)(Addr pool, Addr addr)
      return;
   }

   if (mp->auto_free) {
      free_mallocs_in_mempool_block(mp, mc->data, mc->data + (mc->szB + 0UL));
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
                   "mempool_free(0x%lx, 0x%lx) freed chunk of %lu bytes\n",

memcheck/tests/Makefile.am

@@ -61,7 +61,8 @@ dist_noinst_SCRIPTS = \
	filter_stderr filter_xml \
	filter_strchr \
	filter_varinfo3 \
	filter_memcheck \
	filter_overlaperror

noinst_HEADERS = leak.h

@@ -155,6 +156,12 @@ EXTRA_DIST = \
	leak-pool-3.vgtest leak-pool-3.stderr.exp \
	leak-pool-4.vgtest leak-pool-4.stderr.exp \
	leak-pool-5.vgtest leak-pool-5.stderr.exp \
	leak-autofreepool-0.vgtest leak-autofreepool-0.stderr.exp \
	leak-autofreepool-1.vgtest leak-autofreepool-1.stderr.exp \
	leak-autofreepool-2.vgtest leak-autofreepool-2.stderr.exp \
	leak-autofreepool-3.vgtest leak-autofreepool-3.stderr.exp \
	leak-autofreepool-4.vgtest leak-autofreepool-4.stderr.exp \
	leak-autofreepool-5.vgtest leak-autofreepool-5.stderr.exp \
	leak-tree.vgtest leak-tree.stderr.exp \
	leak-segv-jmp.vgtest leak-segv-jmp.stderr.exp \
	lks.vgtest lks.stdout.exp lks.supp lks.stderr.exp \

@@ -347,6 +354,7 @@ check_PROGRAMS = \
	leak-cycle \
	leak-delta \
	leak-pool \
	leak-autofreepool \
	leak-tree \
	leak-segv-jmp \
	long-supps \

memcheck/tests/filter_overlaperror

@@ -0,0 +1,4 @@
#! /bin/sh
./filter_allocs "$@" |
sed 's/\(Memcheck: mc_leakcheck.c:\)[0-9]*\(.*impossible.*happened.*\)/\1...\2/'

memcheck/tests/leak-autofreepool-0.stderr.exp

@@ -0,0 +1,17 @@
HEAP SUMMARY:
    in use at exit: ... bytes in ... blocks
  total heap usage: ... allocs, ... frees, ... bytes allocated

320 bytes in 20 blocks are definitely lost in loss record ... of ...

LEAK SUMMARY:
   definitely lost: 320 bytes in 20 blocks
   indirectly lost: 0 bytes in 0 blocks
     possibly lost: 0 bytes in 0 blocks
   still reachable: 0 bytes in 0 blocks
        suppressed: 0 bytes in 0 blocks

For counts of detected and suppressed errors, rerun with: -v
ERROR SUMMARY: 1 errors from 1 contexts (suppressed: 0 from 0)

memcheck/tests/leak-autofreepool-0.vgtest

@@ -0,0 +1,4 @@
prog: leak-autofreepool
vgopts: --leak-check=full --show-possibly-lost=no --track-origins=yes
args: 0
stderr_filter: filter_allocs

memcheck/tests/leak-autofreepool-1.stderr.exp

@@ -0,0 +1,17 @@
HEAP SUMMARY:
    in use at exit: ... bytes in ... blocks
  total heap usage: ... allocs, ... frees, ... bytes allocated

320 bytes in 20 blocks are definitely lost in loss record ... of ...

LEAK SUMMARY:
   definitely lost: 320 bytes in 20 blocks
   indirectly lost: 0 bytes in 0 blocks
     possibly lost: 0 bytes in 0 blocks
   still reachable: 0 bytes in 0 blocks
        suppressed: 0 bytes in 0 blocks

For counts of detected and suppressed errors, rerun with: -v
ERROR SUMMARY: 1 errors from 1 contexts (suppressed: 0 from 0)

memcheck/tests/leak-autofreepool-1.vgtest

@@ -0,0 +1,4 @@
prog: leak-autofreepool
vgopts: --leak-check=full --show-possibly-lost=no --track-origins=yes
args: 1
stderr_filter: filter_allocs

memcheck/tests/leak-autofreepool-2.stderr.exp

@@ -0,0 +1,10 @@
HEAP SUMMARY:
    in use at exit: ... bytes in ... blocks
  total heap usage: ... allocs, ... frees, ... bytes allocated

All heap blocks were freed -- no leaks are possible

For counts of detected and suppressed errors, rerun with: -v
ERROR SUMMARY: 0 errors from 0 contexts (suppressed: 0 from 0)

memcheck/tests/leak-autofreepool-2.vgtest

@@ -0,0 +1,4 @@
prog: leak-autofreepool
vgopts: --leak-check=full --show-possibly-lost=no --track-origins=yes
args: 2
stderr_filter: filter_allocs

memcheck/tests/leak-autofreepool-3.stderr.exp

@@ -0,0 +1,10 @@
HEAP SUMMARY:
    in use at exit: ... bytes in ... blocks
  total heap usage: ... allocs, ... frees, ... bytes allocated

All heap blocks were freed -- no leaks are possible

For counts of detected and suppressed errors, rerun with: -v
ERROR SUMMARY: 0 errors from 0 contexts (suppressed: 0 from 0)

memcheck/tests/leak-autofreepool-3.vgtest

@@ -0,0 +1,4 @@
prog: leak-autofreepool
vgopts: --leak-check=full --show-possibly-lost=no --track-origins=yes
args: 3
stderr_filter: filter_allocs

memcheck/tests/leak-autofreepool-4.stderr.exp

@@ -0,0 +1,17 @@
HEAP SUMMARY:
    in use at exit: ... bytes in ... blocks
  total heap usage: ... allocs, ... frees, ... bytes allocated

320 bytes in 20 blocks are definitely lost in loss record ... of ...

LEAK SUMMARY:
   definitely lost: 320 bytes in 20 blocks
   indirectly lost: 0 bytes in 0 blocks
     possibly lost: 4,096 bytes in 1 blocks
   still reachable: 0 bytes in 0 blocks
        suppressed: 0 bytes in 0 blocks

For counts of detected and suppressed errors, rerun with: -v
ERROR SUMMARY: 2 errors from 2 contexts (suppressed: 0 from 0)

memcheck/tests/leak-autofreepool-4.vgtest

@@ -0,0 +1,4 @@
prog: leak-autofreepool
vgopts: --leak-check=full --show-possibly-lost=no --track-origins=yes
args: 4
stderr_filter: filter_allocs

memcheck/tests/leak-autofreepool-5.stderr.exp

@@ -0,0 +1,34 @@
HEAP SUMMARY:
    in use at exit: ... bytes in ... blocks
  total heap usage: ... allocs, ... frees, ... bytes allocated

Block 0x..........0x........ overlaps with block 0x..........0x........
Blocks allocation contexts:
...

...
This is usually caused by using VALGRIND_MALLOCLIKE_BLOCK in an inappropriate way.

Memcheck: mc_leakcheck.c:... (vgMemCheck_detect_memory_leaks): the 'impossible' happened.

host stacktrace:
...

sched status:
  running_tid=1

Note: see also the FAQ in the source distribution.
It contains workarounds to several common problems.
In particular, if Valgrind aborted or crashed after
identifying problems in your program, there's a good chance
that fixing those problems will prevent Valgrind aborting or
crashing, especially if it happened in m_mallocfree.c.

If that doesn't help, please report this bug to: www.valgrind.org
In the bug report, send all the above text, the valgrind
version, and what OS and version you are using.  Thanks.

memcheck/tests/leak-autofreepool-5.vgtest

@@ -0,0 +1,4 @@
prog: leak-autofreepool
vgopts: --leak-check=full --show-possibly-lost=no --track-origins=yes
args: 5
stderr_filter: filter_overlaperror

memcheck/tests/leak-autofreepool.c

@@ -0,0 +1,226 @@
#include <stdlib.h>
#include <stdint.h>
#include <assert.h>
#include <string.h>
#include <stdio.h>
#include "../memcheck.h"
// Test VALGRIND_CREATE_META_MEMPOOL features: the VALGRIND_MEMPOOL_METAPOOL
// and VALGRIND_MEMPOOL_AUTO_FREE flags.
// Also show that without these, a custom allocator that:
// - Allocates a MEMPOOL
// - Uses ITSELF to get large blocks to populate the pool (so these are marked
//   as MALLOCLIKE blocks)
// - Then passes out MALLOCLIKE blocks out of these pool blocks
// was not previously supported by the 'loose model' for mempools in memcheck,
// because it spotted these (correctly) as overlapping blocks (test case 3
// below).
// The VALGRIND_MEMPOOL_METAPOOL flag says not to treat these as overlaps.
//
// Also, when one of these metapool blocks is freed, memcheck will not
// auto-free the MALLOCLIKE blocks allocated from the meta-pool, and will
// report them as leaks.  When VALGRIND_MEMPOOL_AUTO_FREE is passed, no such
// leaks are reported.  This is for custom allocators that destroy a pool
// without freeing the objects allocated from it, because that is the defined
// behaviour of the allocator.
struct pool
{
   size_t allocated;
   size_t used;
   uint8_t *buf;
};

struct cell
{
   struct cell *next;
   int x;
};

static struct pool _PlainPool, *PlainPool = &_PlainPool;
static struct pool _MetaPool,  *MetaPool  = &_MetaPool;
#define N 10
#define POOL_BLOCK_SIZE 4096

// For easy testing, the plain mempool uses N allocations, the
// metapool 2 * N (so 10 reported leaks are from the plain pool, 20 must be
// from the metapool).
static int MetaPoolFlags = 0;
static int CleanupBeforeExit = 0;

static struct cell *cells_plain[2 * N];
static struct cell *cells_meta[2 * N];

static char PlainBlock[POOL_BLOCK_SIZE];
static char MetaBlock[POOL_BLOCK_SIZE];
void create_meta_pool (void)
{
   VALGRIND_CREATE_META_MEMPOOL(MetaPool, 0, 0, MetaPoolFlags);
   VALGRIND_MEMPOOL_ALLOC(MetaPool, MetaBlock, POOL_BLOCK_SIZE);

   MetaPool->buf = (uint8_t *) MetaBlock;
   MetaPool->allocated = POOL_BLOCK_SIZE;
   MetaPool->used = 0;

   /* A pool-block is expected to have metadata, and the core of
      valgrind sees a MALLOCLIKE_BLOCK that starts at the same address
      as a MEMPOOLBLOCK as a MEMPOOLBLOCK, hence never as a leak.
      Introduce some simulated metadata.
   */
   MetaPool->buf  += sizeof(uint8_t);
   MetaPool->used += sizeof(uint8_t);
}

static void create_plain_pool (void)
{
   VALGRIND_CREATE_MEMPOOL(PlainPool, 0, 0);

   PlainPool->buf = (uint8_t *) PlainBlock;
   PlainPool->allocated = POOL_BLOCK_SIZE;
   PlainPool->used = 0;

   /* Same overhead */
   PlainPool->buf  += sizeof(uint8_t);
   PlainPool->used += sizeof(uint8_t);
}
static void *allocate_meta_style (struct pool *p, size_t n)
{
   void *a = p->buf + p->used;
   assert(p->used + n < p->allocated);

   // Simulate a custom allocator that allocates memory either directly for
   // the application or for a custom memory pool: all are marked MALLOCLIKE.
   VALGRIND_MALLOCLIKE_BLOCK(a, n, 0, 0);
   p->used += n;

   return a;
}

static void *allocate_plain_style (struct pool *p, size_t n)
{
   void *a = p->buf + p->used;
   assert(p->used + n < p->allocated);

   // And this is a custom allocator that knows it is allocating from a pool.
   VALGRIND_MEMPOOL_ALLOC(p, a, n);
   p->used += n;

   return a;
}
/* flags */
static void set_flags ( int n )
{
   switch (n) {
      // Case 0: no special flags.  VALGRIND_CREATE_META_MEMPOOL is the same
      // as VALGRIND_CREATE_MEMPOOL.
      // When the mempools are destroyed, the METAPOOL leaks because auto-free
      // is missing.  Must show 2*N (20) leaks.
      // The VALGRIND_MEMPOOL_ALLOC items from the plain pool are
      // automatically destroyed.  CleanupBeforeExit means the metapool is
      // freed and destroyed (simulating an app that cleans up before it
      // exits); when false, the app simply exits with the pool unaltered.
      case 0:
         MetaPoolFlags     = 0;
         CleanupBeforeExit = 1;
         break;

      // Case 1: VALGRIND_MEMPOOL_METAPOOL, but no auto-free.
      // Without an explicit free, the MALLOCLIKE_BLOCK blocks are considered
      // leaks, so this case shows the same as case 0: 20 leaks.
      case 1:
         MetaPoolFlags     = VALGRIND_MEMPOOL_METAPOOL;
         CleanupBeforeExit = 1;
         break;

      // Case 2: same as before, but now the MALLOCLIKE blocks are auto-freed.
      // Must show 0 leaks.
      case 2:
         MetaPoolFlags = VALGRIND_MEMPOOL_AUTO_FREE | VALGRIND_MEMPOOL_METAPOOL;
         CleanupBeforeExit = 1;
         break;

      // Case 3: just auto-free, with cleanup.  The cleanup removes the
      // overlapping blocks, so this is the same as case 2: no leaks,
      // no problems.
      case 3:
         MetaPoolFlags     = VALGRIND_MEMPOOL_AUTO_FREE;
         CleanupBeforeExit = 1;
         break;

      // Case 4: no auto-free, no cleanup.  Leaves overlapping blocks detected
      // by valgrind, but those are ignored because of the METAPOOL.
      // So: no crash, no problems, but 20 leaks.
      case 4:
         MetaPoolFlags     = VALGRIND_MEMPOOL_METAPOOL;
         CleanupBeforeExit = 0;
         break;

      // Case 5: the main reason for the VALGRIND_MEMPOOL_METAPOOL flag.
      // When it is not specified and the application has a memory pool with
      // overlapping MALLOC_LIKE allocations, blocks are left that overlap,
      // which causes a fatal error.  The METAPOOL flag allows the overlap;
      // this case must show that without the flag, a fatal error occurs.
      case 5:
         MetaPoolFlags     = 0;
         CleanupBeforeExit = 0;
         break;

      default:
         assert(0);
   }
}
int main( int argc, char** argv )
{
   int arg;
   size_t i;

   assert(argc == 2);
   assert(argv[1]);
   assert(strlen(argv[1]) == 1);
   assert(argv[1][0] >= '0' && argv[1][0] <= '9');
   arg = atoi( argv[1] );
   set_flags( arg );

   create_plain_pool();
   create_meta_pool();

   // N plain allocs
   for (i = 0; i < N; ++i) {
      cells_plain[i] = allocate_plain_style(PlainPool, sizeof(struct cell));
   }

   // 2*N meta allocs
   for (i = 0; i < 2 * N; ++i) {
      cells_meta[i] = allocate_meta_style(MetaPool, sizeof(struct cell));
   }

   // Leak the memory from the pools by losing the pointers.
   for (i = 0; i < N; ++i) {
      cells_plain[i] = NULL;
   }
   for (i = 0; i < 2 * N; ++i) {
      cells_meta[i] = NULL;
   }

   // This must free the MALLOCLIKE allocations from the pool when
   // VALGRIND_MEMPOOL_AUTO_FREE is set for the pool, and report leaks
   // when it is not.
   if (CleanupBeforeExit) {
      VALGRIND_MEMPOOL_FREE(MetaPool, MetaBlock);
      VALGRIND_DESTROY_MEMPOOL(MetaPool);
   }

   // Cleanup.
   VALGRIND_DESTROY_MEMPOOL(PlainPool);

   return 0;
}