Memory pool support.

git-svn-id: svn://svn.valgrind.org/valgrind/trunk@2428
This commit is contained in:
Robert Walsh 2004-06-19 18:12:36 +00:00
parent 827e3bebb6
commit 79b252dfdf
11 changed files with 528 additions and 30 deletions

View File

@ -1185,7 +1185,11 @@ Bool SK_(handle_client_request) ( ThreadId tid, UInt* arg_block, UInt *ret )
/* Overload memcheck client reqs */
if (!VG_IS_SKIN_USERREQ('M','C',arg[0])
&& VG_USERREQ__MALLOCLIKE_BLOCK != arg[0]
&& VG_USERREQ__FREELIKE_BLOCK != arg[0])
&& VG_USERREQ__FREELIKE_BLOCK != arg[0]
&& VG_USERREQ__CREATE_MEMPOOL != arg[0]
&& VG_USERREQ__DESTROY_MEMPOOL != arg[0]
&& VG_USERREQ__MEMPOOL_ALLOC != arg[0]
&& VG_USERREQ__MEMPOOL_FREE != arg[0])
return False;
switch (arg[0]) {

View File

@ -984,6 +984,23 @@ tool-specific documentation for explanations of the tool-specific macros).
with <code>VALGRIND_MALLOCLIKE_BLOCK</code>. Again, see
<code>memcheck/memcheck.h</code> for information on how to use it.
<p>
<li><code>VALGRIND_CREATE_MEMPOOL</code>: This is similar to
<code>VALGRIND_MALLOCLIKE_BLOCK</code>, but is tailored towards code
that uses memory pools. See the comments in <code>valgrind.h</code>
for information on how to use it.
<p>
<li><code>VALGRIND_DESTROY_MEMPOOL</code>: This should be used in
conjunction with <code>VALGRIND_CREATE_MEMPOOL</code>. Again, see the
comments in <code>valgrind.h</code> for information on how to use it.
<p>
<li><code>VALGRIND_MEMPOOL_ALLOC</code>: This should be used in
conjunction with <code>VALGRIND_CREATE_MEMPOOL</code>. Again, see the
comments in <code>valgrind.h</code> for information on how to use it.
<p>
<li><code>VALGRIND_MEMPOOL_FREE</code>: This should be used in
conjunction with <code>VALGRIND_CREATE_MEMPOOL</code>. Again, see the
comments in <code>valgrind.h</code> for information on how to use it.
<p>
<li><code>VALGRIND_NON_SIMD_CALL[0123]</code>: executes a function of 0, 1, 2
or 3 args in the client program on the <i>real</i> CPU, not the virtual
CPU that Valgrind normally runs code on. These are used in various ways

View File

@ -163,6 +163,11 @@ typedef
malloc() et al, by using vg_replace_malloc.c. */
VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
VG_USERREQ__FREELIKE_BLOCK = 0x1302,
/* Memory pool support. */
VG_USERREQ__CREATE_MEMPOOL = 0x1303,
VG_USERREQ__DESTROY_MEMPOOL = 0x1304,
VG_USERREQ__MEMPOOL_ALLOC = 0x1305,
VG_USERREQ__MEMPOOL_FREE = 0x1306,
/* Allow printfs to valgrind log. */
VG_USERREQ__PRINTF = 0x1401,
@ -322,4 +327,36 @@ VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
addr, rzB, 0, 0); \
}
/* Create a memory pool.  'pool' is an opaque handle (typically the
   pool's anchor address) that identifies the pool in later requests;
   'rzB' is the red-zone size applied around each allocation from the
   pool; 'is_zeroed' says whether allocations from it should be
   treated as already defined (zero-filled). */
#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) \
{unsigned int _qzz_res; \
VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \
VG_USERREQ__CREATE_MEMPOOL, \
pool, rzB, is_zeroed, 0); \
}
/* Destroy a memory pool previously announced with
   VALGRIND_CREATE_MEMPOOL.  Any chunks still associated with the
   pool are retired along with it. */
#define VALGRIND_DESTROY_MEMPOOL(pool) \
{unsigned int _qzz_res; \
VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \
VG_USERREQ__DESTROY_MEMPOOL, \
pool, 0, 0, 0); \
}
/* Associate a piece of memory ('size' bytes at 'addr') with the
   memory pool identified by 'pool' -- the mempool analogue of
   VALGRIND_MALLOCLIKE_BLOCK. */
#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) \
{unsigned int _qzz_res; \
VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \
VG_USERREQ__MEMPOOL_ALLOC, \
pool, addr, size, 0); \
}
/* Disassociate the piece of memory at 'addr' from the memory pool
   'pool' -- the mempool analogue of VALGRIND_FREELIKE_BLOCK. */
#define VALGRIND_MEMPOOL_FREE(pool, addr) \
{unsigned int _qzz_res; \
VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \
VG_USERREQ__MEMPOOL_FREE, \
pool, addr, 0, 0); \
}
#endif /* __VALGRIND_H */

View File

@ -61,6 +61,10 @@ Bool (*MAC_(check_noaccess))( Addr a, UInt len, Addr* bad_addr ) = NULL;
/* Record malloc'd blocks. Nb: Addrcheck and Memcheck construct this
separately in their respective initialisation functions. */
VgHashTable MAC_(malloc_list) = NULL;
/* Memory pools. Nb: Addrcheck and Memcheck construct this separately
in their respective initialisation functions. */
VgHashTable MAC_(mempool_list) = NULL;
/* Records blocks after freeing. */
static MAC_Chunk* freed_list_start = NULL;
@ -127,7 +131,8 @@ MAC_Chunk* MAC_(first_matching_freed_MAC_Chunk) ( Bool (*p)(MAC_Chunk*) )
/* Allocate its shadow chunk, put it on the appropriate list. */
static
void add_MAC_Chunk ( Addr p, UInt size, MAC_AllocKind kind )
MAC_Chunk* add_MAC_Chunk ( Addr p, UInt size, MAC_AllocKind kind,
VgHashTable table)
{
MAC_Chunk* mc;
@ -146,7 +151,9 @@ void add_MAC_Chunk ( Addr p, UInt size, MAC_AllocKind kind )
VG_(skin_panic)("add_MAC_chunk: shadow area is accessible");
}
VG_(HT_add_node)( MAC_(malloc_list), (VgHashNode*)mc );
VG_(HT_add_node)( table, (VgHashNode*)mc );
return mc;
}
/*------------------------------------------------------------*/
@ -155,21 +162,26 @@ void add_MAC_Chunk ( Addr p, UInt size, MAC_AllocKind kind )
/* Allocate memory and note change in memory available */
__inline__
void MAC_(new_block) ( Addr p, UInt size,
UInt rzB, Bool is_zeroed, MAC_AllocKind kind )
MAC_Chunk* MAC_(new_block) ( Addr p, UInt size,
UInt rzB, Bool is_zeroed, MAC_AllocKind kind,
VgHashTable table)
{
MAC_Chunk *mc;
VGP_PUSHCC(VgpCliMalloc);
cmalloc_n_mallocs ++;
cmalloc_bs_mallocd += size;
add_MAC_Chunk( p, size, kind );
mc = add_MAC_Chunk( p, size, kind, table );
MAC_(ban_mem_heap)( p-rzB, rzB );
MAC_(new_mem_heap)( p, size, is_zeroed );
MAC_(ban_mem_heap)( p+size, rzB );
VGP_POPCC(VgpCliMalloc);
return mc;
}
void* SK_(malloc) ( Int n )
@ -180,7 +192,8 @@ void* SK_(malloc) ( Int n )
} else {
Addr p = (Addr)VG_(cli_malloc)( VG_(clo_alignment), n );
MAC_(new_block) ( p, n, VG_(vg_malloc_redzone_szB),
/*is_zeroed*/False, MAC_AllocMalloc );
/*is_zeroed*/False, MAC_AllocMalloc,
MAC_(malloc_list));
return (void*)p;
}
}
@ -193,7 +206,8 @@ void* SK_(__builtin_new) ( Int n )
} else {
Addr p = (Addr)VG_(cli_malloc)( VG_(clo_alignment), n );
MAC_(new_block) ( p, n, VG_(vg_malloc_redzone_szB),
/*is_zeroed*/False, MAC_AllocNew );
/*is_zeroed*/False, MAC_AllocNew,
MAC_(malloc_list));
return (void*)p;
}
}
@ -207,7 +221,8 @@ void* SK_(__builtin_vec_new) ( Int n )
} else {
Addr p = (Addr)VG_(cli_malloc)( VG_(clo_alignment), n );
MAC_(new_block) ( p, n, VG_(vg_malloc_redzone_szB),
/*is_zeroed*/False, MAC_AllocNewVec );
/*is_zeroed*/False, MAC_AllocNewVec,
MAC_(malloc_list));
return (void*)p;
}
}
@ -220,7 +235,8 @@ void* SK_(memalign) ( Int align, Int n )
} else {
Addr p = (Addr)VG_(cli_malloc)( align, n );
MAC_(new_block) ( p, n, VG_(vg_malloc_redzone_szB),
/*is_zeroed*/False, MAC_AllocMalloc );
/*is_zeroed*/False, MAC_AllocMalloc,
MAC_(malloc_list));
return (void*)p;
}
}
@ -238,7 +254,8 @@ void* SK_(calloc) ( Int nmemb, Int size1 )
} else {
Addr p = (Addr)VG_(cli_malloc)( VG_(clo_alignment), n );
MAC_(new_block) ( p, n, VG_(vg_malloc_redzone_szB),
/*is_zeroed*/True, MAC_AllocMalloc );
/*is_zeroed*/True, MAC_AllocMalloc,
MAC_(malloc_list));
for (i = 0; i < n; i++)
((UChar*)p)[i] = 0;
return (void*)p;
@ -261,17 +278,15 @@ void die_and_free_mem ( MAC_Chunk* mc,
describe_addr() which looks for it in malloclist. */
*prev_chunks_next_ptr = mc->next;
/* Record where freed */
mc->where = VG_(get_ExeContext) ( VG_(get_current_or_recent_tid)() );
/* Put it out of harm's way for a while, if not from a client request */
if (MAC_AllocCustom != mc->allockind)
if (MAC_AllocCustom != mc->allockind) {
/* Record where freed */
mc->where = VG_(get_ExeContext) ( VG_(get_current_or_recent_tid)() );
add_to_freed_queue ( mc );
else
} else
VG_(free) ( mc );
}
__inline__
void MAC_(handle_free) ( Addr p, UInt rzB, MAC_AllocKind kind )
{
@ -392,13 +407,119 @@ void* SK_(realloc) ( void* p, Int new_size )
/* this has to be after die_and_free_mem, otherwise the
former succeeds in shorting out the new block, not the
old, in the case when both are on the same list. */
add_MAC_Chunk ( p_new, new_size, MAC_AllocMalloc );
add_MAC_Chunk ( p_new, new_size, MAC_AllocMalloc, MAC_(malloc_list) );
VGP_POPCC(VgpCliMalloc);
return (void*)p_new;
}
}
/* Memory pool stuff. */
/* Register a new memory pool identified by 'pool' (an opaque client
   address).  Allocations later announced via MAC_(mempool_alloc) get
   red zones of 'rzB' bytes and are treated as defined iff
   'is_zeroed'.  Each pool keeps its chunks in a private hash table so
   the pool can be torn down independently of malloc_list. */
void MAC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed)
{
MAC_Mempool* mp;

mp = VG_(malloc)(sizeof(MAC_Mempool));
mp->pool = pool;
mp->rzB = rzB;
mp->is_zeroed = is_zeroed;
mp->chunks = VG_(HT_construct)();

/* Paranoia ... ensure this area is off-limits to the client, so
   the mp->data field isn't visible to the leak checker.  If memory
   management is working correctly, any pointer returned by
   VG_(malloc) should be noaccess as far as the client is
   concerned. */
if (!MAC_(check_noaccess)( (Addr)mp, sizeof(MAC_Mempool), NULL )) {
VG_(skin_panic)("MAC_(create_mempool): shadow area is accessible");
}

VG_(HT_add_node)( MAC_(mempool_list), (VgHashNode*)mp );
}
/* Destroy pool 'pool': unlink it from mempool_list, mark every chunk
   still associated with it as dead (red zones re-banned, payload
   marked no-access), then discard the chunk table and the pool record
   itself.  An unknown 'pool' is reported as an illegal-mempool error
   and nothing else happens. */
void MAC_(destroy_mempool)(Addr pool)
{
MAC_Mempool* mp;
MAC_Mempool** prev_next;

/* GCC nested function -- called once per remaining chunk; captures
   'mp' from the enclosing scope for the red-zone size. */
void nuke_chunk(VgHashNode *node)
{
MAC_Chunk *mc = (MAC_Chunk *)node;

/* Note: ban redzones again -- just in case user de-banned them
   with a client request... */
MAC_(ban_mem_heap)(mc->data-mp->rzB, mp->rzB );
MAC_(die_mem_heap)(mc->data, mc->size );
MAC_(ban_mem_heap)(mc->data+mc->size, mp->rzB );
}

mp = (MAC_Mempool*)VG_(HT_get_node) ( MAC_(mempool_list), (UInt)pool,
(VgHashNode***)&prev_next );

if (mp == NULL) {
ThreadId tid = VG_(get_current_or_recent_tid)();
MAC_(record_illegal_mempool_error) ( tid, pool );
return;
}

/* Unlink via the back-pointer that HT_get_node handed us. */
*prev_next = mp->next;
VG_(HT_apply_to_all_nodes)(mp->chunks, nuke_chunk);
VG_(HT_destruct)(mp->chunks);
VG_(free)(mp);
}
/* Record an allocation of 'size' bytes at 'addr' from pool 'pool':
   the chunk is entered into the pool's private chunk table and its
   payload/red zones get the usual heap-block treatment (zeroed iff
   the pool was created is_zeroed).  An unknown 'pool' is reported as
   an illegal-mempool error and the request is otherwise ignored. */
void MAC_(mempool_alloc)(Addr pool, Addr addr, UInt size)
{
   MAC_Mempool*  mp;
   MAC_Mempool** prev_next;

   mp = (MAC_Mempool*)VG_(HT_get_node) ( MAC_(mempool_list), (UInt)pool,
                                         (VgHashNode***)&prev_next );

   if (mp == NULL) {
      ThreadId tid = VG_(get_current_or_recent_tid)();
      MAC_(record_illegal_mempool_error) ( tid, pool );
      return;
   }

   /* new_block links the chunk into mp->chunks itself; its return
      value is not needed here.  (The previous code stored it in a
      local that was never read, drawing a set-but-unused warning.) */
   MAC_(new_block)( addr, size, mp->rzB, mp->is_zeroed, MAC_AllocCustom,
                    mp->chunks );
}
/* Free the chunk at 'addr' from pool 'pool'.  Two lookups: first the
   pool in mempool_list (miss -> illegal-mempool error), then the
   chunk in that pool's table (miss -> invalid-free error).  On
   success the chunk is unlinked and retired via die_and_free_mem,
   using the pool's red-zone size. */
void MAC_(mempool_free)(Addr pool, Addr addr)
{
MAC_Mempool* mp;
MAC_Mempool** prev_pool;
MAC_Chunk* mc;
MAC_Chunk** prev_chunk;
ThreadId tid = VG_(get_current_or_recent_tid)();

mp = (MAC_Mempool*)VG_(HT_get_node)(MAC_(mempool_list), (UInt)pool,
(VgHashNode***)&prev_pool);

if (mp == NULL) {
MAC_(record_illegal_mempool_error)(tid, pool);
return;
}

mc = (MAC_Chunk*)VG_(HT_get_node)(mp->chunks, (UInt)addr,
(VgHashNode***)&prev_chunk);

if (mc == NULL) {
MAC_(record_free_error)(tid, (Addr)addr);
return;
}

/* prev_chunk lets die_and_free_mem unlink mc from mp->chunks. */
die_and_free_mem(mc, prev_chunk, mp->rzB);
}
void MAC_(print_malloc_stats) ( void )
{
UInt nblocks = 0, nbytes = 0;

View File

@ -196,6 +196,9 @@ Bool SK_(eq_SkinError) ( VgRes res, Error* e1, Error* e2 )
VG_(skin_panic)("Shouldn't get LeakErr in SK_(eq_SkinError),\n"
"since it's handled with VG_(unique_error)()!");
case IllegalMempoolErr:
return True;
default:
VG_(printf)("Error:\n unknown error code %d\n",
VG_(get_error_kind)(e1));
@ -223,9 +226,15 @@ void MAC_(pp_AddrInfo) ( Addr a, AddrInfo* ai )
" Address 0x%x is not stack'd, malloc'd or (recently) free'd",a);
}
break;
case Freed: case Mallocd: case UserG: {
case Freed: case Mallocd: case UserG: case Mempool: {
UInt delta;
UChar* relative;
UChar* kind;
if (ai->akind == Mempool) {
kind = "mempool";
} else {
kind = "block";
}
if (ai->rwoffset < 0) {
delta = (UInt)(- ai->rwoffset);
relative = "before";
@ -237,8 +246,8 @@ void MAC_(pp_AddrInfo) ( Addr a, AddrInfo* ai )
relative = "inside";
}
VG_(message)(Vg_UserMsg,
" Address 0x%x is %d bytes %s a block of size %d %s",
a, delta, relative,
" Address 0x%x is %d bytes %s a %s of size %d %s",
a, delta, relative, kind,
ai->blksize,
ai->akind==Mallocd ? "alloc'd"
: ai->akind==Freed ? "free'd"
@ -314,6 +323,12 @@ void MAC_(pp_shared_SkinError) ( Error* err )
break;
}
case IllegalMempoolErr:
VG_(message)(Vg_UserMsg, "Illegal memory pool address");
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
break;
default:
VG_(printf)("Error:\n unknown Memcheck/Addrcheck error code %d\n",
VG_(get_error_kind)(err));
@ -470,6 +485,16 @@ void MAC_(record_free_error) ( ThreadId tid, Addr a )
VG_(maybe_record_error)( tid, FreeErr, a, /*s*/NULL, &err_extra );
}
/* Report that 'a' was passed as a mempool identifier but no such
   pool is registered.  The address is left Undescribed here; it is
   filled in later by SK_(update_extra) when the error is printed. */
void MAC_(record_illegal_mempool_error) ( ThreadId tid, Addr a )
{
   MAC_Error extra;

   /* Errors must be attributed to a real thread. */
   sk_assert(VG_INVALID_THREADID != tid);

   MAC_(clear_MAC_Error)( &extra );
   extra.addrinfo.akind = Undescribed;

   VG_(maybe_record_error)( tid, IllegalMempoolErr, a, /*s*/NULL, &extra );
}
void MAC_(record_freemismatch_error) ( ThreadId tid, Addr a )
{
MAC_Error err_extra;
@ -499,6 +524,7 @@ UInt SK_(update_extra)( Error* err )
case ParamErr:
case UserErr:
case FreeErr:
case IllegalMempoolErr:
case FreeMismatchErr: {
MAC_Error* extra = (MAC_Error*)VG_(get_error_extra)(err);
if (extra != NULL && Undescribed == extra->addrinfo.akind) {
@ -533,6 +559,7 @@ Bool MAC_(shared_recognised_suppression) ( Char* name, Supp* su )
else if (VG_STREQ(name, "Free")) skind = FreeSupp;
else if (VG_STREQ(name, "Leak")) skind = LeakSupp;
else if (VG_STREQ(name, "Overlap")) skind = OverlapSupp;
else if (VG_STREQ(name, "Mempool")) skind = MempoolSupp;
else
return False;
@ -595,6 +622,9 @@ Bool SK_(error_matches_suppression)(Error* err, Supp* su)
case LeakSupp:
return (ekind == LeakErr);
case MempoolSupp:
return (ekind == IllegalMempoolErr);
default:
VG_(printf)("Error:\n"
" unknown suppression type %d\n",
@ -611,6 +641,7 @@ Char* SK_(get_error_name) ( Error* err )
case ParamErr: return "Param";
case UserErr: return NULL; /* Can't suppress User errors */
case FreeMismatchErr: return "Free";
case IllegalMempoolErr: return "Mempool";
case FreeErr: return "Free";
case AddrErr:
switch ( ((MAC_Error*)VG_(get_error_extra)(err))->size ) {
@ -784,6 +815,7 @@ static void done_prof_mem ( void ) { }
void MAC_(common_pre_clo_init)(void)
{
MAC_(malloc_list) = VG_(HT_construct)();
MAC_(mempool_list) = VG_(HT_construct)();
init_prof_mem();
}
@ -844,7 +876,8 @@ Bool MAC_(handle_common_client_requests)(ThreadId tid, UInt* arg, UInt* ret )
UInt rzB = arg[3];
Bool is_zeroed = (Bool)arg[4];
MAC_(new_block) ( p, sizeB, rzB, is_zeroed, MAC_AllocCustom );
MAC_(new_block) ( p, sizeB, rzB, is_zeroed, MAC_AllocCustom,
MAC_(malloc_list) );
return True;
}
case VG_USERREQ__FREELIKE_BLOCK: {
@ -859,6 +892,39 @@ Bool MAC_(handle_common_client_requests)(ThreadId tid, UInt* arg, UInt* ret )
*ret = (Addr)MAC_(record_overlap_error);
return True;
case VG_USERREQ__CREATE_MEMPOOL: {
Addr pool = (Addr)arg[1];
UInt rzB = arg[2];
Bool is_zeroed = (Bool)arg[3];
MAC_(create_mempool) ( pool, rzB, is_zeroed );
return True;
}
case VG_USERREQ__DESTROY_MEMPOOL: {
Addr pool = (Addr)arg[1];
MAC_(destroy_mempool) ( pool );
return True;
}
case VG_USERREQ__MEMPOOL_ALLOC: {
Addr pool = (Addr)arg[1];
Addr addr = (Addr)arg[2];
UInt size = arg[3];
MAC_(mempool_alloc) ( pool, addr, size );
return True;
}
case VG_USERREQ__MEMPOOL_FREE: {
Addr pool = (Addr)arg[1];
Addr addr = (Addr)arg[2];
MAC_(mempool_free) ( pool, addr );
return True;
}
default:
return False;
}

View File

@ -51,7 +51,8 @@ typedef
Stack,
Unknown, /* classification yielded nothing useful */
Freed, Mallocd,
UserG /* in a user-defined block; Addrcheck & Memcheck only */
UserG, /* in a user-defined block; Addrcheck & Memcheck only */
Mempool, /* in a mempool; Addrcheck & Memcheck only */
}
AddrKind;
@ -88,7 +89,9 @@ typedef
/* Overlapping blocks in memcpy(), strcpy(), etc */
OverlapSupp,
/* Something to be suppressed in a leak check. */
LeakSupp
LeakSupp,
/* Memory pool suppression. */
MempoolSupp,
}
MAC_SuppKind;
@ -100,7 +103,8 @@ typedef
ParamErr, UserErr, /* behaves like an anonymous ParamErr */
FreeErr, FreeMismatchErr,
OverlapErr,
LeakErr
LeakErr,
IllegalMempoolErr,
}
MAC_ErrorKind;
@ -153,6 +157,18 @@ typedef
}
MAC_Chunk;
/* Memory pool descriptor, one per VALGRIND_CREATE_MEMPOOL request.
   Nb: first two fields must match core's VgHashNode, since these
   records are stored directly in MAC_(mempool_list). */
typedef
struct _MAC_Mempool {
struct _MAC_Mempool* next;       /* required by VgHashTable */
Addr pool;                       /* pool identifier (hash key) */
UInt rzB;                        /* pool red-zone size, in bytes */
Bool is_zeroed;                  /* allocations from this pool are zeroed */
VgHashTable chunks;              /* MAC_Chunks associated with this pool */
}
MAC_Mempool;
/*------------------------------------------------------------*/
/*--- Profiling of tools and memory events ---*/
/*------------------------------------------------------------*/
@ -270,6 +286,9 @@ extern void MAC_(print_common_debug_usage) ( void );
/* For tracking malloc'd blocks */
extern VgHashTable MAC_(malloc_list);
/* For tracking memory pools. */
extern VgHashTable MAC_(mempool_list);
/* Function pointers for the two tools to track interesting events. */
extern void (*MAC_(new_mem_heap)) ( Addr a, UInt len, Bool is_inited );
extern void (*MAC_(ban_mem_heap)) ( Addr a, UInt len );
@ -298,10 +317,16 @@ extern void MAC_(clear_MAC_Error) ( MAC_Error* err_extra );
extern Bool MAC_(shared_recognised_suppression) ( Char* name, Supp* su );
extern void MAC_(new_block) ( Addr p, UInt size, UInt rzB,
Bool is_zeroed, MAC_AllocKind kind );
extern MAC_Chunk* MAC_(new_block) ( Addr p, UInt size, UInt rzB,
Bool is_zeroed, MAC_AllocKind kind,
VgHashTable table);
extern void MAC_(handle_free) ( Addr p, UInt rzB, MAC_AllocKind kind );
extern void MAC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed);
extern void MAC_(destroy_mempool)(Addr pool);
extern void MAC_(mempool_alloc)(Addr pool, Addr addr, UInt size);
extern void MAC_(mempool_free)(Addr pool, Addr addr);
extern void MAC_(record_address_error) ( ThreadId tid, Addr a,
Int size, Bool isWrite );
extern void MAC_(record_core_mem_error) ( ThreadId tid, Bool isWrite,
@ -312,6 +337,7 @@ extern void MAC_(record_jump_error) ( ThreadId tid, Addr a );
extern void MAC_(record_free_error) ( ThreadId tid, Addr a );
extern void MAC_(record_freemismatch_error)( ThreadId tid, Addr a );
extern void MAC_(record_overlap_error) ( Char* function, OverlapExtra* oe );
extern void MAC_(record_illegal_mempool_error) ( ThreadId tid, Addr pool );
extern void MAC_(pp_shared_SkinError) ( Error* err);

View File

@ -134,6 +134,37 @@ Bool MC_(client_perm_maybe_describe)( Addr a, AddrInfo* ai )
if (vg_cgbs[i].kind == CG_NotInUse)
continue;
if (VG_(addr_is_in_block)(a, vg_cgbs[i].start, vg_cgbs[i].size)) {
MAC_Mempool **d, *mp;
/* OK - maybe it's a mempool, too? */
mp = (MAC_Mempool*)VG_(HT_get_node)(MAC_(mempool_list),
(UInt)vg_cgbs[i].start,
(VgHashNode***)&d);
if(mp != NULL) {
if(mp->chunks != NULL) {
MAC_Chunk *mc;
Bool find_addr(VgHashNode* sh_ch)
{
MAC_Chunk *m = (MAC_Chunk*)sh_ch;
return VG_(addr_is_in_block)(a, m->data, m->size);
}
mc = (MAC_Chunk*)VG_(HT_first_match)(mp->chunks, find_addr);
if(mc != NULL) {
ai->akind = UserG;
ai->blksize = mc->size;
ai->rwoffset = (Int)(a) - (Int)mc->data;
ai->lastchange = mc->where;
return True;
}
}
ai->akind = Mempool;
ai->blksize = vg_cgbs[i].size;
ai->rwoffset = (Int)(a) - (Int)(vg_cgbs[i].start);
ai->lastchange = vg_cgbs[i].where;
return True;
}
ai->akind = UserG;
ai->blksize = vg_cgbs[i].size;
ai->rwoffset = (Int)(a) - (Int)(vg_cgbs[i].start);
@ -152,7 +183,11 @@ Bool SK_(handle_client_request) ( ThreadId tid, UInt* arg, UInt* ret )
if (!VG_IS_SKIN_USERREQ('M','C',arg[0])
&& VG_USERREQ__MALLOCLIKE_BLOCK != arg[0]
&& VG_USERREQ__FREELIKE_BLOCK != arg[0])
&& VG_USERREQ__FREELIKE_BLOCK != arg[0]
&& VG_USERREQ__CREATE_MEMPOOL != arg[0]
&& VG_USERREQ__DESTROY_MEMPOOL != arg[0]
&& VG_USERREQ__MEMPOOL_ALLOC != arg[0]
&& VG_USERREQ__MEMPOOL_FREE != arg[0])
return False;
switch (arg[0]) {

View File

@ -43,6 +43,7 @@ EXTRA_DIST = $(noinst_SCRIPTS) \
manuel3.stderr.exp manuel3.vgtest \
memalign_test.stderr.exp memalign_test.vgtest \
memcmptest.stderr.exp memcmptest.stdout.exp memcmptest.vgtest \
mempool.stderr.exp mempool.vgtest \
mismatches.stderr.exp mismatches.vgtest \
mmaptest.stderr.exp mmaptest.vgtest \
nanoleak.stderr.exp nanoleak.vgtest \
@ -78,8 +79,8 @@ check_PROGRAMS = \
doublefree error_counts errs1 exitprog execve \
fpeflags fprw fwrite inits inline \
malloc1 malloc2 malloc3 manuel1 manuel2 manuel3 \
memalign_test memcmptest mmaptest nanoleak new_nothrow null_socket \
overlap pushfpopf \
memalign_test memcmptest mempool mmaptest nanoleak new_nothrow \
null_socket overlap pushfpopf \
realloc1 realloc2 realloc3 sigaltstack signal2 supp1 supp2 suppfree \
trivialleak tronical weirdioctl \
mismatches new_override metadata threadederrno writev zeropage
@ -117,6 +118,7 @@ manuel3_SOURCES = manuel3.c
mmaptest_SOURCES = mmaptest.c
memalign_test_SOURCES = memalign_test.c
memcmptest_SOURCES = memcmptest.c
mempool_SOURCES = mempool.c
nanoleak_SOURCES = nanoleak.c
null_socket_SOURCES = null_socket.c
overlap_SOURCES = overlap.c

150
memcheck/tests/mempool.c Normal file
View File

@ -0,0 +1,150 @@
#include <unistd.h>
#include <sys/mman.h>
#include <assert.h>
#include <stdlib.h>
#include "../memcheck.h"
/* Size of the single backing superblock each pool carves from. */
#define SUPERBLOCK_SIZE 100000
/* Red-zone size (bytes) on each side of every pool allocation. */
#define REDZONE_SIZE 8
/* 0 = back pools with malloc; nonzero = back them with mmap. */
static const int USE_MMAP = 0;
/* Stack of nested allocation levels; each push() records the pool's
   high-water mark so pop() can roll back to it. */
typedef struct _level_list
{
struct _level_list *next;
char *where;       /* pool high-water mark when this level was pushed */
} level_list;
/* A simple region/arena allocator over one superblock. */
typedef struct _pool {
char *mem;         /* start of the superblock */
char *where;       /* current high-water mark (next bytes to hand out) */
int size, left;    /* total size and bytes remaining */
level_list *levels;/* stack of pushed levels, innermost first */
} pool;
/* Create a pool backed by one SUPERBLOCK_SIZE superblock, obtained
   from mmap or malloc according to USE_MMAP.  The whole superblock is
   marked inaccessible until pieces are handed out by allocate(). */
pool *make_pool( void )
{
   pool *p;

   if(USE_MMAP) {
      p = (pool *)mmap(0, sizeof(pool), PROT_READ|PROT_WRITE|PROT_EXEC,
                       MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
      assert(p != MAP_FAILED);
      p->where = p->mem = (char *)mmap(NULL, SUPERBLOCK_SIZE,
                                       PROT_READ|PROT_WRITE|PROT_EXEC,
                                       MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
      assert(p->mem != MAP_FAILED);
   } else {
      p = (pool *)malloc(sizeof(pool));
      assert(p != NULL);
      p->where = p->mem = (char *)malloc(SUPERBLOCK_SIZE);
      assert(p->mem != NULL);
   }

   p->size = p->left = SUPERBLOCK_SIZE;
   p->levels = NULL;

   VALGRIND_MAKE_NOACCESS(p->where, SUPERBLOCK_SIZE);
   return p;
}
/* Enter a new allocation level: remember the pool's current
   high-water mark on the level stack, and register a fresh Valgrind
   mempool anchored at that mark. */
void push(pool *p)
{
   level_list *lvl;

   if(USE_MMAP)
      lvl = (level_list *)mmap(0, sizeof(level_list),
                               PROT_READ|PROT_WRITE|PROT_EXEC,
                               MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
   else
      lvl = (level_list *)malloc(sizeof(level_list));

   lvl->where = p->where;
   lvl->next  = p->levels;
   p->levels  = lvl;

   VALGRIND_CREATE_MEMPOOL(lvl->where, REDZONE_SIZE, 0);
}
/* Leave the current allocation level: destroy its Valgrind mempool,
   re-poison everything allocated since the matching push(), and roll
   the pool's high-water mark back to where it was. */
void pop(pool *p)
{
   level_list *top = p->levels;

   p->levels = top->next;

   VALGRIND_DESTROY_MEMPOOL(top->where);
   VALGRIND_MAKE_NOACCESS(top->where, p->where - top->where);
   p->where = top->where;

   if(USE_MMAP)
      munmap(top, sizeof(level_list));
   else
      free(top);
}
/* Tear down a pool: pop every outstanding level (which also destroys
   the corresponding Valgrind mempools), then release the superblock
   and the pool descriptor themselves. */
void destroy_pool(pool *p)
{
   /* Loop on the live list head: pop() unlinks p->levels each time.
      (The previous code cached the head in a local and tested that
      stale copy, so any pool with levels left would spin forever.) */
   while(p->levels) {
      pop(p);
   }

   if(USE_MMAP) {
      munmap(p->mem, SUPERBLOCK_SIZE);
      munmap(p, sizeof(pool));
   } else {
      free(p->mem);
      free(p);
   }
}
/* Carve 'size' bytes out of the pool, leaving a red zone of
   REDZONE_SIZE bytes on each side, and announce the payload to the
   innermost (current) mempool level.  Returns a pointer to the
   payload.  Note: pool exhaustion is not checked -- this is a
   deliberately minimal test allocator. */
char *allocate(pool *p, int size)
{
   int   total = size + (REDZONE_SIZE*2);   /* payload plus both red zones */
   char *user  = p->where + REDZONE_SIZE;   /* payload starts past leading zone */

   p->left  -= total;
   p->where += total;

   VALGRIND_MEMPOOL_ALLOC(p->levels->where, user, size);
   return user;
}
//-------------------------------------------------------------------------
// Rest
//-------------------------------------------------------------------------
/* Exercise nested mempool levels.  NOTE: the expected-output file
   pins these source line numbers -- do not reflow the statements. */
void test(void)
{
char *x1, *x2, *x3, *x4, *x5;

pool *p = make_pool();

push(p);

x1 = allocate(p, 10);
x2 = allocate(p, 20);
push(p);
x3 = allocate(p, 10);
x4 = allocate(p, 20);

*x1 = 'a'; // valid -- inside level-1 allocation
*x2 = 'b'; // valid

x1[-1] = 'h'; // invalid -- write into x1's leading red zone
x1[10] = 'i'; // invalid -- write into x1's trailing red zone

pop(p); // level 2 gone: x3/x4 are re-poisoned

*x3 = 'c'; // invalid -- freed by the pop above
*x4 = 'd'; // invalid

*x1 = 'e'; // valid -- level 1 still live
*x2 = 'f'; // valid

x5 = allocate(p, 10); // reuses the space x3 occupied
*x5 = 'g'; // valid

// pop(p);
// *x5 = 'g'; // invalid
// destroy_pool(p);
}
/* Entry point: run the mempool scenario; leaks are intentional (the
   pool is never destroyed) and checked by the expected output. */
int main(void)
{
test();
return 0;
}

View File

@ -0,0 +1,38 @@
Invalid write of size 1
at 0x........: test (mempool.c:124)
by 0x........: main (mempool.c:148)
Address 0x........ is 1 bytes before a block of size 10 client-defined
at 0x........: allocate (mempool.c:99)
by 0x........: test (mempool.c:115)
by 0x........: main (mempool.c:148)
Invalid write of size 1
at 0x........: test (mempool.c:125)
by 0x........: main (mempool.c:148)
Address 0x........ is 0 bytes after a block of size 10 client-defined
at 0x........: allocate (mempool.c:99)
by 0x........: test (mempool.c:115)
by 0x........: main (mempool.c:148)
Invalid write of size 1
at 0x........: test (mempool.c:129)
by 0x........: main (mempool.c:148)
Address 0x........ is 70 bytes inside a mempool of size 100000 client-defined
at 0x........: make_pool (mempool.c:43)
by 0x........: test (mempool.c:111)
by 0x........: main (mempool.c:148)
Invalid write of size 1
at 0x........: test (mempool.c:130)
by 0x........: main (mempool.c:148)
Address 0x........ is 96 bytes inside a mempool of size 100000 client-defined
at 0x........: make_pool (mempool.c:43)
by 0x........: test (mempool.c:111)
by 0x........: main (mempool.c:148)
20 bytes in 1 blocks are definitely lost in loss record 2 of 3
at 0x........: malloc (vg_replace_malloc.c:...)
by 0x........: make_pool (mempool.c:37)
by 0x........: test (mempool.c:111)
by 0x........: main (mempool.c:148)

View File

@ -0,0 +1,2 @@
prog: mempool
vgopts: -q --leak-check=yes