Completely overhauled the internals of Memcheck's error handling.  All the
different error kinds were reusing the same struct for storing their
details: each one used some but not all of the fields, AddrInfo had the
same problem, and the result was very confusing.

So I changed MC_Error and AddrInfo to be tagged unions, like Vex's IRExpr and
IRStmt types.  The resulting code is a little more verbose but much easier
to understand.  I also split up several error kinds, which made things
simpler.  The user-visible behaviour is identical except for a couple of
very minor things that I've documented in the NEWS file for the 3.3.0
release.
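
For readers not familiar with that style, here is a minimal sketch of what
a tagged-union error type looks like.  The tag names, field names and basic
types below are simplified stand-ins, not the actual Memcheck or VEX
definitions:

  /* Minimal illustrative sketch only -- the real MC_Error uses Valgrind's
     basic types and different tag/field names. */
  typedef unsigned long Addr;    /* stand-ins for Valgrind's basic types */
  typedef int           Int;
  typedef int           Bool;

  typedef enum {
     Err_Value,      /* use of an uninitialised value             */
     Err_Addr,       /* invalid read or write                     */
     Err_Jump,       /* jump to an unaddressable location         */
     Err_Overlap     /* overlapping src/dst in a str/mem function */
  } MC_ErrorTag;

  typedef struct {
     MC_ErrorTag tag;            /* selects which union member is valid */
     union {
        struct { Int szB; }                 Value;
        struct { Bool isWrite; Int szB; }   MemAccess;
        struct { Addr src, dst; Int szB; }  Overlap;
        /* Err_Jump needs no extra fields here: the faulting address is
           carried by the core Error, not the tool-specific part. */
     } Err;
  } MC_Error;

As with IRExpr/IRStmt, code that consumes such an error switches on the tag
and touches only the union member the tag says is valid, instead of
guessing which fields of a flat struct happen to be meaningful.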

Ideally I'd get rid of the Addr and Char* fields in the core Error type,
which are not always used, and handle them in the same way within each
tool.  But that would require changing the core/tool interface, so I'm
leaving it for the moment.
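
For context, this is roughly the shape being referred to.  Only the point
that the core Error carries an address and a string which not every error
kind uses comes from the paragraph above; the field names below are
illustrative guesses, not copied from the core sources:

  /* Illustrative sketch of the core-side Error record described above.
     Field names are invented for illustration only. */
  typedef unsigned long Addr;               /* stand-ins again           */
  typedef int           Int;
  typedef char          Char;
  typedef struct _ExeContext ExeContext;    /* opaque execution context  */

  typedef struct _Error {
     Int         ekind;    /* tool-defined error kind                  */
     Addr        addr;     /* used by some error kinds, not by others  */
     Char*       string;   /* likewise                                 */
     void*       extra;    /* tool-specific details, e.g. an MC_Error  */
     ExeContext* where;    /* where the error was detected             */
  } Error;

Moving addr and string into each tool's 'extra' (e.g. into MC_Error's
union) is exactly what the paragraph above defers, since it would change
the core/tool interface.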

git-svn-id: svn://svn.valgrind.org/valgrind/trunk@6402
commit d162731a2c (parent 52dfe4cb39)
Author: Nicholas Nethercote
Date:   2006-12-16 00:54:12 +00:00
15 changed files with 810 additions and 799 deletions

---- NEWS ----

@ -1,3 +1,24 @@
Release 3.2.0 (XX XXX 2007)
~~~~~~~~~~~~~~~~~~~~~~~~~~~
3.2.0 is a feature release with ...
XXX:
* Note in NEWS that behaviour of GET_VBITS and SET_BBITS have changed --
they no longer issue addressability errors if either array is partially
unaddressable, they just return 3. Also, SET_VBITS doesn't report
definedness errors if any of the V bits are undefined.
Other user-visible changes:
- A new suppression kind has been introduced: "Jump". This is for
suppressing jump-to-invalid-address errors. Previously you had to use an
"Addr1" suppression, which didn't make much sense.
- The behaviour of Memcheck's client requests VALGRIND_GET_VBITS and
VALGRIND_SET_VBITS have changed slightly. They no longer issue
addressability errors -- if either array is partially unaddressable,
they just return 3 (as before). Also, SET_VBITS doesn't report
definedness errors if any of the V bits are undefined.
Release 3.2.1 (16 Sept 2006)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

---- next file ----

@ -101,9 +101,6 @@ static UWord em_supplist_cmps = 0;
/*--- Error type ---*/
/*------------------------------------------------------------*/
/* Note: it is imperative this doesn't overlap with (0..) at all, as tools
* effectively extend it by defining their own enums in the (0..) range. */
/* Errors. Extensible (via the 'extra' field). Tools can use a normal
enum (with element values in the normal range (0..)) for 'ekind'.
Functions for getting/setting the tool-relevant fields are in
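
The comment above describes the contract between the core and the tools
that this rework relies on: a tool's error-kind values, whether a plain
enum or the tag of a tagged union, are a normal enum starting at 0 and are
stored in the core Error's 'ekind' field.  A minimal sketch, with names
invented purely for illustration:

  /* Hypothetical tool-side error kinds.  Per the comment above, the only
     requirement is a normal enum with values starting at 0; the core
     stores the chosen value in the Error's 'ekind' field. */
  typedef enum {
     MyTool_Err_BadRead = 0,
     MyTool_Err_BadWrite,
     MyTool_Err_BadFree
  } MyToolErrorKind;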

---- next file ----

@ -16,7 +16,7 @@
#
# For Memcheck, the supp_kinds are:
#
# Param Value1 Value2 Value4 Value8 Value16
# Param Value1 Value2 Value4 Value8 Value16 Jump
# Free Addr1 Addr2 Addr4 Addr8 Addr16
# Cond (previously known as Value0)
#

---- next file ----

@ -16,7 +16,7 @@
#
# For Memcheck, the supp_kinds are:
#
# Param Value1 Value2 Value4 Value8 Value16
# Param Value1 Value2 Value4 Value8 Value16 Jump
# Free Addr1 Addr2 Addr4 Addr8 Addr16
# Cond (previously known as Value0)
#

---- next file ----

@ -16,7 +16,7 @@
#
# For Memcheck, the supp_kinds are:
#
# Param Value1 Value2 Value4 Value8 Value16
# Param Value1 Value2 Value4 Value8 Value16 Jump
# Free Addr1 Addr2 Addr4 Addr8 Addr16
# Cond (previously known as Value0)
#

---- next file ----

@ -16,7 +16,7 @@
#
# For Memcheck, the supp_kinds are:
#
# Param Value1 Value2 Value4 Value8 Value16
# Param Value1 Value2 Value4 Value8 Value16 Jump
# Free Addr1 Addr2 Addr4 Addr8 Addr16
# Cond (previously known as Value0)
#

---- next file ----

@ -634,6 +634,11 @@ Memcheck:suppression_type]]></programlisting>
memory access of 1, 2, 4, 8 or 16 bytes respectively.</para>
</listitem>
<listitem>
<para>Or: <varname>Jump</varname>, meaning an
jump to an unaddressable location error.</para>
</listitem>
<listitem>
<para>Or: <varname>Param</varname>, meaning an
invalid system call parameter error.</para>

---- next file ----

@ -56,7 +56,7 @@ typedef
struct _MC_Chunk {
struct _MC_Chunk* next;
Addr data; // ptr to actual block
SizeT size : (sizeof(UWord)*8)-2; // size requested; 30 or 62 bits
SizeT szB : (sizeof(UWord)*8)-2; // size requested; 30 or 62 bits
MC_AllocKind allockind : 2; // which wrapper did the allocation
ExeContext* where; // where it was allocated
}
@ -119,28 +119,6 @@ extern void MC_(__builtin_vec_delete) ( ThreadId tid, void* p );
extern void* MC_(realloc) ( ThreadId tid, void* p, SizeT new_size );
/*------------------------------------------------------------*/
/*--- Errors and suppressions ---*/
/*------------------------------------------------------------*/
/* Extra info for overlap errors */
typedef
struct {
Addr src;
Addr dst;
Int len; // -1 if unused
}
OverlapExtra;
extern void MC_(record_free_error) ( ThreadId tid, Addr a );
extern void MC_(record_illegal_mempool_error) ( ThreadId tid, Addr a );
extern void MC_(record_freemismatch_error) ( ThreadId tid, Addr a,
MC_Chunk* mc );
extern Bool MC_(record_leak_error) ( ThreadId tid,
void* leak_extra,
ExeContext* where,
Bool print_record );
/*------------------------------------------------------------*/
/*--- Profiling of memory events ---*/
/*------------------------------------------------------------*/
@ -201,6 +179,21 @@ extern HChar* MC_(event_ctr_name)[N_PROF_EVENTS];
/*--- Leak checking ---*/
/*------------------------------------------------------------*/
/* A block is either
-- Proper-ly reached; a pointer to its start has been found
-- Interior-ly reached; only an interior pointer to it has been found
-- Unreached; so far, no pointers to any part of it have been found.
-- IndirectLeak; leaked, but referred to by another leaked block
*/
typedef
enum {
Unreached =0,
IndirectLeak =1,
Interior =2,
Proper =3
}
Reachedness;
/* For VALGRIND_COUNT_LEAKS client request */
extern SizeT MC_(bytes_leaked);
extern SizeT MC_(bytes_indirect);
@ -208,9 +201,6 @@ extern SizeT MC_(bytes_dubious);
extern SizeT MC_(bytes_reachable);
extern SizeT MC_(bytes_suppressed);
/* For leak checking */
extern void MC_(pp_LeakError)(void* extra);
typedef
enum {
LC_Off,
@ -219,12 +209,44 @@ typedef
}
LeakCheckMode;
/* A block record, used for generating err msgs. */
typedef
struct _LossRecord {
struct _LossRecord* next;
/* Where these lost blocks were allocated. */
ExeContext* allocated_at;
/* Their reachability. */
Reachedness loss_mode;
/* Number of blocks and total # bytes involved. */
SizeT total_bytes;
SizeT indirect_bytes;
UInt num_blocks;
}
LossRecord;
extern void MC_(do_detect_memory_leaks) (
ThreadId tid, LeakCheckMode mode,
Bool (*is_within_valid_secondary) ( Addr ),
Bool (*is_valid_aligned_word) ( Addr )
);
extern void MC_(pp_LeakError)(UInt n_this_record, UInt n_total_records,
LossRecord* l);
/*------------------------------------------------------------*/
/*--- Errors and suppressions ---*/
/*------------------------------------------------------------*/
extern void MC_(record_free_error) ( ThreadId tid, Addr a );
extern void MC_(record_illegal_mempool_error) ( ThreadId tid, Addr a );
extern void MC_(record_freemismatch_error) ( ThreadId tid, MC_Chunk* mc );
extern Bool MC_(record_leak_error) ( ThreadId tid,
UInt n_this_record,
UInt n_total_records,
LossRecord* lossRecord,
Bool print_record );
/*------------------------------------------------------------*/
/*--- Command line options + defaults ---*/
/*------------------------------------------------------------*/

---- next file ----

@ -109,21 +109,6 @@ static Addr* get_seg_starts ( /*OUT*/Int* n_acquired )
/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
/*------------------------------------------------------------*/
/* A block is either
-- Proper-ly reached; a pointer to its start has been found
-- Interior-ly reached; only an interior pointer to it has been found
-- Unreached; so far, no pointers to any part of it have been found.
-- IndirectLeak; leaked, but referred to by another leaked block
*/
typedef
enum {
Unreached =0,
IndirectLeak =1,
Interior =2,
Proper =3
}
Reachedness;
/* An entry in the mark stack */
typedef
struct {
@ -133,30 +118,6 @@ typedef
}
MarkStack;
/* A block record, used for generating err msgs. */
typedef
struct _LossRecord {
struct _LossRecord* next;
/* Where these lost blocks were allocated. */
ExeContext* allocated_at;
/* Their reachability. */
Reachedness loss_mode;
/* Number of blocks and total # bytes involved. */
SizeT total_bytes;
SizeT indirect_bytes;
UInt num_blocks;
}
LossRecord;
/* The 'extra' struct for leak errors. */
typedef
struct {
UInt n_this_record;
UInt n_total_records;
LossRecord* lossRecord;
}
LeakExtra;
/* Find the i such that ptr points at or inside the block described by
shadows[i]. Return -1 if none found. This assumes that shadows[]
has been sorted on the ->data field. */
@ -175,7 +136,7 @@ Int find_shadow_for_OLD ( Addr ptr,
for (i = 0; i < n_shadows; i++) {
PROF_EVENT(71, "find_shadow_for_OLD(loop)");
a_lo = shadows[i]->data;
a_hi = ((Addr)shadows[i]->data) + shadows[i]->size;
a_hi = ((Addr)shadows[i]->data) + shadows[i]->szB;
if (a_lo <= ptr && ptr < a_hi)
return i;
}
@ -201,14 +162,14 @@ Int find_shadow_for ( Addr ptr,
mid = (lo + hi) / 2;
a_mid_lo = shadows[mid]->data;
a_mid_hi = shadows[mid]->data + shadows[mid]->size;
a_mid_hi = shadows[mid]->data + shadows[mid]->szB;
/* Extent of block 'mid' is [a_mid_lo .. a_mid_hi).
Special-case zero-sized blocks - treat them as if they had
size 1. Not doing so causes them to not cover any address
range at all and so will never be identified as the target of
any pointer, which causes them to be incorrectly reported as
definitely leaked. */
if (shadows[mid]->size == 0)
if (shadows[mid]->szB == 0)
a_mid_hi++;
if (ptr < a_mid_lo) {
@ -243,83 +204,6 @@ static SizeT lc_scanned;
static Bool (*lc_is_within_valid_secondary) (Addr addr);
static Bool (*lc_is_valid_aligned_word) (Addr addr);
static const HChar* str_lossmode ( Reachedness lossmode )
{
const HChar *loss = "?";
switch (lossmode) {
case Unreached: loss = "definitely lost"; break;
case IndirectLeak: loss = "indirectly lost"; break;
case Interior: loss = "possibly lost"; break;
case Proper: loss = "still reachable"; break;
}
return loss;
}
static const HChar* xml_kind ( Reachedness lossmode )
{
const HChar *loss = "?";
switch (lossmode) {
case Unreached: loss = "Leak_DefinitelyLost"; break;
case IndirectLeak: loss = "Leak_IndirectlyLost"; break;
case Interior: loss = "Leak_PossiblyLost"; break;
case Proper: loss = "Leak_StillReachable"; break;
}
return loss;
}
/* Used for printing leak errors, avoids exposing the LossRecord type (which
comes in as void*, requiring a cast. */
void MC_(pp_LeakError)(void* vextra)
{
HChar* xpre = VG_(clo_xml) ? " <what>" : "";
HChar* xpost = VG_(clo_xml) ? "</what>" : "";
LeakExtra* extra = (LeakExtra*)vextra;
LossRecord* l = extra->lossRecord;
const Char *loss = str_lossmode(l->loss_mode);
if (VG_(clo_xml)) {
VG_(message)(Vg_UserMsg, " <kind>%t</kind>", xml_kind(l->loss_mode));
} else {
VG_(message)(Vg_UserMsg, "");
}
if (l->indirect_bytes) {
VG_(message)(Vg_UserMsg,
"%s%,lu (%,lu direct, %,lu indirect) bytes in %,u blocks"
" are %s in loss record %,u of %,u%s",
xpre,
l->total_bytes + l->indirect_bytes,
l->total_bytes, l->indirect_bytes, l->num_blocks,
loss, extra->n_this_record, extra->n_total_records,
xpost
);
if (VG_(clo_xml)) {
// Nb: don't put commas in these XML numbers
VG_(message)(Vg_UserMsg, " <leakedbytes>%lu</leakedbytes>",
l->total_bytes + l->indirect_bytes);
VG_(message)(Vg_UserMsg, " <leakedblocks>%u</leakedblocks>",
l->num_blocks);
}
} else {
VG_(message)(
Vg_UserMsg,
"%s%,lu bytes in %,u blocks are %s in loss record %,u of %,u%s",
xpre,
l->total_bytes, l->num_blocks,
loss, extra->n_this_record, extra->n_total_records,
xpost
);
if (VG_(clo_xml)) {
VG_(message)(Vg_UserMsg, " <leakedbytes>%d</leakedbytes>",
l->total_bytes);
VG_(message)(Vg_UserMsg, " <leakedblocks>%d</leakedblocks>",
l->num_blocks);
}
}
VG_(pp_ExeContext)(l->allocated_at);
}
SizeT MC_(bytes_leaked) = 0;
SizeT MC_(bytes_indirect) = 0;
@ -356,13 +240,13 @@ static void lc_markstack_push_WRK(Addr ptr, Int clique)
tl_assert(sh_no >= 0 && sh_no < lc_n_shadows);
tl_assert(ptr >= lc_shadows[sh_no]->data);
tl_assert(ptr < lc_shadows[sh_no]->data
+ lc_shadows[sh_no]->size
+ (lc_shadows[sh_no]->size==0 ? 1 : 0));
+ lc_shadows[sh_no]->szB
+ (lc_shadows[sh_no]->szB==0 ? 1 : 0));
if (lc_markstack[sh_no].state == Unreached) {
if (0)
VG_(printf)("pushing %p-%p\n", lc_shadows[sh_no]->data,
lc_shadows[sh_no]->data + lc_shadows[sh_no]->size);
lc_shadows[sh_no]->data + lc_shadows[sh_no]->szB);
tl_assert(lc_markstack[sh_no].next == -1);
lc_markstack[sh_no].next = lc_markstack_top;
@ -391,13 +275,13 @@ static void lc_markstack_push_WRK(Addr ptr, Int clique)
if (lc_markstack[sh_no].indirect)
VG_(printf)(" clique %d joining clique %d adding %d+%d bytes\n",
sh_no, clique,
lc_shadows[sh_no]->size, lc_markstack[sh_no].indirect);
lc_shadows[sh_no]->szB, lc_markstack[sh_no].indirect);
else
VG_(printf)(" %d joining %d adding %d\n",
sh_no, clique, lc_shadows[sh_no]->size);
sh_no, clique, lc_shadows[sh_no]->szB);
}
lc_markstack[clique].indirect += lc_shadows[sh_no]->size;
lc_markstack[clique].indirect += lc_shadows[sh_no]->szB;
lc_markstack[clique].indirect += lc_markstack[sh_no].indirect;
lc_markstack[sh_no].indirect = 0; /* shouldn't matter */
}
@ -503,7 +387,7 @@ static void lc_do_leakcheck(Int clique)
tl_assert(top >= 0 && top < lc_n_shadows);
tl_assert(lc_markstack[top].state != Unreached);
lc_scan_memory_WRK(lc_shadows[top]->data, lc_shadows[top]->size, clique);
lc_scan_memory_WRK(lc_shadows[top]->data, lc_shadows[top]->szB, clique);
}
}
@ -520,7 +404,6 @@ static void full_report(ThreadId tid)
LossRecord* errlist;
LossRecord* p;
Bool is_suppressed;
LeakExtra leak_extra;
/* Go through and group lost structures into cliques. For each
Unreached block, push it onto the mark stack, and find all the
@ -530,8 +413,8 @@ static void full_report(ThreadId tid)
pass), then the cliques are merged. */
for (i = 0; i < lc_n_shadows; i++) {
if (VG_DEBUG_CLIQUE)
VG_(printf)("cliques: %d at %p -> %s\n",
i, lc_shadows[i]->data, str_lossmode(lc_markstack[i].state));
VG_(printf)("cliques: %d at %p -> Loss state %d\n",
i, lc_shadows[i]->data, lc_markstack[i].state);
if (lc_markstack[i].state != Unreached)
continue;
@ -578,14 +461,14 @@ static void full_report(ThreadId tid)
}
if (p != NULL) {
p->num_blocks ++;
p->total_bytes += lc_shadows[i]->size;
p->total_bytes += lc_shadows[i]->szB;
p->indirect_bytes += lc_markstack[i].indirect;
} else {
n_lossrecords ++;
p = VG_(malloc)(sizeof(LossRecord));
p->loss_mode = lc_markstack[i].state;
p->allocated_at = where;
p->total_bytes = lc_shadows[i]->size;
p->total_bytes = lc_shadows[i]->szB;
p->indirect_bytes = lc_markstack[i].indirect;
p->num_blocks = 1;
p->next = errlist;
@ -619,11 +502,8 @@ static void full_report(ThreadId tid)
// Nb: because VG_(unique_error) does all the error processing
// immediately, and doesn't save the error, leakExtra can be
// stack-allocated.
leak_extra.n_this_record = i+1;
leak_extra.n_total_records = n_lossrecords;
leak_extra.lossRecord = p_min;
is_suppressed =
MC_(record_leak_error) ( tid, &leak_extra, p_min->allocated_at,
MC_(record_leak_error) ( tid, i+1, n_lossrecords, p_min,
print_record );
if (is_suppressed) {
@ -659,7 +539,7 @@ static void make_summary(void)
Int i;
for(i = 0; i < lc_n_shadows; i++) {
SizeT size = lc_shadows[i]->size;
SizeT size = lc_shadows[i]->szB;
switch(lc_markstack[i].state) {
case Unreached:
@ -745,8 +625,8 @@ find_active_shadows(UInt* n_shadows)
}
/* Possibly invalidate the malloc holding the end of this chunk. */
if (mc->size > 1) {
m = find_shadow_for(mc->data + (mc->size - 1), mallocs, n_mallocs);
if (mc->szB > 1) {
m = find_shadow_for(mc->data + (mc->szB - 1), mallocs, n_mallocs);
if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
tl_assert(*n_shadows > 0);
--(*n_shadows);
@ -814,7 +694,7 @@ void MC_(do_detect_memory_leaks) (
/* Sanity check -- make sure they don't overlap */
for (i = 0; i < lc_n_shadows-1; i++) {
tl_assert( lc_shadows[i]->data + lc_shadows[i]->size
tl_assert( lc_shadows[i]->data + lc_shadows[i]->szB
<= lc_shadows[i+1]->data );
}
@ -834,7 +714,7 @@ void MC_(do_detect_memory_leaks) (
lc_min_mallocd_addr = lc_shadows[0]->data;
lc_max_mallocd_addr = lc_shadows[lc_n_shadows-1]->data
+ lc_shadows[lc_n_shadows-1]->size;
+ lc_shadows[lc_n_shadows-1]->szB;
lc_markstack = VG_(malloc)( lc_n_shadows * sizeof(*lc_markstack) );
for (i = 0; i < lc_n_shadows; i++) {

---- next file (diff suppressed because it is too large) ----

---- next file ----

@ -82,12 +82,12 @@ static void add_to_freed_queue ( MC_Chunk* mc )
if (freed_list_end == NULL) {
tl_assert(freed_list_start == NULL);
freed_list_end = freed_list_start = mc;
freed_list_volume = mc->size;
freed_list_volume = mc->szB;
} else {
tl_assert(freed_list_end->next == NULL);
freed_list_end->next = mc;
freed_list_end = mc;
freed_list_volume += mc->size;
freed_list_volume += mc->szB;
}
mc->next = NULL;
@ -101,7 +101,7 @@ static void add_to_freed_queue ( MC_Chunk* mc )
tl_assert(freed_list_end != NULL);
mc1 = freed_list_start;
freed_list_volume -= mc1->size;
freed_list_volume -= mc1->szB;
/* VG_(printf)("volume now %d\n", freed_list_volume); */
tl_assert(freed_list_volume >= 0);
@ -125,12 +125,12 @@ MC_Chunk* MC_(get_freed_list_head)(void)
/* Allocate its shadow chunk, put it on the appropriate list. */
static
MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT size,
MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
MC_AllocKind kind)
{
MC_Chunk* mc = VG_(malloc)(sizeof(MC_Chunk));
mc->data = p;
mc->size = size;
mc->szB = szB;
mc->allockind = kind;
mc->where = VG_(record_ExeContext)(tid);
@ -174,7 +174,7 @@ static Bool complain_about_silly_args2(SizeT n, SizeT sizeB)
/* Allocate memory and note change in memory available */
__inline__
void* MC_(new_block) ( ThreadId tid,
Addr p, SizeT size, SizeT align, UInt rzB,
Addr p, SizeT szB, SizeT alignB, UInt rzB,
Bool is_zeroed, MC_AllocKind kind, VgHashTable table)
{
cmalloc_n_mallocs ++;
@ -184,22 +184,22 @@ void* MC_(new_block) ( ThreadId tid,
tl_assert(MC_AllocCustom == kind);
} else {
tl_assert(MC_AllocCustom != kind);
p = (Addr)VG_(cli_malloc)( align, size );
p = (Addr)VG_(cli_malloc)( alignB, szB );
if (!p) {
return NULL;
}
if (is_zeroed) VG_(memset)((void*)p, 0, size);
if (is_zeroed) VG_(memset)((void*)p, 0, szB);
}
// Only update this stat if allocation succeeded.
cmalloc_bs_mallocd += size;
cmalloc_bs_mallocd += szB;
VG_(HT_add_node)( table, create_MC_Chunk(tid, p, size, kind) );
VG_(HT_add_node)( table, create_MC_Chunk(tid, p, szB, kind) );
if (is_zeroed)
MC_(make_mem_defined)( p, size );
MC_(make_mem_defined)( p, szB );
else
MC_(make_mem_undefined)( p, size );
MC_(make_mem_undefined)( p, szB );
return (void*)p;
}
@ -237,12 +237,12 @@ void* MC_(__builtin_vec_new) ( ThreadId tid, SizeT n )
}
}
void* MC_(memalign) ( ThreadId tid, SizeT align, SizeT n )
void* MC_(memalign) ( ThreadId tid, SizeT alignB, SizeT n )
{
if (complain_about_silly_args(n, "memalign")) {
return NULL;
} else {
return MC_(new_block) ( tid, 0, n, align,
return MC_(new_block) ( tid, 0, n, alignB,
MC_MALLOC_REDZONE_SZB, /*is_zeroed*/False, MC_AllocMalloc,
MC_(malloc_list));
}
@ -264,7 +264,7 @@ void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
{
/* Note: make redzones noaccess again -- just in case user made them
accessible with a client request... */
MC_(make_mem_noaccess)( mc->data-rzB, mc->size + 2*rzB );
MC_(make_mem_noaccess)( mc->data-rzB, mc->szB + 2*rzB );
/* Put it out of harm's way for a while, if not from a client request */
if (MC_AllocCustom != mc->allockind) {
@ -289,7 +289,8 @@ void MC_(handle_free) ( ThreadId tid, Addr p, UInt rzB, MC_AllocKind kind )
} else {
/* check if it is a matching free() / delete / delete [] */
if (kind != mc->allockind) {
MC_(record_freemismatch_error) ( tid, p, mc );
tl_assert(p == mc->data);
MC_(record_freemismatch_error) ( tid, mc );
}
die_and_free_mem ( tid, mc, rzB );
}
@ -313,17 +314,17 @@ void MC_(__builtin_vec_delete) ( ThreadId tid, void* p )
tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocNewVec);
}
void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_size )
void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
{
MC_Chunk* mc;
void* p_new;
SizeT old_size;
SizeT old_szB;
cmalloc_n_frees ++;
cmalloc_n_mallocs ++;
cmalloc_bs_mallocd += new_size;
cmalloc_bs_mallocd += new_szB;
if (complain_about_silly_args(new_size, "realloc"))
if (complain_about_silly_args(new_szB, "realloc"))
return NULL;
/* Remove the old block */
@ -337,38 +338,39 @@ void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_size )
/* check if its a matching free() / delete / delete [] */
if (MC_AllocMalloc != mc->allockind) {
/* can not realloc a range that was allocated with new or new [] */
MC_(record_freemismatch_error) ( tid, (Addr)p_old, mc );
tl_assert((Addr)p_old == mc->data);
MC_(record_freemismatch_error) ( tid, mc );
/* but keep going anyway */
}
old_size = mc->size;
old_szB = mc->szB;
if (old_size == new_size) {
if (old_szB == new_szB) {
/* size unchanged */
mc->where = VG_(record_ExeContext)(tid);
p_new = p_old;
} else if (old_size > new_size) {
} else if (old_szB > new_szB) {
/* new size is smaller */
MC_(make_mem_noaccess)( mc->data+new_size, mc->size-new_size );
mc->size = new_size;
MC_(make_mem_noaccess)( mc->data+new_szB, mc->szB-new_szB );
mc->szB = new_szB;
mc->where = VG_(record_ExeContext)(tid);
p_new = p_old;
} else {
/* new size is bigger */
/* Get new memory */
Addr a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
Addr a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);
if (a_new) {
/* First half kept and copied, second half new, red zones as normal */
MC_(make_mem_noaccess)( a_new-MC_MALLOC_REDZONE_SZB, MC_MALLOC_REDZONE_SZB );
MC_(copy_address_range_state)( (Addr)p_old, a_new, mc->size );
MC_(make_mem_undefined)( a_new+mc->size, new_size-mc->size );
MC_(make_mem_noaccess) ( a_new+new_size, MC_MALLOC_REDZONE_SZB );
MC_(copy_address_range_state)( (Addr)p_old, a_new, mc->szB );
MC_(make_mem_undefined)( a_new+mc->szB, new_szB-mc->szB );
MC_(make_mem_noaccess) ( a_new+new_szB, MC_MALLOC_REDZONE_SZB );
/* Copy from old to new */
VG_(memcpy)((void*)a_new, p_old, mc->size);
VG_(memcpy)((void*)a_new, p_old, mc->szB);
/* Free old memory */
/* Nb: we have to allocate a new MC_Chunk for the new memory rather
@ -377,7 +379,7 @@ void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_size )
die_and_free_mem ( tid, mc, MC_MALLOC_REDZONE_SZB );
// Allocate a new chunk.
mc = create_MC_Chunk( tid, a_new, new_size, MC_AllocMalloc );
mc = create_MC_Chunk( tid, a_new, new_szB, MC_AllocMalloc );
}
p_new = (void*)a_new;
@ -453,7 +455,7 @@ void MC_(destroy_mempool)(Addr pool)
while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
/* Note: make redzones noaccess again -- just in case user made them
accessible with a client request... */
MC_(make_mem_noaccess)(mc->data-mp->rzB, mc->size + 2*mp->rzB );
MC_(make_mem_noaccess)(mc->data-mp->rzB, mc->szB + 2*mp->rzB );
}
// Destroy the chunk table
VG_(HT_destruct)(mp->chunks);
@ -517,7 +519,7 @@ check_mempool_sane(MC_Mempool* mp)
/* Sanity check -- make sure they don't overlap */
for (i = 0; i < n_chunks-1; i++) {
if (chunks[i]->data + chunks[i]->size > chunks[i+1]->data ) {
if (chunks[i]->data + chunks[i]->szB > chunks[i+1]->data ) {
VG_(message)(Vg_UserMsg,
"Mempool chunk %d / %d overlaps with its successor",
i+1, n_chunks);
@ -534,9 +536,9 @@ check_mempool_sane(MC_Mempool* mp)
"Mempool chunk %d / %d: %d bytes [%x,%x), allocated:",
i+1,
n_chunks,
chunks[i]->size,
chunks[i]->szB,
chunks[i]->data,
chunks[i]->data + chunks[i]->size);
chunks[i]->data + chunks[i]->szB);
VG_(pp_ExeContext)(chunks[i]->where);
}
@ -544,12 +546,12 @@ check_mempool_sane(MC_Mempool* mp)
VG_(free)(chunks);
}
void MC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT size)
void MC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT szB)
{
MC_Mempool* mp;
if (VG_(clo_verbosity) > 2) {
VG_(message)(Vg_UserMsg, "mempool_alloc(%p, %p, %d)", pool, addr, size);
VG_(message)(Vg_UserMsg, "mempool_alloc(%p, %p, %d)", pool, addr, szB);
VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
}
@ -558,7 +560,7 @@ void MC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT size)
MC_(record_illegal_mempool_error) ( tid, pool );
} else {
check_mempool_sane(mp);
MC_(new_block)(tid, addr, size, /*ignored*/0, mp->rzB, mp->is_zeroed,
MC_(new_block)(tid, addr, szB, /*ignored*/0, mp->rzB, mp->is_zeroed,
MC_AllocCustom, mp->chunks);
check_mempool_sane(mp);
}
@ -591,7 +593,7 @@ void MC_(mempool_free)(Addr pool, Addr addr)
if (VG_(clo_verbosity) > 2) {
VG_(message)(Vg_UserMsg,
"mempool_free(%p, %p) freed chunk of %d bytes",
pool, addr, mc->size);
pool, addr, mc->szB);
}
die_and_free_mem ( tid, mc, mp->rzB );
@ -599,7 +601,7 @@ void MC_(mempool_free)(Addr pool, Addr addr)
}
void MC_(mempool_trim)(Addr pool, Addr addr, SizeT size)
void MC_(mempool_trim)(Addr pool, Addr addr, SizeT szB)
{
MC_Mempool* mp;
MC_Chunk* mc;
@ -608,7 +610,7 @@ void MC_(mempool_trim)(Addr pool, Addr addr, SizeT size)
VgHashNode** chunks;
if (VG_(clo_verbosity) > 2) {
VG_(message)(Vg_UserMsg, "mempool_trim(%p, %p, %d)", pool, addr, size);
VG_(message)(Vg_UserMsg, "mempool_trim(%p, %p, %d)", pool, addr, szB);
VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
}
@ -633,9 +635,9 @@ void MC_(mempool_trim)(Addr pool, Addr addr, SizeT size)
mc = (MC_Chunk*) chunks[i];
lo = mc->data;
hi = mc->size == 0 ? mc->data : mc->data + mc->size - 1;
hi = mc->szB == 0 ? mc->data : mc->data + mc->szB - 1;
#define EXTENT_CONTAINS(x) ((addr <= (x)) && ((x) < addr + size))
#define EXTENT_CONTAINS(x) ((addr <= (x)) && ((x) < addr + szB))
if (EXTENT_CONTAINS(lo) && EXTENT_CONTAINS(hi)) {
@ -680,12 +682,12 @@ void MC_(mempool_trim)(Addr pool, Addr addr, SizeT size)
lo = mc->data;
}
if (mc->data + size > addr + size) {
max = mc->data + size;
hi = addr + size;
if (mc->data + szB > addr + szB) {
max = mc->data + szB;
hi = addr + szB;
} else {
max = addr + size;
hi = mc->data + size;
max = addr + szB;
hi = mc->data + szB;
}
tl_assert(min <= lo);
@ -701,7 +703,7 @@ void MC_(mempool_trim)(Addr pool, Addr addr, SizeT size)
}
mc->data = lo;
mc->size = (UInt) (hi - lo);
mc->szB = (UInt) (hi - lo);
VG_(HT_add_node)( mp->chunks, mc );
}
@ -734,7 +736,7 @@ void MC_(move_mempool)(Addr poolA, Addr poolB)
VG_(HT_add_node)( MC_(mempool_list), mp );
}
void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT size)
void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT szB)
{
MC_Mempool* mp;
MC_Chunk* mc;
@ -742,7 +744,7 @@ void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT size)
if (VG_(clo_verbosity) > 2) {
VG_(message)(Vg_UserMsg, "mempool_change(%p, %p, %p, %d)",
pool, addrA, addrB, size);
pool, addrA, addrB, szB);
VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
}
@ -761,7 +763,7 @@ void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT size)
}
mc->data = addrB;
mc->size = size;
mc->szB = szB;
VG_(HT_add_node)( mp->chunks, mc );
check_mempool_sane(mp);
@ -798,7 +800,7 @@ void MC_(print_malloc_stats) ( void )
VG_(HT_ResetIter)(MC_(malloc_list));
while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
nblocks++;
nbytes += mc->size;
nbytes += mc->szB;
}
VG_(message)(Vg_UserMsg,

---- next file ----

@ -89,31 +89,12 @@ Bool is_overlap ( void* dst, const void* src, SizeT dstlen, SizeT srclen )
// This is a macro rather than a function because we don't want to have an
// extra function in the stack trace.
#define RECORD_OVERLAP_ERROR(s, p_extra) \
#define RECORD_OVERLAP_ERROR(s, src, dst, len) \
{ \
Word unused_res; \
VALGRIND_DO_CLIENT_REQUEST(unused_res, 0, \
_VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR, \
s, p_extra, 0, 0, 0); \
}
static __inline__
void complain2 ( Char* s, char* dst, const char* src )
{
OverlapExtra extra = {
.src = (Addr)src, .dst = (Addr)dst, .len = -1,
};
RECORD_OVERLAP_ERROR( s, &extra );
}
static __inline__
void complain3 ( Char* s, void* dst, const void* src, int n )
{
/* Must wrap it up here, because we cannot pass 4 args to core */
OverlapExtra extra = {
.src = (Addr)src, .dst = (Addr)dst, .len = n,
};
RECORD_OVERLAP_ERROR( s, &extra );
s, src, dst, len, 0); \
}
/* --------- Some handy Z-encoded names. --------- */
@ -197,7 +178,7 @@ STRCHR(m_ld_linux_x86_64_so_2, index)
src_orig, \
(Addr)dst-(Addr)dst_orig+1, \
(Addr)src-(Addr)src_orig+1)) \
complain2("strcat", dst_orig, src_orig); \
RECORD_OVERLAP_ERROR("strcat", dst_orig, src_orig, 0); \
\
return dst_orig; \
}
@ -225,7 +206,7 @@ STRCAT(m_libc_soname, strcat)
src_orig, \
(Addr)dst-(Addr)dst_orig+1, \
(Addr)src-(Addr)src_orig+1)) \
complain3("strncat", dst_orig, src_orig, n); \
RECORD_OVERLAP_ERROR("strncat", dst_orig, src_orig, n); \
\
return dst_orig; \
}
@ -279,7 +260,7 @@ STRLEN(m_ld_linux_x86_64_so_2, strlen)
src_orig, \
(Addr)dst-(Addr)dst_orig+1, \
(Addr)src-(Addr)src_orig+1)) \
complain2("strcpy", dst_orig, src_orig); \
RECORD_OVERLAP_ERROR("strcpy", dst_orig, src_orig, 0); \
\
return dst_orig; \
}
@ -301,7 +282,7 @@ STRCPY(m_libc_soname, strcpy)
/* Check for overlap after copying; all n bytes of dst are relevant, */ \
/* but only m+1 bytes of src if terminator was found */ \
if (is_overlap(dst_orig, src_orig, n, (m < n) ? m+1 : n)) \
complain3("strncpy", dst, src, n); \
RECORD_OVERLAP_ERROR("strncpy", dst, src, n); \
while (m++ < n) *dst++ = 0; /* must pad remainder with nulls */ \
\
return dst_orig; \
@ -386,7 +367,7 @@ MEMCHR(m_libc_soname, memchr)
return dst; \
\
if (is_overlap(dst, src, len, len)) \
complain3("memcpy", dst, src, len); \
RECORD_OVERLAP_ERROR("memcpy", dst, src, len); \
\
if ( dst > src ) { \
d = (char *)dst + len - 1; \
@ -469,7 +450,7 @@ MEMCMP(m_libc_soname, bcmp)
src_orig, \
(Addr)dst-(Addr)dst_orig+1, \
(Addr)src-(Addr)src_orig+1)) \
complain2("stpcpy", dst_orig, src_orig); \
RECORD_OVERLAP_ERROR("stpcpy", dst_orig, src_orig, 0); \
\
return dst; \
}

---- next file ----

@ -1,7 +1,7 @@
# This contains a match against a "???" entry
{
<insert a suppression name here>
Memcheck:Addr1
Memcheck:Jump
obj:*
fun:(below main)
}

---- next file ----

@ -16,7 +16,7 @@
#
# For Memcheck, the supp_kinds are:
#
# Param Value1 Value2 Value4 Value8 Value16
# Param Value1 Value2 Value4 Value8 Value16 Jump
# Free Addr1 Addr2 Addr4 Addr8 Addr16
# Cond (previously known as Value0)
#

---- next file ----

@ -14,9 +14,9 @@
# (optionally: caller3 name)
# }
#
# For memcheck, the supp_kinds are:
# For Memcheck, the supp_kinds are:
#
# Param Value1 Value2 Value4 Value8 Value16
# Param Value1 Value2 Value4 Value8 Value16 Jump
# Free Addr1 Addr2 Addr4 Addr8 Addr16
# Cond (previously known as Value0)
#