Merge memcheck/ changes from branches/MESSAGING_TIDYUP r10464.

See trunk r10465 commit message for details.



git-svn-id: svn://svn.valgrind.org/valgrind/trunk@10467
Julian Seward 2009-07-15 14:49:40 +00:00
parent c020a7e3b6
commit 4f730a3c8e
9 changed files with 511 additions and 341 deletions

View File

@ -41,6 +41,7 @@
#include "pub_tool_tooliface.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_debuginfo.h" // VG_(get_dataname_and_offset)
#include "pub_tool_xarray.h"
#include "mc_include.h"
@ -109,11 +110,10 @@ struct _AddrInfo {
PtrdiffT offset;
} DataSym;
// Is described by Dwarf debug info. Arbitrary strings. Must
// be the same length.
// Is described by Dwarf debug info. XArray*s of HChar.
struct {
Char descr1[96];
Char descr2[96];
XArray* /* of HChar */ descr1;
XArray* /* of HChar */ descr2;
} Variable;
// Could only narrow it down to be the PLT/GOT/etc of a given
@ -258,6 +258,41 @@ struct _MC_Error {
/*--- Printing errors ---*/
/*------------------------------------------------------------*/
/* This is the "this error is due to be printed shortly; so have a
look at it and print any preamble you want" function. Which, in
Memcheck, we don't use. Hence a no-op.
*/
void MC_(before_pp_Error) ( Error* err ) {
}
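/* A hypothetical sketch, not part of this commit: a tool that did want
   a per-error preamble could print one from this hook, e.g. along the
   lines below.  The XML branch assumes VG_(printf_xml) is the non-va
   counterpart of the VG_(vprintf_xml) call used in emit_WRK below. */
static void example_before_pp_Error ( Error* err )
{
   if (VG_(clo_xml))
      VG_(printf_xml)( "  <!-- error report follows -->\n" );
   else
      VG_(message)( Vg_UserMsg, "An error report follows:\n" );
}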
/* Do a printf-style operation on either the XML or normal output
channel, depending on the setting of VG_(clo_xml).
*/
static void emit_WRK ( HChar* format, va_list vargs )
{
if (VG_(clo_xml)) {
VG_(vprintf_xml)(format, vargs);
} else {
VG_(vmessage)(Vg_UserMsg, format, vargs);
}
}
static void emit ( HChar* format, ... ) PRINTF_CHECK(1, 2);
static void emit ( HChar* format, ... )
{
va_list vargs;
va_start(vargs, format);
emit_WRK(format, vargs);
va_end(vargs);
}
static void emiN ( HChar* format, ... ) /* NO FORMAT CHECK */
{
va_list vargs;
va_start(vargs, format);
emit_WRK(format, vargs);
va_end(vargs);
}
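/* Usage sketch (illustrative only, not code from this commit): emit is
   format-checked via PRINTF_CHECK(1,2) and so accepts only standard
   conversions; emiN is the unchecked variant needed for the
   non-standard %t conversion used below for error strings and symbol
   names.  The values here are made up. */
static void example_emit_usage ( void )
{
   SizeT szB = 4;
   emit( "Invalid read of size %ld\n", szB );
   emiN( "  <what>Syscall param %t contains "
         "uninitialised byte(s)</what>\n", "write(buf)" );
}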
static void mc_pp_AddrInfo ( Addr a, AddrInfo* ai, Bool maybe_gcc )
{
HChar* xpre = VG_(clo_xml) ? " <auxwhat>" : " ";
@ -266,23 +301,19 @@ static void mc_pp_AddrInfo ( Addr a, AddrInfo* ai, Bool maybe_gcc )
switch (ai->tag) {
case Addr_Unknown:
if (maybe_gcc) {
VG_(message)(Vg_UserMsg,
"%sAddress 0x%llx is just below the stack ptr. "
"To suppress, use: --workaround-gcc296-bugs=yes%s",
xpre, (ULong)a, xpost
);
emit( "%sAddress 0x%llx is just below the stack ptr. "
"To suppress, use: --workaround-gcc296-bugs=yes%s\n",
xpre, (ULong)a, xpost );
} else {
VG_(message)(Vg_UserMsg,
"%sAddress 0x%llx "
"is not stack'd, malloc'd or (recently) free'd%s",
xpre, (ULong)a, xpost);
emit( "%sAddress 0x%llx "
"is not stack'd, malloc'd or (recently) free'd%s\n",
xpre, (ULong)a, xpost );
}
break;
case Addr_Stack:
VG_(message)(Vg_UserMsg,
"%sAddress 0x%llx is on thread %d's stack%s",
xpre, (ULong)a, ai->Addr.Stack.tid, xpost);
emit( "%sAddress 0x%llx is on thread %d's stack%s\n",
xpre, (ULong)a, ai->Addr.Stack.tid, xpost );
break;
case Addr_Block: {
@ -301,47 +332,51 @@ static void mc_pp_AddrInfo ( Addr a, AddrInfo* ai, Bool maybe_gcc )
delta = rwoffset;
relative = "inside";
}
VG_(message)(Vg_UserMsg,
"%sAddress 0x%lx is %'lu bytes %s a %s of size %'lu %s%s",
emit(
"%sAddress 0x%lx is %'lu bytes %s a %s of size %'lu %s%s\n",
xpre,
a, delta, relative, ai->Addr.Block.block_desc,
block_szB,
ai->Addr.Block.block_kind==Block_Mallocd ? "alloc'd"
: ai->Addr.Block.block_kind==Block_Freed ? "free'd"
: "client-defined",
xpost);
xpost
);
VG_(pp_ExeContext)(ai->Addr.Block.lastchange);
break;
}
case Addr_DataSym:
VG_(message_no_f_c)(Vg_UserMsg,
"%sAddress 0x%llx is %llu bytes "
"inside data symbol \"%t\"%s",
xpre,
(ULong)a,
(ULong)ai->Addr.DataSym.offset,
ai->Addr.DataSym.name,
xpost);
emiN( "%sAddress 0x%llx is %llu bytes "
"inside data symbol \"%t\"%s\n",
xpre,
(ULong)a,
(ULong)ai->Addr.DataSym.offset,
ai->Addr.DataSym.name,
xpost );
break;
case Addr_Variable:
if (ai->Addr.Variable.descr1[0] != '\0')
VG_(message)(Vg_UserMsg, "%s%s%s",
xpre, ai->Addr.Variable.descr1, xpost);
if (ai->Addr.Variable.descr2[0] != '\0')
VG_(message)(Vg_UserMsg, "%s%s%s",
xpre, ai->Addr.Variable.descr2, xpost);
/* Note, no need for XML tags here, because descr1/2 will
already have <auxwhat> or <xauxwhat>s on them, in XML
mode. */
if (ai->Addr.Variable.descr1)
emit( "%s%s\n",
VG_(clo_xml) ? " " : " ",
(HChar*)VG_(indexXA)(ai->Addr.Variable.descr1, 0) );
if (ai->Addr.Variable.descr2)
emit( "%s%s\n",
VG_(clo_xml) ? " " : " ",
(HChar*)VG_(indexXA)(ai->Addr.Variable.descr2, 0) );
break;
case Addr_SectKind:
VG_(message_no_f_c)(Vg_UserMsg,
"%sAddress 0x%llx is in the %t segment of %t%s",
xpre,
(ULong)a,
VG_(pp_SectKind)(ai->Addr.SectKind.kind),
ai->Addr.SectKind.objname,
xpost);
emiN( "%sAddress 0x%llx is in the %t segment of %t%s\n",
xpre,
(ULong)a,
VG_(pp_SectKind)(ai->Addr.SectKind.kind),
ai->Addr.SectKind.objname,
xpost );
break;
default:
@ -373,28 +408,9 @@ static const HChar* xml_leak_kind ( Reachedness lossmode )
return loss;
}
static void mc_pp_msg( Char* xml_name, Error* err, const HChar* format, ... )
{
HChar* xpre = VG_(clo_xml) ? " <what>" : "";
HChar* xpost = VG_(clo_xml) ? "</what>" : "";
Char buf[256];
va_list vargs;
if (VG_(clo_xml))
VG_(message)(Vg_UserMsg, " <kind>%s</kind>", xml_name);
// Stick xpre and xpost on the front and back of the format string.
VG_(snprintf)(buf, 256, "%s%s%s", xpre, format, xpost);
va_start(vargs, format);
VG_(vmessage) ( Vg_UserMsg, buf, vargs );
va_end(vargs);
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
}
static void mc_pp_origin ( ExeContext* ec, UInt okind )
{
HChar* src = NULL;
HChar* xpre = VG_(clo_xml) ? " <what>" : " ";
HChar* xpost = VG_(clo_xml) ? "</what>" : "";
HChar* src = NULL;
tl_assert(ec);
switch (okind) {
@ -406,208 +422,331 @@ static void mc_pp_origin ( ExeContext* ec, UInt okind )
tl_assert(src); /* guards against invalid 'okind' */
if (VG_(clo_xml)) {
VG_(message)(Vg_UserMsg, " <origin>");
}
VG_(message)(Vg_UserMsg, "%sUninitialised value was created%s%s",
xpre, src, xpost);
VG_(pp_ExeContext)( ec );
if (VG_(clo_xml)) {
VG_(message)(Vg_UserMsg, " </origin>");
emit( " <auxwhat>Uninitialised value was created%s</auxwhat>\n",
src);
VG_(pp_ExeContext)( ec );
} else {
emit( " Uninitialised value was created%s\n", src);
VG_(pp_ExeContext)( ec );
}
}
void MC_(pp_Error) ( Error* err )
{
const Bool xml = VG_(clo_xml); /* a shorthand */
MC_Error* extra = VG_(get_error_extra)(err);
switch (VG_(get_error_kind)(err)) {
case Err_CoreMem: {
case Err_CoreMem:
/* What the hell *is* a CoreMemError? jrs 2005-May-18 */
/* As of 2006-Dec-14, it's caused by unaddressable bytes in a
signal handler frame. --njn */
mc_pp_msg("CoreMemError", err,
"%s contains unaddressable byte(s)",
VG_(get_error_string)(err));
// JRS 17 May 09: None of our regtests exercise this; hence AFAIK
// the following code is untested. Bad.
if (xml) {
emit( " <kind>CoreMemError</kind>\n" );
emiN( " <what>%t contains unaddressable byte(s)</what>\n",
VG_(get_error_string)(err));
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
} else {
emit( "%s contains unaddressable byte(s)\n",
VG_(get_error_string)(err));
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
}
break;
}
case Err_Value:
MC_(any_value_errors) = True;
if (1 || extra->Err.Value.otag == 0) {
mc_pp_msg("UninitValue", err,
"Use of uninitialised value of size %d",
extra->Err.Value.szB);
if (xml) {
emit( " <kind>UninitValue</kind>\n" );
emit( " <what>Use of uninitialised value of size %ld</what>\n",
extra->Err.Value.szB );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
if (extra->Err.Value.origin_ec)
mc_pp_origin( extra->Err.Value.origin_ec,
extra->Err.Value.otag & 3 );
} else {
mc_pp_msg("UninitValue", err,
"Use of uninitialised value of size %d (otag %u)",
extra->Err.Value.szB, extra->Err.Value.otag);
/* Could also show extra->Err.Cond.otag if debugging origin
tracking */
emit( "Use of uninitialised value of size %ld\n",
extra->Err.Value.szB );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
if (extra->Err.Value.origin_ec)
mc_pp_origin( extra->Err.Value.origin_ec,
extra->Err.Value.otag & 3 );
}
if (extra->Err.Value.origin_ec)
mc_pp_origin( extra->Err.Value.origin_ec,
extra->Err.Value.otag & 3 );
break;
case Err_Cond:
MC_(any_value_errors) = True;
if (1 || extra->Err.Cond.otag == 0) {
mc_pp_msg("UninitCondition", err,
"Conditional jump or move depends"
" on uninitialised value(s)");
if (xml) {
emit( " <kind>UninitCondition</kind>\n" );
emit( " <what>Conditional jump or move depends"
" on uninitialised value(s)</what>\n" );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
if (extra->Err.Cond.origin_ec)
mc_pp_origin( extra->Err.Cond.origin_ec,
extra->Err.Cond.otag & 3 );
} else {
mc_pp_msg("UninitCondition", err,
"Conditional jump or move depends"
" on uninitialised value(s) (otag %u)",
extra->Err.Cond.otag);
/* Could also show extra->Err.Cond.otag if debugging origin
tracking */
emit( "Conditional jump or move depends"
" on uninitialised value(s)\n" );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
if (extra->Err.Cond.origin_ec)
mc_pp_origin( extra->Err.Cond.origin_ec,
extra->Err.Cond.otag & 3 );
}
if (extra->Err.Cond.origin_ec)
mc_pp_origin( extra->Err.Cond.origin_ec,
extra->Err.Cond.otag & 3 );
break;
case Err_RegParam:
MC_(any_value_errors) = True;
mc_pp_msg("SyscallParam", err,
"Syscall param %s contains uninitialised byte(s)",
VG_(get_error_string)(err));
if (extra->Err.RegParam.origin_ec)
mc_pp_origin( extra->Err.RegParam.origin_ec,
extra->Err.RegParam.otag & 3 );
if (xml) {
emit( " <kind>SyscallParam</kind>\n" );
emiN( " <what>Syscall param %t contains "
"uninitialised byte(s)</what>\n",
VG_(get_error_string)(err) );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
if (extra->Err.RegParam.origin_ec)
mc_pp_origin( extra->Err.RegParam.origin_ec,
extra->Err.RegParam.otag & 3 );
} else {
emit( "Syscall param %s contains uninitialised byte(s)\n",
VG_(get_error_string)(err) );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
if (extra->Err.RegParam.origin_ec)
mc_pp_origin( extra->Err.RegParam.origin_ec,
extra->Err.RegParam.otag & 3 );
}
break;
case Err_MemParam:
if (!extra->Err.MemParam.isAddrErr)
MC_(any_value_errors) = True;
mc_pp_msg("SyscallParam", err,
"Syscall param %s points to %s byte(s)",
VG_(get_error_string)(err),
( extra->Err.MemParam.isAddrErr
? "unaddressable" : "uninitialised" ));
mc_pp_AddrInfo(VG_(get_error_address)(err),
&extra->Err.MemParam.ai, False);
if (extra->Err.MemParam.origin_ec && !extra->Err.MemParam.isAddrErr)
mc_pp_origin( extra->Err.MemParam.origin_ec,
extra->Err.MemParam.otag & 3 );
if (xml) {
emit( " <kind>SyscallParam</kind>\n" );
emiN( " <what>Syscall param %t points to %s byte(s)</what>\n",
VG_(get_error_string)(err),
extra->Err.MemParam.isAddrErr
? "unaddressable" : "uninitialised" );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
mc_pp_AddrInfo(VG_(get_error_address)(err),
&extra->Err.MemParam.ai, False);
if (extra->Err.MemParam.origin_ec
&& !extra->Err.MemParam.isAddrErr)
mc_pp_origin( extra->Err.MemParam.origin_ec,
extra->Err.MemParam.otag & 3 );
} else {
emit( "Syscall param %s points to %s byte(s)\n",
VG_(get_error_string)(err),
extra->Err.MemParam.isAddrErr
? "unaddressable" : "uninitialised" );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
mc_pp_AddrInfo(VG_(get_error_address)(err),
&extra->Err.MemParam.ai, False);
if (extra->Err.MemParam.origin_ec
&& !extra->Err.MemParam.isAddrErr)
mc_pp_origin( extra->Err.MemParam.origin_ec,
extra->Err.MemParam.otag & 3 );
}
break;
case Err_User:
if (!extra->Err.User.isAddrErr)
MC_(any_value_errors) = True;
mc_pp_msg("ClientCheck", err,
"%s byte(s) found during client check request",
( extra->Err.User.isAddrErr
? "Unaddressable" : "Uninitialised" ));
mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.User.ai,
False);
if (extra->Err.User.origin_ec && !extra->Err.User.isAddrErr)
mc_pp_origin( extra->Err.User.origin_ec,
extra->Err.User.otag & 3 );
if (xml) {
emit( " <kind>ClientCheck</kind>\n" );
emit( " <what>%s byte(s) found "
"during client check request</what>\n",
extra->Err.User.isAddrErr
? "Unaddressable" : "Uninitialised" );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.User.ai,
False);
if (extra->Err.User.origin_ec && !extra->Err.User.isAddrErr)
mc_pp_origin( extra->Err.User.origin_ec,
extra->Err.User.otag & 3 );
} else {
emit( "%s byte(s) found during client check request\n",
extra->Err.User.isAddrErr
? "Unaddressable" : "Uninitialised" );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.User.ai,
False);
if (extra->Err.User.origin_ec && !extra->Err.User.isAddrErr)
mc_pp_origin( extra->Err.User.origin_ec,
extra->Err.User.otag & 3 );
}
break;
case Err_Free:
mc_pp_msg("InvalidFree", err,
"Invalid free() / delete / delete[]");
mc_pp_AddrInfo(VG_(get_error_address)(err),
&extra->Err.Free.ai, False);
if (xml) {
emit( " <kind>InvalidFree</kind>\n" );
emit( " <what>Invalid free() / delete / delete[]</what>\n" );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
mc_pp_AddrInfo( VG_(get_error_address)(err),
&extra->Err.Free.ai, False );
} else {
emit( "Invalid free() / delete / delete[]\n" );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
mc_pp_AddrInfo( VG_(get_error_address)(err),
&extra->Err.Free.ai, False );
}
break;
case Err_FreeMismatch:
mc_pp_msg("MismatchedFree", err,
"Mismatched free() / delete / delete []");
mc_pp_AddrInfo(VG_(get_error_address)(err),
&extra->Err.FreeMismatch.ai, False);
if (xml) {
emit( " <kind>MismatchedFree</kind>\n" );
emit( " <what>Mismatched free() / delete / delete []</what>\n" );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
mc_pp_AddrInfo(VG_(get_error_address)(err),
&extra->Err.FreeMismatch.ai, False);
} else {
emit( "Mismatched free() / delete / delete []\n" );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
mc_pp_AddrInfo(VG_(get_error_address)(err),
&extra->Err.FreeMismatch.ai, False);
}
break;
case Err_Addr:
if (extra->Err.Addr.isWrite) {
mc_pp_msg("InvalidWrite", err,
"Invalid write of size %d",
extra->Err.Addr.szB);
if (xml) {
emit( " <kind>Invalid%s</kind>\n",
extra->Err.Addr.isWrite ? "Write" : "Read" );
emit( " <what>Invalid %s of size %ld</what>\n",
extra->Err.Addr.isWrite ? "write" : "read",
extra->Err.Addr.szB );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
mc_pp_AddrInfo( VG_(get_error_address)(err),
&extra->Err.Addr.ai,
extra->Err.Addr.maybe_gcc );
} else {
mc_pp_msg("InvalidRead", err,
"Invalid read of size %d",
extra->Err.Addr.szB);
emit( "Invalid %s of size %ld\n",
extra->Err.Addr.isWrite ? "write" : "read",
extra->Err.Addr.szB );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
mc_pp_AddrInfo( VG_(get_error_address)(err),
&extra->Err.Addr.ai,
extra->Err.Addr.maybe_gcc );
}
mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.Addr.ai,
extra->Err.Addr.maybe_gcc);
break;
case Err_Jump:
mc_pp_msg("InvalidJump", err,
"Jump to the invalid address stated on the next line");
mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.Jump.ai,
False);
if (xml) {
emit( " <kind>InvalidJump</kind>\n" );
emit( " <what>Jump to the invalid address stated "
"on the next line</what>\n" );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
mc_pp_AddrInfo( VG_(get_error_address)(err), &extra->Err.Jump.ai,
False );
} else {
emit( "Jump to the invalid address stated on the next line\n" );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
mc_pp_AddrInfo( VG_(get_error_address)(err), &extra->Err.Jump.ai,
False );
}
break;
case Err_Overlap:
if (extra->Err.Overlap.szB == 0)
mc_pp_msg("Overlap", err,
"Source and destination overlap in %s(%p, %p)",
VG_(get_error_string)(err),
extra->Err.Overlap.dst, extra->Err.Overlap.src);
else
mc_pp_msg("Overlap", err,
"Source and destination overlap in %s(%p, %p, %d)",
VG_(get_error_string)(err),
extra->Err.Overlap.dst, extra->Err.Overlap.src,
extra->Err.Overlap.szB);
if (xml) {
emit( " <kind>Overlap</kind>\n" );
if (extra->Err.Overlap.szB == 0) {
emiN( " <what>Source and destination overlap "
"in %t(%#lx, %#lx)\n</what>\n",
VG_(get_error_string)(err),
extra->Err.Overlap.dst, extra->Err.Overlap.src );
} else {
emit( " <what>Source and destination overlap "
"in %s(%#lx, %#lx, %d)</what>\n",
VG_(get_error_string)(err),
extra->Err.Overlap.dst, extra->Err.Overlap.src,
extra->Err.Overlap.szB );
}
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
} else {
if (extra->Err.Overlap.szB == 0) {
emiN( "Source and destination overlap in %t(%#lx, %#lx)\n",
VG_(get_error_string)(err),
extra->Err.Overlap.dst, extra->Err.Overlap.src );
} else {
emit( "Source and destination overlap in %s(%#lx, %#lx, %d)\n",
VG_(get_error_string)(err),
extra->Err.Overlap.dst, extra->Err.Overlap.src,
extra->Err.Overlap.szB );
}
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
}
break;
case Err_IllegalMempool:
mc_pp_msg("InvalidMemPool", err,
"Illegal memory pool address");
mc_pp_AddrInfo(VG_(get_error_address)(err),
&extra->Err.IllegalMempool.ai, False);
// JRS 17 May 09: None of our regtests exercise this; hence AFAIK
// the following code is untested. Bad.
if (xml) {
emit( " <kind>InvalidMemPool</kind>\n" );
emit( " <what>Illegal memory pool address</what>\n" );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
mc_pp_AddrInfo( VG_(get_error_address)(err),
&extra->Err.IllegalMempool.ai, False );
} else {
emit( "Illegal memory pool address\n" );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
mc_pp_AddrInfo( VG_(get_error_address)(err),
&extra->Err.IllegalMempool.ai, False );
}
break;
case Err_Leak: {
HChar* xpre = VG_(clo_xml) ? " <what>" : "";
HChar* xpost = VG_(clo_xml) ? "</what>" : "";
UInt n_this_record = extra->Err.Leak.n_this_record;
UInt n_total_records = extra->Err.Leak.n_total_records;
LossRecord* lr = extra->Err.Leak.lr;
if (VG_(clo_xml)) {
VG_(message_no_f_c)(Vg_UserMsg, " <kind>%t</kind>",
xml_leak_kind(lr->key.state));
} else {
VG_(message)(Vg_UserMsg, "");
}
if (lr->indirect_szB > 0) {
VG_(message)(Vg_UserMsg,
"%s%'lu (%'lu direct, %'lu indirect) bytes in %'u blocks"
" are %s in loss record %'u of %'u%s",
xpre,
lr->szB + lr->indirect_szB, lr->szB, lr->indirect_szB,
lr->num_blocks,
str_leak_lossmode(lr->key.state), n_this_record, n_total_records,
xpost
);
if (VG_(clo_xml)) {
if (xml) {
emit(" <kind>%s</kind>\n", xml_leak_kind(lr->key.state));
if (lr->indirect_szB > 0) {
emit( " <xwhat>\n" );
emit( " <text>%'lu (%'lu direct, %'lu indirect) bytes "
"in %'u blocks"
" are %s in loss record %'u of %'u</text>\n",
lr->szB + lr->indirect_szB, lr->szB, lr->indirect_szB,
lr->num_blocks,
str_leak_lossmode(lr->key.state),
n_this_record, n_total_records );
// Nb: don't put commas in these XML numbers
VG_(message)(Vg_UserMsg, " <leakedbytes>%lu</leakedbytes>",
lr->szB + lr->indirect_szB);
VG_(message)(Vg_UserMsg, " <leakedblocks>%u</leakedblocks>",
lr->num_blocks);
emit( " <leakedbytes>%lu</leakedbytes>\n",
lr->szB + lr->indirect_szB );
emit( " <leakedblocks>%u</leakedblocks>\n", lr->num_blocks );
emit( " </xwhat>\n" );
} else {
emit( " <xwhat>\n" );
emit( " <text>%'lu bytes in %'u blocks"
" are %s in loss record %'u of %'u</text>\n",
lr->szB, lr->num_blocks,
str_leak_lossmode(lr->key.state),
n_this_record, n_total_records );
emit( " <leakedbytes>%ld</leakedbytes>\n", lr->szB);
emit( " <leakedblocks>%d</leakedblocks>\n", lr->num_blocks);
emit( " </xwhat>\n" );
}
} else {
VG_(message)(
Vg_UserMsg,
"%s%'lu bytes in %'u blocks are %s in loss record %'u of %'u%s",
xpre,
lr->szB, lr->num_blocks,
str_leak_lossmode(lr->key.state), n_this_record, n_total_records,
xpost
);
if (VG_(clo_xml)) {
VG_(message)(Vg_UserMsg, " <leakedbytes>%ld</leakedbytes>",
lr->szB);
VG_(message)(Vg_UserMsg, " <leakedblocks>%d</leakedblocks>",
lr->num_blocks);
VG_(pp_ExeContext)(lr->key.allocated_at);
} else { /* ! if (xml) */
emit("\n");
if (lr->indirect_szB > 0) {
emit(
"%'lu (%'lu direct, %'lu indirect) bytes in %'u blocks"
" are %s in loss record %'u of %'u\n",
lr->szB + lr->indirect_szB, lr->szB, lr->indirect_szB,
lr->num_blocks, str_leak_lossmode(lr->key.state),
n_this_record, n_total_records
);
} else {
emit(
"%'lu bytes in %'u blocks are %s in loss record %'u of %'u\n",
lr->szB, lr->num_blocks, str_leak_lossmode(lr->key.state),
n_this_record, n_total_records
);
}
}
VG_(pp_ExeContext)(lr->key.allocated_at);
VG_(pp_ExeContext)(lr->key.allocated_at);
} /* if (xml) */
break;
}
@ -931,11 +1070,11 @@ static void describe_addr ( Addr a, /*OUT*/AddrInfo* ai )
tl_assert(Addr_Undescribed == ai->tag);
/* Perhaps it's a user-def'd block? */
/* -- Perhaps it's a user-def'd block? -- */
if (client_block_maybe_describe( a, ai )) {
return;
}
/* Search for a recently freed block which might bracket it. */
/* -- Search for a recently freed block which might bracket it. -- */
mc = MC_(get_freed_list_head)();
while (mc) {
if (addr_is_in_MC_Chunk(mc, a)) {
@ -949,7 +1088,7 @@ static void describe_addr ( Addr a, /*OUT*/AddrInfo* ai )
}
mc = mc->next;
}
/* Search for a currently malloc'd block which might bracket it. */
/* -- Search for a currently malloc'd block which might bracket it. -- */
VG_(HT_ResetIter)(MC_(malloc_list));
while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
if (addr_is_in_MC_Chunk(mc, a)) {
@ -962,27 +1101,41 @@ static void describe_addr ( Addr a, /*OUT*/AddrInfo* ai )
return;
}
}
/* Perhaps the variable type/location data describes it? */
tl_assert(sizeof(ai->Addr.Variable.descr1)
== sizeof(ai->Addr.Variable.descr2));
VG_(memset)( &ai->Addr.Variable.descr1,
0, sizeof(ai->Addr.Variable.descr1));
VG_(memset)( &ai->Addr.Variable.descr2,
0, sizeof(ai->Addr.Variable.descr2));
if (VG_(get_data_description)(
&ai->Addr.Variable.descr1[0],
&ai->Addr.Variable.descr2[0],
sizeof(ai->Addr.Variable.descr1)-1,
a )) {
/* -- Perhaps the variable type/location data describes it? -- */
ai->Addr.Variable.descr1
= VG_(newXA)( VG_(malloc), "mc.da.descr1",
VG_(free), sizeof(HChar) );
ai->Addr.Variable.descr2
= VG_(newXA)( VG_(malloc), "mc.da.descr2",
VG_(free), sizeof(HChar) );
(void) VG_(get_data_description)( ai->Addr.Variable.descr1,
ai->Addr.Variable.descr2, a );
/* If there's nothing in descr1/2, free them. Why is it safe to
VG_(indexXA) at zero here? Because VG_(get_data_description)
guarantees to zero terminate descr1/2 regardless of the outcome
of the call. So there's always at least one element in each XA
after the call.
*/
if (0 == VG_(strlen)( VG_(indexXA)( ai->Addr.Variable.descr1, 0 ))) {
VG_(deleteXA)( ai->Addr.Variable.descr1 );
ai->Addr.Variable.descr1 = NULL;
}
if (0 == VG_(strlen)( VG_(indexXA)( ai->Addr.Variable.descr2, 0 ))) {
VG_(deleteXA)( ai->Addr.Variable.descr2 );
ai->Addr.Variable.descr2 = NULL;
}
/* Assume (assert) that VG_(get_data_description) fills in descr1
before it fills in descr2 */
if (ai->Addr.Variable.descr1 == NULL)
tl_assert(ai->Addr.Variable.descr2 == NULL);
/* So did we get lucky? */
if (ai->Addr.Variable.descr1 != NULL) {
ai->tag = Addr_Variable;
tl_assert( ai->Addr.Variable.descr1
[ sizeof(ai->Addr.Variable.descr1)-1 ] == 0);
tl_assert( ai->Addr.Variable.descr2
[ sizeof(ai->Addr.Variable.descr2)-1 ] == 0);
return;
}
/* Have a look at the low level data symbols - perhaps it's in
there. */
/* -- Have a look at the low level data symbols - perhaps it's in
there. -- */
VG_(memset)( &ai->Addr.DataSym.name,
0, sizeof(ai->Addr.DataSym.name));
if (VG_(get_datasym_and_offset)(
@ -994,7 +1147,7 @@ static void describe_addr ( Addr a, /*OUT*/AddrInfo* ai )
[ sizeof(ai->Addr.DataSym.name)-1 ] == 0);
return;
}
/* Perhaps it's on a thread's stack? */
/* -- Perhaps it's on a thread's stack? -- */
VG_(thread_stack_reset_iter)(&tid);
while ( VG_(thread_stack_next)(&tid, &stack_min, &stack_max) ) {
if (stack_min - VG_STACK_REDZONE_SZB <= a && a <= stack_max) {
@ -1003,7 +1156,7 @@ static void describe_addr ( Addr a, /*OUT*/AddrInfo* ai )
return;
}
}
/* last ditch attempt at classification */
/* -- last ditch attempt at classification -- */
tl_assert( sizeof(ai->Addr.SectKind.objname) > 4 );
VG_(memset)( &ai->Addr.SectKind.objname,
0, sizeof(ai->Addr.SectKind.objname));
@ -1017,7 +1170,7 @@ static void describe_addr ( Addr a, /*OUT*/AddrInfo* ai )
[ sizeof(ai->Addr.SectKind.objname)-1 ] == 0);
return;
}
/* Clueless ... */
/* -- Clueless ... -- */
ai->tag = Addr_Unknown;
return;
}

View File

@ -305,9 +305,10 @@ extern Bool MC_(any_value_errors);
/* Standard functions for error and suppressions as required by the
core/tool iface */
Bool MC_(eq_Error) ( VgRes res, Error* e1, Error* e2 );
void MC_(pp_Error) ( Error* err );
UInt MC_(update_Error_extra)( Error* err );
Bool MC_(eq_Error) ( VgRes res, Error* e1, Error* e2 );
void MC_(before_pp_Error) ( Error* err );
void MC_(pp_Error) ( Error* err );
UInt MC_(update_Error_extra) ( Error* err );
Bool MC_(is_recognised_suppression) ( Char* name, Supp* su );

View File

@ -237,7 +237,6 @@
#define VG_DEBUG_LEAKCHECK 0
#define VG_DEBUG_CLIQUE 0
#define UMSG(args...) VG_(message)(Vg_UserMsg, ##args)
/*------------------------------------------------------------*/
/*--- Getting the initial chunks, and searching them. ---*/
@ -875,28 +874,31 @@ static void print_results(ThreadId tid, Bool is_full_check)
}
if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
UMSG("");
UMSG("LEAK SUMMARY:");
UMSG(" definitely lost: %'lu bytes in %'lu blocks.",
MC_(bytes_leaked), MC_(blocks_leaked) );
UMSG(" indirectly lost: %'lu bytes in %'lu blocks.",
MC_(bytes_indirect), MC_(blocks_indirect) );
UMSG(" possibly lost: %'lu bytes in %'lu blocks.",
MC_(bytes_dubious), MC_(blocks_dubious) );
UMSG(" still reachable: %'lu bytes in %'lu blocks.",
MC_(bytes_reachable), MC_(blocks_reachable) );
UMSG(" suppressed: %'lu bytes in %'lu blocks.",
MC_(bytes_suppressed), MC_(blocks_suppressed) );
VG_(umsg)("\n");
VG_(umsg)("LEAK SUMMARY:\n");
VG_(umsg)(" definitely lost: %'lu bytes in %'lu blocks.\n",
MC_(bytes_leaked), MC_(blocks_leaked) );
VG_(umsg)(" indirectly lost: %'lu bytes in %'lu blocks.\n",
MC_(bytes_indirect), MC_(blocks_indirect) );
VG_(umsg)(" possibly lost: %'lu bytes in %'lu blocks.\n",
MC_(bytes_dubious), MC_(blocks_dubious) );
VG_(umsg)(" still reachable: %'lu bytes in %'lu blocks.\n",
MC_(bytes_reachable), MC_(blocks_reachable) );
VG_(umsg)(" suppressed: %'lu bytes in %'lu blocks.\n",
MC_(bytes_suppressed), MC_(blocks_suppressed) );
if (!is_full_check &&
(MC_(blocks_leaked) + MC_(blocks_indirect) +
MC_(blocks_dubious) + MC_(blocks_reachable)) > 0) {
UMSG("Rerun with --leak-check=full to see details of leaked memory.");
VG_(umsg)("Rerun with --leak-check=full to see details "
"of leaked memory.\n");
}
if (is_full_check &&
MC_(blocks_reachable) > 0 && !MC_(clo_show_reachable))
{
UMSG("Reachable blocks (those to which a pointer was found) are not shown.");
UMSG("To see them, rerun with: --leak-check=full --show-reachable=yes");
VG_(umsg)("Reachable blocks (those to which a pointer "
"was found) are not shown.\n");
VG_(umsg)("To see them, rerun with: --leak-check=full "
"--show-reachable=yes\n");
}
}
}
@ -916,7 +918,7 @@ void MC_(detect_memory_leaks) ( ThreadId tid, LeakCheckMode mode )
if (lc_n_chunks == 0) {
tl_assert(lc_chunks == NULL);
if (VG_(clo_verbosity) >= 1 && !VG_(clo_xml)) {
UMSG("All heap blocks were freed -- no leaks are possible.");
VG_(umsg)("All heap blocks were freed -- no leaks are possible.\n");
}
return;
}
@ -945,9 +947,9 @@ void MC_(detect_memory_leaks) ( ThreadId tid, LeakCheckMode mode )
(ch1->data == ch2->data && ch1->szB == ch2->szB)
);
if (nonsense_overlap) {
UMSG("Block [0x%lx, 0x%lx) overlaps with block [0x%lx, 0x%lx)",
ch1->data, (ch1->data + ch1->szB),
ch2->data, (ch2->data + ch2->szB));
VG_(umsg)("Block [0x%lx, 0x%lx) overlaps with block [0x%lx, 0x%lx)\n",
ch1->data, (ch1->data + ch1->szB),
ch2->data, (ch2->data + ch2->szB));
}
tl_assert (!nonsense_overlap);
}
@ -968,7 +970,8 @@ void MC_(detect_memory_leaks) ( ThreadId tid, LeakCheckMode mode )
// Verbosity.
if (VG_(clo_verbosity) > 0 && !VG_(clo_xml))
UMSG( "searching for pointers to %'d not-freed blocks.", lc_n_chunks );
VG_(umsg)( "searching for pointers to %'d not-freed blocks.\n",
lc_n_chunks );
// Scan the memory root-set, pushing onto the mark stack any blocks
// pointed to.
@ -1012,7 +1015,7 @@ void MC_(detect_memory_leaks) ( ThreadId tid, LeakCheckMode mode )
seg_size = seg->end - seg->start + 1;
if (VG_(clo_verbosity) > 2) {
VG_(message)(Vg_DebugMsg,
" Scanning root segment: %#lx..%#lx (%lu)",
" Scanning root segment: %#lx..%#lx (%lu)\n",
seg->start, seg->end, seg_size);
}
lc_scan_memory(seg->start, seg_size, /*is_prior_definite*/True, -1);
@ -1027,7 +1030,7 @@ void MC_(detect_memory_leaks) ( ThreadId tid, LeakCheckMode mode )
lc_process_markstack(/*clique*/-1);
if (VG_(clo_verbosity) > 0 && !VG_(clo_xml))
UMSG("checked %'lu bytes.", lc_scanned_szB);
VG_(umsg)("checked %'lu bytes.\n", lc_scanned_szB);
// Trace all the leaked blocks to determine which are directly leaked and
// which are indirectly leaked. For each Unreached block, push it onto

View File

@ -947,7 +947,7 @@ static void gcSecVBitTable(void)
if (VG_(clo_verbosity) > 1) {
Char percbuf[6];
VG_(percentify)(n_survivors, n_nodes, 1, 6, percbuf);
VG_(message)(Vg_DebugMsg, "memcheck GC: %d nodes, %d survivors (%s)",
VG_(message)(Vg_DebugMsg, "memcheck GC: %d nodes, %d survivors (%s)\n",
n_nodes, n_survivors, percbuf);
}
@ -955,7 +955,7 @@ static void gcSecVBitTable(void)
if (n_survivors > (secVBitLimit * MAX_SURVIVOR_PROPORTION)) {
secVBitLimit *= TABLE_GROWTH_FACTOR;
if (VG_(clo_verbosity) > 1)
VG_(message)(Vg_DebugMsg, "memcheck GC: increase table size to %d",
VG_(message)(Vg_DebugMsg, "memcheck GC: increase table size to %d\n",
secVBitLimit);
}
}
@ -1359,7 +1359,7 @@ static void set_address_range_perms ( Addr a, SizeT lenT, UWord vabits16,
if (vabits16 == VA_BITS16_UNDEFINED) s = "undefined";
if (vabits16 == VA_BITS16_DEFINED ) s = "defined";
VG_(message)(Vg_UserMsg, "Warning: set address range perms: "
"large range [0x%lx, 0x%lx) (%s)",
"large range [0x%lx, 0x%lx) (%s)\n",
a, a + lenT, s);
}
}
@ -4682,7 +4682,7 @@ static Bool mc_process_cmd_line_options(Char* arg)
*/
if (0 == VG_(strcmp)(arg, "--undef-value-errors=no")) {
if (MC_(clo_mc_level) == 3) {
VG_(message)(Vg_DebugMsg, "%s", bad_level_msg);
VG_(message)(Vg_DebugMsg, "%s\n", bad_level_msg);
return False;
} else {
MC_(clo_mc_level) = 1;
@ -4701,7 +4701,7 @@ static Bool mc_process_cmd_line_options(Char* arg)
}
if (0 == VG_(strcmp)(arg, "--track-origins=yes")) {
if (MC_(clo_mc_level) == 1) {
VG_(message)(Vg_DebugMsg, "%s", bad_level_msg);
VG_(message)(Vg_DebugMsg, "%s\n", bad_level_msg);
return False;
} else {
MC_(clo_mc_level) = 3;
@ -4746,16 +4746,16 @@ static Bool mc_process_cmd_line_options(Char* arg)
Addr limit = 0x4000000; /* 64M - entirely arbitrary limit */
if (e <= s) {
VG_(message)(Vg_DebugMsg,
"ERROR: --ignore-ranges: end <= start in range:");
"ERROR: --ignore-ranges: end <= start in range:\n");
VG_(message)(Vg_DebugMsg,
" 0x%lx-0x%lx", s, e);
" 0x%lx-0x%lx\n", s, e);
return False;
}
if (e - s > limit) {
VG_(message)(Vg_DebugMsg,
"ERROR: --ignore-ranges: suspiciously large range:");
"ERROR: --ignore-ranges: suspiciously large range:\n");
VG_(message)(Vg_DebugMsg,
" 0x%lx-0x%lx (size %ld)", s, e, (UWord)(e-s));
" 0x%lx-0x%lx (size %ld)\n", s, e, (UWord)(e-s));
return False;
}
}
@ -4876,7 +4876,7 @@ Int alloc_client_block ( void )
static void show_client_block_stats ( void )
{
VG_(message)(Vg_DebugMsg,
"general CBs: %llu allocs, %llu discards, %llu maxinuse, %llu search",
"general CBs: %llu allocs, %llu discards, %llu maxinuse, %llu search\n",
cgb_allocs, cgb_discards, cgb_used_MAX, cgb_search
);
}
@ -5114,9 +5114,11 @@ static Bool mc_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
default:
VG_(message)(Vg_UserMsg,
"Warning: unknown memcheck client request code %llx",
(ULong)arg[0]);
VG_(message)(
Vg_UserMsg,
"Warning: unknown memcheck client request code %llx\n",
(ULong)arg[0]
);
return False;
}
return True;
@ -5555,7 +5557,7 @@ static void mc_post_clo_init ( void )
static void print_SM_info(char* type, int n_SMs)
{
VG_(message)(Vg_DebugMsg,
" memcheck: SMs: %s = %d (%ldk, %ldM)",
" memcheck: SMs: %s = %d (%ldk, %ldM)\n",
type,
n_SMs,
n_SMs * sizeof(SecMap) / 1024UL,
@ -5569,10 +5571,10 @@ static void mc_fini ( Int exitcode )
if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
if (MC_(clo_leak_check) == LC_Off)
VG_(message)(Vg_UserMsg,
"For a detailed leak analysis, rerun with: --leak-check=yes");
"For a detailed leak analysis, rerun with: --leak-check=yes\n");
VG_(message)(Vg_UserMsg,
"For counts of detected errors, rerun with: -v");
"For counts of detected errors, rerun with: -v\n");
}
@ -5580,7 +5582,7 @@ static void mc_fini ( Int exitcode )
&& MC_(clo_mc_level) == 2) {
VG_(message)(Vg_UserMsg,
"Use --track-origins=yes to see where "
"uninitialised values come from");
"uninitialised values come from\n");
}
if (MC_(clo_leak_check) != LC_Off)
@ -5592,21 +5594,21 @@ static void mc_fini ( Int exitcode )
SizeT max_secVBit_szB, max_SMs_szB, max_shmem_szB;
VG_(message)(Vg_DebugMsg,
" memcheck: sanity checks: %d cheap, %d expensive",
" memcheck: sanity checks: %d cheap, %d expensive\n",
n_sanity_cheap, n_sanity_expensive );
VG_(message)(Vg_DebugMsg,
" memcheck: auxmaps: %lld auxmap entries (%lldk, %lldM) in use",
" memcheck: auxmaps: %lld auxmap entries (%lldk, %lldM) in use\n",
n_auxmap_L2_nodes,
n_auxmap_L2_nodes * 64,
n_auxmap_L2_nodes / 16 );
VG_(message)(Vg_DebugMsg,
" memcheck: auxmaps_L1: %lld searches, %lld cmps, ratio %lld:10",
" memcheck: auxmaps_L1: %lld searches, %lld cmps, ratio %lld:10\n",
n_auxmap_L1_searches, n_auxmap_L1_cmps,
(10ULL * n_auxmap_L1_cmps)
/ (n_auxmap_L1_searches ? n_auxmap_L1_searches : 1)
);
VG_(message)(Vg_DebugMsg,
" memcheck: auxmaps_L2: %lld searches, %lld nodes",
" memcheck: auxmaps_L2: %lld searches, %lld nodes\n",
n_auxmap_L2_searches, n_auxmap_L2_nodes
);
@ -5627,47 +5629,47 @@ static void mc_fini ( Int exitcode )
max_shmem_szB = sizeof(primary_map) + max_SMs_szB + max_secVBit_szB;
VG_(message)(Vg_DebugMsg,
" memcheck: max sec V bit nodes: %d (%ldk, %ldM)",
" memcheck: max sec V bit nodes: %d (%ldk, %ldM)\n",
max_secVBit_nodes, max_secVBit_szB / 1024,
max_secVBit_szB / (1024 * 1024));
VG_(message)(Vg_DebugMsg,
" memcheck: set_sec_vbits8 calls: %llu (new: %llu, updates: %llu)",
" memcheck: set_sec_vbits8 calls: %llu (new: %llu, updates: %llu)\n",
sec_vbits_new_nodes + sec_vbits_updates,
sec_vbits_new_nodes, sec_vbits_updates );
VG_(message)(Vg_DebugMsg,
" memcheck: max shadow mem size: %ldk, %ldM",
" memcheck: max shadow mem size: %ldk, %ldM\n",
max_shmem_szB / 1024, max_shmem_szB / (1024 * 1024));
if (MC_(clo_mc_level) >= 3) {
VG_(message)(Vg_DebugMsg,
" ocacheL1: %'12lu refs %'12lu misses (%'lu lossage)",
" ocacheL1: %'12lu refs %'12lu misses (%'lu lossage)\n",
stats_ocacheL1_find,
stats_ocacheL1_misses,
stats_ocacheL1_lossage );
VG_(message)(Vg_DebugMsg,
" ocacheL1: %'12lu at 0 %'12lu at 1",
" ocacheL1: %'12lu at 0 %'12lu at 1\n",
stats_ocacheL1_find - stats_ocacheL1_misses
- stats_ocacheL1_found_at_1
- stats_ocacheL1_found_at_N,
stats_ocacheL1_found_at_1 );
VG_(message)(Vg_DebugMsg,
" ocacheL1: %'12lu at 2+ %'12lu move-fwds",
" ocacheL1: %'12lu at 2+ %'12lu move-fwds\n",
stats_ocacheL1_found_at_N,
stats_ocacheL1_movefwds );
VG_(message)(Vg_DebugMsg,
" ocacheL1: %'12lu sizeB %'12u useful",
" ocacheL1: %'12lu sizeB %'12u useful\n",
(UWord)sizeof(OCache),
4 * OC_W32S_PER_LINE * OC_LINES_PER_SET * OC_N_SETS );
VG_(message)(Vg_DebugMsg,
" ocacheL2: %'12lu refs %'12lu misses",
" ocacheL2: %'12lu refs %'12lu misses\n",
stats__ocacheL2_refs,
stats__ocacheL2_misses );
VG_(message)(Vg_DebugMsg,
" ocacheL2: %'9lu max nodes %'9lu curr nodes",
" ocacheL2: %'9lu max nodes %'9lu curr nodes\n",
stats__ocacheL2_n_nodes_max,
stats__ocacheL2_n_nodes );
VG_(message)(Vg_DebugMsg,
" niacache: %'12lu refs %'12lu misses",
" niacache: %'12lu refs %'12lu misses\n",
stats__nia_cache_queries, stats__nia_cache_misses);
} else {
tl_assert(ocacheL1 == NULL);
@ -5677,7 +5679,7 @@ static void mc_fini ( Int exitcode )
if (0) {
VG_(message)(Vg_DebugMsg,
"------ Valgrind's client block stats follow ---------------" );
"------ Valgrind's client block stats follow ---------------\n" );
show_client_block_stats();
}
}
@ -5701,6 +5703,7 @@ static void mc_pre_clo_init(void)
VG_(needs_core_errors) ();
VG_(needs_tool_errors) (MC_(eq_Error),
MC_(before_pp_Error),
MC_(pp_Error),
True,/*show TIDs for errors*/
MC_(update_Error_extra),
@ -5727,6 +5730,7 @@ static void mc_pre_clo_init(void)
MC_(realloc),
MC_(malloc_usable_size),
MC_MALLOC_REDZONE_SZB );
VG_(needs_xml_output) ();
VG_(track_new_mem_startup) ( mc_new_mem_startup );

View File

@ -163,7 +163,7 @@ static Bool complain_about_silly_args(SizeT sizeB, Char* fn)
// (for 32-bit platforms) or 2^63 bytes (for 64-bit platforms).
if ((SSizeT)sizeB < 0) {
if (!VG_(clo_xml))
VG_(message)(Vg_UserMsg, "Warning: silly arg (%ld) to %s()",
VG_(message)(Vg_UserMsg, "Warning: silly arg (%ld) to %s()\n",
(SSizeT)sizeB, fn );
return True;
}
@ -175,7 +175,7 @@ static Bool complain_about_silly_args2(SizeT n, SizeT sizeB)
if ((SSizeT)n < 0 || (SSizeT)sizeB < 0) {
if (!VG_(clo_xml))
VG_(message)(Vg_UserMsg,
"Warning: silly args (%ld,%ld) to calloc()",
"Warning: silly args (%ld,%ld) to calloc()\n",
(SSizeT)n, (SSizeT)sizeB);
return True;
}
@ -496,7 +496,7 @@ void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed)
MC_Mempool* mp;
if (VG_(clo_verbosity) > 2) {
VG_(message)(Vg_UserMsg, "create_mempool(0x%lx, %d, %d)",
VG_(message)(Vg_UserMsg, "create_mempool(0x%lx, %d, %d)\n",
pool, rzB, is_zeroed);
VG_(get_and_pp_StackTrace)
(VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
@ -531,7 +531,7 @@ void MC_(destroy_mempool)(Addr pool)
MC_Mempool* mp;
if (VG_(clo_verbosity) > 2) {
VG_(message)(Vg_UserMsg, "destroy_mempool(0x%lx)", pool);
VG_(message)(Vg_UserMsg, "destroy_mempool(0x%lx)\n", pool);
VG_(get_and_pp_StackTrace)
(VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
}
@ -592,7 +592,7 @@ check_mempool_sane(MC_Mempool* mp)
}
}
VG_(message)(Vg_UserMsg,
VG_(message)(Vg_UserMsg,
"Total mempools active: %d pools, %d chunks\n",
total_pools, total_chunks);
tick = 0;
@ -607,7 +607,7 @@ check_mempool_sane(MC_Mempool* mp)
if (chunks[i]->data > chunks[i+1]->data) {
VG_(message)(Vg_UserMsg,
"Mempool chunk %d / %d is out of order "
"wrt. its successor",
"wrt. its successor\n",
i+1, n_chunks);
bad = 1;
}
@ -617,7 +617,7 @@ check_mempool_sane(MC_Mempool* mp)
for (i = 0; i < n_chunks-1; i++) {
if (chunks[i]->data + chunks[i]->szB > chunks[i+1]->data ) {
VG_(message)(Vg_UserMsg,
"Mempool chunk %d / %d overlaps with its successor",
"Mempool chunk %d / %d overlaps with its successor\n",
i+1, n_chunks);
bad = 1;
}
@ -625,11 +625,12 @@ check_mempool_sane(MC_Mempool* mp)
if (bad) {
VG_(message)(Vg_UserMsg,
"Bad mempool (%d chunks), dumping chunks for inspection:",
n_chunks);
"Bad mempool (%d chunks), dumping chunks for inspection:\n",
n_chunks);
for (i = 0; i < n_chunks; ++i) {
VG_(message)(Vg_UserMsg,
"Mempool chunk %d / %d: %ld bytes [%lx,%lx), allocated:",
"Mempool chunk %d / %d: %ld bytes "
"[%lx,%lx), allocated:\n",
i+1,
n_chunks,
chunks[i]->szB + 0UL,
@ -647,7 +648,8 @@ void MC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT szB)
MC_Mempool* mp;
if (VG_(clo_verbosity) > 2) {
VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %ld)", pool, addr, szB);
VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %ld)\n",
pool, addr, szB);
VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
}
@ -675,7 +677,7 @@ void MC_(mempool_free)(Addr pool, Addr addr)
}
if (VG_(clo_verbosity) > 2) {
VG_(message)(Vg_UserMsg, "mempool_free(0x%lx, 0x%lx)", pool, addr);
VG_(message)(Vg_UserMsg, "mempool_free(0x%lx, 0x%lx)\n", pool, addr);
VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
}
@ -688,7 +690,7 @@ void MC_(mempool_free)(Addr pool, Addr addr)
if (VG_(clo_verbosity) > 2) {
VG_(message)(Vg_UserMsg,
"mempool_free(0x%lx, 0x%lx) freed chunk of %ld bytes",
"mempool_free(0x%lx, 0x%lx) freed chunk of %ld bytes\n",
pool, addr, mc->szB + 0UL);
}
@ -706,7 +708,8 @@ void MC_(mempool_trim)(Addr pool, Addr addr, SizeT szB)
VgHashNode** chunks;
if (VG_(clo_verbosity) > 2) {
VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %ld)", pool, addr, szB);
VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %ld)\n",
pool, addr, szB);
VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
}
@ -815,7 +818,7 @@ void MC_(move_mempool)(Addr poolA, Addr poolB)
MC_Mempool* mp;
if (VG_(clo_verbosity) > 2) {
VG_(message)(Vg_UserMsg, "move_mempool(0x%lx, 0x%lx)", poolA, poolB);
VG_(message)(Vg_UserMsg, "move_mempool(0x%lx, 0x%lx)\n", poolA, poolB);
VG_(get_and_pp_StackTrace)
(VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
}
@ -839,7 +842,7 @@ void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT szB)
ThreadId tid = VG_(get_running_tid)();
if (VG_(clo_verbosity) > 2) {
VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %ld)",
VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %ld)\n",
pool, addrA, addrB, szB);
VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
}
@ -900,14 +903,16 @@ void MC_(print_malloc_stats) ( void )
}
VG_(message)(Vg_UserMsg,
"malloc/free: in use at exit: %'llu bytes in %'lu blocks.",
nbytes, nblocks);
"malloc/free: in use at exit: %'llu bytes in %'lu blocks.\n",
nbytes, nblocks
);
VG_(message)(Vg_UserMsg,
"malloc/free: %'lu allocs, %'lu frees, %'llu bytes allocated.",
cmalloc_n_mallocs,
cmalloc_n_frees, cmalloc_bs_mallocd);
"malloc/free: %'lu allocs, %'lu frees, %'llu bytes allocated.\n",
cmalloc_n_mallocs,
cmalloc_n_frees, cmalloc_bs_mallocd
);
if (VG_(clo_verbosity) > 1)
VG_(message)(Vg_UserMsg, "");
VG_(message)(Vg_UserMsg, "\n");
}
/*--------------------------------------------------------------------*/

View File

@ -2,7 +2,8 @@
<valgrindoutput>
<protocolversion>3</protocolversion>
<protocolversion>4</protocolversion>
<protocoltool>memcheck</protocoltool>
<preamble>
<line>...</line>
@ -78,6 +79,11 @@
</stack>
</error>
<status>
<state>FINISHED</state>
<time>...</time>
</status>
<errorcounts>
<pair>
<count>...</count>
@ -85,11 +91,6 @@
</pair>
</errorcounts>
<status>
<state>FINISHED</state>
<time>...</time>
</status>
<suppcounts>...</suppcounts>
</valgrindoutput>

View File

@ -1,3 +1,3 @@
prog: long_namespace_xml
vgopts: --xml=yes
vgopts: --xml=yes --xml-fd=2 --log-file=/dev/null
stderr_filter: filter_xml

View File

@ -2,7 +2,8 @@
<valgrindoutput>
<protocolversion>3</protocolversion>
<protocolversion>4</protocolversion>
<protocoltool>memcheck</protocoltool>
<preamble>
<line>...</line>
@ -341,47 +342,20 @@
<what>Syscall param exit(status) contains uninitialised byte(s)</what>
</error>
<errorcounts>
<pair>
<count>...</count>
<unique>0x........</unique>
</pair>
<pair>
<count>...</count>
<unique>0x........</unique>
</pair>
<pair>
<count>...</count>
<unique>0x........</unique>
</pair>
<pair>
<count>...</count>
<unique>0x........</unique>
</pair>
<pair>
<count>...</count>
<unique>0x........</unique>
</pair>
<pair>
<count>...</count>
<unique>0x........</unique>
</pair>
</errorcounts>
<status>
<state>FINISHED</state>
<time>...</time>
</status>
<suppcounts>...</suppcounts>
<error>
<unique>0x........</unique>
<tid>...</tid>
<kind>Leak_DefinitelyLost</kind>
<what>396 bytes in 1 blocks are definitely lost in loss record ... of ...</what>
<leakedbytes>396</leakedbytes>
<leakedblocks>1</leakedblocks>
<xwhat>
<text>396 bytes in 1 blocks are definitely lost in loss record ... of ...</text>
<leakedbytes>396</leakedbytes>
<leakedblocks>1</leakedblocks>
</xwhat>
<stack>
<frame>
<ip>0x........</ip>
@ -426,5 +400,34 @@
</stack>
</error>
<errorcounts>
<pair>
<count>...</count>
<unique>0x........</unique>
</pair>
<pair>
<count>...</count>
<unique>0x........</unique>
</pair>
<pair>
<count>...</count>
<unique>0x........</unique>
</pair>
<pair>
<count>...</count>
<unique>0x........</unique>
</pair>
<pair>
<count>...</count>
<unique>0x........</unique>
</pair>
<pair>
<count>...</count>
<unique>0x........</unique>
</pair>
</errorcounts>
<suppcounts>...</suppcounts>
</valgrindoutput>

View File

@ -1,3 +1,3 @@
prog: xml1
vgopts: --xml=yes
vgopts: --xml=yes --xml-fd=2 --log-file=/dev/null
stderr_filter: filter_xml