Neatens other things up.  Also adds the --gen-suppressions option for
automatically generating suppressions for each error.  Note that it changes
the core/skin interface: SK_(dup_extra_and_update)() is replaced by
SK_(update_extra)(), and SK_(get_error_name)() and
SK_(print_extra_suppression_info)() are added.

-----------------------------------------------------------------------------
details
-----------------------------------------------------------------------------

Removed ac_common.c -- it just #included another .c file; moved the #include
into ac_main.c.

Introduced "mac_" prefixes for files shared between Addrcheck and Memcheck,
to make it clearer which code is shared.  Also using a "MAC_" prefix for
functions, variables and types that are shared.  Addrcheck doesn't see the
"MC_" prefix at all.

Factored out the almost-identical mc_describe_addr() and describe_addr()
(Addrcheck's version) into MAC_(describe_addr)().

Got rid of the "pp_ExeContext" closure passed to SK_(pp_SkinError)(); it
wasn't really necessary.

Introduced MAC_(pp_shared_SkinError)() for the error-printing code shared by
Addrcheck and Memcheck.

Fixed some bogus stuff in Addrcheck error messages about "uninitialised
bytes" (there because of an imperfect conversion from Memcheck).

Moved the leak checker out of core (vg_memory.c) into mac_leakcheck.c.

- This meant the hacky way of recording Leak errors, which was different
  from normal errors, could be changed to something better: introduced a
  function VG_(unique_error)(), which unlike VG_(maybe_record_error)() just
  prints the error (unless suppressed) but doesn't record it.  Used for
  leaks; a much better solution all round, as it allowed me to remove a lot
  of almost-identical code from leak handling (is_suppressible_leak(),
  leaksupp_matches_callers()).

- As part of this, changed the horrible SK_(dup_extra_and_update) into the
  slightly less horrible SK_(update_extra), which returns the size of the
  `extra' part for the core to duplicate.

- Also renamed it from VG_(generic_detect_memory_leaks)() to
  MAC_(do_detect_memory_leaks)().  In making the code nicer w.r.t.
  suppressions and error reporting, I tied it a bit more closely to
  Memcheck/Addrcheck, and got rid of some of the args.  It's not really
  "generic" any more, but then it never really was.  (This could be undone,
  but there doesn't seem to be much point.)

STREQ and STREQN were #defined in several places, and in two different ways.
Made them global macros VG_STREQ, VG_CLO_STREQ and VG_CLO_STREQN in
vg_skin.h.

Added the --gen-suppressions code.  This required adding the functions
SK_(get_error_name)() and SK_(print_extra_suppression_info)() for skins that
use the error handling machinery.

Added documentation for --gen-suppressions, and fixed some other minor
documentation problems.

Various other minor related changes too.

git-svn-id: svn://svn.valgrind.org/valgrind/trunk@1517
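
As an illustration (an editor's addition, not part of the commit message): a
suppression emitted by --gen-suppressions is a ready-to-paste entry of
roughly the following shape; the skin name, suppression kind and stack
frames shown here are made-up examples, and the exact output format depends
on the Valgrind version.

    {
       <insert a suppression name here>
       Memcheck:Addr4
       fun:bad_function
       obj:/usr/lib/libfoo.so
       fun:main
    }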

/*--------------------------------------------------------------------*/
/*--- Declarations shared between MemCheck and AddrCheck.          ---*/
/*---                                                 mac_shared.h ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind skin for
   detecting memory errors, and AddrCheck, a lightweight Valgrind skin
   for detecting memory errors.

   Copyright (C) 2000-2002 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

/* Note: This header contains the declarations shared between
   Addrcheck and Memcheck, and is #included by both. */

#ifndef __MAC_SHARED_H
#define __MAC_SHARED_H

#include "vg_skin.h"

#define MAC_(str)    VGAPPEND(vgMAC_,str)

/*------------------------------------------------------------*/
/*--- Errors and suppressions                              ---*/
/*------------------------------------------------------------*/

/* The classification of a faulting address. */
typedef
   enum {
      Undescribed,   /* as-yet unclassified */
      Stack,
      Unknown,       /* classification yielded nothing useful */
      Freed, Mallocd,
      UserG          /* in a user-defined block; Addrcheck & Memcheck only */
   }
   AddrKind;

/* Records info about a faulting address. */
typedef
   struct {
      /* ALL */
      AddrKind akind;
      /* Freed, Mallocd */
      Int blksize;
      /* Freed, Mallocd */
      Int rwoffset;
      /* Freed, Mallocd */
      ExeContext* lastchange;
      /* Stack */
      ThreadId stack_tid;
      /* True if is just-below %esp -- could be a gcc bug. */
      Bool maybe_gcc;
   }
   AddrInfo;
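
/* Illustrative sketch (an editor's example, not part of the original
   header): an error reporter might describe an access into a freed block
   roughly as follows, where `sc' is a ShadowChunk for the block, `a' is
   the faulting address, and the ShadowChunk field names are assumptions:

      AddrInfo ai;
      ai.akind      = Freed;
      ai.blksize    = sc->size;               (assumed field name)
      ai.rwoffset   = (Int)(a - sc->data);    (assumed field name)
      ai.lastchange = MAC_(get_where)(sc);
      MAC_(pp_AddrInfo)(a, &ai);
*/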

typedef
   enum {
      /* Bad syscall params */
      ParamSupp,
      /* Memory errors in core (pthread ops, signal handling) */
      CoreMemSupp,
      /* Use of invalid values of given size (MemCheck only) */
      Value0Supp, Value1Supp, Value2Supp, Value4Supp, Value8Supp,
      /* Invalid read/write attempt at given size */
      Addr1Supp, Addr2Supp, Addr4Supp, Addr8Supp,
      /* Invalid or mismatching free */
      FreeSupp,
      /* Something to be suppressed in a leak check. */
      LeakSupp
   }
   MAC_SuppKind;

/* What kind of error it is. */
typedef
   enum { ValueErr,     /* Memcheck only */
          CoreMemErr,
          AddrErr,
          ParamErr, UserErr,  /* behaves like an anonymous ParamErr */
          FreeErr, FreeMismatchErr,
          LeakErr
   }
   MAC_ErrorKind;

/* What kind of memory access is involved in the error? */
typedef
   enum { ReadAxs, WriteAxs, ExecAxs }
   AxsKind;

/* Extra context for memory errors */
typedef
   struct {
      /* AddrErr */
      AxsKind axskind;
      /* AddrErr, ValueErr */
      Int size;
      /* AddrErr, FreeErr, FreeMismatchErr, ParamErr, UserErr */
      AddrInfo addrinfo;
      /* ParamErr, UserErr, CoreMemErr */
      Bool isWrite;
   }
   MAC_Error;
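
/* Illustrative sketch (an editor's example, not part of the original
   header): the `extra' part carried with an address error would be set up
   along these lines before being handed to the core's error machinery
   (the exact call into the core is omitted here):

      MAC_Error err_extra;
      MAC_(clear_MAC_Error)( &err_extra );
      err_extra.axskind        = isWrite ? WriteAxs : ReadAxs;
      err_extra.size           = size;
      err_extra.addrinfo.akind = Undescribed;
*/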

/*------------------------------------------------------------*/
/*--- Profiling of skins and memory events                 ---*/
/*------------------------------------------------------------*/

typedef
   enum {
      VgpCheckMem = VgpFini+1,
      VgpSetMem,
      VgpESPAdj
   }
   VgpSkinCC;

/* Define to collect detailed performance info. */
/* #define MAC_PROFILE_MEMORY */

#ifdef MAC_PROFILE_MEMORY
#  define N_PROF_EVENTS 150

extern UInt MAC_(event_ctr)[N_PROF_EVENTS];

#  define PROF_EVENT(ev)                                   \
   do { sk_assert((ev) >= 0 && (ev) < N_PROF_EVENTS);      \
        MAC_(event_ctr)[ev]++;                             \
   } while (False);

#else

#  define PROF_EVENT(ev) /* */

#endif /* MAC_PROFILE_MEMORY */
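
/* Illustrative sketch (an editor's example, not part of the original
   header): with MAC_PROFILE_MEMORY defined, hot memory-handling paths
   tally events which MAC_(done_prof_mem)() can later report.  A use site
   is simply

      PROF_EVENT(35);      (the number is an arbitrary per-site index)

   Event numbers just index MAC_(event_ctr)[]; they carry no meaning beyond
   identifying the call site, and must be below N_PROF_EVENTS. */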

/*------------------------------------------------------------*/
/*--- V and A bits                                         ---*/
/*------------------------------------------------------------*/

#define IS_DISTINGUISHED_SM(smap)                          \
   ((smap) == &distinguished_secondary_map)

#define ENSURE_MAPPABLE(addr,caller)                                  \
   do {                                                               \
      if (IS_DISTINGUISHED_SM(primary_map[(addr) >> 16])) {           \
         primary_map[(addr) >> 16] = alloc_secondary_map(caller);     \
         /* VG_(printf)("new 2map because of %p\n", addr); */         \
      }                                                               \
   } while(0)

#define BITARR_SET(aaa_p,iii_p)                            \
   do {                                                    \
      UInt   iii = (UInt)iii_p;                            \
      UChar* aaa = (UChar*)aaa_p;                          \
      aaa[iii >> 3] |= (1 << (iii & 7));                   \
   } while (0)

#define BITARR_CLEAR(aaa_p,iii_p)                          \
   do {                                                    \
      UInt   iii = (UInt)iii_p;                            \
      UChar* aaa = (UChar*)aaa_p;                          \
      aaa[iii >> 3] &= ~(1 << (iii & 7));                  \
   } while (0)

#define BITARR_TEST(aaa_p,iii_p)                           \
   (0 != (((UChar*)aaa_p)[ ((UInt)iii_p) >> 3 ]            \
          & (1 << (((UInt)iii_p) & 7))))
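
/* Illustrative sketch (an editor's example, not part of the original
   header): the BITARR_* macros treat a UChar array as a flat bit array,
   one bit per index, for example:

      UChar used[1024 / 8];            (caller must zero-initialise it)
      BITARR_SET(used, 69);
      if (BITARR_TEST(used, 69)) { ... }
      BITARR_CLEAR(used, 69);

   IS_DISTINGUISHED_SM and ENSURE_MAPPABLE, by contrast, rely on
   primary_map, distinguished_secondary_map and alloc_secondary_map(),
   which must be defined by the .c file that includes this header. */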

#define VGM_BIT_VALID       0
#define VGM_BIT_INVALID     1

#define VGM_NIBBLE_VALID    0
#define VGM_NIBBLE_INVALID  0xF

#define VGM_BYTE_VALID      0
#define VGM_BYTE_INVALID    0xFF

#define VGM_WORD_VALID      0
#define VGM_WORD_INVALID    0xFFFFFFFF

#define VGM_EFLAGS_VALID    0xFFFFFFFE
#define VGM_EFLAGS_INVALID  0xFFFFFFFF     /* not used */

/*------------------------------------------------------------*/
/*--- Command line options + defaults                      ---*/
/*------------------------------------------------------------*/

/* Memcheck defines a couple more. */

/* Allow loads from partially-valid addresses?  default: YES */
extern Bool MAC_(clo_partial_loads_ok);

/* Max volume of the freed blocks queue. */
extern Int MAC_(clo_freelist_vol);

/* Do leak check at exit?  default: NO */
extern Bool MAC_(clo_leak_check);

/* How closely should we compare ExeContexts in leak records? default: 2 */
extern VgRes MAC_(clo_leak_resolution);

/* In leak check, show reachable-but-not-freed blocks?  default: NO */
extern Bool MAC_(clo_show_reachable);

/* Assume accesses immediately below %esp are due to gcc-2.96 bugs.
 * default: NO */
extern Bool MAC_(clo_workaround_gcc296_bugs);

extern Bool MAC_(process_common_cmd_line_option)(Char* arg);
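
/* Illustrative sketch (an editor's example, not part of the original
   header): a skin's own option handler would typically try its private
   options first and fall back on the shared handler for everything else,
   roughly:

      Bool SK_(process_cmd_line_option)(Char* arg)
      {
         if (VG_CLO_STREQ(arg, "--my-option=yes"))     (hypothetical option)
            MC_(clo_my_option) = True;                 (hypothetical variable)
         else
            return MAC_(process_common_cmd_line_option)(arg);
         return True;
      }
*/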


/*------------------------------------------------------------*/
/*--- Functions                                            ---*/
/*------------------------------------------------------------*/

extern void        MAC_(set_where) ( ShadowChunk* sc, ExeContext* ec );
extern ExeContext* MAC_(get_where) ( ShadowChunk* sc );

extern void MAC_(pp_AddrInfo) ( Addr a, AddrInfo* ai );

extern void MAC_(clear_MAC_Error) ( MAC_Error* err_extra );

extern Bool (*MAC_(describe_addr_supp)) ( Addr a, AddrInfo* ai );

extern Bool MAC_(shared_recognised_suppression) ( Char* name, Supp* su );

extern void MAC_(record_address_error)     ( Addr a, Int size, Bool isWrite );
extern void MAC_(record_core_mem_error)    ( ThreadState* tst, Bool isWrite,
                                             Char* s );
extern void MAC_(record_param_error)       ( ThreadState* tst, Addr a,
                                             Bool isWriteLack, Char* msg );
extern void MAC_(record_jump_error)        ( ThreadState* tst, Addr a );
extern void MAC_(record_free_error)        ( ThreadState* tst, Addr a );
extern void MAC_(record_freemismatch_error)( ThreadState* tst, Addr a );

extern void MAC_(pp_shared_SkinError) ( Error* err );

extern void MAC_(init_prof_mem) ( void );
extern void MAC_(done_prof_mem) ( void );

extern Int  MAC_(count_freelist)  ( void ) __attribute__ ((unused));
extern void MAC_(freelist_sanity) ( void ) __attribute__ ((unused));
extern ShadowChunk* MAC_(any_matching_freed_ShadowChunks)
                                        ( Bool (*p)(ShadowChunk*) );

/* For leak checking */
extern void MAC_(pp_LeakError) ( void* vl, UInt n_this_record,
                                 UInt n_total_records );

extern void MAC_(do_detect_memory_leaks) (
          Bool is_valid_64k_chunk ( UInt ),
          Bool is_valid_address ( Addr )
       );
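
/* Illustrative sketch (an editor's example, not part of the original
   header): the skin would run the leak checker from its finalisation
   code, gated on --leak-check, passing callbacks that say which parts of
   the address space are worth scanning.  The callback names here are
   assumptions:

      if (MAC_(clo_leak_check))
         MAC_(do_detect_memory_leaks) ( SK_(is_valid_64k_chunk),
                                        SK_(is_valid_address) );
*/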

extern __attribute__((regparm(1))) void MAC_(new_mem_stack_4)  ( Addr old_ESP );
extern __attribute__((regparm(1))) void MAC_(die_mem_stack_4)  ( Addr old_ESP );
extern __attribute__((regparm(1))) void MAC_(new_mem_stack_8)  ( Addr old_ESP );
extern __attribute__((regparm(1))) void MAC_(die_mem_stack_8)  ( Addr old_ESP );
extern __attribute__((regparm(1))) void MAC_(new_mem_stack_12) ( Addr old_ESP );
extern __attribute__((regparm(1))) void MAC_(die_mem_stack_12) ( Addr old_ESP );
extern __attribute__((regparm(1))) void MAC_(new_mem_stack_16) ( Addr old_ESP );
extern __attribute__((regparm(1))) void MAC_(die_mem_stack_16) ( Addr old_ESP );
extern __attribute__((regparm(1))) void MAC_(new_mem_stack_32) ( Addr old_ESP );
extern __attribute__((regparm(1))) void MAC_(die_mem_stack_32) ( Addr old_ESP );
extern void MAC_(die_mem_stack) ( Addr a, UInt len );
extern void MAC_(new_mem_stack) ( Addr a, UInt len );


/*------------------------------------------------------------*/
/*--- Stack pointer adjustment                             ---*/
/*------------------------------------------------------------*/

/* Some noble preprocessor abuse, to enable Memcheck and Addrcheck to
   share this code, but not call the same functions.

   Note that this code is executed very frequently and must be highly
   optimised, which is why I resort to the preprocessor to achieve the
   factoring, rather than eg. using function pointers.
*/
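
/* Illustrative sketch (an editor's example, not part of the original
   header): each skin expands ESP_UPDATE_HANDLERS exactly once in its own
   .c file, plugging in its own permission-setting routines.  With
   hypothetical handler names, the instantiation looks like

      ESP_UPDATE_HANDLERS ( make_aligned_word_writable,
                            make_aligned_word_noaccess,
                            make_aligned_doubleword_writable,
                            make_aligned_doubleword_noaccess,
                            make_writable,
                            make_noaccess )

   which defines the ten MAC_(new/die_mem_stack_N) fast cases plus the two
   generic MAC_(new/die_mem_stack) handlers declared above.  The expansion
   also relies on IS_ALIGNED4_ADDR and IS_ALIGNED8_ADDR being defined by
   the including file. */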

#define ESP_UPDATE_HANDLERS(ALIGNED4_NEW,  ALIGNED4_DIE,              \
                            ALIGNED8_NEW,  ALIGNED8_DIE,              \
                            UNALIGNED_NEW, UNALIGNED_DIE)             \
                                                                      \
void __attribute__((regparm(1))) MAC_(new_mem_stack_4)(Addr new_ESP)  \
{                                                                     \
   PROF_EVENT(110);                                                   \
   if (IS_ALIGNED4_ADDR(new_ESP)) {                                   \
      ALIGNED4_NEW  ( new_ESP );                                      \
   } else {                                                           \
      UNALIGNED_NEW ( new_ESP, 4 );                                   \
   }                                                                  \
}                                                                     \
                                                                      \
void __attribute__((regparm(1))) MAC_(die_mem_stack_4)(Addr new_ESP)  \
{                                                                     \
   PROF_EVENT(120);                                                   \
   if (IS_ALIGNED4_ADDR(new_ESP)) {                                   \
      ALIGNED4_DIE  ( new_ESP-4 );                                    \
   } else {                                                           \
      UNALIGNED_DIE ( new_ESP-4, 4 );                                 \
   }                                                                  \
}                                                                     \
                                                                      \
void __attribute__((regparm(1))) MAC_(new_mem_stack_8)(Addr new_ESP)  \
{                                                                     \
   PROF_EVENT(111);                                                   \
   if (IS_ALIGNED8_ADDR(new_ESP)) {                                   \
      ALIGNED8_NEW  ( new_ESP );                                      \
   } else if (IS_ALIGNED4_ADDR(new_ESP)) {                            \
      ALIGNED4_NEW  ( new_ESP   );                                    \
      ALIGNED4_NEW  ( new_ESP+4 );                                    \
   } else {                                                           \
      UNALIGNED_NEW ( new_ESP, 8 );                                   \
   }                                                                  \
}                                                                     \
                                                                      \
void __attribute__((regparm(1))) MAC_(die_mem_stack_8)(Addr new_ESP)  \
{                                                                     \
   PROF_EVENT(121);                                                   \
   if (IS_ALIGNED8_ADDR(new_ESP)) {                                   \
      ALIGNED8_DIE  ( new_ESP-8 );                                    \
   } else if (IS_ALIGNED4_ADDR(new_ESP)) {                            \
      ALIGNED4_DIE  ( new_ESP-8 );                                    \
      ALIGNED4_DIE  ( new_ESP-4 );                                    \
   } else {                                                           \
      UNALIGNED_DIE ( new_ESP-8, 8 );                                 \
   }                                                                  \
}                                                                     \
                                                                      \
void __attribute__((regparm(1))) MAC_(new_mem_stack_12)(Addr new_ESP) \
{                                                                     \
   PROF_EVENT(112);                                                   \
   if (IS_ALIGNED8_ADDR(new_ESP)) {                                   \
      ALIGNED8_NEW  ( new_ESP   );                                    \
      ALIGNED4_NEW  ( new_ESP+8 );                                    \
   } else if (IS_ALIGNED4_ADDR(new_ESP)) {                            \
      ALIGNED4_NEW  ( new_ESP   );                                    \
      ALIGNED8_NEW  ( new_ESP+4 );                                    \
   } else {                                                           \
      UNALIGNED_NEW ( new_ESP, 12 );                                  \
   }                                                                  \
}                                                                     \
                                                                      \
void __attribute__((regparm(1))) MAC_(die_mem_stack_12)(Addr new_ESP) \
{                                                                     \
   PROF_EVENT(122);                                                   \
   /* Note the -12 in the test */                                     \
   if (IS_ALIGNED8_ADDR(new_ESP-12)) {                                \
      ALIGNED8_DIE  ( new_ESP-12 );                                   \
      ALIGNED4_DIE  ( new_ESP-4  );                                   \
   } else if (IS_ALIGNED4_ADDR(new_ESP)) {                            \
      ALIGNED4_DIE  ( new_ESP-12 );                                   \
      ALIGNED8_DIE  ( new_ESP-8  );                                   \
   } else {                                                           \
      UNALIGNED_DIE ( new_ESP-12, 12 );                               \
   }                                                                  \
}                                                                     \
                                                                      \
void __attribute__((regparm(1))) MAC_(new_mem_stack_16)(Addr new_ESP) \
{                                                                     \
   PROF_EVENT(113);                                                   \
   if (IS_ALIGNED8_ADDR(new_ESP)) {                                   \
      ALIGNED8_NEW  ( new_ESP   );                                    \
      ALIGNED8_NEW  ( new_ESP+8 );                                    \
   } else if (IS_ALIGNED4_ADDR(new_ESP)) {                            \
      ALIGNED4_NEW  ( new_ESP    );                                   \
      ALIGNED8_NEW  ( new_ESP+4  );                                   \
      ALIGNED4_NEW  ( new_ESP+12 );                                   \
   } else {                                                           \
      UNALIGNED_NEW ( new_ESP, 16 );                                  \
   }                                                                  \
}                                                                     \
                                                                      \
void __attribute__((regparm(1))) MAC_(die_mem_stack_16)(Addr new_ESP) \
{                                                                     \
   PROF_EVENT(123);                                                   \
   if (IS_ALIGNED8_ADDR(new_ESP)) {                                   \
      ALIGNED8_DIE  ( new_ESP-16 );                                   \
      ALIGNED8_DIE  ( new_ESP-8  );                                   \
   } else if (IS_ALIGNED4_ADDR(new_ESP)) {                            \
      ALIGNED4_DIE  ( new_ESP-16 );                                   \
      ALIGNED8_DIE  ( new_ESP-12 );                                   \
      ALIGNED4_DIE  ( new_ESP-4  );                                   \
   } else {                                                           \
      UNALIGNED_DIE ( new_ESP-16, 16 );                               \
   }                                                                  \
}                                                                     \
                                                                      \
void __attribute__((regparm(1))) MAC_(new_mem_stack_32)(Addr new_ESP) \
{                                                                     \
   PROF_EVENT(114);                                                   \
   if (IS_ALIGNED8_ADDR(new_ESP)) {                                   \
      ALIGNED8_NEW  ( new_ESP    );                                   \
      ALIGNED8_NEW  ( new_ESP+8  );                                   \
      ALIGNED8_NEW  ( new_ESP+16 );                                   \
      ALIGNED8_NEW  ( new_ESP+24 );                                   \
   } else if (IS_ALIGNED4_ADDR(new_ESP)) {                            \
      ALIGNED4_NEW  ( new_ESP    );                                   \
      ALIGNED8_NEW  ( new_ESP+4  );                                   \
      ALIGNED8_NEW  ( new_ESP+12 );                                   \
      ALIGNED8_NEW  ( new_ESP+20 );                                   \
      ALIGNED4_NEW  ( new_ESP+28 );                                   \
   } else {                                                           \
      UNALIGNED_NEW ( new_ESP, 32 );                                  \
   }                                                                  \
}                                                                     \
                                                                      \
void __attribute__((regparm(1))) MAC_(die_mem_stack_32)(Addr new_ESP) \
{                                                                     \
   PROF_EVENT(124);                                                   \
   if (IS_ALIGNED8_ADDR(new_ESP)) {                                   \
      ALIGNED8_DIE  ( new_ESP-32 );                                   \
      ALIGNED8_DIE  ( new_ESP-24 );                                   \
      ALIGNED8_DIE  ( new_ESP-16 );                                   \
      ALIGNED8_DIE  ( new_ESP-8  );                                   \
   } else if (IS_ALIGNED4_ADDR(new_ESP)) {                            \
      ALIGNED4_DIE  ( new_ESP-32 );                                   \
      ALIGNED8_DIE  ( new_ESP-28 );                                   \
      ALIGNED8_DIE  ( new_ESP-20 );                                   \
      ALIGNED8_DIE  ( new_ESP-12 );                                   \
      ALIGNED4_DIE  ( new_ESP-4  );                                   \
   } else {                                                           \
      UNALIGNED_DIE ( new_ESP-32, 32 );                               \
   }                                                                  \
}                                                                     \
                                                                      \
void MAC_(new_mem_stack) ( Addr a, UInt len )                         \
{                                                                     \
   PROF_EVENT(115);                                                   \
   UNALIGNED_NEW ( a, len );                                          \
}                                                                     \
                                                                      \
void MAC_(die_mem_stack) ( Addr a, UInt len )                         \
{                                                                     \
   PROF_EVENT(125);                                                   \
   UNALIGNED_DIE ( a, len );                                          \
}

#endif   /* __MAC_SHARED_H */

/*--------------------------------------------------------------------*/
/*--- end                                             mac_shared.h ---*/
/*--------------------------------------------------------------------*/