32- and 64-bit targets, little- and big-endian. It more or less works on x86 as-is, although it is unusably slow, since I have knocked out all the fast-path cases and am concentrating on getting the baseline functionality correct. The fast cases will go back in in due course.

The fundamental idea is to retain the old 2-level indexing for speed. Since covering a full 64-bit address space that way is clearly unviable, the primary map handles only the first N gigabytes of address space (probably to be set to 16, 32 or 64G). Addresses above that are handled slowly, using an auxiliary primary map which explicitly lists (base, &-of-secondary-map) pairs. The goal is to have the address-space manager try to put everything below the 16/32/64G boundary, so that we hit the fast cases almost all the time. Performance of the 32-bit case should be unaffected, since the fast map will always cover at least the lowest 4G of address space.

There are many word-size and endianness cleanups. Jeremy's distinguished-map space-compression scheme is retained, in modified form, as it is simple and seems effective at reducing Memcheck's space use.

Note this is all subject to rapid change.

git-svn-id: svn://svn.valgrind.org/valgrind/trunk@3535
/*--------------------------------------------------------------------*/
/*--- MemCheck: Maintain bitmaps of memory, tracking the           ---*/
/*--- accessibility (A) and validity (V) status of each byte.      ---*/
/*---                                                    mc_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2005 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

/* TODO urgently

   sanity check:
      auxmap only covers address space that the primary doesn't
      auxmap entries non-duplicated (expensive)

   types of helper functions

   set_address_range_perms to notice when a distinguished secondary
   will work, and use that (viz, re-implement compression scheme)

   profile

   reinstate fast-path cases
*/

#include "mc_include.h"
#include "memcheck.h"   /* for client requests */

//#include "vg_profile.c"

typedef enum {
   MC_Ok = 5, MC_AddrErr = 6, MC_ValueErr = 7
} MC_ReadResult;

#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)

/*------------------------------------------------------------*/
/*--- Basic A/V bitmap representation.                     ---*/
/*------------------------------------------------------------*/

/* --------------- Basic configuration --------------- */

/* The number of entries in the primary map can be altered.  However,
   we hardwire the assumption that each secondary map covers precisely
   64k of address space. */

#define N_PRIMARY_BITS  16

#define N_PRIMARY_MAPS  ((UWord)1 << N_PRIMARY_BITS)

/* Inclusive upper bound on the addresses the main primary map covers.
   Note the -1: it keeps the bound inclusive, so that for any a with
   a <= MAX_PRIMARY_ADDRESS, (a >> 16) is a valid primary_map index.
   Without it, an address of exactly 65536 * N_PRIMARY_MAPS would
   index one element past the end of primary_map. */
#define MAX_PRIMARY_ADDRESS (Addr)((((Addr)65536) * N_PRIMARY_MAPS) - 1)
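
/* A hedged worked example (informational comment only; nothing below
   depends on it): with N_PRIMARY_BITS = 16, each secondary map covers
   64k, so the primary map covers 65536 * 64k = 4G.  An address a at
   or below MAX_PRIMARY_ADDRESS splits as

      pm_off = a >> 16;      // which secondary map
      sm_off = a & 0xFFFF;   // which byte within that secondary

   e.g. a = 0x0123456F gives pm_off = 0x0123 and sm_off = 0x456F. */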

/* --------------- Secondary maps --------------- */

typedef
   struct {
      UChar abits[8192];
      UChar vbyte[65536];
   }
   SecMap;
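
/* Size check (informational): each secondary shadows 64k of address
   space with one A bit per byte (65536 bits = 8192 bytes) plus one V
   byte per byte (65536 bytes), so sizeof(SecMap) == 73728 bytes. */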

/* 3 distinguished secondary maps, one for no-access, one for
   accessible but undefined, and one for accessible and defined.
   Distinguished secondaries may never be modified.
*/
#define SM_DIST_NOACCESS          0
#define SM_DIST_ACCESS_UNDEFINED  1
#define SM_DIST_ACCESS_DEFINED    2

static SecMap sm_distinguished[3];

static inline Bool is_distinguished_sm ( SecMap* sm ) {
   return sm >= &sm_distinguished[0] && sm <= &sm_distinguished[2];
}
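
/* Note (added comment): this pointer-range test relies on the three
   distinguished maps living in the single contiguous array above; any
   SecMap obtained from VG_(shadow_alloc) lies outside that range. */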

/* dist_sm points to one of our three distinguished secondaries.  Make
   a copy of it so that we can write to it.
*/
static SecMap* copy_for_writing ( SecMap* dist_sm )
{
   SecMap* new_sm;
   tl_assert(dist_sm == &sm_distinguished[0]
             || dist_sm == &sm_distinguished[1]
             || dist_sm == &sm_distinguished[2]);

   new_sm = VG_(shadow_alloc)(sizeof(SecMap));
   VG_(memcpy)(new_sm, dist_sm, sizeof(SecMap));
   return new_sm;
}

/* --------------- Primary maps --------------- */

/* The main primary map.  This covers some initial part of the address
   space, addresses 0 .. (N_PRIMARY_MAPS << 16)-1.  The rest of it is
   handled using the auxiliary primary map.
*/
static SecMap* primary_map[N_PRIMARY_MAPS];

/* An entry in the auxiliary primary map.  base must be a 64k-aligned
   value, and sm points at the relevant secondary map.  As with the
   main primary map, the secondary may be either a real secondary, or
   one of the three distinguished secondaries.
*/
typedef
   struct {
      Addr    base;
      SecMap* sm;
   }
   AuxMapEnt;

/* An expanding array of AuxMapEnts. */
#define N_AUXMAPS 500 /* HACK */
static AuxMapEnt  hacky_auxmaps[N_AUXMAPS];
static Int        auxmap_size = N_AUXMAPS;
static Int        auxmap_used = 0;
static AuxMapEnt* auxmap      = &hacky_auxmaps[0];

/* Auxmap statistics */
static ULong n_auxmap_searches = 0;
static ULong n_auxmap_cmps     = 0;

/* Find an entry in the auxiliary map.  If an entry is found, move it
   one step closer to the front of the array, then return its address.
   If an entry is not found, allocate one.  Note carefully that
   because each call potentially rearranges the entries, each call
   to this function invalidates ALL AuxMapEnt*s previously obtained by
   calling this fn.
*/
static AuxMapEnt* find_or_alloc_in_auxmap ( Addr a )
{
   UWord i;
   tl_assert(a > MAX_PRIMARY_ADDRESS);

   a &= ~(Addr)0xFFFF;

   /* Search .. */
   n_auxmap_searches++;
   for (i = 0; i < auxmap_used; i++) {
      if (auxmap[i].base == a)
         break;
   }
   n_auxmap_cmps += (ULong)(i+1);

   if (i < auxmap_used) {
      /* Found it.  Nudge it a bit closer to the front. */
      if (i > 0) {
         AuxMapEnt tmp = auxmap[i-1];
         auxmap[i-1] = auxmap[i];
         auxmap[i] = tmp;
         i--;
      }
      return &auxmap[i];
   }

   /* We didn't find it.  Hmm.  This is a new piece of address space.
      We'll need to allocate a new AuxMap entry for it. */
   if (auxmap_used >= auxmap_size) {
      tl_assert(auxmap_used == auxmap_size);
      /* Out of auxmap entries. */
      tl_assert2(0, "failed to expand the auxmap table");
   }

   tl_assert(auxmap_used < auxmap_size);

   auxmap[auxmap_used].base = a & ~(Addr)0xFFFF;
   auxmap[auxmap_used].sm   = &sm_distinguished[SM_DIST_NOACCESS];

   if (0)
      VG_(printf)("new auxmap, base = 0x%llx\n",
                  (ULong)auxmap[auxmap_used].base );

   auxmap_used++;
   return &auxmap[auxmap_used-1];
}
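
/* A minimal sketch of the auxmap sanity checks the TODO at the top of
   this file asks for.  It is not wired into anything yet (hence the
   #if 0), and the name auxmap_is_sane is an invention of this sketch,
   not an existing Valgrind API. */
#if 0
static Bool auxmap_is_sane ( void )
{
   Int i, j;
   for (i = 0; i < auxmap_used; i++) {
      /* auxmap only covers address space that the primary doesn't */
      if (auxmap[i].base <= MAX_PRIMARY_ADDRESS)
         return False;
      /* bases are 64k-aligned */
      if (auxmap[i].base & (Addr)0xFFFF)
         return False;
      /* entries non-duplicated ("expensive": quadratic scan) */
      for (j = i+1; j < auxmap_used; j++)
         if (auxmap[i].base == auxmap[j].base)
            return False;
   }
   return True;
}
#endif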

/* --------------- SecMap fundamentals --------------- */

/* Produce the secmap for 'a', either from the primary map or by
   ensuring there is an entry for it in the aux primary map.  The
   secmap may be a distinguished one as the caller will only want to
   be able to read it.
*/
static SecMap* get_secmap_readable ( Addr a )
{
   if (a <= MAX_PRIMARY_ADDRESS) {
      UWord pm_off = a >> 16;
      return primary_map[ pm_off ];
   } else {
      AuxMapEnt* am = find_or_alloc_in_auxmap(a);
      return am->sm;
   }
}

/* Produce the secmap for 'a', either from the primary map or by
   ensuring there is an entry for it in the aux primary map.  The
   secmap may not be a distinguished one, since the caller will want
   to be able to write it.  If it is a distinguished secondary, make a
   writable copy of it, install it, and return the copy instead.  (COW
   semantics.)
*/
static SecMap* get_secmap_writable ( Addr a )
{
   if (a <= MAX_PRIMARY_ADDRESS) {
      UWord pm_off = a >> 16;
      if (is_distinguished_sm(primary_map[ pm_off ]))
         primary_map[pm_off] = copy_for_writing(primary_map[pm_off]);
      return primary_map[pm_off];
   } else {
      AuxMapEnt* am = find_or_alloc_in_auxmap(a);
      if (is_distinguished_sm(am->sm))
         am->sm = copy_for_writing(am->sm);
      return am->sm;
   }
}

/* --------------- Endianness helpers --------------- */

/* Returns the offset in memory of the byteno-th least significant
   byte in a wordszB-sized word, given the specified endianness.
   (byteno 0 denotes the least significant byte regardless of
   endianness; only its memory offset changes.) */
static inline UWord byte_offset_w ( UWord wordszB, Bool bigendian,
                                    UWord byteno ) {
   return bigendian ? (wordszB-1-byteno) : byteno;
}
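
/* Worked example (informational): for a 4-byte word at address a,
   byte_offset_w(4, False, 0) == 0 -- on little-endian the LSB sits at
   the lowest address -- while byte_offset_w(4, True, 0) == 3, since
   on big-endian the LSB sits at the highest address. */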

/* --------------- Fundamental functions --------------- */

static
void get_abit_and_vbyte ( /*OUT*/UWord* abit,
                          /*OUT*/UWord* vbyte,
                          Addr a )
{
   SecMap* sm = get_secmap_readable(a);
   *vbyte = 0xFF & sm->vbyte[a & 0xFFFF];
   *abit  = read_bit_array(sm->abits, a & 0xFFFF);
}

static
UWord get_abit ( Addr a )
{
   SecMap* sm = get_secmap_readable(a);
   return read_bit_array(sm->abits, a & 0xFFFF);
}

static
void set_abit_and_vbyte ( Addr a, UWord abit, UWord vbyte )
{
   SecMap* sm = get_secmap_writable(a);
   sm->vbyte[a & 0xFFFF] = 0xFF & vbyte;
   write_bit_array(sm->abits, a & 0xFFFF, abit);
}

static
void set_vbyte ( Addr a, UWord vbyte )
{
   SecMap* sm = get_secmap_writable(a);
   sm->vbyte[a & 0xFFFF] = 0xFF & vbyte;
}

/* --------------- Load/store slow cases. --------------- */

static
ULong mc_LOADVn_slow ( Addr a, SizeT szB, Bool bigendian )
{
   /* Make up a result V word, which contains the loaded data for
      valid addresses and Undefined for invalid addresses.  Iterate
      over the bytes in the word, from the most significant down to
      the least. */
   ULong vw          = VGM_WORD64_INVALID;
   SizeT i           = szB-1;
   SizeT n_addrs_bad = 0;
   Addr  ai;
   Bool  aok;
   UWord abit, vbyte;

   PROF_EVENT(70);
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);

   while (True) {
      ai = a+byte_offset_w(szB,bigendian,i);
      get_abit_and_vbyte(&abit, &vbyte, ai);
      aok = abit == VGM_BIT_VALID;
      if (!aok)
         n_addrs_bad++;
      vw <<= 8;
      vw |= 0xFF & (aok ? vbyte : VGM_BYTE_INVALID);
      if (i == 0) break;
      i--;
   }

   if (n_addrs_bad > 0)
      MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, False );

   //if (n_addrs_bad == n)
   //   vw = VGM_WORD64_VALID;
   return vw;
}
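
/* Illustration (informational): a 2-byte little-endian load at a
   therefore yields a vw whose low 16 bits are
   (vbyte[a+1] << 8) | vbyte[a], with 0xFF (VGM_BYTE_INVALID) standing
   in for any byte whose A bit is bad. */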

static
void mc_STOREVn_slow ( Addr a, SizeT szB, UWord vbytes, Bool bigendian )
{
   SizeT i;
   SizeT n_addrs_bad = 0;
   UWord abit;
   Bool  aok;
   Addr  ai;

   PROF_EVENT(71);
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);

   /* Dump vbytes in memory, iterating from least to most significant
      byte.  At the same time establish addressibility of the
      location. */
   for (i = 0; i < szB; i++) {
      ai = a+byte_offset_w(szB,bigendian,i);
      abit = get_abit(ai);
      aok = abit == VGM_BIT_VALID;
      if (!aok)
         n_addrs_bad++;
      set_vbyte(ai, vbytes & 0xFF );
      vbytes >>= 8;
   }

   /* If an address error has happened, report it. */
   if (n_addrs_bad > 0)
      MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, True );
}

///////////////////////////////////////////////////////////////

/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////

//zz #if 0 /* this is the old implementation */
//zz
//zz /* Define to debug the mem audit system. */
//zz /* #define VG_DEBUG_MEMORY */
//zz
//zz
//zz /*------------------------------------------------------------*/
//zz /*--- Low-level support for memory checking.               ---*/
//zz /*------------------------------------------------------------*/
//zz
//zz /* All reads and writes are checked against a memory map, which
//zz    records the state of all memory in the process.  The memory map is
//zz    organised like this:
//zz
//zz    The top 16 bits of an address are used to index into a top-level
//zz    map table, containing 65536 entries.  Each entry is a pointer to a
//zz    second-level map, which records the accessibility and validity
//zz    permissions for the 65536 bytes indexed by the lower 16 bits of the
//zz    address.  Each byte is represented by nine bits, one indicating
//zz    accessibility, the other eight validity.  So each second-level map
//zz    contains 73728 bytes.  This two-level arrangement conveniently
//zz    divides the 4G address space into 64k lumps, each size 64k bytes.
//zz
//zz    All entries in the primary (top-level) map must point to a valid
//zz    secondary (second-level) map.  Since most of the 4G of address
//zz    space will not be in use -- ie, not mapped at all -- there is a
//zz    distinguished secondary map, which indicates `not addressible and
//zz    not valid' writeable for all bytes.  Entries in the primary map for
//zz    which the entire 64k is not in use at all point at this
//zz    distinguished map.
//zz
//zz    There are actually 4 distinguished secondaries.  These are used to
//zz    represent a memory range which is either not addressable (validity
//zz    doesn't matter), addressable+not valid, addressable+valid.
//zz
//zz    [...] lots of stuff deleted due to out of date-ness
//zz
//zz    As a final optimisation, the alignment and address checks for
//zz    4-byte loads and stores are combined in a neat way.  The primary
//zz    map is extended to have 262144 entries (2^18), rather than 2^16.
//zz    The top 3/4 of these entries are permanently set to the
//zz    distinguished secondary map.  For a 4-byte load/store, the
//zz    top-level map is indexed not with (addr >> 16) but instead f(addr),
//zz    where
//zz
//zz    f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
//zz         = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX  or
//zz         = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
//zz
//zz    ie the lowest two bits are placed above the 16 high address bits.
//zz    If either of these two bits are nonzero, the address is misaligned;
//zz    this will select a secondary map from the upper 3/4 of the primary
//zz    map.  Because this is always the distinguished secondary map, a
//zz    (bogus) address check failure will result.  The failure handling
//zz    code can then figure out whether this is a genuine addr check
//zz    failure or whether it is a possibly-legitimate access at a
//zz    misaligned address.
//zz */
//zz
//zz /*------------------------------------------------------------*/
//zz /*--- Function declarations.                               ---*/
//zz /*------------------------------------------------------------*/
//zz
//zz static ULong mc_rd_V8_SLOWLY ( Addr a );
//zz static UInt  mc_rd_V4_SLOWLY ( Addr a );
//zz static UInt  mc_rd_V2_SLOWLY ( Addr a );
//zz static UInt  mc_rd_V1_SLOWLY ( Addr a );
//zz
//zz static void  mc_wr_V8_SLOWLY ( Addr a, ULong vbytes );
//zz static void  mc_wr_V4_SLOWLY ( Addr a, UInt vbytes );
//zz static void  mc_wr_V2_SLOWLY ( Addr a, UInt vbytes );
//zz static void  mc_wr_V1_SLOWLY ( Addr a, UInt vbytes );
//zz
//zz /*------------------------------------------------------------*/
//zz /*--- Data defns.                                          ---*/
//zz /*------------------------------------------------------------*/
//zz
//zz typedef
//zz    struct {
//zz       UChar abits[SECONDARY_SIZE/8];
//zz       UChar vbyte[SECONDARY_SIZE];
//zz    }
//zz    SecMap;
//zz
//zz
//zz static SecMap* primary_map[ /*PRIMARY_SIZE*/ PRIMARY_SIZE*4 ];
//zz
//zz #define DSM_IDX(a, v) ((((a)&1) << 1) + ((v)&1))
//zz
//zz /* 4 secondary maps, but one is redundant (because the !addressable &&
//zz    valid state is meaningless) */
//zz static const SecMap distinguished_secondary_maps[4] = {
//zz #define INIT(a, v) \
//zz    [ DSM_IDX(a, v) ] = { { [0 ... (SECONDARY_SIZE/8)-1] = BIT_EXPAND(a) }, \
//zz                          { [0 ... SECONDARY_SIZE-1] = BIT_EXPAND(a|v) } }
//zz    INIT(VGM_BIT_VALID,   VGM_BIT_VALID),
//zz    INIT(VGM_BIT_VALID,   VGM_BIT_INVALID),
//zz    INIT(VGM_BIT_INVALID, VGM_BIT_VALID),
//zz    INIT(VGM_BIT_INVALID, VGM_BIT_INVALID),
//zz #undef INIT
//zz };
//zz #define N_SECONDARY_MAPS (sizeof(distinguished_secondary_maps)/sizeof(*distinguished_secondary_maps))
//zz
//zz #define DSM(a,v) ((SecMap *)&distinguished_secondary_maps[DSM_IDX(a, v)])
//zz
//zz #define DSM_NOTADDR       DSM(VGM_BIT_INVALID, VGM_BIT_INVALID)
//zz #define DSM_ADDR_NOTVALID DSM(VGM_BIT_VALID,   VGM_BIT_INVALID)
//zz #define DSM_ADDR_VALID    DSM(VGM_BIT_VALID,   VGM_BIT_VALID)

static void init_shadow_memory ( void )
{
   Int     i;
   SecMap* sm;

   /* Build the 3 distinguished secondaries */
   tl_assert(VGM_BIT_INVALID == 1);
   tl_assert(VGM_BIT_VALID == 0);
   tl_assert(VGM_BYTE_INVALID == 0xFF);
   tl_assert(VGM_BYTE_VALID == 0);

   /* Set A invalid, V invalid. */
   sm = &sm_distinguished[SM_DIST_NOACCESS];
   for (i = 0; i < 65536; i++)
      sm->vbyte[i] = VGM_BYTE_INVALID;
   for (i = 0; i < 8192; i++)
      sm->abits[i] = VGM_BYTE_INVALID;

   /* Set A valid, V invalid. */
   sm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
   for (i = 0; i < 65536; i++)
      sm->vbyte[i] = VGM_BYTE_INVALID;
   for (i = 0; i < 8192; i++)
      sm->abits[i] = VGM_BYTE_VALID;

   /* Set A valid, V valid. */
   sm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
   for (i = 0; i < 65536; i++)
      sm->vbyte[i] = VGM_BYTE_VALID;
   for (i = 0; i < 8192; i++)
      sm->abits[i] = VGM_BYTE_VALID;

   /* Set up the primary map. */
   /* These entries gradually get overwritten as the used address
      space expands. */
   for (i = 0; i < N_PRIMARY_MAPS; i++)
      primary_map[i] = &sm_distinguished[SM_DIST_NOACCESS];

   /* auxmap_size = auxmap_used = 0;
      no ... these are statically initialised */

   tl_assert( TL_(expensive_sanity_check)() );
}

//zz /*------------------------------------------------------------*/
//zz /*--- Basic bitmap management, reading and writing.        ---*/
//zz /*------------------------------------------------------------*/
//zz
//zz /* Allocate and initialise a secondary map. */
//zz
//zz static SecMap* alloc_secondary_map ( __attribute__ ((unused))
//zz                                      Char* caller,
//zz                                      const SecMap *prototype)
//zz {
//zz    SecMap* map;
//zz    PROF_EVENT(10);
//zz
//zz    map = (SecMap *)VG_(shadow_alloc)(sizeof(SecMap));
//zz
//zz    VG_(memcpy)(map, prototype, sizeof(*map));
//zz
//zz    /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
//zz    return map;
//zz }
//zz
//zz
//zz /* Basic reading/writing of the bitmaps, for byte-sized accesses. */
//zz
//zz static __inline__ UChar get_abit ( Addr a )
//zz {
//zz    SecMap* sm     = primary_map[PM_IDX(a)];
//zz    UInt    sm_off = SM_OFF(a);
//zz    PROF_EVENT(20);
//zz #  if 0
//zz       if (IS_DISTINGUISHED_SM(sm))
//zz          VG_(message)(Vg_DebugMsg,
//zz                       "accessed distinguished 2ndary (A)map! 0x%x\n", a);
//zz #  endif
//zz    return BITARR_TEST(sm->abits, sm_off)
//zz              ? VGM_BIT_INVALID : VGM_BIT_VALID;
//zz }
//zz
//zz static __inline__ UChar get_vbyte ( Addr a )
//zz {
//zz    SecMap* sm     = primary_map[PM_IDX(a)];
//zz    UInt    sm_off = SM_OFF(a);
//zz    PROF_EVENT(21);
//zz #  if 0
//zz       if (IS_DISTINGUISHED_SM(sm))
//zz          VG_(message)(Vg_DebugMsg,
//zz                       "accessed distinguished 2ndary (V)map! 0x%x\n", a);
//zz #  endif
//zz    return sm->vbyte[sm_off];
//zz }
//zz
//zz static /* __inline__ */ void set_abit ( Addr a, UChar abit )
//zz {
//zz    SecMap* sm;
//zz    UInt    sm_off;
//zz    PROF_EVENT(22);
//zz    ENSURE_MAPPABLE(a, "set_abit");
//zz    sm     = primary_map[PM_IDX(a)];
//zz    sm_off = SM_OFF(a);
//zz    if (abit)
//zz       BITARR_SET(sm->abits, sm_off);
//zz    else
//zz       BITARR_CLEAR(sm->abits, sm_off);
//zz }
//zz
//zz static __inline__ void set_vbyte ( Addr a, UChar vbyte )
//zz {
//zz    SecMap* sm;
//zz    UInt    sm_off;
//zz    PROF_EVENT(23);
//zz    ENSURE_MAPPABLE(a, "set_vbyte");
//zz    sm     = primary_map[PM_IDX(a)];
//zz    sm_off = SM_OFF(a);
//zz    sm->vbyte[sm_off] = vbyte;
//zz }
//zz
//zz
//zz /* Reading/writing of the bitmaps, for aligned word-sized accesses. */
//zz
//zz static __inline__ UChar get_abits4_ALIGNED ( Addr a )
//zz {
//zz    SecMap* sm;
//zz    UInt    sm_off;
//zz    UChar   abits8;
//zz    PROF_EVENT(24);
//zz #  ifdef VG_DEBUG_MEMORY
//zz    tl_assert(VG_IS_4_ALIGNED(a));
//zz #  endif
//zz    sm     = primary_map[PM_IDX(a)];
//zz    sm_off = SM_OFF(a);
//zz    abits8 = sm->abits[sm_off >> 3];
//zz    abits8 >>= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
//zz    abits8 &= 0x0F;
//zz    return abits8;
//zz }
//zz
//zz static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
//zz {
//zz    SecMap* sm     = primary_map[PM_IDX(a)];
//zz    UInt    sm_off = SM_OFF(a);
//zz    PROF_EVENT(25);
//zz #  ifdef VG_DEBUG_MEMORY
//zz    tl_assert(VG_IS_4_ALIGNED(a));
//zz #  endif
//zz    return ((UInt*)(sm->vbyte))[sm_off >> 2];
//zz }
//zz
//zz
//zz static void __inline__ set_vbytes4_ALIGNED ( Addr a, UInt vbytes )
//zz {
//zz    SecMap* sm;
//zz    UInt    sm_off;
//zz    ENSURE_MAPPABLE(a, "set_vbytes4_ALIGNED");
//zz    sm     = primary_map[PM_IDX(a)];
//zz    sm_off = SM_OFF(a);
//zz    PROF_EVENT(23);
//zz #  ifdef VG_DEBUG_MEMORY
//zz    tl_assert(VG_IS_4_ALIGNED(a));
//zz #  endif
//zz    ((UInt*)(sm->vbyte))[sm_off >> 2] = vbytes;
//zz }

/*------------------------------------------------------------*/
/*--- Setting permissions over address ranges.             ---*/
/*------------------------------------------------------------*/

static void set_address_range_perms ( Addr a, SizeT len,
                                      UWord example_a_bit,
                                      UWord example_v_bit )
{
   SizeT i;

   /* Replicate the example V bit into a whole example V byte:
      0 -> 0x00 (VGM_BYTE_VALID), 1 -> 0xFF (VGM_BYTE_INVALID). */
   UWord example_vbyte = 1 & example_v_bit;
   example_vbyte |= (example_vbyte << 1);
   example_vbyte |= (example_vbyte << 2);
   example_vbyte |= (example_vbyte << 4);

   tl_assert(sizeof(SizeT) == sizeof(Addr));

   if (0 && len >= 4096)
      VG_(printf)("s_a_r_p(0x%llx, %d, %d,%d)\n",
                  (ULong)a, len, example_a_bit, example_v_bit);

   if (len == 0)
      return;

   for (i = 0; i < len; i++) {
      set_abit_and_vbyte(a+i, example_a_bit, example_vbyte);
   }
}
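
/* Hedged sketch of the compression fast path the TODO above asks for
   ("set_address_range_perms to notice when a distinguished secondary
   will work").  It is not called from anywhere yet, and the helper
   name is invented here, not an existing API: for a whole 64k-aligned
   chunk within the primary map's range, point the primary entry at
   the matching distinguished secondary instead of writing all 73728
   shadow bytes.  Caveat: if the entry already points at a real
   (non-distinguished) secondary, overwriting the pointer would leak
   that secondary, so this only swaps distinguished ones. */
#if 0
static void set_whole_chunk_to_dist ( Addr a, Int dist_ix )
{
   UWord pm_off = a >> 16;
   tl_assert((a & (Addr)0xFFFF) == 0);
   tl_assert(a <= MAX_PRIMARY_ADDRESS);
   tl_assert(dist_ix >= 0 && dist_ix <= 2);
   if (is_distinguished_sm(primary_map[pm_off]))
      primary_map[pm_off] = &sm_distinguished[dist_ix];
}
#endif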

//zz {
//zz    UChar   vbyte, abyte8;
//zz    UInt    vword4, sm_off;
//zz    SecMap* sm;
//zz
//zz    PROF_EVENT(30);
//zz
//zz    if (len == 0)
//zz       return;
//zz
//zz    if (VG_(clo_verbosity) > 0) {
//zz       if (len > 100 * 1000 * 1000) {
//zz          VG_(message)(Vg_UserMsg,
//zz                       "Warning: set address range perms: "
//zz                       "large range %u, a %d, v %d",
//zz                       len, example_a_bit, example_v_bit );
//zz       }
//zz    }
//zz
//zz    VGP_PUSHCC(VgpSetMem);
//zz
//zz    /* Requests to change permissions of huge address ranges may
//zz       indicate bugs in our machinery.  30,000,000 is arbitrary, but so
//zz       far all legitimate requests have fallen beneath that size. */
//zz    /* 4 Mar 02: this is just stupid; get rid of it. */
//zz    /* tl_assert(len < 30000000); */
//zz
//zz    /* Check the permissions make sense. */
//zz    tl_assert(example_a_bit == VGM_BIT_VALID
//zz              || example_a_bit == VGM_BIT_INVALID);
//zz    tl_assert(example_v_bit == VGM_BIT_VALID
//zz              || example_v_bit == VGM_BIT_INVALID);
//zz    if (example_a_bit == VGM_BIT_INVALID)
//zz       tl_assert(example_v_bit == VGM_BIT_INVALID);
//zz
//zz    /* The validity bits to write. */
//zz    vbyte = example_v_bit==VGM_BIT_VALID
//zz               ? VGM_BYTE_VALID : VGM_BYTE_INVALID;
//zz
//zz    /* In order that we can charge through the address space at 8
//zz       bytes/main-loop iteration, make up some perms. */
//zz    abyte8 = BIT_EXPAND(example_a_bit);
//zz    vword4 = (vbyte << 24) | (vbyte << 16) | (vbyte << 8) | vbyte;
//zz
//zz #  ifdef VG_DEBUG_MEMORY
//zz    /* Do it ... */
//zz    while (True) {
//zz       PROF_EVENT(31);
//zz       if (len == 0) break;
//zz       set_abit ( a, example_a_bit );
//zz       set_vbyte ( a, vbyte );
//zz       a++;
//zz       len--;
//zz    }
//zz
//zz #  else
//zz    /* Slowly do parts preceding 8-byte alignment. */
//zz    while (True) {
//zz       PROF_EVENT(31);
//zz       if (len == 0) break;
//zz       if ((a % 8) == 0) break;
//zz       set_abit ( a, example_a_bit );
//zz       set_vbyte ( a, vbyte );
//zz       a++;
//zz       len--;
//zz    }
//zz
//zz    if (len == 0) {
//zz       VGP_POPCC(VgpSetMem);
//zz       return;
//zz    }
//zz    tl_assert((a % 8) == 0 && len > 0);
//zz
//zz    /* Now align to the next primary_map entry */
//zz    for (; (a & SECONDARY_MASK) && len >= 8; a += 8, len -= 8) {
//zz
//zz       PROF_EVENT(32);
//zz       /* If the primary is already pointing to a distinguished map
//zz          with the same properties as we're trying to set, then leave
//zz          it that way. */
//zz       if (primary_map[PM_IDX(a)] == DSM(example_a_bit, example_v_bit))
//zz          continue;
//zz
//zz       ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
//zz       sm = primary_map[PM_IDX(a)];
//zz       sm_off = SM_OFF(a);
//zz       sm->abits[sm_off >> 3] = abyte8;
//zz       ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = vword4;
//zz       ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = vword4;
//zz    }
//zz
//zz    /* Now set whole secondary maps to the right distinguished value.
//zz
//zz       Note that if the primary already points to a non-distinguished
//zz       secondary, then don't replace the reference.  That would just
//zz       leak memory.
//zz    */
//zz    for(; len >= SECONDARY_SIZE; a += SECONDARY_SIZE, len -= SECONDARY_SIZE) {
//zz       sm = primary_map[PM_IDX(a)];
//zz
//zz       if (IS_DISTINGUISHED_SM(sm))
//zz          primary_map[PM_IDX(a)] = DSM(example_a_bit, example_v_bit);
//zz       else {
//zz          VG_(memset)(sm->abits, abyte8, sizeof(sm->abits));
//zz          VG_(memset)(sm->vbyte, vbyte, sizeof(sm->vbyte));
//zz       }
//zz    }
//zz
//zz    /* Now finish off any remains */
//zz    for (; len >= 8; a += 8, len -= 8) {
//zz       PROF_EVENT(32);
//zz
//zz       /* If the primary is already pointing to a distinguished map
//zz          with the same properties as we're trying to set, then leave
//zz          it that way. */
//zz       if (primary_map[PM_IDX(a)] == DSM(example_a_bit, example_v_bit))
//zz          continue;
//zz
//zz       ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
//zz       sm = primary_map[PM_IDX(a)];
//zz       sm_off = SM_OFF(a);
//zz       sm->abits[sm_off >> 3] = abyte8;
//zz       ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = vword4;
//zz       ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = vword4;
//zz    }
//zz
//zz    /* Finish the upper fragment. */
//zz    while (True) {
//zz       PROF_EVENT(33);
//zz       if (len == 0) break;
//zz       set_abit ( a, example_a_bit );
//zz       set_vbyte ( a, vbyte );
//zz       a++;
//zz       len--;
//zz    }
//zz #  endif
//zz
//zz    /* Check that zero page and highest page have not been written to
//zz       -- this could happen with buggy syscall wrappers.  Today
//zz       (2001-04-26) had precisely such a problem with __NR_setitimer. */
//zz    tl_assert(TL_(cheap_sanity_check)());
//zz    VGP_POPCC(VgpSetMem);
//zz }

/* Set permissions for address ranges ... */

static void mc_make_noaccess ( Addr a, SizeT len )
{
   PROF_EVENT(35);
   DEBUG("mc_make_noaccess(%p, %llu)\n", a, (ULong)len);
   set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
}

static void mc_make_writable ( Addr a, SizeT len )
{
   PROF_EVENT(36);
   DEBUG("mc_make_writable(%p, %llu)\n", a, (ULong)len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
}

static void mc_make_readable ( Addr a, SizeT len )
{
   PROF_EVENT(37);
   DEBUG("mc_make_readable(%p, %llu)\n", a, (ULong)len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
}

static __inline__
void make_aligned_word32_writable(Addr a)
{
   mc_make_writable(a, 4);
//zz    SecMap* sm;
//zz    UInt    sm_off;
//zz    UChar   mask;
//zz
//zz    VGP_PUSHCC(VgpESPAdj);
//zz    ENSURE_MAPPABLE(a, "make_aligned_word_writable");
//zz    sm     = primary_map[PM_IDX(a)];
//zz    sm_off = SM_OFF(a);
//zz    ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
//zz    mask = 0x0F;
//zz    mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
//zz    /* mask now contains 1s where we wish to make address bits invalid (0s). */
//zz    sm->abits[sm_off >> 3] &= ~mask;
//zz    VGP_POPCC(VgpESPAdj);
}

static __inline__
void make_aligned_word32_noaccess(Addr a)
{
   mc_make_noaccess(a, 4);
//zz    SecMap* sm;
//zz    UInt    sm_off;
//zz    UChar   mask;
//zz
//zz    VGP_PUSHCC(VgpESPAdj);
//zz    ENSURE_MAPPABLE(a, "make_aligned_word_noaccess");
//zz    sm     = primary_map[PM_IDX(a)];
//zz    sm_off = SM_OFF(a);
//zz    ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
//zz    mask = 0x0F;
//zz    mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
//zz    /* mask now contains 1s where we wish to make address bits invalid (1s). */
//zz    sm->abits[sm_off >> 3] |= mask;
//zz    VGP_POPCC(VgpESPAdj);
}

/* Nb: by "aligned" here we mean 8-byte aligned */
static __inline__
void make_aligned_word64_writable(Addr a)
{
   mc_make_writable(a, 8);
//zz    SecMap* sm;
//zz    UInt    sm_off;
//zz
//zz    VGP_PUSHCC(VgpESPAdj);
//zz    ENSURE_MAPPABLE(a, "make_aligned_doubleword_writable");
//zz    sm     = primary_map[PM_IDX(a)];
//zz    sm_off = SM_OFF(a);
//zz    sm->abits[sm_off >> 3] = VGM_BYTE_VALID;
//zz    ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = VGM_WORD_INVALID;
//zz    ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = VGM_WORD_INVALID;
//zz    VGP_POPCC(VgpESPAdj);
}

static __inline__
void make_aligned_word64_noaccess(Addr a)
{
   mc_make_noaccess(a, 8);
//zz    SecMap* sm;
//zz    UInt    sm_off;
//zz
//zz    VGP_PUSHCC(VgpESPAdj);
//zz    ENSURE_MAPPABLE(a, "make_aligned_doubleword_noaccess");
//zz    sm     = primary_map[PM_IDX(a)];
//zz    sm_off = SM_OFF(a);
//zz    sm->abits[sm_off >> 3] = VGM_BYTE_INVALID;
//zz    ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = VGM_WORD_INVALID;
//zz    ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = VGM_WORD_INVALID;
//zz    VGP_POPCC(VgpESPAdj);
}

/* The stack-pointer update handling functions */
SP_UPDATE_HANDLERS ( make_aligned_word32_writable,
                     make_aligned_word32_noaccess,
                     make_aligned_word64_writable,
                     make_aligned_word64_noaccess,
                     mc_make_writable,
                     mc_make_noaccess
                   );

/* Block-copy permissions (needed for implementing realloc()). */
static void mc_copy_address_range_state ( Addr src, Addr dst, SizeT len )
{
   SizeT i;
   UWord abit, vbyte;

   DEBUG("mc_copy_address_range_state\n");

   PROF_EVENT(40);
   for (i = 0; i < len; i++) {
      PROF_EVENT(41);
      get_abit_and_vbyte( &abit, &vbyte, src+i );
      set_abit_and_vbyte( dst+i, abit, vbyte );
   }
}
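
/* Note (added comment): the loop copies low-to-high, so it is only
   safe for non-overlapping ranges or for dst below src.  That holds
   for the realloc() use case above, where dst is a freshly allocated
   block. */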

/*------------------------------------------------------------*/
/*--- Checking memory                                      ---*/
/*------------------------------------------------------------*/

/* Check permissions for address range.  If inadequate permissions
   exist, *bad_addr is set to the offending address, so the caller can
   know what it is. */

/* Returns True if [a .. a+len) is not addressible.  Otherwise,
   returns False, and if bad_addr is non-NULL, sets *bad_addr to
   indicate the lowest failing address.  Functions below are
   similar. */
static Bool mc_check_noaccess ( Addr a, SizeT len, Addr* bad_addr )
{
   SizeT i;
   UWord abit;
   PROF_EVENT(42);
   for (i = 0; i < len; i++) {
      PROF_EVENT(43);
      abit = get_abit(a);
      if (abit == VGM_BIT_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}

static Bool mc_check_writable ( Addr a, SizeT len, Addr* bad_addr )
{
   SizeT i;
   UWord abit;
   PROF_EVENT(42);
   for (i = 0; i < len; i++) {
      PROF_EVENT(43);
      abit = get_abit(a);
      if (abit == VGM_BIT_INVALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}

static MC_ReadResult mc_check_readable ( Addr a, SizeT len, Addr* bad_addr )
{
   SizeT i;
   UWord abit;
   UWord vbyte;

   PROF_EVENT(44);
   DEBUG("mc_check_readable\n");
   for (i = 0; i < len; i++) {
      PROF_EVENT(45);
      get_abit_and_vbyte(&abit, &vbyte, a);
      // Report addressability errors in preference to definedness errors
      // by checking the A bits first.
      if (abit != VGM_BIT_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return MC_AddrErr;
      }
      if (vbyte != VGM_BYTE_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return MC_ValueErr;
      }
      a++;
   }
   return MC_Ok;
}

/* Check a zero-terminated ascii string.  Tricky -- don't want to
   examine the actual bytes, to find the end, until we're sure it is
   safe to do so. */

/* Return type fixed: this was declared Bool but returns MC_ReadResult
   values, and its caller stores the result in an MC_ReadResult. */
static MC_ReadResult mc_check_readable_asciiz ( Addr a, Addr* bad_addr )
{
   UWord abit;
   UWord vbyte;
   PROF_EVENT(46);
   DEBUG("mc_check_readable_asciiz\n");
   while (True) {
      PROF_EVENT(47);
      get_abit_and_vbyte(&abit, &vbyte, a);
      // As in mc_check_readable(), check A bits first
      if (abit != VGM_BIT_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return MC_AddrErr;
      }
      if (vbyte != VGM_BYTE_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return MC_ValueErr;
      }
      /* Ok, a is safe to read. */
      if (* ((UChar*)a) == 0)
         return MC_Ok;
      a++;
   }
}

/*------------------------------------------------------------*/
/*--- Memory event handlers                                ---*/
/*------------------------------------------------------------*/

static
void mc_check_is_writable ( CorePart part, ThreadId tid, Char* s,
                            Addr base, SizeT size )
{
   Bool ok;
   Addr bad_addr;

   VGP_PUSHCC(VgpCheckMem);

   /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
                   base,base+size-1); */
   ok = mc_check_writable ( base, size, &bad_addr );
   if (!ok) {
      switch (part) {
      case Vg_CoreSysCall:
         MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
                                    /*isUnaddr*/True, s );
         break;

      case Vg_CorePThread:
      case Vg_CoreSignal:
         MAC_(record_core_mem_error)( tid, /*isUnaddr*/True, s );
         break;

      default:
         VG_(tool_panic)("mc_check_is_writable: unexpected CorePart");
      }
   }

   VGP_POPCC(VgpCheckMem);
}

static
void mc_check_is_readable ( CorePart part, ThreadId tid, Char* s,
                            Addr base, SizeT size )
{
   Addr bad_addr;
   MC_ReadResult res;

   VGP_PUSHCC(VgpCheckMem);

   /* VG_(message)(Vg_DebugMsg,"check is readable: %x .. %x",
                   base,base+size-1); */
   res = mc_check_readable ( base, size, &bad_addr );
   if (MC_Ok != res) {
      Bool isUnaddr = ( MC_AddrErr == res ? True : False );

      switch (part) {
      case Vg_CoreSysCall:
         MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
                                    isUnaddr, s );
         break;

      case Vg_CorePThread:
         MAC_(record_core_mem_error)( tid, isUnaddr, s );
         break;

      /* If we're being asked to jump to a silly address, record an error
         message before potentially crashing the entire system. */
      case Vg_CoreTranslate:
         MAC_(record_jump_error)( tid, bad_addr );
         break;

      default:
         VG_(tool_panic)("mc_check_is_readable: unexpected CorePart");
      }
   }
   VGP_POPCC(VgpCheckMem);
}

static
void mc_check_is_readable_asciiz ( CorePart part, ThreadId tid,
                                   Char* s, Addr str )
{
   MC_ReadResult res;
   Addr bad_addr;
   /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */

   VGP_PUSHCC(VgpCheckMem);

   tl_assert(part == Vg_CoreSysCall);
   res = mc_check_readable_asciiz ( (Addr)str, &bad_addr );
   if (MC_Ok != res) {
      Bool isUnaddr = ( MC_AddrErr == res ? True : False );
      MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False, isUnaddr, s );
   }

   VGP_POPCC(VgpCheckMem);
}

static
void mc_new_mem_startup( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
{
   /* Ignore the permissions, just make it readable.  Seems to work... */
   DEBUG("mc_new_mem_startup(%p, %llu, rr=%u, ww=%u, xx=%u)\n",
         a,(ULong)len,rr,ww,xx);
   mc_make_readable(a, len);
}

static
void mc_new_mem_heap ( Addr a, SizeT len, Bool is_inited )
{
   if (is_inited) {
      mc_make_readable(a, len);
   } else {
      mc_make_writable(a, len);
   }
}

static
void mc_new_mem_mmap ( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
{
   mc_make_readable(a, len);
}

static
void mc_post_mem_write(CorePart part, ThreadId tid, Addr a, SizeT len)
{
   mc_make_readable(a, len);
}

/*------------------------------------------------------------*/
/*--- Register event handlers                              ---*/
/*------------------------------------------------------------*/

/* When some chunk of guest state is written, mark the corresponding
   shadow area as valid.  This is used to initialise arbitrarily large
   chunks of guest state, hence the (somewhat arbitrary) 512 limit.
*/
static void mc_post_reg_write ( CorePart part, ThreadId tid,
                                OffT offset, SizeT size)
{
   UChar area[512];
   tl_assert(size <= 512);
   VG_(memset)(area, VGM_BYTE_VALID, size);
   VG_(set_shadow_regs_area)( tid, offset, size, area );
}

static
void mc_post_reg_write_clientcall ( ThreadId tid,
                                    OffT offset, SizeT size,
                                    Addr f)
{
   mc_post_reg_write(/*dummy*/0, tid, offset, size);
}

/* Look at the definedness of the guest's shadow state for
   [offset, offset+len).  If any part of that is undefined, record
   a parameter error.
*/
static void mc_pre_reg_read ( CorePart part, ThreadId tid, Char* s,
                              OffT offset, SizeT size)
{
   Int   i;
   Bool  bad;

   UChar area[16];
   tl_assert(size <= 16);

   VG_(get_shadow_regs_area)( tid, offset, size, area );

   bad = False;
   for (i = 0; i < size; i++) {
      if (area[i] != VGM_BYTE_VALID) {
         bad = True;   /* was "bad = False", which made the check a no-op */
         break;
      }
   }

   if (bad)
      MAC_(record_param_error) ( tid, 0, /*isReg*/True, /*isUnaddr*/False, s );
}

//zz /*------------------------------------------------------------*/
//zz /*--- Functions called directly from generated code.       ---*/
//zz /*------------------------------------------------------------*/
//zz
//zz static __inline__ UInt rotateRight16 ( UInt x )
//zz {
//zz    /* Amazingly, gcc turns this into a single rotate insn. */
//zz    return (x >> 16) | (x << 16);
//zz }
//zz
//zz
//zz static __inline__ UInt shiftRight16 ( UInt x )
//zz {
//zz    return x >> 16;
//zz }
//zz
//zz
//zz /* Read/write 1/2/4/8 sized V bytes, and emit an address error if
//zz    needed. */
//zz
//zz /* MC_(helperc_{LD,ST}V{1,2,4,8}) handle the common case fast.
//zz    Under all other circumstances, it defers to the relevant _SLOWLY
//zz    function, which can handle all situations.
//zz */

/* ------------------------ Size = 8 ------------------------ */

VGA_REGPARM(1)
ULong MC_(helperc_LOADV8) ( Addr a )
{
   return mc_LOADVn_slow( a, 8, False/*littleendian*/ );
//zz #  ifdef VG_DEBUG_MEMORY
//zz    return mc_rd_V8_SLOWLY(a);
//zz #  else
//zz    if (VG_IS_8_ALIGNED(a)) {
//zz       UInt    sec_no = shiftRight16(a) & 0xFFFF;
//zz       SecMap* sm     = primary_map[sec_no];
//zz       UInt    a_off  = (SM_OFF(a)) >> 3;
//zz       UChar   abits  = sm->abits[a_off];
//zz       if (abits == VGM_BYTE_VALID) {
//zz          /* a is 8-aligned, mapped, and addressible. */
//zz          UInt v_off = SM_OFF(a);
//zz          /* LITTLE-ENDIAN */
//zz          UInt vLo = ((UInt*)(sm->vbyte))[ (v_off >> 2) ];
//zz          UInt vHi = ((UInt*)(sm->vbyte))[ (v_off >> 2) + 1 ];
//zz          return ( ((ULong)vHi) << 32 ) | ((ULong)vLo);
//zz       } else {
//zz          return mc_rd_V8_SLOWLY(a);
//zz       }
//zz    }
//zz    else
//zz    if (VG_IS_4_ALIGNED(a)) {
//zz       /* LITTLE-ENDIAN */
//zz       UInt vLo = MC_(helperc_LOADV4)(a+0);
//zz       UInt vHi = MC_(helperc_LOADV4)(a+4);
//zz       return ( ((ULong)vHi) << 32 ) | ((ULong)vLo);
//zz    }
//zz    else
//zz       return mc_rd_V8_SLOWLY(a);
//zz #  endif
}

VGA_REGPARM(1)
void MC_(helperc_STOREV8) ( Addr a, ULong vbytes )
{
   mc_STOREVn_slow( a, 8, vbytes, False/*littleendian*/ );
//zz #  ifdef VG_DEBUG_MEMORY
//zz    mc_wr_V8_SLOWLY(a, vbytes);
//zz #  else
//zz    if (VG_IS_8_ALIGNED(a)) {
//zz       UInt    sec_no = shiftRight16(a) & 0xFFFF;
//zz       SecMap* sm     = primary_map[sec_no];
//zz       UInt    a_off  = (SM_OFF(a)) >> 3;
//zz       if (!IS_DISTINGUISHED_SM(sm) && sm->abits[a_off] == VGM_BYTE_VALID) {
//zz          /* a is 8-aligned, mapped, and addressible. */
//zz          UInt v_off = SM_OFF(a);
//zz          UInt vHi = (UInt)(vbytes >> 32);
//zz          UInt vLo = (UInt)vbytes;
//zz          /* LITTLE-ENDIAN */
//zz          ((UInt*)(sm->vbyte))[ (v_off >> 2) ]     = vLo;
//zz          ((UInt*)(sm->vbyte))[ (v_off >> 2) + 1 ] = vHi;
//zz       } else {
//zz          mc_wr_V8_SLOWLY(a, vbytes);
//zz       }
//zz       return;
//zz    }
//zz    else
//zz    if (VG_IS_4_ALIGNED(a)) {
//zz       UInt vHi = (UInt)(vbytes >> 32);
//zz       UInt vLo = (UInt)vbytes;
//zz       /* LITTLE-ENDIAN */
//zz       MC_(helperc_STOREV4)(a+0, vLo);
//zz       MC_(helperc_STOREV4)(a+4, vHi);
//zz       return;
//zz    }
//zz    else
//zz       mc_wr_V8_SLOWLY(a, vbytes);
//zz #  endif
}

/* ------------------------ Size = 4 ------------------------ */

VGA_REGPARM(1)
UInt MC_(helperc_LOADV4) ( Addr a )
{
   return (UInt)mc_LOADVn_slow( a, 4, False/*littleendian*/ );
//zz #  ifdef VG_DEBUG_MEMORY
//zz    return mc_rd_V4_SLOWLY(a);
//zz #  else
//zz    UInt    sec_no = rotateRight16(a) & 0x3FFFF;
//zz    SecMap* sm     = primary_map[sec_no];
//zz    UInt    a_off  = (SM_OFF(a)) >> 3;
//zz    UChar   abits  = sm->abits[a_off];
//zz    abits >>= (a & 4);
//zz    abits &= 15;
//zz    PROF_EVENT(60);
//zz    if (abits == VGM_NIBBLE_VALID) {
//zz       /* Handle common case quickly: a is suitably aligned, is mapped,
//zz          and is addressible. */
//zz       UInt v_off = SM_OFF(a);
//zz       return ((UInt*)(sm->vbyte))[ v_off >> 2 ];
//zz    } else {
//zz       /* Slow but general case. */
//zz       return mc_rd_V4_SLOWLY(a);
//zz    }
//zz #  endif
}

VGA_REGPARM(2)
void MC_(helperc_STOREV4) ( Addr a, UInt vbytes )
{
   mc_STOREVn_slow( a, 4, vbytes, False/*littleendian*/ );
//zz #  ifdef VG_DEBUG_MEMORY
//zz    mc_wr_V4_SLOWLY(a, vbytes);
//zz #  else
//zz    UInt    sec_no = rotateRight16(a) & 0x3FFFF;
//zz    SecMap* sm     = primary_map[sec_no];
//zz    UInt    a_off  = (SM_OFF(a)) >> 3;
//zz    UChar   abits  = sm->abits[a_off];
//zz    abits >>= (a & 4);
//zz    abits &= 15;
//zz    PROF_EVENT(61);
//zz    if (!IS_DISTINGUISHED_SM(sm) && abits == VGM_NIBBLE_VALID) {
//zz       /* Handle common case quickly: a is suitably aligned, is mapped,
//zz          and is addressible. */
//zz       UInt v_off = SM_OFF(a);
//zz       ((UInt*)(sm->vbyte))[ v_off >> 2 ] = vbytes;
//zz    } else {
//zz       /* Slow but general case. */
//zz       mc_wr_V4_SLOWLY(a, vbytes);
//zz    }
//zz #  endif
}

/* ------------------------ Size = 2 ------------------------ */

VGA_REGPARM(1)
UInt MC_(helperc_LOADV2) ( Addr a )
{
   return (UInt)mc_LOADVn_slow( a, 2, False/*littleendian*/ );
//zz #  ifdef VG_DEBUG_MEMORY
//zz    return mc_rd_V2_SLOWLY(a);
//zz #  else
//zz    UInt    sec_no = rotateRight16(a) & 0x1FFFF;
//zz    SecMap* sm     = primary_map[sec_no];
//zz    UInt    a_off  = (SM_OFF(a)) >> 3;
//zz    PROF_EVENT(62);
//zz    if (sm->abits[a_off] == VGM_BYTE_VALID) {
//zz       /* Handle common case quickly. */
//zz       UInt v_off = SM_OFF(a);
//zz       return 0xFFFF0000
//zz              |
//zz              (UInt)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] );
//zz    } else {
//zz       /* Slow but general case. */
//zz       return mc_rd_V2_SLOWLY(a);
//zz    }
//zz #  endif
}

VGA_REGPARM(2)
void MC_(helperc_STOREV2) ( Addr a, UInt vbytes )
{
   mc_STOREVn_slow( a, 2, vbytes, False/*littleendian*/ );
//zz #  ifdef VG_DEBUG_MEMORY
//zz    mc_wr_V2_SLOWLY(a, vbytes);
//zz #  else
//zz    UInt    sec_no = rotateRight16(a) & 0x1FFFF;
//zz    SecMap* sm     = primary_map[sec_no];
//zz    UInt    a_off  = (SM_OFF(a)) >> 3;
//zz    PROF_EVENT(63);
//zz    if (!IS_DISTINGUISHED_SM(sm) && sm->abits[a_off] == VGM_BYTE_VALID) {
//zz       /* Handle common case quickly. */
//zz       UInt v_off = SM_OFF(a);
//zz       ((UShort*)(sm->vbyte))[ v_off >> 1 ] = vbytes & 0x0000FFFF;
//zz    } else {
//zz       /* Slow but general case. */
//zz       mc_wr_V2_SLOWLY(a, vbytes);
//zz    }
//zz #  endif
}

/* ------------------------ Size = 1 ------------------------ */

VGA_REGPARM(1)
UInt MC_(helperc_LOADV1) ( Addr a )
{
   return (UInt)mc_LOADVn_slow( a, 1, False/*littleendian*/ );
//zz #  ifdef VG_DEBUG_MEMORY
//zz    return mc_rd_V1_SLOWLY(a);
//zz #  else
//zz    UInt    sec_no = shiftRight16(a);
//zz    SecMap* sm     = primary_map[sec_no];
//zz    UInt    a_off  = (SM_OFF(a)) >> 3;
//zz    PROF_EVENT(64);
//zz    if (sm->abits[a_off] == VGM_BYTE_VALID) {
//zz       /* Handle common case quickly. */
//zz       UInt v_off = SM_OFF(a);
//zz       return 0xFFFFFF00
//zz              |
//zz              (UInt)( ((UChar*)(sm->vbyte))[ v_off ] );
//zz    } else {
//zz       /* Slow but general case. */
//zz       return mc_rd_V1_SLOWLY(a);
//zz    }
//zz #  endif
}

VGA_REGPARM(2)
void MC_(helperc_STOREV1) ( Addr a, UInt vbytes )
{
   mc_STOREVn_slow( a, 1, vbytes, False/*littleendian*/ );
//zz #  ifdef VG_DEBUG_MEMORY
//zz    mc_wr_V1_SLOWLY(a, vbytes);
//zz #  else
//zz    UInt    sec_no = shiftRight16(a);
//zz    SecMap* sm     = primary_map[sec_no];
//zz    UInt    a_off  = (SM_OFF(a)) >> 3;
//zz    PROF_EVENT(65);
//zz    if (!IS_DISTINGUISHED_SM(sm) && sm->abits[a_off] == VGM_BYTE_VALID) {
//zz       /* Handle common case quickly. */
//zz       UInt v_off = SM_OFF(a);
//zz       ((UChar*)(sm->vbyte))[ v_off ] = vbytes & 0x000000FF;
//zz    } else {
//zz       /* Slow but general case. */
//zz       mc_wr_V1_SLOWLY(a, vbytes);
//zz    }
//zz #  endif
}

//zz /*------------------------------------------------------------*/
//zz /*--- Fallback functions to handle cases that the above    ---*/
//zz /*--- VG_(helperc_{LD,ST}V{1,2,4,8}) can't manage.         ---*/
//zz /*------------------------------------------------------------*/
//zz
//zz /* ------------------------ Size = 8 ------------------------ */
//zz
//zz static ULong mc_rd_V8_SLOWLY ( Addr a )
//zz {
//zz    Bool a0ok, a1ok, a2ok, a3ok, a4ok, a5ok, a6ok, a7ok;
//zz    UInt vb0, vb1, vb2, vb3, vb4, vb5, vb6, vb7;
//zz
//zz    PROF_EVENT(70);
//zz
//zz    /* First establish independently the addressibility of the 8 bytes
//zz       involved. */
//zz    a0ok = get_abit(a+0) == VGM_BIT_VALID;
//zz    a1ok = get_abit(a+1) == VGM_BIT_VALID;
//zz    a2ok = get_abit(a+2) == VGM_BIT_VALID;
//zz    a3ok = get_abit(a+3) == VGM_BIT_VALID;
//zz    a4ok = get_abit(a+4) == VGM_BIT_VALID;
//zz    a5ok = get_abit(a+5) == VGM_BIT_VALID;
//zz    a6ok = get_abit(a+6) == VGM_BIT_VALID;
//zz    a7ok = get_abit(a+7) == VGM_BIT_VALID;
//zz
//zz    /* Also get the validity bytes for the address. */
//zz    vb0 = (UInt)get_vbyte(a+0);
//zz    vb1 = (UInt)get_vbyte(a+1);
//zz    vb2 = (UInt)get_vbyte(a+2);
//zz    vb3 = (UInt)get_vbyte(a+3);
//zz    vb4 = (UInt)get_vbyte(a+4);
//zz    vb5 = (UInt)get_vbyte(a+5);
//zz    vb6 = (UInt)get_vbyte(a+6);
//zz    vb7 = (UInt)get_vbyte(a+7);
//zz
//zz    /* Now distinguish 3 cases */
//zz
//zz    /* Case 1: the address is completely valid, so:
//zz       - no addressing error
//zz       - return V bytes as read from memory
//zz    */
//zz    if (a0ok && a1ok && a2ok && a3ok && a4ok && a5ok && a6ok && a7ok) {
//zz       ULong vw = VGM_WORD64_INVALID;
//zz       vw <<= 8; vw |= vb7;
//zz       vw <<= 8; vw |= vb6;
//zz       vw <<= 8; vw |= vb5;
//zz       vw <<= 8; vw |= vb4;
//zz       vw <<= 8; vw |= vb3;
//zz       vw <<= 8; vw |= vb2;
//zz       vw <<= 8; vw |= vb1;
//zz       vw <<= 8; vw |= vb0;
//zz       return vw;
//zz    }
//zz
//zz    /* Case 2: the address is completely invalid.
//zz       - emit addressing error
//zz       - return V word indicating validity.
//zz       This sounds strange, but if we make loads from invalid addresses
//zz       give invalid data, we also risk producing a number of confusing
//zz       undefined-value errors later, which confuses the fact that the
//zz       error arose in the first place from an invalid address.
//zz    */
//zz    /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
//zz    if (!MAC_(clo_partial_loads_ok)
//zz        || ((a & 7) != 0)
//zz        || (!a0ok && !a1ok && !a2ok && !a3ok && !a4ok && !a5ok && !a6ok && !a7ok)) {
//zz       MAC_(record_address_error)( VG_(get_running_tid)(), a, 8, False );
//zz       return VGM_WORD64_VALID;
//zz    }
//zz
//zz    /* Case 3: the address is partially valid.
//zz       - no addressing error
//zz       - returned V word is invalid where the address is invalid,
//zz         and contains V bytes from memory otherwise.
//zz       Case 3 is only allowed if MC_(clo_partial_loads_ok) is True
//zz       (which is the default), and the address is 4-aligned.
//zz       If not, Case 2 will have applied.
//zz    */
//zz    tl_assert(MAC_(clo_partial_loads_ok));
//zz    {
//zz       ULong vw = VGM_WORD64_INVALID;
//zz       vw <<= 8; vw |= (a7ok ? vb7 : VGM_BYTE_INVALID);
//zz       vw <<= 8; vw |= (a6ok ? vb6 : VGM_BYTE_INVALID);
//zz       vw <<= 8; vw |= (a5ok ? vb5 : VGM_BYTE_INVALID);
//zz       vw <<= 8; vw |= (a4ok ? vb4 : VGM_BYTE_INVALID);
//zz       vw <<= 8; vw |= (a3ok ? vb3 : VGM_BYTE_INVALID);
//zz       vw <<= 8; vw |= (a2ok ? vb2 : VGM_BYTE_INVALID);
//zz       vw <<= 8; vw |= (a1ok ? vb1 : VGM_BYTE_INVALID);
//zz       vw <<= 8; vw |= (a0ok ? vb0 : VGM_BYTE_INVALID);
//zz       return vw;
//zz    }
//zz }
//zz
//zz static void mc_wr_V8_SLOWLY ( Addr a, ULong vbytes )
//zz {
//zz    /* Check the address for validity. */
//zz    Bool aerr = False;
//zz    PROF_EVENT(71);
//zz
//zz    if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
//zz    if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
//zz    if (get_abit(a+2) != VGM_BIT_VALID) aerr = True;
//zz    if (get_abit(a+3) != VGM_BIT_VALID) aerr = True;
//zz    if (get_abit(a+4) != VGM_BIT_VALID) aerr = True;
//zz    if (get_abit(a+5) != VGM_BIT_VALID) aerr = True;
//zz    if (get_abit(a+6) != VGM_BIT_VALID) aerr = True;
//zz    if (get_abit(a+7) != VGM_BIT_VALID) aerr = True;
//zz
//zz    /* Store the V bytes, remembering to do it little-endian-ly. */
//zz    set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
//zz    set_vbyte( a+1, vbytes & 0x000000FF ); vbytes >>= 8;
//zz    set_vbyte( a+2, vbytes & 0x000000FF ); vbytes >>= 8;
//zz    set_vbyte( a+3, vbytes & 0x000000FF ); vbytes >>= 8;
//zz    set_vbyte( a+4, vbytes & 0x000000FF ); vbytes >>= 8;
//zz    set_vbyte( a+5, vbytes & 0x000000FF ); vbytes >>= 8;
//zz    set_vbyte( a+6, vbytes & 0x000000FF ); vbytes >>= 8;
//zz    set_vbyte( a+7, vbytes & 0x000000FF );
//zz
//zz    /* If an address error has happened, report it. */
//zz    if (aerr)
//zz       MAC_(record_address_error)( VG_(get_running_tid)(), a, 8, True );
//zz }
//zz
//zz /* ------------------------ Size = 4 ------------------------ */
//zz
//zz static UInt mc_rd_V4_SLOWLY ( Addr a )
//zz {
//zz    Bool a0ok, a1ok, a2ok, a3ok;
//zz    UInt vb0, vb1, vb2, vb3;
//zz
//zz    PROF_EVENT(70);
//zz
//zz    /* First establish independently the addressibility of the 4 bytes
//zz       involved. */
//zz    a0ok = get_abit(a+0) == VGM_BIT_VALID;
//zz    a1ok = get_abit(a+1) == VGM_BIT_VALID;
//zz    a2ok = get_abit(a+2) == VGM_BIT_VALID;
//zz    a3ok = get_abit(a+3) == VGM_BIT_VALID;
//zz
//zz    /* Also get the validity bytes for the address. */
//zz    vb0 = (UInt)get_vbyte(a+0);
//zz    vb1 = (UInt)get_vbyte(a+1);
//zz    vb2 = (UInt)get_vbyte(a+2);
//zz    vb3 = (UInt)get_vbyte(a+3);
//zz
//zz    /* Now distinguish 3 cases */
//zz
//zz    /* Case 1: the address is completely valid, so:
//zz       - no addressing error
//zz       - return V bytes as read from memory
//zz    */
//zz    if (a0ok && a1ok && a2ok && a3ok) {
//zz       UInt vw = VGM_WORD_INVALID;
//zz       vw <<= 8; vw |= vb3;
//zz       vw <<= 8; vw |= vb2;
//zz       vw <<= 8; vw |= vb1;
//zz       vw <<= 8; vw |= vb0;
//zz       return vw;
//zz    }
//zz
//zz    /* Case 2: the address is completely invalid.
//zz       - emit addressing error
//zz       - return V word indicating validity.
//zz       This sounds strange, but if we make loads from invalid addresses
//zz       give invalid data, we also risk producing a number of confusing
//zz       undefined-value errors later, which confuses the fact that the
//zz       error arose in the first place from an invalid address.
//zz    */
//zz    /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
//zz    if (!MAC_(clo_partial_loads_ok)
//zz        || ((a & 3) != 0)
//zz        || (!a0ok && !a1ok && !a2ok && !a3ok)) {
//zz       MAC_(record_address_error)( VG_(get_running_tid)(), a, 4, False );
//zz       return (VGM_BYTE_VALID << 24) | (VGM_BYTE_VALID << 16)
//zz              | (VGM_BYTE_VALID << 8) | VGM_BYTE_VALID;
//zz    }
//zz
//zz    /* Case 3: the address is partially valid.
//zz       - no addressing error
//zz       - returned V word is invalid where the address is invalid,
//zz         and contains V bytes from memory otherwise.
//zz       Case 3 is only allowed if MC_(clo_partial_loads_ok) is True
//zz       (which is the default), and the address is 4-aligned.
//zz       If not, Case 2 will have applied.
//zz    */
//zz    tl_assert(MAC_(clo_partial_loads_ok));
//zz    {
//zz       UInt vw = VGM_WORD_INVALID;
//zz       vw <<= 8; vw |= (a3ok ? vb3 : VGM_BYTE_INVALID);
//zz       vw <<= 8; vw |= (a2ok ? vb2 : VGM_BYTE_INVALID);
//zz       vw <<= 8; vw |= (a1ok ? vb1 : VGM_BYTE_INVALID);
//zz       vw <<= 8; vw |= (a0ok ? vb0 : VGM_BYTE_INVALID);
//zz       return vw;
//zz    }
//zz }
//zz
//zz static void mc_wr_V4_SLOWLY ( Addr a, UInt vbytes )
//zz {
//zz    /* Check the address for validity. */
//zz    Bool aerr = False;
//zz    PROF_EVENT(71);
//zz
//zz    if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
//zz    if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
//zz    if (get_abit(a+2) != VGM_BIT_VALID) aerr = True;
//zz    if (get_abit(a+3) != VGM_BIT_VALID) aerr = True;
//zz
//zz    /* Store the V bytes, remembering to do it little-endian-ly. */
//zz    set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
//zz    set_vbyte( a+1, vbytes & 0x000000FF ); vbytes >>= 8;
//zz    set_vbyte( a+2, vbytes & 0x000000FF ); vbytes >>= 8;
//zz    set_vbyte( a+3, vbytes & 0x000000FF );
//zz
//zz    /* If an address error has happened, report it. */
//zz    if (aerr)
//zz       MAC_(record_address_error)( VG_(get_running_tid)(), a, 4, True );
//zz }
//zz
//zz /* ------------------------ Size = 2 ------------------------ */
//zz
//zz static UInt mc_rd_V2_SLOWLY ( Addr a )
//zz {
//zz    /* Check the address for validity. */
//zz    UInt vw   = VGM_WORD_INVALID;
//zz    Bool aerr = False;
//zz    PROF_EVENT(72);
//zz
//zz    if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
//zz    if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
//zz
//zz    /* Fetch the V bytes, remembering to do it little-endian-ly. */
//zz    vw <<= 8; vw |= (UInt)get_vbyte(a+1);
//zz    vw <<= 8; vw |= (UInt)get_vbyte(a+0);
//zz
//zz    /* If an address error has happened, report it. */
//zz    if (aerr) {
//zz       MAC_(record_address_error)( VG_(get_running_tid)(), a, 2, False );
//zz       vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
//zz            | (VGM_BYTE_VALID << 8) | (VGM_BYTE_VALID);
//zz    }
//zz    return vw;
//zz }
//zz
//zz static void mc_wr_V2_SLOWLY ( Addr a, UInt vbytes )
//zz {
//zz    /* Check the address for validity. */
//zz    Bool aerr = False;
//zz    PROF_EVENT(73);
//zz
//zz    if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
//zz    if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
//zz
//zz    /* Store the V bytes, remembering to do it little-endian-ly. */
//zz    set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
//zz    set_vbyte( a+1, vbytes & 0x000000FF );
//zz
//zz    /* If an address error has happened, report it. */
//zz    if (aerr)
//zz       MAC_(record_address_error)( VG_(get_running_tid)(), a, 2, True );
//zz }
//zz
//zz /* ------------------------ Size = 1 ------------------------ */
//zz
//zz static UInt mc_rd_V1_SLOWLY ( Addr a )
//zz {
//zz    /* Check the address for validity. */
//zz    UInt vw   = VGM_WORD_INVALID;
//zz    Bool aerr = False;
//zz    PROF_EVENT(74);
//zz
//zz    if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
//zz
//zz    /* Fetch the V byte. */
//zz    vw <<= 8; vw |= (UInt)get_vbyte(a+0);
//zz
//zz    /* If an address error has happened, report it. */
//zz    if (aerr) {
//zz       MAC_(record_address_error)( VG_(get_running_tid)(), a, 1, False );
//zz       vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
//zz            | (VGM_BYTE_INVALID << 8) | (VGM_BYTE_VALID);
//zz    }
//zz    return vw;
//zz }
//zz
//zz static void mc_wr_V1_SLOWLY ( Addr a, UInt vbytes )
//zz {
//zz    /* Check the address for validity. */
//zz    Bool aerr = False;
//zz    PROF_EVENT(75);
//zz    if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
//zz
//zz    /* Store the V bytes, remembering to do it little-endian-ly. */
//zz    set_vbyte( a+0, vbytes & 0x000000FF );
//zz
//zz    /* If an address error has happened, report it. */
//zz    if (aerr)
//zz       MAC_(record_address_error)( VG_(get_running_tid)(), a, 1, True );
//zz }
|
|
|
|
|
|
/* ---------------------------------------------------------------------
   Called from generated code, or from the assembly helpers.
   Handlers for value check failures.
   ------------------------------------------------------------------ */

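/* Each of these reports a use of an undefined value; the number
   passed to MC_(record_value_error) is the size in bytes of that
   value (size 0 appears to denote the conditional-test case). */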
void MC_(helperc_value_check0_fail) ( void )
{
   MC_(record_value_error) ( VG_(get_running_tid)(), 0 );
}

void MC_(helperc_value_check1_fail) ( void )
{
   MC_(record_value_error) ( VG_(get_running_tid)(), 1 );
}

//zz void MC_(helperc_value_check2_fail) ( void )
//zz {
//zz    MC_(record_value_error) ( VG_(get_running_tid)(), 2 );
//zz }

void MC_(helperc_value_check4_fail) ( void )
{
   MC_(record_value_error) ( VG_(get_running_tid)(), 4 );
}

VGA_REGPARM(1) void MC_(helperc_complain_undef) ( HWord sz )
{
   MC_(record_value_error) ( VG_(get_running_tid)(), (Int)sz );
}


//zz /*------------------------------------------------------------*/
//zz /*--- Metadata get/set functions, for client requests.     ---*/
//zz /*------------------------------------------------------------*/
//zz 
//zz /* Copy Vbits for src into vbits. Returns: 1 == OK, 2 == alignment
//zz    error, 3 == addressing error. */
//zz static Int mc_get_or_set_vbits_for_client (
//zz    ThreadId tid,
//zz    Addr dataV,
//zz    Addr vbitsV,
//zz    SizeT size,
//zz    Bool setting /* True <=> set vbits,  False <=> get vbits */
//zz )
//zz {
//zz    Bool  addressibleD = True;
//zz    Bool  addressibleV = True;
//zz    UInt* data         = (UInt*)dataV;
//zz    UInt* vbits        = (UInt*)vbitsV;
//zz    SizeT szW          = size / 4; /* sigh */
//zz    SizeT i;
//zz    UInt* dataP  = NULL; /* bogus init to keep gcc happy */
//zz    UInt* vbitsP = NULL; /* ditto */
//zz 
//zz    /* Check alignment of args. */
//zz    if (!(VG_IS_4_ALIGNED(data) && VG_IS_4_ALIGNED(vbits)))
//zz       return 2;
//zz    if ((size & 3) != 0)
//zz       return 2;
//zz 
//zz    /* Check that arrays are addressible. */
//zz    for (i = 0; i < szW; i++) {
//zz       dataP  = &data[i];
//zz       vbitsP = &vbits[i];
//zz       if (get_abits4_ALIGNED((Addr)dataP) != VGM_NIBBLE_VALID) {
//zz          addressibleD = False;
//zz          break;
//zz       }
//zz       if (get_abits4_ALIGNED((Addr)vbitsP) != VGM_NIBBLE_VALID) {
//zz          addressibleV = False;
//zz          break;
//zz       }
//zz    }
//zz    if (!addressibleD) {
//zz       MAC_(record_address_error)( tid, (Addr)dataP, 4,
//zz                                   setting ? True : False );
//zz       return 3;
//zz    }
//zz    if (!addressibleV) {
//zz       MAC_(record_address_error)( tid, (Addr)vbitsP, 4,
//zz                                   setting ? False : True );
//zz       return 3;
//zz    }
//zz 
//zz    /* Do the copy */
//zz    if (setting) {
//zz       /* setting */
//zz       for (i = 0; i < szW; i++) {
//zz          if (get_vbytes4_ALIGNED( (Addr)&vbits[i] ) != VGM_WORD_VALID)
//zz             MC_(record_value_error)(tid, 4);
//zz          set_vbytes4_ALIGNED( (Addr)&data[i], vbits[i] );
//zz       }
//zz    } else {
//zz       /* getting */
//zz       for (i = 0; i < szW; i++) {
//zz          vbits[i] = get_vbytes4_ALIGNED( (Addr)&data[i] );
//zz          set_vbytes4_ALIGNED( (Addr)&vbits[i], VGM_WORD_VALID );
//zz       }
//zz    }
//zz 
//zz    return 1;
//zz }
//zz 
//zz 
//zz /*------------------------------------------------------------*/
//zz /*--- Detecting leaked (unreachable) malloc'd blocks.      ---*/
//zz /*------------------------------------------------------------*/
//zz 
//zz /* For the memory leak detector, say whether an entire 64k chunk of
//zz    address space is possibly in use, or not.  If in doubt return
//zz    True.
//zz */
//zz static
//zz Bool mc_is_valid_64k_chunk ( UInt chunk_number )
//zz {
//zz    tl_assert(chunk_number >= 0 && chunk_number < PRIMARY_SIZE);
//zz    if (primary_map[chunk_number] == DSM_NOTADDR) {
//zz       /* Definitely not in use. */
//zz       return False;
//zz    } else {
//zz       return True;
//zz    }
//zz }
//zz 
//zz 
//zz /* For the memory leak detector, say whether or not a given word
//zz    address is to be regarded as valid. */
//zz static
//zz Bool mc_is_valid_address ( Addr a )
//zz {
//zz    UInt  vbytes;
//zz    UChar abits;
//zz    tl_assert(VG_IS_4_ALIGNED(a));
//zz    abits  = get_abits4_ALIGNED(a);
//zz    vbytes = get_vbytes4_ALIGNED(a);
//zz    if (abits == VGM_NIBBLE_VALID && vbytes == VGM_WORD_VALID) {
//zz       return True;
//zz    } else {
//zz       return False;
//zz    }
//zz }


/* Leak detector for this tool.  We don't actually do anything
   ourselves, merely run the generic leak detector with suitable
   parameters for this tool.  (The generic detector is currently
   disabled here.) */
static void mc_detect_memory_leaks ( ThreadId tid, LeakCheckMode mode )
{
   VG_(printf)("memcheck: leak detection currently disabled\n");
   // MAC_(do_detect_memory_leaks) (
   //    tid, mode, mc_is_valid_64k_chunk, mc_is_valid_address );
}


/* ---------------------------------------------------------------------
   Sanity check machinery (permanently engaged).
   ------------------------------------------------------------------ */

Bool TL_(cheap_sanity_check) ( void )
{
   /* nothing useful we can rapidly check */
   return True;
}

Bool TL_(expensive_sanity_check) ( void )
{
   Int     i;
   SecMap* sm;

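   /* Loop bounds below: each secondary map shadows 64KB, with one V
      byte per data byte (65536 vbytes) and one A bit per data byte
      (65536/8 == 8192 abits bytes). */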
   /* Check the 3 distinguished SMs. */

   /* Check A invalid, V invalid. */
   sm = &sm_distinguished[SM_DIST_NOACCESS];
   for (i = 0; i < 65536; i++)
      if (!(sm->vbyte[i] == VGM_BYTE_INVALID))
         return False;
   for (i = 0; i < 8192; i++)
      if (!(sm->abits[i] == VGM_BYTE_INVALID))
         return False;

   /* Check A valid, V invalid. */
   sm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
   for (i = 0; i < 65536; i++)
      if (!(sm->vbyte[i] == VGM_BYTE_INVALID))
         return False;
   for (i = 0; i < 8192; i++)
      if (!(sm->abits[i] == VGM_BYTE_VALID))
         return False;

   /* Check A valid, V valid. */
   sm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
   for (i = 0; i < 65536; i++)
      if (!(sm->vbyte[i] == VGM_BYTE_VALID))
         return False;
   for (i = 0; i < 8192; i++)
      if (!(sm->abits[i] == VGM_BYTE_VALID))
         return False;

   if (auxmap_used > auxmap_size)
      return False;

   return True;
}


/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////

/*------------------------------------------------------------*/
/*--- Command line args                                    ---*/
/*------------------------------------------------------------*/

Bool MC_(clo_avoid_strlen_errors) = True;

Bool TL_(process_cmd_line_option)(Char* arg)
{
   VG_BOOL_CLO(arg, "--avoid-strlen-errors", MC_(clo_avoid_strlen_errors))
   else
      return MAC_(process_common_cmd_line_option)(arg);

   return True;
}

void TL_(print_usage)(void)
{
   MAC_(print_common_usage)();
   VG_(printf)(
"    --avoid-strlen-errors=no|yes  suppress errs from inlined strlen [yes]\n"
   );
}

void TL_(print_debug_usage)(void)
{
   MAC_(print_common_debug_usage)();
   VG_(printf)(
"    --cleanup=no|yes              improve after instrumentation? [yes]\n"
   );
}

/*------------------------------------------------------------*/
/*--- Client requests                                      ---*/
/*------------------------------------------------------------*/

/* Client block management:

   This is managed as an expanding array of client block descriptors.
   Indices of live descriptors are issued to the client, so it can ask
   to free them later.  Therefore we cannot slide live entries down
   over dead ones.  Instead we must use free/inuse flags and scan for
   an empty slot at allocation time.  This in turn means allocation is
   relatively expensive, so we hope this does not happen too often.

   An unused block has start == size == 0.
*/

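/* Client-side usage, as a sketch (the wrapper macros live in the
   distributed headers; names here are indicative only):

      int id = VALGRIND_CREATE_BLOCK(buf, 64, "my buffer");
      ...
      VALGRIND_DISCARD(id);

   The index handed back to the client is the descriptor slot issued
   by alloc_client_block() below. */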
typedef
   struct {
      Addr        start;
      SizeT       size;
      ExeContext* where;
      Char*       desc;
   }
   CGenBlock;

/* This subsystem is self-initialising. */
static UInt       cgb_size = 0;
static UInt       cgb_used = 0;
static CGenBlock* cgbs     = NULL;

/* Stats for this subsystem. */
static UInt cgb_used_MAX = 0;   /* Max in use. */
static UInt cgb_allocs   = 0;   /* Number of allocs. */
static UInt cgb_discards = 0;   /* Number of discards. */
static UInt cgb_search   = 0;   /* Number of searches. */


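/* Find a free descriptor slot: reuse a dead entry if one exists,
   otherwise take the next slot at the end, growing the array (by
   doubling) if it is full.  Returns the slot index. */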
static
Int alloc_client_block ( void )
{
   UInt       i, sz_new;
   CGenBlock* cgbs_new;

   cgb_allocs++;

   for (i = 0; i < cgb_used; i++) {
      cgb_search++;
      if (cgbs[i].start == 0 && cgbs[i].size == 0)
         return i;
   }

   /* Not found.  Try to allocate one at the end. */
   if (cgb_used < cgb_size) {
      cgb_used++;
      return cgb_used-1;
   }

   /* Ok, we have to allocate a new one. */
   tl_assert(cgb_used == cgb_size);
   sz_new = (cgbs == NULL) ? 10 : (2 * cgb_size);

   cgbs_new = VG_(malloc)( sz_new * sizeof(CGenBlock) );
   for (i = 0; i < cgb_used; i++)
      cgbs_new[i] = cgbs[i];

   if (cgbs != NULL)
      VG_(free)( cgbs );
   cgbs = cgbs_new;

   cgb_size = sz_new;
   cgb_used++;
   if (cgb_used > cgb_used_MAX)
      cgb_used_MAX = cgb_used;
   return cgb_used-1;
}


static void show_client_block_stats ( void )
{
   VG_(message)(Vg_DebugMsg,
      "general CBs: %d allocs, %d discards, %d maxinuse, %d search",
      cgb_allocs, cgb_discards, cgb_used_MAX, cgb_search
   );
}

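/* Callback for VG_(HT_first_match) below: does the address *ap fall
   within the given chunk? */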
static Bool find_addr(VgHashNode* sh_ch, void* ap)
{
   MAC_Chunk* m = (MAC_Chunk*)sh_ch;
   Addr       a = *(Addr*)ap;

   return VG_(addr_is_in_block)(a, m->data, m->size);
}

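/* If 'a' lies inside a client-described block (or a mempool chunk
   within one), fill in 'ai' with a description and return True;
   otherwise return False. */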
static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai )
{
   UInt i;
   /* VG_(printf)("try to identify %d\n", a); */

   /* Perhaps it's a general block ? */
   for (i = 0; i < cgb_used; i++) {
      if (cgbs[i].start == 0 && cgbs[i].size == 0)
         continue;
      if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size)) {
         MAC_Mempool **d, *mp;

         /* OK - maybe it's a mempool, too? */
         mp = (MAC_Mempool*)VG_(HT_get_node)(MAC_(mempool_list),
                                             (UWord)cgbs[i].start,
                                             (void*)&d);
         if (mp != NULL) {
            if (mp->chunks != NULL) {
               MAC_Chunk *mc;

               mc = (MAC_Chunk*)VG_(HT_first_match)(mp->chunks, find_addr, &a);
               if (mc != NULL) {
                  ai->akind      = UserG;
                  ai->blksize    = mc->size;
                  ai->rwoffset   = (Int)(a) - (Int)mc->data;
                  ai->lastchange = mc->where;
                  return True;
               }
            }
            ai->akind      = Mempool;
            ai->blksize    = cgbs[i].size;
            ai->rwoffset   = (Int)(a) - (Int)(cgbs[i].start);
            ai->lastchange = cgbs[i].where;
            return True;
         }
         ai->akind      = UserG;
         ai->blksize    = cgbs[i].size;
         ai->rwoffset   = (Int)(a) - (Int)(cgbs[i].start);
         ai->lastchange = cgbs[i].where;
         ai->desc       = cgbs[i].desc;
         return True;
      }
   }
   return False;
}

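/* Dispatch Memcheck-specific client requests.  Returns True iff the
   request was recognised and handled; the reply to the client goes
   via *ret. */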
Bool TL_(handle_client_request) ( ThreadId tid, UWord* arg, UWord* ret )
{
   Int   i;
   Bool  ok;
   Addr  bad_addr;

   if (!VG_IS_TOOL_USERREQ('M','C',arg[0])
       && VG_USERREQ__MALLOCLIKE_BLOCK != arg[0]
       && VG_USERREQ__FREELIKE_BLOCK   != arg[0]
       && VG_USERREQ__CREATE_MEMPOOL   != arg[0]
       && VG_USERREQ__DESTROY_MEMPOOL  != arg[0]
       && VG_USERREQ__MEMPOOL_ALLOC    != arg[0]
       && VG_USERREQ__MEMPOOL_FREE     != arg[0])
      return False;

   switch (arg[0]) {
      case VG_USERREQ__CHECK_WRITABLE: /* check writable */
         ok = mc_check_writable ( arg[1], arg[2], &bad_addr );
         if (!ok)
            MC_(record_user_error) ( tid, bad_addr, /*isWrite*/True,
                                     /*isUnaddr*/True );
         *ret = ok ? (UWord)NULL : bad_addr;
         break;

      case VG_USERREQ__CHECK_READABLE: { /* check readable */
         MC_ReadResult res;
         res = mc_check_readable ( arg[1], arg[2], &bad_addr );
         if (MC_AddrErr == res)
            MC_(record_user_error) ( tid, bad_addr, /*isWrite*/False,
                                     /*isUnaddr*/True );
         else if (MC_ValueErr == res)
            MC_(record_user_error) ( tid, bad_addr, /*isWrite*/False,
                                     /*isUnaddr*/False );
         *ret = ( res==MC_Ok ? (UWord)NULL : bad_addr );
         break;
      }

      case VG_USERREQ__DO_LEAK_CHECK:
         mc_detect_memory_leaks(tid, arg[1] ? LC_Summary : LC_Full);
         *ret = 0; /* return value is meaningless */
         break;

      case VG_USERREQ__MAKE_NOACCESS: /* make no access */
         mc_make_noaccess ( arg[1], arg[2] );
         *ret = -1;
         break;

      case VG_USERREQ__MAKE_WRITABLE: /* make writable */
         mc_make_writable ( arg[1], arg[2] );
         *ret = -1;
         break;

      case VG_USERREQ__MAKE_READABLE: /* make readable */
         mc_make_readable ( arg[1], arg[2] );
         *ret = -1;
         break;

      case VG_USERREQ__CREATE_BLOCK: /* describe a block */
         if (arg[1] != 0 && arg[2] != 0) {
            i = alloc_client_block();
            /* VG_(printf)("allocated %d %p\n", i, cgbs); */
            cgbs[i].start = arg[1];
            cgbs[i].size  = arg[2];
            cgbs[i].desc  = VG_(strdup)((Char *)arg[3]);
            cgbs[i].where = VG_(record_ExeContext) ( tid );

            *ret = i;
         } else
            *ret = -1;
         break;

      case VG_USERREQ__DISCARD: /* discard */
         if (cgbs == NULL
             || arg[2] >= cgb_used
             || (cgbs[arg[2]].start == 0 && cgbs[arg[2]].size == 0)) {
            *ret = 1;
         } else {
            tl_assert(arg[2] >= 0 && arg[2] < cgb_used);
            cgbs[arg[2]].start = cgbs[arg[2]].size = 0;
            VG_(free)(cgbs[arg[2]].desc);
            cgb_discards++;
            *ret = 0;
         }
         break;

//zz       case VG_USERREQ__GET_VBITS:
//zz          /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
//zz             error. */
//zz          /* VG_(printf)("get_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
//zz          *ret = mc_get_or_set_vbits_for_client
//zz                    ( tid, arg[1], arg[2], arg[3], False /* get them */ );
//zz          break;
//zz 
//zz       case VG_USERREQ__SET_VBITS:
//zz          /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
//zz             error. */
//zz          /* VG_(printf)("set_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
//zz          *ret = mc_get_or_set_vbits_for_client
//zz                    ( tid, arg[1], arg[2], arg[3], True /* set them */ );
//zz          break;

      default:
         if (MAC_(handle_common_client_requests)(tid, arg, ret )) {
            return True;
         } else {
            VG_(message)(Vg_UserMsg,
                         "Warning: unknown memcheck client request code %llx",
                         (ULong)arg[0]);
            return False;
         }
   }
   return True;
}

/*------------------------------------------------------------*/
/*--- Setup                                                ---*/
/*------------------------------------------------------------*/

void TL_(pre_clo_init)(void)
{
   VG_(details_name)            ("Memcheck");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a memory error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2002-2005, and GNU GPL'd, by Julian Seward et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 370 );

   VG_(basic_tool_funcs)          (TL_(post_clo_init),
                                   TL_(instrument),
                                   TL_(fini));

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (TL_(eq_Error),
                                   TL_(pp_Error),
                                   TL_(update_extra),
                                   TL_(recognised_suppression),
                                   TL_(read_extra_suppression_info),
                                   TL_(error_matches_suppression),
                                   TL_(get_error_name),
                                   TL_(print_extra_suppression_info));
   VG_(needs_libc_freeres)        ();
   VG_(needs_command_line_options)(TL_(process_cmd_line_option),
                                   TL_(print_usage),
                                   TL_(print_debug_usage));
   VG_(needs_client_requests)     (TL_(handle_client_request));
   VG_(needs_sanity_checks)       (TL_(cheap_sanity_check),
                                   TL_(expensive_sanity_check));
   VG_(needs_shadow_memory)       ();

   VG_(malloc_funcs)              (TL_(malloc),
                                   TL_(__builtin_new),
                                   TL_(__builtin_vec_new),
                                   TL_(memalign),
                                   TL_(calloc),
                                   TL_(free),
                                   TL_(__builtin_delete),
                                   TL_(__builtin_vec_delete),
                                   TL_(realloc),
                                   MALLOC_REDZONE_SZB );

   MAC_( new_mem_heap)  = & mc_new_mem_heap;
   MAC_( ban_mem_heap)  = & mc_make_noaccess;
   MAC_(copy_mem_heap)  = & mc_copy_address_range_state;
   MAC_( die_mem_heap)  = & mc_make_noaccess;
   MAC_(check_noaccess) = & mc_check_noaccess;

   VG_(init_new_mem_startup)      ( & mc_new_mem_startup );
   VG_(init_new_mem_stack_signal) ( & mc_make_writable );
   VG_(init_new_mem_brk)          ( & mc_make_writable );
   VG_(init_new_mem_mmap)         ( & mc_new_mem_mmap );

   VG_(init_copy_mem_remap)       ( & mc_copy_address_range_state );

   VG_(init_die_mem_stack_signal) ( & mc_make_noaccess );
   VG_(init_die_mem_brk)          ( & mc_make_noaccess );
   VG_(init_die_mem_munmap)       ( & mc_make_noaccess );

   VG_(init_new_mem_stack_4)      ( & MAC_(new_mem_stack_4)  );
   VG_(init_new_mem_stack_8)      ( & MAC_(new_mem_stack_8)  );
   VG_(init_new_mem_stack_12)     ( & MAC_(new_mem_stack_12) );
   VG_(init_new_mem_stack_16)     ( & MAC_(new_mem_stack_16) );
   VG_(init_new_mem_stack_32)     ( & MAC_(new_mem_stack_32) );
   VG_(init_new_mem_stack)        ( & MAC_(new_mem_stack)    );

   VG_(init_die_mem_stack_4)      ( & MAC_(die_mem_stack_4)  );
   VG_(init_die_mem_stack_8)      ( & MAC_(die_mem_stack_8)  );
   VG_(init_die_mem_stack_12)     ( & MAC_(die_mem_stack_12) );
   VG_(init_die_mem_stack_16)     ( & MAC_(die_mem_stack_16) );
   VG_(init_die_mem_stack_32)     ( & MAC_(die_mem_stack_32) );
   VG_(init_die_mem_stack)        ( & MAC_(die_mem_stack)    );

   VG_(init_ban_mem_stack)        ( & mc_make_noaccess );

   VG_(init_pre_mem_read)         ( & mc_check_is_readable );
   VG_(init_pre_mem_read_asciiz)  ( & mc_check_is_readable_asciiz );
   VG_(init_pre_mem_write)        ( & mc_check_is_writable );
   VG_(init_post_mem_write)       ( & mc_post_mem_write );

   VG_(init_pre_reg_read)         ( & mc_pre_reg_read );

   VG_(init_post_reg_write)                   ( & mc_post_reg_write );
   VG_(init_post_reg_write_clientcall_return) ( & mc_post_reg_write_clientcall );

   VG_(register_profile_event) ( VgpSetMem,   "set-mem-perms" );
   VG_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
   VG_(register_profile_event) ( VgpESPAdj,   "adjust-ESP" );

   /* Additional block description for VG_(describe_addr)() */
   MAC_(describe_addr_supp) = client_perm_maybe_describe;

   init_shadow_memory();
   MAC_(common_pre_clo_init)();
}

void TL_(post_clo_init) ( void )
{
}

void TL_(fini) ( Int exitcode )
{
   MAC_(common_fini)( mc_detect_memory_leaks );

   if (VG_(clo_verbosity) > 1) {
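      /* Each auxmap entry shadows a 64KB chunk of address space,
         hence entries*64 gives KB covered and entries/16 gives MB. */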
      VG_(message)(Vg_DebugMsg,
         "memcheck: auxmaps: %d auxmap entries (%dk, %dM) in use",
         auxmap_used,
         64 * auxmap_used, auxmap_used / 16 );
      VG_(message)(Vg_DebugMsg,
         "memcheck: auxmaps: %lld searches, %lld comparisons",
         n_auxmap_searches, n_auxmap_cmps );
   }

   if (0) {
      VG_(message)(Vg_DebugMsg,
         "------ Valgrind's client block stats follow ---------------" );
      show_client_block_stats();
   }
}

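/* The 9./8 here matches Memcheck's shadow overhead: 8 V bits plus
   1 A bit of shadow state per application byte, i.e. 9 shadow bits
   per 8 data bits. */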
VG_DETERMINE_INTERFACE_VERSION(TL_(pre_clo_init), 9./8)

/*--------------------------------------------------------------------*/
/*--- end                                                mc_main.c ---*/
/*--------------------------------------------------------------------*/