Mirror of https://github.com/Zenithsiz/ftmemsim-valgrind.git (synced 2026-02-03 10:05:29 +00:00)
64-bit cleanness: Converted malloc() et al to use SizeT rather than Int.
This required some tricks with casting to preserve Memcheck's "silly" (i.e. negative) argument checking. The allocator was changed accordingly; it should now be able to allocate blocks larger than 4GB on 64-bit platforms.

git-svn-id: svn://svn.valgrind.org/valgrind/trunk@2906
Parent: 55a7ef5758
Commit: e245f2aeb0
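The "tricks with casting" the message mentions show up later in the diff as the new complain_about_silly_args() helper: the now-unsigned SizeT argument is cast back to the signed SSizeT so that a caller who passed a negative size is still caught. A minimal standalone sketch of that idea, with the SizeT/SSizeT typedefs mirroring the vg_include.h hunk below and a printf()-based harness that is illustrative only:

/* Sketch of the negative-size ("silly arg") check after the SizeT change.
   SizeT is unsigned, so a negative argument arrives as a huge value;
   viewing it through the signed SSizeT recovers the sign for the check. */
#include <stdio.h>

typedef unsigned long SizeT;    /* UWord on the target, per the vg_include.h hunk */
typedef   signed long SSizeT;   /* Word on the target */

static int complain_about_silly_args(SizeT sizeB, const char* fn)
{
   /* Assumes requests never legitimately exceed 2^31-1 (32-bit) or
      2^63-1 (64-bit) bytes, so a negative signed view means a bogus arg. */
   if ((SSizeT)sizeB < 0) {
      printf("Warning: silly arg (%ld) to %s()\n", (SSizeT)sizeB, fn);
      return 1;
   }
   return 0;
}

int main(void)
{
   SizeT ok  = 64;
   SizeT bad = (SizeT)-3;   /* e.g. a caller passed a negative int */
   printf("%d\n", complain_about_silly_args(ok,  "malloc"));   /* prints 0 */
   printf("%d\n", complain_about_silly_args(bad, "malloc"));   /* warns, prints 1 */
   return 0;
}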
@@ -442,16 +442,16 @@ typedef Int ArenaId;
// Round-up size for --sloppy-malloc=yes.
#define VG_SLOPPY_MALLOC_SZB 4
extern void* VG_(arena_malloc) ( ArenaId arena, Int nbytes );
extern void* VG_(arena_malloc) ( ArenaId arena, SizeT nbytes );
extern void VG_(arena_free) ( ArenaId arena, void* ptr );
extern void* VG_(arena_calloc) ( ArenaId arena, Int alignment,
Int nmemb, Int nbytes );
extern void* VG_(arena_realloc) ( ArenaId arena, void* ptr, Int alignment,
Int size );
extern void* VG_(arena_malloc_aligned) ( ArenaId aid, Int req_alignB,
Int req_pszB );
extern void* VG_(arena_calloc) ( ArenaId arena, SizeT alignment,
SizeT nmemb, SizeT nbytes );
extern void* VG_(arena_realloc) ( ArenaId arena, void* ptr, SizeT alignment,
SizeT size );
extern void* VG_(arena_malloc_aligned) ( ArenaId aid, SizeT req_alignB,
SizeT req_pszB );
extern Int VG_(arena_payload_szB) ( ArenaId aid, void* payload );
extern SizeT VG_(arena_payload_szB) ( ArenaId aid, void* payload );
extern void VG_(sanity_check_malloc_all) ( void );

@@ -290,12 +290,12 @@ void, init_shadow_page, Addr p
## ================================================================================
## malloc and friends
:malloc
void*, malloc, Int n
void*, __builtin_new, Int n
void*, __builtin_vec_new, Int n
void*, memalign, Int align, Int n
void*, calloc, Int nmemb, Int n
void*, malloc, SizeT n
void*, __builtin_new, SizeT n
void*, __builtin_vec_new, SizeT n
void*, memalign, SizeT align, SizeT n
void*, calloc, SizeT nmemb, SizeT n
void, free, void* p
void, __builtin_delete, void* p
void, __builtin_vec_delete, void* p
void*, realloc, void* p, Int size
void*, realloc, void* p, SizeT size

@@ -79,7 +79,7 @@ Bool VG_(sk_malloc_called_by_scheduler) = False;
malloc()-replacing tool cannot forget to implement SK_(malloc)() or
SK_(free)(). */
__attribute__ ((weak))
void* SK_(malloc)( Int size )
void* SK_(malloc)( SizeT size )
{
if (VG_(sk_malloc_called_by_scheduler))
return VG_(cli_malloc)(VG_MIN_MALLOC_SZB, size);
@@ -41,31 +41,31 @@
#define VG_N_MALLOC_LISTS 16 // do not change this
// On 64-bit systems size_t is 64-bits, so bigger than this is possible.
// We can worry about that when it happens...
#define MAX_PSZB 0x7ffffff0
// The amount you can ask for is limited only by sizeof(SizeT)...
#define MAX_PSZB (~((SizeT)0x0))
typedef UChar UByte;
/* Block layout:
this block total szB (sizeof(Int) bytes)
this block total szB (sizeof(SizeT) bytes)
freelist previous ptr (sizeof(void*) bytes)
red zone bytes (depends on .rz_szB field of Arena)
(payload bytes)
red zone bytes (depends on .rz_szB field of Arena)
freelist next ptr (sizeof(void*) bytes)
this block total szB (sizeof(Int) bytes)
this block total szB (sizeof(SizeT) bytes)
Total size in bytes (bszB) and payload size in bytes (pszB)
are related by:
bszB == pszB + 2*sizeof(Int) + 2*sizeof(void*) + 2*a->rz_szB
bszB == pszB + 2*sizeof(SizeT) + 2*sizeof(void*) + 2*a->rz_szB
Furthermore, both size fields in the block are negative if it is
not in use, and positive if it is in use. A block size of zero
is not possible, because a block always has at least two Ints and two
pointers of overhead.
Furthermore, both size fields in the block have their least-significant
bit set if the block is not in use, and unset if it is in use.
(The bottom 3 or so bits are always free for this because of alignment.)
A block size of zero is not possible, because a block always has at
least two SizeTs and two pointers of overhead.
Nb: All Block payloads must be VG_MIN_MALLOC_SZB-aligned. This is
achieved by ensuring that Superblocks are VG_MIN_MALLOC_SZB-aligned
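To make the revised layout comment concrete: on a 64-bit platform sizeof(SizeT) == sizeof(void*) == 8, so with (say) 8-byte red zones a 16-byte payload gives bszB == 16 + 2*8 + 2*8 + 2*8 == 64; and because block sizes are multiples of VG_MIN_MALLOC_SZB, the low bit of a real bszB is always zero and can carry the new in-use/free flag. A tiny standalone sketch that mirrors the tagging helpers introduced in the hunks just below (the 8-byte red-zone figure and the main() harness are illustrative only):

/* Low-bit free/in-use tagging, as in the new mk_*_bszB helpers. */
#include <stdio.h>

typedef unsigned long SizeT;
#define SIZE_T_0x1 ((SizeT)0x1)

static SizeT mk_free_bszB  ( SizeT bszB ) { return bszB |  SIZE_T_0x1; }   /* mark free   */
static SizeT mk_inuse_bszB ( SizeT bszB ) { return bszB & ~SIZE_T_0x1; }   /* mark in use */
static SizeT mk_plain_bszB ( SizeT bszB ) { return bszB & ~SIZE_T_0x1; }   /* strip flag  */
static int   is_inuse_bszB ( SizeT bszB ) { return (bszB & SIZE_T_0x1) == 0; }

int main(void)
{
   SizeT bszB  = 64;                      /* 16B payload + 2*8 + 2*8 + 2*8 overhead */
   SizeT freed = mk_free_bszB(bszB);      /* 65: low bit set => not in use */
   printf("%lu %d\n", freed, is_inuse_bszB(freed));                               /* 65 0 */
   printf("%lu %d\n", mk_plain_bszB(freed), is_inuse_bszB(mk_inuse_bszB(freed))); /* 64 1 */
   return 0;
}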
@@ -100,9 +100,10 @@ typedef
typedef
struct _Superblock {
struct _Superblock* next;
Int n_payload_bytes;
SizeT n_payload_bytes;
UByte padding[ VG_MIN_MALLOC_SZB -
((sizeof(void*) + sizeof(Int)) % VG_MIN_MALLOC_SZB) ];
((sizeof(struct _Superblock*) + sizeof(SizeT)) %
VG_MIN_MALLOC_SZB) ];
UByte payload_bytes[0];
}
Superblock;

@@ -113,14 +114,14 @@ typedef
struct {
Char* name;
Bool clientmem; // Allocates in the client address space?
Int rz_szB; // Red zone size in bytes
Int min_sblock_szB; // Minimum superblock size in bytes
UInt rz_szB; // Red zone size in bytes
SizeT min_sblock_szB; // Minimum superblock size in bytes
Block* freelist[VG_N_MALLOC_LISTS];
Superblock* sblocks;
// Stats only.
UInt bytes_on_loan;
UInt bytes_mmaped;
UInt bytes_on_loan_max;
SizeT bytes_on_loan;
SizeT bytes_mmaped;
SizeT bytes_on_loan_max;
}
Arena;
@@ -129,48 +130,50 @@ typedef
/*--- Low-level functions for working with Blocks. ---*/
/*------------------------------------------------------------*/
#define SIZE_T_0x1 ((SizeT)0x1)
// Mark a bszB as in-use, and not in-use.
static __inline__
Int mk_inuse_bszB ( Int bszB )
SizeT mk_inuse_bszB ( SizeT bszB )
{
vg_assert(bszB != 0);
return (bszB < 0) ? -bszB : bszB;
return bszB & (~SIZE_T_0x1);
}
static __inline__
Int mk_free_bszB ( Int bszB )
SizeT mk_free_bszB ( SizeT bszB )
{
vg_assert(bszB != 0);
return (bszB < 0) ? bszB : -bszB;
return bszB | SIZE_T_0x1;
}
// Remove the in-use/not-in-use attribute from a bszB, leaving just
// the size.
static __inline__
Int mk_plain_bszB ( Int bszB )
SizeT mk_plain_bszB ( SizeT bszB )
{
vg_assert(bszB != 0);
return (bszB < 0) ? -bszB : bszB;
return bszB & (~SIZE_T_0x1);
}
// Does this bszB have the in-use attribute?
static __inline__
Bool is_inuse_bszB ( Int bszB )
Bool is_inuse_bszB ( SizeT bszB )
{
vg_assert(bszB != 0);
return (bszB < 0) ? False : True;
return (0 != (bszB & SIZE_T_0x1)) ? False : True;
}
// Set and get the lower size field of a block.
static __inline__
void set_bszB_lo ( Block* b, Int bszB )
void set_bszB_lo ( Block* b, SizeT bszB )
{
*(Int*)&b[0] = bszB;
*(SizeT*)&b[0] = bszB;
}
static __inline__
Int get_bszB_lo ( Block* b )
SizeT get_bszB_lo ( Block* b )
{
return *(Int*)&b[0];
return *(SizeT*)&b[0];
}
// Get the address of the last byte in a block
@@ -183,33 +186,51 @@ UByte* last_byte ( Block* b )
// Set and get the upper size field of a block.
static __inline__
void set_bszB_hi ( Block* b, Int bszB )
void set_bszB_hi ( Block* b, SizeT bszB )
{
UByte* b2 = (UByte*)b;
UByte* lb = last_byte(b);
vg_assert(lb == &b2[mk_plain_bszB(bszB) - 1]);
*(Int*)&lb[-sizeof(Int) + 1] = bszB;
*(SizeT*)&lb[-sizeof(SizeT) + 1] = bszB;
}
static __inline__
Int get_bszB_hi ( Block* b )
SizeT get_bszB_hi ( Block* b )
{
UByte* lb = last_byte(b);
return *(Int*)&lb[-sizeof(Int) + 1];
return *(SizeT*)&lb[-sizeof(SizeT) + 1];
}
// Return the lower, upper and total overhead in bytes for a block.
// These are determined purely by which arena the block lives in.
static __inline__
UInt overhead_szB_lo ( Arena* a )
{
return sizeof(SizeT) + sizeof(void*) + a->rz_szB;
}
static __inline__
UInt overhead_szB_hi ( Arena* a )
{
return a->rz_szB + sizeof(void*) + sizeof(SizeT);
}
static __inline__
UInt overhead_szB ( Arena* a )
{
return overhead_szB_lo(a) + overhead_szB_hi(a);
}
// Given the addr of a block, return the addr of its payload.
static __inline__
UByte* get_block_payload ( Arena* a, Block* b )
{
UByte* b2 = (UByte*)b;
return & b2[sizeof(Int) + sizeof(void*) + a->rz_szB];
return & b2[ overhead_szB_lo(a) ];
}
// Given the addr of a block's payload, return the addr of the block itself.
static __inline__
Block* get_payload_block ( Arena* a, UByte* payload )
{
return (Block*)&payload[-sizeof(Int) - sizeof(void*) - a->rz_szB];
return (Block*)&payload[ -overhead_szB_lo(a) ];
}
@@ -218,25 +239,25 @@ static __inline__
void set_prev_b ( Block* b, Block* prev_p )
{
UByte* b2 = (UByte*)b;
*(Block**)&b2[sizeof(Int)] = prev_p;
*(Block**)&b2[sizeof(SizeT)] = prev_p;
}
static __inline__
void set_next_b ( Block* b, Block* next_p )
{
UByte* lb = last_byte(b);
*(Block**)&lb[-sizeof(Int) - sizeof(void*) + 1] = next_p;
*(Block**)&lb[-sizeof(SizeT) - sizeof(void*) + 1] = next_p;
}
static __inline__
Block* get_prev_b ( Block* b )
{
UByte* b2 = (UByte*)b;
return *(Block**)&b2[sizeof(Int)];
return *(Block**)&b2[sizeof(SizeT)];
}
static __inline__
Block* get_next_b ( Block* b )
{
UByte* lb = last_byte(b);
return *(Block**)&lb[-sizeof(Int) - sizeof(void*) + 1];
return *(Block**)&lb[-sizeof(SizeT) - sizeof(void*) + 1];
}
@@ -245,76 +266,56 @@ static __inline__
Block* get_predecessor_block ( Block* b )
{
UByte* b2 = (UByte*)b;
Int bszB = mk_plain_bszB( (*(Int*)&b2[-sizeof(Int)]) );
SizeT bszB = mk_plain_bszB( (*(SizeT*)&b2[-sizeof(SizeT)]) );
return (Block*)&b2[-bszB];
}
// Read and write the lower and upper red-zone bytes of a block.
static __inline__
void set_rz_lo_byte ( Arena* a, Block* b, Int rz_byteno, UByte v )
void set_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
{
UByte* b2 = (UByte*)b;
b2[sizeof(Int) + sizeof(void*) + rz_byteno] = v;
b2[sizeof(SizeT) + sizeof(void*) + rz_byteno] = v;
}
static __inline__
void set_rz_hi_byte ( Arena* a, Block* b, Int rz_byteno, UByte v )
void set_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno, UByte v )
{
UByte* lb = last_byte(b);
lb[-sizeof(Int) - sizeof(void*) - rz_byteno] = v;
lb[-sizeof(SizeT) - sizeof(void*) - rz_byteno] = v;
}
static __inline__
UByte get_rz_lo_byte ( Arena* a, Block* b, Int rz_byteno )
UByte get_rz_lo_byte ( Arena* a, Block* b, UInt rz_byteno )
{
UByte* b2 = (UByte*)b;
return b2[sizeof(Int) + sizeof(void*) + rz_byteno];
return b2[sizeof(SizeT) + sizeof(void*) + rz_byteno];
}
static __inline__
UByte get_rz_hi_byte ( Arena* a, Block* b, Int rz_byteno )
UByte get_rz_hi_byte ( Arena* a, Block* b, UInt rz_byteno )
{
UByte* lb = last_byte(b);
return lb[-sizeof(Int) - sizeof(void*) - rz_byteno];
return lb[-sizeof(SizeT) - sizeof(void*) - rz_byteno];
}
/* Return the lower, upper and total overhead in bytes for a block.
These are determined purely by which arena the block lives in. */
static __inline__
Int overhead_szB_lo ( Arena* a )
{
return sizeof(Int) + sizeof(void*) + a->rz_szB;
}
static __inline__
Int overhead_szB_hi ( Arena* a )
{
return sizeof(void*) + sizeof(Int) + a->rz_szB;
}
static __inline__
Int overhead_szB ( Arena* a )
{
return overhead_szB_lo(a) + overhead_szB_hi(a);
}
// Return the minimum bszB for a block in this arena. Can have zero-length
// payloads, so it's the size of the admin bytes.
static __inline__
Int min_useful_bszB ( Arena* a )
UInt min_useful_bszB ( Arena* a )
{
return overhead_szB(a);
}
// Convert payload size <--> block size (both in bytes).
static __inline__
Int pszB_to_bszB ( Arena* a, Int pszB )
SizeT pszB_to_bszB ( Arena* a, SizeT pszB )
{
vg_assert(pszB >= 0);
return pszB + overhead_szB(a);
}
static __inline__
Int bszB_to_pszB ( Arena* a, Int bszB )
SizeT bszB_to_pszB ( Arena* a, SizeT bszB )
{
Int pszB = bszB - overhead_szB(a);
vg_assert(pszB >= 0);
return pszB;
vg_assert(bszB >= overhead_szB(a));
return bszB - overhead_szB(a);
}
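The bszB_to_pszB() rewrite above shows the pattern this commit applies throughout: once sizes are unsigned SizeT, an assert like "pszB >= 0" is vacuously true, so the meaningful check becomes "no underflow before the subtraction". A small self-contained illustration of that pattern; the overhead value is a made-up stand-in for overhead_szB(a):

/* Why "x >= 0" asserts become vacuous with unsigned sizes, and what
   replaces them: check for underflow before subtracting. */
#include <assert.h>
#include <stdio.h>

typedef unsigned long SizeT;

static SizeT bszB_to_pszB ( SizeT bszB, SizeT overhead )
{
   assert(bszB >= overhead);   /* catches the bad case up front...         */
   return bszB - overhead;     /* ...so the subtraction cannot wrap around */
}

int main(void)
{
   SizeT overhead = 32;                           /* hypothetical per-block overhead */
   printf("%lu\n", bszB_to_pszB(96, overhead));   /* 64 */
   /* bszB_to_pszB(16, overhead) would now assert instead of silently
      wrapping to a huge unsigned "payload size". */
   return 0;
}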
@@ -338,12 +339,12 @@ static Arena* arenaId_to_ArenaP ( ArenaId arena )
// Initialise an arena. rz_szB is the minimum redzone size; it might be
// made bigger to ensure that VG_MIN_MALLOC_ALIGNMENT is observed.
static
void arena_init ( ArenaId aid, Char* name, Int rz_szB, Int min_sblock_szB )
void arena_init ( ArenaId aid, Char* name, UInt rz_szB, SizeT min_sblock_szB )
{
Int i;
SizeT i;
Arena* a = arenaId_to_ArenaP(aid);
vg_assert(rz_szB >= 0);
vg_assert(rz_szB < 128); // ensure reasonable size
vg_assert((min_sblock_szB % VKI_PAGE_SIZE) == 0);
a->name = name;
a->clientmem = ( VG_AR_CLIENT == aid ? True : False );

@@ -366,7 +367,7 @@ void arena_init ( ArenaId aid, Char* name, Int rz_szB, Int min_sblock_szB )
/* Print vital stats for an arena. */
void VG_(print_all_arena_stats) ( void )
{
Int i;
UInt i;
for (i = 0; i < VG_N_ARENAS; i++) {
Arena* a = arenaId_to_ArenaP(i);
VG_(message)(Vg_DebugMsg,

@@ -383,7 +384,7 @@ void VG_(print_all_arena_stats) ( void )
static
void ensure_mm_init ( void )
{
static Int client_rz_szB;
static UInt client_rz_szB;
static Bool init_done = False;
if (init_done) {

@@ -396,7 +397,6 @@ void ensure_mm_init ( void )
/* No particular reason for this figure, it's just smallish */
sk_assert(VG_(vg_malloc_redzone_szB) < 128);
sk_assert(VG_(vg_malloc_redzone_szB) >= 0);
client_rz_szB = VG_(vg_malloc_redzone_szB);
/* Use checked red zones (of various sizes) for our internal stuff,
@@ -434,7 +434,7 @@ void ensure_mm_init ( void )
// Align ptr p upwards to an align-sized boundary.
static
void* align_upwards ( void* p, Int align )
void* align_upwards ( void* p, SizeT align )
{
Addr a = (Addr)p;
if ((a % align) == 0) return (void*)a;

@@ -444,7 +444,7 @@ void* align_upwards ( void* p, Int align )
// If not enough memory available, either aborts (for non-client memory)
// or returns 0 (for client memory).
static
Superblock* newSuperblock ( Arena* a, Int cszB )
Superblock* newSuperblock ( Arena* a, SizeT cszB )
{
// The extra VG_MIN_MALLOC_SZB bytes are for possible alignment up.
static UByte bootstrap_superblock[CORE_ARENA_MIN_SZB+VG_MIN_MALLOC_SZB];

@@ -519,13 +519,13 @@ Bool VG_(clo_trace_malloc) = False;
/* Minimum alignment in functions that don't specify alignment explicitly.
default: 0, i.e. use VG_MIN_MALLOC_SZB. */
Int VG_(clo_alignment) = VG_MIN_MALLOC_SZB;
UInt VG_(clo_alignment) = VG_MIN_MALLOC_SZB;
Bool VG_(replacement_malloc_process_cmd_line_option)(Char* arg)
{
if (VG_CLO_STREQN(12, arg, "--alignment=")) {
VG_(clo_alignment) = (Int)VG_(atoll)(&arg[12]);
VG_(clo_alignment) = (UInt)VG_(atoll)(&arg[12]);
if (VG_(clo_alignment) < VG_MIN_MALLOC_SZB
|| VG_(clo_alignment) > 4096

@@ -572,9 +572,8 @@ void VG_(replacement_malloc_print_debug_usage)(void)
// Convert a payload size in bytes to a freelist number.
static
Int pszB_to_listNo ( Int pszB )
UInt pszB_to_listNo ( SizeT pszB )
{
vg_assert(pszB >= 0);
vg_assert(0 == pszB % VG_MIN_MALLOC_SZB);
pszB /= VG_MIN_MALLOC_SZB;
if (pszB <= 2) return 0;
@@ -597,19 +596,19 @@ Int pszB_to_listNo ( Int pszB )
// What is the minimum payload size for a given list?
static
Int listNo_to_pszB_min ( Int listNo )
SizeT listNo_to_pszB_min ( UInt listNo )
{
Int pszB = 0;
vg_assert(listNo >= 0 && listNo <= VG_N_MALLOC_LISTS);
SizeT pszB = 0;
vg_assert(listNo <= VG_N_MALLOC_LISTS);
while (pszB_to_listNo(pszB) < listNo) pszB += VG_MIN_MALLOC_SZB;
return pszB;
}
// What is the maximum payload size for a given list?
static
Int listNo_to_pszB_max ( Int listNo )
SizeT listNo_to_pszB_max ( UInt listNo )
{
vg_assert(listNo >= 0 && listNo <= VG_N_MALLOC_LISTS);
vg_assert(listNo <= VG_N_MALLOC_LISTS);
if (listNo == VG_N_MALLOC_LISTS-1) {
return MAX_PSZB;
} else {

@@ -623,12 +622,12 @@ Int listNo_to_pszB_max ( Int listNo )
lower address, with the idea of attempting to recycle the same
blocks rather than cruise through the address space. */
static
void swizzle ( Arena* a, Int lno )
void swizzle ( Arena* a, UInt lno )
{
Block* p_best;
Block* pp;
Block* pn;
Int i;
UInt i;
p_best = a->freelist[lno];
if (p_best == NULL) return;
@@ -656,12 +655,12 @@ void swizzle ( Arena* a, Int lno )
#define VG_REDZONE_LO_MASK 0x31
#define VG_REDZONE_HI_MASK 0x7c
// Do some crude sanity checks on a chunk.
// Do some crude sanity checks on a Block.
static
Bool blockSane ( Arena* a, Block* b )
{
# define BLEAT(str) VG_(printf)("blockSane: fail -- %s\n",str)
Int i;
UInt i;
if (get_bszB_lo(b) != get_bszB_hi(b))
{BLEAT("sizes");return False;}
if (!a->clientmem && is_inuse_bszB(get_bszB_lo(b))) {

@@ -682,7 +681,8 @@ Bool blockSane ( Arena* a, Block* b )
static
void ppSuperblocks ( Arena* a )
{
Int i, b_bszB, blockno;
UInt i, blockno;
SizeT b_bszB;
Block* b;
Superblock* sb = a->sblocks;
blockno = 1;

@@ -707,13 +707,14 @@ void ppSuperblocks ( Arena* a )
// Sanity check both the superblocks and the chains.
static void sanity_check_malloc_arena ( ArenaId aid )
{
Int i, superblockctr, b_bszB, b_pszB, blockctr_sb, blockctr_li;
Int blockctr_sb_free, listno, list_min_pszB, list_max_pszB;
UInt i, superblockctr, blockctr_sb, blockctr_li;
UInt blockctr_sb_free, listno;
SizeT b_bszB, b_pszB, list_min_pszB, list_max_pszB;
Superblock* sb;
Bool thisFree, lastWasFree;
Block* b;
Block* b_prev;
UInt arena_bytes_on_loan;
SizeT arena_bytes_on_loan;
Arena* a;
# define BOMB VG_(core_panic)("sanity_check_malloc_arena")
@@ -821,7 +822,7 @@ static void sanity_check_malloc_arena ( ArenaId aid )
void VG_(sanity_check_malloc_all) ( void )
{
Int i;
UInt i;
for (i = 0; i < VG_N_ARENAS; i++)
sanity_check_malloc_arena ( i );
}

@@ -835,7 +836,7 @@ Bool VG_(is_empty_arena) ( ArenaId aid )
Arena* a;
Superblock* sb;
Block* b;
Int b_bszB;
SizeT b_bszB;
ensure_mm_init();
a = arenaId_to_ArenaP(aid);

@@ -861,10 +862,9 @@ Bool VG_(is_empty_arena) ( ArenaId aid )
// relevant free list.
static
void mkFreeBlock ( Arena* a, Block* b, Int bszB, Int b_lno )
void mkFreeBlock ( Arena* a, Block* b, SizeT bszB, UInt b_lno )
{
Int pszB = bszB_to_pszB(a, bszB);
vg_assert(pszB >= 0);
SizeT pszB = bszB_to_pszB(a, bszB);
vg_assert(b_lno == pszB_to_listNo(pszB));
// Set the size fields and indicate not-in-use.
set_bszB_lo(b, mk_free_bszB(bszB));

@@ -891,9 +891,9 @@ void mkFreeBlock ( Arena* a, Block* b, Int bszB, Int b_lno )
// Mark the bytes at b .. b+bszB-1 as in use, and set up the block
// appropriately.
static
void mkInuseBlock ( Arena* a, Block* b, UInt bszB )
void mkInuseBlock ( Arena* a, Block* b, SizeT bszB )
{
Int i;
UInt i;
vg_assert(bszB >= min_useful_bszB(a));
set_bszB_lo(b, mk_inuse_bszB(bszB));
set_bszB_hi(b, mk_inuse_bszB(bszB));

@@ -912,9 +912,9 @@ void mkInuseBlock ( Arena* a, Block* b, UInt bszB )
// Remove a block from a given list. Does no sanity checking.
static
void unlinkBlock ( Arena* a, Block* b, Int listno )
void unlinkBlock ( Arena* a, Block* b, UInt listno )
{
vg_assert(listno >= 0 && listno < VG_N_MALLOC_LISTS);
vg_assert(listno < VG_N_MALLOC_LISTS);
if (get_prev_b(b) == b) {
// Only one element in the list; treat it specially.
vg_assert(get_next_b(b) == b);
@@ -938,15 +938,16 @@ void unlinkBlock ( Arena* a, Block* b, Int listno )
// Align the request size.
static __inline__
Int align_req_pszB ( Int req_pszB )
SizeT align_req_pszB ( SizeT req_pszB )
{
Int n = VG_MIN_MALLOC_SZB-1;
SizeT n = VG_MIN_MALLOC_SZB-1;
return ((req_pszB + n) & (~n));
}
void* VG_(arena_malloc) ( ArenaId aid, Int req_pszB )
void* VG_(arena_malloc) ( ArenaId aid, SizeT req_pszB )
{
Int req_bszB, frag_bszB, b_bszB, lno;
SizeT req_bszB, frag_bszB, b_bszB;
UInt lno;
Superblock* new_sb;
Block* b = NULL;
Arena* a;

@@ -957,7 +958,7 @@ void* VG_(arena_malloc) ( ArenaId aid, Int req_pszB )
ensure_mm_init();
a = arenaId_to_ArenaP(aid);
vg_assert(0 <= req_pszB && req_pszB < MAX_PSZB);
vg_assert(req_pszB < MAX_PSZB);
req_pszB = align_req_pszB(req_pszB);
req_bszB = pszB_to_bszB(a, req_pszB);

@@ -992,7 +993,7 @@ void* VG_(arena_malloc) ( ArenaId aid, Int req_pszB )
obtained_block:
// Ok, we can allocate from b, which lives in list lno.
vg_assert(b != NULL);
vg_assert(lno >= 0 && lno < VG_N_MALLOC_LISTS);
vg_assert(lno < VG_N_MALLOC_LISTS);
vg_assert(a->freelist[lno] != NULL);
b_bszB = mk_plain_bszB(get_bszB_lo(b));
// req_bszB is the size of the block we are after. b_bszB is the

@@ -1039,7 +1040,8 @@ void VG_(arena_free) ( ArenaId aid, void* ptr )
UByte* sb_end;
Block* other;
Block* b;
Int b_bszB, b_pszB, other_bszB, b_listno;
SizeT b_bszB, b_pszB, other_bszB;
UInt b_listno;
Arena* a;
VGP_PUSHCC(VgpMalloc);
@@ -1158,12 +1160,12 @@ void VG_(arena_free) ( ArenaId aid, void* ptr )
. . . . . . .
*/
void* VG_(arena_malloc_aligned) ( ArenaId aid, Int req_alignB, Int req_pszB )
void* VG_(arena_malloc_aligned) ( ArenaId aid, SizeT req_alignB, SizeT req_pszB )
{
Int base_pszB_req, base_pszB_act, frag_bszB;
SizeT base_pszB_req, base_pszB_act, frag_bszB;
Block *base_b, *align_b;
UByte *base_p, *align_p;
UInt saved_bytes_on_loan;
SizeT saved_bytes_on_loan;
Arena* a;
VGP_PUSHCC(VgpMalloc);

@@ -1171,7 +1173,7 @@ void* VG_(arena_malloc_aligned) ( ArenaId aid, Int req_alignB, Int req_pszB )
ensure_mm_init();
a = arenaId_to_ArenaP(aid);
vg_assert(0 <= req_pszB && req_pszB < MAX_PSZB);
vg_assert(req_pszB < MAX_PSZB);
// Check that the requested alignment seems reasonable; that is, is
// a power of 2.

@@ -1252,7 +1254,7 @@ void* VG_(arena_malloc_aligned) ( ArenaId aid, Int req_alignB, Int req_pszB )
}
Int VG_(arena_payload_szB) ( ArenaId aid, void* ptr )
SizeT VG_(arena_payload_szB) ( ArenaId aid, void* ptr )
{
Arena* a = arenaId_to_ArenaP(aid);
Block* b = get_payload_block(a, ptr);
@@ -1264,15 +1266,16 @@ Int VG_(arena_payload_szB) ( ArenaId aid, void* ptr )
/*--- Services layered on top of malloc/free. ---*/
/*------------------------------------------------------------*/
void* VG_(arena_calloc) ( ArenaId aid, Int alignB, Int nmemb, Int nbytes )
void* VG_(arena_calloc) ( ArenaId aid, SizeT alignB, SizeT nmemb, SizeT nbytes )
{
Int i, size;
UInt i;
SizeT size;
UChar* p;
VGP_PUSHCC(VgpMalloc);
size = nmemb * nbytes;
vg_assert(size >= 0);
vg_assert(size >= nmemb && size >= nbytes); // check against overflow
if (alignB == VG_MIN_MALLOC_SZB)
p = VG_(arena_malloc) ( aid, size );
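The new guard in VG_(arena_calloc) above ("size >= nmemb && size >= nbytes") is a cheap check that catches the common nmemb*nbytes wrap-arounds, though not every possible one; the usual watertight alternative divides back after deciding whether the multiply can overflow. A sketch of that standard idiom for comparison only, it is not what the commit does:

/* Standard multiply-overflow check for nmemb * nbytes; names are illustrative. */
#include <stdio.h>

typedef unsigned long SizeT;

static int mul_would_overflow ( SizeT nmemb, SizeT nbytes )
{
   /* nmemb * nbytes overflows iff nbytes != 0 and nmemb exceeds the
      largest multiplier that still fits, i.e. SIZE_MAX / nbytes. */
   return nbytes != 0 && nmemb > ((SizeT)-1) / nbytes;
}

int main(void)
{
   printf("%d\n", mul_would_overflow(1000, 8));               /* 0 */
   printf("%d\n", mul_would_overflow((SizeT)1 << 62, 5));     /* 1 */
   return 0;
}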
@@ -1288,10 +1291,11 @@ void* VG_(arena_calloc) ( ArenaId aid, Int alignB, Int nmemb, Int nbytes )
void* VG_(arena_realloc) ( ArenaId aid, void* ptr,
Int req_alignB, Int req_pszB )
SizeT req_alignB, SizeT req_pszB )
{
Arena* a;
Int old_bszB, old_pszB, i;
SizeT old_bszB, old_pszB;
UInt i;
UChar *p_old, *p_new;
Block* b;

@@ -1300,7 +1304,7 @@ void* VG_(arena_realloc) ( ArenaId aid, void* ptr,
ensure_mm_init();
a = arenaId_to_ArenaP(aid);
vg_assert(0 <= req_pszB && req_pszB < MAX_PSZB);
vg_assert(req_pszB < MAX_PSZB);
b = get_payload_block(a, ptr);
vg_assert(blockSane(a, b));

@@ -1338,7 +1342,7 @@ void* VG_(arena_realloc) ( ArenaId aid, void* ptr,
// All just wrappers to avoid exposing arenas to tools.
void* VG_(malloc) ( Int nbytes )
void* VG_(malloc) ( SizeT nbytes )
{
return VG_(arena_malloc) ( VG_AR_TOOL, nbytes );
}

@@ -1348,23 +1352,23 @@ void VG_(free) ( void* ptr )
VG_(arena_free) ( VG_AR_TOOL, ptr );
}
void* VG_(calloc) ( Int nmemb, Int nbytes )
void* VG_(calloc) ( SizeT nmemb, SizeT nbytes )
{
return VG_(arena_calloc) ( VG_AR_TOOL, VG_MIN_MALLOC_SZB, nmemb, nbytes );
}
void* VG_(realloc) ( void* ptr, Int size )
void* VG_(realloc) ( void* ptr, SizeT size )
{
return VG_(arena_realloc) ( VG_AR_TOOL, ptr, VG_MIN_MALLOC_SZB, size );
}
void* VG_(malloc_aligned) ( Int req_alignB, Int req_pszB )
void* VG_(malloc_aligned) ( SizeT req_alignB, SizeT req_pszB )
{
return VG_(arena_malloc_aligned) ( VG_AR_TOOL, req_alignB, req_pszB );
}
void* VG_(cli_malloc) ( UInt align, Int nbytes )
void* VG_(cli_malloc) ( SizeT align, SizeT nbytes )
{
// 'align' should be valid by now. VG_(arena_malloc_aligned)() will
// abort if it's not.

@@ -1380,7 +1384,7 @@ void VG_(cli_free) ( void* p )
}
Bool VG_(addr_is_in_block)( Addr a, Addr start, UInt size )
Bool VG_(addr_is_in_block)( Addr a, Addr start, SizeT size )
{
return (start - VG_(vg_malloc_redzone_szB) <= a
&& a < start + size + VG_(vg_malloc_redzone_szB));
@@ -1810,7 +1810,7 @@ UInt VG_(vg_malloc_redzone_szB) = 8;
shadow chunk on the appropriate list, and set all memory
protections correctly. */
static void add_HG_Chunk ( ThreadId tid, Addr p, UInt size )
static void add_HG_Chunk ( ThreadId tid, Addr p, SizeT size )
{
HG_Chunk* hc;

@@ -1825,7 +1825,7 @@ static void add_HG_Chunk ( ThreadId tid, Addr p, UInt size )
/* Allocate memory and note change in memory available */
static __inline__
void* alloc_and_new_mem ( Int size, UInt alignment, Bool is_zeroed )
void* alloc_and_new_mem ( SizeT size, SizeT alignment, Bool is_zeroed )
{
Addr p;

@@ -1842,27 +1842,27 @@ void* alloc_and_new_mem ( Int size, UInt alignment, Bool is_zeroed )
return (void*)p;
}
void* SK_(malloc) ( Int n )
void* SK_(malloc) ( SizeT n )
{
return alloc_and_new_mem ( n, VG_(clo_alignment), /*is_zeroed*/False );
}
void* SK_(__builtin_new) ( Int n )
void* SK_(__builtin_new) ( SizeT n )
{
return alloc_and_new_mem ( n, VG_(clo_alignment), /*is_zeroed*/False );
}
void* SK_(__builtin_vec_new) ( Int n )
void* SK_(__builtin_vec_new) ( SizeT n )
{
return alloc_and_new_mem ( n, VG_(clo_alignment), /*is_zeroed*/False );
}
void* SK_(memalign) ( Int align, Int n )
void* SK_(memalign) ( SizeT align, SizeT n )
{
return alloc_and_new_mem ( n, align, /*is_zeroed*/False );
}
void* SK_(calloc) ( Int nmemb, Int size )
void* SK_(calloc) ( SizeT nmemb, SizeT size )
{
return alloc_and_new_mem ( nmemb*size, VG_(clo_alignment),
/*is_zeroed*/True );

@@ -1944,7 +1944,7 @@ void SK_(__builtin_vec_delete) ( void* p )
handle_free(p);
}
void* SK_(realloc) ( void* p, Int new_size )
void* SK_(realloc) ( void* p, SizeT new_size )
{
HG_Chunk *hc;
HG_Chunk **prev_chunks_next_ptr;
@@ -54,6 +54,9 @@ typedef signed long long Long; // 64 64
typedef UWord Addr; // 32 64
typedef UWord SizeT; // 32 64
typedef Word SSizeT; // 32 64
typedef UChar Bool; // 8 8
#define False ((Bool)0)
#define True ((Bool)1)

@@ -351,11 +351,11 @@ extern Int VG_(rename) ( Char* old_name, Char* new_name );
/* ------------------------------------------------------------------ */
/* stdlib.h */
extern void* VG_(malloc) ( Int nbytes );
extern void* VG_(malloc) ( SizeT nbytes );
extern void VG_(free) ( void* p );
extern void* VG_(calloc) ( Int n, Int nbytes );
extern void* VG_(realloc) ( void* p, Int size );
extern void* VG_(malloc_aligned) ( Int align_bytes, Int nbytes );
extern void* VG_(calloc) ( SizeT n, SizeT nbytes );
extern void* VG_(realloc) ( void* p, SizeT size );
extern void* VG_(malloc_aligned) ( SizeT align_bytes, SizeT nbytes );
extern void VG_(print_malloc_stats) ( void );

@@ -1735,18 +1735,18 @@ extern UInt VG_(get_exit_status_shadow) ( void );
/* Arena size for valgrind's own malloc(); default value is 0, but can
be overridden by tool -- but must be done so *statically*, eg:
Int VG_(vg_malloc_redzone_szB) = 4;
UInt VG_(vg_malloc_redzone_szB) = 4;
It can't be done from a function like SK_(pre_clo_init)(). So it can't,
for example, be controlled with a command line option, unfortunately. */
extern UInt VG_(vg_malloc_redzone_szB);
/* Can be called from SK_(malloc) et al to do the actual alloc/freeing. */
extern void* VG_(cli_malloc) ( UInt align, Int nbytes );
extern void* VG_(cli_malloc) ( SizeT align, SizeT nbytes );
extern void VG_(cli_free) ( void* p );
/* Check if an address is within a range, allowing for redzones at edges */
extern Bool VG_(addr_is_in_block)( Addr a, Addr start, UInt size );
extern Bool VG_(addr_is_in_block)( Addr a, Addr start, SizeT size );
/* ------------------------------------------------------------------ */
/* Some options that can be used by a tool if malloc() et al are replaced.

@@ -1759,7 +1759,7 @@ extern Bool VG_(clo_sloppy_malloc);
extern Bool VG_(clo_trace_malloc);
/* Minimum alignment in functions that don't specify alignment explicitly.
default: 0, i.e. use default of the machine (== 4) */
extern Int VG_(clo_alignment);
extern UInt VG_(clo_alignment);
extern Bool VG_(replacement_malloc_process_cmd_line_option) ( Char* arg );
extern void VG_(replacement_malloc_print_usage) ( void );
@@ -178,7 +178,7 @@ typedef
struct _HP_Chunk {
struct _HP_Chunk* next;
Addr data; // Ptr to actual block
UInt size; // Size requested
SizeT size; // Size requested
XPt* where; // Where allocated; bottom-XPt
}
HP_Chunk;

@@ -344,7 +344,7 @@ static XPt* alloc_xpt;
// Cheap allocation for blocks that never need to be freed. Saves about 10%
// for Konqueror startup with --depth=40.
static void* perm_malloc(UInt n_bytes)
static void* perm_malloc(SizeT n_bytes)
{
static Addr hp = 0; // current heap pointer
static Addr hp_lim = 0; // maximum usable byte in current block

@@ -664,7 +664,7 @@ void remove_HP_Chunk(HP_Chunk* hc, HP_Chunk** prev_chunks_next_ptr)
static void hp_census(void);
static
void* new_block ( void* p, Int size, UInt align, Bool is_zeroed )
void* new_block ( void* p, SizeT size, SizeT align, Bool is_zeroed )
{
HP_Chunk* hc;
Bool custom_alloc = (NULL == p);
@@ -738,27 +738,27 @@ void die_block ( void* p, Bool custom_free )
}
void* SK_(malloc) ( Int n )
void* SK_(malloc) ( SizeT n )
{
return new_block( NULL, n, VG_(clo_alignment), /*is_zeroed*/False );
}
void* SK_(__builtin_new) ( Int n )
void* SK_(__builtin_new) ( SizeT n )
{
return new_block( NULL, n, VG_(clo_alignment), /*is_zeroed*/False );
}
void* SK_(__builtin_vec_new) ( Int n )
void* SK_(__builtin_vec_new) ( SizeT n )
{
return new_block( NULL, n, VG_(clo_alignment), /*is_zeroed*/False );
}
void* SK_(calloc) ( Int m, Int size )
void* SK_(calloc) ( SizeT m, SizeT size )
{
return new_block( NULL, m*size, VG_(clo_alignment), /*is_zeroed*/True );
}
void *SK_(memalign)( Int align, Int n )
void *SK_(memalign)( SizeT align, SizeT n )
{
return new_block( NULL, n, align, False );
}

@@ -778,13 +778,13 @@ void SK_(__builtin_vec_delete) ( void* p )
die_block( p, /*custom_free*/False );
}
void* SK_(realloc) ( void* p_old, Int new_size )
void* SK_(realloc) ( void* p_old, SizeT new_size )
{
HP_Chunk* hc;
HP_Chunk** remove_handle;
Int i;
void* p_new;
UInt old_size;
SizeT old_size;
XPt *old_where, *new_where;
VGP_PUSHCC(VgpCliMalloc);
@@ -37,9 +37,9 @@
/*------------------------------------------------------------*/
/* Stats ... */
static UInt cmalloc_n_mallocs = 0;
static UInt cmalloc_n_frees = 0;
static UInt cmalloc_bs_mallocd = 0;
static SizeT cmalloc_n_mallocs = 0;
static SizeT cmalloc_n_frees = 0;
static SizeT cmalloc_bs_mallocd = 0;
/* We want a 16B redzone on heap blocks for Addrcheck and Memcheck */
UInt VG_(vg_malloc_redzone_szB) = 16;

@@ -132,7 +132,7 @@ MAC_Chunk* MAC_(first_matching_freed_MAC_Chunk) ( Bool (*p)(MAC_Chunk*, void*),
/* Allocate its shadow chunk, put it on the appropriate list. */
static
void add_MAC_Chunk ( Addr p, UInt size, MAC_AllocKind kind, VgHashTable table)
void add_MAC_Chunk ( Addr p, SizeT size, MAC_AllocKind kind, VgHashTable table)
{
MAC_Chunk* mc;

@@ -158,9 +158,31 @@ void add_MAC_Chunk ( Addr p, UInt size, MAC_AllocKind kind, VgHashTable table)
/*--- client_malloc(), etc ---*/
/*------------------------------------------------------------*/
static Bool complain_about_silly_args(SizeT sizeB, Char* fn)
{
// Cast to a signed type to catch any unexpectedly negative args. We're
// assuming here that the size asked for is not greater than 2^31 bytes
// (for 32-bit platforms) or 2^63 bytes (for 64-bit platforms).
if ((SSizeT)sizeB < 0) {
VG_(message)(Vg_UserMsg, "Warning: silly arg (%d) to %s()", sizeB, fn );
return True;
}
return False;
}
static Bool complain_about_silly_args2(SizeT n, SizeT sizeB)
{
if ((SSizeT)n < 0 || (SSizeT)sizeB < 0) {
VG_(message)(Vg_UserMsg, "Warning: silly args (%d,%d) to calloc()",
n, sizeB);
return True;
}
return False;
}
/* Allocate memory and note change in memory available */
__inline__
void* MAC_(new_block) ( Addr p, UInt size, UInt align, UInt rzB,
void* MAC_(new_block) ( Addr p, SizeT size, SizeT align, UInt rzB,
Bool is_zeroed, MAC_AllocKind kind, VgHashTable table)
{
VGP_PUSHCC(VgpCliMalloc);
@@ -191,10 +213,9 @@ void* MAC_(new_block) ( Addr p, UInt size, UInt align, UInt rzB,
return (void*)p;
}
void* SK_(malloc) ( Int n )
void* SK_(malloc) ( SizeT n )
{
if (n < 0) {
VG_(message)(Vg_UserMsg, "Warning: silly arg (%d) to malloc()", n );
if (complain_about_silly_args(n, "malloc")) {
return NULL;
} else {
return MAC_(new_block) ( 0, n, VG_(clo_alignment),

@@ -203,10 +224,9 @@ void* SK_(malloc) ( Int n )
}
}
void* SK_(__builtin_new) ( Int n )
void* SK_(__builtin_new) ( SizeT n )
{
if (n < 0) {
VG_(message)(Vg_UserMsg, "Warning: silly arg (%d) to __builtin_new()", n);
if (complain_about_silly_args(n, "__builtin_new")) {
return NULL;
} else {
return MAC_(new_block) ( 0, n, VG_(clo_alignment),

@@ -215,11 +235,9 @@ void* SK_(__builtin_new) ( Int n )
}
}
void* SK_(__builtin_vec_new) ( Int n )
void* SK_(__builtin_vec_new) ( SizeT n )
{
if (n < 0) {
VG_(message)(Vg_UserMsg,
"Warning: silly arg (%d) to __builtin_vec_new()", n );
if (complain_about_silly_args(n, "__builtin_vec_new")) {
return NULL;
} else {
return MAC_(new_block) ( 0, n, VG_(clo_alignment),

@@ -228,10 +246,9 @@ void* SK_(__builtin_vec_new) ( Int n )
}
}
void* SK_(memalign) ( Int align, Int n )
void* SK_(memalign) ( SizeT align, SizeT n )
{
if (n < 0) {
VG_(message)(Vg_UserMsg, "Warning: silly arg (%d) to memalign()", n);
if (complain_about_silly_args(n, "memalign")) {
return NULL;
} else {
return MAC_(new_block) ( 0, n, align,

@@ -240,11 +257,9 @@ void* SK_(memalign) ( Int align, Int n )
}
}
void* SK_(calloc) ( Int nmemb, Int size1 )
void* SK_(calloc) ( SizeT nmemb, SizeT size1 )
{
if (nmemb < 0 || size1 < 0) {
VG_(message)(Vg_UserMsg, "Warning: silly args (%d,%d) to calloc()",
nmemb, size1 );
if (complain_about_silly_args2(nmemb, size1)) {
return NULL;
} else {
return MAC_(new_block) ( 0, nmemb*size1, VG_(clo_alignment),
@@ -255,7 +270,7 @@ void* SK_(calloc) ( Int nmemb, Int size1 )
static
void die_and_free_mem ( MAC_Chunk* mc,
MAC_Chunk** prev_chunks_next_ptr, UInt rzB )
MAC_Chunk** prev_chunks_next_ptr, SizeT rzB )
{
/* Note: ban redzones again -- just in case user de-banned them
with a client request... */

@@ -321,7 +336,7 @@ void SK_(__builtin_vec_delete) ( void* p )
MAC_(handle_free)((Addr)p, VG_(vg_malloc_redzone_szB), MAC_AllocNewVec);
}
void* SK_(realloc) ( void* p, Int new_size )
void* SK_(realloc) ( void* p, SizeT new_size )
{
MAC_Chunk *mc;
MAC_Chunk **prev_chunks_next_ptr;

@@ -334,11 +349,8 @@ void* SK_(realloc) ( void* p, Int new_size )
cmalloc_n_mallocs ++;
cmalloc_bs_mallocd += new_size;
if (new_size < 0) {
VG_(message)(Vg_UserMsg,
"Warning: silly arg (%d) to realloc()", new_size );
if (complain_about_silly_args(new_size, "realloc"))
return NULL;
}
/* First try and find the block. */
mc = (MAC_Chunk*)VG_(HT_get_node) ( MAC_(malloc_list), (UInt)p,

@@ -465,7 +477,7 @@ void MAC_(destroy_mempool)(Addr pool)
VG_(free)(mp);
}
void MAC_(mempool_alloc)(Addr pool, Addr addr, UInt size)
void MAC_(mempool_alloc)(Addr pool, Addr addr, SizeT size)
{
MAC_Mempool* mp;
MAC_Mempool** prev_next;

@@ -514,8 +526,8 @@ void MAC_(mempool_free)(Addr pool, Addr addr)
typedef
struct {
UInt nblocks;
UInt nbytes;
UInt nblocks;
SizeT nbytes;
}
MallocStats;

@@ -304,14 +304,14 @@ extern void MAC_(clear_MAC_Error) ( MAC_Error* err_extra );
extern Bool MAC_(shared_recognised_suppression) ( Char* name, Supp* su );
extern void* MAC_(new_block) ( Addr p, UInt size, UInt align, UInt rzB,
extern void* MAC_(new_block) ( Addr p, SizeT size, SizeT align, UInt rzB,
Bool is_zeroed, MAC_AllocKind kind,
VgHashTable table);
extern void MAC_(handle_free) ( Addr p, UInt rzB, MAC_AllocKind kind );
extern void MAC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed);
extern void MAC_(destroy_mempool)(Addr pool);
extern void MAC_(mempool_alloc)(Addr pool, Addr addr, UInt size);
extern void MAC_(mempool_alloc)(Addr pool, Addr addr, SizeT size);
extern void MAC_(mempool_free)(Addr pool, Addr addr);
extern void MAC_(record_address_error) ( ThreadId tid, Addr a,