From daf507dee25a52c2374d14916daaeda1dea7c1c2 Mon Sep 17 00:00:00 2001
From: Tom Hughes
Date: Thu, 6 Oct 2005 09:00:17 +0000
Subject: [PATCH] Fix realloc wrappers to handle the out of memory case
 properly - if the call to VG_(cli_malloc) returns NULL then don't try to
 copy the data or register a new block and just leave the old block in
 place instead, but still return NULL to the caller.

Fixes bug 109487 and its duplicates.

git-svn-id: svn://svn.valgrind.org/valgrind/trunk@4875
---
 helgrind/hg_main.c             | 28 ++++++++++++++--------------
 massif/ms_main.c               | 30 +++++++++++++++++-------------
 memcheck/mac_malloc_wrappers.c | 31 +++++++++++++++++--------------
 3 files changed, 48 insertions(+), 41 deletions(-)

diff --git a/helgrind/hg_main.c b/helgrind/hg_main.c
index 77dc1f1ec..db3498fe7 100644
--- a/helgrind/hg_main.c
+++ b/helgrind/hg_main.c
@@ -1977,7 +1977,6 @@ static void* hg_realloc ( ThreadId tid, void* p, SizeT new_size )
 {
    HG_Chunk  *hc;
    HG_Chunk **prev_chunks_next_ptr;
-   Int        i;
 
    /* First try and find the block. */
    hc = (HG_Chunk*)VG_(HT_get_node) ( hg_malloc_list, (UWord)p,
@@ -2005,22 +2004,23 @@ static void* hg_realloc ( ThreadId tid, void* p, SizeT new_size )
       /* Get new memory */
       p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
 
-      /* First half kept and copied, second half new */
-      copy_address_range_state( (Addr)p, p_new, hc->size );
-      hg_new_mem_heap ( p_new+hc->size, new_size-hc->size,
-                        /*inited*/False );
+      if (p_new) {
+         /* First half kept and copied, second half new */
+         copy_address_range_state( (Addr)p, p_new, hc->size );
+         hg_new_mem_heap ( p_new+hc->size, new_size-hc->size,
+                           /*inited*/False );
 
-      /* Copy from old to new */
-      for (i = 0; i < hc->size; i++)
-         ((UChar*)p_new)[i] = ((UChar*)p)[i];
+         /* Copy from old to new */
+         VG_(memcpy)((void *)p_new, p, hc->size);
 
-      /* Free old memory */
-      die_and_free_mem ( tid, hc, prev_chunks_next_ptr );
+         /* Free old memory */
+         die_and_free_mem ( tid, hc, prev_chunks_next_ptr );
 
-      /* this has to be after die_and_free_mem, otherwise the
-         former succeeds in shorting out the new block, not the
-         old, in the case when both are on the same list. */
-      add_HG_Chunk ( tid, p_new, new_size );
+         /* this has to be after die_and_free_mem, otherwise the
+            former succeeds in shorting out the new block, not the
+            old, in the case when both are on the same list. */
+         add_HG_Chunk ( tid, p_new, new_size );
+      }
 
       return (void*)p_new;
    }
diff --git a/massif/ms_main.c b/massif/ms_main.c
index cc4c13831..c373c866b 100644
--- a/massif/ms_main.c
+++ b/massif/ms_main.c
@@ -802,22 +802,26 @@ static void* ms_realloc ( ThreadId tid, void* p_old, SizeT new_size )
 
    } else {
       // new size is bigger; make new block, copy shared contents, free old
      p_new = VG_(cli_malloc)(VG_(clo_alignment), new_size);
-      VG_(memcpy)(p_new, p_old, old_size);
-      VG_(cli_free)(p_old);
+      if (p_new) {
+         VG_(memcpy)(p_new, p_old, old_size);
+         VG_(cli_free)(p_old);
+      }
    }
-
-   old_where = hc->where;
-   new_where = get_XCon( tid, /*custom_malloc*/False);
-   // Update HP_Chunk
-   hc->data  = (Addr)p_new;
-   hc->size  = new_size;
-   hc->where = new_where;
+   if (p_new) {
+      old_where = hc->where;
+      new_where = get_XCon( tid, /*custom_malloc*/False);
 
-   // Update XPt curr_space fields
-   if (clo_heap) {
-      if (0 != old_size) update_XCon(old_where, -old_size);
-      if (0 != new_size) update_XCon(new_where, new_size);
+      // Update HP_Chunk
+      hc->data  = (Addr)p_new;
+      hc->size  = new_size;
+      hc->where = new_where;
+
+      // Update XPt curr_space fields
+      if (clo_heap) {
+         if (0 != old_size) update_XCon(old_where, -old_size);
+         if (0 != new_size) update_XCon(new_where, new_size);
+      }
    }
 
    // Now insert the new hc (with a possibly new 'data' field) into
diff --git a/memcheck/mac_malloc_wrappers.c b/memcheck/mac_malloc_wrappers.c
index 019e248c1..875c062b8 100644
--- a/memcheck/mac_malloc_wrappers.c
+++ b/memcheck/mac_malloc_wrappers.c
@@ -390,23 +390,26 @@ void* MAC_(realloc) ( ThreadId tid, void* p_old, SizeT new_size )
 
       /* Get new memory */
       Addr a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
 
-      /* First half kept and copied, second half new, red zones as normal */
-      MAC_(ban_mem_heap) ( a_new-MAC_MALLOC_REDZONE_SZB, MAC_MALLOC_REDZONE_SZB );
-      MAC_(copy_mem_heap)( (Addr)p_old, a_new, mc->size );
-      MAC_(new_mem_heap) ( a_new+mc->size, new_size-mc->size, /*init'd*/False );
-      MAC_(ban_mem_heap) ( a_new+new_size, MAC_MALLOC_REDZONE_SZB );
+      if (a_new) {
+         /* First half kept and copied, second half new, red zones as normal */
+         MAC_(ban_mem_heap) ( a_new-MAC_MALLOC_REDZONE_SZB, MAC_MALLOC_REDZONE_SZB );
+         MAC_(copy_mem_heap)( (Addr)p_old, a_new, mc->size );
+         MAC_(new_mem_heap) ( a_new+mc->size, new_size-mc->size, /*init'd*/False );
+         MAC_(ban_mem_heap) ( a_new+new_size, MAC_MALLOC_REDZONE_SZB );
 
-      /* Copy from old to new */
-      VG_(memcpy)((void*)a_new, p_old, mc->size);
+         /* Copy from old to new */
+         VG_(memcpy)((void*)a_new, p_old, mc->size);
 
-      /* Free old memory */
-      /* Nb: we have to allocate a new MAC_Chunk for the new memory rather
-         than recycling the old one, so that any erroneous accesses to the
-         old memory are reported. */
-      die_and_free_mem ( tid, mc, MAC_MALLOC_REDZONE_SZB );
+         /* Free old memory */
+         /* Nb: we have to allocate a new MAC_Chunk for the new memory rather
+            than recycling the old one, so that any erroneous accesses to the
+            old memory are reported. */
+         die_and_free_mem ( tid, mc, MAC_MALLOC_REDZONE_SZB );
+
+         // Allocate a new chunk.
+         mc = create_MAC_Chunk( tid, a_new, new_size, MAC_AllocMalloc );
+      }
 
-      // Allocate a new chunk.
-      mc = create_MAC_Chunk( tid, a_new, new_size, MAC_AllocMalloc );
       p_new = (void*)a_new;
    }
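
Note: all three hunks apply the same shape of fix: check the result of
VG_(cli_malloc) before copying, freeing the old block, or registering new
metadata, and return NULL on failure so the caller's original block stays
valid, matching C realloc() semantics. The sketch below only illustrates
that control flow and is not code from this patch; alloc_block, free_block
and BlockInfo are hypothetical stand-ins for the tool-specific allocator
calls and chunk records (VG_(cli_malloc), die_and_free_mem, and the
HG_Chunk / HP_Chunk / MAC_Chunk structures).

   #include <stdlib.h>
   #include <string.h>

   /* Hypothetical stand-ins for the tool-side allocator; plain malloc/free
      keep the sketch self-contained. */
   static void* alloc_block(size_t size) { return malloc(size); }
   static void  free_block(void* p)      { free(p); }

   /* Hypothetical per-block record, playing the role of the per-tool
      chunk structures. */
   typedef struct { void* data; size_t size; } BlockInfo;

   static void* realloc_wrapper(BlockInfo* old, size_t new_size)
   {
      void* p_new = alloc_block(new_size);

      if (p_new == NULL) {
         /* Out of memory: leave the old block and its record untouched
            and report failure to the caller. */
         return NULL;
      }

      /* Success: copy the surviving contents, free the old block, then
         update the record to describe the new block. */
      memcpy(p_new, old->data, old->size < new_size ? old->size : new_size);
      free_block(old->data);
      old->data = p_new;
      old->size = new_size;
      return p_new;
   }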