Allow multiple rw and rx mappings in the ELF debuginfo reader.

Fixes #296318 (patch on comment 8).  (Jiří Hruška, jirka@fud.cz)


git-svn-id: svn://svn.valgrind.org/valgrind/trunk@12735
commit 989f74ff0d (parent 096ad47ee9)
Author: Julian Seward
Date: 2012-07-13 11:24:05 +00:00
6 changed files with 417 additions and 366 deletions
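
For orientation before the diff: the central change is to replace the single rx/rw/ro mapping fields in struct _DebugInfoFSM with a growable XArray of per-mapping records, one record per observed mapping of the object. A condensed sketch of the new representation and the scan idiom used throughout the patch (paraphrased from the diff, not a verbatim excerpt):

/* One observed mapping of an object file (full definition in
   priv_storage.h below). */
struct _DebugInfoMapping {
   Addr  avma;       /* address the mapping starts at, in the process */
   SizeT size;       /* length of the mapping in bytes */
   OffT  foff;       /* file offset the mapping corresponds to */
   Bool  rx, rw, ro; /* permissions observed for this mapping */
};

/* di->fsm.maps is an XArray of these; readers iterate and filter: */
Word i;
for (i = 0; i < VG_(sizeXA)(di->fsm.maps); i++) {
   struct _DebugInfoMapping* map = VG_(indexXA)(di->fsm.maps, i);
   if (map->rx && map->size > 0) {
      /* ... use map->avma, map->size, map->foff ... */
   }
}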

File: coregrind/m_debuginfo/debuginfo.c

@@ -178,6 +178,9 @@ DebugInfo* alloc_DebugInfo( const UChar* filename )
di = ML_(dinfo_zalloc)("di.debuginfo.aDI.1", sizeof(DebugInfo));
di->handle = handle_counter++;
di->fsm.filename = ML_(dinfo_strdup)("di.debuginfo.aDI.2", filename);
di->fsm.maps = VG_(newXA)(
ML_(dinfo_zalloc), "di.debuginfo.aDI.3",
ML_(dinfo_free), sizeof(struct _DebugInfoMapping));
/* Everything else -- pointers, sizes, arrays -- is zeroed by
ML_(dinfo_zalloc). Now set up the debugging-output flags. */
@@ -204,6 +207,7 @@ static void free_DebugInfo ( DebugInfo* di )
GExpr* gexpr;
vg_assert(di != NULL);
if (di->fsm.maps) VG_(deleteXA)(di->fsm.maps);
if (di->fsm.filename) ML_(dinfo_free)(di->fsm.filename);
if (di->soname) ML_(dinfo_free)(di->soname);
if (di->loctab) ML_(dinfo_free)(di->loctab);
@@ -385,32 +389,20 @@ static Bool ranges_overlap (Addr s1, SizeT len1, Addr s2, SizeT len2 )
}
/* Do the basic rx_ and rw_ mappings of the two DebugInfos overlap in
any way? */
/* Do the basic mappings of the two DebugInfos overlap in any way? */
static Bool do_DebugInfos_overlap ( DebugInfo* di1, DebugInfo* di2 )
{
Word i, j;
vg_assert(di1);
vg_assert(di2);
if (di1->fsm.have_rx_map && di2->fsm.have_rx_map
&& ranges_overlap(di1->fsm.rx_map_avma, di1->fsm.rx_map_size,
di2->fsm.rx_map_avma, di2->fsm.rx_map_size))
return True;
if (di1->fsm.have_rx_map && di2->fsm.have_rw_map
&& ranges_overlap(di1->fsm.rx_map_avma, di1->fsm.rx_map_size,
di2->fsm.rw_map_avma, di2->fsm.rw_map_size))
return True;
if (di1->fsm.have_rw_map && di2->fsm.have_rx_map
&& ranges_overlap(di1->fsm.rw_map_avma, di1->fsm.rw_map_size,
di2->fsm.rx_map_avma, di2->fsm.rx_map_size))
return True;
if (di1->fsm.have_rw_map && di2->fsm.have_rw_map
&& ranges_overlap(di1->fsm.rw_map_avma, di1->fsm.rw_map_size,
di2->fsm.rw_map_avma, di2->fsm.rw_map_size))
return True;
for (i = 0; i < VG_(sizeXA)(di1->fsm.maps); i++) {
struct _DebugInfoMapping* map1 = VG_(indexXA)(di1->fsm.maps, i);
for (j = 0; j < VG_(sizeXA)(di2->fsm.maps); j++) {
struct _DebugInfoMapping* map2 = VG_(indexXA)(di2->fsm.maps, j);
if (ranges_overlap(map1->avma, map1->size, map2->avma, map2->size))
return True;
}
}
return False;
}
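
The rewritten overlap test above is a pairwise scan: with the fixed rx/rw fields gone, every mapping of di1 is checked against every mapping of di2. The helper it leans on, ranges_overlap, lies outside this hunk; for nonempty, non-wrapping ranges it amounts to the usual interval check, roughly as below (a sketch, not the actual helper, which also has to guard against wrapping ranges):

/* Sketch: [s1, s1+len1) and [s2, s2+len2) overlap iff each range
   starts before the other one ends. */
static Bool ranges_overlap_sketch ( Addr s1, SizeT len1, Addr s2, SizeT len2 )
{
   if (len1 == 0 || len2 == 0)
      return False;
   return s1 < s2 + len2 && s2 < s1 + len1;
}

The quadratic cost of the pairwise scan is harmless here, since an object typically has only a handful of mappings.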
@@ -441,8 +433,7 @@ static void discard_marked_DebugInfos ( void )
/* Discard any elements of debugInfo_list which overlap with diRef.
Clearly diRef must have its rx_ and rw_ mapping information set to
something sane. */
Clearly diRef must have its mapping information set to something sane. */
static void discard_DebugInfos_which_overlap_with ( DebugInfo* diRef )
{
DebugInfo* di;
@@ -490,41 +481,67 @@ static DebugInfo* find_or_create_DebugInfo_for ( UChar* filename )
static void check_CFSI_related_invariants ( DebugInfo* di )
{
DebugInfo* di2 = NULL;
Bool has_nonempty_rx = False;
Bool cfsi_fits = False;
Word i, j;
vg_assert(di);
/* This fn isn't called until after debuginfo for this object has
been successfully read. And that shouldn't happen until we have
both a r-x and rw- mapping for the object. Hence: */
vg_assert(di->fsm.have_rx_map);
vg_assert(di->fsm.have_rw_map);
/* degenerate case: r-x section is empty */
if (di->fsm.rx_map_size == 0) {
for (i = 0; i < VG_(sizeXA)(di->fsm.maps); i++) {
struct _DebugInfoMapping* map = VG_(indexXA)(di->fsm.maps, i);
/* We are interested in r-x mappings only */
if (!map->rx)
continue;
/* degenerate case: r-x section is empty */
if (map->size == 0)
continue;
has_nonempty_rx = True;
/* normal case: r-x section is nonempty */
/* invariant (0) */
vg_assert(map->size > 0);
/* invariant (1) */
for (di2 = debugInfo_list; di2; di2 = di2->next) {
if (di2 == di)
continue;
for (j = 0; j < VG_(sizeXA)(di2->fsm.maps); j++) {
struct _DebugInfoMapping* map2 = VG_(indexXA)(di2->fsm.maps, j);
if (!map2->rx || map2->size == 0)
continue;
vg_assert(!ranges_overlap(map->avma, map->size,
map2->avma, map2->size));
}
}
di2 = NULL;
/* invariant (2) */
if (di->cfsi) {
vg_assert(di->cfsi_minavma <= di->cfsi_maxavma); /* duh! */
/* Assume the cfsi fits completely into one individual mapping
for now. This might need to be improved/reworked later. */
if (di->cfsi_minavma >= map->avma &&
di->cfsi_maxavma < map->avma + map->size)
cfsi_fits = True;
}
}
/* degenerate case: all r-x sections are empty */
if (!has_nonempty_rx) {
vg_assert(di->cfsi == NULL);
return;
}
/* normal case: r-x section is nonempty */
/* invariant (0) */
vg_assert(di->fsm.rx_map_size > 0);
/* invariant (1) */
for (di2 = debugInfo_list; di2; di2 = di2->next) {
if (di2 == di)
continue;
if (di2->fsm.rx_map_size == 0)
continue;
vg_assert(
di->fsm.rx_map_avma + di->fsm.rx_map_size <= di2->fsm.rx_map_avma
|| di2->fsm.rx_map_avma + di2->fsm.rx_map_size <= di->fsm.rx_map_avma
);
}
di2 = NULL;
/* invariant (2) */
if (di->cfsi) {
vg_assert(di->cfsi_minavma <= di->cfsi_maxavma); /* duh! */
vg_assert(di->cfsi_minavma >= di->fsm.rx_map_avma);
vg_assert(di->cfsi_maxavma < di->fsm.rx_map_avma + di->fsm.rx_map_size);
}
/* invariant (2) - cont. */
if (di->cfsi)
vg_assert(cfsi_fits);
/* invariants (3) and (4) */
if (di->cfsi) {
Word i;
vg_assert(di->cfsi_used > 0);
vg_assert(di->cfsi_size > 0);
for (i = 0; i < di->cfsi_used; i++) {
@@ -601,9 +618,9 @@ static ULong di_notify_ACHIEVE_ACCEPT_STATE ( struct _DebugInfo* di )
TRACE_SYMTAB("\n");
/* We're going to read symbols and debug info for the avma
ranges [rx_map_avma, +rx_map_size) and [rw_map_avma,
+rw_map_size). First get rid of any other DebugInfos which
overlap either of those ranges (to avoid total confusion). */
ranges specified in the _DebugInfoFsm mapping array. First
get rid of any other DebugInfos which overlap any of those
ranges (to avoid total confusion). */
discard_DebugInfos_which_overlap_with( di );
/* .. and acquire new info. */
@@ -874,41 +891,20 @@ ULong VG_(di_notify_mmap)( Addr a, Bool allow_SkFileV, Int use_fd )
di = find_or_create_DebugInfo_for( filename );
vg_assert(di);
if (is_rx_map) {
/* We have a text-like mapping. Note the details. */
if (!di->fsm.have_rx_map) {
di->fsm.have_rx_map = True;
di->fsm.rx_map_avma = a;
di->fsm.rx_map_size = seg->end + 1 - seg->start;
di->fsm.rx_map_foff = seg->offset;
} else {
/* FIXME: complain about a second text-like mapping */
}
}
/* Note the details about the mapping. */
struct _DebugInfoMapping map;
map.avma = a;
map.size = seg->end + 1 - seg->start;
map.foff = seg->offset;
map.rx = is_rx_map;
map.rw = is_rw_map;
map.ro = is_ro_map;
VG_(addToXA)(di->fsm.maps, &map);
if (is_rw_map) {
/* We have a data-like mapping. Note the details. */
if (!di->fsm.have_rw_map) {
di->fsm.have_rw_map = True;
di->fsm.rw_map_avma = a;
di->fsm.rw_map_size = seg->end + 1 - seg->start;
di->fsm.rw_map_foff = seg->offset;
} else {
/* FIXME: complain about a second data-like mapping */
}
}
if (is_ro_map) {
/* We have a r-- mapping. Note the details (OSX 10.7, 32-bit only) */
if (!di->fsm.have_ro_map) {
di->fsm.have_ro_map = True;
di->fsm.ro_map_avma = a;
di->fsm.ro_map_size = seg->end + 1 - seg->start;
di->fsm.ro_map_foff = seg->offset;
} else {
/* FIXME: complain about a second r-- mapping */
}
}
/* Update flags about what kind of mappings we've already seen. */
di->fsm.have_rx_map |= is_rx_map;
di->fsm.have_rw_map |= is_rw_map;
di->fsm.have_ro_map |= is_ro_map;
/* So, finally, are we in an accept state? */
if (di->fsm.have_rx_map && di->fsm.have_rw_map && !di->have_dinfo) {
@@ -977,6 +973,8 @@ void VG_(di_notify_vm_protect)( Addr a, SizeT len, UInt prot )
is found, conclude we're in an accept state and read debuginfo
accordingly. */
DebugInfo* di;
struct _DebugInfoMapping *map = NULL;
Word i;
for (di = debugInfo_list; di; di = di->next) {
vg_assert(di->fsm.filename);
if (di->have_dinfo)
@@ -987,36 +985,45 @@ void VG_(di_notify_vm_protect)( Addr a, SizeT len, UInt prot )
continue; /* rx- mapping already exists */
if (!di->fsm.have_rw_map)
continue; /* need to have a rw- mapping */
if (di->fsm.ro_map_avma != a || di->fsm.ro_map_size != len)
continue; /* this isn't an upgrade of the r-- mapping */
/* Try to find a mapping matching the memory area. */
for (i = 0; i < VG_(sizeXA)(di->fsm.maps); i++) {
map = (struct _DebugInfoMapping*)VG_(indexXA)(di->fsm.maps, i);
if (map->ro && map->avma == a && map->size == len)
break;
map = NULL;
}
if (!map)
continue; /* this isn't an upgrade of an r-- mapping */
/* looks like we're in luck! */
break;
}
if (di == NULL)
return; /* didn't find anything */
/* Do the upgrade. Copy the RO map info into the RX map info and
pretend we never saw the RO map at all. */
vg_assert(di->fsm.have_rw_map);
/* Do the upgrade. Simply update the flags of the mapping
and pretend we never saw the RO map at all. */
vg_assert(di->fsm.have_ro_map);
vg_assert(!di->fsm.have_rx_map);
map->rx = True;
map->ro = False;
di->fsm.have_rx_map = True;
di->fsm.rx_map_avma = di->fsm.ro_map_avma;
di->fsm.rx_map_size = di->fsm.ro_map_size;
di->fsm.rx_map_foff = di->fsm.ro_map_foff;
di->fsm.have_ro_map = False;
di->fsm.ro_map_avma = 0;
di->fsm.ro_map_size = 0;
di->fsm.ro_map_foff = 0;
/* See if there are any more ro mappings */
for (i = 0; i < VG_(sizeXA)(di->fsm.maps); i++) {
map = (struct _DebugInfoMapping*)VG_(indexXA)(di->fsm.maps, i);
if (map->ro) {
di->fsm.have_ro_map = True;
break;
}
}
/* And since we're now in an accept state, read debuginfo. Finally. */
ULong di_handle __attribute__((unused))
= di_notify_ACHIEVE_ACCEPT_STATE( di );
/* di_handle is ignored. That's not a problem per se -- it just
means nobody will ever be able to refer to this debuginfo by
handle since nobody will know what the handle value is. */
/* Check if we're now in an accept state and read debuginfo. Finally. */
if (di->fsm.have_rx_map && di->fsm.have_rw_map && !di->have_dinfo) {
ULong di_handle __attribute__((unused))
= di_notify_ACHIEVE_ACCEPT_STATE( di );
/* di_handle is ignored. That's not a problem per se -- it just
means nobody will ever be able to refer to this debuginfo by
handle since nobody will know what the handle value is. */
}
}
@@ -1273,6 +1280,31 @@ void VG_(di_discard_ALL_debuginfo)( void )
}
struct _DebugInfoMapping* ML_(find_rx_mapping) ( struct _DebugInfo* di,
Addr lo, Addr hi )
{
Word i;
vg_assert(lo <= hi);
/* Optimization: Try to use the last matched rx mapping first */
if ( di->last_rx_map
&& lo >= di->last_rx_map->avma
&& hi < di->last_rx_map->avma + di->last_rx_map->size)
return di->last_rx_map;
for (i = 0; i < VG_(sizeXA)(di->fsm.maps); i++) {
struct _DebugInfoMapping* map = VG_(indexXA)(di->fsm.maps, i);
if ( map->rx && map->size > 0
&& lo >= map->avma && hi < map->avma + map->size) {
di->last_rx_map = map;
return map;
}
}
return NULL;
}
/*------------------------------------------------------------*/
/*--- Use of symbol table & location info to create ---*/
/*--- plausible-looking stack dumps. ---*/
@@ -1300,9 +1332,7 @@ static void search_all_symtabs ( Addr ptr, /*OUT*/DebugInfo** pdi,
See Comment_Regarding_Text_Range_Checks in storage.c for
details. */
inRange = di->fsm.have_rx_map
&& di->fsm.rx_map_size > 0
&& di->fsm.rx_map_avma <= ptr
&& ptr < di->fsm.rx_map_avma + di->fsm.rx_map_size;
&& (ML_(find_rx_mapping)(di, ptr, ptr) != NULL);
} else {
inRange = (di->data_present
&& di->data_size > 0

File: coregrind/m_debuginfo/priv_storage.h

@@ -421,13 +421,9 @@ ML_(cmp_for_DiAddrRange_range) ( const void* keyV, const void* elemV );
true. The initial state is one in which we have no observations,
so have_rx_map and have_rw_map are both false.
This is all rather ad-hoc; for example it has no way to record more
than one rw or rx mapping for a given object, not because such
events have never been observed, but because we've never needed to
note more than the first one of any such in order to decide when to
read debug info. It may be that in future we need to track more
state in order to make the decision, so this struct would then get
expanded.
This all started as a rather ad-hoc solution, but was further
expanded to handle weird object layouts, e.g. more than one rw
or rx mapping for one binary.
The normal sequence of events is one of
@@ -444,28 +440,22 @@ ML_(cmp_for_DiAddrRange_range) ( const void* keyV, const void* elemV );
where the upgrade is done by a call to vm_protect. Hence we
need to also track this possibility.
*/
struct _DebugInfoMapping
{
Addr avma; /* these fields record the file offset, length */
SizeT size; /* and map address of each mapping */
OffT foff;
Bool rx, rw, ro; /* memory access flags for this mapping */
};
struct _DebugInfoFSM
{
/* --- all targets --- */
UChar* filename; /* in mallocville (VG_AR_DINFO) */
UChar* filename; /* in mallocville (VG_AR_DINFO) */
XArray* maps; /* XArray of _DebugInfoMapping structs */
Bool have_rx_map; /* did we see a r?x mapping yet for the file? */
Bool have_rw_map; /* did we see a rw? mapping yet for the file? */
Addr rx_map_avma; /* these fields record the file offset, length */
SizeT rx_map_size; /* and map address of the r?x mapping we believe */
OffT rx_map_foff; /* is the .text segment mapping */
Addr rw_map_avma; /* ditto, for the rw? mapping we believe is the */
SizeT rw_map_size; /* .data segment mapping */
OffT rw_map_foff;
/* --- OSX 10.7, 32-bit only --- */
Bool have_ro_map; /* did we see a r-- mapping yet for the file? */
Addr ro_map_avma; /* file offset, length, avma for said mapping */
SizeT ro_map_size;
OffT ro_map_foff;
};
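
With the array in place, the have_rx_map / have_rw_map / have_ro_map flags become summaries of the maps contents; VG_(di_notify_mmap) in debuginfo.c above keeps them current incrementally (have_rx_map |= is_rx_map, and so on). Equivalently they could be recomputed from scratch; a minimal sketch, using a hypothetical helper name:

/* Hypothetical helper, for illustration only: derive the summary
   flags from the maps array. The patch updates them incrementally
   instead. */
static void recompute_fsm_flags ( struct _DebugInfoFSM* fsm )
{
   Word i;
   fsm->have_rx_map = fsm->have_rw_map = fsm->have_ro_map = False;
   for (i = 0; i < VG_(sizeXA)(fsm->maps); i++) {
      struct _DebugInfoMapping* map = VG_(indexXA)(fsm->maps, i);
      fsm->have_rx_map |= map->rx;
      fsm->have_rw_map |= map->rw;
      fsm->have_ro_map |= map->ro;
   }
   /* Accept state, as before: have_rx_map && have_rw_map. */
}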
@@ -545,17 +535,17 @@ struct _DebugInfo {
Comment_on_IMPORTANT_CFSI_REPRESENTATIONAL_INVARIANTS: we require that
either (rx_map_size == 0 && cfsi == NULL) (the degenerate case)
either (size of all rx maps == 0 && cfsi == NULL) (the degenerate case)
or the normal case, which is the AND of the following:
(0) rx_map_size > 0
(1) no two DebugInfos with rx_map_size > 0
have overlapping [rx_map_avma,+rx_map_size)
(2) [cfsi_minavma,cfsi_maxavma] does not extend
beyond [rx_map_avma,+rx_map_size); that is, the former is a
subrange or equal to the latter.
(0) size of at least one rx mapping > 0
(1) no two DebugInfos with some rx mapping of size > 0
have overlapping rx mappings
(2) [cfsi_minavma,cfsi_maxavma] does not extend beyond
[avma,+size) of one rx mapping; that is, the former
is a subrange or equal to the latter.
(3) all DiCfSI in the cfsi array have ranges that fall within
[rx_map_avma,+rx_map_size).
[avma,+size) of that rx mapping.
(4) all DiCfSI in the cfsi array are non-overlapping
The cumulative effect of these restrictions is to ensure that
@@ -808,6 +798,11 @@ struct _DebugInfo {
/* An array of guarded DWARF3 expressions. */
XArray* admin_gexprs;
/* Cached last rx mapping matched and returned by ML_(find_rx_mapping).
This helps performance a lot during ML_(addLineInfo) etc., which can
easily be invoked hundreds of thousands of times. */
struct _DebugInfoMapping* last_rx_map;
};
/* --------------------- functions --------------------- */
@@ -876,6 +871,11 @@ extern Word ML_(search_one_cfitab) ( struct _DebugInfo* di, Addr ptr );
if not found. Binary search. */
extern Word ML_(search_one_fpotab) ( struct _DebugInfo* di, Addr ptr );
/* Helper function for the most often needed searching for an rx mapping
containing the specified address range. */
extern struct _DebugInfoMapping* ML_(find_rx_mapping) ( struct _DebugInfo* di,
Addr lo, Addr hi );
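
Typical call-site usage, as seen in search_all_symtabs and ML_(addLineInfo) elsewhere in this patch (fragment only; di, ptr, this and next come from the surrounding context):

/* Point query: pass the same address for lo and hi. */
Bool in_rx = ML_(find_rx_mapping)(di, ptr, ptr) != NULL;

/* Range query: NULL means no single rx mapping wholly contains
   [this, next-1], so the line info entry gets dropped. */
if (ML_(find_rx_mapping)(di, this, next - 1) == NULL) {
   /* ... ignore the entry ... */
}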
/* ------ Misc ------ */
/* Show a non-fatal debug info reading error. Use vg_panic if

File: coregrind/m_debuginfo/readelf.c

@@ -554,9 +554,8 @@ Bool get_elf_symbol_info (
background. */
Bool in_rx;
vg_assert(di->fsm.have_rx_map);
in_rx = (!(*sym_avma_out + *sym_size_out <= di->fsm.rx_map_avma
|| *sym_avma_out >= di->fsm.rx_map_avma
+ di->fsm.rx_map_size));
in_rx = (ML_(find_rx_mapping)(di, *sym_avma_out,
*sym_avma_out + *sym_size_out) != NULL);
if (in_text)
vg_assert(in_rx);
if (!in_rx) {
@@ -1266,12 +1265,6 @@ Word file_offset_from_svma ( /*OUT*/Bool* ok,
supplied DebugInfo.
*/
/* Temporarily holds information copied out of PT_LOAD entries
in ML_(read_elf_debug_info). */
typedef
struct { Addr svma_base; Addr svma_limit; PtrdiffT bias; }
RangeAndBias;
Bool ML_(read_elf_debug_info) ( struct _DebugInfo* di )
{
/* This function is long and complex. That, and the presence of
@@ -1283,7 +1276,7 @@ Bool ML_(read_elf_debug_info) ( struct _DebugInfo* di )
/* TOPLEVEL */
Bool res, ok;
SysRes fd, sres;
Word i;
Word i, j;
Bool dynbss_present = False;
Bool sdynbss_present = False;
@@ -1312,19 +1305,19 @@ Bool ML_(read_elf_debug_info) ( struct _DebugInfo* di )
UChar* shdr_strtab_img = NULL;
/* SVMAs covered by rx and rw segments and corresponding biases.
We keep separate lists of rx and rw areas. Each can have up to
N_RX_RW_AREAS entries. Normally each object would provide just
one rx and one rw area, but Mike Hommey's elfhack creates
objects with two rx PT_LOAD entries, hence the generality. */
const Int N_RX_RW_AREAS = 2;
Normally each object would provide just one rx and one rw area,
but various ELF mangling tools create objects with multiple
such entries, hence the generality. */
typedef
struct {
Addr svma_base;
Addr svma_limit;
PtrdiffT bias;
Bool exec;
}
RangeAndBias;
RangeAndBias rx[N_RX_RW_AREAS];
RangeAndBias rw[N_RX_RW_AREAS];
Word n_rx = 0; /* 0 .. N_RX_RW_AREAS */
Word n_rw = 0; /* 0 .. N_RX_RW_AREAS */
/* Pointless paranoia: */
VG_(memset)( rx, 0, sizeof(rx) );
VG_(memset)( rw, 0, sizeof(rw) );
XArray* /* of RangeAndBias */ svma_ranges = NULL;
/* Build ID */
Char* buildid = NULL;
@@ -1332,8 +1325,6 @@ Bool ML_(read_elf_debug_info) ( struct _DebugInfo* di )
vg_assert(di);
vg_assert(di->fsm.have_rx_map == True);
vg_assert(di->fsm.have_rw_map == True);
vg_assert(di->fsm.rx_map_size > 0);
vg_assert(di->fsm.rw_map_size > 0);
vg_assert(di->have_dinfo == False);
vg_assert(di->fsm.filename);
vg_assert(!di->symtab);
@@ -1343,19 +1334,35 @@ Bool ML_(read_elf_debug_info) ( struct _DebugInfo* di )
vg_assert(!di->strchunks);
vg_assert(!di->soname);
/* If these don't hold true, it means that m_syswrap/m_aspacemgr
managed to do a mapping where the start isn't page aligned.
Which sounds pretty bogus to me. */
vg_assert(VG_IS_PAGE_ALIGNED(di->fsm.rx_map_avma));
vg_assert(VG_IS_PAGE_ALIGNED(di->fsm.rw_map_avma));
{
Bool has_nonempty_rx = False;
Bool has_nonempty_rw = False;
for (i = 0; i < VG_(sizeXA)(di->fsm.maps); i++) {
struct _DebugInfoMapping* map = VG_(indexXA)(di->fsm.maps, i);
if (map->rx) {
if (map->size > 0)
has_nonempty_rx = True;
} else if (map->rw) {
if (map->size > 0)
has_nonempty_rw = True;
} else
continue;
/* If this doesn't hold true, it means that m_syswrap/m_aspacemgr
managed to do a mapping where the start isn't page aligned.
Which sounds pretty bogus to me. */
vg_assert(VG_IS_PAGE_ALIGNED(map->avma));
}
vg_assert(has_nonempty_rx);
vg_assert(has_nonempty_rw);
}
/* ----------------------------------------------------------
At this point, there is very little information in the
DebugInfo. We only know that something that looks like an ELF
file has been mapped rx-ishly as recorded with the di->*rx_map*
fields and has also been mapped rw-ishly as recorded with the
di->*rw_map* fields. First we examine the file's ELF Program
Header, and, by comparing that against the di->*r{w,x}_map*
file has been mapped rx-ishly and rw-ishly as recorded in the
di->fsm.maps array items. First we examine the file's ELF
Program Header, and, by comparing that against the di->fsm.maps
info, try to figure out the AVMAs for the sections we care
about, that should have been mapped: text, data, sdata, bss,
got, plt, and toc.
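
The bias the PT_LOAD loop below computes for each segment is bias = map->avma - map->foff + phdr->p_offset - phdr->p_vaddr, i.e. avma = svma + bias. A worked instance, with invented numbers:

/* Invented numbers, illustration only:
     map->avma      = 0x4000000  (where this part of the file is mapped)
     map->foff      = 0x0        (file offset where the mapping starts)
     phdr->p_offset = 0x1000     (file offset of the PT_LOAD's data)
     phdr->p_vaddr  = 0x1000     (the PT_LOAD's stated SVMA)
   bias = 0x4000000 - 0x0 + 0x1000 - 0x1000 = 0x4000000
   so SVMA 0x1000 lands at AVMA 0x1000 + 0x4000000 = 0x4001000,
   exactly where its file bytes sit inside the mapping. */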
@@ -1365,8 +1372,8 @@ Bool ML_(read_elf_debug_info) ( struct _DebugInfo* di )
oimage = (Addr)NULL;
if (VG_(clo_verbosity) > 1 || VG_(clo_trace_redir))
VG_(message)(Vg_DebugMsg, "Reading syms from %s (%#lx)\n",
di->fsm.filename, di->fsm.rx_map_avma );
VG_(message)(Vg_DebugMsg, "Reading syms from %s\n",
di->fsm.filename );
/* mmap the object image aboard, so that we can read symbols and
line number info out of it. It will be munmapped immediately
@@ -1441,10 +1448,18 @@ Bool ML_(read_elf_debug_info) ( struct _DebugInfo* di )
phdr_img, phdr_nent, phdr_ent_szB);
TRACE_SYMTAB("shdr: img %p nent %ld ent_szB %ld\n",
shdr_img, shdr_nent, shdr_ent_szB);
TRACE_SYMTAB("rx_map: avma %#lx size %lu foff %lu\n",
di->fsm.rx_map_avma, di->fsm.rx_map_size, di->fsm.rx_map_foff);
TRACE_SYMTAB("rw_map: avma %#lx size %lu foff %lu\n",
di->fsm.rw_map_avma, di->fsm.rw_map_size, di->fsm.rw_map_foff);
for (i = 0; i < VG_(sizeXA)(di->fsm.maps); i++) {
struct _DebugInfoMapping* map = VG_(indexXA)(di->fsm.maps, i);
if (map->rx)
TRACE_SYMTAB("rx_map: avma %#lx size %lu foff %lu\n",
map->avma, map->size, map->foff);
}
for (i = 0; i < VG_(sizeXA)(di->fsm.maps); i++) {
struct _DebugInfoMapping* map = VG_(indexXA)(di->fsm.maps, i);
if (map->rw)
TRACE_SYMTAB("rw_map: avma %#lx size %lu foff %lu\n",
map->avma, map->size, map->foff);
}
if (phdr_nent == 0
|| !contained_within(
@@ -1478,10 +1493,12 @@ Bool ML_(read_elf_debug_info) ( struct _DebugInfo* di )
TRACE_SYMTAB("shdr: string table at %p\n", shdr_strtab_img );
svma_ranges = VG_(newXA)(ML_(dinfo_zalloc), "di.relfdi.1",
ML_(dinfo_free), sizeof(RangeAndBias));
/* TOPLEVEL */
/* Look through the program header table, and:
- copy information from suitable PT_LOAD entries into rx[] or
rw[]
- copy information from suitable PT_LOAD entries into svma_ranges
- find (or fake up) the .soname for this object.
*/
TRACE_SYMTAB("\n");
@@ -1496,10 +1513,8 @@ Bool ML_(read_elf_debug_info) ( struct _DebugInfo* di )
/* Make sure the PT_LOADable entries are in order and
non-overlapping. This in turn means the address ranges
slurped into rx[] and rw[] are in order and
slurped into svma_ranges are in order and
non-overlapping. */
vg_assert(n_rx >= 0 && n_rx <= N_RX_RW_AREAS);
vg_assert(n_rw >= 0 && n_rw <= N_RX_RW_AREAS);
if (phdr->p_type == PT_LOAD) {
TRACE_SYMTAB("PT_LOAD[%ld]: p_vaddr %#lx (prev %#lx)\n",
@@ -1516,42 +1531,41 @@ Bool ML_(read_elf_debug_info) ( struct _DebugInfo* di )
goto out;
}
prev_svma = phdr->p_vaddr;
if (phdr->p_offset >= di->fsm.rx_map_foff
&& phdr->p_offset < di->fsm.rx_map_foff + di->fsm.rx_map_size
&& phdr->p_offset + phdr->p_filesz
<= di->fsm.rx_map_foff + di->fsm.rx_map_size
&& (phdr->p_flags & (PF_R | PF_W | PF_X)) == (PF_R | PF_X)) {
if (n_rx == N_RX_RW_AREAS) {
ML_(symerr)(di, True,
"N_RX_RW_AREAS is too low; "
"increase and recompile.");
if (phdr->p_memsz > 0) {
Bool loaded = False;
for (j = 0; j < VG_(sizeXA)(di->fsm.maps); j++) {
struct _DebugInfoMapping* map = VG_(indexXA)(di->fsm.maps, j);
if ( (map->rx || map->rw)
&& phdr->p_offset >= map->foff
&& phdr->p_offset < map->foff + map->size
&& phdr->p_offset + phdr->p_filesz <= map->foff
+ map->size) {
RangeAndBias item;
item.svma_base = phdr->p_vaddr;
item.svma_limit = phdr->p_vaddr + phdr->p_memsz;
item.bias = map->avma - map->foff
+ phdr->p_offset - phdr->p_vaddr;
if ( map->rw
&& (phdr->p_flags & (PF_R | PF_W)) == (PF_R | PF_W)) {
item.exec = False;
VG_(addToXA)(svma_ranges, &item);
TRACE_SYMTAB("PT_LOAD[%ld]: acquired as rw\n", i);
loaded = True;
}
if ( map->rx
&& (phdr->p_flags & (PF_R | PF_X)) == (PF_R | PF_X)) {
item.exec = True;
VG_(addToXA)(svma_ranges, &item);
TRACE_SYMTAB("PT_LOAD[%ld]: acquired as rx\n", i);
loaded = True;
}
}
}
if (!loaded) {
ML_(symerr)(di, False,
"ELF section outside all mapped regions");
goto out;
}
rx[n_rx].svma_base = phdr->p_vaddr;
rx[n_rx].svma_limit = phdr->p_vaddr + phdr->p_memsz;
rx[n_rx].bias = di->fsm.rx_map_avma - di->fsm.rx_map_foff
+ phdr->p_offset - phdr->p_vaddr;
n_rx++;
TRACE_SYMTAB("PT_LOAD[%ld]: acquired as rx\n", i);
}
else
if (phdr->p_offset >= di->fsm.rw_map_foff
&& phdr->p_offset < di->fsm.rw_map_foff + di->fsm.rw_map_size
&& phdr->p_offset + phdr->p_filesz
<= di->fsm.rw_map_foff + di->fsm.rw_map_size
&& (phdr->p_flags & (PF_R | PF_W | PF_X)) == (PF_R | PF_W)) {
if (n_rw == N_RX_RW_AREAS) {
ML_(symerr)(di, True,
"N_RX_RW_AREAS is too low; "
"increase and recompile.");
goto out;
}
rw[n_rw].svma_base = phdr->p_vaddr;
rw[n_rw].svma_limit = phdr->p_vaddr + phdr->p_memsz;
rw[n_rw].bias = di->fsm.rw_map_avma - di->fsm.rw_map_foff
+ phdr->p_offset - phdr->p_vaddr;
n_rw++;
TRACE_SYMTAB("PT_LOAD[%ld]: acquired as rw\n", i);
}
}
@@ -1564,7 +1578,6 @@ Bool ML_(read_elf_debug_info) ( struct _DebugInfo* di )
+ phdr->p_offset);
Word stroff = -1;
UChar* strtab = NULL;
Word j;
for (j = 0; dyn_img[j].d_tag != DT_NULL; j++) {
switch (dyn_img[j].d_tag) {
case DT_SONAME: {
@@ -1608,33 +1621,36 @@ Bool ML_(read_elf_debug_info) ( struct _DebugInfo* di )
di->soname = ML_(dinfo_strdup)("di.redi.2", "NONE");
}
vg_assert(n_rx >= 0 && n_rx <= N_RX_RW_AREAS);
vg_assert(n_rw >= 0 && n_rw <= N_RX_RW_AREAS);
for (i = 0; i < n_rx; i++) {
vg_assert(rx[i].svma_limit != 0);
}
for (i = 0; i < n_rw; i++) {
vg_assert(rw[i].svma_limit != 0);
}
vg_assert(VG_(sizeXA)(svma_ranges) != 0);
/* Now read the section table. */
TRACE_SYMTAB("\n");
TRACE_SYMTAB("------ Examining the section headers ------\n");
TRACE_SYMTAB("rx: at %#lx are mapped foffsets %ld .. %ld\n",
di->fsm.rx_map_avma,
di->fsm.rx_map_foff,
di->fsm.rx_map_foff + di->fsm.rx_map_size - 1 );
for (i = 0; i < n_rx; i++) {
TRACE_SYMTAB("rx[%ld]: contains svmas %#lx .. %#lx with bias %#lx\n",
i, rx[i].svma_base, rx[i].svma_limit - 1, rx[i].bias );
for (i = 0; i < VG_(sizeXA)(di->fsm.maps); i++) {
struct _DebugInfoMapping* map = VG_(indexXA)(di->fsm.maps, i);
if (map->rx)
TRACE_SYMTAB("rx: at %#lx are mapped foffsets %ld .. %ld\n",
map->avma, map->foff, map->foff + map->size - 1 );
}
TRACE_SYMTAB("rw: at %#lx are mapped foffsets %ld .. %ld\n",
di->fsm.rw_map_avma,
di->fsm.rw_map_foff,
di->fsm.rw_map_foff + di->fsm.rw_map_size - 1 );
for (i = 0; i < n_rw; i++) {
TRACE_SYMTAB("rw[%ld]: contains svmas %#lx .. %#lx with bias %#lx\n",
i, rw[i].svma_base, rw[i].svma_limit - 1, rw[i].bias );
TRACE_SYMTAB("rx: contains these svma regions:\n");
for (i = 0; i < VG_(sizeXA)(svma_ranges); i++) {
RangeAndBias* reg = VG_(indexXA)(svma_ranges, i);
if (reg->exec)
TRACE_SYMTAB(" svmas %#lx .. %#lx with bias %#lx\n",
reg->svma_base, reg->svma_limit - 1, reg->bias );
}
for (i = 0; i < VG_(sizeXA)(di->fsm.maps); i++) {
struct _DebugInfoMapping* map = VG_(indexXA)(di->fsm.maps, i);
if (map->rw)
TRACE_SYMTAB("rw: at %#lx are mapped foffsets %ld .. %ld\n",
map->avma, map->foff, map->foff + map->size - 1 );
}
TRACE_SYMTAB("rw: contains these svma regions:\n");
for (i = 0; i < VG_(sizeXA)(svma_ranges); i++) {
RangeAndBias* reg = VG_(indexXA)(svma_ranges, i);
if (!reg->exec)
TRACE_SYMTAB(" svmas %#lx .. %#lx with bias %#lx\n",
reg->svma_base, reg->svma_limit - 1, reg->bias );
}
/* TOPLEVEL */
@@ -1653,19 +1669,17 @@ Bool ML_(read_elf_debug_info) ( struct _DebugInfo* di )
leave the relevant pointer at NULL. */
RangeAndBias* inrx = NULL;
RangeAndBias* inrw = NULL;
{ Word j;
for (j = 0; j < n_rx; j++) {
if (svma >= rx[j].svma_base && svma < rx[j].svma_limit) {
inrx = &rx[j];
break;
}
}
for (j = 0; j < n_rw; j++) {
if (svma >= rw[j].svma_base && svma < rw[j].svma_limit) {
inrw = &rw[j];
break;
}
}
for (j = 0; j < VG_(sizeXA)(svma_ranges); j++) {
RangeAndBias* rng = VG_(indexXA)(svma_ranges, j);
if (svma >= rng->svma_base && svma < rng->svma_limit) {
if (!inrx && rng->exec) {
inrx = rng;
} else if (!inrw && !rng->exec) {
inrw = rng;
}
if (inrx && inrw)
break;
}
}
TRACE_SYMTAB(" [sec %2ld] %s %s al%2u foff %6ld .. %6ld "
@@ -2268,10 +2282,8 @@ Bool ML_(read_elf_debug_info) ( struct _DebugInfo* di )
UChar* shdr_strtab_dimg = NULL;
/* SVMAs covered by rx and rw segments and corresponding bias. */
/* Addr rx_dsvma_base = 0; */ /* UNUSED */
Addr rx_dsvma_limit = 0;
PtrdiffT rx_dbias = 0;
/* Addr rw_dsvma_base = 0; */ /* UNUSED */
Addr rw_dsvma_limit = 0;
PtrdiffT rw_dbias = 0;
@@ -2324,28 +2336,24 @@ Bool ML_(read_elf_debug_info) ( struct _DebugInfo* di )
= INDEX_BIS( (void*)(dimage + ehdr_dimg->e_phoff),
i, phdr_ent_szB );
if (phdr->p_type == PT_LOAD) {
if (rx_dsvma_limit == 0
&& phdr->p_offset >= di->fsm.rx_map_foff
&& phdr->p_offset
< di->fsm.rx_map_foff + di->fsm.rx_map_size
&& phdr->p_offset + phdr->p_filesz
<= di->fsm.rx_map_foff + di->fsm.rx_map_size) {
/* rx_dsvma_base = phdr->p_vaddr; */ /* UNUSED */
rx_dsvma_limit = phdr->p_vaddr + phdr->p_memsz;
rx_dbias = di->fsm.rx_map_avma - di->fsm.rx_map_foff
+ phdr->p_offset - phdr->p_vaddr;
}
else
if (rw_dsvma_limit == 0
&& phdr->p_offset >= di->fsm.rw_map_foff
&& phdr->p_offset
< di->fsm.rw_map_foff + di->fsm.rw_map_size
&& phdr->p_offset + phdr->p_filesz
<= di->fsm.rw_map_foff + di->fsm.rw_map_size) {
/* rw_dsvma_base = phdr->p_vaddr; */ /* UNUSED */
rw_dsvma_limit = phdr->p_vaddr + phdr->p_memsz;
rw_dbias = di->fsm.rw_map_avma - di->fsm.rw_map_foff
+ phdr->p_offset - phdr->p_vaddr;
for (j = 0; j < VG_(sizeXA)(di->fsm.maps); j++) {
struct _DebugInfoMapping* map = VG_(indexXA)(di->fsm.maps, j);
if ( phdr->p_offset >= map->foff
&& phdr->p_offset < map->foff + map->size
&& phdr->p_offset + phdr->p_filesz < map->foff
+ map->size) {
if (map->rx && rx_dsvma_limit == 0) {
rx_dsvma_limit = phdr->p_vaddr + phdr->p_memsz;
rx_dbias = map->avma - map->foff + phdr->p_offset
- phdr->p_vaddr;
}
if (map->rw && rw_dsvma_limit == 0) {
rw_dsvma_limit = phdr->p_vaddr + phdr->p_memsz;
rw_dbias = map->avma - map->foff + phdr->p_offset
- phdr->p_vaddr;
}
break;
}
}
}
}
@@ -2562,7 +2570,6 @@ Bool ML_(read_elf_debug_info) ( struct _DebugInfo* di )
exp-sgcheck.) */
if (0 && (VG_(needs).var_info || VG_(clo_read_var_info))) {
UWord nVars = 0;
Word j;
if (di->varinfo) {
for (j = 0; j < VG_(sizeXA)(di->varinfo); j++) {
OSet* /* of DiAddrRange */ scope
@@ -2596,6 +2603,10 @@ Bool ML_(read_elf_debug_info) ( struct _DebugInfo* di )
}
m_res = VG_(am_munmap_valgrind) ( oimage, n_oimage );
vg_assert(!sr_isError(m_res));
if (svma_ranges)
VG_(deleteXA)(svma_ranges);
return res;
} /* out: */

File: coregrind/m_debuginfo/readmacho.c

@@ -676,19 +676,35 @@ Bool ML_(read_macho_debug_info)( struct _DebugInfo* di )
ImageInfo ii; /* main file */
ImageInfo iid; /* auxiliary .dSYM file */
Bool ok;
Word i;
struct _DebugInfoMapping* rx_map = NULL;
struct _DebugInfoMapping* rw_map = NULL;
/* mmap the object file to look for di->soname and di->text_bias
and uuid and nlist and STABS */
if (VG_(clo_verbosity) > 1)
VG_(message)(Vg_DebugMsg,
"%s (%#lx)\n", di->fsm.filename, di->fsm.rx_map_avma );
/* This should be ensured by our caller (that we're in the accept
state). */
vg_assert(di->fsm.have_rx_map);
vg_assert(di->fsm.have_rw_map);
for (i = 0; i < VG_(sizeXA)(di->fsm.maps); i++) {
struct _DebugInfoMapping* map = VG_(indexXA)(di->fsm.maps, i);
if (map->rx && !rx_map)
rx_map = map;
if (map->rw && !rw_map)
rw_map = map;
if (rx_map && rw_map)
break;
}
vg_assert(rx_map);
vg_assert(rw_map);
if (VG_(clo_verbosity) > 1)
VG_(message)(Vg_DebugMsg,
"%s (rx at %#lx, rw at %#lx)\n", di->fsm.filename,
rx_map->avma, rw_map->avma );
VG_(memset)(&ii, 0, sizeof(ii));
VG_(memset)(&iid, 0, sizeof(iid));
VG_(memset)(&uuid, 0, sizeof(uuid));
@@ -779,7 +795,7 @@ Bool ML_(read_macho_debug_info)( struct _DebugInfo* di )
&& seg->fileoff == 0 && seg->filesize != 0) {
di->text_present = True;
di->text_svma = (Addr)seg->vmaddr;
di->text_avma = di->fsm.rx_map_avma;
di->text_avma = rx_map->avma;
di->text_size = seg->vmsize;
di->text_bias = di->text_avma - di->text_svma;
/* Make the _debug_ values be the same as the
@@ -796,7 +812,7 @@ Bool ML_(read_macho_debug_info)( struct _DebugInfo* di )
/* && DDD:seg->fileoff == 0 */ && seg->filesize != 0) {
di->data_present = True;
di->data_svma = (Addr)seg->vmaddr;
di->data_avma = di->fsm.rw_map_avma;
di->data_avma = rw_map->avma;
di->data_size = seg->vmsize;
di->data_bias = di->data_avma - di->data_svma;
di->data_debug_svma = di->data_svma;
@@ -829,7 +845,7 @@ Bool ML_(read_macho_debug_info)( struct _DebugInfo* di )
struct NLIST *syms;
UChar *strs;
XArray* /* DiSym */ candSyms = NULL;
Word i, nCandSyms;
Word nCandSyms;
if (ii.macho_img_szB < symcmd->stroff + symcmd->strsize
|| ii.macho_img_szB < symcmd->symoff + symcmd->nsyms

File: coregrind/m_debuginfo/readpdb.c

@@ -86,8 +86,8 @@
doesn't make much sense. Here, we use text_bias as empirically
producing the most ranges that fall inside the text segments for a
multi-dll program. Of course, it could still be nonsense :-) */
#define BIAS_FOR_SYMBOLS (di->fsm.rx_map_avma)
#define BIAS_FOR_LINETAB (di->fsm.rx_map_avma)
#define BIAS_FOR_SYMBOLS (di->text_avma)
#define BIAS_FOR_LINETAB (di->text_avma)
#define BIAS_FOR_LINETAB2 (di->text_bias)
#define BIAS_FOR_FPO (di->text_bias)
/* Using di->text_bias for the FPOs causes 981 in range and 1 out of
@@ -2259,8 +2259,6 @@ Bool ML_(read_pdb_debug_info)(
+ OFFSET_OF(IMAGE_NT_HEADERS, OptionalHeader)
+ ntheaders_avma->FileHeader.SizeOfOptionalHeader;
di->fsm.rx_map_avma = (Addr)obj_avma;
/* Iterate over PE(?) headers. Try to establish the text_bias,
that's all we really care about. */
for ( i = 0;
@@ -2283,6 +2281,12 @@ Bool ML_(read_pdb_debug_info)(
VG_(message)(Vg_DebugMsg,
" ::: mapped_avma is %#lx\n", mapped_avma);
struct _DebugInfoMapping map;
map.avma = mapped_avma;
map.size = pe_sechdr_avma->Misc.VirtualSize;
map.foff = pe_sechdr_avma->PointerToRawData;
map.ro = False;
if (pe_sechdr_avma->Characteristics & IMAGE_SCN_CNT_CODE) {
/* Ignore uninitialised code sections - if you have
incremental linking enabled in Visual Studio then you will
@@ -2290,60 +2294,44 @@ Bool ML_(read_pdb_debug_info)(
the real text section and valgrind will compute the wrong
avma value and hence the wrong bias. */
if (!(pe_sechdr_avma->Characteristics & IMAGE_SCN_CNT_UNINITIALIZED_DATA)) {
map.rx = True;
map.rw = False;
VG_(addToXA)(di->fsm.maps, &map);
di->fsm.have_rx_map = True;
if (di->fsm.rx_map_avma == 0) {
di->fsm.rx_map_avma = mapped_avma;
}
if (di->fsm.rx_map_size==0) {
di->fsm.rx_map_foff = pe_sechdr_avma->PointerToRawData;
}
di->text_present = True;
if (di->text_avma==0) {
if (di->text_avma == 0) {
di->text_svma = pe_sechdr_avma->VirtualAddress;
di->text_avma = mapped_avma;
di->text_size = pe_sechdr_avma->Misc.VirtualSize;
} else {
di->text_size = mapped_end_avma - di->text_avma;
}
di->text_size += pe_sechdr_avma->Misc.VirtualSize;
di->fsm.rx_map_size += pe_sechdr_avma->Misc.VirtualSize;
}
}
else if (pe_sechdr_avma->Characteristics
& IMAGE_SCN_CNT_INITIALIZED_DATA) {
map.rx = False;
map.rw = True;
VG_(addToXA)(di->fsm.maps, &map);
di->fsm.have_rw_map = True;
if (di->fsm.rw_map_avma == 0) {
di->fsm.rw_map_avma = mapped_avma;
}
if (di->fsm.rw_map_size==0) {
di->fsm.rw_map_foff = pe_sechdr_avma->PointerToRawData;
}
di->data_present = True;
if (di->data_avma==0) {
if (di->data_avma == 0) {
di->data_avma = mapped_avma;
di->data_size = pe_sechdr_avma->Misc.VirtualSize;
} else {
di->data_size = mapped_end_avma - di->data_avma;
}
di->fsm.rw_map_size += pe_sechdr_avma->Misc.VirtualSize;
di->data_size += pe_sechdr_avma->Misc.VirtualSize;
}
else if (pe_sechdr_avma->Characteristics
& IMAGE_SCN_CNT_UNINITIALIZED_DATA) {
di->bss_present = True;
di->bss_avma = mapped_avma;
di->bss_size = pe_sechdr_avma->Misc.VirtualSize;
}
mapped_avma = VG_PGROUNDDN(mapped_avma);
mapped_end_avma = VG_PGROUNDUP(mapped_end_avma);
/* Urr. These tests are bogus; ->fsm.rx_map_avma is not necessarily
the start of the text section. */
if ((1 /*VG_(needs).data_syms*/
|| (pe_sechdr_avma->Characteristics & IMAGE_SCN_CNT_CODE))
&& mapped_avma >= di->fsm.rx_map_avma
&& mapped_avma <= (di->fsm.rx_map_avma+di->text_size)
&& mapped_end_avma > (di->fsm.rx_map_avma+di->text_size)) {
UInt newsz = mapped_end_avma - di->fsm.rx_map_avma;
if (newsz > di->text_size) {
/* extending the mapping is always needed for PE files
under WINE */
di->text_size = newsz;
di->fsm.rx_map_size = newsz;
if (di->bss_avma == 0) {
di->bss_avma = mapped_avma;
di->bss_size = pe_sechdr_avma->Misc.VirtualSize;
} else {
di->bss_size = mapped_end_avma - di->bss_avma;
}
}
}
@@ -2364,14 +2352,20 @@ Bool ML_(read_pdb_debug_info)(
}
if (VG_(clo_verbosity) > 1) {
VG_(message)(Vg_DebugMsg,
"rx_map: avma %#lx size %7lu foff %llu\n",
di->fsm.rx_map_avma, di->fsm.rx_map_size,
(Off64T)di->fsm.rx_map_foff);
VG_(message)(Vg_DebugMsg,
"rw_map: avma %#lx size %7lu foff %llu\n",
di->fsm.rw_map_avma, di->fsm.rw_map_size,
(Off64T)di->fsm.rw_map_foff);
for (i = 0; i < VG_(sizeXA)(di->fsm.maps); i++) {
struct _DebugInfoMapping* map = VG_(indexXA)(di->fsm.maps, i);
if (map->rx)
VG_(message)(Vg_DebugMsg,
"rx_map: avma %#lx size %7lu foff %llu\n",
map->avma, map->size, (Off64T)map->foff);
}
for (i = 0; i < VG_(sizeXA)(di->fsm.maps); i++) {
struct _DebugInfoMapping* map = VG_(indexXA)(di->fsm.maps, i);
if (map->rw)
VG_(message)(Vg_DebugMsg,
"rw_map: avma %#lx size %7lu foff %llu\n",
map->avma, map->size, (Off64T)map->foff);
}
VG_(message)(Vg_DebugMsg,
" text: avma %#lx svma %#lx size %7lu bias %#lx\n",

File: coregrind/m_debuginfo/storage.c

@@ -385,8 +385,7 @@ void ML_(addLineInfo) ( struct _DebugInfo* di,
See "Comment_Regarding_Text_Range_Checks" elsewhere in this file
for background and rationale. */
vg_assert(di->fsm.have_rx_map && di->fsm.have_rw_map);
if (next-1 < di->fsm.rx_map_avma
|| this >= di->fsm.rx_map_avma + di->fsm.rx_map_size ) {
if (ML_(find_rx_mapping)(di, this, next - 1) == NULL) {
if (0)
VG_(message)(Vg_DebugMsg,
"warning: ignoring line info entry falling "
@@ -436,6 +435,8 @@ void ML_(addDiCfSI) ( struct _DebugInfo* di, DiCfSI* cfsi_orig )
UInt new_sz, i;
DiCfSI* new_tab;
SSizeT delta;
struct _DebugInfoMapping* map;
struct _DebugInfoMapping* map2;
/* copy the original, so we can mess with it */
DiCfSI cfsi = *cfsi_orig;
@@ -456,27 +457,30 @@ void ML_(addDiCfSI) ( struct _DebugInfo* di, DiCfSI* cfsi_orig )
vg_assert(cfsi.len < 5000000);
vg_assert(di->fsm.have_rx_map && di->fsm.have_rw_map);
/* If we have an empty r-x mapping (is that possible?) then the
DiCfSI can't possibly fall inside it. In which case skip. */
if (di->fsm.rx_map_size == 0)
return;
/* Find a mapping that at least one end of the CFSI falls into. */
map = ML_(find_rx_mapping)(di, cfsi.base, cfsi.base);
map2 = ML_(find_rx_mapping)(di, cfsi.base + cfsi.len - 1,
cfsi.base + cfsi.len - 1);
if (map == NULL)
map = map2;
else if (map2 == NULL)
map2 = map;
/* Rule out ones which are completely outside the r-x mapped area.
/* Rule out ones which are completely outside the r-x mapped area
(or which span across different areas).
See "Comment_Regarding_Text_Range_Checks" elsewhere in this file
for background and rationale. */
if (cfsi.base + cfsi.len - 1 < di->fsm.rx_map_avma
|| cfsi.base >= di->fsm.rx_map_avma + di->fsm.rx_map_size) {
if (map == NULL || map != map2) {
static Int complaints = 10;
if (VG_(clo_trace_cfi) || complaints > 0) {
complaints--;
if (VG_(clo_verbosity) > 1) {
VG_(message)(
Vg_DebugMsg,
"warning: DiCfSI %#lx .. %#lx outside segment %#lx .. %#lx\n",
"warning: DiCfSI %#lx .. %#lx outside mapped rw segments (%s)\n",
cfsi.base,
cfsi.base + cfsi.len - 1,
di->text_avma,
di->text_avma + di->text_size - 1
di->soname
);
}
if (VG_(clo_trace_cfi))
@@ -493,27 +497,27 @@ void ML_(addDiCfSI) ( struct _DebugInfo* di, DiCfSI* cfsi_orig )
will fail. See
"Comment_on_IMPORTANT_CFSI_REPRESENTATIONAL_INVARIANTS" in
priv_storage.h for background. */
if (cfsi.base < di->fsm.rx_map_avma) {
if (cfsi.base < map->avma) {
/* Lower end is outside the mapped area. Hence upper end must
be inside it. */
if (0) VG_(printf)("XXX truncate lower\n");
vg_assert(cfsi.base + cfsi.len - 1 >= di->fsm.rx_map_avma);
delta = (SSizeT)(di->fsm.rx_map_avma - cfsi.base);
vg_assert(cfsi.base + cfsi.len - 1 >= map->avma);
delta = (SSizeT)(map->avma - cfsi.base);
vg_assert(delta > 0);
vg_assert(delta < (SSizeT)cfsi.len);
cfsi.base += delta;
cfsi.len -= delta;
}
else
if (cfsi.base + cfsi.len - 1 > di->fsm.rx_map_avma
+ di->fsm.rx_map_size - 1) {
if (cfsi.base + cfsi.len - 1 > map->avma + map->size - 1) {
/* Upper end is outside the mapped area. Hence lower end must be
inside it. */
if (0) VG_(printf)("XXX truncate upper\n");
vg_assert(cfsi.base <= di->fsm.rx_map_avma + di->fsm.rx_map_size - 1);
vg_assert(cfsi.base <= map->avma + map->size - 1);
delta = (SSizeT)( (cfsi.base + cfsi.len - 1)
- (di->fsm.rx_map_avma + di->fsm.rx_map_size - 1) );
vg_assert(delta > 0); vg_assert(delta < (SSizeT)cfsi.len);
- (map->avma + map->size - 1) );
vg_assert(delta > 0);
vg_assert(delta < (SSizeT)cfsi.len);
cfsi.len -= delta;
}
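
A worked instance of the truncation above, with invented numbers:

/* Suppose the rx map covers [0x4000, 0x5000) and the incoming DiCfSI
   has base = 0x3f00, len = 0x200, i.e. covers [0x3f00, 0x4100).
   The lower end is below map->avma, so the first branch fires:
     delta     = 0x4000 - 0x3f00 = 0x100
     cfsi.base += delta          -> 0x4000
     cfsi.len  -= delta          -> 0x100
   leaving [0x4000, 0x4100), wholly inside the mapping. */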
@@ -526,9 +530,9 @@ void ML_(addDiCfSI) ( struct _DebugInfo* di, DiCfSI* cfsi_orig )
vg_assert(cfsi.len > 0);
/* Similar logic applies for the next two assertions. */
vg_assert(cfsi.base >= di->fsm.rx_map_avma);
vg_assert(cfsi.base >= map->avma);
vg_assert(cfsi.base + cfsi.len - 1
<= di->fsm.rx_map_avma + di->fsm.rx_map_size - 1);
<= map->avma + map->size - 1);
if (di->cfsi_used == di->cfsi_size) {
new_sz = 2 * di->cfsi_size;
@@ -918,16 +922,12 @@ void ML_(addVar)( struct _DebugInfo* di,
and it is re-checked at the start of
ML_(read_elf_debug_info). */
vg_assert(di->fsm.have_rx_map && di->fsm.have_rw_map);
if (level > 0
&& (aMax < di->fsm.rx_map_avma
|| aMin >= di->fsm.rx_map_avma + di->fsm.rx_map_size)) {
if (level > 0 && ML_(find_rx_mapping)(di, aMin, aMax) == NULL) {
if (VG_(clo_verbosity) >= 0) {
VG_(message)(Vg_DebugMsg,
"warning: addVar: in range %#lx .. %#lx outside "
"segment %#lx .. %#lx (%s)\n",
aMin, aMax,
di->text_avma, di->text_avma + di->text_size -1,
name
"all rx mapped areas (%s)\n",
aMin, aMax, name
);
}
return;
@@ -1858,7 +1858,7 @@ Word ML_(search_one_cfitab) ( struct _DebugInfo* di, Addr ptr )
Word ML_(search_one_fpotab) ( struct _DebugInfo* di, Addr ptr )
{
Addr const addr = ptr - di->fsm.rx_map_avma;
Addr const addr = ptr - di->text_avma;
Addr a_mid_lo, a_mid_hi;
Word mid, size,
lo = 0,