Change the behaviour of --partial-loads-ok=yes to avoid false

negatives, by marking the V bits that come from out of range parts of
the access as undefined; and hence any use of them leads to a value
error.  Prior to this they were marked as defined and could be used
without error.

Behaviour of --partial-loads-ok=no (the default case) is unchanged.

Also add some testing thereof.

Fixes #294523.  Modified version of a patch and testcase by Patrick
J. LoPresti (lopresti@gmail.com).



git-svn-id: svn://svn.valgrind.org/valgrind/trunk@12430
This commit is contained in:
Julian Seward 2012-03-08 14:51:01 +00:00
parent f2bc8e8162
commit cd43bae6a4
11 changed files with 202 additions and 36 deletions

View File

@ -1141,19 +1141,6 @@ static
__attribute__((noinline)) __attribute__((noinline))
ULong mc_LOADVn_slow ( Addr a, SizeT nBits, Bool bigendian ) ULong mc_LOADVn_slow ( Addr a, SizeT nBits, Bool bigendian )
{ {
/* Make up a 64-bit result V word, which contains the loaded data for
valid addresses and Defined for invalid addresses. Iterate over
the bytes in the word, from the most significant down to the
least. */
ULong vbits64 = V_BITS64_UNDEFINED;
SizeT szB = nBits / 8;
SSizeT i; // Must be signed.
SizeT n_addrs_bad = 0;
Addr ai;
Bool partial_load_exemption_applies;
UChar vbits8;
Bool ok;
PROF_EVENT(30, "mc_LOADVn_slow"); PROF_EVENT(30, "mc_LOADVn_slow");
/* ------------ BEGIN semi-fast cases ------------ */ /* ------------ BEGIN semi-fast cases ------------ */
@ -1188,37 +1175,92 @@ ULong mc_LOADVn_slow ( Addr a, SizeT nBits, Bool bigendian )
} }
/* ------------ END semi-fast cases ------------ */ /* ------------ END semi-fast cases ------------ */
ULong vbits64 = V_BITS64_UNDEFINED; /* result */
ULong pessim64 = V_BITS64_DEFINED; /* only used when p-l-ok=yes */
SSizeT szB = nBits / 8;
SSizeT i; /* Must be signed. */
SizeT n_addrs_bad = 0;
Addr ai;
UChar vbits8;
Bool ok;
tl_assert(nBits == 64 || nBits == 32 || nBits == 16 || nBits == 8); tl_assert(nBits == 64 || nBits == 32 || nBits == 16 || nBits == 8);
/* Make up a 64-bit result V word, which contains the loaded data
for valid addresses and Defined for invalid addresses. Iterate
over the bytes in the word, from the most significant down to
the least. The vbits to return are calculated into vbits64.
Also compute the pessimising value to be used when
--partial-loads-ok=yes. n_addrs_bad is redundant (the relevant
info can be gleaned from pessim64) but is used as a
cross-check. */
for (i = szB-1; i >= 0; i--) { for (i = szB-1; i >= 0; i--) {
PROF_EVENT(31, "mc_LOADVn_slow(loop)"); PROF_EVENT(31, "mc_LOADVn_slow(loop)");
ai = a + byte_offset_w(szB, bigendian, i); ai = a + byte_offset_w(szB, bigendian, i);
ok = get_vbits8(ai, &vbits8); ok = get_vbits8(ai, &vbits8);
if (!ok) n_addrs_bad++;
vbits64 <<= 8; vbits64 <<= 8;
vbits64 |= vbits8; vbits64 |= vbits8;
if (!ok) n_addrs_bad++;
pessim64 <<= 8;
pessim64 |= (ok ? V_BITS8_DEFINED : V_BITS8_UNDEFINED);
} }
/* This is a hack which avoids producing errors for code which /* In the common case, all the addresses involved are valid, so we
insists in stepping along byte strings in aligned word-sized just return the computed V bits and have done. */
chunks, and there is a partially defined word at the end. (eg, if (LIKELY(n_addrs_bad == 0))
optimised strlen). Such code is basically broken at least WRT return vbits64;
semantics of ANSI C, but sometimes users don't have the option
to fix it, and so this option is provided. Note it is now
defaulted to not-engaged.
A load from a partially-addressible place is allowed if: /* If there's no possibility of getting a partial-loads-ok
- the command-line flag is set exemption, report the error and quit. */
if (!MC_(clo_partial_loads_ok)) {
MC_(record_address_error)( VG_(get_running_tid)(), a, szB, False );
return vbits64;
}
/* The partial-loads-ok exemption might apply.  Find out if it
does. If so, don't report an addressing error, but do return
Undefined for the bytes that are out of range, so as to avoid
false negatives. If it doesn't apply, just report an addressing
error in the usual way. */
/* Some code steps along byte strings in aligned word-sized chunks
even when there is only a partially defined word at the end (eg,
optimised strlen). This is allowed by the memory model of
modern machines, since an aligned load cannot span two pages and
thus cannot "partially fault". Despite such behaviour being
declared undefined by ANSI C/C++.
Therefore, a load from a partially-addressible place is allowed
if all of the following hold:
- the command-line flag is set [by default, it isn't]
- it's a word-sized, word-aligned load - it's a word-sized, word-aligned load
- at least one of the addresses in the word *is* valid - at least one of the addresses in the word *is* valid
*/
partial_load_exemption_applies
= MC_(clo_partial_loads_ok) && szB == VG_WORDSIZE
&& VG_IS_WORD_ALIGNED(a)
&& n_addrs_bad < VG_WORDSIZE;
if (n_addrs_bad > 0 && !partial_load_exemption_applies) Since this suppresses the addressing error, we avoid false
MC_(record_address_error)( VG_(get_running_tid)(), a, szB, False ); negatives by marking bytes undefined when they come from an
invalid address.
*/
/* "at least one of the addresses is invalid" */
tl_assert(pessim64 != V_BITS64_DEFINED);
if (szB == VG_WORDSIZE && VG_IS_WORD_ALIGNED(a)
&& n_addrs_bad < VG_WORDSIZE) {
/* Exemption applies. Use the previously computed pessimising
value for vbits64 and return the combined result, but don't
flag an addressing error. The pessimising value is Defined
for valid addresses and Undefined for invalid addresses. */
/* for assumption that doing bitwise or implements UifU */
tl_assert(V_BIT_UNDEFINED == 1 && V_BIT_DEFINED == 0);
/* (really need "UifU" here...)
vbits64 UifU= pessim64 (is pessimised by it, iow) */
vbits64 |= pessim64;
return vbits64;
}
/* Exemption doesn't apply. Flag an addressing error in the normal
way. */
MC_(record_address_error)( VG_(get_running_tid)(), a, szB, False );
return vbits64; return vbits64;
} }

View File

@ -187,15 +187,19 @@ EXTRA_DIST = \
supp2.stderr.exp supp2.vgtest \ supp2.stderr.exp supp2.vgtest \
supp.supp \ supp.supp \
suppfree.stderr.exp suppfree.vgtest \ suppfree.stderr.exp suppfree.vgtest \
test-plo-no.vgtest test-plo-no.stdout.exp \
test-plo-no.stderr.exp-le64 test-plo-no.stderr.exp-le32 \
test-plo-yes.vgtest test-plo-yes.stdout.exp \
test-plo-yes.stderr.exp-le64 test-plo-yes.stderr.exp-le32 \
trivialleak.stderr.exp trivialleak.vgtest trivialleak.stderr.exp2 \ trivialleak.stderr.exp trivialleak.vgtest trivialleak.stderr.exp2 \
unit_libcbase.stderr.exp unit_libcbase.vgtest \ unit_libcbase.stderr.exp unit_libcbase.vgtest \
unit_oset.stderr.exp unit_oset.stdout.exp unit_oset.vgtest \ unit_oset.stderr.exp unit_oset.stdout.exp unit_oset.vgtest \
varinfo1.vgtest varinfo1.stdout.exp varinfo1.stderr.exp varinfo1.stderr.exp-ppc64\ varinfo1.vgtest varinfo1.stdout.exp varinfo1.stderr.exp varinfo1.stderr.exp-ppc64 \
varinfo2.vgtest varinfo2.stdout.exp varinfo2.stderr.exp varinfo2.stderr.exp-ppc64\ varinfo2.vgtest varinfo2.stdout.exp varinfo2.stderr.exp varinfo2.stderr.exp-ppc64 \
varinfo3.vgtest varinfo3.stdout.exp varinfo3.stderr.exp varinfo3.stderr.exp-ppc64\ varinfo3.vgtest varinfo3.stdout.exp varinfo3.stderr.exp varinfo3.stderr.exp-ppc64 \
varinfo4.vgtest varinfo4.stdout.exp varinfo4.stderr.exp varinfo4.stderr.exp-ppc64\ varinfo4.vgtest varinfo4.stdout.exp varinfo4.stderr.exp varinfo4.stderr.exp-ppc64 \
varinfo5.vgtest varinfo5.stdout.exp varinfo5.stderr.exp varinfo5.stderr.exp-ppc64\ varinfo5.vgtest varinfo5.stdout.exp varinfo5.stderr.exp varinfo5.stderr.exp-ppc64 \
varinfo6.vgtest varinfo6.stdout.exp varinfo6.stderr.exp varinfo6.stderr.exp-ppc64\ varinfo6.vgtest varinfo6.stdout.exp varinfo6.stderr.exp varinfo6.stderr.exp-ppc64 \
vcpu_bz2.stdout.exp vcpu_bz2.stderr.exp vcpu_bz2.vgtest \ vcpu_bz2.stdout.exp vcpu_bz2.stderr.exp vcpu_bz2.vgtest \
vcpu_fbench.stdout.exp vcpu_fbench.stderr.exp vcpu_fbench.vgtest \ vcpu_fbench.stdout.exp vcpu_fbench.stderr.exp vcpu_fbench.vgtest \
vcpu_fnfns.stdout.exp vcpu_fnfns.stdout.exp-glibc28-amd64 \ vcpu_fnfns.stdout.exp vcpu_fnfns.stdout.exp-glibc28-amd64 \
@ -266,6 +270,7 @@ check_PROGRAMS = \
strchr \ strchr \
str_tester \ str_tester \
supp_unknown supp1 supp2 suppfree \ supp_unknown supp1 supp2 suppfree \
test-plo \
trivialleak \ trivialleak \
unit_libcbase unit_oset \ unit_libcbase unit_oset \
varinfo1 varinfo2 varinfo3 varinfo4 \ varinfo1 varinfo2 varinfo3 varinfo4 \

View File

@ -0,0 +1 @@
XXX put 32 bit results in here

View File

@ -0,0 +1,18 @@
Invalid read of size 8
...
Address 0x........ is 0 bytes inside a block of size 5 alloc'd
at 0x........: memalign (vg_replace_malloc.c:...)
...
Invalid read of size 8
...
Address 0x........ is 0 bytes inside a block of size 5 alloc'd
at 0x........: memalign (vg_replace_malloc.c:...)
...
Invalid read of size 8
...
Address 0x........ is 8 bytes inside a block of size 24 free'd
at 0x........: free (vg_replace_malloc.c:...)
...

View File

View File

@ -0,0 +1,2 @@
prog: test-plo
vgopts: -q

View File

@ -0,0 +1 @@
XXX put 32 bit results in here

View File

@ -0,0 +1,9 @@
Conditional jump or move depends on uninitialised value(s)
...
Invalid read of size 8
...
Address 0x........ is 8 bytes inside a block of size 24 free'd
at 0x........: free (vg_replace_malloc.c:...)
...

View File

View File

@ -0,0 +1,2 @@
prog: test-plo
vgopts: -q --partial-loads-ok=yes

86
memcheck/tests/test-plo.c Normal file
View File

@ -0,0 +1,86 @@
#include <malloc.h>
#include <stdio.h>
#include <assert.h>
typedef unsigned long long int ULong;
typedef unsigned long int UWord;

/* Return the 1-based index of the least significant set bit of x,
   examining at most 64 bits.  Returns 65 when x is zero -- note this
   differs from the library ffsll(), which returns 0 for zero input;
   the caller (aligned_strlen) relies on the 65 result. */
__attribute__((noinline))
static int my_ffsll ( ULong x )
{
   int pos = 1;
   while (pos <= 64 && (x & 1ULL) == 0ULL) {
      x >>= 1;
      pos++;
   }
   return pos;
}
/* Find length of string, assuming it is aligned and shorter than 8
characters. Little-endian only. */
__attribute__((noinline))
static int aligned_strlen(char *s)
{
/* This is for 64-bit platforms */
assert(sizeof(ULong) == 8);
/* ..and only works for aligned input */
assert(((unsigned long)s & 0x7) == 0);
/* read 8 bytes */
ULong val = *(ULong*)s;
/* Subtract one from each byte */
ULong val2 = val - 0x0101010101010101ULL;
/* Find lowest byte whose high bit changed */
val2 ^= val;
val2 &= 0x8080808080808080ULL;
return (my_ffsll(val2) / 8) - 1;
}
/* Optimisation sink: the test calls this when a computed condition
   holds, so the compiler cannot discard the comparison feeding it.
   The empty asm with a "memory" clobber is a compiler-level barrier,
   and noinline keeps the call (and hence the branch) in place. */
__attribute__((noinline)) void foo ( int x )
{
__asm__ __volatile__("":::"memory");
}
int
main(int argc, char *argv[])
{
/* 5-byte, 8-aligned buffer holding "abcd" + NUL.  An 8-byte load
   from it reads 3 bytes beyond the allocation. */
char *buf = memalign(8, 5);
buf[0] = 'a';
buf[1] = 'b';
buf[2] = 'c';
buf[3] = 'd';
buf[4] = '\0';
/* --partial-loads-ok=no: expect addr error (here) */
/* --partial-loads-ok=yes: expect no error (the NUL terminator is
   found before the out-of-range bytes influence the result) */
if (aligned_strlen(buf) == 4)
foo(44);
/* Overwrite the terminator: now every in-range byte is nonzero, so
   the outcome depends on the out-of-range (undefined) bytes. */
/* --partial-loads-ok=no: expect addr error (here) */
/* --partial-loads-ok=yes: expect value error (in my_ffsll) */
buf[4] = 'x';
if (aligned_strlen(buf) == 0)
foo(37);
free(buf);
/* Also, we need to check that a completely out-of-range,
   word-sized load gives an addressing error regardless of the
   setting of --partial-loads-ok.  *And* that the resulting
   value is completely defined. */
UWord* words = malloc(3 * sizeof(UWord));
free(words);
/* use-after-free load: should ALWAYS give an addr error. */
UWord w = words[1];
/* Should NEVER give an error (you might expect a value error, but
   no: per the note above, the loaded value is completely defined). */
if (w == 0x31415927) {
fprintf(stderr,
"Elvis is alive and well and living in Milton Keynes.\n");
}
return 0;
}