mirror of
https://github.com/Zenithsiz/ftmemsim-valgrind.git
synced 2026-02-04 02:18:37 +00:00
Removed now-defunct read/write checks for FPU ops.
git-svn-id: svn://svn.valgrind.org/valgrind/trunk@3074
This commit is contained in:
parent
196e8dfd25
commit
86751c1c2e
@ -69,9 +69,6 @@ extern REGPARM(1) UInt MC_(helperc_LOADV2) ( Addr );
|
||||
extern REGPARM(1) UInt MC_(helperc_LOADV4) ( Addr );
|
||||
extern REGPARM(1) ULong MC_(helperc_LOADV8) ( Addr );
|
||||
|
||||
extern REGPARM(2) void MC_(fpu_write_check) ( Addr addr, SizeT size );
|
||||
extern REGPARM(2) void MC_(fpu_read_check) ( Addr addr, SizeT size );
|
||||
|
||||
/* Functions defined in mc_errcontext.c */
|
||||
extern void MC_(record_value_error) ( ThreadId tid, Int size );
|
||||
extern void MC_(record_user_error) ( ThreadId tid, Addr a, Bool isWrite,
|
||||
|
||||
@ -103,9 +103,6 @@ static void mc_wr_V4_SLOWLY ( Addr a, UInt vbytes );
|
||||
static void mc_wr_V2_SLOWLY ( Addr a, UInt vbytes );
|
||||
static void mc_wr_V1_SLOWLY ( Addr a, UInt vbytes );
|
||||
|
||||
static void mc_fpu_read_check_SLOWLY ( Addr addr, SizeT size );
|
||||
static void mc_fpu_write_check_SLOWLY ( Addr addr, SizeT size );
|
||||
|
||||
/*------------------------------------------------------------*/
/*--- Data defns.                                          ---*/
/*------------------------------------------------------------*/
@ -1362,243 +1359,6 @@ REGPARM(1) void MC_(helperc_complain_undef) ( HWord sz )
|
||||
}
|
||||
|
||||
|
||||
/* ---------------------------------------------------------------------
   FPU load and store checks, called from generated code.
   ------------------------------------------------------------------ */
REGPARM(2)
void MC_(fpu_read_check) ( Addr addr, SizeT size )
{
   /* Ensure the read area is both addressible and valid (ie,
      readable).  If there's an address error, don't report a value
      error too; but if there isn't an address error, check for a
      value error.

      Try to be reasonably fast on the common cases (aligned 4- and
      8-byte reads); wimp out and defer to mc_fpu_read_check_SLOWLY
      for everything else. */

   SecMap* sm;
   UInt    sm_off, v_off, a_off;
   Addr    addr4;

   PROF_EVENT(80);

#  ifdef VG_DEBUG_MEMORY
   /* Debug build: always take the bit-by-bit path. */
   mc_fpu_read_check_SLOWLY ( addr, size );
#  else

   if (size == 4) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
      PROF_EVENT(81);
      /* Properly aligned.  Look up the secondary map and check the
         A-bits byte covering this word; the fast path requires the
         entire byte to be VGM_BYTE_VALID, otherwise defer. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
      /* Properly aligned and addressible; now check the V-bits word. */
      v_off = addr & 0xFFFF;
      if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
         goto slow4;
      /* Properly aligned, addressible and with valid data. */
      return;
     slow4:
      mc_fpu_read_check_SLOWLY ( addr, 4 );
      return;
   }

   if (size == 8) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
      PROF_EVENT(82);
      /* Properly aligned.  Do it in two 4-byte halves. */
      addr4 = addr + 4;
      /* First half. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* First half properly aligned and addressible. */
      v_off = addr & 0xFFFF;
      if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
         goto slow8;
      /* Second half. */
      sm     = primary_map[addr4 >> 16];
      sm_off = addr4 & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* Second half properly aligned and addressible. */
      v_off = addr4 & 0xFFFF;
      if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
         goto slow8;
      /* Both halves properly aligned, addressible and with valid
         data. */
      return;
     slow8:
      mc_fpu_read_check_SLOWLY ( addr, 8 );
      return;
   }

   /* Can't be bothered to huff'n'puff to make these (allegedly) rare
      cases go quickly. */
   if (size == 2) {
      PROF_EVENT(83);
      mc_fpu_read_check_SLOWLY ( addr, 2 );
      return;
   }

   /* 16 = SSE; 10/28/108/512 = x87/FXSAVE-style operand sizes. */
   if (size == 16 /*SSE*/
       || size == 10 || size == 28 || size == 108 || size == 512) {
      PROF_EVENT(84);
      mc_fpu_read_check_SLOWLY ( addr, size );
      return;
   }

   /* Cast to Int: size is a SizeT (unsigned, possibly 64-bit) and
      passing it uncast to a %d conversion is undefined behaviour. */
   VG_(printf)("size is %d\n", (Int)size);
   VG_(tool_panic)("MC_(fpu_read_check): unhandled size");
#  endif
}
|
||||
|
||||
|
||||
REGPARM(2)
void MC_(fpu_write_check) ( Addr addr, SizeT size )
{
   /* Ensure the written area is addressible, and moan if otherwise.
      If it is addressible, mark its V-bits valid; otherwise mark them
      invalid.

      Fast paths for aligned 4- and 8-byte writes; everything else is
      deferred to mc_fpu_write_check_SLOWLY. */

   SecMap* sm;
   UInt    sm_off, v_off, a_off;
   Addr    addr4;

   PROF_EVENT(85);

#  ifdef VG_DEBUG_MEMORY
   /* Debug build: always take the bit-by-bit path. */
   mc_fpu_write_check_SLOWLY ( addr, size );
#  else

   if (size == 4) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
      PROF_EVENT(86);
      /* Properly aligned.  Fast path requires the whole A-bits byte
         covering this word to be VGM_BYTE_VALID. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
      /* Properly aligned and addressible.  Make valid. */
      v_off = addr & 0xFFFF;
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
      return;
     slow4:
      mc_fpu_write_check_SLOWLY ( addr, 4 );
      return;
   }

   if (size == 8) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
      PROF_EVENT(87);
      /* Properly aligned.  Do it in two 4-byte halves. */
      addr4 = addr + 4;
      /* First half. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* First half properly aligned and addressible.  Make valid. */
      v_off = addr & 0xFFFF;
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
      /* Second half. */
      sm     = primary_map[addr4 >> 16];
      sm_off = addr4 & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* Second half properly aligned and addressible.  Make valid. */
      v_off = addr4 & 0xFFFF;
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
      /* Properly aligned, addressible and with valid data. */
      return;
     slow8:
      mc_fpu_write_check_SLOWLY ( addr, 8 );
      return;
   }

   /* Can't be bothered to huff'n'puff to make these (allegedly) rare
      cases go quickly. */
   if (size == 2) {
      PROF_EVENT(88);
      mc_fpu_write_check_SLOWLY ( addr, 2 );
      return;
   }

   /* 16 = SSE; 10/28/108/512 = x87/FXSAVE-style operand sizes. */
   if (size == 16 /*SSE*/
       || size == 10 || size == 28 || size == 108 || size == 512) {
      PROF_EVENT(89);
      mc_fpu_write_check_SLOWLY ( addr, size );
      return;
   }

   /* Cast to Int: size is a SizeT (unsigned, possibly 64-bit) and
      passing it uncast to a %d conversion is undefined behaviour. */
   VG_(printf)("size is %d\n", (Int)size);
   VG_(tool_panic)("MC_(fpu_write_check): unhandled size");
#  endif
}
|
||||
|
||||
|
||||
/* ---------------------------------------------------------------------
   Slow, general cases for FPU load and store checks.
   ------------------------------------------------------------------ */
||||
|
||||
/* Generic version. Test for both addr and value errors, but if
|
||||
there's an addr error, don't report a value error even if it
|
||||
exists. */
|
||||
|
||||
void mc_fpu_read_check_SLOWLY ( Addr addr, SizeT size )
|
||||
{
|
||||
Int i;
|
||||
Bool aerr = False;
|
||||
Bool verr = False;
|
||||
PROF_EVENT(90);
|
||||
for (i = 0; i < size; i++) {
|
||||
PROF_EVENT(91);
|
||||
if (get_abit(addr+i) != VGM_BIT_VALID)
|
||||
aerr = True;
|
||||
if (get_vbyte(addr+i) != VGM_BYTE_VALID)
|
||||
verr = True;
|
||||
}
|
||||
|
||||
if (aerr) {
|
||||
MAC_(record_address_error)( VG_(get_current_tid)(), addr, size, False );
|
||||
} else {
|
||||
if (verr)
|
||||
MC_(record_value_error)( VG_(get_current_tid)(), size );
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Generic version. Test for addr errors. Valid addresses are
|
||||
given valid values, and invalid addresses invalid values. */
|
||||
|
||||
void mc_fpu_write_check_SLOWLY ( Addr addr, SizeT size )
|
||||
{
|
||||
Int i;
|
||||
Addr a_here;
|
||||
Bool a_ok;
|
||||
Bool aerr = False;
|
||||
PROF_EVENT(92);
|
||||
for (i = 0; i < size; i++) {
|
||||
PROF_EVENT(93);
|
||||
a_here = addr+i;
|
||||
a_ok = get_abit(a_here) == VGM_BIT_VALID;
|
||||
if (a_ok) {
|
||||
set_vbyte(a_here, VGM_BYTE_VALID);
|
||||
} else {
|
||||
set_vbyte(a_here, VGM_BYTE_INVALID);
|
||||
aerr = True;
|
||||
}
|
||||
}
|
||||
if (aerr) {
|
||||
MAC_(record_address_error)( VG_(get_current_tid)(), addr, size, True );
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/*------------------------------------------------------------*/
/*--- Metadata get/set functions, for client requests.     ---*/
/*------------------------------------------------------------*/
Loading…
x
Reference in New Issue
Block a user