Fix a bunch of 64-bit cases required for amd64.  Stop to ponder whether
there is a better way to handle the 'pessimising cast' family of
operations in such a way that Vex's back-end instruction selectors can
generate better code than they do now, with less verbosity and general
confusingness in the insn selectors.



git-svn-id: svn://svn.valgrind.org/valgrind/trunk@3536
Julian Seward 2005-04-20 22:31:26 +00:00
parent fbc1c4c2ff
commit 2d1062ccdb
3 changed files with 60 additions and 25 deletions
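
The substance of the change, shown in the diffs below: the sub-8-byte LOADV/STOREV helpers now pass vbits as host-word-sized UWord values rather than UInt, so the same declarations serve both 32-bit x86 and 64-bit amd64, while the 8-byte variants keep ULong. A minimal sketch of the resulting interface shapes, using hypothetical stand-in typedefs rather than Valgrind's real headers:

   #include <stdint.h>

   /* Hypothetical stand-ins for Valgrind's basic types: UWord is as wide
      as a host pointer (32 bits on x86, 64 bits on amd64); ULong is
      always 64 bits. */
   typedef uintptr_t UWord;
   typedef uint64_t  ULong;
   typedef uintptr_t Addr;

   /* Shape of the helper interface after this commit (names abbreviated;
      the real declarations, with VGA_REGPARM attributes, are in the
      first diff below). */
   extern UWord helperc_LOADV1  ( Addr a );
   extern UWord helperc_LOADV2  ( Addr a );
   extern UWord helperc_LOADV4  ( Addr a );
   extern ULong helperc_LOADV8  ( Addr a );

   extern void  helperc_STOREV1 ( Addr a, UWord vbits );
   extern void  helperc_STOREV2 ( Addr a, UWord vbits );
   extern void  helperc_STOREV4 ( Addr a, UWord vbits );
   extern void  helperc_STOREV8 ( Addr a, ULong vbits );

This also explains the (ULong)vbytes casts added in the second file: the slow-path store presumably takes a 64-bit vbits argument, so widening the (possibly 32-bit) UWord is now spelled out explicitly.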


@@ -61,13 +61,13 @@ extern void MC_(helperc_value_check1_fail) ( void );
 extern void MC_(helperc_value_check0_fail) ( void );
 extern VGA_REGPARM(1) void MC_(helperc_STOREV8) ( Addr, ULong );
-extern VGA_REGPARM(2) void MC_(helperc_STOREV4) ( Addr, UInt );
-extern VGA_REGPARM(2) void MC_(helperc_STOREV2) ( Addr, UInt );
-extern VGA_REGPARM(2) void MC_(helperc_STOREV1) ( Addr, UInt );
+extern VGA_REGPARM(2) void MC_(helperc_STOREV4) ( Addr, UWord );
+extern VGA_REGPARM(2) void MC_(helperc_STOREV2) ( Addr, UWord );
+extern VGA_REGPARM(2) void MC_(helperc_STOREV1) ( Addr, UWord );
-extern VGA_REGPARM(1) UInt MC_(helperc_LOADV1) ( Addr );
-extern VGA_REGPARM(1) UInt MC_(helperc_LOADV2) ( Addr );
-extern VGA_REGPARM(1) UInt MC_(helperc_LOADV4) ( Addr );
+extern VGA_REGPARM(1) UWord MC_(helperc_LOADV1) ( Addr );
+extern VGA_REGPARM(1) UWord MC_(helperc_LOADV2) ( Addr );
+extern VGA_REGPARM(1) UWord MC_(helperc_LOADV4) ( Addr );
 extern VGA_REGPARM(1) ULong MC_(helperc_LOADV8) ( Addr );
 /* Functions defined in mc_errcontext.c */


@@ -1193,8 +1193,8 @@ void mc_post_mem_write(CorePart part, ThreadId tid, Addr a, SizeT len)
 static void mc_post_reg_write ( CorePart part, ThreadId tid,
                                 OffT offset, SizeT size)
 {
-UChar area[512];
-tl_assert(size <= 512);
+UChar area[1024];
+tl_assert(size <= 1024);
 VG_(memset)(area, VGM_BYTE_VALID, size);
 VG_(set_shadow_regs_area)( tid, offset, size, area );
 }
@@ -1235,10 +1235,21 @@ static void mc_pre_reg_read ( CorePart part, ThreadId tid, Char* s,
 }
-//zz /*------------------------------------------------------------*/
-//zz /*--- Functions called directly from generated code. ---*/
-//zz /*------------------------------------------------------------*/
-//zz
+/*------------------------------------------------------------*/
+/*--- Functions called directly from generated code. ---*/
+/*------------------------------------------------------------*/
+/* Types: LOADV4, LOADV2, LOADV1 are:
+UWord fn ( Addr a )
+so they return 32-bits on 32-bit machines and 64-bits on
+64-bit machines. Addr has the same size as a host word.
+LOADV8 is always ULong fn ( Addr a )
+Similarly for STOREV1, STOREV2, STOREV4, the supplied vbits
+are a UWord, and for STOREV8 they are a ULong.
+*/
 //zz static __inline__ UInt rotateRight16 ( UInt x )
 //zz {
 //zz /* Amazingly, gcc turns this into a single rotate insn. */
@@ -1338,9 +1349,9 @@ void MC_(helperc_STOREV8) ( Addr a, ULong vbytes )
 /* ------------------------ Size = 4 ------------------------ */
 VGA_REGPARM(1)
-UInt MC_(helperc_LOADV4) ( Addr a )
+UWord MC_(helperc_LOADV4) ( Addr a )
 {
-return (UInt)mc_LOADVn_slow( a, 4, False/*littleendian*/ );
+return (UWord)mc_LOADVn_slow( a, 4, False/*littleendian*/ );
 //zz # ifdef VG_DEBUG_MEMORY
 //zz return mc_rd_V4_SLOWLY(a);
 //zz # else
@@ -1364,9 +1375,9 @@ UInt MC_(helperc_LOADV4) ( Addr a )
 }
 VGA_REGPARM(2)
-void MC_(helperc_STOREV4) ( Addr a, UInt vbytes )
+void MC_(helperc_STOREV4) ( Addr a, UWord vbytes )
 {
-mc_STOREVn_slow( a, 4, vbytes, False/*littleendian*/ );
+mc_STOREVn_slow( a, 4, (ULong)vbytes, False/*littleendian*/ );
 //zz # ifdef VG_DEBUG_MEMORY
 //zz mc_wr_V4_SLOWLY(a, vbytes);
 //zz # else
@@ -1392,9 +1403,9 @@ void MC_(helperc_STOREV4) ( Addr a, UInt vbytes )
 /* ------------------------ Size = 2 ------------------------ */
 VGA_REGPARM(1)
-UInt MC_(helperc_LOADV2) ( Addr a )
+UWord MC_(helperc_LOADV2) ( Addr a )
 {
-return (UInt)mc_LOADVn_slow( a, 2, False/*littleendian*/ );
+return (UWord)mc_LOADVn_slow( a, 2, False/*littleendian*/ );
 //zz # ifdef VG_DEBUG_MEMORY
 //zz return mc_rd_V2_SLOWLY(a);
 //zz # else
@@ -1416,9 +1427,9 @@ UInt MC_(helperc_LOADV2) ( Addr a )
 }
 VGA_REGPARM(2)
-void MC_(helperc_STOREV2) ( Addr a, UInt vbytes )
+void MC_(helperc_STOREV2) ( Addr a, UWord vbytes )
 {
-mc_STOREVn_slow( a, 2, vbytes, False/*littleendian*/ );
+mc_STOREVn_slow( a, 2, (ULong)vbytes, False/*littleendian*/ );
 //zz # ifdef VG_DEBUG_MEMORY
 //zz mc_wr_V2_SLOWLY(a, vbytes);
 //zz # else
@@ -1440,9 +1451,9 @@ void MC_(helperc_STOREV2) ( Addr a, UInt vbytes )
 /* ------------------------ Size = 1 ------------------------ */
 VGA_REGPARM(1)
-UInt MC_(helperc_LOADV1) ( Addr a )
+UWord MC_(helperc_LOADV1) ( Addr a )
 {
-return (UInt)mc_LOADVn_slow( a, 1, False/*littleendian*/ );
+return (UWord)mc_LOADVn_slow( a, 1, False/*littleendian*/ );
 //zz # ifdef VG_DEBUG_MEMORY
 //zz return mc_rd_V1_SLOWLY(a);
 //zz # else
@@ -1464,9 +1475,9 @@ UInt MC_(helperc_LOADV1) ( Addr a )
 }
 VGA_REGPARM(2)
-void MC_(helperc_STOREV1) ( Addr a, UInt vbytes )
+void MC_(helperc_STOREV1) ( Addr a, UWord vbytes )
 {
-mc_STOREVn_slow( a, 1, vbytes, False/*littleendian*/ );
+mc_STOREVn_slow( a, 1, (ULong)vbytes, False/*littleendian*/ );
 //zz # ifdef VG_DEBUG_MEMORY
 //zz mc_wr_V1_SLOWLY(a, vbytes);
 //zz # else


@@ -192,7 +192,8 @@ static IRType shadowType ( IRType ty )
 case Ity_I8:
 case Ity_I16:
 case Ity_I32:
-case Ity_I64: return ty;
+case Ity_I64:
+case Ity_I128: return ty;
 case Ity_F32: return Ity_I32;
 case Ity_F64: return Ity_I64;
 case Ity_V128: return Ity_V128;
@@ -1554,6 +1555,13 @@ IRAtom* expr2vbits_Binop ( MCEnv* mce,
 case Iop_32HLto64:
 return assignNew(mce, Ity_I64, binop(op, vatom1, vatom2));
+case Iop_MullS64:
+case Iop_MullU64: {
+IRAtom* vLo64 = mkLeft64(mce, mkUifU64(mce, vatom1,vatom2));
+IRAtom* vHi64 = mkPCastTo(mce, Ity_I64, vLo64);
+return assignNew(mce, Ity_I128, binop(Iop_64HLto128, vHi64, vLo64));
+}
 case Iop_MullS32:
 case Iop_MullU32: {
 IRAtom* vLo32 = mkLeft32(mce, mkUifU32(mce, vatom1,vatom2));
@@ -1750,6 +1758,8 @@ IRExpr* expr2vbits_Unop ( MCEnv* mce, IROp op, IRAtom* atom )
 case Iop_32Uto64:
 case Iop_V128to64:
 case Iop_V128HIto64:
+case Iop_128HIto64:
+case Iop_128to64:
 return assignNew(mce, Ity_I64, unop(op, vatom));
 case Iop_64to32:
@@ -1977,6 +1987,12 @@ IRExpr* zwidenToHostWord ( MCEnv* mce, IRAtom* vatom )
 case Ity_I8: return assignNew(mce, tyH, unop(Iop_8Uto32, vatom));
 default: goto unhandled;
 }
+} else
+if (tyH == Ity_I64) {
+switch (ty) {
+case Ity_I32: return assignNew(mce, tyH, unop(Iop_32Uto64, vatom));
+default: goto unhandled;
+}
 } else {
 goto unhandled;
 }
@@ -2399,6 +2415,14 @@ IRBB* TL_(instrument) ( IRBB* bb_in, VexGuestLayout* layout,
 VG_(tool_panic)("host/guest word size mismatch");
 }
+/* Check we're not completely nuts */
+tl_assert(sizeof(UWord) == sizeof(void*));
+tl_assert(sizeof(Word) == sizeof(void*));
+tl_assert(sizeof(ULong) == 8);
+tl_assert(sizeof(Long) == 8);
+tl_assert(sizeof(UInt) == 4);
+tl_assert(sizeof(Int) == 4);
 /* Set up BB */
 bb = emptyIRBB();
 bb->tyenv = dopyIRTypeEnv(bb_in->tyenv);
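
The 'pessimising cast' mentioned in the commit message appears in the new Iop_MullS64/Iop_MullU64 case above, which builds the shadow value of a 64x64->128 multiply from mkUifU64, mkLeft64 and mkPCastTo. A rough plain-C model of how I read those shadow operations; the model_* helpers are hypothetical illustrations, not part of memcheck, and a set bit in a vbits value means 'undefined':

   #include <stdint.h>

   /* UifU: a result bit is undefined if it is undefined in either operand. */
   static uint64_t model_UifU64 ( uint64_t v1, uint64_t v2 )
   {
      return v1 | v2;
   }

   /* Left: smear undefinedness upwards from the lowest undefined bit,
      approximating how carries propagate in additions and multiplies. */
   static uint64_t model_Left64 ( uint64_t v )
   {
      return v | (0ULL - v);
   }

   /* Pessimising cast: if any bit is undefined, the whole value is treated
      as undefined (all ones); otherwise it is fully defined (all zeroes). */
   static uint64_t model_PCast64 ( uint64_t v )
   {
      return v == 0 ? 0 : ~0ULL;
   }

   /* Shadow computation matching the new Iop_MullU64 case: the low 64 bits
      of the 128-bit result get the Left-smeared union of the operand vbits,
      and the high 64 bits get its pessimising cast. */
   static void model_Mull64_vbits ( uint64_t v1, uint64_t v2,
                                    uint64_t* vHi64, uint64_t* vLo64 )
   {
      uint64_t lo = model_Left64( model_UifU64(v1, v2) );
      *vLo64 = lo;
      *vHi64 = model_PCast64(lo);
   }

As I understand it, mkPCastTo lowers to a compare-against-zero followed by a widening, which is the kind of operation the commit message hopes the Vex instruction selectors could one day handle with less verbosity.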