Loads more x86 insn selector hacking.
git-svn-id: svn://svn.valgrind.org/vex/trunk@94
@@ -389,6 +389,17 @@ static IRExpr* mk_calculate_eflags_c ( void )
    Ity_Bit, and the thunk is only updated iff guard evaluates to 1 at
    run-time.  If guard is NULL, the update is always done. */
 
+static IRExpr* widenUto32 ( IRExpr* e )
+{
+   switch (typeOfIRExpr(irbb->tyenv,e)) {
+      case Ity_I32: return e;
+      case Ity_I16: return unop(Iop_16Uto32,e);
+      case Ity_I8:  return unop(Iop_8Uto32,e);
+      default: vpanic("widenUto32");
+   }
+}
+
 /* This is for add/sub/adc/sbb/and/or/xor, where we need to
    store the result value and the non-lvalue operand. */
@@ -413,8 +424,8 @@ static void setFlags_SRC_DST1 ( IROp op8,
    }
    stmt( IRStmt_Put( OFFB_CC_OP,  mkU32(ccOp)) );
    stmt( IRStmt_Put( OFFB_CC_SRC, logic ? mkU32(0)
-                                        : mkexpr(src)) );
-   stmt( IRStmt_Put( OFFB_CC_DST, mkexpr(dst1)) );
+                                        : widenUto32(mkexpr(src))) );
+   stmt( IRStmt_Put( OFFB_CC_DST, widenUto32(mkexpr(dst1))) );
 }
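Why the widening matters (editor's note, not commit text): the flag-thunk fields behind OFFB_CC_SRC and OFFB_CC_DST are 32-bit guest-state slots, so an 8- or 16-bit operand/result must be zero-extended before being Put there. For an 8-bit operation, with src and dst1 assumed to be Ity_I8 temporaries, the rewritten Puts are equivalent to this sketch:

   /* Sketch only: widenUto32 on an Ity_I8 expression expands to an
      Iop_8Uto32 unop, so the thunk always receives full 32-bit values. */
   stmt( IRStmt_Put( OFFB_CC_SRC, unop(Iop_8Uto32, mkexpr(src))  ) );
   stmt( IRStmt_Put( OFFB_CC_DST, unop(Iop_8Uto32, mkexpr(dst1)) ) );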
@@ -422,23 +433,13 @@ static void setFlags_SRC_DST1 ( IROp op8,
    result except shifted one bit less.  And then only when the guard
    says we can. */
-
-static IRExpr* widenUto32 ( IRExpr* e )
-{
-   switch (typeOfIRExpr(irbb->tyenv,e)) {
-      case Ity_I32: return e;
-      case Ity_I16: return unop(Iop_16Uto32,e);
-      case Ity_I8:  return unop(Iop_8Uto32,e);
-      default: vpanic("widenUto32");
-   }
-}
 
 static void setFlags_DSTus_DST1 ( IROp op8,
                                   IRTemp dstUS,
                                   IRTemp dst1,
                                   IRType ty,
                                   IRTemp guard )
 {
-   Int ccOp = ty==Ity_I8 ? 0 : (ty==Ity_I16 ? 1 : 2);
+   Int ccOp   = ty==Ity_I8 ? 0 : (ty==Ity_I16 ? 1 : 2);
 
    vassert(ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32);
    vassert(guard);
@@ -15,6 +15,119 @@
 #include "x86h_defs.h"
 
 
+/*---------------------------------------------------------*/
+/*--- Stuff for pattern matching on IR.  This isn't     ---*/
+/*--- x86 specific, and should be moved elsewhere.      ---*/
+/*---------------------------------------------------------*/
+
+#define DECLARE_PATTERN(_patt) \
+   static IRExpr* _patt = NULL
+
+#define DEFINE_PATTERN(_patt,_expr)                            \
+   do {                                                        \
+      if (!(_patt)) {                                          \
+         vassert(LibVEX_GetAllocMode() == AllocModeTEMPORARY); \
+         LibVEX_SetAllocMode(AllocModePERMANENT);              \
+         _patt = (_expr);                                      \
+         LibVEX_SetAllocMode(AllocModeTEMPORARY);              \
+         vassert(LibVEX_GetAllocMode() == AllocModeTEMPORARY); \
+      }                                                        \
+   } while (0)
+
+
+#define N_MATCH_BINDERS 4
+typedef
+   struct {
+      IRExpr* bindee[N_MATCH_BINDERS];
+   }
+   MatchInfo;
+
+
+static void setBindee ( MatchInfo* mi, Int n, IRExpr* bindee )
+{
+   if (n < 0 || n >= N_MATCH_BINDERS)
+      vpanic("setBindee: out of range index");
+   if (mi->bindee[n] != NULL)
+      vpanic("setBindee: bindee already set");
+   mi->bindee[n] = bindee;
+}
+
+static Bool matchWrk ( MatchInfo* mi, IRExpr* p/*attern*/, IRExpr* e/*xpr*/ )
+{
+   switch (p->tag) {
+      case Iex_Binder: /* aha, what we were looking for. */
+         setBindee(mi, p->Iex.Binder.binder, e);
+         return True;
+      case Iex_Unop:
+         if (e->tag != Iex_Unop) return False;
+         if (p->Iex.Unop.op != e->Iex.Unop.op) return False;
+         if (!matchWrk(mi, p->Iex.Unop.arg, e->Iex.Unop.arg))
+            return False;
+         return True;
+      case Iex_Binop:
+         if (e->tag != Iex_Binop) return False;
+         if (p->Iex.Binop.op != e->Iex.Binop.op) return False;
+         if (!matchWrk(mi, p->Iex.Binop.arg1, e->Iex.Binop.arg1))
+            return False;
+         if (!matchWrk(mi, p->Iex.Binop.arg2, e->Iex.Binop.arg2))
+            return False;
+         return True;
+      case Iex_Const:
+         if (e->tag != Iex_Const) return False;
+         switch (p->Iex.Const.con->tag) {
+            case Ico_U8:  return e->Iex.Const.con->tag==Ico_U8
+                             ? (p->Iex.Const.con->Ico.U8
+                                == e->Iex.Const.con->Ico.U8)
+                             : False;
+            case Ico_U16: return e->Iex.Const.con->tag==Ico_U16
+                             ? (p->Iex.Const.con->Ico.U16
+                                == e->Iex.Const.con->Ico.U16)
+                             : False;
+            case Ico_U32: return e->Iex.Const.con->tag==Ico_U32
+                             ? (p->Iex.Const.con->Ico.U32
+                                == e->Iex.Const.con->Ico.U32)
+                             : False;
+            case Ico_U64: return e->Iex.Const.con->tag==Ico_U64
+                             ? (p->Iex.Const.con->Ico.U64
+                                == e->Iex.Const.con->Ico.U64)
+                             : False;
+         }
+         vpanic("matchIRExpr.Iex_Const");
+         /*NOTREACHED*/
+      default:
+         ppIRExpr(p);
+         vpanic("match");
+   }
+}
+
+static Bool matchIRExpr ( MatchInfo* mi, IRExpr* p/*attern*/, IRExpr* e/*xpr*/ )
+{
+   Int i;
+   for (i = 0; i < N_MATCH_BINDERS; i++)
+      mi->bindee[i] = NULL;
+   return matchWrk(mi, p, e);
+}
+
+/*-----*/
+/* These are duplicated in x86toIR.c */
+static IRExpr* unop ( IROp op, IRExpr* a )
+{
+   return IRExpr_Unop(op, a);
+}
+
+static IRExpr* binop ( IROp op, IRExpr* a1, IRExpr* a2 )
+{
+   return IRExpr_Binop(op, a1, a2);
+}
+
+static IRExpr* bind ( Int binder )
+{
+   return IRExpr_Binder(binder);
+}
+
 
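To make the machinery above concrete, here is a hedged usage sketch (not part of the commit; the pattern name p_add_zero and the helper function are invented, and Iop_Add32 is assumed to exist in the IR op set). A pattern is declared once, built lazily in the permanent heap so it survives LibVEX_ClearTemporary, and then matched repeatedly; bind(0) captures whatever subexpression it matched in mi.bindee[0].

   /* Hypothetical usage sketch.  Recognise Add32(e, 0) and pull out e. */
   DECLARE_PATTERN(p_add_zero);

   static Bool is_add_of_zero ( IRExpr* e, IRExpr** out )
   {
      MatchInfo mi;
      DEFINE_PATTERN(p_add_zero,
         binop(Iop_Add32, bind(0), IRExpr_Const(IRConst_U32(0))));
      if (matchIRExpr(&mi, p_add_zero, e)) {
         *out = mi.bindee[0];   /* the subexpression bound by bind(0) */
         return True;
      }
      return False;
   }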
/*---------------------------------------------------------*/
/*--- ISelEnv                                           ---*/
/*---------------------------------------------------------*/
@@ -98,7 +211,7 @@ static X86RM* iselIntExpr_RM ( ISelEnv* env, IRExpr* e );
 static X86AMode* iselIntExpr_AMode ( ISelEnv* env, IRExpr* e );
 
 
-static X86Instr* mk_MOV_RR ( HReg src, HReg dst )
+static X86Instr* mk_MOVsd_RR ( HReg src, HReg dst )
 {
    vassert(hregClass(src) == HRcInt);
    vassert(hregClass(dst) == HRcInt);
@@ -120,16 +233,21 @@ static X86Instr* mk_MOVsd_RR ( HReg src, HReg dst )
 */
 static HReg iselIntExpr_R ( ISelEnv* env, IRExpr* e )
 {
+   MatchInfo mi;
+   DECLARE_PATTERN(p_32to1_then_1Uto8);
+
    vassert(e);
    IRType ty = typeOfIRExpr(env->type_env,e);
    vassert(ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8);
 
    switch (e->tag) {
 
+   /* --------- TEMPs --------- */
    case Iex_Tmp: {
       return lookupIRTemp(env, e->Iex.Tmp.tmp);
    }
 
+   /* --------- LOADs --------- */
    case Iex_LDle: {
       HReg dst = newVRegI(env);
       X86AMode* amode = iselIntExpr_AMode ( env, e->Iex.LDle.addr );
@@ -142,9 +260,14 @@ static HReg iselIntExpr_R ( ISelEnv* env, IRExpr* e )
          addInstr(env, X86Instr_LoadEX(2,False,amode,dst));
          return dst;
       }
+      if (ty == Ity_I8) {
+         addInstr(env, X86Instr_LoadEX(1,False,amode,dst));
+         return dst;
+      }
       break;
    }
 
+   /* --------- BINARY OPs --------- */
    case Iex_Binop: {
       X86AluOp   aluOp;
       X86ShiftOp shOp;
@@ -167,7 +290,7 @@ static HReg iselIntExpr_R ( ISelEnv* env, IRExpr* e )
         HReg dst = newVRegI(env);
         HReg reg = iselIntExpr_R(env, e->Iex.Binop.arg1);
         X86RMI* rmi = iselIntExpr_RMI(env, e->Iex.Binop.arg2);
-        addInstr(env, mk_MOV_RR(reg,dst));
+        addInstr(env, mk_MOVsd_RR(reg,dst));
         addInstr(env, X86Instr_Alu32R(aluOp, rmi, dst));
         return dst;
      }
@@ -182,22 +305,45 @@ static HReg iselIntExpr_R ( ISelEnv* env, IRExpr* e )
         HReg dst  = newVRegI(env);
         HReg regL = iselIntExpr_R(env, e->Iex.Binop.arg1);
         HReg regR = iselIntExpr_R(env, e->Iex.Binop.arg2);
-        addInstr(env, mk_MOV_RR(regL,dst));
-        addInstr(env, mk_MOV_RR(regR,hregX86_ECX()));
+        addInstr(env, mk_MOVsd_RR(regL,dst));
+        addInstr(env, mk_MOVsd_RR(regR,hregX86_ECX()));
         addInstr(env, X86Instr_Sh32(shOp, 0/* %cl */, X86RM_Reg(dst)));
         return dst;
      }
      break;
   }
 
+   /* --------- UNARY OPs --------- */
    case Iex_Unop: {
+      /* 1Uto8(32to1(expr32)) */
+      DEFINE_PATTERN(p_32to1_then_1Uto8,
+                     unop(Iop_1Uto8,unop(Iop_32to1,bind(0))));
+      if (matchIRExpr(&mi,p_32to1_then_1Uto8,e)) {
+         IRExpr* expr32 = mi.bindee[0];
+         HReg dst = newVRegI(env);
+         HReg src = iselIntExpr_R(env, expr32);
+         addInstr(env, mk_MOVsd_RR(src,dst) );
+         addInstr(env, X86Instr_Alu32R(Xalu_AND,
+                                       X86RMI_Imm(1), dst));
+         return dst;
+      }
+
       switch (e->Iex.Unop.op) {
-         case Iop_8Uto32: {
+         case Iop_8Uto32:
+         case Iop_16Uto32: {
            HReg dst = newVRegI(env);
           HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
-          addInstr(env, mk_MOV_RR(src,dst) );
+          UInt mask = e->Iex.Unop.op==Iop_8Uto32 ? 0xFF : 0xFFFF;
+          addInstr(env, mk_MOVsd_RR(src,dst) );
           addInstr(env, X86Instr_Alu32R(Xalu_AND,
-                                        X86RMI_Imm(0xFF), dst));
+                                        X86RMI_Imm(mask), dst));
           return dst;
        }
+       case Iop_Not32: {
+          HReg dst = newVRegI(env);
+          HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+          addInstr(env, mk_MOVsd_RR(src,dst) );
+          addInstr(env, X86Instr_Not32(X86RM_Reg(dst)));
+          return dst;
+       }
        default:
@@ -206,6 +352,7 @@ static HReg iselIntExpr_R ( ISelEnv* env, IRExpr* e )
      break;
   }
 
+   /* --------- GETs --------- */
    case Iex_Get: {
       if (ty == Ity_I32) {
          HReg dst = newVRegI(env);
@@ -227,6 +374,7 @@ static HReg iselIntExpr_R ( ISelEnv* env, IRExpr* e )
       break;
    }
 
+   /* --------- CCALLs --------- */
    case Iex_CCall: {
       Int  i, nargs;
       UInt target;
@@ -258,6 +406,7 @@ static HReg iselIntExpr_R ( ISelEnv* env, IRExpr* e )
       return hregX86_EAX();
    }
 
+   /* --------- LITERALs --------- */
    /* 32/16/8-bit literals */
    case Iex_Const: {
       X86RMI* rmi = iselIntExpr_RMI ( env, e );
@@ -266,13 +415,14 @@ static HReg iselIntExpr_R ( ISelEnv* env, IRExpr* e )
       return r;
    }
 
+   /* --------- MULTIPLEXes --------- */
    case Iex_Mux0X: {
      if (ty == Ity_I32
          && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I8) {
        HReg   rX  = iselIntExpr_R(env, e->Iex.Mux0X.exprX);
        X86RM* r0  = iselIntExpr_RM(env, e->Iex.Mux0X.expr0);
        HReg   dst = newVRegI(env);
-       addInstr(env, mk_MOV_RR(rX,dst));
+       addInstr(env, mk_MOVsd_RR(rX,dst));
        HReg r8 = iselIntExpr_R(env, e->Iex.Mux0X.cond);
        addInstr(env, X86Instr_Alu32R(Xalu_TEST,
                                      X86RMI_Imm(0xFF), r8));
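For orientation (editor's sketch, not commit text), the Mux0X selection above boils down to a test of cond's low byte followed by a conditional move, something like:

   /* Approximate shape of the emitted code (register names invented):
         movl   rX, dst      ; dst = exprX
         testl  $0xFF, r8    ; examine low 8 bits of cond
         cmovzl r0, dst      ; if cond was zero, dst = expr0          */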
@@ -431,6 +581,46 @@ static X86RM* iselIntExpr_RM ( ISelEnv* env, IRExpr* e )
 }
 
 
+/* Generate code to evaluate a bit-typed expression, returning the
+   condition code which would hold when the expression notionally
+   returns 1. */
+
+static X86CondCode iselCondCode ( ISelEnv* env, IRExpr* e )
+{
+   MatchInfo mi;
+   DECLARE_PATTERN(p_32to1);
+   DECLARE_PATTERN(p_eq32_zero);
+
+   vassert(e);
+   vassert(typeOfIRExpr(env->type_env,e) == Ity_Bit);
+
+   /* pattern: 32to1(expr32) */
+   DEFINE_PATTERN(p_32to1,
+      unop(Iop_32to1,bind(0))
+   );
+   if (matchIRExpr(&mi,p_32to1,e)) {
+      HReg src = iselIntExpr_R(env, mi.bindee[0]);
+      HReg dst = newVRegI(env);
+      addInstr(env, mk_MOVsd_RR(src,dst));
+      addInstr(env, X86Instr_Alu32R(Xalu_AND,X86RMI_Imm(1),dst));
+      return Xcc_NZ;
+   }
+
+   /* pattern: CmpEQ32(expr32,0) */
+   DEFINE_PATTERN(p_eq32_zero,
+      binop( Iop_CmpEQ32, bind(0), IRExpr_Const(IRConst_U32(0)) )
+   );
+   if (matchIRExpr(&mi,p_eq32_zero,e)) {
+      HReg src = iselIntExpr_R(env, mi.bindee[0]);
+      addInstr(env, X86Instr_Alu32R(Xalu_CMP,X86RMI_Imm(0),src));
+      return Xcc_Z;
+   }
+
+   ppIRExpr(e);
+   vpanic("iselCondCode");
+}
 
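A hedged sketch of how a caller is expected to drive iselCondCode (the temporary t and the jump target are invented for illustration; the rewritten Ist_Exit case below does exactly this):

   /* For e = 32to1(t), iselCondCode emits "movl t,tmp ; andl $1,tmp"
      and returns Xcc_NZ; the caller then branches on that code. */
   X86CondCode cc = iselCondCode(env, unop(Iop_32to1, IRExpr_Tmp(t)));
   addInstr(env, X86Instr_Goto(cc, X86RI_Imm(0x8048123)));  /* invented target */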
/*---------------------------------------------------------*/
/*--- ISEL: Statements                                  ---*/
/*---------------------------------------------------------*/

@@ -450,10 +640,17 @@ static void iselStmt ( ISelEnv* env, IRStmt* stmt )
       vassert(tya == Ity_I32);
       if (tyd == Ity_I32) {
          X86AMode* am = iselIntExpr_AMode(env, stmt->Ist.STle.addr);
-         X86RI* ri = iselIntExpr_RI(env, stmt->Ist.STle.data);
+         X86RI*    ri = iselIntExpr_RI(env, stmt->Ist.STle.data);
          addInstr(env, X86Instr_Alu32M(Xalu_MOV,ri,am));
          return;
       }
+      if (tyd == Ity_I8) {
+         X86AMode* am = iselIntExpr_AMode(env, stmt->Ist.STle.addr);
+         HReg      r  = iselIntExpr_R(env, stmt->Ist.STle.data);
+         addInstr(env, X86Instr_Store(1,r,am));
+         return;
+      }
+
       break;
    }
@@ -471,6 +668,16 @@ static void iselStmt ( ISelEnv* env, IRStmt* stmt )
                       ));
          return;
       }
+      if (ty == Ity_I8) {
+         /* We're going to write to the guest state in memory, so
+            compute the RHS into a register. */
+         HReg r = iselIntExpr_R(env, stmt->Ist.Put.expr);
+         addInstr(env, X86Instr_Store(
+                          1,r,
+                          X86AMode_IR(stmt->Ist.Put.offset,
+                                      hregX86_EBP())));
+         return;
+      }
       break;
    }
@@ -489,19 +696,10 @@ static void iselStmt ( ISelEnv* env, IRStmt* stmt )
    case Ist_Exit: {
       if (stmt->Ist.Exit.dst->tag != Ico_U32)
          vpanic("isel_x86: Ist_Exit: dst is not a 32-bit value");
-      /* For the moment, only handle conditions of the form
-         32to1(...). */
-      IRExpr* cond = stmt->Ist.Exit.cond;
-      if (cond->tag == Iex_Unop && cond->Iex.Unop.op == Iop_32to1) {
-         cond = cond->Iex.Unop.arg;
-      } else {
-         break; /* give up */
-      }
-      HReg reg = iselIntExpr_R( env, cond );
-      /* Set the Z flag -- as the inverse of the lowest bit of cond */
-      addInstr(env, X86Instr_Alu32R(Xalu_AND,X86RMI_Imm(1),reg));
-      addInstr(env, X86Instr_GotoNZ(
-                       True, X86RI_Imm(stmt->Ist.Exit.dst->Ico.U32)));
-
+      X86RI*      dst = iselIntExpr_RI(env, IRExpr_Const(stmt->Ist.Exit.dst));
+      X86CondCode cc  = iselCondCode(env,stmt->Ist.Exit.cond);
+      addInstr(env, X86Instr_Goto(cc, dst));
       return;
    }
@@ -524,7 +722,7 @@ static void iselNext ( ISelEnv* env, IRExpr* next, IRJumpKind jk )
    vex_printf("\n");
 
    ri = iselIntExpr_RI(env, next);
-   addInstr(env, X86Instr_GotoNZ(False,ri));
+   addInstr(env, X86Instr_Goto(Xcc_ALWAYS,ri));
 }
@@ -67,6 +67,33 @@ void getAllocableRegs_X86 ( Int* nregs, HReg** arr )
 }
 
 
+/* --------- Condition codes, Intel encoding. --------- */
+
+void ppX86CondCode ( X86CondCode cond )
+{
+   switch (cond) {
+      case Xcc_O:      vex_printf("o");      break;
+      case Xcc_NO:     vex_printf("no");     break;
+      case Xcc_B:      vex_printf("b");      break;
+      case Xcc_NB:     vex_printf("nb");     break;
+      case Xcc_Z:      vex_printf("z");      break;
+      case Xcc_NZ:     vex_printf("nz");     break;
+      case Xcc_BE:     vex_printf("be");     break;
+      case Xcc_NBE:    vex_printf("nbe");    break;
+      case Xcc_S:      vex_printf("s");      break;
+      case Xcc_NS:     vex_printf("ns");     break;
+      case Xcc_P:      vex_printf("p");      break;
+      case Xcc_NP:     vex_printf("np");     break;
+      case Xcc_L:      vex_printf("l");      break;
+      case Xcc_NL:     vex_printf("nl");     break;
+      case Xcc_LE:     vex_printf("le");     break;
+      case Xcc_NLE:    vex_printf("nle");    break;
+      case Xcc_ALWAYS: vex_printf("ALWAYS"); break;
+      default: vpanic("ppX86CondCode");
+   }
+}
+
 
 /* --------- X86AMode: memory address expressions. --------- */
 
 X86AMode* X86AMode_IR ( UInt imm32, HReg reg ) {
@@ -76,7 +103,6 @@ X86AMode* X86AMode_IR ( UInt imm32, HReg reg ) {
    am->Xam.IR.reg = reg;
    return am;
 }
-
 X86AMode* X86AMode_IRRS ( UInt imm32, HReg base, HReg index, Int shift ) {
    X86AMode* am = LibVEX_Alloc(sizeof(X86AMode));
    am->tag = Xam_IRRS;
@@ -143,14 +169,12 @@ X86RMI* X86RMI_Imm ( UInt imm32 ) {
    op->Xrmi.Imm.imm32 = imm32;
    return op;
 }
-
 X86RMI* X86RMI_Reg ( HReg reg ) {
    X86RMI* op = LibVEX_Alloc(sizeof(X86RMI));
    op->tag = Xrmi_Reg;
    op->Xrmi.Reg.reg = reg;
    return op;
 }
-
 X86RMI* X86RMI_Mem ( X86AMode* am ) {
    X86RMI* op = LibVEX_Alloc(sizeof(X86RMI));
    op->tag = Xrmi_Mem;
@@ -216,7 +240,6 @@ X86RI* X86RI_Imm ( UInt imm32 ) {
    op->Xri.Imm.imm32 = imm32;
    return op;
 }
-
 X86RI* X86RI_Reg ( HReg reg ) {
    X86RI* op = LibVEX_Alloc(sizeof(X86RI));
    op->tag = Xri_Reg;
@@ -273,7 +296,6 @@ X86RM* X86RM_Reg ( HReg reg ) {
    op->Xrm.Reg.reg = reg;
    return op;
 }
-
 X86RM* X86RM_Mem ( X86AMode* am ) {
    X86RM* op = LibVEX_Alloc(sizeof(X86RM));
    op->tag = Xrm_Mem;
@@ -370,7 +392,6 @@ X86Instr* X86Instr_Alu32R ( X86AluOp op, X86RMI* src, HReg dst ) {
    i->Xin.Alu32R.dst = dst;
    return i;
 }
-
 X86Instr* X86Instr_Alu32M ( X86AluOp op, X86RI* src, X86AMode* dst ) {
    X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
    i->tag = Xin_Alu32M;
@@ -379,7 +400,12 @@ X86Instr* X86Instr_Alu32M ( X86AluOp op, X86RI* src, X86AMode* dst ) {
    i->Xin.Alu32M.dst = dst;
    return i;
 }
-
+X86Instr* X86Instr_Not32 ( X86RM* dst ) {
+   X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+   i->tag = Xin_Not32;
+   i->Xin.Not32.dst = dst;
+   return i;
+}
 X86Instr* X86Instr_Sh32 ( X86ShiftOp op, UInt src, X86RM* dst ) {
    X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
    i->tag = Xin_Sh32;
@@ -388,29 +414,25 @@ X86Instr* X86Instr_Sh32 ( X86ShiftOp op, UInt src, X86RM* dst ) {
    i->Xin.Sh32.dst = dst;
    return i;
 }
-
 X86Instr* X86Instr_Push( X86RMI* src ) {
    X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
    i->tag = Xin_Push;
    i->Xin.Push.src = src;
    return i;
 }
-
 X86Instr* X86Instr_Call ( HReg target ) {
    X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
    i->tag = Xin_Call;
    i->Xin.Call.target = target;
    return i;
 }
-
-X86Instr* X86Instr_GotoNZ ( Bool onlyWhenNZ, X86RI* dst ) {
-   X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
-   i->tag = Xin_GotoNZ;
-   i->Xin.GotoNZ.onlyWhenNZ = onlyWhenNZ;
-   i->Xin.GotoNZ.dst = dst;
+X86Instr* X86Instr_Goto ( X86CondCode cond, X86RI* dst ) {
+   X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+   i->tag = Xin_Goto;
+   i->Xin.Goto.cond = cond;
+   i->Xin.Goto.dst = dst;
    return i;
 }
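Usage sketch (editor's illustration, not commit text; the operands are invented): the single Goto constructor now covers both the old unconditional and conditional cases.

   /* Unconditional block exit: always load dst into %eax and return. */
   addInstr(env, X86Instr_Goto(Xcc_ALWAYS, X86RI_Imm(0x8048000)));
   /* Conditional exit: taken only when the Z flag is clear. */
   addInstr(env, X86Instr_Goto(Xcc_NZ, ri));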
 X86Instr* X86Instr_CMovZ ( X86RM* src, HReg dst ) {
    X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
    i->tag = Xin_CMovZ;
@@ -418,7 +440,6 @@ X86Instr* X86Instr_CMovZ ( X86RM* src, HReg dst ) {
    i->Xin.CMovZ.dst = dst;
    return i;
 }
-
 X86Instr* X86Instr_LoadEX ( UChar szSmall, Bool syned,
                             X86AMode* src, HReg dst ) {
    X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
@@ -430,6 +451,15 @@ X86Instr* X86Instr_LoadEX ( UChar szSmall, Bool syned,
    vassert(szSmall == 1 || szSmall == 2);
    return i;
 }
+X86Instr* X86Instr_Store ( UChar sz, HReg src, X86AMode* dst ) {
+   X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
+   i->tag = Xin_Store;
+   i->Xin.Store.sz  = sz;
+   i->Xin.Store.src = src;
+   i->Xin.Store.dst = dst;
+   vassert(sz == 1 || sz == 2);
+   return i;
+}
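A brief aside on why small stores get their own instruction (editor's note): on x86 a byte store can only source %al/%bl/%cl/%dl, a constraint the register allocator must eventually respect, so 8/16-bit stores are kept separate from Alu32M. A hedged usage sketch (operands invented):

   /* Store the low byte of r at offset 1 from %ebp; ppX86Instr would
      render this roughly as "movb r,1(%ebp)". */
   addInstr(env, X86Instr_Store(1, r, X86AMode_IR(1, hregX86_EBP())));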
 
 void ppX86Instr ( X86Instr* i ) {
@@ -448,6 +478,10 @@ void ppX86Instr ( X86Instr* i ) {
         vex_printf(",");
         ppX86AMode(i->Xin.Alu32M.dst);
         return;
+      case Xin_Not32:
+         vex_printf("notl ");
+         ppX86RM(i->Xin.Not32.dst);
+         return;
      case Xin_Sh32:
         ppX86ShiftOp(i->Xin.Sh32.op);
         vex_printf("l ");
@@ -465,15 +499,17 @@ void ppX86Instr ( X86Instr* i ) {
         vex_printf("call *");
         ppHRegX86(i->Xin.Call.target);
         break;
-     case Xin_GotoNZ:
-        if (i->Xin.GotoNZ.onlyWhenNZ) {
-           vex_printf("if (%%eflags.Z) { movl ");
-           ppX86RI(i->Xin.GotoNZ.dst);
-           vex_printf(",%%eax ; ret }");
-        } else {
+     case Xin_Goto:
+        if (i->Xin.Goto.cond == Xcc_ALWAYS) {
           vex_printf("movl ");
-          ppX86RI(i->Xin.GotoNZ.dst);
+          ppX86RI(i->Xin.Goto.dst);
           vex_printf(",%%eax ; ret");
+        } else {
+           vex_printf("if (%%eflags.");
+           ppX86CondCode(i->Xin.Goto.cond);
+           vex_printf(") { movl ");
+           ppX86RI(i->Xin.Goto.dst);
+           vex_printf(",%%eax ; ret }");
        }
        return;
     case Xin_CMovZ:
@@ -490,6 +526,12 @@ void ppX86Instr ( X86Instr* i ) {
        vex_printf(",");
        ppHRegX86(i->Xin.LoadEX.dst);
        return;
+     case Xin_Store:
+        vex_printf("mov%c ", i->Xin.Store.sz==1 ? 'b' : 'w');
+        ppHRegX86(i->Xin.Store.src);
+        vex_printf(",");
+        ppX86AMode(i->Xin.Store.dst);
+        return;
     default:
        vpanic("ppX86Instr");
     }
@@ -535,8 +577,8 @@ void getRegUsage_X86Instr (HRegUsage* u, X86Instr* i)
         addHRegUse(u, HRmWrite, hregX86_ECX());
         addHRegUse(u, HRmWrite, hregX86_EDX());
         return;
-      case Xin_GotoNZ:
-         addRegUsage_X86RI(u, i->Xin.GotoNZ.dst);
+      case Xin_Goto:
+         addRegUsage_X86RI(u, i->Xin.Goto.dst);
         addHRegUse(u, HRmWrite, hregX86_EAX());
         return;
      default:
@@ -559,8 +601,8 @@ void mapRegs_X86Instr (HRegRemap* m, X86Instr* i)
      case Xin_Sh32:
         mapRegs_X86RM(m, i->Xin.Sh32.dst);
         return;
-      case Xin_GotoNZ:
-         mapRegs_X86RI(m, i->Xin.GotoNZ.dst);
+      case Xin_Goto:
+         mapRegs_X86RI(m, i->Xin.Goto.dst);
        return;
     default:
        ppX86Instr(i);
@@ -28,6 +28,41 @@ extern HReg hregX86_ESI ( void );
 extern HReg hregX86_EDI ( void );
 
 
+/* --------- Condition codes, Intel encoding. --------- */
+
+typedef
+   enum {
+      Xcc_O      = 0,  /* overflow           */
+      Xcc_NO     = 1,  /* no overflow        */
+
+      Xcc_B      = 2,  /* below              */
+      Xcc_NB     = 3,  /* not below          */
+
+      Xcc_Z      = 4,  /* zero               */
+      Xcc_NZ     = 5,  /* not zero           */
+
+      Xcc_BE     = 6,  /* below or equal     */
+      Xcc_NBE    = 7,  /* not below or equal */
+
+      Xcc_S      = 8,  /* negative           */
+      Xcc_NS     = 9,  /* not negative       */
+
+      Xcc_P      = 10, /* parity even        */
+      Xcc_NP     = 11, /* not parity even    */
+
+      Xcc_L      = 12, /* less               */
+      Xcc_NL     = 13, /* not less           */
+
+      Xcc_LE     = 14, /* less or equal      */
+      Xcc_NLE    = 15, /* not less or equal  */
+
+      Xcc_ALWAYS = 16  /* the usual hack     */
+   }
+   X86CondCode;
+
+extern void ppX86CondCode ( X86CondCode );
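The values 0..15 follow the Intel condition-code ("tttn") encoding, which is what makes them convenient: a later emitter can splice a code straight into an opcode. A sketch of the arithmetic (not part of this commit):

   /* Short-form Jcc is 0x70 + cond; e.g. Xcc_NZ (5) gives 0x75, jnz rel8.
      The two-byte near form is 0x0F, 0x80 + cond. */
   UChar jcc_short_opcode = 0x70 + (UChar)Xcc_NZ;   /* == 0x75 */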
 
 
 /* --------- Memory address expressions (amodes). --------- */
 
 typedef
@@ -190,12 +225,14 @@ typedef
   enum {
      Xin_Alu32R,    /* 32-bit mov/arith/logical, dst=REG */
      Xin_Alu32M,    /* 32-bit mov/arith/logical, dst=MEM */
+     Xin_Not32,     /* 32-bit not */
      Xin_Sh32,      /* 32-bit shift/rotate, dst=REG or MEM */
      Xin_Push,      /* push (32-bit?) value on stack */
      Xin_Call,      /* call to address in register */
-     Xin_GotoNZ,    /* conditional/unconditional jmp to dst */
+     Xin_Goto,      /* conditional/unconditional jmp to dst */
      Xin_CMovZ,     /* conditional move when Z flag set */
-     Xin_LoadEX     /* mov{s,z}{b,w}l from mem to reg */
+     Xin_LoadEX,    /* mov{s,z}{b,w}l from mem to reg */
+     Xin_Store      /* store 16/8 bit value in memory */
   }
   X86InstrTag;
@@ -215,6 +252,9 @@ typedef
         X86RI*    src;
         X86AMode* dst;
      } Alu32M;
+     struct {
+        X86RM* dst;
+     } Not32;
      struct {
        X86ShiftOp op;
        UInt       src;  /* shift amount, or 0 means %cl */
@@ -226,12 +266,12 @@ typedef
      struct {
        HReg target;
      } Call;
-     /* Pseudo-insn.  Goto dst, optionally only when Z flag is
-        clear. */
+     /* Pseudo-insn.  Goto dst, on given condition (which could be
+        Xcc_ALWAYS). */
      struct {
-        Bool   onlyWhenNZ;
-        X86RI* dst;
-     } GotoNZ;
+        X86CondCode cond;
+        X86RI*      dst;
+     } Goto;
      /* Mov src to dst (both 32-bit regs?) when the Z flag is
        set. */
      struct {
@@ -245,20 +285,28 @@ typedef
        X86AMode* src;
        HReg      dst;
      } LoadEX;
+     /* 16/8 bit stores, which are troublesome (particularly
+        8-bit) */
+     struct {
+        UChar     sz; /* only 1 or 2 */
+        HReg      src;
+        X86AMode* dst;
+     } Store;
   } Xin;
 }
 X86Instr;
 
 extern X86Instr* X86Instr_Alu32R ( X86AluOp, X86RMI*, HReg );
 extern X86Instr* X86Instr_Alu32M ( X86AluOp, X86RI*, X86AMode* );
+extern X86Instr* X86Instr_Not32  ( X86RM* dst );
 extern X86Instr* X86Instr_Sh32   ( X86ShiftOp, UInt, X86RM* );
 extern X86Instr* X86Instr_Push   ( X86RMI* );
 extern X86Instr* X86Instr_Call   ( HReg );
-extern X86Instr* X86Instr_GotoNZ ( Bool onlyWhenNZ, X86RI* dst );
+extern X86Instr* X86Instr_Goto   ( X86CondCode cond, X86RI* dst );
 extern X86Instr* X86Instr_CMovZ  ( X86RM* src, HReg dst );
 extern X86Instr* X86Instr_LoadEX ( UChar szSmall, Bool syned,
                                    X86AMode* src, HReg dst );
+extern X86Instr* X86Instr_Store  ( UChar sz, HReg src, X86AMode* dst );
 
 extern void ppX86Instr ( X86Instr* );
@@ -120,6 +120,9 @@ void ppIRExpr ( IRExpr* e )
 {
    Int i;
    switch (e->tag) {
+      case Iex_Binder:
+         vex_printf("BIND-%d", e->Iex.Binder.binder);
+         break;
      case Iex_Get:
        vex_printf( "GET(%d,", e->Iex.Get.offset);
        ppIRType(e->Iex.Get.ty);
@@ -294,6 +297,12 @@ IRConst* IRConst_U64 ( ULong u64 )
 
 /* Constructors -- IRExpr */
 
+IRExpr* IRExpr_Binder ( Int binder ) {
+   IRExpr* e = LibVEX_Alloc(sizeof(IRExpr));
+   e->tag = Iex_Binder;
+   e->Iex.Binder.binder = binder;
+   return e;
+}
 IRExpr* IRExpr_Get ( Int off, IRType ty ) {
    IRExpr* e = LibVEX_Alloc(sizeof(IRExpr));
    e->tag = Iex_Get;
@@ -566,6 +575,8 @@ IRType typeOfIRExpr ( IRTypeEnv* tyenv, IRExpr* e )
          return e->Iex.CCall.retty;
       case Iex_Mux0X:
          return typeOfIRExpr(tyenv, e->Iex.Mux0X.expr0);
+      case Iex_Binder:
+         vpanic("typeOfIRExpr: Binder is not a valid expression");
       default:
          ppIRExpr(e);
         vpanic("typeOfIRExpr");
@@ -39,17 +39,19 @@ void LibVEX_Init (
 {
    vassert(!vex_initdone);
    vassert(failure_exit);
-   vex_failure_exit = failure_exit;
    vassert(log_bytes);
-   vex_log_bytes = log_bytes;
    vassert(debuglevel >= 0);
-   vex_debuglevel = debuglevel;
    vassert(verbosity >= 0);
-   vex_verbosity = verbosity;
-   vex_valgrind_support = valgrind_support;
    vassert(guest_insns_per_bb >= 1 && guest_insns_per_bb <= 100);
 
+   vex_failure_exit = failure_exit;
+   vex_log_bytes = log_bytes;
+   vex_debuglevel = debuglevel;
+   vex_verbosity = verbosity;
+   vex_valgrind_support = valgrind_support;
    vex_guest_insns_per_bb = guest_insns_per_bb;
-   vex_initdone = True;
+
+   vex_initdone = True;
+   LibVEX_SetAllocMode ( AllocModeTEMPORARY );
 }
@@ -80,15 +82,16 @@ TranslateResult LibVEX_Translate (
    /* Stuff we need to know for reg-alloc. */
    HReg* available_real_regs;
    Int   n_available_real_regs;
-   Bool (*isMove) (HInstr*, HReg*, HReg*);
-   void (*getRegUsage) (HRegUsage*, HInstr*);
-   void (*mapRegs) (HRegRemap*, HInstr*);
-   HInstr* (*genSpill) ( HReg, Int );
-   HInstr* (*genReload) ( HReg, Int );
-   void (*ppInstr) ( HInstr* );
-   void (*ppReg) ( HReg );
-   HInstrArray* (*iselBB) ( IRBB* );
-   IRBB* (*bbToIR) ( UChar*, Addr64, Int*, Bool(*)(Addr64), Bool );
+   Bool         (*isMove)      (HInstr*, HReg*, HReg*);
+   void         (*getRegUsage) (HRegUsage*, HInstr*);
+   void         (*mapRegs)     (HRegRemap*, HInstr*);
+   HInstr*      (*genSpill)    ( HReg, Int );
+   HInstr*      (*genReload)   ( HReg, Int );
+   void         (*ppInstr)     ( HInstr* );
+   void         (*ppReg)       ( HReg );
+   HInstrArray* (*iselBB)      ( IRBB* );
+   IRBB*        (*bbToIR)      ( UChar*, Addr64, Int*,
+                                 Bool(*)(Addr64), Bool );
 
    Bool host_is_bigendian = False;
    IRBB* irbb;
@@ -97,7 +100,7 @@ TranslateResult LibVEX_Translate (
    Int i;
 
    vassert(vex_initdone);
-   LibVEX_Clear(False);
+   LibVEX_ClearTemporary(False);
 
    /* First off, check that the guest and host insn sets
       are supported. */
@@ -135,7 +138,7 @@ TranslateResult LibVEX_Translate (
 
    if (irbb == NULL) {
       /* Access failure. */
-      LibVEX_Clear(False);
+      LibVEX_ClearTemporary(False);
      return TransAccessFail;
    }
    sanityCheckIRBB(irbb, Ity_I32);
@@ -146,7 +149,7 @@ TranslateResult LibVEX_Translate (
 
    /* Turn it into virtual-registerised code. */
    vcode = iselBB ( irbb );
-LibVEX_Clear(True); return TransOK;
+LibVEX_ClearTemporary(True); return TransOK;
 
    vex_printf("\n-------- Virtual registerised code --------\n");
    for (i = 0; i < vcode->arr_used; i++) {
@@ -170,7 +173,7 @@ LibVEX_Clear(True); return TransOK;
    vex_printf("\n");
 
    /* Assemble, etc. */
-   LibVEX_Clear(True);
+   LibVEX_ClearTemporary(True);
 
    return TransOK;
 }
@@ -24,19 +24,41 @@
    MByte/sec.  Once the size increases enough to fall out of the cache
    into memory, the rate falls by about a factor of 3.
 */
-#define N_STORAGE_BYTES 50000
+#define N_TEMPORARY_BYTES 50000
+
+static Char temporary[N_TEMPORARY_BYTES];
+static Int  temporary_used = 0;
+
+#define N_PERMANENT_BYTES 1000
+
+static Char permanent[N_PERMANENT_BYTES];
+static Int  permanent_used = 0;
 
-static Char storage[N_STORAGE_BYTES];
-static Int  storage_used = 0;
 
 /* Gather statistics. */
-static Int storage_bytes_allocd = 0;
-static Int storage_count_allocs = 0;
+static Int temporary_bytes_allocd = 0;
+static Int temporary_count_allocs = 0;
 
-static ULong storage_bytes_allocd_TOT = 0;
-static ULong storage_count_allocs_TOT = 0;
+static ULong temporary_bytes_allocd_TOT = 0;
+static ULong temporary_count_allocs_TOT = 0;
+
+/* The current allocation mode. */
+static AllocMode mode = AllocModeTEMPORARY;
+
+
+/* Exported to library client. */
+
+void LibVEX_SetAllocMode ( AllocMode m )
+{
+   mode = m;
+}
+
+/* Exported to library client. */
+
+AllocMode LibVEX_GetAllocMode ( void )
+{
+   return mode;
+}
 
 /* Exported to library client. */
@@ -51,32 +73,41 @@ void* LibVEX_Alloc ( Int nbytes )
    } else {
       if (nbytes == 0) nbytes = 8;
       nbytes = (nbytes + 7) & ~7;
-      if (storage_used + nbytes > N_STORAGE_BYTES)
-         vpanic("VEX storage exhausted.\n"
-                "Increase N_STORAGE_BYTES and recompile.");
-      storage_count_allocs++;
-      storage_bytes_allocd += nbytes;
-      storage_used += nbytes;
-      return (void*)(&storage[storage_used - nbytes]);
+      if (mode == AllocModeTEMPORARY) {
+         if (temporary_used + nbytes > N_TEMPORARY_BYTES)
+            vpanic("VEX temporary storage exhausted.\n"
+                   "Increase N_TEMPORARY_BYTES and recompile.");
+         temporary_count_allocs++;
+         temporary_bytes_allocd += nbytes;
+         temporary_used += nbytes;
+         return (void*)(&temporary[temporary_used - nbytes]);
+      } else {
+         if (permanent_used + nbytes > N_PERMANENT_BYTES)
+            vpanic("VEX permanent storage exhausted.\n"
+                   "Increase N_PERMANENT_BYTES and recompile.");
+         permanent_used += nbytes;
+         return (void*)(&permanent[permanent_used - nbytes]);
+      }
    }
 }
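The size round-up keeps every allocation 8-byte aligned in both arenas; a quick worked example (illustrative only):

   /* A 13-byte request: (13 + 7) & ~7 == 20 & ~7 == 16, so it consumes
      16 arena bytes and the next allocation stays 8-aligned. */
   Int nbytes = 13;
   nbytes = (nbytes + 7) & ~7;   /* now 16 */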
 
 /* Exported to library client. */
 
-void LibVEX_Clear ( Bool verb )
+void LibVEX_ClearTemporary ( Bool verb )
 {
    vassert(vex_initdone);
-   storage_bytes_allocd_TOT += (ULong)storage_bytes_allocd;
-   storage_count_allocs_TOT += (ULong)storage_count_allocs;
+   temporary_bytes_allocd_TOT += (ULong)temporary_bytes_allocd;
+   temporary_count_allocs_TOT += (ULong)temporary_count_allocs;
    if (verb) {
-      vex_printf("vex storage: total %lld (%lld), curr %d (%d)\n",
-                 (Long)storage_bytes_allocd_TOT,
-                 (Long)storage_count_allocs_TOT,
-                 storage_bytes_allocd, storage_count_allocs );
+      vex_printf("vex storage: P %d, T total %lld (%lld), T curr %d (%d)\n",
+                 permanent_used,
+                 (Long)temporary_bytes_allocd_TOT,
+                 (Long)temporary_count_allocs_TOT,
+                 temporary_bytes_allocd, temporary_count_allocs );
    }
-   storage_used = 0;
-   storage_bytes_allocd = 0;
-   storage_count_allocs = 0;
+   temporary_used = 0;
+   temporary_bytes_allocd = 0;
+   temporary_count_allocs = 0;
 }
@@ -40,7 +40,18 @@ extern void LibVEX_Init (
 
 /* Storage management: clear the area, and allocate from it. */
 
-extern void LibVEX_Clear ( Bool show_stats );
+/* By default allocation occurs in the temporary area.  However, it is
+   possible to switch to permanent area allocation if that's what you
+   want.  Permanent area allocation is very limited, tho. */
+
+typedef
+   enum { AllocModeTEMPORARY, AllocModePERMANENT }
+   AllocMode;
+
+extern void      LibVEX_SetAllocMode ( AllocMode );
+extern AllocMode LibVEX_GetAllocMode ( void );
+
+extern void LibVEX_ClearTemporary ( Bool show_stats );
 
 extern void* LibVEX_Alloc ( Int nbytes );
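A sketch of the intended client usage (editor's illustration, not commit text): long-lived structures are built with the mode temporarily switched, exactly as DEFINE_PATTERN does internally.

   /* Hypothetical client code: build an IR node that must survive
      LibVEX_ClearTemporary calls between translations. */
   LibVEX_SetAllocMode(AllocModePERMANENT);
   IRExpr* keep = IRExpr_Const(IRConst_U32(42));  /* lives in 'permanent' */
   LibVEX_SetAllocMode(AllocModeTEMPORARY);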
@@ -121,7 +121,9 @@ data Expr
    | CONST Const    -- 8/16/32/64-bit int constant
 */
 typedef
-   enum { Iex_Get, Iex_Tmp, Iex_Binop, Iex_Unop, Iex_LDle,
+   enum { Iex_Binder, /* Used only in pattern matching.
+                         Not an expression. */
+          Iex_Get, Iex_Tmp, Iex_Binop, Iex_Unop, Iex_LDle,
           Iex_Const, Iex_CCall, Iex_Mux0X }
    IRExprTag;
@@ -129,6 +131,9 @@ typedef
    struct _IRExpr {
       IRExprTag tag;
       union {
+         struct {
+            Int binder;
+         } Binder;
          struct {
            Int offset;
            IRType ty;
@@ -166,14 +171,15 @@ typedef
    }
    IRExpr;
 
-extern IRExpr* IRExpr_Get   ( Int off, IRType ty );
-extern IRExpr* IRExpr_Tmp   ( IRTemp tmp );
-extern IRExpr* IRExpr_Binop ( IROp op, IRExpr* arg1, IRExpr* arg2 );
-extern IRExpr* IRExpr_Unop  ( IROp op, IRExpr* arg );
-extern IRExpr* IRExpr_LDle  ( IRType ty, IRExpr* addr );
-extern IRExpr* IRExpr_Const ( IRConst* con );
-extern IRExpr* IRExpr_CCall ( Char* name, IRType retty, IRExpr** args );
-extern IRExpr* IRExpr_Mux0X ( IRExpr* cond, IRExpr* expr0, IRExpr* exprX );
+extern IRExpr* IRExpr_Binder ( Int binder );
+extern IRExpr* IRExpr_Get    ( Int off, IRType ty );
+extern IRExpr* IRExpr_Tmp    ( IRTemp tmp );
+extern IRExpr* IRExpr_Binop  ( IROp op, IRExpr* arg1, IRExpr* arg2 );
+extern IRExpr* IRExpr_Unop   ( IROp op, IRExpr* arg );
+extern IRExpr* IRExpr_LDle   ( IRType ty, IRExpr* addr );
+extern IRExpr* IRExpr_Const  ( IRConst* con );
+extern IRExpr* IRExpr_CCall  ( Char* name, IRType retty, IRExpr** args );
+extern IRExpr* IRExpr_Mux0X  ( IRExpr* cond, IRExpr* expr0, IRExpr* exprX );
 
 extern void ppIRExpr ( IRExpr* );
@@ -224,7 +230,7 @@ typedef
          IRExpr* data;
       } STle;
       struct {
-         IRExpr* cond;
+         IRExpr*  cond;
          IRConst* dst;
       } Exit;
    } Ist;