Merge in enough changes from the old Vex tree to make stage2 link, at least.

git-svn-id: svn://svn.valgrind.org/valgrind/trunk@3061
Julian Seward 2004-11-22 19:01:47 +00:00
parent 454ab569fe
commit b3498dd85c
24 changed files with 710 additions and 16784 deletions

View File

@ -82,7 +82,8 @@ stage2_extra= \
demangle/libdemangle.a \
${VG_PLATFORM}/libplatform.a \
${VG_ARCH}/libarch.a \
${VG_OS}/libos.a
${VG_OS}/libos.a \
@VEX_DIR@/libvex.a
## Test repeated in both arms of the if-then-else because older versions of
## automake don't seem to like having += within an if-then-else.

View File

@ -239,6 +239,8 @@ extern Int VG_(main_pgrp);
extern Int VG_(fd_soft_limit);
extern Int VG_(fd_hard_limit);
/* Vex iropt control */
extern VexControl VG_(clo_vex_control);
/* Should we stop collecting errors if too many appear? default: YES */
extern Bool VG_(clo_error_limit);
/* Enquire about whether to attach to a debugger at errors? default: NO */
@ -1088,8 +1090,6 @@ extern void VG_(demangle) ( Char* orig, Char* result, Int result_size );
Exports of vg_from_ucode.c
------------------------------------------------------------------ */
extern UChar* VG_(emit_code) ( UCodeBlock* cb, Int* nbytes, UShort jumps[VG_MAX_JUMPS] );
extern void VG_(print_ccall_stats) ( void );
extern void VG_(print_UInstr_histogram) ( void );
@ -1102,26 +1102,13 @@ extern Addr VG_(get_jmp_dest) ( Addr jumpsite );
Bool VG_(cpu_has_feature)(UInt feat);
extern Int VG_(disBB) ( UCodeBlock* cb, Addr ip0 );
/* ---------------------------------------------------------------------
Exports of vg_translate.c
------------------------------------------------------------------ */
/* Expandable arrays of uinstrs. */
struct _UCodeBlock {
Addr orig_eip;
Int used;
Int size;
UInstr* instrs;
Int nextTemp;
};
extern Bool VG_(translate) ( ThreadId tid, Addr orig_addr, Bool debugging );
extern void VG_(sanity_check_UInstr) ( UInt n, UInstr* u );
extern void VG_(print_reg_alloc_stats) ( void );
/* ---------------------------------------------------------------------
Exports of vg_execontext.c.
@ -1647,6 +1634,9 @@ GEN_SYSCALL_WRAPPER(sys_mq_getsetattr); // * P?
#undef GEN_SYSCALL_WRAPPER
// Macros used in syscall wrappers
/* PRRAn == "pre-register-read-argument"
PRRSN == "pre-register-read-syscall"
*/
#define PRRSN \
TL_(pre_reg_read)(Vg_CoreSysCall, tid, "(syscallno)", \
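
To make the intent concrete, here is a rough sketch of how a wrapper uses these macros; the PRRSN definition is cut off by this hunk, and the PRE() wrapper name below is hypothetical:

   /* Hypothetical PRE() wrapper for some syscall: PRRSN reports
      the read of the syscall-number register to the tool, and the
      PRRAn macros (defined beyond this hunk) would report reads of
      the individual argument registers. */
   PRE(sys_example)
   {
      PRRSN;   /* tool sees "(syscallno)" being read */
      /* PRRAn(...) calls for each argument would follow here. */
   }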
@ -1704,12 +1694,6 @@ GEN_SYSCALL_WRAPPER(sys_mq_getsetattr); // * P?
#define POST_MEM_WRITE(zzaddr, zzlen) \
VG_TRACK( post_mem_write, zzaddr, zzlen)
/*--------------------------------------------------------------------*/
/*--- end vg_syscalls.c ---*/
/*--------------------------------------------------------------------*/
/* ---------------------------------------------------------------------
Exports of vg_transtab.c
------------------------------------------------------------------ */
@ -1719,8 +1703,7 @@ extern Addr VG_(tt_fast)[VG_TT_FAST_SIZE];
extern void VG_(init_tt_tc) ( void );
extern void VG_(add_to_trans_tab) ( Addr orig_addr, Int orig_size,
Addr trans_addr, Int trans_size,
UShort jumps[VG_MAX_JUMPS]);
Addr trans_addr, Int trans_size );
extern Addr VG_(search_transtab) ( Addr original_addr );
extern void VG_(invalidate_translations) ( Addr start, UInt range,
@ -1807,6 +1790,11 @@ extern void VGA_(init_high_baseBlock) ( Addr client_ip, Addr sp_at_startup );
extern void VGA_(load_state) ( arch_thread_t*, ThreadId tid );
extern void VGA_(save_state) ( arch_thread_t*, ThreadId tid );
// Register state access
extern void VGA_(set_thread_shadow_archreg) ( ThreadId tid, UInt archreg, UInt val );
extern UInt VGA_(get_thread_shadow_archreg) ( ThreadId tid, UInt archreg );
extern UInt VGA_(get_shadow_archreg) ( UInt archreg );
// Thread stuff
extern void VGA_(clear_thread) ( arch_thread_t* );
extern void VGA_(init_thread) ( arch_thread_t* );
@ -1830,7 +1818,7 @@ extern Int VGA_(ptrace_setregs_from_tst) ( Int pid, arch_thread_t* arch );
// Making coredumps
extern void VGA_(fill_elfregs_from_BB) ( struct vki_user_regs_struct* regs );
extern void VGA_(fill_elfregs_from_tst) ( struct vki_user_regs_struct* regs,
const arch_thread_t* arch );
arch_thread_t* arch );
extern void VGA_(fill_elffpregs_from_BB) ( vki_elf_fpregset_t* fpu );
extern void VGA_(fill_elffpregs_from_tst) ( vki_elf_fpregset_t* fpu,
const arch_thread_t* arch );

View File

@ -24,7 +24,7 @@ void, post_clo_init
## always results in the same output, because basic blocks can be
## retranslated. Unless you're doing something really strange...
## 'orig_addr' is the address of the first instruction in the block.
UCodeBlock*, instrument, UCodeBlock* cb, Addr orig_addr
IRBB*, instrument, IRBB* bb, VexGuestLayout* layout, IRType hWordTy
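
Under the new interface the simplest legal instrumenter is a pass-through; a minimal sketch (the tool prefix is made up):

   /* Do-nothing instrumenter for the new IRBB-based interface.
      'layout' describes where guest registers live in the Vex
      guest state; 'hWordTy' is the host word type, needed when a
      tool emits its own address arithmetic. Returning the input
      block unchanged simply runs the code uninstrumented. */
   static IRBB* xx_instrument ( IRBB* bb, VexGuestLayout* layout,
                                IRType hWordTy )
   {
      return bb;
   }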
## Finish up, print out any results, etc. `exitcode' is program's exit
## code. The shadow (if the `shadow_regs' need is set) can be found with
@ -130,17 +130,6 @@ void, print_debug_usage
Bool, handle_client_request, ThreadId tid, UWord* arg_block, UWord* ret
## ------------------------------------------------------------------
## VG_(needs).extends_UCode
## 'X' prefix indicates eXtended UCode.
Int, get_Xreg_usage, UInstr* u, Tag tag, Int* regs, Bool* isWrites
void, emit_XUInstr, UInstr* u, RRegSet regs_live_before
Bool, sane_XUInstr, Bool beforeRA, Bool beforeLiveness, UInstr* u
Char *, name_XUOpcode, Opcode opc
void, pp_XUInstr, UInstr* u
## ------------------------------------------------------------------
## VG_(needs).syscall_wrapper

View File

@ -307,9 +307,9 @@ void get_needed_regs(ThreadId tid, Addr* ip, Addr* fp, Addr* sp,
{
if (VG_(is_running_thread)(tid)) {
/* thread currently in baseblock */
*ip = VG_(baseBlock)[VGOFF_INSTR_PTR];
*fp = VG_(baseBlock)[VGOFF_FRAME_PTR];
*sp = VG_(baseBlock)[VGOFF_STACK_PTR];
*ip = BASEBLOCK_INSTR_PTR;
*fp = BASEBLOCK_FRAME_PTR;
*sp = BASEBLOCK_STACK_PTR;
*stack_highest_word = VG_(threads)[tid].stack_highest_word;
} else {
/* thread in thread table */
@ -367,7 +367,7 @@ Addr VG_(get_EIP) ( ThreadId tid )
Addr ret;
if (VG_(is_running_thread)(tid))
ret = VG_(baseBlock)[VGOFF_INSTR_PTR];
ret = BASEBLOCK_INSTR_PTR;
else
ret = ARCH_INSTR_PTR(VG_(threads)[ tid ].arch);

File diff suppressed because it is too large.

View File

@ -29,228 +29,6 @@
The GNU General Public License is contained in the file COPYING.
*/
/* We only import tool.h here, because this file only provides functions
for doing things that could be done directly by the tool -- it's just to
make tools' lives easier, rather than let them do something they
couldn't otherwise do. */
#include "tool.h"
void VG_(lit_to_reg)(UCodeBlock* cb, UInt lit, UInt t)
{
uInstr2 (cb, MOV, 4, Literal, 0, TempReg, t);
uLiteral(cb, lit);
}
UInt VG_(lit_to_newreg)(UCodeBlock* cb, UInt lit)
{
UInt t = newTemp(cb);
uInstr2 (cb, MOV, 4, Literal, 0, TempReg, t);
uLiteral(cb, lit);
return t;
}
// f()
void VG_(ccall_0_0)(UCodeBlock* cb, Addr f)
{
uInstr0(cb, CCALL, 0);
uCCall(cb, f, 0, 0, /*retval*/False);
}
// f(reg)
void VG_(ccall_R_0)(UCodeBlock* cb, Addr f, UInt t1, UInt regparms_n)
{
tl_assert(regparms_n <= 1);
uInstr1(cb, CCALL, 0, TempReg, t1);
uCCall(cb, f, 1, regparms_n, /*retval*/False);
}
// f(lit)
void VG_(ccall_L_0)(UCodeBlock* cb, Addr f, UInt lit1, UInt regparms_n)
{
UInt t1 = VG_(lit_to_newreg)(cb, lit1);
VG_(ccall_R_0)(cb, f, t1, regparms_n);
}
// reg = f(reg)
void VG_(ccall_R_R)(UCodeBlock* cb, Addr f, UInt t1, UInt t_ret,
UInt regparms_n)
{
tl_assert(regparms_n <= 1);
tl_assert(t1 < VG_(get_num_temps)(cb)); // help catch lits accidentally passed in
uInstr3(cb, CCALL, 0, TempReg, t1, NoValue, 0, TempReg, t_ret);
uCCall(cb, f, 1, regparms_n, /*retval*/True);
}
// reg = f(lit)
void VG_(ccall_L_R)(UCodeBlock* cb, Addr f, UInt lit1, UInt t_ret,
UInt regparms_n)
{
UInt t1 = VG_(lit_to_newreg)(cb, lit1);
VG_(ccall_R_R)(cb, f, t1, t_ret, regparms_n);
}
// f(reg, reg)
void VG_(ccall_RR_0)(UCodeBlock* cb, Addr f, UInt t1, UInt t2, UInt regparms_n)
{
tl_assert(regparms_n <= 2);
tl_assert(t1 < VG_(get_num_temps)(cb));
tl_assert(t2 < VG_(get_num_temps)(cb));
uInstr2(cb, CCALL, 0, TempReg, t1, TempReg, t2);
uCCall(cb, f, 2, regparms_n, /*retval*/False);
}
// f(reg, lit)
void VG_(ccall_RL_0)(UCodeBlock* cb, Addr f, UInt t1, UInt lit2,
UInt regparms_n)
{
UInt t2 = VG_(lit_to_newreg)(cb, lit2);
VG_(ccall_RR_0)(cb, f, t1, t2, regparms_n);
}
// f(lit, reg)
void VG_(ccall_LR_0)(UCodeBlock* cb, Addr f, UInt lit1, UInt t2,
UInt regparms_n)
{
UInt t1 = VG_(lit_to_newreg)(cb, lit1);
VG_(ccall_RR_0)(cb, f, t1, t2, regparms_n);
}
// f(lit, lit)
void VG_(ccall_LL_0)(UCodeBlock* cb, Addr f, UInt lit1, UInt lit2,
UInt regparms_n)
{
UInt t1 = VG_(lit_to_newreg)(cb, lit1);
UInt t2 = VG_(lit_to_newreg)(cb, lit2);
VG_(ccall_RR_0)(cb, f, t1, t2, regparms_n);
}
// reg = f(reg, reg)
void VG_(ccall_RR_R)(UCodeBlock* cb, Addr f, UInt t1, UInt t2, UInt t_ret,
UInt regparms_n)
{
tl_assert(regparms_n <= 2);
tl_assert(t1 < VG_(get_num_temps)(cb));
tl_assert(t2 < VG_(get_num_temps)(cb));
uInstr3(cb, CCALL, 0, TempReg, t1, TempReg, t2, TempReg, t_ret);
uCCall(cb, f, 2, regparms_n, /*retval*/True);
}
// reg = f(reg, lit)
void VG_(ccall_RL_R)(UCodeBlock* cb, Addr f, UInt t1, UInt lit2, UInt t_ret,
UInt regparms_n)
{
UInt t2 = VG_(lit_to_newreg)(cb, lit2);
VG_(ccall_RR_R)(cb, f, t1, t2, t_ret, regparms_n);
}
// reg = f(lit, reg)
void VG_(ccall_LR_R)(UCodeBlock* cb, Addr f, UInt lit1, UInt t2, UInt t_ret,
UInt regparms_n)
{
UInt t1 = VG_(lit_to_newreg)(cb, lit1);
VG_(ccall_RR_R)(cb, f, t1, t2, t_ret, regparms_n);
}
// reg = f(lit, lit)
void VG_(ccall_LL_R)(UCodeBlock* cb, Addr f, UInt lit1, UInt lit2, UInt t_ret,
UInt regparms_n)
{
UInt t1 = VG_(lit_to_newreg)(cb, lit1);
UInt t2 = VG_(lit_to_newreg)(cb, lit2);
VG_(ccall_RR_R)(cb, f, t1, t2, t_ret, regparms_n);
}
// f(reg, reg, reg)
void VG_(ccall_RRR_0)(UCodeBlock* cb, Addr f, UInt t1, UInt t2,
UInt t3, UInt regparms_n)
{
tl_assert(regparms_n <= 3);
tl_assert(t1 < VG_(get_num_temps)(cb));
tl_assert(t2 < VG_(get_num_temps)(cb));
tl_assert(t3 < VG_(get_num_temps)(cb));
uInstr3(cb, CCALL, 0, TempReg, t1, TempReg, t2, TempReg, t3);
uCCall(cb, f, 3, regparms_n, /*retval*/False);
}
// f(reg, lit, lit)
void VG_(ccall_RLL_0)(UCodeBlock* cb, Addr f, UInt t1, UInt lit2,
UInt lit3, UInt regparms_n)
{
UInt t2 = VG_(lit_to_newreg)(cb, lit2);
UInt t3 = VG_(lit_to_newreg)(cb, lit3);
VG_(ccall_RRR_0)(cb, f, t1, t2, t3, regparms_n);
}
// f(lit, reg, reg)
void VG_(ccall_LRR_0)(UCodeBlock* cb, Addr f, UInt lit1, UInt t2,
UInt t3, UInt regparms_n)
{
UInt t1 = VG_(lit_to_newreg)(cb, lit1);
VG_(ccall_RRR_0)(cb, f, t1, t2, t3, regparms_n);
}
// f(lit, lit, reg)
void VG_(ccall_LLR_0)(UCodeBlock* cb, Addr f, UInt lit1, UInt lit2,
UInt t3, UInt regparms_n)
{
UInt t1 = VG_(lit_to_newreg)(cb, lit1);
UInt t2 = VG_(lit_to_newreg)(cb, lit2);
VG_(ccall_RRR_0)(cb, f, t1, t2, t3, regparms_n);
}
// f(lit, lit, lit)
void VG_(ccall_LLL_0)(UCodeBlock* cb, Addr f, UInt lit1, UInt lit2,
UInt lit3, UInt regparms_n)
{
UInt t1 = VG_(lit_to_newreg)(cb, lit1);
UInt t2 = VG_(lit_to_newreg)(cb, lit2);
UInt t3 = VG_(lit_to_newreg)(cb, lit3);
VG_(ccall_RRR_0)(cb, f, t1, t2, t3, regparms_n);
}
void VG_(reg_to_globvar)(UCodeBlock* cb, UInt t, UInt* globvar_ptr)
{
Int t_gv = VG_(lit_to_newreg)(cb, (UInt)globvar_ptr);
uInstr2(cb, STORE, 4, TempReg, t, TempReg, t_gv);
}
void VG_(lit_to_globvar)(UCodeBlock* cb, UInt lit, UInt* globvar_ptr)
{
Int t_lit = VG_(lit_to_newreg)(cb, lit);
VG_(reg_to_globvar)(cb, t_lit, globvar_ptr);
}
/*--------------------------------------------------------------------
Old versions of these functions, for backwards compatibility
--------------------------------------------------------------------*/
void VG_(call_helper_0_0)(UCodeBlock* cb, Addr f)
{
VG_(ccall_0_0)(cb, f);
}
void VG_(call_helper_1_0)(UCodeBlock* cb, Addr f, UInt arg1, UInt regparms_n)
{
VG_(ccall_L_0)(cb, f, arg1, regparms_n);
}
void VG_(call_helper_2_0)(UCodeBlock* cb, Addr f, UInt arg1, UInt arg2,
UInt regparms_n)
{
VG_(ccall_LL_0)(cb, f, arg1, arg2, regparms_n);
}
void VG_(set_global_var)(UCodeBlock* cb, Addr globvar_ptr, UInt val)
{
VG_(lit_to_globvar)(cb, val, (UInt*)globvar_ptr);
}
void VG_(set_global_var_tempreg)(UCodeBlock* cb, Addr globvar_ptr, UInt t_val)
{
VG_(reg_to_globvar)(cb, t_val, (UInt*)globvar_ptr);
}
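
For reference, this is the kind of use the deleted helpers saw from tools: to emit a call f(t, 42) — one existing TempReg plus one literal, both passed in registers — a tool wrote something like the following ('f' and 't' are illustrative):

   /* Emit, into cb, a call to f(t, 42). ccall_RL_0 loads the
      literal 42 into a fresh TempReg and then defers to
      ccall_RR_0; regparms_n == 2 passes both args in registers. */
   VG_(ccall_RL_0)( cb, (Addr)&f, t, 42, 2 /*regparms_n*/ );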
/*--------------------------------------------------------------------*/
/*--- end vg_instrument.c ---*/
/*--------------------------------------------------------------------*/

View File

@ -150,7 +150,7 @@ Char** VG_(client_envp);
UInt VG_(dispatch_ctr);
/* 64-bit counter for the number of basic blocks done. */
ULong VG_(bbs_done);
ULong VG_(bbs_done) = 0;
/* Tell the logging mechanism whether we are logging to a file
descriptor or a socket descriptor. */
@ -188,19 +188,10 @@ static void print_all_stats ( void )
// Scheduler stats
VG_(print_scheduler_stats)();
// Reg-alloc stats
VG_(print_reg_alloc_stats)();
VG_(message)(Vg_DebugMsg,
" sanity: %d cheap, %d expensive checks.",
sanity_fast_count, sanity_slow_count );
// C call stats
VG_(print_ccall_stats)();
// UInstr histogram
if (VG_(clo_verbosity) > 3)
VG_(print_UInstr_histogram)();
// Memory stats
if (VG_(clo_verbosity) > 2) {
VG_(message)(Vg_DebugMsg, "");
@ -337,7 +328,7 @@ void VG_(unimplemented) ( Char* msg )
Addr VG_(get_stack_pointer) ( void )
{
return VG_(baseBlock)[VGOFF_STACK_PTR];
return BASEBLOCK_STACK_PTR;
}
/* Debugging thing .. can be called from assembly with OYNK macro. */
@ -1477,6 +1468,7 @@ void as_closepadfile(int padfile)
/*====================================================================*/
/* Define, and set defaults. */
VexControl VG_(clo_vex_control);
Bool VG_(clo_error_limit) = True;
Bool VG_(clo_db_attach) = False;
Char* VG_(clo_db_command) = VG_CLO_DEFAULT_DBCOMMAND;
@ -1577,7 +1569,7 @@ void usage ( Bool debug_help )
" --profile=no|yes profile? (tool must be built for it) [no]\n"
" --chain-bb=no|yes do basic-block chaining? [yes]\n"
" --branchpred=yes|no generate branch prediction hints [no]\n"
" --trace-codegen=<XXXXX> show generated code? (X = 0|1) [00000]\n"
" --trace-codegen=<XXXXXXXX> show generated code? (X = 0|1) [00000000]\n"
" --trace-syscalls=no|yes show all system calls? [no]\n"
" --trace-signals=no|yes show signal handling details? [no]\n"
" --trace-symtab=no|yes show symbol table details? [no]\n"
@ -1585,6 +1577,13 @@ void usage ( Bool debug_help )
" --trace-pthread=none|some|all show pthread event details? [none]\n"
" --wait-for-gdb=yes|no pause on startup to wait for gdb attach\n"
"\n"
" --vex-iropt-verbosity 0 .. 9 [0]\n"
" --vex-iropt-level 0 .. 2 [2]\n"
" --vex-iropt-precise-memory-exns [no]\n"
" --vex-iropt-unroll-thresh 0 .. 400 [120]\n"
" --vex-guest-max-insns 1 .. 100 [50]\n"
" --vex-guest-chase-thresh 0 .. 99 [10]\n"
"\n"
" debugging options for Valgrind tools that report errors\n"
" --dump-error=<number> show translation for basic block associated\n"
" with <number>'th error context [0=show none]\n"
@ -1631,6 +1630,8 @@ static void pre_process_cmd_line_options
{
UInt i;
LibVEX_default_VexControl(& VG_(clo_vex_control));
/* parse the options we have (only the options we care about now) */
for (i = 1; i < vg_argc; i++) {
@ -1765,6 +1766,19 @@ static void process_cmd_line_options( UInt* client_auxv, const char* toolname )
else VG_BNUM_CLO("--num-callers", VG_(clo_backtrace_size), 1,
VG_DEEPEST_BACKTRACE)
else VG_BNUM_CLO("--vex-iropt-verbosity",
VG_(clo_vex_control).iropt_verbosity, 0, 10)
else VG_BNUM_CLO("--vex-iropt-level",
VG_(clo_vex_control).iropt_level, 0, 2)
else VG_BOOL_CLO("--vex-iropt-precise-memory-exns",
VG_(clo_vex_control).iropt_precise_memory_exns)
else VG_BNUM_CLO("--vex-iropt-unroll-thresh",
VG_(clo_vex_control).iropt_unroll_thresh, 0, 400)
else VG_BNUM_CLO("--vex-guest-max-insns",
VG_(clo_vex_control).guest_max_insns, 1, 100)
else VG_BNUM_CLO("--vex-guest-chase-thresh",
VG_(clo_vex_control).guest_chase_thresh, 0, 99)
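
These options poke fields of the global VexControl that pre_process_cmd_line_options has just filled with library defaults. The programmatic equivalent is, e.g. (a sketch using the same field names as the option parsing above):

   VexControl vc;
   LibVEX_default_VexControl( &vc );  /* start from the defaults   */
   vc.iropt_level        = 2;         /* --vex-iropt-level=2       */
   vc.guest_max_insns    = 50;        /* --vex-guest-max-insns=50  */
   vc.guest_chase_thresh = 10;        /* --vex-guest-chase-thresh  */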
// for backwards compatibility, replaced by --log-fd
else if (VG_CLO_STREQN(13, arg, "--logfile-fd=")) {
VG_(clo_log_to) = VgLogTo_Fd;
@ -1815,12 +1829,12 @@ static void process_cmd_line_options( UInt* client_auxv, const char* toolname )
if (5 != VG_(strlen)(opt)) {
VG_(message)(Vg_UserMsg,
"--trace-codegen argument must have 5 digits");
"--trace-codegen argument must have 8 digits");
VG_(bad_option)(arg);
}
for (j = 0; j < 5; j++) {
for (j = 0; j < 8; j++) {
if ('0' == opt[j]) { /* do nothing */ }
else if ('1' == opt[j]) VG_(clo_trace_codegen) |= (1 << j);
else if ('1' == opt[j]) VG_(clo_trace_codegen) |= (1 << (7-j));
else {
VG_(message)(Vg_UserMsg, "--trace-codegen argument can only "
"contain 0s and 1s");
@ -2897,6 +2911,9 @@ int main(int argc, char **argv)
VG_(threads)[last_run_tid].status == VgTs_WaitJoiner);
VG_(nuke_all_threads_except)(VG_INVALID_THREADID);
/* Print Vex storage stats */
LibVEX_ClearTemporary( True/*show stats*/ );
//--------------------------------------------------------------
// Exit, according to the scheduler's return code
//--------------------------------------------------------------

View File

@ -670,7 +670,7 @@ Segment *VG_(next_segment)(Segment *s)
REGPARM(1)
void VG_(unknown_SP_update)(Addr new_SP)
{
Addr old_SP = VG_(get_archreg)(R_STACK_PTR);
Addr old_SP = BASEBLOCK_STACK_PTR;
Word delta = (Word)new_SP - (Word)old_SP;
if (delta < -(VG_HUGE_DELTA) || VG_HUGE_DELTA < delta) {

View File

@ -153,7 +153,7 @@ ThreadId VG_(first_matching_thread_stack)
VG_(baseBlock). */
if (vg_tid_currently_in_baseBlock != VG_INVALID_THREADID) {
tid = vg_tid_currently_in_baseBlock;
if ( p ( VG_(baseBlock)[VGOFF_STACK_PTR],
if ( p ( BASEBLOCK_STACK_PTR,
VG_(threads)[tid].stack_highest_word, d ) )
return tid;
else
@ -321,6 +321,9 @@ static
UInt run_thread_for_a_while ( ThreadId tid )
{
volatile UInt trc = 0;
volatile Int dispatch_ctr_SAVED = VG_(dispatch_ctr);
volatile Int done_this_time;
vg_assert(VG_(is_valid_tid)(tid));
vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
vg_assert(!scheduler_jmpbuf_valid);
@ -348,6 +351,12 @@ UInt run_thread_for_a_while ( ThreadId tid )
vg_assert(!scheduler_jmpbuf_valid);
save_thread_state ( tid );
done_this_time = (Int)dispatch_ctr_SAVED - (Int)VG_(dispatch_ctr) - 0;
vg_assert(done_this_time >= 0);
VG_(bbs_done) += (ULong)done_this_time;
VGP_POPCC(VgpRun);
return trc;
}
@ -723,7 +732,6 @@ VgSchedReturnCode do_scheduler ( Int* exitcode, ThreadId* last_run_tid )
{
ThreadId tid, tid_next;
UInt trc;
UInt dispatch_ctr_SAVED;
Int done_this_time, n_in_bounded_wait;
Int n_exists, n_waiting_for_reaper;
Addr trans_addr;
@ -843,9 +851,6 @@ VgSchedReturnCode do_scheduler ( Int* exitcode, ThreadId* last_run_tid )
*/
VG_(dispatch_ctr) = VG_SCHEDULING_QUANTUM + 1;
/* ... and remember what we asked for. */
dispatch_ctr_SAVED = VG_(dispatch_ctr);
/* paranoia ... */
vg_assert(VG_(threads)[tid].tid == tid);
@ -885,7 +890,7 @@ VgSchedReturnCode do_scheduler ( Int* exitcode, ThreadId* last_run_tid )
if (trc == VG_TRC_INNER_FASTMISS) {
Addr ip = ARCH_INSTR_PTR(VG_(threads)[tid].arch);
vg_assert(VG_(dispatch_ctr) > 0);
vg_assert(VG_(dispatch_ctr) > 1);
/* Trivial event. Miss in the fast-cache. Do a full
lookup for it. */
@ -1052,10 +1057,6 @@ VgSchedReturnCode do_scheduler ( Int* exitcode, ThreadId* last_run_tid )
non-completely-trivial reason. First, update basic-block
counters. */
done_this_time = (Int)dispatch_ctr_SAVED - (Int)VG_(dispatch_ctr);
vg_assert(done_this_time > 0);
VG_(bbs_done) += (ULong)done_this_time;
if (0 && trc != VG_TRC_INNER_FASTMISS)
VG_(message)(Vg_DebugMsg, "thread %d: completed %d bbs, trc %d",
tid, done_this_time, (Int)trc );
@ -1080,7 +1081,7 @@ VgSchedReturnCode do_scheduler ( Int* exitcode, ThreadId* last_run_tid )
/* Timeslice is out. Let a new thread be scheduled,
simply by doing nothing, causing us to arrive back at
Phase 1. */
vg_assert(VG_(dispatch_ctr) == 0);
vg_assert(VG_(dispatch_ctr) == 1);
break;
case VG_TRC_UNRESUMABLE_SIGNAL:
@ -2868,12 +2869,12 @@ void do__get_stack_info ( ThreadId tid, ThreadId which, StackInfo* si )
void VG_(set_return_from_syscall_shadow) ( ThreadId tid, UInt ret_shadow )
{
VG_(set_thread_shadow_archreg)(tid, R_SYSCALL_RET, ret_shadow);
VGA_(set_thread_shadow_archreg)(tid, R_SYSCALL_RET, ret_shadow);
}
UInt VG_(get_exit_status_shadow) ( void )
{
return VG_(get_shadow_archreg)(R_SYSCALL_ARG1);
return VGA_(get_shadow_archreg)(R_SYSCALL_ARG1);
}
void VG_(intercept_libc_freeres_wrapper)(Addr addr)

View File

@ -1134,7 +1134,7 @@ static void fill_prpsinfo(const ThreadState *tst, struct vki_elf_prpsinfo *prpsi
}
}
static void fill_prstatus(const ThreadState *tst, struct vki_elf_prstatus *prs, const vki_siginfo_t *si)
static void fill_prstatus(ThreadState *tst, struct vki_elf_prstatus *prs, const vki_siginfo_t *si)
{
struct vki_user_regs_struct *regs;
@ -1697,6 +1697,7 @@ void vg_sync_signalhandler ( Int sigNo, vki_siginfo_t *info, struct vki_ucontext
*/
if (VG_(clo_trace_signals)) {
VG_(message)(Vg_DebugMsg, "");
VG_(message)(Vg_DebugMsg, "signal %d arrived ... si_code=%d",
sigNo, info->si_code );
}
@ -1731,7 +1732,7 @@ void vg_sync_signalhandler ( Int sigNo, vki_siginfo_t *info, struct vki_ucontext
ThreadId tid = VG_(get_current_or_recent_tid)();
Addr fault = (Addr)info->_sifields._sigfault._addr;
Addr esp = VG_(is_running_thread)(tid)
? VG_(baseBlock)[VGOFF_STACK_PTR]
? BASEBLOCK_STACK_PTR
: ARCH_STACK_PTR(VG_(threads)[tid].arch);
Segment *seg;

File diff suppressed because it is too large.

File diff suppressed because it is too large.

View File

@ -153,7 +153,7 @@ static void for_each_tc(Int sector, void (*fn)(TCEntry *));
/*------------------ T-CHAINING HELPERS ------------------*/
#if 0
static
void for_each_jumpsite(TCEntry *tce, void (*fn)(Addr))
{
@ -201,7 +201,7 @@ void unchain_sector(Int s, Addr base, UInt len)
for_each_tc(s, unchain_tce_for_sector);
}
#endif
/*------------------ TT HELPERS ------------------*/
@ -354,16 +354,16 @@ void discard_oldest_sector ( void )
Char msg[100];
Int s = find_oldest_sector();
if (s != -1) {
Int i;
//Int i;
vg_assert(s >= 0 && s < VG_TC_N_SECTORS);
VG_(sprintf)(msg, "before discard of sector %d (%d bytes)",
s, vg_tc_used[s]);
for(i = 0; i < VG_TC_N_SECTORS; i++) {
if (i != s && vg_tc[i] != NULL)
unchain_sector(i, (Addr)vg_tc[s], vg_tc_used[s]);
}
//for(i = 0; i < VG_TC_N_SECTORS; i++) {
// if (i != s && vg_tc[i] != NULL)
// unchain_sector(i, (Addr)vg_tc[s], vg_tc_used[s]);
// }
pp_tt_tc_status ( msg );
overall_out_count += vg_tc_stats_count[s];
@ -554,8 +554,7 @@ Int VG_(get_bbs_translated) ( void )
pointer, which is inserted here.
*/
void VG_(add_to_trans_tab) ( Addr orig_addr, Int orig_size,
Addr trans_addr, Int trans_size,
UShort jumps[VG_MAX_JUMPS])
Addr trans_addr, Int trans_size )
{
Int i, nBytes, trans_size_aligned;
TCEntry* tce;
@ -582,14 +581,11 @@ void VG_(add_to_trans_tab) ( Addr orig_addr, Int orig_size,
tce->orig_addr = orig_addr;
tce->orig_size = (UShort)orig_size; /* what's the point of storing this? */
tce->trans_size = (UShort)trans_size_aligned;
for (i = 0; i < VG_MAX_JUMPS; i++) {
tce->jump_sites[i] = jumps[i];
}
for (i = 0; i < trans_size; i++) {
tce->payload[i] = ((UChar*)trans_addr)[i];
}
unchain_tce(tce);
//unchain_tce(tce);
add_tt_entry(tce);
/* Update stats. */
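
With chaining gone, registering a translation no longer involves a jump-site array; a caller-side sketch of the new flow (variable names illustrative):

   /* After generating trans_size bytes of code at trans_addr for
      the guest block at orig_addr: */
   VG_(add_to_trans_tab)( orig_addr, orig_size,
                          trans_addr, trans_size );
   /* Subsequent lookups go via VG_(search_transtab)(orig_addr). */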
@ -637,7 +633,7 @@ void VG_(invalidate_translations) ( Addr start, UInt range, Bool unchain_blocks
{
Addr i_start, i_end, o_start, o_end;
UInt out_count, out_osize, out_tsize;
Int i, j;
Int i; //, j;
TCEntry* tce;
# ifdef DEBUG_TRANSTAB
VG_(sanity_check_tt_tc)();
@ -662,13 +658,13 @@ void VG_(invalidate_translations) ( Addr start, UInt range, Bool unchain_blocks
vg_tt[i].orig_addr = VG_TTE_DELETED;
tce->orig_addr = VG_TTE_DELETED;
if (unchain_blocks) {
/* make sure no other blocks chain to the one we just discarded */
for(j = 0; j < VG_TC_N_SECTORS; j++) {
if (vg_tc[j] != NULL)
unchain_sector(j, (Addr)tce->payload, tce->trans_size);
}
}
// if (unchain_blocks) {
// /* make sure no other blocks chain to the one we just discarded */
// for(j = 0; j < VG_TC_N_SECTORS; j++) {
// if (vg_tc[j] != NULL)
// unchain_sector(j, (Addr)tce->payload, tce->trans_size);
// }
// }
overall_out_count ++;
overall_out_osize += tce->orig_size;

View File

@ -40,16 +40,17 @@
------------------------------------------------------------------ */
// Accessors for the arch_thread_t
#define PLATFORM_SYSCALL_NUM(regs) ((regs).m_eax)
#define PLATFORM_SYSCALL_RET(regs) ((regs).m_eax)
#define PLATFORM_SYSCALL_ARG1(regs) ((regs).m_ebx)
#define PLATFORM_SYSCALL_ARG2(regs) ((regs).m_ecx)
#define PLATFORM_SYSCALL_ARG3(regs) ((regs).m_edx)
#define PLATFORM_SYSCALL_ARG4(regs) ((regs).m_esi)
#define PLATFORM_SYSCALL_ARG5(regs) ((regs).m_edi)
#define PLATFORM_SYSCALL_ARG6(regs) ((regs).m_ebp)
#define PLATFORM_SYSCALL_NUM(regs) ((regs).vex.guest_EAX)
#define PLATFORM_SYSCALL_RET(regs) ((regs).vex.guest_EAX)
#define PLATFORM_SYSCALL_ARG1(regs) ((regs).vex.guest_EBX)
#define PLATFORM_SYSCALL_ARG2(regs) ((regs).vex.guest_ECX)
#define PLATFORM_SYSCALL_ARG3(regs) ((regs).vex.guest_EDX)
#define PLATFORM_SYSCALL_ARG4(regs) ((regs).vex.guest_ESI)
#define PLATFORM_SYSCALL_ARG5(regs) ((regs).vex.guest_EDI)
#define PLATFORM_SYSCALL_ARG6(regs) ((regs).vex.guest_EBP)
#define PLATFORM_SET_SYSCALL_RESULT(regs, val) ((regs).m_eax = (val))
#define PLATFORM_SET_SYSCALL_RESULT(regs, val) \
((regs).vex.guest_EAX = (val))
// Interesting register numbers
#define R_SYSCALL_NUM R_EAX
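
Concretely, the syscall machinery now reads straight out of the Vex guest state embedded in the per-thread arch state; a sketch (tst assumed to be a ThreadState*, res the computed result):

   UInt sysno = PLATFORM_SYSCALL_NUM (tst->arch);  /* guest_EAX */
   UInt arg1  = PLATFORM_SYSCALL_ARG1(tst->arch);  /* guest_EBX */
   /* ... perform the call ... then hand the result back: */
   PLATFORM_SET_SYSCALL_RESULT(tst->arch, res);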

View File

@ -132,14 +132,14 @@ asm(
void VGA_(thread_syscall)(Int syscallno, arch_thread_t *arch,
enum PXState *state , enum PXState poststate)
{
do_thread_syscall(syscallno, // syscall no.
arch->m_ebx, // arg 1
arch->m_ecx, // arg 2
arch->m_edx, // arg 3
arch->m_esi, // arg 4
arch->m_edi, // arg 5
arch->m_ebp, // arg 6
&arch->m_eax, // result
do_thread_syscall(syscallno, // syscall no.
arch->vex.guest_EBX, // arg 1
arch->vex.guest_ECX, // arg 2
arch->vex.guest_EDX, // arg 3
arch->vex.guest_ESI, // arg 4
arch->vex.guest_EDI, // arg 5
arch->vex.guest_EBP, // arg 6
&arch->vex.guest_EAX, // result
state, // state to update
poststate); // state when syscall has finished
}
@ -149,7 +149,7 @@ void VGA_(thread_syscall)(Int syscallno, arch_thread_t *arch,
// Back up to restart a system call.
void VGA_(restart_syscall)(arch_thread_t *arch)
{
arch->m_eip -= 2; // sizeof(int $0x80)
arch->vex.guest_EIP -= 2; // sizeof(int $0x80)
/* Make sure our caller is actually sane, and we're really backing
back over a syscall.
@ -157,12 +157,12 @@ void VGA_(restart_syscall)(arch_thread_t *arch)
int $0x80 == CD 80
*/
{
UChar *p = (UChar *)arch->m_eip;
UChar *p = (UChar *)arch->vex.guest_EIP;
if (p[0] != 0xcd || p[1] != 0x80)
VG_(message)(Vg_DebugMsg,
"?! restarting over syscall at %p %02x %02x\n",
arch->m_eip, p[0], p[1]);
arch->vex.guest_EIP, p[0], p[1]);
vg_assert(p[0] == 0xcd && p[1] == 0x80);
}

View File

@ -34,32 +34,47 @@
#include "core_arch_asm.h" // arch-specific asm stuff
#include "tool_arch.h" // arch-specific tool stuff
#include "libvex_guest_x86.h"
/* ---------------------------------------------------------------------
Interesting registers
------------------------------------------------------------------ */
// Accessors for the arch_thread_t
#define ARCH_INSTR_PTR(regs) ((regs).m_eip)
#define ARCH_STACK_PTR(regs) ((regs).m_esp)
#define ARCH_FRAME_PTR(regs) ((regs).m_ebp)
/* Generate a pointer into baseBlock via which we can prod the
Vex guest state. */
#define BASEBLOCK_VEX \
((VexGuestX86State*)(&VG_(baseBlock)[VGOFF_(m_vex)]))
#define ARCH_CLREQ_ARGS(regs) ((regs).m_eax)
#define ARCH_PTHREQ_RET(regs) ((regs).m_edx)
#define ARCH_CLREQ_RET(regs) ((regs).m_edx)
/* Ditto the Vex shadow guest state. */
#define BASEBLOCK_VEX_SHADOW \
((VexGuestX86State*)(&VG_(baseBlock)[VGOFF_(m_vex_shadow)]))
// Accessors for the arch_thread_t
#define ARCH_INSTR_PTR(regs) ((regs).vex.guest_EIP)
#define ARCH_STACK_PTR(regs) ((regs).vex.guest_ESP)
#define ARCH_FRAME_PTR(regs) ((regs).vex.guest_EBP)
#define ARCH_CLREQ_ARGS(regs) ((regs).vex.guest_EAX)
#define ARCH_PTHREQ_RET(regs) ((regs).vex.guest_EDX)
#define ARCH_CLREQ_RET(regs) ((regs).vex.guest_EDX)
// Accessors for the baseBlock
#define R_STACK_PTR R_ESP
#define R_FRAME_PTR R_EBP
#define R_CLREQ_RET R_EDX
#define R_PTHREQ_RET R_EDX
// Stack frame layout and linkage
#define FIRST_STACK_FRAME(ebp) (ebp)
#define STACK_FRAME_RET(ebp) (((UWord*)ebp)[1])
#define STACK_FRAME_NEXT(ebp) (((UWord*)ebp)[0])
#define STACK_FRAME_RET(ebp) (((UInt*)ebp)[1])
#define STACK_FRAME_NEXT(ebp) (((UInt*)ebp)[0])
// Offsets of interesting registers
#define VGOFF_INSTR_PTR VGOFF_(m_eip)
#define VGOFF_STACK_PTR VGOFF_(m_esp)
#define VGOFF_FRAME_PTR VGOFF_(m_ebp)
// Baseblock access to interesting registers
#define BASEBLOCK_INSTR_PTR BASEBLOCK_VEX->guest_EIP
#define BASEBLOCK_STACK_PTR BASEBLOCK_VEX->guest_ESP
#define BASEBLOCK_FRAME_PTR BASEBLOCK_VEX->guest_EBP
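
Pieced together, these accessors are just a typed window onto the baseBlock; BASEBLOCK_STACK_PTR, for example, expands to the expression sketched here:

   /* What BASEBLOCK_STACK_PTR denotes, spelled out: */
   Addr sp = ((VexGuestX86State*)
              (&VG_(baseBlock)[VGOFF_(m_vex)]))->guest_ESP;
   /* i.e. the ESP slot of the Vex guest state stored in-place in
      VG_(baseBlock), starting at word offset VGOFF_(m_vex). */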
// Get stack pointer and frame pointer
#define ARCH_GET_REAL_STACK_PTR(esp) do { \
@ -76,40 +91,12 @@
-------------------------------------------------- */
/* State of the simulated CPU. */
extern Int VGOFF_(m_eax);
extern Int VGOFF_(m_ecx);
extern Int VGOFF_(m_edx);
extern Int VGOFF_(m_ebx);
extern Int VGOFF_(m_esp);
extern Int VGOFF_(m_ebp);
extern Int VGOFF_(m_esi);
extern Int VGOFF_(m_edi);
extern Int VGOFF_(m_eflags);
extern Int VGOFF_(m_ssestate);
extern Int VGOFF_(m_eip);
extern Int VGOFF_(m_dflag); /* D flag is handled specially */
extern Int VGOFF_(m_cs);
extern Int VGOFF_(m_ss);
extern Int VGOFF_(m_ds);
extern Int VGOFF_(m_es);
extern Int VGOFF_(m_fs);
extern Int VGOFF_(m_gs);
extern Int VGOFF_(m_vex);
extern Int VGOFF_(m_vex_shadow);
/* Reg-alloc spill area (VG_MAX_SPILLSLOTS words long). */
extern Int VGOFF_(spillslots);
/* Records the valid bits for the 8 integer regs & flags reg. */
extern Int VGOFF_(sh_eax);
extern Int VGOFF_(sh_ecx);
extern Int VGOFF_(sh_edx);
extern Int VGOFF_(sh_ebx);
extern Int VGOFF_(sh_esp);
extern Int VGOFF_(sh_ebp);
extern Int VGOFF_(sh_esi);
extern Int VGOFF_(sh_edi);
extern Int VGOFF_(sh_eflags);
/* -----------------------------------------------------
Read-only parts of baseBlock.
@ -134,61 +121,6 @@ extern Int VGOFF_(helper_undefined_instruction);
#define VG_ELF_CLASS ELFCLASS32
/* ---------------------------------------------------------------------
Exports of vg_helpers.S
------------------------------------------------------------------ */
/* Mul, div, etc, -- we don't codegen these directly. */
extern void VG_(helper_idiv_64_32);
extern void VG_(helper_div_64_32);
extern void VG_(helper_idiv_32_16);
extern void VG_(helper_div_32_16);
extern void VG_(helper_idiv_16_8);
extern void VG_(helper_div_16_8);
extern void VG_(helper_imul_32_64);
extern void VG_(helper_mul_32_64);
extern void VG_(helper_imul_16_32);
extern void VG_(helper_mul_16_32);
extern void VG_(helper_imul_8_16);
extern void VG_(helper_mul_8_16);
extern void VG_(helper_CLD);
extern void VG_(helper_STD);
extern void VG_(helper_get_dirflag);
extern void VG_(helper_CLC);
extern void VG_(helper_STC);
extern void VG_(helper_CMC);
extern void VG_(helper_shldl);
extern void VG_(helper_shldw);
extern void VG_(helper_shrdl);
extern void VG_(helper_shrdw);
extern void VG_(helper_IN);
extern void VG_(helper_OUT);
extern void VG_(helper_RDTSC);
extern void VG_(helper_CPUID);
extern void VG_(helper_bsfw);
extern void VG_(helper_bsfl);
extern void VG_(helper_bsrw);
extern void VG_(helper_bsrl);
extern void VG_(helper_fstsw_AX);
extern void VG_(helper_SAHF);
extern void VG_(helper_LAHF);
extern void VG_(helper_DAS);
extern void VG_(helper_DAA);
extern void VG_(helper_AAS);
extern void VG_(helper_AAA);
extern void VG_(helper_AAD);
extern void VG_(helper_AAM);
extern void VG_(helper_cmpxchg8b);
/* ---------------------------------------------------------------------
LDT type
------------------------------------------------------------------ */
@ -227,21 +159,6 @@ typedef struct _LDT_ENTRY {
which need to go here to avoid ugly circularities.
------------------------------------------------------------------ */
/* How big is the saved SSE/SSE2 state? Note that this subsumes the
FPU state. On machines without SSE, we just save/restore the FPU
state into the first part of this area. */
/* A general comment about SSE save/restore: It appears that the 7th
word (which is the MXCSR) has to be &ed with 0x0000FFBF in order
that restoring from it later does not cause a GP fault (which is
delivered as a segfault). I guess this will have to be done
any time we do fxsave :-( 7th word means word offset 6 or byte
offset 24 from the start address of the save area.
*/
#define VG_SIZE_OF_SSESTATE 512
/* ... and in words ... */
#define VG_SIZE_OF_SSESTATE_W ((VG_SIZE_OF_SSESTATE+3)/4)
// Architecture-specific part of a ThreadState
// XXX: eventually this should be made abstract, ie. the fields not visible
// to the core... then VgLdtEntry can be made non-visible to the core
@ -255,50 +172,15 @@ typedef struct {
deallocate this at thread exit. */
VgLdtEntry* ldt;
/* TLS table. This consists of a small number (currently 3) of
entries from the Global Descriptor Table. */
VgLdtEntry tls[VKI_GDT_ENTRY_TLS_ENTRIES];
/* Saved machine context. Note the FPU state, %EIP and segment
registers are not shadowed.
Although the segment registers are 16 bits long, storage
management here and in VG_(baseBlock) is
simplified if we pretend they are 32 bits. */
/* Saved machine context. */
VexGuestX86State vex;
UInt m_cs;
UInt m_ss;
UInt m_ds;
UInt m_es;
UInt m_fs;
UInt m_gs;
UInt m_eax;
UInt m_ebx;
UInt m_ecx;
UInt m_edx;
UInt m_esi;
UInt m_edi;
UInt m_ebp;
UInt m_esp;
UInt m_eflags;
UInt m_eip;
/* The SSE/FPU state. This array does not (necessarily) have the
required 16-byte alignment required to get stuff in/out by
fxsave/fxrestore. So we have to do it "by hand".
*/
UInt m_sse[VG_SIZE_OF_SSESTATE_W];
UInt sh_eax;
UInt sh_ebx;
UInt sh_ecx;
UInt sh_edx;
UInt sh_esi;
UInt sh_edi;
UInt sh_ebp;
UInt sh_esp;
UInt sh_eflags;
/* Saved shadow context. */
VexGuestX86State vex_shadow;
}
arch_thread_t;
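
Per-thread register access thus collapses to two fields: real state in .vex, tool shadow state in .vex_shadow. For example (a sketch; tst assumed to be a ThreadState* whose arch member has this type):

   arch_thread_t* a = &tst->arch;
   Addr ip     = a->vex.guest_EIP;          /* real program counter */
   UInt sh_eax = a->vex_shadow.guest_EAX;   /* tool's shadow of EAX */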
@ -317,7 +199,7 @@ struct arch_thread_aux {
------------------------------------------------------------------ */
// Total number of spill slots available for register allocation.
#define VG_MAX_SPILLSLOTS 24
#define VG_MAX_SPILLSLOTS 100
// Valgrind's signal stack size, in words.
#define VG_SIGSTACK_SIZE_W 10000

View File

@ -28,6 +28,8 @@
The GNU General Public License is contained in the file COPYING.
*/
#if 0
#include "core_asm.h"
/*
@ -77,6 +79,8 @@ VG_(cpuid):
/* Let the linker know we don't need an executable stack */
.section .note.GNU-stack,"",@progbits
#endif /* 0 */
##--------------------------------------------------------------------##
##--- end ---##
##--------------------------------------------------------------------##

View File

@ -1,7 +1,7 @@
##--------------------------------------------------------------------##
##--- The core dispatch loop, for jumping to a code address. ---##
##--- x86/dispatch.S ---##
##--- vg_dispatch.S ---##
##--------------------------------------------------------------------##
/*
@ -94,8 +94,25 @@ VG_(run_innerloop):
movl VGOFF_(m_eip), %esi
movl (%ebp, %esi, 4), %eax
dispatch_main:
/* Jump here to do a new dispatch.
/* fall into main loop */
dispatch_boring:
/* save the jump address at VG_(baseBlock)[VGOFF_(m_eip)] */
movl VGOFF_(m_eip), %esi
movl %eax, (%ebp, %esi, 4)
/* Are we out of timeslice? If yes, defer to scheduler. */
subl $1, VG_(dispatch_ctr)
jz counter_is_zero
/* try a fast lookup in the translation cache */
TT_LOOKUP(%ebx, fast_lookup_failed)
/* Found a match. Call the tce.payload field (+VG_CODE_OFFSET) */
addl $VG_CODE_OFFSET, %ebx
call *%ebx
/*
%eax holds destination (original) address.
%ebp indicates further details of the control transfer
requested to the address in %eax.
@ -110,27 +127,7 @@ dispatch_main:
If %ebp has any other value, we panic.
*/
/*cmpl $VG_(baseBlock), %ebp*/
/*jnz dispatch_exceptional*/
/* fall into main loop */
dispatch_boring:
/* save the jump address at VG_(baseBlock)[VGOFF_(m_eip)] */
movl VGOFF_(m_eip), %esi
movl %eax, (%ebp, %esi, 4)
/* Are we out of timeslice? If yes, defer to scheduler. */
cmpl $0, VG_(dispatch_ctr)
jz counter_is_zero
/* try a fast lookup in the translation cache */
TT_LOOKUP(%ebx, fast_lookup_failed)
/* Found a match. Call the tce.payload field (+VG_CODE_OFFSET) */
addl $VG_CODE_OFFSET, %ebx
incl VG_(unchained_jumps_done) /* update stats */
call *%ebx
cmpl $VG_(baseBlock), %ebp
jz dispatch_boring
@ -139,11 +136,13 @@ dispatch_boring:
fast_lookup_failed:
/* %EIP is up to date here since dispatch_boring dominates */
addl $1, VG_(dispatch_ctr)
movl $VG_TRC_INNER_FASTMISS, %eax
jmp run_innerloop_exit
counter_is_zero:
/* %EIP is up to date here since dispatch_boring dominates */
addl $1, VG_(dispatch_ctr)
movl $VG_TRC_INNER_COUNTERZERO, %eax
jmp run_innerloop_exit
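
The counter discipline has changed from test-then-dispatch to decrement-then-test, and both exit stubs hand back the tick taken for the block that never ran. A C model of the loop above (helper names invented for the sketch):

   while (True) {
      VG_(dispatch_ctr)--;                  /* subl $1             */
      if (VG_(dispatch_ctr) == 0) {
         VG_(dispatch_ctr)++;               /* stub's addl $1, so  */
         return VG_TRC_INNER_COUNTERZERO;   /* ctr == 1 on exit    */
      }
      if (!tt_fast_lookup(eip, &payload)) { /* TT_LOOKUP miss      */
         VG_(dispatch_ctr)++;               /* ditto: ctr > 1      */
         return VG_TRC_INNER_FASTMISS;
      }
      eip = run_translation(payload);       /* call *%ebx          */
   }

This is why the scheduler can now assert ctr == 1 on COUNTERZERO and ctr > 1 on FASTMISS: the decrement for the un-run block is always undone before exiting the loop.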
@ -179,44 +178,6 @@ dispatch_exceptional:
movl %ebp, %eax
jmp run_innerloop_exit
/*
This is the translation chainer, our run-time linker, if you like.
VG_(patch_me) patches the call instruction in the jump site
with a jump to the generated code for the branch target. %eax
contains the original program's EIP - if we get a hit in
tt_fast, then the call is patched into a jump; otherwise it
simply drops back into the dispatch loop for normal
processing.
The callsite is expected to look like:
call VG_(patch_me)
it will be transformed into
jmp $TARGETADDR
The environment we're expecting on entry is:
%eax = branch target address (original code EIP)
*(%esp) = just after call
*/
.globl VG_(patch_me)
VG_(patch_me):
/* try a fast lookup in the translation cache */
TT_LOOKUP(%ebx, 1f)
/* Patch call instruction at callsite into a chained jmp */
popl %eax /* eax = just after (VG_PATCHME_CALLSZ byte) call */
addl $VG_CODE_OFFSET, %ebx /* ebx = target eip */
subl %eax, %ebx /* ebx = delta */
movb $0xE9, -(VG_PATCHME_CALLSZ-0)(%eax) /* 0xe9 = jmp */
movl %ebx, -(VG_PATCHME_CALLSZ-1)(%eax) /* store delta */
addl %eax, %ebx
incl VG_(bb_enchain_count) /* update stats */
jmp *%ebx /* jmp to dest */
/* tt_fast miss: return into main dispatch loop */
1: addl $4, %esp /* remove our call address */
ret /* return into main dispatch loop above */
.data
panic_msg_ebp:
@ -228,5 +189,5 @@ panic_msg_ebp:
.section .note.GNU-stack,"",@progbits
##--------------------------------------------------------------------##
##--- end ---##
##--- end vg_dispatch.S ---##
##--------------------------------------------------------------------##

View File

@ -82,639 +82,7 @@ VG_(tramp_syscall_offset):
.long syscall_start - VG_(trampoline_code_start)
.text
/* ------------------ REAL CPU HELPERS ------------------ */
/* The rest of this lot run on the real CPU. */
/* Various helper routines, for instructions which are just too
darn tedious for the JITter to output code in-line:
* integer division
* integer multiplication
* setting and getting obscure eflags
* double-length shifts
* eight byte compare and exchange
All routines use a standard calling convention designed for
calling from translations, in which the incoming args are
underneath the return address, the callee saves _all_ registers,
and the incoming parameters can be modified, to return results.
*/
/* Fetch the time-stamp-ctr reg.
On entry:
dummy, replaced by %EAX value
dummy, replaced by %EDX value
RA <- %esp
*/
.global VG_(helper_RDTSC)
VG_(helper_RDTSC):
pushl %eax
pushl %edx
rdtsc
movl %edx, 12(%esp)
movl %eax, 16(%esp)
popl %edx
popl %eax
ret
/*
Fetch a byte/word/dword from given port
On entry:
size 1, 2 or 4
port, replaced by result
RA
*/
.global VG_(helper_IN)
VG_(helper_IN):
pushl %eax
pushl %edx
movl 16(%esp), %eax
movl 12(%esp), %edx
pushfl
cmpl $4, %eax
je in_dword
cmpl $2, %eax
je in_word
in_byte:
inb (%dx), %al
jmp in_done
in_word:
in (%dx), %ax
jmp in_done
in_dword:
inl (%dx),%eax
in_done:
popfl
movl %eax,12(%esp)
popl %edx
popl %eax
ret
/*
Write a byte/word/dword to given port
On entry:
size 1, 2 or 4
port
value
RA
*/
.global VG_(helper_OUT)
VG_(helper_OUT):
pushl %eax
pushl %edx
movl 16(%esp), %edx
movl 12(%esp), %eax
pushfl
cmpl $4, 20(%esp)
je out_dword
cmpl $2, 20(%esp)
je out_word
out_byte:
outb %al,(%dx)
jmp out_done
out_word:
out %ax,(%dx)
jmp out_done
out_dword:
outl %eax,(%dx)
out_done:
popfl
popl %edx
popl %eax
ret
/* Do the CPUID instruction.
On entry:
dummy, replaced by %EAX value
dummy, replaced by %EBX value
dummy, replaced by %ECX value
dummy, replaced by %EDX value
RA <- %esp
We save registers and package up the args so we can call a C helper
for all this.
*/
.global VG_(helper_CPUID)
VG_(helper_CPUID):
pushl %ebp
movl %esp,%ebp
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
pushl %esi
pushl %edi
pushf
lea 2*4(%ebp),%eax /* &edx */
pushl %eax
addl $4,%eax /* &ecx */
pushl %eax
addl $4,%eax /* &ebx */
pushl %eax
addl $4,%eax /* &eax */
pushl %eax
pushl (%eax) /* eax */
call VG_(helperc_CPUID)
addl $20,%esp
popf
popl %edi
popl %esi
popl %edx
popl %ecx
popl %ebx
popl %eax
popl %ebp
ret
/* Fetch the FPU status register.
On entry:
dummy, replaced by result
RA <- %esp
*/
.global VG_(helper_fstsw_AX)
VG_(helper_fstsw_AX):
pushl %eax
pushl %esi
movl VGOFF_(m_ssestate), %esi
pushfl
cmpb $0, VG_(have_ssestate)
jz aa1nosse
fxrstor (%ebp, %esi, 4)
jmp aa1merge
aa1nosse:
frstor (%ebp, %esi, 4)
aa1merge:
popfl
fstsw %ax
popl %esi
movw %ax, 8(%esp)
popl %eax
ret
/* Copy %ah into %eflags.
On entry:
value of %eax
RA <- %esp
*/
.global VG_(helper_SAHF)
VG_(helper_SAHF):
pushl %eax
movl 8(%esp), %eax
sahf
popl %eax
ret
/* Copy %eflags into %ah.
On entry:
value of %eax
RA <- %esp
*/
.global VG_(helper_LAHF)
VG_(helper_LAHF):
pushl %eax
movl 8(%esp), %eax
lahf
movl %eax, 8(%esp)
popl %eax
ret
/* Do %al = DAS(%al). Note that the passed param has %AL as the least
significant 8 bits, since it was generated with GETB %AL,
some-temp. Fortunately %al is the least significant 8 bits of
%eax anyway, which is why it's safe to work with %eax as a
whole.
On entry:
value of %eax
RA <- %esp
*/
.global VG_(helper_DAS)
VG_(helper_DAS):
pushl %eax
movl 8(%esp), %eax
das
movl %eax, 8(%esp)
popl %eax
ret
/* Similarly, do %al = DAA(%al). */
.global VG_(helper_DAA)
VG_(helper_DAA):
pushl %eax
movl 8(%esp), %eax
daa
movl %eax, 8(%esp)
popl %eax
ret
/* Similarly, do %ax = AAS(%ax). */
.global VG_(helper_AAS)
VG_(helper_AAS):
pushl %eax
movl 8(%esp), %eax
aas
movl %eax, 8(%esp)
popl %eax
ret
/* Similarly, do %ax = AAA(%ax). */
.global VG_(helper_AAA)
VG_(helper_AAA):
pushl %eax
movl 8(%esp), %eax
aaa
movl %eax, 8(%esp)
popl %eax
ret
/* Similarly, do %ax = AAD(%ax). */
.global VG_(helper_AAD)
VG_(helper_AAD):
pushl %eax
movl 8(%esp), %eax
aad
movl %eax, 8(%esp)
popl %eax
ret
/* Similarly, do %ax = AAM(%ax). */
.global VG_(helper_AAM)
VG_(helper_AAM):
pushl %eax
movl 8(%esp), %eax
aam
movl %eax, 8(%esp)
popl %eax
ret
/* Bit scan forwards/reverse. Sets flags (??).
On entry:
value, replaced by result
RA <- %esp
*/
.global VG_(helper_bsrw)
VG_(helper_bsrw):
pushl %eax
movw 12(%esp), %ax
bsrw 8(%esp), %ax
movw %ax, 12(%esp)
popl %eax
ret
.global VG_(helper_bsrl)
VG_(helper_bsrl):
pushl %eax
movl 12(%esp), %eax
bsrl 8(%esp), %eax
movl %eax, 12(%esp)
popl %eax
ret
.global VG_(helper_bsfw)
VG_(helper_bsfw):
pushl %eax
movw 12(%esp), %ax
bsfw 8(%esp), %ax
movw %ax, 12(%esp)
popl %eax
ret
.global VG_(helper_bsfl)
VG_(helper_bsfl):
pushl %eax
movl 12(%esp), %eax
bsfl 8(%esp), %eax
movl %eax, 12(%esp)
popl %eax
ret
/* 32-bit double-length shift left/right.
On entry:
amount
src
dst
RA <- %esp
*/
.global VG_(helper_shldl)
VG_(helper_shldl):
pushl %eax
pushl %ebx
pushl %ecx
movb 24(%esp), %cl
movl 20(%esp), %ebx
movl 16(%esp), %eax
shldl %cl, %ebx, %eax
movl %eax, 16(%esp)
popl %ecx
popl %ebx
popl %eax
ret
.global VG_(helper_shldw)
VG_(helper_shldw):
pushl %eax
pushl %ebx
pushl %ecx
movb 24(%esp), %cl
movw 20(%esp), %bx
movw 16(%esp), %ax
shldw %cl, %bx, %ax
movw %ax, 16(%esp)
popl %ecx
popl %ebx
popl %eax
ret
.global VG_(helper_shrdl)
VG_(helper_shrdl):
pushl %eax
pushl %ebx
pushl %ecx
movb 24(%esp), %cl
movl 20(%esp), %ebx
movl 16(%esp), %eax
shrdl %cl, %ebx, %eax
movl %eax, 16(%esp)
popl %ecx
popl %ebx
popl %eax
ret
.global VG_(helper_shrdw)
VG_(helper_shrdw):
pushl %eax
pushl %ebx
pushl %ecx
movb 24(%esp), %cl
movw 20(%esp), %bx
movw 16(%esp), %ax
shrdw %cl, %bx, %ax
movw %ax, 16(%esp)
popl %ecx
popl %ebx
popl %eax
ret
/* Get the direction flag, and return either 1 or -1. */
.global VG_(helper_get_dirflag)
VG_(helper_get_dirflag):
pushl %eax
movl VGOFF_(m_dflag), %eax
movl (%ebp, %eax, 4), %eax
movl %eax, 8(%esp)
popl %eax
ret
/* Clear/set the direction flag. */
.global VG_(helper_CLD)
VG_(helper_CLD):
pushl %eax
movl VGOFF_(m_dflag), %eax
movl $1, (%ebp, %eax, 4)
popl %eax
ret
.global VG_(helper_STD)
VG_(helper_STD):
pushl %eax
movl VGOFF_(m_dflag), %eax
movl $-1, (%ebp, %eax, 4)
popl %eax
ret
/* Clear/set/complement the carry flag. */
.global VG_(helper_CLC)
VG_(helper_CLC):
clc
ret
.global VG_(helper_STC)
VG_(helper_STC):
stc
ret
.global VG_(helper_CMC)
VG_(helper_CMC):
cmc
ret
/* Signed 32-to-64 multiply. */
.globl VG_(helper_imul_32_64)
VG_(helper_imul_32_64):
pushl %eax
pushl %edx
movl 16(%esp), %eax
imull 12(%esp)
movl %eax, 16(%esp)
movl %edx, 12(%esp)
popl %edx
popl %eax
ret
/* Signed 16-to-32 multiply. */
.globl VG_(helper_imul_16_32)
VG_(helper_imul_16_32):
pushl %eax
pushl %edx
movw 16(%esp), %ax
imulw 12(%esp)
movw %ax, 16(%esp)
movw %dx, 12(%esp)
popl %edx
popl %eax
ret
/* Signed 8-to-16 multiply. */
.globl VG_(helper_imul_8_16)
VG_(helper_imul_8_16):
pushl %eax
pushl %edx
movb 16(%esp), %al
imulb 12(%esp)
movw %ax, 16(%esp)
popl %edx
popl %eax
ret
/* Unsigned 32-to-64 multiply. */
.globl VG_(helper_mul_32_64)
VG_(helper_mul_32_64):
pushl %eax
pushl %edx
movl 16(%esp), %eax
mull 12(%esp)
movl %eax, 16(%esp)
movl %edx, 12(%esp)
popl %edx
popl %eax
ret
/* Unsigned 16-to-32 multiply. */
.globl VG_(helper_mul_16_32)
VG_(helper_mul_16_32):
pushl %eax
pushl %edx
movw 16(%esp), %ax
mulw 12(%esp)
movw %ax, 16(%esp)
movw %dx, 12(%esp)
popl %edx
popl %eax
ret
/* Unsigned 8-to-16 multiply. */
.globl VG_(helper_mul_8_16)
VG_(helper_mul_8_16):
pushl %eax
pushl %edx
movb 16(%esp), %al
mulb 12(%esp)
movw %ax, 16(%esp)
popl %edx
popl %eax
ret
/* Unsigned 64-into-32 divide. */
.globl VG_(helper_div_64_32)
VG_(helper_div_64_32):
pushl %eax
pushl %edx
movl 16(%esp),%eax
movl 12(%esp),%edx
divl 20(%esp)
movl %eax,16(%esp)
movl %edx,12(%esp)
popl %edx
popl %eax
ret
/* Signed 64-into-32 divide. */
.globl VG_(helper_idiv_64_32)
VG_(helper_idiv_64_32):
pushl %eax
pushl %edx
movl 16(%esp),%eax
movl 12(%esp),%edx
idivl 20(%esp)
movl %eax,16(%esp)
movl %edx,12(%esp)
popl %edx
popl %eax
ret
/* Unsigned 32-into-16 divide. */
.globl VG_(helper_div_32_16)
VG_(helper_div_32_16):
pushl %eax
pushl %edx
movw 16(%esp),%ax
movw 12(%esp),%dx
divw 20(%esp)
movw %ax,16(%esp)
movw %dx,12(%esp)
popl %edx
popl %eax
ret
/* Signed 32-into-16 divide. */
.globl VG_(helper_idiv_32_16)
VG_(helper_idiv_32_16):
pushl %eax
pushl %edx
movw 16(%esp),%ax
movw 12(%esp),%dx
idivw 20(%esp)
movw %ax,16(%esp)
movw %dx,12(%esp)
popl %edx
popl %eax
ret
/* Unsigned 16-into-8 divide. */
.globl VG_(helper_div_16_8)
VG_(helper_div_16_8):
pushl %eax
movw 12(%esp),%ax
divb 16(%esp)
movb %ah,12(%esp)
movb %al,8(%esp)
popl %eax
ret
/* Signed 16-into-8 divide. */
.globl VG_(helper_idiv_16_8)
VG_(helper_idiv_16_8):
pushl %eax
movw 12(%esp),%ax
idivb 16(%esp)
movb %ah,12(%esp)
movb %al,8(%esp)
popl %eax
ret
/* Eight byte compare and exchange. */
.globl VG_(helper_cmpxchg8b)
VG_(helper_cmpxchg8b):
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
movl 20(%esp), %eax
movl 24(%esp), %edx
movl 28(%esp), %ebx
movl 32(%esp), %ecx
cmpxchg8b 36(%esp)
movl %eax, 20(%esp)
movl %edx, 24(%esp)
movl %ebx, 28(%esp)
movl %ecx, 32(%esp)
popl %edx
popl %ecx
popl %ebx
popl %eax
ret
/* Undefined instruction (generates SIGILL) */

View File

@ -30,6 +30,8 @@
#include "core.h"
#include "libvex_guest_x86.h"
/*------------------------------------------------------------*/
/*--- Signal frame ---*/
/*------------------------------------------------------------*/
@ -79,29 +81,10 @@ typedef
/* Safely-saved version of sigNo, as described above. */
Int sigNo_private;
/* Saved processor state. */
UInt m_sse[VG_SIZE_OF_SSESTATE_W];
UInt m_eax;
UInt m_ecx;
UInt m_edx;
UInt m_ebx;
UInt m_ebp;
UInt m_esp;
UInt m_esi;
UInt m_edi;
UInt m_eflags;
Addr m_eip;
UInt sh_eax;
UInt sh_ebx;
UInt sh_ecx;
UInt sh_edx;
UInt sh_esi;
UInt sh_edi;
UInt sh_ebp;
UInt sh_esp;
UInt sh_eflags;
VexGuestX86State vex;
VexGuestX86State vex_shadow;
/* saved signal mask to be restored when handler returns */
vki_sigset_t mask;
@ -136,29 +119,29 @@ static void synth_ucontext(ThreadId tid, const vki_siginfo_t *si,
uc->uc_sigmask = *set;
uc->uc_stack = tst->altstack;
#define SC(reg) sc->reg = tst->arch.m_##reg
SC(gs);
SC(fs);
SC(es);
SC(ds);
#define SC2(reg,REG) sc->reg = tst->arch.vex.guest_##REG
SC2(gs,GS);
SC2(fs,FS);
SC2(es,ES);
SC2(ds,DS);
SC(edi);
SC(esi);
SC(ebp);
SC(esp);
SC(ebx);
SC(edx);
SC(ecx);
SC(eax);
SC2(edi,EDI);
SC2(esi,ESI);
SC2(ebp,EBP);
SC2(esp,ESP);
SC2(ebx,EBX);
SC2(edx,EDX);
SC2(ecx,ECX);
SC2(eax,EAX);
SC(eip);
SC(cs);
SC(eflags);
SC(ss);
SC2(eip,EIP);
SC2(cs,CS);
sc->eflags = LibVEX_GuestX86_get_eflags(&tst->arch.vex);
SC2(ss,SS);
/* XXX esp_at_signal */
/* XXX trapno */
/* XXX err */
#undef SC
#undef SC2
sc->cr2 = (UInt)si->_sifields._sigfault._addr;
}
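
The SC2 helper token-pastes the Vex guest field name, so for example SC2(eax,EAX) expands as sketched below; eflags alone cannot be copied this way, because Vex keeps the flags in a decomposed condition-code thunk rather than as one register, hence the LibVEX_GuestX86_get_eflags() call above:

   /* What SC2(eax,EAX) expands to: */
   sc->eax = tst->arch.vex.guest_EAX;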
@ -174,7 +157,6 @@ void VGA_(push_signal_frame)(ThreadId tid, Addr esp_top_of_frame,
{
Addr esp;
ThreadState* tst;
Int i;
VgSigFrame* frame;
Int sigNo = siginfo->si_signo;
@ -237,31 +219,9 @@ void VGA_(push_signal_frame)(ThreadId tid, Addr esp_top_of_frame,
frame->magicPI = 0x31415927;
for (i = 0; i < VG_SIZE_OF_SSESTATE_W; i++)
frame->m_sse[i] = tst->arch.m_sse[i];
frame->m_eax = tst->arch.m_eax;
frame->m_ecx = tst->arch.m_ecx;
frame->m_edx = tst->arch.m_edx;
frame->m_ebx = tst->arch.m_ebx;
frame->m_ebp = tst->arch.m_ebp;
frame->m_esp = tst->arch.m_esp;
frame->m_esi = tst->arch.m_esi;
frame->m_edi = tst->arch.m_edi;
frame->m_eflags = tst->arch.m_eflags;
frame->m_eip = tst->arch.m_eip;
if (VG_(needs).shadow_regs) {
frame->sh_eax = tst->arch.sh_eax;
frame->sh_ecx = tst->arch.sh_ecx;
frame->sh_edx = tst->arch.sh_edx;
frame->sh_ebx = tst->arch.sh_ebx;
frame->sh_ebp = tst->arch.sh_ebp;
frame->sh_esp = tst->arch.sh_esp;
frame->sh_esi = tst->arch.sh_esi;
frame->sh_edi = tst->arch.sh_edi;
frame->sh_eflags = tst->arch.sh_eflags;
}
frame->vex = tst->arch.vex;
if (VG_(needs).shadow_regs)
frame->vex_shadow = tst->arch.vex_shadow;
frame->mask = tst->sig_mask;
@ -283,19 +243,18 @@ void VGA_(push_signal_frame)(ThreadId tid, Addr esp_top_of_frame,
/* tst->m_esp = esp; */
SET_SIGNAL_ESP(tid, esp);
tst->arch.m_eip = (Addr) handler;
tst->arch.vex.guest_EIP = (Addr) handler;
/* This thread needs to be marked runnable, but we leave that the
caller to do. */
if (0)
VG_(printf)("pushed signal frame; %%ESP now = %p, next %%EBP = %p, status=%d\n",
esp, tst->arch.m_eip, tst->status);
esp, tst->arch.vex.guest_EIP, tst->status);
}
Int VGA_(pop_signal_frame)(ThreadId tid)
{
Addr esp;
Int i;
VgSigFrame* frame;
ThreadState* tst;
@ -303,7 +262,7 @@ Int VGA_(pop_signal_frame)(ThreadId tid)
tst = & VG_(threads)[tid];
/* Correctly reestablish the frame base address. */
esp = tst->arch.m_esp;
esp = tst->arch.vex.guest_ESP;
frame = (VgSigFrame*)
(esp -4 /* because the handler's RET pops the RA */
+20 /* because signalreturn_bogusRA pushes 5 words */);
@ -312,37 +271,15 @@ Int VGA_(pop_signal_frame)(ThreadId tid)
vg_assert(frame->magicE == 0x27182818);
if (VG_(clo_trace_signals))
VG_(message)(Vg_DebugMsg,
"vg_pop_signal_frame (thread %d): valid magic; EIP=%p", tid, frame->m_eip);
"vg_pop_signal_frame (thread %d): valid magic; EIP=%p", tid, frame->vex.guest_EIP);
/* Mark the frame structure as nonaccessible. */
VG_TRACK( die_mem_stack_signal, (Addr)frame, sizeof(VgSigFrame) );
/* restore machine state */
for (i = 0; i < VG_SIZE_OF_SSESTATE_W; i++)
tst->arch.m_sse[i] = frame->m_sse[i];
tst->arch.m_eax = frame->m_eax;
tst->arch.m_ecx = frame->m_ecx;
tst->arch.m_edx = frame->m_edx;
tst->arch.m_ebx = frame->m_ebx;
tst->arch.m_ebp = frame->m_ebp;
tst->arch.m_esp = frame->m_esp;
tst->arch.m_esi = frame->m_esi;
tst->arch.m_edi = frame->m_edi;
tst->arch.m_eflags = frame->m_eflags;
tst->arch.m_eip = frame->m_eip;
if (VG_(needs).shadow_regs) {
tst->arch.sh_eax = frame->sh_eax;
tst->arch.sh_ecx = frame->sh_ecx;
tst->arch.sh_edx = frame->sh_edx;
tst->arch.sh_ebx = frame->sh_ebx;
tst->arch.sh_ebp = frame->sh_ebp;
tst->arch.sh_esp = frame->sh_esp;
tst->arch.sh_esi = frame->sh_esi;
tst->arch.sh_edi = frame->sh_edi;
tst->arch.sh_eflags = frame->sh_eflags;
}
tst->arch.vex = frame->vex;
if (VG_(needs).shadow_regs)
tst->arch.vex_shadow = frame->vex_shadow;
/* And restore the thread's status to what it was before the signal
was delivered. */
@ -364,50 +301,51 @@ Int VGA_(pop_signal_frame)(ThreadId tid)
void VGA_(fill_elfregs_from_BB)(struct vki_user_regs_struct* regs)
{
regs->eflags = VG_(baseBlock)[VGOFF_(m_eflags)];
regs->esp = VG_(baseBlock)[VGOFF_(m_esp)];
regs->eip = VG_(baseBlock)[VGOFF_(m_eip)];
regs->eflags = LibVEX_GuestX86_get_eflags(BASEBLOCK_VEX);
regs->esp = BASEBLOCK_VEX->guest_ESP;
regs->eip = BASEBLOCK_VEX->guest_EIP;
regs->ebx = VG_(baseBlock)[VGOFF_(m_ebx)];
regs->ecx = VG_(baseBlock)[VGOFF_(m_ecx)];
regs->edx = VG_(baseBlock)[VGOFF_(m_edx)];
regs->esi = VG_(baseBlock)[VGOFF_(m_esi)];
regs->edi = VG_(baseBlock)[VGOFF_(m_edi)];
regs->ebp = VG_(baseBlock)[VGOFF_(m_ebp)];
regs->eax = VG_(baseBlock)[VGOFF_(m_eax)];
regs->ebx = BASEBLOCK_VEX->guest_EBX;
regs->ecx = BASEBLOCK_VEX->guest_ECX;
regs->edx = BASEBLOCK_VEX->guest_EDX;
regs->esi = BASEBLOCK_VEX->guest_ESI;
regs->edi = BASEBLOCK_VEX->guest_EDI;
regs->ebp = BASEBLOCK_VEX->guest_EBP;
regs->eax = BASEBLOCK_VEX->guest_EAX;
regs->cs = VG_(baseBlock)[VGOFF_(m_cs)];
regs->ds = VG_(baseBlock)[VGOFF_(m_ds)];
regs->ss = VG_(baseBlock)[VGOFF_(m_ss)];
regs->es = VG_(baseBlock)[VGOFF_(m_es)];
regs->fs = VG_(baseBlock)[VGOFF_(m_fs)];
regs->gs = VG_(baseBlock)[VGOFF_(m_gs)];
regs->cs = BASEBLOCK_VEX->guest_CS;
regs->ds = BASEBLOCK_VEX->guest_DS;
regs->ss = BASEBLOCK_VEX->guest_SS;
regs->es = BASEBLOCK_VEX->guest_ES;
regs->fs = BASEBLOCK_VEX->guest_FS;
regs->gs = BASEBLOCK_VEX->guest_GS;
}
void VGA_(fill_elfregs_from_tst)(struct vki_user_regs_struct* regs,
const arch_thread_t* arch)
arch_thread_t* arch)
{
regs->eflags = arch->m_eflags;
regs->esp = arch->m_esp;
regs->eip = arch->m_eip;
regs->eflags = LibVEX_GuestX86_get_eflags(&arch->vex);
regs->esp = arch->vex.guest_ESP;
regs->eip = arch->vex.guest_EIP;
regs->ebx = arch->m_ebx;
regs->ecx = arch->m_ecx;
regs->edx = arch->m_edx;
regs->esi = arch->m_esi;
regs->edi = arch->m_edi;
regs->ebp = arch->m_ebp;
regs->eax = arch->m_eax;
regs->ebx = arch->vex.guest_EBX;
regs->ecx = arch->vex.guest_ECX;
regs->edx = arch->vex.guest_EDX;
regs->esi = arch->vex.guest_ESI;
regs->edi = arch->vex.guest_EDI;
regs->ebp = arch->vex.guest_EBP;
regs->eax = arch->vex.guest_EAX;
regs->cs = arch->m_cs;
regs->ds = arch->m_ds;
regs->ss = arch->m_ss;
regs->es = arch->m_es;
regs->fs = arch->m_fs;
regs->gs = arch->m_gs;
regs->cs = arch->vex.guest_CS;
regs->ds = arch->vex.guest_DS;
regs->ss = arch->vex.guest_SS;
regs->es = arch->vex.guest_ES;
regs->fs = arch->vex.guest_FS;
regs->gs = arch->vex.guest_GS;
}
#if 0
static void fill_fpu(vki_elf_fpregset_t *fpu, const Char *from)
{
if (VG_(have_ssestate)) {
@ -425,27 +363,28 @@ static void fill_fpu(vki_elf_fpregset_t *fpu, const Char *from)
} else
VG_(memcpy)(fpu, from, sizeof(*fpu));
}
#endif
void VGA_(fill_elffpregs_from_BB)( vki_elf_fpregset_t* fpu )
{
fill_fpu(fpu, (const Char *)&VG_(baseBlock)[VGOFF_(m_ssestate)]);
//fill_fpu(fpu, (const Char *)&VG_(baseBlock)[VGOFF_(m_ssestate)]);
}
void VGA_(fill_elffpregs_from_tst)( vki_elf_fpregset_t* fpu,
const arch_thread_t* arch)
{
fill_fpu(fpu, (const Char *)&arch->m_sse);
//fill_fpu(fpu, (const Char *)&arch->m_sse);
}
void VGA_(fill_elffpxregs_from_BB) ( vki_elf_fpxregset_t* xfpu )
{
VG_(memcpy)(xfpu, &VG_(baseBlock)[VGOFF_(m_ssestate)], sizeof(*xfpu));
//VG_(memcpy)(xfpu, &VG_(baseBlock)[VGOFF_(m_ssestate)], sizeof(*xfpu));
}
void VGA_(fill_elffpxregs_from_tst) ( vki_elf_fpxregset_t* xfpu,
const arch_thread_t* arch )
{
VG_(memcpy)(xfpu, arch->m_sse, sizeof(*xfpu));
//VG_(memcpy)(xfpu, arch->m_sse, sizeof(*xfpu));
}
/*--------------------------------------------------------------------*/

View File

@ -32,97 +32,23 @@
#include "x86_private.h"
#include <sys/ptrace.h>
#include "libvex_guest_x86.h"
/*------------------------------------------------------------*/
/*--- baseBlock setup and operations ---*/
/*------------------------------------------------------------*/
/* The variables storing offsets. */
Int VGOFF_(m_vex) = INVALID_OFFSET;
Int VGOFF_(m_vex_shadow) = INVALID_OFFSET;
Int VGOFF_(m_eax) = INVALID_OFFSET;
Int VGOFF_(m_ecx) = INVALID_OFFSET;
Int VGOFF_(m_edx) = INVALID_OFFSET;
Int VGOFF_(m_ebx) = INVALID_OFFSET;
Int VGOFF_(m_esp) = INVALID_OFFSET;
Int VGOFF_(m_ebp) = INVALID_OFFSET;
Int VGOFF_(m_esi) = INVALID_OFFSET;
Int VGOFF_(m_edi) = INVALID_OFFSET;
Int VGOFF_(m_eflags) = INVALID_OFFSET;
Int VGOFF_(m_dflag) = INVALID_OFFSET;
Int VGOFF_(m_ssestate) = INVALID_OFFSET;
Int VGOFF_(ldt) = INVALID_OFFSET;
Int VGOFF_(tls_ptr) = INVALID_OFFSET;
Int VGOFF_(m_cs) = INVALID_OFFSET;
Int VGOFF_(m_ss) = INVALID_OFFSET;
Int VGOFF_(m_ds) = INVALID_OFFSET;
Int VGOFF_(m_es) = INVALID_OFFSET;
Int VGOFF_(m_fs) = INVALID_OFFSET;
Int VGOFF_(m_gs) = INVALID_OFFSET;
Int VGOFF_(m_eip) = INVALID_OFFSET;
Int VGOFF_(spillslots) = INVALID_OFFSET;
Int VGOFF_(sh_eax) = INVALID_OFFSET;
Int VGOFF_(sh_ecx) = INVALID_OFFSET;
Int VGOFF_(sh_edx) = INVALID_OFFSET;
Int VGOFF_(sh_ebx) = INVALID_OFFSET;
Int VGOFF_(sh_esp) = INVALID_OFFSET;
Int VGOFF_(sh_ebp) = INVALID_OFFSET;
Int VGOFF_(sh_esi) = INVALID_OFFSET;
Int VGOFF_(sh_edi) = INVALID_OFFSET;
Int VGOFF_(sh_eflags) = INVALID_OFFSET;
Int VGOFF_(helper_idiv_64_32) = INVALID_OFFSET;
Int VGOFF_(helper_div_64_32) = INVALID_OFFSET;
Int VGOFF_(helper_idiv_32_16) = INVALID_OFFSET;
Int VGOFF_(helper_div_32_16) = INVALID_OFFSET;
Int VGOFF_(helper_idiv_16_8) = INVALID_OFFSET;
Int VGOFF_(helper_div_16_8) = INVALID_OFFSET;
Int VGOFF_(helper_imul_32_64) = INVALID_OFFSET;
Int VGOFF_(helper_mul_32_64) = INVALID_OFFSET;
Int VGOFF_(helper_imul_16_32) = INVALID_OFFSET;
Int VGOFF_(helper_mul_16_32) = INVALID_OFFSET;
Int VGOFF_(helper_imul_8_16) = INVALID_OFFSET;
Int VGOFF_(helper_mul_8_16) = INVALID_OFFSET;
Int VGOFF_(helper_CLD) = INVALID_OFFSET;
Int VGOFF_(helper_STD) = INVALID_OFFSET;
Int VGOFF_(helper_get_dirflag) = INVALID_OFFSET;
Int VGOFF_(helper_CLC) = INVALID_OFFSET;
Int VGOFF_(helper_STC) = INVALID_OFFSET;
Int VGOFF_(helper_CMC) = INVALID_OFFSET;
Int VGOFF_(helper_shldl) = INVALID_OFFSET;
Int VGOFF_(helper_shldw) = INVALID_OFFSET;
Int VGOFF_(helper_shrdl) = INVALID_OFFSET;
Int VGOFF_(helper_shrdw) = INVALID_OFFSET;
Int VGOFF_(helper_IN) = INVALID_OFFSET;
Int VGOFF_(helper_OUT) = INVALID_OFFSET;
Int VGOFF_(helper_RDTSC) = INVALID_OFFSET;
Int VGOFF_(helper_CPUID) = INVALID_OFFSET;
Int VGOFF_(helper_BSWAP) = INVALID_OFFSET;
Int VGOFF_(helper_bsfw) = INVALID_OFFSET;
Int VGOFF_(helper_bsfl) = INVALID_OFFSET;
Int VGOFF_(helper_bsrw) = INVALID_OFFSET;
Int VGOFF_(helper_bsrl) = INVALID_OFFSET;
Int VGOFF_(helper_fstsw_AX) = INVALID_OFFSET;
Int VGOFF_(helper_SAHF) = INVALID_OFFSET;
Int VGOFF_(helper_LAHF) = INVALID_OFFSET;
Int VGOFF_(helper_DAS) = INVALID_OFFSET;
Int VGOFF_(helper_DAA) = INVALID_OFFSET;
Int VGOFF_(helper_AAS) = INVALID_OFFSET;
Int VGOFF_(helper_AAA) = INVALID_OFFSET;
Int VGOFF_(helper_AAD) = INVALID_OFFSET;
Int VGOFF_(helper_AAM) = INVALID_OFFSET;
Int VGOFF_(helper_cmpxchg8b) = INVALID_OFFSET;
static Int extractDflag(UInt eflags)
{
return ( eflags & EFlagD ? -1 : 1 );
}
static UInt insertDflag(UInt eflags, Int d)
{
vg_assert(d == 1 || d == -1);
eflags &= ~EFlagD;
if (d < 0) eflags |= EFlagD;
return eflags;
}
/* Here we assign actual offsets. It's important on x86 to get the most
popular referents within 128 bytes of the start, so we can take
@@ -132,88 +58,52 @@ static UInt insertDflag(UInt eflags, Int d)
size of translations. */
void VGA_(init_low_baseBlock) ( Addr client_eip, Addr esp_at_startup )
{
/* Those with offsets under 128 are carefully chosen. */
vg_assert(0 == sizeof(VexGuestX86State) % 8);
/* WORD offsets in this column */
/* 0 */ VGOFF_(m_eax) = VG_(alloc_BaB_1_set)(0);
/* 1 */ VGOFF_(m_ecx) = VG_(alloc_BaB_1_set)(0);
/* 2 */ VGOFF_(m_edx) = VG_(alloc_BaB_1_set)(0);
/* 3 */ VGOFF_(m_ebx) = VG_(alloc_BaB_1_set)(0);
/* 4 */ VGOFF_(m_esp) = VG_(alloc_BaB_1_set)(esp_at_startup);
/* 5 */ VGOFF_(m_ebp) = VG_(alloc_BaB_1_set)(0);
/* 6 */ VGOFF_(m_esi) = VG_(alloc_BaB_1_set)(0);
/* 7 */ VGOFF_(m_edi) = VG_(alloc_BaB_1_set)(0);
/* 8 */ VGOFF_(m_eflags) = VG_(alloc_BaB_1_set)(0);
/* First the guest state. */
VGOFF_(m_vex) = VG_(alloc_BaB)( sizeof(VexGuestX86State) / 4 );
/* Then equal sized shadow state. */
VGOFF_(m_vex_shadow) = VG_(alloc_BaB)( sizeof(VexGuestX86State) / 4 );
/* Finally the spill area. */
VGOFF_(spillslots) = VG_(alloc_BaB)( LibVEX_N_SPILL_BYTES/4 );
if (0) VG_(printf)("SPILL SLOTS start at %d\n", VGOFF_(spillslots));
/* Zero out the initial state, and set up the simulated FPU in a
sane way. */
LibVEX_GuestX86_initialise(BASEBLOCK_VEX);
/* Zero out the shadow area. */
VG_(memset)(BASEBLOCK_VEX_SHADOW, 0, sizeof(VexGuestX86State));
/* Put essential stuff into the new state. */
BASEBLOCK_VEX->guest_ESP = esp_at_startup;
BASEBLOCK_VEX->guest_EIP = client_eip;
/* The dispatch loop needs to be able to find %EIP. */
VGOFF_(m_eip)
= VGOFF_(m_vex) + offsetof(VexGuestX86State,guest_EIP)/4;
if (VG_(needs).shadow_regs) {
/* 9 */ VGOFF_(sh_eax) = VG_(alloc_BaB_1_set)(0);
/* 10 */ VGOFF_(sh_ecx) = VG_(alloc_BaB_1_set)(0);
/* 11 */ VGOFF_(sh_edx) = VG_(alloc_BaB_1_set)(0);
/* 12 */ VGOFF_(sh_ebx) = VG_(alloc_BaB_1_set)(0);
/* 13 */ VGOFF_(sh_esp) = VG_(alloc_BaB_1_set)(0);
/* 14 */ VGOFF_(sh_ebp) = VG_(alloc_BaB_1_set)(0);
/* 15 */ VGOFF_(sh_esi) = VG_(alloc_BaB_1_set)(0);
/* 16 */ VGOFF_(sh_edi) = VG_(alloc_BaB_1_set)(0);
/* 17 */ VGOFF_(sh_eflags) = VG_(alloc_BaB_1_set)(0);
VG_TRACK( post_regs_write_init );
}
/* 9,10,11 or 18,19,20... depends on whether shadow regs are used
* and on how many compact helpers are registered */
/* Make these most-frequently-called specialised ones compact, if they
are used. */
if (VG_(defined_new_mem_stack_4)())
VG_(register_compact_helper)( (Addr) VG_(tool_interface).track_new_mem_stack_4);
if (VG_(defined_die_mem_stack_4)())
VG_(register_compact_helper)( (Addr) VG_(tool_interface).track_die_mem_stack_4);
}
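/* Editor's sketch (not in the original commit): how the word-offset
   arithmetic above works.  A guest field's slot in VG_(baseBlock) is
   the start of the vex state plus the field's byte offset divided by
   4, since baseBlock is an array of 32-bit words.  The helper name is
   hypothetical. */
static Int guest_field_word_offset ( Int byte_off )
{
   vg_assert(0 == byte_off % 4);   /* only word-aligned fields */
   return VGOFF_(m_vex) + byte_off / 4;
}
/* e.g. the m_eip computation above is equivalent to:
   VGOFF_(m_eip) = guest_field_word_offset(
                      offsetof(VexGuestX86State, guest_EIP) ); */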
void VGA_(init_high_baseBlock)( Addr client_eip, Addr esp_at_startup )
{
/* (9/10 or 18/19) + n_compact_helpers */
VGOFF_(m_eip) = VG_(alloc_BaB_1_set)(client_eip);
/* There are currently 24 spill slots */
/* (11+/20+ .. 32+/43+) + n_compact_helpers. This can overlap the magic
* boundary at >= 32 words, but most spills are to low numbered spill
* slots, so the ones above the boundary don't see much action. */
VGOFF_(spillslots) = VG_(alloc_BaB)(VG_MAX_SPILLSLOTS);
/* I gave up counting at this point. Since they're above the
short-amode-boundary, there's no point. */
VGOFF_(m_dflag) = VG_(alloc_BaB_1_set)(1); // 1 == forward D-flag
/* The FPU/SSE state. This _must_ be 16-byte aligned. Initial
state doesn't matter much, as long as it's not totally borked. */
VG_(align_BaB)(16);
VGOFF_(m_ssestate) = VG_(alloc_BaB)(VG_SIZE_OF_SSESTATE_W);
vg_assert(
0 == ( ((UInt)(& VG_(baseBlock)[VGOFF_(m_ssestate)])) % 16 )
);
/* I assume that if we have SSE2 we also have SSE */
VG_(have_ssestate) =
VG_(cpu_has_feature)(VG_X86_FEAT_FXSR) &&
VG_(cpu_has_feature)(VG_X86_FEAT_SSE);
/* set up an initial FPU state (doesn't really matter what it is,
so long as it's somewhat valid) */
if (!VG_(have_ssestate))
asm volatile("fwait; fnsave %0; fwait; frstor %0; fwait"
:
: "m" (VG_(baseBlock)[VGOFF_(m_ssestate)])
: "cc", "memory");
else
asm volatile("fwait; fxsave %0; fwait; andl $0xffbf, %1;"
"fxrstor %0; fwait"
:
: "m" (VG_(baseBlock)[VGOFF_(m_ssestate)]),
"m" (VG_(baseBlock)[VGOFF_(m_ssestate)+(24/4)])
: "cc", "memory");
VG_(have_ssestate) = False;
// VG_(cpu_has_feature)(VG_X86_FEAT_FXSR) &&
// VG_(cpu_has_feature)(VG_X86_FEAT_SSE);
if (0) {
if (VG_(have_ssestate))
@@ -228,65 +118,17 @@ void VGA_(init_high_baseBlock)( Addr client_eip, Addr esp_at_startup )
/* TLS pointer: pretend the root thread has no TLS array for now. */
VGOFF_(tls_ptr) = VG_(alloc_BaB_1_set)((UInt)NULL);
/* segment registers */
VGOFF_(m_cs) = VG_(alloc_BaB_1_set)(0);
VGOFF_(m_ss) = VG_(alloc_BaB_1_set)(0);
VGOFF_(m_ds) = VG_(alloc_BaB_1_set)(0);
VGOFF_(m_es) = VG_(alloc_BaB_1_set)(0);
VGOFF_(m_fs) = VG_(alloc_BaB_1_set)(0);
VGOFF_(m_gs) = VG_(alloc_BaB_1_set)(0);
/* initialise %cs, %ds and %ss to point at the operating system's
default code, data and stack segments */
asm volatile("movw %%cs, %0"
:
: "m" (VG_(baseBlock)[VGOFF_(m_cs)]));
: "m" (BASEBLOCK_VEX->guest_CS));
asm volatile("movw %%ds, %0"
:
: "m" (VG_(baseBlock)[VGOFF_(m_ds)]));
: "m" (BASEBLOCK_VEX->guest_DS));
asm volatile("movw %%ss, %0"
:
: "m" (VG_(baseBlock)[VGOFF_(m_ss)]));
VG_(register_noncompact_helper)( (Addr) & VG_(do_useseg) );
# define HELPER(name) \
VGOFF_(helper_##name) = VG_(alloc_BaB_1_set)( (Addr) & VG_(helper_##name))
/* Helper functions. */
HELPER(idiv_64_32); HELPER(div_64_32);
HELPER(idiv_32_16); HELPER(div_32_16);
HELPER(idiv_16_8); HELPER(div_16_8);
HELPER(imul_32_64); HELPER(mul_32_64);
HELPER(imul_16_32); HELPER(mul_16_32);
HELPER(imul_8_16); HELPER(mul_8_16);
HELPER(CLD); HELPER(STD);
HELPER(get_dirflag);
HELPER(CLC); HELPER(STC);
HELPER(CMC);
HELPER(shldl); HELPER(shldw);
HELPER(shrdl); HELPER(shrdw);
HELPER(RDTSC); HELPER(CPUID);
HELPER(bsfw); HELPER(bsfl);
HELPER(bsrw); HELPER(bsrl);
HELPER(fstsw_AX);
HELPER(SAHF); HELPER(LAHF);
HELPER(DAS); HELPER(DAA);
HELPER(AAS); HELPER(AAA);
HELPER(AAD); HELPER(AAM);
HELPER(IN); HELPER(OUT);
HELPER(cmpxchg8b);
HELPER(undefined_instruction);
# undef HELPER
: "m" (BASEBLOCK_VEX->guest_SS));
}
/* Junk to fill up a thread's shadow regs with when shadow regs aren't
@@ -295,44 +137,16 @@ void VGA_(init_high_baseBlock)( Addr client_eip, Addr esp_at_startup )
void VGA_(load_state) ( arch_thread_t* arch, ThreadId tid )
{
Int i;
VG_(baseBlock)[VGOFF_(ldt)] = (UInt)arch->ldt;
VG_(baseBlock)[VGOFF_(tls_ptr)] = (UInt)arch->tls;
VG_(baseBlock)[VGOFF_(m_cs)] = arch->m_cs;
VG_(baseBlock)[VGOFF_(m_ss)] = arch->m_ss;
VG_(baseBlock)[VGOFF_(m_ds)] = arch->m_ds;
VG_(baseBlock)[VGOFF_(m_es)] = arch->m_es;
VG_(baseBlock)[VGOFF_(m_fs)] = arch->m_fs;
VG_(baseBlock)[VGOFF_(m_gs)] = arch->m_gs;
VG_(baseBlock)[VGOFF_(m_eax)] = arch->m_eax;
VG_(baseBlock)[VGOFF_(m_ebx)] = arch->m_ebx;
VG_(baseBlock)[VGOFF_(m_ecx)] = arch->m_ecx;
VG_(baseBlock)[VGOFF_(m_edx)] = arch->m_edx;
VG_(baseBlock)[VGOFF_(m_esi)] = arch->m_esi;
VG_(baseBlock)[VGOFF_(m_edi)] = arch->m_edi;
VG_(baseBlock)[VGOFF_(m_ebp)] = arch->m_ebp;
VG_(baseBlock)[VGOFF_(m_esp)] = arch->m_esp;
VG_(baseBlock)[VGOFF_(m_eflags)] = arch->m_eflags & ~EFlagD;
VG_(baseBlock)[VGOFF_(m_dflag)] = extractDflag(arch->m_eflags);
VG_(baseBlock)[VGOFF_(m_eip)] = arch->m_eip;
for (i = 0; i < VG_SIZE_OF_SSESTATE_W; i++)
VG_(baseBlock)[VGOFF_(m_ssestate) + i] = arch->m_sse[i];
*BASEBLOCK_VEX = arch->vex;
if (VG_(needs).shadow_regs) {
VG_(baseBlock)[VGOFF_(sh_eax)] = arch->sh_eax;
VG_(baseBlock)[VGOFF_(sh_ebx)] = arch->sh_ebx;
VG_(baseBlock)[VGOFF_(sh_ecx)] = arch->sh_ecx;
VG_(baseBlock)[VGOFF_(sh_edx)] = arch->sh_edx;
VG_(baseBlock)[VGOFF_(sh_esi)] = arch->sh_esi;
VG_(baseBlock)[VGOFF_(sh_edi)] = arch->sh_edi;
VG_(baseBlock)[VGOFF_(sh_ebp)] = arch->sh_ebp;
VG_(baseBlock)[VGOFF_(sh_esp)] = arch->sh_esp;
VG_(baseBlock)[VGOFF_(sh_eflags)] = arch->sh_eflags;
*BASEBLOCK_VEX_SHADOW = arch->vex_shadow;
} else {
/* Fields shouldn't be used -- check their values haven't changed. */
/* ummm ...
vg_assert(
VG_UNUSED_SHADOW_REG_VALUE == arch->sh_eax &&
VG_UNUSED_SHADOW_REG_VALUE == arch->sh_ebx &&
@@ -343,6 +157,7 @@ void VGA_(load_state) ( arch_thread_t* arch, ThreadId tid )
VG_UNUSED_SHADOW_REG_VALUE == arch->sh_ebp &&
VG_UNUSED_SHADOW_REG_VALUE == arch->sh_esp &&
VG_UNUSED_SHADOW_REG_VALUE == arch->sh_eflags);
*/
}
}
@@ -358,7 +173,7 @@ void VGA_(save_state)( arch_thread_t *arch, ThreadId tid )
assertion fails. */
if ((void*)arch->ldt != (void*)VG_(baseBlock)[VGOFF_(ldt)])
VG_(printf)("VG_(threads)[%d].ldt=%p VG_(baseBlock)[VGOFF_(ldt)]=%p\n",
tid, (void*)arch->ldt,
(void*)VG_(baseBlock)[VGOFF_(ldt)]);
vg_assert((void*)arch->ldt == (void*)VG_(baseBlock)[VGOFF_(ldt)]);
@@ -371,48 +186,19 @@ void VGA_(save_state)( arch_thread_t *arch, ThreadId tid )
if ((void*)arch->tls != (void*)VG_(baseBlock)[VGOFF_(tls_ptr)])
VG_(printf)("VG_(threads)[%d].tls=%p VG_(baseBlock)[VGOFF_(tls_ptr)]=%p\
n",
tid, (void*)arch->tls,
tid, (void*)arch->tls,
(void*)VG_(baseBlock)[VGOFF_(tls_ptr)]);
vg_assert((void*)arch->tls
== (void*)VG_(baseBlock)[VGOFF_(tls_ptr)]);
arch->m_cs = VG_(baseBlock)[VGOFF_(m_cs)];
arch->m_ss = VG_(baseBlock)[VGOFF_(m_ss)];
arch->m_ds = VG_(baseBlock)[VGOFF_(m_ds)];
arch->m_es = VG_(baseBlock)[VGOFF_(m_es)];
arch->m_fs = VG_(baseBlock)[VGOFF_(m_fs)];
arch->m_gs = VG_(baseBlock)[VGOFF_(m_gs)];
arch->m_eax = VG_(baseBlock)[VGOFF_(m_eax)];
arch->m_ebx = VG_(baseBlock)[VGOFF_(m_ebx)];
arch->m_ecx = VG_(baseBlock)[VGOFF_(m_ecx)];
arch->m_edx = VG_(baseBlock)[VGOFF_(m_edx)];
arch->m_esi = VG_(baseBlock)[VGOFF_(m_esi)];
arch->m_edi = VG_(baseBlock)[VGOFF_(m_edi)];
arch->m_ebp = VG_(baseBlock)[VGOFF_(m_ebp)];
arch->m_esp = VG_(baseBlock)[VGOFF_(m_esp)];
arch->m_eflags
= insertDflag(VG_(baseBlock)[VGOFF_(m_eflags)],
VG_(baseBlock)[VGOFF_(m_dflag)]);
arch->m_eip = VG_(baseBlock)[VGOFF_(m_eip)];
for (i = 0; i < VG_SIZE_OF_SSESTATE_W; i++)
arch->m_sse[i]
= VG_(baseBlock)[VGOFF_(m_ssestate) + i];
arch->vex = *BASEBLOCK_VEX;
if (VG_(needs).shadow_regs) {
arch->sh_eax = VG_(baseBlock)[VGOFF_(sh_eax)];
arch->sh_ebx = VG_(baseBlock)[VGOFF_(sh_ebx)];
arch->sh_ecx = VG_(baseBlock)[VGOFF_(sh_ecx)];
arch->sh_edx = VG_(baseBlock)[VGOFF_(sh_edx)];
arch->sh_esi = VG_(baseBlock)[VGOFF_(sh_esi)];
arch->sh_edi = VG_(baseBlock)[VGOFF_(sh_edi)];
arch->sh_ebp = VG_(baseBlock)[VGOFF_(sh_ebp)];
arch->sh_esp = VG_(baseBlock)[VGOFF_(sh_esp)];
arch->sh_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];
arch->vex_shadow = *BASEBLOCK_VEX_SHADOW;
} else {
/* Fill with recognisable junk */
/* can't easily do this ...
arch->sh_eax =
arch->sh_ebx =
arch->sh_ecx =
@@ -420,34 +206,92 @@ n",
arch->sh_esi =
arch->sh_edi =
arch->sh_ebp =
arch->sh_esp =
arch->sh_eflags = VG_UNUSED_SHADOW_REG_VALUE;
*/
}
/* Fill it up with junk. */
VG_(baseBlock)[VGOFF_(ldt)] = junk;
VG_(baseBlock)[VGOFF_(tls_ptr)] = junk;
VG_(baseBlock)[VGOFF_(m_cs)] = junk;
VG_(baseBlock)[VGOFF_(m_ss)] = junk;
VG_(baseBlock)[VGOFF_(m_ds)] = junk;
VG_(baseBlock)[VGOFF_(m_es)] = junk;
VG_(baseBlock)[VGOFF_(m_fs)] = junk;
VG_(baseBlock)[VGOFF_(m_gs)] = junk;
VG_(baseBlock)[VGOFF_(m_eax)] = junk;
VG_(baseBlock)[VGOFF_(m_ebx)] = junk;
VG_(baseBlock)[VGOFF_(m_ecx)] = junk;
VG_(baseBlock)[VGOFF_(m_edx)] = junk;
VG_(baseBlock)[VGOFF_(m_esi)] = junk;
VG_(baseBlock)[VGOFF_(m_edi)] = junk;
VG_(baseBlock)[VGOFF_(m_ebp)] = junk;
VG_(baseBlock)[VGOFF_(m_esp)] = junk;
VG_(baseBlock)[VGOFF_(m_eflags)] = junk;
VG_(baseBlock)[VGOFF_(m_eip)] = junk;
for (i = 0; i < VG_SIZE_OF_SSESTATE_W; i++)
VG_(baseBlock)[VGOFF_(m_ssestate) + i] = junk;
for (i = 0; i < (3 + sizeof(VexGuestX86State)) / 4; i++)
VG_(baseBlock)[VGOFF_(m_vex) + i] = junk;
}
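/* Editor's sketch (not in the original commit): the save/load pair
   above is used symmetrically at a thread switch; switch_threads is a
   hypothetical illustration of that pairing. */
static void switch_threads ( ThreadId from, ThreadId to )
{
   /* Park the outgoing thread's guest + shadow state ... */
   VGA_(save_state)( &VG_(threads)[from].arch, from );
   /* ... then install the incoming thread's. */
   VGA_(load_state)( &VG_(threads)[to].arch, to );
}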
/*------------------------------------------------------------*/
/*--- Register access stuff ---*/
/*------------------------------------------------------------*/
void VGA_(set_thread_shadow_archreg) ( ThreadId tid, UInt archreg, UInt val )
{
ThreadState* tst;
vg_assert(VG_(is_valid_tid)(tid));
tst = & VG_(threads)[tid];
if (0)
VG_(printf)("set_thread_shadow_archreg(%d, %d, 0x%x)\n",
tid, archreg, val);
switch (archreg) {
case R_EAX: tst->arch.vex_shadow.guest_EAX = val; break;
case R_ECX: tst->arch.vex_shadow.guest_ECX = val; break;
case R_EDX: tst->arch.vex_shadow.guest_EDX = val; break;
case R_EBX: tst->arch.vex_shadow.guest_EBX = val; break;
case R_ESP: tst->arch.vex_shadow.guest_ESP = val; break;
case R_EBP: tst->arch.vex_shadow.guest_EBP = val; break;
case R_ESI: tst->arch.vex_shadow.guest_ESI = val; break;
case R_EDI: tst->arch.vex_shadow.guest_EDI = val; break;
default: VG_(core_panic)( "set_thread_shadow_archreg");
}
}
UInt VGA_(get_thread_shadow_archreg) ( ThreadId tid, UInt archreg )
{
ThreadState* tst;
vg_assert(VG_(is_valid_tid)(tid));
tst = & VG_(threads)[tid];
VG_(printf)("get_thread_shadow_archreg(%d, %d)\n",
tid, archreg);
switch (archreg) {
case R_EAX: return tst->arch.vex_shadow.guest_EAX;
case R_ECX: return tst->arch.vex_shadow.guest_ECX;
case R_EDX: return tst->arch.vex_shadow.guest_EDX;
case R_EBX: return tst->arch.vex_shadow.guest_EBX;
case R_ESP: return tst->arch.vex_shadow.guest_ESP;
case R_EBP: return tst->arch.vex_shadow.guest_EBP;
case R_ESI: return tst->arch.vex_shadow.guest_ESI;
case R_EDI: return tst->arch.vex_shadow.guest_EDI;
default: VG_(core_panic)( "get_thread_shadow_archreg");
}
}
/* Return the baseBlock index for the specified shadow register */
static Int shadow_reg_index ( Int arch )
{
VG_(printf)("shadow_reg_index(%d)\n",
arch);
switch (arch) {
case R_EAX: return VGOFF_(m_vex_shadow) + offsetof(VexGuestX86State,guest_EAX)/4;
case R_ECX: return VGOFF_(m_vex_shadow) + offsetof(VexGuestX86State,guest_ECX)/4;
case R_EDX: return VGOFF_(m_vex_shadow) + offsetof(VexGuestX86State,guest_EDX)/4;
case R_EBX: return VGOFF_(m_vex_shadow) + offsetof(VexGuestX86State,guest_EBX)/4;
case R_ESP: return VGOFF_(m_vex_shadow) + offsetof(VexGuestX86State,guest_ESP)/4;
case R_EBP: return VGOFF_(m_vex_shadow) + offsetof(VexGuestX86State,guest_EBP)/4;
case R_ESI: return VGOFF_(m_vex_shadow) + offsetof(VexGuestX86State,guest_ESI)/4;
case R_EDI: return VGOFF_(m_vex_shadow) + offsetof(VexGuestX86State,guest_EDI)/4;
default: VG_(core_panic)( "shadow_reg_index");
}
}
/* Accessing shadow arch. registers */
UInt VGA_(get_shadow_archreg) ( UInt archreg )
{
return VG_(baseBlock)[ shadow_reg_index(archreg) ];
}
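/* Editor's sketch (not in the original commit): typical tool-side use
   of the shadow accessors above.  The "0 == fully defined" convention
   is an assumption borrowed from a memcheck-like tool, not part of
   this interface. */
static void example_mark_eax_defined ( ThreadId tid )
{
   VGA_(set_thread_shadow_archreg)( tid, R_EAX, 0 );
   vg_assert( 0 == VGA_(get_thread_shadow_archreg)( tid, R_EAX ) );
}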
/*------------------------------------------------------------*/
/*--- Thread stuff ---*/
/*------------------------------------------------------------*/
@@ -493,15 +337,15 @@ void VGA_(setup_child) ( arch_thread_t *regs, arch_thread_t *parent_regs )
void VGA_(set_arg_and_bogus_ret)( ThreadId tid, UWord arg, Addr ret )
{
/* Push the arg, and mark it as readable. */
SET_PTHREQ_ESP(tid, VG_(threads)[tid].arch.m_esp - sizeof(UWord));
* (UInt*)(VG_(threads)[tid].arch.m_esp) = arg;
VG_TRACK( post_mem_write, VG_(threads)[tid].arch.m_esp, sizeof(void*) );
SET_PTHREQ_ESP(tid, VG_(threads)[tid].arch.vex.guest_ESP - sizeof(UWord));
* (UInt*)(VG_(threads)[tid].arch.vex.guest_ESP) = arg;
VG_TRACK( post_mem_write, VG_(threads)[tid].arch.vex.guest_ESP, sizeof(void*) );
/* Don't mark the pushed return address as readable; any attempt to read
this is an internal valgrind bug since thread_exit_wrapper() should not
return. */
SET_PTHREQ_ESP(tid, VG_(threads)[tid].arch.m_esp - sizeof(UWord));
* (UInt*)(VG_(threads)[tid].arch.m_esp) = ret;
SET_PTHREQ_ESP(tid, VG_(threads)[tid].arch.vex.guest_ESP - sizeof(UWord));
* (UInt*)(VG_(threads)[tid].arch.vex.guest_ESP) = ret;
}
void VGA_(thread_initial_stack)(ThreadId tid, UWord arg, Addr ret)
@@ -530,33 +374,30 @@ void VGA_(thread_initial_stack)(ThreadId tid, UWord arg, Addr ret)
UInt *VGA_(reg_addr_from_BB)(Int regno)
{
Int r;
switch (regno) {
case R_EAX: r = VGOFF_(m_eax); break;
case R_ECX: r = VGOFF_(m_ecx); break;
case R_EDX: r = VGOFF_(m_edx); break;
case R_EBX: r = VGOFF_(m_ebx); break;
case R_ESP: r = VGOFF_(m_esp); break;
case R_EBP: r = VGOFF_(m_ebp); break;
case R_ESI: r = VGOFF_(m_esi); break;
case R_EDI: r = VGOFF_(m_edi); break;
default:
return NULL;
case R_EAX: return &(BASEBLOCK_VEX->guest_EAX);
case R_ECX: return &(BASEBLOCK_VEX->guest_ECX);
case R_EDX: return &(BASEBLOCK_VEX->guest_EDX);
case R_EBX: return &(BASEBLOCK_VEX->guest_EBX);
case R_ESP: return &(BASEBLOCK_VEX->guest_ESP);
case R_EBP: return &(BASEBLOCK_VEX->guest_EBP);
case R_ESI: return &(BASEBLOCK_VEX->guest_ESI);
case R_EDI: return &(BASEBLOCK_VEX->guest_EDI);
default: return NULL;
}
return &VG_(baseBlock)[r];
}
UInt *VGA_(reg_addr_from_tst)(Int regno, arch_thread_t *arch)
{
switch (regno) {
case R_EAX: return &arch->m_eax;
case R_ECX: return &arch->m_ecx;
case R_EDX: return &arch->m_edx;
case R_EBX: return &arch->m_ebx;
case R_ESP: return &arch->m_esp;
case R_EBP: return &arch->m_ebp;
case R_ESI: return &arch->m_esi;
case R_EDI: return &arch->m_edi;
case R_EAX: return &arch->vex.guest_EAX;
case R_ECX: return &arch->vex.guest_ECX;
case R_EDX: return &arch->vex.guest_EDX;
case R_EBX: return &arch->vex.guest_EBX;
case R_ESP: return &arch->vex.guest_ESP;
case R_EBP: return &arch->vex.guest_EBP;
case R_ESI: return &arch->vex.guest_ESI;
case R_EDI: return &arch->vex.guest_EDI;
default: return NULL;
}
}
@@ -597,22 +438,22 @@ Int VGA_(ptrace_setregs_from_BB)(Int pid)
{
struct vki_user_regs_struct regs;
regs.cs = VG_(baseBlock)[VGOFF_(m_cs)];
regs.ss = VG_(baseBlock)[VGOFF_(m_ss)];
regs.ds = VG_(baseBlock)[VGOFF_(m_ds)];
regs.es = VG_(baseBlock)[VGOFF_(m_es)];
regs.fs = VG_(baseBlock)[VGOFF_(m_fs)];
regs.gs = VG_(baseBlock)[VGOFF_(m_gs)];
regs.eax = VG_(baseBlock)[VGOFF_(m_eax)];
regs.ebx = VG_(baseBlock)[VGOFF_(m_ebx)];
regs.ecx = VG_(baseBlock)[VGOFF_(m_ecx)];
regs.edx = VG_(baseBlock)[VGOFF_(m_edx)];
regs.esi = VG_(baseBlock)[VGOFF_(m_esi)];
regs.edi = VG_(baseBlock)[VGOFF_(m_edi)];
regs.ebp = VG_(baseBlock)[VGOFF_(m_ebp)];
regs.esp = VG_(baseBlock)[VGOFF_(m_esp)];
regs.eflags = VG_(baseBlock)[VGOFF_(m_eflags)];
regs.eip = VG_(baseBlock)[VGOFF_(m_eip)];
regs.cs = BASEBLOCK_VEX->guest_CS;
regs.ss = BASEBLOCK_VEX->guest_SS;
regs.ds = BASEBLOCK_VEX->guest_DS;
regs.es = BASEBLOCK_VEX->guest_ES;
regs.fs = BASEBLOCK_VEX->guest_FS;
regs.gs = BASEBLOCK_VEX->guest_GS;
regs.eax = BASEBLOCK_VEX->guest_EAX;
regs.ebx = BASEBLOCK_VEX->guest_EBX;
regs.ecx = BASEBLOCK_VEX->guest_ECX;
regs.edx = BASEBLOCK_VEX->guest_EDX;
regs.esi = BASEBLOCK_VEX->guest_ESI;
regs.edi = BASEBLOCK_VEX->guest_EDI;
regs.ebp = BASEBLOCK_VEX->guest_EBP;
regs.esp = BASEBLOCK_VEX->guest_ESP;
regs.eflags = LibVEX_GuestX86_get_eflags(BASEBLOCK_VEX);
regs.eip = BASEBLOCK_VEX->guest_EIP;
return ptrace(PTRACE_SETREGS, pid, NULL, &regs);
}
@@ -621,22 +462,22 @@ Int VGA_(ptrace_setregs_from_tst)(Int pid, arch_thread_t* arch)
{
struct vki_user_regs_struct regs;
regs.cs = arch->m_cs;
regs.ss = arch->m_ss;
regs.ds = arch->m_ds;
regs.es = arch->m_es;
regs.fs = arch->m_fs;
regs.gs = arch->m_gs;
regs.eax = arch->m_eax;
regs.ebx = arch->m_ebx;
regs.ecx = arch->m_ecx;
regs.edx = arch->m_edx;
regs.esi = arch->m_esi;
regs.edi = arch->m_edi;
regs.ebp = arch->m_ebp;
regs.esp = arch->m_esp;
regs.eflags = arch->m_eflags;
regs.eip = arch->m_eip;
regs.cs = arch->vex.guest_CS;
regs.ss = arch->vex.guest_SS;
regs.ds = arch->vex.guest_DS;
regs.es = arch->vex.guest_ES;
regs.fs = arch->vex.guest_FS;
regs.gs = arch->vex.guest_GS;
regs.eax = arch->vex.guest_EAX;
regs.ebx = arch->vex.guest_EBX;
regs.ecx = arch->vex.guest_ECX;
regs.edx = arch->vex.guest_EDX;
regs.esi = arch->vex.guest_ESI;
regs.edi = arch->vex.guest_EDI;
regs.ebp = arch->vex.guest_EBP;
regs.esp = arch->vex.guest_ESP;
regs.eflags = LibVEX_GuestX86_get_eflags(&arch->vex);
regs.eip = arch->vex.guest_EIP;
return ptrace(PTRACE_SETREGS, pid, NULL, &regs);
}

View File

@@ -35,22 +35,19 @@
Basic types
------------------------------------------------------------------ */
#include "libvex_basictypes.h"
/* VEX defines Char, UChar, Short, UShort, Int, UInt, Long, ULong,
Addr32, Addr64, HWord, HChar, Bool, False and True. */
// By choosing the right types, we can get these right for 32-bit and 64-bit
// platforms without having to do any conditional compilation or anything.
//
// Size in bits on: 32-bit archs 64-bit archs
// ------------ ------------
typedef unsigned char UChar; // 8 8
typedef unsigned short UShort; // 16 16
typedef unsigned int UInt; // 32 32
typedef unsigned long UWord; // 32 64
typedef unsigned long long ULong; // 64 64
typedef signed char Char; // 8 8
typedef signed short Short; // 16 16
typedef signed int Int; // 32 32
typedef signed long Word; // 32 64
typedef signed long long Long; // 64 64
typedef UWord Addr; // 32 64
@@ -59,9 +56,6 @@ typedef Word SSizeT; // 32 64
typedef Word OffT; // 32 64
typedef UChar Bool; // 8 8
#define False ((Bool)0)
#define True ((Bool)1)
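/* Editor's sketch (not in the original commit): the size table above
   can be checked at compile time with the negative-array-size trick;
   each typedef fails to compile if an assumption is wrong. */
typedef char assert_UInt_is_32_bits  [ sizeof(UInt)  == 4             ? 1 : -1 ];
typedef char assert_ULong_is_64_bits [ sizeof(ULong) == 8             ? 1 : -1 ];
typedef char assert_UWord_is_ptr_wide[ sizeof(UWord) == sizeof(void*) ? 1 : -1 ];
typedef char assert_Addr_is_ptr_wide [ sizeof(Addr)  == sizeof(void*) ? 1 : -1 ];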
/* ---------------------------------------------------------------------
Where to send bug reports to.

View File

@@ -39,6 +39,9 @@
#include "tool_arch.h" // arch-specific tool stuff
#include "vki.h"
#include "libvex.h"
#include "libvex_ir.h"
/*====================================================================*/
/*=== Build options and table sizes. ===*/
/*====================================================================*/
@@ -572,627 +575,10 @@ extern void VG_(cpuid) ( UInt eax,
UInt *eax_ret, UInt *ebx_ret,
UInt *ecx_ret, UInt *edx_ret );
/*====================================================================*/
/*=== UCode definition ===*/
/*====================================================================*/
/* Tags which describe what operands are. Must fit into 4 bits, which
they clearly do. */
typedef
enum { TempReg =0, /* virtual temp-reg */
ArchReg =1, /* simulated integer reg */
ArchRegS =2, /* simulated segment reg */
RealReg =3, /* real machine's real reg */
SpillNo =4, /* spill slot location */
Literal =5, /* literal; .lit32 field has actual value */
Lit16 =6, /* literal; .val[123] field has actual value */
NoValue =7 /* operand not in use */
}
Tag;
/* Invalid register numbers (can't be negative) */
#define INVALID_TEMPREG 999999999
#define INVALID_REALREG 999999999
/* Microinstruction opcodes. */
typedef
enum {
NOP, /* Null op */
LOCK, /* Indicate the existence of a LOCK prefix (functionally NOP) */
/* Moving values around */
GET, PUT, /* simulated register <--> TempReg */
GETF, PUTF, /* simulated %eflags <--> TempReg */
LOAD, STORE, /* memory <--> TempReg */
MOV, /* TempReg <--> TempReg */
CMOV, /* Used for cmpxchg and cmov */
/* Arithmetic/logical ops */
MUL, UMUL, /* Multiply */
ADD, ADC, SUB, SBB, /* Add/subtract (w/wo carry) */
AND, OR, XOR, NOT, /* Boolean ops */
SHL, SHR, SAR, ROL, ROR, RCL, RCR, /* Shift/rotate (w/wo carry) */
NEG, /* Negate */
INC, DEC, /* Increment/decrement */
BSWAP, /* Big-endian <--> little-endian */
CC2VAL, /* Condition code --> 0 or 1 */
WIDEN, /* Signed or unsigned widening */
/* Conditional or unconditional jump */
JMP,
/* FPU ops */
FPU, /* Doesn't touch memory */
FPU_R, FPU_W, /* Reads/writes memory */
/* ------------ MMX ops ------------ */
/* In this and the SSE encoding, bytes at higher addresses are
held in bits [7:0] in these 16-bit words. I guess this means
it is a big-endian encoding. */
/* 1 byte, no memrefs, no iregdefs, copy exactly to the
output. Held in val1[7:0]. */
MMX1,
/* 2 bytes, no memrefs, no iregdefs, copy exactly to the
output. Held in val1[15:0]. */
MMX2,
/* 3 bytes, no memrefs, no iregdefs, copy exactly to the
output. Held in val1[15:0] and val2[7:0]. */
MMX3,
/* 2 bytes, reads/writes mem. Insns of the form
bbbbbbbb:mod mmxreg r/m.
Held in val1[15:0], and mod and rm are to be replaced
at codegen time by a reference to the Temp/RealReg holding
the address. Arg2 holds this Temp/Real Reg.
Transfer is always at size 8.
*/
MMX2_MemRd,
MMX2_MemWr,
/* 3 bytes, reads/writes mem. Insns of the form
bbbbbbbb:mod mmxreg r/m:bbbbbbbb
Held in val1[15:0] and val2[7:0], and mod and rm are to be
replaced at codegen time by a reference to the Temp/RealReg
holding the address. Arg2 holds this Temp/Real Reg.
Transfer is always at size 8.
*/
MMX2a1_MemRd,
/* 2 bytes, reads/writes an integer ("E") register. Insns of the form
bbbbbbbb:11 mmxreg ireg.
Held in val1[15:0], and ireg is to be replaced
at codegen time by a reference to the relevant RealReg.
Transfer is always at size 4. Arg2 holds this Temp/Real Reg.
*/
MMX2_ERegRd,
MMX2_ERegWr,
/* ------------ SSE/SSE2 ops ------------ */
/* In the following:
a digit N indicates the next N bytes are to be copied exactly
to the output.
'a' indicates a mod-xmmreg-rm byte, where the mod-rm part is
to be replaced at codegen time to a Temp/RealReg holding the
address.
'e' indicates a byte of the form '11 xmmreg ireg', where ireg
is read or written, and is to be replaced at codegen time by
a reference to the relevant RealReg. 'e' because it's the E
reg in Intel encoding parlance.
'g' indicates a byte of the form '11 ireg xmmreg', where ireg
is read or written, and is to be replaced at codegen time by
a reference to the relevant RealReg. 'g' because it's called
G in Intel parlance. */
/* 3 bytes, no memrefs, no iregdefs, copy exactly to the
output. Held in val1[15:0] and val2[7:0]. */
SSE3,
/* 3 bytes, reads/writes mem. Insns of the form
bbbbbbbb:bbbbbbbb:mod mmxreg r/m.
Held in val1[15:0] and val2[7:0], and mod and rm are to be
replaced at codegen time by a reference to the Temp/RealReg
holding the address. Arg3 holds this Temp/Real Reg.
Transfer is usually, but not always, at size 16. */
SSE2a_MemRd,
SSE2a_MemWr,
/* 4 bytes, writes an integer register. Insns of the form
bbbbbbbb:bbbbbbbb:11 ireg bbb.
Held in val1[15:0] and val2[7:0], and ireg is to be replaced
at codegen time by a reference to the relevant RealReg.
Transfer is always at size 4. Arg3 holds this Temp/Real Reg.
*/
SSE2g_RegWr,
/* 5 bytes, writes an integer register. Insns of the form
bbbbbbbb:bbbbbbbb:11 ireg bbb :bbbbbbbb. Held in
val1[15:0] and val2[7:0] and lit32[7:0], and ireg is to be
replaced at codegen time by a reference to the relevant
RealReg. Transfer is always at size 4. Arg3 holds this
Temp/Real Reg.
*/
SSE2g1_RegWr,
/* 5 bytes, reads an integer register. Insns of the form
bbbbbbbb:bbbbbbbb:11 bbb ireg :bbbbbbbb. Held in
val1[15:0] and val2[7:0] and lit32[7:0], and ireg is to be
replaced at codegen time by a reference to the relevant
RealReg. Transfer is always at size 4. Arg3 holds this
Temp/Real Reg.
*/
SSE2e1_RegRd,
/* 4 bytes, no memrefs, no iregdefs, copy exactly to the
output. Held in val1[15:0] and val2[15:0]. */
SSE4,
/* 4 bytes, reads/writes mem. Insns of the form
bbbbbbbb:bbbbbbbb:bbbbbbbb:mod mmxreg r/m.
Held in val1[15:0] and val2[15:0], and mod and rm are to be
replaced at codegen time by a reference to the Temp/RealReg
holding the address. Arg3 holds this Temp/Real Reg.
Transfer is at stated size. */
SSE3a_MemRd,
SSE3a_MemWr,
/* 4 bytes, reads/writes mem. Insns of the form
bbbbbbbb:bbbbbbbb:mod mmxreg r/m:bbbbbbbb
Held in val1[15:0] and val2[15:0], and mod and rm are to be
replaced at codegen time by a reference to the Temp/RealReg
holding the address. Arg3 holds this Temp/Real Reg.
Transfer is at stated size. */
SSE2a1_MemRd,
/* 4 bytes, writes an integer register. Insns of the form
bbbbbbbb:bbbbbbbb:bbbbbbbb:11 ireg bbb.
Held in val1[15:0] and val2[15:0], and ireg is to be replaced
at codegen time by a reference to the relevant RealReg.
Transfer is always at size 4. Arg3 holds this Temp/Real Reg.
*/
SSE3g_RegWr,
/* 5 bytes, writes an integer register. Insns of the form
bbbbbbbb:bbbbbbbb:bbbbbbbb: 11 ireg bbb :bbbbbbbb. Held in
val1[15:0] and val2[15:0] and lit32[7:0], and ireg is to be
replaced at codegen time by a reference to the relevant
RealReg. Transfer is always at size 4. Arg3 holds this
Temp/Real Reg.
*/
SSE3g1_RegWr,
/* 4 bytes, reads an integer register. Insns of the form
bbbbbbbb:bbbbbbbb:bbbbbbbb:11 bbb ireg.
Held in val1[15:0] and val2[15:0], and ireg is to be replaced
at codegen time by a reference to the relevant RealReg.
Transfer is always at size 4. Arg3 holds this Temp/Real Reg.
*/
SSE3e_RegRd,
SSE3e_RegWr, /* variant that writes Ereg, not reads it */
/* 5 bytes, reads an integer register. Insns of the form
bbbbbbbb:bbbbbbbb:bbbbbbbb: 11 bbb ireg :bbbbbbbb. Held in
val1[15:0] and val2[15:0] and lit32[7:0], and ireg is to be
replaced at codegen time by a reference to the relevant
RealReg. Transfer is always at size 4. Arg3 holds this
Temp/Real Reg.
*/
SSE3e1_RegRd,
/* 4 bytes, reads memory, writes an integer register, but is
nevertheless an SSE insn. The insn is of the form
bbbbbbbb:bbbbbbbb:bbbbbbbb:mod ireg rm where mod indicates
memory (ie is not 11b) and ireg is the int reg written. The
first 4 bytes are held in lit32[31:0] since there is
insufficient space elsewhere. mod and rm are to be replaced
at codegen time by a reference to the Temp/RealReg holding
the address. Arg1 holds this Temp/RealReg. ireg is to be
replaced at codegen time by a reference to the relevant
RealReg in which the answer is to be written. Arg2 holds
this Temp/RealReg. Transfer to the destination reg is always
at size 4. However the memory read can be at sizes 4 or 8
and so this is what the sz field holds. Note that the 4th
byte of the instruction (the modrm byte) is redundant, but we
store it anyway so as to be consistent with all other SSE
uinstrs.
*/
SSE3ag_MemRd_RegWr,
/* 5 bytes, no memrefs, no iregdefs, copy exactly to the
output. Held in val1[15:0], val2[15:0] and val3[7:0]. */
SSE5,
/* 5 bytes, reads/writes mem. Insns of the form
bbbbbbbb:bbbbbbbb:bbbbbbbb:mod mmxreg r/m:bbbbbbbb
Held in val1[15:0], val2[15:0], lit32[7:0].
mod and rm are to be replaced at codegen time by a reference
to the Temp/RealReg holding the address. Arg3 holds this
Temp/Real Reg. Transfer is always at size 16. */
SSE3a1_MemRd,
/* ------------------------ */
/* Not strictly needed, but improve address calculation translations. */
LEA1, /* reg2 := const + reg1 */
LEA2, /* reg3 := const + reg1 + reg2 * 1,2,4 or 8 */
/* Hack for x86 REP insns. Jump to literal if TempReg/RealReg
is zero. */
JIFZ,
/* Advance the simulated %eip by some small (< 128) number. */
INCEIP,
/* Dealing with segment registers */
GETSEG, PUTSEG, /* simulated segment register <--> TempReg */
USESEG, /* (LDT/GDT index, virtual addr) --> linear addr */
/* Not for translating x86 calls -- only to call helpers */
CALLM_S, CALLM_E, /* Mark start/end of CALLM push/pop sequence */
PUSH, POP, CLEAR, /* Add/remove/zap args for helpers */
CALLM, /* Call assembly-code helper */
/* Not for translating x86 calls -- only to call C helper functions of
up to three arguments (or two if the function has a return value).
Arguments and return value must be word-sized. More arguments can
be faked with global variables (eg. use VG_(lit_to_globvar)()).
Seven possibilities: 'arg[123]' show where args go, 'ret' shows
where return value goes (if present).
CCALL(-, -, - ) void f(void)
CCALL(arg1, -, - ) void f(UInt arg1)
CCALL(arg1, arg2, - ) void f(UInt arg1, UInt arg2)
CCALL(arg1, arg2, arg3) void f(UInt arg1, UInt arg2, UInt arg3)
CCALL(-, -, ret ) UInt f(void)
CCALL(arg1, -, ret ) UInt f(UInt arg1)
CCALL(arg1, arg2, ret ) UInt f(UInt arg1, UInt arg2) */
CCALL,
/* This opcode makes it easy for tools that extend UCode to
avoid opcode overlap:
enum { EU_OP1 = DUMMY_FINAL_UOPCODE + 1, ... }
WARNING: Do not add new opcodes after this one! They can be added
before, though. */
DUMMY_FINAL_UOPCODE
}
Opcode;
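/* Editor's sketch (not in the original commit): the byte packing the
   MMX/SSE comments above describe.  With bytes at higher addresses in
   the low bits, a two-byte opcode b0:b1 lands in val1 as
   (b0 << 8) | b1.  pack2 is a hypothetical helper; 0x0F 0x6F (movq)
   is just an example opcode pair. */
static UShort pack2 ( UChar b0, UChar b1 )
{
   return (UShort)( ((UShort)b0 << 8) | (UShort)b1 );
}
/* e.g. for an MMX2 uinstr:  u->val1 = pack2(0x0F, 0x6F); */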
/* Condition codes, using the Intel encoding. CondAlways is an extra. */
typedef
enum {
CondO = 0, /* overflow */
CondNO = 1, /* no overflow */
CondB = 2, /* below */
CondNB = 3, /* not below */
CondZ = 4, /* zero */
CondNZ = 5, /* not zero */
CondBE = 6, /* below or equal */
CondNBE = 7, /* not below or equal */
CondS = 8, /* negative */
CondNS = 9, /* not negative */
CondP = 10, /* parity even */
CondNP = 11, /* not parity even */
CondL = 12, /* jump less */
CondNL = 13, /* not less */
CondLE = 14, /* less or equal */
CondNLE = 15, /* not less or equal */
CondAlways = 16 /* Jump always */
}
Condcode;
/* Descriptions of additional properties of *unconditional* jumps. */
typedef
enum {
JmpBoring=0, /* boring unconditional jump */
JmpCall=1, /* jump due to an x86 call insn */
JmpRet=2, /* jump due to an x86 ret insn */
JmpSyscall=3, /* do a system call, then jump */
JmpClientReq=4,/* do a client request, then jump */
JmpYield=5 /* do a yield, then jump */
}
JmpKind;
/* Flags. User-level code can only read/write O(verflow), S(ign),
Z(ero), A(ux-carry), C(arry), P(arity), and may also write
D(irection). That's a total of 7 flags. A FlagSet is a bitset,
thusly:
76543210
DOSZACP
and bit 7 must always be zero since it is unused.
Note: these Flag? values are **not** the positions in the actual
%eflags register. */
typedef UChar FlagSet;
#define FlagD (1<<6)
#define FlagO (1<<5)
#define FlagS (1<<4)
#define FlagZ (1<<3)
#define FlagA (1<<2)
#define FlagC (1<<1)
#define FlagP (1<<0)
#define FlagsOSZACP (FlagO | FlagS | FlagZ | FlagA | FlagC | FlagP)
#define FlagsOSZAP (FlagO | FlagS | FlagZ | FlagA | FlagP)
#define FlagsOSZCP (FlagO | FlagS | FlagZ | FlagC | FlagP)
#define FlagsOSACP (FlagO | FlagS | FlagA | FlagC | FlagP)
#define FlagsSZACP ( FlagS | FlagZ | FlagA | FlagC | FlagP)
#define FlagsSZAP ( FlagS | FlagZ | FlagA | FlagP)
#define FlagsSZP ( FlagS | FlagZ | FlagP)
#define FlagsZCP ( FlagZ | FlagC | FlagP)
#define FlagsOC (FlagO | FlagC )
#define FlagsAC ( FlagA | FlagC )
#define FlagsALL (FlagsOSZACP | FlagD)
#define FlagsEmpty (FlagSet)0
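/* Editor's sketch (not in the original commit): FlagSet is a plain
   bitset, so membership tests on a UInstr's flag fields are simple
   ANDs.  Both helpers are hypothetical. */
static Bool reads_carry ( UInstr* u )
{
   return (u->flags_r & FlagC) ? True : False;
}
static Bool writes_any_OSZACP ( UInstr* u )
{
   return (u->flags_w & FlagsOSZACP) ? True : False;
}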
/* flag positions in eflags */
#define EFlagC (1 << 0) /* carry */
#define EFlagP (1 << 2) /* parity */
#define EFlagA (1 << 4) /* aux carry */
#define EFlagZ (1 << 6) /* zero */
#define EFlagS (1 << 7) /* sign */
#define EFlagD (1 << 10) /* direction */
#define EFlagO (1 << 11) /* overflow */
#define EFlagID (1 << 21) /* changeable if CPUID exists */
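/* Editor's sketch (not in the original commit): these positions are
   what the extractDflag/insertDflag pair earlier in this commit keyed
   on; the same conversion in one line.  d == -1 means the direction
   flag is set (string ops go downwards). */
static Int dflag_of_eflags ( UInt eflags )
{
   return (eflags & EFlagD) ? -1 : 1;
}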
/* Liveness of general purpose registers, useful for code generation.
Reg rank order 0..N-1 corresponds to bits 0..N-1, ie. first
reg's liveness in bit 0, last reg's in bit N-1. Note that
these rankings don't match the Intel register ordering. */
typedef UInt RRegSet;
#define ALL_RREGS_DEAD 0 /* 0000...00b */
#define ALL_RREGS_LIVE ((1 << VG_MAX_REALREGS)-1) /* 0011...11b */
#define UNIT_RREGSET(rank) (1 << (rank))
#define IS_RREG_LIVE(rank,rregs_live) (rregs_live & UNIT_RREGSET(rank))
#define SET_RREG_LIVENESS(rank,rregs_live,b) \
do { RRegSet unit = UNIT_RREGSET(rank); \
if (b) rregs_live |= unit; \
else rregs_live &= ~unit; \
} while(0)
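/* Editor's sketch (not in the original commit): the liveness macros
   above in use -- walk all real-reg ranks, then clear one rank's bit.
   count_live is hypothetical. */
static Int count_live ( RRegSet rregs_live )
{
   Int rank, n = 0;
   for (rank = 0; rank < VG_MAX_REALREGS; rank++)
      if (IS_RREG_LIVE(rank, rregs_live))
         n++;
   SET_RREG_LIVENESS(0, rregs_live, False);  /* touches the local copy only */
   return n;
}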
/* A Micro (u)-instruction. */
typedef
struct {
/* word 1 */
UInt lit32; /* 32-bit literal */
/* word 2 */
UShort val1; /* first operand */
UShort val2; /* second operand */
/* word 3 */
UShort val3; /* third operand */
UChar opcode; /* opcode */
UChar size; /* data transfer size */
/* word 4 */
FlagSet flags_r; /* :: FlagSet */
FlagSet flags_w; /* :: FlagSet */
UChar tag1:4; /* first operand tag */
UChar tag2:4; /* second operand tag */
UChar tag3:4; /* third operand tag */
UChar extra4b:4; /* Spare field, used by WIDEN for src-size,
by LEA2 for scale (1,2,4 or 8), and
by JMPs for original x86 instr size */
/* word 5 */
UChar cond; /* condition, for jumps */
Bool signed_widen:1; /* signed or unsigned WIDEN ? */
JmpKind jmpkind:3; /* additional properties of unconditional JMP */
/* Additional properties for UInstrs that call C functions:
- CCALL
- PUT (when %ESP is the target)
- possibly tool-specific UInstrs
*/
UChar argc:2; /* Number of args, max 3 */
UChar regparms_n:2; /* Number of args passed in registers */
Bool has_ret_val:1; /* Function has return value? */
/* RealReg liveness; only meaningful after reg alloc and liveness
analysis are done. This info is a little bit arch-specific --
VG_MAX_REALREGS can vary on different architectures. Note that
to use this information requires converting between register ranks
and the Intel register numbers, using VG_(realreg_to_rank)()
and/or VG_(rank_to_realreg)() */
RRegSet regs_live_after:VG_MAX_REALREGS;
}
UInstr;
typedef
struct _UCodeBlock
UCodeBlock;
extern Int VG_(get_num_instrs) (UCodeBlock* cb);
extern Int VG_(get_num_temps) (UCodeBlock* cb);
extern UInstr* VG_(get_instr) (UCodeBlock* cb, Int i);
extern UInstr* VG_(get_last_instr) (UCodeBlock* cb);
/*====================================================================*/
/*=== Instrumenting UCode ===*/
/*====================================================================*/
/* Maximum number of registers read or written by a single UInstruction. */
#define VG_MAX_REGS_USED 3
/* Find what this instruction does to its regs, useful for
analysis/optimisation passes. `tag' indicates whether we're considering
TempRegs (pre-reg-alloc) or RealRegs (post-reg-alloc). `regs' is filled
with the affected register numbers, `isWrites' parallels it and indicates
if the reg is read or written. If a reg is read and written, it will
appear twice in `regs'. `regs' and `isWrites' must be able to fit
VG_MAX_REGS_USED elements. */
extern Int VG_(get_reg_usage) ( UInstr* u, Tag tag, Int* regs, Bool* isWrites );
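/* Editor's sketch (not in the original commit): using the contract
   above -- a reg both read and written appears twice in `regs'.
   show_reg_usage is hypothetical. */
static void show_reg_usage ( UInstr* u )
{
   Int  regs[VG_MAX_REGS_USED];
   Bool isWrites[VG_MAX_REGS_USED];
   Int  i, n = VG_(get_reg_usage)( u, TempReg, regs, isWrites );
   for (i = 0; i < n; i++)
      VG_(printf)( "t%d %s\n", regs[i], isWrites[i] ? "written" : "read" );
}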
/* Used to register helper functions to be called from generated code. A
limited number of compact helpers can be registered; the code generated
to call them is slightly shorter -- so register the most frequently
called helpers as compact. */
extern void VG_(register_compact_helper) ( Addr a );
extern void VG_(register_noncompact_helper) ( Addr a );
/* ------------------------------------------------------------------ */
/* Virtual register allocation */
/* Get a new virtual register */
extern Int VG_(get_new_temp) ( UCodeBlock* cb );
/* Get a new virtual shadow register */
extern Int VG_(get_new_shadow) ( UCodeBlock* cb );
/* Get a virtual register's corresponding virtual shadow register */
#define SHADOW(tempreg) ((tempreg)+1)
/* ------------------------------------------------------------------ */
/* Low-level UInstr builders */
extern void VG_(new_NOP) ( UInstr* u );
extern void VG_(new_UInstr0) ( UCodeBlock* cb, Opcode opcode, Int sz );
extern void VG_(new_UInstr1) ( UCodeBlock* cb, Opcode opcode, Int sz,
Tag tag1, UInt val1 );
extern void VG_(new_UInstr2) ( UCodeBlock* cb, Opcode opcode, Int sz,
Tag tag1, UInt val1,
Tag tag2, UInt val2 );
extern void VG_(new_UInstr3) ( UCodeBlock* cb, Opcode opcode, Int sz,
Tag tag1, UInt val1,
Tag tag2, UInt val2,
Tag tag3, UInt val3 );
/* Set read/write/undefined flags. Undefined flags are treated as written,
but it's worth keeping them logically distinct. */
extern void VG_(set_flag_fields) ( UCodeBlock* cb, FlagSet fr, FlagSet fw,
FlagSet fu);
extern void VG_(set_lit_field) ( UCodeBlock* cb, UInt lit32 );
extern void VG_(set_ccall_fields) ( UCodeBlock* cb, Addr fn, UChar argc,
UChar regparms_n, Bool has_ret_val );
extern void VG_(set_cond_field) ( UCodeBlock* cb, Condcode code );
extern void VG_(set_widen_fields) ( UCodeBlock* cb, UInt szs, Bool is_signed );
extern void VG_(copy_UInstr) ( UCodeBlock* cb, UInstr* instr );
extern Bool VG_(any_flag_use)( UInstr* u );
/* Macro versions of the above; just shorter to type. */
#define uInstr0 VG_(new_UInstr0)
#define uInstr1 VG_(new_UInstr1)
#define uInstr2 VG_(new_UInstr2)
#define uInstr3 VG_(new_UInstr3)
#define uLiteral VG_(set_lit_field)
#define uCCall VG_(set_ccall_fields)
#define uCond VG_(set_cond_field)
#define uWiden VG_(set_widen_fields)
#define uFlagsRWU VG_(set_flag_fields)
#define newTemp VG_(get_new_temp)
#define newShadow VG_(get_new_shadow)
/* Refer to `the last instruction stuffed in' (can be lvalue). */
#define LAST_UINSTR(cb) (cb)->instrs[(cb)->used-1]
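/* Editor's sketch (not in the original commit): emitting a tiny
   sequence with the short-form builders above -- GET the simulated
   %EAX into a fresh TempReg, then PUT it back to %EBX (GET/PUT as per
   the opcode table earlier).  copy_eax_to_ebx is hypothetical. */
static void copy_eax_to_ebx ( UCodeBlock* cb )
{
   Int t = newTemp(cb);
   uInstr2(cb, GET, 4, ArchReg, R_EAX, TempReg, t);
   uInstr2(cb, PUT, 4, TempReg, t,     ArchReg, R_EBX);
}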
/* ------------------------------------------------------------------ */
/* Higher-level UInstr sequence builders */
extern void VG_(lit_to_reg) ( UCodeBlock* cb, UInt lit, UInt t );
extern UInt VG_(lit_to_newreg) ( UCodeBlock* cb, UInt lit );
#define CB_F UCodeBlock* cb, Addr f
#define EV extern void
#define RPn UInt regparms_n
/* Various CCALL builders, of the form "ccall_<args>_<retval>". 'R'
represents a TempReg, 'L' represents a literal, '0' represents nothing
(ie. no args, or no return value). */
EV VG_(ccall_0_0) ( CB_F );
EV VG_(ccall_R_0) ( CB_F, UInt R1, RPn );
EV VG_(ccall_L_0) ( CB_F, UInt L1, RPn );
EV VG_(ccall_R_R) ( CB_F, UInt R1, UInt R_ret, RPn );
EV VG_(ccall_L_R) ( CB_F, UInt L1, UInt R_ret, RPn );
EV VG_(ccall_RR_0) ( CB_F, UInt R1, UInt R2, RPn );
EV VG_(ccall_RL_0) ( CB_F, UInt R1, UInt RL, RPn );
EV VG_(ccall_LR_0) ( CB_F, UInt L1, UInt R2, RPn );
EV VG_(ccall_LL_0) ( CB_F, UInt L1, UInt L2, RPn );
EV VG_(ccall_RR_R) ( CB_F, UInt R1, UInt R2, UInt R_ret, RPn );
EV VG_(ccall_RL_R) ( CB_F, UInt R1, UInt L2, UInt R_ret, RPn );
EV VG_(ccall_LR_R) ( CB_F, UInt L1, UInt R2, UInt R_ret, RPn );
EV VG_(ccall_LL_R) ( CB_F, UInt L1, UInt L2, UInt R_ret, RPn );
EV VG_(ccall_RRR_0) ( CB_F, UInt R1, UInt R2, UInt R3, RPn );
EV VG_(ccall_RLL_0) ( CB_F, UInt R1, UInt L2, UInt L3, RPn );
EV VG_(ccall_LRR_0) ( CB_F, UInt L1, UInt R2, UInt R3, RPn );
EV VG_(ccall_LLR_0) ( CB_F, UInt L1, UInt L2, UInt R3, RPn );
EV VG_(ccall_LLL_0) ( CB_F, UInt L1, UInt L2, UInt L3, RPn );
#undef CB_F
#undef EV
#undef RPn
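/* Editor's sketch (not in the original commit): reading the naming
   scheme above -- 'R' a TempReg arg, 'L' a literal arg, a trailing
   'R' a TempReg return.  my_helper and both TempReg parameters are
   hypothetical. */
static UInt my_helper ( UInt addr, UInt len ) { return addr + len; }

static void emit_helper_call ( UCodeBlock* cb, UInt t_addr, UInt t_ret )
{
   /* UInt my_helper(UInt, UInt): one TempReg arg, one literal arg (4),
      result into t_ret, 2 args passed in registers. */
   VG_(ccall_RL_R)( cb, (Addr)&my_helper, t_addr, 4, t_ret, 2 );
}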
/* One way around the 3-arg C function limit is to pass args via global
* variables... ugly, but it works. */
void VG_(reg_to_globvar)(UCodeBlock* cb, UInt t, UInt* globvar_ptr);
void VG_(lit_to_globvar)(UCodeBlock* cb, UInt lit, UInt* globvar_ptr);
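/* Editor's sketch (not in the original commit): faking a 4th argument
   via a global, as the comment above suggests.  my_extra_arg,
   some_helper and the TempReg parameters are hypothetical. */
static UInt my_extra_arg;
extern void some_helper ( UInt a1, UInt a2, UInt a3 );  /* reads my_extra_arg */

static void emit_call_with_4th_arg ( UCodeBlock* cb, UInt t1, UInt t2,
                                     UInt t3, UInt t_extra )
{
   VG_(reg_to_globvar)( cb, t_extra, &my_extra_arg );
   VG_(ccall_RRR_0)( cb, (Addr)&some_helper, t1, t2, t3, 3 );
}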
/* Old, deprecated versions of some of the helpers (DO NOT USE) */
extern void VG_(call_helper_0_0) ( UCodeBlock* cb, Addr f);
extern void VG_(call_helper_1_0) ( UCodeBlock* cb, Addr f, UInt arg1,
UInt regparms_n);
extern void VG_(call_helper_2_0) ( UCodeBlock* cb, Addr f, UInt arg1, UInt arg2,
UInt regparms_n);
extern void VG_(set_global_var) ( UCodeBlock* cb, Addr globvar_ptr, UInt val);
extern void VG_(set_global_var_tempreg) ( UCodeBlock* cb, Addr globvar_ptr,
UInt t_val);
/* ------------------------------------------------------------------ */
/* Allocating/freeing basic blocks of UCode */
extern UCodeBlock* VG_(setup_UCodeBlock) ( UCodeBlock* cb );
extern void VG_(free_UCodeBlock) ( UCodeBlock* cb );
/* ------------------------------------------------------------------ */
/* UCode pretty/ugly printing. Probably only useful to call from a tool
if VG_(needs).extended_UCode == True. */
/* When True, all generated code is/should be printed. */
extern Bool VG_(print_codegen);
/* Pretty/ugly printing functions */
extern void VG_(pp_UCodeBlock) ( UCodeBlock* cb, Char* title );
extern void VG_(pp_UInstr) ( Int instrNo, UInstr* u );
extern void VG_(pp_UInstr_regs) ( Int instrNo, UInstr* u );
extern void VG_(up_UInstr) ( Int instrNo, UInstr* u );
extern Char* VG_(name_UOpcode) ( Bool upper, Opcode opc );
extern Char* VG_(name_UCondcode) ( Condcode cond );
extern void VG_(pp_UOperand) ( UInstr* u, Int operandNo,
Int sz, Bool parens );
/* ------------------------------------------------------------------ */
/* Accessing archregs and their shadows */
/* ToDo: is this still needed ? */
extern UInt VG_(get_archreg) ( UInt archreg );
extern UInt VG_(get_thread_archreg) ( ThreadId tid, UInt archreg );
@@ -1205,12 +591,7 @@ extern UInt VG_(get_thread_shadow_archreg) ( ThreadId tid, UInt archreg );
extern void VG_(set_thread_shadow_archreg) ( ThreadId tid, UInt archreg,
UInt val );
/*====================================================================*/
/*=== Generating x86 code from UCode ===*/
/*====================================================================*/
/* All this is only necessary for tools with VG_(needs).extends_UCode == True. */
/* ToDo: FIX */
/* This is the Intel register encoding -- integer regs. */
#define R_EAX 0
#define R_ECX 1
@@ -1221,133 +602,6 @@ extern void VG_(set_thread_shadow_archreg) ( ThreadId tid, UInt archreg,
#define R_ESI 6
#define R_EDI 7
#define R_AL (0+R_EAX)
#define R_CL (0+R_ECX)
#define R_DL (0+R_EDX)
#define R_BL (0+R_EBX)
#define R_AH (4+R_EAX)
#define R_CH (4+R_ECX)
#define R_DH (4+R_EDX)
#define R_BH (4+R_EBX)
/* This is the Intel register encoding -- segment regs. */
#define R_ES 0
#define R_CS 1
#define R_SS 2
#define R_DS 3
#define R_FS 4
#define R_GS 5
/* For pretty printing x86 code */
extern const Char* VG_(name_of_mmx_gran) ( UChar gran );
extern const Char* VG_(name_of_mmx_reg) ( Int mmxreg );
extern const Char* VG_(name_of_seg_reg) ( Int sreg );
extern const Char* VG_(name_of_int_reg) ( Int size, Int reg );
extern const Char VG_(name_of_int_size) ( Int size );
/* Shorter macros for convenience */
#define nameIReg VG_(name_of_int_reg)
#define nameISize VG_(name_of_int_size)
#define nameSReg VG_(name_of_seg_reg)
#define nameMMXReg VG_(name_of_mmx_reg)
#define nameMMXGran VG_(name_of_mmx_gran)
#define nameXMMReg VG_(name_of_xmm_reg)
/* Randomly useful things */
extern UInt VG_(extend_s_8to32) ( UInt x );
/* Code emitters */
extern void VG_(emitB) ( UInt b );
extern void VG_(emitW) ( UInt w );
extern void VG_(emitL) ( UInt l );
extern void VG_(new_emit) ( Bool upd_cc, FlagSet uses_flags, FlagSet sets_flags );
/* Finding offsets */
extern Int VG_(helper_offset) ( Addr a );
extern Int VG_(shadow_reg_offset) ( Int arch );
extern Int VG_(shadow_flags_offset) ( void );
/* Convert reg ranks <-> Intel register ordering, for using register
liveness information. */
extern Int VG_(realreg_to_rank) ( Int realreg );
extern Int VG_(rank_to_realreg) ( Int rank );
/* Call a subroutine. Does no argument passing, stack manipulations, etc. */
extern void VG_(synth_call) ( Bool ensure_shortform, Int word_offset,
Bool upd_cc, FlagSet use_flags, FlagSet set_flags );
/* For calling C functions -- saves caller save regs, pushes args, calls,
clears the stack, restores caller save regs. `fn' must be registered in
the baseBlock first. Acceptable tags are RealReg and Literal. Optimises
things, eg. by not preserving non-live caller-save registers.
WARNING: a UInstr should *not* be translated with synth_ccall() followed
by some other x86 assembly code; this will invalidate the results of
vg_realreg_liveness_analysis() and everything will fall over. */
extern void VG_(synth_ccall) ( Addr fn, Int argc, Int regparms_n, UInt argv[],
Tag tagv[], Int ret_reg,
RRegSet regs_live_before,
RRegSet regs_live_after );
/* Addressing modes */
extern void VG_(emit_amode_offregmem_reg)( Int off, Int regmem, Int reg );
extern void VG_(emit_amode_ereg_greg) ( Int e_reg, Int g_reg );
/* v-size (4, or 2 with OSO) insn emitters */
extern void VG_(emit_movv_offregmem_reg) ( Int sz, Int off, Int areg, Int reg );
extern void VG_(emit_movv_reg_offregmem) ( Int sz, Int reg, Int off, Int areg );
extern void VG_(emit_movv_reg_reg) ( Int sz, Int reg1, Int reg2 );
extern void VG_(emit_nonshiftopv_lit_reg)( Bool upd_cc, Int sz, Opcode opc, UInt lit,
Int reg );
extern void VG_(emit_shiftopv_lit_reg) ( Bool upd_cc, Int sz, Opcode opc, UInt lit,
Int reg );
extern void VG_(emit_nonshiftopv_reg_reg)( Bool upd_cc, Int sz, Opcode opc,
Int reg1, Int reg2 );
extern void VG_(emit_movv_lit_reg) ( Int sz, UInt lit, Int reg );
extern void VG_(emit_unaryopv_reg) ( Bool upd_cc, Int sz, Opcode opc, Int reg );
extern void VG_(emit_pushv_reg) ( Int sz, Int reg );
extern void VG_(emit_popv_reg) ( Int sz, Int reg );
extern void VG_(emit_pushl_lit32) ( UInt int32 );
extern void VG_(emit_pushl_lit8) ( Int lit8 );
extern void VG_(emit_cmpl_zero_reg) ( Bool upd_cc, Int reg );
extern void VG_(emit_swapl_reg_EAX) ( Int reg );
extern void VG_(emit_movv_lit_offregmem) ( Int sz, UInt lit, Int off,
Int memreg );
/* b-size (1 byte) instruction emitters */
extern void VG_(emit_movb_lit_offregmem) ( UInt lit, Int off, Int memreg );
extern void VG_(emit_movb_reg_offregmem) ( Int reg, Int off, Int areg );
extern void VG_(emit_unaryopb_reg) ( Bool upd_cc, Opcode opc, Int reg );
extern void VG_(emit_testb_lit_reg) ( Bool upd_cc, UInt lit, Int reg );
/* zero-extended load emitters */
extern void VG_(emit_movzbl_offregmem_reg) ( Bool bounds, Int off, Int regmem, Int reg );
extern void VG_(emit_movzwl_offregmem_reg) ( Bool bounds, Int off, Int areg, Int reg );
extern void VG_(emit_movzwl_regmem_reg) ( Bool bounds, Int reg1, Int reg2 );
/* misc instruction emitters */
extern void VG_(emit_call_reg) ( Int reg );
extern void VG_(emit_add_lit_to_esp) ( Int lit );
extern void VG_(emit_pushal) ( void );
extern void VG_(emit_popal) ( void );
extern void VG_(emit_AMD_prefetch_reg) ( Int reg );
/* jump emitters */
extern void VG_(init_target) ( Int *tgt );
extern void VG_(target_back) ( Int *tgt );
extern void VG_(target_forward) ( Int *tgt );
extern void VG_(emit_target_delta) ( Int *tgt );
typedef enum {
JP_NONE, /* no prediction */
JP_TAKEN, /* predict taken */
JP_NOT_TAKEN, /* predict not taken */
} JumpPred;
extern void VG_(emit_jcondshort_delta) ( Bool simd_cc, Condcode cond, Int delta, JumpPred );
extern void VG_(emit_jcondshort_target)( Bool simd_cc, Condcode cond, Int *tgt, JumpPred );
/*====================================================================*/

View File

@@ -40,98 +40,9 @@
#define REGPARM(n) __attribute__((regparm(n)))
// Accessors for the baseBlock
#define R_STACK_PTR R_ESP
#define R_FRAME_PTR R_EBP
#define FIRST_ARCH_REG R_EAX
#define LAST_ARCH_REG R_EDI
#define N_ARCH_REGS 8
#define MIN_INSTR_SIZE 1
#define MAX_INSTR_SIZE 16
/* Total number of integer registers available for allocation -- all of
them except %esp (points to Valgrind's stack) and %ebp (permanently
points at the baseBlock).
If you increase this you'll have to also change at least these:
- VG_(rank_to_realreg)()
- VG_(realreg_to_rank)()
- ppRegsLiveness()
- the RegsLive type (maybe -- RegsLive type must have more than
VG_MAX_REALREGS bits)
You can decrease it, and performance will drop because more spills will
occur. If you decrease it too much, everything will fall over.
Do not change this unless you really know what you are doing! */
#define VG_MAX_REALREGS 6
/*====================================================================*/
/*=== Instrumenting UCode ===*/
/*====================================================================*/
/* ------------------------------------------------------------------ */
/* Offsets of addresses of helper functions. A "helper" function is one
which is called from generated code via CALLM. */
// XXX: eventually these should be private to the x86 part, not visible to
// tools, and the IR should provide a better way than this to see what the
// original instruction was.
extern Int VGOFF_(helper_idiv_64_32);
extern Int VGOFF_(helper_div_64_32);
extern Int VGOFF_(helper_idiv_32_16);
extern Int VGOFF_(helper_div_32_16);
extern Int VGOFF_(helper_idiv_16_8);
extern Int VGOFF_(helper_div_16_8);
extern Int VGOFF_(helper_imul_32_64);
extern Int VGOFF_(helper_mul_32_64);
extern Int VGOFF_(helper_imul_16_32);
extern Int VGOFF_(helper_mul_16_32);
extern Int VGOFF_(helper_imul_8_16);
extern Int VGOFF_(helper_mul_8_16);
extern Int VGOFF_(helper_CLD);
extern Int VGOFF_(helper_STD);
extern Int VGOFF_(helper_get_dirflag);
extern Int VGOFF_(helper_CLC);
extern Int VGOFF_(helper_STC);
extern Int VGOFF_(helper_CMC);
extern Int VGOFF_(helper_shldl);
extern Int VGOFF_(helper_shldw);
extern Int VGOFF_(helper_shrdl);
extern Int VGOFF_(helper_shrdw);
extern Int VGOFF_(helper_RDTSC);
extern Int VGOFF_(helper_CPUID);
extern Int VGOFF_(helper_IN);
extern Int VGOFF_(helper_OUT);
extern Int VGOFF_(helper_bsfw);
extern Int VGOFF_(helper_bsfl);
extern Int VGOFF_(helper_bsrw);
extern Int VGOFF_(helper_bsrl);
extern Int VGOFF_(helper_fstsw_AX);
extern Int VGOFF_(helper_SAHF);
extern Int VGOFF_(helper_LAHF);
extern Int VGOFF_(helper_DAS);
extern Int VGOFF_(helper_DAA);
extern Int VGOFF_(helper_AAS);
extern Int VGOFF_(helper_AAA);
extern Int VGOFF_(helper_AAD);
extern Int VGOFF_(helper_AAM);
extern Int VGOFF_(helper_cmpxchg8b);
#endif // __X86_TOOL_ARCH_H