Clear a few more AMD64 start-up hurdles:

- implemented VG_(clone)()
- implemented PLATFORM_DO_MMAP()
- implemented VGA_(init_thread1state)()  [will need to be updated as the
  Vex AMD64 guest state is updated]
- implemented OYNK, because it's useful

Also a couple of general cleaning up things.



git-svn-id: svn://svn.valgrind.org/valgrind/trunk@3165
This commit is contained in:
Nicholas Nethercote
2004-11-30 16:04:58 +00:00
parent 61c97ea72d
commit ad1bf0073e
6 changed files with 87 additions and 30 deletions

View File

@@ -95,7 +95,8 @@ extern Addr VG_(do_useseg) ( UInt seg_selector, Addr virtual_addr );
------------------------------------------------------------------ */
/* Perform the mmap syscall directly on AMD64 Linux and store the raw
   kernel return value (address on success, -errno on failure) in `ret`.
   Unlike x86's old mmap, __NR_mmap on amd64 takes all six arguments in
   registers, so do_syscall6 maps onto it directly; `offset` is in bytes. */
#define PLATFORM_DO_MMAP(ret, start, length, prot, flags, fd, offset) do { \
   ret = VG_(do_syscall6)(__NR_mmap, (UWord)(start), (length), \
                          (prot), (flags), (fd), (offset)); \
} while (0)
#define PLATFORM_GET_MMAP_ARGS(tst, a1, a2, a3, a4, a5, a6) do {\

View File

@@ -65,12 +65,59 @@ VG_(do_syscall):
ret
/*
        Perform a clone system call.  clone is strange because it has
        fork()-like return-twice semantics, so it needs special
        handling here.

        Upon entry, we have:

            int   (*fn)(void*)   in %rdi
            void*  child_stack   in %rsi
            int    flags         in %rdx
            void*  arg           in %rcx
            pid_t* child_tid     in %r8
            pid_t* parent_tid    in %r9

        Kernel syscall args on amd64 go in rdi, rsi, rdx, r10, r8, r9;
        note arg4 is %r10, NOT %rcx -- the syscall insn clobbers %rcx.
*/
.globl VG_(clone)
VG_(clone):
        // set up child stack, temporarily preserving fn and arg
        subq    $16, %rsi               // make space on stack
        movq    %rcx, 8(%rsi)           // save arg
        movq    %rdi, 0(%rsi)           // save fn

        // setup syscall
        movl    $__NR_clone, %eax       // syscall number
        movq    %rdx, %rdi              // syscall arg1: flags
        // %rsi already setup           // syscall arg2: child_stack
        movq    %r9, %rdx               // syscall arg3: parent_tid
        movq    %r8, %r10               // syscall arg4: child_tid (%r10, not %rcx!)

        syscall                         // clone()

        testq   %rax, %rax              // child if retval == 0
        jnz     1f

        // CHILD - call thread function
        popq    %rax                    // pop fn
        popq    %rdi                    // pop fn arg1: arg
        call    *%rax                   // call fn through full 64-bit pointer

        // exit with result
        movq    %rax, %rdi              // arg1: return value from fn
        movl    $__NR_exit, %eax
        syscall

        // Exit returned?!
        ud2

1:      // PARENT or ERROR
        ret
# XXX: must reinstate comments also -- see x86-linux/syscall.S
.globl VG_(sigreturn)
VG_(sigreturn):

View File

@@ -30,6 +30,15 @@
#ifndef __AMD64_CORE_ARCH_ASM_H
#define __AMD64_CORE_ARCH_ASM_H
// Print a constant from asm code.
//
// Saves and restores every integer caller-saved register (rax, rbx is
// saved too although callee-saved, rcx, rdx, rsi, rdi, r8-r11) around a
// call to the C helper VG_(oynk), so the macro can be dropped into
// arbitrary asm.  Pops mirror the pushes in exact reverse order.
// NOTE(review): the condition codes (rflags) are NOT preserved across
// the call, and no 16-byte stack re-alignment is attempted before the
// call (10 pushes == 80 bytes) -- acceptable for a debugging aid, but
// confirm before using around flag-sensitive code.
#define OYNK(nnn) push %r8 ; push %r9 ; push %r10; push %r11; \
push %rax; push %rbx; push %rcx; push %rdx; \
push %rsi; push %rdi; \
movl $nnn, %edi; call VG_(oynk); \
pop %rdi; pop %rsi; pop %rdx; pop %rcx; \
pop %rbx; pop %rax; pop %r11; pop %r10; \
pop %r9 ; pop %r8
#endif // __AMD64_CORE_ARCH_ASM_H
/*--------------------------------------------------------------------*/

View File

@@ -42,27 +42,29 @@
thread), initialise the VEX guest state, and copy in essential
starting values.
*/
/* NOTE(review): this span is a unified-diff hunk pair whose +/- markers
   were stripped during extraction, so removed (old, x86-flavoured) lines
   and added (new, AMD64) lines appear interleaved, and the second @@
   header below marks a gap where unchanged lines are omitted.  It is not
   compilable as shown; read the AMD64 lines as the post-commit state. */
/* Old signature (removed): eip/esp naming. */
void VGA_(init_thread1state) ( Addr client_eip,
Addr esp_at_startup,
/* New signature (added): rip/rsp naming for AMD64. */
void VGA_(init_thread1state) ( Addr client_rip,
Addr rsp_at_startup,
/*MOD*/ ThreadArchState* arch )
{
/* Removed: the I_die_here stub and its surrounding #if 0 guard. */
I_die_here;
#if 0
/* Old/new pairs: X86 variants removed, AMD64 variants added. */
vg_assert(0 == sizeof(VexGuestX86State) % 8);
vg_assert(0 == sizeof(VexGuestAMD64State) % 8);
/* Zero out the initial state, and set up the simulated FPU in a
sane way. */
LibVEX_GuestX86_initialise(&arch->vex);
LibVEX_GuestAMD64_initialise(&arch->vex);
/* Zero out the shadow area. */
VG_(memset)(&arch->vex_shadow, 0, sizeof(VexGuestX86State));
VG_(memset)(&arch->vex_shadow, 0, sizeof(VexGuestAMD64State));
/* Put essential stuff into the new state. */
arch->vex.guest_RSP = rsp_at_startup;
arch->vex.guest_RIP = client_rip;
// XXX: something will probably have to be done with the segment
// registers, once they're added to Vex-AMD64.
/* Added: segment-register setup parked under #if 0 until Vex-AMD64
   grows segment registers. */
#if 0
/* initialise %cs, %ds and %ss to point at the operating systems
default code, data and stack segments */
arch->vex.guest_ESP = esp_at_startup;
arch->vex.guest_EIP = client_eip;
asm volatile("movw %%cs, %0"
:
: "m" (arch->vex.guest_CS));
/* Second hunk of the same diff; unchanged lines (e.g. the %ds asm)
   between the hunks are not shown here. */
@@ -72,21 +74,6 @@ void VGA_(init_thread1state) ( Addr client_eip,
asm volatile("movw %%ss, %0"
:
: "m" (arch->vex.guest_SS));
/* Removed below: VG_TRACK moved to the caller (see the main() hunk in
   this commit) and the x86 SSE-detection code dropped. */
VG_TRACK( post_reg_write, Vg_CoreStartup, /*tid*/1, /*offset*/0,
sizeof(VexGuestArchState));
/* I assume that if we have SSE2 we also have SSE */
VG_(have_ssestate) = False;
// VG_(cpu_has_feature)(VG_X86_FEAT_FXSR) &&
// VG_(cpu_has_feature)(VG_X86_FEAT_SSE);
if (0) {
if (VG_(have_ssestate))
VG_(printf)("Looks like a SSE-capable CPU\n");
else
VG_(printf)("Looks like a MMX-only CPU\n");
}
#endif
}

View File

@@ -319,6 +319,12 @@ Addr VG_(get_stack_pointer) ( ThreadId tid )
return STACK_PTR( VG_(threads)[tid].arch );
}
/* Debugging thing .. can be called from assembly with OYNK macro.
   Thin C-callable wrapper so asm code has a plain symbol to `call`;
   simply forwards n to the core's OINK debug-print macro. */
void VG_(oynk) ( Int n )
{
OINK(n);
}
/* Initialize the PID and PGRP of scheduler LWP; this is also called
in any new children after fork. */
static void newpid(ThreadId unused)
@@ -2605,6 +2611,12 @@ int main(int argc, char **argv)
// setup_scheduler() [for the rest of state 1 stuff]
//--------------------------------------------------------------
VGA_(init_thread1state)(client_eip, sp_at_startup, &VG_(threads)[1].arch );
// Tell the tool that we just wrote to the registers.
VG_TRACK( post_reg_write, Vg_CoreStartup, /*tid*/1, /*offset*/0,
sizeof(VexGuestArchState));
// Record the instr ptr offset, for use by asm code.
VG_(instr_ptr_offset) = offsetof(VexGuestArchState, ARCH_INSTR_PTR);
//--------------------------------------------------------------

View File

@@ -56,11 +56,12 @@ void VGA_(init_thread1state) ( Addr client_eip,
VG_(memset)(&arch->vex_shadow, 0, sizeof(VexGuestX86State));
/* Put essential stuff into the new state. */
/* initialise %cs, %ds and %ss to point at the operating systems
default code, data and stack segments */
arch->vex.guest_ESP = esp_at_startup;
arch->vex.guest_EIP = client_eip;
/* initialise %cs, %ds and %ss to point at the operating systems
default code, data and stack segments */
asm volatile("movw %%cs, %0"
:
: "m" (arch->vex.guest_CS));