diff --git a/addrcheck/ac_main.c b/addrcheck/ac_main.c index 984cc15a0..e026c48af 100644 --- a/addrcheck/ac_main.c +++ b/addrcheck/ac_main.c @@ -129,27 +129,29 @@ static Bool eq_AcAddrInfo ( VgRes res, AcAddrInfo* ai1, AcAddrInfo* ai2 ) are otherwise the same, the faulting addrs and associated rwoffsets are allowed to be different. */ -Bool SK_(eq_SkinError) ( VgRes res, - SkinError* e1, SkinError* e2 ) +Bool SK_(eq_SkinError) ( VgRes res, Error* e1, Error* e2 ) { - AddrCheckError* e1_extra = e1->extra; - AddrCheckError* e2_extra = e2->extra; + AddrCheckError* e1_extra = VG_(get_error_extra)(e1); + AddrCheckError* e2_extra = VG_(get_error_extra)(e2); - switch (e1->ekind) { - case CoreMemErr: + switch (VG_(get_error_kind)(e1)) { + case CoreMemErr: { + Char *e1s, *e2s; if (e1_extra->isWrite != e2_extra->isWrite) return False; - if (e2->ekind != CoreMemErr) return False; - if (e1->string == e2->string) return True; - if (0 == VG_(strcmp)(e1->string, e2->string)) return True; + if (VG_(get_error_kind)(e2) != CoreMemErr) return False; + e1s = VG_(get_error_string)(e1); + e2s = VG_(get_error_string)(e2); + if (e1s == e2s) return True; + if (0 == VG_(strcmp)(e1s, e2s)) return True; return False; + } case UserErr: case ParamErr: - if (e1_extra->isWrite != e2_extra->isWrite) - return False; - if (e1->ekind == ParamErr - && 0 != VG_(strcmp)(e1->string, e2->string)) - return False; + if (e1_extra->isWrite != e2_extra->isWrite) return False; + if (VG_(get_error_kind)(e1) == ParamErr + && 0 != VG_(strcmp)(VG_(get_error_string)(e1), + VG_(get_error_string)(e2))) return False; return True; case FreeErr: @@ -174,7 +176,8 @@ Bool SK_(eq_SkinError) ( VgRes res, return True; default: - VG_(printf)("Error:\n unknown AddrCheck error code %d\n", e1->ekind); + VG_(printf)("Error:\n unknown AddrCheck error code %d\n", + VG_(get_error_kind)(e1)); VG_(skin_panic)("unknown error code in SK_(eq_SkinError)"); } } @@ -229,18 +232,18 @@ static void pp_AcAddrInfo ( Addr a, AcAddrInfo* ai ) } } -void SK_(pp_SkinError) ( SkinError* err, void (*pp_ExeContext)(void) ) +void SK_(pp_SkinError) ( Error* err, void (*pp_ExeContext)(void) ) { - AddrCheckError* err_extra = err->extra; + AddrCheckError* err_extra = VG_(get_error_extra)(err); - switch (err->ekind) { + switch (VG_(get_error_kind)(err)) { case CoreMemErr: if (err_extra->isWrite) { VG_(message)(Vg_UserMsg, - "%s contains unaddressable byte(s)", err->string ); + "%s contains unaddressable byte(s)", VG_(get_error_string)(err)); } else { VG_(message)(Vg_UserMsg, - "%s contains unaddressable byte(s)", err->string ); + "%s contains unaddressable byte(s)", VG_(get_error_string)(err)); } pp_ExeContext(); break; @@ -261,33 +264,33 @@ void SK_(pp_SkinError) ( SkinError* err, void (*pp_ExeContext)(void) ) VG_(skin_panic)("pp_SkinError(axskind)"); } pp_ExeContext(); - pp_AcAddrInfo(err->addr, &err_extra->addrinfo); + pp_AcAddrInfo(VG_(get_error_address)(err), &err_extra->addrinfo); break; case FreeErr: VG_(message)(Vg_UserMsg,"Invalid free() / delete / delete[]"); /* fall through */ case FreeMismatchErr: - if (err->ekind == FreeMismatchErr) + if (VG_(get_error_kind)(err) == FreeMismatchErr) VG_(message)(Vg_UserMsg, "Mismatched free() / delete / delete []"); pp_ExeContext(); - pp_AcAddrInfo(err->addr, &err_extra->addrinfo); + pp_AcAddrInfo(VG_(get_error_address)(err), &err_extra->addrinfo); break; case ParamErr: if (err_extra->isWrite) { VG_(message)(Vg_UserMsg, - "Syscall param %s contains unaddressable byte(s)", - err->string ); + "Syscall param %s contains unaddressable 
byte(s)", + VG_(get_error_string)(err) ); } else { VG_(message)(Vg_UserMsg, "Syscall param %s contains uninitialised or " "unaddressable byte(s)", - err->string); + VG_(get_error_string)(err)); } pp_ExeContext(); - pp_AcAddrInfo(err->addr, &err_extra->addrinfo); + pp_AcAddrInfo(VG_(get_error_address)(err), &err_extra->addrinfo); break; case UserErr: @@ -300,11 +303,12 @@ void SK_(pp_SkinError) ( SkinError* err, void (*pp_ExeContext)(void) ) "unaddressable byte(s) found during client check request"); } pp_ExeContext(); - pp_AcAddrInfo(err->addr, &err_extra->addrinfo); + pp_AcAddrInfo(VG_(get_error_address)(err), &err_extra->addrinfo); break; default: - VG_(printf)("Error:\n unknown AddrCheck error code %d\n", err->ekind); + VG_(printf)("Error:\n unknown AddrCheck error code %d\n", + VG_(get_error_kind)(err)); VG_(skin_panic)("unknown error code in SK_(pp_SkinError)"); } } @@ -331,7 +335,8 @@ static void describe_addr ( Addr a, AcAddrInfo* ai ) /* Closure for searching malloc'd and free'd lists */ Bool addr_is_in_block(ShadowChunk *sh_ch) { - return VG_(addr_is_in_block) ( a, sh_ch->data, sh_ch->size ); + return VG_(addr_is_in_block) ( a, VG_(get_sc_data)(sh_ch), + VG_(get_sc_size)(sh_ch) ); } /* Perhaps it's on a thread's stack? */ tid = VG_(any_matching_thread_stack)(addr_is_in_bounds); @@ -344,18 +349,18 @@ static void describe_addr ( Addr a, AcAddrInfo* ai ) sc = SK_(any_matching_freed_ShadowChunks)(addr_is_in_block); if (NULL != sc) { ai->akind = Freed; - ai->blksize = sc->size; - ai->rwoffset = (Int)(a) - (Int)(sc->data); - ai->lastchange = (ExeContext*)sc->skin_extra[0]; + ai->blksize = VG_(get_sc_size)(sc); + ai->rwoffset = (Int)(a) - (Int)(VG_(get_sc_data)(sc)); + ai->lastchange = (ExeContext*)( VG_(get_sc_extra)(sc, 0) ); return; } /* Search for a currently malloc'd block which might bracket it. */ sc = VG_(any_matching_mallocd_ShadowChunks)(addr_is_in_block); if (NULL != sc) { ai->akind = Mallocd; - ai->blksize = sc->size; - ai->rwoffset = (Int)(a) - (Int)(sc->data); - ai->lastchange = (ExeContext*)sc->skin_extra[0]; + ai->blksize = VG_(get_sc_size)(sc); + ai->rwoffset = (Int)(a) - (Int)(VG_(get_sc_data)(sc)); + ai->lastchange = (ExeContext*)( VG_(get_sc_extra)(sc, 0) ); return; } /* Clueless ... */ @@ -364,19 +369,19 @@ static void describe_addr ( Addr a, AcAddrInfo* ai ) } -/* Creates a copy of the err_extra, updates the copy with address info if - necessary, sticks the copy into the SkinError. */ -void SK_(dup_extra_and_update)(SkinError* err) +/* Creates a copy of the `extra' part, updates the copy with address info if + necessary, and returns the copy. */ +void* SK_(dup_extra_and_update)(Error* err) { - AddrCheckError* err_extra; + AddrCheckError* new_extra; - err_extra = VG_(malloc)(sizeof(AddrCheckError)); - *err_extra = *((AddrCheckError*)err->extra); + new_extra = VG_(malloc)(sizeof(AddrCheckError)); + *new_extra = *((AddrCheckError*)VG_(get_error_extra)(err)); - if (err_extra->addrinfo.akind == Undescribed) - describe_addr ( err->addr, &(err_extra->addrinfo) ); + if (new_extra->addrinfo.akind == Undescribed) + describe_addr ( VG_(get_error_address)(err), &(new_extra->addrinfo) ); - err->extra = err_extra; + return new_extra; } /* Is this address within some small distance below %ESP? 
Used only @@ -491,59 +496,65 @@ void SK_(record_user_error) ( ThreadState* tst, Addr a, Bool isWrite ) #define STREQ(s1,s2) (s1 != NULL && s2 != NULL \ && VG_(strcmp)((s1),(s2))==0) -Bool SK_(recognised_suppression) ( Char* name, SuppKind *skind ) +Bool SK_(recognised_suppression) ( Char* name, Supp* su ) { - if (STREQ(name, "Param")) *skind = ParamSupp; - else if (STREQ(name, "CoreMem")) *skind = CoreMemSupp; - else if (STREQ(name, "Addr1")) *skind = Addr1Supp; - else if (STREQ(name, "Addr2")) *skind = Addr2Supp; - else if (STREQ(name, "Addr4")) *skind = Addr4Supp; - else if (STREQ(name, "Addr8")) *skind = Addr8Supp; - else if (STREQ(name, "Free")) *skind = FreeSupp; + SuppKind skind; + + if (STREQ(name, "Param")) skind = ParamSupp; + else if (STREQ(name, "CoreMem")) skind = CoreMemSupp; + else if (STREQ(name, "Addr1")) skind = Addr1Supp; + else if (STREQ(name, "Addr2")) skind = Addr2Supp; + else if (STREQ(name, "Addr4")) skind = Addr4Supp; + else if (STREQ(name, "Addr8")) skind = Addr8Supp; + else if (STREQ(name, "Free")) skind = FreeSupp; else return False; + VG_(set_supp_kind)(su, skind); return True; } -Bool SK_(read_extra_suppression_info) ( Int fd, Char* buf, Int nBuf, - SkinSupp *s ) +Bool SK_(read_extra_suppression_info) ( Int fd, Char* buf, Int nBuf, Supp *su ) { Bool eof; - if (s->skind == ParamSupp) { + if (VG_(get_supp_kind)(su) == ParamSupp) { eof = VG_(get_line) ( fd, buf, nBuf ); if (eof) return False; - s->string = VG_(strdup)(buf); + VG_(set_supp_string)(su, VG_(strdup)(buf)); } return True; } -extern Bool SK_(error_matches_suppression)(SkinError* err, SkinSupp* su) +extern Bool SK_(error_matches_suppression)(Error* err, Supp* su) { UInt su_size; - AddrCheckError* err_extra = err->extra; + AddrCheckError* err_extra = VG_(get_error_extra)(err); + ErrorKind ekind = VG_(get_error_kind)(err); - switch (su->skind) { + switch (VG_(get_supp_kind)(su)) { case ParamSupp: - return (err->ekind == ParamErr && STREQ(su->string, err->string)); + return (ekind == ParamErr + && STREQ(VG_(get_error_string)(err), VG_(get_supp_string)(su))); case CoreMemSupp: - return (err->ekind == CoreMemErr && STREQ(su->string, err->string)); + return (ekind == CoreMemErr + && STREQ(VG_(get_error_string)(err), VG_(get_supp_string)(su))); case Addr1Supp: su_size = 1; goto addr_case; case Addr2Supp: su_size = 2; goto addr_case; case Addr4Supp: su_size = 4; goto addr_case; case Addr8Supp: su_size = 8; goto addr_case; addr_case: - return (err->ekind == AddrErr && err_extra->size == su_size); + return (ekind == AddrErr && err_extra->size == su_size); case FreeSupp: - return (err->ekind == FreeErr || err->ekind == FreeMismatchErr); + return (ekind == FreeErr || ekind == FreeMismatchErr); default: VG_(printf)("Error:\n" - " unknown AddrCheck suppression type %d\n", su->skind); + " unknown AddrCheck suppression type %d\n", + VG_(get_supp_kind)(su)); VG_(skin_panic)("unknown suppression type in " "SK_(error_matches_suppression)"); } @@ -1618,20 +1629,21 @@ void fpu_ACCESS_check_SLOWLY ( Addr addr, Int size ) static __inline__ void set_where( ShadowChunk* sc, ExeContext* ec ) { - sc->skin_extra[0] = (UInt)ec; + VG_(set_sc_extra)( sc, 0, (UInt)ec ); } static __inline__ ExeContext *get_where( ShadowChunk* sc ) { - return (ExeContext*)sc->skin_extra[0]; + return (ExeContext*)VG_(get_sc_extra)(sc, 0); } void SK_(complete_shadow_chunk) ( ShadowChunk* sc, ThreadState* tst ) { - set_where( sc, VG_(get_ExeContext) ( tst ) ); + VG_(set_sc_extra) ( sc, 0, (UInt)VG_(get_ExeContext)(tst) ); } + 
/*------------------------------------------------------------*/ /*--- Postponing free()ing ---*/ /*------------------------------------------------------------*/ @@ -1646,7 +1658,7 @@ static __attribute__ ((unused)) { ShadowChunk* sc; Int n = 0; - for (sc = vg_freed_list_start; sc != NULL; sc = sc->next) + for (sc = vg_freed_list_start; sc != NULL; sc = VG_(get_sc_next)(sc)) n++; return n; } @@ -1657,8 +1669,8 @@ static __attribute__ ((unused)) ShadowChunk* sc; Int n = 0; /* VG_(printf)("freelist sanity\n"); */ - for (sc = vg_freed_list_start; sc != NULL; sc = sc->next) - n += sc->size; + for (sc = vg_freed_list_start; sc != NULL; sc = VG_(get_sc_next)(sc)) + n += VG_(get_sc_size)(sc); sk_assert(n == vg_freed_list_volume); } @@ -1672,14 +1684,14 @@ static void add_to_freed_queue ( ShadowChunk* sc ) if (vg_freed_list_end == NULL) { sk_assert(vg_freed_list_start == NULL); vg_freed_list_end = vg_freed_list_start = sc; - vg_freed_list_volume = sc->size; + vg_freed_list_volume = VG_(get_sc_size)(sc); } else { - sk_assert(vg_freed_list_end->next == NULL); - vg_freed_list_end->next = sc; + sk_assert(VG_(get_sc_next)(vg_freed_list_end) == NULL); + VG_(set_sc_next)(vg_freed_list_end, sc); vg_freed_list_end = sc; - vg_freed_list_volume += sc->size; + vg_freed_list_volume += VG_(get_sc_size)(sc); } - sc->next = NULL; + VG_(set_sc_next)(sc, NULL); /* Release enough of the oldest blocks to bring the free queue volume below vg_clo_freelist_vol. */ @@ -1690,16 +1702,16 @@ static void add_to_freed_queue ( ShadowChunk* sc ) sk_assert(vg_freed_list_end != NULL); sc1 = vg_freed_list_start; - vg_freed_list_volume -= sc1->size; + vg_freed_list_volume -= VG_(get_sc_size)(sc1); /* VG_(printf)("volume now %d\n", vg_freed_list_volume); */ sk_assert(vg_freed_list_volume >= 0); if (vg_freed_list_start == vg_freed_list_end) { vg_freed_list_start = vg_freed_list_end = NULL; } else { - vg_freed_list_start = sc1->next; + vg_freed_list_start = VG_(get_sc_next)(sc1); } - sc1->next = NULL; /* just paranoia */ + VG_(set_sc_next)(sc1, NULL); /* just paranoia */ VG_(free_ShadowChunk) ( sc1 ); } } @@ -1712,7 +1724,7 @@ ShadowChunk* SK_(any_matching_freed_ShadowChunks) /* No point looking through freed blocks if we're not keeping them around for a while... 
*/ - for (sc = vg_freed_list_start; sc != NULL; sc = sc->next) + for (sc = vg_freed_list_start; sc != NULL; sc = VG_(get_sc_next)(sc)) if (p(sc)) return sc; @@ -1743,13 +1755,12 @@ UCodeBlock* SK_(instrument)(UCodeBlock* cb_in, Addr orig_addr) UInstr* u_in; Int t_addr, t_size; - cb = VG_(alloc_UCodeBlock)(); - cb->nextTemp = cb_in->nextTemp; + cb = VG_(setup_UCodeBlock)(cb_in); - for (i = 0; i < cb_in->used; i++) { + for (i = 0; i < VG_(get_num_instrs)(cb_in); i++) { t_addr = t_size = INVALID_TEMPREG; - u_in = &cb_in->instrs[i]; + u_in = VG_(get_instr)(cb_in, i); switch (u_in->opcode) { case NOP: case LOCK: case CALLM_E: case CALLM_S: @@ -2090,57 +2101,54 @@ Char* SK_(usage)(void) /*--- Setup ---*/ /*------------------------------------------------------------*/ -void SK_(pre_clo_init)(VgDetails* details, VgNeeds* needs, VgTrackEvents* track) +void SK_(pre_clo_init)(void) { - details->name = "Addrcheck"; - details->version = NULL; - details->description = "a fine-grained address checker"; - details->copyright_author = - "Copyright (C) 2002, and GNU GPL'd, by Julian Seward."; - details->bug_reports_to = "jseward@acm.org"; + VG_(details_name) ("Addrcheck"); + VG_(details_version) (NULL); + VG_(details_description) ("a fine-grained address checker"); + VG_(details_copyright_author)( + "Copyright (C) 2002, and GNU GPL'd, by Julian Seward."); + VG_(details_bug_reports_to) ("jseward@acm.org"); - needs->core_errors = True; - needs->skin_errors = True; - needs->libc_freeres = True; - needs->sizeof_shadow_block = 1; - needs->basic_block_discards = False; - needs->shadow_regs = False; - needs->command_line_options = True; - needs->client_requests = True; - needs->extended_UCode = False; - needs->syscall_wrapper = True; - needs->alternative_free = True; - needs->sanity_checks = True; + VG_(needs_core_errors) (); + VG_(needs_skin_errors) (); + VG_(needs_libc_freeres) (); + VG_(needs_sizeof_shadow_block) ( 1 ); + VG_(needs_command_line_options)(); + VG_(needs_client_requests) (); + VG_(needs_syscall_wrapper) (); + VG_(needs_alternative_free) (); + VG_(needs_sanity_checks) (); - track->new_mem_startup = & addrcheck_new_mem_startup; - track->new_mem_heap = & addrcheck_new_mem_heap; - track->new_mem_stack = & SK_(make_accessible); - track->new_mem_stack_aligned = & make_writable_aligned; - track->new_mem_stack_signal = & SK_(make_accessible); - track->new_mem_brk = & SK_(make_accessible); - track->new_mem_mmap = & addrcheck_set_perms; + VG_(track_new_mem_startup) ( & addrcheck_new_mem_startup ); + VG_(track_new_mem_heap) ( & addrcheck_new_mem_heap ); + VG_(track_new_mem_stack) ( & SK_(make_accessible) ); + VG_(track_new_mem_stack_aligned)( & make_writable_aligned ); + VG_(track_new_mem_stack_signal) ( & SK_(make_accessible) ); + VG_(track_new_mem_brk) ( & SK_(make_accessible) ); + VG_(track_new_mem_mmap) ( & addrcheck_set_perms ); - track->copy_mem_heap = & copy_address_range_state; - track->copy_mem_remap = & copy_address_range_state; - track->change_mem_mprotect = & addrcheck_set_perms; + VG_(track_copy_mem_heap) ( & copy_address_range_state ); + VG_(track_copy_mem_remap) ( & copy_address_range_state ); + VG_(track_change_mem_mprotect) ( & addrcheck_set_perms ); - track->ban_mem_heap = & SK_(make_noaccess); - track->ban_mem_stack = & SK_(make_noaccess); + VG_(track_ban_mem_heap) ( & SK_(make_noaccess) ); + VG_(track_ban_mem_stack) ( & SK_(make_noaccess) ); - track->die_mem_heap = & SK_(make_noaccess); - track->die_mem_stack = & SK_(make_noaccess); - track->die_mem_stack_aligned = & 
make_noaccess_aligned; - track->die_mem_stack_signal = & SK_(make_noaccess); - track->die_mem_brk = & SK_(make_noaccess); - track->die_mem_munmap = & SK_(make_noaccess); + VG_(track_die_mem_heap) ( & SK_(make_noaccess) ); + VG_(track_die_mem_stack) ( & SK_(make_noaccess) ); + VG_(track_die_mem_stack_aligned)( & make_noaccess_aligned ); + VG_(track_die_mem_stack_signal) ( & SK_(make_noaccess) ); + VG_(track_die_mem_brk) ( & SK_(make_noaccess) ); + VG_(track_die_mem_munmap) ( & SK_(make_noaccess) ); - track->bad_free = & SK_(record_free_error); - track->mismatched_free = & SK_(record_freemismatch_error); + VG_(track_bad_free) ( & SK_(record_free_error) ); + VG_(track_mismatched_free) ( & SK_(record_freemismatch_error) ); - track->pre_mem_read = & check_is_readable; - track->pre_mem_read_asciiz = & check_is_readable_asciiz; - track->pre_mem_write = & check_is_writable; - track->post_mem_write = & SK_(make_accessible); + VG_(track_pre_mem_read) ( & check_is_readable ); + VG_(track_pre_mem_read_asciiz) ( & check_is_readable_asciiz ); + VG_(track_pre_mem_write) ( & check_is_writable ); + VG_(track_post_mem_write) ( & SK_(make_accessible) ); VG_(register_compact_helper)((Addr) & SK_(helperc_ACCESS4)); VG_(register_compact_helper)((Addr) & SK_(helperc_ACCESS2)); diff --git a/cachegrind/cg_main.c b/cachegrind/cg_main.c index 7a93fc292..134dc4ae4 100644 --- a/cachegrind/cg_main.c +++ b/cachegrind/cg_main.c @@ -495,8 +495,8 @@ static Int compute_BBCC_array_size(UCodeBlock* cb) is_LOAD = is_STORE = is_FPU_R = is_FPU_W = False; t_read = t_write = INVALID_TEMPREG; - for (i = 0; i < cb->used; i++) { - u_in = &cb->instrs[i]; + for (i = 0; i < VG_(get_num_instrs)(cb); i++) { + u_in = VG_(get_instr)(cb, i); switch(u_in->opcode) { case INCEIP: @@ -695,14 +695,13 @@ UCodeBlock* SK_(instrument)(UCodeBlock* cb_in, Addr orig_addr) BBCC_node = get_BBCC(orig_addr, cb_in, /*remove=*/False, &BB_seen_before); BBCC_ptr0 = BBCC_ptr = (Addr)(BBCC_node->array); - cb = VG_(alloc_UCodeBlock)(); - cb->nextTemp = cb_in->nextTemp; + cb = VG_(setup_UCodeBlock)(cb_in); t_CC_addr = t_read_addr = t_write_addr = t_data_addr1 = t_data_addr2 = t_read = t_write = INVALID_TEMPREG; - for (i = 0; i < cb_in->used; i++) { - u_in = &cb_in->instrs[i]; + for (i = 0; i < VG_(get_num_instrs)(cb_in); i++) { + u_in = VG_(get_instr)(cb_in, i); /* What this is all about: we want to instrument each x86 instruction * translation. The end of these are marked in three ways. The three @@ -802,21 +801,22 @@ UCodeBlock* SK_(instrument)(UCodeBlock* cb_in, Addr orig_addr) case JMP: if (instrumented_Jcond) { sk_assert(CondAlways == u_in->cond); - sk_assert(i+1 == cb_in->used); + sk_assert(i+1 == VG_(get_num_instrs)(cb_in)); VG_(copy_UInstr)(cb, u_in); instrumented_Jcond = False; /* reset */ break; } /* The first JMP... instrument. */ if (CondAlways != u_in->cond) { - sk_assert(i+2 == cb_in->used); + sk_assert(i+2 == VG_(get_num_instrs)(cb_in)); instrumented_Jcond = True; } else { - sk_assert(i+1 == cb_in->used); + sk_assert(i+1 == VG_(get_num_instrs)(cb_in)); } /* Get x86 instr size from final JMP. 
*/ - x86_instr_size = LAST_UINSTR(cb_in).extra4b; + x86_instr_size = VG_(get_last_instr)(cb_in)->extra4b; + goto instrument_x86_instr; @@ -1916,18 +1916,17 @@ Char* SK_(usage)(void) /*--- Setup ---*/ /*--------------------------------------------------------------------*/ -void SK_(pre_clo_init)(VgDetails* details, VgNeeds* needs, - VgTrackEvents* not_used) +void SK_(pre_clo_init)(void) { - details->name = "Cachegrind"; - details->version = NULL; - details->description = "an I1/D1/L2 cache profiler"; - details->copyright_author = - "Copyright (C) 2002, and GNU GPL'd, by Nicholas Nethercote."; - details->bug_reports_to = "njn25@cam.ac.uk"; + VG_(details_name) ("Cachegrind"); + VG_(details_version) (NULL); + VG_(details_description) ("an I1/D1/L2 cache profiler"); + VG_(details_copyright_author)( + "Copyright (C) 2002, and GNU GPL'd, by Nicholas Nethercote."); + VG_(details_bug_reports_to) ("njn25@cam.ac.uk"); - needs->basic_block_discards = True; - needs->command_line_options = True; + VG_(needs_basic_block_discards)(); + VG_(needs_command_line_options)(); VG_(register_compact_helper)((Addr) & log_1I_0D_cache_access); VG_(register_compact_helper)((Addr) & log_1I_0D_cache_access_JIFZ); diff --git a/corecheck/cc_main.c b/corecheck/cc_main.c index b6223f85c..b6c3e9660 100644 --- a/corecheck/cc_main.c +++ b/corecheck/cc_main.c @@ -33,16 +33,16 @@ VG_DETERMINE_INTERFACE_VERSION -void SK_(pre_clo_init)(VgDetails* details, VgNeeds* needs, VgTrackEvents* track) +void SK_(pre_clo_init)(void) { - details->name = "Coregrind"; - details->version = NULL; - details->description = "a rudimentary error detector"; - details->copyright_author = - "Copyright (C) 2002, and GNU GPL'd, by Nicholas Nethercote."; - details->bug_reports_to = "njn25@cam.ac.uk"; + VG_(details_name) ("Coregrind"); + VG_(details_version) (NULL); + VG_(details_description) ("a rudimentary error detector"); + VG_(details_copyright_author)( + "Copyright (C) 2002, and GNU GPL'd, by Nicholas Nethercote."); + VG_(details_bug_reports_to) ("njn25@cam.ac.uk"); - needs->core_errors = True; + VG_(needs_core_errors)(); /* No core events to track */ } diff --git a/coregrind/Makefile.am b/coregrind/Makefile.am index 7ad845ec5..fd113067a 100644 --- a/coregrind/Makefile.am +++ b/coregrind/Makefile.am @@ -51,6 +51,7 @@ valgrind_so_SOURCES = \ vg_memory.c \ vg_messages.c \ vg_mylibc.c \ + vg_needs.c \ vg_procselfmaps.c \ vg_dummy_profile.c \ vg_signals.c \ diff --git a/coregrind/docs/coregrind_skins.html b/coregrind/docs/coregrind_skins.html index a17397139..ab958d7c0 100644 --- a/coregrind/docs/coregrind_skins.html +++ b/coregrind/docs/coregrind_skins.html @@ -369,18 +369,18 @@ Only use SK_(post_clo_init)() if a skin provides command line options and must do some initialisation after option processing takes place (``clo'' stands for ``command line options'').
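For instance, a skin that sizes a table from a command line option could defer that work to SK_(post_clo_init)(). A minimal sketch (the option and table names are invented, and SK_(post_clo_init)()'s argument-less prototype is assumed here, since it is not shown in this patch):

   /* `clo_n_counters' is a hypothetical option, filled in during command
      line processing; the default is used if the option is not given. */
   static Int   clo_n_counters = 100;
   static UInt* eg_counters    = NULL;

   void SK_(post_clo_init) ( void )
   {
      /* Deferred until now because the size depends on an option value. */
      eg_counters = VG_(malloc)( clo_n_counters * sizeof(UInt) );
   }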

-The first argument to SK_(pre_clo_init)() must be initialised with -various ``details'' for a skin. These are all compulsory except for +First of all, various ``details'' need to be set for a skin, using the +functions VG_(details_*)(). These are all compulsory except for version. They are used when constructing the startup message, except for bug_reports_to which is used if VG_(skin_panic)() is ever called, or a skin assertion fails.
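For instance, the start of a skin's SK_(pre_clo_init)() might look like the following sketch (the name, description and addresses are invented; the VG_(details_*)() calls are the ones introduced by this patch):

   void SK_(pre_clo_init) ( void )
   {
      /* All compulsory except the version, which may be NULL. */
      VG_(details_name)            ("Examplegrind");
      VG_(details_version)         (NULL);
      VG_(details_description)     ("an example skin");
      VG_(details_copyright_author)("Copyright (C) 2002, A. N. Author.");
      VG_(details_bug_reports_to)  ("someone@example.org");

      /* needs and trackable events are declared here too -- see below. */
   }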

-The second argument to SK_(pre_clo_init)() must be initialised with -the ``needs'' for a skin. They are mostly booleans, and can be left untouched -(they default to False). They determine whether a skin can do -various things such as: record, report and suppress errors; process command -line options; wrap system calls; record extra information about malloc'd -blocks, etc.

+Second, various ``needs'' can be set for a skin, using the functions +VG_(needs_*)(). They are mostly booleans, and can be left +untouched (they default to False). They determine whether a skin +can do various things such as: record, report and suppress errors; process +command line options; wrap system calls; record extra information about +malloc'd blocks, etc.

For example, if a skin wants the core's help in recording and reporting errors, it must set the skin_errors need to True, and then @@ -391,18 +391,20 @@ scratch because the core is doing most of the work. See the type VgNeeds in include/vg_skin.h for full details of all the needs.
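Continuing the sketch above, that need is declared inside SK_(pre_clo_init)() like so (the calls and the SK_(...) names below all appear elsewhere in this patch):

   void SK_(pre_clo_init) ( void )
   {
      /* ... details calls as above ... */

      /* Ask the core to collect, compare and report the errors this skin
         detects.  In return the skin must define SK_(eq_SkinError),
         SK_(pp_SkinError), SK_(dup_extra_and_update),
         SK_(recognised_suppression), SK_(read_extra_suppression_info)
         and SK_(error_matches_suppression). */
      VG_(needs_skin_errors)();

      /* Needs that are not set keep their default value, False. */
   }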

-The third argument to SK_(pre_clo_init)() must be initialised to -indicate which events in core the skin wants to be notified about. These -include things such as blocks of memory being malloc'd, the stack pointer -changing, a mutex being locked, etc. If a skin wants to know about this, -it should set the relevant pointer in the structure to point to a function, -which will be called when that event happens.

+Third, the skin can indicate which events in core it wants to be notified +about, using the functions VG_(track_*)(). These include things +such as blocks of memory being malloc'd, the stack pointer changing, a mutex +being locked, etc. If a skin wants to be notified about one of these events, +it should call the relevant VG_(track_*)() function, passing in a pointer to a +function which will be called when that event happens.

For example, if the skin wants to be notified when a new block of memory is -malloc'd, it should set the new_mem_heap function pointer, and the -assigned function will be called each time this happens. See the type -VgTrackEvents in include/vg_skin.h for full details -of all the trackable events.

+malloc'd, it should call VG_(track_new_mem_heap)() with an +appropriate function pointer, and the assigned function will be called each +time this happens.
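In code, that looks like the following sketch (the handler's name and body are invented; the registration call and the handler's argument list follow the VG_(track_new_mem_heap)() declaration added by this patch):

   /* Called by the core each time a new heap block is malloc'd. */
   static void eg_new_mem_heap ( Addr a, UInt len, Bool is_inited )
   {
      VG_(message)(Vg_UserMsg, "new heap block at %x, %u bytes (inited? %d)",
                               a, len, (Int)is_inited);
   }

   void SK_(pre_clo_init) ( void )
   {
      /* ... details and needs as above ... */
      VG_(track_new_mem_heap) ( & eg_new_mem_heap );
   }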

+ +More information about ``details'', ``needs'' and ``trackable events'' can be +found in include/vg_skin.h.

2.7  Instrumentation

@@ -675,9 +677,8 @@ it:
  • If you write a skin
  • If you have suggestions for new features, needs, trackable events, functions
-  • If you have suggestions for making skins easier to write
-
-  • If you have suggestions for improving this documentation
+  • If you have suggestions for making skins easier to write
+  • If you have suggestions for improving this documentation
  • If you don't understand something
  • diff --git a/coregrind/vg_default.c b/coregrind/vg_default.c index 83008d4be..8778b8468 100644 --- a/coregrind/vg_default.c +++ b/coregrind/vg_default.c @@ -71,7 +71,7 @@ void non_fund_panic ( Char* fn ) ------------------------------------------------------------------ */ __attribute__ ((weak)) -void SK_(pre_clo_init)(VgDetails* details, VgNeeds* needs, VgTrackEvents* track) +void SK_(pre_clo_init)( void ) { fund_panic("SK_(pre_clo_init)"); } @@ -99,38 +99,37 @@ void SK_(fini)(void) ------------------------------------------------------------------ */ __attribute__ ((weak)) -Bool SK_(eq_SkinError)(VgRes res, SkinError* e1, SkinError* e2) +Bool SK_(eq_SkinError)(VgRes res, Error* e1, Error* e2) { non_fund_panic("SK_(eq_SkinError)"); } __attribute__ ((weak)) -void SK_(pp_SkinError)(SkinError* ec, void (*pp_ExeContext)(void)) +void SK_(pp_SkinError)(Error* err, void (*pp_ExeContext)(void)) { non_fund_panic("SK_(pp_SkinError)"); } __attribute__ ((weak)) -void SK_(dup_extra_and_update)(SkinError* ec) +void* SK_(dup_extra_and_update)(Error* err) { non_fund_panic("SK_(dup_extra_and_update)"); } __attribute__ ((weak)) -Bool SK_(recognised_suppression)(Char* name, SuppKind* skind) +Bool SK_(recognised_suppression)(Char* name, Supp* su) { non_fund_panic("SK_(recognised_suppression)"); } __attribute__ ((weak)) -Bool SK_(read_extra_suppression_info)(Int fd, Char* buf, - Int nBuf, SkinSupp *s) +Bool SK_(read_extra_suppression_info)(Int fd, Char* buf, Int nBuf, Supp* su) { non_fund_panic("SK_(read_extra_suppression_info)"); } __attribute__ ((weak)) -Bool SK_(error_matches_suppression)(SkinError* ec, SkinSupp* su) +Bool SK_(error_matches_suppression)(Error* err, Supp* su) { non_fund_panic("SK_(error_matches_suppression)"); } @@ -213,7 +212,7 @@ void SK_(pp_XUInstr)(UInstr* u) } __attribute__ ((weak)) -Int SK_(get_Xreg_usage)(UInstr* u, Tag tag, RegUse* arr) +Int SK_(get_Xreg_usage)(UInstr* u, Tag tag, Int* regs, Bool* isWrites) { non_fund_panic("SK_(get_Xreg_usage)"); } diff --git a/coregrind/vg_errcontext.c b/coregrind/vg_errcontext.c index b01ca574c..4d82b50ec 100644 --- a/coregrind/vg_errcontext.c +++ b/coregrind/vg_errcontext.c @@ -36,11 +36,11 @@ /* The list of error contexts found, both suppressed and unsuppressed. Initially empty, and grows as errors are detected. */ -static CoreError* vg_errors = NULL; +static Error* vg_errors = NULL; /* The list of suppression directives, as read from the specified suppressions file. */ -static CoreSupp* vg_suppressions = NULL; +static Supp* vg_suppressions = NULL; /* Running count of unsuppressed errors detected. */ static UInt vg_n_errs_found = 0; @@ -49,7 +49,7 @@ static UInt vg_n_errs_found = 0; static UInt vg_n_errs_suppressed = 0; /* forwards ... */ -static CoreSupp* is_suppressible_error ( CoreError* err ); +static Supp* is_suppressible_error ( Error* err ); /*------------------------------------------------------------*/ @@ -59,34 +59,34 @@ static CoreSupp* is_suppressible_error ( CoreError* err ); /* Compare error contexts, to detect duplicates. Note that if they are otherwise the same, the faulting addrs and associated rwoffsets are allowed to be different. 
*/ -static Bool eq_CoreError ( VgRes res, CoreError* e1, CoreError* e2 ) +static Bool eq_Error ( VgRes res, Error* e1, Error* e2 ) { - if (e1->skin_err.ekind != e2->skin_err.ekind) + if (e1->ekind != e2->ekind) return False; if (!VG_(eq_ExeContext)(res, e1->where, e2->where)) return False; - switch (e1->skin_err.ekind) { + switch (e1->ekind) { case PThreadErr: vg_assert(VG_(needs).core_errors); - if (e1->skin_err.string == e2->skin_err.string) + if (e1->string == e2->string) return True; - if (0 == VG_(strcmp)(e1->skin_err.string, e2->skin_err.string)) + if (0 == VG_(strcmp)(e1->string, e2->string)) return True; return False; default: if (VG_(needs).skin_errors) - return SK_(eq_SkinError)(res, &e1->skin_err, &e2->skin_err); + return SK_(eq_SkinError)(res, e1, e2); else { VG_(printf)("\nUnhandled error type: %u. VG_(needs).skin_errors\n" "probably needs to be set.\n", - e1->skin_err.ekind); + e1->ekind); VG_(skin_panic)("unhandled error type"); } } } -static void pp_CoreError ( CoreError* err, Bool printCount ) +static void pp_Error ( Error* err, Bool printCount ) { /* Closure for printing where the error occurred. Abstracts details about the `where' field away from the skin. */ @@ -100,19 +100,19 @@ static void pp_CoreError ( CoreError* err, Bool printCount ) if (err->tid > 1) VG_(message)(Vg_UserMsg, "Thread %d:", err->tid ); - switch (err->skin_err.ekind) { + switch (err->ekind) { case PThreadErr: vg_assert(VG_(needs).core_errors); - VG_(message)(Vg_UserMsg, "%s", err->skin_err.string ); + VG_(message)(Vg_UserMsg, "%s", err->string ); VG_(pp_ExeContext)(err->where); break; default: if (VG_(needs).skin_errors) - SK_(pp_SkinError)( &err->skin_err, &pp_ExeContextClosure ); + SK_(pp_SkinError)( err, &pp_ExeContextClosure ); else { VG_(printf)("\nUnhandled error type: %u. VG_(needs).skin_errors\n" "probably needs to be set?\n", - err->skin_err.ekind); + err->ekind); VG_(skin_panic)("unhandled error type"); } } @@ -175,10 +175,10 @@ Bool vg_is_GDB_attach_requested ( void ) stored thread state, not from VG_(baseBlock). */ static __inline__ -void construct_error ( CoreError* err, ThreadState* tst, +void construct_error ( Error* err, ThreadState* tst, ErrorKind ekind, Addr a, Char* s, void* extra ) { - /* CoreError parts */ + /* Core-only parts */ err->next = NULL; err->supp = NULL; err->count = 1; @@ -200,11 +200,11 @@ void construct_error ( CoreError* err, ThreadState* tst, err->m_ebp = tst->m_ebp; } - /* SkinError parts */ - err->skin_err.ekind = ekind; - err->skin_err.addr = a; - err->skin_err.string = s; - err->skin_err.extra = extra; + /* Skin-relevant parts */ + err->ekind = ekind; + err->addr = a; + err->string = s; + err->extra = extra; /* sanity... 
*/ vg_assert(err->tid >= 0 && err->tid < VG_N_THREADS); @@ -216,14 +216,14 @@ void construct_error ( CoreError* err, ThreadState* tst, void VG_(maybe_record_error) ( ThreadState* tst, ErrorKind ekind, Addr a, Char* s, void* extra ) { - CoreError err; - CoreError* p; - CoreError* p_prev; - VgRes exe_res = Vg_MedRes; - static Bool is_first_shown_context = True; - static Bool stopping_message = False; - static Bool slowdown_message = False; - static Int vg_n_errs_shown = 0; + Error err; + Error* p; + Error* p_prev; + VgRes exe_res = Vg_MedRes; + static Bool is_first_shown_context = True; + static Bool stopping_message = False; + static Bool slowdown_message = False; + static Int vg_n_errs_shown = 0; /* After M_VG_COLLECT_NO_ERRORS_AFTER_SHOWN different errors have been found, or M_VG_COLLECT_NO_ERRORS_AFTER_FOUND total errors @@ -286,7 +286,7 @@ void VG_(maybe_record_error) ( ThreadState* tst, p = vg_errors; p_prev = NULL; while (p != NULL) { - if (eq_CoreError(exe_res, p, &err)) { + if (eq_Error(exe_res, p, &err)) { /* Found it. */ p->count++; if (p->supp != NULL) { @@ -316,19 +316,19 @@ void VG_(maybe_record_error) ( ThreadState* tst, /* OK, we're really going to collect it. First make a copy, because the error context is on the stack and will disappear shortly. We can duplicate the main part ourselves, but use - SK_(dup_extra_and_update) to duplicate the 'extra' part (unless it's + SK_(dup_extra_and_update) to duplicate the `extra' part (unless it's NULL). - SK_(dup_extra_and_update) can also update the SkinError. This is + SK_(dup_extra_and_update) can also update the `extra' part. This is for when there are more details to fill in which take time to work out but don't affect our earlier decision to include the error -- by postponing those details until now, we avoid the extra work in the - case where we ignore the error. + case where we ignore the error. Ugly. */ - p = VG_(arena_malloc)(VG_AR_ERRORS, sizeof(CoreError)); + p = VG_(arena_malloc)(VG_AR_ERRORS, sizeof(Error)); *p = err; - if (NULL != err.skin_err.extra) - SK_(dup_extra_and_update)(&p->skin_err); + if (NULL != err.extra) + p->extra = SK_(dup_extra_and_update)(p); p->next = vg_errors; p->supp = is_suppressible_error(&err); @@ -337,7 +337,7 @@ void VG_(maybe_record_error) ( ThreadState* tst, vg_n_errs_found++; if (!is_first_shown_context) VG_(message)(Vg_UserMsg, ""); - pp_CoreError(p, False); + pp_Error(p, False); is_first_shown_context = False; vg_n_errs_shown++; /* Perhaps we want a GDB attach at this point? 
*/ @@ -369,11 +369,11 @@ void VG_(record_pthread_error) ( ThreadId tid, Char* msg ) void VG_(show_all_errors) ( void ) { - Int i, n_min; - Int n_err_contexts, n_supp_contexts; - CoreError *p, *p_min; - CoreSupp *su; - Bool any_supp; + Int i, n_min; + Int n_err_contexts, n_supp_contexts; + Error *p, *p_min; + Supp *su; + Bool any_supp; if (VG_(clo_verbosity) == 0) return; @@ -416,7 +416,7 @@ void VG_(show_all_errors) ( void ) VG_(message)(Vg_UserMsg, "%d errors in context %d of %d:", p_min->count, i+1, n_err_contexts); - pp_CoreError( p_min, False ); + pp_Error( p_min, False ); if ((i+1 == VG_(clo_dump_error))) { VG_(translate) ( 0 /* dummy ThreadId; irrelevant due to below NULLs */, @@ -558,11 +558,11 @@ static void load_one_suppressions_file ( Char* filename ) while (True) { /* Assign and initialise the two suppression halves (core and skin) */ - CoreSupp* supp; - supp = VG_(arena_malloc)(VG_AR_CORE, sizeof(CoreSupp)); + Supp* supp; + supp = VG_(arena_malloc)(VG_AR_CORE, sizeof(Supp)); supp->count = 0; for (i = 0; i < VG_N_SUPP_CALLERS; i++) supp->caller[i] = NULL; - supp->skin_supp.string = supp->skin_supp.extra = NULL; + supp->string = supp->extra = NULL; eof = VG_(get_line) ( fd, buf, N_BUF ); if (eof) break; @@ -593,7 +593,7 @@ static void load_one_suppressions_file ( Char* filename ) if (VG_(needs).core_errors && skin_name_present("core", skin_names)) { if (STREQ(supp_name, "PThread")) - supp->skin_supp.skind = PThreadSupp; + supp->skind = PThreadSupp; else goto syntax_error; } @@ -602,9 +602,9 @@ static void load_one_suppressions_file ( Char* filename ) else if (VG_(needs).skin_errors && skin_name_present(VG_(details).name, skin_names)) { - if (SK_(recognised_suppression)(supp_name, & supp->skin_supp.skind)) + if (SK_(recognised_suppression)(supp_name, supp)) { - /* Do nothing, function fills in supp->skin_supp.skind */ + /* Do nothing, function fills in supp->skind */ } else goto syntax_error; } @@ -621,7 +621,7 @@ static void load_one_suppressions_file ( Char* filename ) } if (VG_(needs).skin_errors && - !SK_(read_extra_suppression_info)(fd, buf, N_BUF, &supp->skin_supp)) + !SK_(read_extra_suppression_info)(fd, buf, N_BUF, supp)) goto syntax_error; /* "i > 0" ensures at least one caller read. */ @@ -687,28 +687,27 @@ void get_objname_fnname ( Addr a, } static __inline__ -Bool supp_matches_error(CoreSupp* su, CoreError* err) +Bool supp_matches_error(Supp* su, Error* err) { - switch (su->skin_supp.skind) { + switch (su->skind) { case PThreadSupp: - return (err->skin_err.ekind == PThreadErr); + return (err->ekind == PThreadErr); default: if (VG_(needs).skin_errors) { - return (SK_(error_matches_suppression)(&err->skin_err, - &su->skin_supp)); + return SK_(error_matches_suppression)(err, su); } else { VG_(printf)( "\nUnhandled suppression type: %u. VG_(needs).skin_errors\n" "probably needs to be set.\n", - err->skin_err.ekind); + err->ekind); VG_(skin_panic)("unhandled suppression type"); } } } static __inline__ -Bool supp_matches_callers(CoreSupp* su, Char caller_obj[][M_VG_ERRTXT], - Char caller_fun[][M_VG_ERRTXT]) +Bool supp_matches_callers(Supp* su, Char caller_obj[][M_VG_ERRTXT], + Char caller_fun[][M_VG_ERRTXT]) { Int i; @@ -728,19 +727,18 @@ Bool supp_matches_callers(CoreSupp* su, Char caller_obj[][M_VG_ERRTXT], return True; } -/* Does an error context match a suppression? ie is this a - suppressible error? If so, return a pointer to the CoreSupp - record, otherwise NULL. +/* Does an error context match a suppression? ie is this a suppressible + error? 
If so, return a pointer to the Supp record, otherwise NULL. Tries to minimise the number of symbol searches since they are expensive. */ -static CoreSupp* is_suppressible_error ( CoreError* err ) +static Supp* is_suppressible_error ( Error* err ) { Int i; Char caller_obj[VG_N_SUPP_CALLERS][M_VG_ERRTXT]; Char caller_fun[VG_N_SUPP_CALLERS][M_VG_ERRTXT]; - CoreSupp* su; + Supp* su; /* get_objname_fnname() writes the function name and object name if it finds them in the debug info. so the strings in the suppression diff --git a/coregrind/vg_include.h b/coregrind/vg_include.h index 0db3c65c0..8b559f191 100644 --- a/coregrind/vg_include.h +++ b/coregrind/vg_include.h @@ -278,6 +278,129 @@ extern void VGP_(done_profiling) ( void ); #endif +/* --------------------------------------------------------------------- + Skin-related types + ------------------------------------------------------------------ */ +/* These structs are not exposed to skins to mitigate possibility of + binary-incompatibilities when the core/skin interface changes. Instead, + set functions are provided (see include/vg_skin.h). */ +typedef + struct { + Char* name; + Char* version; + Char* description; + Char* copyright_author; + Char* bug_reports_to; + } + VgDetails; + +extern VgDetails VG_(details); + +/* If new fields are added to this type, update: + * - vg_main.c:initialisation of VG_(needs) + * - vg_main.c:sanity_check_needs() + * + * If the name of this type or any of its fields change, update: + * - dependent comments (just search for "VG_(needs)"). + */ +typedef + struct { + Bool libc_freeres; + Bool core_errors; + + Bool skin_errors; + Bool basic_block_discards; + Bool shadow_regs; + Bool command_line_options; + Bool client_requests; + Bool extended_UCode; + Bool syscall_wrapper; + UInt sizeof_shadow_block; + Bool alternative_free; + Bool sanity_checks; + Bool data_syms; + } + VgNeeds; + +extern VgNeeds VG_(needs); + +/* Events happening in core to track. To be notified, assign a function + to the function pointer. To ignore an event, don't do anything + (default assignment is to NULL in which case the call is skipped). 
*/ +typedef + struct { + /* Memory events */ + void (*new_mem_startup)( Addr a, UInt len, Bool rr, Bool ww, Bool xx ); + void (*new_mem_heap) ( Addr a, UInt len, Bool is_inited ); + void (*new_mem_stack) ( Addr a, UInt len ); + void (*new_mem_stack_aligned) ( Addr a, UInt len ); + void (*new_mem_stack_signal) ( Addr a, UInt len ); + void (*new_mem_brk) ( Addr a, UInt len ); + void (*new_mem_mmap) ( Addr a, UInt len, Bool rr, Bool ww, Bool xx ); + + void (*copy_mem_heap) ( Addr from, Addr to, UInt len ); + void (*copy_mem_remap) ( Addr from, Addr to, UInt len ); + void (*change_mem_mprotect) ( Addr a, UInt len, Bool rr, Bool ww, Bool xx ); + + /* Used on redzones around malloc'd blocks and at end of stack */ + void (*ban_mem_heap) ( Addr a, UInt len ); + void (*ban_mem_stack) ( Addr a, UInt len ); + + void (*die_mem_heap) ( Addr a, UInt len ); + void (*die_mem_stack) ( Addr a, UInt len ); + void (*die_mem_stack_aligned) ( Addr a, UInt len ); + void (*die_mem_stack_signal) ( Addr a, UInt len ); + void (*die_mem_brk) ( Addr a, UInt len ); + void (*die_mem_munmap) ( Addr a, UInt len ); + + void (*bad_free) ( ThreadState* tst, Addr a ); + void (*mismatched_free) ( ThreadState* tst, Addr a ); + + void (*pre_mem_read) ( CorePart part, ThreadState* tst, + Char* s, Addr a, UInt size ); + void (*pre_mem_read_asciiz) ( CorePart part, ThreadState* tst, + Char* s, Addr a ); + void (*pre_mem_write) ( CorePart part, ThreadState* tst, + Char* s, Addr a, UInt size ); + /* Not implemented yet -- have to add in lots of places, which is a + pain. Won't bother unless/until there's a need. */ + /* void (*post_mem_read) ( ThreadState* tst, Char* s, + Addr a, UInt size ); */ + void (*post_mem_write) ( Addr a, UInt size ); + + + /* Scheduler events (not exhaustive) */ + void (*thread_run) ( ThreadId tid ); + + + /* Thread events (not exhaustive) */ + void (*post_thread_create) ( ThreadId tid, ThreadId child ); + void (*post_thread_join) ( ThreadId joiner, ThreadId joinee ); + + + /* Mutex events (not exhaustive) */ + void (*pre_mutex_lock) ( ThreadId tid, + void* /*pthread_mutex_t* */ mutex ); + void (*post_mutex_lock) ( ThreadId tid, + void* /*pthread_mutex_t* */ mutex ); + void (*post_mutex_unlock) ( ThreadId tid, + void* /*pthread_mutex_t* */ mutex ); + + + /* Others... condition variable, signal events... */ + /* ... */ + } + VgTrackEvents; + +extern VgTrackEvents VG_(track_events); + + +/* --------------------------------------------------------------------- + Exports of vg_needs.c + ------------------------------------------------------------------ */ + +void VG_(sanity_check_needs)(void); + /* --------------------------------------------------------------------- Exports of vg_malloc2.c ------------------------------------------------------------------ */ @@ -936,6 +1059,16 @@ extern Int VG_(disBB) ( UCodeBlock* cb, Addr eip0 ); Exports of vg_translate.c ------------------------------------------------------------------ */ +/* Expandable arrays of uinstrs. */ +struct _UCodeBlock { + Int used; + Int size; + UInstr* instrs; + Int nextTemp; +}; + +extern UCodeBlock* VG_(alloc_UCodeBlock) ( void ); + extern void VG_(translate) ( ThreadState* tst, Addr orig_addr, UInt* orig_size, @@ -998,23 +1131,29 @@ typedef } SuppLocTy; -/* Suppressions. Skin part `SkinSupp' (which is all skins have to deal - with) is in vg_skin.h */ -typedef - struct _CoreSupp { - struct _CoreSupp* next; - /* The number of times this error has been suppressed. */ - Int count; - /* The name by which the suppression is referred to. 
*/ - Char* sname; - /* First two (name of fn where err occurs, and immediate caller) - * are mandatory; extra two are optional. */ - SuppLocTy caller_ty[VG_N_SUPP_CALLERS]; - Char* caller [VG_N_SUPP_CALLERS]; - /* The skin-specific part */ - SkinSupp skin_supp; - } - CoreSupp; +/* Suppressions. Skins can get/set skin-relevant parts with functions + declared in include/vg_skin.h. Extensible via the 'extra' field. + Skins can use a normal enum (with element values in the normal range + (0..)) for `skind'. */ +struct _Supp { + struct _Supp* next; + /* The number of times this error has been suppressed. */ + Int count; + /* The name by which the suppression is referred to. */ + Char* sname; + /* First two (name of fn where err occurs, and immediate caller) + * are mandatory; extra two are optional. */ + SuppLocTy caller_ty[VG_N_SUPP_CALLERS]; + Char* caller [VG_N_SUPP_CALLERS]; + + /* The skin-specific part */ + /* What kind of suppression. Must use the range (0..) */ + SuppKind skind; + /* String -- use is optional. NULL by default. */ + Char* string; + /* Anything else -- use is optional. NULL by default. */ + void* extra; +}; /* Note: it is imperative this doesn't overlap with (0..) at all, as skins * effectively extend it by defining their own enums in the (0..) range. */ @@ -1024,27 +1163,40 @@ typedef } CoreErrorKind; -/* Errors. Skin part `SkinError' (which is all skins have to deal - with) is in vg_skin.h */ -typedef - struct _CoreErrContext { - struct _CoreErrContext* next; - /* NULL if unsuppressed; or ptr to suppression record. */ - CoreSupp* supp; - Int count; - ExeContext* where; - ThreadId tid; - /* These record %EIP, %ESP and %EBP at the error point. They - are only used to make GDB-attaching convenient; there is no - other purpose; specifically they are not used to do - comparisons between errors. */ - UInt m_eip; - UInt m_esp; - UInt m_ebp; - /* The skin-specific part */ - SkinError skin_err; - } - CoreError; +/* Errors. Extensible (via the 'extra' field). Skins can use a normal + enum (with element values in the normal range (0..)) for `ekind'. + Functions for getting/setting the skin-relevant fields are in + include/vg_skin.h. + + When errors are found and recorded with VG_(maybe_record_error)(), all + the skin must do is pass in the four parameters; core will + allocate/initialise the error record. +*/ +struct _Error { + struct _Error* next; + /* NULL if unsuppressed; or ptr to suppression record. */ + Supp* supp; + Int count; + ExeContext* where; + ThreadId tid; + /* These record %EIP, %ESP and %EBP at the error point. They + are only used to make GDB-attaching convenient; there is no + other purpose; specifically they are not used to do + comparisons between errors. */ + UInt m_eip; + UInt m_esp; + UInt m_ebp; + + /* The skin-specific part */ + /* Used by ALL. Must be in the range (0..) */ + Int ekind; + /* Used frequently */ + Addr addr; + /* Used frequently */ + Char* string; + /* For any skin-specific extras */ + void* extra; +}; extern void VG_(load_suppressions) ( void ); @@ -1087,6 +1239,26 @@ extern void VG_(mini_stack_dump) ( ExeContext* ec ); Exports of vg_clientmalloc.c ------------------------------------------------------------------ */ +typedef + enum { + Vg_AllocMalloc = 0, + Vg_AllocNew = 1, + Vg_AllocNewVec = 2 + } + VgAllocKind; + +/* Description of a malloc'd chunk. Functions for extracting skin-relevant + parts are in include/vg_skin.h Size of skin_extra array is given by + VG_(needs).sizeof_shadow_chunk. 
*/ +struct _ShadowChunk { + struct _ShadowChunk* next; + UInt size : 30; /* size requested */ + VgAllocKind allockind : 2; /* which wrapper did the allocation */ + Addr data; /* ptr to actual block */ + UInt extra[0]; /* extra skin-specific info */ +}; + + extern void VG_(client_malloc_init)(); /* These are called from the scheduler, when it intercepts a user diff --git a/coregrind/vg_main.c b/coregrind/vg_main.c index 900d3e17e..163b64b5e 100644 --- a/coregrind/vg_main.c +++ b/coregrind/vg_main.c @@ -443,94 +443,6 @@ UInt VG_(num_scheduling_events_MINOR) = 0; UInt VG_(num_scheduling_events_MAJOR) = 0; -/* --------------------------------------------------------------------- - Skin data structure initialisation - ------------------------------------------------------------------ */ - -/* Init with default values. */ -VgDetails VG_(details) = { - .name = NULL, - .version = NULL, - .description = NULL, - .copyright_author = NULL, - .bug_reports_to = NULL, -}; - -VgNeeds VG_(needs) = { - .core_errors = False, - .skin_errors = False, - .libc_freeres = False, - .sizeof_shadow_block = 0, - .basic_block_discards = False, - .shadow_regs = False, - .command_line_options = False, - .client_requests = False, - .extended_UCode = False, - .syscall_wrapper = False, - .alternative_free = False, - .sanity_checks = False, - .data_syms = False, -}; - -VgTrackEvents VG_(track_events) = { - /* Memory events */ - .new_mem_startup = NULL, - .new_mem_heap = NULL, - .new_mem_stack = NULL, - .new_mem_stack_aligned = NULL, - .new_mem_stack_signal = NULL, - .new_mem_brk = NULL, - .new_mem_mmap = NULL, - - .copy_mem_heap = NULL, - .copy_mem_remap = NULL, - .change_mem_mprotect = NULL, - - .ban_mem_heap = NULL, - .ban_mem_stack = NULL, - - .die_mem_heap = NULL, - .die_mem_stack = NULL, - .die_mem_stack_aligned = NULL, - .die_mem_stack_signal = NULL, - .die_mem_brk = NULL, - .die_mem_munmap = NULL, - - .bad_free = NULL, - .mismatched_free = NULL, - - .pre_mem_read = NULL, - .pre_mem_read_asciiz = NULL, - .pre_mem_write = NULL, - .post_mem_write = NULL, - - /* Scheduler events */ - .thread_run = NULL, - - /* Mutex events */ - .post_mutex_lock = NULL, - .post_mutex_unlock = NULL, -}; - -static void sanity_check_needs ( void ) -{ -#define CHECK_NOT(var, value) \ - if ((var)==(value)) { \ - VG_(printf)("\nSkin error: `%s' not initialised\n", \ - VG__STRING(var)); \ - VG_(skin_panic)("Uninitialised needs field\n"); \ - } - - CHECK_NOT(VG_(details).name, NULL); - /* Nb: .version can be NULL */ - CHECK_NOT(VG_(details).description, NULL); - CHECK_NOT(VG_(details).copyright_author, NULL); - CHECK_NOT(VG_(details).bug_reports_to, NULL); - -#undef CHECK_NOT -#undef INVALID_Bool -} - /* --------------------------------------------------------------------- Values derived from command-line options. ------------------------------------------------------------------ */ @@ -1350,8 +1262,8 @@ void VG_(main) ( void ) and turn on/off 'command_line_options' need - init_memory() (to setup memory event trackers). */ - SK_(pre_clo_init) ( & VG_(details), & VG_(needs), & VG_(track_events) ); - sanity_check_needs(); + SK_(pre_clo_init)(); + VG_(sanity_check_needs)(); /* Process Valgrind's command-line opts (from env var VG_ARGS). */ process_cmd_line_options(); diff --git a/coregrind/vg_needs.c b/coregrind/vg_needs.c new file mode 100644 index 000000000..5abeffde5 --- /dev/null +++ b/coregrind/vg_needs.c @@ -0,0 +1,337 @@ + +/*--------------------------------------------------------------------*/ +/*--- Stuff relating to skin data structures. 
---*/ +/*--- vg_needs.c ---*/ +/*--------------------------------------------------------------------*/ + +/* + This file is part of Valgrind, an extensible x86 protected-mode + emulator for monitoring program execution on x86-Unixes. + + Copyright (C) 2000-2002 Nicholas Nethercote + jseward@acm.org + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of the + License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + 02111-1307, USA. + + The GNU General Public License is contained in the file COPYING. +*/ + +#include "vg_include.h" + + +/* --------------------------------------------------------------------- + Skin data structure initialisation + ------------------------------------------------------------------ */ + +/* Init with default values. */ +VgDetails VG_(details) = { + .name = NULL, + .version = NULL, + .description = NULL, + .copyright_author = NULL, + .bug_reports_to = NULL, +}; + +VgNeeds VG_(needs) = { + .core_errors = False, + .skin_errors = False, + .libc_freeres = False, + .sizeof_shadow_block = 0, + .basic_block_discards = False, + .shadow_regs = False, + .command_line_options = False, + .client_requests = False, + .extended_UCode = False, + .syscall_wrapper = False, + .alternative_free = False, + .sanity_checks = False, + .data_syms = False, +}; + +VgTrackEvents VG_(track_events) = { + /* Memory events */ + .new_mem_startup = NULL, + .new_mem_heap = NULL, + .new_mem_stack = NULL, + .new_mem_stack_aligned = NULL, + .new_mem_stack_signal = NULL, + .new_mem_brk = NULL, + .new_mem_mmap = NULL, + + .copy_mem_heap = NULL, + .copy_mem_remap = NULL, + .change_mem_mprotect = NULL, + + .ban_mem_heap = NULL, + .ban_mem_stack = NULL, + + .die_mem_heap = NULL, + .die_mem_stack = NULL, + .die_mem_stack_aligned = NULL, + .die_mem_stack_signal = NULL, + .die_mem_brk = NULL, + .die_mem_munmap = NULL, + + .bad_free = NULL, + .mismatched_free = NULL, + + .pre_mem_read = NULL, + .pre_mem_read_asciiz = NULL, + .pre_mem_write = NULL, + .post_mem_write = NULL, + + /* Scheduler events */ + .thread_run = NULL, + + /* Mutex events */ + .post_mutex_lock = NULL, + .post_mutex_unlock = NULL, +}; + +/* static */ +void VG_(sanity_check_needs) ( void) +{ +#define CHECK_NOT(var, value) \ + if ((var)==(value)) { \ + VG_(printf)("\nSkin error: `%s' not initialised\n", \ + VG__STRING(var)); \ + VG_(skin_panic)("Uninitialised needs field\n"); \ + } + + CHECK_NOT(VG_(details).name, NULL); + /* Nb: .version can be NULL */ + CHECK_NOT(VG_(details).description, NULL); + CHECK_NOT(VG_(details).copyright_author, NULL); + CHECK_NOT(VG_(details).bug_reports_to, NULL); + +#undef CHECK_NOT +#undef INVALID_Bool +} + +/*--------------------------------------------------------------------*/ +/* Setting details */ + +/* Use macro because they're so repetitive */ +#define DETAILS(detail) \ + extern void VG_(details_##detail)(Char* detail) \ + { \ + VG_(details).detail = detail; \ + } + +DETAILS(name) +DETAILS(version) +DETAILS(description) +DETAILS(copyright_author) 
+DETAILS(bug_reports_to) + +/*--------------------------------------------------------------------*/ +/* Setting needs */ + +/* Use macro because they're so repetitive */ +#define NEEDS(need) \ + extern void VG_(needs_##need)(void) \ + { \ + VG_(needs).need = True; \ + } + +NEEDS(libc_freeres) +NEEDS(core_errors) +NEEDS(skin_errors) +NEEDS(basic_block_discards) +NEEDS(shadow_regs) +NEEDS(command_line_options) +NEEDS(client_requests) +NEEDS(extended_UCode) +NEEDS(syscall_wrapper) + +extern void VG_(needs_sizeof_shadow_block)(Int size) +{ + VG_(needs).sizeof_shadow_block = size; +} + +NEEDS(alternative_free) +NEEDS(sanity_checks) +NEEDS(data_syms) + +/*--------------------------------------------------------------------*/ +#define TRACK(event, args...) \ + void VG_(track_##event)(void (*f)(args)) \ + { \ + VG_(track_events).event = f; \ + } + +TRACK(new_mem_startup, Addr a, UInt len, Bool rr, Bool ww, Bool xx) +TRACK(new_mem_heap, Addr a, UInt len, Bool is_inited) +TRACK(new_mem_stack, Addr a, UInt len) +TRACK(new_mem_stack_aligned, Addr a, UInt len) +TRACK(new_mem_stack_signal, Addr a, UInt len) +TRACK(new_mem_brk, Addr a, UInt len) +TRACK(new_mem_mmap, Addr a, UInt len, Bool rr, Bool ww, Bool xx) + +TRACK(copy_mem_heap, Addr from, Addr to, UInt len) +TRACK(copy_mem_remap, Addr from, Addr to, UInt len) +TRACK(change_mem_mprotect, Addr a, UInt len, Bool rr, Bool ww, Bool xx) + +TRACK(ban_mem_heap, Addr a, UInt len) +TRACK(ban_mem_stack, Addr a, UInt len) + +TRACK(die_mem_heap, Addr a, UInt len) +TRACK(die_mem_stack, Addr a, UInt len) +TRACK(die_mem_stack_aligned, Addr a, UInt len) +TRACK(die_mem_stack_signal, Addr a, UInt len) +TRACK(die_mem_brk, Addr a, UInt len) +TRACK(die_mem_munmap, Addr a, UInt len) + +TRACK(bad_free, ThreadState* tst, Addr a) +TRACK(mismatched_free, ThreadState* tst, Addr a) + +TRACK(pre_mem_read, CorePart part, ThreadState* tst, Char* s, Addr a, + UInt size) +TRACK(pre_mem_read_asciiz, CorePart part, ThreadState* tst, Char* s, Addr a) +TRACK(pre_mem_write, CorePart part, ThreadState* tst, Char* s, Addr a, + UInt size) +TRACK(post_mem_write, Addr a, UInt size) + +TRACK(thread_run, ThreadId tid) + +TRACK(post_thread_create, ThreadId tid, ThreadId child) +TRACK(post_thread_join, ThreadId joiner, ThreadId joinee) + +TRACK( pre_mutex_lock, ThreadId tid, void* /*pthread_mutex_t* */ mutex) +TRACK(post_mutex_lock, ThreadId tid, void* /*pthread_mutex_t* */ mutex) +TRACK(post_mutex_unlock, ThreadId tid, void* /*pthread_mutex_t* */ mutex) + +/*--------------------------------------------------------------------*/ +/* UCodeBlocks */ + +Int VG_(get_num_instrs) ( UCodeBlock* cb ) +{ + return cb->used; +} + +Int VG_(get_num_temps) ( UCodeBlock* cb ) +{ + return cb->nextTemp; +} + +UInstr* VG_(get_instr) ( UCodeBlock* cb, Int i ) +{ + return & cb->instrs[i]; +} + +UInstr* VG_(get_last_instr) ( UCodeBlock* cb ) +{ + return & cb->instrs[cb->used-1]; +} + +/*--------------------------------------------------------------------*/ +/* Suppressions */ + +SuppKind VG_(get_supp_kind) ( Supp* su ) +{ + return su->skind; +} + +Char* VG_(get_supp_string) ( Supp* su ) +{ + return su->string; +} + +void* VG_(get_supp_extra) ( Supp* su ) +{ + return su->extra; +} + + +void VG_(set_supp_kind) ( Supp* su, SuppKind skind ) +{ + su->skind = skind; +} + +void VG_(set_supp_string) ( Supp* su, Char* string ) +{ + su->string = string; +} + +void VG_(set_supp_extra) ( Supp* su, void* extra ) +{ + su->extra = extra; +} + +/*--------------------------------------------------------------------*/ +/* Errors */ 
+ +ErrorKind VG_(get_error_kind) ( Error* err ) +{ + return err->ekind; +} + +Addr VG_(get_error_address) ( Error* err ) +{ + return err->addr; +} + +Char* VG_(get_error_string) ( Error* err ) +{ + return err->string; +} + +void* VG_(get_error_extra) ( Error* err ) +{ + return err->extra; +} + +/*--------------------------------------------------------------------*/ +/* ShadowChunks */ + +UInt VG_(get_sc_size) ( ShadowChunk* sc ) +{ + return sc->size; +} + +Addr VG_(get_sc_data) ( ShadowChunk* sc ) +{ + return sc->data; +} + +UInt VG_(get_sc_extra) ( ShadowChunk* sc, UInt i ) +{ + vg_assert(i < VG_(needs).sizeof_shadow_block); + return sc->extra[i]; +} + +ShadowChunk* VG_(get_sc_next) ( ShadowChunk* sc ) +{ + return sc->next; +} + +void VG_(set_sc_extra) ( ShadowChunk* sc, UInt i, UInt word ) +{ + vg_assert(i < VG_(needs).sizeof_shadow_block); + sc->extra[i] = word; +} + +void VG_(set_sc_next) ( ShadowChunk* sc, ShadowChunk* next ) +{ + sc->next = next; +} + + +/*--------------------------------------------------------------------*/ +/*--- end vg_needs.c ---*/ +/*--------------------------------------------------------------------*/ + + diff --git a/coregrind/vg_to_ucode.c b/coregrind/vg_to_ucode.c index 99bf7b124..601b2ee4a 100644 --- a/coregrind/vg_to_ucode.c +++ b/coregrind/vg_to_ucode.c @@ -236,19 +236,6 @@ __inline__ static UInt getSDisp ( Int size, Addr eip ) /*--- Flag-related helpers. ---*/ /*------------------------------------------------------------*/ -/* For the last uinsn inserted into cb, set the read, written and - undefined flags. Undefined flags are counted as written, but it - seems worthwhile to distinguish them. -*/ -static __inline__ void uFlagsRWU ( UCodeBlock* cb, - FlagSet rr, FlagSet ww, FlagSet uu ) -{ - VG_(set_flag_RW)( - &LAST_UINSTR(cb), rr, VG_UNION_FLAG_SETS(ww,uu) - ); -} - - static void setFlagsFromUOpcode ( UCodeBlock* cb, Int uopc ) { switch (uopc) { @@ -275,7 +262,8 @@ static void setFlagsFromUOpcode ( UCodeBlock* cb, Int uopc ) } } -static __inline__ void uCond ( UCodeBlock* cb, Condcode cond ) +__inline__ +void VG_(set_cond_field) ( UCodeBlock* cb, Condcode cond ) { LAST_UINSTR(cb).cond = cond; } diff --git a/coregrind/vg_translate.c b/coregrind/vg_translate.c index 0b6175ab2..8f5ac3d2b 100644 --- a/coregrind/vg_translate.c +++ b/coregrind/vg_translate.c @@ -42,6 +42,7 @@ /*--- Basics ---*/ /*------------------------------------------------------------*/ +/* This one is called by the core */ UCodeBlock* VG_(alloc_UCodeBlock) ( void ) { UCodeBlock* cb = VG_(arena_malloc)(VG_AR_CORE, sizeof(UCodeBlock)); @@ -50,6 +51,15 @@ UCodeBlock* VG_(alloc_UCodeBlock) ( void ) return cb; } +/* This one is called by skins */ +UCodeBlock* VG_(setup_UCodeBlock) ( UCodeBlock* cb_in ) +{ + UCodeBlock* cb = VG_(arena_malloc)(VG_AR_CORE, sizeof(UCodeBlock)); + cb->used = cb->size = 0; + cb->nextTemp = cb_in->nextTemp; + cb->instrs = NULL; + return cb; +} void VG_(free_UCodeBlock) ( UCodeBlock* cb ) { @@ -205,17 +215,6 @@ void copyAuxInfoFromTo ( UInstr* src, UInstr* dst ) } -/* Set the flag R/W sets on a uinstr. */ -void VG_(set_flag_RW) ( UInstr* u, FlagSet fr, FlagSet fw ) -{ - /* VG_(pp_UInstr)(-1,u); */ - vg_assert(fr == (fr & FlagsALL)); - vg_assert(fw == (fw & FlagsALL)); - u->flags_r = fr; - u->flags_w = fw; -} - - /* Set the lit32 field of the most recent uinsn. 
*/ void VG_(set_lit_field) ( UCodeBlock* cb, UInt lit32 ) { @@ -235,6 +234,23 @@ void VG_(set_ccall_fields) ( UCodeBlock* cb, Addr fn, UChar argc, UChar LAST_UINSTR(cb).has_ret_val = has_ret_val; } +/* For the last uinsn inserted into cb, set the read, written and + undefined flags. Undefined flags are counted as written, but it + seems worthwhile to distinguish them. +*/ +__inline__ +void VG_(set_flag_fields) ( UCodeBlock* cb, + FlagSet rr, FlagSet ww, FlagSet uu ) +{ + FlagSet uw = VG_UNION_FLAG_SETS(ww,uu); + + vg_assert(rr == (rr & FlagsALL)); + vg_assert(uw == (uw & FlagsALL)); + LAST_UINSTR(cb).flags_r = rr; + LAST_UINSTR(cb).flags_w = uw; +} + + Bool VG_(any_flag_use) ( UInstr* u ) { return (u->flags_r != FlagsEmpty @@ -1075,18 +1091,18 @@ void VG_(pp_UCodeBlock) ( UCodeBlock* cb, Char* title ) /*------------------------------------------------------------*/ /* Get the temp/reg use of a uinstr, parking them in an array supplied by - the caller, which is assumed to be big enough. Return the number - of entries. Insns which read _and_ write a register wind up - mentioning it twice. Entries are placed in the array in program - order, so that if a reg is read-modified-written, it appears first - as a read and then as a write. 'tag' indicates whether we are looking at - TempRegs or RealRegs. + the caller (regs), which is assumed to be big enough. Return the number + of entries. Written regs are indicated in parallel array isWrites. + Insns which read _and_ write a register wind up mentioning it twice. + Entries are placed in the array in program order, so that if a reg is + read-modified-written, it appears first as a read and then as a write. + 'tag' indicates whether we are looking at TempRegs or RealRegs. */ __inline__ -Int VG_(get_reg_usage) ( UInstr* u, Tag tag, RegUse* arr ) +Int VG_(get_reg_usage) ( UInstr* u, Tag tag, Int* regs, Bool* isWrites ) { -# define RD(ono) VG_UINSTR_READS_REG(ono) -# define WR(ono) VG_UINSTR_WRITES_REG(ono) +# define RD(ono) VG_UINSTR_READS_REG(ono, regs, isWrites) +# define WR(ono) VG_UINSTR_WRITES_REG(ono, regs, isWrites) Int n = 0; switch (u->opcode) { @@ -1142,7 +1158,7 @@ Int VG_(get_reg_usage) ( UInstr* u, Tag tag, RegUse* arr ) default: if (VG_(needs).extended_UCode) - return SK_(get_Xreg_usage)(u, tag, arr); + return SK_(get_Xreg_usage)(u, tag, regs, isWrites); else { VG_(printf)("unhandled opcode: %u. Perhaps " "VG_(needs).extended_UCode should be set?", @@ -1160,26 +1176,26 @@ Int VG_(get_reg_usage) ( UInstr* u, Tag tag, RegUse* arr ) /* Change temp regs in u into real regs, as directed by the * temps[i]-->reals[i] mapping. 
*/ static __inline__ -void patchUInstr ( UInstr* u, RegUse temps[], UInt reals[], Int n_tmap ) +void patchUInstr ( UInstr* u, Int temps[], UInt reals[], Int n_tmap ) { Int i; if (u->tag1 == TempReg) { for (i = 0; i < n_tmap; i++) - if (temps[i].num == u->val1) break; + if (temps[i] == u->val1) break; if (i == n_tmap) VG_(core_panic)("patchUInstr(1)"); u->tag1 = RealReg; u->val1 = reals[i]; } if (u->tag2 == TempReg) { for (i = 0; i < n_tmap; i++) - if (temps[i].num == u->val2) break; + if (temps[i] == u->val2) break; if (i == n_tmap) VG_(core_panic)("patchUInstr(2)"); u->tag2 = RealReg; u->val2 = reals[i]; } if (u->tag3 == TempReg) { for (i = 0; i < n_tmap; i++) - if (temps[i].num == u->val3) break; + if (temps[i] == u->val3) break; if (i == n_tmap) VG_(core_panic)("patchUInstr(3)"); u->tag3 = RealReg; u->val3 = reals[i]; @@ -1255,10 +1271,12 @@ static __inline__ Bool uInstrMentionsTempReg ( UInstr* u, Int tempreg ) { Int i, k; - RegUse tempUse[3]; - k = VG_(get_reg_usage) ( u, TempReg, &tempUse[0] ); + Int tempUse[3]; + Bool notUsed[3]; + + k = VG_(get_reg_usage) ( u, TempReg, &tempUse[0], &notUsed[0] ); for (i = 0; i < k; i++) - if (tempUse[i].num == tempreg) + if (tempUse[i] == tempreg) return True; return False; } @@ -1280,7 +1298,8 @@ static void vg_improve ( UCodeBlock* cb ) Int i, j, k, m, n, ar, tr, told, actual_areg; Int areg_map[8]; Bool annul_put[8]; - RegUse tempUse[3]; + Int tempUse[3]; + Bool isWrites[3]; UInstr* u; Bool wr; Int* last_live_before; @@ -1307,12 +1326,12 @@ static void vg_improve ( UCodeBlock* cb ) for (i = cb->used-1; i >= 0; i--) { u = &cb->instrs[i]; - k = VG_(get_reg_usage)(u, TempReg, &tempUse[0]); + k = VG_(get_reg_usage)(u, TempReg, &tempUse[0], &isWrites[0]); /* For each temp usage ... bwds in program order. */ for (j = k-1; j >= 0; j--) { - tr = tempUse[j].num; - wr = tempUse[j].isWrite; + tr = tempUse[j]; + wr = isWrites[j]; if (last_live_before[tr] == -1) { vg_assert(tr >= 0 && tr < cb->nextTemp); last_live_before[tr] = wr ? (i+1) : i; @@ -1413,12 +1432,12 @@ static void vg_improve ( UCodeBlock* cb ) } /* boring insn; invalidate any mappings to temps it writes */ - k = VG_(get_reg_usage)(u, TempReg, &tempUse[0]); + k = VG_(get_reg_usage)(u, TempReg, &tempUse[0], &isWrites[0]); for (j = 0; j < k; j++) { - wr = tempUse[j].isWrite; + wr = isWrites[j]; if (!wr) continue; - tr = tempUse[j].num; + tr = tempUse[j]; for (m = 0; m < 8; m++) if (areg_map[m] == tr) areg_map[m] = -1; } @@ -1621,7 +1640,8 @@ UCodeBlock* vg_do_register_allocation ( UCodeBlock* c1 ) Int i, j, k, m, r, tno, max_ss_no; Bool wr, defer, isRead, spill_reqd; UInt realUse[3]; - RegUse tempUse[3]; + Int tempUse[3]; + Bool isWrites[3]; UCodeBlock* c2; /* Used to denote ... well, "no value" in this fn. */ @@ -1646,13 +1666,14 @@ UCodeBlock* vg_do_register_allocation ( UCodeBlock* c1 ) /* Scan fwds to establish live ranges. */ for (i = 0; i < c1->used; i++) { - k = VG_(get_reg_usage)(&c1->instrs[i], TempReg, &tempUse[0]); + k = VG_(get_reg_usage)(&c1->instrs[i], TempReg, &tempUse[0], + &isWrites[0]); vg_assert(k >= 0 && k <= 3); /* For each temp usage ... fwds in program order */ for (j = 0; j < k; j++) { - tno = tempUse[j].num; - wr = tempUse[j].isWrite; + tno = tempUse[j]; + wr = isWrites[j]; if (wr) { /* Writes hold a reg live until after this insn. */ if (temp_info[tno].live_after == VG_NOTHING) @@ -1781,7 +1802,8 @@ UCodeBlock* vg_do_register_allocation ( UCodeBlock* c1 ) generate spill stores since we may have to evict some TempRegs currently in real regs. Also generates spill loads.
*/ - k = VG_(get_reg_usage)(&c1->instrs[i], TempReg, &tempUse[0]); + k = VG_(get_reg_usage)(&c1->instrs[i], TempReg, &tempUse[0], + &isWrites[0]); vg_assert(k >= 0 && k <= 3); /* For each ***different*** temp mentioned in the insn .... */ for (j = 0; j < k; j++) { /* First check if the temp is mentioned again later; if so, ignore this mention. We only want to process each temp used by the insn once, even if it is mentioned more than once. */ defer = False; - tno = tempUse[j].num; + tno = tempUse[j]; for (m = j+1; m < k; m++) - if (tempUse[m].num == tno) + if (tempUse[m] == tno) defer = True; if (defer) continue; - /* Now we're trying to find a register for tempUse[j].num. + /* Now we're trying to find a register for tempUse[j]. First of all, if it already has a register assigned, we don't need to do anything more. */ if (temp_info[tno].real_no != VG_NOTHING) @@ -1825,7 +1847,7 @@ UCodeBlock* vg_do_register_allocation ( UCodeBlock* c1 ) Select r in 0 .. VG_MAX_REALREGS-1 such that real_to_temp[r] is not mentioned in - tempUse[0 .. k-1].num, since it would be just plain + tempUse[0 .. k-1], since it would be just plain wrong to eject some other TempReg which we need to use in this insn. @@ -1836,7 +1858,7 @@ UCodeBlock* vg_do_register_allocation ( UCodeBlock* c1 ) for (r = 0; r < VG_MAX_REALREGS; r++) { is_spill_cand[r] = True; for (m = 0; m < k; m++) { - if (real_to_temp[r] == tempUse[m].num) { + if (real_to_temp[r] == tempUse[m]) { is_spill_cand[r] = False; break; } @@ -1898,7 +1920,7 @@ UCodeBlock* vg_do_register_allocation ( UCodeBlock* c1 ) /* Decide if tno is read. */ isRead = False; for (m = 0; m < k; m++) - if (tempUse[m].num == tno && !tempUse[m].isWrite) + if (tempUse[m] == tno && !isWrites[m]) isRead = True; /* If so, generate a spill load. */ @@ -1922,7 +1944,7 @@ UCodeBlock* vg_do_register_allocation ( UCodeBlock* c1 ) and use patchUInstr to convert its rTempRegs into realregs. */ for (j = 0; j < k; j++) - realUse[j] = VG_(rank_to_realreg)(temp_info[tempUse[j].num].real_no); + realUse[j] = VG_(rank_to_realreg)(temp_info[tempUse[j]].real_no); VG_(copy_UInstr)(c2, &c1->instrs[i]); patchUInstr(&LAST_UINSTR(c2), &tempUse[0], &realUse[0], k); @@ -1951,7 +1973,8 @@ static void vg_realreg_liveness_analysis ( UCodeBlock* cb ) { Int i, j, k; RRegSet rregs_live; - RegUse regUse[3]; + Int regUse[3]; + Bool isWrites[3]; UInstr* u; /* All regs are dead at the end of the block */ @@ -1962,16 +1985,16 @@ static void vg_realreg_liveness_analysis ( UCodeBlock* cb ) u->regs_live_after = rregs_live; - k = VG_(get_reg_usage)(u, RealReg, regUse); + k = VG_(get_reg_usage)(u, RealReg, &regUse[0], &isWrites[0]); /* For each reg usage ... bwds in program order. Variable is live before this UInstr if it is read by this UInstr. - Note that regUse[j].num holds the Intel reg number, so we must + Note that regUse[j] holds the Intel reg number, so we must convert it to our rank number.
*/ for (j = k-1; j >= 0; j--) { - SET_RREG_LIVENESS ( VG_(realreg_to_rank)(regUse[j].num), + SET_RREG_LIVENESS ( VG_(realreg_to_rank)(regUse[j]), rregs_live, - !regUse[j].isWrite ); + !isWrites[j] ); } } } diff --git a/example/ex_main.c b/example/ex_main.c index 281d98035..89cd23121 100644 --- a/example/ex_main.c +++ b/example/ex_main.c @@ -9,12 +9,12 @@ VG_DETERMINE_INTERFACE_VERSION void SK_(pre_clo_init)(VgDetails* details, VgNeeds* needs, VgTrackEvents* track) { - details->name = "Example"; - details->version = "0.0.1"; - details->description = "an example Valgrind skin"; - details->copyright_author = - "Copyright (C) 2002, and put in the public domain, by Santa Claus."; - details->bug_reports_to = "santa.claus@northpole.org"; + VG_(details_name) ("Example"); + VG_(details_version) ("0.0.1"); + VG_(details_description) ("an example Valgrind skin"); + VG_(details_copyright_author)( + "Copyright (C) 2002, and put in the public domain, by Santa Claus."); + VG_(details_bug_reports_to) ("santa.claus@northpole.org"); /* No needs, no core events to track */ } diff --git a/helgrind/hg_main.c b/helgrind/hg_main.c index b7a849a5f..f9ed3a0f7 100644 --- a/helgrind/hg_main.c +++ b/helgrind/hg_main.c @@ -1291,34 +1291,34 @@ static const LockSet *ls_union(const LockSet *a, const LockSet *b) #define SHADOW_EXTRA 2 -static __inline__ -void set_sc_where( ShadowChunk* sc, ExeContext* ec ) -{ - sc->skin_extra[0] = (UInt)ec; -} - static __inline__ ExeContext *get_sc_where( ShadowChunk* sc ) { - return (ExeContext*)sc->skin_extra[0]; -} - -static __inline__ -void set_sc_tid(ShadowChunk *sc, ThreadId tid) -{ - sc->skin_extra[1] = (UInt)tid; + return (ExeContext*)VG_(get_sc_extra)(sc, 0); } static __inline__ ThreadId get_sc_tid(ShadowChunk *sc) { - return (ThreadId)sc->skin_extra[1]; + return (ThreadId)VG_(get_sc_extra)(sc, 1); +} + +static __inline__ +void set_sc_where( ShadowChunk* sc, ExeContext* ec ) +{ + VG_(set_sc_extra)(sc, 0, (UInt)ec); +} + +static __inline__ +void set_sc_tid( ShadowChunk* sc, ThreadId tid ) +{ + VG_(set_sc_extra)(sc, 1, (UInt)tid); } void SK_(complete_shadow_chunk) ( ShadowChunk* sc, ThreadState* tst ) { - set_sc_where( sc, VG_(get_ExeContext) ( tst ) ); - set_sc_tid(sc, VG_(get_tid_from_ThreadState(tst))); + set_sc_where ( sc, VG_(get_ExeContext)(tst) ); + set_sc_tid ( sc, VG_(get_tid_from_ThreadState)(tst) ); } /*------------------------------------------------------------*/ @@ -1439,8 +1439,8 @@ static ShadowChunk *freechunks[N_FREED_CHUNKS]; void SK_(alt_free) ( ShadowChunk* sc, ThreadState* tst ) { ThreadId tid = VG_(get_tid_from_ThreadState)(tst); - Addr start = sc->data; - Addr end = start + sc->size; + Addr start = VG_(get_sc_data)(sc); + Addr end = start + VG_(get_sc_size)(sc); Bool deadmx(Mutex *mx) { if (mx->state != MxDead) @@ -1913,19 +1913,18 @@ UCodeBlock* SK_(instrument) ( UCodeBlock* cb_in, Addr not_used ) Bool *stackref = NULL; Bool locked = False; /* lock prefix */ - cb = VG_(alloc_UCodeBlock)(); - cb->nextTemp = cb_in->nextTemp; + cb = VG_(setup_UCodeBlock)(cb_in); /* stackref[] is used for super-simple value tracking to keep note of which tempregs currently hold a value which is derived from ESP or EBP, and is therefore likely stack-relative if used as the address for LOAD or STORE. 
*/ - ntemps = cb->nextTemp; + ntemps = VG_(get_num_temps)(cb); stackref = VG_(malloc)(sizeof(*stackref) * ntemps); VG_(memset)(stackref, 0, sizeof(*stackref) * ntemps); - for (i = 0; i < cb_in->used; i++) { - u_in = &cb_in->instrs[i]; + for (i = 0; i < VG_(get_num_instrs)(cb_in); i++) { + u_in = VG_(get_instr)(cb_in, i); switch (u_in->opcode) { @@ -2200,7 +2199,8 @@ static void describe_addr ( Addr a, AddrInfo* ai ) /* Closure for searching malloc'd and free'd lists */ Bool addr_is_in_block(ShadowChunk *sh_ch) { - return VG_(addr_is_in_block) ( a, sh_ch->data, sh_ch->size ); + return VG_(addr_is_in_block) ( a, VG_(get_sc_data)(sh_ch), + VG_(get_sc_size)(sh_ch) ); } /* Search for it in segments */ @@ -2240,8 +2240,8 @@ static void describe_addr ( Addr a, AddrInfo* ai ) sc = VG_(any_matching_mallocd_ShadowChunks)(addr_is_in_block); if (NULL != sc) { ai->akind = Mallocd; - ai->blksize = sc->size; - ai->rwoffset = (Int)(a) - (Int)(sc->data); + ai->blksize = VG_(get_sc_size)(sc); + ai->rwoffset = (Int)(a) - (Int)(VG_(get_sc_data)(sc)); ai->lastchange = get_sc_where(sc); ai->lasttid = get_sc_tid(sc); return; @@ -2249,14 +2249,17 @@ static void describe_addr ( Addr a, AddrInfo* ai ) /* Look in recently freed memory */ for(i = 0; i < N_FREED_CHUNKS; i++) { + Addr sc_data = VG_(get_sc_data)(sc); + UInt sc_size = VG_(get_sc_size)(sc); + sc = freechunks[i]; if (sc == NULL) continue; - if (a >= sc->data && a < sc->data+sc->size) { + if (a >= sc_data && a < sc_data + sc_size) { ai->akind = Freed; - ai->blksize = sc->size; - ai->rwoffset = a - sc->data; + ai->blksize = sc_size; + ai->rwoffset = a - sc_data; ai->lastchange = get_sc_where(sc); ai->lasttid = get_sc_tid(sc); return; @@ -2269,19 +2272,19 @@ static void describe_addr ( Addr a, AddrInfo* ai ) } -/* Creates a copy of the err_extra, updates the copy with address info if - necessary, sticks the copy into the SkinError. */ -void SK_(dup_extra_and_update)(SkinError* err) +/* Creates a copy of the `extra' part, updates the copy with address info if + necessary, and returns the copy. 
*/ +void* SK_(dup_extra_and_update)(Error* err) { - HelgrindError* err_extra; + HelgrindError* new_extra; - err_extra = VG_(malloc)(sizeof(HelgrindError)); - *err_extra = *((HelgrindError*)err->extra); + new_extra = VG_(malloc)(sizeof(HelgrindError)); + *new_extra = *((HelgrindError*)VG_(get_error_extra)(err)); - if (err_extra->addrinfo.akind == Undescribed) - describe_addr ( err->addr, &(err_extra->addrinfo) ); + if (new_extra->addrinfo.akind == Undescribed) + describe_addr ( VG_(get_error_address)(err), &(new_extra->addrinfo) ); - err->extra = err_extra; + return new_extra; } static void record_eraser_error ( ThreadState *tst, Addr a, Bool is_write, @@ -2346,21 +2349,24 @@ static void record_lockgraph_error(ThreadId tid, Mutex *mutex, mutex->mutexp, "", &err_extra); } -Bool SK_(eq_SkinError) ( VgRes not_used, - SkinError* e1, SkinError* e2 ) +Bool SK_(eq_SkinError) ( VgRes not_used, Error* e1, Error* e2 ) { - sk_assert(e1->ekind == e2->ekind); + Char *e1s, *e2s; - switch(e1->ekind) { + sk_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2)); + + switch (VG_(get_error_kind)(e1)) { case EraserErr: - return e1->addr == e2->addr; + return VG_(get_error_address)(e1) == VG_(get_error_address)(e2); case MutexErr: - return e1->addr == e2->addr; + return VG_(get_error_address)(e1) == VG_(get_error_address)(e2); } - if (e1->string != e2->string) return False; - if (0 != VG_(strcmp)(e1->string, e2->string)) return False; + e1s = VG_(get_error_string)(e1); + e2s = VG_(get_error_string)(e2); + if (e1s != e2s) return False; + if (0 != VG_(strcmp)(e1s, e2s)) return False; return True; } @@ -2443,21 +2449,23 @@ static Char *lockset_str(const Char *prefix, const LockSet *lockset) return buf; } -void SK_(pp_SkinError) ( SkinError* err, void (*pp_ExeContext)(void) ) +void SK_(pp_SkinError) ( Error* err, void (*pp_ExeContext)(void) ) { - HelgrindError *extra = (HelgrindError *)err->extra; + HelgrindError *extra = (HelgrindError *)VG_(get_error_extra)(err); Char buf[100]; Char *msg = buf; const LockSet *ls; *msg = '\0'; - switch(err->ekind) { - case EraserErr: + switch(VG_(get_error_kind)(err)) { + case EraserErr: { + Addr err_addr = VG_(get_error_address)(err); + VG_(message)(Vg_UserMsg, "Possible data race %s variable at %p %(y", - err->string, err->addr, err->addr ); + VG_(get_error_string)(err), err_addr, err_addr); pp_ExeContext(); - pp_AddrInfo(err->addr, &extra->addrinfo); + pp_AddrInfo(err_addr, &extra->addrinfo); switch(extra->prevstate.state) { case Vge_Virgin: @@ -2499,7 +2507,7 @@ void SK_(pp_SkinError) ( SkinError* err, void (*pp_ExeContext)(void) ) Addr eip = extra->lasttouched.eip; VG_(message)(Vg_UserMsg, " Word at %p last changed state from %s by thread %u", - err->addr, + err_addr, pp_state(extra->lasttouched.state), unpackTLS(extra->lasttouched.tls)->tid); @@ -2514,33 +2522,36 @@ void SK_(pp_SkinError) ( SkinError* err, void (*pp_ExeContext)(void) ) } } else if (clo_execontext == EC_All && extra->lasttouched.ec != NULL) { VG_(message)(Vg_UserMsg, " Word at %p last changed state from %s in tid %u", - err->addr, + err_addr, pp_state(extra->lasttouched.state), unpackTLS(extra->lasttouched.tls)->tid); VG_(pp_ExeContext)(extra->lasttouched.ec); } - break; + } case MutexErr: VG_(message)(Vg_UserMsg, "Mutex problem at %p%(y trying to %s", - err->addr, err->addr, err->string ); + VG_(get_error_address)(err), + VG_(get_error_address)(err), + VG_(get_error_string)(err)); pp_ExeContext(); if (extra->lasttouched.ec != NULL) { VG_(message)(Vg_UserMsg, " last touched by thread %d", 
extra->lasttid); VG_(pp_ExeContext)(extra->lasttouched.ec); } - pp_AddrInfo(err->addr, &extra->addrinfo); + pp_AddrInfo(VG_(get_error_address)(err), &extra->addrinfo); break; case LockGraphErr: { const LockSet *heldset = extra->held_lockset; + Addr err_addr = VG_(get_error_address)(err); Int i; msg = lockset_str(NULL, heldset); VG_(message)(Vg_UserMsg, "Mutex %p%(y locked in inconsistent order", - err->addr, err->addr); + err_addr, err_addr); pp_ExeContext(); VG_(message)(Vg_UserMsg, " while holding locks %s", msg); @@ -2568,10 +2579,10 @@ void SK_(pp_SkinError) ( SkinError* err, void (*pp_ExeContext)(void) ) } -Bool SK_(recognised_suppression) ( Char* name, SuppKind *skind ) +Bool SK_(recognised_suppression) ( Char* name, Supp *su ) { if (0 == VG_(strcmp)(name, "Eraser")) { - *skind = EraserSupp; + VG_(set_supp_kind)(su, EraserSupp); return True; } else { return False; @@ -2579,8 +2590,7 @@ Bool SK_(recognised_suppression) ( Char* name, SuppKind *skind ) } -Bool SK_(read_extra_suppression_info) ( Int fd, Char* buf, - Int nBuf, SkinSupp* s ) +Bool SK_(read_extra_suppression_info) ( Int fd, Char* buf, Int nBuf, Supp* su ) { /* do nothing -- no extra suppression info present. Return True to indicate nothing bad happened. */ @@ -2588,10 +2598,10 @@ Bool SK_(read_extra_suppression_info) ( Int fd, Char* buf, } -Bool SK_(error_matches_suppression)(SkinError* err, SkinSupp* su) +Bool SK_(error_matches_suppression)(Error* err, Supp* su) { - sk_assert( su->skind == EraserSupp); - sk_assert(err->ekind == EraserErr); + sk_assert(VG_(get_supp_kind) (su) == EraserSupp); + sk_assert(VG_(get_error_kind)(err) == EraserErr); return True; } @@ -3053,58 +3063,58 @@ Bool SK_(handle_client_request)(ThreadState *tst, UInt *args, UInt *ret) /*--- Setup ---*/ /*--------------------------------------------------------------------*/ -void SK_(pre_clo_init)(VgDetails* details, VgNeeds* needs, VgTrackEvents* track) +void SK_(pre_clo_init)(void) { Int i; LockSet *empty; - details->name = "Helgrind"; - details->version = NULL; - details->description = "a data race detector"; - details->copyright_author = - "Copyright (C) 2002, and GNU GPL'd, by Nicholas Nethercote."; - details->bug_reports_to = "njn25@cam.ac.uk"; + VG_(details_name) ("Helgrind"); + VG_(details_version) (NULL); + VG_(details_description) ("a data race detector"); + VG_(details_copyright_author)( + "Copyright (C) 2002, and GNU GPL'd, by Nicholas Nethercote."); + VG_(details_bug_reports_to) ("njn25@cam.ac.uk"); - needs->core_errors = True; - needs->skin_errors = True; - needs->data_syms = True; - needs->sizeof_shadow_block = SHADOW_EXTRA; - needs->alternative_free = True; - needs->client_requests = True; - needs->command_line_options = True; + VG_(needs_core_errors)(); + VG_(needs_skin_errors)(); + VG_(needs_data_syms)(); + VG_(needs_sizeof_shadow_block)(SHADOW_EXTRA); + VG_(needs_alternative_free)(); + VG_(needs_client_requests)(); + VG_(needs_command_line_options)(); - track->new_mem_startup = & eraser_new_mem_startup; - track->new_mem_heap = & eraser_new_mem_heap; - track->new_mem_stack = & eraser_new_mem_stack_private; - track->new_mem_stack_aligned = & eraser_new_mem_stack_private; - track->new_mem_stack_signal = & eraser_new_mem_stack_private; - track->new_mem_brk = & make_writable; - track->new_mem_mmap = & eraser_new_mem_startup; + VG_(track_new_mem_startup) (& eraser_new_mem_startup); + VG_(track_new_mem_heap) (& eraser_new_mem_heap); - track->copy_mem_heap = & copy_address_range_state; - track->change_mem_mprotect = & eraser_set_perms; + /* 
stack ones not decided until VG_(post_clo_init)() */ - track->ban_mem_heap = NULL; - track->ban_mem_stack = NULL; + VG_(track_new_mem_brk) (& make_writable); + VG_(track_new_mem_mmap) (& eraser_new_mem_startup); - track->die_mem_heap = NULL; - track->die_mem_stack = NULL; - track->die_mem_stack_aligned = NULL; - track->die_mem_stack_signal = NULL; - track->die_mem_brk = NULL; - track->die_mem_munmap = NULL; + VG_(track_copy_mem_heap) (& copy_address_range_state); + VG_(track_change_mem_mprotect) (& eraser_set_perms); - track->pre_mem_read = & eraser_pre_mem_read; - track->pre_mem_read_asciiz = & eraser_pre_mem_read_asciiz; - track->pre_mem_write = & eraser_pre_mem_write; - track->post_mem_write = NULL; + VG_(track_ban_mem_heap) (NULL); + VG_(track_ban_mem_stack) (NULL); - track->pre_mutex_lock = & eraser_pre_mutex_lock; - track->post_mutex_lock = & eraser_post_mutex_lock; - track->post_mutex_unlock = & eraser_post_mutex_unlock; + VG_(track_die_mem_heap) (NULL); + VG_(track_die_mem_stack) (NULL); + VG_(track_die_mem_stack_aligned)(NULL); + VG_(track_die_mem_stack_signal) (NULL); + VG_(track_die_mem_brk) (NULL); + VG_(track_die_mem_munmap) (NULL); - track->post_thread_create = & hg_thread_create; - track->post_thread_join = & hg_thread_join; + VG_(track_pre_mem_read) (& eraser_pre_mem_read); + VG_(track_pre_mem_read_asciiz) (& eraser_pre_mem_read_asciiz); + VG_(track_pre_mem_write) (& eraser_pre_mem_write); + VG_(track_post_mem_write) (NULL); + + VG_(track_post_thread_create) (& hg_thread_create); + VG_(track_post_thread_join) (& hg_thread_join); + + VG_(track_post_mutex_lock) (& eraser_pre_mutex_lock); + VG_(track_post_mutex_lock) (& eraser_post_mutex_lock); + VG_(track_post_mutex_unlock) (& eraser_post_mutex_unlock); VG_(register_compact_helper)((Addr) & eraser_mem_help_read_1); VG_(register_compact_helper)((Addr) & eraser_mem_help_read_2); @@ -3204,18 +3214,21 @@ Char *SK_(usage)(void) void SK_(post_clo_init)(void) { + void (*stack_tracker)(Addr a, UInt len); + if (clo_execontext) { execontext_map = VG_(malloc)(sizeof(ExeContextMap *) * 65536); VG_(memset)(execontext_map, 0, sizeof(ExeContextMap *) * 65536); } - if (!clo_priv_stacks) { - VgTrackEvents *track = &VG_(track_events); + if (clo_priv_stacks) + stack_tracker = & eraser_new_mem_stack_private; + else + stack_tracker = & eraser_new_mem_stack; - track->new_mem_stack = & eraser_new_mem_stack; - track->new_mem_stack_aligned = & eraser_new_mem_stack; - track->new_mem_stack_signal = & eraser_new_mem_stack; - } + VG_(track_new_mem_stack) (stack_tracker); + VG_(track_new_mem_stack_aligned)(stack_tracker); + VG_(track_new_mem_stack_signal) (stack_tracker); } diff --git a/include/vg_skin.h b/include/vg_skin.h index 10f70e725..1d0e1d322 100644 --- a/include/vg_skin.h +++ b/include/vg_skin.h @@ -702,36 +702,31 @@ typedef UInstr; -/* Expandable arrays of uinstrs. */ typedef - struct { - Int used; - Int size; - UInstr* instrs; - Int nextTemp; - } + struct _UCodeBlock UCodeBlock; +extern Int VG_(get_num_instrs) (UCodeBlock* cb); +extern Int VG_(get_num_temps) (UCodeBlock* cb); +extern UInstr* VG_(get_instr) (UCodeBlock* cb, Int i); +extern UInstr* VG_(get_last_instr) (UCodeBlock* cb); + /*====================================================================*/ /*=== Instrumenting UCode ===*/ /*====================================================================*/ -/* A structure for communicating TempReg and RealReg uses of UInstrs. */ -typedef - struct { - Int num; - Bool isWrite; - } - RegUse; +/* Find what this instruction does to its regs. 
`tag' indicates whether we're + considering TempRegs (pre-reg-alloc) or RealRegs (post-reg-alloc). + `regs' is filled with the affected register numbers, `isWrites' parallels + it and indicates if the reg is read or written. If a reg is read and + written, it will appear twice in `regs'. `regs' and `isWrites' must be + able to fit 3 elements. -/* Find what this instruction does to its regs. Tag indicates whether we're - * considering TempRegs (pre-reg-alloc) or RealRegs (post-reg-alloc). - * Useful for analysis/optimisation passes. */ -extern Int VG_(get_reg_usage) ( UInstr* u, Tag tag, RegUse* arr ); + Useful for analysis/optimisation passes. */ +extern Int VG_(get_reg_usage) ( UInstr* u, Tag tag, Int* regs, Bool* isWrites ); -/* ------------------------------------------------------------------ */ /* Used to register helper functions to be called from generated code. A limited number of compact helpers can be registered; the code generated to call them is slightly shorter -- so register the mostly frequently @@ -767,10 +762,14 @@ extern void VG_(new_UInstr3) ( UCodeBlock* cb, Opcode opcode, Int sz, Tag tag2, UInt val2, Tag tag3, UInt val3 ); -extern void VG_(set_flag_RW) ( UInstr* u, FlagSet fr, FlagSet fw ); +/* Set read/write/undefined flags. Undefined flags are treaten as written, + but it's worth keeping them logically distinct. */ +extern void VG_(set_flag_fields) ( UCodeBlock* cb, FlagSet fr, FlagSet fw, + FlagSet fu); extern void VG_(set_lit_field) ( UCodeBlock* cb, UInt lit32 ); extern void VG_(set_ccall_fields) ( UCodeBlock* cb, Addr fn, UChar argc, UChar regparms_n, Bool has_ret_val ); +extern void VG_(set_cond_field) ( UCodeBlock* cb, Condcode code ); extern void VG_(copy_UInstr) ( UCodeBlock* cb, UInstr* instr ); @@ -783,6 +782,8 @@ extern Bool VG_(any_flag_use)( UInstr* u ); #define uInstr3 VG_(new_UInstr3) #define uLiteral VG_(set_lit_field) #define uCCall VG_(set_ccall_fields) +#define uCond VG_(set_cond_field) +#define uFlagsRWU VG_(set_flag_fields) #define newTemp VG_(get_new_temp) #define newShadow VG_(get_new_shadow) @@ -804,7 +805,7 @@ extern void VG_(set_global_var) ( UCodeBlock* cb, Addr globvar_ptr, UInt val); /* ------------------------------------------------------------------ */ /* Allocating/freeing basic blocks of UCode */ -extern UCodeBlock* VG_(alloc_UCodeBlock) ( void ); +extern UCodeBlock* VG_(setup_UCodeBlock) ( UCodeBlock* cb ); extern void VG_(free_UCodeBlock) ( UCodeBlock* cb ); /* ------------------------------------------------------------------ */ @@ -996,26 +997,25 @@ typedef Int /* Do not make this unsigned! */ SuppKind; -/* An extensible (via the 'extra' field) suppression record. This holds - the suppression details of interest to a skin. Skins can use a normal - enum (with element values in the normal range (0..)) for `skind'. - - If VG_(needs).report_errors==True, for each suppression read in by core - SKN_(recognised_suppression)() and SKN_(read_extra_suppression_info) will - be called. The `skind' field is filled in by the value returned in the - argument of the first function; the second function can fill in the - `string' and `extra' fields if it wants. +/* The skin-relevant parts of a suppression are: + kind: what kind of suppression; must be in the range (0..) + string: use is optional. NULL by default. + extra: use is optional. NULL by default. void* so it's extensible. */ typedef - struct { - /* What kind of suppression. Must use the range (0..) */ - SuppKind skind; - /* String -- use is optional. NULL by default. 
*/ - Char* string; - /* Anything else -- use is optional. NULL by default. */ - void* extra; - } - SkinSupp; + struct _Supp + Supp; + +/* Useful in SK_(error_matches_suppression)() */ +SuppKind VG_(get_supp_kind) ( Supp* su ); +Char* VG_(get_supp_string) ( Supp* su ); +void* VG_(get_supp_extra) ( Supp* su ); + +/* Must be used in VG_(recognised_suppression)() */ +void VG_(set_supp_kind) ( Supp* su, SuppKind suppkind ); +/* May be used in VG_(read_extra_suppression_info)() */ +void VG_(set_supp_string) ( Supp* su, Char* string ); +void VG_(set_supp_extra) ( Supp* su, void* extra ); /* ------------------------------------------------------------------ */ @@ -1029,29 +1029,22 @@ typedef Int /* Do not make this unsigned! */ ErrorKind; -/* An extensible (via the 'extra' field) error record. This holds - the error details of interest to a skin. Skins can use a normal - enum (with element values in the normal range (0..)) for `ekind'. - - When errors are found and recorded with VG_(maybe_record_error)(), all - the skin must do is pass in the four parameters; core will - allocate/initialise the error record. +/* The skin-relevant parts of an Error are: + kind: what kind of error; must be in the range (0..) + addr: use is optional. 0 by default. + string: use is optional. NULL by default. + extra: use is optional. NULL by default. void* so it's extensible. */ typedef - struct { - /* Used by ALL. Must be in the range (0..) */ - Int ekind; - /* Used frequently */ - Addr addr; - /* Used frequently */ - Char* string; - /* For any skin-specific extras */ - void* extra; - } - SkinError; + struct _Error + Error; +/* Useful in SK_(error_matches_suppression)(), SK_(pp_SkinError)(), etc */ +SuppKind VG_(get_error_kind) ( Error* err ); +Addr VG_(get_error_address) ( Error* err ); +Char* VG_(get_error_string) ( Error* err ); +void* VG_(get_error_extra) ( Error* err ); -/* ------------------------------------------------------------------ */ /* Call this when an error occurs. It will be recorded if it hasn't been seen before. If it has, the existing error record will have its count incremented. @@ -1134,26 +1127,29 @@ extern VgSectKind VG_(seg_sect_kind)(Addr); /*=== Shadow chunks and block-finding ===*/ /*====================================================================*/ +/* The skin-relevant parts of a ShadowChunk are: + size: size of the block in bytes + addr: addr of the block + extra: anything extra kept by the skin; size is determined by + VG_(needs).sizeof_shadow_chunk +*/ typedef - enum { - Vg_AllocMalloc = 0, - Vg_AllocNew = 1, - Vg_AllocNewVec = 2 - } - VgAllocKind; - -/* Description of a malloc'd chunk. skin_extra[] part can be used by - the skin; size of array is given by VG_(needs).sizeof_shadow_chunk. */ -typedef - struct _ShadowChunk { - struct _ShadowChunk* next; - UInt size : 30; /* size requested */ - VgAllocKind allockind : 2; /* which wrapper did the allocation */ - Addr data; /* ptr to actual block */ - UInt skin_extra[0]; /* extra skin-specific info */ - } + struct _ShadowChunk ShadowChunk; +extern UInt VG_(get_sc_size) ( ShadowChunk* sc ); +extern Addr VG_(get_sc_data) ( ShadowChunk* sc ); +/* Gets the ith word of the `extra' field. */ +extern UInt VG_(get_sc_extra) ( ShadowChunk* sc, UInt i ); +/* Sets the ith word of the `extra' field to `word'. */ +extern void VG_(set_sc_extra) ( ShadowChunk* sc, UInt i, UInt word ); + +/* These two should only be used if the `alternative_free' need is set, once + we reach the point where the block would have been free'd. 
*/ +extern ShadowChunk* VG_(get_sc_next) ( ShadowChunk* sc ); +extern void VG_(set_sc_next) ( ShadowChunk* sc, ShadowChunk* next ); + + /* Use this to free blocks if VG_(needs).alternative_free == True. It frees the ShadowChunk and the malloc'd block it points to. */ extern void VG_(free_ShadowChunk) ( ShadowChunk* sc ); @@ -1192,101 +1188,85 @@ extern void VG_(generic_detect_memory_leaks) ( /* ------------------------------------------------------------------ */ /* Details */ -typedef - struct { - /* Information used in the startup message. `name' also determines the - string used for identifying suppressions in a suppression file as - belonging to this skin. `version' can be NULL, in which case (not - surprisingly) no version info is printed; this mechanism is - designed for skins distributed with Valgrind that share a version - number with Valgrind. Other skins not distributed as part of - Valgrind should probably have their own version number. */ - Char* name; - Char* version; - Char* description; - Char* copyright_author; - /* String printed if an `sk_assert' assertion fails or VG_(skin_panic) - is called. Should probably be an email address. */ - Char* bug_reports_to; - } - VgDetails; +/* Information used in the startup message. `name' also determines the + string used for identifying suppressions in a suppression file as + belonging to this skin. `version' can be NULL, in which case (not + surprisingly) no version info is printed; this mechanism is designed for + skins distributed with Valgrind that share a version number with + Valgrind. Other skins not distributed as part of Valgrind should + probably have their own version number. */ +extern void VG_(details_name) ( Char* name ); +extern void VG_(details_version) ( Char* version ); +extern void VG_(details_description) ( Char* description ); +extern void VG_(details_copyright_author) ( Char* copyright_author ); -extern VgDetails VG_(details); +/* String printed if an `sk_assert' assertion fails or VG_(skin_panic) + is called. Should probably be an email address. */ +extern void VG_(details_bug_reports_to) ( Char* bug_reports_to ); /* ------------------------------------------------------------------ */ /* Needs */ -/* If new fields are added to this type, update: - * - vg_main.c:initialisation of VG_(needs) - * - vg_main.c:sanity_check_needs() - * - * If the name of this type or any of its fields change, update: - * - dependent comments (just search for "VG_(needs)"). - */ -typedef - struct { - /* Booleans that decide core behaviour, but don't require extra - operations to be defined if `True' */ +/* Booleans that decide core behaviour, but don't require extra + operations to be defined if `True' */ - /* Should __libc_freeres() be run? Bugs in it can crash the skin. */ - Bool libc_freeres; +/* Should __libc_freeres() be run? Bugs in it can crash the skin. */ +extern void VG_(needs_libc_freeres) ( void ); - /* Want to have errors detected by Valgrind's core reported? Includes: - - pthread API errors (many; eg. unlocking a non-locked mutex) - - silly arguments to malloc() et al (eg. negative size) - - invalid file descriptors to blocking syscalls read() and write() - - bad signal numbers passed to sigaction() - - attempt to install signal handler for SIGKILL or SIGSTOP */ - Bool core_errors; +/* Want to have errors detected by Valgrind's core reported? Includes: + - pthread API errors (many; eg. unlocking a non-locked mutex) + - silly arguments to malloc() et al (eg. 
negative size) + - invalid file descriptors to blocking syscalls read() and write() + - bad signal numbers passed to sigaction() + - attempt to install signal handler for SIGKILL or SIGSTOP */ +extern void VG_(needs_core_errors) ( void ); - /* Booleans that indicate extra operations are defined; if these are - True, the corresponding template functions (given below) must be - defined. A lot like being a member of a type class. */ +/* Booleans that indicate extra operations are defined; if these are True, + the corresponding template functions (given below) must be defined. A + lot like being a member of a type class. */ - /* Want to report errors from the skin? This implies use of - suppressions, too. */ - Bool skin_errors; +/* Want to report errors from skin? This implies use of suppressions, too. */ +extern void VG_(needs_skin_errors) ( void ); - /* Is information kept about specific individual basic blocks? (Eg. for - cachegrind there are cost-centres for every instruction, stored at a - basic block level.) If so, it sometimes has to be discarded, because - .so mmap/munmap-ping or self-modifying code (informed by the - DISCARD_TRANSLATIONS user request) can cause one instruction address - to be used for more than one instruction in one program run... */ - Bool basic_block_discards; +/* Is information kept about specific individual basic blocks? (Eg. for + cachegrind there are cost-centres for every instruction, stored at a + basic block level.) If so, it sometimes has to be discarded, because + .so mmap/munmap-ping or self-modifying code (informed by the + DISCARD_TRANSLATIONS user request) can cause one instruction address + to be used for more than one instruction in one program run... */ +extern void VG_(needs_basic_block_discards) ( void ); - /* Skin maintains information about each register? */ - Bool shadow_regs; +/* Skin maintains information about each register? */ +extern void VG_(needs_shadow_regs) ( void ); - /* Skin defines its own command line options? */ - Bool command_line_options; - /* Skin defines its own client requests? */ - Bool client_requests; +/* Skin defines its own command line options? */ +extern void VG_(needs_command_line_options) ( void ); - /* Skin defines its own UInstrs? */ - Bool extended_UCode; +/* Skin defines its own client requests? */ +extern void VG_(needs_client_requests) ( void ); - /* Skin does stuff before and/or after system calls? */ - Bool syscall_wrapper; +/* Skin defines its own UInstrs? */ +extern void VG_(needs_extended_UCode) ( void ); - /* Size, in words, of extra info about malloc'd blocks recorded by - skin. Be careful to get this right or you'll get seg faults! */ - UInt sizeof_shadow_block; +/* Skin does stuff before and/or after system calls? */ +extern void VG_(needs_syscall_wrapper) ( void ); - /* Skin does free()s itself? */ - Bool alternative_free; +/* Size, in words, of extra info about malloc'd blocks recorded by + skin. Be careful to get this right or you'll get seg faults! */ +extern void VG_(needs_sizeof_shadow_block) ( Int size ); - /* Are skin-state sanity checks performed? */ - Bool sanity_checks; +/* Skin does free()s itself? Useful if a skin needs to keep track of + blocks in some way after they're free'd. + WARNING: don't forget to call VG_(free_ShadowChunk)() for each block + eventually! */ +extern void VG_(needs_alternative_free) ( void ); - /* Do we need to see data symbols? */ - Bool data_syms; - } - VgNeeds; - -extern VgNeeds VG_(needs); +/* Are skin-state sanity checks performed? 
*/ +extern void VG_(needs_sanity_checks) ( void ); +/* Do we need to see data symbols? */ +extern void VG_(needs_data_syms) ( void ); /* ------------------------------------------------------------------ */ /* Core events to track */ @@ -1297,87 +1277,89 @@ typedef enum { Vg_CorePThread, Vg_CoreSignal, Vg_CoreSysCall, Vg_CoreTranslate } CorePart; -/* Events happening in core to track. To be notified, assign a function - to the function pointer. To ignore an event, don't do anything - (default assignment is to NULL in which case the call is skipped). */ -typedef - struct { - /* Memory events */ - void (*new_mem_startup)( Addr a, UInt len, Bool rr, Bool ww, Bool xx ); - void (*new_mem_heap) ( Addr a, UInt len, Bool is_inited ); - void (*new_mem_stack) ( Addr a, UInt len ); - void (*new_mem_stack_aligned) ( Addr a, UInt len ); - void (*new_mem_stack_signal) ( Addr a, UInt len ); - void (*new_mem_brk) ( Addr a, UInt len ); - void (*new_mem_mmap) ( Addr a, UInt len, Bool rr, Bool ww, Bool xx ); +#define EV extern void - void (*copy_mem_heap) ( Addr from, Addr to, UInt len ); - void (*copy_mem_remap) ( Addr from, Addr to, UInt len ); - void (*change_mem_mprotect) ( Addr a, UInt len, Bool rr, Bool ww, Bool xx ); +/* Events happening in core to track. To be notified, pass a callback + function to the appropriate function. To ignore an event, don't do + anything (default is for events to be ignored). */ + +/* Memory events */ + +EV VG_(track_new_mem_startup) ( void (*f)(Addr a, UInt len, + Bool rr, Bool ww, Bool xx) ); +EV VG_(track_new_mem_heap) ( void (*f)(Addr a, UInt len, Bool is_inited) ); +EV VG_(track_new_mem_stack) ( void (*f)(Addr a, UInt len) ); +EV VG_(track_new_mem_stack_aligned) ( void (*f)(Addr a, UInt len) ); +EV VG_(track_new_mem_stack_signal) ( void (*f)(Addr a, UInt len) ); +EV VG_(track_new_mem_brk) ( void (*f)(Addr a, UInt len) ); +EV VG_(track_new_mem_mmap) ( void (*f)(Addr a, UInt len, + Bool rr, Bool ww, Bool xx) ); + +EV VG_(track_copy_mem_heap) ( void (*f)(Addr from, Addr to, UInt len) ); +EV VG_(track_copy_mem_remap) ( void (*f)(Addr from, Addr to, UInt len) ); +EV VG_(track_change_mem_mprotect) ( void (*f)(Addr a, UInt len, + Bool rr, Bool ww, Bool xx) ); - /* Used on redzones around malloc'd blocks and at end of stack */ - void (*ban_mem_heap) ( Addr a, UInt len ); - void (*ban_mem_stack) ( Addr a, UInt len ); +/* Used on redzones around malloc'd blocks and at end of stack */ +EV VG_(track_ban_mem_heap) ( void (*f)(Addr a, UInt len) ); +EV VG_(track_ban_mem_stack) ( void (*f)(Addr a, UInt len) ); - void (*die_mem_heap) ( Addr a, UInt len ); - void (*die_mem_stack) ( Addr a, UInt len ); - void (*die_mem_stack_aligned) ( Addr a, UInt len ); - void (*die_mem_stack_signal) ( Addr a, UInt len ); - void (*die_mem_brk) ( Addr a, UInt len ); - void (*die_mem_munmap) ( Addr a, UInt len ); +EV VG_(track_die_mem_heap) ( void (*f)(Addr a, UInt len) ); +EV VG_(track_die_mem_stack) ( void (*f)(Addr a, UInt len) ); +EV VG_(track_die_mem_stack_aligned) ( void (*f)(Addr a, UInt len) ); +EV VG_(track_die_mem_stack_signal) ( void (*f)(Addr a, UInt len) ); +EV VG_(track_die_mem_brk) ( void (*f)(Addr a, UInt len) ); +EV VG_(track_die_mem_munmap) ( void (*f)(Addr a, UInt len) ); - void (*bad_free) ( ThreadState* tst, Addr a ); - void (*mismatched_free) ( ThreadState* tst, Addr a ); +EV VG_(track_bad_free) ( void (*f)(ThreadState* tst, Addr a) ); +EV VG_(track_mismatched_free) ( void (*f)(ThreadState* tst, Addr a) ); - void (*pre_mem_read) ( CorePart part, ThreadState* tst, - Char* s, Addr a, 
UInt size ); - void (*pre_mem_read_asciiz) ( CorePart part, ThreadState* tst, - Char* s, Addr a ); - void (*pre_mem_write) ( CorePart part, ThreadState* tst, - Char* s, Addr a, UInt size ); - /* Not implemented yet -- have to add in lots of places, which is a - pain. Won't bother unless/until there's a need. */ - /* void (*post_mem_read) ( ThreadState* tst, Char* s, - Addr a, UInt size ); */ - void (*post_mem_write) ( Addr a, UInt size ); +EV VG_(track_pre_mem_read) ( void (*f)(CorePart part, ThreadState* tst, + Char* s, Addr a, UInt size) ); +EV VG_(track_pre_mem_read_asciiz) ( void (*f)(CorePart part, ThreadState* tst, + Char* s, Addr a) ); +EV VG_(track_pre_mem_write) ( void (*f)(CorePart part, ThreadState* tst, + Char* s, Addr a, UInt size) ); +/* Not implemented yet -- have to add in lots of places, which is a + pain. Won't bother unless/until there's a need. */ +/* EV VG_(track_post_mem_read) ( void (*f)(ThreadState* tst, Char* s, + Addr a, UInt size) ); */ +EV VG_(track_post_mem_write) ( void (*f)(Addr a, UInt size) ); - /* Scheduler events (not exhaustive) */ - void (*thread_run) ( ThreadId tid ); +/* Scheduler events (not exhaustive) */ +EV VG_(track_thread_run) ( void (*f)(ThreadId tid) ); - /* Mutex events (not exhaustive) */ +/* Thread events (not exhaustive) */ - /* Called before a thread can block while waiting for a mutex - (called regardless of whether the thread will block or - not) */ - void (*pre_mutex_lock) ( ThreadId tid, - void* /*pthread_mutex_t* */ mutex ); - /* Called once the thread actually holds the mutex (always - paired with pre_mutex_lock) */ - void (*post_mutex_lock) ( ThreadId tid, - void* /*pthread_mutex_t* */ mutex ); - /* Called after a thread has released a mutex (no need for a - corresponding pre_mutex_unlock, because unlocking can't - block) */ - void (*post_mutex_unlock) ( ThreadId tid, - void* /*pthread_mutex_t* */ mutex ); +/* Called during thread create, before the new thread has run any + instructions (or touched any memory). */ +EV VG_(track_post_thread_create)( void (*f)(ThreadId tid, ThreadId child) ); +/* Called once the joinee thread is terminated and the joining thread is + about to resume. */ +EV VG_(track_post_thread_join) ( void (*f)(ThreadId joiner, ThreadId joinee) ); - /* Called during thread create, before the new thread has run - any instructions (or touched any memory). */ - void (*post_thread_create)( ThreadId tid, ThreadId child ); - /* Called once the joinee thread is terminated and the joining - thread is about to resume. */ - void (*post_thread_join) ( ThreadId joiner, ThreadId joinee ); - /* Others... thread, condition variable, signal events... */ - /* ... */ - } - VgTrackEvents; +/* Mutex events (not exhaustive) */ -/* Declare the struct instance */ -extern VgTrackEvents VG_(track_events); +/* Called before a thread can block while waiting for a mutex (called + regardless of whether the thread will block or not). */ +EV VG_(track_pre_mutex_lock) ( void (*f)(ThreadId tid, + void* /*pthread_mutex_t* */ mutex) ); +/* Called once the thread actually holds the mutex (always paired with + pre_mutex_lock). */ +EV VG_(track_post_mutex_lock) ( void (*f)(ThreadId tid, + void* /*pthread_mutex_t* */ mutex) ); +/* Called after a thread has released a mutex (no need for a corresponding + pre_mutex_unlock, because unlocking can't block). */ +EV VG_(track_post_mutex_unlock) ( void (*f)(ThreadId tid, + void* /*pthread_mutex_t* */ mutex) ); +/* Others... condition variable, signal events... */ +/* ... 
*/ + +#undef EV /* ------------------------------------------------------------------ */ /* Template functions */ @@ -1393,17 +1375,18 @@ extern VgTrackEvents VG_(track_events); /* Fundamental template functions */ /* Initialise skin. Must do the following: - - initialise the `details' struct + - initialise the `details' struct, via the VG_(details_*)() functions - register any helpers called by generated code May do the following: - - initialise the `needs' struct to indicate certain requirements - - initialise the `track' struct to indicate core events of interest + - initialise the `needs' struct to indicate certain requirements, via + the VG_(needs_*)() functions + - initialise the `track' struct to indicate core events of interest, via + the VG_(track_*)() functions - register any skin-specific profiling events - any other skin-specific initialisation */ -extern void SK_(pre_clo_init) ( VgDetails* details, VgNeeds* needs, - VgTrackEvents* track ); +extern void SK_(pre_clo_init) ( void ); /* Do initialisation that can only be done after command line processing. */ extern void SK_(post_clo_init)( void ); @@ -1423,40 +1406,44 @@ extern void SK_(fini) ( void ); /* Identify if two errors are equal, or equal enough. `res' indicates how close is "close enough". `res' should be passed on as necessary, eg. if - the SkinError's extra field contains an ExeContext, `res' should be + the Error's `extra' part contains an ExeContext, `res' should be passed to VG_(eq_ExeContext)() if the ExeContexts are considered. Other than that, probably don't worry about it unless you have lots of very similar errors occurring. */ -extern Bool SK_(eq_SkinError) ( VgRes res, - SkinError* e1, SkinError* e2 ); +extern Bool SK_(eq_SkinError) ( VgRes res, Error* e1, Error* e2 ); /* Print error context. The passed function pp_ExeContext() can be (and probably should be) used to print the location of the error. */ -extern void SK_(pp_SkinError) ( SkinError* ec, void (*pp_ExeContext)(void) ); +extern void SK_(pp_SkinError) ( Error* err, void (*pp_ExeContext)(void) ); -/* Copy the ec->extra part and replace ec->extra with the new copy. This is - necessary to move from a temporary stack copy to a permanent heap one. +/* Should copy the `extra' part which the core uses to override the old + version. This is necessary to move from a temporary stack copy to a + permanent heap one. - Then fill in any details that could be postponed until after the decision - whether to ignore the error (ie. details not affecting the result of - SK_(eq_SkinError)()). This saves time when errors are ignored. + Then should fill in any details that could be postponed until after the + decision whether to ignore the error (ie. details not affecting the + result of SK_(eq_SkinError)()). This saves time when errors are ignored. Yuk. */ -extern void SK_(dup_extra_and_update)(SkinError* ec); +extern void* SK_(dup_extra_and_update) ( Error* err ); -/* Return value indicates recognition. If recognised, type goes in `skind'. */ -extern Bool SK_(recognised_suppression) ( Char* name, SuppKind *skind ); +/* Return value indicates recognition. If recognised, must set skind using + VG_(set_supp_kind)(). */ +extern Bool SK_(recognised_suppression) ( Char* name, Supp* su ); -/* Read any extra info for this suppression kind. For filling up the - `string' and `extra' fields in a `SkinSupp' struct if necessary. */ -extern Bool SK_(read_extra_suppression_info) ( Int fd, Char* buf, - Int nBuf, SkinSupp *s ); +/* Read any extra info for this suppression kind. 
Most likely for filling + in the `extra' and `string' parts (with VG_(set_supp_{extra,string})()) + of a suppression if necessary. Should return False if a syntax error + occurred, True otherwise. */ +extern Bool SK_(read_extra_suppression_info) ( Int fd, Char* buf, Int nBuf, + Supp* su ); /* This should just check the kinds match and maybe some stuff in the - 'extra' field if appropriate */ -extern Bool SK_(error_matches_suppression)(SkinError* ec, SkinSupp* su); + `string' and `extra' field if appropriate (using VG_(get_supp_*)() to + get the relevant suppression parts). */ +extern Bool SK_(error_matches_suppression)(Error* err, Supp* su); /* ------------------------------------------------------------------ */ @@ -1496,23 +1483,24 @@ extern Bool SK_(handle_client_request) ( ThreadState* tst, UInt* arg_block, UInt /* VG_(needs).extends_UCode */ /* Useful to use in VG_(get_Xreg_usage)() */ -#define VG_UINSTR_READS_REG(ono) \ +#define VG_UINSTR_READS_REG(ono,regs,isWrites) \ { if (mycat(u->tag,ono) == tag) \ - { arr[n].num = mycat(u->val,ono); \ - arr[n].isWrite = False; \ + { regs[n] = mycat(u->val,ono); \ + isWrites[n] = False; \ n++; \ } \ } -#define VG_UINSTR_WRITES_REG(ono) \ +#define VG_UINSTR_WRITES_REG(ono,regs,isWrites) \ { if (mycat(u->tag,ono) == tag) \ - { arr[n].num = mycat(u->val,ono); \ - arr[n].isWrite = True; \ + { regs[n] = mycat(u->val,ono); \ + isWrites[n] = True; \ n++; \ } \ } /* 'X' prefix indicates eXtended UCode. */ -extern Int SK_(get_Xreg_usage) ( UInstr* u, Tag tag, RegUse* arr ); +extern Int SK_(get_Xreg_usage) ( UInstr* u, Tag tag, Int* regs, + Bool* isWrites ); extern void SK_(emit_XUInstr) ( UInstr* u, RRegSet regs_live_before ); extern Bool SK_(sane_XUInstr) ( Bool beforeRA, Bool beforeLiveness, UInstr* u ); @@ -1536,12 +1524,16 @@ extern void SK_(post_syscall) ( ThreadId tid, UInt syscallno, /* ------------------------------------------------------------------ */ /* VG_(needs).sizeof_shadow_chunk (if > 0) */ +/* Must fill in the `extra' part, using VG_(set_sc_extra)(). */ extern void SK_(complete_shadow_chunk) ( ShadowChunk* sc, ThreadState* tst ); /* ------------------------------------------------------------------ */ /* VG_(needs).alternative_free */ +/* If this need is set, when a dynamic block would normally be free'd, this + is called instead. The block is contained inside the ShadowChunk; use + the VG_(get_sc_*)() functions to access it. 
*/ extern void SK_(alt_free) ( ShadowChunk* sc, ThreadState* tst ); diff --git a/lackey/lk_main.c b/lackey/lk_main.c index 4b79d9a1e..117cb49ad 100644 --- a/lackey/lk_main.c +++ b/lackey/lk_main.c @@ -74,15 +74,14 @@ static void add_one_Jcc_untaken(void) n_Jccs_untaken++; } -void SK_(pre_clo_init)(VgDetails* details, VgNeeds* not_used1, - VgTrackEvents* not_used2) +void SK_(pre_clo_init)(void) { - details->name = "Lackey"; - details->version = NULL; - details->description = "an example Valgrind skin"; - details->copyright_author = - "Copyright (C) 2002, and GNU GPL'd, by Nicholas Nethercote."; - details->bug_reports_to = "njn25@cam.ac.uk"; + VG_(details_name) ("Lackey"); + VG_(details_version) (NULL); + VG_(details_description) ("an example Valgrind skin"); + VG_(details_copyright_author)( + "Copyright (C) 2002, and GNU GPL'd, by Nicholas Nethercote."); + VG_(details_bug_reports_to) ("njn25@cam.ac.uk"); VG_(register_compact_helper)((Addr) & add_one_dlrr_call); VG_(register_compact_helper)((Addr) & add_one_BB); @@ -145,8 +144,7 @@ UCodeBlock* SK_(instrument)(UCodeBlock* cb_in, Addr orig_addr) UInstr* u; Char fnname[100]; - cb = VG_(alloc_UCodeBlock)(); - cb->nextTemp = cb_in->nextTemp; + cb = VG_(setup_UCodeBlock)(cb_in); /* Count call to dlrr(), if this BB is dlrr()'s entry point */ if (VG_(get_fnname_if_entry)(orig_addr, fnname, 100) && @@ -158,8 +156,8 @@ UCodeBlock* SK_(instrument)(UCodeBlock* cb_in, Addr orig_addr) /* Count basic block */ VG_(call_helper_0_0)(cb, (Addr) & add_one_BB); - for (i = 0; i < cb_in->used; i++) { - u = &cb_in->instrs[i]; + for (i = 0; i < VG_(get_num_instrs)(cb_in); i++) { + u = VG_(get_instr)(cb_in, i); switch (u->opcode) { case NOP: case LOCK: case CALLM_S: case CALLM_E: diff --git a/memcheck/mc_errcontext.c b/memcheck/mc_errcontext.c index 9a4bde2b8..d770deeea 100644 --- a/memcheck/mc_errcontext.c +++ b/memcheck/mc_errcontext.c @@ -128,27 +128,29 @@ static Bool eq_AddrInfo ( VgRes res, AddrInfo* ai1, AddrInfo* ai2 ) are otherwise the same, the faulting addrs and associated rwoffsets are allowed to be different. 
diff --git a/memcheck/mc_errcontext.c b/memcheck/mc_errcontext.c
index 9a4bde2b8..d770deeea 100644
--- a/memcheck/mc_errcontext.c
+++ b/memcheck/mc_errcontext.c
@@ -128,27 +128,29 @@ static Bool eq_AddrInfo ( VgRes res, AddrInfo* ai1, AddrInfo* ai2 )
    are otherwise the same, the faulting addrs and associated rwoffsets
    are allowed to be different. */
-Bool SK_(eq_SkinError) ( VgRes res,
-                         SkinError* e1, SkinError* e2 )
+Bool SK_(eq_SkinError) ( VgRes res, Error* e1, Error* e2 )
 {
-   MemCheckError* e1_extra = e1->extra;
-   MemCheckError* e2_extra = e2->extra;
+   MemCheckError* e1_extra = VG_(get_error_extra)(e1);
+   MemCheckError* e2_extra = VG_(get_error_extra)(e2);
 
-   switch (e1->ekind) {
-      case CoreMemErr:
+   switch (VG_(get_error_kind)(e1)) {
+      case CoreMemErr: {
+         Char *e1s, *e2s;
          if (e1_extra->isWrite != e2_extra->isWrite) return False;
-         if (e2->ekind != CoreMemErr) return False;
-         if (e1->string == e2->string) return True;
-         if (0 == VG_(strcmp)(e1->string, e2->string)) return True;
+         if (VG_(get_error_kind)(e2) != CoreMemErr) return False;
+         e1s = VG_(get_error_string)(e1);
+         e2s = VG_(get_error_string)(e2);
+         if (e1s == e2s) return True;
+         if (0 == VG_(strcmp)(e1s, e2s)) return True;
          return False;
+      }
 
       case UserErr:
       case ParamErr:
-         if (e1_extra->isWrite != e2_extra->isWrite)
-            return False;
-         if (e1->ekind == ParamErr
-             && 0 != VG_(strcmp)(e1->string, e2->string))
-            return False;
+         if (e1_extra->isWrite != e2_extra->isWrite) return False;
+         if (VG_(get_error_kind)(e1) == ParamErr
+             && 0 != VG_(strcmp)(VG_(get_error_string)(e1),
+                                 VG_(get_error_string)(e2))) return False;
          return True;
 
       case FreeErr:
@@ -177,7 +179,8 @@ Bool SK_(eq_SkinError) ( VgRes res,
          return True;
 
       default:
-         VG_(printf)("Error:\n  unknown MemCheck error code %d\n", e1->ekind);
+         VG_(printf)("Error:\n  unknown MemCheck error code %d\n",
+                     VG_(get_error_kind)(e1));
          VG_(skin_panic)("unknown error code in SK_(eq_SkinError)");
    }
 }
@@ -237,19 +240,19 @@ static void pp_AddrInfo ( Addr a, AddrInfo* ai )
    }
 }
 
-void SK_(pp_SkinError) ( SkinError* err, void (*pp_ExeContext)(void) )
+void SK_(pp_SkinError) ( Error* err, void (*pp_ExeContext)(void) )
 {
-   MemCheckError* err_extra = err->extra;
+   MemCheckError* err_extra = VG_(get_error_extra)(err);
 
-   switch (err->ekind) {
+   switch (VG_(get_error_kind)(err)) {
       case CoreMemErr:
         if (err_extra->isWrite) {
            VG_(message)(Vg_UserMsg,
-              "%s contains unaddressable byte(s)", err->string );
+              "%s contains unaddressable byte(s)", VG_(get_error_string)(err));
         } else {
            VG_(message)(Vg_UserMsg,
               "%s contains uninitialised or unaddressable byte(s)",
-              err->string);
+              VG_(get_error_string)(err));
         }
         pp_ExeContext();
         break;
@@ -285,33 +288,33 @@ void SK_(pp_SkinError) ( SkinError* err, void (*pp_ExeContext)(void) )
            VG_(skin_panic)("pp_SkinError(axskind)");
        }
        pp_ExeContext();
-      pp_AddrInfo(err->addr, &err_extra->addrinfo);
+      pp_AddrInfo(VG_(get_error_address)(err), &err_extra->addrinfo);
       break;
 
      case FreeErr:
        VG_(message)(Vg_UserMsg,"Invalid free() / delete / delete[]");
        /* fall through */
      case FreeMismatchErr:
-      if (err->ekind == FreeMismatchErr)
+      if (VG_(get_error_kind)(err) == FreeMismatchErr)
          VG_(message)(Vg_UserMsg,
                       "Mismatched free() / delete / delete []");
       pp_ExeContext();
-      pp_AddrInfo(err->addr, &err_extra->addrinfo);
+      pp_AddrInfo(VG_(get_error_address)(err), &err_extra->addrinfo);
       break;
 
     case ParamErr:
       if (err_extra->isWrite) {
          VG_(message)(Vg_UserMsg,
            "Syscall param %s contains unaddressable byte(s)",
-           err->string );
+           VG_(get_error_string)(err));
       } else {
         VG_(message)(Vg_UserMsg,
           "Syscall param %s contains uninitialised or "
          "unaddressable byte(s)",
-        err->string);
+        VG_(get_error_string)(err));
      }
      pp_ExeContext();
-     pp_AddrInfo(err->addr, &err_extra->addrinfo);
+     pp_AddrInfo(VG_(get_error_address)(err), &err_extra->addrinfo);
      break;
 
     case UserErr:
@@ -324,11 +327,12 @@ void SK_(pp_SkinError) ( SkinError* err, void (*pp_ExeContext)(void) )
            "unaddressable byte(s) found during client check request");
      }
      pp_ExeContext();
-     pp_AddrInfo(err->addr, &err_extra->addrinfo);
+     pp_AddrInfo(VG_(get_error_address)(err), &err_extra->addrinfo);
      break;
 
     default:
-      VG_(printf)("Error:\n  unknown MemCheck error code %d\n", err->ekind);
+      VG_(printf)("Error:\n  unknown MemCheck error code %d\n",
+                  VG_(get_error_kind)(err));
       VG_(skin_panic)("unknown error code in SK_(pp_SkinError)");
   }
 }
@@ -356,7 +360,8 @@ static void describe_addr ( Addr a, AddrInfo* ai )
    /* Closure for searching malloc'd and free'd lists */
    Bool addr_is_in_block(ShadowChunk *sh_ch)
    {
-      return VG_(addr_is_in_block) ( a, sh_ch->data, sh_ch->size );
+      return VG_(addr_is_in_block) ( a, VG_(get_sc_data)(sh_ch),
+                                        VG_(get_sc_size)(sh_ch) );
    }
 
    /* Perhaps it's a user-def'd block ? */
@@ -374,18 +379,18 @@ static void describe_addr ( Addr a, AddrInfo* ai )
    sc = SK_(any_matching_freed_ShadowChunks)(addr_is_in_block);
    if (NULL != sc) {
       ai->akind = Freed;
-      ai->blksize = sc->size;
-      ai->rwoffset = (Int)(a) - (Int)(sc->data);
-      ai->lastchange = (ExeContext*)sc->skin_extra[0];
+      ai->blksize = VG_(get_sc_size)(sc);
+      ai->rwoffset = (Int)(a) - (Int)(VG_(get_sc_data)(sc));
+      ai->lastchange = (ExeContext*)( VG_(get_sc_extra)(sc, 0) );
       return;
    }
    /* Search for a currently malloc'd block which might bracket it. */
    sc = VG_(any_matching_mallocd_ShadowChunks)(addr_is_in_block);
    if (NULL != sc) {
       ai->akind = Mallocd;
-      ai->blksize = sc->size;
-      ai->rwoffset = (Int)(a) - (Int)(sc->data);
-      ai->lastchange = (ExeContext*)sc->skin_extra[0];
+      ai->blksize = VG_(get_sc_size)(sc);
+      ai->rwoffset = (Int)(a) - (Int)(VG_(get_sc_data)(sc));
+      ai->lastchange = (ExeContext*)( VG_(get_sc_extra)(sc, 0) );
       return;
    }
    /* Clueless ... */
@@ -394,19 +399,19 @@ static void describe_addr ( Addr a, AddrInfo* ai )
 }
 
 
-/* Creates a copy of the err_extra, updates the copy with address info if
-   necessary, sticks the copy into the SkinError. */
-void SK_(dup_extra_and_update)(SkinError* err)
+/* Creates a copy of the `extra' part, updates the copy with address info if
+   necessary, and returns the copy. */
+void* SK_(dup_extra_and_update)( Error* err )
 {
-   MemCheckError* err_extra;
+   MemCheckError* new_extra;
 
-   err_extra  = VG_(malloc)(sizeof(MemCheckError));
-   *err_extra = *((MemCheckError*)err->extra);
+   new_extra  = VG_(malloc)(sizeof(MemCheckError));
+   *new_extra = *((MemCheckError*)VG_(get_error_extra)(err));
 
-   if (err_extra->addrinfo.akind == Undescribed)
-      describe_addr ( err->addr, &(err_extra->addrinfo) );
+   if (new_extra->addrinfo.akind == Undescribed)
+      describe_addr ( VG_(get_error_address)(err), &(new_extra->addrinfo) );
 
-   err->extra = err_extra;
+   return new_extra;
 }
 
 /* These two are called from generated code. */
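[Editorial note -- illustration only, not part of the patch. The error callbacks now receive an opaque Error*, and SK_(dup_extra_and_update) returns the copied `extra' rather than writing it back into the error. A skeletal skin with a single hypothetical `ExtraInfo' structure might look like this; the message text and field are invented for the sketch.]

   typedef struct { Int badness; } ExtraInfo;   /* hypothetical */

   Bool SK_(eq_SkinError) ( VgRes res, Error* e1, Error* e2 )
   {
      ExtraInfo* x1 = VG_(get_error_extra)(e1);
      ExtraInfo* x2 = VG_(get_error_extra)(e2);
      return VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2)
             && x1->badness == x2->badness;
   }

   void SK_(pp_SkinError) ( Error* err, void (*pp_ExeContext)(void) )
   {
      VG_(message)(Vg_UserMsg, "problem at address 0x%x",
                   VG_(get_error_address)(err));
      pp_ExeContext();
   }

   /* Returns the copied `extra'; the core stores it in the Error itself. */
   void* SK_(dup_extra_and_update) ( Error* err )
   {
      ExtraInfo* new_extra = VG_(malloc)(sizeof(ExtraInfo));
      *new_extra = *(ExtraInfo*)VG_(get_error_extra)(err);
      return new_extra;
   }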
@@ -530,51 +535,56 @@ void SK_(record_user_error) ( ThreadState* tst, Addr a, Bool isWrite )
 #define STREQ(s1,s2) (s1 != NULL && s2 != NULL \
                       && VG_(strcmp)((s1),(s2))==0)
 
-Bool SK_(recognised_suppression) ( Char* name, SuppKind *skind )
+Bool SK_(recognised_suppression) ( Char* name, Supp* su )
 {
-   if      (STREQ(name, "Param"))   *skind = ParamSupp;
-   else if (STREQ(name, "CoreMem")) *skind = CoreMemSupp;
-   else if (STREQ(name, "Value0"))  *skind = Value0Supp; /* backwards compat */
-   else if (STREQ(name, "Cond"))    *skind = Value0Supp;
-   else if (STREQ(name, "Value1"))  *skind = Value1Supp;
-   else if (STREQ(name, "Value2"))  *skind = Value2Supp;
-   else if (STREQ(name, "Value4"))  *skind = Value4Supp;
-   else if (STREQ(name, "Value8"))  *skind = Value8Supp;
-   else if (STREQ(name, "Addr1"))   *skind = Addr1Supp;
-   else if (STREQ(name, "Addr2"))   *skind = Addr2Supp;
-   else if (STREQ(name, "Addr4"))   *skind = Addr4Supp;
-   else if (STREQ(name, "Addr8"))   *skind = Addr8Supp;
-   else if (STREQ(name, "Free"))    *skind = FreeSupp;
+   SuppKind skind;
+
+   if      (STREQ(name, "Param"))   skind = ParamSupp;
+   else if (STREQ(name, "CoreMem")) skind = CoreMemSupp;
+   else if (STREQ(name, "Value0"))  skind = Value0Supp; /* backwards compat */
+   else if (STREQ(name, "Cond"))    skind = Value0Supp;
+   else if (STREQ(name, "Value1"))  skind = Value1Supp;
+   else if (STREQ(name, "Value2"))  skind = Value2Supp;
+   else if (STREQ(name, "Value4"))  skind = Value4Supp;
+   else if (STREQ(name, "Value8"))  skind = Value8Supp;
+   else if (STREQ(name, "Addr1"))   skind = Addr1Supp;
+   else if (STREQ(name, "Addr2"))   skind = Addr2Supp;
+   else if (STREQ(name, "Addr4"))   skind = Addr4Supp;
+   else if (STREQ(name, "Addr8"))   skind = Addr8Supp;
+   else if (STREQ(name, "Free"))    skind = FreeSupp;
    else
       return False;
 
+   VG_(set_supp_kind)(su, skind);
    return True;
 }
 
-Bool SK_(read_extra_suppression_info) ( Int fd, Char* buf, Int nBuf,
-                                        SkinSupp *s )
+Bool SK_(read_extra_suppression_info) ( Int fd, Char* buf, Int nBuf, Supp *su )
 {
    Bool eof;
 
-   if (s->skind == ParamSupp) {
+   if (VG_(get_supp_kind)(su) == ParamSupp) {
       eof = VG_(get_line) ( fd, buf, nBuf );
       if (eof) return False;
-      s->string = VG_(strdup)(buf);
+      VG_(set_supp_string)(su, VG_(strdup)(buf));
   }
   return True;
 }
 
-extern Bool SK_(error_matches_suppression)(SkinError* err, SkinSupp* su)
+extern Bool SK_(error_matches_suppression)(Error* err, Supp* su)
 {
   UInt su_size;
-  MemCheckError* err_extra = err->extra;
+  MemCheckError* err_extra = VG_(get_error_extra)(err);
+  ErrorKind ekind = VG_(get_error_kind )(err);
 
-  switch (su->skind) {
+  switch (VG_(get_supp_kind)(su)) {
     case ParamSupp:
-       return (err->ekind == ParamErr && STREQ(su->string, err->string));
+       return (ekind == ParamErr
+               && STREQ(VG_(get_error_string)(err), VG_(get_supp_string)(su)));
 
     case CoreMemSupp:
-       return (err->ekind == CoreMemErr && STREQ(su->string, err->string));
+       return (ekind == CoreMemErr
+               && STREQ(VG_(get_error_string)(err), VG_(get_supp_string)(su)));
 
     case Value0Supp: su_size = 0; goto value_case;
     case Value1Supp: su_size = 1; goto value_case;
@@ -582,21 +592,22 @@ extern Bool SK_(error_matches_suppression)(SkinError* err, SkinSupp* su)
     case Value4Supp: su_size = 4; goto value_case;
     case Value8Supp: su_size = 8; goto value_case;
     value_case:
-       return (err->ekind == ValueErr && err_extra->size == su_size);
+       return (ekind == ValueErr && err_extra->size == su_size);
 
     case Addr1Supp: su_size = 1; goto addr_case;
     case Addr2Supp: su_size = 2; goto addr_case;
     case Addr4Supp: su_size = 4; goto addr_case;
     case Addr8Supp: su_size = 8; goto addr_case;
    addr_case:
-       return (err->ekind == AddrErr && err_extra->size == su_size);
+       return (ekind == AddrErr && err_extra->size == su_size);
 
     case FreeSupp:
-       return (err->ekind == FreeErr || err->ekind == FreeMismatchErr);
+       return (ekind == FreeErr || ekind == FreeMismatchErr);
 
     default:
       VG_(printf)("Error:\n"
-                  "  unknown MemCheck suppression type %d\n", su->skind);
+                  "  unknown MemCheck suppression type %d\n",
+                  VG_(get_supp_kind)(su));
       VG_(skin_panic)("unknown suppression type in "
                       "SK_(error_matches_suppression)");
   }
diff --git a/memcheck/mc_main.c b/memcheck/mc_main.c
index 288359a16..0d6e0f7fd 100644
--- a/memcheck/mc_main.c
+++ b/memcheck/mc_main.c
@@ -1519,18 +1519,18 @@ void fpu_write_check_SLOWLY ( Addr addr, Int size )
 static __inline__
 void set_where( ShadowChunk* sc, ExeContext* ec )
 {
-   sc->skin_extra[0] = (UInt)ec;
+   VG_(set_sc_extra)( sc, 0, (UInt)ec );
 }
 
 static __inline__
 ExeContext *get_where( ShadowChunk* sc )
 {
-   return (ExeContext*)sc->skin_extra[0];
+   return (ExeContext*)VG_(get_sc_extra)(sc, 0);
 }
 
 void SK_(complete_shadow_chunk) ( ShadowChunk* sc, ThreadState* tst )
 {
-   set_where( sc, VG_(get_ExeContext) ( tst ) );
+   VG_(set_sc_extra) ( sc, 0, (UInt)VG_(get_ExeContext)(tst) );
 }
 
 /*------------------------------------------------------------*/
@@ -1547,7 +1547,7 @@ static __attribute__ ((unused))
 {
    ShadowChunk* sc;
    Int n = 0;
-   for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
+   for (sc = vg_freed_list_start; sc != NULL; sc = VG_(get_sc_next)(sc))
       n++;
    return n;
 }
@@ -1558,8 +1558,8 @@ static __attribute__ ((unused))
    ShadowChunk* sc;
    Int n = 0;
    /* VG_(printf)("freelist sanity\n"); */
-   for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
-      n += sc->size;
+   for (sc = vg_freed_list_start; sc != NULL; sc = VG_(get_sc_next)(sc))
+      n += VG_(get_sc_size)(sc);
    sk_assert(n == vg_freed_list_volume);
 }
@@ -1573,14 +1573,14 @@ static void add_to_freed_queue ( ShadowChunk* sc )
    if (vg_freed_list_end == NULL) {
       sk_assert(vg_freed_list_start == NULL);
       vg_freed_list_end = vg_freed_list_start = sc;
-      vg_freed_list_volume = sc->size;
+      vg_freed_list_volume = VG_(get_sc_size)(sc);
   } else {
-      sk_assert(vg_freed_list_end->next == NULL);
-      vg_freed_list_end->next = sc;
+      sk_assert(VG_(get_sc_next)(vg_freed_list_end) == NULL);
+      VG_(set_sc_next)(vg_freed_list_end, sc);
       vg_freed_list_end = sc;
-      vg_freed_list_volume += sc->size;
+      vg_freed_list_volume += VG_(get_sc_size)(sc);
   }
-   sc->next = NULL;
+   VG_(set_sc_next)(sc, NULL);
 
    /* Release enough of the oldest blocks to bring the free queue
       volume below vg_clo_freelist_vol. */
@@ -1591,16 +1591,16 @@ static void add_to_freed_queue ( ShadowChunk* sc )
       sk_assert(vg_freed_list_end != NULL);
 
       sc1 = vg_freed_list_start;
-      vg_freed_list_volume -= sc1->size;
+      vg_freed_list_volume -= VG_(get_sc_size)(sc1);
       /* VG_(printf)("volume now %d\n", vg_freed_list_volume); */
       sk_assert(vg_freed_list_volume >= 0);
 
       if (vg_freed_list_start == vg_freed_list_end) {
          vg_freed_list_start = vg_freed_list_end = NULL;
       } else {
-         vg_freed_list_start = sc1->next;
+         vg_freed_list_start = VG_(get_sc_next)(sc1);
       }
-      sc1->next = NULL; /* just paranoia */
+      VG_(set_sc_next)(sc1, NULL); /* just paranoia */
       VG_(free_ShadowChunk) ( sc1 );
   }
 }
@@ -1613,7 +1613,7 @@ ShadowChunk* SK_(any_matching_freed_ShadowChunks)
    /* No point looking through freed blocks if we're not keeping
       them around for a while... */
-   for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
+   for (sc = vg_freed_list_start; sc != NULL; sc = VG_(get_sc_next)(sc))
       if (p(sc))
          return sc;
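[Editorial note -- illustration only, not part of the patch. With sc->next, sc->size, sc->data and sc->skin_extra[] now private, a skin's own chunk bookkeeping goes through the VG_(get_sc_*)()/VG_(set_sc_*)() calls, as in this sketch; `my_freed_list_start' is a hypothetical skin-local list head.]

   static ShadowChunk* my_freed_list_start = NULL;   /* hypothetical */

   /* Sum the payload sizes of all chunks on the skin's freed queue. */
   static UInt freed_queue_volume ( void )
   {
      ShadowChunk* sc;
      UInt n = 0;
      for (sc = my_freed_list_start; sc != NULL; sc = VG_(get_sc_next)(sc))
         n += VG_(get_sc_size)(sc);              /* was sc->size */
      return n;
   }

   /* Record the allocation context in the chunk's single `extra' word. */
   void SK_(complete_shadow_chunk) ( ShadowChunk* sc, ThreadState* tst )
   {
      VG_(set_sc_extra)( sc, 0, (UInt)VG_(get_ExeContext)(tst) );
   }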
@@ -1937,57 +1937,56 @@ Char* SK_(usage)(void)
 /*--- Setup                                                ---*/
 /*------------------------------------------------------------*/
 
-void SK_(pre_clo_init)(VgDetails* details, VgNeeds* needs, VgTrackEvents* track)
+void SK_(pre_clo_init)(void)
 {
-   details->name             = "Memcheck";
-   details->version          = NULL;
-   details->description      = "a.k.a. Valgrind, a memory error detector";
-   details->copyright_author =
-      "Copyright (C) 2000-2002, and GNU GPL'd, by Julian Seward.";
-   details->bug_reports_to   = "jseward@acm.org";
+   VG_(details_name)            ("Memcheck");
+   VG_(details_version)         (NULL);
+   VG_(details_description)     ("a.k.a. Valgrind, a memory error detector");
+   VG_(details_copyright_author)(
+      "Copyright (C) 2002, and GNU GPL'd, by Julian Seward.");
+   VG_(details_bug_reports_to)  ("jseward@acm.org");
 
-   needs->core_errors          = True;
-   needs->skin_errors          = True;
-   needs->libc_freeres         = True;
-   needs->sizeof_shadow_block  = 1;
-   needs->basic_block_discards = False;
-   needs->shadow_regs          = True;
-   needs->command_line_options = True;
-   needs->client_requests      = True;
-   needs->extended_UCode       = True;
-   needs->syscall_wrapper      = True;
-   needs->alternative_free     = True;
-   needs->sanity_checks        = True;
+   VG_(needs_core_errors)         ();
+   VG_(needs_skin_errors)         ();
+   VG_(needs_libc_freeres)        ();
+   VG_(needs_sizeof_shadow_block) ( 1 );
+   VG_(needs_shadow_regs)         ();
+   VG_(needs_command_line_options)();
+   VG_(needs_client_requests)     ();
+   VG_(needs_extended_UCode)      ();
+   VG_(needs_syscall_wrapper)     ();
+   VG_(needs_alternative_free)    ();
+   VG_(needs_sanity_checks)       ();
 
-   track->new_mem_startup       = & memcheck_new_mem_startup;
-   track->new_mem_heap          = & memcheck_new_mem_heap;
-   track->new_mem_stack         = & SK_(make_writable);
-   track->new_mem_stack_aligned = & make_writable_aligned;
-   track->new_mem_stack_signal  = & SK_(make_writable);
-   track->new_mem_brk           = & SK_(make_writable);
-   track->new_mem_mmap          = & memcheck_set_perms;
+   VG_(track_new_mem_startup)      ( & memcheck_new_mem_startup );
+   VG_(track_new_mem_heap)         ( & memcheck_new_mem_heap );
+   VG_(track_new_mem_stack)        ( & SK_(make_writable) );
+   VG_(track_new_mem_stack_aligned)( & make_writable_aligned );
+   VG_(track_new_mem_stack_signal) ( & SK_(make_writable) );
+   VG_(track_new_mem_brk)          ( & SK_(make_writable) );
+   VG_(track_new_mem_mmap)         ( & memcheck_set_perms );
 
-   track->copy_mem_heap         = & copy_address_range_state;
-   track->copy_mem_remap        = & copy_address_range_state;
-   track->change_mem_mprotect   = & memcheck_set_perms;
+   VG_(track_copy_mem_heap)        ( & copy_address_range_state );
+   VG_(track_copy_mem_remap)       ( & copy_address_range_state );
+   VG_(track_change_mem_mprotect)  ( & memcheck_set_perms );
 
-   track->ban_mem_heap          = & SK_(make_noaccess);
-   track->ban_mem_stack         = & SK_(make_noaccess);
+   VG_(track_ban_mem_heap)         ( & SK_(make_noaccess) );
+   VG_(track_ban_mem_stack)        ( & SK_(make_noaccess) );
 
-   track->die_mem_heap          = & SK_(make_noaccess);
-   track->die_mem_stack         = & SK_(make_noaccess);
-   track->die_mem_stack_aligned = & make_noaccess_aligned;
-   track->die_mem_stack_signal  = & SK_(make_noaccess);
-   track->die_mem_brk           = & SK_(make_noaccess);
-   track->die_mem_munmap        = & SK_(make_noaccess);
+   VG_(track_die_mem_heap)         ( & SK_(make_noaccess) );
+   VG_(track_die_mem_stack)        ( & SK_(make_noaccess) );
+   VG_(track_die_mem_stack_aligned)( & make_noaccess_aligned );
+   VG_(track_die_mem_stack_signal) ( & SK_(make_noaccess) );
+   VG_(track_die_mem_brk)          ( & SK_(make_noaccess) );
+   VG_(track_die_mem_munmap)       ( & SK_(make_noaccess) );
 
-   track->bad_free              = & SK_(record_free_error);
-   track->mismatched_free       = & SK_(record_freemismatch_error);
+   VG_(track_bad_free)             ( & SK_(record_free_error) );
+   VG_(track_mismatched_free)      ( & SK_(record_freemismatch_error) );
 
-   track->pre_mem_read          = & check_is_readable;
-   track->pre_mem_read_asciiz   = & check_is_readable_asciiz;
-   track->pre_mem_write         = & check_is_writable;
-   track->post_mem_write        = & SK_(make_readable);
+   VG_(track_pre_mem_read)         ( & check_is_readable );
+   VG_(track_pre_mem_read_asciiz)  ( & check_is_readable_asciiz );
+   VG_(track_pre_mem_write)        ( & check_is_writable );
+   VG_(track_post_mem_write)       ( & SK_(make_readable) );
 
    VG_(register_compact_helper)((Addr) & SK_(helper_value_check4_fail));
    VG_(register_compact_helper)((Addr) & SK_(helper_value_check0_fail));
diff --git a/memcheck/mc_translate.c b/memcheck/mc_translate.c
index 657b99801..faa270e07 100644
--- a/memcheck/mc_translate.c
+++ b/memcheck/mc_translate.c
@@ -237,14 +237,13 @@ void SK_(pp_XUInstr)(UInstr* u)
 }
 
-Int SK_(get_Xreg_usage)(UInstr* u, Tag tag, RegUse* arr)
+Int SK_(get_Xreg_usage)(UInstr* u, Tag tag, Int* regs, Bool* isWrites)
 {
-#  define RD(ono)    VG_UINSTR_READS_REG(ono)
-#  define WR(ono)    VG_UINSTR_WRITES_REG(ono)
+#  define RD(ono)    VG_UINSTR_READS_REG(ono, regs, isWrites)
+#  define WR(ono)    VG_UINSTR_WRITES_REG(ono, regs, isWrites)
 
    Int n = 0;
    switch (u->opcode) {
-
       case TAG1: RD(1); WR(1); break;
       case TAG2: RD(1); RD(2); WR(2); break;
       case LOADV: RD(1); WR(2); break;
@@ -499,8 +498,8 @@ static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
    UInstr* u_in;
    Int     qs, qd, qt, qtt;
    Bool    bogusLiterals;
-   cb = VG_(alloc_UCodeBlock)();
-   cb->nextTemp = cb_in->nextTemp;
+
+   cb = VG_(setup_UCodeBlock)(cb_in);
 
    /* Scan the block to look for bogus literals.  These are magic
      numbers which particularly appear in hand-optimised / inlined
@@ -511,8 +510,8 @@ static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
    bogusLiterals = False;
 
    if (SK_(clo_avoid_strlen_errors)) {
-      for (i = 0; i < cb_in->used; i++) {
-         u_in = &cb_in->instrs[i];
+      for (i = 0; i < VG_(get_num_instrs)(cb_in); i++) {
+         u_in = VG_(get_instr)(cb_in, i);
          switch (u_in->opcode) {
             case ADD: case SUB: case MOV:
                if (u_in->size == 4 && u_in->tag1 == Literal)
@@ -535,9 +534,9 @@ static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
       }
    }
 
-   for (i = 0; i < cb_in->used; i++) {
+   for (i = 0; i < VG_(get_num_instrs)(cb_in); i++) {
+      u_in = VG_(get_instr)(cb_in, i);
       qs = qd = qt = qtt = INVALID_TEMPREG;
-      u_in = &cb_in->instrs[i];
 
       switch (u_in->opcode) {
@@ -853,8 +852,8 @@ static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
             uInstr1(cb, SETV, 0, TempReg, qt);
             uInstr2(cb, CMOV, 4, TempReg, qs, TempReg, qd);
-            LAST_UINSTR(cb).cond    = u_in->cond;
-            LAST_UINSTR(cb).flags_r = u_in->flags_r;
+            uCond(cb, u_in->cond);
+            uFlagsRWU(cb, u_in->flags_r, u_in->flags_w, FlagsEmpty);
 
             VG_(copy_UInstr)(cb, u_in);
             break;
@@ -977,8 +976,8 @@ static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
             qt  = newShadow(cb);
             qtt = newShadow(cb);
             uInstr1(cb, SETV, 0, TempReg, qt);
-            for (j = i-1; cb_in->instrs[j].opcode != CALLM_S; j--) {
-               uu = & cb_in->instrs[j];
+            for (j = i-1; VG_(get_instr)(cb_in, j)->opcode != CALLM_S; j--) {
+               uu = VG_(get_instr)(cb_in, j);
                if (uu->opcode != PUSH) continue;
                /* cast via a temporary */
                uInstr2(cb, MOV, 4, TempReg, SHADOW(uu->val1),
@@ -1003,8 +1002,8 @@ static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
                a dummy tempreg. */
             res_used = False;
-            for (j = i+1; cb_in->instrs[j].opcode != CALLM_E; j++) {
-               uu = & cb_in->instrs[j];
+            for (j = i+1; VG_(get_instr)(cb_in, j)->opcode != CALLM_E; j++) {
+               uu = VG_(get_instr)(cb_in, j);
                if (uu->opcode != POP) continue;
                /* Cast via a temp. */
                uInstr2(cb, MOV, 4, TempReg, qt, TempReg, qtt);
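[Editorial note -- illustration only, not part of the patch. A skin that sets the extended_UCode need now reports register usage for its extra opcodes through the two parallel arrays; `MYOP' is a hypothetical extended opcode, used only for the sketch, that reads operand 1 and writes operand 2.]

   Int SK_(get_Xreg_usage) ( UInstr* u, Tag tag, Int* regs, Bool* isWrites )
   {
   #  define RD(ono)  VG_UINSTR_READS_REG(ono, regs, isWrites)
   #  define WR(ono)  VG_UINSTR_WRITES_REG(ono, regs, isWrites)

      Int n = 0;
      switch (u->opcode) {
         case MYOP: RD(1); WR(2); break;   /* hypothetical opcode */
         default:   break;                 /* real skins would panic here */
      }

   #  undef RD
   #  undef WR
      return n;                            /* number of entries filled in */
   }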
@@ -1139,10 +1138,11 @@ Bool VG_(clo_memcheck_codegen) = False;
 static void vg_delete_redundant_SETVs ( UCodeBlock* cb )
 {
    Int     i, j, k;
-   Int     n_temps = cb->nextTemp;
+   Int     n_temps = VG_(get_num_temps)(cb);
    Bool*   next_is_write;
    UInstr* u;
-   RegUse  tempUse[3];
+   Int     tempUse[3];
+   Bool    isWrites[3];
 
    if (n_temps == 0) return;
@@ -1150,8 +1150,8 @@ static void vg_delete_redundant_SETVs ( UCodeBlock* cb )
    for (i = 0; i < n_temps; i++)
       next_is_write[i] = True;
 
-   for (i = cb->used-1; i >= 0; i--) {
-      u = &cb->instrs[i];
+   for (i = VG_(get_num_instrs)(cb) - 1; i >= 0; i--) {
+      u = VG_(get_instr)(cb, i);
 
       /* If we're not checking address V bits, there will be a lot of
          GETVs, TAG1s and TAG2s calculating values which are never
@@ -1211,11 +1211,10 @@ static void vg_delete_redundant_SETVs ( UCodeBlock* cb )
       }
       else {
          /* Find out what this insn does to the temps. */
-         k = VG_(get_reg_usage)(u, TempReg, &tempUse[0]);
+         k = VG_(get_reg_usage)(u, TempReg, &tempUse[0], &isWrites[0]);
          sk_assert(k <= 3);
          for (j = k-1; j >= 0; j--) {
-            next_is_write[ tempUse[j].num ]
-               = tempUse[j].isWrite;
+            next_is_write[ tempUse[j] ] = isWrites[j];
         }
      }
   }
@@ -1239,10 +1238,11 @@ static void vg_delete_redundant_SETVs ( UCodeBlock* cb )
 static void vg_propagate_definedness ( UCodeBlock* cb )
 {
    Int     i, j, k, t;
-   Int     n_temps = cb->nextTemp;
+   Int     n_temps = VG_(get_num_temps)(cb);
    UChar*  def;
    UInstr* u;
-   RegUse  tempUse[3];
+   Int     tempUse[3];
+   Bool    isWrites[3];
 
    if (n_temps == 0) return;
@@ -1253,8 +1253,8 @@ static void vg_propagate_definedness ( UCodeBlock* cb )
    /* Run forwards, detecting and using the all-defined property. */
 
-   for (i = 0; i < cb->used; i++) {
-      u = &cb->instrs[i];
+   for (i = 0; i < VG_(get_num_instrs)(cb); i++) {
+      u = VG_(get_instr)(cb, i);
       switch (u->opcode) {
 
       /* Tag-handling uinstrs. */
@@ -1470,12 +1470,12 @@ static void vg_propagate_definedness ( UCodeBlock* cb )
       unhandled:
          /* We don't know how to handle this uinstr.  Be safe, and
             set to VGC_VALUE or VGC_UNDEF all temps written by it. */
-         k = VG_(get_reg_usage)(u, TempReg, &tempUse[0]);
+         k = VG_(get_reg_usage)(u, TempReg, &tempUse[0], &isWrites[0]);
          sk_assert(k <= 3);
          for (j = 0; j < k; j++) {
-            t = tempUse[j].num;
+            t = tempUse[j];
             sk_assert(t >= 0 && t < n_temps);
-            if (!tempUse[j].isWrite) {
+            if (!isWrites[j]) {
               /* t is read; ignore it. */
               if (0&& VGC_IS_SHADOW(t) && def[t] <= 4)
                  VG_(printf)("ignoring def %d at %s %s\n",
diff --git a/none/nl_main.c b/none/nl_main.c
index 609e6e3fc..dc31c8170 100644
--- a/none/nl_main.c
+++ b/none/nl_main.c
@@ -32,15 +32,14 @@ VG_DETERMINE_INTERFACE_VERSION
 
-void SK_(pre_clo_init)(VgDetails* details, VgNeeds* not_used1,
-                       VgTrackEvents* not_used2)
+void SK_(pre_clo_init)(void)
 {
-   details->name             = "Nulgrind";
-   details->version          = NULL;
-   details->description      = "a binary JIT-compiler";
-   details->copyright_author =
-      "Copyright (C) 2002, and GNU GPL'd, by Nicholas Nethercote.";
-   details->bug_reports_to   = "njn25@cam.ac.uk";
+   VG_(details_name)            ("Nulgrind");
+   VG_(details_version)         (NULL);
+   VG_(details_description)     ("a binary JIT-compiler");
+   VG_(details_copyright_author)(
+      "Copyright (C) 2002, and GNU GPL'd, by Nicholas Nethercote.");
+   VG_(details_bug_reports_to)  ("njn25@cam.ac.uk");
 
    /* No needs, no core events to track */
 }
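[Editorial note -- illustration only, not part of the patch. On the consuming side, VG_(get_reg_usage) now fills the same two parallel arrays, which callers index in lock-step, as the rewritten memcheck analysis passes above do. A small hypothetical helper:]

   /* Print which TempRegs a UInstr reads and writes, using the new
      parallel-array form of VG_(get_reg_usage). */
   static void print_temp_usage ( UInstr* u )
   {
      Int  tempUse[3];     /* at most 3 TempReg operands per UInstr */
      Bool isWrites[3];
      Int  i, n;

      n = VG_(get_reg_usage)(u, TempReg, &tempUse[0], &isWrites[0]);
      for (i = 0; i < n; i++)
         VG_(printf)("t%d is %s\n", tempUse[i],
                     isWrites[i] ? "written" : "read");
   }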