Added reference counting to segments. Synchronization objects (mutex, semaphore, barrier, rwlock) now keep a pointer to a segment instead of copying a vector clock for modeling causal relationships.
git-svn-id: svn://svn.valgrind.org/valgrind/trunk@7727
This commit is contained in:
parent 6966bc8a44
commit 4bc9d8c635
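The core of the change is a small reference-counting protocol on segments: sg_new() hands out the first reference, sg_get() takes another, sg_put() drops one and frees the segment once the count reaches zero, and thread_get_latest_segment() swaps a cached reference for the owning thread's newest segment. The sketch below is a simplified, self-contained illustration of that idiom; the function names mirror the diff, but the Segment type and the update_cached_segment() helper are stand-ins that omit the vector clock and bitmap, so this is not the DRD code itself.

/* Minimal sketch of the refcounting idiom introduced by this commit.
 * The function names mirror the diff; the Segment type is a simplified
 * stand-in, not the DRD structure. */
#include <assert.h>
#include <stdlib.h>

typedef struct segment {
  int refcnt;   /* number of owners: the thread's segment list plus any
                   synchronization objects that cached this segment */
} Segment;

static Segment* sg_new(void)
{
  Segment* sg = malloc(sizeof(*sg));
  assert(sg);
  sg->refcnt = 1;   /* the creating thread holds the first reference */
  return sg;
}

static Segment* sg_get(Segment* sg)   /* increment the reference count */
{
  assert(sg);
  sg->refcnt++;
  return sg;
}

static void sg_put(Segment* sg)       /* decrement; deallocate at zero */
{
  if (sg == NULL)
    return;
  assert(sg->refcnt >= 1);
  if (--sg->refcnt == 0)
    free(sg);
}

/* Counterpart of thread_get_latest_segment(): drop the reference that a
 * synchronization object cached earlier and share the thread's latest
 * segment instead of copying its vector clock. */
static void update_cached_segment(Segment** slot, Segment* latest)
{
  sg_put(*slot);
  *slot = sg_get(latest);
}

Sharing one counted pointer instead of a private VectorClock copy lets a later lock or wait combine clocks straight out of the segment (for example &p->last_locked_segment->vc in the mutex hunks below), and the segment is deallocated only when neither the thread's segment list nor any synchronization object refers to it anymore.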
@@ -45,7 +45,7 @@ struct barrier_thread_info
 UWord tid; // A DrdThreadId
 Word iteration; // iteration of last pthread_barrier_wait()
 // call thread tid participated in.
-VectorClock vc[2]; // vector clocks corresponding to the last two
+Segment* sg[2]; // Segments of the last two
 // pthread_barrier() calls by thread tid.
 };

@@ -77,15 +77,15 @@ static void barrier_thread_initialize(struct barrier_thread_info* const p,
 {
 p->tid = tid;
 p->iteration = iteration;
-vc_init(&p->vc[0], 0, 0);
-vc_init(&p->vc[1], 0, 0);
+p->sg[0] = 0;
+p->sg[1] = 0;
 }

 /** Deallocate the memory that was allocated in barrier_thread_initialize(). */
 static void barrier_thread_destroy(struct barrier_thread_info* const p)
 {
-vc_cleanup(&p->vc[0]);
-vc_cleanup(&p->vc[1]);
+sg_put(p->sg[0]);
+sg_put(p->sg[1]);
 }

 /** Initialize the structure *p with the specified client-side barrier address,

@@ -112,7 +112,6 @@ void barrier_initialize(struct barrier_info* const p,
 tl_assert(sizeof(((struct barrier_thread_info*)0)->tid)
 >= sizeof(DrdThreadId));
 p->oset = VG_(OSetGen_Create)(0, 0, VG_(malloc), VG_(free));
-vc_init(&p->finished_threads_vc, 0, 0);
 }

 /** Deallocate the memory allocated by barrier_initialize() and in p->oset.

@@ -141,7 +140,6 @@ void barrier_cleanup(struct barrier_info* p)
 barrier_thread_destroy(q);
 }
 VG_(OSetGen_Destroy)(p->oset);
-vc_cleanup(&p->finished_threads_vc);
 }

 /** Look up the client-side barrier address barrier in s_barrier[]. If not

@@ -192,7 +190,7 @@ void barrier_init(const Addr barrier,
 if (reinitialization)
 {
 VG_(message)(Vg_UserMsg,
-"[%d/%d] barrier_reinit %s 0x%lx count %d -> %d",
+"[%d/%d] barrier_reinit %s 0x%lx count %ld -> %ld",
 VG_(get_running_tid)(),
 thread_get_running_tid(),
 barrier_get_typename(p),

@@ -263,20 +261,20 @@ void barrier_pre_wait(const DrdThreadId tid, const Addr barrier,
 p = barrier_get(barrier);
 if (p == 0 && barrier_type == gomp_barrier)
 {
-VG_(message)(Vg_UserMsg, "");
+VG_(message)(Vg_UserMsg, "%s", "");
 VG_(message)(Vg_UserMsg,
 "Please verify whether gcc has been configured"
 " with option --disable-linux-futex.");
 VG_(message)(Vg_UserMsg,
 "See also the section about OpenMP in the DRD manual.");
-VG_(message)(Vg_UserMsg, "");
+VG_(message)(Vg_UserMsg, "%s", "");
 }
 tl_assert(p);

 if (s_trace_barrier)
 {
 VG_(message)(Vg_UserMsg,
-"[%d/%d] barrier_pre_wait %s 0x%lx iteration %d",
+"[%d/%d] barrier_pre_wait %s 0x%lx iteration %ld",
 VG_(get_running_tid)(),
 thread_get_running_tid(),
 barrier_get_typename(p),

@@ -292,8 +290,7 @@ void barrier_pre_wait(const DrdThreadId tid, const Addr barrier,
 VG_(OSetGen_Insert)(p->oset, q);
 tl_assert(VG_(OSetGen_Lookup)(p->oset, &word_tid) == q);
 }
-vc_assign(&q->vc[p->pre_iteration], &thread_get_segment(tid)->vc);
-tl_assert(q->vc[p->pre_iteration].size > 0);
+thread_get_latest_segment(&q->sg[p->pre_iteration], tid);

 if (--p->pre_waiters_left <= 0)
 {

@@ -313,7 +310,7 @@ void barrier_post_wait(const DrdThreadId tid, const Addr barrier,
 if (s_trace_barrier)
 {
 VG_(message)(Vg_UserMsg,
-"[%d/%d] barrier_post_wait %s 0x%lx iteration %d",
+"[%d/%d] barrier_post_wait %s 0x%lx iteration %ld",
 VG_(get_running_tid)(),
 tid,
 p ? barrier_get_typename(p) : "(?)",

@@ -340,10 +337,10 @@ void barrier_post_wait(const DrdThreadId tid, const Addr barrier,
 {
 if (r != q)
 {
-thread_combine_vc2(tid, &r->vc[p->post_iteration]);
+tl_assert(r->sg[p->post_iteration]);
+thread_combine_vc2(tid, &r->sg[p->post_iteration]->vc);
 }
 }
-thread_combine_vc2(tid, &p->finished_threads_vc);

 thread_new_segment(tid);

@@ -366,7 +363,6 @@ void barrier_thread_delete(const DrdThreadId tid)
 struct barrier_thread_info* q;
 const UWord word_tid = tid;
 q = VG_(OSetGen_Remove)(p->oset, &word_tid);
-vc_combine(&p->finished_threads_vc, &q->vc[p->post_iteration]);
 barrier_thread_destroy(q);
 VG_(OSetGen_FreeNode)(p->oset, q);
 }

@@ -63,7 +63,7 @@ struct mutex_info
 MutexT mutex_type; // pthread_mutex_t or pthread_spinlock_t.
 int recursion_count; // 0 if free, >= 1 if locked.
 DrdThreadId owner; // owner if locked, last owner if free.
-VectorClock vc; // vector clock associated with last unlock.
+Segment* last_locked_segment;
 };

 struct cond_info

@@ -72,8 +72,8 @@ struct cond_info
 ObjType type;
 void (*cleanup)(union drd_clientobj*);
 int waiter_count;
-Addr mutex; // Client mutex specified in pthread_cond_wait() call, and null
-// if no client threads are currently waiting on this cond.var.
+Addr mutex; //Client mutex specified in pthread_cond_wait() call, and null
+//if no client threads are currently waiting on this cond.var.
 };

 struct semaphore_info

@@ -84,7 +84,7 @@ struct semaphore_info
 UWord value; // Semaphore value.
 UWord waiters; // Number of threads inside sem_wait().
 DrdThreadId last_sem_post_tid; // Thread ID associated with last sem_post().
-VectorClock vc; // Vector clock of last sem_post() call.
+Segment* last_sem_post_segment;
 };

 struct barrier_info

@@ -99,7 +99,6 @@ struct barrier_info
 Word pre_waiters_left; // number of waiters left for a complete barrier.
 Word post_waiters_left; // number of waiters left for a complete barrier.
 OSet* oset; // Thread-specific barrier information.
-VectorClock finished_threads_vc;
 };

 struct rwlock_info

@@ -62,11 +62,11 @@ void mutex_initialize(struct mutex_info* const p,
 tl_assert(mutex != 0);

 tl_assert(p->a1 == mutex);
-p->cleanup = (void(*)(DrdClientobj*))&mutex_cleanup;
-p->mutex_type = mutex_type;
-p->recursion_count = 0;
-p->owner = DRD_INVALID_THREADID;
-vc_init(&p->vc, 0, 0);
+p->cleanup = (void(*)(DrdClientobj*))&mutex_cleanup;
+p->mutex_type = mutex_type;
+p->recursion_count = 0;
+p->owner = DRD_INVALID_THREADID;
+p->last_locked_segment = 0;
 }

 /** Deallocate the memory that was allocated by mutex_initialize(). */

@@ -92,7 +92,8 @@ static void mutex_cleanup(struct mutex_info* p)
 &MEI);
 }

-vc_cleanup(&p->vc);
+sg_put(p->last_locked_segment);
+p->last_locked_segment = 0;
 }

 static

@@ -276,7 +277,10 @@ void mutex_post_lock(const Addr mutex, const Bool took_lock)
 const DrdThreadId last_owner = p->owner;

 if (last_owner != drd_tid && last_owner != DRD_INVALID_THREADID)
-thread_combine_vc2(drd_tid, mutex_get_last_vc(mutex));
+{
+tl_assert(p->last_locked_segment);
+thread_combine_vc2(drd_tid, &p->last_locked_segment->vc);
+}
 thread_new_segment(drd_tid);

 p->owner = drd_tid;

@@ -307,7 +311,6 @@ void mutex_unlock(const Addr mutex, const MutexT mutex_type)
 {
 const DrdThreadId drd_tid = thread_get_running_tid();
 const ThreadId vg_tid = VG_(get_running_tid)();
-const VectorClock* const vc = thread_get_vc(drd_tid);
 struct mutex_info* const p = mutex_get(mutex);

 if (s_trace_mutex)

@@ -318,8 +321,7 @@ void mutex_unlock(const Addr mutex, const MutexT mutex_type)
 drd_tid,
 p ? mutex_get_typename(p) : "?",
 mutex,
-p ? p->recursion_count : 0,
-p ? p->owner : 0);
+p ? p->recursion_count : 0);
 }

 if (p == 0 || mutex_type == mutex_type_invalid_mutex)

@@ -347,7 +349,7 @@ void mutex_unlock(const Addr mutex, const MutexT mutex_type)
 tl_assert(p);
 if (p->mutex_type != mutex_type)
 {
-VG_(message)(Vg_UserMsg, "??? mutex %p: type changed from %d into %d",
+VG_(message)(Vg_UserMsg, "??? mutex 0x%lx: type changed from %d into %d",
 p->a1, p->mutex_type, mutex_type);
 }
 tl_assert(p->mutex_type == mutex_type);

@@ -372,8 +374,8 @@ void mutex_unlock(const Addr mutex, const MutexT mutex_type)
 /* This pthread_mutex_unlock() call really unlocks the mutex. Save the */
 /* current vector clock of the thread such that it is available when */
 /* this mutex is locked again. */
-vc_assign(&p->vc, vc);
+thread_get_latest_segment(&p->last_locked_segment, drd_tid);

 thread_new_segment(drd_tid);
 }
 }

@@ -422,12 +424,6 @@ Bool mutex_is_locked_by(const Addr mutex, const DrdThreadId tid)
 return False;
 }

-const VectorClock* mutex_get_last_vc(const Addr mutex)
-{
-struct mutex_info* const p = mutex_get(mutex);
-return p ? &p->vc : 0;
-}
-
 int mutex_get_recursion_count(const Addr mutex)
 {
 struct mutex_info* const p = mutex_get(mutex);

@@ -51,7 +51,6 @@ void mutex_unlock(const Addr mutex, const MutexT mutex_type);
 const char* mutex_get_typename(struct mutex_info* const p);
 const char* mutex_type_name(const MutexT mt);
 Bool mutex_is_locked_by(const Addr mutex, const DrdThreadId tid);
-const VectorClock* mutex_get_last_vc(const Addr mutex);
 int mutex_get_recursion_count(const Addr mutex);
 void mutex_thread_delete(const DrdThreadId tid);
 ULong get_mutex_lock_count(void);

@@ -39,11 +39,11 @@

 struct rwlock_thread_info
 {
-UWord tid; // DrdThreadId.
-UInt reader_nesting_count;
-UInt writer_nesting_count;
-VectorClock vc; // Vector clock associated with last unlock by this thread.
-Bool last_lock_was_writer_lock;
+UWord tid; // DrdThreadId.
+UInt reader_nesting_count;
+UInt writer_nesting_count;
+Segment* last_unlock_segment; // Segment of last unlock call by this thread.
+Bool last_lock_was_writer_lock;
 };

@@ -129,7 +129,7 @@ struct rwlock_thread_info* lookup_or_insert_node(OSet* oset, const UWord tid)
 q->tid = tid;
 q->reader_nesting_count = 0;
 q->writer_nesting_count = 0;
-vc_init(&q->vc, 0, 0);
+q->last_unlock_segment = 0;
 q->last_lock_was_writer_lock = False;
 VG_(OSetGen_Insert)(oset, q);
 }

@@ -148,7 +148,7 @@ static void rwlock_combine_other_vc(struct rwlock_info* const p,
 {
 if (q->tid != tid && (readers_too || q->last_lock_was_writer_lock))
 {
-thread_combine_vc2(tid, &q->vc);
+thread_combine_vc2(tid, &q->last_unlock_segment->vc);
 }
 }
 }

@@ -193,7 +193,7 @@ static void rwlock_cleanup(struct rwlock_info* p)
 VG_(OSetGen_ResetIter)(p->thread_info);
 for ( ; (q = VG_(OSetGen_Next)(p->thread_info)); q++)
 {
-vc_cleanup(&q->vc);
+sg_put(q->last_unlock_segment);
 }
 VG_(OSetGen_Destroy)(p->thread_info);
 }

@@ -438,7 +438,6 @@ void rwlock_pre_unlock(const Addr rwlock)
 {
 const DrdThreadId drd_tid = thread_get_running_tid();
 const ThreadId vg_tid = VG_(get_running_tid)();
-const VectorClock* const vc = thread_get_vc(drd_tid);
 struct rwlock_info* const p = rwlock_get(rwlock);
 struct rwlock_thread_info* q;

@@ -476,9 +475,9 @@ void rwlock_pre_unlock(const Addr rwlock)
 /* This pthread_rwlock_unlock() call really unlocks the rwlock. Save the */
 /* current vector clock of the thread such that it is available when */
 /* this rwlock is locked again. */
-vc_assign(&q->vc, vc);
-q->last_lock_was_writer_lock = False;
+thread_get_latest_segment(&q->last_unlock_segment, drd_tid);
+q->last_lock_was_writer_lock = False;

 thread_new_segment(drd_tid);
 }
 }

@@ -46,8 +46,8 @@ static Bool drd_trace_segment = False;

 // Function definitions.

-/**
- * Note: creator and created may be equal.
+/** Initialize the memory pointed at by sg.
+ * @note The creator and created thread ID's may be equal.
  */
 static
 void sg_init(Segment* const sg,

@@ -62,9 +62,10 @@ void sg_init(Segment* const sg,

 creator_sg = (creator != DRD_INVALID_THREADID
 ? thread_get_segment(creator) : 0);

 sg->next = 0;
 sg->prev = 0;
+sg->refcnt = 1;

 if (vg_created != VG_INVALID_THREADID && VG_(get_SP)(vg_created) != 0)
 sg->stacktrace = VG_(record_ExeContext)(vg_created, 0);

@@ -82,23 +83,30 @@ void sg_init(Segment* const sg,
 {
 char msg[256];
 VG_(snprintf)(msg, sizeof(msg),
-"New segment for thread %d/%d with vc ",
-DrdThreadIdToVgThreadId(creator), creator);
+"New segment for thread %d/%d for vc ",
+creator != VG_INVALID_THREADID
+? DrdThreadIdToVgThreadId(creator)
+: DRD_INVALID_THREADID,
+creator);
 vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
 &sg->vc);
 VG_(message)(Vg_UserMsg, "%s", msg);
 }
 }

 /** Deallocate the memory that was allocated by sg_init(). */
 static
 void sg_cleanup(Segment* const sg)
 {
 tl_assert(sg);
+tl_assert(sg->refcnt == 0);
+
 vc_cleanup(&sg->vc);
 bm_delete(sg->bm);
 sg->bm = 0;
 }

 /** Allocate and initialize a new segment. */
 Segment* sg_new(ThreadId const creator, ThreadId const created)
 {
 Segment* sg;

@@ -114,6 +122,7 @@ Segment* sg_new(ThreadId const creator, ThreadId const created)
 return sg;
 }

+static
 void sg_delete(Segment* const sg)
 {
 #if 1

@@ -135,6 +144,50 @@ void sg_delete(Segment* const sg)
 VG_(free)(sg);
 }

+/** Query the reference count of the specified segment. */
+int sg_get_refcnt(const Segment* const sg)
+{
+tl_assert(sg);
+
+return sg->refcnt;
+}
+
+/** Increment the reference count of the specified segment. */
+Segment* sg_get(Segment* const sg)
+{
+tl_assert(sg);
+
+sg->refcnt++;
+return sg;
+}
+
+/** Decrement the reference count of the specified segment and deallocate the
+ * segment if the reference count became zero.
+ */
+void sg_put(Segment* const sg)
+{
+if (sg == 0)
+return;
+
+if (drd_trace_segment)
+{
+char msg[256];
+VG_(snprintf)(msg, sizeof(msg),
+"Decrementing segment reference count %d -> %d with vc ",
+sg->refcnt, sg->refcnt - 1);
+vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
+&sg->vc);
+VG_(message)(Vg_UserMsg, "%s", msg);
+}
+
+tl_assert(sg->refcnt >= 1);
+
+if (--sg->refcnt == 0)
+{
+sg_delete(sg);
+}
+}
+
 void sg_print(const Segment* const sg)
 {
 tl_assert(sg);

@@ -42,13 +42,17 @@ typedef struct segment
 {
 struct segment* next;
 struct segment* prev;
+int refcnt;
 ExeContext* stacktrace;
 VectorClock vc;
 struct bitmap* bm;
 } Segment;

 Segment* sg_new(const ThreadId creator, const ThreadId created);
-void sg_delete(Segment* const sg);
+int sg_get_refcnt(const Segment* const sg);
+Segment* sg_get(Segment* const sg);
+void sg_put(Segment* const sg);
 void sg_print(const Segment* const sg);
 Bool sg_get_trace(void);
 void sg_set_trace(const Bool trace_segment);

@@ -64,7 +64,7 @@ void semaphore_initialize(struct semaphore_info* const p,
 p->value = value;
 p->waiters = 0;
 p->last_sem_post_tid = DRD_INVALID_THREADID;
-vc_init(&p->vc, 0, 0);
+p->last_sem_post_segment = 0;
 }

 /** Free the memory that was allocated by semaphore_initialize(). Called by

@@ -82,7 +82,7 @@ static void semaphore_cleanup(struct semaphore_info* p)
 " upon",
 &sei);
 }
-vc_cleanup(&p->vc);
+sg_put(p->last_sem_post_segment);
 }

 static

@@ -215,8 +215,12 @@ void semaphore_post_wait(const DrdThreadId tid, const Addr semaphore,
 }
 p->value--;
 tl_assert(p->value >= 0);
-if (p->last_sem_post_tid != tid)
-thread_combine_vc2(tid, &p->vc);
+if (p->last_sem_post_tid != tid
+&& p->last_sem_post_tid != DRD_INVALID_THREADID)
+{
+tl_assert(p->last_sem_post_segment);
+thread_combine_vc2(tid, &p->last_sem_post_segment->vc);
+}
 thread_new_segment(tid);
 }

@@ -239,7 +243,7 @@ void semaphore_pre_post(const DrdThreadId tid, const Addr semaphore)
 {
 p->last_sem_post_tid = tid;
 thread_new_segment(tid);
-vc_assign(&p->vc, thread_get_vc(tid));
+thread_get_latest_segment(&p->last_sem_post_segment, tid);
 }
 }

@@ -42,6 +42,7 @@

 static void thread_append_segment(const DrdThreadId tid,
 Segment* const sg);
+static void thread_discard_segment(const DrdThreadId tid, Segment* const sg);
 static void thread_update_danger_set(const DrdThreadId tid);


@@ -245,9 +246,8 @@ Addr thread_get_stack_max(const DrdThreadId tid)
 return s_threadinfo[tid].stack_max;
 }

-/**
- * Clean up thread-specific data structures. Call this just after
- * pthread_join().
+/** Clean up thread-specific data structures. Call this just after
+ * pthread_join().
  */
 void thread_delete(const DrdThreadId tid)
 {

@@ -260,7 +260,9 @@ void thread_delete(const DrdThreadId tid)
 for (sg = s_threadinfo[tid].last; sg; sg = sg_prev)
 {
 sg_prev = sg->prev;
-sg_delete(sg);
+sg->prev = 0;
+sg->next = 0;
+sg_put(sg);
 }
 s_threadinfo[tid].vg_thread_exists = False;
 s_threadinfo[tid].posix_thread_exists = False;

@@ -350,9 +352,11 @@ void thread_set_running_tid(const ThreadId vg_tid, const DrdThreadId drd_tid)
 && s_drd_running_tid != DRD_INVALID_THREADID)
 {
 VG_(message)(Vg_DebugMsg,
-"Context switch from thread %d/%d to thread %d/%d",
+"Context switch from thread %d/%d to thread %d/%d;"
+" segments: %llu",
 s_vg_running_tid, s_drd_running_tid,
-DrdThreadIdToVgThreadId(drd_tid), drd_tid);
+DrdThreadIdToVgThreadId(drd_tid), drd_tid,
+sg_get_alive_segments_count());
 }
 s_vg_running_tid = vg_tid;
 s_drd_running_tid = drd_tid;

@@ -416,18 +420,30 @@ static void thread_discard_segment(const DrdThreadId tid, Segment* const sg)
 s_threadinfo[tid].first = sg->next;
 if (sg == s_threadinfo[tid].last)
 s_threadinfo[tid].last = sg->prev;
-sg_delete(sg);
+sg_put(sg);
 tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
 }

 VectorClock* thread_get_vc(const DrdThreadId tid)
 {
-tl_assert(0 <= tid && tid < DRD_N_THREADS
-&& tid != DRD_INVALID_THREADID);
+tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
 tl_assert(s_threadinfo[tid].last);
 return &s_threadinfo[tid].last->vc;
 }

+/** Return the latest segment of thread 'tid' and increment its reference
+ * count.
+ */
+void thread_get_latest_segment(Segment** sg, const DrdThreadId tid)
+{
+tl_assert(sg);
+tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
+tl_assert(s_threadinfo[tid].last);
+
+sg_put(*sg);
+*sg = sg_get(s_threadinfo[tid].last);
+}
+
 /**
  * Compute the minimum of all latest vector clocks of all threads
  * (Michiel Ronsse calls this "clock snooping" in his papers about DIOTA).

@@ -504,7 +520,7 @@ static void thread_discard_ordered_segments(void)
 ", max vc is ");
 vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
 &thread_vc_max);
-VG_(message)(Vg_DebugMsg, "%s", msg);
+VG_(message)(Vg_UserMsg, "%s", msg);
 vc_cleanup(&thread_vc_max);
 }

@@ -522,19 +538,14 @@ static void thread_discard_ordered_segments(void)
 vc_cleanup(&thread_vc_min);
 }

-/**
- * Create a new segment for the specified thread, and report all data races
- * of the most recent thread segment with other threads.
+/** Create a new segment for the specified thread, and discard any segments
+ * that cannot cause races anymore.
  */
 void thread_new_segment(const DrdThreadId tid)
 {
-Segment* sg;
-
-tl_assert(0 <= tid && tid < DRD_N_THREADS
-&& tid != DRD_INVALID_THREADID);
+tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);

-sg = sg_new(tid, tid);
-thread_append_segment(tid, sg);
+thread_append_segment(tid, sg_new(tid, tid));

 thread_discard_ordered_segments();

@@ -639,7 +650,7 @@ void thread_print_all(void)
 if (s_threadinfo[i].first)
 {
 VG_(printf)("**************\n"
-"* thread %3d (%d/%d/%d/0x%x/%d) *\n"
+"* thread %3d (%d/%d/%d/0x%lx/%d) *\n"
 "**************\n",
 i,
 s_threadinfo[i].vg_thread_exists,

@@ -773,7 +784,7 @@ static void thread_update_danger_set(const DrdThreadId tid)
 vc_snprint(msg + VG_(strlen)(msg),
 sizeof(msg) - VG_(strlen)(msg),
 &s_threadinfo[tid].last->vc);
-VG_(message)(Vg_DebugMsg, "%s", msg);
+VG_(message)(Vg_UserMsg, "%s", msg);
 }

 p = s_threadinfo[tid].last;

@@ -790,7 +801,7 @@ static void thread_update_danger_set(const DrdThreadId tid)
 vc_snprint(msg + VG_(strlen)(msg),
 sizeof(msg) - VG_(strlen)(msg),
 &p->vc);
-VG_(message)(Vg_DebugMsg, "%s", msg);
+VG_(message)(Vg_UserMsg, "%s", msg);
 }

 for (j = 0; j < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); j++)

@@ -810,7 +821,7 @@ static void thread_update_danger_set(const DrdThreadId tid)
 vc_snprint(msg + VG_(strlen)(msg),
 sizeof(msg) - VG_(strlen)(msg),
 &q->vc);
-VG_(message)(Vg_DebugMsg, "%s", msg);
+VG_(message)(Vg_UserMsg, "%s", msg);
 }
 bm_merge2(s_danger_set, q->bm);
 }

@@ -824,7 +835,7 @@ static void thread_update_danger_set(const DrdThreadId tid)
 vc_snprint(msg + VG_(strlen)(msg),
 sizeof(msg) - VG_(strlen)(msg),
 &q->vc);
-VG_(message)(Vg_DebugMsg, "%s", msg);
+VG_(message)(Vg_UserMsg, "%s", msg);
 }
 }
 }

@@ -856,9 +867,9 @@ static void thread_update_danger_set(const DrdThreadId tid)

 if (0 && s_trace_danger_set)
 {
-VG_(message)(Vg_DebugMsg, "[%d] new danger set:", tid);
+VG_(message)(Vg_UserMsg, "[%d] new danger set:", tid);
 bm_print(s_danger_set);
-VG_(message)(Vg_DebugMsg, "[%d] end of new danger set.", tid);
+VG_(message)(Vg_UserMsg, "[%d] end of new danger set.", tid);
 }
 }

@@ -114,6 +114,7 @@ int thread_leave_synchr(const DrdThreadId tid);
 int thread_get_synchr_nesting_count(const DrdThreadId tid);
 void thread_new_segment(const DrdThreadId tid);
 VectorClock* thread_get_vc(const DrdThreadId tid);
+void thread_get_latest_segment(Segment** sg, const DrdThreadId tid);
 void thread_combine_vc(const DrdThreadId joiner, const DrdThreadId joinee);
 void thread_combine_vc2(const DrdThreadId tid, const VectorClock* const vc);
 void thread_stop_using_mem(const Addr a1, const Addr a2);
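To close, a hedged sketch of how a synchronization object adopts the new ownership scheme, loosely following the mutex changes above; the types and helper names here are simplified stand-ins, not the DRD structures. The same pattern recurs for semaphores (last_sem_post_segment), rwlocks (last_unlock_segment per thread) and barriers (the sg[2] array per participant).

/* Sketch only: a mutex-like object that owns its "last unlock" segment by
 * reference instead of keeping a private vector-clock copy. */
#include <assert.h>
#include <stdlib.h>

typedef struct { int refcnt; /* vector clock omitted in this sketch */ } Segment;

static Segment* sg_get(Segment* sg) { assert(sg); sg->refcnt++; return sg; }

static void sg_put(Segment* sg)
{
  if (sg != NULL && --sg->refcnt == 0)
    free(sg);
}

typedef struct {
  int      recursion_count;
  Segment* last_locked_segment;   /* replaces the copied VectorClock */
} mutex_info;

/* Unlock: cache a shared reference to the unlocking thread's latest segment,
 * as thread_get_latest_segment() does in the diff. */
static void on_unlock(mutex_info* m, Segment* latest_segment_of_thread)
{
  sg_put(m->last_locked_segment);           /* release the previous segment */
  m->last_locked_segment = sg_get(latest_segment_of_thread);
}

/* Cleanup: drop the cached reference so the segment can be freed once no
 * thread list or other object still uses it. */
static void mutex_cleanup_sketch(mutex_info* m)
{
  sg_put(m->last_locked_segment);
  m->last_locked_segment = NULL;
}

On the next contended lock the detector reads the happens-before edge directly from the cached segment's vector clock (m->last_locked_segment->vc in the diff) rather than from a stored clock copy.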