Skip to content

Commit 7dcfb54

Browse files
committed
data_race: make the release/acquire API more clear
1 parent e9ebc6f commit 7dcfb54

File tree

4 files changed

+67
-94
lines changed

4 files changed

+67
-94
lines changed

src/tools/miri/src/alloc_addresses/mod.rs

+7-9
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ use rustc_data_structures::fx::{FxHashMap, FxHashSet};
1313
use rustc_span::Span;
1414
use rustc_target::abi::{Align, HasDataLayout, Size};
1515

16-
use crate::*;
16+
use crate::{concurrency::VClock, *};
1717

1818
use self::reuse_pool::ReusePool;
1919

@@ -172,7 +172,7 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
172172
ecx.get_active_thread(),
173173
) {
174174
if let Some(data_race) = &ecx.machine.data_race {
175-
data_race.validate_lock_acquire(&clock, ecx.get_active_thread());
175+
data_race.acquire_clock(&clock, ecx.get_active_thread());
176176
}
177177
reuse_addr
178178
} else {
@@ -369,15 +369,13 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> {
369369
// Also remember this address for future reuse.
370370
let thread = self.threads.get_active_thread_id();
371371
global_state.reuse.add_addr(rng, addr, size, align, kind, thread, || {
372-
let mut clock = concurrency::VClock::default();
373372
if let Some(data_race) = &self.data_race {
374-
data_race.validate_lock_release(
375-
&mut clock,
376-
thread,
377-
self.threads.active_thread_ref().current_span(),
378-
);
373+
data_race
374+
.release_clock(thread, self.threads.active_thread_ref().current_span())
375+
.clone()
376+
} else {
377+
VClock::default()
379378
}
380-
clock
381379
})
382380
}
383381
}

src/tools/miri/src/concurrency/data_race.rs

+31-40
Original file line numberDiff line numberDiff line change
@@ -1701,49 +1701,34 @@ impl GlobalState {
17011701
format!("thread `{thread_name}`")
17021702
}
17031703

1704-
/// Acquire a lock, express that the previous call of
1705-
/// `validate_lock_release` must happen before this.
1704+
/// Acquire the given clock into the given thread, establishing synchronization with
1705+
/// the moment when that clock snapshot was taken via `release_clock`.
17061706
/// As this is an acquire operation, the thread timestamp is not
17071707
/// incremented.
1708-
pub fn validate_lock_acquire(&self, lock: &VClock, thread: ThreadId) {
1709-
let (_, mut clocks) = self.load_thread_state_mut(thread);
1708+
pub fn acquire_clock(&self, lock: &VClock, thread: ThreadId) {
1709+
let (_, mut clocks) = self.thread_state_mut(thread);
17101710
clocks.clock.join(lock);
17111711
}
17121712

1713-
/// Release a lock handle, express that this happens-before
1714-
/// any subsequent calls to `validate_lock_acquire`.
1715-
/// For normal locks this should be equivalent to `validate_lock_release_shared`
1716-
/// since an acquire operation should have occurred before, however
1717-
/// for futex & condvar operations this is not the case and this
1718-
/// operation must be used.
1719-
pub fn validate_lock_release(&self, lock: &mut VClock, thread: ThreadId, current_span: Span) {
1720-
let (index, mut clocks) = self.load_thread_state_mut(thread);
1721-
lock.clone_from(&clocks.clock);
1722-
clocks.increment_clock(index, current_span);
1723-
}
1724-
1725-
/// Release a lock handle, express that this happens-before
1726-
/// any subsequent calls to `validate_lock_acquire` as well
1727-
/// as any previous calls to this function after any
1728-
/// `validate_lock_release` calls.
1729-
/// For normal locks this should be equivalent to `validate_lock_release`.
1730-
/// This function only exists for joining over the set of concurrent readers
1731-
/// in a read-write lock and should not be used for anything else.
1732-
pub fn validate_lock_release_shared(
1733-
&self,
1734-
lock: &mut VClock,
1735-
thread: ThreadId,
1736-
current_span: Span,
1737-
) {
1738-
let (index, mut clocks) = self.load_thread_state_mut(thread);
1739-
lock.join(&clocks.clock);
1713+
/// Returns the `release` clock of the given thread.
1714+
/// Other threads can acquire this clock in the future to establish synchronization
1715+
/// with this program point.
1716+
pub fn release_clock(&self, thread: ThreadId, current_span: Span) -> Ref<'_, VClock> {
1717+
// We increment the clock each time this happens, to ensure no two releases
1718+
// can be confused with each other.
1719+
let (index, mut clocks) = self.thread_state_mut(thread);
17401720
clocks.increment_clock(index, current_span);
1721+
drop(clocks);
1722+
// To return a read-only view, we need to release the RefCell
1723+
// and borrow it again.
1724+
let (_index, clocks) = self.thread_state(thread);
1725+
Ref::map(clocks, |c| &c.clock)
17411726
}
17421727

17431728
/// Load the vector index used by the given thread as well as the set of vector clocks
17441729
/// used by the thread.
17451730
#[inline]
1746-
fn load_thread_state_mut(&self, thread: ThreadId) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
1731+
fn thread_state_mut(&self, thread: ThreadId) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
17471732
let index = self.thread_info.borrow()[thread]
17481733
.vector_index
17491734
.expect("Loading thread state for thread with no assigned vector");
@@ -1752,17 +1737,26 @@ impl GlobalState {
17521737
(index, clocks)
17531738
}
17541739

1740+
/// Load the vector index used by the given thread as well as the set of vector clocks
1741+
/// used by the thread.
1742+
#[inline]
1743+
fn thread_state(&self, thread: ThreadId) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
1744+
let index = self.thread_info.borrow()[thread]
1745+
.vector_index
1746+
.expect("Loading thread state for thread with no assigned vector");
1747+
let ref_vector = self.vector_clocks.borrow();
1748+
let clocks = Ref::map(ref_vector, |vec| &vec[index]);
1749+
(index, clocks)
1750+
}
1751+
17551752
/// Load the current vector clock in use and the current set of thread clocks
17561753
/// in use for the vector.
17571754
#[inline]
17581755
pub(super) fn current_thread_state(
17591756
&self,
17601757
thread_mgr: &ThreadManager<'_, '_>,
17611758
) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
1762-
let index = self.current_index(thread_mgr);
1763-
let ref_vector = self.vector_clocks.borrow();
1764-
let clocks = Ref::map(ref_vector, |vec| &vec[index]);
1765-
(index, clocks)
1759+
self.thread_state(thread_mgr.get_active_thread_id())
17661760
}
17671761

17681762
/// Load the current vector clock in use and the current set of thread clocks
@@ -1772,10 +1766,7 @@ impl GlobalState {
17721766
&self,
17731767
thread_mgr: &ThreadManager<'_, '_>,
17741768
) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
1775-
let index = self.current_index(thread_mgr);
1776-
let ref_vector = self.vector_clocks.borrow_mut();
1777-
let clocks = RefMut::map(ref_vector, |vec| &mut vec[index]);
1778-
(index, clocks)
1769+
self.thread_state_mut(thread_mgr.get_active_thread_id())
17791770
}
17801771

17811772
/// Return the current thread, should be the same

src/tools/miri/src/concurrency/init_once.rs

+5-7
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ pub enum InitOnceStatus {
4141
pub(super) struct InitOnce<'mir, 'tcx> {
4242
status: InitOnceStatus,
4343
waiters: VecDeque<InitOnceWaiter<'mir, 'tcx>>,
44-
data_race: VClock,
44+
clock: VClock,
4545
}
4646

4747
impl<'mir, 'tcx> VisitProvenance for InitOnce<'mir, 'tcx> {
@@ -61,10 +61,8 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
6161
let current_thread = this.get_active_thread();
6262

6363
if let Some(data_race) = &this.machine.data_race {
64-
data_race.validate_lock_acquire(
65-
&this.machine.threads.sync.init_onces[id].data_race,
66-
current_thread,
67-
);
64+
data_race
65+
.acquire_clock(&this.machine.threads.sync.init_onces[id].clock, current_thread);
6866
}
6967
}
7068

@@ -176,7 +174,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
176174

177175
// Each complete happens-before the end of the wait
178176
if let Some(data_race) = &this.machine.data_race {
179-
data_race.validate_lock_release(&mut init_once.data_race, current_thread, current_span);
177+
init_once.clock.clone_from(&data_race.release_clock(current_thread, current_span));
180178
}
181179

182180
// Wake up everyone.
@@ -202,7 +200,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
202200

203201
// Each complete happens-before the end of the wait
204202
if let Some(data_race) = &this.machine.data_race {
205-
data_race.validate_lock_release(&mut init_once.data_race, current_thread, current_span);
203+
init_once.clock.clone_from(&data_race.release_clock(current_thread, current_span));
206204
}
207205

208206
// Wake up one waiting thread, so they can go ahead and try to init this.

src/tools/miri/src/concurrency/sync.rs

+24-38
Original file line numberDiff line numberDiff line change
@@ -69,12 +69,8 @@ struct Mutex {
6969
lock_count: usize,
7070
/// The queue of threads waiting for this mutex.
7171
queue: VecDeque<ThreadId>,
72-
/// Data race handle. This tracks the happens-before
73-
/// relationship between each mutex access. It is
74-
/// released to during unlock and acquired from during
75-
/// locking, and therefore stores the clock of the last
76-
/// thread to release this mutex.
77-
data_race: VClock,
72+
/// Mutex clock. This tracks the moment of the last unlock.
73+
clock: VClock,
7874
}
7975

8076
declare_id!(RwLockId);
@@ -91,16 +87,16 @@ struct RwLock {
9187
writer_queue: VecDeque<ThreadId>,
9288
/// The queue of reader threads waiting for this lock.
9389
reader_queue: VecDeque<ThreadId>,
94-
/// Data race handle for writers. Tracks the happens-before
90+
/// Data race clock for writers. Tracks the happens-before
9591
/// ordering between each write access to a rwlock and is updated
9692
/// after a sequence of concurrent readers to track the happens-
9793
/// before ordering between the set of previous readers and
9894
/// the current writer.
9995
/// Contains the clock of the last thread to release a writer
10096
/// lock or the joined clock of the set of last threads to release
10197
/// shared reader locks.
102-
data_race: VClock,
103-
/// Data race handle for readers. This is temporary storage
98+
clock_unlocked: VClock,
99+
/// Data race clock for readers. This is temporary storage
104100
/// for the combined happens-before ordering for between all
105101
/// concurrent readers and the next writer, and the value
106102
/// is stored to the main data_race variable once all
@@ -110,7 +106,7 @@ struct RwLock {
110106
/// add happens-before orderings between shared reader
111107
/// locks.
112108
/// This is only relevant when there is an active reader.
113-
data_race_reader: VClock,
109+
clock_current_readers: VClock,
114110
}
115111

116112
declare_id!(CondvarId);
@@ -132,8 +128,8 @@ struct Condvar {
132128
/// between a cond-var signal and a cond-var
133129
/// wait during a non-spurious signal event.
134130
/// Contains the clock of the last thread to
135-
/// perform a futex-signal.
136-
data_race: VClock,
131+
/// perform a condvar-signal.
132+
clock: VClock,
137133
}
138134

139135
/// The futex state.
@@ -145,7 +141,7 @@ struct Futex {
145141
/// during a non-spurious wake event.
146142
/// Contains the clock of the last thread to
147143
/// perform a futex-wake.
148-
data_race: VClock,
144+
clock: VClock,
149145
}
150146

151147
/// A thread waiting on a futex.
@@ -346,7 +342,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
346342
}
347343
mutex.lock_count = mutex.lock_count.checked_add(1).unwrap();
348344
if let Some(data_race) = &this.machine.data_race {
349-
data_race.validate_lock_acquire(&mutex.data_race, thread);
345+
data_race.acquire_clock(&mutex.clock, thread);
350346
}
351347
}
352348

@@ -373,11 +369,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
373369
// The mutex is completely unlocked. Try transferring ownership
374370
// to another thread.
375371
if let Some(data_race) = &this.machine.data_race {
376-
data_race.validate_lock_release(
377-
&mut mutex.data_race,
378-
current_owner,
379-
current_span,
380-
);
372+
mutex.clock.clone_from(&data_race.release_clock(current_owner, current_span));
381373
}
382374
this.mutex_dequeue_and_lock(id);
383375
}
@@ -448,7 +440,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
448440
let count = rwlock.readers.entry(reader).or_insert(0);
449441
*count = count.checked_add(1).expect("the reader counter overflowed");
450442
if let Some(data_race) = &this.machine.data_race {
451-
data_race.validate_lock_acquire(&rwlock.data_race, reader);
443+
data_race.acquire_clock(&rwlock.clock_unlocked, reader);
452444
}
453445
}
454446

@@ -474,20 +466,16 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
474466
}
475467
if let Some(data_race) = &this.machine.data_race {
476468
// Add this to the shared-release clock of all concurrent readers.
477-
data_race.validate_lock_release_shared(
478-
&mut rwlock.data_race_reader,
479-
reader,
480-
current_span,
481-
);
469+
rwlock.clock_current_readers.join(&data_race.release_clock(reader, current_span));
482470
}
483471

484472
// The thread was a reader. If the lock is not held any more, give it to a writer.
485473
if this.rwlock_is_locked(id).not() {
486474
// All the readers are finished, so set the writer data-race handle to the value
487-
// of the union of all reader data race handles, since the set of readers
488-
// happen-before the writers
475+
// of the union of all reader data race handles, since the set of readers
476+
// happen-before the writers
489477
let rwlock = &mut this.machine.threads.sync.rwlocks[id];
490-
rwlock.data_race.clone_from(&rwlock.data_race_reader);
478+
rwlock.clock_unlocked.clone_from(&rwlock.clock_current_readers);
491479
this.rwlock_dequeue_and_lock_writer(id);
492480
}
493481
true
@@ -511,7 +499,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
511499
let rwlock = &mut this.machine.threads.sync.rwlocks[id];
512500
rwlock.writer = Some(writer);
513501
if let Some(data_race) = &this.machine.data_race {
514-
data_race.validate_lock_acquire(&rwlock.data_race, writer);
502+
data_race.acquire_clock(&rwlock.clock_unlocked, writer);
515503
}
516504
}
517505

@@ -530,11 +518,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
530518
trace!("rwlock_writer_unlock: {:?} unlocked by {:?}", id, expected_writer);
531519
// Release memory to next lock holder.
532520
if let Some(data_race) = &this.machine.data_race {
533-
data_race.validate_lock_release(
534-
&mut rwlock.data_race,
535-
current_writer,
536-
current_span,
537-
);
521+
rwlock
522+
.clock_unlocked
523+
.clone_from(&*data_race.release_clock(current_writer, current_span));
538524
}
539525
// The thread was a writer.
540526
//
@@ -611,11 +597,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
611597

612598
// Each condvar signal happens-before the end of the condvar wake
613599
if let Some(data_race) = data_race {
614-
data_race.validate_lock_release(&mut condvar.data_race, current_thread, current_span);
600+
condvar.clock.clone_from(&*data_race.release_clock(current_thread, current_span));
615601
}
616602
condvar.waiters.pop_front().map(|waiter| {
617603
if let Some(data_race) = data_race {
618-
data_race.validate_lock_acquire(&condvar.data_race, waiter.thread);
604+
data_race.acquire_clock(&condvar.clock, waiter.thread);
619605
}
620606
(waiter.thread, waiter.lock)
621607
})
@@ -645,14 +631,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
645631

646632
// Each futex-wake happens-before the end of the futex wait
647633
if let Some(data_race) = data_race {
648-
data_race.validate_lock_release(&mut futex.data_race, current_thread, current_span);
634+
futex.clock.clone_from(&*data_race.release_clock(current_thread, current_span));
649635
}
650636

651637
// Wake up the first thread in the queue that matches any of the bits in the bitset.
652638
futex.waiters.iter().position(|w| w.bitset & bitset != 0).map(|i| {
653639
let waiter = futex.waiters.remove(i).unwrap();
654640
if let Some(data_race) = data_race {
655-
data_race.validate_lock_acquire(&futex.data_race, waiter.thread);
641+
data_race.acquire_clock(&futex.clock, waiter.thread);
656642
}
657643
waiter.thread
658644
})

0 commit comments

Comments (0)