@@ -4,70 +4,89 @@ use rand::Rng;
 
 use rustc_target::abi::{Align, Size};
 
-const MAX_POOL_SIZE: usize = 64;
+use crate::{concurrency::VClock, MemoryKind};
 
-// Just use fair coins, until we have evidence that other numbers are better.
-const ADDR_REMEMBER_CHANCE: f64 = 0.5;
-const ADDR_TAKE_CHANCE: f64 = 0.5;
+const MAX_POOL_SIZE: usize = 64;
 
 /// The pool strikes a balance between exploring more possible executions and making it more likely
 /// to find bugs. The hypothesis is that bugs are more likely to occur when reuse happens for
 /// allocations with the same layout, since that can trigger e.g. ABA issues in a concurrent data
 /// structure. Therefore we only reuse allocations when size and alignment match exactly.
 #[derive(Debug)]
 pub struct ReusePool {
+    address_reuse_rate: f64,
     /// The i-th element in `pool` stores allocations of alignment `2^i`. We store these reusable
     /// allocations as address-size pairs, the list must be sorted by the size.
     ///
     /// Each of these maps has at most MAX_POOL_SIZE elements, and since alignment is limited to
     /// less than 64 different possible value, that bounds the overall size of the pool.
-    pool: Vec<Vec<(u64, Size)>>,
+    ///
+    /// We also store the clock from the thread that donated this pool element,
+    /// to ensure synchronization with the thread that picks up this address.
+    pool: Vec<Vec<(u64, Size, VClock)>>,
 }
 
 impl ReusePool {
-    pub fn new() -> Self {
-        ReusePool { pool: vec![] }
+    pub fn new(address_reuse_rate: f64) -> Self {
+        ReusePool { address_reuse_rate, pool: vec![] }
     }
 
-    fn subpool(&mut self, align: Align) -> &mut Vec<(u64, Size)> {
+    fn subpool(&mut self, align: Align) -> &mut Vec<(u64, Size, VClock)> {
         let pool_idx: usize = align.bytes().trailing_zeros().try_into().unwrap();
         if self.pool.len() <= pool_idx {
             self.pool.resize(pool_idx + 1, Vec::new());
         }
         &mut self.pool[pool_idx]
     }
 
-    pub fn add_addr(&mut self, rng: &mut impl Rng, addr: u64, size: Size, align: Align) {
+    pub fn add_addr(
+        &mut self,
+        rng: &mut impl Rng,
+        addr: u64,
+        size: Size,
+        align: Align,
+        kind: MemoryKind,
+        clock: impl FnOnce() -> VClock,
+    ) {
         // Let's see if we even want to remember this address.
-        if !rng.gen_bool(ADDR_REMEMBER_CHANCE) {
+        // We don't remember stack addresses: there's a lot of them (so the perf impact is big),
+        // and we only want to reuse stack slots within the same thread or else we'll add a lot of
+        // undesired synchronization.
+        if kind == MemoryKind::Stack || !rng.gen_bool(self.address_reuse_rate) {
             return;
         }
         // Determine the pool to add this to, and where in the pool to put it.
         let subpool = self.subpool(align);
-        let pos = subpool.partition_point(|(_addr, other_size)| *other_size < size);
+        let pos = subpool.partition_point(|(_addr, other_size, _)| *other_size < size);
         // Make sure the pool does not grow too big.
         if subpool.len() >= MAX_POOL_SIZE {
             // Pool full. Replace existing element, or last one if this would be even bigger.
             let clamped_pos = pos.min(subpool.len() - 1);
-            subpool[clamped_pos] = (addr, size);
+            subpool[clamped_pos] = (addr, size, clock());
             return;
         }
         // Add address to pool, at the right position.
-        subpool.insert(pos, (addr, size));
+        subpool.insert(pos, (addr, size, clock()));
     }
 
-    pub fn take_addr(&mut self, rng: &mut impl Rng, size: Size, align: Align) -> Option<u64> {
-        // Determine whether we'll even attempt a reuse.
-        if !rng.gen_bool(ADDR_TAKE_CHANCE) {
+    pub fn take_addr(
+        &mut self,
+        rng: &mut impl Rng,
+        size: Size,
+        align: Align,
+        kind: MemoryKind,
+    ) -> Option<(u64, VClock)> {
+        // Determine whether we'll even attempt a reuse. As above, we don't do reuse for stack addresses.
+        if kind == MemoryKind::Stack || !rng.gen_bool(self.address_reuse_rate) {
             return None;
         }
         // Determine the pool to take this from.
         let subpool = self.subpool(align);
         // Let's see if we can find something of the right size. We want to find the full range of
         // such items, beginning with the first, so we can't use `binary_search_by_key`.
-        let begin = subpool.partition_point(|(_addr, other_size)| *other_size < size);
+        let begin = subpool.partition_point(|(_addr, other_size, _)| *other_size < size);
         let mut end = begin;
-        while let Some((_addr, other_size)) = subpool.get(end) {
+        while let Some((_addr, other_size, _)) = subpool.get(end) {
             if *other_size != size {
                 break;
             }
@@ -80,8 +99,8 @@ impl ReusePool {
         // Pick a random element with the desired size.
         let idx = rng.gen_range(begin..end);
         // Remove it from the pool and return.
-        let (chosen_addr, chosen_size) = subpool.remove(idx);
+        let (chosen_addr, chosen_size, clock) = subpool.remove(idx);
         debug_assert!(chosen_size >= size && chosen_addr % align.bytes() == 0);
-        Some(chosen_addr)
+        Some((chosen_addr, clock))
     }
 }
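
The selection logic in `take_addr` leans on the invariant that each subpool is sorted by size: `partition_point` yields the index of the first entry whose size is at least the requested one, and the loop that follows extends this to the half-open range of entries with exactly that size, from which one is then picked at random. Below is a minimal standalone sketch of that range computation, using plain `u64` sizes instead of `Size`, no clocks, and a hypothetical `find_size_range` helper that is not part of the actual code:

```rust
// Simplified model of a subpool: `(address, size)` pairs sorted by size.
fn find_size_range(subpool: &[(u64, u64)], size: u64) -> std::ops::Range<usize> {
    // Index of the first entry whose size is >= the requested size.
    let begin = subpool.partition_point(|&(_addr, other_size)| other_size < size);
    // Extend to the end of the run of exact matches.
    let mut end = begin;
    while let Some(&(_addr, other_size)) = subpool.get(end) {
        if other_size != size {
            break;
        }
        end += 1;
    }
    begin..end
}

fn main() {
    let subpool = [(0x1000, 8), (0x2000, 16), (0x3000, 16), (0x4000, 32)];
    // Entries of size 16 occupy indices 1..3; the real pool picks one of them
    // at random with `rng.gen_range(begin..end)`.
    assert_eq!(find_size_range(&subpool, 16), 1..3);
    // No entry of size 24: the range is empty and `take_addr` returns `None`.
    assert!(find_size_range(&subpool, 24).is_empty());
}
```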
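One API detail that is easy to miss: `add_addr` takes the donor thread's clock as `clock: impl FnOnce() -> VClock` rather than as a ready-made value, so the snapshot is only computed on the paths that actually store it; it is skipped for stack allocations and whenever the RNG decides not to remember the address. Here is a small sketch of that lazy-argument idiom, using hypothetical stand-in types (`FakeClock`, `Pool`) rather than Miri's real `VClock`:

```rust
// Hypothetical stand-ins for illustration only; the real `VClock` lives in
// Miri's `concurrency` module and the real pool is `ReusePool` above.
#[derive(Debug, Clone)]
struct FakeClock(Vec<u64>);

struct Pool {
    entries: Vec<(u64, FakeClock)>,
}

impl Pool {
    /// `clock` is only invoked on the path that actually stores the address,
    /// mirroring how `add_addr` skips the clock snapshot when the address is
    /// not remembered.
    fn add(&mut self, addr: u64, keep: bool, clock: impl FnOnce() -> FakeClock) {
        if !keep {
            return; // early exit: `clock` is never evaluated
        }
        self.entries.push((addr, clock()));
    }
}

fn main() {
    let mut pool = Pool { entries: Vec::new() };
    pool.add(0x1000, false, || unreachable!("not evaluated when `keep` is false"));
    pool.add(0x2000, true, || FakeClock(vec![1, 2, 3]));
    assert_eq!(pool.entries.len(), 1);
}
```

On the other side, `take_addr` returns the stored clock by value; per the doc comment on `pool`, the caller is presumably expected to synchronize with that clock before handing out the reused address.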