Skip to content

Commit 6dfd5cb

Browse files
authored
Unify lazy types (#804)
Replace ad-hoc atomic lazy caches with shared lazy helpers in the UEFI, Linux/Android fallback, NetBSD, RDRAND, and RNDR backends. Adds `LazyPtr` and `LazyBool` so pointer and boolean caches use the same initialization contract. This reduces duplicated cache logic and keeps backend probing/fallback semantics aligned while preserving the existing retry-until-cached behavior.
1 parent 5e6b022 commit 6dfd5cb

8 files changed

Lines changed: 148 additions & 134 deletions

File tree

src/backends/efi_rng.rs

Lines changed: 8 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,7 @@
22
use crate::Error;
33
use core::{
44
mem::MaybeUninit,
5-
ptr::{self, NonNull, null_mut},
6-
sync::atomic::{AtomicPtr, Ordering::Relaxed},
5+
ptr::{self, NonNull},
76
};
87
use r_efi::{
98
efi::{BootServices, Handle},
@@ -17,8 +16,6 @@ pub use crate::util::{inner_u32, inner_u64};
1716
#[cfg(not(target_os = "uefi"))]
1817
compile_error!("`efi_rng` backend can be enabled only for UEFI targets!");
1918

20-
static RNG_PROTOCOL: AtomicPtr<rng::Protocol> = AtomicPtr::new(null_mut());
21-
2219
#[cold]
2320
#[inline(never)]
2421
fn init() -> Result<NonNull<rng::Protocol>, Error> {
@@ -36,7 +33,7 @@ fn init() -> Result<NonNull<rng::Protocol>, Error> {
3633
((*boot_services.as_ptr()).locate_handle)(
3734
r_efi::efi::BY_PROTOCOL,
3835
&mut guid,
39-
null_mut(),
36+
ptr::null_mut(),
4037
&mut buf_size,
4138
handles.as_mut_ptr(),
4239
)
@@ -88,18 +85,19 @@ fn init() -> Result<NonNull<rng::Protocol>, Error> {
8885
continue;
8986
}
9087

91-
RNG_PROTOCOL.store(protocol.as_ptr(), Relaxed);
9288
return Ok(protocol);
9389
}
9490
Err(Error::NO_RNG_HANDLE)
9591
}
9692

9793
#[inline]
9894
pub fn fill_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
99-
let protocol = match NonNull::new(RNG_PROTOCOL.load(Relaxed)) {
100-
Some(p) => p,
101-
None => init()?,
102-
};
95+
#[path = "../utils/lazy_ptr.rs"]
96+
mod lazy;
97+
98+
static RNG_PROTOCOL: lazy::LazyPtr<rng::Protocol> = lazy::LazyPtr::new();
99+
100+
let protocol = RNG_PROTOCOL.try_unsync_init(init)?;
103101

104102
let mut alg_guid = rng::ALGORITHM_RAW;
105103
let ret = unsafe {

src/backends/linux_android_with_fallback.rs

Lines changed: 10 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -4,8 +4,7 @@ use crate::Error;
44
use core::{
55
ffi::c_void,
66
mem::{MaybeUninit, transmute},
7-
ptr::NonNull,
8-
sync::atomic::{AtomicPtr, Ordering},
7+
ptr::{self, NonNull},
98
};
109
use use_file::utils;
1110

@@ -17,18 +16,12 @@ type GetRandomFn = unsafe extern "C" fn(*mut c_void, libc::size_t, libc::c_uint)
1716
/// or not supported by kernel.
1817
const NOT_AVAILABLE: NonNull<c_void> = unsafe { NonNull::new_unchecked(usize::MAX as *mut c_void) };
1918

20-
static GETRANDOM_FN: AtomicPtr<c_void> = AtomicPtr::new(core::ptr::null_mut());
21-
2219
#[cold]
2320
#[inline(never)]
2421
fn init() -> NonNull<c_void> {
2522
// Use static linking to `libc::getrandom` on MUSL targets and `dlsym` everywhere else
2623
#[cfg(not(target_env = "musl"))]
27-
let raw_ptr = {
28-
static NAME: &[u8] = b"getrandom\0";
29-
let name_ptr = NAME.as_ptr().cast::<libc::c_char>();
30-
unsafe { libc::dlsym(libc::RTLD_DEFAULT, name_ptr) }
31-
};
24+
let raw_ptr = unsafe { libc::dlsym(libc::RTLD_DEFAULT, c"getrandom".as_ptr()) };
3225
#[cfg(target_env = "musl")]
3326
let raw_ptr = {
3427
let fptr: GetRandomFn = libc::getrandom;
@@ -37,10 +30,9 @@ fn init() -> NonNull<c_void> {
3730

3831
let res_ptr = match NonNull::new(raw_ptr) {
3932
Some(fptr) => {
40-
let getrandom_fn = unsafe { transmute::<NonNull<c_void>, GetRandomFn>(fptr) };
41-
let dangling_ptr = NonNull::dangling().as_ptr();
33+
let getrandom_fn = unsafe { transmute::<*mut c_void, GetRandomFn>(fptr.as_ptr()) };
4234
// Check that `getrandom` syscall is supported by kernel
43-
let res = unsafe { getrandom_fn(dangling_ptr, 0, 0) };
35+
let res = unsafe { getrandom_fn(ptr::dangling_mut(), 0, 0) };
4436
if cfg!(getrandom_test_linux_fallback) {
4537
NOT_AVAILABLE
4638
} else if res.is_negative() {
@@ -65,7 +57,6 @@ fn init() -> NonNull<c_void> {
6557
panic!("Fallback is triggered with enabled `getrandom_test_linux_without_fallback`")
6658
}
6759

68-
GETRANDOM_FN.store(res_ptr.as_ptr(), Ordering::Release);
6960
res_ptr
7061
}
7162

@@ -77,23 +68,17 @@ fn use_file_fallback(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
7768

7869
#[inline]
7970
pub fn fill_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
80-
// Despite being only a single atomic variable, we still cannot always use
81-
// Ordering::Relaxed, as we need to make sure a successful call to `init`
82-
// is "ordered before" any data read through the returned pointer (which
83-
// occurs when the function is called). Our implementation mirrors that of
84-
// the one in libstd, meaning that the use of non-Relaxed operations is
85-
// probably unnecessary.
86-
let raw_ptr = GETRANDOM_FN.load(Ordering::Acquire);
87-
let fptr = match NonNull::new(raw_ptr) {
88-
Some(p) => p,
89-
None => init(),
90-
};
71+
#[path = "../utils/lazy_ptr.rs"]
72+
mod lazy;
73+
74+
static GETRANDOM_FN: lazy::LazyPtr<c_void> = lazy::LazyPtr::new();
75+
let fptr = GETRANDOM_FN.unsync_init(init);
9176

9277
if fptr == NOT_AVAILABLE {
9378
use_file_fallback(dest)
9479
} else {
9580
// note: `transmute` is currently the only way to convert a pointer into a function reference
96-
let getrandom_fn = unsafe { transmute::<NonNull<c_void>, GetRandomFn>(fptr) };
81+
let getrandom_fn = unsafe { transmute::<*mut c_void, GetRandomFn>(fptr.as_ptr()) };
9782
utils::sys_fill_exact(dest, |buf| unsafe {
9883
getrandom_fn(buf.as_mut_ptr().cast(), buf.len(), 0)
9984
})

src/backends/netbsd.rs

Lines changed: 17 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -8,8 +8,7 @@ use core::{
88
cmp,
99
ffi::c_void,
1010
mem::{self, MaybeUninit},
11-
ptr,
12-
sync::atomic::{AtomicPtr, Ordering},
11+
ptr::{self, NonNull},
1312
};
1413

1514
pub use crate::util::{inner_u32, inner_u64};
@@ -42,36 +41,29 @@ unsafe extern "C" fn polyfill_using_kern_arand(
4241

4342
type GetRandomFn = unsafe extern "C" fn(*mut c_void, libc::size_t, libc::c_uint) -> libc::ssize_t;
4443

45-
static GETRANDOM: AtomicPtr<c_void> = AtomicPtr::new(ptr::null_mut());
46-
4744
#[cold]
4845
#[inline(never)]
49-
fn init() -> *mut c_void {
50-
static NAME: &[u8] = b"getrandom\0";
51-
let name_ptr = NAME.as_ptr().cast::<libc::c_char>();
52-
let mut ptr = unsafe { libc::dlsym(libc::RTLD_DEFAULT, name_ptr) };
53-
if ptr.is_null() || cfg!(getrandom_test_netbsd_fallback) {
54-
// Verify `polyfill_using_kern_arand` has the right signature.
55-
const POLYFILL: GetRandomFn = polyfill_using_kern_arand;
56-
ptr = POLYFILL as *mut c_void;
46+
fn init() -> NonNull<c_void> {
47+
let ptr = unsafe { libc::dlsym(libc::RTLD_DEFAULT, c"getrandom".as_ptr()) };
48+
if !cfg!(getrandom_test_netbsd_fallback) {
49+
if let Some(ptr) = NonNull::new(ptr) {
50+
return ptr;
51+
}
5752
}
58-
GETRANDOM.store(ptr, Ordering::Release);
59-
ptr
53+
// Verify `polyfill_using_kern_arand` has the right signature.
54+
const POLYFILL: GetRandomFn = polyfill_using_kern_arand;
55+
unsafe { NonNull::new_unchecked(POLYFILL as *mut c_void) }
6056
}
6157

6258
#[inline]
6359
pub fn fill_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
64-
// Despite being only a single atomic variable, we still cannot always use
65-
// Ordering::Relaxed, as we need to make sure a successful call to `init`
66-
// is "ordered before" any data read through the returned pointer (which
67-
// occurs when the function is called). Our implementation mirrors that of
68-
// the one in libstd, meaning that the use of non-Relaxed operations is
69-
// probably unnecessary.
70-
let mut fptr = GETRANDOM.load(Ordering::Acquire);
71-
if fptr.is_null() {
72-
fptr = init();
73-
}
74-
let fptr = unsafe { mem::transmute::<*mut c_void, GetRandomFn>(fptr) };
60+
#[path = "../utils/lazy_ptr.rs"]
61+
mod lazy;
62+
63+
static GETRANDOM_FN: lazy::LazyPtr<c_void> = lazy::LazyPtr::new();
64+
65+
let fptr = GETRANDOM_FN.unsync_init(init);
66+
let fptr = unsafe { mem::transmute::<*mut c_void, GetRandomFn>(fptr.as_ptr()) };
7567
utils::sys_fill_exact(dest, |buf| unsafe {
7668
fptr(buf.as_mut_ptr().cast::<c_void>(), buf.len(), 0)
7769
})

src/backends/rdrand.rs

Lines changed: 15 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -2,9 +2,6 @@
22
use crate::{Error, util::slice_as_uninit};
33
use core::mem::{MaybeUninit, size_of};
44

5-
#[path = "../utils/lazy.rs"]
6-
mod lazy;
7-
85
#[cfg(not(any(target_arch = "x86_64", target_arch = "x86")))]
96
compile_error!("`rdrand` backend can be enabled only for x86 and x86-64 targets!");
107

@@ -20,8 +17,6 @@ cfg_if! {
2017
}
2118
}
2219

23-
static RDRAND_GOOD: lazy::LazyBool = lazy::LazyBool::new();
24-
2520
// Recommendation from "Intel® Digital Random Number Generator (DRNG) Software
2621
// Implementation Guide" - Section 5.2.1 and "Intel® 64 and IA-32 Architectures
2722
// Software Developer’s Manual" - Volume 1 - Section 7.3.17.1.
@@ -72,7 +67,9 @@ fn self_test() -> bool {
7267
fails <= 2
7368
}
7469

75-
fn is_rdrand_good() -> bool {
70+
#[cold]
71+
#[inline(never)]
72+
fn init() -> bool {
7673
#[cfg(not(target_feature = "rdrand"))]
7774
{
7875
// SAFETY: All Rust x86 targets are new enough to have CPUID, and we
@@ -115,6 +112,15 @@ fn is_rdrand_good() -> bool {
115112
unsafe { self_test() }
116113
}
117114

115+
fn is_rdrand_good() -> bool {
116+
#[path = "../utils/lazy_bool.rs"]
117+
mod lazy;
118+
119+
static RDRAND_GOOD: lazy::LazyBool = lazy::LazyBool::new();
120+
121+
RDRAND_GOOD.unsync_init(init)
122+
}
123+
118124
#[target_feature(enable = "rdrand")]
119125
fn rdrand_exact(dest: &mut [MaybeUninit<u8>]) -> Option<()> {
120126
// We use chunks_exact_mut instead of chunks_mut as it allows almost all
@@ -162,7 +168,7 @@ fn rdrand_u64() -> Option<u64> {
162168

163169
#[inline]
164170
pub fn inner_u32() -> Result<u32, Error> {
165-
if !RDRAND_GOOD.unsync_init(is_rdrand_good) {
171+
if !is_rdrand_good() {
166172
return Err(Error::NO_RDRAND);
167173
}
168174
// SAFETY: After this point, we know rdrand is supported.
@@ -171,7 +177,7 @@ pub fn inner_u32() -> Result<u32, Error> {
171177

172178
#[inline]
173179
pub fn inner_u64() -> Result<u64, Error> {
174-
if !RDRAND_GOOD.unsync_init(is_rdrand_good) {
180+
if !is_rdrand_good() {
175181
return Err(Error::NO_RDRAND);
176182
}
177183
// SAFETY: After this point, we know rdrand is supported.
@@ -180,7 +186,7 @@ pub fn inner_u64() -> Result<u64, Error> {
180186

181187
#[inline]
182188
pub fn fill_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
183-
if !RDRAND_GOOD.unsync_init(is_rdrand_good) {
189+
if !is_rdrand_good() {
184190
return Err(Error::NO_RDRAND);
185191
}
186192
// SAFETY: After this point, we know rdrand is supported.

src/backends/rndr.rs

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -69,8 +69,9 @@ fn is_rndr_available() -> bool {
6969

7070
#[cfg(not(target_feature = "rand"))]
7171
fn is_rndr_available() -> bool {
72-
#[path = "../utils/lazy.rs"]
72+
#[path = "../utils/lazy_bool.rs"]
7373
mod lazy;
74+
7475
static RNDR_GOOD: lazy::LazyBool = lazy::LazyBool::new();
7576

7677
cfg_if::cfg_if! {

src/utils/lazy.rs

Lines changed: 0 additions & 64 deletions
This file was deleted.

src/utils/lazy_bool.rs

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
use core::sync::atomic::{AtomicU8, Ordering::Relaxed};

/// A lazily initialized `bool` backed by an `AtomicU8`.
///
/// There is deliberately no synchronization around initialization: if several
/// threads race past the uninitialized check, each of them may run `init`.
/// Whichever result lands in the cache is what later calls observe.
///
/// `Relaxed` ordering suffices here because the only thing published through
/// the atomic is the cached value itself — no other memory is guarded by it.
pub(crate) struct LazyBool(AtomicU8);

impl LazyBool {
    // Sentinel meaning "not yet initialized"; `init` results are stored as 0/1,
    // so `u8::MAX` can never collide with a cached value.
    const UNINIT: u8 = u8::MAX;

    /// Create new `LazyBool`.
    pub const fn new() -> Self {
        Self(AtomicU8::new(Self::UNINIT))
    }

    /// Slow path: run `init`, cache its result, and return it.
    #[cold]
    fn cold_init(&self, init: impl FnOnce() -> bool) -> bool {
        let computed = init();
        self.0.store(computed as u8, Relaxed);
        computed
    }

    /// Return the cached value, or — if nothing is cached yet — run `init`,
    /// cache its result, and return that.
    #[inline]
    pub fn unsync_init(&self, init: impl FnOnce() -> bool) -> bool {
        match self.0.load(Relaxed) {
            Self::UNINIT => self.cold_init(init),
            cached => cached != 0,
        }
    }
}

0 commit comments

Comments
 (0)