#pragma once

#include <atomic>
#include <chrono>
#include <thread>

#include "opentelemetry/version.h"

OPENTELEMETRY_BEGIN_NAMESPACE
namespace common
{

// Number of iterations in the tight spin phase before yielding.
constexpr int SPINLOCK_FAST_ITERATIONS = 100;
// Duration, in milliseconds, of the back-off sleep phase.
constexpr int SPINLOCK_SLEEP_MS = 1;

/**
 * A Mutex which uses atomic flags and spin-locks instead of halting threads.
 *
 * This mutex uses an incremental back-off strategy with the following phases:
 * 1. A tight spin-lock loop (pending: using hardware PAUSE/YIELD instructions)
 * 2. A loop where the current thread yields control after checking the lock.
 * 3. Issuing a thread-sleep call before starting back in phase 1.
 *
 * This is meant to give a good balance of performance and CPU consumption in
 * practice.
 *
 * This class implements the `BasicLockable` specification:
 * https://en.cppreference.com/w/cpp/named_req/BasicLockable
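 *
 * A usage sketch (the names below are illustrative, not part of this header):
 * because the class is BasicLockable, it composes with `std::lock_guard`.
 *
 *   common::SpinLockMutex mutex;
 *
 *   void Append(std::vector<int> &shared, int value)
 *   {
 *     std::lock_guard<common::SpinLockMutex> guard{mutex};
 *     shared.push_back(value);  // the spin lock is held for this scope
 *   }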
 */
class SpinLockMutex
{
public:
  SpinLockMutex() noexcept {}
  ~SpinLockMutex() noexcept = default;
  SpinLockMutex(const SpinLockMutex &) = delete;
  SpinLockMutex &operator=(const SpinLockMutex &) = delete;
  SpinLockMutex &operator=(const SpinLockMutex &) volatile = delete;

  /**
   * Attempts to lock the mutex.  Returns immediately with `true` (success) or
   * `false` (failure).
   */
  bool try_lock() noexcept
  {
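    // Test-and-test-and-set: the relaxed load fails fast while the lock is
    // held, avoiding the cache-line contention an unconditional exchange
    // would cause; the exchange only runs when the lock looks free.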
    return !flag_.load(std::memory_order_relaxed) &&
           !flag_.exchange(true, std::memory_order_acquire);
  }

  /**
   * Blocks until a lock can be obtained for the current thread.
   *
   * Uses the phased back-off strategy described in the class comment: the
   * calling thread spins, yields, and eventually sleeps until the lock is
   * acquired.
   */
  void lock() noexcept
  {
    for (;;)
    {
      // Try once
      if (!flag_.exchange(true, std::memory_order_acquire))
      {
        return;
      }
      // Spin-Fast (goal ~10ns)
      for (std::size_t i = 0; i < SPINLOCK_FAST_ITERATIONS; ++i)
      {
        if (try_lock())
        {
          return;
        }
        // TODO: Issue PAUSE/YIELD instruction to reduce contention.
        // e.g. __builtin_ia32_pause() / YieldProcessor() / _mm_pause();
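        // One possible dispatch for that TODO (a sketch, left disabled here;
        // intrinsic availability varies by compiler and architecture):
        //
        // #if defined(_MSC_VER)
        //   YieldProcessor();
        // #elif defined(__i386__) || defined(__x86_64__)
        //   __builtin_ia32_pause();
        // #endif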
      }
      // Yield then try again (goal ~100ns)
      std::this_thread::yield();
      if (try_lock())
      {
        return;
      }
      // Sleep (SPINLOCK_SLEEP_MS milliseconds) and then start the whole
      // process again.
      std::this_thread::sleep_for(std::chrono::milliseconds(SPINLOCK_SLEEP_MS));
    }
  }
  /** Releases the lock held by the execution agent. Throws no exceptions. */
  void unlock() noexcept { flag_.store(false, std::memory_order_release); }

private:
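  // A plain std::atomic<bool> (rather than std::atomic_flag) permits the
  // non-modifying relaxed load in try_lock(); std::atomic_flag only gained a
  // test() accessor in C++20. The release store in unlock() pairs with the
  // acquire exchange in lock()/try_lock().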
  std::atomic<bool> flag_{false};
};

}  // namespace common
OPENTELEMETRY_END_NAMESPACE