// parking_lot_core/word_lock.rs

use crate::spinwait::SpinWait;
use crate::thread_parker::{ThreadParker, ThreadParkerT, UnparkHandleT};
use core::{
    cell::Cell,
    mem, ptr,
    sync::atomic::{fence, AtomicUsize, Ordering},
};
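// Overview of the scheme implemented below: the entire lock state lives in a
// single `AtomicUsize`. Bit 0 is the lock flag, bit 1 is a "queue locked" flag
// guarding queue maintenance, and the remaining bits hold a pointer to the
// head of an intrusive queue of parked threads (`ThreadData`, which lives in
// each waiter's TLS or on its stack). Uncontended lock/unlock is a single
// atomic operation; the `#[cold]` slow paths handle queueing and waking.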

struct ThreadData {
    parker: ThreadParker,

    // Intrusive queue links. A newly parked thread pushes itself onto the
    // front of the queue with only `next` set (or becomes the head with
    // `queue_tail` pointing at itself if the queue was empty); `unlock_slow`
    // later fills in the `prev` pointers and caches the real tail in the
    // head's `queue_tail` as it scans the queue.
    queue_tail: Cell<*const ThreadData>,
    prev: Cell<*const ThreadData>,
    next: Cell<*const ThreadData>,
}

impl ThreadData {
    #[inline]
    fn new() -> ThreadData {
        // The queue head pointer is stored in the upper bits of the state
        // word, so ThreadData must be aligned beyond the two flag bits.
        assert!(mem::align_of::<ThreadData>() > !QUEUE_MASK);
        ThreadData {
            parker: ThreadParker::new(),
            queue_tail: Cell::new(ptr::null()),
            prev: Cell::new(ptr::null()),
            next: Cell::new(ptr::null()),
        }
    }
}

#[inline]
fn with_thread_data<T>(f: impl FnOnce(&ThreadData) -> T) -> T {
    let mut thread_data_ptr = ptr::null();
    // If ThreadData is expensive to construct, try to reuse a cached copy in
    // thread-local storage.
    if !ThreadParker::IS_CHEAP_TO_CONSTRUCT {
        thread_local!(static THREAD_DATA: ThreadData = ThreadData::new());
        if let Ok(tls_thread_data) = THREAD_DATA.try_with(|x| x as *const ThreadData) {
            thread_data_ptr = tls_thread_data;
        }
    }
    // Otherwise (or if TLS is unavailable, e.g. during thread teardown), just
    // construct a ThreadData on the stack for the duration of the call.
    let mut thread_data_storage = None;
    if thread_data_ptr.is_null() {
        thread_data_ptr = thread_data_storage.get_or_insert_with(ThreadData::new);
    }

    f(unsafe { &*thread_data_ptr })
}

const LOCKED_BIT: usize = 1;
const QUEUE_LOCKED_BIT: usize = 2;
const QUEUE_MASK: usize = !3;

pub struct WordLock {
    state: AtomicUsize,
}

impl WordLock {
    pub const fn new() -> Self {
        WordLock {
            state: AtomicUsize::new(0),
        }
    }

    #[inline]
    pub fn lock(&self) {
        // Fast path: try to acquire an uncontended lock with a single CAS.
        if self
            .state
            .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
        {
            return;
        }
        self.lock_slow();
    }

    // Safety: the caller must currently hold the lock.
    #[inline]
    pub unsafe fn unlock(&self) {
        let state = self.state.fetch_sub(LOCKED_BIT, Ordering::Release);
        if state.is_queue_locked() || state.queue_head().is_null() {
            return;
        }
        self.unlock_slow();
    }

    #[cold]
    fn lock_slow(&self) {
        let mut spinwait = SpinWait::new();
        let mut state = self.state.load(Ordering::Relaxed);
        loop {
            // Grab the lock if it isn't held, even if there is a queue of
            // waiting threads.
            if !state.is_locked() {
                match self.state.compare_exchange_weak(
                    state,
                    state | LOCKED_BIT,
                    Ordering::Acquire,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => return,
                    Err(x) => state = x,
                }
                continue;
            }

            // If there is no queue yet, spin for a while before parking.
            if state.queue_head().is_null() && spinwait.spin() {
                state = self.state.load(Ordering::Relaxed);
                continue;
            }

            // Get our per-thread data and prepare it for parking.
            state = with_thread_data(|thread_data| {
                #[allow(unused_unsafe)]
                unsafe {
                    thread_data.parker.prepare_park();
                }

                // Push our thread onto the front of the queue.
                let queue_head = state.queue_head();
                if queue_head.is_null() {
                    thread_data.queue_tail.set(thread_data);
                    thread_data.prev.set(ptr::null());
                } else {
                    thread_data.queue_tail.set(ptr::null());
                    thread_data.prev.set(ptr::null());
                    thread_data.next.set(queue_head);
                }
                if let Err(x) = self.state.compare_exchange_weak(
                    state,
                    state.with_queue_head(thread_data),
                    Ordering::AcqRel,
                    Ordering::Relaxed,
                ) {
                    return x;
                }

                // Sleep until we are woken up by an unlock.
                #[allow(unused_unsafe)]
                unsafe {
                    thread_data.parker.park();
                }

                // Loop back and try locking again.
                spinwait.reset();
                self.state.load(Ordering::Relaxed)
            });
        }
    }

    #[cold]
    fn unlock_slow(&self) {
        let mut state = self.state.load(Ordering::Relaxed);
        loop {
            // The lock itself was already released by `unlock`. If there is no
            // queued thread, or another thread already holds the queue lock
            // (and will take care of waking someone), there is nothing to do.
            if state.is_queue_locked() || state.queue_head().is_null() {
                return;
            }

            // Try to grab the queue lock.
            match self.state.compare_exchange_weak(
                state,
                state | QUEUE_LOCKED_BIT,
                Ordering::Acquire,
                Ordering::Relaxed,
            ) {
                Ok(_) => break,
                Err(x) => state = x,
            }
        }

        // At this point we hold the queue lock and the queue is non-empty.
        'outer: loop {
            let queue_head = state.queue_head();

            // Walk the queue from the head, filling in `prev` pointers, until
            // we reach a node whose `queue_tail` is already set (i.e. a node
            // processed by a previous scan).
            let mut queue_tail;
            let mut current = queue_head;
            loop {
                queue_tail = unsafe { (*current).queue_tail.get() };
                if !queue_tail.is_null() {
                    break;
                }
                unsafe {
                    let next = (*current).next.get();
                    (*next).prev.set(current);
                    current = next;
                }
            }

            // Cache the tail on the queue head so later scans can stop early.
            unsafe {
                (*queue_head).queue_tail.set(queue_tail);
            }

            // If the lock was re-acquired in the meantime, don't wake anyone:
            // just release the queue lock and let the next unlock handle it.
            if state.is_locked() {
                match self.state.compare_exchange_weak(
                    state,
                    state & !QUEUE_LOCKED_BIT,
                    Ordering::Release,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => return,
                    Err(x) => state = x,
                }

                // Need an acquire fence before re-reading the queue.
                fence_acquire(&self.state);
                continue;
            }

            // Remove the tail thread from the queue and release the queue lock.
            let new_tail = unsafe { (*queue_tail).prev.get() };
            if new_tail.is_null() {
                // The tail is the only queued thread: clear the queue pointer
                // and the queue lock, keeping only the lock bit.
                loop {
                    match self.state.compare_exchange_weak(
                        state,
                        state & LOCKED_BIT,
                        Ordering::Release,
                        Ordering::Relaxed,
                    ) {
                        Ok(_) => break,
                        Err(x) => state = x,
                    }

                    // If the queue is still non-empty (e.g. a new thread was
                    // queued while we were trying to clear it), re-scan it;
                    // otherwise just retry the compare-exchange.
                    if state.queue_head().is_null() {
                        continue;
                    } else {
                        // Need an acquire fence before re-reading the queue.
                        fence_acquire(&self.state);
                        continue 'outer;
                    }
                }
            } else {
                unsafe {
                    (*queue_head).queue_tail.set(new_tail);
                }
                self.state.fetch_and(!QUEUE_LOCKED_BIT, Ordering::Release);
            }

            // Finally wake the thread we removed from the queue; it is parked
            // (or about to park) and only we can unpark it.
            unsafe {
                (*queue_tail).parker.unpark_lock().unpark();
            }
            break;
        }
    }
}

// ThreadSanitizer does not understand memory fences, so fall back to an
// acquire load when running under it.
#[inline]
fn fence_acquire(a: &AtomicUsize) {
    if cfg!(tsan_enabled) {
        let _ = a.load(Ordering::Acquire);
    } else {
        fence(Ordering::Acquire);
    }
}

trait LockState {
    fn is_locked(self) -> bool;
    fn is_queue_locked(self) -> bool;
    fn queue_head(self) -> *const ThreadData;
    fn with_queue_head(self, thread_data: *const ThreadData) -> Self;
}

impl LockState for usize {
    #[inline]
    fn is_locked(self) -> bool {
        self & LOCKED_BIT != 0
    }

    #[inline]
    fn is_queue_locked(self) -> bool {
        self & QUEUE_LOCKED_BIT != 0
    }

    #[inline]
    fn queue_head(self) -> *const ThreadData {
        (self & QUEUE_MASK) as *const ThreadData
    }

    #[inline]
    fn with_queue_head(self, thread_data: *const ThreadData) -> Self {
        (self & !QUEUE_MASK) | thread_data as *const _ as usize
    }
}
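
// Illustrative usage sketch (not part of the upstream file): a test assuming
// only the API defined above, showing WordLock guarding a shared counter from
// several threads. `unlock` is unsafe because the caller must guarantee it
// currently holds the lock.
#[cfg(test)]
mod word_lock_sketch_tests {
    use super::WordLock;
    use std::cell::UnsafeCell;
    use std::sync::Arc;
    use std::thread;

    struct Shared {
        lock: WordLock,
        // Counter protected by `lock`; only accessed while the lock is held.
        counter: UnsafeCell<usize>,
    }
    // Safe to share across threads because all access to `counter` happens
    // while `lock` is held.
    unsafe impl Sync for Shared {}

    #[test]
    fn counter_is_protected_across_threads() {
        let shared = Arc::new(Shared {
            lock: WordLock::new(),
            counter: UnsafeCell::new(0),
        });

        let mut handles = Vec::new();
        for _ in 0..4 {
            let shared = Arc::clone(&shared);
            handles.push(thread::spawn(move || {
                for _ in 0..1_000 {
                    shared.lock.lock();
                    unsafe {
                        *shared.counter.get() += 1;
                        shared.lock.unlock();
                    }
                }
            }));
        }
        for handle in handles {
            handle.join().unwrap();
        }

        assert_eq!(unsafe { *shared.counter.get() }, 4 * 1_000);
    }
}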