// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use crate::raw_rwlock::RawRwLock;

/// A reader-writer lock
///
/// This type of lock allows a number of readers or at most one writer at any
/// point in time. The write portion of this lock typically allows modification
/// of the underlying data (exclusive access) and the read portion of this lock
/// typically allows for read-only access (shared access).
///
/// This lock uses a task-fair locking policy which avoids both reader and
/// writer starvation. This means that readers trying to acquire the lock will
/// block even if the lock is unlocked when there are writers waiting to acquire
/// the lock. Because of this, attempts to recursively acquire a read lock
/// within a single thread may result in a deadlock.
///
/// The type parameter `T` represents the data that this lock protects. It is
/// required that `T` satisfies `Send` to be shared across threads and `Sync` to
/// allow concurrent access through readers. The RAII guards returned from the
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the contents of the lock.
///
/// # Fairness
///
/// A typical unfair lock can often end up in a situation where a single thread
/// quickly acquires and releases the same rwlock in succession. While this
/// improves throughput because it doesn't force a context switch when a thread
/// tries to re-acquire a rwlock it has just released, it can starve other
/// threads waiting to acquire the lock.
///
/// This rwlock uses [eventual fairness](https://trac.webkit.org/changeset/203350)
/// to ensure that the lock will be fair on average without sacrificing
/// throughput. This is done by forcing a fair unlock on average every 0.5ms,
/// which will force the lock to go to the next thread waiting for the rwlock.
///
/// Additionally, any critical section longer than 1ms will always use a fair
/// unlock, which has a negligible impact on throughput considering the length
/// of the critical section.
///
/// You can also force a fair unlock by calling `RwLockReadGuard::unlock_fair`
/// or `RwLockWriteGuard::unlock_fair` when unlocking the rwlock instead of
/// simply dropping the guard.
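///
/// As a brief sketch of forcing a fair unlock (the counter here is purely
/// illustrative):
///
/// ```
/// use parking_lot::{RwLock, RwLockWriteGuard};
///
/// let lock = RwLock::new(0u32);
/// let mut guard = lock.write();
/// *guard += 1;
/// // Instead of dropping the guard, explicitly hand the lock to the next
/// // waiting thread (if any) with a fair unlock.
/// RwLockWriteGuard::unlock_fair(guard);
/// ```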
///
/// # Differences from the standard library `RwLock`
///
/// - Supports atomically downgrading a write lock into a read lock (see the
///   sketch below this list).
/// - Task-fair locking policy instead of an unspecified platform default.
/// - No poisoning; the lock is released normally on panic.
/// - Only requires 1 word of space, whereas the standard library boxes the
///   `RwLock` due to platform limitations.
/// - Can be statically constructed.
/// - Does not require any drop glue when dropped.
/// - Inline fast path for the uncontended case.
/// - Efficient handling of micro-contention using adaptive spinning.
/// - Allows raw locking & unlocking without a guard.
/// - Supports eventual fairness so that the rwlock is fair on average.
/// - Optionally allows making the rwlock fair by calling
///   `RwLockReadGuard::unlock_fair` and `RwLockWriteGuard::unlock_fair`.
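///
/// A small sketch of the downgrade support mentioned above (the value is
/// just for illustration):
///
/// ```
/// use parking_lot::{RwLock, RwLockWriteGuard};
///
/// let lock = RwLock::new(1);
/// let mut writer = lock.write();
/// *writer += 1;
/// // Atomically convert the exclusive lock into a shared one; no other
/// // writer can sneak in between.
/// let reader = RwLockWriteGuard::downgrade(writer);
/// assert_eq!(*reader, 2);
/// ```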
///
/// # Examples
///
/// ```
/// use parking_lot::RwLock;
///
/// let lock = RwLock::new(5);
///
/// // many reader locks can be held at once
/// {
///     let r1 = lock.read();
///     let r2 = lock.read();
///     assert_eq!(*r1, 5);
///     assert_eq!(*r2, 5);
/// } // read locks are dropped at this point
///
/// // only one write lock may be held, however
/// {
///     let mut w = lock.write();
///     *w += 1;
///     assert_eq!(*w, 6);
/// } // write lock is dropped here
/// ```
pub type RwLock<T> = lock_api::RwLock<RawRwLock, T>;

/// Creates a new instance of an `RwLock<T>` which is unlocked.
///
/// This allows creating a `RwLock<T>` in a constant context on stable Rust.
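///
/// A minimal usage sketch (the static name is arbitrary):
///
/// ```
/// use parking_lot::{const_rwlock, RwLock};
///
/// // Initialized at compile time; no runtime setup needed.
/// static COUNTER: RwLock<u32> = const_rwlock(0);
///
/// *COUNTER.write() += 1;
/// assert_eq!(*COUNTER.read(), 1);
/// ```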
pub const fn const_rwlock<T>(val: T) -> RwLock<T> {
    RwLock::const_new(<RawRwLock as lock_api::RawRwLock>::INIT, val)
}

/// RAII structure used to release the shared read access of a lock when
/// dropped.
pub type RwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, RawRwLock, T>;

/// RAII structure used to release the exclusive write access of a lock when
/// dropped.
pub type RwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, RawRwLock, T>;

/// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point
/// to a subfield of the protected data.
///
/// The main difference between `MappedRwLockReadGuard` and `RwLockReadGuard` is
/// that the former doesn't support temporarily unlocking and re-locking, since
/// that could introduce soundness issues if the locked object is modified by
/// another thread.
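///
/// A short sketch of obtaining one via `RwLockReadGuard::map` (the struct and
/// field are made up for illustration):
///
/// ```
/// use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard};
///
/// struct Point { x: i32, y: i32 }
///
/// let lock = RwLock::new(Point { x: 1, y: 2 });
/// // Narrow the guard so it dereferences only to the `x` field.
/// let x: MappedRwLockReadGuard<'_, i32> =
///     RwLockReadGuard::map(lock.read(), |p| &p.x);
/// assert_eq!(*x, 1);
/// ```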
pub type MappedRwLockReadGuard<'a, T> = lock_api::MappedRwLockReadGuard<'a, RawRwLock, T>;

/// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can
/// point to a subfield of the protected data.
///
/// The main difference between `MappedRwLockWriteGuard` and `RwLockWriteGuard`
/// is that the former doesn't support temporarily unlocking and re-locking,
/// since that could introduce soundness issues if the locked object is modified
/// by another thread.
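///
/// A similar sketch for the write side (again with a made-up struct):
///
/// ```
/// use parking_lot::{RwLock, RwLockWriteGuard};
///
/// struct Point { x: i32, y: i32 }
///
/// let lock = RwLock::new(Point { x: 1, y: 2 });
/// {
///     // The mapped guard grants mutable access to just the `x` field.
///     let mut x = RwLockWriteGuard::map(lock.write(), |p| &mut p.x);
///     *x += 1;
/// }
/// assert_eq!(lock.read().x, 2);
/// ```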
pub type MappedRwLockWriteGuard<'a, T> = lock_api::MappedRwLockWriteGuard<'a, RawRwLock, T>;

/// RAII structure used to release the upgradable read access of a lock when
/// dropped.
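///
/// A minimal sketch of the check-then-upgrade pattern this guard enables (the
/// vector is just for illustration):
///
/// ```
/// use parking_lot::{RwLock, RwLockUpgradableReadGuard};
///
/// let lock: RwLock<Vec<i32>> = RwLock::new(Vec::new());
/// // Other readers may coexist with this guard, but it can be atomically
/// // upgraded to a write lock once a write turns out to be needed.
/// let guard = lock.upgradable_read();
/// if guard.is_empty() {
///     let mut guard = RwLockUpgradableReadGuard::upgrade(guard);
///     guard.push(1);
/// }
/// assert_eq!(*lock.read(), vec![1]);
/// ```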
pub type RwLockUpgradableReadGuard<'a, T> = lock_api::RwLockUpgradableReadGuard<'a, RawRwLock, T>;

#[cfg(test)]
mod tests {
    use crate::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard};
    use rand::Rng;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;
    use std::time::Duration;

    #[cfg(feature = "serde")]
    use bincode::{deserialize, serialize};

    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    #[test]
    fn smoke() {
        let l = RwLock::new(());
        drop(l.read());
        drop(l.write());
        drop(l.upgradable_read());
        drop((l.read(), l.read()));
        drop((l.read(), l.upgradable_read()));
        drop(l.write());
    }

    #[test]
    fn frob() {
        const N: u32 = 10;
        const M: u32 = 1000;

        let r = Arc::new(RwLock::new(()));

        let (tx, rx) = channel::<()>();
        for _ in 0..N {
            let tx = tx.clone();
            let r = r.clone();
            thread::spawn(move || {
                let mut rng = rand::thread_rng();
                for _ in 0..M {
                    if rng.gen_bool(1.0 / N as f64) {
                        drop(r.write());
                    } else {
                        drop(r.read());
                    }
                }
                drop(tx);
            });
        }
        drop(tx);
        let _ = rx.recv();
    }

    #[test]
    fn test_rw_arc_no_poison_wr() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move || {
            let _lock = arc2.write();
            panic!();
        })
        .join();
        let lock = arc.read();
        assert_eq!(*lock, 1);
    }

    #[test]
    fn test_rw_arc_no_poison_ww() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move || {
            let _lock = arc2.write();
            panic!();
        })
        .join();
        let lock = arc.write();
        assert_eq!(*lock, 1);
    }

    #[test]
    fn test_rw_arc_no_poison_rr() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move || {
            let _lock = arc2.read();
            panic!();
        })
        .join();
        let lock = arc.read();
        assert_eq!(*lock, 1);
    }

    #[test]
    fn test_rw_arc_no_poison_rw() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _: Result<(), _> = thread::spawn(move || {
            let _lock = arc2.read();
            panic!()
        })
        .join();
        let lock = arc.write();
        assert_eq!(*lock, 1);
    }

    #[test]
    fn test_ruw_arc() {
        let arc = Arc::new(RwLock::new(0));
        let arc2 = arc.clone();
        let (tx, rx) = channel();

        thread::spawn(move || {
            for _ in 0..10 {
                let mut lock = arc2.write();
                let tmp = *lock;
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }
            tx.send(()).unwrap();
        });

        let mut children = Vec::new();

        // Upgradable readers try to catch the writer in the act and also
        // try to touch the value
        for _ in 0..5 {
            let arc3 = arc.clone();
            children.push(thread::spawn(move || {
                let lock = arc3.upgradable_read();
                let tmp = *lock;
                assert!(tmp >= 0);
                thread::yield_now();
                let mut lock = RwLockUpgradableReadGuard::upgrade(lock);
                assert_eq!(tmp, *lock);
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }));
        }

        // Readers try to catch the writers in the act
        for _ in 0..5 {
            let arc4 = arc.clone();
            children.push(thread::spawn(move || {
                let lock = arc4.read();
                assert!(*lock >= 0);
            }));
        }

        // Wait for children to pass their asserts
        for r in children {
            assert!(r.join().is_ok());
        }

        // Wait for writer to finish
        rx.recv().unwrap();
        let lock = arc.read();
        assert_eq!(*lock, 15);
    }

    #[test]
    fn test_rw_arc() {
        let arc = Arc::new(RwLock::new(0));
        let arc2 = arc.clone();
        let (tx, rx) = channel();

        thread::spawn(move || {
            let mut lock = arc2.write();
            for _ in 0..10 {
                let tmp = *lock;
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }
            tx.send(()).unwrap();
        });

        // Readers try to catch the writer in the act
        let mut children = Vec::new();
        for _ in 0..5 {
            let arc3 = arc.clone();
            children.push(thread::spawn(move || {
                let lock = arc3.read();
                assert!(*lock >= 0);
            }));
        }

        // Wait for children to pass their asserts
        for r in children {
            assert!(r.join().is_ok());
        }

        // Wait for writer to finish
        rx.recv().unwrap();
        let lock = arc.read();
        assert_eq!(*lock, 10);
    }

    #[test]
    fn test_rw_arc_access_in_unwind() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || {
            struct Unwinder {
                i: Arc<RwLock<isize>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    let mut lock = self.i.write();
                    *lock += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let lock = arc.read();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn test_rwlock_unsized() {
        let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
        {
            let b = &mut *rw.write();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*rw.read(), comp);
    }

    #[test]
    fn test_rwlock_try_read() {
        let lock = RwLock::new(0isize);
        {
            let read_guard = lock.read();

            let read_result = lock.try_read();
            assert!(
                read_result.is_some(),
                "try_read should succeed while read_guard is in scope"
            );

            drop(read_guard);
        }
        {
            let upgrade_guard = lock.upgradable_read();

            let read_result = lock.try_read();
            assert!(
                read_result.is_some(),
                "try_read should succeed while upgrade_guard is in scope"
            );

            drop(upgrade_guard);
        }
        {
            let write_guard = lock.write();

            let read_result = lock.try_read();
            assert!(
                read_result.is_none(),
                "try_read should fail while write_guard is in scope"
            );

            drop(write_guard);
        }
    }

    #[test]
    fn test_rwlock_try_write() {
        let lock = RwLock::new(0isize);
        {
            let read_guard = lock.read();

            let write_result = lock.try_write();
            assert!(
                write_result.is_none(),
                "try_write should fail while read_guard is in scope"
            );
            assert!(lock.is_locked());
            assert!(!lock.is_locked_exclusive());

            drop(read_guard);
        }
        {
            let upgrade_guard = lock.upgradable_read();

            let write_result = lock.try_write();
            assert!(
                write_result.is_none(),
                "try_write should fail while upgrade_guard is in scope"
            );
            assert!(lock.is_locked());
            assert!(!lock.is_locked_exclusive());

            drop(upgrade_guard);
        }
        {
            let write_guard = lock.write();

            let write_result = lock.try_write();
            assert!(
                write_result.is_none(),
                "try_write should fail while write_guard is in scope"
            );
            assert!(lock.is_locked());
            assert!(lock.is_locked_exclusive());

            drop(write_guard);
        }
    }

    #[test]
    fn test_rwlock_try_upgrade() {
        let lock = RwLock::new(0isize);
        {
            let read_guard = lock.read();

            let upgrade_result = lock.try_upgradable_read();
            assert!(
                upgrade_result.is_some(),
                "try_upgradable_read should succeed while read_guard is in scope"
            );

            drop(read_guard);
        }
        {
            let upgrade_guard = lock.upgradable_read();

            let upgrade_result = lock.try_upgradable_read();
            assert!(
                upgrade_result.is_none(),
                "try_upgradable_read should fail while upgrade_guard is in scope"
            );

            drop(upgrade_guard);
        }
        {
            let write_guard = lock.write();

            let upgrade_result = lock.try_upgradable_read();
            assert!(
                upgrade_result.is_none(),
                "try_upgradable_read should fail while write_guard is in scope"
            );

            drop(write_guard);
        }
    }

    #[test]
    fn test_into_inner() {
        let m = RwLock::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = RwLock::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_get_mut() {
        let mut m = RwLock::new(NonCopy(10));
        *m.get_mut() = NonCopy(20);
        assert_eq!(m.into_inner(), NonCopy(20));
    }

    #[test]
    fn test_rwlockguard_sync() {
        fn sync<T: Sync>(_: T) {}

        let rwlock = RwLock::new(());
        sync(rwlock.read());
        sync(rwlock.write());
    }

    #[test]
    fn test_rwlock_downgrade() {
        let x = Arc::new(RwLock::new(0));
        let mut handles = Vec::new();
        for _ in 0..8 {
            let x = x.clone();
            handles.push(thread::spawn(move || {
                for _ in 0..100 {
                    let mut writer = x.write();
                    *writer += 1;
                    let cur_val = *writer;
                    let reader = RwLockWriteGuard::downgrade(writer);
                    assert_eq!(cur_val, *reader);
                }
            }));
        }
        for handle in handles {
            handle.join().unwrap()
        }
        assert_eq!(*x.read(), 800);
    }

    #[test]
    fn test_rwlock_recursive() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let lock1 = arc.read();
        let t = thread::spawn(move || {
            let _lock = arc2.write();
        });

        if cfg!(not(all(target_env = "sgx", target_vendor = "fortanix"))) {
            thread::sleep(Duration::from_millis(100));
        } else {
            // FIXME: https://github.com/fortanix/rust-sgx/issues/31
            for _ in 0..100 {
                thread::yield_now();
            }
        }

        // A normal read would block here since there is a pending writer
        let lock2 = arc.read_recursive();

        // Unblock the thread and join it.
        drop(lock1);
        drop(lock2);
        t.join().unwrap();
    }

    #[test]
    fn test_rwlock_debug() {
        let x = RwLock::new(vec![0u8, 10]);

        assert_eq!(format!("{:?}", x), "RwLock { data: [0, 10] }");
        let _lock = x.write();
        assert_eq!(format!("{:?}", x), "RwLock { data: <locked> }");
    }

    #[test]
    fn test_clone() {
        let rwlock = RwLock::new(Arc::new(1));
        let a = rwlock.read_recursive();
        let b = a.clone();
        assert_eq!(Arc::strong_count(&b), 2);
    }

    #[cfg(feature = "serde")]
    #[test]
    fn test_serde() {
        let contents: Vec<u8> = vec![0, 1, 2];
        let mutex = RwLock::new(contents.clone());

        let serialized = serialize(&mutex).unwrap();
        let deserialized: RwLock<Vec<u8>> = deserialize(&serialized).unwrap();

        assert_eq!(*(mutex.read()), *(deserialized.read()));
        assert_eq!(contents, *(deserialized.read()));
    }

    #[test]
    fn test_issue_203() {
        struct Bar(RwLock<()>);

        impl Drop for Bar {
            fn drop(&mut self) {
                let _n = self.0.write();
            }
        }

        thread_local! {
            static B: Bar = Bar(RwLock::new(()));
        }

        thread::spawn(|| {
            B.with(|_| ());

            let a = RwLock::new(());
            let _a = a.read();
        })
        .join()
        .unwrap();
    }

    #[test]
    fn test_rw_write_is_locked() {
        let lock = RwLock::new(0isize);
        {
            let _read_guard = lock.read();

            assert!(lock.is_locked());
            assert!(!lock.is_locked_exclusive());
        }

        {
            let _write_guard = lock.write();

            assert!(lock.is_locked());
            assert!(lock.is_locked_exclusive());
        }
    }

    #[test]
    #[cfg(feature = "arc_lock")]
    fn test_issue_430() {
        let lock = std::sync::Arc::new(RwLock::new(0));

        let mut rl = lock.upgradable_read_arc();

        rl.with_upgraded(|_| {
            println!("lock upgrade");
        });

        rl.with_upgraded(|_| {
            println!("lock upgrade");
        });

        drop(lock);
    }
}