commit 5b735eb1ce

The kernel documents smp_mb__after_unlock_lock() the following way:

  "Place this after a lock-acquisition primitive to guarantee that
   an UNLOCK+LOCK pair acts as a full barrier.  This guarantee applies
   if the UNLOCK and LOCK are executed by the same CPU or if the
   UNLOCK and LOCK operate on the same lock variable."

Formalize in LKMM the above guarantee by defining (new) mb-links
according to the law:

  ([M] ; po ; [UL] ; (co | po) ; [LKW] ;
	fencerel(After-unlock-lock) ; [M])

where the component ([UL] ; co ; [LKW]) identifies "UNLOCK+LOCK pairs
on the same lock variable" and the component ([UL] ; po ; [LKW])
identifies "UNLOCK+LOCK pairs executed by the same CPU".

In particular, the LKMM forbids the following two behaviors (the
second litmus test below is based on
Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html,
cf. Section "Tree RCU Grace Period Memory Ordering Building Blocks"):

C after-unlock-lock-same-cpu

(*
 * Result: Never
 *)

{}

P0(spinlock_t *s, spinlock_t *t, int *x, int *y)
{
	int r0;

	spin_lock(s);
	WRITE_ONCE(*x, 1);
	spin_unlock(s);

	spin_lock(t);
	smp_mb__after_unlock_lock();
	r0 = READ_ONCE(*y);
	spin_unlock(t);
}

P1(int *x, int *y)
{
	int r0;

	WRITE_ONCE(*y, 1);
	smp_mb();
	r0 = READ_ONCE(*x);
}

exists (0:r0=0 /\ 1:r0=0)

C after-unlock-lock-same-lock-variable

(*
 * Result: Never
 *)

{}

P0(spinlock_t *s, int *x, int *y)
{
	int r0;

	spin_lock(s);
	WRITE_ONCE(*x, 1);
	r0 = READ_ONCE(*y);
	spin_unlock(s);
}

P1(spinlock_t *s, int *y, int *z)
{
	int r0;

	spin_lock(s);
	smp_mb__after_unlock_lock();
	WRITE_ONCE(*y, 1);
	r0 = READ_ONCE(*z);
	spin_unlock(s);
}

P2(int *z, int *x)
{
	int r0;

	WRITE_ONCE(*z, 1);
	smp_mb();
	r0 = READ_ONCE(*x);
}

exists (0:r0=0 /\ 1:r0=0 /\ 2:r0=0)

Signed-off-by: Andrea Parri <andrea.parri@amarulasolutions.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
Cc: Akira Yokosawa <akiyks@gmail.com>
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Daniel Lustig <dlustig@nvidia.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Jade Alglave <j.alglave@ucl.ac.uk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Luc Maranget <luc.maranget@inria.fr>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-arch@vger.kernel.org
Cc: parri.andrea@gmail.com
Link: http://lkml.kernel.org/r/20181203230451.28921-1-paulmck@linux.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
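
For context, the law above is the alternative this commit adds to the
mb relation in tools/memory-model/linux-kernel.cat. A sketch of how it
slots in, reconstructed from the model of that era (the exact set of
neighboring alternatives is an assumption and should be checked
against the tree):

	let mb = ([M] ; fencerel(Mb) ; [M]) |
		([M] ; fencerel(Before-atomic) ; [RMW] ; po? ; [M]) |
		([M] ; po? ; [RMW] ; fencerel(After-atomic) ; [M]) |
		([M] ; po? ; [LKW] ; fencerel(After-spinlock) ; [M]) |
		([M] ; po ; [UL] ; (co | po) ; [LKW] ;
			fencerel(After-unlock-lock) ; [M])

The (co | po) disjunction is what folds the two documented cases into
one rule: co reaches a later LOCK by any CPU on the same lock
variable, while po reaches any later LOCK on the same CPU.
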
54 lines | 1.9 KiB | Plaintext

// SPDX-License-Identifier: GPL-2.0+
(*
 * Copyright (C) 2015 Jade Alglave <j.alglave@ucl.ac.uk>,
 * Copyright (C) 2016 Luc Maranget <luc.maranget@inria.fr> for Inria
 * Copyright (C) 2017 Alan Stern <stern@rowland.harvard.edu>,
 *                    Andrea Parri <parri.andrea@gmail.com>
 *
 * An earlier version of this file appeared in the companion webpage for
 * "Frightening small children and disconcerting grown-ups: Concurrency
 * in the Linux kernel" by Alglave, Maranget, McKenney, Parri, and Stern,
 * which appeared in ASPLOS 2018.
 *)

"Linux-kernel memory consistency model"

enum Accesses = 'once (*READ_ONCE,WRITE_ONCE*) ||
		'release (*smp_store_release*) ||
		'acquire (*smp_load_acquire*) ||
		'noreturn (* R of non-return RMW *)
instructions R[{'once,'acquire,'noreturn}]
instructions W[{'once,'release}]
instructions RMW[{'once,'acquire,'release}]
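
To make the tag assignment concrete, here is a minimal litmus-style
snippet (the test name is made up for illustration): WRITE_ONCE()
produces a W event tagged 'once, smp_store_release() a W tagged
'release, and smp_load_acquire() an R tagged 'acquire; the R of a
non-value-returning RMW such as atomic_inc() would carry 'noreturn.

	C access-tags-example

	{}

	P0(int *x, int *y)
	{
		int r0;

		WRITE_ONCE(*x, 1);        /* W tagged 'once */
		smp_store_release(y, 1);  /* W tagged 'release */
		r0 = smp_load_acquire(x); /* R tagged 'acquire */
	}

	exists (0:r0=1)

The exists clause is incidental here; the point is only which event
each primitive generates.
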
enum Barriers = 'wmb (*smp_wmb*) ||
		'rmb (*smp_rmb*) ||
		'mb (*smp_mb*) ||
		'rcu-lock (*rcu_read_lock*) ||
		'rcu-unlock (*rcu_read_unlock*) ||
		'sync-rcu (*synchronize_rcu*) ||
		'before-atomic (*smp_mb__before_atomic*) ||
		'after-atomic (*smp_mb__after_atomic*) ||
		'after-spinlock (*smp_mb__after_spinlock*) ||
		'after-unlock-lock (*smp_mb__after_unlock_lock*)
instructions F[Barriers]
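
These fence tags are consumed in linux-kernel.cat through herd7's
fencerel. Assuming the stdlib definition (roughly as below, an
assumption worth checking against stdlib.cat), fencerel(Mb) relates
any two events separated in program order by an smp_mb() fence event,
and similarly for the other tags:

	(* From herd7's stdlib, approximately: *)
	let fencerel(B) = (po & (_ * B)) ; po
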
(* Compute matching pairs of nested Rcu-lock and Rcu-unlock *)
let matched = let rec
	    unmatched-locks = Rcu-lock \ domain(matched)
	and unmatched-unlocks = Rcu-unlock \ range(matched)
	and unmatched = unmatched-locks | unmatched-unlocks
	and unmatched-po = [unmatched] ; po ; [unmatched]
	and unmatched-locks-to-unlocks =
		[unmatched-locks] ; po ; [unmatched-unlocks]
	and matched = matched | (unmatched-locks-to-unlocks \
		(unmatched-po ; unmatched-po))
	in matched
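
A worked trace of this fixpoint may help; the event names L1/L2/U1/U2
are invented for illustration:

	(*
	 * Suppose one process executes, in program order:
	 *
	 *	L1: rcu_read_lock()	outer lock
	 *	L2: rcu_read_lock()	inner lock
	 *	U1: rcu_read_unlock()	inner unlock
	 *	U2: rcu_read_unlock()	outer unlock
	 *
	 * First iteration: all four events are unmatched; of the
	 * candidate lock-to-unlock pairs, only (L2, U1) has no
	 * unmatched event strictly between its endpoints, i.e., it
	 * is not in unmatched-po ; unmatched-po, so matched becomes
	 * {(L2, U1)}.
	 * Second iteration: L1 and U2 are the only unmatched events
	 * left, and nothing unmatched lies between them, so (L1, U2)
	 * is added and the fixpoint {(L2, U1), (L1, U2)} is reached.
	 *)
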
(* Validate nesting *)
flag ~empty Rcu-lock \ domain(matched) as unbalanced-rcu-locking
flag ~empty Rcu-unlock \ range(matched) as unbalanced-rcu-locking
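
For instance, a test that takes an RCU read lock and never releases it
leaves that Rcu-lock event outside domain(matched), so herd7 raises
the unbalanced-rcu-locking flag. A hypothetical minimal reproducer
(name invented for illustration):

	C rcu-unbalanced-example

	{}

	P0(int *x)
	{
		rcu_read_lock();
		WRITE_ONCE(*x, 1);
		/* note: no matching rcu_read_unlock() */
	}

	exists (x=1)
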
(* Outermost level of nesting only *)
let crit = matched \ (po^-1 ; matched ; po^-1)
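
Continuing the L1/L2/U1/U2 example above: matched contains both
(L2, U1) and (L1, U2), but only the outer pair survives in crit,
since L1 is po-before L2 and U1 is po-before U2:

	(*
	 * (L2, U1) is in po^-1 ; matched ; po^-1:
	 *	L2 ->po^-1 L1 ->matched U2 ->po^-1 U1
	 * so crit = {(L1, U2)} for this trace.
	 *)

RCU read-side critical sections are therefore the outermost
lock-to-unlock spans only, which is what the rest of the model treats
as the critical section.
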