summary | refs | log | tree | commit | diff | stats
path: root/tools/memory-model
diff options
context:
space:
mode:
author    Luc Maranget <Luc.Maranget@inria.fr>  2018-05-14 19:33:48 -0400
committer Ingo Molnar <mingo@kernel.org>        2018-05-15 02:11:17 -0400
commit    15553dcbca0638de57047e79b9fb4ea77aa04db3 (patch)
tree      c8ea4191091b107a12b2e5bb1f35bb04a112fe04 /tools/memory-model
parent    2fb6ae162f25a9c3bc45663c479a2b15fb69e768 (diff)
tools/memory-model: Add model support for spin_is_locked()
This commit first adds a trivial macro for spin_is_locked() to
linux-kernel.def.

It also adds cat code for enumerating all possible matches of lock
write events (set LKW) with islocked events returning true (set RL,
for Read from Lock), and unlock write events (set UL) with islocked
events returning false (set RU, for Read from Unlock). Note that this
intentionally does not model uniprocessor kernels (CONFIG_SMP=n) built
with CONFIG_DEBUG_SPINLOCK=n, in which spin_is_locked() unconditionally
returns zero.

It also adds a pair of litmus tests demonstrating the minimal ordering
provided by spin_is_locked() in conjunction with spin_lock(). Will Deacon
noted that this minimal ordering happens on ARMv8:

https://lkml.kernel.org/r/20180226162426.GB17158@arm.com

Notice that herd7 installations strictly older than version 7.49 do not
handle the new constructs.

Signed-off-by: Luc Maranget <luc.maranget@inria.fr>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Alan Stern <stern@rowland.harvard.edu>
Cc: Akira Yokosawa <akiyks@gmail.com>
Cc: Andrea Parri <parri.andrea@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Jade Alglave <j.alglave@ucl.ac.uk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Luc Maranget <Luc.Maranget@inria.fr>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-arch@vger.kernel.org
Link: http://lkml.kernel.org/r/1526340837-12222-10-git-send-email-paulmck@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'tools/memory-model')
-rw-r--r--  tools/memory-model/linux-kernel.def                                    |  1
-rw-r--r--  tools/memory-model/litmus-tests/MP+polockmbonce+poacquiresilsil.litmus | 35
-rw-r--r--  tools/memory-model/litmus-tests/MP+polockonce+poacquiresilsil.litmus   | 34
-rw-r--r--  tools/memory-model/litmus-tests/README                                 | 10
-rw-r--r--  tools/memory-model/lock.cat                                            | 53
5 files changed, 129 insertions, 4 deletions
diff --git a/tools/memory-model/linux-kernel.def b/tools/memory-model/linux-kernel.def
index 6bd3bc431b3d..f0553bd37c08 100644
--- a/tools/memory-model/linux-kernel.def
+++ b/tools/memory-model/linux-kernel.def
@@ -38,6 +38,7 @@ cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
38spin_lock(X) { __lock(X); } 38spin_lock(X) { __lock(X); }
39spin_unlock(X) { __unlock(X); } 39spin_unlock(X) { __unlock(X); }
40spin_trylock(X) __trylock(X) 40spin_trylock(X) __trylock(X)
41spin_is_locked(X) __islocked(X)
41 42
42// RCU 43// RCU
43rcu_read_lock() { __fence{rcu-lock}; } 44rcu_read_lock() { __fence{rcu-lock}; }
diff --git a/tools/memory-model/litmus-tests/MP+polockmbonce+poacquiresilsil.litmus b/tools/memory-model/litmus-tests/MP+polockmbonce+poacquiresilsil.litmus
new file mode 100644
index 000000000000..50f4d62bbf0e
--- /dev/null
+++ b/tools/memory-model/litmus-tests/MP+polockmbonce+poacquiresilsil.litmus
@@ -0,0 +1,35 @@
1C MP+polockmbonce+poacquiresilsil
2
3(*
4 * Result: Never
5 *
6 * Do spinlocks combined with smp_mb__after_spinlock() provide order
7 * to outside observers using spin_is_locked() to sense the lock-held
8 * state, ordered by acquire? Note that when the first spin_is_locked()
9 * returns false and the second true, we know that the smp_load_acquire()
10 * executed before the lock was acquired (loosely speaking).
11 *)
12
13{
14}
15
16P0(spinlock_t *lo, int *x)
17{
18 spin_lock(lo);
19 smp_mb__after_spinlock();
20 WRITE_ONCE(*x, 1);
21 spin_unlock(lo);
22}
23
24P1(spinlock_t *lo, int *x)
25{
26 int r1;
27 int r2;
28 int r3;
29
30 r1 = smp_load_acquire(x);
31 r2 = spin_is_locked(lo);
32 r3 = spin_is_locked(lo);
33}
34
35exists (1:r1=1 /\ 1:r2=0 /\ 1:r3=1)
diff --git a/tools/memory-model/litmus-tests/MP+polockonce+poacquiresilsil.litmus b/tools/memory-model/litmus-tests/MP+polockonce+poacquiresilsil.litmus
new file mode 100644
index 000000000000..abf81e7a0895
--- /dev/null
+++ b/tools/memory-model/litmus-tests/MP+polockonce+poacquiresilsil.litmus
@@ -0,0 +1,34 @@
1C MP+polockonce+poacquiresilsil
2
3(*
4 * Result: Sometimes
5 *
6 * Do spinlocks provide order to outside observers using spin_is_locked()
7 * to sense the lock-held state, ordered by acquire? Note that when the
8 * first spin_is_locked() returns false and the second true, we know that
9 * the smp_load_acquire() executed before the lock was acquired (loosely
10 * speaking).
11 *)
12
13{
14}
15
16P0(spinlock_t *lo, int *x)
17{
18 spin_lock(lo);
19 WRITE_ONCE(*x, 1);
20 spin_unlock(lo);
21}
22
23P1(spinlock_t *lo, int *x)
24{
25 int r1;
26 int r2;
27 int r3;
28
29 r1 = smp_load_acquire(x);
30 r2 = spin_is_locked(lo);
31 r3 = spin_is_locked(lo);
32}
33
34exists (1:r1=1 /\ 1:r2=0 /\ 1:r3=1)
diff --git a/tools/memory-model/litmus-tests/README b/tools/memory-model/litmus-tests/README
index 04096fb8b8d9..6919909bbd0f 100644
--- a/tools/memory-model/litmus-tests/README
+++ b/tools/memory-model/litmus-tests/README
@@ -63,6 +63,16 @@ LB+poonceonces.litmus
63MP+onceassign+derefonce.litmus 63MP+onceassign+derefonce.litmus
64 As below, but with rcu_assign_pointer() and an rcu_dereference(). 64 As below, but with rcu_assign_pointer() and an rcu_dereference().
65 65
66MP+polockmbonce+poacquiresilsil.litmus
67 Protect the access with a lock and an smp_mb__after_spinlock()
68 in one process, and use an acquire load followed by a pair of
69 spin_is_locked() calls in the other process.
70
71MP+polockonce+poacquiresilsil.litmus
72 Protect the access with a lock in one process, and use an
73 acquire load followed by a pair of spin_is_locked() calls
74 in the other process.
75
66MP+polocks.litmus 76MP+polocks.litmus
67 As below, but with the second access of the writer process 77 As below, but with the second access of the writer process
68 and the first access of reader process protected by a lock. 78 and the first access of reader process protected by a lock.
diff --git a/tools/memory-model/lock.cat b/tools/memory-model/lock.cat
index ba4a4ec6d313..3b1439edc818 100644
--- a/tools/memory-model/lock.cat
+++ b/tools/memory-model/lock.cat
@@ -5,7 +5,11 @@
5 *) 5 *)
6 6
7(* Generate coherence orders and handle lock operations *) 7(* Generate coherence orders and handle lock operations *)
8 8(*
9 * Warning, crashes with herd7 versions strictly before 7.48.
10 * spin_islocked is functional from version 7.49.
11 *
12 *)
9include "cross.cat" 13include "cross.cat"
10 14
11(* From lock reads to their partner lock writes *) 15(* From lock reads to their partner lock writes *)
@@ -32,12 +36,16 @@ flag ~empty [M \ IW] ; loc ; [ALL-LOCKS] as mixed-lock-accesses
32(* The final value of a spinlock should not be tested *) 36(* The final value of a spinlock should not be tested *)
33flag ~empty [FW] ; loc ; [ALL-LOCKS] as lock-final 37flag ~empty [FW] ; loc ; [ALL-LOCKS] as lock-final
34 38
35 39(*
40 * Backward compatibility
41 *)
42let RL = try RL with emptyset (* defined herd7 >= 7.49 *)
43let RU = try RU with emptyset (* defined herd7 >= 7.49 *)
36(* 44(*
37 * Put lock operations in their appropriate classes, but leave UL out of W 45 * Put lock operations in their appropriate classes, but leave UL out of W
38 * until after the co relation has been generated. 46 * until after the co relation has been generated.
39 *) 47 *)
40let R = R | LKR | LF 48let R = R | LKR | LF | RL | RU
41let W = W | LKW 49let W = W | LKW
42 50
43let Release = Release | UL 51let Release = Release | UL
@@ -72,8 +80,45 @@ let all-possible-rfe-lf =
72 80
73(* Generate all rf relations for LF events *) 81(* Generate all rf relations for LF events *)
74with rfe-lf from cross(all-possible-rfe-lf) 82with rfe-lf from cross(all-possible-rfe-lf)
75let rf = rf | rfi-lf | rfe-lf
76 83
84let rf-lf = rfe-lf | rfi-lf
85
86(* rf for RL events, ie islocked returning true, similar to LF above *)
87
88(* islocked returning true inside a critical section
89 * must read from the opening lock
90 *)
91let rfi-rl = ([LKW] ; po-loc ; [RL]) \ ([LKW] ; po-loc ; [UL] ; po-loc)
92
93(* islocked returning true outside critical sections can match any
94 * external lock.
95 *)
96let all-possible-rfe-rl =
97 let possible-rfe-lf r =
98 let pair-to-relation p = p ++ 0
99 in map pair-to-relation ((LKW * {r}) & loc & ext)
100 in map possible-rfe-lf (RL \ range(rfi-rl))
101
102with rfe-rl from cross(all-possible-rfe-rl)
103let rf-rl = rfe-rl | rfi-rl
104
105(* Read from unlock, ie islocked returning false, slightly different *)
106
107(* islocked returning false can read from the last po-previous unlock *)
108let rfi-ru = ([UL] ; po-loc ; [RU]) \ ([UL] ; po-loc ; [LKW] ; po-loc)
109
110(* any islocked returning false can read from any external unlock *)
111let all-possible-rfe-ru =
112 let possible-rfe-ru r =
113 let pair-to-relation p = p ++ 0
114 in map pair-to-relation (((UL|IW) * {r}) & loc & ext)
115 in map possible-rfe-ru RU
116
117with rfe-ru from cross(all-possible-rfe-ru)
118let rf-ru = rfe-ru | rfi-ru
119
120(* Final rf relation *)
121let rf = rf | rf-lf | rf-rl | rf-ru
77 122
78(* Generate all co relations, including LKW events but not UL *) 123(* Generate all co relations, including LKW events but not UL *)
79let co0 = co0 | ([IW] ; loc ; [LKW]) | 124let co0 = co0 | ([IW] ; loc ; [LKW]) |