author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-10-23 16:47:01 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-11-13 17:08:23 -0500
commit		f0a0e6f282c72247e7c8ec17c68d528c1bb4d49e (patch)
tree		22b66fc8ac9b95586866ddb447dcc8712d441c14 /include/linux/rcupdate.h
parent		67afeed2cab0e59712b4ebf1aef9a2e555a188ce (diff)
rcu: Clarify memory-ordering properties of grace-period primitives
This commit explicitly states the memory-ordering properties of the
RCU grace-period primitives. Although these properties were in some
sense implied by the fundamental property of RCU ("a grace period must
wait for all pre-existing RCU read-side critical sections to complete"),
stating them explicitly will be a great labor-saving device.
Reported-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Oleg Nesterov <oleg@redhat.com>
Diffstat (limited to 'include/linux/rcupdate.h')
-rw-r--r--	include/linux/rcupdate.h | 25
1 file changed, 25 insertions, 0 deletions
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 7c968e4f929e..6256759fb81e 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -90,6 +90,25 @@ extern void do_trace_rcu_torture_read(char *rcutorturename,
  * that started after call_rcu() was invoked. RCU read-side critical
  * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
  * and may be nested.
+ *
+ * Note that all CPUs must agree that the grace period extended beyond
+ * all pre-existing RCU read-side critical sections. On systems with more
+ * than one CPU, this means that when "func()" is invoked, each CPU is
+ * guaranteed to have executed a full memory barrier since the end of its
+ * last RCU read-side critical section whose beginning preceded the call
+ * to call_rcu(). It also means that each CPU executing an RCU read-side
+ * critical section that continues beyond the start of "func()" must have
+ * executed a memory barrier after the call_rcu() but before the beginning
+ * of that RCU read-side critical section. Note that these guarantees
+ * include CPUs that are offline, idle, or executing in user mode, as
+ * well as CPUs that are executing in the kernel.
+ *
+ * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
+ * resulting RCU callback function "func()", then both CPU A and CPU B are
+ * guaranteed to execute a full memory barrier during the time interval
+ * between the call to call_rcu() and the invocation of "func()" -- even
+ * if CPU A and CPU B are the same CPU (but again only if the system has
+ * more than one CPU).
  */
 extern void call_rcu(struct rcu_head *head,
 		     void (*func)(struct rcu_head *head));
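To make the final paragraph of the new comment concrete, here is a minimal
sketch (not part of the patch; done_flag, done_rcu, done_cb, and start_done
are hypothetical names) of code that can rely on the documented full
barriers, so the callback observes a pre-call_rcu() store without any
explicit barrier of its own:

#include <linux/bug.h>
#include <linux/rcupdate.h>

static int done_flag;
static struct rcu_head done_rcu;

/* May run on a different CPU ("CPU B") than start_done(). */
static void done_cb(struct rcu_head *unused)
{
	/*
	 * Both the CPU that invoked call_rcu() and the CPU running this
	 * callback have executed a full memory barrier between the
	 * call_rcu() and this invocation, so the plain store in
	 * start_done() is guaranteed to be visible here.
	 */
	WARN_ON(done_flag != 1);
}

static void start_done(void)		/* "CPU A". */
{
	done_flag = 1;			/* Plain store, no explicit barrier. */
	call_rcu(&done_rcu, done_cb);
}

This is the same reasoning that lets a callback safely free a structure
that pre-existing readers may still have been referencing.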
@@ -118,6 +137,9 @@ extern void call_rcu(struct rcu_head *head,
  * OR
  * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
  * These may be nested.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
  */
 extern void call_rcu_bh(struct rcu_head *head,
 			void (*func)(struct rcu_head *head));
@@ -137,6 +159,9 @@ extern void call_rcu_bh(struct rcu_head *head,
  * OR
  * anything that disables preemption.
  * These may be nested.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
  */
 extern void call_rcu_sched(struct rcu_head *head,
 			   void (*func)(struct rcu_head *rcu));
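As a usage illustration, here is a minimal sketch (not part of this patch;
struct foo, foo_head, foo_lock, add_foo, delete_foo, free_foo_rcu, and
read_first_foo are hypothetical names) of the classic RCU deletion pattern
that depends on the guarantees documented above: by the time the callback
runs, every pre-existing reader has finished and has executed a full memory
barrier, so kfree() is safe.

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	int data;
	struct list_head list;
	struct rcu_head rcu;
};

static LIST_HEAD(foo_head);
static DEFINE_SPINLOCK(foo_lock);

/* Updater: publish a new element. */
static void add_foo(struct foo *p)
{
	spin_lock(&foo_lock);
	list_add_rcu(&p->list, &foo_head);
	spin_unlock(&foo_lock);
}

/*
 * Grace-period callback: by the memory-ordering guarantees above, every
 * CPU has executed a full memory barrier since the end of any read-side
 * critical section that began before the call_rcu() in delete_foo(), so
 * no reader can still hold a reference to the enclosing structure.
 */
static void free_foo_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

/* Updater: unpublish an element, then free it after a grace period. */
static void delete_foo(struct foo *p)
{
	spin_lock(&foo_lock);
	list_del_rcu(&p->list);		/* New readers can no longer find p. */
	spin_unlock(&foo_lock);
	call_rcu(&p->rcu, free_foo_rcu);
}

/* Reader: lockless traversal within a read-side critical section. */
static int read_first_foo(void)
{
	struct foo *p;
	int val = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(p, &foo_head, list) {
		val = p->data;
		break;
	}
	rcu_read_unlock();
	return val;
}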