author	Lai Jiangshan <laijs@cn.fujitsu.com>	2012-02-22 16:29:06 -0500
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-04-30 13:48:20 -0400
commit	440253c17fc4ed41d778492a7fb44dc0d756eccc (patch)
tree	333cb87d73c154cdc82d2b08356a7337abec72e2 /kernel/srcu.c
parent	4b7a3e9e32114a09c61995048f055615b5d4c26d (diff)
rcu: Increment upper bit only for srcu_read_lock()
The purpose of the upper bit of SRCU's per-CPU counters is to guarantee that no reasonable series of srcu_read_lock() and srcu_read_unlock() operations can return the value of the counter to its original value. This guarantee is required only after the index has been switched to the other set of counters, so at most one srcu_read_lock() can affect a given CPU's counter. The number of srcu_read_unlock() operations on a given counter is limited to the number of tasks in the system, which given the Linux kernel's current structure is limited to far less than 2^30 on 32-bit systems and far less than 2^62 on 64-bit systems. (Something about a limited number of bytes in the kernel's address space.)

Therefore, if srcu_read_lock() increments the upper bits, then srcu_read_unlock() need not do so. In this case, an srcu_read_lock() and an srcu_read_unlock() will flip the lower bit of the upper field of the counter. An unreasonably large additional number of srcu_read_unlock() operations would be required to return the counter to its initial value, thus preserving the guarantee.

This commit takes this approach, which further allows it to shrink the size of the upper field to one bit, making the number of srcu_read_unlock() operations required to return the counter to its initial value even more unreasonable than before.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
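To make the counter arithmetic concrete, here is a minimal, self-contained model of the scheme described above. It is an illustrative sketch only: UPPER_BIT, model_read_lock(), and model_read_unlock() are hypothetical stand-ins for the kernel's SRCU_USAGE_COUNT machinery, not the committed code.

/*
 * Toy model of one per-CPU SRCU counter under the new scheme: the
 * lock side bumps the nesting count and flips the one-bit upper
 * field in a single addition; the unlock side only decrements.
 * UPPER_BIT is a hypothetical stand-in for SRCU_USAGE_COUNT.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define UPPER_BIT ((uint32_t)1 << 31)	/* one-bit upper field */

static uint32_t counter;		/* low 31 bits: nesting count */

static void model_read_lock(void)
{
	counter += UPPER_BIT + 1;	/* increment count, flip upper bit */
}

static void model_read_unlock(void)
{
	counter -= 1;			/* decrement count only */
}

int main(void)
{
	uint32_t before = counter;

	model_read_lock();
	model_read_unlock();
	/* The nesting count is back where it started... */
	assert((counter & ~UPPER_BIT) == (before & ~UPPER_BIT));
	/* ...but the upper bit has flipped, so restoring the original
	 * value would take roughly 2^31 further unlock decrements. */
	assert(counter != before);
	printf("before=%#x after=%#x\n", (unsigned)before, (unsigned)counter);
	return 0;
}

A lock/unlock pair thus leaves only the upper bit changed, which matches the commit message's claim that an unreasonably large number of additional srcu_read_unlock() calls would be needed to return the counter to its initial value.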
Diffstat (limited to 'kernel/srcu.c')
-rw-r--r--	kernel/srcu.c	19
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 17e95bcc901c..43f1d61e513e 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -138,14 +138,14 @@ static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
 
 	/*
 	 * Now, we check the ->snap array that srcu_readers_active_idx()
-	 * filled in from the per-CPU counter values. Since both
-	 * __srcu_read_lock() and __srcu_read_unlock() increment the
-	 * upper bits of the per-CPU counter, an increment/decrement
-	 * pair will change the value of the counter. Since there is
-	 * only one possible increment, the only way to wrap the counter
-	 * is to have a huge number of counter decrements, which requires
-	 * a huge number of tasks and huge SRCU read-side critical-section
-	 * nesting levels, even on 32-bit systems.
+	 * filled in from the per-CPU counter values. Since
+	 * __srcu_read_lock() increments the upper bits of the per-CPU
+	 * counter, an increment/decrement pair will change the value
+	 * of the counter. Since there is only one possible increment,
+	 * the only way to wrap the counter is to have a huge number of
+	 * counter decrements, which requires a huge number of tasks and
+	 * huge SRCU read-side critical-section nesting levels, even on
+	 * 32-bit systems.
 	 *
 	 * All of the ways of confusing the readings require that the scan
 	 * in srcu_readers_active_idx() see the read-side task's decrement,
@@ -234,8 +234,7 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 {
 	preempt_disable();
 	smp_mb(); /* C */ /* Avoid leaking the critical section. */
-	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) +=
-		SRCU_USAGE_COUNT - 1;
+	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
 	preempt_enable();
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
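For context, the lock-side counterpart changed by the same commit is not shown in the hunks above. Below is a hedged sketch of what that update plausibly looks like under this scheme, assuming the era's ACCESS_ONCE()/SRCU_USAGE_COUNT definitions and simplifying the index read (the real function derives idx from sp->completed with additional RCU debug checks):

/*
 * Sketch only, not the committed code: the lock side does one
 * combined update that increments the nesting count and flips the
 * one-bit upper field, so the unlock above can be a bare decrement.
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
	int idx;

	preempt_disable();
	idx = ACCESS_ONCE(sp->completed) & 0x1; /* active counter set */
	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) +=
		SRCU_USAGE_COUNT + 1;
	smp_mb(); /* B */ /* Avoid leaking the critical section. */
	preempt_enable();
	return idx;
}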