Diffstat (limited to 'kernel/srcu.c')
-rw-r--r--  kernel/srcu.c | 19 +++++++++----------
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 17e95bcc901c..43f1d61e513e 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -138,14 +138,14 @@ static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
 
 	/*
 	 * Now, we check the ->snap array that srcu_readers_active_idx()
-	 * filled in from the per-CPU counter values. Since both
-	 * __srcu_read_lock() and __srcu_read_unlock() increment the
-	 * upper bits of the per-CPU counter, an increment/decrement
-	 * pair will change the value of the counter. Since there is
-	 * only one possible increment, the only way to wrap the counter
-	 * is to have a huge number of counter decrements, which requires
-	 * a huge number of tasks and huge SRCU read-side critical-section
-	 * nesting levels, even on 32-bit systems.
+	 * filled in from the per-CPU counter values. Since
+	 * __srcu_read_lock() increments the upper bits of the per-CPU
+	 * counter, an increment/decrement pair will change the value
+	 * of the counter. Since there is only one possible increment,
+	 * the only way to wrap the counter is to have a huge number of
+	 * counter decrements, which requires a huge number of tasks and
+	 * huge SRCU read-side critical-section nesting levels, even on
+	 * 32-bit systems.
 	 *
 	 * All of the ways of confusing the readings require that the scan
 	 * in srcu_readers_active_idx() see the read-side task's decrement,
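
The comment above relies on a counter layout in which the low-order bits hold the reader-nesting count and the topmost bits act as a usage marker that only __srcu_read_lock() bumps. The user-space sketch below models that layout to show why a lock/unlock pair always changes the raw counter value even though the nesting count returns to zero; the SRCU_USAGE_COUNT definition and the lock-side increment of SRCU_USAGE_COUNT + 1 are illustrative assumptions, not taken from this diff.

/*
 * Minimal user-space model of the counter encoding described in the
 * comment above.  The macro definitions are assumptions (one usage bit
 * at the top, nesting count in the low-order bits); they are not
 * copied from kernel/srcu.c.
 */
#include <assert.h>
#include <limits.h>
#include <stdio.h>

#define SRCU_USAGE_BITS   1
#define SRCU_REF_MASK     (ULONG_MAX >> SRCU_USAGE_BITS)
#define SRCU_USAGE_COUNT  (SRCU_REF_MASK + 1)

int main(void)
{
	unsigned long c = 0;    /* models one per-CPU c[idx] slot */
	unsigned long snap = c; /* models the ->snap[] entry for this CPU */

	/* Assumed model of __srcu_read_lock(): bump usage bit and count. */
	c += SRCU_USAGE_COUNT + 1;

	/* Model of the new __srcu_read_unlock(): plain decrement. */
	c -= 1;

	/*
	 * The nesting count is back to zero, but the raw value differs
	 * from the snapshot, so srcu_readers_active_idx_check() can tell
	 * that a reader passed through in the meantime.
	 */
	assert((c & SRCU_REF_MASK) == 0);
	assert(c != snap);
	printf("snap=%#lx now=%#lx\n", snap, c);
	return 0;
}
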
@@ -234,8 +234,7 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 {
 	preempt_disable();
 	smp_mb(); /* C */ /* Avoid leaking the critical section. */
-	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) +=
-		SRCU_USAGE_COUNT - 1;
+	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
 	preempt_enable();
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
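
The hunk above replaces the unlock-side addition of SRCU_USAGE_COUNT - 1 with a plain decrement. Under the same assumed counter layout as in the sketch above, both forms lower the nesting count by one; the difference is that the old form also bumped the usage bits on unlock, whereas after this change only the lock side touches them. A small arithmetic check of that claim, again a user-space sketch under assumed macro definitions rather than kernel code:

/*
 * Arithmetic check of the old versus new unlock step, reusing the same
 * assumed macro layout as the previous sketch.
 */
#include <assert.h>
#include <limits.h>
#include <stdio.h>

#define SRCU_USAGE_BITS   1
#define SRCU_REF_MASK     (ULONG_MAX >> SRCU_USAGE_BITS)
#define SRCU_USAGE_COUNT  (SRCU_REF_MASK + 1)

int main(void)
{
	unsigned long c = 5;                                   /* arbitrary nesting count */
	unsigned long old_unlock = c + (SRCU_USAGE_COUNT - 1); /* old "+=" form */
	unsigned long new_unlock = c - 1;                      /* new "-= 1" form */

	/* Both forms drop the low-order nesting count from 5 to 4. */
	assert((old_unlock & SRCU_REF_MASK) == 4);
	assert((new_unlock & SRCU_REF_MASK) == 4);

	/* Only the old form also flipped the usage bits on unlock. */
	assert((old_unlock & ~SRCU_REF_MASK) != (c & ~SRCU_REF_MASK));
	assert((new_unlock & ~SRCU_REF_MASK) == (c & ~SRCU_REF_MASK));

	printf("old=%#lx new=%#lx\n", old_unlock, new_unlock);
	return 0;
}
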