author     Lai Jiangshan <laijs@cn.fujitsu.com>  2012-03-19 04:12:12 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2012-04-30 13:48:24 -0400
commit     d9792edd7a9a0858a3b1df92cf8beb31e4191e3c
tree       149aa8e7fe288094129c94d02db2e2b64c6ff53c /kernel/srcu.c
parent     dc87917501e324701dbfb249def44054b5220187
rcu: Use single value to handle expedited SRCU grace periods
The earlier algorithm used an "expedited" flag combined with a "trycount"
counter to differentiate between normal and expedited SRCU grace periods.
However, the difference can be encoded into a single counter with a cutoff
value and different initial values for expedited and normal SRCU grace
periods.  This commit makes that change.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

Conflicts:

	kernel/srcu.c
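To make the scheme concrete, here is a hypothetical standalone userspace C
sketch of the single-counter idea.  The SYNCHRONIZE_SRCU_* constants and the
decrement-then-sleep control flow mirror the diff below; readers_active(),
wait_for_readers(), the pending counter, and the driver in main() are invented
for the example and stand in for the real srcu_readers_active_idx_check()
machinery.

	/*
	 * Hypothetical userspace sketch of the single-counter scheme.  The
	 * SYNCHRONIZE_SRCU_* constants mirror the patch; everything else is
	 * invented for illustration.
	 */
	#include <stdio.h>
	#include <unistd.h>

	#define SYNCHRONIZE_SRCU_READER_DELAY	5  /* microseconds, as in srcu.c */
	#define SYNCHRONIZE_SRCU_TRYCOUNT	2  /* normal: give up spinning quickly */
	#define SYNCHRONIZE_SRCU_EXP_TRYCOUNT	12 /* expedited: spin much longer */

	static int pending;		/* pretend readers drain slowly */

	/* Stand-in for srcu_readers_active_idx_check(). */
	static int readers_active(void)
	{
		return pending-- > 0;
	}

	/* Stand-in for wait_idx(): one counter covers both cases. */
	static void wait_for_readers(int trycount)
	{
		while (readers_active()) {
			if (trycount > 0) {
				trycount--;	/* still within the spin budget */
				usleep(SYNCHRONIZE_SRCU_READER_DELAY);
			} else {
				/* block ~1ms, like schedule_timeout_interruptible(1) */
				usleep(1000);
			}
		}
	}

	int main(void)
	{
		pending = 8;
		wait_for_readers(SYNCHRONIZE_SRCU_TRYCOUNT);	 /* normal GP */
		pending = 8;
		wait_for_readers(SYNCHRONIZE_SRCU_EXP_TRYCOUNT); /* expedited GP */
		puts("all pre-existing readers drained");
		return 0;
	}

The only difference between the normal and expedited paths is the initial
counter value, which is what lets the patch retire the separate "expedited"
flag.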
Diffstat (limited to 'kernel/srcu.c')
-rw-r--r--  kernel/srcu.c | 27
1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/kernel/srcu.c b/kernel/srcu.c
index a43211c92863..b9088524935a 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -266,16 +266,16 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock);
  * we repeatedly block for 1-millisecond time periods.  This approach
  * has done well in testing, so there is no need for a config parameter.
  */
 #define SYNCHRONIZE_SRCU_READER_DELAY 5
+#define SYNCHRONIZE_SRCU_TRYCOUNT 2
+#define SYNCHRONIZE_SRCU_EXP_TRYCOUNT 12
 
 /*
  * Wait until all pre-existing readers complete.  Such readers
  * will have used the index specified by "idx".
  */
-static void wait_idx(struct srcu_struct *sp, int idx, bool expedited)
+static void wait_idx(struct srcu_struct *sp, int idx, int trycount)
 {
-	int trycount = 0;
-
 	/*
 	 * SRCU read-side critical sections are normally short, so wait
 	 * a small amount of time before possibly blocking.
@@ -283,9 +283,10 @@ static void wait_idx(struct srcu_struct *sp, int idx, bool expedited)
 	if (!srcu_readers_active_idx_check(sp, idx)) {
 		udelay(SYNCHRONIZE_SRCU_READER_DELAY);
 		while (!srcu_readers_active_idx_check(sp, idx)) {
-			if (expedited && ++ trycount < 10)
+			if (trycount > 0) {
+				trycount--;
 				udelay(SYNCHRONIZE_SRCU_READER_DELAY);
-			else
+			} else
 				schedule_timeout_interruptible(1);
 		}
 	}
@@ -299,7 +300,7 @@ static void srcu_flip(struct srcu_struct *sp)
 /*
  * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
  */
-static void __synchronize_srcu(struct srcu_struct *sp, bool expedited)
+static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
 {
 	int busy_idx;
 
@@ -319,8 +320,8 @@ static void __synchronize_srcu(struct srcu_struct *sp, bool expedited)
 	 * have completed:
 	 *
 	 *	__synchronize_srcu() {
-	 *		wait_idx(sp, 0, expedited);
-	 *		wait_idx(sp, 1, expedited);
+	 *		wait_idx(sp, 0, trycount);
+	 *		wait_idx(sp, 1, trycount);
 	 *	}
 	 *
 	 * Starvation is prevented by the fact that we flip the index.
@@ -344,13 +345,13 @@ static void __synchronize_srcu(struct srcu_struct *sp, bool expedited)
 	 * this call to wait_idx(), which waits on really old readers
 	 * describe in this comment above, will almost never need to wait.
 	 */
-	wait_idx(sp, 1 - busy_idx, expedited);
+	wait_idx(sp, 1 - busy_idx, trycount);
 
 	/* Flip the index to avoid reader-induced starvation. */
 	srcu_flip(sp);
 
 	/* Wait for recent pre-existing readers. */
-	wait_idx(sp, busy_idx, expedited);
+	wait_idx(sp, busy_idx, trycount);
 
 	mutex_unlock(&sp->mutex);
 }
@@ -371,7 +372,7 @@ static void __synchronize_srcu(struct srcu_struct *sp, bool expedited)
  */
 void synchronize_srcu(struct srcu_struct *sp)
 {
-	__synchronize_srcu(sp, 0);
+	__synchronize_srcu(sp, SYNCHRONIZE_SRCU_TRYCOUNT);
 }
 EXPORT_SYMBOL_GPL(synchronize_srcu);
 
@@ -392,7 +393,7 @@ EXPORT_SYMBOL_GPL(synchronize_srcu);
  */
 void synchronize_srcu_expedited(struct srcu_struct *sp)
 {
-	__synchronize_srcu(sp, 1);
+	__synchronize_srcu(sp, SYNCHRONIZE_SRCU_EXP_TRYCOUNT);
 }
 EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
 
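For context, here is a minimal sketch of a kernel-side caller exercising the
two updater paths tuned above.  my_srcu, my_data, read_value(), and
update_value() are hypothetical; srcu_read_lock(), srcu_read_unlock(),
srcu_dereference(), synchronize_srcu(), and synchronize_srcu_expedited() are
the real SRCU API.  Locking between concurrent updaters is omitted for
brevity.

	#include <linux/srcu.h>
	#include <linux/slab.h>

	static struct srcu_struct my_srcu;  /* init_srcu_struct(&my_srcu) at setup */
	static int *my_data;                /* SRCU-protected pointer */

	/* Reader side: may sleep inside the critical section, unlike plain RCU. */
	static int read_value(void)
	{
		int idx, val = 0;
		int *p;

		idx = srcu_read_lock(&my_srcu);
		p = srcu_dereference(my_data, &my_srcu);
		if (p)
			val = *p;
		srcu_read_unlock(&my_srcu, idx);
		return val;
	}

	/* Updater side: publish, wait out pre-existing readers, then free. */
	static void update_value(int *newp, bool urgent)
	{
		int *oldp = my_data;

		rcu_assign_pointer(my_data, newp);
		if (urgent)
			synchronize_srcu_expedited(&my_srcu); /* starts with trycount 12 */
		else
			synchronize_srcu(&my_srcu);           /* starts with trycount 2 */
		kfree(oldp);  /* no reader can still hold a reference to oldp */
	}

With this patch, the only behavioral difference between the two
synchronize calls is how long wait_idx() is willing to spin before it
starts sleeping.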