diff options
| author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2013-10-26 07:43:36 -0400 |
|---|---|---|
| committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2013-12-09 18:12:38 -0500 |
| commit | bc72d962d6a0ba8d9d5314d04fd1775261a9ec79 (patch) | |
| tree | d06c3317489007f1263abbca3bd5650190be4b91 /kernel | |
| parent | 04f34650ca5e8445aae0ab3e0ff6704f141150a8 (diff) | |
rcu: Improve SRCU's grace-period comments
This commit documents the memory-barrier guarantees provided by
synchronize_srcu() and call_srcu().
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/rcu/srcu.c | 56 |
1 file changed, 49 insertions, 7 deletions
diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c index 0f0c63111f20..3318d8284384 100644 --- a/kernel/rcu/srcu.c +++ b/kernel/rcu/srcu.c | |||
| @@ -363,6 +363,29 @@ static void srcu_flip(struct srcu_struct *sp) | |||
| 363 | /* | 363 | /* |
| 364 | * Enqueue an SRCU callback on the specified srcu_struct structure, | 364 | * Enqueue an SRCU callback on the specified srcu_struct structure, |
| 365 | * initiating grace-period processing if it is not already running. | 365 | * initiating grace-period processing if it is not already running. |
| 366 | * | ||
| 367 | * Note that all CPUs must agree that the grace period extended beyond | ||
| 368 | * all pre-existing SRCU read-side critical sections. On systems with | ||
| 369 | * more than one CPU, this means that when "func()" is invoked, each CPU | ||
| 370 | * is guaranteed to have executed a full memory barrier since the end of | ||
| 371 | * its last corresponding SRCU read-side critical section whose beginning | ||
| 372 | * preceded the call to call_srcu(). It also means that each CPU executing | ||
| 373 | * an SRCU read-side critical section that continues beyond the start of | ||
| 374 | * "func()" must have executed a memory barrier after the call_srcu() | ||
| 375 | * but before the beginning of that SRCU read-side critical section. | ||
| 376 | * Note that these guarantees include CPUs that are offline, idle, or | ||
| 377 | * executing in user mode, as well as CPUs that are executing in the kernel. | ||
| 378 | * | ||
| 379 | * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the | ||
| 380 | * resulting SRCU callback function "func()", then both CPU A and CPU | ||
| 381 | * B are guaranteed to execute a full memory barrier during the time | ||
| 382 | * interval between the call to call_srcu() and the invocation of "func()". | ||
| 383 | * This guarantee applies even if CPU A and CPU B are the same CPU (but | ||
| 384 | * again only if the system has more than one CPU). | ||
| 385 | * | ||
| 386 | * Of course, these guarantees apply only for invocations of call_srcu(), | ||
| 387 | * srcu_read_lock(), and srcu_read_unlock() that are all passed the same | ||
| 388 | * srcu_struct structure. | ||
| 366 | */ | 389 | */ |
| 367 | void call_srcu(struct srcu_struct *sp, struct rcu_head *head, | 390 | void call_srcu(struct srcu_struct *sp, struct rcu_head *head, |
| 368 | void (*func)(struct rcu_head *head)) | 391 | void (*func)(struct rcu_head *head)) |
| @@ -459,7 +482,30 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount) | |||
| 459 | * Note that it is illegal to call synchronize_srcu() from the corresponding | 482 | * Note that it is illegal to call synchronize_srcu() from the corresponding |
| 460 | * SRCU read-side critical section; doing so will result in deadlock. | 483 | * SRCU read-side critical section; doing so will result in deadlock. |
| 461 | * However, it is perfectly legal to call synchronize_srcu() on one | 484 | * However, it is perfectly legal to call synchronize_srcu() on one |
| 462 | * srcu_struct from some other srcu_struct's read-side critical section. | 485 | * srcu_struct from some other srcu_struct's read-side critical section, |
| 486 | * as long as the resulting graph of srcu_structs is acyclic. | ||
| 487 | * | ||
| 488 | * There are memory-ordering constraints implied by synchronize_srcu(). | ||
| 489 | * On systems with more than one CPU, when synchronize_srcu() returns, | ||
| 490 | * each CPU is guaranteed to have executed a full memory barrier since | ||
| 491 | * the end of its last corresponding SRCU read-side critical section | ||
| 492 | * whose beginning preceded the call to synchronize_srcu(). In addition, | ||
| 493 | * each CPU having an SRCU read-side critical section that extends beyond | ||
| 494 | * the return from synchronize_srcu() is guaranteed to have executed a | ||
| 495 | * full memory barrier after the beginning of synchronize_srcu() and before | ||
| 496 | * the beginning of that SRCU read-side critical section. Note that these | ||
| 497 | * guarantees include CPUs that are offline, idle, or executing in user mode, | ||
| 498 | * as well as CPUs that are executing in the kernel. | ||
| 499 | * | ||
| 500 | * Furthermore, if CPU A invoked synchronize_srcu(), which returned | ||
| 501 | * to its caller on CPU B, then both CPU A and CPU B are guaranteed | ||
| 502 | * to have executed a full memory barrier during the execution of | ||
| 503 | * synchronize_srcu(). This guarantee applies even if CPU A and CPU B | ||
| 504 | * are the same CPU, but again only if the system has more than one CPU. | ||
| 505 | * | ||
| 506 | * Of course, these memory-ordering guarantees apply only when | ||
| 507 | * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are | ||
| 508 | * passed the same srcu_struct structure. | ||
| 463 | */ | 509 | */ |
| 464 | void synchronize_srcu(struct srcu_struct *sp) | 510 | void synchronize_srcu(struct srcu_struct *sp) |
| 465 | { | 511 | { |
| @@ -476,12 +522,8 @@ EXPORT_SYMBOL_GPL(synchronize_srcu); | |||
| 476 | * Wait for an SRCU grace period to elapse, but be more aggressive about | 522 | * Wait for an SRCU grace period to elapse, but be more aggressive about |
| 477 | * spinning rather than blocking when waiting. | 523 | * spinning rather than blocking when waiting. |
| 478 | * | 524 | * |
| 479 | * Note that it is also illegal to call synchronize_srcu_expedited() | 525 | * Note that synchronize_srcu_expedited() has the same deadlock and |
| 480 | * from the corresponding SRCU read-side critical section; | 526 | * memory-ordering properties as does synchronize_srcu(). |
| 481 | * doing so will result in deadlock. However, it is perfectly legal | ||
| 482 | * to call synchronize_srcu_expedited() on one srcu_struct from some | ||
| 483 | * other srcu_struct's read-side critical section, as long as | ||
| 484 | * the resulting graph of srcu_structs is acyclic. | ||
| 485 | */ | 527 | */ |
| 486 | void synchronize_srcu_expedited(struct srcu_struct *sp) | 528 | void synchronize_srcu_expedited(struct srcu_struct *sp) |
| 487 | { | 529 | { |
