author	Paul E. McKenney <paul.mckenney@linaro.org>	2012-05-21 14:58:36 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-07-02 15:34:23 -0400
commit	2a3fa843b555d202e682bf08c65ee1a4a93c79cf (patch)
tree	b7db689ab83f041319b80493b12a8f7ecc79ad0f /kernel
parent	e9023c4061054cbf59c5288068118a4d0b152f01 (diff)
rcu: Consolidate tree/tiny __rcu_read_{,un}lock() implementations
The CONFIG_TREE_PREEMPT_RCU and CONFIG_TINY_PREEMPT_RCU versions of
__rcu_read_lock() and __rcu_read_unlock() are identical, so this commit
consolidates them into kernel/rcupdate.c.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
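
The subtle part of the shared code is the __rcu_read_unlock() slow path: the
outermost unlock parks ->rcu_read_lock_nesting at INT_MIN while
rcu_read_unlock_special() runs, so that a nested rcu_read_lock()/rcu_read_unlock()
pair inside the special handler is never mistaken for the outermost unlock, and
the CONFIG_PROVE_LOCKING check can flag a counter that is negative yet suspiciously
close to zero. Below is a minimal stand-alone user-space model of that counter
protocol (not kernel code; the model_* names and the plain ints standing in for
the task_struct fields are illustrative only):

#include <limits.h>
#include <stdio.h>

static int nesting;		/* stands in for t->rcu_read_lock_nesting */
static int unlock_special;	/* stands in for t->rcu_read_unlock_special */

static void model_rcu_read_unlock(void);

static void model_rcu_read_lock(void)
{
	nesting++;
}

static void model_rcu_read_unlock_special(void)
{
	/* The handler may itself enter a read-side critical section. */
	model_rcu_read_lock();
	/*
	 * nesting is INT_MIN + 1 here: negative, but far from 1, so the
	 * nested unlock below takes the fast path instead of recursing
	 * into the special-case handling.
	 */
	printf("inside special handler, nesting = %d\n", nesting);
	model_rcu_read_unlock();
	unlock_special = 0;
}

static void model_rcu_read_unlock(void)
{
	if (nesting != 1) {
		--nesting;			/* not the outermost unlock */
	} else {
		nesting = INT_MIN;		/* park the counter */
		if (unlock_special)
			model_rcu_read_unlock_special();
		nesting = 0;			/* fully outside the critical section */
	}
}

int main(void)
{
	unlock_special = 1;	/* pretend the reader blocked and needs cleanup */
	model_rcu_read_lock();
	model_rcu_read_unlock();
	printf("after outermost unlock, nesting = %d\n", nesting);
	return 0;
}

In the real implementation the barrier() calls and ACCESS_ONCE() additionally keep
the compiler from reordering the counter updates around the critical section and
the ->rcu_read_unlock_special load; the sequential model above does not need them.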
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/rcupdate.c	44
-rw-r--r--	kernel/rcutiny_plugin.h	47
-rw-r--r--	kernel/rcutree_plugin.h	47
3 files changed, 46 insertions, 92 deletions
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 95cba41ce1e9..4e6a61b15e86 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -54,6 +54,50 @@
 #ifdef CONFIG_PREEMPT_RCU
 
 /*
+ * Preemptible RCU implementation for rcu_read_lock().
+ * Just increment ->rcu_read_lock_nesting, shared state will be updated
+ * if we block.
+ */
+void __rcu_read_lock(void)
+{
+	current->rcu_read_lock_nesting++;
+	barrier(); /* critical section after entry code. */
+}
+EXPORT_SYMBOL_GPL(__rcu_read_lock);
+
+/*
+ * Preemptible RCU implementation for rcu_read_unlock().
+ * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
+ * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
+ * invoke rcu_read_unlock_special() to clean up after a context switch
+ * in an RCU read-side critical section and other special cases.
+ */
+void __rcu_read_unlock(void)
+{
+	struct task_struct *t = current;
+
+	if (t->rcu_read_lock_nesting != 1) {
+		--t->rcu_read_lock_nesting;
+	} else {
+		barrier(); /* critical section before exit code. */
+		t->rcu_read_lock_nesting = INT_MIN;
+		barrier(); /* assign before ->rcu_read_unlock_special load */
+		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+			rcu_read_unlock_special(t);
+		barrier(); /* ->rcu_read_unlock_special load before assign */
+		t->rcu_read_lock_nesting = 0;
+	}
+#ifdef CONFIG_PROVE_LOCKING
+	{
+		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
+
+		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
+	}
+#endif /* #ifdef CONFIG_PROVE_LOCKING */
+}
+EXPORT_SYMBOL_GPL(__rcu_read_unlock);
+
+/*
  * Check for a task exiting while in a preemptible-RCU read-side
  * critical section, clean up if so. No need to issue warnings,
  * as debug_check_no_locks_held() already does this if lockdep
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index fc31a2d65100..a269b0da0eb6 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -132,7 +132,6 @@ static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
 	RCU_TRACE(.rcb.name = "rcu_preempt")
 };
 
-static void rcu_read_unlock_special(struct task_struct *t);
 static int rcu_preempted_readers_exp(void);
 static void rcu_report_exp_done(void);
 
@@ -527,23 +526,11 @@ void rcu_preempt_note_context_switch(void)
 }
 
 /*
- * Tiny-preemptible RCU implementation for rcu_read_lock().
- * Just increment ->rcu_read_lock_nesting, shared state will be updated
- * if we block.
- */
-void __rcu_read_lock(void)
-{
-	current->rcu_read_lock_nesting++;
-	barrier(); /* needed if we ever invoke rcu_read_lock in rcutiny.c */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_lock);
-
-/*
  * Handle special cases during rcu_read_unlock(), such as needing to
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-static noinline void rcu_read_unlock_special(struct task_struct *t)
+void rcu_read_unlock_special(struct task_struct *t)
 {
 	int empty;
 	int empty_exp;
@@ -627,38 +614,6 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 }
 
 /*
- * Tiny-preemptible RCU implementation for rcu_read_unlock().
- * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
- * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
- * invoke rcu_read_unlock_special() to clean up after a context switch
- * in an RCU read-side critical section and other special cases.
- */
-void __rcu_read_unlock(void)
-{
-	struct task_struct *t = current;
-
-	barrier(); /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
-	if (t->rcu_read_lock_nesting != 1)
-		--t->rcu_read_lock_nesting;
-	else {
-		t->rcu_read_lock_nesting = INT_MIN;
-		barrier(); /* assign before ->rcu_read_unlock_special load */
-		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-			rcu_read_unlock_special(t);
-		barrier(); /* ->rcu_read_unlock_special load before assign */
-		t->rcu_read_lock_nesting = 0;
-	}
-#ifdef CONFIG_PROVE_LOCKING
-	{
-		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
-
-		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
-	}
-#endif /* #ifdef CONFIG_PROVE_LOCKING */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_unlock);
-
-/*
  * Check for a quiescent state from the current CPU. When a task blocks,
  * the task is recorded in the rcu_preempt_ctrlblk structure, which is
  * checked elsewhere. This is called from the scheduling-clock interrupt.
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 3e4899459f3d..4b6b17cdf66b 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -78,7 +78,6 @@ struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
 static struct rcu_state *rcu_state = &rcu_preempt_state;
 
-static void rcu_read_unlock_special(struct task_struct *t);
 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
 
 /*
@@ -233,18 +232,6 @@ static void rcu_preempt_note_context_switch(int cpu)
 }
 
 /*
- * Tree-preemptible RCU implementation for rcu_read_lock().
- * Just increment ->rcu_read_lock_nesting, shared state will be updated
- * if we block.
- */
-void __rcu_read_lock(void)
-{
-	current->rcu_read_lock_nesting++;
-	barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_lock);
-
-/*
  * Check for preempted RCU readers blocking the current grace period
  * for the specified rcu_node structure. If the caller needs a reliable
  * answer, it must hold the rcu_node's ->lock.
@@ -310,7 +297,7 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-static noinline void rcu_read_unlock_special(struct task_struct *t)
+void rcu_read_unlock_special(struct task_struct *t)
 {
 	int empty;
 	int empty_exp;
@@ -418,38 +405,6 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 	}
 }
 
-/*
- * Tree-preemptible RCU implementation for rcu_read_unlock().
- * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
- * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
- * invoke rcu_read_unlock_special() to clean up after a context switch
- * in an RCU read-side critical section and other special cases.
- */
-void __rcu_read_unlock(void)
-{
-	struct task_struct *t = current;
-
-	if (t->rcu_read_lock_nesting != 1)
-		--t->rcu_read_lock_nesting;
-	else {
-		barrier(); /* critical section before exit code. */
-		t->rcu_read_lock_nesting = INT_MIN;
-		barrier(); /* assign before ->rcu_read_unlock_special load */
-		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-			rcu_read_unlock_special(t);
-		barrier(); /* ->rcu_read_unlock_special load before assign */
-		t->rcu_read_lock_nesting = 0;
-	}
-#ifdef CONFIG_PROVE_LOCKING
-	{
-		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
-
-		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
-	}
-#endif /* #ifdef CONFIG_PROVE_LOCKING */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_unlock);
-
 #ifdef CONFIG_RCU_CPU_STALL_VERBOSE
 
 /*