author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2010-08-17 17:18:46 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2010-08-20 12:00:16 -0400
commit     7b0b759b65247cbc66384a912be9acf8d4800636
tree       085a18f506193725ab16fa61cae41f93dcc7c3e9
parent     73d4da4d360136826b36f78f5cf72b29da82c8a6
rcu: combine duplicate code, courtesy of CONFIG_PREEMPT_RCU
The CONFIG_PREEMPT_RCU kernel configuration parameter was recently
re-introduced, but as an indication of the type of RCU (preemptible
vs. non-preemptible) instead of as selecting a given implementation.
This commit uses CONFIG_PREEMPT_RCU to combine duplicate code
from include/linux/rcutiny.h and include/linux/rcutree.h into
include/linux/rcupdate.h. This commit also combines a few other pieces
of duplicate code that have accumulated.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
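
For readers skimming the diff below, the heart of the change is that include/linux/rcupdate.h now keys on CONFIG_PREEMPT_RCU to supply either extern declarations (preemptible RCU provides the real functions out of line) or static inline fallbacks built on the scheduler-based primitives, so rcutiny.h and rcutree.h no longer carry their own copies. The following is a small, self-contained userspace mock-up of that pattern, not the kernel headers themselves; the printf-backed stubs and the main() driver are invented here purely for illustration.

/*
 * Illustrative userspace mock-up (NOT the kernel headers): one common
 * header section keys on CONFIG_PREEMPT_RCU to choose between extern
 * declarations and static inline fallbacks built on the scheduler-based
 * primitives. The stubs and main() exist only for this demo.
 */
#include <stdio.h>

/* Stand-ins for the primitives the non-preemptible fallbacks map onto. */
static inline void preempt_disable(void)   { puts("preempt_disable()"); }
static inline void preempt_enable(void)    { puts("preempt_enable()"); }
static inline void synchronize_sched(void) { puts("synchronize_sched()"); }

#ifdef CONFIG_PREEMPT_RCU

/* Preemptible RCU supplies real, out-of-line implementations. */
extern void __rcu_read_lock(void);
extern void __rcu_read_unlock(void);
extern void synchronize_rcu(void);

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* Non-preemptible RCU: a reader is just a preemption-disabled region. */
static inline void __rcu_read_lock(void)
{
	preempt_disable();
}

static inline void __rcu_read_unlock(void)
{
	preempt_enable();
}

/* ...and waiting for a grace period degenerates to synchronize_sched(). */
static inline void synchronize_rcu(void)
{
	synchronize_sched();
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

int main(void)
{
	__rcu_read_lock();	/* reader section begins */
	__rcu_read_unlock();	/* reader section ends */
	synchronize_rcu();	/* updater waits for a grace period */
	return 0;
}

Compiled without CONFIG_PREEMPT_RCU defined, the fallback path is used and the program prints the three stub names; with it defined, some other translation unit would have to provide the extern symbols, just as the preemptible-RCU implementation does in the kernel.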
-rw-r--r--  include/linux/rcupdate.h |  75
-rw-r--r--  include/linux/rcutiny.h  |  51
-rw-r--r--  include/linux/rcutree.h  |  50
-rw-r--r--  kernel/rcutree_plugin.h  |   9
4 files changed, 72 insertions(+), 113 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 325bad7bbca9..89414d67d961 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -61,16 +61,30 @@ struct rcu_head {
 };
 
 /* Exported common interfaces */
+extern void call_rcu_sched(struct rcu_head *head,
+			   void (*func)(struct rcu_head *rcu));
+extern void synchronize_sched(void);
 extern void rcu_barrier_bh(void);
 extern void rcu_barrier_sched(void);
 extern void synchronize_sched_expedited(void);
 extern int sched_expedited_torture_stats(char *page);
 
-/* Internal to kernel */
-extern void rcu_init(void);
+static inline void __rcu_read_lock_bh(void)
+{
+	local_bh_disable();
+}
+
+static inline void __rcu_read_unlock_bh(void)
+{
+	local_bh_enable();
+}
 
 #ifdef CONFIG_PREEMPT_RCU
 
+extern void __rcu_read_lock(void);
+extern void __rcu_read_unlock(void);
+void synchronize_rcu(void);
+
 /*
  * Defined as a macro as it is a very low level header included from
  * areas that don't even know about current. This gives the rcu_read_lock()
@@ -79,7 +93,53 @@ extern void rcu_init(void);
  */
 #define rcu_preempt_depth() (current->rcu_read_lock_nesting)
 
-#endif /* #ifdef CONFIG_PREEMPT_RCU */
+#else /* #ifdef CONFIG_PREEMPT_RCU */
+
+static inline void __rcu_read_lock(void)
+{
+	preempt_disable();
+}
+
+static inline void __rcu_read_unlock(void)
+{
+	preempt_enable();
+}
+
+static inline void synchronize_rcu(void)
+{
+	synchronize_sched();
+}
+
+static inline int rcu_preempt_depth(void)
+{
+	return 0;
+}
+
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
+/* Internal to kernel */
+extern void rcu_init(void);
+extern void rcu_sched_qs(int cpu);
+extern void rcu_bh_qs(int cpu);
+extern void rcu_check_callbacks(int cpu, int user);
+struct notifier_block;
+
+#ifdef CONFIG_NO_HZ
+
+extern void rcu_enter_nohz(void);
+extern void rcu_exit_nohz(void);
+
+#else /* #ifdef CONFIG_NO_HZ */
+
+static inline void rcu_enter_nohz(void)
+{
+}
+
+static inline void rcu_exit_nohz(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_NO_HZ */
 
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
 #include <linux/rcutree.h>
@@ -626,6 +686,8 @@ struct rcu_synchronize {
 
 extern void wakeme_after_rcu(struct rcu_head *head);
 
+#ifdef CONFIG_PREEMPT_RCU
+
 /**
  * call_rcu() - Queue an RCU callback for invocation after a grace period.
  * @head: structure to be used for queueing the RCU updates.
@@ -642,6 +704,13 @@ extern void wakeme_after_rcu(struct rcu_head *head);
 extern void call_rcu(struct rcu_head *head,
 		     void (*func)(struct rcu_head *head));
 
+#else /* #ifdef CONFIG_PREEMPT_RCU */
+
+/* In classic RCU, call_rcu() is just call_rcu_sched(). */
+#define call_rcu call_rcu_sched
+
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
 /**
  * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
  * @head: structure to be used for queueing the RCU updates.
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index c6b11dc5ba0a..13877cb93a60 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -27,34 +27,10 @@
 
 #include <linux/cache.h>
 
-void rcu_sched_qs(int cpu);
-void rcu_bh_qs(int cpu);
-
-#ifdef CONFIG_TINY_RCU
-#define __rcu_read_lock() preempt_disable()
-#define __rcu_read_unlock() preempt_enable()
-#else /* #ifdef CONFIG_TINY_RCU */
-void __rcu_read_lock(void);
-void __rcu_read_unlock(void);
-#endif /* #else #ifdef CONFIG_TINY_RCU */
-#define __rcu_read_lock_bh() local_bh_disable()
-#define __rcu_read_unlock_bh() local_bh_enable()
-extern void call_rcu_sched(struct rcu_head *head,
-			   void (*func)(struct rcu_head *rcu));
-
 #define rcu_init_sched() do { } while (0)
 
-extern void synchronize_sched(void);
-
 #ifdef CONFIG_TINY_RCU
 
-#define call_rcu call_rcu_sched
-
-static inline void synchronize_rcu(void)
-{
-	synchronize_sched();
-}
-
 static inline void synchronize_rcu_expedited(void)
 {
 	synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */
@@ -67,7 +43,6 @@ static inline void rcu_barrier(void)
 
 #else /* #ifdef CONFIG_TINY_RCU */
 
-void synchronize_rcu(void);
 void rcu_barrier(void);
 void synchronize_rcu_expedited(void);
 
@@ -83,25 +58,6 @@ static inline void synchronize_rcu_bh_expedited(void)
 	synchronize_sched();
 }
 
-struct notifier_block;
-
-#ifdef CONFIG_NO_HZ
-
-extern void rcu_enter_nohz(void);
-extern void rcu_exit_nohz(void);
-
-#else /* #ifdef CONFIG_NO_HZ */
-
-static inline void rcu_enter_nohz(void)
-{
-}
-
-static inline void rcu_exit_nohz(void)
-{
-}
-
-#endif /* #else #ifdef CONFIG_NO_HZ */
-
 #ifdef CONFIG_TINY_RCU
 
 static inline void rcu_preempt_note_context_switch(void)
@@ -117,11 +73,6 @@ static inline int rcu_needs_cpu(int cpu)
 	return 0;
 }
 
-static inline int rcu_preempt_depth(void)
-{
-	return 0;
-}
-
 #else /* #ifdef CONFIG_TINY_RCU */
 
 void rcu_preempt_note_context_switch(void);
@@ -141,8 +92,6 @@ static inline void rcu_note_context_switch(int cpu)
 	rcu_preempt_note_context_switch();
 }
 
-extern void rcu_check_callbacks(int cpu, int user);
-
 /*
  * Return the number of grace periods.
  */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 54a20c11f98d..95518e628794 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -30,59 +30,23 @@
 #ifndef __LINUX_RCUTREE_H
 #define __LINUX_RCUTREE_H
 
-struct notifier_block;
-
-extern void rcu_sched_qs(int cpu);
-extern void rcu_bh_qs(int cpu);
 extern void rcu_note_context_switch(int cpu);
 extern int rcu_needs_cpu(int cpu);
 extern void rcu_cpu_stall_reset(void);
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
 
-extern void __rcu_read_lock(void);
-extern void __rcu_read_unlock(void);
-extern void synchronize_rcu(void);
 extern void exit_rcu(void);
 
 #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 
-static inline void __rcu_read_lock(void)
-{
-	preempt_disable();
-}
-
-static inline void __rcu_read_unlock(void)
-{
-	preempt_enable();
-}
-
-#define synchronize_rcu synchronize_sched
-
 static inline void exit_rcu(void)
 {
 }
 
-static inline int rcu_preempt_depth(void)
-{
-	return 0;
-}
-
 #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
 
-static inline void __rcu_read_lock_bh(void)
-{
-	local_bh_disable();
-}
-static inline void __rcu_read_unlock_bh(void)
-{
-	local_bh_enable();
-}
-
-extern void call_rcu_sched(struct rcu_head *head,
-			   void (*func)(struct rcu_head *rcu));
 extern void synchronize_rcu_bh(void);
-extern void synchronize_sched(void);
 extern void synchronize_rcu_expedited(void);
 
 static inline void synchronize_rcu_bh_expedited(void)
@@ -92,8 +56,6 @@ static inline void synchronize_rcu_bh_expedited(void)
 
 extern void rcu_barrier(void);
 
-extern void rcu_check_callbacks(int cpu, int user);
-
 extern long rcu_batches_completed(void);
 extern long rcu_batches_completed_bh(void);
 extern long rcu_batches_completed_sched(void);
@@ -101,18 +63,6 @@ extern void rcu_force_quiescent_state(void);
 extern void rcu_bh_force_quiescent_state(void);
 extern void rcu_sched_force_quiescent_state(void);
 
-#ifdef CONFIG_NO_HZ
-void rcu_enter_nohz(void);
-void rcu_exit_nohz(void);
-#else /* CONFIG_NO_HZ */
-static inline void rcu_enter_nohz(void)
-{
-}
-static inline void rcu_exit_nohz(void)
-{
-}
-#endif /* CONFIG_NO_HZ */
-
 /* A context switch is a grace period for RCU-sched and RCU-bh. */
 static inline int rcu_blocking_is_gp(void)
 {
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 561410f70d4a..87f60f06b18e 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -939,15 +939,6 @@ static void rcu_preempt_process_callbacks(void)
 }
 
 /*
- * In classic RCU, call_rcu() is just call_rcu_sched().
- */
-void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
-{
-	call_rcu_sched(head, func);
-}
-EXPORT_SYMBOL_GPL(call_rcu);
-
-/*
  * Wait for an rcu-preempt grace period, but make it happen quickly.
  * But because preemptable RCU does not exist, map to rcu-sched.
  */
