Diffstat (limited to 'kernel/rcutiny.c')
-rw-r--r--	kernel/rcutiny.c	46
1 file changed, 23 insertions(+), 23 deletions(-)
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 0c343b9a46d5..7bbac7d0f5ab 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -35,15 +35,16 @@
 #include <linux/init.h>
 #include <linux/time.h>
 #include <linux/cpu.h>
+#include <linux/prefetch.h>
 
 /* Controls for rcu_kthread() kthread, replacing RCU_SOFTIRQ used previously. */
 static struct task_struct *rcu_kthread_task;
 static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
 static unsigned long have_rcu_kthread_work;
-static void invoke_rcu_kthread(void);
 
 /* Forward declarations for rcutiny_plugin.h. */
 struct rcu_ctrlblk;
+static void invoke_rcu_kthread(void);
 static void rcu_process_callbacks(struct rcu_ctrlblk *rcp);
 static int rcu_kthread(void *arg);
 static void __call_rcu(struct rcu_head *head,
@@ -79,36 +80,45 @@ void rcu_exit_nohz(void)
 #endif /* #ifdef CONFIG_NO_HZ */
 
 /*
- * Helper function for rcu_qsctr_inc() and rcu_bh_qsctr_inc().
- * Also disable irqs to avoid confusion due to interrupt handlers
+ * Helper function for rcu_sched_qs() and rcu_bh_qs().
+ * Also irqs are disabled to avoid confusion due to interrupt handlers
  * invoking call_rcu().
  */
 static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
 	if (rcp->rcucblist != NULL &&
 	    rcp->donetail != rcp->curtail) {
 		rcp->donetail = rcp->curtail;
-		local_irq_restore(flags);
 		return 1;
 	}
-	local_irq_restore(flags);
 
 	return 0;
 }
 
 /*
+ * Wake up rcu_kthread() to process callbacks now eligible for invocation
+ * or to boost readers.
+ */
+static void invoke_rcu_kthread(void)
+{
+	have_rcu_kthread_work = 1;
+	wake_up(&rcu_kthread_wq);
+}
+
+/*
  * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
  * are at it, given that any rcu quiescent state is also an rcu_bh
  * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
  */
 void rcu_sched_qs(int cpu)
 {
+	unsigned long flags;
+
+	local_irq_save(flags);
 	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
 	    rcu_qsctr_help(&rcu_bh_ctrlblk))
 		invoke_rcu_kthread();
+	local_irq_restore(flags);
 }
 
 /*
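The comment in rcu_sched_qs() about using "+" rather than "||" is worth spelling out: because "||" short-circuits, a nonzero return from the rcu_sched helper would skip the rcu_bh helper entirely, so the rcu_bh callback list would not have its donetail advanced for that quiescent state. A sketch of the two forms, for illustration only (not part of the patch):

	/*
	 * Short-circuiting form -- would be a bug: the second helper is
	 * skipped whenever the first one returns 1.
	 */
	if (rcu_qsctr_help(&rcu_sched_ctrlblk) ||
	    rcu_qsctr_help(&rcu_bh_ctrlblk))
		invoke_rcu_kthread();

	/*
	 * Form used above: "+" forces both helpers to run before the
	 * wakeup decision is made.
	 */
	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
	    rcu_qsctr_help(&rcu_bh_ctrlblk))
		invoke_rcu_kthread();
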
@@ -116,8 +126,12 @@ void rcu_sched_qs(int cpu)
  */
 void rcu_bh_qs(int cpu)
 {
+	unsigned long flags;
+
+	local_irq_save(flags);
 	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
 		invoke_rcu_kthread();
+	local_irq_restore(flags);
 }
 
 /*
@@ -167,7 +181,7 @@ static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 		prefetch(next);
 		debug_rcu_head_unqueue(list);
 		local_bh_disable();
-		list->func(list);
+		__rcu_reclaim(list);
 		local_bh_enable();
 		list = next;
 		RCU_TRACE(cb_count++);
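The functional change in this hunk is that callbacks are no longer invoked directly via list->func(); they go through __rcu_reclaim(), the helper shared with Tree RCU. Roughly, that helper special-cases callbacks queued by kfree_rcu(), whose ->func field encodes the offset of the rcu_head within the enclosing object rather than a function pointer, so the object can be kfree()d without a real callback. A sketch of the idea, assuming the kernel/rcu.h helper of that era (details may differ):

	/* Sketch only, not the verbatim kernel/rcu.h implementation. */
	static inline void __rcu_reclaim(struct rcu_head *head)
	{
		unsigned long offset = (unsigned long)head->func;

		if (__is_kfree_rcu_offset(offset))
			kfree((void *)head - offset);	/* kfree_rcu(): free enclosing object */
		else
			head->func(head);		/* ordinary call_rcu() callback */
	}
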
@@ -208,20 +222,6 @@ static int rcu_kthread(void *arg)
 }
 
 /*
- * Wake up rcu_kthread() to process callbacks now eligible for invocation
- * or to boost readers.
- */
-static void invoke_rcu_kthread(void)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	have_rcu_kthread_work = 1;
-	wake_up(&rcu_kthread_wq);
-	local_irq_restore(flags);
-}
-
-/*
  * Wait for a grace period to elapse.  But it is illegal to invoke
  * synchronize_sched() from within an RCU read-side critical section.
  * Therefore, any legal call to synchronize_sched() is a quiescent
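The comment above states the usual rule for blocking grace-period primitives: synchronize_sched() must not be called from within an RCU read-side critical section, and any legal call is itself a quiescent state for the calling CPU. A minimal update-side pattern, with hypothetical names (foo_ptr, foo_lock, struct foo) used purely for illustration:

	spin_lock(&foo_lock);
	old = foo_ptr;				/* struct foo *old, *new */
	rcu_assign_pointer(foo_ptr, new);	/* publish the new version */
	spin_unlock(&foo_lock);
	synchronize_sched();			/* wait for pre-existing readers to finish */
	kfree(old);				/* no reader can still reference old */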