Diffstat (limited to 'kernel/rcu/tree_plugin.h')
-rw-r--r--  kernel/rcu/tree_plugin.h | 77
1 file changed, 47 insertions(+), 30 deletions(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 59318ea32bc8..e2c5910546f6 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -128,18 +128,19 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed);
  * not in a quiescent state. There might be any number of tasks blocked
  * while in an RCU read-side critical section.
  *
- * Unlike the other rcu_*_qs() functions, callers to this function
- * must disable irqs in order to protect the assignment to
- * ->rcu_read_unlock_special.
+ * As with the other rcu_*_qs() functions, callers to this function
+ * must disable preemption.
  */
-static void rcu_preempt_qs(int cpu)
+static void rcu_preempt_qs(void)
 {
-        struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
-
-        if (rdp->passed_quiesce == 0)
-                trace_rcu_grace_period(TPS("rcu_preempt"), rdp->gpnum, TPS("cpuqs"));
-        rdp->passed_quiesce = 1;
-        current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
+        if (!__this_cpu_read(rcu_preempt_data.passed_quiesce)) {
+                trace_rcu_grace_period(TPS("rcu_preempt"),
+                                       __this_cpu_read(rcu_preempt_data.gpnum),
+                                       TPS("cpuqs"));
+                __this_cpu_write(rcu_preempt_data.passed_quiesce, 1);
+                barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
+                current->rcu_read_unlock_special.b.need_qs = false;
+        }
 }
 
 /*
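The rewritten rcu_preempt_qs() above drops its cpu argument and the per_cpu() lookup in favor of __this_cpu_read()/__this_cpu_write(), which is why its header comment now only asks callers to disable preemption rather than irqs. As a rough illustration of that accessor idiom (the per-CPU variable and function below are hypothetical, not part of this patch):

#include <linux/percpu.h>
#include <linux/preempt.h>

/* Hypothetical per-CPU flag, analogous to rcu_preempt_data.passed_quiesce. */
static DEFINE_PER_CPU(int, example_passed_quiesce);

static void example_record_qs(void)
{
        /*
         * __this_cpu_*() takes no cpu argument; it assumes the caller
         * already prevents migration, e.g. by disabling preemption.
         */
        preempt_disable();
        if (!__this_cpu_read(example_passed_quiesce))
                __this_cpu_write(example_passed_quiesce, 1);
        preempt_enable();
}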
@@ -163,14 +164,14 @@ static void rcu_preempt_note_context_switch(int cpu)
         struct rcu_node *rnp;
 
         if (t->rcu_read_lock_nesting > 0 &&
-            (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
+            !t->rcu_read_unlock_special.b.blocked) {
 
                 /* Possibly blocking in an RCU read-side critical section. */
                 rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
                 rnp = rdp->mynode;
                 raw_spin_lock_irqsave(&rnp->lock, flags);
                 smp_mb__after_unlock_lock();
-                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
+                t->rcu_read_unlock_special.b.blocked = true;
                 t->rcu_blocked_node = rnp;
 
                 /*
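The .b.blocked store above replaces the old RCU_READ_UNLOCK_BLOCKED bit twiddling: t->rcu_read_unlock_special is now a small union whose flags can be set individually as booleans, while .s still lets callers test all of them at once (as in the t->rcu_read_unlock_special.s check in the next hunk). The field itself is declared in the task_struct side of this series rather than in this file, so the following is only a sketch of the layout these accesses imply:

/* Approximate shape implied by the .b.blocked / .b.need_qs / .s accesses. */
union rcu_special {
        struct {
                bool blocked;   /* Task was preempted in an RCU read-side section. */
                bool need_qs;   /* RCU core wants a quiescent state from this task. */
        } b;                    /* Individual flags. */
        short s;                /* All flags at once, for "anything pending?" tests. */
};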
@@ -212,7 +213,7 @@ static void rcu_preempt_note_context_switch(int cpu)
                                        : rnp->gpnum + 1);
                 raw_spin_unlock_irqrestore(&rnp->lock, flags);
         } else if (t->rcu_read_lock_nesting < 0 &&
-                   t->rcu_read_unlock_special) {
+                   t->rcu_read_unlock_special.s) {
 
                 /*
                  * Complete exit from RCU read-side critical section on
@@ -230,9 +231,7 @@ static void rcu_preempt_note_context_switch(int cpu)
          * grace period, then the fact that the task has been enqueued
          * means that we continue to block the current grace period.
          */
-        local_irq_save(flags);
-        rcu_preempt_qs(cpu);
-        local_irq_restore(flags);
+        rcu_preempt_qs();
 }
 
 /*
@@ -313,7 +312,7 @@ void rcu_read_unlock_special(struct task_struct *t)
         bool drop_boost_mutex = false;
 #endif /* #ifdef CONFIG_RCU_BOOST */
         struct rcu_node *rnp;
-        int special;
+        union rcu_special special;
 
         /* NMI handlers cannot block and cannot safely manipulate state. */
         if (in_nmi())
@@ -323,12 +322,13 @@ void rcu_read_unlock_special(struct task_struct *t)
 
         /*
          * If RCU core is waiting for this CPU to exit critical section,
-         * let it know that we have done so.
+         * let it know that we have done so. Because irqs are disabled,
+         * t->rcu_read_unlock_special cannot change.
          */
         special = t->rcu_read_unlock_special;
-        if (special & RCU_READ_UNLOCK_NEED_QS) {
-                rcu_preempt_qs(smp_processor_id());
-                if (!t->rcu_read_unlock_special) {
+        if (special.b.need_qs) {
+                rcu_preempt_qs();
+                if (!t->rcu_read_unlock_special.s) {
                         local_irq_restore(flags);
                         return;
                 }
@@ -341,8 +341,8 @@ void rcu_read_unlock_special(struct task_struct *t)
         }
 
         /* Clean up if blocked during RCU read-side critical section. */
-        if (special & RCU_READ_UNLOCK_BLOCKED) {
-                t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;
+        if (special.b.blocked) {
+                t->rcu_read_unlock_special.b.blocked = false;
 
                 /*
                  * Remove this task from the list it blocked on. The
@@ -626,12 +626,13 @@ static void rcu_preempt_check_callbacks(int cpu)
         struct task_struct *t = current;
 
         if (t->rcu_read_lock_nesting == 0) {
-                rcu_preempt_qs(cpu);
+                rcu_preempt_qs();
                 return;
         }
         if (t->rcu_read_lock_nesting > 0 &&
-            per_cpu(rcu_preempt_data, cpu).qs_pending)
-                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
+            per_cpu(rcu_preempt_data, cpu).qs_pending &&
+            !per_cpu(rcu_preempt_data, cpu).passed_quiesce)
+                t->rcu_read_unlock_special.b.need_qs = true;
 }
 
 #ifdef CONFIG_RCU_BOOST
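With the extra !passed_quiesce test above, rcu_preempt_check_callbacks() no longer re-arms need_qs once this CPU has already reported a quiescent state; this appears to be the other half of the barrier() added to rcu_preempt_qs() in the first hunk ("Coordinate with rcu_preempt_check_callbacks()"). Both functions run on the local CPU, the latter from the scheduling-clock interrupt, so a compiler-only barrier suffices there. For reference, the kernel's barrier() is essentially an empty asm with a memory clobber, along the lines of this illustrative macro (not the kernel's actual header):

/* Compiler-only barrier: the compiler may not reorder or cache memory
 * accesses across this point; no CPU fence instruction is emitted. */
#define my_barrier()    __asm__ __volatile__("" : : : "memory")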
@@ -915,7 +916,7 @@ void exit_rcu(void)
                 return;
         t->rcu_read_lock_nesting = 1;
         barrier();
-        t->rcu_read_unlock_special = RCU_READ_UNLOCK_BLOCKED;
+        t->rcu_read_unlock_special.b.blocked = true;
         __rcu_read_unlock();
 }
 
@@ -1816,7 +1817,7 @@ static int rcu_oom_notify(struct notifier_block *self,
         get_online_cpus();
         for_each_online_cpu(cpu) {
                 smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
-                cond_resched();
+                cond_resched_rcu_qs();
         }
         put_online_cpus();
 
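cond_resched() only offers to reschedule; cond_resched_rcu_qs(), used above instead, additionally reports a quiescent state to RCU, which matters in long-running loops such as this per-CPU walk, where a grace period can otherwise be held up. A hedged sketch of the usage pattern (the loop below is illustrative, not from this patch, and assumes the helper is visible via the RCU headers of this series):

#include <linux/list.h>
#include <linux/rcupdate.h>

/* Illustrative long-running scan: note a quiescent state on each pass so
 * that an ongoing grace period is not stalled by this loop. */
static unsigned long example_count(struct list_head *head)
{
        struct list_head *p;
        unsigned long n = 0;

        list_for_each(p, head) {
                n++;                            /* stand-in for real per-item work */
                cond_resched_rcu_qs();          /* reschedule if needed + report QS */
        }
        return n;
}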
@@ -3162,3 +3163,19 @@ static void rcu_bind_gp_kthread(void)
         housekeeping_affine(current);
 #endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 }
+
+/* Record the current task on dyntick-idle entry. */
+static void rcu_dynticks_task_enter(void)
+{
+#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
+        ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id();
+#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
+}
+
+/* Record no current task on dyntick-idle exit. */
+static void rcu_dynticks_task_exit(void)
+{
+#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
+        ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1;
+#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
+}
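The two new hooks record, when both CONFIG_TASKS_RCU and CONFIG_NO_HZ_FULL are enabled, which CPU the current task enters dyntick-idle on, and reset the field to -1 on exit, presumably so the RCU-tasks grace-period machinery can sample it without locking. ACCESS_ONCE() of this era forces a single, un-optimized volatile access; an illustration of that idiom under those assumptions (names below are illustrative, not the kernel's compiler.h):

/* Approximation of the old ACCESS_ONCE() idiom (since superseded upstream by
 * READ_ONCE()/WRITE_ONCE()): a volatile access the compiler must perform
 * exactly once, without merging it with neighbours or hoisting it out of loops. */
#define MY_ACCESS_ONCE(x)       (*(volatile typeof(x) *)&(x))

static int example_idle_cpu = -1;       /* stand-in for ->rcu_tasks_idle_cpu */

static void example_dynticks_enter(int cpu)
{
        MY_ACCESS_ONCE(example_idle_cpu) = cpu; /* publish "idling on this CPU" */
}

static void example_dynticks_exit(void)
{
        MY_ACCESS_ONCE(example_idle_cpu) = -1;  /* publish "not idling" */
}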