path: root/kernel/rcu/tree_plugin.h
Diffstat (limited to 'kernel/rcu/tree_plugin.h')
-rw-r--r--	kernel/rcu/tree_plugin.h	111
1 file changed, 59 insertions(+), 52 deletions(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index c1d7f27bd38f..3ec85cb5d544 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -30,14 +30,24 @@
 #include <linux/smpboot.h>
 #include "../time/tick-internal.h"
 
-#define RCU_KTHREAD_PRIO 1
-
 #ifdef CONFIG_RCU_BOOST
+
 #include "../locking/rtmutex_common.h"
-#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
-#else
-#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
-#endif
+
+/* rcuc/rcub kthread realtime priority */
+static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
+module_param(kthread_prio, int, 0644);
+
+/*
+ * Control variables for per-CPU and per-rcu_node kthreads.  These
+ * handle all flavors of RCU.
+ */
+static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
+DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
+DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
+DEFINE_PER_CPU(char, rcu_cpu_has_work);
+
+#endif /* #ifdef CONFIG_RCU_BOOST */
 
 #ifdef CONFIG_RCU_NOCB_CPU
 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
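
Side note on the new knob: module_param() in code built into the kernel image exposes the variable as a boot parameter named after the containing file's module (here presumably rcutree.kthread_prio, since tree_plugin.h is included from tree.c), and the 0644 mode additionally publishes it under /sys/module/<modname>/parameters/. A minimal sketch of the same pattern, using a hypothetical parameter name:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Hypothetical parameter mirroring the pattern above.  Built into the
 * kernel image, it would be set at boot as "<modname>.example_prio=N"
 * and, with mode 0644, be readable and writable at
 * /sys/module/<modname>/parameters/example_prio. */
static int example_prio = 1;
module_param(example_prio, int, 0644);
MODULE_PARM_DESC(example_prio, "Realtime priority for example kthreads");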
@@ -72,9 +82,6 @@ static void __init rcu_bootup_announce_oddness(void)
 #ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
 	pr_info("\tRCU torture testing starts during boot.\n");
 #endif
-#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
-	pr_info("\tDump stacks of tasks blocking RCU-preempt GP.\n");
-#endif
 #if defined(CONFIG_RCU_CPU_STALL_INFO)
 	pr_info("\tAdditional per-CPU info printed with stalls.\n");
 #endif
@@ -85,9 +92,12 @@ static void __init rcu_bootup_announce_oddness(void)
 	pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
 	if (nr_cpu_ids != NR_CPUS)
 		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
+#ifdef CONFIG_RCU_BOOST
+	pr_info("\tRCU kthread priority: %d.\n", kthread_prio);
+#endif
 }
 
-#ifdef CONFIG_TREE_PREEMPT_RCU
+#ifdef CONFIG_PREEMPT_RCU
 
 RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
 static struct rcu_state *rcu_state_p = &rcu_preempt_state;
@@ -156,7 +166,7 @@ static void rcu_preempt_qs(void)
  *
  * Caller must disable preemption.
  */
-static void rcu_preempt_note_context_switch(int cpu)
+static void rcu_preempt_note_context_switch(void)
 {
 	struct task_struct *t = current;
 	unsigned long flags;
@@ -167,7 +177,7 @@ static void rcu_preempt_note_context_switch(int cpu)
 	    !t->rcu_read_unlock_special.b.blocked) {
 
 		/* Possibly blocking in an RCU read-side critical section. */
-		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
+		rdp = this_cpu_ptr(rcu_preempt_state.rda);
 		rnp = rdp->mynode;
 		raw_spin_lock_irqsave(&rnp->lock, flags);
 		smp_mb__after_unlock_lock();
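
The recurring conversion throughout this patch replaces per_cpu_ptr(p, cpu) with this_cpu_ptr(p) and drops the cpu argument, which is valid because each caller runs with preemption (or interrupts) disabled and therefore cannot migrate. A minimal sketch of the two accessors, with a hypothetical per-CPU variable:

#include <linux/percpu.h>

struct example_data {
	int count;
};
static DEFINE_PER_CPU(struct example_data, example_data);

/* Explicit-CPU form: valid for any CPU, local or remote. */
static struct example_data *example_get(int cpu)
{
	return per_cpu_ptr(&example_data, cpu);
}

/* Current-CPU form: the caller must prevent migration (preemption or
 * interrupts disabled), exactly the precondition documented above. */
static struct example_data *example_get_local(void)
{
	return this_cpu_ptr(&example_data);
}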
@@ -415,8 +425,6 @@ void rcu_read_unlock_special(struct task_struct *t)
 	}
 }
 
-#ifdef CONFIG_RCU_CPU_STALL_VERBOSE
-
 /*
  * Dump detailed information for all tasks blocking the current RCU
  * grace period on the specified rcu_node structure.
@@ -451,14 +459,6 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
 		rcu_print_detail_task_stall_rnp(rnp);
 }
 
-#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
-
-static void rcu_print_detail_task_stall(struct rcu_state *rsp)
-{
-}
-
-#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
-
 #ifdef CONFIG_RCU_CPU_STALL_INFO
 
 static void rcu_print_task_stall_begin(struct rcu_node *rnp)
@@ -621,7 +621,7 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
  *
  * Caller must disable hard irqs.
  */
-static void rcu_preempt_check_callbacks(int cpu)
+static void rcu_preempt_check_callbacks(void)
 {
 	struct task_struct *t = current;
 
@@ -630,8 +630,8 @@ static void rcu_preempt_check_callbacks(int cpu)
 		return;
 	}
 	if (t->rcu_read_lock_nesting > 0 &&
-	    per_cpu(rcu_preempt_data, cpu).qs_pending &&
-	    !per_cpu(rcu_preempt_data, cpu).passed_quiesce)
+	    __this_cpu_read(rcu_preempt_data.qs_pending) &&
+	    !__this_cpu_read(rcu_preempt_data.passed_quiesce))
 		t->rcu_read_unlock_special.b.need_qs = true;
 }
 
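
Similarly, per_cpu(var, cpu) field reads become __this_cpu_read(); the double-underscore variant skips the preemption sanity checks of this_cpu_read() and is safe here only because the caller already has preemption disabled. A hypothetical illustration:

#include <linux/percpu.h>
#include <linux/types.h>

static DEFINE_PER_CPU(int, example_flag);

/* Caller must have preemption disabled, as rcu_preempt_check_callbacks()
 * does; otherwise the checked this_cpu_read() would be the right call. */
static bool example_flag_set(void)
{
	return __this_cpu_read(example_flag) != 0;
}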
@@ -919,7 +919,7 @@ void exit_rcu(void)
 	__rcu_read_unlock();
 }
 
-#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+#else /* #ifdef CONFIG_PREEMPT_RCU */
 
 static struct rcu_state *rcu_state_p = &rcu_sched_state;
 
@@ -945,7 +945,7 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed);
  * Because preemptible RCU does not exist, we never have to check for
  * CPUs being in quiescent states.
  */
-static void rcu_preempt_note_context_switch(int cpu)
+static void rcu_preempt_note_context_switch(void)
 {
 }
 
@@ -1017,7 +1017,7 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
  * Because preemptible RCU does not exist, it never has any callbacks
  * to check.
  */
-static void rcu_preempt_check_callbacks(int cpu)
+static void rcu_preempt_check_callbacks(void)
 {
 }
 
@@ -1070,7 +1070,7 @@ void exit_rcu(void)
 {
 }
 
-#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
 #ifdef CONFIG_RCU_BOOST
 
@@ -1326,7 +1326,7 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	smp_mb__after_unlock_lock();
 	rnp->boost_kthread_task = t;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
-	sp.sched_priority = RCU_BOOST_PRIO;
+	sp.sched_priority = kthread_prio;
 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
 	return 0;
@@ -1343,7 +1343,7 @@ static void rcu_cpu_kthread_setup(unsigned int cpu)
 {
 	struct sched_param sp;
 
-	sp.sched_priority = RCU_KTHREAD_PRIO;
+	sp.sched_priority = kthread_prio;
 	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
 }
 
@@ -1512,10 +1512,10 @@ static void rcu_prepare_kthreads(int cpu)
  * any flavor of RCU.
  */
 #ifndef CONFIG_RCU_NOCB_CPU_ALL
-int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
+int rcu_needs_cpu(unsigned long *delta_jiffies)
 {
 	*delta_jiffies = ULONG_MAX;
-	return rcu_cpu_has_callbacks(cpu, NULL);
+	return rcu_cpu_has_callbacks(NULL);
 }
 #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
 
@@ -1523,7 +1523,7 @@ int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
  * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
  * after it.
  */
-static void rcu_cleanup_after_idle(int cpu)
+static void rcu_cleanup_after_idle(void)
 {
 }
 
@@ -1531,7 +1531,7 @@ static void rcu_cleanup_after_idle(int cpu)
  * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
  * is nothing.
  */
-static void rcu_prepare_for_idle(int cpu)
+static void rcu_prepare_for_idle(void)
 {
 }
 
@@ -1624,15 +1624,15 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
  * The caller must have disabled interrupts.
  */
 #ifndef CONFIG_RCU_NOCB_CPU_ALL
-int rcu_needs_cpu(int cpu, unsigned long *dj)
+int rcu_needs_cpu(unsigned long *dj)
 {
-	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
 	/* Snapshot to detect later posting of non-lazy callback. */
 	rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
 
 	/* If no callbacks, RCU doesn't need the CPU. */
-	if (!rcu_cpu_has_callbacks(cpu, &rdtp->all_lazy)) {
+	if (!rcu_cpu_has_callbacks(&rdtp->all_lazy)) {
 		*dj = ULONG_MAX;
 		return 0;
 	}
@@ -1666,12 +1666,12 @@ int rcu_needs_cpu(int cpu, unsigned long *dj)
  *
  * The caller must have disabled interrupts.
  */
-static void rcu_prepare_for_idle(int cpu)
+static void rcu_prepare_for_idle(void)
 {
 #ifndef CONFIG_RCU_NOCB_CPU_ALL
 	bool needwake;
 	struct rcu_data *rdp;
-	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 	struct rcu_node *rnp;
 	struct rcu_state *rsp;
 	int tne;
@@ -1679,7 +1679,7 @@ static void rcu_prepare_for_idle(int cpu)
 	/* Handle nohz enablement switches conservatively. */
 	tne = ACCESS_ONCE(tick_nohz_active);
 	if (tne != rdtp->tick_nohz_enabled_snap) {
-		if (rcu_cpu_has_callbacks(cpu, NULL))
+		if (rcu_cpu_has_callbacks(NULL))
 			invoke_rcu_core(); /* force nohz to see update. */
 		rdtp->tick_nohz_enabled_snap = tne;
 		return;
@@ -1688,7 +1688,7 @@ static void rcu_prepare_for_idle(int cpu)
 		return;
 
 	/* If this is a no-CBs CPU, no callbacks, just return. */
-	if (rcu_is_nocb_cpu(cpu))
+	if (rcu_is_nocb_cpu(smp_processor_id()))
 		return;
 
 	/*
@@ -1712,7 +1712,7 @@ static void rcu_prepare_for_idle(int cpu)
 		return;
 	rdtp->last_accelerate = jiffies;
 	for_each_rcu_flavor(rsp) {
-		rdp = per_cpu_ptr(rsp->rda, cpu);
+		rdp = this_cpu_ptr(rsp->rda);
 		if (!*rdp->nxttail[RCU_DONE_TAIL])
 			continue;
 		rnp = rdp->mynode;
@@ -1731,10 +1731,10 @@ static void rcu_prepare_for_idle(int cpu)
  * any grace periods that elapsed while the CPU was idle, and if any
  * callbacks are now ready to invoke, initiate invocation.
  */
-static void rcu_cleanup_after_idle(int cpu)
+static void rcu_cleanup_after_idle(void)
 {
 #ifndef CONFIG_RCU_NOCB_CPU_ALL
-	if (rcu_is_nocb_cpu(cpu))
+	if (rcu_is_nocb_cpu(smp_processor_id()))
 		return;
 	if (rcu_try_advance_all_cbs())
 		invoke_rcu_core();
@@ -2573,9 +2573,13 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
 			rdp->nocb_leader = rdp_spawn;
 			if (rdp_last && rdp != rdp_spawn)
 				rdp_last->nocb_next_follower = rdp;
-			rdp_last = rdp;
-			rdp = rdp->nocb_next_follower;
-			rdp_last->nocb_next_follower = NULL;
+			if (rdp == rdp_spawn) {
+				rdp = rdp->nocb_next_follower;
+			} else {
+				rdp_last = rdp;
+				rdp = rdp->nocb_next_follower;
+				rdp_last->nocb_next_follower = NULL;
+			}
 		} while (rdp);
 		rdp_spawn->nocb_next_follower = rdp_old_leader;
 	}
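
For clarity, the hunk above fixes the rebuild of the leader's follower list: the old unconditional tail-append also linked rdp_spawn, the CPU becoming a leader, into the chain of its own followers. The corrected loop skips rdp_spawn and appends only genuine followers. A standalone sketch of the corrected logic, with simplified hypothetical names:

struct node {
	struct node *next_follower;
	struct node *leader;
};

/* Re-point every node on the old follower list at new_leader, rebuilding
 * the chain as we go; new_leader itself is skipped rather than appended,
 * which is precisely the fix in the hunk above. */
static void rebuild_followers(struct node *head, struct node *new_leader)
{
	struct node *n = head;
	struct node *last = NULL;

	while (n) {
		n->leader = new_leader;
		if (last && n != new_leader)
			last->next_follower = n;
		if (n == new_leader) {
			n = n->next_follower;
		} else {
			last = n;
			n = n->next_follower;
			last->next_follower = NULL;	/* terminate the new chain */
		}
	}
}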
@@ -2761,9 +2765,10 @@ static int full_sysidle_state; /* Current system-idle state. */
  * to detect full-system idle states, not RCU quiescent states and grace
  * periods.  The caller must have disabled interrupts.
  */
-static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
+static void rcu_sysidle_enter(int irq)
 {
 	unsigned long j;
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
 	/* If there are no nohz_full= CPUs, no need to track this. */
 	if (!tick_nohz_full_enabled())
@@ -2832,8 +2837,10 @@ void rcu_sysidle_force_exit(void)
  * usermode execution does -not- count as idle here!  The caller must
  * have disabled interrupts.
  */
-static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
+static void rcu_sysidle_exit(int irq)
 {
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+
 	/* If there are no nohz_full= CPUs, no need to track this. */
 	if (!tick_nohz_full_enabled())
 		return;
@@ -3127,11 +3134,11 @@ static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
 
 #else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 
-static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
+static void rcu_sysidle_enter(int irq)
 {
 }
 
-static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
+static void rcu_sysidle_exit(int irq)
 {
 }
 