Diffstat (limited to 'kernel/rcu/tree_plugin.h')
-rw-r--r--  kernel/rcu/tree_plugin.h  136
1 file changed, 37 insertions(+), 99 deletions(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 962d1d589929..29977ae84e7e 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -116,7 +116,7 @@ static void __init rcu_bootup_announce_oddness(void)
 #ifdef CONFIG_TREE_PREEMPT_RCU
 
 RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
-static struct rcu_state *rcu_state = &rcu_preempt_state;
+static struct rcu_state *rcu_state_p = &rcu_preempt_state;
 
 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
 
@@ -149,15 +149,6 @@ long rcu_batches_completed(void)
 EXPORT_SYMBOL_GPL(rcu_batches_completed);
 
 /*
- * Force a quiescent state for preemptible RCU.
- */
-void rcu_force_quiescent_state(void)
-{
-        force_quiescent_state(&rcu_preempt_state);
-}
-EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
-
-/*
  * Record a preemptible-RCU quiescent state for the specified CPU. Note
  * that this just means that the task currently running on the CPU is
  * not in a quiescent state. There might be any number of tasks blocked
@@ -688,20 +679,6 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 }
 EXPORT_SYMBOL_GPL(call_rcu);
 
-/*
- * Queue an RCU callback for lazy invocation after a grace period.
- * This will likely be later named something like "call_rcu_lazy()",
- * but this change will require some way of tagging the lazy RCU
- * callbacks in the list of pending callbacks. Until then, this
- * function may only be called from __kfree_rcu().
- */
-void kfree_call_rcu(struct rcu_head *head,
-                    void (*func)(struct rcu_head *rcu))
-{
-        __call_rcu(head, func, &rcu_preempt_state, -1, 1);
-}
-EXPORT_SYMBOL_GPL(kfree_call_rcu);
-
 /**
  * synchronize_rcu - wait until a grace period has elapsed.
  *
@@ -970,7 +947,7 @@ void exit_rcu(void)
 
 #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 
-static struct rcu_state *rcu_state = &rcu_sched_state;
+static struct rcu_state *rcu_state_p = &rcu_sched_state;
 
 /*
  * Tell them what RCU they are running.
@@ -991,16 +968,6 @@ long rcu_batches_completed(void)
 EXPORT_SYMBOL_GPL(rcu_batches_completed);
 
 /*
- * Force a quiescent state for RCU, which, because there is no preemptible
- * RCU, becomes the same as rcu-sched.
- */
-void rcu_force_quiescent_state(void)
-{
-        rcu_sched_force_quiescent_state();
-}
-EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
-
-/*
  * Because preemptible RCU does not exist, we never have to check for
  * CPUs being in quiescent states.
  */
@@ -1080,22 +1047,6 @@ static void rcu_preempt_check_callbacks(int cpu)
 }
 
 /*
- * Queue an RCU callback for lazy invocation after a grace period.
- * This will likely be later named something like "call_rcu_lazy()",
- * but this change will require some way of tagging the lazy RCU
- * callbacks in the list of pending callbacks. Until then, this
- * function may only be called from __kfree_rcu().
- *
- * Because there is no preemptible RCU, we use RCU-sched instead.
- */
-void kfree_call_rcu(struct rcu_head *head,
-                    void (*func)(struct rcu_head *rcu))
-{
-        __call_rcu(head, func, &rcu_sched_state, -1, 1);
-}
-EXPORT_SYMBOL_GPL(kfree_call_rcu);
-
-/*
  * Wait for an rcu-preempt grace period, but make it happen quickly.
  * But because preemptible RCU does not exist, map to rcu-sched.
  */
@@ -1517,11 +1468,11 @@ static int __init rcu_spawn_kthreads(void)
         for_each_possible_cpu(cpu)
                 per_cpu(rcu_cpu_has_work, cpu) = 0;
         BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
-        rnp = rcu_get_root(rcu_state);
-        (void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
+        rnp = rcu_get_root(rcu_state_p);
+        (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
         if (NUM_RCU_NODES > 1) {
-                rcu_for_each_leaf_node(rcu_state, rnp)
-                        (void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
+                rcu_for_each_leaf_node(rcu_state_p, rnp)
+                        (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
         }
         return 0;
 }
@@ -1529,12 +1480,12 @@ early_initcall(rcu_spawn_kthreads);
 
 static void rcu_prepare_kthreads(int cpu)
 {
-        struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
+        struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
         struct rcu_node *rnp = rdp->mynode;
 
         /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
         if (rcu_scheduler_fully_active)
-                (void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
+                (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
 }
 
 #else /* #ifdef CONFIG_RCU_BOOST */
@@ -1744,6 +1695,7 @@ int rcu_needs_cpu(int cpu, unsigned long *dj)
 static void rcu_prepare_for_idle(int cpu)
 {
 #ifndef CONFIG_RCU_NOCB_CPU_ALL
+        bool needwake;
         struct rcu_data *rdp;
         struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
         struct rcu_node *rnp;
@@ -1792,8 +1744,10 @@ static void rcu_prepare_for_idle(int cpu)
                 rnp = rdp->mynode;
                 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
                 smp_mb__after_unlock_lock();
-                rcu_accelerate_cbs(rsp, rnp, rdp);
+                needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
                 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+                if (needwake)
+                        rcu_gp_kthread_wake(rsp);
         }
 #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
 }
@@ -1855,7 +1809,7 @@ static void rcu_oom_notify_cpu(void *unused)
         struct rcu_data *rdp;
 
         for_each_rcu_flavor(rsp) {
-                rdp = __this_cpu_ptr(rsp->rda);
+                rdp = raw_cpu_ptr(rsp->rda);
                 if (rdp->qlen_lazy != 0) {
                         atomic_inc(&oom_callback_count);
                         rsp->call(&rdp->oom_head, rcu_oom_callback);
@@ -1997,7 +1951,7 @@ static void increment_cpu_stall_ticks(void)
         struct rcu_state *rsp;
 
         for_each_rcu_flavor(rsp)
-                __this_cpu_ptr(rsp->rda)->ticks_this_gp++;
+                raw_cpu_inc(rsp->rda->ticks_this_gp);
 }
 
 #else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
@@ -2068,19 +2022,6 @@ static int __init parse_rcu_nocb_poll(char *arg)
 early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
 
 /*
- * Do any no-CBs CPUs need another grace period?
- *
- * Interrupts must be disabled. If the caller does not hold the root
- * rnp_node structure's ->lock, the results are advisory only.
- */
-static int rcu_nocb_needs_gp(struct rcu_state *rsp)
-{
-        struct rcu_node *rnp = rcu_get_root(rsp);
-
-        return rnp->need_future_gp[(ACCESS_ONCE(rnp->completed) + 1) & 0x1];
-}
-
-/*
  * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
  * grace period.
  */
@@ -2109,7 +2050,7 @@ static void rcu_init_one_nocb(struct rcu_node *rnp)
 }
 
 #ifndef CONFIG_RCU_NOCB_CPU_ALL
-/* Is the specified CPU a no-CPUs CPU? */
+/* Is the specified CPU a no-CBs CPU? */
 bool rcu_is_nocb_cpu(int cpu)
 {
         if (have_rcu_nocb_mask)
@@ -2243,12 +2184,15 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
         unsigned long c;
         bool d;
         unsigned long flags;
+        bool needwake;
         struct rcu_node *rnp = rdp->mynode;
 
         raw_spin_lock_irqsave(&rnp->lock, flags);
         smp_mb__after_unlock_lock();
-        c = rcu_start_future_gp(rnp, rdp);
+        needwake = rcu_start_future_gp(rnp, rdp, &c);
         raw_spin_unlock_irqrestore(&rnp->lock, flags);
+        if (needwake)
+                rcu_gp_kthread_wake(rdp->rsp);
 
         /*
          * Wait for the grace period. Do so interruptibly to avoid messing
@@ -2402,11 +2346,6 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
 
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
 
-static int rcu_nocb_needs_gp(struct rcu_state *rsp)
-{
-        return 0;
-}
-
 static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 {
 }
@@ -2657,20 +2596,6 @@ static bool is_sysidle_rcu_state(struct rcu_state *rsp)
 }
 
 /*
- * Bind the grace-period kthread for the sysidle flavor of RCU to the
- * timekeeping CPU.
- */
-static void rcu_bind_gp_kthread(void)
-{
-        int cpu = ACCESS_ONCE(tick_do_timer_cpu);
-
-        if (cpu < 0 || cpu >= nr_cpu_ids)
-                return;
-        if (raw_smp_processor_id() != cpu)
-                set_cpus_allowed_ptr(current, cpumask_of(cpu));
-}
-
-/*
  * Return a delay in jiffies based on the number of CPUs, rcu_node
  * leaf fanout, and jiffies tick rate. The idea is to allow larger
  * systems more time to transition to full-idle state in order to
@@ -2734,7 +2659,8 @@ static void rcu_sysidle(unsigned long j)
 static void rcu_sysidle_cancel(void)
 {
         smp_mb();
-        ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
+        if (full_sysidle_state > RCU_SYSIDLE_SHORT)
+                ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
 }
 
 /*
@@ -2880,10 +2806,6 @@ static bool is_sysidle_rcu_state(struct rcu_state *rsp)
         return false;
 }
 
-static void rcu_bind_gp_kthread(void)
-{
-}
-
 static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
                                   unsigned long maxj)
 {
@@ -2914,3 +2836,19 @@ static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
 #endif /* #ifdef CONFIG_NO_HZ_FULL */
         return 0;
 }
+
+/*
+ * Bind the grace-period kthread for the sysidle flavor of RCU to the
+ * timekeeping CPU.
+ */
+static void rcu_bind_gp_kthread(void)
+{
+#ifdef CONFIG_NO_HZ_FULL
+        int cpu = ACCESS_ONCE(tick_do_timer_cpu);
+
+        if (cpu < 0 || cpu >= nr_cpu_ids)
+                return;
+        if (raw_smp_processor_id() != cpu)
+                set_cpus_allowed_ptr(current, cpumask_of(cpu));
+#endif /* #ifdef CONFIG_NO_HZ_FULL */
+}