 kernel/rcutree.c        |   5 +-
 kernel/rcutree.h        |   4 +
 kernel/rcutree_plugin.h | 156 ++++++++++++++++++++++++++++++++++-----------
 3 files changed, 132 insertions(+), 33 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 9888a0ad2d4e..b1711c48a7ec 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -365,6 +365,7 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
 			  current->pid, current->comm,
 			  idle->pid, idle->comm); /* must be idle task! */
 	}
+	rcu_prepare_for_idle(smp_processor_id());
 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
 	smp_mb__before_atomic_inc();  /* See above. */
 	atomic_inc(&rdtp->dynticks);
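
The point of this hunk is ordering: rcu_prepare_for_idle() must run before the atomic_inc() that flips rdtp->dynticks to an even (idle) value, so whatever callback work it does is still observed by RCU. As a rough userspace sketch of that ordering, with C11 atomics standing in for the kernel's atomic_t and smp_mb__before_atomic_inc() (the *_sim names are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int dynticks = 1;	/* odd: CPU is not idle, RCU watches it */

static void rcu_prepare_for_idle_sim(void)
{
	/* Still odd here: any callback handling is visible to RCU. */
	printf("prepare_for_idle: dynticks=%d (odd)\n", atomic_load(&dynticks));
}

static void rcu_idle_enter_sim(void)
{
	rcu_prepare_for_idle_sim();			/* the new call in this hunk */
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb__before_atomic_inc() */
	atomic_fetch_add(&dynticks, 1);			/* now even: CPU is idle */
}

int main(void)
{
	rcu_idle_enter_sim();
	printf("after idle entry: dynticks=%d (even)\n", atomic_load(&dynticks));
	return 0;
}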
@@ -1085,6 +1086,7 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
 	 * callbacks are waiting on the grace period that just now
 	 * completed.
 	 */
+	rcu_schedule_wake_gp_end();
 	if (*rdp->nxttail[RCU_WAIT_TAIL] == NULL) {
 		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
 
@@ -1670,6 +1672,7 @@ static void rcu_process_callbacks(struct softirq_action *unused)
 				&__get_cpu_var(rcu_sched_data));
 	__rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
 	rcu_preempt_process_callbacks();
+	rcu_wake_cpus_for_gp_end();
 	trace_rcu_utilization("End RCU core");
 }
 
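Taken together with the rcu_report_qs_rsp() hunk above, this completes a defer-then-act pair: the grace-period-end path runs with interrupts disabled, where sending IPIs is illegal, so rcu_schedule_wake_gp_end() only sets a flag; rcu_wake_cpus_for_gp_end() then acts on the flag here, at softirq exit. A minimal single-threaded sketch of that shape, with plain variables standing in for the per-CPU state:

#include <stdbool.h>
#include <stdio.h>

static bool wake_gp_end;		/* stands in for rdtp->wake_gp_end */

/* Grace-period-end path, interrupts disabled: just record the need. */
static void schedule_wake_gp_end_sim(void)
{
	wake_gp_end = true;
}

/* Softirq exit path, where sending IPIs is legal: act on the record. */
static void wake_cpus_for_gp_end_sim(void)
{
	if (!wake_gp_end)
		return;
	wake_gp_end = false;
	printf("would IPI each CPU whose rcu_awake_at_gp_end flag is set\n");
}

int main(void)
{
	schedule_wake_gp_end_sim();	/* a grace period just ended */
	wake_cpus_for_gp_end_sim();	/* "End RCU core" time */
	return 0;
}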
@@ -1923,7 +1926,7 @@ static int rcu_pending(int cpu)
  * by the current CPU, even if none need be done immediately, returning
  * 1 if so.
  */
-static int rcu_needs_cpu_quick_check(int cpu)
+static int rcu_cpu_has_callbacks(int cpu)
 {
 	/* RCU callbacks either ready or pending? */
 	return per_cpu(rcu_sched_data, cpu).nxtlist ||
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index fd2f87db2ab1..ea32405177c9 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -88,6 +88,7 @@ struct rcu_dynticks {
 					    /* Process level is worth LLONG_MAX/2. */
 	int dynticks_nmi_nesting;	    /* Track NMI nesting level. */
 	atomic_t dynticks;		    /* Even value for idle, else odd. */
+	int wake_gp_end;		    /* A GP ended, need to wake up CPUs. */
 };
 
 /* RCU's kthread states for tracing. */
@@ -467,5 +468,8 @@ static void rcu_yield(void (*f)(unsigned long), unsigned long arg);
 #endif /* #ifdef CONFIG_RCU_BOOST */
 static void rcu_cpu_kthread_setrt(int cpu, int to_rt);
 static void __cpuinit rcu_prepare_kthreads(int cpu);
+static void rcu_prepare_for_idle(int cpu);
+static void rcu_wake_cpus_for_gp_end(void);
+static void rcu_schedule_wake_gp_end(void);
 
 #endif /* #ifndef RCU_TREE_NONCORE */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 7a7961feeecf..b70ca8cc52e1 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1953,7 +1953,31 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
  */
 int rcu_needs_cpu(int cpu)
 {
-	return rcu_needs_cpu_quick_check(cpu);
+	return rcu_cpu_has_callbacks(cpu);
+}
+
+/*
+ * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
+ * is nothing.
+ */
+static void rcu_prepare_for_idle(int cpu)
+{
+}
+
+/*
+ * CPUs are never putting themselves to sleep with callbacks pending,
+ * so there is no need to awaken them.
+ */
+static void rcu_wake_cpus_for_gp_end(void)
+{
+}
+
+/*
+ * CPUs are never putting themselves to sleep with callbacks pending,
+ * so there is no need to schedule the act of awakening them.
+ */
+static void rcu_schedule_wake_gp_end(void)
+{
 }
 
 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
@@ -1961,47 +1985,56 @@ int rcu_needs_cpu(int cpu)
 #define RCU_NEEDS_CPU_FLUSHES 5
 static DEFINE_PER_CPU(int, rcu_dyntick_drain);
 static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
+static DEFINE_PER_CPU(bool, rcu_awake_at_gp_end);
 
 /*
- * Check to see if any future RCU-related work will need to be done
- * by the current CPU, even if none need be done immediately, returning
- * 1 if so.  This function is part of the RCU implementation; it is -not-
- * an exported member of the RCU API.
+ * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
+ * callbacks on this CPU, (2) this CPU has not yet attempted to enter
+ * dyntick-idle mode, or (3) this CPU is in the process of attempting to
+ * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
+ * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
+ * it is better to incur scheduling-clock interrupts than to spin
+ * continuously for the same time duration!
+ */
+int rcu_needs_cpu(int cpu)
+{
+	/* If no callbacks, RCU doesn't need the CPU. */
+	if (!rcu_cpu_has_callbacks(cpu))
+		return 0;
+	/* Otherwise, RCU needs the CPU only if it recently tried and failed. */
+	return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies;
+}
+
+/*
+ * Check to see if any RCU-related work can be done by the current CPU,
+ * and if so, schedule a softirq to get it done.  This function is part
+ * of the RCU implementation; it is -not- an exported member of the RCU API.
  *
- * Because we are not supporting preemptible RCU, attempt to accelerate
- * any current grace periods so that RCU no longer needs this CPU, but
- * only if all other CPUs are already in dynticks-idle mode.  This will
- * allow the CPU cores to be powered down immediately, as opposed to after
- * waiting many milliseconds for grace periods to elapse.
+ * The idea is for the current CPU to clear out all work required by the
+ * RCU core for the current grace period, so that this CPU can be permitted
+ * to enter dyntick-idle mode.  In some cases, it will need to be awakened
+ * at the end of the grace period by whatever CPU ends the grace period.
+ * This allows CPUs to go dyntick-idle more quickly, and to reduce the
+ * number of wakeups by a modest integer factor.
  *
  * Because it is not legal to invoke rcu_process_callbacks() with irqs
  * disabled, we do one pass of force_quiescent_state(), then do a
  * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
  * later.  The per-cpu rcu_dyntick_drain variable controls the sequencing.
+ *
+ * The caller must have disabled interrupts.
  */
-int rcu_needs_cpu(int cpu)
+static void rcu_prepare_for_idle(int cpu)
 {
 	int c = 0;
-	int snap;
-	int thatcpu;
 
-	/* Check for being in the holdoff period. */
-	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
-		return rcu_needs_cpu_quick_check(cpu);
-
-	/* Don't bother unless we are the last non-dyntick-idle CPU. */
-	for_each_online_cpu(thatcpu) {
-		if (thatcpu == cpu)
-			continue;
-		snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
-						     thatcpu).dynticks);
-		smp_mb(); /* Order sampling of snap with end of grace period. */
-		if ((snap & 0x1) != 0) {
-			per_cpu(rcu_dyntick_drain, cpu) = 0;
-			per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
-			return rcu_needs_cpu_quick_check(cpu);
-		}
+	/* If no callbacks or in the holdoff period, enter dyntick-idle. */
+	if (!rcu_cpu_has_callbacks(cpu)) {
+		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
+		return;
 	}
+	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
+		return;
 
 	/* Check and update the rcu_dyntick_drain sequencing. */
 	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
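
This hunk replaces the old scan of every other CPU's dynticks counter with purely per-CPU state: rcu_dyntick_drain counts down the flush attempts, and rcu_dyntick_holdoff records the jiffy in which this CPU gave up. A small userspace simulation of that sequencing, under the assumption of a clock frozen within a single jiffy (the names are stand-ins for the per-CPU variables, not the kernel API):

#include <stdio.h>

#define RCU_NEEDS_CPU_FLUSHES 5

static int drain;			/* rcu_dyntick_drain stand-in */
static unsigned long holdoff = -1UL;	/* rcu_dyntick_holdoff stand-in */
static unsigned long jiffies;		/* frozen clock for the demo */

/* Returns 1 to attempt another flush pass, 0 to go idle or hold off. */
static int prepare_for_idle_sim(int have_callbacks)
{
	if (!have_callbacks) {
		holdoff = jiffies - 1;		/* permit dyntick-idle at once */
		return 0;
	}
	if (holdoff == jiffies)			/* already failed this jiffy */
		return 0;
	if (drain <= 0)
		drain = RCU_NEEDS_CPU_FLUSHES;	/* first attempt: arm countdown */
	else if (--drain <= 0) {
		holdoff = jiffies;		/* limit hit: give up for now */
		return 0;
	}
	return 1;
}

int main(void)
{
	int pass = 0;

	while (prepare_for_idle_sim(1))
		printf("flush pass %d, drain now %d\n", ++pass, drain);
	printf("holdoff armed at jiffies=%lu\n", holdoff);
	return 0;
}

Running it shows exactly RCU_NEEDS_CPU_FLUSHES passes before the holdoff is armed, which is the behavior the drain/holdoff pair is meant to enforce.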
@@ -2010,10 +2043,25 @@ int rcu_needs_cpu(int cpu)
 	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
 		/* We have hit the limit, so time to give up. */
 		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
-		return rcu_needs_cpu_quick_check(cpu);
+		if (!rcu_pending(cpu)) {
+			per_cpu(rcu_awake_at_gp_end, cpu) = 1;
+			return;  /* Nothing to do immediately. */
+		}
+		invoke_rcu_core();  /* Force the CPU out of dyntick-idle. */
+		return;
 	}
 
-	/* Do one step pushing remaining RCU callbacks through. */
+	/*
+	 * Do one step of pushing the remaining RCU callbacks through
+	 * the RCU core state machine.
+	 */
+#ifdef CONFIG_TREE_PREEMPT_RCU
+	if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
+		rcu_preempt_qs(cpu);
+		force_quiescent_state(&rcu_preempt_state, 0);
+		c = c || per_cpu(rcu_preempt_data, cpu).nxtlist;
+	}
+#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
 		rcu_sched_qs(cpu);
 		force_quiescent_state(&rcu_sched_state, 0);
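
Note the accumulate-and-reinvoke pattern: each flavor with queued callbacks gets one quiescent-state report plus one force_quiescent_state() pass, and c records whether anything is still queued afterward so the core can be reinvoked. A toy sketch of just that pattern (the flavor table and its contents are made up for illustration):

#include <stdbool.h>
#include <stdio.h>

struct flavor {
	const char *name;
	int nxtlist;		/* nonzero: callbacks still queued */
};

static struct flavor flavors[] = {
	{ "rcu_preempt", 1 },	/* present only under CONFIG_TREE_PREEMPT_RCU */
	{ "rcu_sched",   0 },
	{ "rcu_bh",      1 },
};

int main(void)
{
	bool c = false;
	size_t i;

	for (i = 0; i < sizeof(flavors) / sizeof(flavors[0]); i++) {
		if (!flavors[i].nxtlist)
			continue;
		/* kernel: rcu_*_qs(cpu); force_quiescent_state(state, 0); */
		printf("pushed %s one step\n", flavors[i].name);
		c = c || flavors[i].nxtlist;	/* anything left afterward? */
	}
	if (c)
		printf("callbacks remain: would invoke_rcu_core()\n");
	return 0;
}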
@@ -2028,7 +2076,51 @@ int rcu_needs_cpu(int cpu)
 	/* If RCU callbacks are still pending, RCU still needs this CPU. */
 	if (c)
 		invoke_rcu_core();
-	return c;
 }
 
+/*
+ * Wake up a CPU by invoking the RCU core.  Intended for use by
+ * rcu_wake_cpus_for_gp_end(), which passes this function to
+ * smp_call_function_single().
+ */
+static void rcu_wake_cpu(void *unused)
+{
+	invoke_rcu_core();
+}
+
+/*
+ * If an RCU grace period ended recently, scan the rcu_awake_at_gp_end
+ * per-CPU variables, and wake up any CPUs that requested a wakeup.
+ */
+static void rcu_wake_cpus_for_gp_end(void)
+{
+	int cpu;
+	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+
+	if (!rdtp->wake_gp_end)
+		return;
+	rdtp->wake_gp_end = 0;
+	for_each_online_cpu(cpu) {
+		if (per_cpu(rcu_awake_at_gp_end, cpu)) {
+			per_cpu(rcu_awake_at_gp_end, cpu) = 0;
+			smp_call_function_single(cpu, rcu_wake_cpu, NULL, 0);
+		}
+	}
+}
+
+/*
+ * A grace period has just ended, and so we will need to awaken CPUs
+ * that now have work to do.  But we cannot send IPIs with interrupts
+ * disabled, so just set a flag so that this will happen upon exit
+ * from RCU core processing.
+ */
+static void rcu_schedule_wake_gp_end(void)
+{
+	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+
+	rdtp->wake_gp_end = 1;
+}
+
+/* @@@ need tracing as well. */
+
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
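
For the record, the whole handshake in one place: a CPU that gives up flushing sets its rcu_awake_at_gp_end flag, the CPU ending the grace period sets wake_gp_end with irqs disabled, and the softirq exit path clears the flags and sends the IPIs. A compact userspace walk-through under the assumption of four CPUs (all names are stand-ins for the kernel's per-CPU variables and functions):

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static bool awake_at_gp_end[NR_CPUS];	/* per-CPU rcu_awake_at_gp_end */
static bool wake_gp_end;		/* wake_gp_end on the GP-ending CPU */

/* A CPU gives up flushing and goes idle with callbacks still queued. */
static void cpu_goes_idle_with_callbacks(int cpu)
{
	awake_at_gp_end[cpu] = true;	/* "wake me when the GP ends" */
}

/* GP-end path, irqs disabled: rcu_schedule_wake_gp_end(). */
static void gp_ends(void)
{
	wake_gp_end = true;
}

/* Softirq exit: rcu_wake_cpus_for_gp_end() scans and "IPIs". */
static void rcu_core_exit(void)
{
	int cpu;

	if (!wake_gp_end)
		return;
	wake_gp_end = false;
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!awake_at_gp_end[cpu])
			continue;
		awake_at_gp_end[cpu] = false;
		printf("IPI cpu %d -> invoke_rcu_core()\n", cpu);
	}
}

int main(void)
{
	cpu_goes_idle_with_callbacks(2);
	gp_ends();
	rcu_core_exit();
	return 0;
}

The deferral is what lets the sleeping CPU stay in dyntick-idle for the full grace period instead of polling, at the cost of one IPI per woken CPU when the grace period completes.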