Diffstat (limited to 'kernel/rcutree_plugin.h')

 kernel/rcutree_plugin.h | 229 ++++++++++++++++++++++++++++----------------
 1 file changed, 146 insertions(+), 83 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index c023464816be..3e4899459f3d 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -969,22 +969,6 @@ static void __init __rcu_init_preempt(void)
 	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
 }
 
-/*
- * Check for a task exiting while in a preemptible-RCU read-side
- * critical section, clean up if so.  No need to issue warnings,
- * as debug_check_no_locks_held() already does this if lockdep
- * is enabled.
- */
-void exit_rcu(void)
-{
-	struct task_struct *t = current;
-
-	if (t->rcu_read_lock_nesting == 0)
-		return;
-	t->rcu_read_lock_nesting = 1;
-	__rcu_read_unlock();
-}
-
 #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 
 static struct rcu_state *rcu_state = &rcu_sched_state;
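This hunk only removes exit_rcu() from rcutree_plugin.h; the diffstat above is limited to this one file, so the presumed consolidated definition elsewhere in common code (likely kernel/rcupdate.c) is not visible in this diff.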
@@ -1910,8 +1894,9 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
  * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU
  * needs any flavor of RCU.
  */
-int rcu_needs_cpu(int cpu)
+int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 {
+	*delta_jiffies = ULONG_MAX;
 	return rcu_cpu_has_callbacks(cpu);
 }
 
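The stub above now reports, through the new delta_jiffies out-parameter, that RCU will never need the CPU (ULONG_MAX) unless it has callbacks. A minimal sketch of how a tick-stopping caller might consume the pair of return values — the function name and wakeup bookkeeping here are illustrative assumptions; the real consumer is the nohz code in kernel/time/tick-sched.c, which this diff does not show:

```c
/* Hypothetical consumer of the new rcu_needs_cpu() signature. */
static void example_stop_tick(int cpu)
{
	unsigned long rcu_delta_jiffies;
	unsigned long next_wakeup = jiffies + NEXT_TIMER_MAX_DELTA;

	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies))
		return;	/* RCU needs the tick right now; leave it running. */

	/*
	 * RCU does not need the CPU at the moment, but may again in
	 * rcu_delta_jiffies (ULONG_MAX when there are no callbacks at
	 * all).  Cap the sleep so the CPU is awake before RCU's timer
	 * would otherwise fire.
	 */
	if (rcu_delta_jiffies < next_wakeup - jiffies)
		next_wakeup = jiffies + rcu_delta_jiffies;

	/* ... program next_wakeup into the clockevent and stop the tick ... */
}
```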
@@ -1938,6 +1923,14 @@ static void rcu_prepare_for_idle(int cpu)
 {
 }
 
+/*
+ * Don't bother keeping a running count of the number of RCU callbacks
+ * posted because CONFIG_RCU_FAST_NO_HZ=n.
+ */
+static void rcu_idle_count_callbacks_posted(void)
+{
+}
+
 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
 /*
@@ -1978,30 +1971,6 @@ static void rcu_prepare_for_idle(int cpu)
 #define RCU_IDLE_GP_DELAY 6		/* Roughly one grace period. */
 #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */
 
-static DEFINE_PER_CPU(int, rcu_dyntick_drain);
-static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
-static DEFINE_PER_CPU(struct hrtimer, rcu_idle_gp_timer);
-static ktime_t rcu_idle_gp_wait;	/* If some non-lazy callbacks. */
-static ktime_t rcu_idle_lazy_gp_wait;	/* If only lazy callbacks. */
-
-/*
- * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
- * callbacks on this CPU, (2) this CPU has not yet attempted to enter
- * dyntick-idle mode, or (3) this CPU is in the process of attempting to
- * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
- * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
- * it is better to incur scheduling-clock interrupts than to spin
- * continuously for the same time duration!
- */
-int rcu_needs_cpu(int cpu)
-{
-	/* If no callbacks, RCU doesn't need the CPU. */
-	if (!rcu_cpu_has_callbacks(cpu))
-		return 0;
-	/* Otherwise, RCU needs the CPU only if it recently tried and failed. */
-	return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies;
-}
-
 /*
  * Does the specified flavor of RCU have non-lazy callbacks pending on
  * the specified CPU?  Both RCU flavor and CPU are specified by the
@@ -2045,16 +2014,75 @@ static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
 }
 
 /*
+ * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
+ * callbacks on this CPU, (2) this CPU has not yet attempted to enter
+ * dyntick-idle mode, or (3) this CPU is in the process of attempting to
+ * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
+ * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
+ * it is better to incur scheduling-clock interrupts than to spin
+ * continuously for the same time duration!
+ *
+ * The delta_jiffies argument is used to store the time when RCU is
+ * going to need the CPU again if it still has callbacks.  The reason
+ * for this is that rcu_prepare_for_idle() might need to post a timer,
+ * but if so, it will do so after tick_nohz_stop_sched_tick() has set
+ * the wakeup time for this CPU.  This means that RCU's timer can be
+ * delayed until the wakeup time, which defeats the purpose of posting
+ * a timer.
+ */
+int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
+{
+	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+	/* Flag a new idle sojourn to the idle-entry state machine. */
+	rdtp->idle_first_pass = 1;
+	/* If no callbacks, RCU doesn't need the CPU. */
+	if (!rcu_cpu_has_callbacks(cpu)) {
+		*delta_jiffies = ULONG_MAX;
+		return 0;
+	}
+	if (rdtp->dyntick_holdoff == jiffies) {
+		/* RCU recently tried and failed, so don't try again. */
+		*delta_jiffies = 1;
+		return 1;
+	}
+	/* Set up for the possibility that RCU will post a timer. */
+	if (rcu_cpu_has_nonlazy_callbacks(cpu))
+		*delta_jiffies = RCU_IDLE_GP_DELAY;
+	else
+		*delta_jiffies = RCU_IDLE_LAZY_GP_DELAY;
+	return 0;
+}
+
+/*
+ * Handler for smp_call_function_single().  The only point of this
+ * handler is to wake the CPU up, so the handler does only tracing.
+ */
+void rcu_idle_demigrate(void *unused)
+{
+	trace_rcu_prep_idle("Demigrate");
+}
+
+/*
  * Timer handler used to force CPU to start pushing its remaining RCU
  * callbacks in the case where it entered dyntick-idle mode with callbacks
  * pending.  The handler doesn't really need to do anything because the
  * real work is done upon re-entry to idle, or by the next scheduling-clock
  * interrupt should idle not be re-entered.
+ *
+ * One special case: the timer gets migrated without awakening the CPU
+ * on which the timer was scheduled.  In this case, we must wake up
+ * that CPU.  We do so with smp_call_function_single().
  */
-static enum hrtimer_restart rcu_idle_gp_timer_func(struct hrtimer *hrtp)
+static void rcu_idle_gp_timer_func(unsigned long cpu_in)
 {
+	int cpu = (int)cpu_in;
+
 	trace_rcu_prep_idle("Timer");
-	return HRTIMER_NORESTART;
+	if (cpu != smp_processor_id())
+		smp_call_function_single(cpu, rcu_idle_demigrate, NULL, 0);
+	else
+		WARN_ON_ONCE(1); /* Getting here can hang the system... */
 }
 
 /*
@@ -2062,29 +2090,25 @@ static enum hrtimer_restart rcu_idle_gp_timer_func(struct hrtimer *hrtp)
  */
 static void rcu_prepare_for_idle_init(int cpu)
 {
-	static int firsttime = 1;
-	struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);
+	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
 
-	hrtimer_init(hrtp, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	hrtp->function = rcu_idle_gp_timer_func;
-	if (firsttime) {
-		unsigned int upj = jiffies_to_usecs(RCU_IDLE_GP_DELAY);
-
-		rcu_idle_gp_wait = ns_to_ktime(upj * (u64)1000);
-		upj = jiffies_to_usecs(RCU_IDLE_LAZY_GP_DELAY);
-		rcu_idle_lazy_gp_wait = ns_to_ktime(upj * (u64)1000);
-		firsttime = 0;
-	}
+	rdtp->dyntick_holdoff = jiffies - 1;
+	setup_timer(&rdtp->idle_gp_timer, rcu_idle_gp_timer_func, cpu);
+	rdtp->idle_gp_timer_expires = jiffies - 1;
+	rdtp->idle_first_pass = 1;
 }
 
 /*
  * Clean up for exit from idle.  Because we are exiting from idle, there
- * is no longer any point to rcu_idle_gp_timer, so cancel it.  This will
+ * is no longer any point to ->idle_gp_timer, so cancel it.  This will
  * do nothing if this timer is not active, so just cancel it unconditionally.
  */
 static void rcu_cleanup_after_idle(int cpu)
 {
-	hrtimer_cancel(&per_cpu(rcu_idle_gp_timer, cpu));
+	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+	del_timer(&rdtp->idle_gp_timer);
+	trace_rcu_prep_idle("Cleanup after idle");
 }
 
 /*
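The replacement code above reads several fields through rdtp that this diff never declares; they belong to struct rcu_dynticks in kernel/rcutree.h, which the diffstat excludes. Reconstructed from their usage here — types inferred, pre-existing members elided — the additions look roughly like:

```c
struct rcu_dynticks {
	/* ... pre-existing dyntick-idle bookkeeping ... */
#ifdef CONFIG_RCU_FAST_NO_HZ
	int dyntick_drain;		     /* Prepare-for-idle state machine. */
	unsigned long dyntick_holdoff;	     /* jiffies of last holdoff. */
	struct timer_list idle_gp_timer;     /* Wake up CPU for callbacks. */
	unsigned long idle_gp_timer_expires; /* Cached expiry for reposting. */
	int idle_first_pass;		     /* First pass of this idle sojourn? */
	unsigned long nonlazy_posted;	     /* Non-lazy callbacks ever posted. */
	unsigned long nonlazy_posted_snap;   /* Snapshot at start of sojourn. */
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
};
```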
@@ -2102,19 +2126,41 @@ static void rcu_cleanup_after_idle(int cpu)
  * Because it is not legal to invoke rcu_process_callbacks() with irqs
  * disabled, we do one pass of force_quiescent_state(), then do an
  * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
- * later.  The per-cpu rcu_dyntick_drain variable controls the sequencing.
+ * later.  The ->dyntick_drain field controls the sequencing.
  *
  * The caller must have disabled interrupts.
  */
 static void rcu_prepare_for_idle(int cpu)
 {
+	struct timer_list *tp;
+	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+	/*
+	 * If this is an idle re-entry, for example, due to use of
+	 * RCU_NONIDLE() or the new idle-loop tracing API within the idle
+	 * loop, then don't take any state-machine actions, unless the
+	 * momentary exit from idle queued additional non-lazy callbacks.
+	 * Instead, repost the ->idle_gp_timer if this CPU has callbacks
+	 * pending.
+	 */
+	if (!rdtp->idle_first_pass &&
+	    (rdtp->nonlazy_posted == rdtp->nonlazy_posted_snap)) {
+		if (rcu_cpu_has_callbacks(cpu)) {
+			tp = &rdtp->idle_gp_timer;
+			mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
+		}
+		return;
+	}
+	rdtp->idle_first_pass = 0;
+	rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted - 1;
+
 	/*
 	 * If there are no callbacks on this CPU, enter dyntick-idle mode.
 	 * Also reset state to avoid prejudicing later attempts.
 	 */
 	if (!rcu_cpu_has_callbacks(cpu)) {
-		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
-		per_cpu(rcu_dyntick_drain, cpu) = 0;
+		rdtp->dyntick_holdoff = jiffies - 1;
+		rdtp->dyntick_drain = 0;
 		trace_rcu_prep_idle("No callbacks");
 		return;
 	}
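The new fast path at the top of rcu_prepare_for_idle() exists because the idle loop can bounce through RCU's idle state several times in a single sojourn. A sketch of the kind of idle-loop code that triggers it — RCU_NONIDLE() is the real macro, but the callee here is hypothetical:

```c
/*
 * From within the idle loop: RCU_NONIDLE() momentarily marks the CPU
 * non-idle so the wrapped statement may legally use RCU.  On the way
 * back into idle, rcu_prepare_for_idle() runs again with
 * ->idle_first_pass == 0 and, unless the callee posted non-lazy
 * callbacks, merely reposts ->idle_gp_timer at the cached expiry
 * instead of restarting the drain state machine.
 */
RCU_NONIDLE(do_idle_tracing());		/* hypothetical callee */
```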
@@ -2123,32 +2169,37 @@ static void rcu_prepare_for_idle(int cpu)
 	 * If in holdoff mode, just return.  We will presumably have
 	 * refrained from disabling the scheduling-clock tick.
 	 */
-	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
+	if (rdtp->dyntick_holdoff == jiffies) {
 		trace_rcu_prep_idle("In holdoff");
 		return;
 	}
 
-	/* Check and update the rcu_dyntick_drain sequencing. */
-	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
+	/* Check and update the ->dyntick_drain sequencing. */
+	if (rdtp->dyntick_drain <= 0) {
 		/* First time through, initialize the counter. */
-		per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES;
-	} else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES &&
+		rdtp->dyntick_drain = RCU_IDLE_FLUSHES;
+	} else if (rdtp->dyntick_drain <= RCU_IDLE_OPT_FLUSHES &&
 		   !rcu_pending(cpu) &&
 		   !local_softirq_pending()) {
 		/* Can we go dyntick-idle despite still having callbacks? */
-		trace_rcu_prep_idle("Dyntick with callbacks");
-		per_cpu(rcu_dyntick_drain, cpu) = 0;
-		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
-		if (rcu_cpu_has_nonlazy_callbacks(cpu))
-			hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
-				      rcu_idle_gp_wait, HRTIMER_MODE_REL);
-		else
-			hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
-				      rcu_idle_lazy_gp_wait, HRTIMER_MODE_REL);
+		rdtp->dyntick_drain = 0;
+		rdtp->dyntick_holdoff = jiffies;
+		if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
+			trace_rcu_prep_idle("Dyntick with callbacks");
+			rdtp->idle_gp_timer_expires =
+					   jiffies + RCU_IDLE_GP_DELAY;
+		} else {
+			rdtp->idle_gp_timer_expires =
+					   jiffies + RCU_IDLE_LAZY_GP_DELAY;
+			trace_rcu_prep_idle("Dyntick with lazy callbacks");
+		}
+		tp = &rdtp->idle_gp_timer;
+		mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
+		rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
 		return; /* Nothing more to do immediately. */
-	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
+	} else if (--(rdtp->dyntick_drain) <= 0) {
 		/* We have hit the limit, so time to give up. */
-		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
+		rdtp->dyntick_holdoff = jiffies;
 		trace_rcu_prep_idle("Begin holdoff");
 		invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */
 		return;
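Together with the remainder of the function in the next hunk, the ->dyntick_drain sequencing can be summarized as follows (a paraphrase of the code above, not verbatim source):

```c
/*
 * ->dyntick_drain sequencing per idle sojourn:
 *
 *   drain <= 0                     -> first attempt: arm the counter
 *                                     with RCU_IDLE_FLUSHES
 *   drain <= RCU_IDLE_OPT_FLUSHES  -> go dyntick-idle despite callbacks:
 *     && !rcu_pending()               cache the expiry, mod_timer_pinned()
 *     && !local_softirq_pending()     ->idle_gp_timer, snapshot
 *                                     ->nonlazy_posted, enter holdoff
 *   --drain reaches 0              -> give up: enter holdoff and
 *                                     invoke_rcu_core()
 *   otherwise                      -> fall through and flush callbacks
 */
```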
@@ -2184,6 +2235,19 @@ static void rcu_prepare_for_idle(int cpu)
 	trace_rcu_prep_idle("Callbacks drained");
 }
 
+/*
+ * Keep a running count of the number of non-lazy callbacks posted
+ * on this CPU.  This running counter (which is never decremented) allows
+ * rcu_prepare_for_idle() to detect when something out of the idle loop
+ * posts a callback, even if an equal number of callbacks are invoked.
+ * Of course, callbacks should only be posted from within a trace event
+ * designed to be called from idle or from within RCU_NONIDLE().
+ */
+static void rcu_idle_count_callbacks_posted(void)
+{
+	__this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
+}
+
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
 #ifdef CONFIG_RCU_CPU_STALL_INFO
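The counter above is useful only if the callback-posting path increments it; the expected hook is in __call_rcu() in kernel/rcutree.c, which the diffstat excludes. Sketched here as an assumption, not as part of this patch:

```c
/* Assumed call site in kernel/rcutree.c (outside this diff). */
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_state *rsp, bool lazy)
{
	/* ... enqueue the callback on this CPU's list ... */
	if (!lazy)
		rcu_idle_count_callbacks_posted();
	/* ... */
}
```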
@@ -2192,14 +2256,13 @@ static void rcu_prepare_for_idle(int cpu)
 
 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
 {
-	struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);
+	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+	struct timer_list *tltp = &rdtp->idle_gp_timer;
 
-	sprintf(cp, "drain=%d %c timer=%lld",
-		per_cpu(rcu_dyntick_drain, cpu),
-		per_cpu(rcu_dyntick_holdoff, cpu) == jiffies ? 'H' : '.',
-		hrtimer_active(hrtp)
-		? ktime_to_us(hrtimer_get_remaining(hrtp))
-		: -1);
+	sprintf(cp, "drain=%d %c timer=%lu",
+		rdtp->dyntick_drain,
+		rdtp->dyntick_holdoff == jiffies ? 'H' : '.',
+		timer_pending(tltp) ? tltp->expires - jiffies : -1);
 }
 
 #else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
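With the switch away from hrtimers, the CONFIG_RCU_CPU_STALL_INFO annotation now prints the drain count, an 'H' when the CPU is in holdoff this jiffy, and the jiffies remaining until ->idle_gp_timer fires, e.g. "drain=5 H timer=994" (values illustrative). One caveat worth noting: when the timer is not pending, the -1 passes through the %lu conversion and prints as a huge unsigned value (18446744073709551615 on 64-bit builds) rather than as "-1", which the old %lld format would have produced.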