Diffstat (limited to 'kernel/rcutree_plugin.h')

 -rw-r--r--   kernel/rcutree_plugin.h | 154
 1 file changed, 99 insertions(+), 55 deletions(-)

diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index c023464816be..2411000d9869 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -153,7 +153,7 @@ static void rcu_preempt_qs(int cpu)
  *
  * Caller must disable preemption.
  */
-static void rcu_preempt_note_context_switch(int cpu)
+void rcu_preempt_note_context_switch(void)
 {
         struct task_struct *t = current;
         unsigned long flags;
@@ -164,7 +164,7 @@ static void rcu_preempt_note_context_switch(int cpu)
             (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
                 /* Possibly blocking in an RCU read-side critical section. */
-                rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
+                rdp = __this_cpu_ptr(rcu_preempt_state.rda);
                 rnp = rdp->mynode;
                 raw_spin_lock_irqsave(&rnp->lock, flags);
                 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
@@ -228,7 +228,7 @@ static void rcu_preempt_note_context_switch(int cpu)
          * means that we continue to block the current grace period.
          */
         local_irq_save(flags);
-        rcu_preempt_qs(cpu);
+        rcu_preempt_qs(smp_processor_id());
         local_irq_restore(flags);
 }
 
@@ -969,22 +969,6 @@ static void __init __rcu_init_preempt(void)
         rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
 }
 
-/*
- * Check for a task exiting while in a preemptible-RCU read-side
- * critical section, clean up if so.  No need to issue warnings,
- * as debug_check_no_locks_held() already does this if lockdep
- * is enabled.
- */
-void exit_rcu(void)
-{
-        struct task_struct *t = current;
-
-        if (t->rcu_read_lock_nesting == 0)
-                return;
-        t->rcu_read_lock_nesting = 1;
-        __rcu_read_unlock();
-}
-
 #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 
 static struct rcu_state *rcu_state = &rcu_sched_state;
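The removed exit_rcu() relies on a small trick: if a task exits while still inside a read-side critical section, collapse whatever nesting remains to exactly one level and run the unlock path once, so the outermost-unlock bookkeeping still executes. Below is a minimal user-space sketch of that idea; the names (toy_read_lock() and friends) are illustrative stand-ins, not kernel APIs.

```c
/*
 * Sketch of the cleanup idea behind the removed exit_rcu(): collapse the
 * nesting count to one and invoke the unlock path a single time so the
 * "outermost unlock" bookkeeping still runs.  Illustrative names only.
 */
#include <stdio.h>

static int read_lock_nesting;	/* stand-in for t->rcu_read_lock_nesting */

static void toy_read_lock(void)
{
	read_lock_nesting++;
}

static void toy_read_unlock(void)
{
	if (--read_lock_nesting == 0)
		printf("outermost unlock: reader bookkeeping cleaned up\n");
}

/* Analogue of the removed exit_rcu(), called when the task exits. */
static void toy_exit_cleanup(void)
{
	if (read_lock_nesting == 0)
		return;			/* not in a critical section */
	read_lock_nesting = 1;		/* collapse any nesting */
	toy_read_unlock();		/* run the outermost-unlock path once */
}

int main(void)
{
	toy_read_lock();
	toy_read_lock();		/* "exits" while nested two deep */
	toy_exit_cleanup();
	printf("nesting after cleanup: %d\n", read_lock_nesting);
	return 0;
}
```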
@@ -1018,14 +1002,6 @@ void rcu_force_quiescent_state(void)
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 
 /*
- * Because preemptible RCU does not exist, we never have to check for
- * CPUs being in quiescent states.
- */
-static void rcu_preempt_note_context_switch(int cpu)
-{
-}
-
-/*
  * Because preemptible RCU does not exist, there are never any preempted
  * RCU readers.
  */
@@ -1938,6 +1914,14 @@ static void rcu_prepare_for_idle(int cpu)
 {
 }
 
+/*
+ * Don't bother keeping a running count of the number of RCU callbacks
+ * posted because CONFIG_RCU_FAST_NO_HZ=n.
+ */
+static void rcu_idle_count_callbacks_posted(void)
+{
+}
+
 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
 /*
@@ -1978,11 +1962,20 @@ static void rcu_prepare_for_idle(int cpu)
 #define RCU_IDLE_GP_DELAY 6		/* Roughly one grace period. */
 #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */
 
+/* Loop counter for rcu_prepare_for_idle(). */
 static DEFINE_PER_CPU(int, rcu_dyntick_drain);
+/* If rcu_dyntick_holdoff==jiffies, don't try to enter dyntick-idle mode. */
 static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
-static DEFINE_PER_CPU(struct hrtimer, rcu_idle_gp_timer);
-static ktime_t rcu_idle_gp_wait;	/* If some non-lazy callbacks. */
-static ktime_t rcu_idle_lazy_gp_wait;	/* If only lazy callbacks. */
+/* Timer to awaken the CPU if it enters dyntick-idle mode with callbacks. */
+static DEFINE_PER_CPU(struct timer_list, rcu_idle_gp_timer);
+/* Scheduled expiry time for rcu_idle_gp_timer to allow reposting. */
+static DEFINE_PER_CPU(unsigned long, rcu_idle_gp_timer_expires);
+/* Enable special processing on first attempt to enter dyntick-idle mode. */
+static DEFINE_PER_CPU(bool, rcu_idle_first_pass);
+/* Running count of non-lazy callbacks posted, never decremented. */
+static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted);
+/* Snapshot of rcu_nonlazy_posted to detect meaningful exits from idle. */
+static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted_snap);
 
 /*
  * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
@@ -1995,6 +1988,8 @@ static ktime_t rcu_idle_lazy_gp_wait;	/* If only lazy callbacks. */
  */
 int rcu_needs_cpu(int cpu)
 {
+        /* Flag a new idle sojourn to the idle-entry state machine. */
+        per_cpu(rcu_idle_first_pass, cpu) = 1;
         /* If no callbacks, RCU doesn't need the CPU. */
         if (!rcu_cpu_has_callbacks(cpu))
                 return 0;
@@ -2045,16 +2040,34 @@ static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
 }
 
 /*
+ * Handler for smp_call_function_single().  The only point of this
+ * handler is to wake the CPU up, so the handler does only tracing.
+ */
+void rcu_idle_demigrate(void *unused)
+{
+        trace_rcu_prep_idle("Demigrate");
+}
+
+/*
  * Timer handler used to force CPU to start pushing its remaining RCU
  * callbacks in the case where it entered dyntick-idle mode with callbacks
  * pending.  The handler doesn't really need to do anything because the
  * real work is done upon re-entry to idle, or by the next scheduling-clock
  * interrupt should idle not be re-entered.
+ *
+ * One special case: the timer gets migrated without awakening the CPU
+ * on which the timer was scheduled.  In this case, we must wake up
+ * that CPU.  We do so with smp_call_function_single().
  */
-static enum hrtimer_restart rcu_idle_gp_timer_func(struct hrtimer *hrtp)
+static void rcu_idle_gp_timer_func(unsigned long cpu_in)
 {
+        int cpu = (int)cpu_in;
+
         trace_rcu_prep_idle("Timer");
-        return HRTIMER_NORESTART;
+        if (cpu != smp_processor_id())
+                smp_call_function_single(cpu, rcu_idle_demigrate, NULL, 0);
+        else
+                WARN_ON_ONCE(1); /* Getting here can hang the system... */
 }
 
 /*
@@ -2062,19 +2075,11 @@ static enum hrtimer_restart rcu_idle_gp_timer_func(struct hrtimer *hrtp)
  */
 static void rcu_prepare_for_idle_init(int cpu)
 {
-        static int firsttime = 1;
-        struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);
-
-        hrtimer_init(hrtp, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-        hrtp->function = rcu_idle_gp_timer_func;
-        if (firsttime) {
-                unsigned int upj = jiffies_to_usecs(RCU_IDLE_GP_DELAY);
-
-                rcu_idle_gp_wait = ns_to_ktime(upj * (u64)1000);
-                upj = jiffies_to_usecs(RCU_IDLE_LAZY_GP_DELAY);
-                rcu_idle_lazy_gp_wait = ns_to_ktime(upj * (u64)1000);
-                firsttime = 0;
-        }
+        per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
+        setup_timer(&per_cpu(rcu_idle_gp_timer, cpu),
+                    rcu_idle_gp_timer_func, cpu);
+        per_cpu(rcu_idle_gp_timer_expires, cpu) = jiffies - 1;
+        per_cpu(rcu_idle_first_pass, cpu) = 1;
 }
 
 /*
@@ -2084,7 +2089,8 @@ static void rcu_prepare_for_idle_init(int cpu)
  */
 static void rcu_cleanup_after_idle(int cpu)
 {
-        hrtimer_cancel(&per_cpu(rcu_idle_gp_timer, cpu));
+        del_timer(&per_cpu(rcu_idle_gp_timer, cpu));
+        trace_rcu_prep_idle("Cleanup after idle");
 }
 
 /*
@@ -2108,6 +2114,29 @@ static void rcu_cleanup_after_idle(int cpu)
  */
 static void rcu_prepare_for_idle(int cpu)
 {
+        struct timer_list *tp;
+
+        /*
+         * If this is an idle re-entry, for example, due to use of
+         * RCU_NONIDLE() or the new idle-loop tracing API within the idle
+         * loop, then don't take any state-machine actions, unless the
+         * momentary exit from idle queued additional non-lazy callbacks.
+         * Instead, repost the rcu_idle_gp_timer if this CPU has callbacks
+         * pending.
+         */
+        if (!per_cpu(rcu_idle_first_pass, cpu) &&
+            (per_cpu(rcu_nonlazy_posted, cpu) ==
+             per_cpu(rcu_nonlazy_posted_snap, cpu))) {
+                if (rcu_cpu_has_callbacks(cpu)) {
+                        tp = &per_cpu(rcu_idle_gp_timer, cpu);
+                        mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu));
+                }
+                return;
+        }
+        per_cpu(rcu_idle_first_pass, cpu) = 0;
+        per_cpu(rcu_nonlazy_posted_snap, cpu) =
+                per_cpu(rcu_nonlazy_posted, cpu) - 1;
+
         /*
          * If there are no callbacks on this CPU, enter dyntick-idle mode.
          * Also reset state to avoid prejudicing later attempts.
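The re-entry check added above leans on a counter-plus-snapshot pattern: rcu_nonlazy_posted only ever increases, so comparing it against the snapshot taken on the previous pass reveals whether anything new was posted in between, even if an equal number of callbacks were invoked meanwhile. A self-contained sketch of that pattern, assuming nothing about the kernel's per-CPU machinery and simplifying the snapshot bookkeeping:

```c
/*
 * Counter-plus-snapshot sketch: a never-decremented posting count lets the
 * idle path tell whether anything new arrived since its last pass.
 * User-space toy; names are illustrative, not the kernel's.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned long nonlazy_posted;      /* monotonically increasing */
static unsigned long nonlazy_posted_snap; /* value seen on the last pass */
static bool first_pass = true;

static void post_callback(void)
{
	nonlazy_posted++;	/* never decremented, even once invoked */
}

/* Returns true when the idle state machine can be skipped on re-entry. */
static bool idle_reentry_can_be_skipped(void)
{
	if (!first_pass && nonlazy_posted == nonlazy_posted_snap)
		return true;	/* nothing new since the previous pass */
	first_pass = false;
	nonlazy_posted_snap = nonlazy_posted;
	return false;
}

int main(void)
{
	printf("first pass skipped?  %d\n", idle_reentry_can_be_skipped());
	printf("idle re-entry skipped? %d\n", idle_reentry_can_be_skipped());
	post_callback();	/* something posted during a momentary exit */
	printf("after new posting?   %d\n", idle_reentry_can_be_skipped());
	return 0;
}
```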
@@ -2140,11 +2169,15 @@ static void rcu_prepare_for_idle(int cpu)
                 per_cpu(rcu_dyntick_drain, cpu) = 0;
                 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
                 if (rcu_cpu_has_nonlazy_callbacks(cpu))
-                        hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
-                                      rcu_idle_gp_wait, HRTIMER_MODE_REL);
+                        per_cpu(rcu_idle_gp_timer_expires, cpu) =
+                                jiffies + RCU_IDLE_GP_DELAY;
                 else
-                        hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
-                                      rcu_idle_lazy_gp_wait, HRTIMER_MODE_REL);
+                        per_cpu(rcu_idle_gp_timer_expires, cpu) =
+                                jiffies + RCU_IDLE_LAZY_GP_DELAY;
+                tp = &per_cpu(rcu_idle_gp_timer, cpu);
+                mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu));
+                per_cpu(rcu_nonlazy_posted_snap, cpu) =
+                        per_cpu(rcu_nonlazy_posted, cpu);
                 return; /* Nothing more to do immediately. */
         } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
                 /* We have hit the limit, so time to give up. */
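Note that the expiry is computed once, stored in rcu_idle_gp_timer_expires, and handed back to mod_timer_pinned() on idle re-entry, so the wakeup deadline stays fixed instead of sliding forward to "now + delay" each time idle is re-entered. A toy illustration of that design choice, using a plain counter in place of jiffies and no real timer API:

```c
/*
 * Sketch of the fixed-deadline idea: decide the absolute expiry once and
 * re-arm to that same value on re-entry, so repeated exits from idle
 * cannot keep pushing the wakeup further into the future.  Toy code;
 * GP_DELAY and the tick values are made up.
 */
#include <stdio.h>

#define GP_DELAY 6UL			/* ticks, like RCU_IDLE_GP_DELAY */

static unsigned long now;		/* stand-in for jiffies */
static unsigned long timer_expires;	/* stored absolute deadline */

static void first_idle_entry(void)
{
	timer_expires = now + GP_DELAY;	/* decided exactly once */
	printf("armed for tick %lu\n", timer_expires);
}

static void idle_reentry(void)
{
	/* Re-arm to the stored deadline, not to now + GP_DELAY. */
	printf("re-armed for tick %lu (now=%lu)\n", timer_expires, now);
}

int main(void)
{
	now = 100;
	first_idle_entry();	/* deadline = 106 */
	now = 103;
	idle_reentry();		/* still 106, not 109 */
	return 0;
}
```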
@@ -2184,6 +2217,19 @@ static void rcu_prepare_for_idle(int cpu)
         trace_rcu_prep_idle("Callbacks drained");
 }
 
+/*
+ * Keep a running count of the number of non-lazy callbacks posted
+ * on this CPU.  This running counter (which is never decremented) allows
+ * rcu_prepare_for_idle() to detect when something out of the idle loop
+ * posts a callback, even if an equal number of callbacks are invoked.
+ * Of course, callbacks should only be posted from within a trace event
+ * designed to be called from idle or from within RCU_NONIDLE().
+ */
+static void rcu_idle_count_callbacks_posted(void)
+{
+        __this_cpu_add(rcu_nonlazy_posted, 1);
+}
+
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
 #ifdef CONFIG_RCU_CPU_STALL_INFO
@@ -2192,14 +2238,12 @@ static void rcu_prepare_for_idle(int cpu)
 
 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
 {
-        struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);
+        struct timer_list *tltp = &per_cpu(rcu_idle_gp_timer, cpu);
 
-        sprintf(cp, "drain=%d %c timer=%lld",
+        sprintf(cp, "drain=%d %c timer=%lu",
                 per_cpu(rcu_dyntick_drain, cpu),
                 per_cpu(rcu_dyntick_holdoff, cpu) == jiffies ? 'H' : '.',
-                hrtimer_active(hrtp)
-                ? ktime_to_us(hrtimer_get_remaining(hrtp))
-                : -1);
+                timer_pending(tltp) ? tltp->expires - jiffies : -1);
 }
 
 #else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
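The new stall printout reports the remaining time as tltp->expires - jiffies when the timer is pending. With unsigned tick counters that subtraction stays correct across counter wraparound, and the -1 sentinel prints as ULONG_MAX under %lu. A small stand-alone sketch of that arithmetic, with made-up tick values:

```c
/*
 * Sketch of the stall-printout arithmetic: "expires - now" on unsigned
 * ticks gives the remaining time even across wraparound, and a sentinel
 * is printed when no timer is pending.  User-space toy, not kernel code.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

static void print_timer_state(char *cp, bool pending,
			      unsigned long expires, unsigned long now)
{
	sprintf(cp, "timer=%lu", pending ? expires - now : (unsigned long)-1);
}

int main(void)
{
	char buf[64];

	print_timer_state(buf, true, 106UL, 100UL);
	printf("%s\n", buf);		/* timer=6 */

	/* Wraparound: "now" near ULONG_MAX, expiry already wrapped past 0. */
	print_timer_state(buf, true, 3UL, ULONG_MAX - 2UL);
	printf("%s\n", buf);		/* still timer=6 */

	print_timer_state(buf, false, 0UL, 100UL);
	printf("%s\n", buf);		/* sentinel: ULONG_MAX */
	return 0;
}
```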
