path: root/kernel/rcutree.h
author    Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
committer Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
commit    8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree      a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /kernel/rcutree.h
parent    406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'kernel/rcutree.h')
-rw-r--r--  kernel/rcutree.h | 251
1 file changed, 77 insertions(+), 174 deletions(-)
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 4b69291b093..01b2ccda26f 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -29,41 +29,45 @@
 #include <linux/seqlock.h>
 
 /*
- * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
- * CONFIG_RCU_FANOUT_LEAF.
+ * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT.
  * In theory, it should be possible to add more levels straightforwardly.
  * In practice, this did work well going from three levels to four.
  * Of course, your mileage may vary.
  */
 #define MAX_RCU_LVLS 4
-#define RCU_FANOUT_1	      (CONFIG_RCU_FANOUT_LEAF)
+#if CONFIG_RCU_FANOUT > 16
+#define RCU_FANOUT_LEAF       16
+#else /* #if CONFIG_RCU_FANOUT > 16 */
+#define RCU_FANOUT_LEAF       (CONFIG_RCU_FANOUT)
+#endif /* #else #if CONFIG_RCU_FANOUT > 16 */
+#define RCU_FANOUT_1	      (RCU_FANOUT_LEAF)
 #define RCU_FANOUT_2	      (RCU_FANOUT_1 * CONFIG_RCU_FANOUT)
 #define RCU_FANOUT_3	      (RCU_FANOUT_2 * CONFIG_RCU_FANOUT)
 #define RCU_FANOUT_4	      (RCU_FANOUT_3 * CONFIG_RCU_FANOUT)
 
 #if NR_CPUS <= RCU_FANOUT_1
-#  define RCU_NUM_LVLS	      1
+#  define NUM_RCU_LVLS	      1
 #  define NUM_RCU_LVL_0	      1
 #  define NUM_RCU_LVL_1	      (NR_CPUS)
 #  define NUM_RCU_LVL_2	      0
 #  define NUM_RCU_LVL_3	      0
 #  define NUM_RCU_LVL_4	      0
 #elif NR_CPUS <= RCU_FANOUT_2
-#  define RCU_NUM_LVLS	      2
+#  define NUM_RCU_LVLS	      2
 #  define NUM_RCU_LVL_0	      1
 #  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
 #  define NUM_RCU_LVL_2	      (NR_CPUS)
 #  define NUM_RCU_LVL_3	      0
 #  define NUM_RCU_LVL_4	      0
 #elif NR_CPUS <= RCU_FANOUT_3
-#  define RCU_NUM_LVLS	      3
+#  define NUM_RCU_LVLS	      3
 #  define NUM_RCU_LVL_0	      1
 #  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
 #  define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
 #  define NUM_RCU_LVL_3	      (NR_CPUS)
 #  define NUM_RCU_LVL_4	      0
 #elif NR_CPUS <= RCU_FANOUT_4
-#  define RCU_NUM_LVLS	      4
+#  define NUM_RCU_LVLS	      4
 #  define NUM_RCU_LVL_0	      1
 #  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
 #  define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
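The fanout macros above fix the tree geometry at compile time: each level multiplies capacity by CONFIG_RCU_FANOUT, and NUM_RCU_LVL_n counts the rcu_node structures needed at level n. A minimal userspace sketch of that arithmetic (nr_cpus and fanout below are hypothetical stand-ins for NR_CPUS and CONFIG_RCU_FANOUT):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            const long nr_cpus = 4096;   /* stand-in for NR_CPUS */
            const long fanout = 16;      /* stand-in for CONFIG_RCU_FANOUT */
            const long leaf = fanout > 16 ? 16 : fanout; /* RCU_FANOUT_LEAF */
            long cap[5] = { 1, leaf, leaf * fanout, leaf * fanout * fanout,
                            leaf * fanout * fanout * fanout };
            long sum = 0;
            int lvls = 1;

            /* Find NUM_RCU_LVLS, capped at MAX_RCU_LVLS levels. */
            while (lvls < 4 && nr_cpus > cap[lvls])
                    lvls++;

            /* NUM_RCU_LVL_n, root (level 0) downward. */
            for (int i = 0; i < lvls; i++) {
                    long n = DIV_ROUND_UP(nr_cpus, cap[lvls - i]);
                    printf("NUM_RCU_LVL_%d = %ld\n", i, n);
                    sum += n;
            }
            /* The last level is the CPUs themselves, not rcu_node structures. */
            printf("NUM_RCU_NODES = %ld\n", sum);
            return 0;
    }

For these sample values this prints a three-level tree of 1 root, 16 interior nodes, and 256 leaves, so NUM_RCU_NODES = 273, while RCU_SUM additionally counts the 4096 CPUs.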
@@ -76,36 +80,13 @@
 #define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3 + NUM_RCU_LVL_4)
 #define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
 
-extern int rcu_num_lvls;
-extern int rcu_num_nodes;
-
 /*
  * Dynticks per-CPU state.
  */
 struct rcu_dynticks {
-	long long dynticks_nesting;	/* Track irq/process nesting level. */
-					/* Process level is worth LLONG_MAX/2. */
-	int dynticks_nmi_nesting;	/* Track NMI nesting level. */
-	atomic_t dynticks;		/* Even value for idle, else odd. */
-#ifdef CONFIG_RCU_FAST_NO_HZ
-	int dyntick_drain;		/* Prepare-for-idle state variable. */
-	unsigned long dyntick_holdoff;
-					/* No retries for the jiffy of failure. */
-	struct timer_list idle_gp_timer;
-					/* Wake up CPU sleeping with callbacks. */
-	unsigned long idle_gp_timer_expires;
-					/* When to wake up CPU (for repost). */
-	bool idle_first_pass;		/* First pass of attempt to go idle? */
-	unsigned long nonlazy_posted;
-					/* # times non-lazy CBs posted to CPU. */
-	unsigned long nonlazy_posted_snap;
-					/* idle-period nonlazy_posted snapshot. */
-	int tick_nohz_enabled_snap;	/* Previously seen value from sysfs. */
-#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
-#ifdef CONFIG_RCU_USER_QS
-	bool ignore_user_qs;		/* Treat userspace as extended QS or not */
-	bool in_user;			/* Is the CPU in userland from RCU POV? */
-#endif
+	int dynticks_nesting;		/* Track irq/process nesting level. */
+	int dynticks_nmi_nesting;	/* Track NMI nesting level. */
+	atomic_t dynticks;		/* Even value for dynticks-idle, else odd. */
 };
 
 /* RCU's kthread states for tracing. */
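The atomic dynticks counter in the struct above follows an even/odd protocol: it is incremented on every transition into or out of dynticks-idle, so an even value means the CPU is idle from RCU's point of view. A rough userspace sketch using C11 atomics (the kernel uses atomic_t with explicit memory barriers; the function names here are illustrative only):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Bumped on every idle transition: even = dynticks-idle, odd = active. */
    static atomic_int dynticks;                 /* starts even: idle */

    static void idle_exit_sketch(void)  { atomic_fetch_add(&dynticks, 1); }
    static void idle_enter_sketch(void) { atomic_fetch_add(&dynticks, 1); }

    /* A grace-period scan on another CPU snapshots the counter; if the
     * snapshot was even, or the counter has since changed, this CPU was
     * (or passed through) a quiescent state. */
    static bool observed_qs(int snap)
    {
            return (snap & 1) == 0 || atomic_load(&dynticks) != snap;
    }

    int main(void)
    {
            idle_exit_sketch();                 /* CPU becomes active (odd) */
            int snap = atomic_load(&dynticks);  /* like rdp->dynticks_snap */
            idle_enter_sketch();                /* CPU goes idle (even) */
            printf("QS observed: %d\n", observed_qs(snap));
            return 0;
    }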
@@ -200,7 +181,12 @@ struct rcu_node {
 				/* Refused to boost: not sure why, though. */
 				/* This can happen due to race conditions. */
 #endif /* #ifdef CONFIG_RCU_BOOST */
-	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;
+	struct task_struct *node_kthread_task;
+				/* kthread that takes care of this rcu_node */
+				/* structure, for example, awakening the */
+				/* per-CPU kthreads as needed. */
+	unsigned int node_kthread_status;
+				/* State of node_kthread_task for tracing. */
 } ____cacheline_internodealigned_in_smp;
 
 /*
@@ -209,7 +195,7 @@ struct rcu_node {
  */
 #define rcu_for_each_node_breadth_first(rsp, rnp) \
 	for ((rnp) = &(rsp)->node[0]; \
-	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
+	     (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
 
 /*
  * Do a breadth-first scan of the non-leaf rcu_node structures for the
@@ -218,7 +204,7 @@ struct rcu_node {
 
 #define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
 	for ((rnp) = &(rsp)->node[0]; \
-	     (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)
+	     (rnp) < (rsp)->level[NUM_RCU_LVLS - 1]; (rnp)++)
 
 /*
  * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
@@ -227,8 +213,8 @@ struct rcu_node {
  * It is still a leaf node, even if it is also the root node.
  */
 #define rcu_for_each_leaf_node(rsp, rnp) \
-	for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
-	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
+	for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \
+	     (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
 
 /* Index values for nxttail array in struct rcu_data. */
 #define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */
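All three traversal macros rely on the same invariant: rcu_state's node[] array stores the hierarchy level by level with the root first, and level[] points at the first node of each level, so a linear pointer walk over node[] is a breadth-first scan and the leaves are simply the tail of the array. A hypothetical userspace mock of that layout:

    #include <stdio.h>

    #define NUM_LVLS  2
    #define NUM_NODES 5                    /* 1 root + 4 leaves, say */

    struct node { int id; };

    struct state {
            struct node node[NUM_NODES];   /* level 0 first, then level 1 */
            struct node *level[NUM_LVLS];  /* first node of each level */
    };

    int main(void)
    {
            struct state s;
            for (int i = 0; i < NUM_NODES; i++)
                    s.node[i].id = i;
            s.level[0] = &s.node[0];       /* root */
            s.level[1] = &s.node[1];       /* leaves start here */

            /* Breadth-first scan == linear scan, thanks to the layout. */
            for (struct node *n = &s.node[0]; n < &s.node[NUM_NODES]; n++)
                    printf("visit %d\n", n->id);

            /* Leaf-only scan starts at the last level, as in
             * rcu_for_each_leaf_node(). */
            for (struct node *n = s.level[NUM_LVLS - 1];
                 n < &s.node[NUM_NODES]; n++)
                    printf("leaf %d\n", n->id);
            return 0;
    }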
@@ -244,18 +230,14 @@ struct rcu_data {
 					/* in order to detect GP end. */
 	unsigned long	gpnum;		/* Highest gp number that this CPU */
 					/* is aware of having started. */
-	bool		passed_quiesce;	/* User-mode/idle loop etc. */
+	unsigned long	passed_quiesc_completed;
+					/* Value of completed at time of qs. */
+	bool		passed_quiesc;	/* User-mode/idle loop etc. */
 	bool		qs_pending;	/* Core waits for quiesc state. */
 	bool		beenonline;	/* CPU online at least once. */
 	bool		preemptible;	/* Preemptible RCU? */
 	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
 	unsigned long	grpmask;	/* Mask to apply to leaf qsmask. */
-#ifdef CONFIG_RCU_CPU_STALL_INFO
-	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
-					/* ticks this CPU has handled */
-					/* during and after the last grace */
-					/* period it is aware of. */
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
 
 	/* 2) batch handling */
 	/*
@@ -282,25 +264,28 @@ struct rcu_data {
 	 */
 	struct rcu_head *nxtlist;
 	struct rcu_head **nxttail[RCU_NEXT_SIZE];
-	long		qlen_lazy;	/* # of lazy queued callbacks */
-	long		qlen;		/* # of queued callbacks, incl lazy */
+	long		qlen;		/* # of queued callbacks */
 	long		qlen_last_fqs_check;
 					/* qlen at last check for QS forcing */
 	unsigned long	n_cbs_invoked;	/* count of RCU cbs invoked. */
-	unsigned long	n_nocbs_invoked; /* count of no-CBs RCU cbs invoked. */
 	unsigned long	n_cbs_orphaned;	/* RCU cbs orphaned by dying CPU */
 	unsigned long	n_cbs_adopted;	/* RCU cbs adopted from dying CPU */
 	unsigned long	n_force_qs_snap;
 					/* did other CPU force QS recently? */
 	long		blimit;		/* Upper limit on a processed batch */
 
+#ifdef CONFIG_NO_HZ
 	/* 3) dynticks interface. */
 	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
 	int		dynticks_snap;	/* Per-GP tracking for dynticks. */
+#endif /* #ifdef CONFIG_NO_HZ */
 
 	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
+#ifdef CONFIG_NO_HZ
 	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
+#endif /* #ifdef CONFIG_NO_HZ */
 	unsigned long offline_fqs;	/* Kicked due to being offline. */
+	unsigned long resched_ipi;	/* Sent a resched IPI. */
 
 	/* 5) __rcu_pending() statistics. */
 	unsigned long n_rcu_pending;	/* rcu_pending() calls since boot. */
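The nxtlist/nxttail[] pair above implements a segmented singly linked callback list: nxttail[i] points at the tail pointer of segment i (the RCU_*_TAIL indices defined earlier), so callbacks advance between segments by copying tail pointers rather than moving list nodes. A simplified sketch (the struct and function names are illustrative, not the kernel's):

    #include <stddef.h>
    #include <stdio.h>

    struct cb { struct cb *next; };             /* simplified rcu_head */

    #define RCU_DONE_TAIL        0
    #define RCU_WAIT_TAIL        1
    #define RCU_NEXT_READY_TAIL  2
    #define RCU_NEXT_TAIL        3
    #define RCU_NEXT_SIZE        4

    static struct cb *nxtlist;
    static struct cb **nxttail[RCU_NEXT_SIZE];

    static void init_segments(void)
    {
            nxtlist = NULL;
            for (int i = 0; i < RCU_NEXT_SIZE; i++)
                    nxttail[i] = &nxtlist;      /* all segments empty */
    }

    /* New callbacks always enter the RCU_NEXT segment. */
    static void enqueue(struct cb *rhp)
    {
            rhp->next = NULL;
            *nxttail[RCU_NEXT_TAIL] = rhp;
            nxttail[RCU_NEXT_TAIL] = &rhp->next;
    }

    int main(void)
    {
            struct cb a, b;

            init_segments();
            enqueue(&a);
            enqueue(&b);
            /* Advancing a grace period only copies tail pointers between
             * segments; the callbacks themselves never move. */
            printf("list ok: %d\n", nxtlist == &a && a.next == &b);
            return 0;
    }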
@@ -310,36 +295,22 @@ struct rcu_data {
 	unsigned long n_rp_cpu_needs_gp;
 	unsigned long n_rp_gp_completed;
 	unsigned long n_rp_gp_started;
+	unsigned long n_rp_need_fqs;
 	unsigned long n_rp_need_nothing;
 
-	/* 6) _rcu_barrier() and OOM callbacks. */
-	struct rcu_head barrier_head;
-#ifdef CONFIG_RCU_FAST_NO_HZ
-	struct rcu_head oom_head;
-#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
-
-	/* 7) Callback offloading. */
-#ifdef CONFIG_RCU_NOCB_CPU
-	struct rcu_head *nocb_head;	/* CBs waiting for kthread. */
-	struct rcu_head **nocb_tail;
-	atomic_long_t	nocb_q_count;	/* # CBs waiting for kthread */
-	atomic_long_t	nocb_q_count_lazy; /* (approximate). */
-	int		nocb_p_count;	/* # CBs being invoked by kthread */
-	int		nocb_p_count_lazy; /* (approximate). */
-	wait_queue_head_t nocb_wq;	/* For nocb kthreads to sleep on. */
-	struct task_struct *nocb_kthread;
-#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
-
 	int cpu;
-	struct rcu_state *rsp;
 };
 
-/* Values for fqs_state field in struct rcu_state. */
+/* Values for signaled field in struct rcu_state. */
 #define RCU_GP_IDLE		0	/* No grace period in progress. */
 #define RCU_GP_INIT		1	/* Grace period being initialized. */
 #define RCU_SAVE_DYNTICK	2	/* Need to scan dyntick state. */
 #define RCU_FORCE_QS		3	/* Need to force quiescent state. */
+#ifdef CONFIG_NO_HZ
 #define RCU_SIGNAL_INIT		RCU_SAVE_DYNTICK
+#else /* #ifdef CONFIG_NO_HZ */
+#define RCU_SIGNAL_INIT		RCU_FORCE_QS
+#endif /* #else #ifdef CONFIG_NO_HZ */
 
 #define RCU_JIFFIES_TILL_FORCE_QS	3	/* for rsp->jiffies_force_qs */
 
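These values form a small state machine for the signaled field: with CONFIG_NO_HZ, quiescent-state forcing must first snapshot dyntick state (RCU_SAVE_DYNTICK) before escalating to RCU_FORCE_QS; without it, forcing can start at RCU_FORCE_QS directly, which is what the RCU_SIGNAL_INIT selection above encodes. A toy illustration of that selection (the transition loop is a sketch, not the kernel's control flow):

    #include <stdio.h>

    enum { GP_IDLE, GP_INIT, SAVE_DYNTICK, FORCE_QS }; /* mirrors the defines */

    static const char *name[] = {
            "RCU_GP_IDLE", "RCU_GP_INIT", "RCU_SAVE_DYNTICK", "RCU_FORCE_QS"
    };

    int main(void)
    {
            int no_hz = 1;          /* stand-in for CONFIG_NO_HZ */
            /* RCU_SIGNAL_INIT: the state at which a new grace period
             * begins quiescent-state forcing. */
            int s = no_hz ? SAVE_DYNTICK : FORCE_QS;

            printf("forcing starts at %s\n", name[s]);
            while (s < FORCE_QS)    /* escalate until QS are forced */
                    printf("-> %s\n", name[++s]);
            return 0;
    }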
@@ -348,6 +319,12 @@ struct rcu_data {
 #else
 #define RCU_STALL_DELAY_DELTA	0
 #endif
+
+#define RCU_SECONDS_TILL_STALL_CHECK	(CONFIG_RCU_CPU_STALL_TIMEOUT * HZ + \
+					 RCU_STALL_DELAY_DELTA)
+						/* for rsp->jiffies_stall */
+#define RCU_SECONDS_TILL_STALL_RECHECK	(3 * RCU_SECONDS_TILL_STALL_CHECK + 30)
+						/* for rsp->jiffies_stall */
 #define RCU_STALL_RAT_DELAY		2	/* Allow other CPUs time */
 						/* to take at least one */
 						/* scheduling clock irq */
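The stall-check macros are plain jiffies arithmetic. With hypothetical sample values of CONFIG_RCU_CPU_STALL_TIMEOUT=60 and HZ=100, and no debugging delta, the intervals work out as below:

    #include <stdio.h>

    int main(void)
    {
            long timeout = 60;  /* CONFIG_RCU_CPU_STALL_TIMEOUT, seconds */
            long hz = 100;      /* HZ, scheduling-clock ticks per second */
            long delta = 0;     /* RCU_STALL_DELAY_DELTA, non-debug case */

            long check = timeout * hz + delta;     /* ...TILL_STALL_CHECK */
            long recheck = 3 * check + 30;         /* ...TILL_STALL_RECHECK */
            printf("first stall check : %ld jiffies (%ld s)\n",
                   check, check / hz);
            printf("subsequent recheck: %ld jiffies\n", recheck);
            return 0;
    }

That is 6000 jiffies (60 seconds) to the first check and 18030 jiffies before a recheck of the same stall.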
@@ -376,65 +353,32 @@ do { \
  */
 struct rcu_state {
 	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
-	struct rcu_node *level[RCU_NUM_LVLS];	/* Hierarchy levels. */
+	struct rcu_node *level[NUM_RCU_LVLS];	/* Hierarchy levels. */
 	u32 levelcnt[MAX_RCU_LVLS + 1];		/* # nodes in each level. */
-	u8 levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
+	u8 levelspread[NUM_RCU_LVLS];		/* kids/node in each level. */
 	struct rcu_data __percpu *rda;		/* pointer of percu rcu_data. */
-	void (*call)(struct rcu_head *head,	/* call_rcu() flavor. */
-		     void (*func)(struct rcu_head *head));
-#ifdef CONFIG_RCU_NOCB_CPU
-	void (*call_remote)(struct rcu_head *head,
-		     void (*func)(struct rcu_head *head));
-						/* call_rcu() flavor, but for */
-						/* placing on remote CPU. */
-#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 
 	/* The following fields are guarded by the root rcu_node's lock. */
 
-	u8 fqs_state ____cacheline_internodealigned_in_smp;
+	u8 signaled ____cacheline_internodealigned_in_smp;
 						/* Force QS state. */
+	u8 fqs_active;				/* force_quiescent_state() */
+						/* is running. */
+	u8 fqs_need_gp;				/* A CPU was prevented from */
+						/* starting a new grace */
+						/* period because */
+						/* force_quiescent_state() */
+						/* was running. */
 	u8 boost;				/* Subject to priority boost. */
 	unsigned long gpnum;			/* Current gp number. */
 	unsigned long completed;		/* # of last completed gp. */
-	struct task_struct *gp_kthread;		/* Task for grace periods. */
-	wait_queue_head_t gp_wq;		/* Where GP task waits. */
-	int gp_flags;				/* Commands for GP task. */
 
 	/* End of fields guarded by root rcu_node's lock. */
 
-	raw_spinlock_t orphan_lock ____cacheline_internodealigned_in_smp;
-						/* Protect following fields. */
-	struct rcu_head *orphan_nxtlist;	/* Orphaned callbacks that */
-						/* need a grace period. */
-	struct rcu_head **orphan_nxttail;	/* Tail of above. */
-	struct rcu_head *orphan_donelist;	/* Orphaned callbacks that */
-						/* are ready to invoke. */
-	struct rcu_head **orphan_donetail;	/* Tail of above. */
-	long qlen_lazy;				/* Number of lazy callbacks. */
-	long qlen;				/* Total number of callbacks. */
-						/* End of fields guarded by orphan_lock. */
-
-	struct mutex onoff_mutex;		/* Coordinate hotplug & GPs. */
-
-	struct mutex barrier_mutex;		/* Guards barrier fields. */
-	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
-	struct completion barrier_completion;	/* Wake at barrier end. */
-	unsigned long n_barrier_done;		/* ++ at start and end of */
-						/* _rcu_barrier(). */
-						/* End of fields guarded by barrier_mutex. */
-
-	atomic_long_t expedited_start;		/* Starting ticket. */
-	atomic_long_t expedited_done;		/* Done ticket. */
-	atomic_long_t expedited_wrap;		/* # near-wrap incidents. */
-	atomic_long_t expedited_tryfail;	/* # acquisition failures. */
-	atomic_long_t expedited_workdone1;	/* # done by others #1. */
-	atomic_long_t expedited_workdone2;	/* # done by others #2. */
-	atomic_long_t expedited_normal;		/* # fallbacks to normal. */
-	atomic_long_t expedited_stoppedcpus;	/* # successful stop_cpus. */
-	atomic_long_t expedited_done_tries;	/* # tries to update _done. */
-	atomic_long_t expedited_done_lost;	/* # times beaten to _done. */
-	atomic_long_t expedited_done_exit;	/* # times exited _done loop. */
-
+	raw_spinlock_t onofflock;		/* exclude on/offline and */
+						/* starting new GP. */
+	raw_spinlock_t fqslock;			/* Only one task forcing */
+						/* quiescent states. */
 	unsigned long jiffies_force_qs;		/* Time at which to invoke */
 						/* force_quiescent_state(). */
 	unsigned long n_force_qs;		/* Number of calls to */
@@ -450,19 +394,8 @@ struct rcu_state {
 	unsigned long gp_max;			/* Maximum GP duration in */
 						/* jiffies. */
 	char *name;				/* Name of structure. */
-	struct list_head flavors;		/* List of RCU flavors. */
 };
 
-/* Values for rcu_state structure's gp_flags field. */
-#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
-#define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */
-
-extern struct list_head rcu_struct_flavors;
-
-/* Sequence through rcu_state structures for each RCU flavor. */
-#define for_each_rcu_flavor(rsp) \
-	list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
-
 /* Return values for rcu_preempt_offline_tasks(). */
 
 #define RCU_OFL_TASKS_NORM_GP	0x1	/* Tasks blocking normal */
@@ -484,13 +417,6 @@ extern struct rcu_state rcu_preempt_state;
 DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 
-#ifdef CONFIG_RCU_BOOST
-DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
-DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
-DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
-DECLARE_PER_CPU(char, rcu_cpu_has_work);
-#endif /* #ifdef CONFIG_RCU_BOOST */
-
 #ifndef RCU_TREE_NONCORE
 
 /* Forward declarations for rcutree_plugin.h */
@@ -501,67 +427,44 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
 				      unsigned long flags);
+static void rcu_stop_cpu_kthread(int cpu);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_print_detail_task_stall(struct rcu_state *rsp);
-static int rcu_print_task_stall(struct rcu_node *rnp);
+static void rcu_print_task_stall(struct rcu_node *rnp);
+static void rcu_preempt_stall_reset(void);
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
 				     struct rcu_node *rnp,
 				     struct rcu_data *rdp);
+static void rcu_preempt_offline_cpu(int cpu);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_preempt_check_callbacks(int cpu);
+static void rcu_preempt_process_callbacks(void);
 void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
 #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
-			       bool wake);
+static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp);
 #endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */
+static int rcu_preempt_pending(int cpu);
+static int rcu_preempt_needs_cpu(int cpu);
+static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
+static void rcu_preempt_send_cbs_to_online(void);
 static void __init __rcu_init_preempt(void);
+static void rcu_needs_cpu_flush(void);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
 static void invoke_rcu_callbacks_kthread(void);
-static bool rcu_is_callbacks_kthread(void);
 #ifdef CONFIG_RCU_BOOST
 static void rcu_preempt_do_callbacks(void);
+static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
+					  cpumask_var_t cm);
 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
-						 struct rcu_node *rnp);
+						 struct rcu_node *rnp,
+						 int rnp_index);
+static void invoke_rcu_node_kthread(struct rcu_node *rnp);
+static void rcu_yield(void (*f)(unsigned long), unsigned long arg);
 #endif /* #ifdef CONFIG_RCU_BOOST */
+static void rcu_cpu_kthread_setrt(int cpu, int to_rt);
 static void __cpuinit rcu_prepare_kthreads(int cpu);
-static void rcu_prepare_for_idle_init(int cpu);
-static void rcu_cleanup_after_idle(int cpu);
-static void rcu_prepare_for_idle(int cpu);
-static void rcu_idle_count_callbacks_posted(void);
-static void print_cpu_stall_info_begin(void);
-static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
-static void print_cpu_stall_info_end(void);
-static void zero_cpu_stall_ticks(struct rcu_data *rdp);
-static void increment_cpu_stall_ticks(void);
-static bool is_nocb_cpu(int cpu);
-static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
-			    bool lazy);
-static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
-				      struct rcu_data *rdp);
-static bool nocb_cpu_expendable(int cpu);
-static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
-static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
-static void init_nocb_callback_list(struct rcu_data *rdp);
-static void __init rcu_init_nocb(void);
 
 #endif /* #ifndef RCU_TREE_NONCORE */
-
-#ifdef CONFIG_RCU_TRACE
-#ifdef CONFIG_RCU_NOCB_CPU
-/* Sum up queue lengths for tracing. */
-static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
-{
-	*ql = atomic_long_read(&rdp->nocb_q_count) + rdp->nocb_p_count;
-	*qll = atomic_long_read(&rdp->nocb_q_count_lazy) + rdp->nocb_p_count_lazy;
-}
-#else /* #ifdef CONFIG_RCU_NOCB_CPU */
-static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
-{
-	*ql = 0;
-	*qll = 0;
-}
-#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
-#endif /* #ifdef CONFIG_RCU_TRACE */