Diffstat (limited to 'kernel/rcutree.h')
-rw-r--r-- | kernel/rcutree.h | 193 |
1 file changed, 137 insertions, 56 deletions
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 14c040b18ed0..01b2ccda26fb 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -31,46 +31,51 @@
31 | /* | 31 | /* |
32 | * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT. | 32 | * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT. |
33 | * In theory, it should be possible to add more levels straightforwardly. | 33 | * In theory, it should be possible to add more levels straightforwardly. |
34 | * In practice, this has not been tested, so there is probably some | 34 | * In practice, this did work well going from three levels to four. |
35 | * bug somewhere. | 35 | * Of course, your mileage may vary. |
36 | */ | 36 | */ |
37 | #define MAX_RCU_LVLS 4 | 37 | #define MAX_RCU_LVLS 4 |
38 | #define RCU_FANOUT (CONFIG_RCU_FANOUT) | 38 | #if CONFIG_RCU_FANOUT > 16 |
39 | #define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT) | 39 | #define RCU_FANOUT_LEAF 16 |
40 | #define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT) | 40 | #else /* #if CONFIG_RCU_FANOUT > 16 */ |
41 | #define RCU_FANOUT_FOURTH (RCU_FANOUT_CUBE * RCU_FANOUT) | 41 | #define RCU_FANOUT_LEAF (CONFIG_RCU_FANOUT) |
42 | 42 | #endif /* #else #if CONFIG_RCU_FANOUT > 16 */ | |
43 | #if NR_CPUS <= RCU_FANOUT | 43 | #define RCU_FANOUT_1 (RCU_FANOUT_LEAF) |
44 | #define RCU_FANOUT_2 (RCU_FANOUT_1 * CONFIG_RCU_FANOUT) | ||
45 | #define RCU_FANOUT_3 (RCU_FANOUT_2 * CONFIG_RCU_FANOUT) | ||
46 | #define RCU_FANOUT_4 (RCU_FANOUT_3 * CONFIG_RCU_FANOUT) | ||
47 | |||
48 | #if NR_CPUS <= RCU_FANOUT_1 | ||
44 | # define NUM_RCU_LVLS 1 | 49 | # define NUM_RCU_LVLS 1 |
45 | # define NUM_RCU_LVL_0 1 | 50 | # define NUM_RCU_LVL_0 1 |
46 | # define NUM_RCU_LVL_1 (NR_CPUS) | 51 | # define NUM_RCU_LVL_1 (NR_CPUS) |
47 | # define NUM_RCU_LVL_2 0 | 52 | # define NUM_RCU_LVL_2 0 |
48 | # define NUM_RCU_LVL_3 0 | 53 | # define NUM_RCU_LVL_3 0 |
49 | # define NUM_RCU_LVL_4 0 | 54 | # define NUM_RCU_LVL_4 0 |
50 | #elif NR_CPUS <= RCU_FANOUT_SQ | 55 | #elif NR_CPUS <= RCU_FANOUT_2 |
51 | # define NUM_RCU_LVLS 2 | 56 | # define NUM_RCU_LVLS 2 |
52 | # define NUM_RCU_LVL_0 1 | 57 | # define NUM_RCU_LVL_0 1 |
53 | # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT) | 58 | # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1) |
54 | # define NUM_RCU_LVL_2 (NR_CPUS) | 59 | # define NUM_RCU_LVL_2 (NR_CPUS) |
55 | # define NUM_RCU_LVL_3 0 | 60 | # define NUM_RCU_LVL_3 0 |
56 | # define NUM_RCU_LVL_4 0 | 61 | # define NUM_RCU_LVL_4 0 |
57 | #elif NR_CPUS <= RCU_FANOUT_CUBE | 62 | #elif NR_CPUS <= RCU_FANOUT_3 |
58 | # define NUM_RCU_LVLS 3 | 63 | # define NUM_RCU_LVLS 3 |
59 | # define NUM_RCU_LVL_0 1 | 64 | # define NUM_RCU_LVL_0 1 |
60 | # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ) | 65 | # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2) |
61 | # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT) | 66 | # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1) |
62 | # define NUM_RCU_LVL_3 NR_CPUS | 67 | # define NUM_RCU_LVL_3 (NR_CPUS) |
63 | # define NUM_RCU_LVL_4 0 | 68 | # define NUM_RCU_LVL_4 0 |
64 | #elif NR_CPUS <= RCU_FANOUT_FOURTH | 69 | #elif NR_CPUS <= RCU_FANOUT_4 |
65 | # define NUM_RCU_LVLS 4 | 70 | # define NUM_RCU_LVLS 4 |
66 | # define NUM_RCU_LVL_0 1 | 71 | # define NUM_RCU_LVL_0 1 |
67 | # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_CUBE) | 72 | # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3) |
68 | # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ) | 73 | # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2) |
69 | # define NUM_RCU_LVL_3 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT) | 74 | # define NUM_RCU_LVL_3 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1) |
70 | # define NUM_RCU_LVL_4 NR_CPUS | 75 | # define NUM_RCU_LVL_4 (NR_CPUS) |
71 | #else | 76 | #else |
72 | # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS" | 77 | # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS" |
73 | #endif /* #if (NR_CPUS) <= RCU_FANOUT */ | 78 | #endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */ |
74 | 79 | ||
75 | #define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3 + NUM_RCU_LVL_4) | 80 | #define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3 + NUM_RCU_LVL_4) |
76 | #define NUM_RCU_NODES (RCU_SUM - NR_CPUS) | 81 | #define NUM_RCU_NODES (RCU_SUM - NR_CPUS) |
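To make the reworked fanout arithmetic concrete, consider a hypothetical NR_CPUS=4096, CONFIG_RCU_FANOUT=64 build (both values assumed purely for illustration). The leaf fanout is then capped at 16, NR_CPUS <= RCU_FANOUT_3 (16*64*64 = 65536) holds, and the three-level branch above is selected. A minimal user-space sketch of the same computation, with DIV_ROUND_UP copied from include/linux/kernel.h:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)   (((n) + (d) - 1) / (d))

    #define NR_CPUS           4096   /* assumed for this example */
    #define RCU_FANOUT          64   /* assumed CONFIG_RCU_FANOUT */
    #define RCU_FANOUT_LEAF     16   /* capped: CONFIG_RCU_FANOUT > 16 */

    int main(void)
    {
            int fanout_1 = RCU_FANOUT_LEAF;                 /* 16 */
            int fanout_2 = fanout_1 * RCU_FANOUT;           /* 1024 */
            int lvl_1 = DIV_ROUND_UP(NR_CPUS, fanout_2);    /* 4 nodes */
            int lvl_2 = DIV_ROUND_UP(NR_CPUS, fanout_1);    /* 256 leaves */

            /* NUM_RCU_NODES = RCU_SUM - NR_CPUS = 1 + 4 + 256 = 261. */
            printf("NUM_RCU_NODES = %d\n", 1 + lvl_1 + lvl_2);
            return 0;
    }

Capping the leaf fanout means each leaf rcu_node serves at most 16 CPUs, presumably trading a couple hundred extra rcu_node structures for less contention on the leaf-level locks.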
@@ -79,13 +84,19 @@
79 | * Dynticks per-CPU state. | 84 | * Dynticks per-CPU state. |
80 | */ | 85 | */ |
81 | struct rcu_dynticks { | 86 | struct rcu_dynticks { |
82 | int dynticks_nesting; /* Track nesting level, sort of. */ | 87 | int dynticks_nesting; /* Track irq/process nesting level. */ |
83 | int dynticks; /* Even value for dynticks-idle, else odd. */ | 88 | int dynticks_nmi_nesting; /* Track NMI nesting level. */ |
84 | int dynticks_nmi; /* Even value for either dynticks-idle or */ | 89 | atomic_t dynticks; /* Even value for dynticks-idle, else odd. */ |
85 | /* not in nmi handler, else odd. So this */ | ||
86 | /* remains even for nmi from irq handler. */ | ||
87 | }; | 90 | }; |
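Collapsing the dynticks/dynticks_nmi pair into one atomic_t simplifies grace-period sampling: a remote CPU snapshots ->dynticks once at the start of the grace period and later decides whether the target CPU passed through a quiescent state. A simplified sketch of that decision (example_in_qs_since() is hypothetical; the in-kernel check is more careful):

    /*
     * snap is the value of ->dynticks sampled when the grace period
     * started.  An even snapshot means the CPU was in dynticks-idle,
     * hence already quiescent.  A changed counter means the CPU has
     * crossed an idle boundary since then, so it cannot still be in
     * an RCU read-side critical section predating the grace period.
     */
    static int example_in_qs_since(atomic_t *dynticks, int snap)
    {
            int curr = atomic_read(dynticks);

            return (snap & 0x1) == 0 || curr != snap;
    }

Because an NMI now bumps the shared counter only when it arrives from dynticks-idle (tracked via dynticks_nmi_nesting), the separate dynticks_nmi counter and its per-GP snapshot below can be dropped.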
88 | 91 | ||
92 | /* RCU's kthread states for tracing. */ | ||
93 | #define RCU_KTHREAD_STOPPED 0 | ||
94 | #define RCU_KTHREAD_RUNNING 1 | ||
95 | #define RCU_KTHREAD_WAITING 2 | ||
96 | #define RCU_KTHREAD_OFFCPU 3 | ||
97 | #define RCU_KTHREAD_YIELDING 4 | ||
98 | #define RCU_KTHREAD_MAX 4 | ||
99 | |||
89 | /* | 100 | /* |
90 | * Definition for node within the RCU grace-period-detection hierarchy. | 101 | * Definition for node within the RCU grace-period-detection hierarchy. |
91 | */ | 102 | */ |
@@ -104,10 +115,13 @@ struct rcu_node {
104 | /* an rcu_data structure, otherwise, each */ | 115 | /* an rcu_data structure, otherwise, each */ |
105 | /* bit corresponds to a child rcu_node */ | 116 | /* bit corresponds to a child rcu_node */ |
106 | /* structure. */ | 117 | /* structure. */ |
107 | unsigned long expmask; /* Groups that have ->blocked_tasks[] */ | 118 | unsigned long expmask; /* Groups that have ->blkd_tasks */ |
108 | /* elements that need to drain to allow the */ | 119 | /* elements that need to drain to allow the */ |
109 | /* current expedited grace period to */ | 120 | /* current expedited grace period to */ |
110 | /* complete (only for TREE_PREEMPT_RCU). */ | 121 | /* complete (only for TREE_PREEMPT_RCU). */ |
122 | atomic_t wakemask; /* CPUs whose kthread needs to be awakened. */ | ||
123 | /* Since this has meaning only for leaf */ | ||
124 | /* rcu_node structures, 32 bits suffices. */ | ||
111 | unsigned long qsmaskinit; | 125 | unsigned long qsmaskinit; |
112 | /* Per-GP initial value for qsmask & expmask. */ | 126 | /* Per-GP initial value for qsmask & expmask. */ |
113 | unsigned long grpmask; /* Mask to apply to parent qsmask. */ | 127 | unsigned long grpmask; /* Mask to apply to parent qsmask. */ |
@@ -117,11 +131,62 @@ struct rcu_node {
117 | u8 grpnum; /* CPU/group number for next level up. */ | 131 | u8 grpnum; /* CPU/group number for next level up. */ |
118 | u8 level; /* root is at level 0. */ | 132 | u8 level; /* root is at level 0. */ |
119 | struct rcu_node *parent; | 133 | struct rcu_node *parent; |
120 | struct list_head blocked_tasks[4]; | 134 | struct list_head blkd_tasks; |
121 | /* Tasks blocked in RCU read-side critsect. */ | 135 | /* Tasks blocked in RCU read-side critical */ |
122 | /* Grace period number (->gpnum) x blocked */ | 136 | /* section. Tasks are placed at the head */ |
123 | /* by tasks on the (x & 0x1) element of the */ | 137 | /* of this list and age towards the tail. */ |
124 | /* blocked_tasks[] array. */ | 138 | struct list_head *gp_tasks; |
139 | /* Pointer to the first task blocking the */ | ||
140 | /* current grace period, or NULL if there */ | ||
141 | /* is no such task. */ | ||
142 | struct list_head *exp_tasks; | ||
143 | /* Pointer to the first task blocking the */ | ||
144 | /* current expedited grace period, or NULL */ | ||
145 | /* if there is no such task. If there */ | ||
146 | /* is no current expedited grace period, */ | ||
147 | /* then there cannot be any such task. */ | ||
148 | #ifdef CONFIG_RCU_BOOST | ||
149 | struct list_head *boost_tasks; | ||
150 | /* Pointer to first task that needs to be */ | ||
151 | /* priority boosted, or NULL if no priority */ | ||
152 | /* boosting is needed for this rcu_node */ | ||
153 | /* structure. If there are no tasks */ | ||
154 | /* queued on this rcu_node structure that */ | ||
155 | /* are blocking the current grace period, */ | ||
156 | /* there can be no such task. */ | ||
157 | unsigned long boost_time; | ||
158 | /* When to start boosting (jiffies). */ | ||
159 | struct task_struct *boost_kthread_task; | ||
160 | /* kthread that takes care of priority */ | ||
161 | /* boosting for this rcu_node structure. */ | ||
162 | unsigned int boost_kthread_status; | ||
163 | /* State of boost_kthread_task for tracing. */ | ||
164 | unsigned long n_tasks_boosted; | ||
165 | /* Total number of tasks boosted. */ | ||
166 | unsigned long n_exp_boosts; | ||
167 | /* Number of tasks boosted for expedited GP. */ | ||
168 | unsigned long n_normal_boosts; | ||
169 | /* Number of tasks boosted for normal GP. */ | ||
170 | unsigned long n_balk_blkd_tasks; | ||
171 | /* Refused to boost: no blocked tasks. */ | ||
172 | unsigned long n_balk_exp_gp_tasks; | ||
173 | /* Refused to boost: nothing blocking GP. */ | ||
174 | unsigned long n_balk_boost_tasks; | ||
175 | /* Refused to boost: already boosting. */ | ||
176 | unsigned long n_balk_notblocked; | ||
177 | /* Refused to boost: RCU read-side critical section still running. */ | ||
178 | unsigned long n_balk_notyet; | ||
179 | /* Refused to boost: not yet time. */ | ||
180 | unsigned long n_balk_nos; | ||
181 | /* Refused to boost: not sure why, though. */ | ||
182 | /* This can happen due to race conditions. */ | ||
183 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
184 | struct task_struct *node_kthread_task; | ||
185 | /* kthread that takes care of this rcu_node */ | ||
186 | /* structure, for example, awakening the */ | ||
187 | /* per-CPU kthreads as needed. */ | ||
188 | unsigned int node_kthread_status; | ||
189 | /* State of node_kthread_task for tracing. */ | ||
125 | } ____cacheline_internodealigned_in_smp; | 190 | } ____cacheline_internodealigned_in_smp; |
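The single ->blkd_tasks list plus the gp_tasks/exp_tasks/boost_tasks pointers replace the old blocked_tasks[] array of four lists: readers are queued at the head, so every entry from a given pointer through the tail is old enough to block the corresponding grace period. Two hedged sketches of how such a layout is typically consumed (the example_ helpers are illustrative, not code from this patch). First, "are readers blocking the current grace period?" reduces to a pointer test, and the blockers are the sublist from ->gp_tasks to the tail:

    static int example_count_gp_blockers(struct rcu_node *rnp)
    {
            struct list_head *p;
            int n = 0;

            for (p = rnp->gp_tasks; p != NULL && p != &rnp->blkd_tasks;
                 p = p->next)
                    n++;    /* each entry blocks the current GP */
            return n;
    }

Second, under CONFIG_RCU_BOOST the boost_time deadline and those pointers combine into a simple trigger rule: boost immediately on behalf of an expedited grace period, but give a normal grace period until ->boost_time to finish on its own:

    static void example_initiate_boost(struct rcu_node *rnp)
    {
            if (rnp->exp_tasks != NULL ||
                (rnp->gp_tasks != NULL &&
                 time_after(jiffies, rnp->boost_time))) {
                    rnp->boost_tasks = rnp->exp_tasks != NULL ?
                                       rnp->exp_tasks : rnp->gp_tasks;
                    wake_up_process(rnp->boost_kthread_task);
            }
    }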
126 | 191 | ||
127 | /* | 192 | /* |
@@ -170,7 +235,7 @@ struct rcu_data {
170 | bool passed_quiesc; /* User-mode/idle loop etc. */ | 235 | bool passed_quiesc; /* User-mode/idle loop etc. */ |
171 | bool qs_pending; /* Core waits for quiesc state. */ | 236 | bool qs_pending; /* Core waits for quiesc state. */ |
172 | bool beenonline; /* CPU online at least once. */ | 237 | bool beenonline; /* CPU online at least once. */ |
173 | bool preemptable; /* Preemptable RCU? */ | 238 | bool preemptible; /* Preemptible RCU? */ |
174 | struct rcu_node *mynode; /* This CPU's leaf of hierarchy */ | 239 | struct rcu_node *mynode; /* This CPU's leaf of hierarchy */ |
175 | unsigned long grpmask; /* Mask to apply to leaf qsmask. */ | 240 | unsigned long grpmask; /* Mask to apply to leaf qsmask. */ |
176 | 241 | ||
@@ -202,6 +267,9 @@ struct rcu_data {
202 | long qlen; /* # of queued callbacks */ | 267 | long qlen; /* # of queued callbacks */ |
203 | long qlen_last_fqs_check; | 268 | long qlen_last_fqs_check; |
204 | /* qlen at last check for QS forcing */ | 269 | /* qlen at last check for QS forcing */ |
270 | unsigned long n_cbs_invoked; /* count of RCU cbs invoked. */ | ||
271 | unsigned long n_cbs_orphaned; /* RCU cbs orphaned by dying CPU */ | ||
272 | unsigned long n_cbs_adopted; /* RCU cbs adopted from dying CPU */ | ||
205 | unsigned long n_force_qs_snap; | 273 | unsigned long n_force_qs_snap; |
206 | /* did other CPU force QS recently? */ | 274 | /* did other CPU force QS recently? */ |
207 | long blimit; /* Upper limit on a processed batch */ | 275 | long blimit; /* Upper limit on a processed batch */ |
@@ -210,7 +278,6 @@ struct rcu_data {
210 | /* 3) dynticks interface. */ | 278 | /* 3) dynticks interface. */ |
211 | struct rcu_dynticks *dynticks; /* Shared per-CPU dynticks state. */ | 279 | struct rcu_dynticks *dynticks; /* Shared per-CPU dynticks state. */ |
212 | int dynticks_snap; /* Per-GP tracking for dynticks. */ | 280 | int dynticks_snap; /* Per-GP tracking for dynticks. */ |
213 | int dynticks_nmi_snap; /* Per-GP tracking for dynticks_nmi. */ | ||
214 | #endif /* #ifdef CONFIG_NO_HZ */ | 281 | #endif /* #ifdef CONFIG_NO_HZ */ |
215 | 282 | ||
216 | /* 4) reasons this CPU needed to be kicked by force_quiescent_state */ | 283 | /* 4) reasons this CPU needed to be kicked by force_quiescent_state */ |
@@ -246,7 +313,6 @@ struct rcu_data {
246 | #endif /* #else #ifdef CONFIG_NO_HZ */ | 313 | #endif /* #else #ifdef CONFIG_NO_HZ */ |
247 | 314 | ||
248 | #define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */ | 315 | #define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */ |
249 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
250 | 316 | ||
251 | #ifdef CONFIG_PROVE_RCU | 317 | #ifdef CONFIG_PROVE_RCU |
252 | #define RCU_STALL_DELAY_DELTA (5 * HZ) | 318 | #define RCU_STALL_DELAY_DELTA (5 * HZ) |
@@ -254,19 +320,26 @@ struct rcu_data {
254 | #define RCU_STALL_DELAY_DELTA 0 | 320 | #define RCU_STALL_DELAY_DELTA 0 |
255 | #endif | 321 | #endif |
256 | 322 | ||
257 | #define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ + RCU_STALL_DELAY_DELTA) | 323 | #define RCU_SECONDS_TILL_STALL_CHECK (CONFIG_RCU_CPU_STALL_TIMEOUT * HZ + \ |
324 | RCU_STALL_DELAY_DELTA) | ||
258 | /* for rsp->jiffies_stall */ | 325 | /* for rsp->jiffies_stall */ |
259 | #define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ + RCU_STALL_DELAY_DELTA) | 326 | #define RCU_SECONDS_TILL_STALL_RECHECK (3 * RCU_SECONDS_TILL_STALL_CHECK + 30) |
260 | /* for rsp->jiffies_stall */ | 327 | /* for rsp->jiffies_stall */ |
261 | #define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */ | 328 | #define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */ |
262 | /* to take at least one */ | 329 | /* to take at least one */ |
263 | /* scheduling clock irq */ | 330 | /* scheduling clock irq */ |
264 | /* before ratting on them. */ | 331 | /* before ratting on them. */ |
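Since the check interval is now configurable, a worked example helps. Assuming CONFIG_RCU_CPU_STALL_TIMEOUT=60 (the presumed default), CONFIG_PROVE_RCU=y, and HZ=1000:

    RCU_SECONDS_TILL_STALL_CHECK   = 60 * 1000 + 5 * 1000 = 65000 jiffies
    RCU_SECONDS_TILL_STALL_RECHECK = 3 * 65000 + 30       = 195030 jiffies

That is, the first stall warning fires 65 seconds into a stalled grace period and repeats roughly every 195 seconds, where the old code hard-wired 10- and 30-second delays.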
265 | 332 | ||
266 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 333 | #define rcu_wait(cond) \ |
267 | 334 | do { \ | |
268 | #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) | 335 | for (;;) { \ |
269 | #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) | 336 | set_current_state(TASK_INTERRUPTIBLE); \ |
337 | if (cond) \ | ||
338 | break; \ | ||
339 | schedule(); \ | ||
340 | } \ | ||
341 | __set_current_state(TASK_RUNNING); \ | ||
342 | } while (0) | ||
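rcu_wait() open-codes a wait_event()-style loop with no waitqueue: the waiter re-tests the condition every time it runs, and whoever makes the condition true must issue an explicit wake_up_process(). A usage sketch (a hypothetical kthread loop, not taken from this patch) built on the ->wakemask field added above:

    static int example_node_kthread(void *arg)
    {
            struct rcu_node *rnp = arg;

            for (;;) {
                    /* Sleep until a CPU on this leaf needs service. */
                    rcu_wait(atomic_read(&rnp->wakemask) != 0);
                    /* ... service the CPUs named in ->wakemask ... */
            }
            return 0;
    }

The set_current_state(TASK_INTERRUPTIBLE) before the condition test ensures that a wake_up_process() arriving between the test and schedule() is not lost.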
270 | 343 | ||
271 | /* | 344 | /* |
272 | * RCU global state, including node hierarchy. This hierarchy is | 345 | * RCU global state, including node hierarchy. This hierarchy is |
@@ -283,7 +356,7 @@ struct rcu_state {
283 | struct rcu_node *level[NUM_RCU_LVLS]; /* Hierarchy levels. */ | 356 | struct rcu_node *level[NUM_RCU_LVLS]; /* Hierarchy levels. */ |
284 | u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */ | 357 | u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */ |
285 | u8 levelspread[NUM_RCU_LVLS]; /* kids/node in each level. */ | 358 | u8 levelspread[NUM_RCU_LVLS]; /* kids/node in each level. */ |
286 | struct rcu_data *rda[NR_CPUS]; /* array of rdp pointers. */ | 359 | struct rcu_data __percpu *rda; /* pointer to per-CPU rcu_data. */ |
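Converting the static NR_CPUS-sized array to a __percpu pointer shrinks rcu_state and places each CPU's rcu_data in its per-CPU area, so the access idiom changes from array indexing to the standard per-CPU accessors. A one-line sketch of the assumed new lookup:

    struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);  /* was rsp->rda[cpu] */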
287 | 360 | ||
288 | /* The following fields are guarded by the root rcu_node's lock. */ | 361 | /* The following fields are guarded by the root rcu_node's lock. */ |
289 | 362 | ||
@@ -296,21 +369,14 @@ struct rcu_state {
296 | /* period because */ | 369 | /* period because */ |
297 | /* force_quiescent_state() */ | 370 | /* force_quiescent_state() */ |
298 | /* was running. */ | 371 | /* was running. */ |
372 | u8 boost; /* Subject to priority boost. */ | ||
299 | unsigned long gpnum; /* Current gp number. */ | 373 | unsigned long gpnum; /* Current gp number. */ |
300 | unsigned long completed; /* # of last completed gp. */ | 374 | unsigned long completed; /* # of last completed gp. */ |
301 | 375 | ||
302 | /* End of fields guarded by root rcu_node's lock. */ | 376 | /* End of fields guarded by root rcu_node's lock. */ |
303 | 377 | ||
304 | raw_spinlock_t onofflock; /* exclude on/offline and */ | 378 | raw_spinlock_t onofflock; /* exclude on/offline and */ |
305 | /* starting new GP. Also */ | 379 | /* starting new GP. */ |
306 | /* protects the following */ | ||
307 | /* orphan_cbs fields. */ | ||
308 | struct rcu_head *orphan_cbs_list; /* list of rcu_head structs */ | ||
309 | /* orphaned by all CPUs in */ | ||
310 | /* a given leaf rcu_node */ | ||
311 | /* going offline. */ | ||
312 | struct rcu_head **orphan_cbs_tail; /* And tail pointer. */ | ||
313 | long orphan_qlen; /* Number of orphaned cbs. */ | ||
314 | raw_spinlock_t fqslock; /* Only one task forcing */ | 380 | raw_spinlock_t fqslock; /* Only one task forcing */ |
315 | /* quiescent states. */ | 381 | /* quiescent states. */ |
316 | unsigned long jiffies_force_qs; /* Time at which to invoke */ | 382 | unsigned long jiffies_force_qs; /* Time at which to invoke */ |
@@ -321,12 +387,12 @@ struct rcu_state {
321 | /* due to lock unavailable. */ | 387 | /* due to lock unavailable. */ |
322 | unsigned long n_force_qs_ngp; /* Number of calls leaving */ | 388 | unsigned long n_force_qs_ngp; /* Number of calls leaving */ |
323 | /* due to no GP active. */ | 389 | /* due to no GP active. */ |
324 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
325 | unsigned long gp_start; /* Time at which GP started, */ | 390 | unsigned long gp_start; /* Time at which GP started, */ |
326 | /* but in jiffies. */ | 391 | /* but in jiffies. */ |
327 | unsigned long jiffies_stall; /* Time at which to check */ | 392 | unsigned long jiffies_stall; /* Time at which to check */ |
328 | /* for CPU stalls. */ | 393 | /* for CPU stalls. */ |
329 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 394 | unsigned long gp_max; /* Maximum GP duration in */ |
395 | /* jiffies. */ | ||
330 | char *name; /* Name of structure. */ | 396 | char *name; /* Name of structure. */ |
331 | }; | 397 | }; |
332 | 398 | ||
@@ -357,15 +423,15 @@ DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
357 | static void rcu_bootup_announce(void); | 423 | static void rcu_bootup_announce(void); |
358 | long rcu_batches_completed(void); | 424 | long rcu_batches_completed(void); |
359 | static void rcu_preempt_note_context_switch(int cpu); | 425 | static void rcu_preempt_note_context_switch(int cpu); |
360 | static int rcu_preempted_readers(struct rcu_node *rnp); | 426 | static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp); |
361 | #ifdef CONFIG_HOTPLUG_CPU | 427 | #ifdef CONFIG_HOTPLUG_CPU |
362 | static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, | 428 | static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, |
363 | unsigned long flags); | 429 | unsigned long flags); |
430 | static void rcu_stop_cpu_kthread(int cpu); | ||
364 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | 431 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ |
365 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
366 | static void rcu_print_detail_task_stall(struct rcu_state *rsp); | 432 | static void rcu_print_detail_task_stall(struct rcu_state *rsp); |
367 | static void rcu_print_task_stall(struct rcu_node *rnp); | 433 | static void rcu_print_task_stall(struct rcu_node *rnp); |
368 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 434 | static void rcu_preempt_stall_reset(void); |
369 | static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); | 435 | static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); |
370 | #ifdef CONFIG_HOTPLUG_CPU | 436 | #ifdef CONFIG_HOTPLUG_CPU |
371 | static int rcu_preempt_offline_tasks(struct rcu_state *rsp, | 437 | static int rcu_preempt_offline_tasks(struct rcu_state *rsp, |
@@ -382,8 +448,23 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp);
382 | static int rcu_preempt_pending(int cpu); | 448 | static int rcu_preempt_pending(int cpu); |
383 | static int rcu_preempt_needs_cpu(int cpu); | 449 | static int rcu_preempt_needs_cpu(int cpu); |
384 | static void __cpuinit rcu_preempt_init_percpu_data(int cpu); | 450 | static void __cpuinit rcu_preempt_init_percpu_data(int cpu); |
385 | static void rcu_preempt_send_cbs_to_orphanage(void); | 451 | static void rcu_preempt_send_cbs_to_online(void); |
386 | static void __init __rcu_init_preempt(void); | 452 | static void __init __rcu_init_preempt(void); |
387 | static void rcu_needs_cpu_flush(void); | 453 | static void rcu_needs_cpu_flush(void); |
454 | static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); | ||
455 | static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); | ||
456 | static void invoke_rcu_callbacks_kthread(void); | ||
457 | #ifdef CONFIG_RCU_BOOST | ||
458 | static void rcu_preempt_do_callbacks(void); | ||
459 | static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, | ||
460 | cpumask_var_t cm); | ||
461 | static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, | ||
462 | struct rcu_node *rnp, | ||
463 | int rnp_index); | ||
464 | static void invoke_rcu_node_kthread(struct rcu_node *rnp); | ||
465 | static void rcu_yield(void (*f)(unsigned long), unsigned long arg); | ||
466 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
467 | static void rcu_cpu_kthread_setrt(int cpu, int to_rt); | ||
468 | static void __cpuinit rcu_prepare_kthreads(int cpu); | ||
388 | 469 | ||
389 | #endif /* #ifndef RCU_TREE_NONCORE */ | 470 | #endif /* #ifndef RCU_TREE_NONCORE */ |