author    Oleg Nesterov <oleg@tv-sign.ru>          2006-01-10 08:48:02 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>    2006-01-10 11:42:50 -0500
commit    69a0b3157983925f14fe0bdc49622d5389538d8d
tree      d77b406001d01b0a200c9f713d8287a2ec42ae58
parent    c0400dc507a4345357fc25f13e8ac929287688a8
[PATCH] rcu: join rcu_ctrlblk and rcu_state
This patch moves rcu_state into the rcu_ctrlblk. I see no reason to keep
two separate variables for controlling RCU state: every user of rcu_state
already has "rcu_ctrlblk *rcp" in its parameter list.
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Acked-by: Paul E. McKenney <paulmck@us.ibm.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
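For reference, a sketch of the merged control block as it looks after this
patch, assembled from the include/linux/rcupdate.h hunk below (comments and
field alignment approximate, not copied verbatim from the tree):

	struct rcu_ctrlblk {
		long		cur;		/* Current batch number.              */
		long		completed;	/* Number of the last completed batch */
		int		next_pending;	/* Is the next batch already waiting? */

		spinlock_t	lock	____cacheline_internodealigned_in_smp;
		cpumask_t	cpumask;	/* CPUs that need to switch in order  */
						/* for current batch to proceed.      */
	} ____cacheline_internodealigned_in_smp;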
-rw-r--r--  include/linux/rcupdate.h |  4
-rw-r--r--  kernel/rcupdate.c        | 82
2 files changed, 42 insertions, 44 deletions
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index a1d26cb28925..981f9aa43353 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -65,6 +65,10 @@ struct rcu_ctrlblk {
 	long	cur;		/* Current batch number.              */
 	long	completed;	/* Number of the last completed batch */
 	int	next_pending;	/* Is the next batch already waiting? */
+
+	spinlock_t	lock	____cacheline_internodealigned_in_smp;
+	cpumask_t	cpumask; /* CPUs that need to switch in order  */
+				 /* for current batch to proceed.      */
 } ____cacheline_internodealigned_in_smp;
 
 /* Is batch a before batch b ? */
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 05ee48316f70..e18f9190eafa 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -49,22 +49,18 @@
 #include <linux/cpu.h>
 
 /* Definition for rcupdate control block. */
-struct rcu_ctrlblk rcu_ctrlblk =
-	{ .cur = -300, .completed = -300 };
-struct rcu_ctrlblk rcu_bh_ctrlblk =
-	{ .cur = -300, .completed = -300 };
-
-/* Bookkeeping of the progress of the grace period */
-struct rcu_state {
-	spinlock_t	lock;	/* Guard this struct and writes to rcu_ctrlblk */
-	cpumask_t	cpumask; /* CPUs that need to switch in order  */
-				 /* for current batch to proceed.      */
-};
-
-static struct rcu_state rcu_state ____cacheline_internodealigned_in_smp =
-	  {.lock = SPIN_LOCK_UNLOCKED, .cpumask = CPU_MASK_NONE };
-static struct rcu_state rcu_bh_state ____cacheline_internodealigned_in_smp =
-	  {.lock = SPIN_LOCK_UNLOCKED, .cpumask = CPU_MASK_NONE };
+struct rcu_ctrlblk rcu_ctrlblk = {
+	.cur = -300,
+	.completed = -300,
+	.lock = SPIN_LOCK_UNLOCKED,
+	.cpumask = CPU_MASK_NONE,
+};
+struct rcu_ctrlblk rcu_bh_ctrlblk = {
+	.cur = -300,
+	.completed = -300,
+	.lock = SPIN_LOCK_UNLOCKED,
+	.cpumask = CPU_MASK_NONE,
+};
 
 DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
@@ -220,13 +216,13 @@ static void rcu_do_batch(struct rcu_data *rdp)
  * This is done by rcu_start_batch. The start is not broadcasted to
  * all cpus, they must pick this up by comparing rcp->cur with
  * rdp->quiescbatch. All cpus are recorded in the
- * rcu_state.cpumask bitmap.
+ * rcu_ctrlblk.cpumask bitmap.
  * - All cpus must go through a quiescent state.
  * Since the start of the grace period is not broadcasted, at least two
  * calls to rcu_check_quiescent_state are required:
  * The first call just notices that a new grace period is running. The
  * following calls check if there was a quiescent state since the beginning
- * of the grace period. If so, it updates rcu_state.cpumask. If
+ * of the grace period. If so, it updates rcu_ctrlblk.cpumask. If
  * the bitmap is empty, then the grace period is completed.
  * rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace
  * period (if necessary).
@@ -234,9 +230,9 @@ static void rcu_do_batch(struct rcu_data *rdp)
 /*
  * Register a new batch of callbacks, and start it up if there is currently no
  * active batch and the batch to be registered has not already occurred.
- * Caller must hold rcu_state.lock.
+ * Caller must hold rcu_ctrlblk.lock.
  */
-static void rcu_start_batch(struct rcu_ctrlblk *rcp, struct rcu_state *rsp)
+static void rcu_start_batch(struct rcu_ctrlblk *rcp)
 {
 	if (rcp->next_pending &&
 			rcp->completed == rcp->cur) {
@@ -251,11 +247,11 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp, struct rcu_state *rsp)
 		/*
 		 * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
 		 * Barrier  Otherwise it can cause tickless idle CPUs to be
-		 * included in rsp->cpumask, which will extend graceperiods
+		 * included in rcp->cpumask, which will extend graceperiods
 		 * unnecessarily.
 		 */
 		smp_mb();
-		cpus_andnot(rsp->cpumask, cpu_online_map, nohz_cpu_mask);
+		cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask);
 
 	}
 }
@@ -265,13 +261,13 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp, struct rcu_state *rsp)
  * Clear it from the cpu mask and complete the grace period if it was the last
  * cpu. Start another grace period if someone has further entries pending
  */
-static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp, struct rcu_state *rsp)
+static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
 {
-	cpu_clear(cpu, rsp->cpumask);
-	if (cpus_empty(rsp->cpumask)) {
+	cpu_clear(cpu, rcp->cpumask);
+	if (cpus_empty(rcp->cpumask)) {
 		/* batch completed ! */
 		rcp->completed = rcp->cur;
-		rcu_start_batch(rcp, rsp);
+		rcu_start_batch(rcp);
 	}
 }
 
@@ -281,7 +277,7 @@ static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp, struct rcu_state *rsp)
  * quiescent cycle, then indicate that it has done so.
  */
 static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
-					struct rcu_state *rsp, struct rcu_data *rdp)
+					struct rcu_data *rdp)
 {
 	if (rdp->quiescbatch != rcp->cur) {
 		/* start new grace period: */
@@ -306,15 +302,15 @@ static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
 		return;
 	rdp->qs_pending = 0;
 
-	spin_lock(&rsp->lock);
+	spin_lock(&rcp->lock);
 	/*
 	 * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
 	 * during cpu startup. Ignore the quiescent state.
 	 */
 	if (likely(rdp->quiescbatch == rcp->cur))
-		cpu_quiet(rdp->cpu, rcp, rsp);
+		cpu_quiet(rdp->cpu, rcp);
 
-	spin_unlock(&rsp->lock);
+	spin_unlock(&rcp->lock);
 }
 
 
@@ -335,16 +331,16 @@ static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
 }
 
 static void __rcu_offline_cpu(struct rcu_data *this_rdp,
-	struct rcu_ctrlblk *rcp, struct rcu_state *rsp, struct rcu_data *rdp)
+	struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
 {
 	/* if the cpu going offline owns the grace period
 	 * we can block indefinitely waiting for it, so flush
 	 * it here
 	 */
-	spin_lock_bh(&rsp->lock);
+	spin_lock_bh(&rcp->lock);
 	if (rcp->cur != rcp->completed)
-		cpu_quiet(rdp->cpu, rcp, rsp);
-	spin_unlock_bh(&rsp->lock);
+		cpu_quiet(rdp->cpu, rcp);
+	spin_unlock_bh(&rcp->lock);
 	rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
 	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
 
@@ -354,9 +350,9 @@ static void rcu_offline_cpu(int cpu)
 	struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
 	struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);
 
-	__rcu_offline_cpu(this_rdp, &rcu_ctrlblk, &rcu_state,
+	__rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
 					&per_cpu(rcu_data, cpu));
-	__rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk, &rcu_bh_state,
+	__rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
 					&per_cpu(rcu_bh_data, cpu));
 	put_cpu_var(rcu_data);
 	put_cpu_var(rcu_bh_data);
@@ -375,7 +371,7 @@ static void rcu_offline_cpu(int cpu)
  * This does the RCU processing work from tasklet context.
  */
 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
-					struct rcu_state *rsp, struct rcu_data *rdp)
+					struct rcu_data *rdp)
 {
 	if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) {
 		*rdp->donetail = rdp->curlist;
@@ -405,25 +401,23 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
 
 		if (!rcp->next_pending) {
 			/* and start it/schedule start if it's a new batch */
-			spin_lock(&rsp->lock);
+			spin_lock(&rcp->lock);
 			rcp->next_pending = 1;
-			rcu_start_batch(rcp, rsp);
-			spin_unlock(&rsp->lock);
+			rcu_start_batch(rcp);
+			spin_unlock(&rcp->lock);
 		}
 	} else {
 		local_irq_enable();
 	}
-	rcu_check_quiescent_state(rcp, rsp, rdp);
+	rcu_check_quiescent_state(rcp, rdp);
 	if (rdp->donelist)
 		rcu_do_batch(rdp);
 }
 
 static void rcu_process_callbacks(unsigned long unused)
 {
-	__rcu_process_callbacks(&rcu_ctrlblk, &rcu_state,
-				&__get_cpu_var(rcu_data));
-	__rcu_process_callbacks(&rcu_bh_ctrlblk, &rcu_bh_state,
-				&__get_cpu_var(rcu_bh_data));
+	__rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
+	__rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
 }
 
 static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
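
Usage note (not part of the patch): callers that previously locked the
separate rcu_state now lock the control block they already hold. A minimal
sketch of the post-patch pattern, as used in __rcu_process_callbacks() above:

	spin_lock(&rcp->lock);		/* rcp is a struct rcu_ctrlblk *; lock guards cur/cpumask */
	rcp->next_pending = 1;		/* note that a new batch is waiting */
	rcu_start_batch(rcp);		/* may start the next grace period  */
	spin_unlock(&rcp->lock);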