Diffstat (limited to 'kernel/rcuclassic.c')
 -rw-r--r--  kernel/rcuclassic.c | 177
 1 file changed, 104 insertions(+), 73 deletions(-)

diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index aad93cdc9f68..d4271146a9bd 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
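This change collapses classic RCU's per-CPU curlist/curtail and nxtlist/nxttail pair into a single nxtlist with three tail pointers (nxttail[0..2]), assigns each callback its batch number at queueing time in the new __call_rcu() helper, and replaces the next_pending flag and its explicit memory barriers with a pending batch counter updated under rcp->lock.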
@@ -60,12 +60,14 @@ EXPORT_SYMBOL_GPL(rcu_lock_map);
 static struct rcu_ctrlblk rcu_ctrlblk = {
 	.cur = -300,
 	.completed = -300,
+	.pending = -300,
 	.lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
 	.cpumask = CPU_MASK_NONE,
 };
 static struct rcu_ctrlblk rcu_bh_ctrlblk = {
 	.cur = -300,
 	.completed = -300,
+	.pending = -300,
 	.lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
 	.cpumask = CPU_MASK_NONE,
 };
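The new .pending field records the number of the last batch some CPU has requested; rcu_start_batch() below tests it against cur instead of the old next_pending flag. For orientation, here is a sketch of the control block as this file uses it (field set reconstructed from the code in this patch; the authoritative definition lives in include/linux/rcuclassic.h):

struct rcu_ctrlblk {
	long		cur;		/* most recently started batch number */
	long		completed;	/* number of the last completed batch */
	long		pending;	/* number of the last requested batch (new) */
	int		signaled;	/* assumed: forced-quiescent-state flag */
	spinlock_t	lock;		/* protects cur/completed/pending/cpumask */
	cpumask_t	cpumask;	/* CPUs still owing a quiescent state */
};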
@@ -118,6 +120,43 @@ static inline void force_quiescent_state(struct rcu_data *rdp,
 }
 #endif
 
+static void __call_rcu(struct rcu_head *head, struct rcu_ctrlblk *rcp,
+				struct rcu_data *rdp)
+{
+	long batch;
+	smp_mb(); /* reads the most recently updated value of rcu->cur. */
+
+	/*
+	 * Determine the batch number of this callback.
+	 *
+	 * Use ACCESS_ONCE() to avoid the following problem when gcc
+	 * eliminates the local variable "batch" and emits code like this:
+	 *	1) rdp->batch = rcp->cur + 1	# gets the old value
+	 *	......
+	 *	2) rcu_batch_after(rcp->cur + 1, rdp->batch)	# gets the new value
+	 * in which case [*nxttail[0], *nxttail[1]) may contain callbacks
+	 * with batch # == rdp->batch; see the comment on struct rcu_data.
+	 */
+	batch = ACCESS_ONCE(rcp->cur) + 1;
+
+	if (rdp->nxtlist && rcu_batch_after(batch, rdp->batch)) {
+		/* process callbacks */
+		rdp->nxttail[0] = rdp->nxttail[1];
+		rdp->nxttail[1] = rdp->nxttail[2];
+		if (rcu_batch_after(batch - 1, rdp->batch))
+			rdp->nxttail[0] = rdp->nxttail[2];
+	}
+
+	rdp->batch = batch;
+	*rdp->nxttail[2] = head;
+	rdp->nxttail[2] = &head->next;
+
+	if (unlikely(++rdp->qlen > qhimark)) {
+		rdp->blimit = INT_MAX;
+		force_quiescent_state(rdp, &rcu_ctrlblk);
+	}
+}
+
 /**
  * call_rcu - Queue an RCU callback for invocation after a grace period.
  * @head: structure to be used for queueing the RCU updates.
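__call_rcu() relies on the segmented-list invariant that the companion rcuclassic.h change documents on struct rcu_data (the "comment on struct rcu_data" cited above). Paraphrased, with rdp->batch naming the batch of the newest entries:

/*
 * One list, three tails (paraphrase; exact wording is in the header):
 *
 *   [rdp->nxtlist,     *rdp->nxttail[0])   entries with batch # <  rdp->batch - 1
 *   [*rdp->nxttail[0], *rdp->nxttail[1])   entries with batch # == rdp->batch - 1
 *   [*rdp->nxttail[1], *rdp->nxttail[2])   entries with batch # == rdp->batch
 *
 * *rdp->nxttail[2] is always NULL; new callbacks are appended there.
 */

When a callback arrives for a newer batch, the rcu_batch_after() tests above slide the tails forward so that existing entries age into the earlier segments.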
@@ -133,18 +172,11 @@ void call_rcu(struct rcu_head *head,
 		void (*func)(struct rcu_head *rcu))
 {
 	unsigned long flags;
-	struct rcu_data *rdp;
 
 	head->func = func;
 	head->next = NULL;
 	local_irq_save(flags);
-	rdp = &__get_cpu_var(rcu_data);
-	*rdp->nxttail = head;
-	rdp->nxttail = &head->next;
-	if (unlikely(++rdp->qlen > qhimark)) {
-		rdp->blimit = INT_MAX;
-		force_quiescent_state(rdp, &rcu_ctrlblk);
-	}
+	__call_rcu(head, &rcu_ctrlblk, &__get_cpu_var(rcu_data));
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(call_rcu);
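For context, a typical caller embeds the rcu_head in its own structure and reclaims the enclosing object from the callback. A usage sketch (struct foo and foo_reclaim() are invented for illustration):

struct foo {
	int data;
	struct rcu_head rcu;
};

static void foo_reclaim(struct rcu_head *head)
{
	struct foo *fp = container_of(head, struct foo, rcu);

	kfree(fp);	/* safe: all pre-existing readers have finished */
}

/* after unlinking fp from every reader-visible structure: */
call_rcu(&fp->rcu, foo_reclaim);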
@@ -169,20 +201,11 @@ void call_rcu_bh(struct rcu_head *head,
 		void (*func)(struct rcu_head *rcu))
 {
 	unsigned long flags;
-	struct rcu_data *rdp;
 
 	head->func = func;
 	head->next = NULL;
 	local_irq_save(flags);
-	rdp = &__get_cpu_var(rcu_bh_data);
-	*rdp->nxttail = head;
-	rdp->nxttail = &head->next;
-
-	if (unlikely(++rdp->qlen > qhimark)) {
-		rdp->blimit = INT_MAX;
-		force_quiescent_state(rdp, &rcu_bh_ctrlblk);
-	}
-
+	__call_rcu(head, &rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
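call_rcu_bh() pairs with the softirq-flavored read side, for which completion of a softirq handler counts as a quiescent state. A reader sketch (gp and do_something() are hypothetical, reusing struct foo from above):

	struct foo *p;

	rcu_read_lock_bh();
	p = rcu_dereference(gp);	/* gp: hypothetical global pointer */
	if (p)
		do_something(p->data);	/* do_something(): hypothetical */
	rcu_read_unlock_bh();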
@@ -211,12 +234,6 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
 static inline void raise_rcu_softirq(void)
 {
 	raise_softirq(RCU_SOFTIRQ);
-	/*
-	 * The smp_mb() here is required to ensure that this cpu's
-	 * __rcu_process_callbacks() reads the most recently updated
-	 * value of rcu->cur.
-	 */
-	smp_mb();
 }
 
 /*
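The barrier can be dropped here because the ordering duty has moved to the enqueue side: __call_rcu() above executes smp_mb() before sampling rcp->cur, so each callback is assigned its batch number against an up-to-date cur at queueing time instead of relying on the softirq path to refresh it.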
@@ -276,14 +293,8 @@ static void rcu_do_batch(struct rcu_data *rdp)
  */
 static void rcu_start_batch(struct rcu_ctrlblk *rcp)
 {
-	if (rcp->next_pending &&
+	if (rcp->cur != rcp->pending &&
 			rcp->completed == rcp->cur) {
-		rcp->next_pending = 0;
-		/*
-		 * next_pending == 0 must be visible in
-		 * __rcu_process_callbacks() before it can see new value of cur.
-		 */
-		smp_wmb();
 		rcp->cur++;
 
 		/*
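The smp_wmb() that ordered next_pending against cur is no longer needed because both sides of the handshake now run under rcp->lock: pending is only advanced with the lock held in __rcu_process_callbacks() below, and rcu_start_batch() is called from the same critical section:

	spin_lock(&rcp->lock);
	if (rcu_batch_after(rdp->batch, rcp->pending)) {
		rcp->pending = rdp->batch;	/* request a new batch... */
		rcu_start_batch(rcp);		/* ...and start it if idle */
	}
	spin_unlock(&rcp->lock);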
@@ -364,13 +375,15 @@ static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
  * which is dead and hence not processing interrupts.
  */
 static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
-				struct rcu_head **tail)
+				struct rcu_head **tail, long batch)
 {
-	local_irq_disable();
-	*this_rdp->nxttail = list;
-	if (list)
-		this_rdp->nxttail = tail;
-	local_irq_enable();
+	if (list) {
+		local_irq_disable();
+		this_rdp->batch = batch;
+		*this_rdp->nxttail[2] = list;
+		this_rdp->nxttail[2] = tail;
+		local_irq_enable();
+	}
 }
 
 static void __rcu_offline_cpu(struct rcu_data *this_rdp,
@@ -384,9 +397,9 @@ static void __rcu_offline_cpu(struct rcu_data *this_rdp,
 	if (rcp->cur != rcp->completed)
 		cpu_quiet(rdp->cpu, rcp);
 	spin_unlock_bh(&rcp->lock);
-	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
-	rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
-	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
+	/* spin_lock implies smp_mb() */
+	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail, rcp->cur + 1);
+	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail[2], rcp->cur + 1);
 
 	local_irq_disable();
 	this_rdp->qlen += rdp->qlen;
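Note that the dying CPU's donelist and nxtlist are both appended to the survivor's newest segment with batch number rcp->cur + 1, which is why the separate curlist move is gone. Already-completed callbacks therefore wait out one extra grace period on the surviving CPU; that is conservative but safe, since RCU only guarantees a callback runs no earlier than the end of its grace period.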
@@ -420,37 +433,45 @@ static void rcu_offline_cpu(int cpu)
 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
 					struct rcu_data *rdp)
 {
-	if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) {
-		*rdp->donetail = rdp->curlist;
-		rdp->donetail = rdp->curtail;
-		rdp->curlist = NULL;
-		rdp->curtail = &rdp->curlist;
-	}
-
-	if (rdp->nxtlist && !rdp->curlist) {
+	if (rdp->nxtlist) {
 		local_irq_disable();
-		rdp->curlist = rdp->nxtlist;
-		rdp->curtail = rdp->nxttail;
-		rdp->nxtlist = NULL;
-		rdp->nxttail = &rdp->nxtlist;
-		local_irq_enable();
 
 		/*
-		 * start the next batch of callbacks
+		 * move all grace-period-completed entries into
+		 * [rdp->nxtlist, *rdp->nxttail[0]) temporarily
 		 */
+		if (!rcu_batch_before(rcp->completed, rdp->batch))
+			rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2];
+		else if (!rcu_batch_before(rcp->completed, rdp->batch - 1))
+			rdp->nxttail[0] = rdp->nxttail[1];
 
-		/* determine batch number */
-		rdp->batch = rcp->cur + 1;
-		/* see the comment and corresponding wmb() in
-		 * the rcu_start_batch()
-		 */
-		smp_rmb();
+		/*
+		 * the grace period for entries in
+		 * [rdp->nxtlist, *rdp->nxttail[0]) has completed;
+		 * move these entries to donelist
+		 */
+		if (rdp->nxttail[0] != &rdp->nxtlist) {
+			*rdp->donetail = rdp->nxtlist;
+			rdp->donetail = rdp->nxttail[0];
+			rdp->nxtlist = *rdp->nxttail[0];
+			*rdp->donetail = NULL;
+
+			if (rdp->nxttail[1] == rdp->nxttail[0])
+				rdp->nxttail[1] = &rdp->nxtlist;
+			if (rdp->nxttail[2] == rdp->nxttail[0])
+				rdp->nxttail[2] = &rdp->nxtlist;
+			rdp->nxttail[0] = &rdp->nxtlist;
+		}
+
+		local_irq_enable();
 
-		if (!rcp->next_pending) {
+		if (rcu_batch_after(rdp->batch, rcp->pending)) {
 			/* and start it/schedule start if it's a new batch */
 			spin_lock(&rcp->lock);
-			rcp->next_pending = 1;
-			rcu_start_batch(rcp);
+			if (rcu_batch_after(rdp->batch, rcp->pending)) {
+				rcp->pending = rdp->batch;
+				rcu_start_batch(rcp);
+			}
 			spin_unlock(&rcp->lock);
 		}
 	}
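The tail gymnastics are easier to follow in isolation. Below is a hypothetical stand-alone C sketch (user-space, invented names, not kernel code) that mirrors the queueing step of __call_rcu() and the segment-advancing step above; batch_before()/batch_after() are assumed to match the signed-difference helpers rcu_batch_before()/rcu_batch_after() in rcuclassic.h:

/* Hypothetical user-space model of the three-tail callback list. */
#include <stdio.h>

struct head { struct head *next; };

struct rdata {
	long batch;		/* batch # of the newest queued entries */
	struct head *nxtlist;	/* pending callbacks, oldest first */
	struct head **nxttail[3];
	struct head *donelist;	/* callbacks whose grace period has ended */
	struct head **donetail;
};

/* Assumed equivalents of rcu_batch_before()/rcu_batch_after(). */
static int batch_before(long a, long b) { return (a - b) < 0; }
static int batch_after(long a, long b)  { return (a - b) > 0; }

static void init(struct rdata *rdp)
{
	rdp->batch = 0;
	rdp->nxtlist = NULL;
	rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2] = &rdp->nxtlist;
	rdp->donelist = NULL;
	rdp->donetail = &rdp->donelist;
}

/* Mirrors the queueing logic of __call_rcu(); cur plays rcp->cur. */
static void enqueue(struct rdata *rdp, struct head *h, long cur)
{
	long batch = cur + 1;

	if (rdp->nxtlist && batch_after(batch, rdp->batch)) {
		/* age existing entries into the earlier segments */
		rdp->nxttail[0] = rdp->nxttail[1];
		rdp->nxttail[1] = rdp->nxttail[2];
		if (batch_after(batch - 1, rdp->batch))
			rdp->nxttail[0] = rdp->nxttail[2];
	}
	rdp->batch = batch;
	h->next = NULL;
	*rdp->nxttail[2] = h;
	rdp->nxttail[2] = &h->next;
}

/* Mirrors the segment-advancing logic of __rcu_process_callbacks(). */
static void advance(struct rdata *rdp, long completed)
{
	if (!rdp->nxtlist)
		return;
	if (!batch_before(completed, rdp->batch))
		rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2];
	else if (!batch_before(completed, rdp->batch - 1))
		rdp->nxttail[0] = rdp->nxttail[1];

	if (rdp->nxttail[0] != &rdp->nxtlist) {
		/* splice [nxtlist, *nxttail[0]) onto donelist */
		*rdp->donetail = rdp->nxtlist;
		rdp->donetail = rdp->nxttail[0];
		rdp->nxtlist = *rdp->nxttail[0];
		*rdp->donetail = NULL;

		if (rdp->nxttail[1] == rdp->nxttail[0])
			rdp->nxttail[1] = &rdp->nxtlist;
		if (rdp->nxttail[2] == rdp->nxttail[0])
			rdp->nxttail[2] = &rdp->nxtlist;
		rdp->nxttail[0] = &rdp->nxtlist;
	}
}

int main(void)
{
	struct rdata rd;
	struct head a, b, c, *p;
	int n = 0;

	init(&rd);
	enqueue(&rd, &a, 0);	/* a -> batch 1 */
	enqueue(&rd, &b, 0);	/* b -> batch 1 */
	enqueue(&rd, &c, 1);	/* c -> batch 2; a, b age one segment */
	advance(&rd, 1);	/* batch 1 complete: a, b move to donelist */

	for (p = rd.donelist; p; p = p->next)
		n++;
	printf("done: %d (expect 2), still pending: %s\n",
	       n, rd.nxtlist ? "yes" : "no");
	return 0;
}

Compiled and run, it should print "done: 2 (expect 2), still pending: yes": the two batch-1 callbacks age through the segments and land on donelist once batch 1 completes, while the batch-2 callback stays queued.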
@@ -468,15 +489,26 @@ static void rcu_process_callbacks(struct softirq_action *unused)
 
 static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
 {
-	/* This cpu has pending rcu entries and the grace period
-	 * for them has completed.
-	 */
-	if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch))
-		return 1;
+	if (rdp->nxtlist) {
+		/*
+		 * This cpu has pending rcu entries and the grace period
+		 * for them has completed.
+		 */
+		if (!rcu_batch_before(rcp->completed, rdp->batch))
+			return 1;
+		if (!rcu_batch_before(rcp->completed, rdp->batch - 1) &&
+				rdp->nxttail[0] != rdp->nxttail[1])
+			return 1;
+		if (rdp->nxttail[0] != &rdp->nxtlist)
+			return 1;
 
-	/* This cpu has no pending entries, but there are new entries */
-	if (!rdp->curlist && rdp->nxtlist)
-		return 1;
+		/*
+		 * This cpu has pending rcu entries and the new batch
+		 * for them hasn't been started, nor scheduled to start
+		 */
+		if (rcu_batch_after(rdp->batch, rcp->pending))
+			return 1;
+	}
 
 	/* This cpu has finished callbacks to invoke */
 	if (rdp->donelist)
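The first three early returns map onto the segments: the whole list is ready (completed has reached rdp->batch); the batch - 1 segment is non-empty and ready; or the oldest segment is non-empty, whose entries are by construction at least two batches old. The last check fires when this CPU's newest batch has not yet been propagated to rcp->pending, so the softirq must run to request it.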
@@ -512,7 +544,7 @@ int rcu_needs_cpu(int cpu)
 	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
 	struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);
 
-	return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu));
+	return !!rdp->nxtlist || !!rdp_bh->nxtlist || rcu_pending(cpu);
 }
 
 void rcu_check_callbacks(int cpu, int user)
@@ -559,8 +591,7 @@ static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
 				struct rcu_data *rdp)
 {
 	memset(rdp, 0, sizeof(*rdp));
-	rdp->curtail = &rdp->curlist;
-	rdp->nxttail = &rdp->nxtlist;
+	rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2] = &rdp->nxtlist;
 	rdp->donetail = &rdp->donelist;
 	rdp->quiescbatch = rcp->completed;
 	rdp->qs_pending = 0;
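After initialization, nxtlist is NULL and all three tails point at it, so every segment is empty; the first __call_rcu() on the CPU then appends at *nxttail[2] and establishes rdp->batch.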