Diffstat (limited to 'kernel/rcupdate.c')

-rw-r--r--	kernel/rcupdate.c	99
1 files changed, 69 insertions, 30 deletions

diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 0cf8146bd585..13458bbaa1be 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -47,15 +47,16 @@
 #include <linux/notifier.h>
 #include <linux/rcupdate.h>
 #include <linux/cpu.h>
+#include <linux/mutex.h>
 
 /* Definition for rcupdate control block. */
-struct rcu_ctrlblk rcu_ctrlblk = {
+static struct rcu_ctrlblk rcu_ctrlblk = {
 	.cur = -300,
 	.completed = -300,
 	.lock = SPIN_LOCK_UNLOCKED,
 	.cpumask = CPU_MASK_NONE,
 };
-struct rcu_ctrlblk rcu_bh_ctrlblk = {
+static struct rcu_ctrlblk rcu_bh_ctrlblk = {
 	.cur = -300,
 	.completed = -300,
 	.lock = SPIN_LOCK_UNLOCKED,
@@ -67,7 +68,43 @@ DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
 
 /* Fake initialization required by compiler */
 static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
-static int maxbatch = 10000;
+static int blimit = 10;
+static int qhimark = 10000;
+static int qlowmark = 100;
+#ifdef CONFIG_SMP
+static int rsinterval = 1000;
+#endif
+
+static atomic_t rcu_barrier_cpu_count;
+static DEFINE_MUTEX(rcu_barrier_mutex);
+static struct completion rcu_barrier_completion;
+
+#ifdef CONFIG_SMP
+static void force_quiescent_state(struct rcu_data *rdp,
+			struct rcu_ctrlblk *rcp)
+{
+	int cpu;
+	cpumask_t cpumask;
+	set_need_resched();
+	if (unlikely(rdp->qlen - rdp->last_rs_qlen > rsinterval)) {
+		rdp->last_rs_qlen = rdp->qlen;
+		/*
+		 * Don't send IPI to itself. With irqs disabled,
+		 * rdp->cpu is the current cpu.
+		 */
+		cpumask = rcp->cpumask;
+		cpu_clear(rdp->cpu, cpumask);
+		for_each_cpu_mask(cpu, cpumask)
+			smp_send_reschedule(cpu);
+	}
+}
+#else
+static inline void force_quiescent_state(struct rcu_data *rdp,
+			struct rcu_ctrlblk *rcp)
+{
+	set_need_resched();
+}
+#endif
 
 /**
  * call_rcu - Queue an RCU callback for invocation after a grace period.
@@ -92,17 +129,13 @@ void fastcall call_rcu(struct rcu_head *head,
 	rdp = &__get_cpu_var(rcu_data);
 	*rdp->nxttail = head;
 	rdp->nxttail = &head->next;
-
-	if (unlikely(++rdp->count > 10000))
-		set_need_resched();
-
+	if (unlikely(++rdp->qlen > qhimark)) {
+		rdp->blimit = INT_MAX;
+		force_quiescent_state(rdp, &rcu_ctrlblk);
+	}
 	local_irq_restore(flags);
 }
 
-static atomic_t rcu_barrier_cpu_count;
-static struct semaphore rcu_barrier_sema;
-static struct completion rcu_barrier_completion;
-
 /**
  * call_rcu_bh - Queue an RCU for invocation after a quicker grace period.
  * @head: structure to be used for queueing the RCU updates.
@@ -131,12 +164,12 @@ void fastcall call_rcu_bh(struct rcu_head *head,
 	rdp = &__get_cpu_var(rcu_bh_data);
 	*rdp->nxttail = head;
 	rdp->nxttail = &head->next;
-	rdp->count++;
-/*
- *  Should we directly call rcu_do_batch() here ?
- *  if (unlikely(rdp->count > 10000))
- *      rcu_do_batch(rdp);
- */
+
+	if (unlikely(++rdp->qlen > qhimark)) {
+		rdp->blimit = INT_MAX;
+		force_quiescent_state(rdp, &rcu_bh_ctrlblk);
+	}
+
 	local_irq_restore(flags);
 }
 
@@ -175,13 +208,13 @@ static void rcu_barrier_func(void *notused)
 void rcu_barrier(void)
 {
 	BUG_ON(in_interrupt());
-	/* Take cpucontrol semaphore to protect against CPU hotplug */
-	down(&rcu_barrier_sema);
+	/* Take cpucontrol mutex to protect against CPU hotplug */
+	mutex_lock(&rcu_barrier_mutex);
 	init_completion(&rcu_barrier_completion);
 	atomic_set(&rcu_barrier_cpu_count, 0);
 	on_each_cpu(rcu_barrier_func, NULL, 0, 1);
 	wait_for_completion(&rcu_barrier_completion);
-	up(&rcu_barrier_sema);
+	mutex_unlock(&rcu_barrier_mutex);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier);
 
@@ -199,10 +232,12 @@ static void rcu_do_batch(struct rcu_data *rdp)
 		next = rdp->donelist = list->next;
 		list->func(list);
 		list = next;
-		rdp->count--;
-		if (++count >= maxbatch)
+		rdp->qlen--;
+		if (++count >= rdp->blimit)
 			break;
 	}
+	if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
+		rdp->blimit = blimit;
 	if (!rdp->donelist)
 		rdp->donetail = &rdp->donelist;
 	else
@@ -381,8 +416,8 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
 		rdp->curtail = &rdp->curlist;
 	}
 
-	local_irq_disable();
 	if (rdp->nxtlist && !rdp->curlist) {
+		local_irq_disable();
 		rdp->curlist = rdp->nxtlist;
 		rdp->curtail = rdp->nxttail;
 		rdp->nxtlist = NULL;
@@ -407,9 +442,8 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
 			rcu_start_batch(rcp);
 			spin_unlock(&rcp->lock);
 		}
-	} else {
-		local_irq_enable();
 	}
+
 	rcu_check_quiescent_state(rcp, rdp);
 	if (rdp->donelist)
 		rcu_do_batch(rdp);
@@ -473,6 +507,7 @@ static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
 	rdp->quiescbatch = rcp->completed;
 	rdp->qs_pending = 0;
 	rdp->cpu = cpu;
+	rdp->blimit = blimit;
 }
 
 static void __devinit rcu_online_cpu(int cpu)
@@ -514,7 +549,6 @@ static struct notifier_block __devinitdata rcu_nb = {
  */
 void __init rcu_init(void)
 {
-	sema_init(&rcu_barrier_sema, 1);
 	rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
 			(void *)(long)smp_processor_id());
 	/* Register notifier for non-boot CPUs */
@@ -567,9 +601,14 @@ void synchronize_kernel(void)
 	synchronize_rcu();
 }
 
-module_param(maxbatch, int, 0);
+module_param(blimit, int, 0);
+module_param(qhimark, int, 0);
+module_param(qlowmark, int, 0);
+#ifdef CONFIG_SMP
+module_param(rsinterval, int, 0);
+#endif
 EXPORT_SYMBOL_GPL(rcu_batches_completed);
-EXPORT_SYMBOL(call_rcu);  /* WARNING: GPL-only in April 2006. */
-EXPORT_SYMBOL(call_rcu_bh);  /* WARNING: GPL-only in April 2006. */
+EXPORT_SYMBOL_GPL_FUTURE(call_rcu);  /* WARNING: GPL-only in April 2006. */
+EXPORT_SYMBOL_GPL_FUTURE(call_rcu_bh);  /* WARNING: GPL-only in April 2006. */
 EXPORT_SYMBOL_GPL(synchronize_rcu);
-EXPORT_SYMBOL(synchronize_kernel);  /* WARNING: GPL-only in April 2006. */
+EXPORT_SYMBOL_GPL_FUTURE(synchronize_kernel);  /* WARNING: GPL-only in April 2006. */
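The heart of the patch is the hysteresis between qhimark and qlowmark: when a CPU's callback queue floods past qhimark, call_rcu()/call_rcu_bh() lift that CPU's batch limit to INT_MAX and (on SMP) prod the other CPUs with rate-limited reschedule IPIs via force_quiescent_state(); rcu_do_batch() then drops back to the gentle blimit once the queue has drained below qlowmark. The userspace sketch below reproduces just that control logic; struct rcu_data_sim, enqueue_callback() and do_batch() are hypothetical stand-ins for the kernel's rcu_data, call_rcu() and rcu_do_batch(), with the queue and grace-period machinery simulated away.

/* Standalone sketch of the blimit/qhimark/qlowmark hysteresis. */
#include <limits.h>
#include <stdio.h>

static int blimit   = 10;    /* default callbacks invoked per batch  */
static int qhimark  = 10000; /* queue length that lifts the limit    */
static int qlowmark = 100;   /* queue length that restores the limit */

struct rcu_data_sim {
	int qlen;   /* callbacks waiting for a grace period */
	int blimit; /* current per-batch invocation limit   */
};

/* Mirrors the enqueue-side check the patch adds to call_rcu(). */
static void enqueue_callback(struct rcu_data_sim *rdp)
{
	/* Flood: stop throttling batches; the kernel additionally
	 * calls force_quiescent_state() at this point. */
	if (++rdp->qlen > qhimark)
		rdp->blimit = INT_MAX;
}

/* Mirrors rcu_do_batch(): invoke at most rdp->blimit callbacks. */
static void do_batch(struct rcu_data_sim *rdp)
{
	int count = 0;

	while (rdp->qlen > 0) {
		rdp->qlen--;		/* "invoke" one callback */
		if (++count >= rdp->blimit)
			break;
	}
	/* Flood over: fall back to the gentle per-batch limit. */
	if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
		rdp->blimit = blimit;
}

int main(void)
{
	struct rcu_data_sim rdp = { .qlen = 0, .blimit = blimit };
	int i;

	for (i = 0; i < 20000; i++)
		enqueue_callback(&rdp);
	printf("after flood: qlen=%d blimit=%d\n", rdp.qlen, rdp.blimit);
	do_batch(&rdp);
	printf("after batch: qlen=%d blimit=%d\n", rdp.qlen, rdp.blimit);
	return 0;
}

Compiled with any C compiler, this prints blimit as INT_MAX while the backlog stands and 10 again once the queue has drained, which is the escalate-then-relax behaviour the diff wires into call_rcu(), call_rcu_bh() and rcu_do_batch().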

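A note on the new knobs: blimit, qhimark and qlowmark (plus rsinterval on SMP) replace the single maxbatch parameter, and all are declared with module_param(..., 0), i.e. permission 0, so they get no sysfs entry and are effectively settable only at boot or load time. Since rcupdate.c is built in, setting them would presumably use the built-in parameter prefix, e.g. rcupdate.blimit=20 rcupdate.qhimark=20000 on the kernel command line; that spelling follows the usual KBUILD_MODNAME prefixing convention and is an assumption, not something stated in the diff.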