Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c | 280
1 file changed, 183 insertions(+), 97 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 7717b95c2027..6b11b07cfe7f 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -35,6 +35,7 @@
 #include <linux/rcupdate.h>
 #include <linux/interrupt.h>
 #include <linux/sched.h>
+#include <linux/nmi.h>
 #include <asm/atomic.h>
 #include <linux/bitops.h>
 #include <linux/module.h>
@@ -46,6 +47,8 @@
 #include <linux/mutex.h>
 #include <linux/time.h>
 
+#include "rcutree.h"
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 static struct lock_class_key rcu_lock_key;
 struct lockdep_map rcu_lock_map =
@@ -72,30 +75,59 @@ EXPORT_SYMBOL_GPL(rcu_lock_map);
 	.n_force_qs_ngp = 0, \
 }
 
-struct rcu_state rcu_state = RCU_STATE_INITIALIZER(rcu_state);
-DEFINE_PER_CPU(struct rcu_data, rcu_data);
+struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state);
+DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
 
 struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
+extern long rcu_batches_completed_sched(void);
+static struct rcu_node *rcu_get_root(struct rcu_state *rsp);
+static void cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp,
+			  struct rcu_node *rnp, unsigned long flags);
+static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags);
+#ifdef CONFIG_HOTPLUG_CPU
+static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp);
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+static void __rcu_process_callbacks(struct rcu_state *rsp,
+				    struct rcu_data *rdp);
+static void __call_rcu(struct rcu_head *head,
+		       void (*func)(struct rcu_head *rcu),
+		       struct rcu_state *rsp);
+static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp);
+static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_state *rsp,
+					   int preemptable);
+
+#include "rcutree_plugin.h"
+
 /*
- * Increment the quiescent state counter.
- * The counter is a bit degenerated: We do not need to know
+ * Note a quiescent state.  Because we do not need to know
  * how many quiescent states passed, just if there was at least
- * one since the start of the grace period. Thus just a flag.
+ * one since the start of the grace period, this just sets a flag.
  */
-void rcu_qsctr_inc(int cpu)
+void rcu_sched_qs(int cpu)
 {
-	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+	unsigned long flags;
+	struct rcu_data *rdp;
+
+	local_irq_save(flags);
+	rdp = &per_cpu(rcu_sched_data, cpu);
 	rdp->passed_quiesc = 1;
 	rdp->passed_quiesc_completed = rdp->completed;
+	rcu_preempt_qs(cpu);
+	local_irq_restore(flags);
 }
 
-void rcu_bh_qsctr_inc(int cpu)
+void rcu_bh_qs(int cpu)
 {
-	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
+	unsigned long flags;
+	struct rcu_data *rdp;
+
+	local_irq_save(flags);
+	rdp = &per_cpu(rcu_bh_data, cpu);
 	rdp->passed_quiesc = 1;
 	rdp->passed_quiesc_completed = rdp->completed;
+	local_irq_restore(flags);
 }
 
 #ifdef CONFIG_NO_HZ
@@ -110,15 +142,16 @@ static int qhimark = 10000;	/* If this many pending, ignore blimit. */
 static int qlowmark = 100;	/* Once only this many pending, use blimit. */
 
 static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
+static int rcu_pending(int cpu);
 
 /*
- * Return the number of RCU batches processed thus far for debug & stats.
+ * Return the number of RCU-sched batches processed thus far for debug & stats.
  */
-long rcu_batches_completed(void)
+long rcu_batches_completed_sched(void)
 {
-	return rcu_state.completed;
+	return rcu_sched_state.completed;
 }
-EXPORT_SYMBOL_GPL(rcu_batches_completed);
+EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
 
 /*
  * Return the number of RCU BH batches processed thus far for debug & stats.
@@ -181,6 +214,10 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
 		return 1;
 	}
 
+	/* If preemptable RCU, no point in sending reschedule IPI. */
+	if (rdp->preemptable)
+		return 0;
+
 	/* The CPU is online, so send it a reschedule IPI. */
 	if (rdp->cpu != smp_processor_id())
 		smp_send_reschedule(rdp->cpu);
@@ -193,7 +230,6 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
 #endif /* #ifdef CONFIG_SMP */
 
 #ifdef CONFIG_NO_HZ
-static DEFINE_RATELIMIT_STATE(rcu_rs, 10 * HZ, 5);
 
 /**
  * rcu_enter_nohz - inform RCU that current CPU is entering nohz
@@ -213,7 +249,7 @@ void rcu_enter_nohz(void)
 	rdtp = &__get_cpu_var(rcu_dynticks);
 	rdtp->dynticks++;
 	rdtp->dynticks_nesting--;
-	WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs);
+	WARN_ON_ONCE(rdtp->dynticks & 0x1);
 	local_irq_restore(flags);
 }
 
@@ -232,7 +268,7 @@ void rcu_exit_nohz(void)
 	rdtp = &__get_cpu_var(rcu_dynticks);
 	rdtp->dynticks++;
 	rdtp->dynticks_nesting++;
-	WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs);
+	WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
 	local_irq_restore(flags);
 	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 }
@@ -251,7 +287,7 @@ void rcu_nmi_enter(void)
 	if (rdtp->dynticks & 0x1)
 		return;
 	rdtp->dynticks_nmi++;
-	WARN_ON_RATELIMIT(!(rdtp->dynticks_nmi & 0x1), &rcu_rs);
+	WARN_ON_ONCE(!(rdtp->dynticks_nmi & 0x1));
 	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 }
 
@@ -270,7 +306,7 @@ void rcu_nmi_exit(void)
 		return;
 	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
 	rdtp->dynticks_nmi++;
-	WARN_ON_RATELIMIT(rdtp->dynticks_nmi & 0x1, &rcu_rs);
+	WARN_ON_ONCE(rdtp->dynticks_nmi & 0x1);
 }
 
 /**
@@ -286,7 +322,7 @@ void rcu_irq_enter(void)
 	if (rdtp->dynticks_nesting++)
 		return;
 	rdtp->dynticks++;
-	WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs);
+	WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
 	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 }
 
@@ -305,10 +341,10 @@ void rcu_irq_exit(void)
 		return;
 	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
 	rdtp->dynticks++;
-	WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs);
+	WARN_ON_ONCE(rdtp->dynticks & 0x1);
 
 	/* If the interrupt queued a callback, get out of dyntick mode. */
-	if (__get_cpu_var(rcu_data).nxtlist ||
+	if (__get_cpu_var(rcu_sched_data).nxtlist ||
 	    __get_cpu_var(rcu_bh_data).nxtlist)
 		set_need_resched();
 }
@@ -461,6 +497,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 
 	printk(KERN_ERR "INFO: RCU detected CPU stalls:");
 	for (; rnp_cur < rnp_end; rnp_cur++) {
+		rcu_print_task_stall(rnp);
 		if (rnp_cur->qsmask == 0)
 			continue;
 		for (cpu = 0; cpu <= rnp_cur->grphi - rnp_cur->grplo; cpu++)
@@ -469,6 +506,8 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	}
 	printk(" (detected by %d, t=%ld jiffies)\n",
 	       smp_processor_id(), (long)(jiffies - rsp->gp_start));
+	trigger_all_cpu_backtrace();
+
 	force_quiescent_state(rsp, 0);  /* Kick them all. */
 }
 
@@ -479,12 +518,14 @@ static void print_cpu_stall(struct rcu_state *rsp)
 
 	printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n",
 			smp_processor_id(), jiffies - rsp->gp_start);
-	dump_stack();
+	trigger_all_cpu_backtrace();
+
 	spin_lock_irqsave(&rnp->lock, flags);
 	if ((long)(jiffies - rsp->jiffies_stall) >= 0)
 		rsp->jiffies_stall =
 			jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
 	spin_unlock_irqrestore(&rnp->lock, flags);
+
 	set_need_resched();  /* kick ourselves to get things going. */
 }
 
@@ -674,6 +715,19 @@ rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
 }
 
 /*
+ * Clean up after the prior grace period and let rcu_start_gp() start up
+ * the next grace period if one is needed.  Note that the caller must
+ * hold rnp->lock, as required by rcu_start_gp(), which will release it.
+ */
+static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
+	__releases(rnp->lock)
+{
+	rsp->completed = rsp->gpnum;
+	rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
+	rcu_start_gp(rsp, flags);  /* releases root node's rnp->lock. */
+}
+
+/*
  * Similar to cpu_quiet(), for which it is a helper function.  Allows
  * a group of CPUs to be quieted at one go, though all the CPUs in the
  * group must be represented by the same leaf rcu_node structure.
@@ -694,7 +748,7 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
 		return;
 	}
 	rnp->qsmask &= ~mask;
-	if (rnp->qsmask != 0) {
+	if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
 
 		/* Other bits still set at this level, so done. */
 		spin_unlock_irqrestore(&rnp->lock, flags);
@@ -714,14 +768,10 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
 
 	/*
 	 * Get here if we are the last CPU to pass through a quiescent
-	 * state for this grace period.  Clean up and let rcu_start_gp()
-	 * start up the next grace period if one is needed.  Note that
-	 * we still hold rnp->lock, as required by rcu_start_gp(), which
-	 * will release it.
+	 * state for this grace period.  Invoke cpu_quiet_msk_finish()
+	 * to clean up and start the next grace period if one is needed.
 	 */
-	rsp->completed = rsp->gpnum;
-	rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
-	rcu_start_gp(rsp, flags);  /* releases rnp->lock. */
+	cpu_quiet_msk_finish(rsp, flags); /* releases rnp->lock. */
 }
 
 /*
@@ -828,11 +878,12 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 		spin_lock(&rnp->lock);		/* irqs already disabled. */
 		rnp->qsmaskinit &= ~mask;
 		if (rnp->qsmaskinit != 0) {
-			spin_unlock(&rnp->lock); /* irqs already disabled. */
+			spin_unlock(&rnp->lock); /* irqs remain disabled. */
 			break;
 		}
+		rcu_preempt_offline_tasks(rsp, rnp);
 		mask = rnp->grpmask;
-		spin_unlock(&rnp->lock);	/* irqs already disabled. */
+		spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 		rnp = rnp->parent;
 	} while (rnp != NULL);
 	lastcomp = rsp->completed;
@@ -845,7 +896,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 	/*
 	 * Move callbacks from the outgoing CPU to the running CPU.
 	 * Note that the outgoing CPU is now quiscent, so it is now
-	 * (uncharacteristically) safe to access it rcu_data structure.
+	 * (uncharacteristically) safe to access its rcu_data structure.
 	 * Note also that we must carefully retain the order of the
 	 * outgoing CPU's callbacks in order for rcu_barrier() to work
 	 * correctly.  Finally, note that we start all the callbacks
@@ -876,8 +927,9 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
  */
 static void rcu_offline_cpu(int cpu)
 {
-	__rcu_offline_cpu(cpu, &rcu_state);
+	__rcu_offline_cpu(cpu, &rcu_sched_state);
 	__rcu_offline_cpu(cpu, &rcu_bh_state);
+	rcu_preempt_offline_cpu(cpu);
 }
 
 #else /* #ifdef CONFIG_HOTPLUG_CPU */
@@ -963,6 +1015,8 @@ static void rcu_do_batch(struct rcu_data *rdp)
  */
 void rcu_check_callbacks(int cpu, int user)
 {
+	if (!rcu_pending(cpu))
+		return; /* if nothing for RCU to do. */
 	if (user ||
 	    (idle_cpu(cpu) && rcu_scheduler_active &&
 	     !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
@@ -971,17 +1025,16 @@ void rcu_check_callbacks(int cpu, int user)
 		 * Get here if this CPU took its interrupt from user
 		 * mode or from the idle loop, and if this is not a
 		 * nested interrupt.  In this case, the CPU is in
-		 * a quiescent state, so count it.
+		 * a quiescent state, so note it.
 		 *
 		 * No memory barrier is required here because both
-		 * rcu_qsctr_inc() and rcu_bh_qsctr_inc() reference
-		 * only CPU-local variables that other CPUs neither
-		 * access nor modify, at least not while the corresponding
-		 * CPU is online.
+		 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
+		 * variables that other CPUs neither access nor modify,
+		 * at least not while the corresponding CPU is online.
 		 */
 
-		rcu_qsctr_inc(cpu);
-		rcu_bh_qsctr_inc(cpu);
+		rcu_sched_qs(cpu);
+		rcu_bh_qs(cpu);
 
 	} else if (!in_softirq()) {
 
@@ -989,11 +1042,12 @@ void rcu_check_callbacks(int cpu, int user)
 		 * Get here if this CPU did not take its interrupt from
 		 * softirq, in other words, if it is not interrupting
 		 * a rcu_bh read-side critical section.  This is an _bh
-		 * critical section, so count it.
+		 * critical section, so note it.
 		 */
 
-		rcu_bh_qsctr_inc(cpu);
+		rcu_bh_qs(cpu);
 	}
+	rcu_preempt_check_callbacks(cpu);
 	raise_softirq(RCU_SOFTIRQ);
 }
 
@@ -1132,6 +1186,8 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 {
 	unsigned long flags;
 
+	WARN_ON_ONCE(rdp->beenonline == 0);
+
 	/*
 	 * If an RCU GP has gone long enough, go check for dyntick
 	 * idle CPUs and, if needed, send resched IPIs.
@@ -1170,8 +1226,10 @@ static void rcu_process_callbacks(struct softirq_action *unused)
 	 */
 	smp_mb(); /* See above block comment. */
 
-	__rcu_process_callbacks(&rcu_state, &__get_cpu_var(rcu_data));
+	__rcu_process_callbacks(&rcu_sched_state,
+				&__get_cpu_var(rcu_sched_data));
 	__rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
+	rcu_preempt_process_callbacks();
 
 	/*
 	 * Memory references from any later RCU read-side critical sections
@@ -1227,13 +1285,13 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 }
 
 /*
- * Queue an RCU callback for invocation after a grace period.
+ * Queue an RCU-sched callback for invocation after a grace period.
  */
-void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 {
-	__call_rcu(head, func, &rcu_state);
+	__call_rcu(head, func, &rcu_sched_state);
 }
-EXPORT_SYMBOL_GPL(call_rcu);
+EXPORT_SYMBOL_GPL(call_rcu_sched);
 
 /*
  * Queue an RCU for invocation after a quicker grace period.
@@ -1305,10 +1363,11 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 * by the current CPU, returning 1 if so.  This function is part of the
 * RCU implementation; it is -not- an exported member of the RCU API.
 */
-int rcu_pending(int cpu)
+static int rcu_pending(int cpu)
 {
-	return __rcu_pending(&rcu_state, &per_cpu(rcu_data, cpu)) ||
-	       __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu));
+	return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) ||
+	       __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)) ||
+	       rcu_preempt_pending(cpu);
 }
 
 /*
@@ -1320,27 +1379,46 @@ int rcu_pending(int cpu)
 int rcu_needs_cpu(int cpu)
 {
 	/* RCU callbacks either ready or pending? */
-	return per_cpu(rcu_data, cpu).nxtlist ||
-	       per_cpu(rcu_bh_data, cpu).nxtlist;
+	return per_cpu(rcu_sched_data, cpu).nxtlist ||
+	       per_cpu(rcu_bh_data, cpu).nxtlist ||
+	       rcu_preempt_needs_cpu(cpu);
 }
 
 /*
- * Initialize a CPU's per-CPU RCU data.  We take this "scorched earth"
- * approach so that we don't have to worry about how long the CPU has
- * been gone, or whether it ever was online previously.  We do trust the
- * ->mynode field, as it is constant for a given struct rcu_data and
- * initialized during early boot.
- *
- * Note that only one online or offline event can be happening at a given
- * time.  Note also that we can accept some slop in the rsp->completed
- * access due to the fact that this CPU cannot possibly have any RCU
- * callbacks in flight yet.
+ * Do boot-time initialization of a CPU's per-CPU RCU data.
  */
-static void __cpuinit
-rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
+static void __init
+rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 {
 	unsigned long flags;
 	int i;
+	struct rcu_data *rdp = rsp->rda[cpu];
+	struct rcu_node *rnp = rcu_get_root(rsp);
+
+	/* Set up local state, ensuring consistent view of global state. */
+	spin_lock_irqsave(&rnp->lock, flags);
+	rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
+	rdp->nxtlist = NULL;
+	for (i = 0; i < RCU_NEXT_SIZE; i++)
+		rdp->nxttail[i] = &rdp->nxtlist;
+	rdp->qlen = 0;
+#ifdef CONFIG_NO_HZ
+	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
+#endif /* #ifdef CONFIG_NO_HZ */
+	rdp->cpu = cpu;
+	spin_unlock_irqrestore(&rnp->lock, flags);
+}
+
+/*
+ * Initialize a CPU's per-CPU RCU data.  Note that only one online or
+ * offline event can be happening at a given time.  Note also that we
+ * can accept some slop in the rsp->completed access due to the fact
+ * that this CPU cannot possibly have any RCU callbacks in flight yet.
+ */
+static void __cpuinit
+rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
+{
+	unsigned long flags;
 	long lastcomp;
 	unsigned long mask;
 	struct rcu_data *rdp = rsp->rda[cpu];
@@ -1354,17 +1432,9 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->passed_quiesc = 0;  /* We could be racing with new GP, */
 	rdp->qs_pending = 1;	 /*  so set up to respond to current GP. */
 	rdp->beenonline = 1;	 /* We have now been online. */
+	rdp->preemptable = preemptable;
 	rdp->passed_quiesc_completed = lastcomp - 1;
-	rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
-	rdp->nxtlist = NULL;
-	for (i = 0; i < RCU_NEXT_SIZE; i++)
-		rdp->nxttail[i] = &rdp->nxtlist;
-	rdp->qlen = 0;
 	rdp->blimit = blimit;
-#ifdef CONFIG_NO_HZ
-	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
-#endif /* #ifdef CONFIG_NO_HZ */
-	rdp->cpu = cpu;
 	spin_unlock(&rnp->lock);		/* irqs remain disabled. */
 
 	/*
@@ -1405,16 +1475,16 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 
 static void __cpuinit rcu_online_cpu(int cpu)
 {
-	rcu_init_percpu_data(cpu, &rcu_state);
-	rcu_init_percpu_data(cpu, &rcu_bh_state);
-	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+	rcu_init_percpu_data(cpu, &rcu_sched_state, 0);
+	rcu_init_percpu_data(cpu, &rcu_bh_state, 0);
+	rcu_preempt_init_percpu_data(cpu);
 }
 
 /*
- * Handle CPU online/offline notifcation events.
+ * Handle CPU online/offline notification events.
  */
-static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
-				    unsigned long action, void *hcpu)
+int __cpuinit rcu_cpu_notify(struct notifier_block *self,
+			     unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
 
@@ -1486,6 +1556,7 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 		rnp = rsp->level[i];
 		for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
 			spin_lock_init(&rnp->lock);
+			rnp->gpnum = 0;
 			rnp->qsmask = 0;
 			rnp->qsmaskinit = 0;
 			rnp->grplo = j * cpustride;
@@ -1503,16 +1574,20 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 					      j / rsp->levelspread[i - 1];
 			}
 			rnp->level = i;
+			INIT_LIST_HEAD(&rnp->blocked_tasks[0]);
+			INIT_LIST_HEAD(&rnp->blocked_tasks[1]);
 		}
 	}
 }
 
 /*
- * Helper macro for __rcu_init().  To be used nowhere else!
- * Assigns leaf node pointers into each CPU's rcu_data structure.
+ * Helper macro for __rcu_init() and __rcu_init_preempt().  To be used
+ * nowhere else!  Assigns leaf node pointers into each CPU's rcu_data
+ * structure.
  */
-#define RCU_DATA_PTR_INIT(rsp, rcu_data) \
+#define RCU_INIT_FLAVOR(rsp, rcu_data) \
 do { \
+	rcu_init_one(rsp); \
 	rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \
 	j = 0; \
 	for_each_possible_cpu(i) { \
@@ -1520,32 +1595,43 @@ do { \
 		j++; \
 		per_cpu(rcu_data, i).mynode = &rnp[j]; \
 		(rsp)->rda[i] = &per_cpu(rcu_data, i); \
+		rcu_boot_init_percpu_data(i, rsp); \
 	} \
 } while (0)
 
-static struct notifier_block __cpuinitdata rcu_nb = {
-	.notifier_call	= rcu_cpu_notify,
-};
+#ifdef CONFIG_TREE_PREEMPT_RCU
+
+void __init __rcu_init_preempt(void)
+{
+	int i;			/* All used by RCU_INIT_FLAVOR(). */
+	int j;
+	struct rcu_node *rnp;
+
+	RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
+}
+
+#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+
+void __init __rcu_init_preempt(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
 
 void __init __rcu_init(void)
 {
-	int i;			/* All used by RCU_DATA_PTR_INIT(). */
+	int i;			/* All used by RCU_INIT_FLAVOR(). */
 	int j;
 	struct rcu_node *rnp;
 
-	printk(KERN_INFO "Hierarchical RCU implementation.\n");
+	rcu_bootup_announce();
 #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
 	printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-	rcu_init_one(&rcu_state);
-	RCU_DATA_PTR_INIT(&rcu_state, rcu_data);
-	rcu_init_one(&rcu_bh_state);
-	RCU_DATA_PTR_INIT(&rcu_bh_state, rcu_bh_data);
-
-	for_each_online_cpu(i)
-		rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long)i);
-	/* Register notifier for non-boot CPUs */
-	register_cpu_notifier(&rcu_nb);
+	RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data);
+	RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data);
+	__rcu_init_preempt();
+	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 }
 
 module_param(blimit, int, 0);
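
Usage sketch (not part of the commit above): callers queue callbacks on the RCU-sched flavor through call_rcu_sched(), which this patch renames from the old call_rcu() in rcutree.c. The struct foo type, its rh field, and the foo_reclaim()/foo_release() helpers below are invented purely for illustration.

/* Illustrative only -- assumes <linux/rcupdate.h> and <linux/slab.h>. */
struct foo {
	struct rcu_head rh;	/* callback header embedded in the protected object */
	int data;
};

static void foo_reclaim(struct rcu_head *head)
{
	struct foo *fp = container_of(head, struct foo, rh);

	kfree(fp);	/* runs only after a full RCU-sched grace period */
}

static void foo_release(struct foo *fp)
{
	/* Queue the callback on the rcu_sched flavor defined in this file. */
	call_rcu_sched(&fp->rh, foo_reclaim);
}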
