Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--   kernel/rcutree_plugin.h   69
1 file changed, 61 insertions, 8 deletions
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 79b53bda8943..0e4f420245d9 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -26,6 +26,45 @@
 
 #include <linux/delay.h>
 
+/*
+ * Check the RCU kernel configuration parameters and print informative
+ * messages about anything out of the ordinary.  If you like #ifdef, you
+ * will love this function.
+ */
+static void __init rcu_bootup_announce_oddness(void)
+{
+#ifdef CONFIG_RCU_TRACE
+	printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
+#endif
+#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
+	printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
+	       CONFIG_RCU_FANOUT);
+#endif
+#ifdef CONFIG_RCU_FANOUT_EXACT
+	printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
+#endif
+#ifdef CONFIG_RCU_FAST_NO_HZ
+	printk(KERN_INFO
+	       "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
+#endif
+#ifdef CONFIG_PROVE_RCU
+	printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
+#endif
+#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
+	printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
+#endif
+#ifndef CONFIG_RCU_CPU_STALL_DETECTOR
+	printk(KERN_INFO
+	       "\tRCU-based detection of stalled CPUs is disabled.\n");
+#endif
+#ifndef CONFIG_RCU_CPU_STALL_VERBOSE
+	printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n");
+#endif
+#if NUM_RCU_LVL_4 != 0
+	printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
+#endif
+}
+
 #ifdef CONFIG_TREE_PREEMPT_RCU
 
 struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
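The new rcu_bootup_announce_oddness() above is pure compile-time reporting: each #ifdef arm compiles down to a single printk() only when the corresponding Kconfig option is set, so the boot log records exactly which non-default RCU options this kernel was built with. A minimal user-space sketch of the same pattern, using hypothetical MYAPP_* options rather than the kernel's Kconfig symbols:

    #include <stdio.h>

    /* Print one line per non-default compile-time option. */
    static void announce_oddness(void)
    {
    #ifdef MYAPP_TRACE                      /* hypothetical option */
            printf("\ttracing is compiled in\n");
    #endif
    #if defined(MYAPP_FANOUT) && MYAPP_FANOUT != 32
            printf("\tfanout set to non-default value of %d\n", MYAPP_FANOUT);
    #endif
    }

    int main(void)
    {
            announce_oddness();   /* e.g. cc -DMYAPP_TRACE -DMYAPP_FANOUT=16 */
            return 0;
    }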
@@ -38,8 +77,8 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp);
  */
 static void __init rcu_bootup_announce(void)
 {
-	printk(KERN_INFO
-	       "Experimental preemptable hierarchical RCU implementation.\n");
+	printk(KERN_INFO "Preemptable hierarchical RCU implementation.\n");
+	rcu_bootup_announce_oddness();
 }
 
 /*
@@ -75,13 +114,19 @@ EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
  * that this just means that the task currently running on the CPU is
  * not in a quiescent state.  There might be any number of tasks blocked
  * while in an RCU read-side critical section.
+ *
+ * Unlike the other rcu_*_qs() functions, callers to this function
+ * must disable irqs in order to protect the assignment to
+ * ->rcu_read_unlock_special.
  */
 static void rcu_preempt_qs(int cpu)
 {
 	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
+
 	rdp->passed_quiesc_completed = rdp->gpnum - 1;
 	barrier();
 	rdp->passed_quiesc = 1;
+	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
 }
 
 /*
@@ -144,9 +189,8 @@ static void rcu_preempt_note_context_switch(int cpu)
 	 * grace period, then the fact that the task has been enqueued
 	 * means that we continue to block the current grace period.
 	 */
-	rcu_preempt_qs(cpu);
 	local_irq_save(flags);
-	t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
+	rcu_preempt_qs(cpu);
 	local_irq_restore(flags);
 }
 
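The two hunks above move the clearing of RCU_READ_UNLOCK_NEED_QS from the call site into rcu_preempt_qs() itself, which is why the function's new header comment requires irqs to be disabled: "&=" on ->rcu_read_unlock_special is a plain read-modify-write, and an interrupt that sets a bit between the load and the store would have its update silently overwritten. A standalone sketch of that lost-update race (illustrative only; the macros are stand-ins, not code from the patch):

    #include <stdio.h>

    #define NEED_QS  0x1   /* stand-in for RCU_READ_UNLOCK_NEED_QS     */
    #define BLOCKED  0x2   /* bit an "interrupt" might set concurrently */

    static volatile unsigned int special = NEED_QS;

    int main(void)
    {
            unsigned int tmp = special;  /* load                        */
            special |= BLOCKED;          /* simulated irq between load  */
            tmp &= ~NEED_QS;             /* and store...                */
            special = tmp;               /* ...whose update is now lost */
            printf("special = %#x (BLOCKED bit silently dropped)\n", special);
            return 0;
    }

With irqs disabled around the read-modify-write, no such interleaving is possible; the hunks below simply delete the now-redundant clearing at the remaining call sites.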
@@ -236,7 +280,6 @@ static void rcu_read_unlock_special(struct task_struct *t)
 	 */
 	special = t->rcu_read_unlock_special;
 	if (special & RCU_READ_UNLOCK_NEED_QS) {
-		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
 		rcu_preempt_qs(smp_processor_id());
 	}
 
@@ -473,7 +516,6 @@ static void rcu_preempt_check_callbacks(int cpu)
 	struct task_struct *t = current;
 
 	if (t->rcu_read_lock_nesting == 0) {
-		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
 		rcu_preempt_qs(cpu);
 		return;
 	}
@@ -515,11 +557,13 @@ void synchronize_rcu(void)
 	if (!rcu_scheduler_active)
 		return;
 
+	init_rcu_head_on_stack(&rcu.head);
 	init_completion(&rcu.completion);
 	/* Will wake me after RCU finished. */
 	call_rcu(&rcu.head, wakeme_after_rcu);
 	/* Wait for it. */
 	wait_for_completion(&rcu.completion);
+	destroy_rcu_head_on_stack(&rcu.head);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
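init_rcu_head_on_stack() and destroy_rcu_head_on_stack() are no-ops unless CONFIG_DEBUG_OBJECTS_RCU_HEAD is set; with it, they tell the debug-objects subsystem that this rcu_head lives on the stack, so its disappearance when the frame is popped is not flagged as a leaked callback. The resulting pattern for any on-stack rcu_head, sketched after the patched synchronize_rcu() (my_synchronize() is a hypothetical caller, not part of the patch):

    #include <linux/rcupdate.h>
    #include <linux/completion.h>

    static void my_synchronize(void)
    {
            struct rcu_synchronize rcu;

            init_rcu_head_on_stack(&rcu.head);     /* announce on-stack rcu_head */
            init_completion(&rcu.completion);
            call_rcu(&rcu.head, wakeme_after_rcu); /* wakes us after a grace period */
            wait_for_completion(&rcu.completion);
            destroy_rcu_head_on_stack(&rcu.head);  /* pair with init before return */
    }

The init/destroy calls must bracket the entire lifetime of the stack variable: destroying before the callback has run, or returning without destroying, is exactly the class of bug the debug-objects checking reports.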
@@ -754,6 +798,7 @@ void exit_rcu(void)
 static void __init rcu_bootup_announce(void)
 {
 	printk(KERN_INFO "Hierarchical RCU implementation.\n");
+	rcu_bootup_announce_oddness();
 }
 
 /*
@@ -1008,6 +1053,8 @@ static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
 int rcu_needs_cpu(int cpu)
 {
 	int c = 0;
+	int snap;
+	int snap_nmi;
 	int thatcpu;
 
 	/* Check for being in the holdoff period. */
@@ -1015,12 +1062,18 @@ int rcu_needs_cpu(int cpu)
 		return rcu_needs_cpu_quick_check(cpu);
 
 	/* Don't bother unless we are the last non-dyntick-idle CPU. */
-	for_each_cpu_not(thatcpu, nohz_cpu_mask)
-		if (thatcpu != cpu) {
+	for_each_online_cpu(thatcpu) {
+		if (thatcpu == cpu)
+			continue;
+		snap = per_cpu(rcu_dynticks, thatcpu).dynticks;
+		snap_nmi = per_cpu(rcu_dynticks, thatcpu).dynticks_nmi;
+		smp_mb(); /* Order sampling of snap with end of grace period. */
+		if (((snap & 0x1) != 0) || ((snap_nmi & 0x1) != 0)) {
 			per_cpu(rcu_dyntick_drain, cpu) = 0;
 			per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
 			return rcu_needs_cpu_quick_check(cpu);
 		}
+	}
 
 	/* Check and update the rcu_dyntick_drain sequencing. */
 	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
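The rewritten loop stops consulting nohz_cpu_mask and instead samples each other online CPU's dynticks counters directly. Those counters are incremented on every transition into and out of dyntick-idle (and NMI) state, so an odd snapshot means the sampled CPU is currently non-idle, and this CPU cannot treat itself as the last non-dyntick-idle CPU. A small helper expressing the parity test (cpu_is_non_idle() is illustrative, not part of the patch):

    /* Odd dynticks value => the CPU is between an entry to and an exit
     * from non-idle execution, i.e. it is not in dyntick-idle mode. */
    static int cpu_is_non_idle(int snap, int snap_nmi)
    {
            return ((snap & 0x1) != 0) || ((snap_nmi & 0x1) != 0);
    }

The smp_mb() in the loop orders the counter samples against the end of any grace period the sampled CPUs may still be contributing to; when a non-idle CPU is found, the holdoff timestamp is backdated (jiffies - 1) so subsequent calls fall back to rcu_needs_cpu_quick_check() without rescanning.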
