| author | Steven Rostedt <rostedt@goodmis.org> | 2008-02-29 12:46:50 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-02-29 12:46:50 -0500 |
| commit | 2232c2d8e0a6a31061dec311f3d1cf7624bc14f1 (patch) | |
| tree | 1d90ec0b8bd4e3c154e386f005ef596ee25fa53f /include/linux | |
| parent | c0f4133b8f70769bc8dda977feb9a29109d6ccca (diff) | |
rcu: add support for dynamic ticks and preempt rcu
PREEMPT_RCU can get stuck if a CPU goes idle while NO_HZ is set. The
idle CPU will not move RCU through its grace period, so a
synchronize_rcu() may get stuck. Without this patch I have a box that will
not boot when PREEMPT_RCU and NO_HZ are set. That same box boots fine
with this patch.
This patch comes from the -rt kernel where it has been tested for
several months.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux')

| mode | file | insertions |
|---|---|---|
| -rw-r--r-- | include/linux/hardirq.h | 10 |
| -rw-r--r-- | include/linux/rcuclassic.h | 3 |
| -rw-r--r-- | include/linux/rcupreempt.h | 22 |

3 files changed, 35 insertions, 0 deletions
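The headers below only declare the hooks; the callers live in the dynticks idle path and are outside the include/linux diffstat shown here. As a rough orientation only, the intent is to bracket the tickless idle window with rcu_enter_nohz()/rcu_exit_nohz(). In the sketch below, everything except those two hooks and need_resched() is an illustrative stand-in rather than code from this patch:

```c
/*
 * Illustrative sketch only -- the real call sites are in the dynticks
 * idle code and are not part of this diff; halt_until_interrupt() is a
 * made-up stand-in for stopping the tick and halting the CPU.
 */
static void example_idle_loop(void)
{
	while (!need_resched()) {
		rcu_enter_nohz();	/* counter becomes even: CPU is invisible to RCU */
		halt_until_interrupt();	/* hypothetical: stop the tick, wait for work */
		rcu_exit_nohz();	/* counter becomes odd: CPU is active again */
	}
}
```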
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 2961ec788046..49829988bfa0 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -109,6 +109,14 @@ static inline void account_system_vtime(struct task_struct *tsk)
 }
 #endif
 
+#if defined(CONFIG_PREEMPT_RCU) && defined(CONFIG_NO_HZ)
+extern void rcu_irq_enter(void);
+extern void rcu_irq_exit(void);
+#else
+# define rcu_irq_enter() do { } while (0)
+# define rcu_irq_exit() do { } while (0)
+#endif /* CONFIG_PREEMPT_RCU */
+
 /*
  * It is safe to do non-atomic ops on ->hardirq_context,
  * because NMI handlers may not preempt and the ops are
@@ -117,6 +125,7 @@ static inline void account_system_vtime(struct task_struct *tsk)
  */
 #define __irq_enter()					\
 	do {						\
+		rcu_irq_enter();			\
 		account_system_vtime(current);		\
 		add_preempt_count(HARDIRQ_OFFSET);	\
 		trace_hardirq_enter();			\
@@ -135,6 +144,7 @@ extern void irq_enter(void);
 		trace_hardirq_exit();			\
 		account_system_vtime(current);		\
 		sub_preempt_count(HARDIRQ_OFFSET);	\
+		rcu_irq_exit();				\
 	} while (0)
 
 /*
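An interrupt can arrive while the CPU is sitting in dynticks idle, and its handler may contain RCU read-side critical sections, so __irq_enter()/__irq_exit() flip the CPU back to "active" for the duration of the handler. The rcu_irq_enter()/rcu_irq_exit() bodies live in the preemptible-RCU core, outside this diffstat; the following is only a sketch of the even/odd counter protocol they are expected to follow. The example_ names are made up for illustration, and the sketch ignores interrupt nesting and NMIs:

```c
/* Illustrative sketch, not the code added by this patch. */
static DEFINE_PER_CPU(int, example_flipped_on_entry);

void example_rcu_irq_enter(void)
{
	/* Interrupt taken on a dynticks-idle CPU (counter even)? */
	if (!(__get_cpu_var(dynticks_progress_counter) & 0x1)) {
		__get_cpu_var(dynticks_progress_counter)++;	/* even -> odd */
		smp_mb();	/* flip must be visible before the handler's RCU readers */
		__get_cpu_var(example_flipped_on_entry) = 1;
	}
}

void example_rcu_irq_exit(void)
{
	/* Undo the flip on the way back to dynticks idle. */
	if (__get_cpu_var(example_flipped_on_entry)) {
		__get_cpu_var(example_flipped_on_entry) = 0;
		smp_mb();	/* handler's RCU readers must complete before the flip */
		__get_cpu_var(dynticks_progress_counter)++;	/* odd -> even */
	}
}
```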
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index 4d6624260b4c..b3dccd68629e 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -160,5 +160,8 @@ extern void rcu_restart_cpu(int cpu);
 extern long rcu_batches_completed(void);
 extern long rcu_batches_completed_bh(void);
 
+#define rcu_enter_nohz()	do { } while (0)
+#define rcu_exit_nohz()		do { } while (0)
+
 #endif /* __KERNEL__ */
 #endif /* __LINUX_RCUCLASSIC_H */
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
index 60c2a033b19e..01152ed532c8 100644
--- a/include/linux/rcupreempt.h
+++ b/include/linux/rcupreempt.h
@@ -82,5 +82,27 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
 
 struct softirq_action;
 
+#ifdef CONFIG_NO_HZ
+DECLARE_PER_CPU(long, dynticks_progress_counter);
+
+static inline void rcu_enter_nohz(void)
+{
+	__get_cpu_var(dynticks_progress_counter)++;
+	WARN_ON(__get_cpu_var(dynticks_progress_counter) & 0x1);
+	mb();
+}
+
+static inline void rcu_exit_nohz(void)
+{
+	mb();
+	__get_cpu_var(dynticks_progress_counter)++;
+	WARN_ON(!(__get_cpu_var(dynticks_progress_counter) & 0x1));
+}
+
+#else /* CONFIG_NO_HZ */
+#define rcu_enter_nohz()	do { } while (0)
+#define rcu_exit_nohz()		do { } while (0)
+#endif /* CONFIG_NO_HZ */
+
 #endif /* __KERNEL__ */
 #endif /* __LINUX_RCUPREEMPT_H */
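The per-CPU dynticks_progress_counter encodes the CPU's state in its low bit: it is incremented once on every nohz entry and once on every exit, so an even value means "dynticks idle, no RCU readers possible" and an odd value means "active". The memory barriers order the counter flips against the RCU read-side critical sections on either side of the idle window. The grace-period state machine (in the preemptible-RCU core, outside this diffstat) can then snapshot the counter per CPU and skip CPUs that are, or have been, dynticks idle. The check below is a rough sketch of that idea; the example_ names are illustrative, not code from this patch:

```c
/* Illustrative sketch of the grace-period side; not code from this patch. */
static DEFINE_PER_CPU(long, example_dynticks_snapshot);

/* Record each CPU's counter when a new grace-period stage begins. */
static void example_snapshot_cpu(int cpu)
{
	per_cpu(example_dynticks_snapshot, cpu) =
		per_cpu(dynticks_progress_counter, cpu);
}

/* Must the grace period still wait for @cpu? */
static int example_cpu_needs_ack(int cpu)
{
	long curr = per_cpu(dynticks_progress_counter, cpu);
	long snap = per_cpu(example_dynticks_snapshot, cpu);

	/*
	 * Counter even and unchanged: the CPU has been dynticks idle the
	 * whole time and cannot be inside an RCU read-side critical
	 * section, so it does not need to be waited on.
	 */
	if (curr == snap && !(curr & 0x1))
		return 0;

	/*
	 * Counter is even again, or has advanced past a full idle
	 * enter/exit cycle: the CPU has passed through a quiescent state
	 * since the snapshot, so it can also be skipped.
	 */
	if ((curr - snap) > 2 || !(curr & 0x1))
		return 0;

	return 1;	/* still active: wait for an explicit acknowledgement */
}
```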