author		Chris Dearman <chris@mips.com>		2007-05-24 17:24:20 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2007-06-14 13:25:15 -0400
commit		ffe9ee4709cf513fb80e9b7e04d214dd8b76a10d (patch)
tree		07453e5644806b9c755159e5a4c1fe11dacfcab0 /arch/mips
parent		b72c05262298cc2ac92edb657f5ea3a97ad5ea3d (diff)
[MIPS] Separate performance counter interrupts
Add support for taking the performance counter overflow interrupt on a
separate interrupt line from the timer.
Signed-off-by: Chris Dearman <chris@mips.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips')
-rw-r--r--	arch/mips/kernel/smp-mt.c		12
-rw-r--r--	arch/mips/kernel/time.c			44
-rw-r--r--	arch/mips/mips-boards/generic/time.c	118
-rw-r--r--	arch/mips/oprofile/op_model_mipsxx.c	7
4 files changed, 135 insertions, 46 deletions
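The heart of the patch is the small handle_perf_irq() helper added to both timer paths below: it decides whether the timer check should be skipped because a shared performance counter interrupt has already been handled. That decision can be modelled in isolation; in this sketch the kernel symbols (perf_irq, cpu_has_mips_r2, mipsxx_perfcount_irq) are replaced by plain parameters, and skip_timer_check() is a name invented for illustration only.

/*
 * Standalone model of the handle_perf_irq() decision added below.
 * perfcount_irq < 0 means the counter overflow shares the timer line.
 */
#include <stdio.h>

#define IRQ_NONE     0
#define IRQ_HANDLED  1

static int skip_timer_check(int perfcount_irq, int r2, int perf_handled)
{
        /*
         * Only skip the timer check when the interrupt is shared, a
         * counter really overflowed, and (pre-R2) Cause.PCI cannot tell
         * us whether the timer also fired.
         */
        return perfcount_irq < 0 && perf_handled == IRQ_HANDLED && !r2;
}

int main(void)
{
        /* Shared line on a pre-R2 core with an overflow: skip the timer check. */
        printf("%d\n", skip_timer_check(-1, 0, IRQ_HANDLED));  /* prints 1 */
        /* Dedicated line on an R2 core: the timer check always runs. */
        printf("%d\n", skip_timer_check(56, 1, IRQ_HANDLED));  /* prints 0 */
        return 0;
}

Keeping the helper byte-for-byte identical in arch/mips/kernel/time.c and in the Malta board code means the shared-line fallback behaves the same whichever dispatcher fields the interrupt.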
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 64b62bdfb4f6..b8fa7ddd78f6 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -129,13 +129,13 @@ static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
 
 static struct irqaction irq_resched = {
         .handler   = ipi_resched_interrupt,
-        .flags     = IRQF_DISABLED,
+        .flags     = IRQF_DISABLED|IRQF_PERCPU,
         .name      = "IPI_resched"
 };
 
 static struct irqaction irq_call = {
         .handler   = ipi_call_interrupt,
-        .flags     = IRQF_DISABLED,
+        .flags     = IRQF_DISABLED|IRQF_PERCPU,
         .name      = "IPI_call"
 };
 
@@ -275,10 +275,7 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
         setup_irq(cpu_ipi_resched_irq, &irq_resched);
         setup_irq(cpu_ipi_call_irq, &irq_call);
 
-        /* need to mark IPI's as IRQ_PER_CPU */
-        irq_desc[cpu_ipi_resched_irq].status |= IRQ_PER_CPU;
         set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
-        irq_desc[cpu_ipi_call_irq].status |= IRQ_PER_CPU;
         set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
 }
 
@@ -326,8 +323,11 @@ void prom_boot_secondary(int cpu, struct task_struct *idle)
 
 void prom_init_secondary(void)
 {
+        /* Enable per-cpu interrupts */
+
+        /* This is Malta specific: IPI,performance and timer inetrrupts */
         write_c0_status((read_c0_status() & ~ST0_IM ) |
-                        (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7));
+                        (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
 }
 
 void prom_smp_finish(void)
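prom_init_secondary() now also unmasks hardware interrupt 6 for the performance counters. A rough standalone model of that Status.IM update follows; the ST0_IM and STATUSF_IP* values are written out here per the usual MIPS Status register layout (IM in bits 15:8) purely for illustration.

/*
 * Model of the Status.IM update in prom_init_secondary(): clear the whole
 * interrupt mask field, then re-enable the two software interrupts
 * (IP0/IP1, used for IPIs), the performance counter interrupt (IP6) and
 * the timer interrupt (IP7).
 */
#include <stdint.h>
#include <stdio.h>

#define ST0_IM       0x0000ff00u      /* Status.IM, bits 15:8 */
#define STATUSF_IP0  (1u << 8)
#define STATUSF_IP1  (1u << 9)
#define STATUSF_IP6  (1u << 14)
#define STATUSF_IP7  (1u << 15)

int main(void)
{
        uint32_t status = 0x1000ff01u;    /* sample Status value, all IM bits set */

        status = (status & ~ST0_IM) |
                 (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7);

        /* Only IP0, IP1, IP6 and IP7 remain enabled: prints 0xc3. */
        printf("new Status.IM = 0x%02x\n", (unsigned)((status & ST0_IM) >> 8));
        return 0;
}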
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 751b4a18b133..7def1ff3da94 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -199,6 +199,30 @@ int (*perf_irq)(void) = null_perf_irq;
 EXPORT_SYMBOL(null_perf_irq);
 EXPORT_SYMBOL(perf_irq);
 
+/*
+ * Performance counter IRQ or -1 if shared with timer
+ */
+int mipsxx_perfcount_irq;
+EXPORT_SYMBOL(mipsxx_perfcount_irq);
+
+/*
+ * Possibly handle a performance counter interrupt.
+ * Return true if the timer interrupt should not be checked
+ */
+static inline int handle_perf_irq (int r2)
+{
+        /*
+         * The performance counter overflow interrupt may be shared with the
+         * timer interrupt (mipsxx_perfcount_irq < 0). If it is and a
+         * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
+         * and we can't reliably determine if a counter interrupt has also
+         * happened (!r2) then don't check for a timer interrupt.
+         */
+        return (mipsxx_perfcount_irq < 0) &&
+                perf_irq() == IRQ_HANDLED &&
+                !r2;
+}
+
 asmlinkage void ll_timer_interrupt(int irq)
 {
         int r2 = cpu_has_mips_r2;
@@ -206,19 +230,13 @@ asmlinkage void ll_timer_interrupt(int irq)
         irq_enter();
         kstat_this_cpu.irqs[irq]++;
 
-        /*
-         * Suckage alert:
-         * Before R2 of the architecture there was no way to see if a
-         * performance counter interrupt was pending, so we have to run the
-         * performance counter interrupt handler anyway.
-         */
-        if (!r2 || (read_c0_cause() & (1 << 26)))
-                if (perf_irq())
-                        goto out;
+        if (handle_perf_irq(r2))
+                goto out;
 
-        /* we keep interrupt disabled all the time */
-        if (!r2 || (read_c0_cause() & (1 << 30)))
-                timer_interrupt(irq, NULL);
+        if (r2 && ((read_c0_cause() & (1 << 30)) == 0))
+                goto out;
+
+        timer_interrupt(irq, NULL);
 
 out:
         irq_exit();
@@ -258,7 +276,7 @@ unsigned int mips_hpt_frequency;
 
 static struct irqaction timer_irqaction = {
         .handler   = timer_interrupt,
-        .flags     = IRQF_DISABLED,
+        .flags     = IRQF_DISABLED | IRQF_PERCPU,
         .name      = "timer",
 };
 
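On MIPS32/64 R2 cores the dispatch logic above can consult the Cause register before deciding what fired: bit 26 (PCI) reports a pending performance counter overflow and bit 30 (TI) a pending timer interrupt, which is exactly what the (1 << 26) and (1 << 30) tests encode. A throwaway sketch with the two bits given names follows; the CAUSEF_* macros and the sample cause word are written out here for illustration, not taken from a kernel header.

/*
 * Illustration only: the Cause register bits tested above, pulled out as
 * named masks. Bit 30 (TI) and bit 26 (PCI) are architecturally defined
 * for MIPS32/64 R2.
 */
#include <stdint.h>
#include <stdio.h>

#define CAUSEF_PCI (1u << 26)   /* performance counter overflow pending */
#define CAUSEF_TI  (1u << 30)   /* timer interrupt pending */

int main(void)
{
        uint32_t cause = (1u << 30) | (1u << 26);   /* sample: both pending */

        if (cause & CAUSEF_PCI)
                puts("perf counter overflow pending");
        if (cause & CAUSEF_TI)
                puts("timer interrupt pending");
        return 0;
}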
diff --git a/arch/mips/mips-boards/generic/time.c b/arch/mips/mips-boards/generic/time.c
index b41db9e7ab1f..33432ea188fb 100644
--- a/arch/mips/mips-boards/generic/time.c
+++ b/arch/mips/mips-boards/generic/time.c
@@ -53,9 +53,8 @@
 
 unsigned long cpu_khz;
 
-#define CPUCTR_IMASKBIT (0x100 << MIPSCPU_INT_CPUCTR)
-
 static int mips_cpu_timer_irq;
+extern int mipsxx_perfcount_irq;
 extern void smtc_timer_broadcast(int);
 
 static void mips_timer_dispatch(void)
@@ -63,6 +62,11 @@ static void mips_timer_dispatch(void)
         do_IRQ(mips_cpu_timer_irq);
 }
 
+static void mips_perf_dispatch(void)
+{
+        do_IRQ(mipsxx_perfcount_irq);
+}
+
 /*
  * Redeclare until I get around mopping the timer code insanity on MIPS.
  */
@@ -70,6 +74,24 @@ extern int null_perf_irq(void);
 
 extern int (*perf_irq)(void);
 
+/*
+ * Possibly handle a performance counter interrupt.
+ * Return true if the timer interrupt should not be checked
+ */
+static inline int handle_perf_irq (int r2)
+{
+        /*
+         * The performance counter overflow interrupt may be shared with the
+         * timer interrupt (mipsxx_perfcount_irq < 0). If it is and a
+         * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
+         * and we can't reliably determine if a counter interrupt has also
+         * happened (!r2) then don't check for a timer interrupt.
+         */
+        return (mipsxx_perfcount_irq < 0) &&
+                perf_irq() == IRQ_HANDLED &&
+                !r2;
+}
+
 irqreturn_t mips_timer_interrupt(int irq, void *dev_id)
 {
         int cpu = smp_processor_id();
@@ -92,8 +114,7 @@ irqreturn_t mips_timer_interrupt(int irq, void *dev_id)
          * We could be here due to timer interrupt,
          * perf counter overflow, or both.
          */
-        if (read_c0_cause() & (1 << 26))
-                perf_irq();
+        (void) handle_perf_irq(1);
 
         if (read_c0_cause() & (1 << 30)) {
                 /*
@@ -115,19 +136,19 @@ irqreturn_t mips_timer_interrupt(int irq, void *dev_id)
 #else /* CONFIG_MIPS_MT_SMTC */
         int r2 = cpu_has_mips_r2;
 
+        if (handle_perf_irq(r2))
+                goto out;
+
+        if (r2 && ((read_c0_cause() & (1 << 30)) == 0))
+                goto out;
+
         if (cpu == 0) {
                 /*
                  * CPU 0 handles the global timer interrupt job and process
                  * accounting resets count/compare registers to trigger next
                  * timer int.
                  */
-                if (!r2 || (read_c0_cause() & (1 << 26)))
-                        if (perf_irq())
-                                goto out;
-
-                /* we keep interrupt disabled all the time */
-                if (!r2 || (read_c0_cause() & (1 << 30)))
-                        timer_interrupt(irq, NULL);
+                timer_interrupt(irq, NULL);
         } else {
                 /* Everyone else needs to reset the timer int here as
                    ll_local_timer_interrupt doesn't */
@@ -225,35 +246,82 @@ void __init mips_time_init(void)
         mips_scroll_message();
 }
 
-void __init plat_timer_setup(struct irqaction *irq)
+irqreturn_t mips_perf_interrupt(int irq, void *dev_id)
 {
+        return perf_irq();
+}
+
+static struct irqaction perf_irqaction = {
+        .handler   = mips_perf_interrupt,
+        .flags     = IRQF_DISABLED | IRQF_PERCPU,
+        .name      = "performance",
+};
+
+void __init plat_perf_setup(struct irqaction *irq)
+{
+        int hwint = 0;
+        mipsxx_perfcount_irq = -1;
+
 #ifdef MSC01E_INT_BASE
         if (cpu_has_veic) {
-                set_vi_handler (MSC01E_INT_CPUCTR, mips_timer_dispatch);
-                mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR;
+                set_vi_handler (MSC01E_INT_PERFCTR, mips_perf_dispatch);
+                mipsxx_perfcount_irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
         } else
 #endif
-        {
-                if (cpu_has_vint)
-                        set_vi_handler (MIPSCPU_INT_CPUCTR, mips_timer_dispatch);
-                mips_cpu_timer_irq = MIPSCPU_INT_BASE + MIPSCPU_INT_CPUCTR;
+        if (cpu_has_mips_r2) {
+                /*
+                 * Read IntCtl.IPPCI to determine the performance
+                 * counter interrupt
+                 */
+                hwint = (read_c0_intctl () >> 26) & 7;
+                if (hwint != MIPSCPU_INT_CPUCTR) {
+                        if (cpu_has_vint)
+                                set_vi_handler (hwint, mips_perf_dispatch);
+                        mipsxx_perfcount_irq = MIPSCPU_INT_BASE + hwint;
+                }
+        }
+        if (mipsxx_perfcount_irq >= 0) {
+#ifdef CONFIG_MIPS_MT_SMTC
+                setup_irq_smtc(mipsxx_perfcount_irq, irq, 0x100 << hwint);
+#else
+                setup_irq(mipsxx_perfcount_irq, irq);
+#endif /* CONFIG_MIPS_MT_SMTC */
+#ifdef CONFIG_SMP
+                set_irq_handler(mipsxx_perfcount_irq, handle_percpu_irq);
+#endif
         }
+}
 
+void __init plat_timer_setup(struct irqaction *irq)
+{
+        int hwint = 0;
+        if (cpu_has_veic) {
+                set_vi_handler (MSC01E_INT_CPUCTR, mips_timer_dispatch);
+                mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR;
+        }
+        else {
+                if (cpu_has_mips_r2)
+                        /*
+                         * Read IntCtl.IPTI to determine the timer interrupt
+                         */
+                        hwint = (read_c0_intctl () >> 29) & 7;
+                else
+                        hwint = MIPSCPU_INT_CPUCTR;
+                if (cpu_has_vint)
+                        set_vi_handler (hwint, mips_timer_dispatch);
+                mips_cpu_timer_irq = MIPSCPU_INT_BASE + hwint;
+        }
 
         /* we are using the cpu counter for timer interrupts */
         irq->handler = mips_timer_interrupt;    /* we use our own handler */
 #ifdef CONFIG_MIPS_MT_SMTC
-        setup_irq_smtc(mips_cpu_timer_irq, irq, CPUCTR_IMASKBIT);
+        setup_irq_smtc(mips_cpu_timer_irq, irq, 0x100 << hwint);
 #else
         setup_irq(mips_cpu_timer_irq, irq);
 #endif /* CONFIG_MIPS_MT_SMTC */
-
 #ifdef CONFIG_SMP
-        /* irq_desc(riptor) is a global resource, when the interrupt overlaps
-           on seperate cpu's the first one tries to handle the second interrupt.
-           The effect is that the int remains disabled on the second cpu.
-           Mark the interrupt with IRQ_PER_CPU to avoid any confusion */
-        irq_desc[mips_cpu_timer_irq].status |= IRQ_PER_CPU;
         set_irq_handler(mips_cpu_timer_irq, handle_percpu_irq);
 #endif
+
+        plat_perf_setup(&perf_irqaction);
 }
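plat_perf_setup() and plat_timer_setup() above both read the R2 IntCtl register: bits 31:29 (IPTI) give the hardware interrupt used by the CP0 timer and bits 28:26 (IPPCI) the one used by the performance counters. The patch compares IPPCI against MIPSCPU_INT_CPUCTR to decide whether the counters have their own line; the sketch below makes the same shared-or-separate decision by comparing IPPCI directly with IPTI, using a made-up IntCtl value, and is an illustration rather than the board code.

/*
 * Illustration of the IntCtl decoding done in plat_perf_setup() and
 * plat_timer_setup() above; the intctl value is a fabricated sample.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t intctl = (7u << 29) | (6u << 26);  /* sample: IPTI=7, IPPCI=6 */

        unsigned ipti  = (intctl >> 29) & 7;   /* timer interrupt line */
        unsigned ippci = (intctl >> 26) & 7;   /* perf counter interrupt line */

        if (ippci != ipti)
                printf("perf counters on separate hw int %u (timer on %u)\n",
                       ippci, ipti);
        else
                printf("perf counters share the timer interrupt (hw int %u)\n",
                       ipti);
        return 0;
}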
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 4f94fa261aae..1ea5c9c1010b 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -177,7 +177,10 @@ static int mipsxx_perfcount_handler(void)
         unsigned int counters = op_model_mipsxx_ops.num_counters;
         unsigned int control;
         unsigned int counter;
-        int handled = 0;
+        int handled = IRQ_NONE;
+
+        if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
+                return handled;
 
         switch (counters) {
 #define HANDLE_COUNTER(n) \
@@ -188,7 +191,7 @@ static int mipsxx_perfcount_handler(void)
                     (counter & M_COUNTER_OVERFLOW)) { \
                         oprofile_add_sample(get_irq_regs(), n); \
                         w_c0_perfcntr ## n(reg.counter[n]); \
-                        handled = 1; \
+                        handled = IRQ_HANDLED; \
                 }
         HANDLE_COUNTER(3)
         HANDLE_COUNTER(2)
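The oprofile change is what makes the separate line usable: mipsxx_perfcount_handler() now returns IRQ_NONE/IRQ_HANDLED instead of 0/1, and on R2 it bails out early when Cause.PCI is clear, so the same function works both behind the shared-timer perf_irq hook and behind mips_perf_interrupt() on a dedicated interrupt. A minimal model of that calling convention follows; fake_perfcount_handler() is a stand-in written for this sketch, not the real handler.

/*
 * Minimal model of the reworked handler's return contract: nothing
 * pending yields IRQ_NONE, a handled overflow yields IRQ_HANDLED.
 */
#include <stdio.h>

#define IRQ_NONE     0
#define IRQ_HANDLED  1

static int fake_perfcount_handler(int overflow_pending)
{
        /* Mirrors the early return: no overflow pending -> IRQ_NONE. */
        if (!overflow_pending)
                return IRQ_NONE;
        /* ... scan counters, record samples, rearm them ... */
        return IRQ_HANDLED;
}

int main(void)
{
        printf("no overflow -> %d\n", fake_perfcount_handler(0));  /* IRQ_NONE */
        printf("overflow    -> %d\n", fake_perfcount_handler(1));  /* IRQ_HANDLED */
        return 0;
}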