aboutsummaryrefslogtreecommitdiffstats
path: root/arch/s390/kernel/irq.c
diff options
context:
space:
mode:
authorMartin Schwidefsky <schwidefsky@de.ibm.com>2013-06-27 03:01:09 -0400
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2013-08-22 06:20:04 -0400
commit1f44a225777e40fd9a945b09f958052c47494e1e (patch)
treec33f67424d4cb06f481883b25e75390eccd15ca7 /arch/s390/kernel/irq.c
parent5d0d8f43535bc4e19406ecf158340ccc4027a477 (diff)
s390: convert interrupt handling to use generic hardirq
With the introduction of PCI it became apparent that s390 should convert to generic hardirqs as too many drivers do not have the correct dependency for GENERIC_HARDIRQS. On the architecture level s390 does not have irq lines. It has external interrupts, I/O interrupts and adapter interrupts. This patch hard-codes all external interrupts as irq #1, all I/O interrupts as irq #2 and all adapter interrupts as irq #3. The additional information from the lowcore associated with the interrupt is stored in the pt_regs of the interrupt frame, where the interrupt handler can pick it up. For PCI/MSI interrupts the adapter interrupt handler scans the relevant bit fields and calls generic_handle_irq with the virtual irq number for the MSI interrupt. Reviewed-by: Sebastian Ott <sebott@linux.vnet.ibm.com> Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/kernel/irq.c')
-rw-r--r--arch/s390/kernel/irq.c160
1 file changed, 58 insertions, 102 deletions
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 54b0995514e8..b34ba0ea96a9 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -22,6 +22,7 @@
22#include <asm/cputime.h> 22#include <asm/cputime.h>
23#include <asm/lowcore.h> 23#include <asm/lowcore.h>
24#include <asm/irq.h> 24#include <asm/irq.h>
25#include <asm/hw_irq.h>
25#include "entry.h" 26#include "entry.h"
26 27
27DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat); 28DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
@@ -42,9 +43,10 @@ struct irq_class {
42 * Since the external and I/O interrupt fields are already sums we would end 43 * Since the external and I/O interrupt fields are already sums we would end
43 * up with having a sum which accounts each interrupt twice. 44 * up with having a sum which accounts each interrupt twice.
44 */ 45 */
45static const struct irq_class irqclass_main_desc[NR_IRQS] = { 46static const struct irq_class irqclass_main_desc[NR_IRQS_BASE] = {
46 [EXTERNAL_INTERRUPT] = {.name = "EXT"}, 47 [EXT_INTERRUPT] = {.name = "EXT"},
47 [IO_INTERRUPT] = {.name = "I/O"} 48 [IO_INTERRUPT] = {.name = "I/O"},
49 [THIN_INTERRUPT] = {.name = "AIO"},
48}; 50};
49 51
50/* 52/*
@@ -86,6 +88,28 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
86 [CPU_RST] = {.name = "RST", .desc = "[CPU] CPU Restart"}, 88 [CPU_RST] = {.name = "RST", .desc = "[CPU] CPU Restart"},
87}; 89};
88 90
91void __init init_IRQ(void)
92{
93 irq_reserve_irqs(0, THIN_INTERRUPT);
94 init_cio_interrupts();
95 init_airq_interrupts();
96 init_ext_interrupts();
97}
98
99void do_IRQ(struct pt_regs *regs, int irq)
100{
101 struct pt_regs *old_regs;
102
103 old_regs = set_irq_regs(regs);
104 irq_enter();
105 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
106 /* Serve timer interrupts first. */
107 clock_comparator_work();
108 generic_handle_irq(irq);
109 irq_exit();
110 set_irq_regs(old_regs);
111}
112
89/* 113/*
90 * show_interrupts is needed by /proc/interrupts. 114 * show_interrupts is needed by /proc/interrupts.
91 */ 115 */
@@ -100,27 +124,36 @@ int show_interrupts(struct seq_file *p, void *v)
100 for_each_online_cpu(cpu) 124 for_each_online_cpu(cpu)
101 seq_printf(p, "CPU%d ", cpu); 125 seq_printf(p, "CPU%d ", cpu);
102 seq_putc(p, '\n'); 126 seq_putc(p, '\n');
127 goto out;
103 } 128 }
104 if (irq < NR_IRQS) { 129 if (irq < NR_IRQS) {
130 if (irq >= NR_IRQS_BASE)
131 goto out;
105 seq_printf(p, "%s: ", irqclass_main_desc[irq].name); 132 seq_printf(p, "%s: ", irqclass_main_desc[irq].name);
106 for_each_online_cpu(cpu) 133 for_each_online_cpu(cpu)
107 seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[irq]); 134 seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
108 seq_putc(p, '\n'); 135 seq_putc(p, '\n');
109 goto skip_arch_irqs; 136 goto out;
110 } 137 }
111 for (irq = 0; irq < NR_ARCH_IRQS; irq++) { 138 for (irq = 0; irq < NR_ARCH_IRQS; irq++) {
112 seq_printf(p, "%s: ", irqclass_sub_desc[irq].name); 139 seq_printf(p, "%s: ", irqclass_sub_desc[irq].name);
113 for_each_online_cpu(cpu) 140 for_each_online_cpu(cpu)
114 seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).irqs[irq]); 141 seq_printf(p, "%10u ",
142 per_cpu(irq_stat, cpu).irqs[irq]);
115 if (irqclass_sub_desc[irq].desc) 143 if (irqclass_sub_desc[irq].desc)
116 seq_printf(p, " %s", irqclass_sub_desc[irq].desc); 144 seq_printf(p, " %s", irqclass_sub_desc[irq].desc);
117 seq_putc(p, '\n'); 145 seq_putc(p, '\n');
118 } 146 }
119skip_arch_irqs: 147out:
120 put_online_cpus(); 148 put_online_cpus();
121 return 0; 149 return 0;
122} 150}
123 151
152int arch_show_interrupts(struct seq_file *p, int prec)
153{
154 return 0;
155}
156
124/* 157/*
125 * Switch to the asynchronous interrupt stack for softirq execution. 158 * Switch to the asynchronous interrupt stack for softirq execution.
126 */ 159 */
@@ -159,14 +192,6 @@ asmlinkage void do_softirq(void)
159 local_irq_restore(flags); 192 local_irq_restore(flags);
160} 193}
161 194
162#ifdef CONFIG_PROC_FS
163void init_irq_proc(void)
164{
165 if (proc_mkdir("irq", NULL))
166 create_prof_cpu_mask();
167}
168#endif
169
170/* 195/*
171 * ext_int_hash[index] is the list head for all external interrupts that hash 196 * ext_int_hash[index] is the list head for all external interrupts that hash
172 * to this index. 197 * to this index.
@@ -183,14 +208,6 @@ struct ext_int_info {
183/* ext_int_hash_lock protects the handler lists for external interrupts */ 208/* ext_int_hash_lock protects the handler lists for external interrupts */
184DEFINE_SPINLOCK(ext_int_hash_lock); 209DEFINE_SPINLOCK(ext_int_hash_lock);
185 210
186static void __init init_external_interrupts(void)
187{
188 int idx;
189
190 for (idx = 0; idx < ARRAY_SIZE(ext_int_hash); idx++)
191 INIT_LIST_HEAD(&ext_int_hash[idx]);
192}
193
194static inline int ext_hash(u16 code) 211static inline int ext_hash(u16 code)
195{ 212{
196 return (code + (code >> 9)) & 0xff; 213 return (code + (code >> 9)) & 0xff;
@@ -234,20 +251,13 @@ int unregister_external_interrupt(u16 code, ext_int_handler_t handler)
234} 251}
235EXPORT_SYMBOL(unregister_external_interrupt); 252EXPORT_SYMBOL(unregister_external_interrupt);
236 253
237void __irq_entry do_extint(struct pt_regs *regs) 254static irqreturn_t do_ext_interrupt(int irq, void *dummy)
238{ 255{
256 struct pt_regs *regs = get_irq_regs();
239 struct ext_code ext_code; 257 struct ext_code ext_code;
240 struct pt_regs *old_regs;
241 struct ext_int_info *p; 258 struct ext_int_info *p;
242 int index; 259 int index;
243 260
244 old_regs = set_irq_regs(regs);
245 irq_enter();
246 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) {
247 /* Serve timer interrupts first. */
248 clock_comparator_work();
249 }
250 kstat_incr_irqs_this_cpu(EXTERNAL_INTERRUPT, NULL);
251 ext_code = *(struct ext_code *) &regs->int_code; 261 ext_code = *(struct ext_code *) &regs->int_code;
252 if (ext_code.code != 0x1004) 262 if (ext_code.code != 0x1004)
253 __get_cpu_var(s390_idle).nohz_delay = 1; 263 __get_cpu_var(s390_idle).nohz_delay = 1;
@@ -259,13 +269,25 @@ void __irq_entry do_extint(struct pt_regs *regs)
259 p->handler(ext_code, regs->int_parm, 269 p->handler(ext_code, regs->int_parm,
260 regs->int_parm_long); 270 regs->int_parm_long);
261 rcu_read_unlock(); 271 rcu_read_unlock();
262 irq_exit(); 272
263 set_irq_regs(old_regs); 273 return IRQ_HANDLED;
264} 274}
265 275
266void __init init_IRQ(void) 276static struct irqaction external_interrupt = {
277 .name = "EXT",
278 .handler = do_ext_interrupt,
279};
280
281void __init init_ext_interrupts(void)
267{ 282{
268 init_external_interrupts(); 283 int idx;
284
285 for (idx = 0; idx < ARRAY_SIZE(ext_int_hash); idx++)
286 INIT_LIST_HEAD(&ext_int_hash[idx]);
287
288 irq_set_chip_and_handler(EXT_INTERRUPT,
289 &dummy_irq_chip, handle_percpu_irq);
290 setup_irq(EXT_INTERRUPT, &external_interrupt);
269} 291}
270 292
271static DEFINE_SPINLOCK(sc_irq_lock); 293static DEFINE_SPINLOCK(sc_irq_lock);
@@ -313,69 +335,3 @@ void measurement_alert_subclass_unregister(void)
313 spin_unlock(&ma_subclass_lock); 335 spin_unlock(&ma_subclass_lock);
314} 336}
315EXPORT_SYMBOL(measurement_alert_subclass_unregister); 337EXPORT_SYMBOL(measurement_alert_subclass_unregister);
316
317#ifdef CONFIG_SMP
318void synchronize_irq(unsigned int irq)
319{
320 /*
321 * Not needed, the handler is protected by a lock and IRQs that occur
322 * after the handler is deleted are just NOPs.
323 */
324}
325EXPORT_SYMBOL_GPL(synchronize_irq);
326#endif
327
328#ifndef CONFIG_PCI
329
330/* Only PCI devices have dynamically-defined IRQ handlers */
331
332int request_irq(unsigned int irq, irq_handler_t handler,
333 unsigned long irqflags, const char *devname, void *dev_id)
334{
335 return -EINVAL;
336}
337EXPORT_SYMBOL_GPL(request_irq);
338
339void free_irq(unsigned int irq, void *dev_id)
340{
341 WARN_ON(1);
342}
343EXPORT_SYMBOL_GPL(free_irq);
344
345void enable_irq(unsigned int irq)
346{
347 WARN_ON(1);
348}
349EXPORT_SYMBOL_GPL(enable_irq);
350
351void disable_irq(unsigned int irq)
352{
353 WARN_ON(1);
354}
355EXPORT_SYMBOL_GPL(disable_irq);
356
357#endif /* !CONFIG_PCI */
358
359void disable_irq_nosync(unsigned int irq)
360{
361 disable_irq(irq);
362}
363EXPORT_SYMBOL_GPL(disable_irq_nosync);
364
365unsigned long probe_irq_on(void)
366{
367 return 0;
368}
369EXPORT_SYMBOL_GPL(probe_irq_on);
370
371int probe_irq_off(unsigned long val)
372{
373 return 0;
374}
375EXPORT_SYMBOL_GPL(probe_irq_off);
376
377unsigned int probe_irq_mask(unsigned long val)
378{
379 return val;
380}
381EXPORT_SYMBOL_GPL(probe_irq_mask);