author     James Bottomley <James.Bottomley@suse.de>	2011-02-10 12:21:02 -0500
committer  James Bottomley <James.Bottomley@suse.de>	2011-02-10 12:21:02 -0500
commit     1c0f647690f1b9471d63af48dec960bc59492e61 (patch)
tree       5bfaabb033ec7b907a7c7ee19603d012e1c82867 /arch/parisc
parent     2c250ad23d438fa56e1beded374e44dc565c6c47 (diff)
parent     4c4231ea2f794d73bbb50b8d84e00c66a012a607 (diff)
Merge branch 'irq' into for-next
Diffstat (limited to 'arch/parisc')
-rw-r--r--  arch/parisc/Kconfig            |  1
-rw-r--r--  arch/parisc/include/asm/irq.h  | 13
-rw-r--r--  arch/parisc/kernel/irq.c       | 64
3 files changed, 42 insertions(+), 36 deletions(-)
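
The merge below pulls in the conversion of the parisc interrupt code to the struct irq_data based irq_chip callbacks needed for GENERIC_HARDIRQS_NO_DEPRECATED: every chip method now receives a pointer to the per-interrupt data instead of a bare irq number. As a rough, self-contained sketch of the signature change only (toy demo_* names invented here, not the real <linux/irq.h> definitions):

/* demo_irqdata.c - illustrates the old vs. new irq_chip callback shape.
 * struct demo_irq_data and the demo_ack_* functions are stand-ins for
 * this example only; they are not kernel code. */
#include <stdio.h>

struct demo_irq_data {
	unsigned int irq;	/* Linux irq number, as in d->irq */
	void *chip_data;	/* per-chip cookie, as in d->chip_data */
};

/* Old style: the callback is handed a bare irq number. */
static void demo_ack_old(unsigned int irq)
{
	printf("ack irq %u\n", irq);
}

/* New style: the callback is handed the per-irq data and pulls the
 * number (or affinity, chip_data, ...) out of it, the way cpu_ack_irq()
 * uses d->irq in the hunks below. */
static void demo_ack_new(struct demo_irq_data *d)
{
	printf("ack irq %u\n", d->irq);
}

int main(void)
{
	struct demo_irq_data d = { .irq = 5, .chip_data = NULL };

	demo_ack_old(5);
	demo_ack_new(&d);
	return 0;
}

The same renaming applies to the irq_chip fields themselves (.ack becomes .irq_ack, .set_affinity becomes .irq_set_affinity, and so on), as the cpu_interrupt_type hunk shows.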
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index fed2946f7335..fafdf30bea9e 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -15,6 +15,7 @@ config PARISC
 	select HAVE_GENERIC_HARDIRQS
 	select GENERIC_IRQ_PROBE
 	select IRQ_PER_CPU
+	select GENERIC_HARDIRQS_NO_DEPRECATED
 
 	help
 	  The PA-RISC microprocessor is designed by Hewlett-Packard and used
diff --git a/arch/parisc/include/asm/irq.h b/arch/parisc/include/asm/irq.h
index c67dccf2e31f..1073599a7be9 100644
--- a/arch/parisc/include/asm/irq.h
+++ b/arch/parisc/include/asm/irq.h
@@ -32,15 +32,10 @@ static __inline__ int irq_canonicalize(int irq)
 }
 
 struct irq_chip;
+struct irq_data;
 
-/*
- * Some useful "we don't have to do anything here" handlers.  Should
- * probably be provided by the generic code.
- */
-void no_ack_irq(unsigned int irq);
-void no_end_irq(unsigned int irq);
-void cpu_ack_irq(unsigned int irq);
-void cpu_eoi_irq(unsigned int irq);
+void cpu_ack_irq(struct irq_data *d);
+void cpu_eoi_irq(struct irq_data *d);
 
 extern int txn_alloc_irq(unsigned int nbits);
 extern int txn_claim_irq(int);
@@ -49,7 +44,7 @@ extern unsigned long txn_alloc_addr(unsigned int);
 extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);
 
 extern int cpu_claim_irq(unsigned int irq, struct irq_chip *, void *);
-extern int cpu_check_affinity(unsigned int irq, const struct cpumask *dest);
+extern int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest);
 
 /* soft power switch support (power.c) */
 extern struct tasklet_struct power_tasklet;
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 3948f1dd455a..cb450e1e79b3 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -52,9 +52,9 @@ static volatile unsigned long cpu_eiem = 0;
 */
 static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;
 
-static void cpu_mask_irq(unsigned int irq)
+static void cpu_mask_irq(struct irq_data *d)
 {
-	unsigned long eirr_bit = EIEM_MASK(irq);
+	unsigned long eirr_bit = EIEM_MASK(d->irq);
 
 	cpu_eiem &= ~eirr_bit;
 	/* Do nothing on the other CPUs.  If they get this interrupt,
@@ -63,7 +63,7 @@ static void cpu_mask_irq(unsigned int irq)
 	 * then gets disabled */
 }
 
-static void cpu_unmask_irq(unsigned int irq)
+static void __cpu_unmask_irq(unsigned int irq)
 {
 	unsigned long eirr_bit = EIEM_MASK(irq);
 
@@ -75,9 +75,14 @@ static void cpu_unmask_irq(unsigned int irq)
 	smp_send_all_nop();
 }
 
-void cpu_ack_irq(unsigned int irq)
+static void cpu_unmask_irq(struct irq_data *d)
+{
+	__cpu_unmask_irq(d->irq);
+}
+
+void cpu_ack_irq(struct irq_data *d)
 {
-	unsigned long mask = EIEM_MASK(irq);
+	unsigned long mask = EIEM_MASK(d->irq);
 	int cpu = smp_processor_id();
 
 	/* Clear in EIEM so we can no longer process */
@@ -90,9 +95,9 @@ void cpu_ack_irq(unsigned int irq)
 	mtctl(mask, 23);
 }
 
-void cpu_eoi_irq(unsigned int irq)
+void cpu_eoi_irq(struct irq_data *d)
 {
-	unsigned long mask = EIEM_MASK(irq);
+	unsigned long mask = EIEM_MASK(d->irq);
 	int cpu = smp_processor_id();
 
 	/* set it in the eiems---it's no longer in process */
@@ -103,15 +108,16 @@ void cpu_eoi_irq(unsigned int irq)
 }
 
 #ifdef CONFIG_SMP
-int cpu_check_affinity(unsigned int irq, const struct cpumask *dest)
+int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
 {
 	int cpu_dest;
 
 	/* timer and ipi have to always be received on all CPUs */
-	if (CHECK_IRQ_PER_CPU(irq_to_desc(irq)->status)) {
+	if (CHECK_IRQ_PER_CPU(irq_to_desc(d->irq)->status)) {
 		/* Bad linux design decision.  The mask has already
-		 * been set; we must reset it */
-		cpumask_setall(irq_desc[irq].affinity);
+		 * been set; we must reset it. Will fix - tglx
+		 */
+		cpumask_setall(d->affinity);
 		return -EINVAL;
 	}
 
@@ -121,33 +127,34 @@ int cpu_check_affinity(unsigned int irq, const struct cpumask *dest)
 	return cpu_dest;
 }
 
-static int cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
+static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
+				bool force)
 {
 	int cpu_dest;
 
-	cpu_dest = cpu_check_affinity(irq, dest);
+	cpu_dest = cpu_check_affinity(d, dest);
 	if (cpu_dest < 0)
 		return -1;
 
-	cpumask_copy(irq_desc[irq].affinity, dest);
+	cpumask_copy(d->affinity, dest);
 
 	return 0;
 }
 #endif
 
 static struct irq_chip cpu_interrupt_type = {
-	.name		= "CPU",
-	.mask		= cpu_mask_irq,
-	.unmask		= cpu_unmask_irq,
-	.ack		= cpu_ack_irq,
-	.eoi		= cpu_eoi_irq,
+	.name			= "CPU",
+	.irq_mask		= cpu_mask_irq,
+	.irq_unmask		= cpu_unmask_irq,
+	.irq_ack		= cpu_ack_irq,
+	.irq_eoi		= cpu_eoi_irq,
 #ifdef CONFIG_SMP
-	.set_affinity	= cpu_set_affinity_irq,
+	.irq_set_affinity	= cpu_set_affinity_irq,
 #endif
 	/* XXX: Needs to be written.  We managed without it so far, but
 	 * we really ought to write it.
 	 */
-	.retrigger	= NULL,
+	.irq_retrigger	= NULL,
 };
 
 int show_interrupts(struct seq_file *p, void *v)
@@ -181,7 +188,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #endif
 
-		seq_printf(p, " %14s", irq_desc[i].chip->name);
+		seq_printf(p, " %14s", irq_desc[i].irq_data.chip->name);
 #ifndef PARISC_IRQ_CR16_COUNTS
 		seq_printf(p, " %s", action->name);
 
@@ -233,14 +240,14 @@ int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
 {
 	if (irq_desc[irq].action)
 		return -EBUSY;
-	if (irq_desc[irq].chip != &cpu_interrupt_type)
+	if (get_irq_chip(irq) != &cpu_interrupt_type)
 		return -EBUSY;
 
 	/* for iosapic interrupts */
 	if (type) {
 		set_irq_chip_and_handler(irq, type, handle_percpu_irq);
 		set_irq_chip_data(irq, data);
-		cpu_unmask_irq(irq);
+		__cpu_unmask_irq(irq);
 	}
 	return 0;
 }
@@ -289,7 +296,8 @@ int txn_alloc_irq(unsigned int bits_wide)
 unsigned long txn_affinity_addr(unsigned int irq, int cpu)
 {
 #ifdef CONFIG_SMP
-	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
+	struct irq_data *d = irq_get_irq_data(irq);
+	cpumask_copy(d->affinity, cpumask_of(cpu));
 #endif
 
 	return per_cpu(cpu_data, cpu).txn_addr;
@@ -333,6 +341,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 	unsigned long eirr_val;
 	int irq, cpu = smp_processor_id();
 #ifdef CONFIG_SMP
+	struct irq_desc *desc;
 	cpumask_t dest;
 #endif
 
@@ -346,8 +355,9 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 	irq = eirr_to_irq(eirr_val);
 
 #ifdef CONFIG_SMP
-	cpumask_copy(&dest, irq_desc[irq].affinity);
-	if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
+	desc = irq_to_desc(irq);
+	cpumask_copy(&dest, desc->irq_data.affinity);
+	if (CHECK_IRQ_PER_CPU(desc->status) &&
 	    !cpu_isset(smp_processor_id(), dest)) {
 		int cpu = first_cpu(dest);
 
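
One design detail in the irq.c hunks above: cpu_claim_irq() still deals in raw irq numbers, so the unmask logic is split into __cpu_unmask_irq(unsigned int irq) plus a thin struct irq_data wrapper used as the chip callback. A minimal sketch of that wrapper pattern, again with invented demo_* names rather than the kernel API:

/* demo_wrapper.c - keeping an unsigned-int core helper and layering a
 * thin irq_data shim on top, mirroring __cpu_unmask_irq()/cpu_unmask_irq().
 * All demo_* names are illustrative only. */
#include <stdio.h>

struct demo_irq_data {
	unsigned int irq;
};

/* Core helper keeps the old signature so callers that only have an irq
 * number (like cpu_claim_irq()) can still call it directly. */
static void demo_unmask_core(unsigned int irq)
{
	printf("unmask irq %u\n", irq);
}

/* Chip callback unwraps the irq_data and delegates to the core helper. */
static void demo_unmask(struct demo_irq_data *d)
{
	demo_unmask_core(d->irq);
}

int main(void)
{
	struct demo_irq_data d = { .irq = 3 };

	demo_unmask_core(3);	/* raw-number path, as in cpu_claim_irq() */
	demo_unmask(&d);	/* irq_data path, as in the chip callback */
	return 0;
}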