diff options
| author | James Bottomley <James.Bottomley@suse.de> | 2011-02-10 12:21:02 -0500 |
|---|---|---|
| committer | James Bottomley <James.Bottomley@suse.de> | 2011-02-10 12:21:02 -0500 |
| commit | 1c0f647690f1b9471d63af48dec960bc59492e61 (patch) | |
| tree | 5bfaabb033ec7b907a7c7ee19603d012e1c82867 /arch/parisc/kernel | |
| parent | 2c250ad23d438fa56e1beded374e44dc565c6c47 (diff) | |
| parent | 4c4231ea2f794d73bbb50b8d84e00c66a012a607 (diff) | |
Merge branch 'irq' into for-next
Diffstat (limited to 'arch/parisc/kernel')
| -rw-r--r-- | arch/parisc/kernel/irq.c | 64 |
1 file changed, 37 insertions, 27 deletions
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index 3948f1dd455..cb450e1e79b 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c | |||
| @@ -52,9 +52,9 @@ static volatile unsigned long cpu_eiem = 0; | |||
| 52 | */ | 52 | */ |
| 53 | static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL; | 53 | static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL; |
| 54 | 54 | ||
| 55 | static void cpu_mask_irq(unsigned int irq) | 55 | static void cpu_mask_irq(struct irq_data *d) |
| 56 | { | 56 | { |
| 57 | unsigned long eirr_bit = EIEM_MASK(irq); | 57 | unsigned long eirr_bit = EIEM_MASK(d->irq); |
| 58 | 58 | ||
| 59 | cpu_eiem &= ~eirr_bit; | 59 | cpu_eiem &= ~eirr_bit; |
| 60 | /* Do nothing on the other CPUs. If they get this interrupt, | 60 | /* Do nothing on the other CPUs. If they get this interrupt, |
| @@ -63,7 +63,7 @@ static void cpu_mask_irq(unsigned int irq) | |||
| 63 | * then gets disabled */ | 63 | * then gets disabled */ |
| 64 | } | 64 | } |
| 65 | 65 | ||
| 66 | static void cpu_unmask_irq(unsigned int irq) | 66 | static void __cpu_unmask_irq(unsigned int irq) |
| 67 | { | 67 | { |
| 68 | unsigned long eirr_bit = EIEM_MASK(irq); | 68 | unsigned long eirr_bit = EIEM_MASK(irq); |
| 69 | 69 | ||
| @@ -75,9 +75,14 @@ static void cpu_unmask_irq(unsigned int irq) | |||
| 75 | smp_send_all_nop(); | 75 | smp_send_all_nop(); |
| 76 | } | 76 | } |
| 77 | 77 | ||
| 78 | void cpu_ack_irq(unsigned int irq) | 78 | static void cpu_unmask_irq(struct irq_data *d) |
| 79 | { | ||
| 80 | __cpu_unmask_irq(d->irq); | ||
| 81 | } | ||
| 82 | |||
| 83 | void cpu_ack_irq(struct irq_data *d) | ||
| 79 | { | 84 | { |
| 80 | unsigned long mask = EIEM_MASK(irq); | 85 | unsigned long mask = EIEM_MASK(d->irq); |
| 81 | int cpu = smp_processor_id(); | 86 | int cpu = smp_processor_id(); |
| 82 | 87 | ||
| 83 | /* Clear in EIEM so we can no longer process */ | 88 | /* Clear in EIEM so we can no longer process */ |
| @@ -90,9 +95,9 @@ void cpu_ack_irq(unsigned int irq) | |||
| 90 | mtctl(mask, 23); | 95 | mtctl(mask, 23); |
| 91 | } | 96 | } |
| 92 | 97 | ||
| 93 | void cpu_eoi_irq(unsigned int irq) | 98 | void cpu_eoi_irq(struct irq_data *d) |
| 94 | { | 99 | { |
| 95 | unsigned long mask = EIEM_MASK(irq); | 100 | unsigned long mask = EIEM_MASK(d->irq); |
| 96 | int cpu = smp_processor_id(); | 101 | int cpu = smp_processor_id(); |
| 97 | 102 | ||
| 98 | /* set it in the eiems---it's no longer in process */ | 103 | /* set it in the eiems---it's no longer in process */ |
| @@ -103,15 +108,16 @@ void cpu_eoi_irq(unsigned int irq) | |||
| 103 | } | 108 | } |
| 104 | 109 | ||
| 105 | #ifdef CONFIG_SMP | 110 | #ifdef CONFIG_SMP |
| 106 | int cpu_check_affinity(unsigned int irq, const struct cpumask *dest) | 111 | int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest) |
| 107 | { | 112 | { |
| 108 | int cpu_dest; | 113 | int cpu_dest; |
| 109 | 114 | ||
| 110 | /* timer and ipi have to always be received on all CPUs */ | 115 | /* timer and ipi have to always be received on all CPUs */ |
| 111 | if (CHECK_IRQ_PER_CPU(irq_to_desc(irq)->status)) { | 116 | if (CHECK_IRQ_PER_CPU(irq_to_desc(d->irq)->status)) { |
| 112 | /* Bad linux design decision. The mask has already | 117 | /* Bad linux design decision. The mask has already |
| 113 | * been set; we must reset it */ | 118 | * been set; we must reset it. Will fix - tglx |
| 114 | cpumask_setall(irq_desc[irq].affinity); | 119 | */ |
| 120 | cpumask_setall(d->affinity); | ||
| 115 | return -EINVAL; | 121 | return -EINVAL; |
| 116 | } | 122 | } |
| 117 | 123 | ||
| @@ -121,33 +127,34 @@ int cpu_check_affinity(unsigned int irq, const struct cpumask *dest) | |||
| 121 | return cpu_dest; | 127 | return cpu_dest; |
| 122 | } | 128 | } |
| 123 | 129 | ||
| 124 | static int cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest) | 130 | static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest, |
| 131 | bool force) | ||
| 125 | { | 132 | { |
| 126 | int cpu_dest; | 133 | int cpu_dest; |
| 127 | 134 | ||
| 128 | cpu_dest = cpu_check_affinity(irq, dest); | 135 | cpu_dest = cpu_check_affinity(d, dest); |
| 129 | if (cpu_dest < 0) | 136 | if (cpu_dest < 0) |
| 130 | return -1; | 137 | return -1; |
| 131 | 138 | ||
| 132 | cpumask_copy(irq_desc[irq].affinity, dest); | 139 | cpumask_copy(d->affinity, dest); |
| 133 | 140 | ||
| 134 | return 0; | 141 | return 0; |
| 135 | } | 142 | } |
| 136 | #endif | 143 | #endif |
| 137 | 144 | ||
| 138 | static struct irq_chip cpu_interrupt_type = { | 145 | static struct irq_chip cpu_interrupt_type = { |
| 139 | .name = "CPU", | 146 | .name = "CPU", |
| 140 | .mask = cpu_mask_irq, | 147 | .irq_mask = cpu_mask_irq, |
| 141 | .unmask = cpu_unmask_irq, | 148 | .irq_unmask = cpu_unmask_irq, |
| 142 | .ack = cpu_ack_irq, | 149 | .irq_ack = cpu_ack_irq, |
| 143 | .eoi = cpu_eoi_irq, | 150 | .irq_eoi = cpu_eoi_irq, |
| 144 | #ifdef CONFIG_SMP | 151 | #ifdef CONFIG_SMP |
| 145 | .set_affinity = cpu_set_affinity_irq, | 152 | .irq_set_affinity = cpu_set_affinity_irq, |
| 146 | #endif | 153 | #endif |
| 147 | /* XXX: Needs to be written. We managed without it so far, but | 154 | /* XXX: Needs to be written. We managed without it so far, but |
| 148 | * we really ought to write it. | 155 | * we really ought to write it. |
| 149 | */ | 156 | */ |
| 150 | .retrigger = NULL, | 157 | .irq_retrigger = NULL, |
| 151 | }; | 158 | }; |
| 152 | 159 | ||
| 153 | int show_interrupts(struct seq_file *p, void *v) | 160 | int show_interrupts(struct seq_file *p, void *v) |
| @@ -181,7 +188,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
| 181 | seq_printf(p, "%10u ", kstat_irqs(i)); | 188 | seq_printf(p, "%10u ", kstat_irqs(i)); |
| 182 | #endif | 189 | #endif |
| 183 | 190 | ||
| 184 | seq_printf(p, " %14s", irq_desc[i].chip->name); | 191 | seq_printf(p, " %14s", irq_desc[i].irq_data.chip->name); |
| 185 | #ifndef PARISC_IRQ_CR16_COUNTS | 192 | #ifndef PARISC_IRQ_CR16_COUNTS |
| 186 | seq_printf(p, " %s", action->name); | 193 | seq_printf(p, " %s", action->name); |
| 187 | 194 | ||
| @@ -233,14 +240,14 @@ int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data) | |||
| 233 | { | 240 | { |
| 234 | if (irq_desc[irq].action) | 241 | if (irq_desc[irq].action) |
| 235 | return -EBUSY; | 242 | return -EBUSY; |
| 236 | if (irq_desc[irq].chip != &cpu_interrupt_type) | 243 | if (get_irq_chip(irq) != &cpu_interrupt_type) |
| 237 | return -EBUSY; | 244 | return -EBUSY; |
| 238 | 245 | ||
| 239 | /* for iosapic interrupts */ | 246 | /* for iosapic interrupts */ |
| 240 | if (type) { | 247 | if (type) { |
| 241 | set_irq_chip_and_handler(irq, type, handle_percpu_irq); | 248 | set_irq_chip_and_handler(irq, type, handle_percpu_irq); |
| 242 | set_irq_chip_data(irq, data); | 249 | set_irq_chip_data(irq, data); |
| 243 | cpu_unmask_irq(irq); | 250 | __cpu_unmask_irq(irq); |
| 244 | } | 251 | } |
| 245 | return 0; | 252 | return 0; |
| 246 | } | 253 | } |
| @@ -289,7 +296,8 @@ int txn_alloc_irq(unsigned int bits_wide) | |||
| 289 | unsigned long txn_affinity_addr(unsigned int irq, int cpu) | 296 | unsigned long txn_affinity_addr(unsigned int irq, int cpu) |
| 290 | { | 297 | { |
| 291 | #ifdef CONFIG_SMP | 298 | #ifdef CONFIG_SMP |
| 292 | cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu)); | 299 | struct irq_data *d = irq_get_irq_data(irq); |
| 300 | cpumask_copy(d->affinity, cpumask_of(cpu)); | ||
| 293 | #endif | 301 | #endif |
| 294 | 302 | ||
| 295 | return per_cpu(cpu_data, cpu).txn_addr; | 303 | return per_cpu(cpu_data, cpu).txn_addr; |
| @@ -333,6 +341,7 @@ void do_cpu_irq_mask(struct pt_regs *regs) | |||
| 333 | unsigned long eirr_val; | 341 | unsigned long eirr_val; |
| 334 | int irq, cpu = smp_processor_id(); | 342 | int irq, cpu = smp_processor_id(); |
| 335 | #ifdef CONFIG_SMP | 343 | #ifdef CONFIG_SMP |
| 344 | struct irq_desc *desc; | ||
| 336 | cpumask_t dest; | 345 | cpumask_t dest; |
| 337 | #endif | 346 | #endif |
| 338 | 347 | ||
| @@ -346,8 +355,9 @@ void do_cpu_irq_mask(struct pt_regs *regs) | |||
| 346 | irq = eirr_to_irq(eirr_val); | 355 | irq = eirr_to_irq(eirr_val); |
| 347 | 356 | ||
| 348 | #ifdef CONFIG_SMP | 357 | #ifdef CONFIG_SMP |
| 349 | cpumask_copy(&dest, irq_desc[irq].affinity); | 358 | desc = irq_to_desc(irq); |
| 350 | if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) && | 359 | cpumask_copy(&dest, desc->irq_data.affinity); |
| 360 | if (CHECK_IRQ_PER_CPU(desc->status) && | ||
| 351 | !cpu_isset(smp_processor_id(), dest)) { | 361 | !cpu_isset(smp_processor_id(), dest)) { |
| 352 | int cpu = first_cpu(dest); | 362 | int cpu = first_cpu(dest); |
| 353 | 363 | ||
