diff options
author | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
---|---|---|
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
commit | c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch) | |
tree | ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /arch/parisc/kernel/irq.c | |
parent | ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff) | |
parent | 6a00f206debf8a5c8899055726ad127dbeeed098 (diff) |
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'arch/parisc/kernel/irq.c')
-rw-r--r-- | arch/parisc/kernel/irq.c | 96 |
1 files changed, 47 insertions, 49 deletions
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index efbcee5d2220..c0b1affc06a8 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c | |||
@@ -52,9 +52,9 @@ static volatile unsigned long cpu_eiem = 0; | |||
52 | */ | 52 | */ |
53 | static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL; | 53 | static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL; |
54 | 54 | ||
55 | static void cpu_disable_irq(unsigned int irq) | 55 | static void cpu_mask_irq(struct irq_data *d) |
56 | { | 56 | { |
57 | unsigned long eirr_bit = EIEM_MASK(irq); | 57 | unsigned long eirr_bit = EIEM_MASK(d->irq); |
58 | 58 | ||
59 | cpu_eiem &= ~eirr_bit; | 59 | cpu_eiem &= ~eirr_bit; |
60 | /* Do nothing on the other CPUs. If they get this interrupt, | 60 | /* Do nothing on the other CPUs. If they get this interrupt, |
@@ -63,7 +63,7 @@ static void cpu_disable_irq(unsigned int irq) | |||
63 | * then gets disabled */ | 63 | * then gets disabled */ |
64 | } | 64 | } |
65 | 65 | ||
66 | static void cpu_enable_irq(unsigned int irq) | 66 | static void __cpu_unmask_irq(unsigned int irq) |
67 | { | 67 | { |
68 | unsigned long eirr_bit = EIEM_MASK(irq); | 68 | unsigned long eirr_bit = EIEM_MASK(irq); |
69 | 69 | ||
@@ -75,18 +75,14 @@ static void cpu_enable_irq(unsigned int irq) | |||
75 | smp_send_all_nop(); | 75 | smp_send_all_nop(); |
76 | } | 76 | } |
77 | 77 | ||
78 | static unsigned int cpu_startup_irq(unsigned int irq) | 78 | static void cpu_unmask_irq(struct irq_data *d) |
79 | { | 79 | { |
80 | cpu_enable_irq(irq); | 80 | __cpu_unmask_irq(d->irq); |
81 | return 0; | ||
82 | } | 81 | } |
83 | 82 | ||
84 | void no_ack_irq(unsigned int irq) { } | 83 | void cpu_ack_irq(struct irq_data *d) |
85 | void no_end_irq(unsigned int irq) { } | ||
86 | |||
87 | void cpu_ack_irq(unsigned int irq) | ||
88 | { | 84 | { |
89 | unsigned long mask = EIEM_MASK(irq); | 85 | unsigned long mask = EIEM_MASK(d->irq); |
90 | int cpu = smp_processor_id(); | 86 | int cpu = smp_processor_id(); |
91 | 87 | ||
92 | /* Clear in EIEM so we can no longer process */ | 88 | /* Clear in EIEM so we can no longer process */ |
@@ -99,9 +95,9 @@ void cpu_ack_irq(unsigned int irq) | |||
99 | mtctl(mask, 23); | 95 | mtctl(mask, 23); |
100 | } | 96 | } |
101 | 97 | ||
102 | void cpu_end_irq(unsigned int irq) | 98 | void cpu_eoi_irq(struct irq_data *d) |
103 | { | 99 | { |
104 | unsigned long mask = EIEM_MASK(irq); | 100 | unsigned long mask = EIEM_MASK(d->irq); |
105 | int cpu = smp_processor_id(); | 101 | int cpu = smp_processor_id(); |
106 | 102 | ||
107 | /* set it in the eiems---it's no longer in process */ | 103 | /* set it in the eiems---it's no longer in process */ |
@@ -112,17 +108,13 @@ void cpu_end_irq(unsigned int irq) | |||
112 | } | 108 | } |
113 | 109 | ||
114 | #ifdef CONFIG_SMP | 110 | #ifdef CONFIG_SMP |
115 | int cpu_check_affinity(unsigned int irq, const struct cpumask *dest) | 111 | int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest) |
116 | { | 112 | { |
117 | int cpu_dest; | 113 | int cpu_dest; |
118 | 114 | ||
119 | /* timer and ipi have to always be received on all CPUs */ | 115 | /* timer and ipi have to always be received on all CPUs */ |
120 | if (CHECK_IRQ_PER_CPU(irq)) { | 116 | if (irqd_is_per_cpu(d)) |
121 | /* Bad linux design decision. The mask has already | ||
122 | * been set; we must reset it */ | ||
123 | cpumask_setall(irq_desc[irq].affinity); | ||
124 | return -EINVAL; | 117 | return -EINVAL; |
125 | } | ||
126 | 118 | ||
127 | /* whatever mask they set, we just allow one CPU */ | 119 | /* whatever mask they set, we just allow one CPU */ |
128 | cpu_dest = first_cpu(*dest); | 120 | cpu_dest = first_cpu(*dest); |
@@ -130,35 +122,34 @@ int cpu_check_affinity(unsigned int irq, const struct cpumask *dest) | |||
130 | return cpu_dest; | 122 | return cpu_dest; |
131 | } | 123 | } |
132 | 124 | ||
133 | static int cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest) | 125 | static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest, |
126 | bool force) | ||
134 | { | 127 | { |
135 | int cpu_dest; | 128 | int cpu_dest; |
136 | 129 | ||
137 | cpu_dest = cpu_check_affinity(irq, dest); | 130 | cpu_dest = cpu_check_affinity(d, dest); |
138 | if (cpu_dest < 0) | 131 | if (cpu_dest < 0) |
139 | return -1; | 132 | return -1; |
140 | 133 | ||
141 | cpumask_copy(irq_desc[irq].affinity, dest); | 134 | cpumask_copy(d->affinity, dest); |
142 | 135 | ||
143 | return 0; | 136 | return 0; |
144 | } | 137 | } |
145 | #endif | 138 | #endif |
146 | 139 | ||
147 | static struct irq_chip cpu_interrupt_type = { | 140 | static struct irq_chip cpu_interrupt_type = { |
148 | .name = "CPU", | 141 | .name = "CPU", |
149 | .startup = cpu_startup_irq, | 142 | .irq_mask = cpu_mask_irq, |
150 | .shutdown = cpu_disable_irq, | 143 | .irq_unmask = cpu_unmask_irq, |
151 | .enable = cpu_enable_irq, | 144 | .irq_ack = cpu_ack_irq, |
152 | .disable = cpu_disable_irq, | 145 | .irq_eoi = cpu_eoi_irq, |
153 | .ack = cpu_ack_irq, | ||
154 | .end = cpu_end_irq, | ||
155 | #ifdef CONFIG_SMP | 146 | #ifdef CONFIG_SMP |
156 | .set_affinity = cpu_set_affinity_irq, | 147 | .irq_set_affinity = cpu_set_affinity_irq, |
157 | #endif | 148 | #endif |
158 | /* XXX: Needs to be written. We managed without it so far, but | 149 | /* XXX: Needs to be written. We managed without it so far, but |
159 | * we really ought to write it. | 150 | * we really ought to write it. |
160 | */ | 151 | */ |
161 | .retrigger = NULL, | 152 | .irq_retrigger = NULL, |
162 | }; | 153 | }; |
163 | 154 | ||
164 | int show_interrupts(struct seq_file *p, void *v) | 155 | int show_interrupts(struct seq_file *p, void *v) |
@@ -178,10 +169,11 @@ int show_interrupts(struct seq_file *p, void *v) | |||
178 | } | 169 | } |
179 | 170 | ||
180 | if (i < NR_IRQS) { | 171 | if (i < NR_IRQS) { |
172 | struct irq_desc *desc = irq_to_desc(i); | ||
181 | struct irqaction *action; | 173 | struct irqaction *action; |
182 | 174 | ||
183 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); | 175 | raw_spin_lock_irqsave(&desc->lock, flags); |
184 | action = irq_desc[i].action; | 176 | action = desc->action; |
185 | if (!action) | 177 | if (!action) |
186 | goto skip; | 178 | goto skip; |
187 | seq_printf(p, "%3d: ", i); | 179 | seq_printf(p, "%3d: ", i); |
@@ -192,7 +184,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
192 | seq_printf(p, "%10u ", kstat_irqs(i)); | 184 | seq_printf(p, "%10u ", kstat_irqs(i)); |
193 | #endif | 185 | #endif |
194 | 186 | ||
195 | seq_printf(p, " %14s", irq_desc[i].chip->name); | 187 | seq_printf(p, " %14s", irq_desc_get_chip(desc)->name); |
196 | #ifndef PARISC_IRQ_CR16_COUNTS | 188 | #ifndef PARISC_IRQ_CR16_COUNTS |
197 | seq_printf(p, " %s", action->name); | 189 | seq_printf(p, " %s", action->name); |
198 | 190 | ||
@@ -224,7 +216,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
224 | 216 | ||
225 | seq_putc(p, '\n'); | 217 | seq_putc(p, '\n'); |
226 | skip: | 218 | skip: |
227 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); | 219 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
228 | } | 220 | } |
229 | 221 | ||
230 | return 0; | 222 | return 0; |
@@ -242,15 +234,16 @@ int show_interrupts(struct seq_file *p, void *v) | |||
242 | 234 | ||
243 | int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data) | 235 | int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data) |
244 | { | 236 | { |
245 | if (irq_desc[irq].action) | 237 | if (irq_has_action(irq)) |
246 | return -EBUSY; | 238 | return -EBUSY; |
247 | if (irq_desc[irq].chip != &cpu_interrupt_type) | 239 | if (irq_get_chip(irq) != &cpu_interrupt_type) |
248 | return -EBUSY; | 240 | return -EBUSY; |
249 | 241 | ||
242 | /* for iosapic interrupts */ | ||
250 | if (type) { | 243 | if (type) { |
251 | irq_desc[irq].chip = type; | 244 | irq_set_chip_and_handler(irq, type, handle_percpu_irq); |
252 | irq_desc[irq].chip_data = data; | 245 | irq_set_chip_data(irq, data); |
253 | cpu_interrupt_type.enable(irq); | 246 | __cpu_unmask_irq(irq); |
254 | } | 247 | } |
255 | return 0; | 248 | return 0; |
256 | } | 249 | } |
@@ -299,7 +292,8 @@ int txn_alloc_irq(unsigned int bits_wide) | |||
299 | unsigned long txn_affinity_addr(unsigned int irq, int cpu) | 292 | unsigned long txn_affinity_addr(unsigned int irq, int cpu) |
300 | { | 293 | { |
301 | #ifdef CONFIG_SMP | 294 | #ifdef CONFIG_SMP |
302 | cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu)); | 295 | struct irq_data *d = irq_get_irq_data(irq); |
296 | cpumask_copy(d->affinity, cpumask_of(cpu)); | ||
303 | #endif | 297 | #endif |
304 | 298 | ||
305 | return per_cpu(cpu_data, cpu).txn_addr; | 299 | return per_cpu(cpu_data, cpu).txn_addr; |
@@ -343,6 +337,7 @@ void do_cpu_irq_mask(struct pt_regs *regs) | |||
343 | unsigned long eirr_val; | 337 | unsigned long eirr_val; |
344 | int irq, cpu = smp_processor_id(); | 338 | int irq, cpu = smp_processor_id(); |
345 | #ifdef CONFIG_SMP | 339 | #ifdef CONFIG_SMP |
340 | struct irq_desc *desc; | ||
346 | cpumask_t dest; | 341 | cpumask_t dest; |
347 | #endif | 342 | #endif |
348 | 343 | ||
@@ -356,8 +351,9 @@ void do_cpu_irq_mask(struct pt_regs *regs) | |||
356 | irq = eirr_to_irq(eirr_val); | 351 | irq = eirr_to_irq(eirr_val); |
357 | 352 | ||
358 | #ifdef CONFIG_SMP | 353 | #ifdef CONFIG_SMP |
359 | cpumask_copy(&dest, irq_desc[irq].affinity); | 354 | desc = irq_to_desc(irq); |
360 | if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) && | 355 | cpumask_copy(&dest, desc->irq_data.affinity); |
356 | if (irqd_is_per_cpu(&desc->irq_data) && | ||
361 | !cpu_isset(smp_processor_id(), dest)) { | 357 | !cpu_isset(smp_processor_id(), dest)) { |
362 | int cpu = first_cpu(dest); | 358 | int cpu = first_cpu(dest); |
363 | 359 | ||
@@ -368,7 +364,7 @@ void do_cpu_irq_mask(struct pt_regs *regs) | |||
368 | goto set_out; | 364 | goto set_out; |
369 | } | 365 | } |
370 | #endif | 366 | #endif |
371 | __do_IRQ(irq); | 367 | generic_handle_irq(irq); |
372 | 368 | ||
373 | out: | 369 | out: |
374 | irq_exit(); | 370 | irq_exit(); |
@@ -398,14 +394,15 @@ static void claim_cpu_irqs(void) | |||
398 | { | 394 | { |
399 | int i; | 395 | int i; |
400 | for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) { | 396 | for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) { |
401 | irq_desc[i].chip = &cpu_interrupt_type; | 397 | irq_set_chip_and_handler(i, &cpu_interrupt_type, |
398 | handle_percpu_irq); | ||
402 | } | 399 | } |
403 | 400 | ||
404 | irq_desc[TIMER_IRQ].action = &timer_action; | 401 | irq_set_handler(TIMER_IRQ, handle_percpu_irq); |
405 | irq_desc[TIMER_IRQ].status = IRQ_PER_CPU; | 402 | setup_irq(TIMER_IRQ, &timer_action); |
406 | #ifdef CONFIG_SMP | 403 | #ifdef CONFIG_SMP |
407 | irq_desc[IPI_IRQ].action = &ipi_action; | 404 | irq_set_handler(IPI_IRQ, handle_percpu_irq); |
408 | irq_desc[IPI_IRQ].status = IRQ_PER_CPU; | 405 | setup_irq(IPI_IRQ, &ipi_action); |
409 | #endif | 406 | #endif |
410 | } | 407 | } |
411 | 408 | ||
@@ -423,3 +420,4 @@ void __init init_IRQ(void) | |||
423 | set_eiem(cpu_eiem); /* EIEM : enable all external intr */ | 420 | set_eiem(cpu_eiem); /* EIEM : enable all external intr */ |
424 | 421 | ||
425 | } | 422 | } |
423 | |||