Diffstat (limited to 'arch/mips/cavium-octeon/octeon-irq.c')
 arch/mips/cavium-octeon/octeon-irq.c | 553 ++++++++++++++++++++--------------
 1 file changed, 323 insertions(+), 230 deletions(-)
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index c424cd158dc6..ce7500cdf5b7 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -3,15 +3,13 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2004-2008 Cavium Networks
+ * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks
  */
 #include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/smp.h>
 
 #include <asm/octeon/octeon.h>
-#include <asm/octeon/cvmx-pexp-defs.h>
-#include <asm/octeon/cvmx-npi-defs.h>
 
 static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock);
 static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock);
@@ -41,14 +39,14 @@ static void octeon_irq_core_ack(unsigned int irq)
 
 static void octeon_irq_core_eoi(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned int bit = irq - OCTEON_IRQ_SW0;
 	/*
 	 * If an IRQ is being processed while we are disabling it the
 	 * handler will attempt to unmask the interrupt after it has
 	 * been disabled.
 	 */
-	if (desc->status & IRQ_DISABLED)
+	if (unlikely(desc->status & IRQ_DISABLED))
 		return;
 	/*
 	 * We don't need to disable IRQs to make these atomic since
@@ -106,6 +104,29 @@ static struct irq_chip octeon_irq_chip_core = {
 
 static void octeon_irq_ciu0_ack(unsigned int irq)
 {
+	switch (irq) {
+	case OCTEON_IRQ_GMX_DRP0:
+	case OCTEON_IRQ_GMX_DRP1:
+	case OCTEON_IRQ_IPD_DRP:
+	case OCTEON_IRQ_KEY_ZERO:
+	case OCTEON_IRQ_TIMER0:
+	case OCTEON_IRQ_TIMER1:
+	case OCTEON_IRQ_TIMER2:
+	case OCTEON_IRQ_TIMER3:
+	{
+		int index = cvmx_get_core_num() * 2;
+		u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
+		/*
+		 * CIU timer type interrupts must be acknowledged by
+		 * writing a '1' bit to their sum0 bit.
+		 */
+		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
+		break;
+	}
+	default:
+		break;
+	}
+
 	/*
 	 * In order to avoid any locking accessing the CIU, we
 	 * acknowledge CIU interrupts by disabling all of them. This
@@ -130,8 +151,54 @@ static void octeon_irq_ciu0_eoi(unsigned int irq)
 	set_c0_status(0x100 << 2);
 }
 
+static int next_coreid_for_irq(struct irq_desc *desc)
+{
+
+#ifdef CONFIG_SMP
+	int coreid;
+	int weight = cpumask_weight(desc->affinity);
+
+	if (weight > 1) {
+		int cpu = smp_processor_id();
+		for (;;) {
+			cpu = cpumask_next(cpu, desc->affinity);
+			if (cpu >= nr_cpu_ids) {
+				cpu = -1;
+				continue;
+			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
+				break;
+			}
+		}
+		coreid = octeon_coreid_for_cpu(cpu);
+	} else if (weight == 1) {
+		coreid = octeon_coreid_for_cpu(cpumask_first(desc->affinity));
+	} else {
+		coreid = cvmx_get_core_num();
+	}
+	return coreid;
+#else
+	return cvmx_get_core_num();
+#endif
+}
+
 static void octeon_irq_ciu0_enable(unsigned int irq)
 {
+	struct irq_desc *desc = irq_to_desc(irq);
+	int coreid = next_coreid_for_irq(desc);
+	unsigned long flags;
+	uint64_t en0;
+	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
+
+	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
+	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
+	en0 |= 1ull << bit;
+	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
+	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
+	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
+}
+
+static void octeon_irq_ciu0_enable_mbox(unsigned int irq)
+{
 	int coreid = cvmx_get_core_num();
 	unsigned long flags;
 	uint64_t en0;
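A note on the helper just added: next_coreid_for_irq() is what makes .enable and .eoi rotate an interrupt across the cores of its affinity mask. A standalone sketch of the same wrap-around walk, using a plain bitmask in place of the kernel's cpumask API (ncores and the mask encoding are illustrative, not from the patch):

    /* Return the next core set in 'affinity' strictly after 'prev',
       wrapping around; fall back to 'prev' if the mask is empty. */
    static int next_core(unsigned long affinity, int prev, int ncores)
    {
        int i;

        for (i = 1; i <= ncores; i++) {
            int c = (prev + i) % ncores;
            if (affinity & (1UL << c))
                return c;
        }
        return prev;
    }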
@@ -167,63 +234,76 @@ static void octeon_irq_ciu0_disable(unsigned int irq)
 }
 
 /*
- * Enable the irq on the current core for chips that have the EN*_W1{S,C}
- * registers.
+ * Enable the irq on the next core in the affinity set for chips that
+ * have the EN*_W1{S,C} registers.
  */
 static void octeon_irq_ciu0_enable_v2(unsigned int irq)
 {
-	int index = cvmx_get_core_num() * 2;
+	int index;
 	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
+	struct irq_desc *desc = irq_to_desc(irq);
 
-	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
+	if ((desc->status & IRQ_DISABLED) == 0) {
+		index = next_coreid_for_irq(desc) * 2;
+		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
+	}
 }
 
 /*
- * Disable the irq on the current core for chips that have the EN*_W1{S,C}
- * registers.
+ * Enable the irq on the current CPU for chips that
+ * have the EN*_W1{S,C} registers.
  */
-static void octeon_irq_ciu0_ack_v2(unsigned int irq)
+static void octeon_irq_ciu0_enable_mbox_v2(unsigned int irq)
 {
-	int index = cvmx_get_core_num() * 2;
+	int index;
 	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
 
-	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
+	index = cvmx_get_core_num() * 2;
+	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
 }
 
 /*
- * CIU timer type interrupts must be acknoleged by writing a '1' bit
- * to their sum0 bit.
+ * Disable the irq on the current core for chips that have the EN*_W1{S,C}
+ * registers.
  */
-static void octeon_irq_ciu0_timer_ack(unsigned int irq)
+static void octeon_irq_ciu0_ack_v2(unsigned int irq)
 {
 	int index = cvmx_get_core_num() * 2;
-	uint64_t mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
-	cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
-}
+	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
 
-static void octeon_irq_ciu0_timer_ack_v1(unsigned int irq)
-{
-	octeon_irq_ciu0_timer_ack(irq);
-	octeon_irq_ciu0_ack(irq);
-}
+	switch (irq) {
+	case OCTEON_IRQ_GMX_DRP0:
+	case OCTEON_IRQ_GMX_DRP1:
+	case OCTEON_IRQ_IPD_DRP:
+	case OCTEON_IRQ_KEY_ZERO:
+	case OCTEON_IRQ_TIMER0:
+	case OCTEON_IRQ_TIMER1:
+	case OCTEON_IRQ_TIMER2:
+	case OCTEON_IRQ_TIMER3:
+		/*
+		 * CIU timer type interrupts must be acknowledged by
+		 * writing a '1' bit to their sum0 bit.
+		 */
+		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
+		break;
+	default:
+		break;
+	}
 
-static void octeon_irq_ciu0_timer_ack_v2(unsigned int irq)
-{
-	octeon_irq_ciu0_timer_ack(irq);
-	octeon_irq_ciu0_ack_v2(irq);
+	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
 }
 
 /*
  * Enable the irq on the current core for chips that have the EN*_W1{S,C}
  * registers.
  */
-static void octeon_irq_ciu0_eoi_v2(unsigned int irq)
+static void octeon_irq_ciu0_eoi_mbox_v2(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	int index = cvmx_get_core_num() * 2;
 	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
 
-	if ((desc->status & IRQ_DISABLED) == 0)
+	if (likely((desc->status & IRQ_DISABLED) == 0))
 		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
 }
 
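With the octeon_irq_ciu0_timer_ack*() helpers folded into octeon_irq_ciu0_ack_v2() above, acknowledgment has two parts: timer-class sources are cleared by writing a '1' to their CIU_INTX_SUM0 bit, and the source is then masked in EN0 until eoi re-enables it. A minimal model of that write-1-to-clear register behaviour (plain C, not the hardware interface):

    /* W1C semantics: bits written as 1 are cleared in the status
       register, bits written as 0 are left untouched. */
    static void w1c_write(unsigned long long *status, unsigned long long mask)
    {
        *status &= ~mask;
    }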
@@ -246,18 +326,30 @@ static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
 static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest)
 {
 	int cpu;
+	struct irq_desc *desc = irq_to_desc(irq);
+	int enable_one = (desc->status & IRQ_DISABLED) == 0;
 	unsigned long flags;
 	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
 
+	/*
+	 * For non-v2 CIU, we will allow only single CPU affinity.
+	 * This removes the need to do locking in the .ack/.eoi
+	 * functions.
+	 */
+	if (cpumask_weight(dest) != 1)
+		return -EINVAL;
+
 	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
 	for_each_online_cpu(cpu) {
 		int coreid = octeon_coreid_for_cpu(cpu);
 		uint64_t en0 =
 			cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
-		if (cpumask_test_cpu(cpu, dest))
+		if (cpumask_test_cpu(cpu, dest) && enable_one) {
+			enable_one = 0;
 			en0 |= 1ull << bit;
-		else
+		} else {
 			en0 &= ~(1ull << bit);
+		}
 		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
 	}
 	/*
@@ -279,13 +371,18 @@ static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
 {
 	int cpu;
 	int index;
+	struct irq_desc *desc = irq_to_desc(irq);
+	int enable_one = (desc->status & IRQ_DISABLED) == 0;
 	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
+
 	for_each_online_cpu(cpu) {
 		index = octeon_coreid_for_cpu(cpu) * 2;
-		if (cpumask_test_cpu(cpu, dest))
+		if (cpumask_test_cpu(cpu, dest) && enable_one) {
+			enable_one = 0;
 			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
-		else
+		} else {
 			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
+		}
 	}
 	return 0;
 }
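Both set_affinity variants above now unmask the source on exactly one CPU of the destination mask; rotation across the rest of the mask happens later, one eoi at a time. The skeleton of that "first match wins" loop as self-contained C (NCPUS and the bitmask representation are illustrative):

    #define NCPUS 4

    /* Unmask the irq on the first CPU of 'dest' only if it was enabled
       at all; mask it on every other CPU. */
    static void model_set_affinity(unsigned int dest, int was_enabled,
                                   int en[NCPUS])
    {
        int cpu;
        int enable_one = was_enabled;

        for (cpu = 0; cpu < NCPUS; cpu++) {
            if ((dest & (1U << cpu)) && enable_one) {
                enable_one = 0;
                en[cpu] = 1;
            } else {
                en[cpu] = 0;
            }
        }
    }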
@@ -298,8 +395,7 @@ static struct irq_chip octeon_irq_chip_ciu0_v2 = {
 	.name = "CIU0",
 	.enable = octeon_irq_ciu0_enable_v2,
 	.disable = octeon_irq_ciu0_disable_all_v2,
-	.ack = octeon_irq_ciu0_ack_v2,
-	.eoi = octeon_irq_ciu0_eoi_v2,
+	.eoi = octeon_irq_ciu0_enable_v2,
 #ifdef CONFIG_SMP
 	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
 #endif
@@ -309,36 +405,27 @@ static struct irq_chip octeon_irq_chip_ciu0 = {
 	.name = "CIU0",
 	.enable = octeon_irq_ciu0_enable,
 	.disable = octeon_irq_ciu0_disable,
-	.ack = octeon_irq_ciu0_ack,
 	.eoi = octeon_irq_ciu0_eoi,
 #ifdef CONFIG_SMP
 	.set_affinity = octeon_irq_ciu0_set_affinity,
 #endif
 };
 
-static struct irq_chip octeon_irq_chip_ciu0_timer_v2 = {
-	.name = "CIU0-T",
-	.enable = octeon_irq_ciu0_enable_v2,
-	.disable = octeon_irq_ciu0_disable_all_v2,
-	.ack = octeon_irq_ciu0_timer_ack_v2,
-	.eoi = octeon_irq_ciu0_eoi_v2,
-#ifdef CONFIG_SMP
-	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
-#endif
+/* The mbox versions don't do any affinity or round-robin. */
+static struct irq_chip octeon_irq_chip_ciu0_mbox_v2 = {
+	.name = "CIU0-M",
+	.enable = octeon_irq_ciu0_enable_mbox_v2,
+	.disable = octeon_irq_ciu0_disable,
+	.eoi = octeon_irq_ciu0_eoi_mbox_v2,
 };
 
-static struct irq_chip octeon_irq_chip_ciu0_timer = {
-	.name = "CIU0-T",
-	.enable = octeon_irq_ciu0_enable,
+static struct irq_chip octeon_irq_chip_ciu0_mbox = {
+	.name = "CIU0-M",
+	.enable = octeon_irq_ciu0_enable_mbox,
 	.disable = octeon_irq_ciu0_disable,
-	.ack = octeon_irq_ciu0_timer_ack_v1,
 	.eoi = octeon_irq_ciu0_eoi,
-#ifdef CONFIG_SMP
-	.set_affinity = octeon_irq_ciu0_set_affinity,
-#endif
 };
 
-
 static void octeon_irq_ciu1_ack(unsigned int irq)
 {
 	/*
@@ -365,10 +452,30 @@ static void octeon_irq_ciu1_eoi(unsigned int irq)
 
 static void octeon_irq_ciu1_enable(unsigned int irq)
 {
-	int coreid = cvmx_get_core_num();
+	struct irq_desc *desc = irq_to_desc(irq);
+	int coreid = next_coreid_for_irq(desc);
+	unsigned long flags;
+	uint64_t en1;
+	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
+
+	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
+	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
+	en1 |= 1ull << bit;
+	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
+	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
+	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
+}
+
+/*
+ * Watchdog interrupts are special.  They are associated with a single
+ * core, so we hardwire the affinity to that core.
+ */
+static void octeon_irq_ciu1_wd_enable(unsigned int irq)
+{
 	unsigned long flags;
 	uint64_t en1;
 	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
+	int coreid = bit;
 
 	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
 	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
@@ -405,36 +512,43 @@ static void octeon_irq_ciu1_disable(unsigned int irq)
  */
 static void octeon_irq_ciu1_enable_v2(unsigned int irq)
 {
-	int index = cvmx_get_core_num() * 2 + 1;
+	int index;
 	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
+	struct irq_desc *desc = irq_to_desc(irq);
 
-	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+	if ((desc->status & IRQ_DISABLED) == 0) {
+		index = next_coreid_for_irq(desc) * 2 + 1;
+		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+	}
 }
 
 /*
- * Disable the irq on the current core for chips that have the EN*_W1{S,C}
- * registers.
+ * Watchdog interrupts are special.  They are associated with a single
+ * core, so we hardwire the affinity to that core.
  */
-static void octeon_irq_ciu1_ack_v2(unsigned int irq)
+static void octeon_irq_ciu1_wd_enable_v2(unsigned int irq)
 {
-	int index = cvmx_get_core_num() * 2 + 1;
+	int index;
+	int coreid = irq - OCTEON_IRQ_WDOG0;
 	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
+	struct irq_desc *desc = irq_to_desc(irq);
 
-	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
+	if ((desc->status & IRQ_DISABLED) == 0) {
+		index = coreid * 2 + 1;
+		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+	}
 }
 
 /*
- * Enable the irq on the current core for chips that have the EN*_W1{S,C}
+ * Disable the irq on the current core for chips that have the EN*_W1{S,C}
  * registers.
  */
-static void octeon_irq_ciu1_eoi_v2(unsigned int irq)
+static void octeon_irq_ciu1_ack_v2(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
 	int index = cvmx_get_core_num() * 2 + 1;
 	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
 
-	if ((desc->status & IRQ_DISABLED) == 0)
-		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
 }
 
 /*
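The watchdog enable paths above never consult the affinity mask: CIU1 watchdog source n can fire only on core n, so the EN1 register index is derived from the irq number itself. The index arithmetic in isolation (irq_wdog0 stands in for OCTEON_IRQ_WDOG0, whose numeric value is not shown in this diff):

    /* WDOGn is hardwired to core n; per-core EN1 registers live at
       odd indexes (coreid * 2 + 1). */
    static int wd_en1_index(unsigned int irq, unsigned int irq_wdog0)
    {
        int coreid = irq - irq_wdog0;
        return coreid * 2 + 1;
    }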
@@ -457,19 +571,30 @@ static int octeon_irq_ciu1_set_affinity(unsigned int irq,
 					const struct cpumask *dest)
 {
 	int cpu;
+	struct irq_desc *desc = irq_to_desc(irq);
+	int enable_one = (desc->status & IRQ_DISABLED) == 0;
 	unsigned long flags;
 	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
 
+	/*
+	 * For non-v2 CIU, we will allow only single CPU affinity.
+	 * This removes the need to do locking in the .ack/.eoi
+	 * functions.
+	 */
+	if (cpumask_weight(dest) != 1)
+		return -EINVAL;
+
 	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
 	for_each_online_cpu(cpu) {
 		int coreid = octeon_coreid_for_cpu(cpu);
 		uint64_t en1 =
-			cvmx_read_csr(CVMX_CIU_INTX_EN1
-				      (coreid * 2 + 1));
-		if (cpumask_test_cpu(cpu, dest))
+			cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
+		if (cpumask_test_cpu(cpu, dest) && enable_one) {
+			enable_one = 0;
 			en1 |= 1ull << bit;
-		else
+		} else {
 			en1 &= ~(1ull << bit);
+		}
 		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
 	}
 	/*
@@ -491,13 +616,17 @@ static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
 {
 	int cpu;
 	int index;
+	struct irq_desc *desc = irq_to_desc(irq);
+	int enable_one = (desc->status & IRQ_DISABLED) == 0;
 	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
 	for_each_online_cpu(cpu) {
 		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
-		if (cpumask_test_cpu(cpu, dest))
+		if (cpumask_test_cpu(cpu, dest) && enable_one) {
+			enable_one = 0;
 			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
-		else
+		} else {
 			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
+		}
 	}
 	return 0;
 }
@@ -507,11 +636,10 @@ static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
  * Newer octeon chips have support for lockless CIU operation.
  */
 static struct irq_chip octeon_irq_chip_ciu1_v2 = {
-	.name = "CIU0",
+	.name = "CIU1",
 	.enable = octeon_irq_ciu1_enable_v2,
 	.disable = octeon_irq_ciu1_disable_all_v2,
-	.ack = octeon_irq_ciu1_ack_v2,
-	.eoi = octeon_irq_ciu1_eoi_v2,
+	.eoi = octeon_irq_ciu1_enable_v2,
 #ifdef CONFIG_SMP
 	.set_affinity = octeon_irq_ciu1_set_affinity_v2,
 #endif
@@ -521,103 +649,36 @@ static struct irq_chip octeon_irq_chip_ciu1 = {
 	.name = "CIU1",
 	.enable = octeon_irq_ciu1_enable,
 	.disable = octeon_irq_ciu1_disable,
-	.ack = octeon_irq_ciu1_ack,
 	.eoi = octeon_irq_ciu1_eoi,
 #ifdef CONFIG_SMP
 	.set_affinity = octeon_irq_ciu1_set_affinity,
 #endif
 };
 
-#ifdef CONFIG_PCI_MSI
-
-static DEFINE_RAW_SPINLOCK(octeon_irq_msi_lock);
-
-static void octeon_irq_msi_ack(unsigned int irq)
-{
-	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
-		/* These chips have PCI */
-		cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV,
-			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
-	} else {
-		/*
-		 * These chips have PCIe. Thankfully the ACK doesn't
-		 * need any locking.
-		 */
-		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0,
-			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
-	}
-}
-
-static void octeon_irq_msi_eoi(unsigned int irq)
-{
-	/* Nothing needed */
-}
-
-static void octeon_irq_msi_enable(unsigned int irq)
-{
-	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
-		/*
-		 * Octeon PCI doesn't have the ability to mask/unmask
-		 * MSI interrupts individually.  Instead of
-		 * masking/unmasking them in groups of 16, we simple
-		 * assume MSI devices are well behaved.  MSI
-		 * interrupts are always enable and the ACK is assumed
-		 * to be enough.
-		 */
-	} else {
-		/* These chips have PCIe.  Note that we only support
-		 * the first 64 MSI interrupts.  Unfortunately all the
-		 * MSI enables are in the same register.  We use
-		 * MSI0's lock to control access to them all.
-		 */
-		uint64_t en;
-		unsigned long flags;
-		raw_spin_lock_irqsave(&octeon_irq_msi_lock, flags);
-		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
-		en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
-		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
-		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
-		raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
-	}
-}
-
-static void octeon_irq_msi_disable(unsigned int irq)
-{
-	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
-		/* See comment in enable */
-	} else {
-		/*
-		 * These chips have PCIe.  Note that we only support
-		 * the first 64 MSI interrupts.  Unfortunately all the
-		 * MSI enables are in the same register.  We use
-		 * MSI0's lock to control access to them all.
-		 */
-		uint64_t en;
-		unsigned long flags;
-		raw_spin_lock_irqsave(&octeon_irq_msi_lock, flags);
-		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
-		en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
-		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
-		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
-		raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
-	}
-}
+static struct irq_chip octeon_irq_chip_ciu1_wd_v2 = {
+	.name = "CIU1-W",
+	.enable = octeon_irq_ciu1_wd_enable_v2,
+	.disable = octeon_irq_ciu1_disable_all_v2,
+	.eoi = octeon_irq_ciu1_wd_enable_v2,
+};
 
-static struct irq_chip octeon_irq_chip_msi = {
-	.name = "MSI",
-	.enable = octeon_irq_msi_enable,
-	.disable = octeon_irq_msi_disable,
-	.ack = octeon_irq_msi_ack,
-	.eoi = octeon_irq_msi_eoi,
+static struct irq_chip octeon_irq_chip_ciu1_wd = {
+	.name = "CIU1-W",
+	.enable = octeon_irq_ciu1_wd_enable,
+	.disable = octeon_irq_ciu1_disable,
+	.eoi = octeon_irq_ciu1_eoi,
 };
-#endif
+
+static void (*octeon_ciu0_ack)(unsigned int);
+static void (*octeon_ciu1_ack)(unsigned int);
 
 void __init arch_init_irq(void)
 {
-	int irq;
+	unsigned int irq;
 	struct irq_chip *chip0;
-	struct irq_chip *chip0_timer;
+	struct irq_chip *chip0_mbox;
 	struct irq_chip *chip1;
+	struct irq_chip *chip1_wd;
 
 #ifdef CONFIG_SMP
 	/* Set the default affinity to the boot cpu. */
@@ -631,13 +692,19 @@ void __init arch_init_irq(void)
 	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
 	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
 	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
+		octeon_ciu0_ack = octeon_irq_ciu0_ack_v2;
+		octeon_ciu1_ack = octeon_irq_ciu1_ack_v2;
 		chip0 = &octeon_irq_chip_ciu0_v2;
-		chip0_timer = &octeon_irq_chip_ciu0_timer_v2;
+		chip0_mbox = &octeon_irq_chip_ciu0_mbox_v2;
 		chip1 = &octeon_irq_chip_ciu1_v2;
+		chip1_wd = &octeon_irq_chip_ciu1_wd_v2;
 	} else {
+		octeon_ciu0_ack = octeon_irq_ciu0_ack;
+		octeon_ciu1_ack = octeon_irq_ciu1_ack;
 		chip0 = &octeon_irq_chip_ciu0;
-		chip0_timer = &octeon_irq_chip_ciu0_timer;
+		chip0_mbox = &octeon_irq_chip_ciu0_mbox;
 		chip1 = &octeon_irq_chip_ciu1;
+		chip1_wd = &octeon_irq_chip_ciu1_wd;
 	}
 
 	/* 0 - 15 reserved for i8259 master and slave controller. */
@@ -651,34 +718,23 @@
 	/* 24 - 87 CIU_INT_SUM0 */
 	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
 		switch (irq) {
-		case OCTEON_IRQ_GMX_DRP0:
-		case OCTEON_IRQ_GMX_DRP1:
-		case OCTEON_IRQ_IPD_DRP:
-		case OCTEON_IRQ_KEY_ZERO:
-		case OCTEON_IRQ_TIMER0:
-		case OCTEON_IRQ_TIMER1:
-		case OCTEON_IRQ_TIMER2:
-		case OCTEON_IRQ_TIMER3:
-			set_irq_chip_and_handler(irq, chip0_timer, handle_percpu_irq);
+		case OCTEON_IRQ_MBOX0:
+		case OCTEON_IRQ_MBOX1:
+			set_irq_chip_and_handler(irq, chip0_mbox, handle_percpu_irq);
 			break;
 		default:
-			set_irq_chip_and_handler(irq, chip0, handle_percpu_irq);
+			set_irq_chip_and_handler(irq, chip0, handle_fasteoi_irq);
 			break;
 		}
 	}
 
 	/* 88 - 151 CIU_INT_SUM1 */
-	for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
-		set_irq_chip_and_handler(irq, chip1, handle_percpu_irq);
-	}
+	for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_WDOG15; irq++)
+		set_irq_chip_and_handler(irq, chip1_wd, handle_fasteoi_irq);
+
+	for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED151; irq++)
+		set_irq_chip_and_handler(irq, chip1, handle_fasteoi_irq);
 
-#ifdef CONFIG_PCI_MSI
-	/* 152 - 215 PCI/PCIe MSI interrupts */
-	for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) {
-		set_irq_chip_and_handler(irq, &octeon_irq_chip_msi,
-					 handle_percpu_irq);
-	}
-#endif
 	set_c0_status(0x300 << 2);
 }
 
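Moving the CIU sources from handle_percpu_irq to handle_fasteoi_irq is what allows the chips to drop their .ack methods: the fasteoi flow invokes a single .eoi after the handler, which these chips reuse to re-enable (and rotate) the source. A rough model of the two flows, with function pointers standing in for the irq_chip methods (a sketch, not the genirq implementation):

    struct model_chip {
        void (*ack)(unsigned int irq);
        void (*eoi)(unsigned int irq);
    };

    static void model_percpu_flow(const struct model_chip *c, unsigned int irq,
                                  void (*action)(unsigned int irq))
    {
        c->ack(irq);    /* ack up front ... */
        action(irq);
        c->eoi(irq);    /* ... and eoi afterwards */
    }

    static void model_fasteoi_flow(const struct model_chip *c, unsigned int irq,
                                   void (*action)(unsigned int irq))
    {
        action(irq);
        c->eoi(irq);    /* single eoi, no separate ack step */
    }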
@@ -693,6 +749,7 @@ asmlinkage void plat_irq_dispatch(void)
 	unsigned long cop0_status;
 	uint64_t ciu_en;
 	uint64_t ciu_sum;
+	unsigned int irq;
 
 	while (1) {
 		cop0_cause = read_c0_cause();
@@ -704,18 +761,24 @@
 			ciu_sum = cvmx_read_csr(ciu_sum0_address);
 			ciu_en = cvmx_read_csr(ciu_en0_address);
 			ciu_sum &= ciu_en;
-			if (likely(ciu_sum))
-				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
-			else
+			if (likely(ciu_sum)) {
+				irq = fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1;
+				octeon_ciu0_ack(irq);
+				do_IRQ(irq);
+			} else {
 				spurious_interrupt();
+			}
 		} else if (unlikely(cop0_cause & STATUSF_IP3)) {
 			ciu_sum = cvmx_read_csr(ciu_sum1_address);
 			ciu_en = cvmx_read_csr(ciu_en1_address);
 			ciu_sum &= ciu_en;
-			if (likely(ciu_sum))
-				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
-			else
+			if (likely(ciu_sum)) {
+				irq = fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1;
+				octeon_ciu1_ack(irq);
+				do_IRQ(irq);
+			} else {
 				spurious_interrupt();
+			}
 		} else if (likely(cop0_cause)) {
 			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
 		} else {
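Because the v2 chips no longer ack in their flow handlers, plat_irq_dispatch() now acknowledges the source itself, through the octeon_ciu*_ack pointer chosen in arch_init_irq(), before calling do_IRQ(). The highest-bit selection can be checked in isolation (using the GCC builtin in place of the kernel's fls64()):

    /* For x != 0, fls64(x) == 64 - __builtin_clzll(x), so the chosen irq
       is the highest pending-and-enabled bit plus the irq base. */
    static unsigned int pick_irq(unsigned long long sum_and_en, unsigned int base)
    {
        return 63 - __builtin_clzll(sum_and_en) + base;
    }

Here pick_irq(ciu_sum, OCTEON_IRQ_WORKQ0) matches the original do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1).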
@@ -725,54 +788,84 @@
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
-{
-	unsigned int isset;
-	int coreid = octeon_coreid_for_cpu(cpu);
-	int bit = (irq < OCTEON_IRQ_WDOG0) ?
-		irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;
-	if (irq < 64) {
-		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
-			(1ull << bit)) >> bit;
-	} else {
-		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)) &
-			(1ull << bit)) >> bit;
-	}
-	return isset;
-}
 
 void fixup_irqs(void)
 {
 	int irq;
+	struct irq_desc *desc;
+	cpumask_t new_affinity;
+	unsigned long flags;
+	int do_set_affinity;
+	int cpu;
+
+	cpu = smp_processor_id();
 
 	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
 		octeon_irq_core_disable_local(irq);
 
-	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_GPIO15; irq++) {
-		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
-			/* ciu irq migrates to next cpu */
-			octeon_irq_chip_ciu0.disable(irq);
-			octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
-		}
-	}
-
-#if 0
-	for (irq = OCTEON_IRQ_MBOX0; irq <= OCTEON_IRQ_MBOX1; irq++)
-		octeon_irq_mailbox_mask(irq);
-#endif
-	for (irq = OCTEON_IRQ_UART0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
-		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
-			/* ciu irq migrates to next cpu */
-			octeon_irq_chip_ciu0.disable(irq);
-			octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
-		}
-	}
+	for (irq = OCTEON_IRQ_WORKQ0; irq < OCTEON_IRQ_LAST; irq++) {
+		desc = irq_to_desc(irq);
+		switch (irq) {
+		case OCTEON_IRQ_MBOX0:
+		case OCTEON_IRQ_MBOX1:
+			/* The eoi function will disable them on this CPU. */
+			desc->chip->eoi(irq);
+			break;
+		case OCTEON_IRQ_WDOG0:
+		case OCTEON_IRQ_WDOG1:
+		case OCTEON_IRQ_WDOG2:
+		case OCTEON_IRQ_WDOG3:
+		case OCTEON_IRQ_WDOG4:
+		case OCTEON_IRQ_WDOG5:
+		case OCTEON_IRQ_WDOG6:
+		case OCTEON_IRQ_WDOG7:
+		case OCTEON_IRQ_WDOG8:
+		case OCTEON_IRQ_WDOG9:
+		case OCTEON_IRQ_WDOG10:
+		case OCTEON_IRQ_WDOG11:
+		case OCTEON_IRQ_WDOG12:
+		case OCTEON_IRQ_WDOG13:
+		case OCTEON_IRQ_WDOG14:
+		case OCTEON_IRQ_WDOG15:
+			/*
+			 * These have special per CPU semantics and
+			 * are handled in the watchdog driver.
+			 */
+			break;
+		default:
+			raw_spin_lock_irqsave(&desc->lock, flags);
+			/*
+			 * If this irq has an action, it is in use and
+			 * must be migrated if it has affinity to this
+			 * cpu.
+			 */
+			if (desc->action && cpumask_test_cpu(cpu, desc->affinity)) {
+				if (cpumask_weight(desc->affinity) > 1) {
+					/*
+					 * It has multi CPU affinity,
+					 * just remove this CPU from
+					 * the affinity set.
+					 */
+					cpumask_copy(&new_affinity, desc->affinity);
+					cpumask_clear_cpu(cpu, &new_affinity);
+				} else {
+					/*
+					 * Otherwise, put it on lowest
+					 * numbered online CPU.
+					 */
+					cpumask_clear(&new_affinity);
+					cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
+				}
+				do_set_affinity = 1;
+			} else {
+				do_set_affinity = 0;
+			}
+			raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+			if (do_set_affinity)
+				irq_set_affinity(irq, &new_affinity);
 
-	for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED135; irq++) {
-		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
-			/* ciu irq migrates to next cpu */
-			octeon_irq_chip_ciu1.disable(irq);
-			octeon_irq_ciu1_set_affinity(irq, &cpu_online_map);
+			break;
 		}
 	}
 }
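The rewritten fixup_irqs() migrates every in-use irq away from the CPU going offline: a multi-CPU affinity simply loses the dying CPU, while a single-CPU affinity is repointed at the lowest-numbered online CPU. The mask arithmetic reduced to plain bitmask C (a sketch; the real code operates on cpumask_t under desc->lock):

    /* Affinity an irq should have once 'dying' goes offline. */
    static unsigned int migrate_affinity(unsigned int affinity, int dying,
                                         unsigned int online)
    {
        unsigned int m = affinity & ~(1U << dying);

        if (m)                      /* other CPUs remain in the set */
            return m;
        return online & -online;    /* lowest-numbered online CPU */
    }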