Diffstat (limited to 'arch/mips/cavium-octeon/octeon-irq.c')
 -rw-r--r--  arch/mips/cavium-octeon/octeon-irq.c | 160
 1 file changed, 117 insertions(+), 43 deletions(-)
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 6f2acf09328d..c424cd158dc6 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -13,9 +13,8 @@
 #include <asm/octeon/cvmx-pexp-defs.h>
 #include <asm/octeon/cvmx-npi-defs.h>
 
-DEFINE_RWLOCK(octeon_irq_ciu0_rwlock);
-DEFINE_RWLOCK(octeon_irq_ciu1_rwlock);
-DEFINE_SPINLOCK(octeon_irq_msi_lock);
+static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock);
+static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock);
 
 static int octeon_coreid_for_cpu(int cpu)
 {
@@ -51,9 +50,6 @@ static void octeon_irq_core_eoi(unsigned int irq)
 	 */
 	if (desc->status & IRQ_DISABLED)
 		return;
-
-	/* There is a race here. We should fix it. */
-
 	/*
 	 * We don't need to disable IRQs to make these atomic since
 	 * they are already disabled earlier in the low level
@@ -141,19 +137,12 @@ static void octeon_irq_ciu0_enable(unsigned int irq)
 	uint64_t en0;
 	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
 
-	/*
-	 * A read lock is used here to make sure only one core is ever
-	 * updating the CIU enable bits at a time. During an enable
-	 * the cores don't interfere with each other. During a disable
-	 * the write lock stops any enables that might cause a
-	 * problem.
-	 */
-	read_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
+	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
 	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
 	en0 |= 1ull << bit;
 	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
 	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
-	read_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
+	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
 }
 
 static void octeon_irq_ciu0_disable(unsigned int irq)
@@ -162,7 +151,7 @@ static void octeon_irq_ciu0_disable(unsigned int irq)
 	unsigned long flags;
 	uint64_t en0;
 	int cpu;
-	write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
+	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
 	for_each_online_cpu(cpu) {
 		int coreid = octeon_coreid_for_cpu(cpu);
 		en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
@@ -174,7 +163,7 @@ static void octeon_irq_ciu0_disable(unsigned int irq)
 	 * of them are done.
 	 */
 	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
-	write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
+	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
 }
 
 /*
@@ -193,7 +182,7 @@ static void octeon_irq_ciu0_enable_v2(unsigned int irq)
  * Disable the irq on the current core for chips that have the EN*_W1{S,C}
  * registers.
  */
-static void octeon_irq_ciu0_disable_v2(unsigned int irq)
+static void octeon_irq_ciu0_ack_v2(unsigned int irq)
 {
 	int index = cvmx_get_core_num() * 2;
 	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
@@ -202,6 +191,43 @@ static void octeon_irq_ciu0_disable_v2(unsigned int irq)
 }
 
 /*
+ * CIU timer type interrupts must be acknowledged by writing a '1' bit
+ * to their sum0 bit.
+ */
+static void octeon_irq_ciu0_timer_ack(unsigned int irq)
+{
+	int index = cvmx_get_core_num() * 2;
+	uint64_t mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
+	cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
+}
+
+static void octeon_irq_ciu0_timer_ack_v1(unsigned int irq)
+{
+	octeon_irq_ciu0_timer_ack(irq);
+	octeon_irq_ciu0_ack(irq);
+}
+
+static void octeon_irq_ciu0_timer_ack_v2(unsigned int irq)
+{
+	octeon_irq_ciu0_timer_ack(irq);
+	octeon_irq_ciu0_ack_v2(irq);
+}
+
+/*
+ * Enable the irq on the current core for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static void octeon_irq_ciu0_eoi_v2(unsigned int irq)
+{
+	struct irq_desc *desc = irq_desc + irq;
+	int index = cvmx_get_core_num() * 2;
+	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
+
+	if ((desc->status & IRQ_DISABLED) == 0)
+		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
+}
+
+/*
  * Disable the irq on the all cores for chips that have the EN*_W1{S,C}
  * registers.
  */
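The new ack/eoi callbacks above lean on the CIU's write-1-to-clear (EN*_W1C) and write-1-to-set (EN*_W1S) enable registers: ack masks the source on the local core, the timer variants additionally clear the latched SUM0 bit by writing a '1' to it, and eoi re-enables the source only if the line has not been disabled in the meantime. Below is a minimal userspace model of that sequence; the register variables and helper names (en0, sum0, w1c_en0(), ...) are illustrative stand-ins for the CIU CSR accesses, not kernel or CVMX API.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

static uint64_t en0;   /* per-core interrupt enable mask (models EN0)   */
static uint64_t sum0;  /* latched interrupt summary bits (models SUM0)  */

static void w1c_en0(uint64_t mask)  { en0  &= ~mask; }  /* EN0_W1C: write 1 to clear  */
static void w1s_en0(uint64_t mask)  { en0  |=  mask; }  /* EN0_W1S: write 1 to set    */
static void w1c_sum0(uint64_t mask) { sum0 &= ~mask; }  /* SUM0: writing 1 acknowledges */

static void ack_v2(int bit)       { w1c_en0(1ull << bit); }                 /* mask on this core          */
static void timer_ack_v2(int bit) { w1c_sum0(1ull << bit); ack_v2(bit); }   /* clear latch, then mask     */

static void eoi_v2(int bit, bool disabled)
{
	if (!disabled)                  /* only re-enable if the irq was not disabled meanwhile */
		w1s_en0(1ull << bit);
}

int main(void)
{
	int bit = 52;                   /* a timer-type source; the bit number is illustrative */
	en0 = 1ull << bit;
	sum0 = 1ull << bit;             /* pretend the timer fired */

	timer_ack_v2(bit);              /* flow-handler "ack" step */
	/* ... the irq action would run here ... */
	eoi_v2(bit, false);             /* flow-handler "eoi" step */

	printf("en0=%#llx sum0=%#llx\n",
	       (unsigned long long)en0, (unsigned long long)sum0);
	return 0;
}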
@@ -223,7 +249,7 @@ static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *
 	unsigned long flags;
 	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
 
-	write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
+	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
 	for_each_online_cpu(cpu) {
 		int coreid = octeon_coreid_for_cpu(cpu);
 		uint64_t en0 =
@@ -239,7 +265,7 @@ static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *
 	 * of them are done.
 	 */
 	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
-	write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
+	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
 
 	return 0;
 }
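The set_affinity path keeps its existing shape and only swaps the lock type: one lock serialises a read-modify-write of every online core's EN0 copy so a concurrent enable, disable, or affinity change cannot lose a bit, and the final CSR read flushes the writes before the lock is dropped. A rough userspace sketch of that pattern follows; NR_CORES, core_en0[] and model_set_affinity() are hypothetical names, a pthread mutex stands in for the raw spinlock, and the flush read has no userspace equivalent so it is omitted.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CORES 4

static pthread_mutex_t en_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t core_en0[NR_CORES];          /* stands in for each core's EN0 register */

static void model_set_affinity(int bit, uint64_t dest_core_mask)
{
	pthread_mutex_lock(&en_lock);
	for (int core = 0; core < NR_CORES; core++) {
		if (dest_core_mask & (1ull << core))
			core_en0[core] |= 1ull << bit;    /* route the source to this core      */
		else
			core_en0[core] &= ~(1ull << bit); /* and remove it from every other core */
	}
	pthread_mutex_unlock(&en_lock);
}

int main(void)
{
	model_set_affinity(3, 0x1);                  /* bit 3 -> core 0 only        */
	model_set_affinity(3, 0x6);                  /* move bit 3 to cores 1 and 2 */
	for (int core = 0; core < NR_CORES; core++)
		printf("core %d: en0=%#llx\n", core,
		       (unsigned long long)core_en0[core]);
	return 0;
}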
@@ -272,8 +298,8 @@ static struct irq_chip octeon_irq_chip_ciu0_v2 = {
 	.name = "CIU0",
 	.enable = octeon_irq_ciu0_enable_v2,
 	.disable = octeon_irq_ciu0_disable_all_v2,
-	.ack = octeon_irq_ciu0_disable_v2,
-	.eoi = octeon_irq_ciu0_enable_v2,
+	.ack = octeon_irq_ciu0_ack_v2,
+	.eoi = octeon_irq_ciu0_eoi_v2,
 #ifdef CONFIG_SMP
 	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
 #endif
@@ -290,6 +316,28 @@ static struct irq_chip octeon_irq_chip_ciu0 = {
 #endif
 };
 
+static struct irq_chip octeon_irq_chip_ciu0_timer_v2 = {
+	.name = "CIU0-T",
+	.enable = octeon_irq_ciu0_enable_v2,
+	.disable = octeon_irq_ciu0_disable_all_v2,
+	.ack = octeon_irq_ciu0_timer_ack_v2,
+	.eoi = octeon_irq_ciu0_eoi_v2,
+#ifdef CONFIG_SMP
+	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu0_timer = {
+	.name = "CIU0-T",
+	.enable = octeon_irq_ciu0_enable,
+	.disable = octeon_irq_ciu0_disable,
+	.ack = octeon_irq_ciu0_timer_ack_v1,
+	.eoi = octeon_irq_ciu0_eoi,
+#ifdef CONFIG_SMP
+	.set_affinity = octeon_irq_ciu0_set_affinity,
+#endif
+};
+
 
 static void octeon_irq_ciu1_ack(unsigned int irq)
 {
@@ -322,19 +370,12 @@ static void octeon_irq_ciu1_enable(unsigned int irq)
 	uint64_t en1;
 	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
 
-	/*
-	 * A read lock is used here to make sure only one core is ever
-	 * updating the CIU enable bits at a time. During an enable
-	 * the cores don't interfere with each other. During a disable
-	 * the write lock stops any enables that might cause a
-	 * problem.
-	 */
-	read_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
+	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
 	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
 	en1 |= 1ull << bit;
 	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
 	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
-	read_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
+	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
 }
 
 static void octeon_irq_ciu1_disable(unsigned int irq)
@@ -343,7 +384,7 @@ static void octeon_irq_ciu1_disable(unsigned int irq)
 	unsigned long flags;
 	uint64_t en1;
 	int cpu;
-	write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
+	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
 	for_each_online_cpu(cpu) {
 		int coreid = octeon_coreid_for_cpu(cpu);
 		en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
@@ -355,7 +396,7 @@ static void octeon_irq_ciu1_disable(unsigned int irq)
 	 * of them are done.
 	 */
 	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
-	write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
+	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
 }
 
 /*
@@ -374,7 +415,7 @@ static void octeon_irq_ciu1_enable_v2(unsigned int irq)
  * Disable the irq on the current core for chips that have the EN*_W1{S,C}
  * registers.
  */
-static void octeon_irq_ciu1_disable_v2(unsigned int irq)
+static void octeon_irq_ciu1_ack_v2(unsigned int irq)
 {
 	int index = cvmx_get_core_num() * 2 + 1;
 	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
@@ -383,6 +424,20 @@ static void octeon_irq_ciu1_disable_v2(unsigned int irq)
 }
 
 /*
+ * Enable the irq on the current core for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static void octeon_irq_ciu1_eoi_v2(unsigned int irq)
+{
+	struct irq_desc *desc = irq_desc + irq;
+	int index = cvmx_get_core_num() * 2 + 1;
+	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
+
+	if ((desc->status & IRQ_DISABLED) == 0)
+		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+}
+
+/*
  * Disable the irq on the all cores for chips that have the EN*_W1{S,C}
  * registers.
  */
@@ -405,7 +460,7 @@ static int octeon_irq_ciu1_set_affinity(unsigned int irq,
 	unsigned long flags;
 	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
 
-	write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
+	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
 	for_each_online_cpu(cpu) {
 		int coreid = octeon_coreid_for_cpu(cpu);
 		uint64_t en1 =
@@ -422,7 +477,7 @@ static int octeon_irq_ciu1_set_affinity(unsigned int irq,
 	 * of them are done.
 	 */
 	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
-	write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
+	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
 
 	return 0;
 }
@@ -455,8 +510,8 @@ static struct irq_chip octeon_irq_chip_ciu1_v2 = {
 	.name = "CIU0",
 	.enable = octeon_irq_ciu1_enable_v2,
 	.disable = octeon_irq_ciu1_disable_all_v2,
-	.ack = octeon_irq_ciu1_disable_v2,
-	.eoi = octeon_irq_ciu1_enable_v2,
+	.ack = octeon_irq_ciu1_ack_v2,
+	.eoi = octeon_irq_ciu1_eoi_v2,
 #ifdef CONFIG_SMP
 	.set_affinity = octeon_irq_ciu1_set_affinity_v2,
 #endif
@@ -475,6 +530,8 @@ static struct irq_chip octeon_irq_chip_ciu1 = {
 
 #ifdef CONFIG_PCI_MSI
 
+static DEFINE_RAW_SPINLOCK(octeon_irq_msi_lock);
+
 static void octeon_irq_msi_ack(unsigned int irq)
 {
 	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
@@ -515,12 +572,12 @@ static void octeon_irq_msi_enable(unsigned int irq)
 		 */
 		uint64_t en;
 		unsigned long flags;
-		spin_lock_irqsave(&octeon_irq_msi_lock, flags);
+		raw_spin_lock_irqsave(&octeon_irq_msi_lock, flags);
 		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
 		en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
 		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
 		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
-		spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
+		raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
 	}
 }
 
@@ -537,12 +594,12 @@ static void octeon_irq_msi_disable(unsigned int irq)
 		 */
 		uint64_t en;
 		unsigned long flags;
-		spin_lock_irqsave(&octeon_irq_msi_lock, flags);
+		raw_spin_lock_irqsave(&octeon_irq_msi_lock, flags);
 		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
 		en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
 		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
 		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
-		spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
+		raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
 	}
 }
 
@@ -559,6 +616,7 @@ void __init arch_init_irq(void)
 {
 	int irq;
 	struct irq_chip *chip0;
+	struct irq_chip *chip0_timer;
 	struct irq_chip *chip1;
 
 #ifdef CONFIG_SMP
@@ -574,9 +632,11 @@ void __init arch_init_irq(void)
 	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
 	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
 		chip0 = &octeon_irq_chip_ciu0_v2;
+		chip0_timer = &octeon_irq_chip_ciu0_timer_v2;
 		chip1 = &octeon_irq_chip_ciu1_v2;
 	} else {
 		chip0 = &octeon_irq_chip_ciu0;
+		chip0_timer = &octeon_irq_chip_ciu0_timer;
 		chip1 = &octeon_irq_chip_ciu1;
 	}
 
@@ -590,7 +650,21 @@ void __init arch_init_irq(void)
 
 	/* 24 - 87 CIU_INT_SUM0 */
 	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
-		set_irq_chip_and_handler(irq, chip0, handle_percpu_irq);
+		switch (irq) {
+		case OCTEON_IRQ_GMX_DRP0:
+		case OCTEON_IRQ_GMX_DRP1:
+		case OCTEON_IRQ_IPD_DRP:
+		case OCTEON_IRQ_KEY_ZERO:
+		case OCTEON_IRQ_TIMER0:
+		case OCTEON_IRQ_TIMER1:
+		case OCTEON_IRQ_TIMER2:
+		case OCTEON_IRQ_TIMER3:
+			set_irq_chip_and_handler(irq, chip0_timer, handle_percpu_irq);
+			break;
+		default:
+			set_irq_chip_and_handler(irq, chip0, handle_percpu_irq);
+			break;
+		}
 	}
 
 	/* 88 - 151 CIU_INT_SUM1 */
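The switch above routes the latched, timer-type SUM0 sources (GMX_DRP0/1, IPD_DRP, KEY_ZERO, TIMER0-3) to the new "CIU0-T" chip so that the ack step of the per-CPU flow also clears their SUM0 bit, while every other source keeps the plain CIU0 chip. The sketch below models why that matters, assuming the usual ack -> action -> eoi ordering of a per-CPU flow handler; struct model_chip, handle_percpu() and the other names are illustrative, not the genirq API.

#include <stdint.h>
#include <stdio.h>

static uint64_t sum0;                 /* latched summary bits */
static uint64_t en0 = ~0ull;          /* enable mask          */

struct model_chip {
	void (*ack)(int bit);
	void (*eoi)(int bit);
};

static void plain_ack(int bit) { en0 &= ~(1ull << bit); }                    /* mask only               */
static void timer_ack(int bit) { sum0 &= ~(1ull << bit); plain_ack(bit); }   /* clear latch, then mask  */
static void chip_eoi(int bit)  { en0 |= 1ull << bit; }                       /* re-enable               */

static const struct model_chip ciu0       = { plain_ack, chip_eoi };
static const struct model_chip ciu0_timer = { timer_ack, chip_eoi };

static void handle_percpu(const struct model_chip *chip, int bit)
{
	chip->ack(bit);
	/* the irq action would run here */
	chip->eoi(bit);
}

int main(void)
{
	int bit = 52;
	sum0 = 1ull << bit;                        /* timer fired: bit latched in SUM0      */

	handle_percpu(&ciu0, bit);                 /* plain chip: latch is never cleared    */
	printf("plain chip: sum0=%#llx (still pending)\n", (unsigned long long)sum0);

	sum0 = 1ull << bit;
	handle_percpu(&ciu0_timer, bit);           /* timer chip: latch cleared by the ack  */
	printf("timer chip: sum0=%#llx (acked)\n", (unsigned long long)sum0);
	return 0;
}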