 Documentation/devicetree/bindings/mips/cavium/cib.txt |   43
 arch/mips/cavium-octeon/octeon-irq.c                   | 1049
 2 files changed, 823 insertions(+), 269 deletions(-)
diff --git a/Documentation/devicetree/bindings/mips/cavium/cib.txt b/Documentation/devicetree/bindings/mips/cavium/cib.txt
new file mode 100644
index 000000000000..f39a1aa2852b
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/cavium/cib.txt
@@ -0,0 +1,43 @@
+* Cavium Interrupt Bus widget
+
+Properties:
+- compatible: "cavium,octeon-7130-cib"
+
+  Compatibility with cn70XX SoCs.
+
+- interrupt-controller: This is an interrupt controller.
+
+- reg: Two elements consisting of the addresses of the RAW and EN
+  registers of the CIB block
+
+- cavium,max-bits: The index (zero based) of the highest numbered bit
+  in the CIB block.
+
+- interrupt-parent: Always the CIU on the SoC.
+
+- interrupts: The CIU line to which the CIB block is connected.
+
+- #interrupt-cells: Must be <2>.  The first cell is the bit within the
+  CIB.  The second cell specifies the triggering semantics of the
+  line.
+
+Example:
+
+	interrupt-controller@107000000e000 {
+		compatible = "cavium,octeon-7130-cib";
+		reg = <0x10700 0x0000e000 0x0 0x8>, /* RAW */
+		      <0x10700 0x0000e100 0x0 0x8>; /* EN */
+		cavium,max-bits = <23>;
+
+		interrupt-controller;
+		interrupt-parent = <&ciu>;
+		interrupts = <1 24>;
+		/* Interrupts are specified by two parts:
+		 * 1) Bit number in the CIB* registers
+		 * 2) Triggering (1 - edge rising
+		 *                2 - edge falling
+		 *                4 - level active high
+		 *                8 - level active low)
+		 */
+		#interrupt-cells = <2>;
+	};
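For illustration only (this is not part of the commit): a device taking its interrupt from the CIB would use a two-cell specifier, the first cell selecting the bit in the CIB RAW/EN registers and the second the trigger type, as described above. The device name, unit address, compatible string, and bit number below are hypothetical placeholders, and &cib is assumed to be a label attached to the interrupt-controller node shown in the example.

	cib: interrupt-controller@107000000e000 {
		/* ... as in the example above ... */
	};

	some-device@1070000000800 {
		compatible = "vendor,hypothetical-device"; /* made-up binding */
		reg = <0x10700 0x00000800 0x0 0x100>;
		interrupt-parent = <&cib>;
		/* bit 3 in the CIB, level active high (4) */
		interrupts = <3 4>;
	};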
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 1b25998fe1ea..10f762557b92 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
| @@ -3,12 +3,14 @@ | |||
| 3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
| 4 | * for more details. | 4 | * for more details. |
| 5 | * | 5 | * |
| 6 | * Copyright (C) 2004-2012 Cavium, Inc. | 6 | * Copyright (C) 2004-2014 Cavium, Inc. |
| 7 | */ | 7 | */ |
| 8 | 8 | ||
| 9 | #include <linux/of_address.h> | ||
| 9 | #include <linux/interrupt.h> | 10 | #include <linux/interrupt.h> |
| 10 | #include <linux/irqdomain.h> | 11 | #include <linux/irqdomain.h> |
| 11 | #include <linux/bitops.h> | 12 | #include <linux/bitops.h> |
| 13 | #include <linux/of_irq.h> | ||
| 12 | #include <linux/percpu.h> | 14 | #include <linux/percpu.h> |
| 13 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
| 14 | #include <linux/irq.h> | 16 | #include <linux/irq.h> |
| @@ -22,16 +24,25 @@ static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror); | |||
| 22 | static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror); | 24 | static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror); |
| 23 | static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock); | 25 | static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock); |
| 24 | 26 | ||
| 27 | struct octeon_irq_ciu_domain_data { | ||
| 28 | int num_sum; /* number of sum registers (2 or 3). */ | ||
| 29 | }; | ||
| 30 | |||
| 25 | static __read_mostly u8 octeon_irq_ciu_to_irq[8][64]; | 31 | static __read_mostly u8 octeon_irq_ciu_to_irq[8][64]; |
| 26 | 32 | ||
| 27 | union octeon_ciu_chip_data { | 33 | struct octeon_ciu_chip_data { |
| 28 | void *p; | 34 | union { |
| 29 | unsigned long l; | 35 | struct { /* only used for ciu3 */ |
| 30 | struct { | 36 | u64 ciu3_addr; |
| 31 | unsigned long line:6; | 37 | unsigned int intsn; |
| 32 | unsigned long bit:6; | 38 | }; |
| 33 | unsigned long gpio_line:6; | 39 | struct { /* only used for ciu/ciu2 */ |
| 34 | } s; | 40 | u8 line; |
| 41 | u8 bit; | ||
| 42 | u8 gpio_line; | ||
| 43 | }; | ||
| 44 | }; | ||
| 45 | int current_cpu; /* Next CPU expected to take this irq */ | ||
| 35 | }; | 46 | }; |
| 36 | 47 | ||
| 37 | struct octeon_core_chip_data { | 48 | struct octeon_core_chip_data { |
| @@ -45,27 +56,40 @@ struct octeon_core_chip_data { | |||
| 45 | 56 | ||
| 46 | static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES]; | 57 | static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES]; |
| 47 | 58 | ||
| 48 | static void octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line, | 59 | static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line, |
| 49 | struct irq_chip *chip, | 60 | struct irq_chip *chip, |
| 50 | irq_flow_handler_t handler) | 61 | irq_flow_handler_t handler) |
| 51 | { | 62 | { |
| 52 | union octeon_ciu_chip_data cd; | 63 | struct octeon_ciu_chip_data *cd; |
| 64 | |||
| 65 | cd = kzalloc(sizeof(*cd), GFP_KERNEL); | ||
| 66 | if (!cd) | ||
| 67 | return -ENOMEM; | ||
| 53 | 68 | ||
| 54 | irq_set_chip_and_handler(irq, chip, handler); | 69 | irq_set_chip_and_handler(irq, chip, handler); |
| 55 | 70 | ||
| 56 | cd.l = 0; | 71 | cd->line = line; |
| 57 | cd.s.line = line; | 72 | cd->bit = bit; |
| 58 | cd.s.bit = bit; | 73 | cd->gpio_line = gpio_line; |
| 59 | cd.s.gpio_line = gpio_line; | ||
| 60 | 74 | ||
| 61 | irq_set_chip_data(irq, cd.p); | 75 | irq_set_chip_data(irq, cd); |
| 62 | octeon_irq_ciu_to_irq[line][bit] = irq; | 76 | octeon_irq_ciu_to_irq[line][bit] = irq; |
| 77 | return 0; | ||
| 78 | } | ||
| 79 | |||
| 80 | static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq) | ||
| 81 | { | ||
| 82 | struct irq_data *data = irq_get_irq_data(irq); | ||
| 83 | struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data); | ||
| 84 | |||
| 85 | irq_set_chip_data(irq, NULL); | ||
| 86 | kfree(cd); | ||
| 63 | } | 87 | } |
| 64 | 88 | ||
| 65 | static void octeon_irq_force_ciu_mapping(struct irq_domain *domain, | 89 | static int octeon_irq_force_ciu_mapping(struct irq_domain *domain, |
| 66 | int irq, int line, int bit) | 90 | int irq, int line, int bit) |
| 67 | { | 91 | { |
| 68 | irq_domain_associate(domain, irq, line << 6 | bit); | 92 | return irq_domain_associate(domain, irq, line << 6 | bit); |
| 69 | } | 93 | } |
| 70 | 94 | ||
| 71 | static int octeon_coreid_for_cpu(int cpu) | 95 | static int octeon_coreid_for_cpu(int cpu) |
| @@ -202,9 +226,10 @@ static int next_cpu_for_irq(struct irq_data *data) | |||
| 202 | #ifdef CONFIG_SMP | 226 | #ifdef CONFIG_SMP |
| 203 | int cpu; | 227 | int cpu; |
| 204 | int weight = cpumask_weight(data->affinity); | 228 | int weight = cpumask_weight(data->affinity); |
| 229 | struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data); | ||
| 205 | 230 | ||
| 206 | if (weight > 1) { | 231 | if (weight > 1) { |
| 207 | cpu = smp_processor_id(); | 232 | cpu = cd->current_cpu; |
| 208 | for (;;) { | 233 | for (;;) { |
| 209 | cpu = cpumask_next(cpu, data->affinity); | 234 | cpu = cpumask_next(cpu, data->affinity); |
| 210 | if (cpu >= nr_cpu_ids) { | 235 | if (cpu >= nr_cpu_ids) { |
| @@ -219,6 +244,7 @@ static int next_cpu_for_irq(struct irq_data *data) | |||
| 219 | } else { | 244 | } else { |
| 220 | cpu = smp_processor_id(); | 245 | cpu = smp_processor_id(); |
| 221 | } | 246 | } |
| 247 | cd->current_cpu = cpu; | ||
| 222 | return cpu; | 248 | return cpu; |
| 223 | #else | 249 | #else |
| 224 | return smp_processor_id(); | 250 | return smp_processor_id(); |
| @@ -231,15 +257,15 @@ static void octeon_irq_ciu_enable(struct irq_data *data) | |||
| 231 | int coreid = octeon_coreid_for_cpu(cpu); | 257 | int coreid = octeon_coreid_for_cpu(cpu); |
| 232 | unsigned long *pen; | 258 | unsigned long *pen; |
| 233 | unsigned long flags; | 259 | unsigned long flags; |
| 234 | union octeon_ciu_chip_data cd; | 260 | struct octeon_ciu_chip_data *cd; |
| 235 | raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); | 261 | raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); |
| 236 | 262 | ||
| 237 | cd.p = irq_data_get_irq_chip_data(data); | 263 | cd = irq_data_get_irq_chip_data(data); |
| 238 | 264 | ||
| 239 | raw_spin_lock_irqsave(lock, flags); | 265 | raw_spin_lock_irqsave(lock, flags); |
| 240 | if (cd.s.line == 0) { | 266 | if (cd->line == 0) { |
| 241 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); | 267 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); |
| 242 | __set_bit(cd.s.bit, pen); | 268 | __set_bit(cd->bit, pen); |
| 243 | /* | 269 | /* |
| 244 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 270 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
| 245 | * enabling the irq. | 271 | * enabling the irq. |
| @@ -248,7 +274,7 @@ static void octeon_irq_ciu_enable(struct irq_data *data) | |||
| 248 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); | 274 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); |
| 249 | } else { | 275 | } else { |
| 250 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); | 276 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); |
| 251 | __set_bit(cd.s.bit, pen); | 277 | __set_bit(cd->bit, pen); |
| 252 | /* | 278 | /* |
| 253 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 279 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
| 254 | * enabling the irq. | 280 | * enabling the irq. |
| @@ -263,15 +289,15 @@ static void octeon_irq_ciu_enable_local(struct irq_data *data) | |||
| 263 | { | 289 | { |
| 264 | unsigned long *pen; | 290 | unsigned long *pen; |
| 265 | unsigned long flags; | 291 | unsigned long flags; |
| 266 | union octeon_ciu_chip_data cd; | 292 | struct octeon_ciu_chip_data *cd; |
| 267 | raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock); | 293 | raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock); |
| 268 | 294 | ||
| 269 | cd.p = irq_data_get_irq_chip_data(data); | 295 | cd = irq_data_get_irq_chip_data(data); |
| 270 | 296 | ||
| 271 | raw_spin_lock_irqsave(lock, flags); | 297 | raw_spin_lock_irqsave(lock, flags); |
| 272 | if (cd.s.line == 0) { | 298 | if (cd->line == 0) { |
| 273 | pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror); | 299 | pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror); |
| 274 | __set_bit(cd.s.bit, pen); | 300 | __set_bit(cd->bit, pen); |
| 275 | /* | 301 | /* |
| 276 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 302 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
| 277 | * enabling the irq. | 303 | * enabling the irq. |
| @@ -280,7 +306,7 @@ static void octeon_irq_ciu_enable_local(struct irq_data *data) | |||
| 280 | cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); | 306 | cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); |
| 281 | } else { | 307 | } else { |
| 282 | pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror); | 308 | pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror); |
| 283 | __set_bit(cd.s.bit, pen); | 309 | __set_bit(cd->bit, pen); |
| 284 | /* | 310 | /* |
| 285 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 311 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
| 286 | * enabling the irq. | 312 | * enabling the irq. |
| @@ -295,15 +321,15 @@ static void octeon_irq_ciu_disable_local(struct irq_data *data) | |||
| 295 | { | 321 | { |
| 296 | unsigned long *pen; | 322 | unsigned long *pen; |
| 297 | unsigned long flags; | 323 | unsigned long flags; |
| 298 | union octeon_ciu_chip_data cd; | 324 | struct octeon_ciu_chip_data *cd; |
| 299 | raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock); | 325 | raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock); |
| 300 | 326 | ||
| 301 | cd.p = irq_data_get_irq_chip_data(data); | 327 | cd = irq_data_get_irq_chip_data(data); |
| 302 | 328 | ||
| 303 | raw_spin_lock_irqsave(lock, flags); | 329 | raw_spin_lock_irqsave(lock, flags); |
| 304 | if (cd.s.line == 0) { | 330 | if (cd->line == 0) { |
| 305 | pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror); | 331 | pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror); |
| 306 | __clear_bit(cd.s.bit, pen); | 332 | __clear_bit(cd->bit, pen); |
| 307 | /* | 333 | /* |
| 308 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 334 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
| 309 | * enabling the irq. | 335 | * enabling the irq. |
| @@ -312,7 +338,7 @@ static void octeon_irq_ciu_disable_local(struct irq_data *data) | |||
| 312 | cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); | 338 | cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); |
| 313 | } else { | 339 | } else { |
| 314 | pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror); | 340 | pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror); |
| 315 | __clear_bit(cd.s.bit, pen); | 341 | __clear_bit(cd->bit, pen); |
| 316 | /* | 342 | /* |
| 317 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 343 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
| 318 | * enabling the irq. | 344 | * enabling the irq. |
| @@ -328,27 +354,27 @@ static void octeon_irq_ciu_disable_all(struct irq_data *data) | |||
| 328 | unsigned long flags; | 354 | unsigned long flags; |
| 329 | unsigned long *pen; | 355 | unsigned long *pen; |
| 330 | int cpu; | 356 | int cpu; |
| 331 | union octeon_ciu_chip_data cd; | 357 | struct octeon_ciu_chip_data *cd; |
| 332 | raw_spinlock_t *lock; | 358 | raw_spinlock_t *lock; |
| 333 | 359 | ||
| 334 | cd.p = irq_data_get_irq_chip_data(data); | 360 | cd = irq_data_get_irq_chip_data(data); |
| 335 | 361 | ||
| 336 | for_each_online_cpu(cpu) { | 362 | for_each_online_cpu(cpu) { |
| 337 | int coreid = octeon_coreid_for_cpu(cpu); | 363 | int coreid = octeon_coreid_for_cpu(cpu); |
| 338 | lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); | 364 | lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); |
| 339 | if (cd.s.line == 0) | 365 | if (cd->line == 0) |
| 340 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); | 366 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); |
| 341 | else | 367 | else |
| 342 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); | 368 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); |
| 343 | 369 | ||
| 344 | raw_spin_lock_irqsave(lock, flags); | 370 | raw_spin_lock_irqsave(lock, flags); |
| 345 | __clear_bit(cd.s.bit, pen); | 371 | __clear_bit(cd->bit, pen); |
| 346 | /* | 372 | /* |
| 347 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 373 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
| 348 | * enabling the irq. | 374 | * enabling the irq. |
| 349 | */ | 375 | */ |
| 350 | wmb(); | 376 | wmb(); |
| 351 | if (cd.s.line == 0) | 377 | if (cd->line == 0) |
| 352 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); | 378 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); |
| 353 | else | 379 | else |
| 354 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); | 380 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); |
| @@ -361,27 +387,27 @@ static void octeon_irq_ciu_enable_all(struct irq_data *data) | |||
| 361 | unsigned long flags; | 387 | unsigned long flags; |
| 362 | unsigned long *pen; | 388 | unsigned long *pen; |
| 363 | int cpu; | 389 | int cpu; |
| 364 | union octeon_ciu_chip_data cd; | 390 | struct octeon_ciu_chip_data *cd; |
| 365 | raw_spinlock_t *lock; | 391 | raw_spinlock_t *lock; |
| 366 | 392 | ||
| 367 | cd.p = irq_data_get_irq_chip_data(data); | 393 | cd = irq_data_get_irq_chip_data(data); |
| 368 | 394 | ||
| 369 | for_each_online_cpu(cpu) { | 395 | for_each_online_cpu(cpu) { |
| 370 | int coreid = octeon_coreid_for_cpu(cpu); | 396 | int coreid = octeon_coreid_for_cpu(cpu); |
| 371 | lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); | 397 | lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); |
| 372 | if (cd.s.line == 0) | 398 | if (cd->line == 0) |
| 373 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); | 399 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); |
| 374 | else | 400 | else |
| 375 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); | 401 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); |
| 376 | 402 | ||
| 377 | raw_spin_lock_irqsave(lock, flags); | 403 | raw_spin_lock_irqsave(lock, flags); |
| 378 | __set_bit(cd.s.bit, pen); | 404 | __set_bit(cd->bit, pen); |
| 379 | /* | 405 | /* |
| 380 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 406 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
| 381 | * enabling the irq. | 407 | * enabling the irq. |
| 382 | */ | 408 | */ |
| 383 | wmb(); | 409 | wmb(); |
| 384 | if (cd.s.line == 0) | 410 | if (cd->line == 0) |
| 385 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); | 411 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); |
| 386 | else | 412 | else |
| 387 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); | 413 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); |
| @@ -397,45 +423,106 @@ static void octeon_irq_ciu_enable_v2(struct irq_data *data) | |||
| 397 | { | 423 | { |
| 398 | u64 mask; | 424 | u64 mask; |
| 399 | int cpu = next_cpu_for_irq(data); | 425 | int cpu = next_cpu_for_irq(data); |
| 400 | union octeon_ciu_chip_data cd; | 426 | struct octeon_ciu_chip_data *cd; |
| 401 | 427 | ||
| 402 | cd.p = irq_data_get_irq_chip_data(data); | 428 | cd = irq_data_get_irq_chip_data(data); |
| 403 | mask = 1ull << (cd.s.bit); | 429 | mask = 1ull << (cd->bit); |
| 404 | 430 | ||
| 405 | /* | 431 | /* |
| 406 | * Called under the desc lock, so these should never get out | 432 | * Called under the desc lock, so these should never get out |
| 407 | * of sync. | 433 | * of sync. |
| 408 | */ | 434 | */ |
| 409 | if (cd.s.line == 0) { | 435 | if (cd->line == 0) { |
| 410 | int index = octeon_coreid_for_cpu(cpu) * 2; | 436 | int index = octeon_coreid_for_cpu(cpu) * 2; |
| 411 | set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); | 437 | set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); |
| 412 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | 438 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); |
| 413 | } else { | 439 | } else { |
| 414 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; | 440 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; |
| 415 | set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); | 441 | set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); |
| 416 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | 442 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); |
| 417 | } | 443 | } |
| 418 | } | 444 | } |
| 419 | 445 | ||
| 420 | /* | 446 | /* |
| 447 | * Enable the irq in the sum2 registers. | ||
| 448 | */ | ||
| 449 | static void octeon_irq_ciu_enable_sum2(struct irq_data *data) | ||
| 450 | { | ||
| 451 | u64 mask; | ||
| 452 | int cpu = next_cpu_for_irq(data); | ||
| 453 | int index = octeon_coreid_for_cpu(cpu); | ||
| 454 | struct octeon_ciu_chip_data *cd; | ||
| 455 | |||
| 456 | cd = irq_data_get_irq_chip_data(data); | ||
| 457 | mask = 1ull << (cd->bit); | ||
| 458 | |||
| 459 | cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask); | ||
| 460 | } | ||
| 461 | |||
| 462 | /* | ||
| 463 | * Disable the irq in the sum2 registers. | ||
| 464 | */ | ||
| 465 | static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data) | ||
| 466 | { | ||
| 467 | u64 mask; | ||
| 468 | int cpu = next_cpu_for_irq(data); | ||
| 469 | int index = octeon_coreid_for_cpu(cpu); | ||
| 470 | struct octeon_ciu_chip_data *cd; | ||
| 471 | |||
| 472 | cd = irq_data_get_irq_chip_data(data); | ||
| 473 | mask = 1ull << (cd->bit); | ||
| 474 | |||
| 475 | cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask); | ||
| 476 | } | ||
| 477 | |||
| 478 | static void octeon_irq_ciu_ack_sum2(struct irq_data *data) | ||
| 479 | { | ||
| 480 | u64 mask; | ||
| 481 | int cpu = next_cpu_for_irq(data); | ||
| 482 | int index = octeon_coreid_for_cpu(cpu); | ||
| 483 | struct octeon_ciu_chip_data *cd; | ||
| 484 | |||
| 485 | cd = irq_data_get_irq_chip_data(data); | ||
| 486 | mask = 1ull << (cd->bit); | ||
| 487 | |||
| 488 | cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask); | ||
| 489 | } | ||
| 490 | |||
| 491 | static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data) | ||
| 492 | { | ||
| 493 | int cpu; | ||
| 494 | struct octeon_ciu_chip_data *cd; | ||
| 495 | u64 mask; | ||
| 496 | |||
| 497 | cd = irq_data_get_irq_chip_data(data); | ||
| 498 | mask = 1ull << (cd->bit); | ||
| 499 | |||
| 500 | for_each_online_cpu(cpu) { | ||
| 501 | int coreid = octeon_coreid_for_cpu(cpu); | ||
| 502 | |||
| 503 | cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask); | ||
| 504 | } | ||
| 505 | } | ||
| 506 | |||
| 507 | /* | ||
| 421 | * Enable the irq on the current CPU for chips that | 508 | * Enable the irq on the current CPU for chips that |
| 422 | * have the EN*_W1{S,C} registers. | 509 | * have the EN*_W1{S,C} registers. |
| 423 | */ | 510 | */ |
| 424 | static void octeon_irq_ciu_enable_local_v2(struct irq_data *data) | 511 | static void octeon_irq_ciu_enable_local_v2(struct irq_data *data) |
| 425 | { | 512 | { |
| 426 | u64 mask; | 513 | u64 mask; |
| 427 | union octeon_ciu_chip_data cd; | 514 | struct octeon_ciu_chip_data *cd; |
| 428 | 515 | ||
| 429 | cd.p = irq_data_get_irq_chip_data(data); | 516 | cd = irq_data_get_irq_chip_data(data); |
| 430 | mask = 1ull << (cd.s.bit); | 517 | mask = 1ull << (cd->bit); |
| 431 | 518 | ||
| 432 | if (cd.s.line == 0) { | 519 | if (cd->line == 0) { |
| 433 | int index = cvmx_get_core_num() * 2; | 520 | int index = cvmx_get_core_num() * 2; |
| 434 | set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); | 521 | set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); |
| 435 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | 522 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); |
| 436 | } else { | 523 | } else { |
| 437 | int index = cvmx_get_core_num() * 2 + 1; | 524 | int index = cvmx_get_core_num() * 2 + 1; |
| 438 | set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); | 525 | set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); |
| 439 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | 526 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); |
| 440 | } | 527 | } |
| 441 | } | 528 | } |
| @@ -443,18 +530,18 @@ static void octeon_irq_ciu_enable_local_v2(struct irq_data *data) | |||
| 443 | static void octeon_irq_ciu_disable_local_v2(struct irq_data *data) | 530 | static void octeon_irq_ciu_disable_local_v2(struct irq_data *data) |
| 444 | { | 531 | { |
| 445 | u64 mask; | 532 | u64 mask; |
| 446 | union octeon_ciu_chip_data cd; | 533 | struct octeon_ciu_chip_data *cd; |
| 447 | 534 | ||
| 448 | cd.p = irq_data_get_irq_chip_data(data); | 535 | cd = irq_data_get_irq_chip_data(data); |
| 449 | mask = 1ull << (cd.s.bit); | 536 | mask = 1ull << (cd->bit); |
| 450 | 537 | ||
| 451 | if (cd.s.line == 0) { | 538 | if (cd->line == 0) { |
| 452 | int index = cvmx_get_core_num() * 2; | 539 | int index = cvmx_get_core_num() * 2; |
| 453 | clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); | 540 | clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); |
| 454 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); | 541 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); |
| 455 | } else { | 542 | } else { |
| 456 | int index = cvmx_get_core_num() * 2 + 1; | 543 | int index = cvmx_get_core_num() * 2 + 1; |
| 457 | clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); | 544 | clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); |
| 458 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); | 545 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); |
| 459 | } | 546 | } |
| 460 | } | 547 | } |
| @@ -465,12 +552,12 @@ static void octeon_irq_ciu_disable_local_v2(struct irq_data *data) | |||
| 465 | static void octeon_irq_ciu_ack(struct irq_data *data) | 552 | static void octeon_irq_ciu_ack(struct irq_data *data) |
| 466 | { | 553 | { |
| 467 | u64 mask; | 554 | u64 mask; |
| 468 | union octeon_ciu_chip_data cd; | 555 | struct octeon_ciu_chip_data *cd; |
| 469 | 556 | ||
| 470 | cd.p = irq_data_get_irq_chip_data(data); | 557 | cd = irq_data_get_irq_chip_data(data); |
| 471 | mask = 1ull << (cd.s.bit); | 558 | mask = 1ull << (cd->bit); |
| 472 | 559 | ||
| 473 | if (cd.s.line == 0) { | 560 | if (cd->line == 0) { |
| 474 | int index = cvmx_get_core_num() * 2; | 561 | int index = cvmx_get_core_num() * 2; |
| 475 | cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask); | 562 | cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask); |
| 476 | } else { | 563 | } else { |
| @@ -486,21 +573,23 @@ static void octeon_irq_ciu_disable_all_v2(struct irq_data *data) | |||
| 486 | { | 573 | { |
| 487 | int cpu; | 574 | int cpu; |
| 488 | u64 mask; | 575 | u64 mask; |
| 489 | union octeon_ciu_chip_data cd; | 576 | struct octeon_ciu_chip_data *cd; |
| 490 | 577 | ||
| 491 | cd.p = irq_data_get_irq_chip_data(data); | 578 | cd = irq_data_get_irq_chip_data(data); |
| 492 | mask = 1ull << (cd.s.bit); | 579 | mask = 1ull << (cd->bit); |
| 493 | 580 | ||
| 494 | if (cd.s.line == 0) { | 581 | if (cd->line == 0) { |
| 495 | for_each_online_cpu(cpu) { | 582 | for_each_online_cpu(cpu) { |
| 496 | int index = octeon_coreid_for_cpu(cpu) * 2; | 583 | int index = octeon_coreid_for_cpu(cpu) * 2; |
| 497 | clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); | 584 | clear_bit(cd->bit, |
| 585 | &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); | ||
| 498 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); | 586 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); |
| 499 | } | 587 | } |
| 500 | } else { | 588 | } else { |
| 501 | for_each_online_cpu(cpu) { | 589 | for_each_online_cpu(cpu) { |
| 502 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; | 590 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; |
| 503 | clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); | 591 | clear_bit(cd->bit, |
| 592 | &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); | ||
| 504 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); | 593 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); |
| 505 | } | 594 | } |
| 506 | } | 595 | } |
| @@ -514,21 +603,23 @@ static void octeon_irq_ciu_enable_all_v2(struct irq_data *data) | |||
| 514 | { | 603 | { |
| 515 | int cpu; | 604 | int cpu; |
| 516 | u64 mask; | 605 | u64 mask; |
| 517 | union octeon_ciu_chip_data cd; | 606 | struct octeon_ciu_chip_data *cd; |
| 518 | 607 | ||
| 519 | cd.p = irq_data_get_irq_chip_data(data); | 608 | cd = irq_data_get_irq_chip_data(data); |
| 520 | mask = 1ull << (cd.s.bit); | 609 | mask = 1ull << (cd->bit); |
| 521 | 610 | ||
| 522 | if (cd.s.line == 0) { | 611 | if (cd->line == 0) { |
| 523 | for_each_online_cpu(cpu) { | 612 | for_each_online_cpu(cpu) { |
| 524 | int index = octeon_coreid_for_cpu(cpu) * 2; | 613 | int index = octeon_coreid_for_cpu(cpu) * 2; |
| 525 | set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); | 614 | set_bit(cd->bit, |
| 615 | &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); | ||
| 526 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | 616 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); |
| 527 | } | 617 | } |
| 528 | } else { | 618 | } else { |
| 529 | for_each_online_cpu(cpu) { | 619 | for_each_online_cpu(cpu) { |
| 530 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; | 620 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; |
| 531 | set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); | 621 | set_bit(cd->bit, |
| 622 | &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); | ||
| 532 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | 623 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); |
| 533 | } | 624 | } |
| 534 | } | 625 | } |
| @@ -537,10 +628,10 @@ static void octeon_irq_ciu_enable_all_v2(struct irq_data *data) | |||
| 537 | static void octeon_irq_gpio_setup(struct irq_data *data) | 628 | static void octeon_irq_gpio_setup(struct irq_data *data) |
| 538 | { | 629 | { |
| 539 | union cvmx_gpio_bit_cfgx cfg; | 630 | union cvmx_gpio_bit_cfgx cfg; |
| 540 | union octeon_ciu_chip_data cd; | 631 | struct octeon_ciu_chip_data *cd; |
| 541 | u32 t = irqd_get_trigger_type(data); | 632 | u32 t = irqd_get_trigger_type(data); |
| 542 | 633 | ||
| 543 | cd.p = irq_data_get_irq_chip_data(data); | 634 | cd = irq_data_get_irq_chip_data(data); |
| 544 | 635 | ||
| 545 | cfg.u64 = 0; | 636 | cfg.u64 = 0; |
| 546 | cfg.s.int_en = 1; | 637 | cfg.s.int_en = 1; |
| @@ -551,7 +642,7 @@ static void octeon_irq_gpio_setup(struct irq_data *data) | |||
| 551 | cfg.s.fil_cnt = 7; | 642 | cfg.s.fil_cnt = 7; |
| 552 | cfg.s.fil_sel = 3; | 643 | cfg.s.fil_sel = 3; |
| 553 | 644 | ||
| 554 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), cfg.u64); | 645 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64); |
| 555 | } | 646 | } |
| 556 | 647 | ||
| 557 | static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data) | 648 | static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data) |
| @@ -576,36 +667,36 @@ static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t) | |||
| 576 | 667 | ||
| 577 | static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data) | 668 | static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data) |
| 578 | { | 669 | { |
| 579 | union octeon_ciu_chip_data cd; | 670 | struct octeon_ciu_chip_data *cd; |
| 580 | 671 | ||
| 581 | cd.p = irq_data_get_irq_chip_data(data); | 672 | cd = irq_data_get_irq_chip_data(data); |
| 582 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); | 673 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0); |
| 583 | 674 | ||
| 584 | octeon_irq_ciu_disable_all_v2(data); | 675 | octeon_irq_ciu_disable_all_v2(data); |
| 585 | } | 676 | } |
| 586 | 677 | ||
| 587 | static void octeon_irq_ciu_disable_gpio(struct irq_data *data) | 678 | static void octeon_irq_ciu_disable_gpio(struct irq_data *data) |
| 588 | { | 679 | { |
| 589 | union octeon_ciu_chip_data cd; | 680 | struct octeon_ciu_chip_data *cd; |
| 590 | 681 | ||
| 591 | cd.p = irq_data_get_irq_chip_data(data); | 682 | cd = irq_data_get_irq_chip_data(data); |
| 592 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); | 683 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0); |
| 593 | 684 | ||
| 594 | octeon_irq_ciu_disable_all(data); | 685 | octeon_irq_ciu_disable_all(data); |
| 595 | } | 686 | } |
| 596 | 687 | ||
| 597 | static void octeon_irq_ciu_gpio_ack(struct irq_data *data) | 688 | static void octeon_irq_ciu_gpio_ack(struct irq_data *data) |
| 598 | { | 689 | { |
| 599 | union octeon_ciu_chip_data cd; | 690 | struct octeon_ciu_chip_data *cd; |
| 600 | u64 mask; | 691 | u64 mask; |
| 601 | 692 | ||
| 602 | cd.p = irq_data_get_irq_chip_data(data); | 693 | cd = irq_data_get_irq_chip_data(data); |
| 603 | mask = 1ull << (cd.s.gpio_line); | 694 | mask = 1ull << (cd->gpio_line); |
| 604 | 695 | ||
| 605 | cvmx_write_csr(CVMX_GPIO_INT_CLR, mask); | 696 | cvmx_write_csr(CVMX_GPIO_INT_CLR, mask); |
| 606 | } | 697 | } |
| 607 | 698 | ||
| 608 | static void octeon_irq_handle_gpio(unsigned int irq, struct irq_desc *desc) | 699 | static void octeon_irq_handle_trigger(unsigned int irq, struct irq_desc *desc) |
| 609 | { | 700 | { |
| 610 | if (irq_get_trigger_type(irq) & IRQ_TYPE_EDGE_BOTH) | 701 | if (irq_get_trigger_type(irq) & IRQ_TYPE_EDGE_BOTH) |
| 611 | handle_edge_irq(irq, desc); | 702 | handle_edge_irq(irq, desc); |
| @@ -644,11 +735,11 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data, | |||
| 644 | int cpu; | 735 | int cpu; |
| 645 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); | 736 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); |
| 646 | unsigned long flags; | 737 | unsigned long flags; |
| 647 | union octeon_ciu_chip_data cd; | 738 | struct octeon_ciu_chip_data *cd; |
| 648 | unsigned long *pen; | 739 | unsigned long *pen; |
| 649 | raw_spinlock_t *lock; | 740 | raw_spinlock_t *lock; |
| 650 | 741 | ||
| 651 | cd.p = irq_data_get_irq_chip_data(data); | 742 | cd = irq_data_get_irq_chip_data(data); |
| 652 | 743 | ||
| 653 | /* | 744 | /* |
| 654 | * For non-v2 CIU, we will allow only single CPU affinity. | 745 | * For non-v2 CIU, we will allow only single CPU affinity. |
| @@ -668,16 +759,16 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data, | |||
| 668 | lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); | 759 | lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); |
| 669 | raw_spin_lock_irqsave(lock, flags); | 760 | raw_spin_lock_irqsave(lock, flags); |
| 670 | 761 | ||
| 671 | if (cd.s.line == 0) | 762 | if (cd->line == 0) |
| 672 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); | 763 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); |
| 673 | else | 764 | else |
| 674 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); | 765 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); |
| 675 | 766 | ||
| 676 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | 767 | if (cpumask_test_cpu(cpu, dest) && enable_one) { |
| 677 | enable_one = 0; | 768 | enable_one = 0; |
| 678 | __set_bit(cd.s.bit, pen); | 769 | __set_bit(cd->bit, pen); |
| 679 | } else { | 770 | } else { |
| 680 | __clear_bit(cd.s.bit, pen); | 771 | __clear_bit(cd->bit, pen); |
| 681 | } | 772 | } |
| 682 | /* | 773 | /* |
| 683 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 774 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
| @@ -685,7 +776,7 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data, | |||
| 685 | */ | 776 | */ |
| 686 | wmb(); | 777 | wmb(); |
| 687 | 778 | ||
| 688 | if (cd.s.line == 0) | 779 | if (cd->line == 0) |
| 689 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); | 780 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); |
| 690 | else | 781 | else |
| 691 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); | 782 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); |
| @@ -706,24 +797,24 @@ static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data, | |||
| 706 | int cpu; | 797 | int cpu; |
| 707 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); | 798 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); |
| 708 | u64 mask; | 799 | u64 mask; |
| 709 | union octeon_ciu_chip_data cd; | 800 | struct octeon_ciu_chip_data *cd; |
| 710 | 801 | ||
| 711 | if (!enable_one) | 802 | if (!enable_one) |
| 712 | return 0; | 803 | return 0; |
| 713 | 804 | ||
| 714 | cd.p = irq_data_get_irq_chip_data(data); | 805 | cd = irq_data_get_irq_chip_data(data); |
| 715 | mask = 1ull << cd.s.bit; | 806 | mask = 1ull << cd->bit; |
| 716 | 807 | ||
| 717 | if (cd.s.line == 0) { | 808 | if (cd->line == 0) { |
| 718 | for_each_online_cpu(cpu) { | 809 | for_each_online_cpu(cpu) { |
| 719 | unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); | 810 | unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); |
| 720 | int index = octeon_coreid_for_cpu(cpu) * 2; | 811 | int index = octeon_coreid_for_cpu(cpu) * 2; |
| 721 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | 812 | if (cpumask_test_cpu(cpu, dest) && enable_one) { |
| 722 | enable_one = false; | 813 | enable_one = false; |
| 723 | set_bit(cd.s.bit, pen); | 814 | set_bit(cd->bit, pen); |
| 724 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | 815 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); |
| 725 | } else { | 816 | } else { |
| 726 | clear_bit(cd.s.bit, pen); | 817 | clear_bit(cd->bit, pen); |
| 727 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); | 818 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); |
| 728 | } | 819 | } |
| 729 | } | 820 | } |
| @@ -733,16 +824,44 @@ static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data, | |||
| 733 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; | 824 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; |
| 734 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | 825 | if (cpumask_test_cpu(cpu, dest) && enable_one) { |
| 735 | enable_one = false; | 826 | enable_one = false; |
| 736 | set_bit(cd.s.bit, pen); | 827 | set_bit(cd->bit, pen); |
| 737 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | 828 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); |
| 738 | } else { | 829 | } else { |
| 739 | clear_bit(cd.s.bit, pen); | 830 | clear_bit(cd->bit, pen); |
| 740 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); | 831 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); |
| 741 | } | 832 | } |
| 742 | } | 833 | } |
| 743 | } | 834 | } |
| 744 | return 0; | 835 | return 0; |
| 745 | } | 836 | } |
| 837 | |||
| 838 | static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data, | ||
| 839 | const struct cpumask *dest, | ||
| 840 | bool force) | ||
| 841 | { | ||
| 842 | int cpu; | ||
| 843 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); | ||
| 844 | u64 mask; | ||
| 845 | struct octeon_ciu_chip_data *cd; | ||
| 846 | |||
| 847 | if (!enable_one) | ||
| 848 | return 0; | ||
| 849 | |||
| 850 | cd = irq_data_get_irq_chip_data(data); | ||
| 851 | mask = 1ull << cd->bit; | ||
| 852 | |||
| 853 | for_each_online_cpu(cpu) { | ||
| 854 | int index = octeon_coreid_for_cpu(cpu); | ||
| 855 | |||
| 856 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | ||
| 857 | enable_one = false; | ||
| 858 | cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask); | ||
| 859 | } else { | ||
| 860 | cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask); | ||
| 861 | } | ||
| 862 | } | ||
| 863 | return 0; | ||
| 864 | } | ||
| 746 | #endif | 865 | #endif |
| 747 | 866 | ||
| 748 | /* | 867 | /* |
| @@ -773,6 +892,34 @@ static struct irq_chip octeon_irq_chip_ciu_v2_edge = { | |||
| 773 | #endif | 892 | #endif |
| 774 | }; | 893 | }; |
| 775 | 894 | ||
| 895 | /* | ||
| 896 | * Newer octeon chips have support for lockless CIU operation. | ||
| 897 | */ | ||
| 898 | static struct irq_chip octeon_irq_chip_ciu_sum2 = { | ||
| 899 | .name = "CIU", | ||
| 900 | .irq_enable = octeon_irq_ciu_enable_sum2, | ||
| 901 | .irq_disable = octeon_irq_ciu_disable_all_sum2, | ||
| 902 | .irq_mask = octeon_irq_ciu_disable_local_sum2, | ||
| 903 | .irq_unmask = octeon_irq_ciu_enable_sum2, | ||
| 904 | #ifdef CONFIG_SMP | ||
| 905 | .irq_set_affinity = octeon_irq_ciu_set_affinity_sum2, | ||
| 906 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, | ||
| 907 | #endif | ||
| 908 | }; | ||
| 909 | |||
| 910 | static struct irq_chip octeon_irq_chip_ciu_sum2_edge = { | ||
| 911 | .name = "CIU", | ||
| 912 | .irq_enable = octeon_irq_ciu_enable_sum2, | ||
| 913 | .irq_disable = octeon_irq_ciu_disable_all_sum2, | ||
| 914 | .irq_ack = octeon_irq_ciu_ack_sum2, | ||
| 915 | .irq_mask = octeon_irq_ciu_disable_local_sum2, | ||
| 916 | .irq_unmask = octeon_irq_ciu_enable_sum2, | ||
| 917 | #ifdef CONFIG_SMP | ||
| 918 | .irq_set_affinity = octeon_irq_ciu_set_affinity_sum2, | ||
| 919 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, | ||
| 920 | #endif | ||
| 921 | }; | ||
| 922 | |||
| 776 | static struct irq_chip octeon_irq_chip_ciu = { | 923 | static struct irq_chip octeon_irq_chip_ciu = { |
| 777 | .name = "CIU", | 924 | .name = "CIU", |
| 778 | .irq_enable = octeon_irq_ciu_enable, | 925 | .irq_enable = octeon_irq_ciu_enable, |
| @@ -994,11 +1141,12 @@ static int octeon_irq_ciu_xlat(struct irq_domain *d, | |||
| 994 | unsigned int *out_type) | 1141 | unsigned int *out_type) |
| 995 | { | 1142 | { |
| 996 | unsigned int ciu, bit; | 1143 | unsigned int ciu, bit; |
| 1144 | struct octeon_irq_ciu_domain_data *dd = d->host_data; | ||
| 997 | 1145 | ||
| 998 | ciu = intspec[0]; | 1146 | ciu = intspec[0]; |
| 999 | bit = intspec[1]; | 1147 | bit = intspec[1]; |
| 1000 | 1148 | ||
| 1001 | if (ciu > 1 || bit > 63) | 1149 | if (ciu >= dd->num_sum || bit > 63) |
| 1002 | return -EINVAL; | 1150 | return -EINVAL; |
| 1003 | 1151 | ||
| 1004 | *out_hwirq = (ciu << 6) | bit; | 1152 | *out_hwirq = (ciu << 6) | bit; |
| @@ -1024,8 +1172,10 @@ static bool octeon_irq_virq_in_range(unsigned int virq) | |||
| 1024 | static int octeon_irq_ciu_map(struct irq_domain *d, | 1172 | static int octeon_irq_ciu_map(struct irq_domain *d, |
| 1025 | unsigned int virq, irq_hw_number_t hw) | 1173 | unsigned int virq, irq_hw_number_t hw) |
| 1026 | { | 1174 | { |
| 1175 | int rv; | ||
| 1027 | unsigned int line = hw >> 6; | 1176 | unsigned int line = hw >> 6; |
| 1028 | unsigned int bit = hw & 63; | 1177 | unsigned int bit = hw & 63; |
| 1178 | struct octeon_irq_ciu_domain_data *dd = d->host_data; | ||
| 1029 | 1179 | ||
| 1030 | if (!octeon_irq_virq_in_range(virq)) | 1180 | if (!octeon_irq_virq_in_range(virq)) |
| 1031 | return -EINVAL; | 1181 | return -EINVAL; |
| @@ -1034,54 +1184,61 @@ static int octeon_irq_ciu_map(struct irq_domain *d, | |||
| 1034 | if (line == 0 && bit >= 16 && bit <32) | 1184 | if (line == 0 && bit >= 16 && bit <32) |
| 1035 | return 0; | 1185 | return 0; |
| 1036 | 1186 | ||
| 1037 | if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0) | 1187 | if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0) |
| 1038 | return -EINVAL; | 1188 | return -EINVAL; |
| 1039 | 1189 | ||
| 1040 | if (octeon_irq_ciu_is_edge(line, bit)) | 1190 | if (line == 2) { |
| 1041 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, | 1191 | if (octeon_irq_ciu_is_edge(line, bit)) |
| 1042 | octeon_irq_ciu_chip_edge, | 1192 | rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, |
| 1043 | handle_edge_irq); | 1193 | &octeon_irq_chip_ciu_sum2_edge, |
| 1044 | else | 1194 | handle_edge_irq); |
| 1045 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, | 1195 | else |
| 1046 | octeon_irq_ciu_chip, | 1196 | rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, |
| 1047 | handle_level_irq); | 1197 | &octeon_irq_chip_ciu_sum2, |
| 1048 | 1198 | handle_level_irq); | |
| 1049 | return 0; | 1199 | } else { |
| 1200 | if (octeon_irq_ciu_is_edge(line, bit)) | ||
| 1201 | rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, | ||
| 1202 | octeon_irq_ciu_chip_edge, | ||
| 1203 | handle_edge_irq); | ||
| 1204 | else | ||
| 1205 | rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, | ||
| 1206 | octeon_irq_ciu_chip, | ||
| 1207 | handle_level_irq); | ||
| 1208 | } | ||
| 1209 | return rv; | ||
| 1050 | } | 1210 | } |
| 1051 | 1211 | ||
| 1052 | static int octeon_irq_gpio_map_common(struct irq_domain *d, | 1212 | static int octeon_irq_gpio_map(struct irq_domain *d, |
| 1053 | unsigned int virq, irq_hw_number_t hw, | 1213 | unsigned int virq, irq_hw_number_t hw) |
| 1054 | int line_limit, struct irq_chip *chip) | ||
| 1055 | { | 1214 | { |
| 1056 | struct octeon_irq_gpio_domain_data *gpiod = d->host_data; | 1215 | struct octeon_irq_gpio_domain_data *gpiod = d->host_data; |
| 1057 | unsigned int line, bit; | 1216 | unsigned int line, bit; |
| 1217 | int r; | ||
| 1058 | 1218 | ||
| 1059 | if (!octeon_irq_virq_in_range(virq)) | 1219 | if (!octeon_irq_virq_in_range(virq)) |
| 1060 | return -EINVAL; | 1220 | return -EINVAL; |
| 1061 | 1221 | ||
| 1062 | line = (hw + gpiod->base_hwirq) >> 6; | 1222 | line = (hw + gpiod->base_hwirq) >> 6; |
| 1063 | bit = (hw + gpiod->base_hwirq) & 63; | 1223 | bit = (hw + gpiod->base_hwirq) & 63; |
| 1064 | if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0) | 1224 | if (line > ARRAY_SIZE(octeon_irq_ciu_to_irq) || |
| 1225 | octeon_irq_ciu_to_irq[line][bit] != 0) | ||
| 1065 | return -EINVAL; | 1226 | return -EINVAL; |
| 1066 | 1227 | ||
| 1067 | octeon_irq_set_ciu_mapping(virq, line, bit, hw, | 1228 | r = octeon_irq_set_ciu_mapping(virq, line, bit, hw, |
| 1068 | chip, octeon_irq_handle_gpio); | 1229 | octeon_irq_gpio_chip, octeon_irq_handle_trigger); |
| 1069 | return 0; | 1230 | return r; |
| 1070 | } | ||
| 1071 | |||
| 1072 | static int octeon_irq_gpio_map(struct irq_domain *d, | ||
| 1073 | unsigned int virq, irq_hw_number_t hw) | ||
| 1074 | { | ||
| 1075 | return octeon_irq_gpio_map_common(d, virq, hw, 1, octeon_irq_gpio_chip); | ||
| 1076 | } | 1231 | } |
| 1077 | 1232 | ||
| 1078 | static struct irq_domain_ops octeon_irq_domain_ciu_ops = { | 1233 | static struct irq_domain_ops octeon_irq_domain_ciu_ops = { |
| 1079 | .map = octeon_irq_ciu_map, | 1234 | .map = octeon_irq_ciu_map, |
| 1235 | .unmap = octeon_irq_free_cd, | ||
| 1080 | .xlate = octeon_irq_ciu_xlat, | 1236 | .xlate = octeon_irq_ciu_xlat, |
| 1081 | }; | 1237 | }; |
| 1082 | 1238 | ||
| 1083 | static struct irq_domain_ops octeon_irq_domain_gpio_ops = { | 1239 | static struct irq_domain_ops octeon_irq_domain_gpio_ops = { |
| 1084 | .map = octeon_irq_gpio_map, | 1240 | .map = octeon_irq_gpio_map, |
| 1241 | .unmap = octeon_irq_free_cd, | ||
| 1085 | .xlate = octeon_irq_gpio_xlat, | 1242 | .xlate = octeon_irq_gpio_xlat, |
| 1086 | }; | 1243 | }; |
| 1087 | 1244 | ||
| @@ -1120,6 +1277,26 @@ static void octeon_irq_ip3_ciu(void) | |||
| 1120 | } | 1277 | } |
| 1121 | } | 1278 | } |
| 1122 | 1279 | ||
| 1280 | static void octeon_irq_ip4_ciu(void) | ||
| 1281 | { | ||
| 1282 | int coreid = cvmx_get_core_num(); | ||
| 1283 | u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid)); | ||
| 1284 | u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid)); | ||
| 1285 | |||
| 1286 | ciu_sum &= ciu_en; | ||
| 1287 | if (likely(ciu_sum)) { | ||
| 1288 | int bit = fls64(ciu_sum) - 1; | ||
| 1289 | int irq = octeon_irq_ciu_to_irq[2][bit]; | ||
| 1290 | |||
| 1291 | if (likely(irq)) | ||
| 1292 | do_IRQ(irq); | ||
| 1293 | else | ||
| 1294 | spurious_interrupt(); | ||
| 1295 | } else { | ||
| 1296 | spurious_interrupt(); | ||
| 1297 | } | ||
| 1298 | } | ||
| 1299 | |||
| 1123 | static bool octeon_irq_use_ip4; | 1300 | static bool octeon_irq_use_ip4; |
| 1124 | 1301 | ||
| 1125 | static void octeon_irq_local_enable_ip4(void *arg) | 1302 | static void octeon_irq_local_enable_ip4(void *arg) |
| @@ -1201,7 +1378,10 @@ static void octeon_irq_setup_secondary_ciu(void) | |||
| 1201 | 1378 | ||
| 1202 | /* Enable the CIU lines */ | 1379 | /* Enable the CIU lines */ |
| 1203 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); | 1380 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); |
| 1204 | clear_c0_status(STATUSF_IP4); | 1381 | if (octeon_irq_use_ip4) |
| 1382 | set_c0_status(STATUSF_IP4); | ||
| 1383 | else | ||
| 1384 | clear_c0_status(STATUSF_IP4); | ||
| 1205 | } | 1385 | } |
| 1206 | 1386 | ||
| 1207 | static void octeon_irq_setup_secondary_ciu2(void) | 1387 | static void octeon_irq_setup_secondary_ciu2(void) |
| @@ -1217,22 +1397,36 @@ static void octeon_irq_setup_secondary_ciu2(void) | |||
| 1217 | clear_c0_status(STATUSF_IP4); | 1397 | clear_c0_status(STATUSF_IP4); |
| 1218 | } | 1398 | } |
| 1219 | 1399 | ||
| 1220 | static void __init octeon_irq_init_ciu(void) | 1400 | static int __init octeon_irq_init_ciu( |
| 1401 | struct device_node *ciu_node, struct device_node *parent) | ||
| 1221 | { | 1402 | { |
| 1222 | unsigned int i; | 1403 | unsigned int i, r; |
| 1223 | struct irq_chip *chip; | 1404 | struct irq_chip *chip; |
| 1224 | struct irq_chip *chip_edge; | 1405 | struct irq_chip *chip_edge; |
| 1225 | struct irq_chip *chip_mbox; | 1406 | struct irq_chip *chip_mbox; |
| 1226 | struct irq_chip *chip_wd; | 1407 | struct irq_chip *chip_wd; |
| 1227 | struct device_node *gpio_node; | ||
| 1228 | struct device_node *ciu_node; | ||
| 1229 | struct irq_domain *ciu_domain = NULL; | 1408 | struct irq_domain *ciu_domain = NULL; |
| 1409 | struct octeon_irq_ciu_domain_data *dd; | ||
| 1410 | |||
| 1411 | dd = kzalloc(sizeof(*dd), GFP_KERNEL); | ||
| 1412 | if (!dd) | ||
| 1413 | return -ENOMEM; | ||
| 1230 | 1414 | ||
| 1231 | octeon_irq_init_ciu_percpu(); | 1415 | octeon_irq_init_ciu_percpu(); |
| 1232 | octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu; | 1416 | octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu; |
| 1233 | 1417 | ||
| 1234 | octeon_irq_ip2 = octeon_irq_ip2_ciu; | 1418 | octeon_irq_ip2 = octeon_irq_ip2_ciu; |
| 1235 | octeon_irq_ip3 = octeon_irq_ip3_ciu; | 1419 | octeon_irq_ip3 = octeon_irq_ip3_ciu; |
| 1420 | if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) | ||
| 1421 | && !OCTEON_IS_MODEL(OCTEON_CN63XX)) { | ||
| 1422 | octeon_irq_ip4 = octeon_irq_ip4_ciu; | ||
| 1423 | dd->num_sum = 3; | ||
| 1424 | octeon_irq_use_ip4 = true; | ||
| 1425 | } else { | ||
| 1426 | octeon_irq_ip4 = octeon_irq_ip4_mask; | ||
| 1427 | dd->num_sum = 2; | ||
| 1428 | octeon_irq_use_ip4 = false; | ||
| 1429 | } | ||
| 1236 | if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) || | 1430 | if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) || |
| 1237 | OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || | 1431 | OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || |
| 1238 | OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) || | 1432 | OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) || |
| @@ -1251,65 +1445,146 @@ static void __init octeon_irq_init_ciu(void) | |||
| 1251 | } | 1445 | } |
| 1252 | octeon_irq_ciu_chip = chip; | 1446 | octeon_irq_ciu_chip = chip; |
| 1253 | octeon_irq_ciu_chip_edge = chip_edge; | 1447 | octeon_irq_ciu_chip_edge = chip_edge; |
| 1254 | octeon_irq_ip4 = octeon_irq_ip4_mask; | ||
| 1255 | 1448 | ||
| 1256 | /* Mips internal */ | 1449 | /* Mips internal */ |
| 1257 | octeon_irq_init_core(); | 1450 | octeon_irq_init_core(); |
| 1258 | 1451 | ||
| 1259 | gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio"); | 1452 | ciu_domain = irq_domain_add_tree( |
| 1260 | if (gpio_node) { | 1453 | ciu_node, &octeon_irq_domain_ciu_ops, dd); |
| 1261 | struct octeon_irq_gpio_domain_data *gpiod; | 1454 | irq_set_default_host(ciu_domain); |
| 1262 | |||
| 1263 | gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL); | ||
| 1264 | if (gpiod) { | ||
| 1265 | /* gpio domain host_data is the base hwirq number. */ | ||
| 1266 | gpiod->base_hwirq = 16; | ||
| 1267 | irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod); | ||
| 1268 | of_node_put(gpio_node); | ||
| 1269 | } else | ||
| 1270 | pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); | ||
| 1271 | } else | ||
| 1272 | pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n"); | ||
| 1273 | |||
| 1274 | ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu"); | ||
| 1275 | if (ciu_node) { | ||
| 1276 | ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL); | ||
| 1277 | irq_set_default_host(ciu_domain); | ||
| 1278 | of_node_put(ciu_node); | ||
| 1279 | } else | ||
| 1280 | panic("Cannot find device node for cavium,octeon-3860-ciu."); | ||
| 1281 | 1455 | ||
| 1282 | /* CIU_0 */ | 1456 | /* CIU_0 */ |
| 1283 | for (i = 0; i < 16; i++) | 1457 | for (i = 0; i < 16; i++) { |
| 1284 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0); | 1458 | r = octeon_irq_force_ciu_mapping( |
| 1459 | ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0); | ||
| 1460 | if (r) | ||
| 1461 | goto err; | ||
| 1462 | } | ||
| 1463 | |||
| 1464 | r = octeon_irq_set_ciu_mapping( | ||
| 1465 | OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq); | ||
| 1466 | if (r) | ||
| 1467 | goto err; | ||
| 1468 | r = octeon_irq_set_ciu_mapping( | ||
| 1469 | OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq); | ||
| 1470 | if (r) | ||
| 1471 | goto err; | ||
| 1472 | |||
| 1473 | for (i = 0; i < 4; i++) { | ||
| 1474 | r = octeon_irq_force_ciu_mapping( | ||
| 1475 | ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36); | ||
| 1476 | if (r) | ||
| 1477 | goto err; | ||
| 1478 | } | ||
| 1479 | for (i = 0; i < 4; i++) { | ||
| 1480 | r = octeon_irq_force_ciu_mapping( | ||
| 1481 | ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40); | ||
| 1482 | if (r) | ||
| 1483 | goto err; | ||
| 1484 | } | ||
| 1285 | 1485 | ||
| 1286 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq); | 1486 | r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45); |
| 1287 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq); | 1487 | if (r) |
| 1488 | goto err; | ||
| 1288 | 1489 | ||
| 1289 | for (i = 0; i < 4; i++) | 1490 | r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46); |
| 1290 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36); | 1491 | if (r) |
| 1291 | for (i = 0; i < 4; i++) | 1492 | goto err; |
| 1292 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40); | ||
| 1293 | 1493 | ||
| 1294 | octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45); | 1494 | for (i = 0; i < 4; i++) { |
| 1295 | octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46); | 1495 | r = octeon_irq_force_ciu_mapping( |
| 1296 | for (i = 0; i < 4; i++) | 1496 | ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52); |
| 1297 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52); | 1497 | if (r) |
| 1498 | goto err; | ||
| 1499 | } | ||
| 1500 | |||
| 1501 | r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56); | ||
| 1502 | if (r) | ||
| 1503 | goto err; | ||
| 1298 | 1504 | ||
| 1299 | octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56); | 1505 | r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59); |
| 1300 | octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59); | 1506 | if (r) |
| 1507 | goto err; | ||
| 1301 | 1508 | ||
| 1302 | /* CIU_1 */ | 1509 | /* CIU_1 */ |
| 1303 | for (i = 0; i < 16; i++) | 1510 | for (i = 0; i < 16; i++) { |
| 1304 | octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd, handle_level_irq); | 1511 | r = octeon_irq_set_ciu_mapping( |
| 1512 | i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd, | ||
| 1513 | handle_level_irq); | ||
| 1514 | if (r) | ||
| 1515 | goto err; | ||
| 1516 | } | ||
| 1305 | 1517 | ||
| 1306 | octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17); | 1518 | r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17); |
| 1519 | if (r) | ||
| 1520 | goto err; | ||
| 1307 | 1521 | ||
| 1308 | /* Enable the CIU lines */ | 1522 | /* Enable the CIU lines */ |
| 1309 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); | 1523 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); |
| 1310 | clear_c0_status(STATUSF_IP4); | 1524 | if (octeon_irq_use_ip4) |
| 1525 | set_c0_status(STATUSF_IP4); | ||
| 1526 | else | ||
| 1527 | clear_c0_status(STATUSF_IP4); | ||
| 1528 | |||
| 1529 | return 0; | ||
| 1530 | err: | ||
| 1531 | return r; | ||
| 1311 | } | 1532 | } |
| 1312 | 1533 | ||
| 1534 | static int __init octeon_irq_init_gpio( | ||
| 1535 | struct device_node *gpio_node, struct device_node *parent) | ||
| 1536 | { | ||
| 1537 | struct octeon_irq_gpio_domain_data *gpiod; | ||
| 1538 | u32 interrupt_cells; | ||
| 1539 | unsigned int base_hwirq; | ||
| 1540 | int r; | ||
| 1541 | |||
| 1542 | r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells); | ||
| 1543 | if (r) | ||
| 1544 | return r; | ||
| 1545 | |||
| 1546 | if (interrupt_cells == 1) { | ||
| 1547 | u32 v; | ||
| 1548 | |||
| 1549 | r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v); | ||
| 1550 | if (r) { | ||
| 1551 | pr_warn("No \"interrupts\" property.\n"); | ||
| 1552 | return r; | ||
| 1553 | } | ||
| 1554 | base_hwirq = v; | ||
| 1555 | } else if (interrupt_cells == 2) { | ||
| 1556 | u32 v0, v1; | ||
| 1557 | |||
| 1558 | r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0); | ||
| 1559 | if (r) { | ||
| 1560 | pr_warn("No \"interrupts\" property.\n"); | ||
| 1561 | return r; | ||
| 1562 | } | ||
| 1563 | r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1); | ||
| 1564 | if (r) { | ||
| 1565 | pr_warn("No \"interrupts\" property.\n"); | ||
| 1566 | return r; | ||
| 1567 | } | ||
| 1568 | base_hwirq = (v0 << 6) | v1; | ||
| 1569 | } else { | ||
| 1570 | pr_warn("Bad \"#interrupt-cells\" property: %u\n", | ||
| 1571 | interrupt_cells); | ||
| 1572 | return -EINVAL; | ||
| 1573 | } | ||
| 1574 | |||
| 1575 | gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL); | ||
| 1576 | if (gpiod) { | ||
| 1577 | /* gpio domain host_data is the base hwirq number. */ | ||
| 1578 | gpiod->base_hwirq = base_hwirq; | ||
| 1579 | irq_domain_add_linear( | ||
| 1580 | gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod); | ||
| 1581 | } else { | ||
| 1582 | pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); | ||
| 1583 | return -ENOMEM; | ||
| 1584 | } | ||
| 1585 | |||
| 1586 | return 0; | ||
| 1587 | } | ||
| 1313 | /* | 1588 | /* |
| 1314 | * Watchdog interrupts are special. They are associated with a single | 1589 | * Watchdog interrupts are special. They are associated with a single |
| 1315 | * core, so we hardwire the affinity to that core. | 1590 | * core, so we hardwire the affinity to that core. |
| @@ -1319,12 +1594,13 @@ static void octeon_irq_ciu2_wd_enable(struct irq_data *data) | |||
| 1319 | u64 mask; | 1594 | u64 mask; |
| 1320 | u64 en_addr; | 1595 | u64 en_addr; |
| 1321 | int coreid = data->irq - OCTEON_IRQ_WDOG0; | 1596 | int coreid = data->irq - OCTEON_IRQ_WDOG0; |
| 1322 | union octeon_ciu_chip_data cd; | 1597 | struct octeon_ciu_chip_data *cd; |
| 1323 | 1598 | ||
| 1324 | cd.p = irq_data_get_irq_chip_data(data); | 1599 | cd = irq_data_get_irq_chip_data(data); |
| 1325 | mask = 1ull << (cd.s.bit); | 1600 | mask = 1ull << (cd->bit); |
| 1326 | 1601 | ||
| 1327 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); | 1602 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + |
| 1603 | (0x1000ull * cd->line); | ||
| 1328 | cvmx_write_csr(en_addr, mask); | 1604 | cvmx_write_csr(en_addr, mask); |
| 1329 | 1605 | ||
| 1330 | } | 1606 | } |
| @@ -1335,12 +1611,13 @@ static void octeon_irq_ciu2_enable(struct irq_data *data) | |||
| 1335 | u64 en_addr; | 1611 | u64 en_addr; |
| 1336 | int cpu = next_cpu_for_irq(data); | 1612 | int cpu = next_cpu_for_irq(data); |
| 1337 | int coreid = octeon_coreid_for_cpu(cpu); | 1613 | int coreid = octeon_coreid_for_cpu(cpu); |
| 1338 | union octeon_ciu_chip_data cd; | 1614 | struct octeon_ciu_chip_data *cd; |
| 1339 | 1615 | ||
| 1340 | cd.p = irq_data_get_irq_chip_data(data); | 1616 | cd = irq_data_get_irq_chip_data(data); |
| 1341 | mask = 1ull << (cd.s.bit); | 1617 | mask = 1ull << (cd->bit); |
| 1342 | 1618 | ||
| 1343 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); | 1619 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + |
| 1620 | (0x1000ull * cd->line); | ||
| 1344 | cvmx_write_csr(en_addr, mask); | 1621 | cvmx_write_csr(en_addr, mask); |
| 1345 | } | 1622 | } |
| 1346 | 1623 | ||
| @@ -1349,12 +1626,13 @@ static void octeon_irq_ciu2_enable_local(struct irq_data *data) | |||
| 1349 | u64 mask; | 1626 | u64 mask; |
| 1350 | u64 en_addr; | 1627 | u64 en_addr; |
| 1351 | int coreid = cvmx_get_core_num(); | 1628 | int coreid = cvmx_get_core_num(); |
| 1352 | union octeon_ciu_chip_data cd; | 1629 | struct octeon_ciu_chip_data *cd; |
| 1353 | 1630 | ||
| 1354 | cd.p = irq_data_get_irq_chip_data(data); | 1631 | cd = irq_data_get_irq_chip_data(data); |
| 1355 | mask = 1ull << (cd.s.bit); | 1632 | mask = 1ull << (cd->bit); |
| 1356 | 1633 | ||
| 1357 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); | 1634 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + |
| 1635 | (0x1000ull * cd->line); | ||
| 1358 | cvmx_write_csr(en_addr, mask); | 1636 | cvmx_write_csr(en_addr, mask); |
| 1359 | 1637 | ||
| 1360 | } | 1638 | } |
| @@ -1364,12 +1642,13 @@ static void octeon_irq_ciu2_disable_local(struct irq_data *data) | |||
| 1364 | u64 mask; | 1642 | u64 mask; |
| 1365 | u64 en_addr; | 1643 | u64 en_addr; |
| 1366 | int coreid = cvmx_get_core_num(); | 1644 | int coreid = cvmx_get_core_num(); |
| 1367 | union octeon_ciu_chip_data cd; | 1645 | struct octeon_ciu_chip_data *cd; |
| 1368 | 1646 | ||
| 1369 | cd.p = irq_data_get_irq_chip_data(data); | 1647 | cd = irq_data_get_irq_chip_data(data); |
| 1370 | mask = 1ull << (cd.s.bit); | 1648 | mask = 1ull << (cd->bit); |
| 1371 | 1649 | ||
| 1372 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + (0x1000ull * cd.s.line); | 1650 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + |
| 1651 | (0x1000ull * cd->line); | ||
| 1373 | cvmx_write_csr(en_addr, mask); | 1652 | cvmx_write_csr(en_addr, mask); |
| 1374 | 1653 | ||
| 1375 | } | 1654 | } |
| @@ -1379,12 +1658,12 @@ static void octeon_irq_ciu2_ack(struct irq_data *data) | |||
| 1379 | u64 mask; | 1658 | u64 mask; |
| 1380 | u64 en_addr; | 1659 | u64 en_addr; |
| 1381 | int coreid = cvmx_get_core_num(); | 1660 | int coreid = cvmx_get_core_num(); |
| 1382 | union octeon_ciu_chip_data cd; | 1661 | struct octeon_ciu_chip_data *cd; |
| 1383 | 1662 | ||
| 1384 | cd.p = irq_data_get_irq_chip_data(data); | 1663 | cd = irq_data_get_irq_chip_data(data); |
| 1385 | mask = 1ull << (cd.s.bit); | 1664 | mask = 1ull << (cd->bit); |
| 1386 | 1665 | ||
| 1387 | en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd.s.line); | 1666 | en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line); |
| 1388 | cvmx_write_csr(en_addr, mask); | 1667 | cvmx_write_csr(en_addr, mask); |
| 1389 | 1668 | ||
| 1390 | } | 1669 | } |
| @@ -1393,13 +1672,14 @@ static void octeon_irq_ciu2_disable_all(struct irq_data *data) | |||
| 1393 | { | 1672 | { |
| 1394 | int cpu; | 1673 | int cpu; |
| 1395 | u64 mask; | 1674 | u64 mask; |
| 1396 | union octeon_ciu_chip_data cd; | 1675 | struct octeon_ciu_chip_data *cd; |
| 1397 | 1676 | ||
| 1398 | cd.p = irq_data_get_irq_chip_data(data); | 1677 | cd = irq_data_get_irq_chip_data(data); |
| 1399 | mask = 1ull << (cd.s.bit); | 1678 | mask = 1ull << (cd->bit); |
| 1400 | 1679 | ||
| 1401 | for_each_online_cpu(cpu) { | 1680 | for_each_online_cpu(cpu) { |
| 1402 | u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); | 1681 | u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C( |
| 1682 | octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line); | ||
| 1403 | cvmx_write_csr(en_addr, mask); | 1683 | cvmx_write_csr(en_addr, mask); |
| 1404 | } | 1684 | } |
| 1405 | } | 1685 | } |
| @@ -1412,7 +1692,8 @@ static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data) | |||
| 1412 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); | 1692 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); |
| 1413 | 1693 | ||
| 1414 | for_each_online_cpu(cpu) { | 1694 | for_each_online_cpu(cpu) { |
| 1415 | u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(octeon_coreid_for_cpu(cpu)); | 1695 | u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S( |
| 1696 | octeon_coreid_for_cpu(cpu)); | ||
| 1416 | cvmx_write_csr(en_addr, mask); | 1697 | cvmx_write_csr(en_addr, mask); |
| 1417 | } | 1698 | } |
| 1418 | } | 1699 | } |
| @@ -1425,7 +1706,8 @@ static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data) | |||
| 1425 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); | 1706 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); |
| 1426 | 1707 | ||
| 1427 | for_each_online_cpu(cpu) { | 1708 | for_each_online_cpu(cpu) { |
| 1428 | u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(octeon_coreid_for_cpu(cpu)); | 1709 | u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C( |
| 1710 | octeon_coreid_for_cpu(cpu)); | ||
| 1429 | cvmx_write_csr(en_addr, mask); | 1711 | cvmx_write_csr(en_addr, mask); |
| 1430 | } | 1712 | } |
| 1431 | } | 1713 | } |
| @@ -1459,21 +1741,25 @@ static int octeon_irq_ciu2_set_affinity(struct irq_data *data, | |||
| 1459 | int cpu; | 1741 | int cpu; |
| 1460 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); | 1742 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); |
| 1461 | u64 mask; | 1743 | u64 mask; |
| 1462 | union octeon_ciu_chip_data cd; | 1744 | struct octeon_ciu_chip_data *cd; |
| 1463 | 1745 | ||
| 1464 | if (!enable_one) | 1746 | if (!enable_one) |
| 1465 | return 0; | 1747 | return 0; |
| 1466 | 1748 | ||
| 1467 | cd.p = irq_data_get_irq_chip_data(data); | 1749 | cd = irq_data_get_irq_chip_data(data); |
| 1468 | mask = 1ull << cd.s.bit; | 1750 | mask = 1ull << cd->bit; |
| 1469 | 1751 | ||
| 1470 | for_each_online_cpu(cpu) { | 1752 | for_each_online_cpu(cpu) { |
| 1471 | u64 en_addr; | 1753 | u64 en_addr; |
| 1472 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | 1754 | if (cpumask_test_cpu(cpu, dest) && enable_one) { |
| 1473 | enable_one = false; | 1755 | enable_one = false; |
| 1474 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); | 1756 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S( |
| 1757 | octeon_coreid_for_cpu(cpu)) + | ||
| 1758 | (0x1000ull * cd->line); | ||
| 1475 | } else { | 1759 | } else { |
| 1476 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); | 1760 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C( |
| 1761 | octeon_coreid_for_cpu(cpu)) + | ||
| 1762 | (0x1000ull * cd->line); | ||
| 1477 | } | 1763 | } |
| 1478 | cvmx_write_csr(en_addr, mask); | 1764 | cvmx_write_csr(en_addr, mask); |
| 1479 | } | 1765 | } |
| @@ -1490,10 +1776,11 @@ static void octeon_irq_ciu2_enable_gpio(struct irq_data *data) | |||
| 1490 | 1776 | ||
| 1491 | static void octeon_irq_ciu2_disable_gpio(struct irq_data *data) | 1777 | static void octeon_irq_ciu2_disable_gpio(struct irq_data *data) |
| 1492 | { | 1778 | { |
| 1493 | union octeon_ciu_chip_data cd; | 1779 | struct octeon_ciu_chip_data *cd; |
| 1494 | cd.p = irq_data_get_irq_chip_data(data); | ||
| 1495 | 1780 | ||
| 1496 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); | 1781 | cd = irq_data_get_irq_chip_data(data); |
| 1782 | |||
| 1783 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0); | ||
| 1497 | 1784 | ||
| 1498 | octeon_irq_ciu2_disable_all(data); | 1785 | octeon_irq_ciu2_disable_all(data); |
| 1499 | } | 1786 | } |
| @@ -1632,22 +1919,13 @@ static int octeon_irq_ciu2_map(struct irq_domain *d, | |||
| 1632 | 1919 | ||
| 1633 | return 0; | 1920 | return 0; |
| 1634 | } | 1921 | } |
| 1635 | static int octeon_irq_ciu2_gpio_map(struct irq_domain *d, | ||
| 1636 | unsigned int virq, irq_hw_number_t hw) | ||
| 1637 | { | ||
| 1638 | return octeon_irq_gpio_map_common(d, virq, hw, 7, &octeon_irq_chip_ciu2_gpio); | ||
| 1639 | } | ||
| 1640 | 1922 | ||
| 1641 | static struct irq_domain_ops octeon_irq_domain_ciu2_ops = { | 1923 | static struct irq_domain_ops octeon_irq_domain_ciu2_ops = { |
| 1642 | .map = octeon_irq_ciu2_map, | 1924 | .map = octeon_irq_ciu2_map, |
| 1925 | .unmap = octeon_irq_free_cd, | ||
| 1643 | .xlate = octeon_irq_ciu2_xlat, | 1926 | .xlate = octeon_irq_ciu2_xlat, |
| 1644 | }; | 1927 | }; |
| 1645 | 1928 | ||
| 1646 | static struct irq_domain_ops octeon_irq_domain_ciu2_gpio_ops = { | ||
| 1647 | .map = octeon_irq_ciu2_gpio_map, | ||
| 1648 | .xlate = octeon_irq_gpio_xlat, | ||
| 1649 | }; | ||
| 1650 | |||
| 1651 | static void octeon_irq_ciu2(void) | 1929 | static void octeon_irq_ciu2(void) |
| 1652 | { | 1930 | { |
| 1653 | int line; | 1931 | int line; |
| @@ -1715,16 +1993,16 @@ out: | |||
| 1715 | return; | 1993 | return; |
| 1716 | } | 1994 | } |
| 1717 | 1995 | ||
| 1718 | static void __init octeon_irq_init_ciu2(void) | 1996 | static int __init octeon_irq_init_ciu2( |
| 1997 | struct device_node *ciu_node, struct device_node *parent) | ||
| 1719 | { | 1998 | { |
| 1720 | unsigned int i; | 1999 | unsigned int i, r; |
| 1721 | struct device_node *gpio_node; | ||
| 1722 | struct device_node *ciu_node; | ||
| 1723 | struct irq_domain *ciu_domain = NULL; | 2000 | struct irq_domain *ciu_domain = NULL; |
| 1724 | 2001 | ||
| 1725 | octeon_irq_init_ciu2_percpu(); | 2002 | octeon_irq_init_ciu2_percpu(); |
| 1726 | octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2; | 2003 | octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2; |
| 1727 | 2004 | ||
| 2005 | octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio; | ||
| 1728 | octeon_irq_ip2 = octeon_irq_ciu2; | 2006 | octeon_irq_ip2 = octeon_irq_ciu2; |
| 1729 | octeon_irq_ip3 = octeon_irq_ciu2_mbox; | 2007 | octeon_irq_ip3 = octeon_irq_ciu2_mbox; |
| 1730 | octeon_irq_ip4 = octeon_irq_ip4_mask; | 2008 | octeon_irq_ip4 = octeon_irq_ip4_mask; |
| @@ -1732,47 +2010,49 @@ static void __init octeon_irq_init_ciu2(void) | |||
| 1732 | /* Mips internal */ | 2010 | /* Mips internal */ |
| 1733 | octeon_irq_init_core(); | 2011 | octeon_irq_init_core(); |
| 1734 | 2012 | ||
| 1735 | gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio"); | 2013 | ciu_domain = irq_domain_add_tree( |
| 1736 | if (gpio_node) { | 2014 | ciu_node, &octeon_irq_domain_ciu2_ops, NULL); |
| 1737 | struct octeon_irq_gpio_domain_data *gpiod; | 2015 | irq_set_default_host(ciu_domain); |
| 1738 | |||
| 1739 | gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL); | ||
| 1740 | if (gpiod) { | ||
| 1741 | /* gpio domain host_data is the base hwirq number. */ | ||
| 1742 | gpiod->base_hwirq = 7 << 6; | ||
| 1743 | irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_ciu2_gpio_ops, gpiod); | ||
| 1744 | of_node_put(gpio_node); | ||
| 1745 | } else | ||
| 1746 | pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); | ||
| 1747 | } else | ||
| 1748 | pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n"); | ||
| 1749 | |||
| 1750 | ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-6880-ciu2"); | ||
| 1751 | if (ciu_node) { | ||
| 1752 | ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu2_ops, NULL); | ||
| 1753 | irq_set_default_host(ciu_domain); | ||
| 1754 | of_node_put(ciu_node); | ||
| 1755 | } else | ||
| 1756 | panic("Cannot find device node for cavium,octeon-6880-ciu2."); | ||
| 1757 | 2016 | ||
| 1758 | /* CIU2 */ | 2017 | /* CIU2 */ |
| 1759 | for (i = 0; i < 64; i++) | 2018 | for (i = 0; i < 64; i++) { |
| 1760 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i); | 2019 | r = octeon_irq_force_ciu_mapping( |
| 2020 | ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i); | ||
| 2021 | if (r) | ||
| 2022 | goto err; | ||
| 2023 | } | ||
| 1761 | 2024 | ||
| 1762 | for (i = 0; i < 32; i++) | 2025 | for (i = 0; i < 32; i++) { |
| 1763 | octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0, | 2026 | r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0, |
| 1764 | &octeon_irq_chip_ciu2_wd, handle_level_irq); | 2027 | &octeon_irq_chip_ciu2_wd, handle_level_irq); |
| 2028 | if (r) | ||
| 2029 | goto err; | ||
| 2030 | } | ||
| 1765 | 2031 | ||
| 1766 | for (i = 0; i < 4; i++) | 2032 | for (i = 0; i < 4; i++) { |
| 1767 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8); | 2033 | r = octeon_irq_force_ciu_mapping( |
| 2034 | ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8); | ||
| 2035 | if (r) | ||
| 2036 | goto err; | ||
| 2037 | } | ||
| 1768 | 2038 | ||
| 1769 | octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44); | 2039 | r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44); |
| 2040 | if (r) | ||
| 2041 | goto err; | ||
| 1770 | 2042 | ||
| 1771 | for (i = 0; i < 4; i++) | 2043 | for (i = 0; i < 4; i++) { |
| 1772 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i); | 2044 | r = octeon_irq_force_ciu_mapping( |
| 2045 | ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i); | ||
| 2046 | if (r) | ||
| 2047 | goto err; | ||
| 2048 | } | ||
| 1773 | 2049 | ||
| 1774 | for (i = 0; i < 4; i++) | 2050 | for (i = 0; i < 4; i++) { |
| 1775 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8); | 2051 | r = octeon_irq_force_ciu_mapping( |
| 2052 | ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8); | ||
| 2053 | if (r) | ||
| 2054 | goto err; | ||
| 2055 | } | ||
| 1776 | 2056 | ||
| 1777 | irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); | 2057 | irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); |
| 1778 | irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); | 2058 | irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); |
| @@ -1782,8 +2062,242 @@ static void __init octeon_irq_init_ciu2(void) | |||
| 1782 | /* Enable the CIU lines */ | 2062 | /* Enable the CIU lines */ |
| 1783 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); | 2063 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); |
| 1784 | clear_c0_status(STATUSF_IP4); | 2064 | clear_c0_status(STATUSF_IP4); |
| 2065 | return 0; | ||
| 2066 | err: | ||
| 2067 | return r; | ||
| 2068 | } | ||
| 2069 | |||
| 2070 | struct octeon_irq_cib_host_data { | ||
| 2071 | raw_spinlock_t lock; | ||
| 2072 | u64 raw_reg; | ||
| 2073 | u64 en_reg; | ||
| 2074 | int max_bits; | ||
| 2075 | }; | ||
| 2076 | |||
| 2077 | struct octeon_irq_cib_chip_data { | ||
| 2078 | struct octeon_irq_cib_host_data *host_data; | ||
| 2079 | int bit; | ||
| 2080 | }; | ||
| 2081 | |||
| 2082 | static void octeon_irq_cib_enable(struct irq_data *data) | ||
| 2083 | { | ||
| 2084 | unsigned long flags; | ||
| 2085 | u64 en; | ||
| 2086 | struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data); | ||
| 2087 | struct octeon_irq_cib_host_data *host_data = cd->host_data; | ||
| 2088 | |||
| 2089 | raw_spin_lock_irqsave(&host_data->lock, flags); | ||
| 2090 | en = cvmx_read_csr(host_data->en_reg); | ||
| 2091 | en |= 1ull << cd->bit; | ||
| 2092 | cvmx_write_csr(host_data->en_reg, en); | ||
| 2093 | raw_spin_unlock_irqrestore(&host_data->lock, flags); | ||
| 1785 | } | 2094 | } |
| 1786 | 2095 | ||
| 2096 | static void octeon_irq_cib_disable(struct irq_data *data) | ||
| 2097 | { | ||
| 2098 | unsigned long flags; | ||
| 2099 | u64 en; | ||
| 2100 | struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data); | ||
| 2101 | struct octeon_irq_cib_host_data *host_data = cd->host_data; | ||
| 2102 | |||
| 2103 | raw_spin_lock_irqsave(&host_data->lock, flags); | ||
| 2104 | en = cvmx_read_csr(host_data->en_reg); | ||
| 2105 | en &= ~(1ull << cd->bit); | ||
| 2106 | cvmx_write_csr(host_data->en_reg, en); | ||
| 2107 | raw_spin_unlock_irqrestore(&host_data->lock, flags); | ||
| 2108 | } | ||
| 2109 | |||
| 2110 | static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t) | ||
| 2111 | { | ||
| 2112 | irqd_set_trigger_type(data, t); | ||
| 2113 | return IRQ_SET_MASK_OK; | ||
| 2114 | } | ||
| 2115 | |||
| 2116 | static struct irq_chip octeon_irq_chip_cib = { | ||
| 2117 | .name = "CIB", | ||
| 2118 | .irq_enable = octeon_irq_cib_enable, | ||
| 2119 | .irq_disable = octeon_irq_cib_disable, | ||
| 2120 | .irq_mask = octeon_irq_cib_disable, | ||
| 2121 | .irq_unmask = octeon_irq_cib_enable, | ||
| 2122 | .irq_set_type = octeon_irq_cib_set_type, | ||
| 2123 | }; | ||
| 2124 | |||
| 2125 | static int octeon_irq_cib_xlat(struct irq_domain *d, | ||
| 2126 | struct device_node *node, | ||
| 2127 | const u32 *intspec, | ||
| 2128 | unsigned int intsize, | ||
| 2129 | unsigned long *out_hwirq, | ||
| 2130 | unsigned int *out_type) | ||
| 2131 | { | ||
| 2132 | unsigned int type = 0; | ||
| 2133 | |||
| 2134 | if (intsize == 2) | ||
| 2135 | type = intspec[1]; | ||
| 2136 | |||
| 2137 | switch (type) { | ||
| 2138 | case 0: /* unofficial value, but we might as well let it work. */ | ||
| 2139 | case 4: /* official value for level triggering. */ | ||
| 2140 | *out_type = IRQ_TYPE_LEVEL_HIGH; | ||
| 2141 | break; | ||
| 2142 | case 1: /* official value for edge triggering. */ | ||
| 2143 | *out_type = IRQ_TYPE_EDGE_RISING; | ||
| 2144 | break; | ||
| 2145 | default: /* Nothing else is acceptable. */ | ||
| 2146 | return -EINVAL; | ||
| 2147 | } | ||
| 2148 | |||
| 2149 | *out_hwirq = intspec[0]; | ||
| 2150 | |||
| 2151 | return 0; | ||
| 2152 | } | ||
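For illustration, the decoding above can be exercised with made-up two-cell specifiers; the helper below only mirrors the xlate logic shown in this hunk and is not part of the driver.

	/* Simplified mirror of octeon_irq_cib_xlat(), illustration only:
	 *   <9 4> -> hwirq 9, IRQ_TYPE_LEVEL_HIGH
	 *   <3 1> -> hwirq 3, IRQ_TYPE_EDGE_RISING
	 *   <3 8> -> -EINVAL (active-low and falling-edge are rejected here)
	 */
	static int cib_xlat_example(const u32 spec[2], unsigned long *hwirq,
				    unsigned int *type)
	{
		*hwirq = spec[0];
		switch (spec[1]) {
		case 0:
		case 4:
			*type = IRQ_TYPE_LEVEL_HIGH;
			return 0;
		case 1:
			*type = IRQ_TYPE_EDGE_RISING;
			return 0;
		default:
			return -EINVAL;
		}
	}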
| 2153 | |||
| 2154 | static int octeon_irq_cib_map(struct irq_domain *d, | ||
| 2155 | unsigned int virq, irq_hw_number_t hw) | ||
| 2156 | { | ||
| 2157 | struct octeon_irq_cib_host_data *host_data = d->host_data; | ||
| 2158 | struct octeon_irq_cib_chip_data *cd; | ||
| 2159 | |||
| 2160 | if (hw >= host_data->max_bits) { | ||
| 2161 | pr_err("ERROR: %s mapping %u is to big!\n", | ||
| 2162 | d->of_node->name, (unsigned)hw); | ||
| 2163 | return -EINVAL; | ||
| 2164 | } | ||
| 2165 | |||
| 2166 | cd = kzalloc(sizeof(*cd), GFP_KERNEL); | ||
| 2167 | cd->host_data = host_data; | ||
| 2168 | cd->bit = hw; | ||
| 2169 | |||
| 2170 | irq_set_chip_and_handler(virq, &octeon_irq_chip_cib, | ||
| 2171 | handle_simple_irq); | ||
| 2172 | irq_set_chip_data(virq, cd); | ||
| 2173 | return 0; | ||
| 2174 | } | ||
| 2175 | |||
| 2176 | static struct irq_domain_ops octeon_irq_domain_cib_ops = { | ||
| 2177 | .map = octeon_irq_cib_map, | ||
| 2178 | .unmap = octeon_irq_free_cd, | ||
| 2179 | .xlate = octeon_irq_cib_xlat, | ||
| 2180 | }; | ||
| 2181 | |||
| 2182 | /* Chain to real handler. */ | ||
| 2183 | static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data) | ||
| 2184 | { | ||
| 2185 | u64 en; | ||
| 2186 | u64 raw; | ||
| 2187 | u64 bits; | ||
| 2188 | int i; | ||
| 2189 | int irq; | ||
| 2190 | struct irq_domain *cib_domain = data; | ||
| 2191 | struct octeon_irq_cib_host_data *host_data = cib_domain->host_data; | ||
| 2192 | |||
| 2193 | en = cvmx_read_csr(host_data->en_reg); | ||
| 2194 | raw = cvmx_read_csr(host_data->raw_reg); | ||
| 2195 | |||
| 2196 | bits = en & raw; | ||
| 2197 | |||
| 2198 | for (i = 0; i < host_data->max_bits; i++) { | ||
| 2199 | if ((bits & 1ull << i) == 0) | ||
| 2200 | continue; | ||
| 2201 | irq = irq_find_mapping(cib_domain, i); | ||
| 2202 | if (!irq) { | ||
| 2203 | unsigned long flags; | ||
| 2204 | |||
| 2205 | pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n", | ||
| 2206 | i, host_data->raw_reg); | ||
| 2207 | raw_spin_lock_irqsave(&host_data->lock, flags); | ||
| 2208 | en = cvmx_read_csr(host_data->en_reg); | ||
| 2209 | en &= ~(1ull << i); | ||
| 2210 | cvmx_write_csr(host_data->en_reg, en); | ||
| 2211 | cvmx_write_csr(host_data->raw_reg, 1ull << i); | ||
| 2212 | raw_spin_unlock_irqrestore(&host_data->lock, flags); | ||
| 2213 | } else { | ||
| 2214 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 2215 | struct irq_data *irq_data = irq_desc_get_irq_data(desc); | ||
| 2216 | /* If edge, acknowledge the bit we will be sending. */ | ||
| 2217 | if (irqd_get_trigger_type(irq_data) & | ||
| 2218 | IRQ_TYPE_EDGE_BOTH) | ||
| 2219 | cvmx_write_csr(host_data->raw_reg, 1ull << i); | ||
| 2220 | generic_handle_irq_desc(irq, desc); | ||
| 2221 | } | ||
| 2222 | } | ||
| 2223 | |||
| 2224 | return IRQ_HANDLED; | ||
| 2225 | } | ||
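A small worked example of the "en & raw" scan in the handler above (register snapshots are invented for illustration): only sources that are both enabled and pending are dispatched; pending-but-masked bits stay latched in RAW.

	/* Hypothetical register snapshot for the CIB scan. */
	static u64 cib_pending_example(void)
	{
		u64 en  = 0x14;	/* bits 2 and 4 enabled (made-up value) */
		u64 raw = 0x36;	/* bits 1, 2, 4, 5 pending (made-up value) */

		return en & raw;	/* == 0x14: only bits 2 and 4 serviced */
	}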
| 2226 | |||
| 2227 | static int __init octeon_irq_init_cib(struct device_node *ciu_node, | ||
| 2228 | struct device_node *parent) | ||
| 2229 | { | ||
| 2230 | const __be32 *addr; | ||
| 2231 | u32 val; | ||
| 2232 | struct octeon_irq_cib_host_data *host_data; | ||
| 2233 | int parent_irq; | ||
| 2234 | int r; | ||
| 2235 | struct irq_domain *cib_domain; | ||
| 2236 | |||
| 2237 | parent_irq = irq_of_parse_and_map(ciu_node, 0); | ||
| 2238 | if (!parent_irq) { | ||
| 2239 | pr_err("ERROR: Couldn't acquire parent_irq for %s\n.", | ||
| 2240 | ciu_node->name); | ||
| 2241 | return -EINVAL; | ||
| 2242 | } | ||
| 2243 | |||
| 2244 | host_data = kzalloc(sizeof(*host_data), GFP_KERNEL); | ||
| 2245 | raw_spin_lock_init(&host_data->lock); | ||
| 2246 | |||
| 2247 | addr = of_get_address(ciu_node, 0, NULL, NULL); | ||
| 2248 | if (!addr) { | ||
| 2249 | pr_err("ERROR: Couldn't acquire reg(0) %s\n.", ciu_node->name); | ||
| 2250 | return -EINVAL; | ||
| 2251 | } | ||
| 2252 | host_data->raw_reg = (u64)phys_to_virt( | ||
| 2253 | of_translate_address(ciu_node, addr)); | ||
| 2254 | |||
| 2255 | addr = of_get_address(ciu_node, 1, NULL, NULL); | ||
| 2256 | if (!addr) { | ||
| 2257 | pr_err("ERROR: Couldn't acquire reg(1) %s\n.", ciu_node->name); | ||
| 2258 | return -EINVAL; | ||
| 2259 | } | ||
| 2260 | host_data->en_reg = (u64)phys_to_virt( | ||
| 2261 | of_translate_address(ciu_node, addr)); | ||
| 2262 | |||
| 2263 | r = of_property_read_u32(ciu_node, "cavium,max-bits", &val); | ||
| 2264 | if (r) { | ||
| 2265 | pr_err("ERROR: Couldn't read cavium,max-bits from %s\n.", | ||
| 2266 | ciu_node->name); | ||
| 2267 | return r; | ||
| 2268 | } | ||
| 2269 | host_data->max_bits = val; | ||
| 2270 | |||
| 2271 | cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits, | ||
| 2272 | &octeon_irq_domain_cib_ops, | ||
| 2273 | host_data); | ||
| 2274 | if (!cib_domain) { | ||
| 2275 | pr_err("ERROR: Couldn't irq_domain_add_linear()\n."); | ||
| 2276 | return -ENOMEM; | ||
| 2277 | } | ||
| 2278 | |||
| 2279 | cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */ | ||
| 2280 | cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */ | ||
| 2281 | |||
| 2282 | r = request_irq(parent_irq, octeon_irq_cib_handler, | ||
| 2283 | IRQF_NO_THREAD, "cib", cib_domain); | ||
| 2284 | if (r) { | ||
| 2285 | pr_err("request_irq cib failed %d\n", r); | ||
| 2286 | return r; | ||
| 2287 | } | ||
| 2288 | pr_info("CIB interrupt controller probed: %llx %d\n", | ||
| 2289 | host_data->raw_reg, host_data->max_bits); | ||
| 2290 | return 0; | ||
| 2291 | } | ||
| 2292 | |||
| 2293 | static struct of_device_id ciu_types[] __initdata = { | ||
| 2294 | {.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu}, | ||
| 2295 | {.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio}, | ||
| 2296 | {.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2}, | ||
| 2297 | {.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib}, | ||
| 2298 | {} | ||
| 2299 | }; | ||
| 2300 | |||
| 1787 | void __init arch_init_irq(void) | 2301 | void __init arch_init_irq(void) |
| 1788 | { | 2302 | { |
| 1789 | #ifdef CONFIG_SMP | 2303 | #ifdef CONFIG_SMP |
| @@ -1791,10 +2305,7 @@ void __init arch_init_irq(void) | |||
| 1791 | cpumask_clear(irq_default_affinity); | 2305 | cpumask_clear(irq_default_affinity); |
| 1792 | cpumask_set_cpu(smp_processor_id(), irq_default_affinity); | 2306 | cpumask_set_cpu(smp_processor_id(), irq_default_affinity); |
| 1793 | #endif | 2307 | #endif |
| 1794 | if (OCTEON_IS_MODEL(OCTEON_CN68XX)) | 2308 | of_irq_init(ciu_types); |
| 1795 | octeon_irq_init_ciu2(); | ||
| 1796 | else | ||
| 1797 | octeon_irq_init_ciu(); | ||
| 1798 | } | 2309 | } |
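The single of_irq_init() call above replaces the old model check: it walks the device tree for interrupt-controller nodes, matches their compatible strings against ciu_types, and invokes each matching entry's .data callback, initializing parent controllers before their children. A minimal sketch of the callback shape it expects (the controller name here is hypothetical, only the prototype matters):

	/* of_irq_init() calls each matched .data entry as
	 *     int init(struct device_node *node, struct device_node *parent);
	 * so adding a controller only needs a table entry plus an init
	 * function of that shape.
	 */
	static int __init my_new_ctrl_init(struct device_node *node,
					   struct device_node *parent)
	{
		/* map registers, build an irq_domain, hook the parent line */
		return 0;
	}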
| 1799 | 2310 | ||
| 1800 | asmlinkage void plat_irq_dispatch(void) | 2311 | asmlinkage void plat_irq_dispatch(void) |
| @@ -1808,13 +2319,13 @@ asmlinkage void plat_irq_dispatch(void) | |||
| 1808 | cop0_cause &= cop0_status; | 2319 | cop0_cause &= cop0_status; |
| 1809 | cop0_cause &= ST0_IM; | 2320 | cop0_cause &= ST0_IM; |
| 1810 | 2321 | ||
| 1811 | if (unlikely(cop0_cause & STATUSF_IP2)) | 2322 | if (cop0_cause & STATUSF_IP2) |
| 1812 | octeon_irq_ip2(); | 2323 | octeon_irq_ip2(); |
| 1813 | else if (unlikely(cop0_cause & STATUSF_IP3)) | 2324 | else if (cop0_cause & STATUSF_IP3) |
| 1814 | octeon_irq_ip3(); | 2325 | octeon_irq_ip3(); |
| 1815 | else if (unlikely(cop0_cause & STATUSF_IP4)) | 2326 | else if (cop0_cause & STATUSF_IP4) |
| 1816 | octeon_irq_ip4(); | 2327 | octeon_irq_ip4(); |
| 1817 | else if (likely(cop0_cause)) | 2328 | else if (cop0_cause) |
| 1818 | do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE); | 2329 | do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE); |
| 1819 | else | 2330 | else |
| 1820 | break; | 2331 | break; |
