author     David S. Miller <davem@davemloft.net>    2015-03-03 21:16:48 -0500
committer  David S. Miller <davem@davemloft.net>    2015-03-03 21:16:48 -0500
commit     71a83a6db6138b9d41d8a0b6b91cb59f6dc4742c
tree       f74b6e4e48257ec6ce40b95645ecb8533b9cc1f8 /arch/mips/cavium-octeon
parent     b97526f3ff95f92b107f0fb52cbb8627e395429b
parent     a6c5170d1edea97c538c81e377e56c7b5c5b7e63
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
drivers/net/ethernet/rocker/rocker.c
The rocker conflict came from two overlapping changes: one renamed
the ->vport member to ->pport, and the other made the bitmask
expression use '1ULL' instead of plain '1'.
Signed-off-by: David S. Miller <davem@davemloft.net>
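To make the conflict concrete, here is a minimal, self-contained C sketch of the two overlapping edits described above. Everything in it (the demo_port struct, the pport value of 40, the printf) is invented for illustration and is not the actual rocker.c code; only the shape of the two changes, the ->vport to ->pport rename and the switch from '1' to '1ULL' in the shifted constant, follows the description in this commit message.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the rocker port structure. */
struct demo_port {
	uint32_t pport;	/* one side of the conflict renamed ->vport to ->pport */
};

int main(void)
{
	struct demo_port port = { .pport = 40 };

	/*
	 * The other side of the conflict changed the mask from '1 << ...'
	 * to '1ULL << ...' so the shift is performed in 64 bits; a plain
	 * int '1' would overflow once the bit index reaches 31.  The merge
	 * resolution keeps both changes on the same line:
	 */
	uint64_t mask = 1ULL << port.pport;

	printf("mask = 0x%llx\n", (unsigned long long)mask);
	return 0;
}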
Diffstat (limited to 'arch/mips/cavium-octeon')
-rw-r--r--  arch/mips/cavium-octeon/csrc-octeon.c                      11
-rw-r--r--  arch/mips/cavium-octeon/dma-octeon.c                        4
-rw-r--r--  arch/mips/cavium-octeon/executive/cvmx-helper-board.c       2
-rw-r--r--  arch/mips/cavium-octeon/octeon-irq.c                     1094
-rw-r--r--  arch/mips/cavium-octeon/setup.c                            56
5 files changed, 885 insertions, 282 deletions
diff --git a/arch/mips/cavium-octeon/csrc-octeon.c b/arch/mips/cavium-octeon/csrc-octeon.c
index b752c4ed0b79..1882e6475dd0 100644
--- a/arch/mips/cavium-octeon/csrc-octeon.c
+++ b/arch/mips/cavium-octeon/csrc-octeon.c
@@ -18,7 +18,7 @@
18 | #include <asm/octeon/octeon.h> | 18 | #include <asm/octeon/octeon.h> |
19 | #include <asm/octeon/cvmx-ipd-defs.h> | 19 | #include <asm/octeon/cvmx-ipd-defs.h> |
20 | #include <asm/octeon/cvmx-mio-defs.h> | 20 | #include <asm/octeon/cvmx-mio-defs.h> |
21 | 21 | #include <asm/octeon/cvmx-rst-defs.h> | |
22 | 22 | ||
23 | static u64 f; | 23 | static u64 f; |
24 | static u64 rdiv; | 24 | static u64 rdiv; |
@@ -39,11 +39,20 @@ void __init octeon_setup_delays(void)
39 | 39 | ||
40 | if (current_cpu_type() == CPU_CAVIUM_OCTEON2) { | 40 | if (current_cpu_type() == CPU_CAVIUM_OCTEON2) { |
41 | union cvmx_mio_rst_boot rst_boot; | 41 | union cvmx_mio_rst_boot rst_boot; |
42 | |||
42 | rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT); | 43 | rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT); |
43 | rdiv = rst_boot.s.c_mul; /* CPU clock */ | 44 | rdiv = rst_boot.s.c_mul; /* CPU clock */ |
44 | sdiv = rst_boot.s.pnr_mul; /* I/O clock */ | 45 | sdiv = rst_boot.s.pnr_mul; /* I/O clock */ |
45 | f = (0x8000000000000000ull / sdiv) * 2; | 46 | f = (0x8000000000000000ull / sdiv) * 2; |
47 | } else if (current_cpu_type() == CPU_CAVIUM_OCTEON3) { | ||
48 | union cvmx_rst_boot rst_boot; | ||
49 | |||
50 | rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT); | ||
51 | rdiv = rst_boot.s.c_mul; /* CPU clock */ | ||
52 | sdiv = rst_boot.s.pnr_mul; /* I/O clock */ | ||
53 | f = (0x8000000000000000ull / sdiv) * 2; | ||
46 | } | 54 | } |
55 | |||
47 | } | 56 | } |
48 | 57 | ||
49 | /* | 58 | /* |
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index 3778655c4a37..7d8987818ccf 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -276,7 +276,7 @@ void __init plat_swiotlb_setup(void)
276 | continue; | 276 | continue; |
277 | 277 | ||
278 | /* These addresses map low for PCI. */ | 278 | /* These addresses map low for PCI. */ |
279 | if (e->addr > 0x410000000ull && !OCTEON_IS_MODEL(OCTEON_CN6XXX)) | 279 | if (e->addr > 0x410000000ull && !OCTEON_IS_OCTEON2()) |
280 | continue; | 280 | continue; |
281 | 281 | ||
282 | addr_size += e->size; | 282 | addr_size += e->size; |
@@ -308,7 +308,7 @@ void __init plat_swiotlb_setup(void)
308 | #endif | 308 | #endif |
309 | #ifdef CONFIG_USB_OCTEON_OHCI | 309 | #ifdef CONFIG_USB_OCTEON_OHCI |
310 | /* OCTEON II ohci is only 32-bit. */ | 310 | /* OCTEON II ohci is only 32-bit. */ |
311 | if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && max_addr >= 0x100000000ul) | 311 | if (OCTEON_IS_OCTEON2() && max_addr >= 0x100000000ul) |
312 | swiotlbsize = 64 * (1<<20); | 312 | swiotlbsize = 64 * (1<<20); |
313 | #endif | 313 | #endif |
314 | swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT; | 314 | swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT; |
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
index 5dfef84b9576..9eb0feef4417 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
@@ -767,7 +767,7 @@ enum cvmx_helper_board_usb_clock_types __cvmx_helper_board_usb_get_clock_type(vo
767 | break; | 767 | break; |
768 | } | 768 | } |
769 | /* Most boards except NIC10e use a 12MHz crystal */ | 769 | /* Most boards except NIC10e use a 12MHz crystal */ |
770 | if (OCTEON_IS_MODEL(OCTEON_FAM_2)) | 770 | if (OCTEON_IS_OCTEON2()) |
771 | return USB_CLOCK_TYPE_CRYSTAL_12; | 771 | return USB_CLOCK_TYPE_CRYSTAL_12; |
772 | return USB_CLOCK_TYPE_REF_48; | 772 | return USB_CLOCK_TYPE_REF_48; |
773 | } | 773 | } |
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 2bc4aa95944e..10f762557b92 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -3,12 +3,14 @@
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 2004-2012 Cavium, Inc. | 6 | * Copyright (C) 2004-2014 Cavium, Inc. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/of_address.h> | ||
9 | #include <linux/interrupt.h> | 10 | #include <linux/interrupt.h> |
10 | #include <linux/irqdomain.h> | 11 | #include <linux/irqdomain.h> |
11 | #include <linux/bitops.h> | 12 | #include <linux/bitops.h> |
13 | #include <linux/of_irq.h> | ||
12 | #include <linux/percpu.h> | 14 | #include <linux/percpu.h> |
13 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
14 | #include <linux/irq.h> | 16 | #include <linux/irq.h> |
@@ -22,16 +22,25 @@ static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
22 | static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror); | 24 | static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror); |
23 | static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock); | 25 | static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock); |
24 | 26 | ||
27 | struct octeon_irq_ciu_domain_data { | ||
28 | int num_sum; /* number of sum registers (2 or 3). */ | ||
29 | }; | ||
30 | |||
25 | static __read_mostly u8 octeon_irq_ciu_to_irq[8][64]; | 31 | static __read_mostly u8 octeon_irq_ciu_to_irq[8][64]; |
26 | 32 | ||
27 | union octeon_ciu_chip_data { | 33 | struct octeon_ciu_chip_data { |
28 | void *p; | 34 | union { |
29 | unsigned long l; | 35 | struct { /* only used for ciu3 */ |
30 | struct { | 36 | u64 ciu3_addr; |
31 | unsigned long line:6; | 37 | unsigned int intsn; |
32 | unsigned long bit:6; | 38 | }; |
33 | unsigned long gpio_line:6; | 39 | struct { /* only used for ciu/ciu2 */ |
34 | } s; | 40 | u8 line; |
41 | u8 bit; | ||
42 | u8 gpio_line; | ||
43 | }; | ||
44 | }; | ||
45 | int current_cpu; /* Next CPU expected to take this irq */ | ||
35 | }; | 46 | }; |
36 | 47 | ||
37 | struct octeon_core_chip_data { | 48 | struct octeon_core_chip_data { |
@@ -45,27 +56,40 @@ struct octeon_core_chip_data {
45 | 56 | ||
46 | static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES]; | 57 | static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES]; |
47 | 58 | ||
48 | static void octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line, | 59 | static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line, |
49 | struct irq_chip *chip, | 60 | struct irq_chip *chip, |
50 | irq_flow_handler_t handler) | 61 | irq_flow_handler_t handler) |
51 | { | 62 | { |
52 | union octeon_ciu_chip_data cd; | 63 | struct octeon_ciu_chip_data *cd; |
64 | |||
65 | cd = kzalloc(sizeof(*cd), GFP_KERNEL); | ||
66 | if (!cd) | ||
67 | return -ENOMEM; | ||
53 | 68 | ||
54 | irq_set_chip_and_handler(irq, chip, handler); | 69 | irq_set_chip_and_handler(irq, chip, handler); |
55 | 70 | ||
56 | cd.l = 0; | 71 | cd->line = line; |
57 | cd.s.line = line; | 72 | cd->bit = bit; |
58 | cd.s.bit = bit; | 73 | cd->gpio_line = gpio_line; |
59 | cd.s.gpio_line = gpio_line; | ||
60 | 74 | ||
61 | irq_set_chip_data(irq, cd.p); | 75 | irq_set_chip_data(irq, cd); |
62 | octeon_irq_ciu_to_irq[line][bit] = irq; | 76 | octeon_irq_ciu_to_irq[line][bit] = irq; |
77 | return 0; | ||
63 | } | 78 | } |
64 | 79 | ||
65 | static void octeon_irq_force_ciu_mapping(struct irq_domain *domain, | 80 | static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq) |
66 | int irq, int line, int bit) | ||
67 | { | 81 | { |
68 | irq_domain_associate(domain, irq, line << 6 | bit); | 82 | struct irq_data *data = irq_get_irq_data(irq); |
83 | struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data); | ||
84 | |||
85 | irq_set_chip_data(irq, NULL); | ||
86 | kfree(cd); | ||
87 | } | ||
88 | |||
89 | static int octeon_irq_force_ciu_mapping(struct irq_domain *domain, | ||
90 | int irq, int line, int bit) | ||
91 | { | ||
92 | return irq_domain_associate(domain, irq, line << 6 | bit); | ||
69 | } | 93 | } |
70 | 94 | ||
71 | static int octeon_coreid_for_cpu(int cpu) | 95 | static int octeon_coreid_for_cpu(int cpu) |
@@ -202,9 +226,10 @@ static int next_cpu_for_irq(struct irq_data *data)
202 | #ifdef CONFIG_SMP | 226 | #ifdef CONFIG_SMP |
203 | int cpu; | 227 | int cpu; |
204 | int weight = cpumask_weight(data->affinity); | 228 | int weight = cpumask_weight(data->affinity); |
229 | struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data); | ||
205 | 230 | ||
206 | if (weight > 1) { | 231 | if (weight > 1) { |
207 | cpu = smp_processor_id(); | 232 | cpu = cd->current_cpu; |
208 | for (;;) { | 233 | for (;;) { |
209 | cpu = cpumask_next(cpu, data->affinity); | 234 | cpu = cpumask_next(cpu, data->affinity); |
210 | if (cpu >= nr_cpu_ids) { | 235 | if (cpu >= nr_cpu_ids) { |
@@ -219,6 +244,7 @@ static int next_cpu_for_irq(struct irq_data *data)
219 | } else { | 244 | } else { |
220 | cpu = smp_processor_id(); | 245 | cpu = smp_processor_id(); |
221 | } | 246 | } |
247 | cd->current_cpu = cpu; | ||
222 | return cpu; | 248 | return cpu; |
223 | #else | 249 | #else |
224 | return smp_processor_id(); | 250 | return smp_processor_id(); |
@@ -231,15 +257,15 @@ static void octeon_irq_ciu_enable(struct irq_data *data)
231 | int coreid = octeon_coreid_for_cpu(cpu); | 257 | int coreid = octeon_coreid_for_cpu(cpu); |
232 | unsigned long *pen; | 258 | unsigned long *pen; |
233 | unsigned long flags; | 259 | unsigned long flags; |
234 | union octeon_ciu_chip_data cd; | 260 | struct octeon_ciu_chip_data *cd; |
235 | raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); | 261 | raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); |
236 | 262 | ||
237 | cd.p = irq_data_get_irq_chip_data(data); | 263 | cd = irq_data_get_irq_chip_data(data); |
238 | 264 | ||
239 | raw_spin_lock_irqsave(lock, flags); | 265 | raw_spin_lock_irqsave(lock, flags); |
240 | if (cd.s.line == 0) { | 266 | if (cd->line == 0) { |
241 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); | 267 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); |
242 | __set_bit(cd.s.bit, pen); | 268 | __set_bit(cd->bit, pen); |
243 | /* | 269 | /* |
244 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 270 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
245 | * enabling the irq. | 271 | * enabling the irq. |
@@ -248,7 +274,7 @@ static void octeon_irq_ciu_enable(struct irq_data *data)
248 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); | 274 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); |
249 | } else { | 275 | } else { |
250 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); | 276 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); |
251 | __set_bit(cd.s.bit, pen); | 277 | __set_bit(cd->bit, pen); |
252 | /* | 278 | /* |
253 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 279 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
254 | * enabling the irq. | 280 | * enabling the irq. |
@@ -263,15 +289,15 @@ static void octeon_irq_ciu_enable_local(struct irq_data *data)
263 | { | 289 | { |
264 | unsigned long *pen; | 290 | unsigned long *pen; |
265 | unsigned long flags; | 291 | unsigned long flags; |
266 | union octeon_ciu_chip_data cd; | 292 | struct octeon_ciu_chip_data *cd; |
267 | raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock); | 293 | raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock); |
268 | 294 | ||
269 | cd.p = irq_data_get_irq_chip_data(data); | 295 | cd = irq_data_get_irq_chip_data(data); |
270 | 296 | ||
271 | raw_spin_lock_irqsave(lock, flags); | 297 | raw_spin_lock_irqsave(lock, flags); |
272 | if (cd.s.line == 0) { | 298 | if (cd->line == 0) { |
273 | pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror); | 299 | pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror); |
274 | __set_bit(cd.s.bit, pen); | 300 | __set_bit(cd->bit, pen); |
275 | /* | 301 | /* |
276 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 302 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
277 | * enabling the irq. | 303 | * enabling the irq. |
@@ -280,7 +306,7 @@ static void octeon_irq_ciu_enable_local(struct irq_data *data)
280 | cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); | 306 | cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); |
281 | } else { | 307 | } else { |
282 | pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror); | 308 | pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror); |
283 | __set_bit(cd.s.bit, pen); | 309 | __set_bit(cd->bit, pen); |
284 | /* | 310 | /* |
285 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 311 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
286 | * enabling the irq. | 312 | * enabling the irq. |
@@ -295,15 +321,15 @@ static void octeon_irq_ciu_disable_local(struct irq_data *data)
295 | { | 321 | { |
296 | unsigned long *pen; | 322 | unsigned long *pen; |
297 | unsigned long flags; | 323 | unsigned long flags; |
298 | union octeon_ciu_chip_data cd; | 324 | struct octeon_ciu_chip_data *cd; |
299 | raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock); | 325 | raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock); |
300 | 326 | ||
301 | cd.p = irq_data_get_irq_chip_data(data); | 327 | cd = irq_data_get_irq_chip_data(data); |
302 | 328 | ||
303 | raw_spin_lock_irqsave(lock, flags); | 329 | raw_spin_lock_irqsave(lock, flags); |
304 | if (cd.s.line == 0) { | 330 | if (cd->line == 0) { |
305 | pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror); | 331 | pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror); |
306 | __clear_bit(cd.s.bit, pen); | 332 | __clear_bit(cd->bit, pen); |
307 | /* | 333 | /* |
308 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 334 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
309 | * enabling the irq. | 335 | * enabling the irq. |
@@ -312,7 +338,7 @@ static void octeon_irq_ciu_disable_local(struct irq_data *data)
312 | cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); | 338 | cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); |
313 | } else { | 339 | } else { |
314 | pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror); | 340 | pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror); |
315 | __clear_bit(cd.s.bit, pen); | 341 | __clear_bit(cd->bit, pen); |
316 | /* | 342 | /* |
317 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 343 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
318 | * enabling the irq. | 344 | * enabling the irq. |
@@ -328,27 +354,27 @@ static void octeon_irq_ciu_disable_all(struct irq_data *data)
328 | unsigned long flags; | 354 | unsigned long flags; |
329 | unsigned long *pen; | 355 | unsigned long *pen; |
330 | int cpu; | 356 | int cpu; |
331 | union octeon_ciu_chip_data cd; | 357 | struct octeon_ciu_chip_data *cd; |
332 | raw_spinlock_t *lock; | 358 | raw_spinlock_t *lock; |
333 | 359 | ||
334 | cd.p = irq_data_get_irq_chip_data(data); | 360 | cd = irq_data_get_irq_chip_data(data); |
335 | 361 | ||
336 | for_each_online_cpu(cpu) { | 362 | for_each_online_cpu(cpu) { |
337 | int coreid = octeon_coreid_for_cpu(cpu); | 363 | int coreid = octeon_coreid_for_cpu(cpu); |
338 | lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); | 364 | lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); |
339 | if (cd.s.line == 0) | 365 | if (cd->line == 0) |
340 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); | 366 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); |
341 | else | 367 | else |
342 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); | 368 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); |
343 | 369 | ||
344 | raw_spin_lock_irqsave(lock, flags); | 370 | raw_spin_lock_irqsave(lock, flags); |
345 | __clear_bit(cd.s.bit, pen); | 371 | __clear_bit(cd->bit, pen); |
346 | /* | 372 | /* |
347 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 373 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
348 | * enabling the irq. | 374 | * enabling the irq. |
349 | */ | 375 | */ |
350 | wmb(); | 376 | wmb(); |
351 | if (cd.s.line == 0) | 377 | if (cd->line == 0) |
352 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); | 378 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); |
353 | else | 379 | else |
354 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); | 380 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); |
@@ -361,27 +387,27 @@ static void octeon_irq_ciu_enable_all(struct irq_data *data)
361 | unsigned long flags; | 387 | unsigned long flags; |
362 | unsigned long *pen; | 388 | unsigned long *pen; |
363 | int cpu; | 389 | int cpu; |
364 | union octeon_ciu_chip_data cd; | 390 | struct octeon_ciu_chip_data *cd; |
365 | raw_spinlock_t *lock; | 391 | raw_spinlock_t *lock; |
366 | 392 | ||
367 | cd.p = irq_data_get_irq_chip_data(data); | 393 | cd = irq_data_get_irq_chip_data(data); |
368 | 394 | ||
369 | for_each_online_cpu(cpu) { | 395 | for_each_online_cpu(cpu) { |
370 | int coreid = octeon_coreid_for_cpu(cpu); | 396 | int coreid = octeon_coreid_for_cpu(cpu); |
371 | lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); | 397 | lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); |
372 | if (cd.s.line == 0) | 398 | if (cd->line == 0) |
373 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); | 399 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); |
374 | else | 400 | else |
375 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); | 401 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); |
376 | 402 | ||
377 | raw_spin_lock_irqsave(lock, flags); | 403 | raw_spin_lock_irqsave(lock, flags); |
378 | __set_bit(cd.s.bit, pen); | 404 | __set_bit(cd->bit, pen); |
379 | /* | 405 | /* |
380 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 406 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
381 | * enabling the irq. | 407 | * enabling the irq. |
382 | */ | 408 | */ |
383 | wmb(); | 409 | wmb(); |
384 | if (cd.s.line == 0) | 410 | if (cd->line == 0) |
385 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); | 411 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); |
386 | else | 412 | else |
387 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); | 413 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); |
@@ -397,45 +423,106 @@ static void octeon_irq_ciu_enable_v2(struct irq_data *data)
397 | { | 423 | { |
398 | u64 mask; | 424 | u64 mask; |
399 | int cpu = next_cpu_for_irq(data); | 425 | int cpu = next_cpu_for_irq(data); |
400 | union octeon_ciu_chip_data cd; | 426 | struct octeon_ciu_chip_data *cd; |
401 | 427 | ||
402 | cd.p = irq_data_get_irq_chip_data(data); | 428 | cd = irq_data_get_irq_chip_data(data); |
403 | mask = 1ull << (cd.s.bit); | 429 | mask = 1ull << (cd->bit); |
404 | 430 | ||
405 | /* | 431 | /* |
406 | * Called under the desc lock, so these should never get out | 432 | * Called under the desc lock, so these should never get out |
407 | * of sync. | 433 | * of sync. |
408 | */ | 434 | */ |
409 | if (cd.s.line == 0) { | 435 | if (cd->line == 0) { |
410 | int index = octeon_coreid_for_cpu(cpu) * 2; | 436 | int index = octeon_coreid_for_cpu(cpu) * 2; |
411 | set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); | 437 | set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); |
412 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | 438 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); |
413 | } else { | 439 | } else { |
414 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; | 440 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; |
415 | set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); | 441 | set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); |
416 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | 442 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); |
417 | } | 443 | } |
418 | } | 444 | } |
419 | 445 | ||
420 | /* | 446 | /* |
447 | * Enable the irq in the sum2 registers. | ||
448 | */ | ||
449 | static void octeon_irq_ciu_enable_sum2(struct irq_data *data) | ||
450 | { | ||
451 | u64 mask; | ||
452 | int cpu = next_cpu_for_irq(data); | ||
453 | int index = octeon_coreid_for_cpu(cpu); | ||
454 | struct octeon_ciu_chip_data *cd; | ||
455 | |||
456 | cd = irq_data_get_irq_chip_data(data); | ||
457 | mask = 1ull << (cd->bit); | ||
458 | |||
459 | cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask); | ||
460 | } | ||
461 | |||
462 | /* | ||
463 | * Disable the irq in the sum2 registers. | ||
464 | */ | ||
465 | static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data) | ||
466 | { | ||
467 | u64 mask; | ||
468 | int cpu = next_cpu_for_irq(data); | ||
469 | int index = octeon_coreid_for_cpu(cpu); | ||
470 | struct octeon_ciu_chip_data *cd; | ||
471 | |||
472 | cd = irq_data_get_irq_chip_data(data); | ||
473 | mask = 1ull << (cd->bit); | ||
474 | |||
475 | cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask); | ||
476 | } | ||
477 | |||
478 | static void octeon_irq_ciu_ack_sum2(struct irq_data *data) | ||
479 | { | ||
480 | u64 mask; | ||
481 | int cpu = next_cpu_for_irq(data); | ||
482 | int index = octeon_coreid_for_cpu(cpu); | ||
483 | struct octeon_ciu_chip_data *cd; | ||
484 | |||
485 | cd = irq_data_get_irq_chip_data(data); | ||
486 | mask = 1ull << (cd->bit); | ||
487 | |||
488 | cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask); | ||
489 | } | ||
490 | |||
491 | static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data) | ||
492 | { | ||
493 | int cpu; | ||
494 | struct octeon_ciu_chip_data *cd; | ||
495 | u64 mask; | ||
496 | |||
497 | cd = irq_data_get_irq_chip_data(data); | ||
498 | mask = 1ull << (cd->bit); | ||
499 | |||
500 | for_each_online_cpu(cpu) { | ||
501 | int coreid = octeon_coreid_for_cpu(cpu); | ||
502 | |||
503 | cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask); | ||
504 | } | ||
505 | } | ||
506 | |||
507 | /* | ||
421 | * Enable the irq on the current CPU for chips that | 508 | * Enable the irq on the current CPU for chips that |
422 | * have the EN*_W1{S,C} registers. | 509 | * have the EN*_W1{S,C} registers. |
423 | */ | 510 | */ |
424 | static void octeon_irq_ciu_enable_local_v2(struct irq_data *data) | 511 | static void octeon_irq_ciu_enable_local_v2(struct irq_data *data) |
425 | { | 512 | { |
426 | u64 mask; | 513 | u64 mask; |
427 | union octeon_ciu_chip_data cd; | 514 | struct octeon_ciu_chip_data *cd; |
428 | 515 | ||
429 | cd.p = irq_data_get_irq_chip_data(data); | 516 | cd = irq_data_get_irq_chip_data(data); |
430 | mask = 1ull << (cd.s.bit); | 517 | mask = 1ull << (cd->bit); |
431 | 518 | ||
432 | if (cd.s.line == 0) { | 519 | if (cd->line == 0) { |
433 | int index = cvmx_get_core_num() * 2; | 520 | int index = cvmx_get_core_num() * 2; |
434 | set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); | 521 | set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); |
435 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | 522 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); |
436 | } else { | 523 | } else { |
437 | int index = cvmx_get_core_num() * 2 + 1; | 524 | int index = cvmx_get_core_num() * 2 + 1; |
438 | set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); | 525 | set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); |
439 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | 526 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); |
440 | } | 527 | } |
441 | } | 528 | } |
@@ -443,18 +530,18 @@ static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
443 | static void octeon_irq_ciu_disable_local_v2(struct irq_data *data) | 530 | static void octeon_irq_ciu_disable_local_v2(struct irq_data *data) |
444 | { | 531 | { |
445 | u64 mask; | 532 | u64 mask; |
446 | union octeon_ciu_chip_data cd; | 533 | struct octeon_ciu_chip_data *cd; |
447 | 534 | ||
448 | cd.p = irq_data_get_irq_chip_data(data); | 535 | cd = irq_data_get_irq_chip_data(data); |
449 | mask = 1ull << (cd.s.bit); | 536 | mask = 1ull << (cd->bit); |
450 | 537 | ||
451 | if (cd.s.line == 0) { | 538 | if (cd->line == 0) { |
452 | int index = cvmx_get_core_num() * 2; | 539 | int index = cvmx_get_core_num() * 2; |
453 | clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); | 540 | clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); |
454 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); | 541 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); |
455 | } else { | 542 | } else { |
456 | int index = cvmx_get_core_num() * 2 + 1; | 543 | int index = cvmx_get_core_num() * 2 + 1; |
457 | clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); | 544 | clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); |
458 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); | 545 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); |
459 | } | 546 | } |
460 | } | 547 | } |
@@ -465,12 +552,12 @@ static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
465 | static void octeon_irq_ciu_ack(struct irq_data *data) | 552 | static void octeon_irq_ciu_ack(struct irq_data *data) |
466 | { | 553 | { |
467 | u64 mask; | 554 | u64 mask; |
468 | union octeon_ciu_chip_data cd; | 555 | struct octeon_ciu_chip_data *cd; |
469 | 556 | ||
470 | cd.p = irq_data_get_irq_chip_data(data); | 557 | cd = irq_data_get_irq_chip_data(data); |
471 | mask = 1ull << (cd.s.bit); | 558 | mask = 1ull << (cd->bit); |
472 | 559 | ||
473 | if (cd.s.line == 0) { | 560 | if (cd->line == 0) { |
474 | int index = cvmx_get_core_num() * 2; | 561 | int index = cvmx_get_core_num() * 2; |
475 | cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask); | 562 | cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask); |
476 | } else { | 563 | } else { |
@@ -486,21 +573,23 @@ static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
486 | { | 573 | { |
487 | int cpu; | 574 | int cpu; |
488 | u64 mask; | 575 | u64 mask; |
489 | union octeon_ciu_chip_data cd; | 576 | struct octeon_ciu_chip_data *cd; |
490 | 577 | ||
491 | cd.p = irq_data_get_irq_chip_data(data); | 578 | cd = irq_data_get_irq_chip_data(data); |
492 | mask = 1ull << (cd.s.bit); | 579 | mask = 1ull << (cd->bit); |
493 | 580 | ||
494 | if (cd.s.line == 0) { | 581 | if (cd->line == 0) { |
495 | for_each_online_cpu(cpu) { | 582 | for_each_online_cpu(cpu) { |
496 | int index = octeon_coreid_for_cpu(cpu) * 2; | 583 | int index = octeon_coreid_for_cpu(cpu) * 2; |
497 | clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); | 584 | clear_bit(cd->bit, |
585 | &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); | ||
498 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); | 586 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); |
499 | } | 587 | } |
500 | } else { | 588 | } else { |
501 | for_each_online_cpu(cpu) { | 589 | for_each_online_cpu(cpu) { |
502 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; | 590 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; |
503 | clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); | 591 | clear_bit(cd->bit, |
592 | &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); | ||
504 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); | 593 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); |
505 | } | 594 | } |
506 | } | 595 | } |
@@ -514,21 +603,23 @@ static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
514 | { | 603 | { |
515 | int cpu; | 604 | int cpu; |
516 | u64 mask; | 605 | u64 mask; |
517 | union octeon_ciu_chip_data cd; | 606 | struct octeon_ciu_chip_data *cd; |
518 | 607 | ||
519 | cd.p = irq_data_get_irq_chip_data(data); | 608 | cd = irq_data_get_irq_chip_data(data); |
520 | mask = 1ull << (cd.s.bit); | 609 | mask = 1ull << (cd->bit); |
521 | 610 | ||
522 | if (cd.s.line == 0) { | 611 | if (cd->line == 0) { |
523 | for_each_online_cpu(cpu) { | 612 | for_each_online_cpu(cpu) { |
524 | int index = octeon_coreid_for_cpu(cpu) * 2; | 613 | int index = octeon_coreid_for_cpu(cpu) * 2; |
525 | set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); | 614 | set_bit(cd->bit, |
615 | &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); | ||
526 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | 616 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); |
527 | } | 617 | } |
528 | } else { | 618 | } else { |
529 | for_each_online_cpu(cpu) { | 619 | for_each_online_cpu(cpu) { |
530 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; | 620 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; |
531 | set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); | 621 | set_bit(cd->bit, |
622 | &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); | ||
532 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | 623 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); |
533 | } | 624 | } |
534 | } | 625 | } |
@@ -537,10 +628,10 @@ static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
537 | static void octeon_irq_gpio_setup(struct irq_data *data) | 628 | static void octeon_irq_gpio_setup(struct irq_data *data) |
538 | { | 629 | { |
539 | union cvmx_gpio_bit_cfgx cfg; | 630 | union cvmx_gpio_bit_cfgx cfg; |
540 | union octeon_ciu_chip_data cd; | 631 | struct octeon_ciu_chip_data *cd; |
541 | u32 t = irqd_get_trigger_type(data); | 632 | u32 t = irqd_get_trigger_type(data); |
542 | 633 | ||
543 | cd.p = irq_data_get_irq_chip_data(data); | 634 | cd = irq_data_get_irq_chip_data(data); |
544 | 635 | ||
545 | cfg.u64 = 0; | 636 | cfg.u64 = 0; |
546 | cfg.s.int_en = 1; | 637 | cfg.s.int_en = 1; |
@@ -551,7 +642,7 @@ static void octeon_irq_gpio_setup(struct irq_data *data)
551 | cfg.s.fil_cnt = 7; | 642 | cfg.s.fil_cnt = 7; |
552 | cfg.s.fil_sel = 3; | 643 | cfg.s.fil_sel = 3; |
553 | 644 | ||
554 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), cfg.u64); | 645 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64); |
555 | } | 646 | } |
556 | 647 | ||
557 | static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data) | 648 | static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data) |
@@ -576,36 +667,36 @@ static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
576 | 667 | ||
577 | static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data) | 668 | static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data) |
578 | { | 669 | { |
579 | union octeon_ciu_chip_data cd; | 670 | struct octeon_ciu_chip_data *cd; |
580 | 671 | ||
581 | cd.p = irq_data_get_irq_chip_data(data); | 672 | cd = irq_data_get_irq_chip_data(data); |
582 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); | 673 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0); |
583 | 674 | ||
584 | octeon_irq_ciu_disable_all_v2(data); | 675 | octeon_irq_ciu_disable_all_v2(data); |
585 | } | 676 | } |
586 | 677 | ||
587 | static void octeon_irq_ciu_disable_gpio(struct irq_data *data) | 678 | static void octeon_irq_ciu_disable_gpio(struct irq_data *data) |
588 | { | 679 | { |
589 | union octeon_ciu_chip_data cd; | 680 | struct octeon_ciu_chip_data *cd; |
590 | 681 | ||
591 | cd.p = irq_data_get_irq_chip_data(data); | 682 | cd = irq_data_get_irq_chip_data(data); |
592 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); | 683 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0); |
593 | 684 | ||
594 | octeon_irq_ciu_disable_all(data); | 685 | octeon_irq_ciu_disable_all(data); |
595 | } | 686 | } |
596 | 687 | ||
597 | static void octeon_irq_ciu_gpio_ack(struct irq_data *data) | 688 | static void octeon_irq_ciu_gpio_ack(struct irq_data *data) |
598 | { | 689 | { |
599 | union octeon_ciu_chip_data cd; | 690 | struct octeon_ciu_chip_data *cd; |
600 | u64 mask; | 691 | u64 mask; |
601 | 692 | ||
602 | cd.p = irq_data_get_irq_chip_data(data); | 693 | cd = irq_data_get_irq_chip_data(data); |
603 | mask = 1ull << (cd.s.gpio_line); | 694 | mask = 1ull << (cd->gpio_line); |
604 | 695 | ||
605 | cvmx_write_csr(CVMX_GPIO_INT_CLR, mask); | 696 | cvmx_write_csr(CVMX_GPIO_INT_CLR, mask); |
606 | } | 697 | } |
607 | 698 | ||
608 | static void octeon_irq_handle_gpio(unsigned int irq, struct irq_desc *desc) | 699 | static void octeon_irq_handle_trigger(unsigned int irq, struct irq_desc *desc) |
609 | { | 700 | { |
610 | if (irq_get_trigger_type(irq) & IRQ_TYPE_EDGE_BOTH) | 701 | if (irq_get_trigger_type(irq) & IRQ_TYPE_EDGE_BOTH) |
611 | handle_edge_irq(irq, desc); | 702 | handle_edge_irq(irq, desc); |
@@ -644,11 +735,11 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data,
644 | int cpu; | 735 | int cpu; |
645 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); | 736 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); |
646 | unsigned long flags; | 737 | unsigned long flags; |
647 | union octeon_ciu_chip_data cd; | 738 | struct octeon_ciu_chip_data *cd; |
648 | unsigned long *pen; | 739 | unsigned long *pen; |
649 | raw_spinlock_t *lock; | 740 | raw_spinlock_t *lock; |
650 | 741 | ||
651 | cd.p = irq_data_get_irq_chip_data(data); | 742 | cd = irq_data_get_irq_chip_data(data); |
652 | 743 | ||
653 | /* | 744 | /* |
654 | * For non-v2 CIU, we will allow only single CPU affinity. | 745 | * For non-v2 CIU, we will allow only single CPU affinity. |
@@ -668,16 +759,16 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data,
668 | lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); | 759 | lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); |
669 | raw_spin_lock_irqsave(lock, flags); | 760 | raw_spin_lock_irqsave(lock, flags); |
670 | 761 | ||
671 | if (cd.s.line == 0) | 762 | if (cd->line == 0) |
672 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); | 763 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); |
673 | else | 764 | else |
674 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); | 765 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); |
675 | 766 | ||
676 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | 767 | if (cpumask_test_cpu(cpu, dest) && enable_one) { |
677 | enable_one = 0; | 768 | enable_one = 0; |
678 | __set_bit(cd.s.bit, pen); | 769 | __set_bit(cd->bit, pen); |
679 | } else { | 770 | } else { |
680 | __clear_bit(cd.s.bit, pen); | 771 | __clear_bit(cd->bit, pen); |
681 | } | 772 | } |
682 | /* | 773 | /* |
683 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 774 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
@@ -685,7 +776,7 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data,
685 | */ | 776 | */ |
686 | wmb(); | 777 | wmb(); |
687 | 778 | ||
688 | if (cd.s.line == 0) | 779 | if (cd->line == 0) |
689 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); | 780 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); |
690 | else | 781 | else |
691 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); | 782 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); |
@@ -706,24 +797,24 @@ static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
706 | int cpu; | 797 | int cpu; |
707 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); | 798 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); |
708 | u64 mask; | 799 | u64 mask; |
709 | union octeon_ciu_chip_data cd; | 800 | struct octeon_ciu_chip_data *cd; |
710 | 801 | ||
711 | if (!enable_one) | 802 | if (!enable_one) |
712 | return 0; | 803 | return 0; |
713 | 804 | ||
714 | cd.p = irq_data_get_irq_chip_data(data); | 805 | cd = irq_data_get_irq_chip_data(data); |
715 | mask = 1ull << cd.s.bit; | 806 | mask = 1ull << cd->bit; |
716 | 807 | ||
717 | if (cd.s.line == 0) { | 808 | if (cd->line == 0) { |
718 | for_each_online_cpu(cpu) { | 809 | for_each_online_cpu(cpu) { |
719 | unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); | 810 | unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); |
720 | int index = octeon_coreid_for_cpu(cpu) * 2; | 811 | int index = octeon_coreid_for_cpu(cpu) * 2; |
721 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | 812 | if (cpumask_test_cpu(cpu, dest) && enable_one) { |
722 | enable_one = false; | 813 | enable_one = false; |
723 | set_bit(cd.s.bit, pen); | 814 | set_bit(cd->bit, pen); |
724 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | 815 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); |
725 | } else { | 816 | } else { |
726 | clear_bit(cd.s.bit, pen); | 817 | clear_bit(cd->bit, pen); |
727 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); | 818 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); |
728 | } | 819 | } |
729 | } | 820 | } |
@@ -733,16 +824,44 @@ static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
733 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; | 824 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; |
734 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | 825 | if (cpumask_test_cpu(cpu, dest) && enable_one) { |
735 | enable_one = false; | 826 | enable_one = false; |
736 | set_bit(cd.s.bit, pen); | 827 | set_bit(cd->bit, pen); |
737 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | 828 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); |
738 | } else { | 829 | } else { |
739 | clear_bit(cd.s.bit, pen); | 830 | clear_bit(cd->bit, pen); |
740 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); | 831 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); |
741 | } | 832 | } |
742 | } | 833 | } |
743 | } | 834 | } |
744 | return 0; | 835 | return 0; |
745 | } | 836 | } |
837 | |||
838 | static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data, | ||
839 | const struct cpumask *dest, | ||
840 | bool force) | ||
841 | { | ||
842 | int cpu; | ||
843 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); | ||
844 | u64 mask; | ||
845 | struct octeon_ciu_chip_data *cd; | ||
846 | |||
847 | if (!enable_one) | ||
848 | return 0; | ||
849 | |||
850 | cd = irq_data_get_irq_chip_data(data); | ||
851 | mask = 1ull << cd->bit; | ||
852 | |||
853 | for_each_online_cpu(cpu) { | ||
854 | int index = octeon_coreid_for_cpu(cpu); | ||
855 | |||
856 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | ||
857 | enable_one = false; | ||
858 | cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask); | ||
859 | } else { | ||
860 | cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask); | ||
861 | } | ||
862 | } | ||
863 | return 0; | ||
864 | } | ||
746 | #endif | 865 | #endif |
747 | 866 | ||
748 | /* | 867 | /* |
@@ -752,6 +871,18 @@ static struct irq_chip octeon_irq_chip_ciu_v2 = {
752 | .name = "CIU", | 871 | .name = "CIU", |
753 | .irq_enable = octeon_irq_ciu_enable_v2, | 872 | .irq_enable = octeon_irq_ciu_enable_v2, |
754 | .irq_disable = octeon_irq_ciu_disable_all_v2, | 873 | .irq_disable = octeon_irq_ciu_disable_all_v2, |
874 | .irq_mask = octeon_irq_ciu_disable_local_v2, | ||
875 | .irq_unmask = octeon_irq_ciu_enable_v2, | ||
876 | #ifdef CONFIG_SMP | ||
877 | .irq_set_affinity = octeon_irq_ciu_set_affinity_v2, | ||
878 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, | ||
879 | #endif | ||
880 | }; | ||
881 | |||
882 | static struct irq_chip octeon_irq_chip_ciu_v2_edge = { | ||
883 | .name = "CIU", | ||
884 | .irq_enable = octeon_irq_ciu_enable_v2, | ||
885 | .irq_disable = octeon_irq_ciu_disable_all_v2, | ||
755 | .irq_ack = octeon_irq_ciu_ack, | 886 | .irq_ack = octeon_irq_ciu_ack, |
756 | .irq_mask = octeon_irq_ciu_disable_local_v2, | 887 | .irq_mask = octeon_irq_ciu_disable_local_v2, |
757 | .irq_unmask = octeon_irq_ciu_enable_v2, | 888 | .irq_unmask = octeon_irq_ciu_enable_v2, |
@@ -761,10 +892,50 @@ static struct irq_chip octeon_irq_chip_ciu_v2 = {
761 | #endif | 892 | #endif |
762 | }; | 893 | }; |
763 | 894 | ||
895 | /* | ||
896 | * Newer octeon chips have support for lockless CIU operation. | ||
897 | */ | ||
898 | static struct irq_chip octeon_irq_chip_ciu_sum2 = { | ||
899 | .name = "CIU", | ||
900 | .irq_enable = octeon_irq_ciu_enable_sum2, | ||
901 | .irq_disable = octeon_irq_ciu_disable_all_sum2, | ||
902 | .irq_mask = octeon_irq_ciu_disable_local_sum2, | ||
903 | .irq_unmask = octeon_irq_ciu_enable_sum2, | ||
904 | #ifdef CONFIG_SMP | ||
905 | .irq_set_affinity = octeon_irq_ciu_set_affinity_sum2, | ||
906 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, | ||
907 | #endif | ||
908 | }; | ||
909 | |||
910 | static struct irq_chip octeon_irq_chip_ciu_sum2_edge = { | ||
911 | .name = "CIU", | ||
912 | .irq_enable = octeon_irq_ciu_enable_sum2, | ||
913 | .irq_disable = octeon_irq_ciu_disable_all_sum2, | ||
914 | .irq_ack = octeon_irq_ciu_ack_sum2, | ||
915 | .irq_mask = octeon_irq_ciu_disable_local_sum2, | ||
916 | .irq_unmask = octeon_irq_ciu_enable_sum2, | ||
917 | #ifdef CONFIG_SMP | ||
918 | .irq_set_affinity = octeon_irq_ciu_set_affinity_sum2, | ||
919 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, | ||
920 | #endif | ||
921 | }; | ||
922 | |||
764 | static struct irq_chip octeon_irq_chip_ciu = { | 923 | static struct irq_chip octeon_irq_chip_ciu = { |
765 | .name = "CIU", | 924 | .name = "CIU", |
766 | .irq_enable = octeon_irq_ciu_enable, | 925 | .irq_enable = octeon_irq_ciu_enable, |
767 | .irq_disable = octeon_irq_ciu_disable_all, | 926 | .irq_disable = octeon_irq_ciu_disable_all, |
927 | .irq_mask = octeon_irq_ciu_disable_local, | ||
928 | .irq_unmask = octeon_irq_ciu_enable, | ||
929 | #ifdef CONFIG_SMP | ||
930 | .irq_set_affinity = octeon_irq_ciu_set_affinity, | ||
931 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, | ||
932 | #endif | ||
933 | }; | ||
934 | |||
935 | static struct irq_chip octeon_irq_chip_ciu_edge = { | ||
936 | .name = "CIU", | ||
937 | .irq_enable = octeon_irq_ciu_enable, | ||
938 | .irq_disable = octeon_irq_ciu_disable_all, | ||
768 | .irq_ack = octeon_irq_ciu_ack, | 939 | .irq_ack = octeon_irq_ciu_ack, |
769 | .irq_mask = octeon_irq_ciu_disable_local, | 940 | .irq_mask = octeon_irq_ciu_disable_local, |
770 | .irq_unmask = octeon_irq_ciu_enable, | 941 | .irq_unmask = octeon_irq_ciu_enable, |
@@ -970,11 +1141,12 @@ static int octeon_irq_ciu_xlat(struct irq_domain *d,
970 | unsigned int *out_type) | 1141 | unsigned int *out_type) |
971 | { | 1142 | { |
972 | unsigned int ciu, bit; | 1143 | unsigned int ciu, bit; |
1144 | struct octeon_irq_ciu_domain_data *dd = d->host_data; | ||
973 | 1145 | ||
974 | ciu = intspec[0]; | 1146 | ciu = intspec[0]; |
975 | bit = intspec[1]; | 1147 | bit = intspec[1]; |
976 | 1148 | ||
977 | if (ciu > 1 || bit > 63) | 1149 | if (ciu >= dd->num_sum || bit > 63) |
978 | return -EINVAL; | 1150 | return -EINVAL; |
979 | 1151 | ||
980 | *out_hwirq = (ciu << 6) | bit; | 1152 | *out_hwirq = (ciu << 6) | bit; |
@@ -984,6 +1156,7 @@ static int octeon_irq_ciu_xlat(struct irq_domain *d,
984 | } | 1156 | } |
985 | 1157 | ||
986 | static struct irq_chip *octeon_irq_ciu_chip; | 1158 | static struct irq_chip *octeon_irq_ciu_chip; |
1159 | static struct irq_chip *octeon_irq_ciu_chip_edge; | ||
987 | static struct irq_chip *octeon_irq_gpio_chip; | 1160 | static struct irq_chip *octeon_irq_gpio_chip; |
988 | 1161 | ||
989 | static bool octeon_irq_virq_in_range(unsigned int virq) | 1162 | static bool octeon_irq_virq_in_range(unsigned int virq) |
@@ -999,8 +1172,10 @@ static bool octeon_irq_virq_in_range(unsigned int virq)
999 | static int octeon_irq_ciu_map(struct irq_domain *d, | 1172 | static int octeon_irq_ciu_map(struct irq_domain *d, |
1000 | unsigned int virq, irq_hw_number_t hw) | 1173 | unsigned int virq, irq_hw_number_t hw) |
1001 | { | 1174 | { |
1175 | int rv; | ||
1002 | unsigned int line = hw >> 6; | 1176 | unsigned int line = hw >> 6; |
1003 | unsigned int bit = hw & 63; | 1177 | unsigned int bit = hw & 63; |
1178 | struct octeon_irq_ciu_domain_data *dd = d->host_data; | ||
1004 | 1179 | ||
1005 | if (!octeon_irq_virq_in_range(virq)) | 1180 | if (!octeon_irq_virq_in_range(virq)) |
1006 | return -EINVAL; | 1181 | return -EINVAL; |
@@ -1009,54 +1184,61 @@ static int octeon_irq_ciu_map(struct irq_domain *d,
1009 | if (line == 0 && bit >= 16 && bit <32) | 1184 | if (line == 0 && bit >= 16 && bit <32) |
1010 | return 0; | 1185 | return 0; |
1011 | 1186 | ||
1012 | if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0) | 1187 | if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0) |
1013 | return -EINVAL; | 1188 | return -EINVAL; |
1014 | 1189 | ||
1015 | if (octeon_irq_ciu_is_edge(line, bit)) | 1190 | if (line == 2) { |
1016 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, | 1191 | if (octeon_irq_ciu_is_edge(line, bit)) |
1017 | octeon_irq_ciu_chip, | 1192 | rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, |
1018 | handle_edge_irq); | 1193 | &octeon_irq_chip_ciu_sum2_edge, |
1019 | else | 1194 | handle_edge_irq); |
1020 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, | 1195 | else |
1021 | octeon_irq_ciu_chip, | 1196 | rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, |
1022 | handle_level_irq); | 1197 | &octeon_irq_chip_ciu_sum2, |
1023 | 1198 | handle_level_irq); | |
1024 | return 0; | 1199 | } else { |
1200 | if (octeon_irq_ciu_is_edge(line, bit)) | ||
1201 | rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, | ||
1202 | octeon_irq_ciu_chip_edge, | ||
1203 | handle_edge_irq); | ||
1204 | else | ||
1205 | rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, | ||
1206 | octeon_irq_ciu_chip, | ||
1207 | handle_level_irq); | ||
1208 | } | ||
1209 | return rv; | ||
1025 | } | 1210 | } |
1026 | 1211 | ||
1027 | static int octeon_irq_gpio_map_common(struct irq_domain *d, | 1212 | static int octeon_irq_gpio_map(struct irq_domain *d, |
1028 | unsigned int virq, irq_hw_number_t hw, | 1213 | unsigned int virq, irq_hw_number_t hw) |
1029 | int line_limit, struct irq_chip *chip) | ||
1030 | { | 1214 | { |
1031 | struct octeon_irq_gpio_domain_data *gpiod = d->host_data; | 1215 | struct octeon_irq_gpio_domain_data *gpiod = d->host_data; |
1032 | unsigned int line, bit; | 1216 | unsigned int line, bit; |
1217 | int r; | ||
1033 | 1218 | ||
1034 | if (!octeon_irq_virq_in_range(virq)) | 1219 | if (!octeon_irq_virq_in_range(virq)) |
1035 | return -EINVAL; | 1220 | return -EINVAL; |
1036 | 1221 | ||
1037 | line = (hw + gpiod->base_hwirq) >> 6; | 1222 | line = (hw + gpiod->base_hwirq) >> 6; |
1038 | bit = (hw + gpiod->base_hwirq) & 63; | 1223 | bit = (hw + gpiod->base_hwirq) & 63; |
1039 | if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0) | 1224 | if (line > ARRAY_SIZE(octeon_irq_ciu_to_irq) || |
1225 | octeon_irq_ciu_to_irq[line][bit] != 0) | ||
1040 | return -EINVAL; | 1226 | return -EINVAL; |
1041 | 1227 | ||
1042 | octeon_irq_set_ciu_mapping(virq, line, bit, hw, | 1228 | r = octeon_irq_set_ciu_mapping(virq, line, bit, hw, |
1043 | chip, octeon_irq_handle_gpio); | 1229 | octeon_irq_gpio_chip, octeon_irq_handle_trigger); |
1044 | return 0; | 1230 | return r; |
1045 | } | ||
1046 | |||
1047 | static int octeon_irq_gpio_map(struct irq_domain *d, | ||
1048 | unsigned int virq, irq_hw_number_t hw) | ||
1049 | { | ||
1050 | return octeon_irq_gpio_map_common(d, virq, hw, 1, octeon_irq_gpio_chip); | ||
1051 | } | 1231 | } |
1052 | 1232 | ||
1053 | static struct irq_domain_ops octeon_irq_domain_ciu_ops = { | 1233 | static struct irq_domain_ops octeon_irq_domain_ciu_ops = { |
1054 | .map = octeon_irq_ciu_map, | 1234 | .map = octeon_irq_ciu_map, |
1235 | .unmap = octeon_irq_free_cd, | ||
1055 | .xlate = octeon_irq_ciu_xlat, | 1236 | .xlate = octeon_irq_ciu_xlat, |
1056 | }; | 1237 | }; |
1057 | 1238 | ||
1058 | static struct irq_domain_ops octeon_irq_domain_gpio_ops = { | 1239 | static struct irq_domain_ops octeon_irq_domain_gpio_ops = { |
1059 | .map = octeon_irq_gpio_map, | 1240 | .map = octeon_irq_gpio_map, |
1241 | .unmap = octeon_irq_free_cd, | ||
1060 | .xlate = octeon_irq_gpio_xlat, | 1242 | .xlate = octeon_irq_gpio_xlat, |
1061 | }; | 1243 | }; |
1062 | 1244 | ||
@@ -1095,6 +1277,26 @@ static void octeon_irq_ip3_ciu(void)
1095 | } | 1277 | } |
1096 | } | 1278 | } |
1097 | 1279 | ||
1280 | static void octeon_irq_ip4_ciu(void) | ||
1281 | { | ||
1282 | int coreid = cvmx_get_core_num(); | ||
1283 | u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid)); | ||
1284 | u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid)); | ||
1285 | |||
1286 | ciu_sum &= ciu_en; | ||
1287 | if (likely(ciu_sum)) { | ||
1288 | int bit = fls64(ciu_sum) - 1; | ||
1289 | int irq = octeon_irq_ciu_to_irq[2][bit]; | ||
1290 | |||
1291 | if (likely(irq)) | ||
1292 | do_IRQ(irq); | ||
1293 | else | ||
1294 | spurious_interrupt(); | ||
1295 | } else { | ||
1296 | spurious_interrupt(); | ||
1297 | } | ||
1298 | } | ||
1299 | |||
1098 | static bool octeon_irq_use_ip4; | 1300 | static bool octeon_irq_use_ip4; |
1099 | 1301 | ||
1100 | static void octeon_irq_local_enable_ip4(void *arg) | 1302 | static void octeon_irq_local_enable_ip4(void *arg) |
@@ -1176,7 +1378,10 @@ static void octeon_irq_setup_secondary_ciu(void)
1176 | 1378 | ||
1177 | /* Enable the CIU lines */ | 1379 | /* Enable the CIU lines */ |
1178 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); | 1380 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); |
1179 | clear_c0_status(STATUSF_IP4); | 1381 | if (octeon_irq_use_ip4) |
1382 | set_c0_status(STATUSF_IP4); | ||
1383 | else | ||
1384 | clear_c0_status(STATUSF_IP4); | ||
1180 | } | 1385 | } |
1181 | 1386 | ||
1182 | static void octeon_irq_setup_secondary_ciu2(void) | 1387 | static void octeon_irq_setup_secondary_ciu2(void) |
@@ -1192,95 +1397,194 @@ static void octeon_irq_setup_secondary_ciu2(void)
1192 | clear_c0_status(STATUSF_IP4); | 1397 | clear_c0_status(STATUSF_IP4); |
1193 | } | 1398 | } |
1194 | 1399 | ||
1195 | static void __init octeon_irq_init_ciu(void) | 1400 | static int __init octeon_irq_init_ciu( |
1401 | struct device_node *ciu_node, struct device_node *parent) | ||
1196 | { | 1402 | { |
1197 | unsigned int i; | 1403 | unsigned int i, r; |
1198 | struct irq_chip *chip; | 1404 | struct irq_chip *chip; |
1405 | struct irq_chip *chip_edge; | ||
1199 | struct irq_chip *chip_mbox; | 1406 | struct irq_chip *chip_mbox; |
1200 | struct irq_chip *chip_wd; | 1407 | struct irq_chip *chip_wd; |
1201 | struct device_node *gpio_node; | ||
1202 | struct device_node *ciu_node; | ||
1203 | struct irq_domain *ciu_domain = NULL; | 1408 | struct irq_domain *ciu_domain = NULL; |
1409 | struct octeon_irq_ciu_domain_data *dd; | ||
1410 | |||
1411 | dd = kzalloc(sizeof(*dd), GFP_KERNEL); | ||
1412 | if (!dd) | ||
1413 | return -ENOMEM; | ||
1204 | 1414 | ||
1205 | octeon_irq_init_ciu_percpu(); | 1415 | octeon_irq_init_ciu_percpu(); |
1206 | octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu; | 1416 | octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu; |
1207 | 1417 | ||
1208 | octeon_irq_ip2 = octeon_irq_ip2_ciu; | 1418 | octeon_irq_ip2 = octeon_irq_ip2_ciu; |
1209 | octeon_irq_ip3 = octeon_irq_ip3_ciu; | 1419 | octeon_irq_ip3 = octeon_irq_ip3_ciu; |
1420 | if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) | ||
1421 | && !OCTEON_IS_MODEL(OCTEON_CN63XX)) { | ||
1422 | octeon_irq_ip4 = octeon_irq_ip4_ciu; | ||
1423 | dd->num_sum = 3; | ||
1424 | octeon_irq_use_ip4 = true; | ||
1425 | } else { | ||
1426 | octeon_irq_ip4 = octeon_irq_ip4_mask; | ||
1427 | dd->num_sum = 2; | ||
1428 | octeon_irq_use_ip4 = false; | ||
1429 | } | ||
1210 | if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) || | 1430 | if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) || |
1211 | OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || | 1431 | OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || |
1212 | OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) || | 1432 | OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) || |
1213 | OCTEON_IS_MODEL(OCTEON_CN6XXX)) { | 1433 | OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) { |
1214 | chip = &octeon_irq_chip_ciu_v2; | 1434 | chip = &octeon_irq_chip_ciu_v2; |
1435 | chip_edge = &octeon_irq_chip_ciu_v2_edge; | ||
1215 | chip_mbox = &octeon_irq_chip_ciu_mbox_v2; | 1436 | chip_mbox = &octeon_irq_chip_ciu_mbox_v2; |
1216 | chip_wd = &octeon_irq_chip_ciu_wd_v2; | 1437 | chip_wd = &octeon_irq_chip_ciu_wd_v2; |
1217 | octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2; | 1438 | octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2; |
1218 | } else { | 1439 | } else { |
1219 | chip = &octeon_irq_chip_ciu; | 1440 | chip = &octeon_irq_chip_ciu; |
1441 | chip_edge = &octeon_irq_chip_ciu_edge; | ||
1220 | chip_mbox = &octeon_irq_chip_ciu_mbox; | 1442 | chip_mbox = &octeon_irq_chip_ciu_mbox; |
1221 | chip_wd = &octeon_irq_chip_ciu_wd; | 1443 | chip_wd = &octeon_irq_chip_ciu_wd; |
1222 | octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio; | 1444 | octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio; |
1223 | } | 1445 | } |
1224 | octeon_irq_ciu_chip = chip; | 1446 | octeon_irq_ciu_chip = chip; |
1225 | octeon_irq_ip4 = octeon_irq_ip4_mask; | 1447 | octeon_irq_ciu_chip_edge = chip_edge; |
1226 | 1448 | ||
1227 | /* Mips internal */ | 1449 | /* Mips internal */ |
1228 | octeon_irq_init_core(); | 1450 | octeon_irq_init_core(); |
1229 | 1451 | ||
1230 | gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio"); | 1452 | ciu_domain = irq_domain_add_tree( |
1231 | if (gpio_node) { | 1453 | ciu_node, &octeon_irq_domain_ciu_ops, dd); |
1232 | struct octeon_irq_gpio_domain_data *gpiod; | 1454 | irq_set_default_host(ciu_domain); |
1233 | |||
1234 | gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL); | ||
1235 | if (gpiod) { | ||
1236 | /* gpio domain host_data is the base hwirq number. */ | ||
1237 | gpiod->base_hwirq = 16; | ||
1238 | irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod); | ||
1239 | of_node_put(gpio_node); | ||
1240 | } else | ||
1241 | pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); | ||
1242 | } else | ||
1243 | pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n"); | ||
1244 | |||
1245 | ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu"); | ||
1246 | if (ciu_node) { | ||
1247 | ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL); | ||
1248 | irq_set_default_host(ciu_domain); | ||
1249 | of_node_put(ciu_node); | ||
1250 | } else | ||
1251 | panic("Cannot find device node for cavium,octeon-3860-ciu."); | ||
1252 | 1455 | ||
1253 | /* CIU_0 */ | 1456 | /* CIU_0 */ |
1254 | for (i = 0; i < 16; i++) | 1457 | for (i = 0; i < 16; i++) { |
1255 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0); | 1458 | r = octeon_irq_force_ciu_mapping( |
1459 | ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0); | ||
1460 | if (r) | ||
1461 | goto err; | ||
1462 | } | ||
1463 | |||
1464 | r = octeon_irq_set_ciu_mapping( | ||
1465 | OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq); | ||
1466 | if (r) | ||
1467 | goto err; | ||
1468 | r = octeon_irq_set_ciu_mapping( | ||
1469 | OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq); | ||
1470 | if (r) | ||
1471 | goto err; | ||
1472 | |||
1473 | for (i = 0; i < 4; i++) { | ||
1474 | r = octeon_irq_force_ciu_mapping( | ||
1475 | ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36); | ||
1476 | if (r) | ||
1477 | goto err; | ||
1478 | } | ||
1479 | for (i = 0; i < 4; i++) { | ||
1480 | r = octeon_irq_force_ciu_mapping( | ||
1481 | ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40); | ||
1482 | if (r) | ||
1483 | goto err; | ||
1484 | } | ||
1256 | 1485 | ||
1257 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq); | 1486 | r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45); |
1258 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq); | 1487 | if (r) |
1488 | goto err; | ||
1259 | 1489 | ||
1260 | for (i = 0; i < 4; i++) | 1490 | r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46); |
1261 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36); | 1491 | if (r) |
1262 | for (i = 0; i < 4; i++) | 1492 | goto err; |
1263 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40); | ||
1264 | 1493 | ||
1265 | octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45); | 1494 | for (i = 0; i < 4; i++) { |
1266 | octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46); | 1495 | r = octeon_irq_force_ciu_mapping( |
1267 | for (i = 0; i < 4; i++) | 1496 | ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52); |
1268 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52); | 1497 | if (r) |
1498 | goto err; | ||
1499 | } | ||
1500 | |||
1501 | r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56); | ||
1502 | if (r) | ||
1503 | goto err; | ||
1269 | 1504 | ||
1270 | octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56); | 1505 | r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59); |
1271 | octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59); | 1506 | if (r) |
1507 | goto err; | ||
1272 | 1508 | ||
1273 | /* CIU_1 */ | 1509 | /* CIU_1 */ |
1274 | for (i = 0; i < 16; i++) | 1510 | for (i = 0; i < 16; i++) { |
1275 | octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd, handle_level_irq); | 1511 | r = octeon_irq_set_ciu_mapping( |
1512 | i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd, | ||
1513 | handle_level_irq); | ||
1514 | if (r) | ||
1515 | goto err; | ||
1516 | } | ||
1276 | 1517 | ||
1277 | octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17); | 1518 | r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17); |
1519 | if (r) | ||
1520 | goto err; | ||
1278 | 1521 | ||
1279 | /* Enable the CIU lines */ | 1522 | /* Enable the CIU lines */ |
1280 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); | 1523 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); |
1281 | clear_c0_status(STATUSF_IP4); | 1524 | if (octeon_irq_use_ip4) |
1525 | set_c0_status(STATUSF_IP4); | ||
1526 | else | ||
1527 | clear_c0_status(STATUSF_IP4); | ||
1528 | |||
1529 | return 0; | ||
1530 | err: | ||
1531 | return r; | ||
1282 | } | 1532 | } |
1283 | 1533 | ||
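The reworked octeon_irq_init_ciu() above no longer ignores mapping failures: every octeon_irq_force_ciu_mapping()/octeon_irq_set_ciu_mapping() call is checked, the first error is returned to the caller, and IP4 is only enabled when octeon_irq_use_ip4 is set. A minimal sketch of the checked-mapping idiom; map_range() is an illustrative helper name, not a function in the driver:

    static int __init map_range(struct irq_domain *d, int first_irq,
                                int line, int first_bit, int count)
    {
            int i, r;

            for (i = 0; i < count; i++) {
                    /* stop at the first failure and let the caller report it */
                    r = octeon_irq_force_ciu_mapping(d, first_irq + i,
                                                     line, first_bit + i);
                    if (r)
                            return r;
            }
            return 0;
    }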
1534 | static int __init octeon_irq_init_gpio( | ||
1535 | struct device_node *gpio_node, struct device_node *parent) | ||
1536 | { | ||
1537 | struct octeon_irq_gpio_domain_data *gpiod; | ||
1538 | u32 interrupt_cells; | ||
1539 | unsigned int base_hwirq; | ||
1540 | int r; | ||
1541 | |||
1542 | r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells); | ||
1543 | if (r) | ||
1544 | return r; | ||
1545 | |||
1546 | if (interrupt_cells == 1) { | ||
1547 | u32 v; | ||
1548 | |||
1549 | r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v); | ||
1550 | if (r) { | ||
1551 | pr_warn("No \"interrupts\" property.\n"); | ||
1552 | return r; | ||
1553 | } | ||
1554 | base_hwirq = v; | ||
1555 | } else if (interrupt_cells == 2) { | ||
1556 | u32 v0, v1; | ||
1557 | |||
1558 | r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0); | ||
1559 | if (r) { | ||
1560 | pr_warn("No \"interrupts\" property.\n"); | ||
1561 | return r; | ||
1562 | } | ||
1563 | r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1); | ||
1564 | if (r) { | ||
1565 | pr_warn("No \"interrupts\" property.\n"); | ||
1566 | return r; | ||
1567 | } | ||
1568 | base_hwirq = (v0 << 6) | v1; | ||
1569 | } else { | ||
1570 | pr_warn("Bad \"#interrupt-cells\" property: %u\n", | ||
1571 | interrupt_cells); | ||
1572 | return -EINVAL; | ||
1573 | } | ||
1574 | |||
1575 | gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL); | ||
1576 | if (gpiod) { | ||
1577 | /* gpio domain host_data is the base hwirq number. */ | ||
1578 | gpiod->base_hwirq = base_hwirq; | ||
1579 | irq_domain_add_linear( | ||
1580 | gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod); | ||
1581 | } else { | ||
1582 | pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); | ||
1583 | return -ENOMEM; | ||
1584 | } | ||
1585 | |||
1586 | return 0; | ||
1587 | } | ||
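octeon_irq_init_gpio() derives the GPIO domain's base hwirq from the parent interrupt specifier instead of hard-coding it: a one-cell parent supplies the base directly, and a two-cell parent packs (line << 6) | bit. With values chosen to reproduce what the removed code hard-wired, a CIU specifier of <0 16> yields 16 and a CIU2 specifier of <7 0> yields 7 << 6; the exact device-tree contents are assumptions here, since the DTS is not part of this diff. A small sketch of the packing:

    /* illustrative helper, not part of the driver */
    static unsigned int gpio_base_hwirq(u32 line, u32 bit)
    {
            return (line << 6) | bit;   /* 6 bits of "bit" within a CIU line */
    }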
1284 | /* | 1588 | /* |
1285 | * Watchdog interrupts are special. They are associated with a single | 1589 | * Watchdog interrupts are special. They are associated with a single |
1286 | * core, so we hardwire the affinity to that core. | 1590 | * core, so we hardwire the affinity to that core. |
@@ -1290,12 +1594,13 @@ static void octeon_irq_ciu2_wd_enable(struct irq_data *data) | |||
1290 | u64 mask; | 1594 | u64 mask; |
1291 | u64 en_addr; | 1595 | u64 en_addr; |
1292 | int coreid = data->irq - OCTEON_IRQ_WDOG0; | 1596 | int coreid = data->irq - OCTEON_IRQ_WDOG0; |
1293 | union octeon_ciu_chip_data cd; | 1597 | struct octeon_ciu_chip_data *cd; |
1294 | 1598 | ||
1295 | cd.p = irq_data_get_irq_chip_data(data); | 1599 | cd = irq_data_get_irq_chip_data(data); |
1296 | mask = 1ull << (cd.s.bit); | 1600 | mask = 1ull << (cd->bit); |
1297 | 1601 | ||
1298 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); | 1602 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + |
1603 | (0x1000ull * cd->line); | ||
1299 | cvmx_write_csr(en_addr, mask); | 1604 | cvmx_write_csr(en_addr, mask); |
1300 | 1605 | ||
1301 | } | 1606 | } |
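The chip-data conversion above (the packed union octeon_ciu_chip_data becomes a pointer to struct octeon_ciu_chip_data) does not change the register arithmetic: the enable mask is still 1ull << bit, and each CIU2 line's enable window sits 0x1000 bytes after the previous one. A sketch of that address calculation, using the same CVMX macro the code uses:

    static u64 ciu2_en_w1s_addr(int coreid, int line)
    {
            /* consecutive CIU2 lines have their W1S enable registers 0x1000 apart */
            return CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + 0x1000ull * line;
    }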
@@ -1306,12 +1611,13 @@ static void octeon_irq_ciu2_enable(struct irq_data *data) | |||
1306 | u64 en_addr; | 1611 | u64 en_addr; |
1307 | int cpu = next_cpu_for_irq(data); | 1612 | int cpu = next_cpu_for_irq(data); |
1308 | int coreid = octeon_coreid_for_cpu(cpu); | 1613 | int coreid = octeon_coreid_for_cpu(cpu); |
1309 | union octeon_ciu_chip_data cd; | 1614 | struct octeon_ciu_chip_data *cd; |
1310 | 1615 | ||
1311 | cd.p = irq_data_get_irq_chip_data(data); | 1616 | cd = irq_data_get_irq_chip_data(data); |
1312 | mask = 1ull << (cd.s.bit); | 1617 | mask = 1ull << (cd->bit); |
1313 | 1618 | ||
1314 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); | 1619 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + |
1620 | (0x1000ull * cd->line); | ||
1315 | cvmx_write_csr(en_addr, mask); | 1621 | cvmx_write_csr(en_addr, mask); |
1316 | } | 1622 | } |
1317 | 1623 | ||
@@ -1320,12 +1626,13 @@ static void octeon_irq_ciu2_enable_local(struct irq_data *data) | |||
1320 | u64 mask; | 1626 | u64 mask; |
1321 | u64 en_addr; | 1627 | u64 en_addr; |
1322 | int coreid = cvmx_get_core_num(); | 1628 | int coreid = cvmx_get_core_num(); |
1323 | union octeon_ciu_chip_data cd; | 1629 | struct octeon_ciu_chip_data *cd; |
1324 | 1630 | ||
1325 | cd.p = irq_data_get_irq_chip_data(data); | 1631 | cd = irq_data_get_irq_chip_data(data); |
1326 | mask = 1ull << (cd.s.bit); | 1632 | mask = 1ull << (cd->bit); |
1327 | 1633 | ||
1328 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); | 1634 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + |
1635 | (0x1000ull * cd->line); | ||
1329 | cvmx_write_csr(en_addr, mask); | 1636 | cvmx_write_csr(en_addr, mask); |
1330 | 1637 | ||
1331 | } | 1638 | } |
@@ -1335,12 +1642,13 @@ static void octeon_irq_ciu2_disable_local(struct irq_data *data) | |||
1335 | u64 mask; | 1642 | u64 mask; |
1336 | u64 en_addr; | 1643 | u64 en_addr; |
1337 | int coreid = cvmx_get_core_num(); | 1644 | int coreid = cvmx_get_core_num(); |
1338 | union octeon_ciu_chip_data cd; | 1645 | struct octeon_ciu_chip_data *cd; |
1339 | 1646 | ||
1340 | cd.p = irq_data_get_irq_chip_data(data); | 1647 | cd = irq_data_get_irq_chip_data(data); |
1341 | mask = 1ull << (cd.s.bit); | 1648 | mask = 1ull << (cd->bit); |
1342 | 1649 | ||
1343 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + (0x1000ull * cd.s.line); | 1650 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + |
1651 | (0x1000ull * cd->line); | ||
1344 | cvmx_write_csr(en_addr, mask); | 1652 | cvmx_write_csr(en_addr, mask); |
1345 | 1653 | ||
1346 | } | 1654 | } |
@@ -1350,12 +1658,12 @@ static void octeon_irq_ciu2_ack(struct irq_data *data) | |||
1350 | u64 mask; | 1658 | u64 mask; |
1351 | u64 en_addr; | 1659 | u64 en_addr; |
1352 | int coreid = cvmx_get_core_num(); | 1660 | int coreid = cvmx_get_core_num(); |
1353 | union octeon_ciu_chip_data cd; | 1661 | struct octeon_ciu_chip_data *cd; |
1354 | 1662 | ||
1355 | cd.p = irq_data_get_irq_chip_data(data); | 1663 | cd = irq_data_get_irq_chip_data(data); |
1356 | mask = 1ull << (cd.s.bit); | 1664 | mask = 1ull << (cd->bit); |
1357 | 1665 | ||
1358 | en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd.s.line); | 1666 | en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line); |
1359 | cvmx_write_csr(en_addr, mask); | 1667 | cvmx_write_csr(en_addr, mask); |
1360 | 1668 | ||
1361 | } | 1669 | } |
@@ -1364,13 +1672,14 @@ static void octeon_irq_ciu2_disable_all(struct irq_data *data) | |||
1364 | { | 1672 | { |
1365 | int cpu; | 1673 | int cpu; |
1366 | u64 mask; | 1674 | u64 mask; |
1367 | union octeon_ciu_chip_data cd; | 1675 | struct octeon_ciu_chip_data *cd; |
1368 | 1676 | ||
1369 | cd.p = irq_data_get_irq_chip_data(data); | 1677 | cd = irq_data_get_irq_chip_data(data); |
1370 | mask = 1ull << (cd.s.bit); | 1678 | mask = 1ull << (cd->bit); |
1371 | 1679 | ||
1372 | for_each_online_cpu(cpu) { | 1680 | for_each_online_cpu(cpu) { |
1373 | u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); | 1681 | u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C( |
1682 | octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line); | ||
1374 | cvmx_write_csr(en_addr, mask); | 1683 | cvmx_write_csr(en_addr, mask); |
1375 | } | 1684 | } |
1376 | } | 1685 | } |
@@ -1383,7 +1692,8 @@ static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data) | |||
1383 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); | 1692 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); |
1384 | 1693 | ||
1385 | for_each_online_cpu(cpu) { | 1694 | for_each_online_cpu(cpu) { |
1386 | u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(octeon_coreid_for_cpu(cpu)); | 1695 | u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S( |
1696 | octeon_coreid_for_cpu(cpu)); | ||
1387 | cvmx_write_csr(en_addr, mask); | 1697 | cvmx_write_csr(en_addr, mask); |
1388 | } | 1698 | } |
1389 | } | 1699 | } |
@@ -1396,7 +1706,8 @@ static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data) | |||
1396 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); | 1706 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); |
1397 | 1707 | ||
1398 | for_each_online_cpu(cpu) { | 1708 | for_each_online_cpu(cpu) { |
1399 | u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(octeon_coreid_for_cpu(cpu)); | 1709 | u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C( |
1710 | octeon_coreid_for_cpu(cpu)); | ||
1400 | cvmx_write_csr(en_addr, mask); | 1711 | cvmx_write_csr(en_addr, mask); |
1401 | } | 1712 | } |
1402 | } | 1713 | } |
@@ -1430,21 +1741,25 @@ static int octeon_irq_ciu2_set_affinity(struct irq_data *data, | |||
1430 | int cpu; | 1741 | int cpu; |
1431 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); | 1742 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); |
1432 | u64 mask; | 1743 | u64 mask; |
1433 | union octeon_ciu_chip_data cd; | 1744 | struct octeon_ciu_chip_data *cd; |
1434 | 1745 | ||
1435 | if (!enable_one) | 1746 | if (!enable_one) |
1436 | return 0; | 1747 | return 0; |
1437 | 1748 | ||
1438 | cd.p = irq_data_get_irq_chip_data(data); | 1749 | cd = irq_data_get_irq_chip_data(data); |
1439 | mask = 1ull << cd.s.bit; | 1750 | mask = 1ull << cd->bit; |
1440 | 1751 | ||
1441 | for_each_online_cpu(cpu) { | 1752 | for_each_online_cpu(cpu) { |
1442 | u64 en_addr; | 1753 | u64 en_addr; |
1443 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | 1754 | if (cpumask_test_cpu(cpu, dest) && enable_one) { |
1444 | enable_one = false; | 1755 | enable_one = false; |
1445 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); | 1756 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S( |
1757 | octeon_coreid_for_cpu(cpu)) + | ||
1758 | (0x1000ull * cd->line); | ||
1446 | } else { | 1759 | } else { |
1447 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); | 1760 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C( |
1761 | octeon_coreid_for_cpu(cpu)) + | ||
1762 | (0x1000ull * cd->line); | ||
1448 | } | 1763 | } |
1449 | cvmx_write_csr(en_addr, mask); | 1764 | cvmx_write_csr(en_addr, mask); |
1450 | } | 1765 | } |
@@ -1461,10 +1776,11 @@ static void octeon_irq_ciu2_enable_gpio(struct irq_data *data) | |||
1461 | 1776 | ||
1462 | static void octeon_irq_ciu2_disable_gpio(struct irq_data *data) | 1777 | static void octeon_irq_ciu2_disable_gpio(struct irq_data *data) |
1463 | { | 1778 | { |
1464 | union octeon_ciu_chip_data cd; | 1779 | struct octeon_ciu_chip_data *cd; |
1465 | cd.p = irq_data_get_irq_chip_data(data); | 1780 | |
1781 | cd = irq_data_get_irq_chip_data(data); | ||
1466 | 1782 | ||
1467 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); | 1783 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0); |
1468 | 1784 | ||
1469 | octeon_irq_ciu2_disable_all(data); | 1785 | octeon_irq_ciu2_disable_all(data); |
1470 | } | 1786 | } |
@@ -1473,6 +1789,18 @@ static struct irq_chip octeon_irq_chip_ciu2 = { | |||
1473 | .name = "CIU2-E", | 1789 | .name = "CIU2-E", |
1474 | .irq_enable = octeon_irq_ciu2_enable, | 1790 | .irq_enable = octeon_irq_ciu2_enable, |
1475 | .irq_disable = octeon_irq_ciu2_disable_all, | 1791 | .irq_disable = octeon_irq_ciu2_disable_all, |
1792 | .irq_mask = octeon_irq_ciu2_disable_local, | ||
1793 | .irq_unmask = octeon_irq_ciu2_enable, | ||
1794 | #ifdef CONFIG_SMP | ||
1795 | .irq_set_affinity = octeon_irq_ciu2_set_affinity, | ||
1796 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, | ||
1797 | #endif | ||
1798 | }; | ||
1799 | |||
1800 | static struct irq_chip octeon_irq_chip_ciu2_edge = { | ||
1801 | .name = "CIU2-E", | ||
1802 | .irq_enable = octeon_irq_ciu2_enable, | ||
1803 | .irq_disable = octeon_irq_ciu2_disable_all, | ||
1476 | .irq_ack = octeon_irq_ciu2_ack, | 1804 | .irq_ack = octeon_irq_ciu2_ack, |
1477 | .irq_mask = octeon_irq_ciu2_disable_local, | 1805 | .irq_mask = octeon_irq_ciu2_disable_local, |
1478 | .irq_unmask = octeon_irq_ciu2_enable, | 1806 | .irq_unmask = octeon_irq_ciu2_enable, |
@@ -1582,7 +1910,7 @@ static int octeon_irq_ciu2_map(struct irq_domain *d, | |||
1582 | 1910 | ||
1583 | if (octeon_irq_ciu2_is_edge(line, bit)) | 1911 | if (octeon_irq_ciu2_is_edge(line, bit)) |
1584 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, | 1912 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, |
1585 | &octeon_irq_chip_ciu2, | 1913 | &octeon_irq_chip_ciu2_edge, |
1586 | handle_edge_irq); | 1914 | handle_edge_irq); |
1587 | else | 1915 | else |
1588 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, | 1916 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, |
@@ -1591,22 +1919,13 @@ static int octeon_irq_ciu2_map(struct irq_domain *d, | |||
1591 | 1919 | ||
1592 | return 0; | 1920 | return 0; |
1593 | } | 1921 | } |
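octeon_irq_ciu2_map() now distinguishes the two chips: edge sources get octeon_irq_chip_ciu2_edge (which carries .irq_ack) with handle_edge_irq, while level sources keep octeon_irq_chip_ciu2 with handle_level_irq. A hypothetical helper, only to make the selection explicit; the real decision comes from octeon_irq_ciu2_is_edge(line, bit):

    static struct irq_chip *ciu2_pick_chip(bool is_edge)
    {
            /* edge sources need an ack callback, level sources do not */
            return is_edge ? &octeon_irq_chip_ciu2_edge : &octeon_irq_chip_ciu2;
    }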
1594 | static int octeon_irq_ciu2_gpio_map(struct irq_domain *d, | ||
1595 | unsigned int virq, irq_hw_number_t hw) | ||
1596 | { | ||
1597 | return octeon_irq_gpio_map_common(d, virq, hw, 7, &octeon_irq_chip_ciu2_gpio); | ||
1598 | } | ||
1599 | 1922 | ||
1600 | static struct irq_domain_ops octeon_irq_domain_ciu2_ops = { | 1923 | static struct irq_domain_ops octeon_irq_domain_ciu2_ops = { |
1601 | .map = octeon_irq_ciu2_map, | 1924 | .map = octeon_irq_ciu2_map, |
1925 | .unmap = octeon_irq_free_cd, | ||
1602 | .xlate = octeon_irq_ciu2_xlat, | 1926 | .xlate = octeon_irq_ciu2_xlat, |
1603 | }; | 1927 | }; |
1604 | 1928 | ||
1605 | static struct irq_domain_ops octeon_irq_domain_ciu2_gpio_ops = { | ||
1606 | .map = octeon_irq_ciu2_gpio_map, | ||
1607 | .xlate = octeon_irq_gpio_xlat, | ||
1608 | }; | ||
1609 | |||
1610 | static void octeon_irq_ciu2(void) | 1929 | static void octeon_irq_ciu2(void) |
1611 | { | 1930 | { |
1612 | int line; | 1931 | int line; |
@@ -1674,16 +1993,16 @@ out: | |||
1674 | return; | 1993 | return; |
1675 | } | 1994 | } |
1676 | 1995 | ||
1677 | static void __init octeon_irq_init_ciu2(void) | 1996 | static int __init octeon_irq_init_ciu2( |
1997 | struct device_node *ciu_node, struct device_node *parent) | ||
1678 | { | 1998 | { |
1679 | unsigned int i; | 1999 | unsigned int i, r; |
1680 | struct device_node *gpio_node; | ||
1681 | struct device_node *ciu_node; | ||
1682 | struct irq_domain *ciu_domain = NULL; | 2000 | struct irq_domain *ciu_domain = NULL; |
1683 | 2001 | ||
1684 | octeon_irq_init_ciu2_percpu(); | 2002 | octeon_irq_init_ciu2_percpu(); |
1685 | octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2; | 2003 | octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2; |
1686 | 2004 | ||
2005 | octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio; | ||
1687 | octeon_irq_ip2 = octeon_irq_ciu2; | 2006 | octeon_irq_ip2 = octeon_irq_ciu2; |
1688 | octeon_irq_ip3 = octeon_irq_ciu2_mbox; | 2007 | octeon_irq_ip3 = octeon_irq_ciu2_mbox; |
1689 | octeon_irq_ip4 = octeon_irq_ip4_mask; | 2008 | octeon_irq_ip4 = octeon_irq_ip4_mask; |
@@ -1691,47 +2010,49 @@ static void __init octeon_irq_init_ciu2(void) | |||
1691 | /* Mips internal */ | 2010 | /* Mips internal */ |
1692 | octeon_irq_init_core(); | 2011 | octeon_irq_init_core(); |
1693 | 2012 | ||
1694 | gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio"); | 2013 | ciu_domain = irq_domain_add_tree( |
1695 | if (gpio_node) { | 2014 | ciu_node, &octeon_irq_domain_ciu2_ops, NULL); |
1696 | struct octeon_irq_gpio_domain_data *gpiod; | 2015 | irq_set_default_host(ciu_domain); |
1697 | |||
1698 | gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL); | ||
1699 | if (gpiod) { | ||
1700 | /* gpio domain host_data is the base hwirq number. */ | ||
1701 | gpiod->base_hwirq = 7 << 6; | ||
1702 | irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_ciu2_gpio_ops, gpiod); | ||
1703 | of_node_put(gpio_node); | ||
1704 | } else | ||
1705 | pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); | ||
1706 | } else | ||
1707 | pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n"); | ||
1708 | |||
1709 | ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-6880-ciu2"); | ||
1710 | if (ciu_node) { | ||
1711 | ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu2_ops, NULL); | ||
1712 | irq_set_default_host(ciu_domain); | ||
1713 | of_node_put(ciu_node); | ||
1714 | } else | ||
1715 | panic("Cannot find device node for cavium,octeon-6880-ciu2."); | ||
1716 | 2016 | ||
1717 | /* CUI2 */ | 2017 | /* CUI2 */ |
1718 | for (i = 0; i < 64; i++) | 2018 | for (i = 0; i < 64; i++) { |
1719 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i); | 2019 | r = octeon_irq_force_ciu_mapping( |
2020 | ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i); | ||
2021 | if (r) | ||
2022 | goto err; | ||
2023 | } | ||
1720 | 2024 | ||
1721 | for (i = 0; i < 32; i++) | 2025 | for (i = 0; i < 32; i++) { |
1722 | octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0, | 2026 | r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0, |
1723 | &octeon_irq_chip_ciu2_wd, handle_level_irq); | 2027 | &octeon_irq_chip_ciu2_wd, handle_level_irq); |
2028 | if (r) | ||
2029 | goto err; | ||
2030 | } | ||
1724 | 2031 | ||
1725 | for (i = 0; i < 4; i++) | 2032 | for (i = 0; i < 4; i++) { |
1726 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8); | 2033 | r = octeon_irq_force_ciu_mapping( |
2034 | ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8); | ||
2035 | if (r) | ||
2036 | goto err; | ||
2037 | } | ||
1727 | 2038 | ||
1728 | octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44); | 2039 | r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44); |
2040 | if (r) | ||
2041 | goto err; | ||
1729 | 2042 | ||
1730 | for (i = 0; i < 4; i++) | 2043 | for (i = 0; i < 4; i++) { |
1731 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i); | 2044 | r = octeon_irq_force_ciu_mapping( |
2045 | ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i); | ||
2046 | if (r) | ||
2047 | goto err; | ||
2048 | } | ||
1732 | 2049 | ||
1733 | for (i = 0; i < 4; i++) | 2050 | for (i = 0; i < 4; i++) { |
1734 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8); | 2051 | r = octeon_irq_force_ciu_mapping( |
2052 | ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8); | ||
2053 | if (r) | ||
2054 | goto err; | ||
2055 | } | ||
1735 | 2056 | ||
1736 | irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); | 2057 | irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); |
1737 | irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); | 2058 | irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); |
@@ -1741,8 +2062,242 @@ static void __init octeon_irq_init_ciu2(void) | |||
1741 | /* Enable the CIU lines */ | 2062 | /* Enable the CIU lines */ |
1742 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); | 2063 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); |
1743 | clear_c0_status(STATUSF_IP4); | 2064 | clear_c0_status(STATUSF_IP4); |
2065 | return 0; | ||
2066 | err: | ||
2067 | return r; | ||
2068 | } | ||
2069 | |||
2070 | struct octeon_irq_cib_host_data { | ||
2071 | raw_spinlock_t lock; | ||
2072 | u64 raw_reg; | ||
2073 | u64 en_reg; | ||
2074 | int max_bits; | ||
2075 | }; | ||
2076 | |||
2077 | struct octeon_irq_cib_chip_data { | ||
2078 | struct octeon_irq_cib_host_data *host_data; | ||
2079 | int bit; | ||
2080 | }; | ||
2081 | |||
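The CIB controller added here is modelled as a RAW/EN register pair shared by up to max_bits sources: octeon_irq_cib_host_data holds the two register addresses and a raw spinlock, and each mapped source gets a small octeon_irq_cib_chip_data carrying its bit index. A minimal sketch of how a pending source is identified, assuming those structures:

    static bool cib_bit_pending(struct octeon_irq_cib_host_data *hd, int bit)
    {
            /* a source is pending when it is raised (RAW) and unmasked (EN) */
            u64 raw = cvmx_read_csr(hd->raw_reg);
            u64 en = cvmx_read_csr(hd->en_reg);

            return (raw & en & (1ull << bit)) != 0;
    }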
2082 | static void octeon_irq_cib_enable(struct irq_data *data) | ||
2083 | { | ||
2084 | unsigned long flags; | ||
2085 | u64 en; | ||
2086 | struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data); | ||
2087 | struct octeon_irq_cib_host_data *host_data = cd->host_data; | ||
2088 | |||
2089 | raw_spin_lock_irqsave(&host_data->lock, flags); | ||
2090 | en = cvmx_read_csr(host_data->en_reg); | ||
2091 | en |= 1ull << cd->bit; | ||
2092 | cvmx_write_csr(host_data->en_reg, en); | ||
2093 | raw_spin_unlock_irqrestore(&host_data->lock, flags); | ||
2094 | } | ||
2095 | |||
2096 | static void octeon_irq_cib_disable(struct irq_data *data) | ||
2097 | { | ||
2098 | unsigned long flags; | ||
2099 | u64 en; | ||
2100 | struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data); | ||
2101 | struct octeon_irq_cib_host_data *host_data = cd->host_data; | ||
2102 | |||
2103 | raw_spin_lock_irqsave(&host_data->lock, flags); | ||
2104 | en = cvmx_read_csr(host_data->en_reg); | ||
2105 | en &= ~(1ull << cd->bit); | ||
2106 | cvmx_write_csr(host_data->en_reg, en); | ||
2107 | raw_spin_unlock_irqrestore(&host_data->lock, flags); | ||
2108 | } | ||
2109 | |||
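octeon_irq_cib_enable() and octeon_irq_cib_disable() share one pattern: a read-modify-write of the EN register under the host raw spinlock, so concurrent mask/unmask of different bits on the same controller cannot lose updates. A condensed sketch of that pattern; cib_write_en_bit() is an illustrative name, not a driver function:

    static void cib_write_en_bit(struct octeon_irq_cib_host_data *hd,
                                 int bit, bool set)
    {
            unsigned long flags;
            u64 en;

            raw_spin_lock_irqsave(&hd->lock, flags);
            en = cvmx_read_csr(hd->en_reg);
            if (set)
                    en |= 1ull << bit;
            else
                    en &= ~(1ull << bit);
            cvmx_write_csr(hd->en_reg, en);
            raw_spin_unlock_irqrestore(&hd->lock, flags);
    }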
2110 | static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t) | ||
2111 | { | ||
2112 | irqd_set_trigger_type(data, t); | ||
2113 | return IRQ_SET_MASK_OK; | ||
2114 | } | ||
2115 | |||
2116 | static struct irq_chip octeon_irq_chip_cib = { | ||
2117 | .name = "CIB", | ||
2118 | .irq_enable = octeon_irq_cib_enable, | ||
2119 | .irq_disable = octeon_irq_cib_disable, | ||
2120 | .irq_mask = octeon_irq_cib_disable, | ||
2121 | .irq_unmask = octeon_irq_cib_enable, | ||
2122 | .irq_set_type = octeon_irq_cib_set_type, | ||
2123 | }; | ||
2124 | |||
2125 | static int octeon_irq_cib_xlat(struct irq_domain *d, | ||
2126 | struct device_node *node, | ||
2127 | const u32 *intspec, | ||
2128 | unsigned int intsize, | ||
2129 | unsigned long *out_hwirq, | ||
2130 | unsigned int *out_type) | ||
2131 | { | ||
2132 | unsigned int type = 0; | ||
2133 | |||
2134 | if (intsize == 2) | ||
2135 | type = intspec[1]; | ||
2136 | |||
2137 | switch (type) { | ||
2138 | case 0: /* unofficial value, but we might as well let it work. */ | ||
2139 | case 4: /* official value for level triggering. */ | ||
2140 | *out_type = IRQ_TYPE_LEVEL_HIGH; | ||
2141 | break; | ||
2142 | case 1: /* official value for edge triggering. */ | ||
2143 | *out_type = IRQ_TYPE_EDGE_RISING; | ||
2144 | break; | ||
2145 | default: /* Nothing else is acceptable. */ | ||
2146 | return -EINVAL; | ||
2147 | } | ||
2148 | |||
2149 | *out_hwirq = intspec[0]; | ||
2150 | |||
2151 | return 0; | ||
2152 | } | ||
2153 | |||
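octeon_irq_cib_xlat() accepts a one- or two-cell specifier: the first cell is the bit number, the optional second cell is the trigger type (4, or the unofficial 0, for level-high; 1 for edge-rising; anything else rejected). A usage sketch with an assumed domain pointer and example specifier values:

    static void cib_xlat_example(struct irq_domain *cib_domain)
    {
            u32 spec[2] = { 6, 1 };   /* example: bit 6, official edge value */
            unsigned long hwirq;
            unsigned int type;

            /* expect hwirq == 6 and type == IRQ_TYPE_EDGE_RISING */
            if (!octeon_irq_cib_xlat(cib_domain, NULL, spec, 2, &hwirq, &type))
                    pr_info("cib hwirq %lu type %u\n", hwirq, type);
    }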
2154 | static int octeon_irq_cib_map(struct irq_domain *d, | ||
2155 | unsigned int virq, irq_hw_number_t hw) | ||
2156 | { | ||
2157 | struct octeon_irq_cib_host_data *host_data = d->host_data; | ||
2158 | struct octeon_irq_cib_chip_data *cd; | ||
2159 | |||
2160 | if (hw >= host_data->max_bits) { | ||
2161 | pr_err("ERROR: %s mapping %u is too big!\n", ||
2162 | d->of_node->name, (unsigned)hw); | ||
2163 | return -EINVAL; | ||
2164 | } | ||
2165 | |||
2166 | cd = kzalloc(sizeof(*cd), GFP_KERNEL); | ||
2167 | cd->host_data = host_data; | ||
2168 | cd->bit = hw; | ||
2169 | |||
2170 | irq_set_chip_and_handler(virq, &octeon_irq_chip_cib, | ||
2171 | handle_simple_irq); | ||
2172 | irq_set_chip_data(virq, cd); | ||
2173 | return 0; | ||
1744 | } | 2174 | } |
1745 | 2175 | ||
2176 | static struct irq_domain_ops octeon_irq_domain_cib_ops = { | ||
2177 | .map = octeon_irq_cib_map, | ||
2178 | .unmap = octeon_irq_free_cd, | ||
2179 | .xlate = octeon_irq_cib_xlat, | ||
2180 | }; | ||
2181 | |||
2182 | /* Chain to real handler. */ | ||
2183 | static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data) | ||
2184 | { | ||
2185 | u64 en; | ||
2186 | u64 raw; | ||
2187 | u64 bits; | ||
2188 | int i; | ||
2189 | int irq; | ||
2190 | struct irq_domain *cib_domain = data; | ||
2191 | struct octeon_irq_cib_host_data *host_data = cib_domain->host_data; | ||
2192 | |||
2193 | en = cvmx_read_csr(host_data->en_reg); | ||
2194 | raw = cvmx_read_csr(host_data->raw_reg); | ||
2195 | |||
2196 | bits = en & raw; | ||
2197 | |||
2198 | for (i = 0; i < host_data->max_bits; i++) { | ||
2199 | if ((bits & 1ull << i) == 0) | ||
2200 | continue; | ||
2201 | irq = irq_find_mapping(cib_domain, i); | ||
2202 | if (!irq) { | ||
2203 | unsigned long flags; | ||
2204 | |||
2205 | pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n", | ||
2206 | i, host_data->raw_reg); | ||
2207 | raw_spin_lock_irqsave(&host_data->lock, flags); | ||
2208 | en = cvmx_read_csr(host_data->en_reg); | ||
2209 | en &= ~(1ull << i); | ||
2210 | cvmx_write_csr(host_data->en_reg, en); | ||
2211 | cvmx_write_csr(host_data->raw_reg, 1ull << i); | ||
2212 | raw_spin_unlock_irqrestore(&host_data->lock, flags); | ||
2213 | } else { | ||
2214 | struct irq_desc *desc = irq_to_desc(irq); | ||
2215 | struct irq_data *irq_data = irq_desc_get_irq_data(desc); | ||
2216 | /* If edge, acknowledge the bit we will be sending. */ | ||
2217 | if (irqd_get_trigger_type(irq_data) & | ||
2218 | IRQ_TYPE_EDGE_BOTH) | ||
2219 | cvmx_write_csr(host_data->raw_reg, 1ull << i); | ||
2220 | generic_handle_irq_desc(irq, desc); | ||
2221 | } | ||
2222 | } | ||
2223 | |||
2224 | return IRQ_HANDLED; | ||
2225 | } | ||
2226 | |||
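octeon_irq_cib_handler() is the chained handler for the whole block: it reads EN and RAW, walks the AND of the two, and for each set bit either dispatches the mapped virq or, if nothing is mapped, masks and acks the bit so it cannot storm. A simplified sketch of that per-bit decision; it omits the locking and the edge-ack step the real handler performs:

    static void cib_dispatch_bit(struct irq_domain *d,
                                 struct octeon_irq_cib_host_data *hd, int i)
    {
            int irq = irq_find_mapping(d, i);

            if (irq)
                    generic_handle_irq(irq);
            else
                    /* unmapped: ack it; the real handler also masks it */
                    cvmx_write_csr(hd->raw_reg, 1ull << i);
    }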
2227 | static int __init octeon_irq_init_cib(struct device_node *ciu_node, | ||
2228 | struct device_node *parent) | ||
2229 | { | ||
2230 | const __be32 *addr; | ||
2231 | u32 val; | ||
2232 | struct octeon_irq_cib_host_data *host_data; | ||
2233 | int parent_irq; | ||
2234 | int r; | ||
2235 | struct irq_domain *cib_domain; | ||
2236 | |||
2237 | parent_irq = irq_of_parse_and_map(ciu_node, 0); | ||
2238 | if (!parent_irq) { | ||
2239 | pr_err("ERROR: Couldn't acquire parent_irq for %s.\n", ||
2240 | ciu_node->name); | ||
2241 | return -EINVAL; | ||
2242 | } | ||
2243 | |||
2244 | host_data = kzalloc(sizeof(*host_data), GFP_KERNEL); | ||
2245 | raw_spin_lock_init(&host_data->lock); | ||
2246 | |||
2247 | addr = of_get_address(ciu_node, 0, NULL, NULL); | ||
2248 | if (!addr) { | ||
2249 | pr_err("ERROR: Couldn't acquire reg(0) %s.\n", ciu_node->name); ||
2250 | return -EINVAL; | ||
2251 | } | ||
2252 | host_data->raw_reg = (u64)phys_to_virt( | ||
2253 | of_translate_address(ciu_node, addr)); | ||
2254 | |||
2255 | addr = of_get_address(ciu_node, 1, NULL, NULL); | ||
2256 | if (!addr) { | ||
2257 | pr_err("ERROR: Couldn't acquire reg(1) %s.\n", ciu_node->name); ||
2258 | return -EINVAL; | ||
2259 | } | ||
2260 | host_data->en_reg = (u64)phys_to_virt( | ||
2261 | of_translate_address(ciu_node, addr)); | ||
2262 | |||
2263 | r = of_property_read_u32(ciu_node, "cavium,max-bits", &val); | ||
2264 | if (r) { | ||
2265 | pr_err("ERROR: Couldn't read cavium,max-bits from %s.\n", ||
2266 | ciu_node->name); | ||
2267 | return r; | ||
2268 | } | ||
2269 | host_data->max_bits = val; | ||
2270 | |||
2271 | cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits, | ||
2272 | &octeon_irq_domain_cib_ops, | ||
2273 | host_data); | ||
2274 | if (!cib_domain) { | ||
2275 | pr_err("ERROR: Couldn't irq_domain_add_linear().\n"); ||
2276 | return -ENOMEM; | ||
2277 | } | ||
2278 | |||
2279 | cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */ | ||
2280 | cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */ | ||
2281 | |||
2282 | r = request_irq(parent_irq, octeon_irq_cib_handler, | ||
2283 | IRQF_NO_THREAD, "cib", cib_domain); | ||
2284 | if (r) { | ||
2285 | pr_err("request_irq cib failed %d\n", r); | ||
2286 | return r; | ||
2287 | } | ||
2288 | pr_info("CIB interrupt controller probed: %llx %d\n", | ||
2289 | host_data->raw_reg, host_data->max_bits); | ||
2290 | return 0; | ||
2291 | } | ||
2292 | |||
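octeon_irq_init_cib() pulls both register addresses out of the node's reg property, translates them to CPU addresses, clears EN and acks everything in RAW, and only then chains the parent interrupt with IRQF_NO_THREAD. A sketch of the address plumbing, with an illustrative helper name:

    static u64 cib_reg_from_dt(struct device_node *np, int index)
    {
            const __be32 *addr = of_get_address(np, index, NULL, NULL);

            if (!addr)
                    return 0;
            /* translate the DT address to physical, then to a kernel virtual
             * address usable with cvmx_read_csr()/cvmx_write_csr() */
            return (u64)phys_to_virt(of_translate_address(np, addr));
    }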
2293 | static struct of_device_id ciu_types[] __initdata = { | ||
2294 | {.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu}, | ||
2295 | {.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio}, | ||
2296 | {.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2}, | ||
2297 | {.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib}, | ||
2298 | {} | ||
2299 | }; | ||
2300 | |||
1746 | void __init arch_init_irq(void) | 2301 | void __init arch_init_irq(void) |
1747 | { | 2302 | { |
1748 | #ifdef CONFIG_SMP | 2303 | #ifdef CONFIG_SMP |
@@ -1750,10 +2305,7 @@ void __init arch_init_irq(void) | |||
1750 | cpumask_clear(irq_default_affinity); | 2305 | cpumask_clear(irq_default_affinity); |
1751 | cpumask_set_cpu(smp_processor_id(), irq_default_affinity); | 2306 | cpumask_set_cpu(smp_processor_id(), irq_default_affinity); |
1752 | #endif | 2307 | #endif |
1753 | if (OCTEON_IS_MODEL(OCTEON_CN68XX)) | 2308 | of_irq_init(ciu_types); |
1754 | octeon_irq_init_ciu2(); | ||
1755 | else | ||
1756 | octeon_irq_init_ciu(); | ||
1757 | } | 2309 | } |
1758 | 2310 | ||
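arch_init_irq() no longer picks the controller by CPU model; of_irq_init() matches the device tree against the ciu_types table above and calls each .data callback with (node, parent), parent-first, which is why the init functions were converted to the two-argument, int-returning form. A hypothetical extra entry, only to show the expected callback shape:

    static int __init my_ciu_init(struct device_node *node,
                                  struct device_node *parent)
    {
            /* build and register an irq_domain for "node" here */
            return 0;
    }

    static struct of_device_id more_ciu_types[] __initdata = {
            { .compatible = "vendor,hypothetical-ciu", .data = my_ciu_init },
            {}
    };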
1759 | asmlinkage void plat_irq_dispatch(void) | 2311 | asmlinkage void plat_irq_dispatch(void) |
@@ -1767,13 +2319,13 @@ asmlinkage void plat_irq_dispatch(void) | |||
1767 | cop0_cause &= cop0_status; | 2319 | cop0_cause &= cop0_status; |
1768 | cop0_cause &= ST0_IM; | 2320 | cop0_cause &= ST0_IM; |
1769 | 2321 | ||
1770 | if (unlikely(cop0_cause & STATUSF_IP2)) | 2322 | if (cop0_cause & STATUSF_IP2) |
1771 | octeon_irq_ip2(); | 2323 | octeon_irq_ip2(); |
1772 | else if (unlikely(cop0_cause & STATUSF_IP3)) | 2324 | else if (cop0_cause & STATUSF_IP3) |
1773 | octeon_irq_ip3(); | 2325 | octeon_irq_ip3(); |
1774 | else if (unlikely(cop0_cause & STATUSF_IP4)) | 2326 | else if (cop0_cause & STATUSF_IP4) |
1775 | octeon_irq_ip4(); | 2327 | octeon_irq_ip4(); |
1776 | else if (likely(cop0_cause)) | 2328 | else if (cop0_cause) |
1777 | do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE); | 2329 | do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE); |
1778 | else | 2330 | else |
1779 | break; | 2331 | break; |
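plat_irq_dispatch() drops the unlikely()/likely() hints but keeps the same priority order: IP2, then IP3, then IP4, then any remaining cause bit via fls(). A worked example of that fallback computation, with an assumed pending line:

    static int example_cpu_irq(void)
    {
            unsigned long cause = CAUSEF_IP7;   /* example: bit 15 pending */

            /* fls() returns 16, so this is MIPS_CPU_IRQ_BASE + 7 */
            return fls(cause) - 9 + MIPS_CPU_IRQ_BASE;
    }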
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 94f888d3384e..a42110e7edbc 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <asm/octeon/octeon.h> | 41 | #include <asm/octeon/octeon.h> |
42 | #include <asm/octeon/pci-octeon.h> | 42 | #include <asm/octeon/pci-octeon.h> |
43 | #include <asm/octeon/cvmx-mio-defs.h> | 43 | #include <asm/octeon/cvmx-mio-defs.h> |
44 | #include <asm/octeon/cvmx-rst-defs.h> | ||
44 | 45 | ||
45 | extern struct plat_smp_ops octeon_smp_ops; | 46 | extern struct plat_smp_ops octeon_smp_ops; |
46 | 47 | ||
@@ -579,12 +580,10 @@ void octeon_user_io_init(void) | |||
579 | /* R/W If set, CVMSEG is available for loads/stores in user | 580 | /* R/W If set, CVMSEG is available for loads/stores in user |
580 | * mode. */ | 581 | * mode. */ |
581 | cvmmemctl.s.cvmsegenau = 0; | 582 | cvmmemctl.s.cvmsegenau = 0; |
582 | /* R/W Size of local memory in cache blocks, 54 (6912 bytes) | ||
583 | * is max legal value. */ | ||
584 | cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE; | ||
585 | 583 | ||
586 | write_c0_cvmmemctl(cvmmemctl.u64); | 584 | write_c0_cvmmemctl(cvmmemctl.u64); |
587 | 585 | ||
586 | /* Setup of CVMSEG is done in kernel-entry-init.h */ | ||
588 | if (smp_processor_id() == 0) | 587 | if (smp_processor_id() == 0) |
589 | pr_notice("CVMSEG size: %d cache lines (%d bytes)\n", | 588 | pr_notice("CVMSEG size: %d cache lines (%d bytes)\n", |
590 | CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE, | 589 | CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE, |
@@ -615,6 +614,7 @@ void __init prom_init(void) | |||
615 | const char *arg; | 614 | const char *arg; |
616 | char *p; | 615 | char *p; |
617 | int i; | 616 | int i; |
617 | u64 t; | ||
618 | int argc; | 618 | int argc; |
619 | #ifdef CONFIG_CAVIUM_RESERVE32 | 619 | #ifdef CONFIG_CAVIUM_RESERVE32 |
620 | int64_t addr = -1; | 620 | int64_t addr = -1; |
@@ -654,15 +654,56 @@ void __init prom_init(void) | |||
654 | sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz; | 654 | sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz; |
655 | sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags; | 655 | sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags; |
656 | 656 | ||
657 | if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { | 657 | if (OCTEON_IS_OCTEON2()) { |
658 | /* I/O clock runs at a different rate than the CPU. */ | 658 | /* I/O clock runs at a different rate than the CPU. */ |
659 | union cvmx_mio_rst_boot rst_boot; | 659 | union cvmx_mio_rst_boot rst_boot; |
660 | rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT); | 660 | rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT); |
661 | octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul; | 661 | octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul; |
662 | } else if (OCTEON_IS_OCTEON3()) { | ||
663 | /* I/O clock runs at a different rate than the CPU. */ | ||
664 | union cvmx_rst_boot rst_boot; | ||
665 | rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT); | ||
666 | octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul; | ||
662 | } else { | 667 | } else { |
663 | octeon_io_clock_rate = sysinfo->cpu_clock_hz; | 668 | octeon_io_clock_rate = sysinfo->cpu_clock_hz; |
664 | } | 669 | } |
665 | 670 | ||
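On OCTEON II and OCTEON III the I/O clock is derived from the pnr_mul field of the reset/boot register rather than from the CPU clock; only the register read differs (MIO_RST_BOOT vs RST_BOOT). A worked example with an assumed multiplier value:

    static u64 example_io_clock_hz(unsigned int pnr_mul)
    {
            return 50000000ull * pnr_mul;   /* e.g. pnr_mul == 12 -> 600 MHz */
    }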
671 | t = read_c0_cvmctl(); | ||
672 | if ((t & (1ull << 27)) == 0) { | ||
673 | /* | ||
674 | * Setup the multiplier save/restore code if | ||
675 | * CvmCtl[NOMUL] clear. | ||
676 | */ | ||
677 | void *save; | ||
678 | void *save_end; | ||
679 | void *restore; | ||
680 | void *restore_end; | ||
681 | int save_len; | ||
682 | int restore_len; | ||
683 | int save_max = (char *)octeon_mult_save_end - | ||
684 | (char *)octeon_mult_save; | ||
685 | int restore_max = (char *)octeon_mult_restore_end - | ||
686 | (char *)octeon_mult_restore; | ||
687 | if (current_cpu_data.cputype == CPU_CAVIUM_OCTEON3) { | ||
688 | save = octeon_mult_save3; | ||
689 | save_end = octeon_mult_save3_end; | ||
690 | restore = octeon_mult_restore3; | ||
691 | restore_end = octeon_mult_restore3_end; | ||
692 | } else { | ||
693 | save = octeon_mult_save2; | ||
694 | save_end = octeon_mult_save2_end; | ||
695 | restore = octeon_mult_restore2; | ||
696 | restore_end = octeon_mult_restore2_end; | ||
697 | } | ||
698 | save_len = (char *)save_end - (char *)save; | ||
699 | restore_len = (char *)restore_end - (char *)restore; | ||
700 | if (!WARN_ON(save_len > save_max || | ||
701 | restore_len > restore_max)) { | ||
702 | memcpy(octeon_mult_save, save, save_len); | ||
703 | memcpy(octeon_mult_restore, restore, restore_len); | ||
704 | } | ||
705 | } | ||
706 | |||
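The new block above checks CvmCtl bit 27 (the NOMUL flag, per the comment) and, when the extended multiplier state is present, copies the CPU-family-specific save/restore stub into a fixed landing area, with WARN_ON guarding against a stub that would overflow the reserved space. A generic sketch of that patch-in pattern; the symbol names here are hypothetical, not the octeon_mult_* symbols:

    extern char landing_area[], landing_area_end[];   /* hypothetical reserved slot */

    static void patch_stub(const char *stub, const char *stub_end)
    {
            size_t len = stub_end - stub;

            /* refuse to overflow the reserved slot */
            if (!WARN_ON(len > (size_t)(landing_area_end - landing_area)))
                    memcpy(landing_area, stub, len);
    }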
666 | /* | 707 | /* |
667 | * Only enable the LED controller if we're running on a CN38XX, CN58XX, | 708 | * Only enable the LED controller if we're running on a CN38XX, CN58XX, |
668 | * or CN56XX. The CN30XX and CN31XX don't have an LED controller. | 709 | * or CN56XX. The CN30XX and CN31XX don't have an LED controller. |
@@ -1004,7 +1045,7 @@ EXPORT_SYMBOL(prom_putchar); | |||
1004 | 1045 | ||
1005 | void prom_free_prom_memory(void) | 1046 | void prom_free_prom_memory(void) |
1006 | { | 1047 | { |
1007 | if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)) { | 1048 | if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR) { |
1008 | /* Check for presence of Core-14449 fix. */ | 1049 | /* Check for presence of Core-14449 fix. */ |
1009 | u32 insn; | 1050 | u32 insn; |
1010 | u32 *foo; | 1051 | u32 *foo; |
@@ -1026,8 +1067,9 @@ void prom_free_prom_memory(void) | |||
1026 | panic("No PREF instruction at Core-14449 probe point."); | 1067 | panic("No PREF instruction at Core-14449 probe point."); |
1027 | 1068 | ||
1028 | if (((insn >> 16) & 0x1f) != 28) | 1069 | if (((insn >> 16) & 0x1f) != 28) |
1029 | panic("Core-14449 WAR not in place (%04x).\n" | 1070 | panic("OCTEON II DCache prefetch workaround not in place (%04x).\n" |
1030 | "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).", insn); | 1071 | "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).", |
1072 | insn); | ||
1031 | } | 1073 | } |
1032 | } | 1074 | } |
1033 | 1075 | ||