diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2011-05-14 06:06:36 -0400 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2011-05-14 06:06:36 -0400 |
commit | a18f22a968de17b29f2310cdb7ba69163e65ec15 (patch) | |
tree | a7d56d88fad5e444d7661484109758a2f436129e /arch/mips/cavium-octeon | |
parent | a1c57e0fec53defe745e64417eacdbd3618c3e66 (diff) | |
parent | 798778b8653f64b7b2162ac70eca10367cff6ce8 (diff) |
Merge branch 'consolidate-clksrc-i8253' of master.kernel.org:~rmk/linux-2.6-arm into timers/clocksource
Conflicts:
arch/ia64/kernel/cyclone.c
arch/mips/kernel/i8253.c
arch/x86/kernel/i8253.c
Reason: Resolve conflicts so further cleanups do not conflict further
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/mips/cavium-octeon')
-rw-r--r-- | arch/mips/cavium-octeon/executive/octeon-model.c | 2 | ||||
-rw-r--r-- | arch/mips/cavium-octeon/octeon-irq.c | 1389 | ||||
-rw-r--r-- | arch/mips/cavium-octeon/octeon-platform.c | 2 | ||||
-rw-r--r-- | arch/mips/cavium-octeon/setup.c | 14 | ||||
-rw-r--r-- | arch/mips/cavium-octeon/smp.c | 39 |
5 files changed, 796 insertions, 650 deletions
diff --git a/arch/mips/cavium-octeon/executive/octeon-model.c b/arch/mips/cavium-octeon/executive/octeon-model.c index 9afc3794ed1b..c8d35684504e 100644 --- a/arch/mips/cavium-octeon/executive/octeon-model.c +++ b/arch/mips/cavium-octeon/executive/octeon-model.c | |||
@@ -75,7 +75,7 @@ const char *octeon_model_get_string_buffer(uint32_t chip_id, char *buffer) | |||
75 | 75 | ||
76 | num_cores = cvmx_octeon_num_cores(); | 76 | num_cores = cvmx_octeon_num_cores(); |
77 | 77 | ||
78 | /* Make sure the non existant devices look disabled */ | 78 | /* Make sure the non existent devices look disabled */ |
79 | switch ((chip_id >> 8) & 0xff) { | 79 | switch ((chip_id >> 8) & 0xff) { |
80 | case 6: /* CN50XX */ | 80 | case 6: /* CN50XX */ |
81 | case 2: /* CN30XX */ | 81 | case 2: /* CN30XX */ |
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index ce7500cdf5b7..ffd4ae660f79 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c | |||
@@ -3,10 +3,13 @@ | |||
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks | 6 | * Copyright (C) 2004-2008, 2009, 2010, 2011 Cavium Networks |
7 | */ | 7 | */ |
8 | #include <linux/irq.h> | 8 | |
9 | #include <linux/interrupt.h> | 9 | #include <linux/interrupt.h> |
10 | #include <linux/bitops.h> | ||
11 | #include <linux/percpu.h> | ||
12 | #include <linux/irq.h> | ||
10 | #include <linux/smp.h> | 13 | #include <linux/smp.h> |
11 | 14 | ||
12 | #include <asm/octeon/octeon.h> | 15 | #include <asm/octeon/octeon.h> |
@@ -14,6 +17,47 @@ | |||
14 | static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock); | 17 | static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock); |
15 | static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock); | 18 | static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock); |
16 | 19 | ||
20 | static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror); | ||
21 | static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror); | ||
22 | |||
23 | static __read_mostly u8 octeon_irq_ciu_to_irq[8][64]; | ||
24 | |||
25 | union octeon_ciu_chip_data { | ||
26 | void *p; | ||
27 | unsigned long l; | ||
28 | struct { | ||
29 | unsigned int line:6; | ||
30 | unsigned int bit:6; | ||
31 | } s; | ||
32 | }; | ||
33 | |||
34 | struct octeon_core_chip_data { | ||
35 | struct mutex core_irq_mutex; | ||
36 | bool current_en; | ||
37 | bool desired_en; | ||
38 | u8 bit; | ||
39 | }; | ||
40 | |||
41 | #define MIPS_CORE_IRQ_LINES 8 | ||
42 | |||
43 | static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES]; | ||
44 | |||
45 | static void __init octeon_irq_set_ciu_mapping(int irq, int line, int bit, | ||
46 | struct irq_chip *chip, | ||
47 | irq_flow_handler_t handler) | ||
48 | { | ||
49 | union octeon_ciu_chip_data cd; | ||
50 | |||
51 | irq_set_chip_and_handler(irq, chip, handler); | ||
52 | |||
53 | cd.l = 0; | ||
54 | cd.s.line = line; | ||
55 | cd.s.bit = bit; | ||
56 | |||
57 | irq_set_chip_data(irq, cd.p); | ||
58 | octeon_irq_ciu_to_irq[line][bit] = irq; | ||
59 | } | ||
60 | |||
17 | static int octeon_coreid_for_cpu(int cpu) | 61 | static int octeon_coreid_for_cpu(int cpu) |
18 | { | 62 | { |
19 | #ifdef CONFIG_SMP | 63 | #ifdef CONFIG_SMP |
@@ -23,9 +67,20 @@ static int octeon_coreid_for_cpu(int cpu) | |||
23 | #endif | 67 | #endif |
24 | } | 68 | } |
25 | 69 | ||
26 | static void octeon_irq_core_ack(unsigned int irq) | 70 | static int octeon_cpu_for_coreid(int coreid) |
71 | { | ||
72 | #ifdef CONFIG_SMP | ||
73 | return cpu_number_map(coreid); | ||
74 | #else | ||
75 | return smp_processor_id(); | ||
76 | #endif | ||
77 | } | ||
78 | |||
79 | static void octeon_irq_core_ack(struct irq_data *data) | ||
27 | { | 80 | { |
28 | unsigned int bit = irq - OCTEON_IRQ_SW0; | 81 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
82 | unsigned int bit = cd->bit; | ||
83 | |||
29 | /* | 84 | /* |
30 | * We don't need to disable IRQs to make these atomic since | 85 | * We don't need to disable IRQs to make these atomic since |
31 | * they are already disabled earlier in the low level | 86 | * they are already disabled earlier in the low level |
@@ -37,131 +92,121 @@ static void octeon_irq_core_ack(unsigned int irq) | |||
37 | clear_c0_cause(0x100 << bit); | 92 | clear_c0_cause(0x100 << bit); |
38 | } | 93 | } |
39 | 94 | ||
40 | static void octeon_irq_core_eoi(unsigned int irq) | 95 | static void octeon_irq_core_eoi(struct irq_data *data) |
41 | { | 96 | { |
42 | struct irq_desc *desc = irq_to_desc(irq); | 97 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
43 | unsigned int bit = irq - OCTEON_IRQ_SW0; | 98 | |
44 | /* | ||
45 | * If an IRQ is being processed while we are disabling it the | ||
46 | * handler will attempt to unmask the interrupt after it has | ||
47 | * been disabled. | ||
48 | */ | ||
49 | if ((unlikely(desc->status & IRQ_DISABLED))) | ||
50 | return; | ||
51 | /* | 99 | /* |
52 | * We don't need to disable IRQs to make these atomic since | 100 | * We don't need to disable IRQs to make these atomic since |
53 | * they are already disabled earlier in the low level | 101 | * they are already disabled earlier in the low level |
54 | * interrupt code. | 102 | * interrupt code. |
55 | */ | 103 | */ |
56 | set_c0_status(0x100 << bit); | 104 | set_c0_status(0x100 << cd->bit); |
57 | } | 105 | } |
58 | 106 | ||
59 | static void octeon_irq_core_enable(unsigned int irq) | 107 | static void octeon_irq_core_set_enable_local(void *arg) |
60 | { | 108 | { |
61 | unsigned long flags; | 109 | struct irq_data *data = arg; |
62 | unsigned int bit = irq - OCTEON_IRQ_SW0; | 110 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
111 | unsigned int mask = 0x100 << cd->bit; | ||
63 | 112 | ||
64 | /* | 113 | /* |
65 | * We need to disable interrupts to make sure our updates are | 114 | * Interrupts are already disabled, so these are atomic. |
66 | * atomic. | ||
67 | */ | 115 | */ |
68 | local_irq_save(flags); | 116 | if (cd->desired_en) |
69 | set_c0_status(0x100 << bit); | 117 | set_c0_status(mask); |
70 | local_irq_restore(flags); | 118 | else |
119 | clear_c0_status(mask); | ||
120 | |||
71 | } | 121 | } |
72 | 122 | ||
73 | static void octeon_irq_core_disable_local(unsigned int irq) | 123 | static void octeon_irq_core_disable(struct irq_data *data) |
74 | { | 124 | { |
75 | unsigned long flags; | 125 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
76 | unsigned int bit = irq - OCTEON_IRQ_SW0; | 126 | cd->desired_en = false; |
77 | /* | ||
78 | * We need to disable interrupts to make sure our updates are | ||
79 | * atomic. | ||
80 | */ | ||
81 | local_irq_save(flags); | ||
82 | clear_c0_status(0x100 << bit); | ||
83 | local_irq_restore(flags); | ||
84 | } | 127 | } |
85 | 128 | ||
86 | static void octeon_irq_core_disable(unsigned int irq) | 129 | static void octeon_irq_core_enable(struct irq_data *data) |
87 | { | 130 | { |
88 | #ifdef CONFIG_SMP | 131 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
89 | on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local, | 132 | cd->desired_en = true; |
90 | (void *) (long) irq, 1); | ||
91 | #else | ||
92 | octeon_irq_core_disable_local(irq); | ||
93 | #endif | ||
94 | } | 133 | } |
95 | 134 | ||
96 | static struct irq_chip octeon_irq_chip_core = { | 135 | static void octeon_irq_core_bus_lock(struct irq_data *data) |
97 | .name = "Core", | 136 | { |
98 | .enable = octeon_irq_core_enable, | 137 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
99 | .disable = octeon_irq_core_disable, | ||
100 | .ack = octeon_irq_core_ack, | ||
101 | .eoi = octeon_irq_core_eoi, | ||
102 | }; | ||
103 | 138 | ||
139 | mutex_lock(&cd->core_irq_mutex); | ||
140 | } | ||
104 | 141 | ||
105 | static void octeon_irq_ciu0_ack(unsigned int irq) | 142 | static void octeon_irq_core_bus_sync_unlock(struct irq_data *data) |
106 | { | 143 | { |
107 | switch (irq) { | 144 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
108 | case OCTEON_IRQ_GMX_DRP0: | 145 | |
109 | case OCTEON_IRQ_GMX_DRP1: | 146 | if (cd->desired_en != cd->current_en) { |
110 | case OCTEON_IRQ_IPD_DRP: | 147 | on_each_cpu(octeon_irq_core_set_enable_local, data, 1); |
111 | case OCTEON_IRQ_KEY_ZERO: | 148 | |
112 | case OCTEON_IRQ_TIMER0: | 149 | cd->current_en = cd->desired_en; |
113 | case OCTEON_IRQ_TIMER1: | ||
114 | case OCTEON_IRQ_TIMER2: | ||
115 | case OCTEON_IRQ_TIMER3: | ||
116 | { | ||
117 | int index = cvmx_get_core_num() * 2; | ||
118 | u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); | ||
119 | /* | ||
120 | * CIU timer type interrupts must be acknoleged by | ||
121 | * writing a '1' bit to their sum0 bit. | ||
122 | */ | ||
123 | cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask); | ||
124 | break; | ||
125 | } | ||
126 | default: | ||
127 | break; | ||
128 | } | 150 | } |
129 | 151 | ||
130 | /* | 152 | mutex_unlock(&cd->core_irq_mutex); |
131 | * In order to avoid any locking accessing the CIU, we | ||
132 | * acknowledge CIU interrupts by disabling all of them. This | ||
133 | * way we can use a per core register and avoid any out of | ||
134 | * core locking requirements. This has the side affect that | ||
135 | * CIU interrupts can't be processed recursively. | ||
136 | * | ||
137 | * We don't need to disable IRQs to make these atomic since | ||
138 | * they are already disabled earlier in the low level | ||
139 | * interrupt code. | ||
140 | */ | ||
141 | clear_c0_status(0x100 << 2); | ||
142 | } | 153 | } |
143 | 154 | ||
144 | static void octeon_irq_ciu0_eoi(unsigned int irq) | 155 | static struct irq_chip octeon_irq_chip_core = { |
156 | .name = "Core", | ||
157 | .irq_enable = octeon_irq_core_enable, | ||
158 | .irq_disable = octeon_irq_core_disable, | ||
159 | .irq_ack = octeon_irq_core_ack, | ||
160 | .irq_eoi = octeon_irq_core_eoi, | ||
161 | .irq_bus_lock = octeon_irq_core_bus_lock, | ||
162 | .irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock, | ||
163 | |||
164 | .irq_cpu_online = octeon_irq_core_eoi, | ||
165 | .irq_cpu_offline = octeon_irq_core_ack, | ||
166 | .flags = IRQCHIP_ONOFFLINE_ENABLED, | ||
167 | }; | ||
168 | |||
169 | static void __init octeon_irq_init_core(void) | ||
145 | { | 170 | { |
146 | /* | 171 | int i; |
147 | * Enable all CIU interrupts again. We don't need to disable | 172 | int irq; |
148 | * IRQs to make these atomic since they are already disabled | 173 | struct octeon_core_chip_data *cd; |
149 | * earlier in the low level interrupt code. | 174 | |
150 | */ | 175 | for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) { |
151 | set_c0_status(0x100 << 2); | 176 | cd = &octeon_irq_core_chip_data[i]; |
177 | cd->current_en = false; | ||
178 | cd->desired_en = false; | ||
179 | cd->bit = i; | ||
180 | mutex_init(&cd->core_irq_mutex); | ||
181 | |||
182 | irq = OCTEON_IRQ_SW0 + i; | ||
183 | switch (irq) { | ||
184 | case OCTEON_IRQ_TIMER: | ||
185 | case OCTEON_IRQ_SW0: | ||
186 | case OCTEON_IRQ_SW1: | ||
187 | case OCTEON_IRQ_5: | ||
188 | case OCTEON_IRQ_PERF: | ||
189 | irq_set_chip_data(irq, cd); | ||
190 | irq_set_chip_and_handler(irq, &octeon_irq_chip_core, | ||
191 | handle_percpu_irq); | ||
192 | break; | ||
193 | default: | ||
194 | break; | ||
195 | } | ||
196 | } | ||
152 | } | 197 | } |
153 | 198 | ||
154 | static int next_coreid_for_irq(struct irq_desc *desc) | 199 | static int next_cpu_for_irq(struct irq_data *data) |
155 | { | 200 | { |
156 | 201 | ||
157 | #ifdef CONFIG_SMP | 202 | #ifdef CONFIG_SMP |
158 | int coreid; | 203 | int cpu; |
159 | int weight = cpumask_weight(desc->affinity); | 204 | int weight = cpumask_weight(data->affinity); |
160 | 205 | ||
161 | if (weight > 1) { | 206 | if (weight > 1) { |
162 | int cpu = smp_processor_id(); | 207 | cpu = smp_processor_id(); |
163 | for (;;) { | 208 | for (;;) { |
164 | cpu = cpumask_next(cpu, desc->affinity); | 209 | cpu = cpumask_next(cpu, data->affinity); |
165 | if (cpu >= nr_cpu_ids) { | 210 | if (cpu >= nr_cpu_ids) { |
166 | cpu = -1; | 211 | cpu = -1; |
167 | continue; | 212 | continue; |
@@ -169,83 +214,175 @@ static int next_coreid_for_irq(struct irq_desc *desc) | |||
169 | break; | 214 | break; |
170 | } | 215 | } |
171 | } | 216 | } |
172 | coreid = octeon_coreid_for_cpu(cpu); | ||
173 | } else if (weight == 1) { | 217 | } else if (weight == 1) { |
174 | coreid = octeon_coreid_for_cpu(cpumask_first(desc->affinity)); | 218 | cpu = cpumask_first(data->affinity); |
175 | } else { | 219 | } else { |
176 | coreid = cvmx_get_core_num(); | 220 | cpu = smp_processor_id(); |
177 | } | 221 | } |
178 | return coreid; | 222 | return cpu; |
179 | #else | 223 | #else |
180 | return cvmx_get_core_num(); | 224 | return smp_processor_id(); |
181 | #endif | 225 | #endif |
182 | } | 226 | } |
183 | 227 | ||
184 | static void octeon_irq_ciu0_enable(unsigned int irq) | 228 | static void octeon_irq_ciu_enable(struct irq_data *data) |
185 | { | 229 | { |
186 | struct irq_desc *desc = irq_to_desc(irq); | 230 | int cpu = next_cpu_for_irq(data); |
187 | int coreid = next_coreid_for_irq(desc); | 231 | int coreid = octeon_coreid_for_cpu(cpu); |
232 | unsigned long *pen; | ||
188 | unsigned long flags; | 233 | unsigned long flags; |
189 | uint64_t en0; | 234 | union octeon_ciu_chip_data cd; |
190 | int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */ | 235 | |
236 | cd.p = irq_data_get_irq_chip_data(data); | ||
191 | 237 | ||
192 | raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); | 238 | if (cd.s.line == 0) { |
193 | en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); | 239 | raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); |
194 | en0 |= 1ull << bit; | 240 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); |
195 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0); | 241 | set_bit(cd.s.bit, pen); |
196 | cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); | 242 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); |
197 | raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); | 243 | raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); |
244 | } else { | ||
245 | raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); | ||
246 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); | ||
247 | set_bit(cd.s.bit, pen); | ||
248 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); | ||
249 | raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); | ||
250 | } | ||
198 | } | 251 | } |
199 | 252 | ||
200 | static void octeon_irq_ciu0_enable_mbox(unsigned int irq) | 253 | static void octeon_irq_ciu_enable_local(struct irq_data *data) |
201 | { | 254 | { |
202 | int coreid = cvmx_get_core_num(); | 255 | unsigned long *pen; |
256 | unsigned long flags; | ||
257 | union octeon_ciu_chip_data cd; | ||
258 | |||
259 | cd.p = irq_data_get_irq_chip_data(data); | ||
260 | |||
261 | if (cd.s.line == 0) { | ||
262 | raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); | ||
263 | pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror); | ||
264 | set_bit(cd.s.bit, pen); | ||
265 | cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); | ||
266 | raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); | ||
267 | } else { | ||
268 | raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); | ||
269 | pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror); | ||
270 | set_bit(cd.s.bit, pen); | ||
271 | cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen); | ||
272 | raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); | ||
273 | } | ||
274 | } | ||
275 | |||
276 | static void octeon_irq_ciu_disable_local(struct irq_data *data) | ||
277 | { | ||
278 | unsigned long *pen; | ||
203 | unsigned long flags; | 279 | unsigned long flags; |
204 | uint64_t en0; | 280 | union octeon_ciu_chip_data cd; |
205 | int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */ | 281 | |
282 | cd.p = irq_data_get_irq_chip_data(data); | ||
206 | 283 | ||
207 | raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); | 284 | if (cd.s.line == 0) { |
208 | en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); | 285 | raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); |
209 | en0 |= 1ull << bit; | 286 | pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror); |
210 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0); | 287 | clear_bit(cd.s.bit, pen); |
211 | cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); | 288 | cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); |
212 | raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); | 289 | raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); |
290 | } else { | ||
291 | raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); | ||
292 | pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror); | ||
293 | clear_bit(cd.s.bit, pen); | ||
294 | cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen); | ||
295 | raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); | ||
296 | } | ||
213 | } | 297 | } |
214 | 298 | ||
215 | static void octeon_irq_ciu0_disable(unsigned int irq) | 299 | static void octeon_irq_ciu_disable_all(struct irq_data *data) |
216 | { | 300 | { |
217 | int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */ | ||
218 | unsigned long flags; | 301 | unsigned long flags; |
219 | uint64_t en0; | 302 | unsigned long *pen; |
220 | int cpu; | 303 | int cpu; |
221 | raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); | 304 | union octeon_ciu_chip_data cd; |
222 | for_each_online_cpu(cpu) { | 305 | |
223 | int coreid = octeon_coreid_for_cpu(cpu); | 306 | wmb(); /* Make sure flag changes arrive before register updates. */ |
224 | en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); | 307 | |
225 | en0 &= ~(1ull << bit); | 308 | cd.p = irq_data_get_irq_chip_data(data); |
226 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0); | 309 | |
310 | if (cd.s.line == 0) { | ||
311 | raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); | ||
312 | for_each_online_cpu(cpu) { | ||
313 | int coreid = octeon_coreid_for_cpu(cpu); | ||
314 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); | ||
315 | clear_bit(cd.s.bit, pen); | ||
316 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); | ||
317 | } | ||
318 | raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); | ||
319 | } else { | ||
320 | raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); | ||
321 | for_each_online_cpu(cpu) { | ||
322 | int coreid = octeon_coreid_for_cpu(cpu); | ||
323 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); | ||
324 | clear_bit(cd.s.bit, pen); | ||
325 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); | ||
326 | } | ||
327 | raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); | ||
328 | } | ||
329 | } | ||
330 | |||
331 | static void octeon_irq_ciu_enable_all(struct irq_data *data) | ||
332 | { | ||
333 | unsigned long flags; | ||
334 | unsigned long *pen; | ||
335 | int cpu; | ||
336 | union octeon_ciu_chip_data cd; | ||
337 | |||
338 | cd.p = irq_data_get_irq_chip_data(data); | ||
339 | |||
340 | if (cd.s.line == 0) { | ||
341 | raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); | ||
342 | for_each_online_cpu(cpu) { | ||
343 | int coreid = octeon_coreid_for_cpu(cpu); | ||
344 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); | ||
345 | set_bit(cd.s.bit, pen); | ||
346 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); | ||
347 | } | ||
348 | raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); | ||
349 | } else { | ||
350 | raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); | ||
351 | for_each_online_cpu(cpu) { | ||
352 | int coreid = octeon_coreid_for_cpu(cpu); | ||
353 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); | ||
354 | set_bit(cd.s.bit, pen); | ||
355 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); | ||
356 | } | ||
357 | raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); | ||
227 | } | 358 | } |
228 | /* | ||
229 | * We need to do a read after the last update to make sure all | ||
230 | * of them are done. | ||
231 | */ | ||
232 | cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2)); | ||
233 | raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); | ||
234 | } | 359 | } |
235 | 360 | ||
236 | /* | 361 | /* |
237 | * Enable the irq on the next core in the affinity set for chips that | 362 | * Enable the irq on the next core in the affinity set for chips that |
238 | * have the EN*_W1{S,C} registers. | 363 | * have the EN*_W1{S,C} registers. |
239 | */ | 364 | */ |
240 | static void octeon_irq_ciu0_enable_v2(unsigned int irq) | 365 | static void octeon_irq_ciu_enable_v2(struct irq_data *data) |
241 | { | 366 | { |
242 | int index; | 367 | u64 mask; |
243 | u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); | 368 | int cpu = next_cpu_for_irq(data); |
244 | struct irq_desc *desc = irq_to_desc(irq); | 369 | union octeon_ciu_chip_data cd; |
370 | |||
371 | cd.p = irq_data_get_irq_chip_data(data); | ||
372 | mask = 1ull << (cd.s.bit); | ||
245 | 373 | ||
246 | if ((desc->status & IRQ_DISABLED) == 0) { | 374 | /* |
247 | index = next_coreid_for_irq(desc) * 2; | 375 | * Called under the desc lock, so these should never get out |
376 | * of sync. | ||
377 | */ | ||
378 | if (cd.s.line == 0) { | ||
379 | int index = octeon_coreid_for_cpu(cpu) * 2; | ||
380 | set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); | ||
248 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | 381 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); |
382 | } else { | ||
383 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; | ||
384 | set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); | ||
385 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | ||
249 | } | 386 | } |
250 | } | 387 | } |
251 | 388 | ||
@@ -253,83 +390,155 @@ static void octeon_irq_ciu0_enable_v2(unsigned int irq) | |||
253 | * Enable the irq on the current CPU for chips that | 390 | * Enable the irq on the current CPU for chips that |
254 | * have the EN*_W1{S,C} registers. | 391 | * have the EN*_W1{S,C} registers. |
255 | */ | 392 | */ |
256 | static void octeon_irq_ciu0_enable_mbox_v2(unsigned int irq) | 393 | static void octeon_irq_ciu_enable_local_v2(struct irq_data *data) |
394 | { | ||
395 | u64 mask; | ||
396 | union octeon_ciu_chip_data cd; | ||
397 | |||
398 | cd.p = irq_data_get_irq_chip_data(data); | ||
399 | mask = 1ull << (cd.s.bit); | ||
400 | |||
401 | if (cd.s.line == 0) { | ||
402 | int index = cvmx_get_core_num() * 2; | ||
403 | set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror)); | ||
404 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | ||
405 | } else { | ||
406 | int index = cvmx_get_core_num() * 2 + 1; | ||
407 | set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror)); | ||
408 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | ||
409 | } | ||
410 | } | ||
411 | |||
412 | static void octeon_irq_ciu_disable_local_v2(struct irq_data *data) | ||
257 | { | 413 | { |
258 | int index; | 414 | u64 mask; |
259 | u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); | 415 | union octeon_ciu_chip_data cd; |
260 | 416 | ||
261 | index = cvmx_get_core_num() * 2; | 417 | cd.p = irq_data_get_irq_chip_data(data); |
262 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | 418 | mask = 1ull << (cd.s.bit); |
419 | |||
420 | if (cd.s.line == 0) { | ||
421 | int index = cvmx_get_core_num() * 2; | ||
422 | clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror)); | ||
423 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); | ||
424 | } else { | ||
425 | int index = cvmx_get_core_num() * 2 + 1; | ||
426 | clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror)); | ||
427 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); | ||
428 | } | ||
263 | } | 429 | } |
264 | 430 | ||
265 | /* | 431 | /* |
266 | * Disable the irq on the current core for chips that have the EN*_W1{S,C} | 432 | * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq. |
267 | * registers. | ||
268 | */ | 433 | */ |
269 | static void octeon_irq_ciu0_ack_v2(unsigned int irq) | 434 | static void octeon_irq_ciu_ack(struct irq_data *data) |
270 | { | 435 | { |
271 | int index = cvmx_get_core_num() * 2; | 436 | u64 mask; |
272 | u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); | 437 | union octeon_ciu_chip_data cd; |
273 | 438 | ||
274 | switch (irq) { | 439 | cd.p = data->chip_data; |
275 | case OCTEON_IRQ_GMX_DRP0: | 440 | mask = 1ull << (cd.s.bit); |
276 | case OCTEON_IRQ_GMX_DRP1: | 441 | |
277 | case OCTEON_IRQ_IPD_DRP: | 442 | if (cd.s.line == 0) { |
278 | case OCTEON_IRQ_KEY_ZERO: | 443 | int index = cvmx_get_core_num() * 2; |
279 | case OCTEON_IRQ_TIMER0: | ||
280 | case OCTEON_IRQ_TIMER1: | ||
281 | case OCTEON_IRQ_TIMER2: | ||
282 | case OCTEON_IRQ_TIMER3: | ||
283 | /* | ||
284 | * CIU timer type interrupts must be acknoleged by | ||
285 | * writing a '1' bit to their sum0 bit. | ||
286 | */ | ||
287 | cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask); | 444 | cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask); |
288 | break; | 445 | } else { |
289 | default: | 446 | cvmx_write_csr(CVMX_CIU_INT_SUM1, mask); |
290 | break; | ||
291 | } | 447 | } |
292 | |||
293 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); | ||
294 | } | 448 | } |
295 | 449 | ||
296 | /* | 450 | /* |
297 | * Enable the irq on the current core for chips that have the EN*_W1{S,C} | 451 | * Disable the irq on the all cores for chips that have the EN*_W1{S,C} |
298 | * registers. | 452 | * registers. |
299 | */ | 453 | */ |
300 | static void octeon_irq_ciu0_eoi_mbox_v2(unsigned int irq) | 454 | static void octeon_irq_ciu_disable_all_v2(struct irq_data *data) |
301 | { | 455 | { |
302 | struct irq_desc *desc = irq_to_desc(irq); | 456 | int cpu; |
303 | int index = cvmx_get_core_num() * 2; | 457 | u64 mask; |
304 | u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); | 458 | union octeon_ciu_chip_data cd; |
305 | 459 | ||
306 | if (likely((desc->status & IRQ_DISABLED) == 0)) | 460 | wmb(); /* Make sure flag changes arrive before register updates. */ |
307 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | 461 | |
462 | cd.p = data->chip_data; | ||
463 | mask = 1ull << (cd.s.bit); | ||
464 | |||
465 | if (cd.s.line == 0) { | ||
466 | for_each_online_cpu(cpu) { | ||
467 | int index = octeon_coreid_for_cpu(cpu) * 2; | ||
468 | clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); | ||
469 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); | ||
470 | } | ||
471 | } else { | ||
472 | for_each_online_cpu(cpu) { | ||
473 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; | ||
474 | clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); | ||
475 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); | ||
476 | } | ||
477 | } | ||
308 | } | 478 | } |
309 | 479 | ||
310 | /* | 480 | /* |
311 | * Disable the irq on the all cores for chips that have the EN*_W1{S,C} | 481 | * Enable the irq on the all cores for chips that have the EN*_W1{S,C} |
312 | * registers. | 482 | * registers. |
313 | */ | 483 | */ |
314 | static void octeon_irq_ciu0_disable_all_v2(unsigned int irq) | 484 | static void octeon_irq_ciu_enable_all_v2(struct irq_data *data) |
315 | { | 485 | { |
316 | u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); | ||
317 | int index; | ||
318 | int cpu; | 486 | int cpu; |
319 | for_each_online_cpu(cpu) { | 487 | u64 mask; |
320 | index = octeon_coreid_for_cpu(cpu) * 2; | 488 | union octeon_ciu_chip_data cd; |
321 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); | 489 | |
490 | cd.p = data->chip_data; | ||
491 | mask = 1ull << (cd.s.bit); | ||
492 | |||
493 | if (cd.s.line == 0) { | ||
494 | for_each_online_cpu(cpu) { | ||
495 | int index = octeon_coreid_for_cpu(cpu) * 2; | ||
496 | set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); | ||
497 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | ||
498 | } | ||
499 | } else { | ||
500 | for_each_online_cpu(cpu) { | ||
501 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; | ||
502 | set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); | ||
503 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | ||
504 | } | ||
322 | } | 505 | } |
323 | } | 506 | } |
324 | 507 | ||
325 | #ifdef CONFIG_SMP | 508 | #ifdef CONFIG_SMP |
326 | static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest) | 509 | |
510 | static void octeon_irq_cpu_offline_ciu(struct irq_data *data) | ||
511 | { | ||
512 | int cpu = smp_processor_id(); | ||
513 | cpumask_t new_affinity; | ||
514 | |||
515 | if (!cpumask_test_cpu(cpu, data->affinity)) | ||
516 | return; | ||
517 | |||
518 | if (cpumask_weight(data->affinity) > 1) { | ||
519 | /* | ||
520 | * It has multi CPU affinity, just remove this CPU | ||
521 | * from the affinity set. | ||
522 | */ | ||
523 | cpumask_copy(&new_affinity, data->affinity); | ||
524 | cpumask_clear_cpu(cpu, &new_affinity); | ||
525 | } else { | ||
526 | /* Otherwise, put it on lowest numbered online CPU. */ | ||
527 | cpumask_clear(&new_affinity); | ||
528 | cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity); | ||
529 | } | ||
530 | __irq_set_affinity_locked(data, &new_affinity); | ||
531 | } | ||
532 | |||
533 | static int octeon_irq_ciu_set_affinity(struct irq_data *data, | ||
534 | const struct cpumask *dest, bool force) | ||
327 | { | 535 | { |
328 | int cpu; | 536 | int cpu; |
329 | struct irq_desc *desc = irq_to_desc(irq); | 537 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); |
330 | int enable_one = (desc->status & IRQ_DISABLED) == 0; | ||
331 | unsigned long flags; | 538 | unsigned long flags; |
332 | int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */ | 539 | union octeon_ciu_chip_data cd; |
540 | |||
541 | cd.p = data->chip_data; | ||
333 | 542 | ||
334 | /* | 543 | /* |
335 | * For non-v2 CIU, we will allow only single CPU affinity. | 544 | * For non-v2 CIU, we will allow only single CPU affinity. |
@@ -339,26 +548,40 @@ static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask * | |||
339 | if (cpumask_weight(dest) != 1) | 548 | if (cpumask_weight(dest) != 1) |
340 | return -EINVAL; | 549 | return -EINVAL; |
341 | 550 | ||
342 | raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); | 551 | if (!enable_one) |
343 | for_each_online_cpu(cpu) { | 552 | return 0; |
344 | int coreid = octeon_coreid_for_cpu(cpu); | 553 | |
345 | uint64_t en0 = | 554 | if (cd.s.line == 0) { |
346 | cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); | 555 | raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); |
347 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | 556 | for_each_online_cpu(cpu) { |
348 | enable_one = 0; | 557 | int coreid = octeon_coreid_for_cpu(cpu); |
349 | en0 |= 1ull << bit; | 558 | unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); |
350 | } else { | 559 | |
351 | en0 &= ~(1ull << bit); | 560 | if (cpumask_test_cpu(cpu, dest) && enable_one) { |
561 | enable_one = false; | ||
562 | set_bit(cd.s.bit, pen); | ||
563 | } else { | ||
564 | clear_bit(cd.s.bit, pen); | ||
565 | } | ||
566 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); | ||
352 | } | 567 | } |
353 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0); | 568 | raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); |
569 | } else { | ||
570 | raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); | ||
571 | for_each_online_cpu(cpu) { | ||
572 | int coreid = octeon_coreid_for_cpu(cpu); | ||
573 | unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); | ||
574 | |||
575 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | ||
576 | enable_one = false; | ||
577 | set_bit(cd.s.bit, pen); | ||
578 | } else { | ||
579 | clear_bit(cd.s.bit, pen); | ||
580 | } | ||
581 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); | ||
582 | } | ||
583 | raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); | ||
354 | } | 584 | } |
355 | /* | ||
356 | * We need to do a read after the last update to make sure all | ||
357 | * of them are done. | ||
358 | */ | ||
359 | cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2)); | ||
360 | raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); | ||
361 | |||
362 | return 0; | 585 | return 0; |
363 | } | 586 | } |
364 | 587 | ||
@@ -366,22 +589,46 @@ static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask * | |||
366 | * Set affinity for the irq for chips that have the EN*_W1{S,C} | 589 | * Set affinity for the irq for chips that have the EN*_W1{S,C} |
367 | * registers. | 590 | * registers. |
368 | */ | 591 | */ |
369 | static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq, | 592 | static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data, |
370 | const struct cpumask *dest) | 593 | const struct cpumask *dest, |
594 | bool force) | ||
371 | { | 595 | { |
372 | int cpu; | 596 | int cpu; |
373 | int index; | 597 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); |
374 | struct irq_desc *desc = irq_to_desc(irq); | 598 | u64 mask; |
375 | int enable_one = (desc->status & IRQ_DISABLED) == 0; | 599 | union octeon_ciu_chip_data cd; |
376 | u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); | 600 | |
377 | 601 | if (!enable_one) | |
378 | for_each_online_cpu(cpu) { | 602 | return 0; |
379 | index = octeon_coreid_for_cpu(cpu) * 2; | 603 | |
380 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | 604 | cd.p = data->chip_data; |
381 | enable_one = 0; | 605 | mask = 1ull << cd.s.bit; |
382 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | 606 | |
383 | } else { | 607 | if (cd.s.line == 0) { |
384 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); | 608 | for_each_online_cpu(cpu) { |
609 | unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); | ||
610 | int index = octeon_coreid_for_cpu(cpu) * 2; | ||
611 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | ||
612 | enable_one = false; | ||
613 | set_bit(cd.s.bit, pen); | ||
614 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | ||
615 | } else { | ||
616 | clear_bit(cd.s.bit, pen); | ||
617 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); | ||
618 | } | ||
619 | } | ||
620 | } else { | ||
621 | for_each_online_cpu(cpu) { | ||
622 | unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); | ||
623 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; | ||
624 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | ||
625 | enable_one = false; | ||
626 | set_bit(cd.s.bit, pen); | ||
627 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | ||
628 | } else { | ||
629 | clear_bit(cd.s.bit, pen); | ||
630 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); | ||
631 | } | ||
385 | } | 632 | } |
386 | } | 633 | } |
387 | return 0; | 634 | return 0; |
@@ -389,80 +636,102 @@ static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq, | |||
389 | #endif | 636 | #endif |
390 | 637 | ||
391 | /* | 638 | /* |
639 | * The v1 CIU code already masks things, so supply a dummy version to | ||
640 | * the core chip code. | ||
641 | */ | ||
642 | static void octeon_irq_dummy_mask(struct irq_data *data) | ||
643 | { | ||
644 | } | ||
645 | |||
646 | /* | ||
392 | * Newer octeon chips have support for lockless CIU operation. | 647 | * Newer octeon chips have support for lockless CIU operation. |
393 | */ | 648 | */ |
394 | static struct irq_chip octeon_irq_chip_ciu0_v2 = { | 649 | static struct irq_chip octeon_irq_chip_ciu_v2 = { |
395 | .name = "CIU0", | 650 | .name = "CIU", |
396 | .enable = octeon_irq_ciu0_enable_v2, | 651 | .irq_enable = octeon_irq_ciu_enable_v2, |
397 | .disable = octeon_irq_ciu0_disable_all_v2, | 652 | .irq_disable = octeon_irq_ciu_disable_all_v2, |
398 | .eoi = octeon_irq_ciu0_enable_v2, | 653 | .irq_mask = octeon_irq_ciu_disable_local_v2, |
654 | .irq_unmask = octeon_irq_ciu_enable_v2, | ||
399 | #ifdef CONFIG_SMP | 655 | #ifdef CONFIG_SMP |
400 | .set_affinity = octeon_irq_ciu0_set_affinity_v2, | 656 | .irq_set_affinity = octeon_irq_ciu_set_affinity_v2, |
657 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, | ||
401 | #endif | 658 | #endif |
402 | }; | 659 | }; |
403 | 660 | ||
404 | static struct irq_chip octeon_irq_chip_ciu0 = { | 661 | static struct irq_chip octeon_irq_chip_ciu_edge_v2 = { |
405 | .name = "CIU0", | 662 | .name = "CIU-E", |
406 | .enable = octeon_irq_ciu0_enable, | 663 | .irq_enable = octeon_irq_ciu_enable_v2, |
407 | .disable = octeon_irq_ciu0_disable, | 664 | .irq_disable = octeon_irq_ciu_disable_all_v2, |
408 | .eoi = octeon_irq_ciu0_eoi, | 665 | .irq_ack = octeon_irq_ciu_ack, |
666 | .irq_mask = octeon_irq_ciu_disable_local_v2, | ||
667 | .irq_unmask = octeon_irq_ciu_enable_v2, | ||
409 | #ifdef CONFIG_SMP | 668 | #ifdef CONFIG_SMP |
410 | .set_affinity = octeon_irq_ciu0_set_affinity, | 669 | .irq_set_affinity = octeon_irq_ciu_set_affinity_v2, |
670 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, | ||
411 | #endif | 671 | #endif |
412 | }; | 672 | }; |
413 | 673 | ||
414 | /* The mbox versions don't do any affinity or round-robin. */ | 674 | static struct irq_chip octeon_irq_chip_ciu = { |
415 | static struct irq_chip octeon_irq_chip_ciu0_mbox_v2 = { | 675 | .name = "CIU", |
416 | .name = "CIU0-M", | 676 | .irq_enable = octeon_irq_ciu_enable, |
417 | .enable = octeon_irq_ciu0_enable_mbox_v2, | 677 | .irq_disable = octeon_irq_ciu_disable_all, |
418 | .disable = octeon_irq_ciu0_disable, | 678 | .irq_mask = octeon_irq_dummy_mask, |
419 | .eoi = octeon_irq_ciu0_eoi_mbox_v2, | 679 | #ifdef CONFIG_SMP |
680 | .irq_set_affinity = octeon_irq_ciu_set_affinity, | ||
681 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, | ||
682 | #endif | ||
420 | }; | 683 | }; |
421 | 684 | ||
422 | static struct irq_chip octeon_irq_chip_ciu0_mbox = { | 685 | static struct irq_chip octeon_irq_chip_ciu_edge = { |
423 | .name = "CIU0-M", | 686 | .name = "CIU-E", |
424 | .enable = octeon_irq_ciu0_enable_mbox, | 687 | .irq_enable = octeon_irq_ciu_enable, |
425 | .disable = octeon_irq_ciu0_disable, | 688 | .irq_disable = octeon_irq_ciu_disable_all, |
426 | .eoi = octeon_irq_ciu0_eoi, | 689 | .irq_mask = octeon_irq_dummy_mask, |
690 | .irq_ack = octeon_irq_ciu_ack, | ||
691 | #ifdef CONFIG_SMP | ||
692 | .irq_set_affinity = octeon_irq_ciu_set_affinity, | ||
693 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, | ||
694 | #endif | ||
427 | }; | 695 | }; |
428 | 696 | ||
429 | static void octeon_irq_ciu1_ack(unsigned int irq) | 697 | /* The mbox versions don't do any affinity or round-robin. */ |
430 | { | 698 | static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = { |
431 | /* | 699 | .name = "CIU-M", |
432 | * In order to avoid any locking accessing the CIU, we | 700 | .irq_enable = octeon_irq_ciu_enable_all_v2, |
433 | * acknowledge CIU interrupts by disabling all of them. This | 701 | .irq_disable = octeon_irq_ciu_disable_all_v2, |
434 | * way we can use a per core register and avoid any out of | 702 | .irq_ack = octeon_irq_ciu_disable_local_v2, |
435 | * core locking requirements. This has the side affect that | 703 | .irq_eoi = octeon_irq_ciu_enable_local_v2, |
436 | * CIU interrupts can't be processed recursively. We don't | 704 | |
437 | * need to disable IRQs to make these atomic since they are | 705 | .irq_cpu_online = octeon_irq_ciu_enable_local_v2, |
438 | * already disabled earlier in the low level interrupt code. | 706 | .irq_cpu_offline = octeon_irq_ciu_disable_local_v2, |
439 | */ | 707 | .flags = IRQCHIP_ONOFFLINE_ENABLED, |
440 | clear_c0_status(0x100 << 3); | 708 | }; |
441 | } | ||
442 | 709 | ||
443 | static void octeon_irq_ciu1_eoi(unsigned int irq) | 710 | static struct irq_chip octeon_irq_chip_ciu_mbox = { |
444 | { | 711 | .name = "CIU-M", |
445 | /* | 712 | .irq_enable = octeon_irq_ciu_enable_all, |
446 | * Enable all CIU interrupts again. We don't need to disable | 713 | .irq_disable = octeon_irq_ciu_disable_all, |
447 | * IRQs to make these atomic since they are already disabled | 714 | |
448 | * earlier in the low level interrupt code. | 715 | .irq_cpu_online = octeon_irq_ciu_enable_local, |
449 | */ | 716 | .irq_cpu_offline = octeon_irq_ciu_disable_local, |
450 | set_c0_status(0x100 << 3); | 717 | .flags = IRQCHIP_ONOFFLINE_ENABLED, |
451 | } | 718 | }; |
452 | 719 | ||
453 | static void octeon_irq_ciu1_enable(unsigned int irq) | 720 | /* |
721 | * Watchdog interrupts are special. They are associated with a single | ||
722 | * core, so we hardwire the affinity to that core. | ||
723 | */ | ||
724 | static void octeon_irq_ciu_wd_enable(struct irq_data *data) | ||
454 | { | 725 | { |
455 | struct irq_desc *desc = irq_to_desc(irq); | ||
456 | int coreid = next_coreid_for_irq(desc); | ||
457 | unsigned long flags; | 726 | unsigned long flags; |
458 | uint64_t en1; | 727 | unsigned long *pen; |
459 | int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ | 728 | int coreid = data->irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ |
729 | int cpu = octeon_cpu_for_coreid(coreid); | ||
460 | 730 | ||
461 | raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); | 731 | raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); |
462 | en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); | 732 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); |
463 | en1 |= 1ull << bit; | 733 | set_bit(coreid, pen); |
464 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1); | 734 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); |
465 | cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); | ||
466 | raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); | 735 | raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); |
467 | } | 736 | } |
468 | 737 | ||
@@ -470,286 +739,281 @@ static void octeon_irq_ciu1_enable(unsigned int irq) | |||
470 | * Watchdog interrupts are special. They are associated with a single | 739 | * Watchdog interrupts are special. They are associated with a single |
471 | * core, so we hardwire the affinity to that core. | 740 | * core, so we hardwire the affinity to that core. |
472 | */ | 741 | */ |
473 | static void octeon_irq_ciu1_wd_enable(unsigned int irq) | 742 | static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data) |
474 | { | 743 | { |
475 | unsigned long flags; | 744 | int coreid = data->irq - OCTEON_IRQ_WDOG0; |
476 | uint64_t en1; | 745 | int cpu = octeon_cpu_for_coreid(coreid); |
477 | int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ | ||
478 | int coreid = bit; | ||
479 | 746 | ||
480 | raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); | 747 | set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); |
481 | en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); | 748 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid); |
482 | en1 |= 1ull << bit; | ||
483 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1); | ||
484 | cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); | ||
485 | raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); | ||
486 | } | 749 | } |
487 | 750 | ||
488 | static void octeon_irq_ciu1_disable(unsigned int irq) | 751 | |
752 | static struct irq_chip octeon_irq_chip_ciu_wd_v2 = { | ||
753 | .name = "CIU-W", | ||
754 | .irq_enable = octeon_irq_ciu1_wd_enable_v2, | ||
755 | .irq_disable = octeon_irq_ciu_disable_all_v2, | ||
756 | .irq_mask = octeon_irq_ciu_disable_local_v2, | ||
757 | .irq_unmask = octeon_irq_ciu_enable_local_v2, | ||
758 | }; | ||
759 | |||
760 | static struct irq_chip octeon_irq_chip_ciu_wd = { | ||
761 | .name = "CIU-W", | ||
762 | .irq_enable = octeon_irq_ciu_wd_enable, | ||
763 | .irq_disable = octeon_irq_ciu_disable_all, | ||
764 | .irq_mask = octeon_irq_dummy_mask, | ||
765 | }; | ||
766 | |||
767 | static void octeon_irq_ip2_v1(void) | ||
489 | { | 768 | { |
490 | int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ | 769 | const unsigned long core_id = cvmx_get_core_num(); |
491 | unsigned long flags; | 770 | u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2)); |
492 | uint64_t en1; | 771 | |
493 | int cpu; | 772 | ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror); |
494 | raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); | 773 | clear_c0_status(STATUSF_IP2); |
495 | for_each_online_cpu(cpu) { | 774 | if (likely(ciu_sum)) { |
496 | int coreid = octeon_coreid_for_cpu(cpu); | 775 | int bit = fls64(ciu_sum) - 1; |
497 | en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); | 776 | int irq = octeon_irq_ciu_to_irq[0][bit]; |
498 | en1 &= ~(1ull << bit); | 777 | if (likely(irq)) |
499 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1); | 778 | do_IRQ(irq); |
779 | else | ||
780 | spurious_interrupt(); | ||
781 | } else { | ||
782 | spurious_interrupt(); | ||
500 | } | 783 | } |
501 | /* | 784 | set_c0_status(STATUSF_IP2); |
502 | * We need to do a read after the last update to make sure all | ||
503 | * of them are done. | ||
504 | */ | ||
505 | cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1)); | ||
506 | raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); | ||
507 | } | 785 | } |
508 | 786 | ||
509 | /* | 787 | static void octeon_irq_ip2_v2(void) |
510 | * Enable the irq on the current core for chips that have the EN*_W1{S,C} | ||
511 | * registers. | ||
512 | */ | ||
513 | static void octeon_irq_ciu1_enable_v2(unsigned int irq) | ||
514 | { | 788 | { |
515 | int index; | 789 | const unsigned long core_id = cvmx_get_core_num(); |
516 | u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0); | 790 | u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2)); |
517 | struct irq_desc *desc = irq_to_desc(irq); | 791 | |
518 | 792 | ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror); | |
519 | if ((desc->status & IRQ_DISABLED) == 0) { | 793 | if (likely(ciu_sum)) { |
520 | index = next_coreid_for_irq(desc) * 2 + 1; | 794 | int bit = fls64(ciu_sum) - 1; |
521 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | 795 | int irq = octeon_irq_ciu_to_irq[0][bit]; |
796 | if (likely(irq)) | ||
797 | do_IRQ(irq); | ||
798 | else | ||
799 | spurious_interrupt(); | ||
800 | } else { | ||
801 | spurious_interrupt(); | ||
522 | } | 802 | } |
523 | } | 803 | } |
524 | 804 | static void octeon_irq_ip3_v1(void) | |
525 | /* | ||
526 | * Watchdog interrupts are special. They are associated with a single | ||
527 | * core, so we hardwire the affinity to that core. | ||
528 | */ | ||
529 | static void octeon_irq_ciu1_wd_enable_v2(unsigned int irq) | ||
530 | { | 805 | { |
531 | int index; | 806 | u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1); |
532 | int coreid = irq - OCTEON_IRQ_WDOG0; | 807 | |
533 | u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0); | 808 | ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror); |
534 | struct irq_desc *desc = irq_to_desc(irq); | 809 | clear_c0_status(STATUSF_IP3); |
535 | 810 | if (likely(ciu_sum)) { | |
536 | if ((desc->status & IRQ_DISABLED) == 0) { | 811 | int bit = fls64(ciu_sum) - 1; |
537 | index = coreid * 2 + 1; | 812 | int irq = octeon_irq_ciu_to_irq[1][bit]; |
538 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | 813 | if (likely(irq)) |
814 | do_IRQ(irq); | ||
815 | else | ||
816 | spurious_interrupt(); | ||
817 | } else { | ||
818 | spurious_interrupt(); | ||
539 | } | 819 | } |
820 | set_c0_status(STATUSF_IP3); | ||
540 | } | 821 | } |
541 | 822 | ||
542 | /* | 823 | static void octeon_irq_ip3_v2(void) |
543 | * Disable the irq on the current core for chips that have the EN*_W1{S,C} | ||
544 | * registers. | ||
545 | */ | ||
546 | static void octeon_irq_ciu1_ack_v2(unsigned int irq) | ||
547 | { | 824 | { |
548 | int index = cvmx_get_core_num() * 2 + 1; | 825 | u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1); |
549 | u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0); | 826 | |
550 | 827 | ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror); | |
551 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); | 828 | if (likely(ciu_sum)) { |
829 | int bit = fls64(ciu_sum) - 1; | ||
830 | int irq = octeon_irq_ciu_to_irq[1][bit]; | ||
831 | if (likely(irq)) | ||
832 | do_IRQ(irq); | ||
833 | else | ||
834 | spurious_interrupt(); | ||
835 | } else { | ||
836 | spurious_interrupt(); | ||
837 | } | ||
552 | } | 838 | } |
553 | 839 | ||
554 | /* | 840 | static void octeon_irq_ip4_mask(void) |
555 | * Disable the irq on the all cores for chips that have the EN*_W1{S,C} | ||
556 | * registers. | ||
557 | */ | ||
558 | static void octeon_irq_ciu1_disable_all_v2(unsigned int irq) | ||
559 | { | 841 | { |
560 | u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0); | 842 | clear_c0_status(STATUSF_IP4); |
561 | int index; | 843 | spurious_interrupt(); |
562 | int cpu; | ||
563 | for_each_online_cpu(cpu) { | ||
564 | index = octeon_coreid_for_cpu(cpu) * 2 + 1; | ||
565 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); | ||
566 | } | ||
567 | } | 844 | } |
568 | 845 | ||
569 | #ifdef CONFIG_SMP | 846 | static void (*octeon_irq_ip2)(void); |
570 | static int octeon_irq_ciu1_set_affinity(unsigned int irq, | 847 | static void (*octeon_irq_ip3)(void); |
571 | const struct cpumask *dest) | 848 | static void (*octeon_irq_ip4)(void); |
572 | { | ||
573 | int cpu; | ||
574 | struct irq_desc *desc = irq_to_desc(irq); | ||
575 | int enable_one = (desc->status & IRQ_DISABLED) == 0; | ||
576 | unsigned long flags; | ||
577 | int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ | ||
578 | 849 | ||
579 | /* | 850 | void __cpuinitdata (*octeon_irq_setup_secondary)(void); |
580 | * For non-v2 CIU, we will allow only single CPU affinity. | ||
581 | * This removes the need to do locking in the .ack/.eoi | ||
582 | * functions. | ||
583 | */ | ||
584 | if (cpumask_weight(dest) != 1) | ||
585 | return -EINVAL; | ||
586 | 851 | ||
587 | raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); | 852 | static void __cpuinit octeon_irq_percpu_enable(void) |
588 | for_each_online_cpu(cpu) { | 853 | { |
589 | int coreid = octeon_coreid_for_cpu(cpu); | 854 | irq_cpu_online(); |
590 | uint64_t en1 = | 855 | } |
591 | cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); | 856 | |
592 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | 857 | static void __cpuinit octeon_irq_init_ciu_percpu(void) |
593 | enable_one = 0; | 858 | { |
594 | en1 |= 1ull << bit; | 859 | int coreid = cvmx_get_core_num(); |
595 | } else { | ||
596 | en1 &= ~(1ull << bit); | ||
597 | } | ||
598 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1); | ||
599 | } | ||
600 | /* | 860 | /* |
601 | * We need to do a read after the last update to make sure all | 861 | * Disable All CIU Interrupts. The ones we need will be |
602 | * of them are done. | 862 | * enabled later. Read the SUM register so we know the write |
863 | * completed. | ||
603 | */ | 864 | */ |
604 | cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1)); | 865 | cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0); |
605 | raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); | 866 | cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0); |
606 | 867 | cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0); | |
607 | return 0; | 868 | cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0); |
869 | cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2))); | ||
608 | } | 870 | } |
609 | 871 | ||
610 | /* | 872 | static void __cpuinit octeon_irq_setup_secondary_ciu(void) |
611 | * Set affinity for the irq for chips that have the EN*_W1{S,C} | ||
612 | * registers. | ||
613 | */ | ||
614 | static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq, | ||
615 | const struct cpumask *dest) | ||
616 | { | 873 | { |
617 | int cpu; | ||
618 | int index; | ||
619 | struct irq_desc *desc = irq_to_desc(irq); | ||
620 | int enable_one = (desc->status & IRQ_DISABLED) == 0; | ||
621 | u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0); | ||
622 | for_each_online_cpu(cpu) { | ||
623 | index = octeon_coreid_for_cpu(cpu) * 2 + 1; | ||
624 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | ||
625 | enable_one = 0; | ||
626 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | ||
627 | } else { | ||
628 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); | ||
629 | } | ||
630 | } | ||
631 | return 0; | ||
632 | } | ||
633 | #endif | ||
634 | 874 | ||
635 | /* | 875 | __get_cpu_var(octeon_irq_ciu0_en_mirror) = 0; |
636 | * Newer octeon chips have support for lockless CIU operation. | 876 | __get_cpu_var(octeon_irq_ciu1_en_mirror) = 0; |
637 | */ | ||
638 | static struct irq_chip octeon_irq_chip_ciu1_v2 = { | ||
639 | .name = "CIU1", | ||
640 | .enable = octeon_irq_ciu1_enable_v2, | ||
641 | .disable = octeon_irq_ciu1_disable_all_v2, | ||
642 | .eoi = octeon_irq_ciu1_enable_v2, | ||
643 | #ifdef CONFIG_SMP | ||
644 | .set_affinity = octeon_irq_ciu1_set_affinity_v2, | ||
645 | #endif | ||
646 | }; | ||
647 | 877 | ||
648 | static struct irq_chip octeon_irq_chip_ciu1 = { | 878 | octeon_irq_init_ciu_percpu(); |
649 | .name = "CIU1", | 879 | octeon_irq_percpu_enable(); |
650 | .enable = octeon_irq_ciu1_enable, | ||
651 | .disable = octeon_irq_ciu1_disable, | ||
652 | .eoi = octeon_irq_ciu1_eoi, | ||
653 | #ifdef CONFIG_SMP | ||
654 | .set_affinity = octeon_irq_ciu1_set_affinity, | ||
655 | #endif | ||
656 | }; | ||
657 | 880 | ||
658 | static struct irq_chip octeon_irq_chip_ciu1_wd_v2 = { | 881 | /* Enable the CIU lines */ |
659 | .name = "CIU1-W", | 882 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); |
660 | .enable = octeon_irq_ciu1_wd_enable_v2, | 883 | clear_c0_status(STATUSF_IP4); |
661 | .disable = octeon_irq_ciu1_disable_all_v2, | 884 | } |
662 | .eoi = octeon_irq_ciu1_wd_enable_v2, | ||
663 | }; | ||
664 | 885 | ||
665 | static struct irq_chip octeon_irq_chip_ciu1_wd = { | 886 | static void __init octeon_irq_init_ciu(void) |
666 | .name = "CIU1-W", | 887 | { |
667 | .enable = octeon_irq_ciu1_wd_enable, | 888 | unsigned int i; |
668 | .disable = octeon_irq_ciu1_disable, | 889 | struct irq_chip *chip; |
669 | .eoi = octeon_irq_ciu1_eoi, | 890 | struct irq_chip *chip_edge; |
670 | }; | 891 | struct irq_chip *chip_mbox; |
892 | struct irq_chip *chip_wd; | ||
893 | |||
894 | octeon_irq_init_ciu_percpu(); | ||
895 | octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu; | ||
671 | 896 | ||
672 | static void (*octeon_ciu0_ack)(unsigned int); | 897 | if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) || |
673 | static void (*octeon_ciu1_ack)(unsigned int); | 898 | OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || |
899 | OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) || | ||
900 | OCTEON_IS_MODEL(OCTEON_CN6XXX)) { | ||
901 | octeon_irq_ip2 = octeon_irq_ip2_v2; | ||
902 | octeon_irq_ip3 = octeon_irq_ip3_v2; | ||
903 | chip = &octeon_irq_chip_ciu_v2; | ||
904 | chip_edge = &octeon_irq_chip_ciu_edge_v2; | ||
905 | chip_mbox = &octeon_irq_chip_ciu_mbox_v2; | ||
906 | chip_wd = &octeon_irq_chip_ciu_wd_v2; | ||
907 | } else { | ||
908 | octeon_irq_ip2 = octeon_irq_ip2_v1; | ||
909 | octeon_irq_ip3 = octeon_irq_ip3_v1; | ||
910 | chip = &octeon_irq_chip_ciu; | ||
911 | chip_edge = &octeon_irq_chip_ciu_edge; | ||
912 | chip_mbox = &octeon_irq_chip_ciu_mbox; | ||
913 | chip_wd = &octeon_irq_chip_ciu_wd; | ||
914 | } | ||
915 | octeon_irq_ip4 = octeon_irq_ip4_mask; | ||
916 | |||
917 | /* Mips internal */ | ||
918 | octeon_irq_init_core(); | ||
919 | |||
920 | /* CIU_0 */ | ||
921 | for (i = 0; i < 16; i++) | ||
922 | octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WORKQ0, 0, i + 0, chip, handle_level_irq); | ||
923 | for (i = 0; i < 16; i++) | ||
924 | octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_GPIO0, 0, i + 16, chip, handle_level_irq); | ||
925 | |||
926 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, chip_mbox, handle_percpu_irq); | ||
927 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, chip_mbox, handle_percpu_irq); | ||
928 | |||
929 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART0, 0, 34, chip, handle_level_irq); | ||
930 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART1, 0, 35, chip, handle_level_irq); | ||
931 | |||
932 | for (i = 0; i < 4; i++) | ||
933 | octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_INT0, 0, i + 36, chip, handle_level_irq); | ||
934 | for (i = 0; i < 4; i++) | ||
935 | octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_MSI0, 0, i + 40, chip, handle_level_irq); | ||
936 | |||
937 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_TWSI, 0, 45, chip, handle_level_irq); | ||
938 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_RML, 0, 46, chip, handle_level_irq); | ||
939 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_TRACE0, 0, 47, chip, handle_level_irq); | ||
940 | |||
941 | for (i = 0; i < 2; i++) | ||
942 | octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_GMX_DRP0, 0, i + 48, chip_edge, handle_edge_irq); | ||
943 | |||
944 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_IPD_DRP, 0, 50, chip_edge, handle_edge_irq); | ||
945 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_KEY_ZERO, 0, 51, chip_edge, handle_edge_irq); | ||
946 | |||
947 | for (i = 0; i < 4; i++) | ||
948 | octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_TIMER0, 0, i + 52, chip_edge, handle_edge_irq); | ||
949 | |||
950 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB0, 0, 56, chip, handle_level_irq); | ||
951 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_PCM, 0, 57, chip, handle_level_irq); | ||
952 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_MPI, 0, 58, chip, handle_level_irq); | ||
953 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_TWSI2, 0, 59, chip, handle_level_irq); | ||
954 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_POWIQ, 0, 60, chip, handle_level_irq); | ||
955 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_IPDPPTHR, 0, 61, chip, handle_level_irq); | ||
956 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_MII0, 0, 62, chip, handle_level_irq); | ||
957 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_BOOTDMA, 0, 63, chip, handle_level_irq); | ||
958 | |||
959 | /* CIU_1 */ | ||
960 | for (i = 0; i < 16; i++) | ||
961 | octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, chip_wd, handle_level_irq); | ||
962 | |||
963 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART2, 1, 16, chip, handle_level_irq); | ||
964 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB1, 1, 17, chip, handle_level_irq); | ||
965 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_MII1, 1, 18, chip, handle_level_irq); | ||
966 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_NAND, 1, 19, chip, handle_level_irq); | ||
967 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_MIO, 1, 20, chip, handle_level_irq); | ||
968 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_IOB, 1, 21, chip, handle_level_irq); | ||
969 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_FPA, 1, 22, chip, handle_level_irq); | ||
970 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_POW, 1, 23, chip, handle_level_irq); | ||
971 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_L2C, 1, 24, chip, handle_level_irq); | ||
972 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_IPD, 1, 25, chip, handle_level_irq); | ||
973 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_PIP, 1, 26, chip, handle_level_irq); | ||
974 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_PKO, 1, 27, chip, handle_level_irq); | ||
975 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_ZIP, 1, 28, chip, handle_level_irq); | ||
976 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_TIM, 1, 29, chip, handle_level_irq); | ||
977 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_RAD, 1, 30, chip, handle_level_irq); | ||
978 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_KEY, 1, 31, chip, handle_level_irq); | ||
979 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_DFA, 1, 32, chip, handle_level_irq); | ||
980 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_USBCTL, 1, 33, chip, handle_level_irq); | ||
981 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_SLI, 1, 34, chip, handle_level_irq); | ||
982 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_DPI, 1, 35, chip, handle_level_irq); | ||
983 | |||
984 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_AGX0, 1, 36, chip, handle_level_irq); | ||
985 | |||
986 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_AGL, 1, 46, chip, handle_level_irq); | ||
987 | |||
988 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_PTP, 1, 47, chip_edge, handle_edge_irq); | ||
989 | |||
990 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_PEM0, 1, 48, chip, handle_level_irq); | ||
991 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_PEM1, 1, 49, chip, handle_level_irq); | ||
992 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_SRIO0, 1, 50, chip, handle_level_irq); | ||
993 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_SRIO1, 1, 51, chip, handle_level_irq); | ||
994 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_LMC0, 1, 52, chip, handle_level_irq); | ||
995 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_DFM, 1, 56, chip, handle_level_irq); | ||
996 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_RST, 1, 63, chip, handle_level_irq); | ||
997 | |||
998 | /* Enable the CIU lines */ | ||
999 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); | ||
1000 | clear_c0_status(STATUSF_IP4); | ||
1001 | } | ||
674 | 1002 | ||
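
The rewritten octeon_irq_init_ciu() above is table-driven: every OCTEON_IRQ_* number is bound exactly once to its (CIU line, bit) pair together with the appropriate chip and flow handler, replacing the old range loops in arch_init_irq() below. The helper's body falls outside this hunk; a hedged sketch of what it has to record (the chip-data packing shown is an assumption):

	/*
	 * Sketch, not the hunk's actual helper: register the chip and flow
	 * handler, stash line/bit as the per-irq cookie for the chip
	 * callbacks, and fill the reverse line/bit -> irq table consulted
	 * at dispatch time.
	 */
	static void octeon_irq_set_ciu_mapping_sketch(int irq, int line, int bit,
						      struct irq_chip *chip,
						      irq_flow_handler_t handler)
	{
		set_irq_chip_and_handler(irq, chip, handler);
		set_irq_chip_data(irq, (void *)(unsigned long)((line << 6) | bit));
		octeon_irq_ciu_to_irq[line][bit] = irq;
	}
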
675 | void __init arch_init_irq(void) | 1003 | void __init arch_init_irq(void) |
676 | { | 1004 | { |
677 | unsigned int irq; | ||
678 | struct irq_chip *chip0; | ||
679 | struct irq_chip *chip0_mbox; | ||
680 | struct irq_chip *chip1; | ||
681 | struct irq_chip *chip1_wd; | ||
682 | |||
683 | #ifdef CONFIG_SMP | 1005 | #ifdef CONFIG_SMP |
684 | /* Set the default affinity to the boot cpu. */ | 1006 | /* Set the default affinity to the boot cpu. */ |
685 | cpumask_clear(irq_default_affinity); | 1007 | cpumask_clear(irq_default_affinity); |
686 | cpumask_set_cpu(smp_processor_id(), irq_default_affinity); | 1008 | cpumask_set_cpu(smp_processor_id(), irq_default_affinity); |
687 | #endif | 1009 | #endif |
688 | 1010 | octeon_irq_init_ciu(); | |
689 | if (NR_IRQS < OCTEON_IRQ_LAST) | ||
690 | pr_err("octeon_irq_init: NR_IRQS is set too low\n"); | ||
691 | |||
692 | if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) || | ||
693 | OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || | ||
694 | OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) { | ||
695 | octeon_ciu0_ack = octeon_irq_ciu0_ack_v2; | ||
696 | octeon_ciu1_ack = octeon_irq_ciu1_ack_v2; | ||
697 | chip0 = &octeon_irq_chip_ciu0_v2; | ||
698 | chip0_mbox = &octeon_irq_chip_ciu0_mbox_v2; | ||
699 | chip1 = &octeon_irq_chip_ciu1_v2; | ||
700 | chip1_wd = &octeon_irq_chip_ciu1_wd_v2; | ||
701 | } else { | ||
702 | octeon_ciu0_ack = octeon_irq_ciu0_ack; | ||
703 | octeon_ciu1_ack = octeon_irq_ciu1_ack; | ||
704 | chip0 = &octeon_irq_chip_ciu0; | ||
705 | chip0_mbox = &octeon_irq_chip_ciu0_mbox; | ||
706 | chip1 = &octeon_irq_chip_ciu1; | ||
707 | chip1_wd = &octeon_irq_chip_ciu1_wd; | ||
708 | } | ||
709 | |||
710 | /* 0 - 15 reserved for i8259 master and slave controller. */ | ||
711 | |||
712 | /* 17 - 23 Mips internal */ | ||
713 | for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) { | ||
714 | set_irq_chip_and_handler(irq, &octeon_irq_chip_core, | ||
715 | handle_percpu_irq); | ||
716 | } | ||
717 | |||
718 | /* 24 - 87 CIU_INT_SUM0 */ | ||
719 | for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) { | ||
720 | switch (irq) { | ||
721 | case OCTEON_IRQ_MBOX0: | ||
722 | case OCTEON_IRQ_MBOX1: | ||
723 | set_irq_chip_and_handler(irq, chip0_mbox, handle_percpu_irq); | ||
724 | break; | ||
725 | default: | ||
726 | set_irq_chip_and_handler(irq, chip0, handle_fasteoi_irq); | ||
727 | break; | ||
728 | } | ||
729 | } | ||
730 | |||
731 | /* 88 - 151 CIU_INT_SUM1 */ | ||
732 | for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_WDOG15; irq++) | ||
733 | set_irq_chip_and_handler(irq, chip1_wd, handle_fasteoi_irq); | ||
734 | |||
735 | for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED151; irq++) | ||
736 | set_irq_chip_and_handler(irq, chip1, handle_fasteoi_irq); | ||
737 | |||
738 | set_c0_status(0x300 << 2); | ||
739 | } | 1011 | } |
740 | 1012 | ||
741 | asmlinkage void plat_irq_dispatch(void) | 1013 | asmlinkage void plat_irq_dispatch(void) |
742 | { | 1014 | { |
743 | const unsigned long core_id = cvmx_get_core_num(); | ||
744 | const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2); | ||
745 | const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2); | ||
746 | const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1; | ||
747 | const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1); | ||
748 | unsigned long cop0_cause; | 1015 | unsigned long cop0_cause; |
749 | unsigned long cop0_status; | 1016 | unsigned long cop0_status; |
750 | uint64_t ciu_en; | ||
751 | uint64_t ciu_sum; | ||
752 | unsigned int irq; | ||
753 | 1017 | ||
754 | while (1) { | 1018 | while (1) { |
755 | cop0_cause = read_c0_cause(); | 1019 | cop0_cause = read_c0_cause(); |
@@ -757,33 +1021,16 @@ asmlinkage void plat_irq_dispatch(void) | |||
757 | cop0_cause &= cop0_status; | 1021 | cop0_cause &= cop0_status; |
758 | cop0_cause &= ST0_IM; | 1022 | cop0_cause &= ST0_IM; |
759 | 1023 | ||
760 | if (unlikely(cop0_cause & STATUSF_IP2)) { | 1024 | if (unlikely(cop0_cause & STATUSF_IP2)) |
761 | ciu_sum = cvmx_read_csr(ciu_sum0_address); | 1025 | octeon_irq_ip2(); |
762 | ciu_en = cvmx_read_csr(ciu_en0_address); | 1026 | else if (unlikely(cop0_cause & STATUSF_IP3)) |
763 | ciu_sum &= ciu_en; | 1027 | octeon_irq_ip3(); |
764 | if (likely(ciu_sum)) { | 1028 | else if (unlikely(cop0_cause & STATUSF_IP4)) |
765 | irq = fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1; | 1029 | octeon_irq_ip4(); |
766 | octeon_ciu0_ack(irq); | 1030 | else if (likely(cop0_cause)) |
767 | do_IRQ(irq); | ||
768 | } else { | ||
769 | spurious_interrupt(); | ||
770 | } | ||
771 | } else if (unlikely(cop0_cause & STATUSF_IP3)) { | ||
772 | ciu_sum = cvmx_read_csr(ciu_sum1_address); | ||
773 | ciu_en = cvmx_read_csr(ciu_en1_address); | ||
774 | ciu_sum &= ciu_en; | ||
775 | if (likely(ciu_sum)) { | ||
776 | irq = fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1; | ||
777 | octeon_ciu1_ack(irq); | ||
778 | do_IRQ(irq); | ||
779 | } else { | ||
780 | spurious_interrupt(); | ||
781 | } | ||
782 | } else if (likely(cop0_cause)) { | ||
783 | do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE); | 1031 | do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE); |
784 | } else { | 1032 | else |
785 | break; | 1033 | break; |
786 | } | ||
787 | } | 1034 | } |
788 | } | 1035 | } |
789 | 1036 | ||
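
After the rewrite, plat_irq_dispatch() only decodes the CAUSE bits and indirects through the octeon_irq_ip2/ip3/ip4 pointers selected in octeon_irq_init_ciu(); the CIU register walk moves into those handlers. A hedged reconstruction of the IP2 path, assuming a per-CPU mirror of the CIU0 enable mask and the reverse mapping table sketched earlier:

	/*
	 * Sketch of an IP2 handler: mask the summary register with this
	 * CPU's cached enable bits, then map the highest pending CIU0 bit
	 * back to its Linux irq number.
	 */
	static void octeon_irq_ip2_sketch(void)
	{
		u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(cvmx_get_core_num() * 2));

		ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror);
		if (likely(ciu_sum)) {
			int bit = fls64(ciu_sum) - 1;
			unsigned int irq = octeon_irq_ciu_to_irq[0][bit];

			if (likely(irq))
				do_IRQ(irq);
			else
				spurious_interrupt();
		} else {
			spurious_interrupt();
		}
	}
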
@@ -791,83 +1038,7 @@ asmlinkage void plat_irq_dispatch(void) | |||
791 | 1038 | ||
792 | void fixup_irqs(void) | 1039 | void fixup_irqs(void) |
793 | { | 1040 | { |
794 | int irq; | 1041 | irq_cpu_offline(); |
795 | struct irq_desc *desc; | ||
796 | cpumask_t new_affinity; | ||
797 | unsigned long flags; | ||
798 | int do_set_affinity; | ||
799 | int cpu; | ||
800 | |||
801 | cpu = smp_processor_id(); | ||
802 | |||
803 | for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) | ||
804 | octeon_irq_core_disable_local(irq); | ||
805 | |||
806 | for (irq = OCTEON_IRQ_WORKQ0; irq < OCTEON_IRQ_LAST; irq++) { | ||
807 | desc = irq_to_desc(irq); | ||
808 | switch (irq) { | ||
809 | case OCTEON_IRQ_MBOX0: | ||
810 | case OCTEON_IRQ_MBOX1: | ||
811 | /* The eoi function will disable them on this CPU. */ | ||
812 | desc->chip->eoi(irq); | ||
813 | break; | ||
814 | case OCTEON_IRQ_WDOG0: | ||
815 | case OCTEON_IRQ_WDOG1: | ||
816 | case OCTEON_IRQ_WDOG2: | ||
817 | case OCTEON_IRQ_WDOG3: | ||
818 | case OCTEON_IRQ_WDOG4: | ||
819 | case OCTEON_IRQ_WDOG5: | ||
820 | case OCTEON_IRQ_WDOG6: | ||
821 | case OCTEON_IRQ_WDOG7: | ||
822 | case OCTEON_IRQ_WDOG8: | ||
823 | case OCTEON_IRQ_WDOG9: | ||
824 | case OCTEON_IRQ_WDOG10: | ||
825 | case OCTEON_IRQ_WDOG11: | ||
826 | case OCTEON_IRQ_WDOG12: | ||
827 | case OCTEON_IRQ_WDOG13: | ||
828 | case OCTEON_IRQ_WDOG14: | ||
829 | case OCTEON_IRQ_WDOG15: | ||
830 | /* | ||
831 | * These have special per CPU semantics and | ||
832 | * are handled in the watchdog driver. | ||
833 | */ | ||
834 | break; | ||
835 | default: | ||
836 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
837 | /* | ||
838 | * If this irq has an action, it is in use and | ||
839 | * must be migrated if it has affinity to this | ||
840 | * cpu. | ||
841 | */ | ||
842 | if (desc->action && cpumask_test_cpu(cpu, desc->affinity)) { | ||
843 | if (cpumask_weight(desc->affinity) > 1) { | ||
844 | /* | ||
845 | * It has multi CPU affinity, | ||
846 | * just remove this CPU from | ||
847 | * the affinity set. | ||
848 | */ | ||
849 | cpumask_copy(&new_affinity, desc->affinity); | ||
850 | cpumask_clear_cpu(cpu, &new_affinity); | ||
851 | } else { | ||
852 | /* | ||
853 | * Otherwise, put it on lowest | ||
854 | * numbered online CPU. | ||
855 | */ | ||
856 | cpumask_clear(&new_affinity); | ||
857 | cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity); | ||
858 | } | ||
859 | do_set_affinity = 1; | ||
860 | } else { | ||
861 | do_set_affinity = 0; | ||
862 | } | ||
863 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
864 | |||
865 | if (do_set_affinity) | ||
866 | irq_set_affinity(irq, &new_affinity); | ||
867 | |||
868 | break; | ||
869 | } | ||
870 | } | ||
871 | } | 1042 | } |
872 | 1043 | ||
873 | #endif /* CONFIG_HOTPLUG_CPU */ | 1044 | #endif /* CONFIG_HOTPLUG_CPU */ |
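
fixup_irqs() shrinks to a single call because the hand-rolled affinity migration now lives behind the generic genirq hook: irq_cpu_offline() walks the active irq descriptors and gives each chip's irq_cpu_offline callback the chance to mask or migrate the line for the CPU going down. Roughly the shape of the generic helper, as a sketch rather than its exact body:

	/*
	 * Sketch: for each active irq, take the descriptor lock and invoke
	 * the chip's per-CPU offline hook, if it provides one.
	 */
	void irq_cpu_offline_sketch(void)
	{
		unsigned int irq;

		for_each_active_irq(irq) {
			struct irq_desc *desc = irq_to_desc(irq);
			struct irq_data *data = irq_desc_get_irq_data(desc);
			struct irq_chip *chip = irq_data_get_irq_chip(data);
			unsigned long flags;

			raw_spin_lock_irqsave(&desc->lock, flags);
			if (chip && chip->irq_cpu_offline)
				chip->irq_cpu_offline(data);
			raw_spin_unlock_irqrestore(&desc->lock, flags);
		}
	}
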
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c index cecaf62aef32..cd61d7281d91 100644 --- a/arch/mips/cavium-octeon/octeon-platform.c +++ b/arch/mips/cavium-octeon/octeon-platform.c | |||
@@ -75,7 +75,7 @@ static int __init octeon_cf_device_init(void) | |||
75 | * zero. | 75 | * zero. |
76 | */ | 76 | */ |
77 | 77 | ||
78 | /* Asume that CS1 immediately follows. */ | 78 | /* Assume that CS1 immediately follows. */ |
79 | mio_boot_reg_cfg.u64 = | 79 | mio_boot_reg_cfg.u64 = |
80 | cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(i + 1)); | 80 | cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(i + 1)); |
81 | region_base = mio_boot_reg_cfg.s.base << 16; | 81 | region_base = mio_boot_reg_cfg.s.base << 16; |
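
For context: octeon_cf_device_init() sizes the compact-flash chip-select window from MIO_BOOT_REG_CFG, whose base and size fields are in 64 KiB units. The base computation is visible above; the matching size statement falls outside the hunk, so the second line here is an assumption about the surrounding code:

	region_base = mio_boot_reg_cfg.s.base << 16;        /* 64 KiB units */
	region_size = (mio_boot_reg_cfg.s.size + 1) << 16;  /* size field is stored minus one */
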
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index b0c3686c96dd..0707fae3f0ee 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c | |||
@@ -420,7 +420,6 @@ void octeon_user_io_init(void) | |||
420 | void __init prom_init(void) | 420 | void __init prom_init(void) |
421 | { | 421 | { |
422 | struct cvmx_sysinfo *sysinfo; | 422 | struct cvmx_sysinfo *sysinfo; |
423 | const int coreid = cvmx_get_core_num(); | ||
424 | int i; | 423 | int i; |
425 | int argc; | 424 | int argc; |
426 | #ifdef CONFIG_CAVIUM_RESERVE32 | 425 | #ifdef CONFIG_CAVIUM_RESERVE32 |
@@ -537,17 +536,6 @@ void __init prom_init(void) | |||
537 | 536 | ||
538 | octeon_uart = octeon_get_boot_uart(); | 537 | octeon_uart = octeon_get_boot_uart(); |
539 | 538 | ||
540 | /* | ||
541 | * Disable All CIU Interrupts. The ones we need will be | ||
542 | * enabled later. Read the SUM register so we know the write | ||
543 | * completed. | ||
544 | */ | ||
545 | cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0); | ||
546 | cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0); | ||
547 | cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0); | ||
548 | cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0); | ||
549 | cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2))); | ||
550 | |||
551 | #ifdef CONFIG_SMP | 539 | #ifdef CONFIG_SMP |
552 | octeon_write_lcd("LinuxSMP"); | 540 | octeon_write_lcd("LinuxSMP"); |
553 | #else | 541 | #else |
@@ -674,7 +662,7 @@ void __init plat_mem_setup(void) | |||
674 | * some memory vectors. When SPARSEMEM is in use, it doesn't | 662 | * some memory vectors. When SPARSEMEM is in use, it doesn't |
675 | * verify that the size is big enough for the final | 663 | * verify that the size is big enough for the final |
676 | * vectors. Making the smallest chunk 4MB seems to be enough | 664 | * vectors. Making the smallest chunk 4MB seems to be enough |
677 | * to consistantly work. | 665 | * to consistently work. |
678 | */ | 666 | */ |
679 | mem_alloc_size = 4 << 20; | 667 | mem_alloc_size = 4 << 20; |
680 | if (mem_alloc_size > MAX_MEMORY) | 668 | if (mem_alloc_size > MAX_MEMORY) |
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index 391cefe556b3..ba78b21cc8d0 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c | |||
@@ -171,41 +171,19 @@ static void octeon_boot_secondary(int cpu, struct task_struct *idle) | |||
171 | * After we've done initial boot, this function is called to allow the | 171 | * After we've done initial boot, this function is called to allow the |
172 | * board code to clean up state, if needed | 172 | * board code to clean up state, if needed |
173 | */ | 173 | */ |
174 | static void octeon_init_secondary(void) | 174 | static void __cpuinit octeon_init_secondary(void) |
175 | { | 175 | { |
176 | const int coreid = cvmx_get_core_num(); | ||
177 | union cvmx_ciu_intx_sum0 interrupt_enable; | ||
178 | unsigned int sr; | 176 | unsigned int sr; |
179 | 177 | ||
180 | #ifdef CONFIG_HOTPLUG_CPU | ||
181 | struct linux_app_boot_info *labi; | ||
182 | |||
183 | labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER); | ||
184 | |||
185 | if (labi->labi_signature != LABI_SIGNATURE) | ||
186 | panic("The bootloader version on this board is incorrect."); | ||
187 | #endif | ||
188 | |||
189 | sr = set_c0_status(ST0_BEV); | 178 | sr = set_c0_status(ST0_BEV); |
190 | write_c0_ebase((u32)ebase); | 179 | write_c0_ebase((u32)ebase); |
191 | write_c0_status(sr); | 180 | write_c0_status(sr); |
192 | 181 | ||
193 | octeon_check_cpu_bist(); | 182 | octeon_check_cpu_bist(); |
194 | octeon_init_cvmcount(); | 183 | octeon_init_cvmcount(); |
195 | /* | 184 | |
196 | pr_info("SMP: CPU%d (CoreId %lu) started\n", cpu, coreid); | 185 | octeon_irq_setup_secondary(); |
197 | */ | 186 | raw_local_irq_enable(); |
198 | /* Enable Mailbox interrupts to this core. These are the only | ||
199 | interrupts allowed on line 3 */ | ||
200 | cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), 0xffffffff); | ||
201 | interrupt_enable.u64 = 0; | ||
202 | interrupt_enable.s.mbox = 0x3; | ||
203 | cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), interrupt_enable.u64); | ||
204 | cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0); | ||
205 | cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0); | ||
206 | cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0); | ||
207 | /* Enable core interrupt processing for 2,3 and 7 */ | ||
208 | set_c0_status(0x8c01); | ||
209 | } | 187 | } |
210 | 188 | ||
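
octeon_init_secondary() no longer pokes CIU registers directly; it defers to whichever octeon_irq_setup_secondary hook octeon_irq_init_ciu() installed, then enables interrupts locally. The hook pattern, sketched under the assumption that it is a plain function pointer exported by the irq code (the NULL guard is an illustration):

	/* Installed once at boot by the CIU init code. */
	void (*octeon_irq_setup_secondary)(void);

	/* Sketch of the per-secondary-CPU call site. */
	static void octeon_smp_irq_init_sketch(void)
	{
		if (octeon_irq_setup_secondary)
			octeon_irq_setup_secondary();
		raw_local_irq_enable();
	}
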
211 | /** | 189 | /** |
@@ -214,6 +192,15 @@ static void octeon_init_secondary(void) | |||
214 | */ | 192 | */ |
215 | void octeon_prepare_cpus(unsigned int max_cpus) | 193 | void octeon_prepare_cpus(unsigned int max_cpus) |
216 | { | 194 | { |
195 | #ifdef CONFIG_HOTPLUG_CPU | ||
196 | struct linux_app_boot_info *labi; | ||
197 | |||
198 | labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER); | ||
199 | |||
200 | if (labi->labi_signature != LABI_SIGNATURE) | ||
201 | panic("The bootloader version on this board is incorrect."); | ||
202 | #endif | ||
203 | |||
217 | cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffffffff); | 204 | cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffffffff); |
218 | if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_DISABLED, | 205 | if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_DISABLED, |
219 | "mailbox0", mailbox_interrupt)) { | 206 | "mailbox0", mailbox_interrupt)) { |