Diffstat (limited to 'arch/ia64/sn/kernel/irq.c')
-rw-r--r--	arch/ia64/sn/kernel/irq.c	255
1 file changed, 135 insertions, 120 deletions
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 0f4e8138658f..84d276a14ecb 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -9,13 +9,13 @@
  */
 
 #include <linux/irq.h>
-#include <asm/sn/intr.h>
+#include <linux/spinlock.h>
 #include <asm/sn/addrs.h>
 #include <asm/sn/arch.h>
-#include "xtalk/xwidgetdev.h"
+#include <asm/sn/intr.h>
+#include <asm/sn/pcibr_provider.h>
 #include <asm/sn/pcibus_provider_defs.h>
 #include <asm/sn/pcidev.h>
-#include "pci/pcibr_provider.h"
 #include <asm/sn/shub_mmr.h>
 #include <asm/sn/sn_sal.h>
 
@@ -25,7 +25,8 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
 
 extern int sn_force_interrupt_flag;
 extern int sn_ioif_inited;
-struct sn_irq_info **sn_irq;
+static struct list_head **sn_irq_lh;
+static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */
 
 static inline uint64_t sn_intr_alloc(nasid_t local_nasid, int local_widget,
 				     u64 sn_irq_info,
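
Note (not part of the patch): the code below assumes struct sn_irq_info carries a list_head and an rcu_head; both member names appear in later hunks (&sn_irq_info->list, &sn_irq_info->rcu), and the fields themselves are presumably added by the matching header change, which is outside this file's diff. A minimal sketch of the assumed layout:

#include <linux/list.h>
#include <linux/rcupdate.h>

/* Sketch only -- the real definition lives in the SN interrupt header. */
struct sn_irq_info {
	struct list_head list;	/* links the entry into sn_irq_lh[irq] */
	struct rcu_head rcu;	/* handed to call_rcu() for the deferred kfree() */
	int irq_irq;		/* ...remaining existing fields elided... */
};
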
@@ -101,7 +102,7 @@ static void sn_end_irq(unsigned int irq)
 		nasid = get_nasid();
 		event_occurred = HUB_L((uint64_t *) GLOBAL_MMR_ADDR
 				       (nasid, SH_EVENT_OCCURRED));
-		/* If the UART bit is set here, we may have received an 
+		/* If the UART bit is set here, we may have received an
 		 * interrupt from the UART that the driver missed. To
 		 * make sure, we IPI ourselves to force us to look again.
 		 */
@@ -115,82 +116,84 @@ static void sn_end_irq(unsigned int irq)
 		force_interrupt(irq);
 }
 
+static void sn_irq_info_free(struct rcu_head *head);
+
 static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
 {
-	struct sn_irq_info *sn_irq_info = sn_irq[irq];
-	struct sn_irq_info *tmp_sn_irq_info;
+	struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
 	int cpuid, cpuphys;
-	nasid_t t_nasid;	/* nasid to target */
-	int t_slice;		/* slice to target */
-
-	/* allocate a temp sn_irq_info struct to get new target info */
-	tmp_sn_irq_info = kmalloc(sizeof(*tmp_sn_irq_info), GFP_KERNEL);
-	if (!tmp_sn_irq_info)
-		return;
 
 	cpuid = first_cpu(mask);
 	cpuphys = cpu_physical_id(cpuid);
-	t_nasid = cpuid_to_nasid(cpuid);
-	t_slice = cpuid_to_slice(cpuid);
 
-	while (sn_irq_info) {
-		int status;
-		int local_widget;
-		uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge;
-		nasid_t local_nasid = NASID_GET(bridge);
+	list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
+				 sn_irq_lh[irq], list) {
+		uint64_t bridge;
+		int local_widget, status;
+		nasid_t local_nasid;
+		struct sn_irq_info *new_irq_info;
+
+		new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
+		if (new_irq_info == NULL)
+			break;
+		memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
+
+		bridge = (uint64_t) new_irq_info->irq_bridge;
+		if (!bridge) {
+			kfree(new_irq_info);
+			break; /* irq is not a device interrupt */
+		}
 
-		if (!bridge)
-			break;	/* irq is not a device interrupt */
+		local_nasid = NASID_GET(bridge);
 
 		if (local_nasid & 1)
 			local_widget = TIO_SWIN_WIDGETNUM(bridge);
 		else
 			local_widget = SWIN_WIDGETNUM(bridge);
 
-		/* Free the old PROM sn_irq_info structure */
-		sn_intr_free(local_nasid, local_widget, sn_irq_info);
+		/* Free the old PROM new_irq_info structure */
+		sn_intr_free(local_nasid, local_widget, new_irq_info);
+		/* Update kernels new_irq_info with new target info */
+		unregister_intr_pda(new_irq_info);
 
-		/* allocate a new PROM sn_irq_info struct */
+		/* allocate a new PROM new_irq_info struct */
 		status = sn_intr_alloc(local_nasid, local_widget,
-				       __pa(tmp_sn_irq_info), irq, t_nasid,
-				       t_slice);
-
-		if (status == 0) {
-			/* Update kernels sn_irq_info with new target info */
-			unregister_intr_pda(sn_irq_info);
-			sn_irq_info->irq_cpuid = cpuid;
-			sn_irq_info->irq_nasid = t_nasid;
-			sn_irq_info->irq_slice = t_slice;
-			sn_irq_info->irq_xtalkaddr =
-			    tmp_sn_irq_info->irq_xtalkaddr;
-			sn_irq_info->irq_cookie = tmp_sn_irq_info->irq_cookie;
-			register_intr_pda(sn_irq_info);
-
-			if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type)) {
-				pcibr_change_devices_irq(sn_irq_info);
-			}
+				       __pa(new_irq_info), irq,
+				       cpuid_to_nasid(cpuid),
+				       cpuid_to_slice(cpuid));
+
+		/* SAL call failed */
+		if (status) {
+			kfree(new_irq_info);
+			break;
+		}
+
+		new_irq_info->irq_cpuid = cpuid;
+		register_intr_pda(new_irq_info);
+
+		if (IS_PCI_BRIDGE_ASIC(new_irq_info->irq_bridge_type))
+			pcibr_change_devices_irq(new_irq_info);
 
-			sn_irq_info = sn_irq_info->irq_next;
+		spin_lock(&sn_irq_info_lock);
+		list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
+		spin_unlock(&sn_irq_info_lock);
+		call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
 
 #ifdef CONFIG_SMP
 		set_irq_affinity_info((irq & 0xff), cpuphys, 0);
 #endif
-		} else {
-			break;	/* snp_affinity failed the intr_alloc */
-		}
 	}
-	kfree(tmp_sn_irq_info);
 }
 
 struct hw_interrupt_type irq_type_sn = {
-	"SN hub",
-	sn_startup_irq,
-	sn_shutdown_irq,
-	sn_enable_irq,
-	sn_disable_irq,
-	sn_ack_irq,
-	sn_end_irq,
-	sn_set_affinity_irq
+	.typename = "SN hub",
+	.startup = sn_startup_irq,
+	.shutdown = sn_shutdown_irq,
+	.enable = sn_enable_irq,
+	.disable = sn_disable_irq,
+	.ack = sn_ack_irq,
+	.end = sn_end_irq,
+	.set_affinity = sn_set_affinity_irq
 };
 
 unsigned int sn_local_vector_to_irq(u8 vector)
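
The affinity loop above is the usual RCU update-by-copy idiom: allocate a private copy of the entry, fix it up, publish it with list_replace_rcu() under the writer spinlock, and hand the stale entry to call_rcu() so it is freed only after concurrent readers have left their rcu_read_lock() sections. A condensed, self-contained sketch of that shape (illustrative names, not code from this patch):

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;	/* serializes writers */

struct example_node {
	struct list_head list;
	struct rcu_head rcu;
	int cpu;
};

static void example_node_free(struct rcu_head *head)
{
	kfree(container_of(head, struct example_node, rcu));
}

static void example_retarget(struct example_node *old, int cpu)
{
	struct example_node *copy = kmalloc(sizeof(*copy), GFP_ATOMIC);

	if (!copy)
		return;
	memcpy(copy, old, sizeof(*copy));
	copy->cpu = cpu;				/* update the private copy */

	spin_lock(&example_lock);
	list_replace_rcu(&old->list, &copy->list);	/* publish the copy */
	spin_unlock(&example_lock);
	call_rcu(&old->rcu, example_node_free);		/* defer freeing the old one */
}
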
@@ -231,19 +234,18 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
 	struct sn_irq_info *tmp_irq_info;
 	int i, foundmatch;
 
+	rcu_read_lock();
 	if (pdacpu(cpu)->sn_last_irq == irq) {
 		foundmatch = 0;
-		for (i = pdacpu(cpu)->sn_last_irq - 1; i; i--) {
-			tmp_irq_info = sn_irq[i];
-			while (tmp_irq_info) {
+		for (i = pdacpu(cpu)->sn_last_irq - 1;
+		     i && !foundmatch; i--) {
+			list_for_each_entry_rcu(tmp_irq_info,
+						sn_irq_lh[i],
+						list) {
 				if (tmp_irq_info->irq_cpuid == cpu) {
-					foundmatch++;
+					foundmatch = 1;
 					break;
 				}
-				tmp_irq_info = tmp_irq_info->irq_next;
-			}
-			if (foundmatch) {
-				break;
 			}
 		}
 		pdacpu(cpu)->sn_last_irq = i;
@@ -251,60 +253,27 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
 
 	if (pdacpu(cpu)->sn_first_irq == irq) {
 		foundmatch = 0;
-		for (i = pdacpu(cpu)->sn_first_irq + 1; i < NR_IRQS; i++) {
-			tmp_irq_info = sn_irq[i];
-			while (tmp_irq_info) {
+		for (i = pdacpu(cpu)->sn_first_irq + 1;
+		     i < NR_IRQS && !foundmatch; i++) {
+			list_for_each_entry_rcu(tmp_irq_info,
+						sn_irq_lh[i],
+						list) {
 				if (tmp_irq_info->irq_cpuid == cpu) {
-					foundmatch++;
+					foundmatch = 1;
 					break;
 				}
-				tmp_irq_info = tmp_irq_info->irq_next;
-			}
-			if (foundmatch) {
-				break;
 			}
 		}
 		pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i);
 	}
+	rcu_read_unlock();
 }
 
-struct sn_irq_info *sn_irq_alloc(nasid_t local_nasid, int local_widget, int irq,
-				 nasid_t nasid, int slice)
+static void sn_irq_info_free(struct rcu_head *head)
 {
 	struct sn_irq_info *sn_irq_info;
-	int status;
-
-	sn_irq_info = kmalloc(sizeof(*sn_irq_info), GFP_KERNEL);
-	if (sn_irq_info == NULL)
-		return NULL;
-
-	memset(sn_irq_info, 0x0, sizeof(*sn_irq_info));
-
-	status =
-	    sn_intr_alloc(local_nasid, local_widget, __pa(sn_irq_info), irq,
-			  nasid, slice);
-
-	if (status) {
-		kfree(sn_irq_info);
-		return NULL;
-	} else {
-		return sn_irq_info;
-	}
-}
-
-void sn_irq_free(struct sn_irq_info *sn_irq_info)
-{
-	uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge;
-	nasid_t local_nasid = NASID_GET(bridge);
-	int local_widget;
-
-	if (local_nasid & 1)	/* tio check */
-		local_widget = TIO_SWIN_WIDGETNUM(bridge);
-	else
-		local_widget = SWIN_WIDGETNUM(bridge);
-
-	sn_intr_free(local_nasid, local_widget, sn_irq_info);
 
+	sn_irq_info = container_of(head, struct sn_irq_info, rcu);
 	kfree(sn_irq_info);
 }
 
@@ -314,30 +283,54 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
 	int slice = sn_irq_info->irq_slice;
 	int cpu = nasid_slice_to_cpuid(nasid, slice);
 
+	pci_dev_get(pci_dev);
 	sn_irq_info->irq_cpuid = cpu;
 	sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev);
 
 	/* link it into the sn_irq[irq] list */
-	sn_irq_info->irq_next = sn_irq[sn_irq_info->irq_irq];
-	sn_irq[sn_irq_info->irq_irq] = sn_irq_info;
+	spin_lock(&sn_irq_info_lock);
+	list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
+	spin_unlock(&sn_irq_info_lock);
 
 	(void)register_intr_pda(sn_irq_info);
 }
 
+void sn_irq_unfixup(struct pci_dev *pci_dev)
+{
+	struct sn_irq_info *sn_irq_info;
+
+	/* Only cleanup IRQ stuff if this device has a host bus context */
+	if (!SN_PCIDEV_BUSSOFT(pci_dev))
+		return;
+
+	sn_irq_info = SN_PCIDEV_INFO(pci_dev)->pdi_sn_irq_info;
+	if (!sn_irq_info || !sn_irq_info->irq_irq) {
+		kfree(sn_irq_info);
+		return;
+	}
+
+	unregister_intr_pda(sn_irq_info);
+	spin_lock(&sn_irq_info_lock);
+	list_del_rcu(&sn_irq_info->list);
+	spin_unlock(&sn_irq_info_lock);
+	call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
+	pci_dev_put(pci_dev);
+}
+
 static void force_interrupt(int irq)
 {
 	struct sn_irq_info *sn_irq_info;
 
 	if (!sn_ioif_inited)
 		return;
-	sn_irq_info = sn_irq[irq];
-	while (sn_irq_info) {
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list) {
 		if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
-		    (sn_irq_info->irq_bridge != NULL)) {
+		    (sn_irq_info->irq_bridge != NULL))
 			pcibr_force_interrupt(sn_irq_info);
-		}
-		sn_irq_info = sn_irq_info->irq_next;
 	}
+	rcu_read_unlock();
 }
 
 /*
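
The fixup/unfixup pair above follows the standard RCU list discipline: writers serialize with sn_irq_info_lock and use list_add_rcu()/list_del_rcu(), while readers such as force_interrupt() (and sn_lb_int_war_check() below) take only rcu_read_lock() and walk the chain with list_for_each_entry_rcu(). A condensed sketch of that reader/writer split, using hypothetical names:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

static LIST_HEAD(example_list);
static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;	/* writers only */

struct example_node {
	struct list_head list;
	int irq_cpuid;
};

static void example_add(struct example_node *node)	/* writer side */
{
	spin_lock(&example_lock);
	list_add_rcu(&node->list, &example_list);
	spin_unlock(&example_lock);
}

static int example_cpu_in_use(int cpu)			/* reader side */
{
	struct example_node *node;
	int found = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(node, &example_list, list) {
		if (node->irq_cpuid == cpu) {
			found = 1;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}
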
@@ -402,19 +395,41 @@ static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
 
 void sn_lb_int_war_check(void)
 {
+	struct sn_irq_info *sn_irq_info;
 	int i;
 
 	if (!sn_ioif_inited || pda->sn_first_irq == 0)
 		return;
+
+	rcu_read_lock();
 	for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
-		struct sn_irq_info *sn_irq_info = sn_irq[i];
-		while (sn_irq_info) {
-			/* Only call for PCI bridges that are fully initialized. */
+		list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) {
+			/*
+			 * Only call for PCI bridges that are fully
+			 * initialized.
+			 */
 			if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
-			    (sn_irq_info->irq_bridge != NULL)) {
+			    (sn_irq_info->irq_bridge != NULL))
 				sn_check_intr(i, sn_irq_info);
-			}
-			sn_irq_info = sn_irq_info->irq_next;
 		}
 	}
+	rcu_read_unlock();
+}
+
+void sn_irq_lh_init(void)
+{
+	int i;
+
+	sn_irq_lh = kmalloc(sizeof(struct list_head *) * NR_IRQS, GFP_KERNEL);
+	if (!sn_irq_lh)
+		panic("SN PCI INIT: Failed to allocate memory for PCI init\n");
+
+	for (i = 0; i < NR_IRQS; i++) {
+		sn_irq_lh[i] = kmalloc(sizeof(struct list_head), GFP_KERNEL);
+		if (!sn_irq_lh[i])
+			panic("SN PCI INIT: Failed IRQ memory allocation\n");
+
+		INIT_LIST_HEAD(sn_irq_lh[i]);
+	}
+
 }
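
sn_irq_lh_init() sets up the NR_IRQS per-IRQ list heads that the rest of the file indexes as sn_irq_lh[irq]; the patch allocates the pointer table and each head with kmalloc() and panics on failure during early PCI init. For comparison only (this is not what the patch does), the same structure could be initialized from a static table, which shows the shape of the data being built:

#include <linux/list.h>

#define EXAMPLE_NR_IRQS	256		/* stand-in for NR_IRQS */

static struct list_head example_irq_lists[EXAMPLE_NR_IRQS];

static void example_irq_lists_init(void)
{
	int i;

	for (i = 0; i < EXAMPLE_NR_IRQS; i++)
		INIT_LIST_HEAD(&example_irq_lists[i]);
}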