author     Prarit Bhargava <prarit@sgi.com>   2005-07-06 17:59:44 -0400
committer  Tony Luck <tony.luck@intel.com>    2005-07-06 17:59:44 -0400
commit     cb4cb2cb9b0b14bdf2fc7125e099ed7e818cea42 (patch)
tree       11e110ce3201e0cbd3e30f1d4a1b0dc6ebe19bfa /arch
parent     bd53d1270f51c6cfb53b06c8f93fd42327871d6b (diff)
[IA64] hotplug/ia64: SN Hotplug Driver: SN IRQ Fixes

This patch fixes the SN IRQ code so that both CPU affinity changes and
hotplug can modify IRQ values. The sn_irq_info structures are now
protected by RCU, which avoids lock contention in the lost-interrupt
workaround (WAR) code.

Signed-off-by: Prarit Bhargava <prarit@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
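
[Editor's note] For readers unfamiliar with the locking scheme the patch adopts, the sketch below shows the pattern in isolation: readers walk a per-IRQ list under rcu_read_lock() with no lock contention, while writers serialize on a spinlock and publish updates with list_add_rcu()/list_replace_rcu(), deferring the free to call_rcu(). This is a minimal illustration with simplified names and a trimmed-down structure, not the kernel code itself.

```c
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/errno.h>

/* Simplified stand-in for struct sn_irq_info: just the list linkage,
 * the RCU callback head, and one payload field. */
struct demo_irq_info {
        struct list_head list;
        struct rcu_head rcu;
        int irq_cpuid;
};

static LIST_HEAD(demo_irq_list);        /* the real code keeps one list per IRQ */
static spinlock_t demo_irq_lock = SPIN_LOCK_UNLOCKED;   /* writers only */

/* Reader side: lockless traversal, safe against concurrent updates. */
static int demo_count_on_cpu(int cpu)
{
        struct demo_irq_info *info;
        int n = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(info, &demo_irq_list, list)
                if (info->irq_cpuid == cpu)
                        n++;
        rcu_read_unlock();
        return n;
}

/* RCU callback: runs only after every pre-existing reader has finished. */
static void demo_info_free(struct rcu_head *head)
{
        kfree(container_of(head, struct demo_irq_info, rcu));
}

/* Writer side: build an updated copy, swap it in atomically for readers,
 * then let the old copy be reclaimed after a grace period. */
static int demo_retarget(struct demo_irq_info *old, int new_cpu)
{
        struct demo_irq_info *new_info = kmalloc(sizeof(*new_info), GFP_ATOMIC);

        if (!new_info)
                return -ENOMEM;
        *new_info = *old;
        new_info->irq_cpuid = new_cpu;

        spin_lock(&demo_irq_lock);
        list_replace_rcu(&old->list, &new_info->list);
        spin_unlock(&demo_irq_lock);
        call_rcu(&old->rcu, demo_info_free);
        return 0;
}
```

The reworked sn_set_affinity_irq() in the patch follows this same copy-and-replace shape, allocating with GFP_ATOMIC, presumably because it may be called from a context that cannot sleep.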
Diffstat (limited to 'arch')
-rw-r--r--  arch/ia64/sn/kernel/io_init.c |  39
-rw-r--r--  arch/ia64/sn/kernel/irq.c     | 250
2 files changed, 155 insertions(+), 134 deletions(-)
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 783eb4323847..2f03e3f52b63 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -21,7 +21,6 @@
 #include <asm/sn/simulator.h>
 #include <asm/sn/tioca_provider.h>
 
-char master_baseio_wid;
 nasid_t master_nasid = INVALID_NASID;   /* Partition Master */
 
 struct slab_info {
@@ -231,11 +230,13 @@ static void sn_pci_fixup_slot(struct pci_dev *dev)
 {
        int idx;
        int segment = 0;
-       uint64_t size;
-       struct sn_irq_info *sn_irq_info;
-       struct pci_dev *host_pci_dev;
        int status = 0;
        struct pcibus_bussoft *bs;
+       struct pci_bus *host_pci_bus;
+       struct pci_dev *host_pci_dev;
+       struct sn_irq_info *sn_irq_info;
+       unsigned long size;
+       unsigned int bus_no, devfn;
 
        dev->sysdata = kmalloc(sizeof(struct pcidev_info), GFP_KERNEL);
        if (SN_PCIDEV_INFO(dev) <= 0)
@@ -253,7 +254,7 @@ static void sn_pci_fixup_slot(struct pci_dev *dev)
                               (u64) __pa(SN_PCIDEV_INFO(dev)),
                               (u64) __pa(sn_irq_info));
        if (status)
-               BUG(); /* Cannot get platform pci device information information */
+               BUG(); /* Cannot get platform pci device information */
 
        /* Copy over PIO Mapped Addresses */
        for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
@@ -275,15 +276,20 @@ static void sn_pci_fixup_slot(struct pci_dev *dev)
                dev->resource[idx].parent = &iomem_resource;
        }
 
-       /* set up host bus linkages */
-       bs = SN_PCIBUS_BUSSOFT(dev->bus);
-       host_pci_dev =
-           pci_find_slot(SN_PCIDEV_INFO(dev)->pdi_slot_host_handle >> 32,
-                         SN_PCIDEV_INFO(dev)->
-                         pdi_slot_host_handle & 0xffffffff);
+       /* Using the PROMs values for the PCI host bus, get the Linux
+        * PCI host_pci_dev struct and set up host bus linkages
+        */
+
+       bus_no = SN_PCIDEV_INFO(dev)->pdi_slot_host_handle >> 32;
+       devfn = SN_PCIDEV_INFO(dev)->pdi_slot_host_handle & 0xffffffff;
+       host_pci_bus = pci_find_bus(pci_domain_nr(dev->bus), bus_no);
+       host_pci_dev = pci_get_slot(host_pci_bus, devfn);
+
+       SN_PCIDEV_INFO(dev)->host_pci_dev = host_pci_dev;
        SN_PCIDEV_INFO(dev)->pdi_host_pcidev_info =
            SN_PCIDEV_INFO(host_pci_dev);
        SN_PCIDEV_INFO(dev)->pdi_linux_pcidev = dev;
+       bs = SN_PCIBUS_BUSSOFT(dev->bus);
        SN_PCIDEV_INFO(dev)->pdi_pcibus_info = bs;
 
        if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) {
@@ -297,6 +303,9 @@ static void sn_pci_fixup_slot(struct pci_dev *dev)
                SN_PCIDEV_INFO(dev)->pdi_sn_irq_info = sn_irq_info;
                dev->irq = SN_PCIDEV_INFO(dev)->pdi_sn_irq_info->irq_irq;
                sn_irq_fixup(dev, sn_irq_info);
+       } else {
+               SN_PCIDEV_INFO(dev)->pdi_sn_irq_info = NULL;
+               kfree(sn_irq_info);
        }
 }
 
@@ -403,11 +412,7 @@ static int __init sn_pci_init(void)
         */
        ia64_max_iommu_merge_mask = ~PAGE_MASK;
        sn_fixup_ionodes();
-       sn_irq = kmalloc(sizeof(struct sn_irq_info *) * NR_IRQS, GFP_KERNEL);
-       if (sn_irq <= 0)
-               BUG();  /* Canno afford to run out of memory. */
-       memset(sn_irq, 0, sizeof(struct sn_irq_info *) * NR_IRQS);
-
+       sn_irq_lh_init();
        sn_init_cpei_timer();
 
 #ifdef CONFIG_PROC_FS
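
[Editor's note] One behavioural detail in the io_init.c change above: unlike the old pci_find_slot(), pci_get_slot() returns the pci_dev with its reference count raised, so whoever stores that pointer is expected to balance it with pci_dev_put() once it is no longer needed. A minimal sketch of that contract, using a hypothetical helper rather than code from this patch:

```c
#include <linux/pci.h>

/* Hypothetical lookup helper mirroring what the reworked
 * sn_pci_fixup_slot() does: split a pdi_slot_host_handle-style encoding
 * into bus number and devfn, then resolve the host pci_dev. */
static struct pci_dev *demo_lookup_host(struct pci_bus *bus_of_dev,
                                        unsigned long long handle)
{
        unsigned int bus_no = handle >> 32;
        unsigned int devfn = handle & 0xffffffff;
        struct pci_bus *host_bus;

        host_bus = pci_find_bus(pci_domain_nr(bus_of_dev), bus_no);
        if (!host_bus)
                return NULL;

        /* pci_get_slot() takes a reference on the device it returns ... */
        return pci_get_slot(host_bus, devfn);
}

static void demo_release_host(struct pci_dev *host_dev)
{
        /* ... which must eventually be dropped by the holder. */
        if (host_dev)
                pci_dev_put(host_dev);
}
```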
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 0f4e8138658f..e6f7551edfda 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/irq.h>
+#include <linux/spinlock.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/addrs.h>
 #include <asm/sn/arch.h>
@@ -25,7 +26,8 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
 
 extern int sn_force_interrupt_flag;
 extern int sn_ioif_inited;
-struct sn_irq_info **sn_irq;
+static struct list_head **sn_irq_lh;
+static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */
 
 static inline uint64_t sn_intr_alloc(nasid_t local_nasid, int local_widget,
                                      u64 sn_irq_info,
@@ -101,7 +103,7 @@ static void sn_end_irq(unsigned int irq)
                nasid = get_nasid();
                event_occurred = HUB_L((uint64_t *) GLOBAL_MMR_ADDR
                                       (nasid, SH_EVENT_OCCURRED));
-               /* If the UART bit is set here, we may have received an 
+               /* If the UART bit is set here, we may have received an
                 * interrupt from the UART that the driver missed. To
                 * make sure, we IPI ourselves to force us to look again.
                 */
@@ -115,82 +117,84 @@ static void sn_end_irq(unsigned int irq)
                force_interrupt(irq);
 }
 
+static void sn_irq_info_free(struct rcu_head *head);
+
 static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
 {
-       struct sn_irq_info *sn_irq_info = sn_irq[irq];
-       struct sn_irq_info *tmp_sn_irq_info;
+       struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
        int cpuid, cpuphys;
-       nasid_t t_nasid;        /* nasid to target */
-       int t_slice;            /* slice to target */
-
-       /* allocate a temp sn_irq_info struct to get new target info */
-       tmp_sn_irq_info = kmalloc(sizeof(*tmp_sn_irq_info), GFP_KERNEL);
-       if (!tmp_sn_irq_info)
-               return;
 
        cpuid = first_cpu(mask);
        cpuphys = cpu_physical_id(cpuid);
-       t_nasid = cpuid_to_nasid(cpuid);
-       t_slice = cpuid_to_slice(cpuid);
 
-       while (sn_irq_info) {
-               int status;
-               int local_widget;
-               uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge;
-               nasid_t local_nasid = NASID_GET(bridge);
+       list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
+                                sn_irq_lh[irq], list) {
+               uint64_t bridge;
+               int local_widget, status;
+               nasid_t local_nasid;
+               struct sn_irq_info *new_irq_info;
+
+               new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
+               if (new_irq_info == NULL)
+                       break;
+               memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
+
+               bridge = (uint64_t) new_irq_info->irq_bridge;
+               if (!bridge) {
+                       kfree(new_irq_info);
+                       break; /* irq is not a device interrupt */
+               }
 
-               if (!bridge)
-                       break;  /* irq is not a device interrupt */
+               local_nasid = NASID_GET(bridge);
 
                if (local_nasid & 1)
                        local_widget = TIO_SWIN_WIDGETNUM(bridge);
                else
                        local_widget = SWIN_WIDGETNUM(bridge);
 
-               /* Free the old PROM sn_irq_info structure */
-               sn_intr_free(local_nasid, local_widget, sn_irq_info);
+               /* Free the old PROM new_irq_info structure */
+               sn_intr_free(local_nasid, local_widget, new_irq_info);
+               /* Update kernels new_irq_info with new target info */
+               unregister_intr_pda(new_irq_info);
 
-               /* allocate a new PROM sn_irq_info struct */
+               /* allocate a new PROM new_irq_info struct */
                status = sn_intr_alloc(local_nasid, local_widget,
-                                      __pa(tmp_sn_irq_info), irq, t_nasid,
-                                      t_slice);
-
-               if (status == 0) {
-                       /* Update kernels sn_irq_info with new target info */
-                       unregister_intr_pda(sn_irq_info);
-                       sn_irq_info->irq_cpuid = cpuid;
-                       sn_irq_info->irq_nasid = t_nasid;
-                       sn_irq_info->irq_slice = t_slice;
-                       sn_irq_info->irq_xtalkaddr =
-                           tmp_sn_irq_info->irq_xtalkaddr;
-                       sn_irq_info->irq_cookie = tmp_sn_irq_info->irq_cookie;
-                       register_intr_pda(sn_irq_info);
-
-                       if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type)) {
-                               pcibr_change_devices_irq(sn_irq_info);
-                       }
+                                      __pa(new_irq_info), irq,
+                                      cpuid_to_nasid(cpuid),
+                                      cpuid_to_slice(cpuid));
+
+               /* SAL call failed */
+               if (status) {
+                       kfree(new_irq_info);
+                       break;
+               }
 
-                       sn_irq_info = sn_irq_info->irq_next;
+               new_irq_info->irq_cpuid = cpuid;
+               register_intr_pda(new_irq_info);
+
+               if (IS_PCI_BRIDGE_ASIC(new_irq_info->irq_bridge_type))
+                       pcibr_change_devices_irq(new_irq_info);
+
+               spin_lock(&sn_irq_info_lock);
+               list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
+               spin_unlock(&sn_irq_info_lock);
+               call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
 
 #ifdef CONFIG_SMP
                set_irq_affinity_info((irq & 0xff), cpuphys, 0);
 #endif
-               } else {
-                       break; /* snp_affinity failed the intr_alloc */
-               }
        }
-       kfree(tmp_sn_irq_info);
 }
 
 struct hw_interrupt_type irq_type_sn = {
-       "SN hub",
-       sn_startup_irq,
-       sn_shutdown_irq,
-       sn_enable_irq,
-       sn_disable_irq,
-       sn_ack_irq,
-       sn_end_irq,
-       sn_set_affinity_irq
+       .typename       = "SN hub",
+       .startup        = sn_startup_irq,
+       .shutdown       = sn_shutdown_irq,
+       .enable         = sn_enable_irq,
+       .disable        = sn_disable_irq,
+       .ack            = sn_ack_irq,
+       .end            = sn_end_irq,
+       .set_affinity   = sn_set_affinity_irq
 };
 
 unsigned int sn_local_vector_to_irq(u8 vector)
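
[Editor's note] Aside from the locking rework, the hunk above also converts the irq_type_sn table from positional to C99 designated initializers, so each handler is bound to a named field instead of depending on the declaration order of struct hw_interrupt_type. A generic illustration of the idiom (hypothetical ops structure, not the kernel's):

```c
/* With positional initializers, reordering the struct's members silently
 * wires handlers to the wrong slots; designated initializers name each
 * member explicitly and leave unmentioned ones zero-initialized. */
struct demo_irq_ops {
        const char *typename;
        void (*enable)(unsigned int irq);
        void (*disable)(unsigned int irq);
        void (*ack)(unsigned int irq);
};

static void demo_enable(unsigned int irq)  { /* unmask the line */ }
static void demo_disable(unsigned int irq) { /* mask the line */ }

static struct demo_irq_ops demo_ops = {
        .typename = "demo",
        .enable   = demo_enable,
        .disable  = demo_disable,
        /* .ack left NULL on purpose */
};
```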
@@ -231,19 +235,18 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
        struct sn_irq_info *tmp_irq_info;
        int i, foundmatch;
 
+       rcu_read_lock();
        if (pdacpu(cpu)->sn_last_irq == irq) {
                foundmatch = 0;
-               for (i = pdacpu(cpu)->sn_last_irq - 1; i; i--) {
-                       tmp_irq_info = sn_irq[i];
-                       while (tmp_irq_info) {
+               for (i = pdacpu(cpu)->sn_last_irq - 1;
+                    i && !foundmatch; i--) {
+                       list_for_each_entry_rcu(tmp_irq_info,
+                                               sn_irq_lh[i],
+                                               list) {
                                if (tmp_irq_info->irq_cpuid == cpu) {
-                                       foundmatch++;
+                                       foundmatch = 1;
                                        break;
                                }
-                               tmp_irq_info = tmp_irq_info->irq_next;
-                       }
-                       if (foundmatch) {
-                               break;
                        }
                }
                pdacpu(cpu)->sn_last_irq = i;
@@ -251,60 +254,27 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
 
        if (pdacpu(cpu)->sn_first_irq == irq) {
                foundmatch = 0;
-               for (i = pdacpu(cpu)->sn_first_irq + 1; i < NR_IRQS; i++) {
-                       tmp_irq_info = sn_irq[i];
-                       while (tmp_irq_info) {
+               for (i = pdacpu(cpu)->sn_first_irq + 1;
+                    i < NR_IRQS && !foundmatch; i++) {
+                       list_for_each_entry_rcu(tmp_irq_info,
+                                               sn_irq_lh[i],
+                                               list) {
                                if (tmp_irq_info->irq_cpuid == cpu) {
-                                       foundmatch++;
+                                       foundmatch = 1;
                                        break;
                                }
-                               tmp_irq_info = tmp_irq_info->irq_next;
-                       }
-                       if (foundmatch) {
-                               break;
                        }
                }
                pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i);
        }
+       rcu_read_unlock();
 }
 
-struct sn_irq_info *sn_irq_alloc(nasid_t local_nasid, int local_widget, int irq,
-                                nasid_t nasid, int slice)
+static void sn_irq_info_free(struct rcu_head *head)
 {
        struct sn_irq_info *sn_irq_info;
-       int status;
-
-       sn_irq_info = kmalloc(sizeof(*sn_irq_info), GFP_KERNEL);
-       if (sn_irq_info == NULL)
-               return NULL;
-
-       memset(sn_irq_info, 0x0, sizeof(*sn_irq_info));
-
-       status =
-           sn_intr_alloc(local_nasid, local_widget, __pa(sn_irq_info), irq,
-                         nasid, slice);
-
-       if (status) {
-               kfree(sn_irq_info);
-               return NULL;
-       } else {
-               return sn_irq_info;
-       }
-}
-
-void sn_irq_free(struct sn_irq_info *sn_irq_info)
-{
-       uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge;
-       nasid_t local_nasid = NASID_GET(bridge);
-       int local_widget;
-
-       if (local_nasid & 1)    /* tio check */
-               local_widget = TIO_SWIN_WIDGETNUM(bridge);
-       else
-               local_widget = SWIN_WIDGETNUM(bridge);
-
-       sn_intr_free(local_nasid, local_widget, sn_irq_info);
 
+       sn_irq_info = container_of(head, struct sn_irq_info, rcu);
        kfree(sn_irq_info);
 }
 
@@ -314,30 +284,54 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
        int slice = sn_irq_info->irq_slice;
        int cpu = nasid_slice_to_cpuid(nasid, slice);
 
+       pci_dev_get(pci_dev);
+
        sn_irq_info->irq_cpuid = cpu;
        sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev);
 
        /* link it into the sn_irq[irq] list */
-       sn_irq_info->irq_next = sn_irq[sn_irq_info->irq_irq];
-       sn_irq[sn_irq_info->irq_irq] = sn_irq_info;
+       spin_lock(&sn_irq_info_lock);
+       list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
+       spin_unlock(&sn_irq_info_lock);
 
        (void)register_intr_pda(sn_irq_info);
 }
 
+void sn_irq_unfixup(struct pci_dev *pci_dev)
+{
+       struct sn_irq_info *sn_irq_info;
+
+       /* Only cleanup IRQ stuff if this device has a host bus context */
+       if (!SN_PCIDEV_BUSSOFT(pci_dev))
+               return;
+
+       sn_irq_info = SN_PCIDEV_INFO(pci_dev)->pdi_sn_irq_info;
+       if (!sn_irq_info || !sn_irq_info->irq_irq)
+               return;
+
+       unregister_intr_pda(sn_irq_info);
+       spin_lock(&sn_irq_info_lock);
+       list_del_rcu(&sn_irq_info->list);
+       spin_unlock(&sn_irq_info_lock);
+       call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
+
+       pci_dev_put(pci_dev);
+}
+
 static void force_interrupt(int irq)
 {
        struct sn_irq_info *sn_irq_info;
 
        if (!sn_ioif_inited)
                return;
-       sn_irq_info = sn_irq[irq];
-       while (sn_irq_info) {
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list) {
                if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
-                   (sn_irq_info->irq_bridge != NULL)) {
+                   (sn_irq_info->irq_bridge != NULL))
                        pcibr_force_interrupt(sn_irq_info);
-               }
-               sn_irq_info = sn_irq_info->irq_next;
        }
+       rcu_read_unlock();
 }
 
 /*
@@ -402,19 +396,41 @@ static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
 
 void sn_lb_int_war_check(void)
 {
+       struct sn_irq_info *sn_irq_info;
        int i;
 
        if (!sn_ioif_inited || pda->sn_first_irq == 0)
                return;
+
+       rcu_read_lock();
        for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
-               struct sn_irq_info *sn_irq_info = sn_irq[i];
-               while (sn_irq_info) {
-                       /* Only call for PCI bridges that are fully initialized. */
+               list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) {
+                       /*
+                        * Only call for PCI bridges that are fully
+                        * initialized.
+                        */
                        if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
-                           (sn_irq_info->irq_bridge != NULL)) {
+                           (sn_irq_info->irq_bridge != NULL))
                                sn_check_intr(i, sn_irq_info);
-                       }
-                       sn_irq_info = sn_irq_info->irq_next;
                }
        }
+       rcu_read_unlock();
+}
+
+void sn_irq_lh_init(void)
+{
+       int i;
+
+       sn_irq_lh = kmalloc(sizeof(struct list_head *) * NR_IRQS, GFP_KERNEL);
+       if (!sn_irq_lh)
+               panic("SN PCI INIT: Failed to allocate memory for PCI init\n");
+
+       for (i = 0; i < NR_IRQS; i++) {
+               sn_irq_lh[i] = kmalloc(sizeof(struct list_head), GFP_KERNEL);
+               if (!sn_irq_lh[i])
+                       panic("SN PCI INIT: Failed IRQ memory allocation\n");
+
+               INIT_LIST_HEAD(sn_irq_lh[i]);
+       }
+
 }
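
[Editor's note] The new sn_irq_unfixup() entry point is the piece hot-remove needs: it unhooks the interrupt from the per-IRQ list with list_del_rcu(), defers the kfree() through call_rcu() so the lockless walkers above never touch freed memory, and drops the reference taken by pci_dev_get() in sn_irq_fixup(). A hedged sketch of how a removal path might call it; the helper name and the header carrying the prototype are assumptions, not part of this patch:

```c
#include <linux/pci.h>

/* Assumed prototype; the definition is added by this patch in
 * arch/ia64/sn/kernel/irq.c. */
extern void sn_irq_unfixup(struct pci_dev *pci_dev);

/* Hypothetical hot-remove helper: undo the IRQ linkage set up by
 * sn_pci_fixup_slot() before the device itself is torn down. The call is
 * safe even while other CPUs are inside force_interrupt() or
 * sn_lb_int_war_check(), because the entry is only unlinked here and is
 * freed after an RCU grace period. */
static void demo_sn_slot_teardown(struct pci_dev *dev)
{
        sn_irq_unfixup(dev);
}
```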