author     Tony Luck <tony.luck@intel.com>  2006-06-23 16:46:23 -0400
committer  Tony Luck <tony.luck@intel.com>  2006-06-23 16:46:23 -0400
commit     8cf60e04a131310199d5776e2f9e915f0c468899 (patch)
tree       373a68e88e6737713a0a5723d552cdeefffff929 /arch/ia64/sn/kernel/irq.c
parent     1323523f505606cfd24af6122369afddefc3b09d (diff)
parent     95eaa5fa8eb2c345244acd5f65b200b115ae8c65 (diff)
Auto-update from upstream
Diffstat (limited to 'arch/ia64/sn/kernel/irq.c')
-rw-r--r--  arch/ia64/sn/kernel/irq.c  142
1 file changed, 84 insertions(+), 58 deletions(-)
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index c265e02f5036..dc8e2b696713 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -26,11 +26,11 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
 
 int sn_force_interrupt_flag = 1;
 extern int sn_ioif_inited;
-static struct list_head **sn_irq_lh;
+struct list_head **sn_irq_lh;
 static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */
 
-static inline u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
-                                u64 sn_irq_info,
-                                int req_irq, nasid_t req_nasid,
-                                int req_slice)
+u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
+                  struct sn_irq_info *sn_irq_info,
+                  int req_irq, nasid_t req_nasid,
+                  int req_slice)
 {
@@ -40,12 +40,13 @@ static inline u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
 
         SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
                         (u64) SAL_INTR_ALLOC, (u64) local_nasid,
-                        (u64) local_widget, (u64) sn_irq_info, (u64) req_irq,
+                        (u64) local_widget, __pa(sn_irq_info), (u64) req_irq,
                         (u64) req_nasid, (u64) req_slice);
+
         return ret_stuff.status;
 }
 
-static inline void sn_intr_free(nasid_t local_nasid, int local_widget,
-                                struct sn_irq_info *sn_irq_info)
+void sn_intr_free(nasid_t local_nasid, int local_widget,
+                  struct sn_irq_info *sn_irq_info)
 {
         struct ia64_sal_retval ret_stuff;
@@ -112,73 +113,91 @@ static void sn_end_irq(unsigned int irq)
 
 static void sn_irq_info_free(struct rcu_head *head);
 
-static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
+struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
+                                       nasid_t nasid, int slice)
 {
-        struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
-        int cpuid, cpuphys;
+        int vector;
+        int cpuphys;
+        int64_t bridge;
+        int local_widget, status;
+        nasid_t local_nasid;
+        struct sn_irq_info *new_irq_info;
+        struct sn_pcibus_provider *pci_provider;
 
-        cpuid = first_cpu(mask);
-        cpuphys = cpu_physical_id(cpuid);
+        new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
+        if (new_irq_info == NULL)
+                return NULL;
 
-        list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
-                                 sn_irq_lh[irq], list) {
-                u64 bridge;
-                int local_widget, status;
-                nasid_t local_nasid;
-                struct sn_irq_info *new_irq_info;
-                struct sn_pcibus_provider *pci_provider;
-
-                new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
-                if (new_irq_info == NULL)
-                        break;
-                memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
-
-                bridge = (u64) new_irq_info->irq_bridge;
-                if (!bridge) {
-                        kfree(new_irq_info);
-                        break; /* irq is not a device interrupt */
-                }
+        memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
+
+        bridge = (u64) new_irq_info->irq_bridge;
+        if (!bridge) {
+                kfree(new_irq_info);
+                return NULL; /* irq is not a device interrupt */
+        }
 
-                local_nasid = NASID_GET(bridge);
+        local_nasid = NASID_GET(bridge);
 
-                if (local_nasid & 1)
-                        local_widget = TIO_SWIN_WIDGETNUM(bridge);
-                else
-                        local_widget = SWIN_WIDGETNUM(bridge);
+        if (local_nasid & 1)
+                local_widget = TIO_SWIN_WIDGETNUM(bridge);
+        else
+                local_widget = SWIN_WIDGETNUM(bridge);
 
-                /* Free the old PROM new_irq_info structure */
-                sn_intr_free(local_nasid, local_widget, new_irq_info);
-                /* Update kernels new_irq_info with new target info */
-                unregister_intr_pda(new_irq_info);
+        vector = sn_irq_info->irq_irq;
+        /* Free the old PROM new_irq_info structure */
+        sn_intr_free(local_nasid, local_widget, new_irq_info);
+        /* Update kernels new_irq_info with new target info */
+        unregister_intr_pda(new_irq_info);
 
-                /* allocate a new PROM new_irq_info struct */
-                status = sn_intr_alloc(local_nasid, local_widget,
-                                       __pa(new_irq_info), irq,
-                                       cpuid_to_nasid(cpuid),
-                                       cpuid_to_slice(cpuid));
+        /* allocate a new PROM new_irq_info struct */
+        status = sn_intr_alloc(local_nasid, local_widget,
+                               new_irq_info, vector,
+                               nasid, slice);
 
-                /* SAL call failed */
-                if (status) {
-                        kfree(new_irq_info);
-                        break;
-                }
+        /* SAL call failed */
+        if (status) {
+                kfree(new_irq_info);
+                return NULL;
+        }
 
-                new_irq_info->irq_cpuid = cpuid;
-                register_intr_pda(new_irq_info);
+        cpuphys = nasid_slice_to_cpuid(nasid, slice);
+        new_irq_info->irq_cpuid = cpuphys;
+        register_intr_pda(new_irq_info);
 
-                pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
-                if (pci_provider && pci_provider->target_interrupt)
-                        (pci_provider->target_interrupt)(new_irq_info);
+        pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
 
-                spin_lock(&sn_irq_info_lock);
-                list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
-                spin_unlock(&sn_irq_info_lock);
-                call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
+        /*
+         * If this represents a line interrupt, target it.  If it's
+         * an msi (irq_int_bit < 0), it's already targeted.
+         */
+        if (new_irq_info->irq_int_bit >= 0 &&
+            pci_provider && pci_provider->target_interrupt)
+                (pci_provider->target_interrupt)(new_irq_info);
+
+        spin_lock(&sn_irq_info_lock);
+        list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
+        spin_unlock(&sn_irq_info_lock);
+        call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
 
 #ifdef CONFIG_SMP
-                set_irq_affinity_info((irq & 0xff), cpuphys, 0);
+        set_irq_affinity_info((vector & 0xff), cpuphys, 0);
 #endif
-        }
+
+        return new_irq_info;
+}
+
+static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
+{
+        struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
+        nasid_t nasid;
+        int slice;
+
+        nasid = cpuid_to_nasid(first_cpu(mask));
+        slice = cpuid_to_slice(first_cpu(mask));
+
+        list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
+                                 sn_irq_lh[irq], list)
+                (void)sn_retarget_vector(sn_irq_info, nasid, slice);
 }
 
 struct hw_interrupt_type irq_type_sn = {
@@ -202,6 +221,9 @@ void sn_irq_init(void)
         int i;
         irq_desc_t *base_desc = irq_desc;
 
+        ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
+        ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;
+
         for (i = 0; i < NR_IRQS; i++) {
                 if (base_desc[i].handler == &no_irq_type) {
                         base_desc[i].handler = &irq_type_sn;
@@ -285,6 +307,7 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
         /* link it into the sn_irq[irq] list */
         spin_lock(&sn_irq_info_lock);
         list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
+        reserve_irq_vector(sn_irq_info->irq_irq);
         spin_unlock(&sn_irq_info_lock);
 
         register_intr_pda(sn_irq_info);
@@ -310,8 +333,11 @@ void sn_irq_unfixup(struct pci_dev *pci_dev)
         spin_lock(&sn_irq_info_lock);
         list_del_rcu(&sn_irq_info->list);
         spin_unlock(&sn_irq_info_lock);
+        if (list_empty(sn_irq_lh[sn_irq_info->irq_irq]))
+                free_irq_vector(sn_irq_info->irq_irq);
         call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
         pci_dev_put(pci_dev);
+
 }
 
 static inline void
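
The net effect of the largest hunk above is that the retargeting logic formerly buried inside sn_set_affinity_irq() is now callable on its own as sn_retarget_vector(). A minimal caller sketch follows, reusing only helpers already visible in this diff (cpuid_to_nasid(), cpuid_to_slice()); the wrapper name move_one_sn_irq_to_cpu() is hypothetical and not part of this commit:

        /* Hypothetical sketch, not part of this commit: retarget a single
         * sn_irq_info entry to the node/slice a given CPU lives on, the way
         * the new sn_set_affinity_irq() does for the whole per-irq list. */
        static struct sn_irq_info *move_one_sn_irq_to_cpu(struct sn_irq_info *info,
                                                          int cpu)
        {
                nasid_t nasid = cpuid_to_nasid(cpu);    /* target node */
                int slice = cpuid_to_slice(cpu);        /* CPU slice on that node */

                /* Returns the replacement sn_irq_info on success, or NULL on
                 * failure (non-device irq, allocation failure, or SAL error). */
                return sn_retarget_vector(info, nasid, slice);
        }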