author    Tony Luck <tony.luck@intel.com>    2007-07-19 19:34:40 -0400
committer Tony Luck <tony.luck@intel.com>    2007-07-19 19:34:40 -0400
commit    f4fbfb0dda5577075a049eec7fb7ad38abca1912 (patch)
tree      dfba29efc83cb7c7e4f8e681152c92ee2a32fe9c
parent    ffc720409ae8d1cb16ae4b9c39e9e744e4c59898 (diff)
parent    bf903d0a4503db8ac166ca6135a59bc5f9b91a45 (diff)
Pull vector-domain into release branch
-rw-r--r--  Documentation/kernel-parameters.txt  |   3
-rw-r--r--  arch/ia64/kernel/iosapic.c           | 652
-rw-r--r--  arch/ia64/kernel/irq.c               |   2
-rw-r--r--  arch/ia64/kernel/irq_ia64.c          | 317
-rw-r--r--  arch/ia64/kernel/msi_ia64.c          |  23
-rw-r--r--  arch/ia64/kernel/smpboot.c           |   4
-rw-r--r--  include/asm-ia64/hw_irq.h            |  18
-rw-r--r--  include/asm-ia64/iosapic.h           |   6
-rw-r--r--  include/asm-ia64/irq.h               |   9
9 files changed, 634 insertions(+), 400 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 9a541486fb7e..854744bde224 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1885,6 +1885,9 @@ and is between 256 and 4096 characters. It is defined in the file
1885 vdso=1: enable VDSO (default) 1885 vdso=1: enable VDSO (default)
1886 vdso=0: disable VDSO mapping 1886 vdso=0: disable VDSO mapping
1887 1887
1888 vector= [IA-64,SMP]
1889 vector=percpu: enable percpu vector domain
1890
1888 video= [FB] Frame buffer configuration 1891 video= [FB] Frame buffer configuration
1889 See Documentation/fb/modedb.txt. 1892 See Documentation/fb/modedb.txt.
1890 1893
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 37f46527d233..91e6dc1e7baf 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -118,15 +118,25 @@ static DEFINE_SPINLOCK(iosapic_lock);
118 * vector. 118 * vector.
119 */ 119 */
120 120
121struct iosapic_rte_info { 121#define NO_REF_RTE 0
122 struct list_head rte_list; /* node in list of RTEs sharing the 122
123 * same vector */ 123static struct iosapic {
124 char __iomem *addr; /* base address of IOSAPIC */ 124 char __iomem *addr; /* base address of IOSAPIC */
125 unsigned int gsi_base; /* first GSI assigned to this 125 unsigned int gsi_base; /* GSI base */
126 * IOSAPIC */ 126 unsigned short num_rte; /* # of RTEs on this IOSAPIC */
127 int rtes_inuse; /* # of RTEs in use on this IOSAPIC */
128#ifdef CONFIG_NUMA
129 unsigned short node; /* numa node association via pxm */
130#endif
131 spinlock_t lock; /* lock for indirect reg access */
132} iosapic_lists[NR_IOSAPICS];
133
134struct iosapic_rte_info {
135 struct list_head rte_list; /* RTEs sharing the same vector */
127 char rte_index; /* IOSAPIC RTE index */ 136 char rte_index; /* IOSAPIC RTE index */
128 int refcnt; /* reference counter */ 137 int refcnt; /* reference counter */
129 unsigned int flags; /* flags */ 138 unsigned int flags; /* flags */
139 struct iosapic *iosapic;
130} ____cacheline_aligned; 140} ____cacheline_aligned;
131 141
132static struct iosapic_intr_info { 142static struct iosapic_intr_info {
@@ -140,24 +150,23 @@ static struct iosapic_intr_info {
140 unsigned char polarity: 1; /* interrupt polarity 150 unsigned char polarity: 1; /* interrupt polarity
141 * (see iosapic.h) */ 151 * (see iosapic.h) */
142 unsigned char trigger : 1; /* trigger mode (see iosapic.h) */ 152 unsigned char trigger : 1; /* trigger mode (see iosapic.h) */
143} iosapic_intr_info[IA64_NUM_VECTORS]; 153} iosapic_intr_info[NR_IRQS];
144
145static struct iosapic {
146 char __iomem *addr; /* base address of IOSAPIC */
147 unsigned int gsi_base; /* first GSI assigned to this
148 * IOSAPIC */
149 unsigned short num_rte; /* # of RTEs on this IOSAPIC */
150 int rtes_inuse; /* # of RTEs in use on this IOSAPIC */
151#ifdef CONFIG_NUMA
152 unsigned short node; /* numa node association via pxm */
153#endif
154} iosapic_lists[NR_IOSAPICS];
155 154
156static unsigned char pcat_compat __devinitdata; /* 8259 compatibility flag */ 155static unsigned char pcat_compat __devinitdata; /* 8259 compatibility flag */
157 156
158static int iosapic_kmalloc_ok; 157static int iosapic_kmalloc_ok;
159static LIST_HEAD(free_rte_list); 158static LIST_HEAD(free_rte_list);
160 159
160static inline void
161iosapic_write(struct iosapic *iosapic, unsigned int reg, u32 val)
162{
163 unsigned long flags;
164
165 spin_lock_irqsave(&iosapic->lock, flags);
166 __iosapic_write(iosapic->addr, reg, val);
167 spin_unlock_irqrestore(&iosapic->lock, flags);
168}
169
161/* 170/*
162 * Find an IOSAPIC associated with a GSI 171 * Find an IOSAPIC associated with a GSI
163 */ 172 */
@@ -175,17 +184,18 @@ find_iosapic (unsigned int gsi)
175 return -1; 184 return -1;
176} 185}
177 186
178static inline int 187static inline int __gsi_to_irq(unsigned int gsi)
179_gsi_to_vector (unsigned int gsi)
180{ 188{
189 int irq;
181 struct iosapic_intr_info *info; 190 struct iosapic_intr_info *info;
182 struct iosapic_rte_info *rte; 191 struct iosapic_rte_info *rte;
183 192
184 for (info = iosapic_intr_info; info < 193 for (irq = 0; irq < NR_IRQS; irq++) {
185 iosapic_intr_info + IA64_NUM_VECTORS; ++info) 194 info = &iosapic_intr_info[irq];
186 list_for_each_entry(rte, &info->rtes, rte_list) 195 list_for_each_entry(rte, &info->rtes, rte_list)
187 if (rte->gsi_base + rte->rte_index == gsi) 196 if (rte->iosapic->gsi_base + rte->rte_index == gsi)
188 return info - iosapic_intr_info; 197 return irq;
198 }
189 return -1; 199 return -1;
190} 200}
191 201
@@ -196,7 +206,10 @@ _gsi_to_vector (unsigned int gsi)
196inline int 206inline int
197gsi_to_vector (unsigned int gsi) 207gsi_to_vector (unsigned int gsi)
198{ 208{
199 return _gsi_to_vector(gsi); 209 int irq = __gsi_to_irq(gsi);
210 if (check_irq_used(irq) < 0)
211 return -1;
212 return irq_to_vector(irq);
200} 213}
201 214
202int 215int
@@ -204,66 +217,48 @@ gsi_to_irq (unsigned int gsi)
204{ 217{
205 unsigned long flags; 218 unsigned long flags;
206 int irq; 219 int irq;
207 /* 220
208 * XXX fix me: this assumes an identity mapping between IA-64 vector
209 * and Linux irq numbers...
210 */
211 spin_lock_irqsave(&iosapic_lock, flags); 221 spin_lock_irqsave(&iosapic_lock, flags);
212 { 222 irq = __gsi_to_irq(gsi);
213 irq = _gsi_to_vector(gsi);
214 }
215 spin_unlock_irqrestore(&iosapic_lock, flags); 223 spin_unlock_irqrestore(&iosapic_lock, flags);
216
217 return irq; 224 return irq;
218} 225}
219 226
220static struct iosapic_rte_info *gsi_vector_to_rte(unsigned int gsi, 227static struct iosapic_rte_info *find_rte(unsigned int irq, unsigned int gsi)
221 unsigned int vec)
222{ 228{
223 struct iosapic_rte_info *rte; 229 struct iosapic_rte_info *rte;
224 230
225 list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) 231 list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
226 if (rte->gsi_base + rte->rte_index == gsi) 232 if (rte->iosapic->gsi_base + rte->rte_index == gsi)
227 return rte; 233 return rte;
228 return NULL; 234 return NULL;
229} 235}
230 236
231static void 237static void
232set_rte (unsigned int gsi, unsigned int vector, unsigned int dest, int mask) 238set_rte (unsigned int gsi, unsigned int irq, unsigned int dest, int mask)
233{ 239{
234 unsigned long pol, trigger, dmode; 240 unsigned long pol, trigger, dmode;
235 u32 low32, high32; 241 u32 low32, high32;
236 char __iomem *addr;
237 int rte_index; 242 int rte_index;
238 char redir; 243 char redir;
239 struct iosapic_rte_info *rte; 244 struct iosapic_rte_info *rte;
245 ia64_vector vector = irq_to_vector(irq);
240 246
241 DBG(KERN_DEBUG"IOSAPIC: routing vector %d to 0x%x\n", vector, dest); 247 DBG(KERN_DEBUG"IOSAPIC: routing vector %d to 0x%x\n", vector, dest);
242 248
243 rte = gsi_vector_to_rte(gsi, vector); 249 rte = find_rte(irq, gsi);
244 if (!rte) 250 if (!rte)
245 return; /* not an IOSAPIC interrupt */ 251 return; /* not an IOSAPIC interrupt */
246 252
247 rte_index = rte->rte_index; 253 rte_index = rte->rte_index;
248 addr = rte->addr; 254 pol = iosapic_intr_info[irq].polarity;
249 pol = iosapic_intr_info[vector].polarity; 255 trigger = iosapic_intr_info[irq].trigger;
250 trigger = iosapic_intr_info[vector].trigger; 256 dmode = iosapic_intr_info[irq].dmode;
251 dmode = iosapic_intr_info[vector].dmode;
252 257
253 redir = (dmode == IOSAPIC_LOWEST_PRIORITY) ? 1 : 0; 258 redir = (dmode == IOSAPIC_LOWEST_PRIORITY) ? 1 : 0;
254 259
255#ifdef CONFIG_SMP 260#ifdef CONFIG_SMP
256 { 261 set_irq_affinity_info(irq, (int)(dest & 0xffff), redir);
257 unsigned int irq;
258
259 for (irq = 0; irq < NR_IRQS; ++irq)
260 if (irq_to_vector(irq) == vector) {
261 set_irq_affinity_info(irq,
262 (int)(dest & 0xffff),
263 redir);
264 break;
265 }
266 }
267#endif 262#endif
268 263
269 low32 = ((pol << IOSAPIC_POLARITY_SHIFT) | 264 low32 = ((pol << IOSAPIC_POLARITY_SHIFT) |
@@ -275,10 +270,10 @@ set_rte (unsigned int gsi, unsigned int vector, unsigned int dest, int mask)
275 /* dest contains both id and eid */ 270 /* dest contains both id and eid */
276 high32 = (dest << IOSAPIC_DEST_SHIFT); 271 high32 = (dest << IOSAPIC_DEST_SHIFT);
277 272
278 iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32); 273 iosapic_write(rte->iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
279 iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32); 274 iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
280 iosapic_intr_info[vector].low32 = low32; 275 iosapic_intr_info[irq].low32 = low32;
281 iosapic_intr_info[vector].dest = dest; 276 iosapic_intr_info[irq].dest = dest;
282} 277}
283 278
284static void 279static void
@@ -294,15 +289,18 @@ kexec_disable_iosapic(void)
294{ 289{
295 struct iosapic_intr_info *info; 290 struct iosapic_intr_info *info;
296 struct iosapic_rte_info *rte; 291 struct iosapic_rte_info *rte;
297 u8 vec = 0; 292 ia64_vector vec;
298 for (info = iosapic_intr_info; info < 293 int irq;
299 iosapic_intr_info + IA64_NUM_VECTORS; ++info, ++vec) { 294
295 for (irq = 0; irq < NR_IRQS; irq++) {
296 info = &iosapic_intr_info[irq];
297 vec = irq_to_vector(irq);
300 list_for_each_entry(rte, &info->rtes, 298 list_for_each_entry(rte, &info->rtes,
301 rte_list) { 299 rte_list) {
302 iosapic_write(rte->addr, 300 iosapic_write(rte->iosapic,
303 IOSAPIC_RTE_LOW(rte->rte_index), 301 IOSAPIC_RTE_LOW(rte->rte_index),
304 IOSAPIC_MASK|vec); 302 IOSAPIC_MASK|vec);
305 iosapic_eoi(rte->addr, vec); 303 iosapic_eoi(rte->iosapic->addr, vec);
306 } 304 }
307 } 305 }
308} 306}
@@ -311,54 +309,36 @@ kexec_disable_iosapic(void)
311static void 309static void
312mask_irq (unsigned int irq) 310mask_irq (unsigned int irq)
313{ 311{
314 unsigned long flags;
315 char __iomem *addr;
316 u32 low32; 312 u32 low32;
317 int rte_index; 313 int rte_index;
318 ia64_vector vec = irq_to_vector(irq);
319 struct iosapic_rte_info *rte; 314 struct iosapic_rte_info *rte;
320 315
321 if (list_empty(&iosapic_intr_info[vec].rtes)) 316 if (list_empty(&iosapic_intr_info[irq].rtes))
322 return; /* not an IOSAPIC interrupt! */ 317 return; /* not an IOSAPIC interrupt! */
323 318
324 spin_lock_irqsave(&iosapic_lock, flags); 319 /* set only the mask bit */
325 { 320 low32 = iosapic_intr_info[irq].low32 |= IOSAPIC_MASK;
326 /* set only the mask bit */ 321 list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
327 low32 = iosapic_intr_info[vec].low32 |= IOSAPIC_MASK; 322 rte_index = rte->rte_index;
328 list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, 323 iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
329 rte_list) {
330 addr = rte->addr;
331 rte_index = rte->rte_index;
332 iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
333 }
334 } 324 }
335 spin_unlock_irqrestore(&iosapic_lock, flags);
336} 325}
337 326
338static void 327static void
339unmask_irq (unsigned int irq) 328unmask_irq (unsigned int irq)
340{ 329{
341 unsigned long flags;
342 char __iomem *addr;
343 u32 low32; 330 u32 low32;
344 int rte_index; 331 int rte_index;
345 ia64_vector vec = irq_to_vector(irq);
346 struct iosapic_rte_info *rte; 332 struct iosapic_rte_info *rte;
347 333
348 if (list_empty(&iosapic_intr_info[vec].rtes)) 334 if (list_empty(&iosapic_intr_info[irq].rtes))
349 return; /* not an IOSAPIC interrupt! */ 335 return; /* not an IOSAPIC interrupt! */
350 336
351 spin_lock_irqsave(&iosapic_lock, flags); 337 low32 = iosapic_intr_info[irq].low32 &= ~IOSAPIC_MASK;
352 { 338 list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
353 low32 = iosapic_intr_info[vec].low32 &= ~IOSAPIC_MASK; 339 rte_index = rte->rte_index;
354 list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, 340 iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
355 rte_list) {
356 addr = rte->addr;
357 rte_index = rte->rte_index;
358 iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
359 }
360 } 341 }
361 spin_unlock_irqrestore(&iosapic_lock, flags);
362} 342}
363 343
364 344
@@ -366,23 +346,24 @@ static void
366iosapic_set_affinity (unsigned int irq, cpumask_t mask) 346iosapic_set_affinity (unsigned int irq, cpumask_t mask)
367{ 347{
368#ifdef CONFIG_SMP 348#ifdef CONFIG_SMP
369 unsigned long flags;
370 u32 high32, low32; 349 u32 high32, low32;
371 int dest, rte_index; 350 int dest, rte_index;
372 char __iomem *addr;
373 int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0; 351 int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
374 ia64_vector vec;
375 struct iosapic_rte_info *rte; 352 struct iosapic_rte_info *rte;
353 struct iosapic *iosapic;
376 354
377 irq &= (~IA64_IRQ_REDIRECTED); 355 irq &= (~IA64_IRQ_REDIRECTED);
378 vec = irq_to_vector(irq);
379 356
357 cpus_and(mask, mask, cpu_online_map);
380 if (cpus_empty(mask)) 358 if (cpus_empty(mask))
381 return; 359 return;
382 360
361 if (reassign_irq_vector(irq, first_cpu(mask)))
362 return;
363
383 dest = cpu_physical_id(first_cpu(mask)); 364 dest = cpu_physical_id(first_cpu(mask));
384 365
385 if (list_empty(&iosapic_intr_info[vec].rtes)) 366 if (list_empty(&iosapic_intr_info[irq].rtes))
386 return; /* not an IOSAPIC interrupt */ 367 return; /* not an IOSAPIC interrupt */
387 368
388 set_irq_affinity_info(irq, dest, redir); 369 set_irq_affinity_info(irq, dest, redir);
@@ -390,31 +371,24 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
390 /* dest contains both id and eid */ 371 /* dest contains both id and eid */
391 high32 = dest << IOSAPIC_DEST_SHIFT; 372 high32 = dest << IOSAPIC_DEST_SHIFT;
392 373
393 spin_lock_irqsave(&iosapic_lock, flags); 374 low32 = iosapic_intr_info[irq].low32 & ~(7 << IOSAPIC_DELIVERY_SHIFT);
394 { 375 if (redir)
395 low32 = iosapic_intr_info[vec].low32 & 376 /* change delivery mode to lowest priority */
396 ~(7 << IOSAPIC_DELIVERY_SHIFT); 377 low32 |= (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
397 378 else
398 if (redir) 379 /* change delivery mode to fixed */
399 /* change delivery mode to lowest priority */ 380 low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT);
400 low32 |= (IOSAPIC_LOWEST_PRIORITY << 381 low32 &= IOSAPIC_VECTOR_MASK;
401 IOSAPIC_DELIVERY_SHIFT); 382 low32 |= irq_to_vector(irq);
402 else 383
403 /* change delivery mode to fixed */ 384 iosapic_intr_info[irq].low32 = low32;
404 low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT); 385 iosapic_intr_info[irq].dest = dest;
405 386 list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
406 iosapic_intr_info[vec].low32 = low32; 387 iosapic = rte->iosapic;
407 iosapic_intr_info[vec].dest = dest; 388 rte_index = rte->rte_index;
408 list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, 389 iosapic_write(iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
409 rte_list) { 390 iosapic_write(iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
410 addr = rte->addr;
411 rte_index = rte->rte_index;
412 iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index),
413 high32);
414 iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
415 }
416 } 391 }
417 spin_unlock_irqrestore(&iosapic_lock, flags);
418#endif 392#endif
419} 393}
420 394
@@ -434,10 +408,20 @@ iosapic_end_level_irq (unsigned int irq)
434{ 408{
435 ia64_vector vec = irq_to_vector(irq); 409 ia64_vector vec = irq_to_vector(irq);
436 struct iosapic_rte_info *rte; 410 struct iosapic_rte_info *rte;
411 int do_unmask_irq = 0;
437 412
438 move_native_irq(irq); 413 if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
439 list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) 414 do_unmask_irq = 1;
440 iosapic_eoi(rte->addr, vec); 415 mask_irq(irq);
416 }
417
418 list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
419 iosapic_eoi(rte->iosapic->addr, vec);
420
421 if (unlikely(do_unmask_irq)) {
422 move_masked_irq(irq);
423 unmask_irq(irq);
424 }
441} 425}
442 426
443#define iosapic_shutdown_level_irq mask_irq 427#define iosapic_shutdown_level_irq mask_irq
@@ -519,13 +503,12 @@ iosapic_version (char __iomem *addr)
519 * unsigned int reserved2 : 8; 503 * unsigned int reserved2 : 8;
520 * } 504 * }
521 */ 505 */
522 return iosapic_read(addr, IOSAPIC_VERSION); 506 return __iosapic_read(addr, IOSAPIC_VERSION);
523} 507}
524 508
525static int iosapic_find_sharable_vector (unsigned long trigger, 509static int iosapic_find_sharable_irq(unsigned long trigger, unsigned long pol)
526 unsigned long pol)
527{ 510{
528 int i, vector = -1, min_count = -1; 511 int i, irq = -ENOSPC, min_count = -1;
529 struct iosapic_intr_info *info; 512 struct iosapic_intr_info *info;
530 513
531 /* 514 /*
@@ -533,21 +516,21 @@ static int iosapic_find_sharable_vector (unsigned long trigger,
533 * supported yet 516 * supported yet
534 */ 517 */
535 if (trigger == IOSAPIC_EDGE) 518 if (trigger == IOSAPIC_EDGE)
536 return -1; 519 return -EINVAL;
537 520
538 for (i = IA64_FIRST_DEVICE_VECTOR; i <= IA64_LAST_DEVICE_VECTOR; i++) { 521 for (i = 0; i <= NR_IRQS; i++) {
539 info = &iosapic_intr_info[i]; 522 info = &iosapic_intr_info[i];
540 if (info->trigger == trigger && info->polarity == pol && 523 if (info->trigger == trigger && info->polarity == pol &&
541 (info->dmode == IOSAPIC_FIXED || info->dmode == 524 (info->dmode == IOSAPIC_FIXED ||
542 IOSAPIC_LOWEST_PRIORITY)) { 525 info->dmode == IOSAPIC_LOWEST_PRIORITY) &&
526 can_request_irq(i, IRQF_SHARED)) {
543 if (min_count == -1 || info->count < min_count) { 527 if (min_count == -1 || info->count < min_count) {
544 vector = i; 528 irq = i;
545 min_count = info->count; 529 min_count = info->count;
546 } 530 }
547 } 531 }
548 } 532 }
549 533 return irq;
550 return vector;
551} 534}
552 535
553/* 536/*
@@ -555,25 +538,25 @@ static int iosapic_find_sharable_vector (unsigned long trigger,
555 * assign a new vector for the other and make the vector available 538 * assign a new vector for the other and make the vector available
556 */ 539 */
557static void __init 540static void __init
558iosapic_reassign_vector (int vector) 541iosapic_reassign_vector (int irq)
559{ 542{
560 int new_vector; 543 int new_irq;
561 544
562 if (!list_empty(&iosapic_intr_info[vector].rtes)) { 545 if (!list_empty(&iosapic_intr_info[irq].rtes)) {
563 new_vector = assign_irq_vector(AUTO_ASSIGN); 546 new_irq = create_irq();
564 if (new_vector < 0) 547 if (new_irq < 0)
565 panic("%s: out of interrupt vectors!\n", __FUNCTION__); 548 panic("%s: out of interrupt vectors!\n", __FUNCTION__);
566 printk(KERN_INFO "Reassigning vector %d to %d\n", 549 printk(KERN_INFO "Reassigning vector %d to %d\n",
567 vector, new_vector); 550 irq_to_vector(irq), irq_to_vector(new_irq));
568 memcpy(&iosapic_intr_info[new_vector], &iosapic_intr_info[vector], 551 memcpy(&iosapic_intr_info[new_irq], &iosapic_intr_info[irq],
569 sizeof(struct iosapic_intr_info)); 552 sizeof(struct iosapic_intr_info));
570 INIT_LIST_HEAD(&iosapic_intr_info[new_vector].rtes); 553 INIT_LIST_HEAD(&iosapic_intr_info[new_irq].rtes);
571 list_move(iosapic_intr_info[vector].rtes.next, 554 list_move(iosapic_intr_info[irq].rtes.next,
572 &iosapic_intr_info[new_vector].rtes); 555 &iosapic_intr_info[new_irq].rtes);
573 memset(&iosapic_intr_info[vector], 0, 556 memset(&iosapic_intr_info[irq], 0,
574 sizeof(struct iosapic_intr_info)); 557 sizeof(struct iosapic_intr_info));
575 iosapic_intr_info[vector].low32 = IOSAPIC_MASK; 558 iosapic_intr_info[irq].low32 = IOSAPIC_MASK;
576 INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes); 559 INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes);
577 } 560 }
578} 561}
579 562
@@ -610,29 +593,18 @@ static struct iosapic_rte_info *iosapic_alloc_rte (void)
610 return rte; 593 return rte;
611} 594}
612 595
613static void iosapic_free_rte (struct iosapic_rte_info *rte) 596static inline int irq_is_shared (int irq)
614{ 597{
615 if (rte->flags & RTE_PREALLOCATED) 598 return (iosapic_intr_info[irq].count > 1);
616 list_add_tail(&rte->rte_list, &free_rte_list);
617 else
618 kfree(rte);
619}
620
621static inline int vector_is_shared (int vector)
622{
623 return (iosapic_intr_info[vector].count > 1);
624} 599}
625 600
626static int 601static int
627register_intr (unsigned int gsi, int vector, unsigned char delivery, 602register_intr (unsigned int gsi, int irq, unsigned char delivery,
628 unsigned long polarity, unsigned long trigger) 603 unsigned long polarity, unsigned long trigger)
629{ 604{
630 irq_desc_t *idesc; 605 irq_desc_t *idesc;
631 struct hw_interrupt_type *irq_type; 606 struct hw_interrupt_type *irq_type;
632 int rte_index;
633 int index; 607 int index;
634 unsigned long gsi_base;
635 void __iomem *iosapic_address;
636 struct iosapic_rte_info *rte; 608 struct iosapic_rte_info *rte;
637 609
638 index = find_iosapic(gsi); 610 index = find_iosapic(gsi);
@@ -642,10 +614,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
642 return -ENODEV; 614 return -ENODEV;
643 } 615 }
644 616
645 iosapic_address = iosapic_lists[index].addr; 617 rte = find_rte(irq, gsi);
646 gsi_base = iosapic_lists[index].gsi_base;
647
648 rte = gsi_vector_to_rte(gsi, vector);
649 if (!rte) { 618 if (!rte) {
650 rte = iosapic_alloc_rte(); 619 rte = iosapic_alloc_rte();
651 if (!rte) { 620 if (!rte) {
@@ -654,40 +623,42 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
654 return -ENOMEM; 623 return -ENOMEM;
655 } 624 }
656 625
657 rte_index = gsi - gsi_base; 626 rte->iosapic = &iosapic_lists[index];
658 rte->rte_index = rte_index; 627 rte->rte_index = gsi - rte->iosapic->gsi_base;
659 rte->addr = iosapic_address;
660 rte->gsi_base = gsi_base;
661 rte->refcnt++; 628 rte->refcnt++;
662 list_add_tail(&rte->rte_list, &iosapic_intr_info[vector].rtes); 629 list_add_tail(&rte->rte_list, &iosapic_intr_info[irq].rtes);
663 iosapic_intr_info[vector].count++; 630 iosapic_intr_info[irq].count++;
664 iosapic_lists[index].rtes_inuse++; 631 iosapic_lists[index].rtes_inuse++;
665 } 632 }
666 else if (vector_is_shared(vector)) { 633 else if (rte->refcnt == NO_REF_RTE) {
667 struct iosapic_intr_info *info = &iosapic_intr_info[vector]; 634 struct iosapic_intr_info *info = &iosapic_intr_info[irq];
668 if (info->trigger != trigger || info->polarity != polarity) { 635 if (info->count > 0 &&
636 (info->trigger != trigger || info->polarity != polarity)){
669 printk (KERN_WARNING 637 printk (KERN_WARNING
670 "%s: cannot override the interrupt\n", 638 "%s: cannot override the interrupt\n",
671 __FUNCTION__); 639 __FUNCTION__);
672 return -EINVAL; 640 return -EINVAL;
673 } 641 }
642 rte->refcnt++;
643 iosapic_intr_info[irq].count++;
644 iosapic_lists[index].rtes_inuse++;
674 } 645 }
675 646
676 iosapic_intr_info[vector].polarity = polarity; 647 iosapic_intr_info[irq].polarity = polarity;
677 iosapic_intr_info[vector].dmode = delivery; 648 iosapic_intr_info[irq].dmode = delivery;
678 iosapic_intr_info[vector].trigger = trigger; 649 iosapic_intr_info[irq].trigger = trigger;
679 650
680 if (trigger == IOSAPIC_EDGE) 651 if (trigger == IOSAPIC_EDGE)
681 irq_type = &irq_type_iosapic_edge; 652 irq_type = &irq_type_iosapic_edge;
682 else 653 else
683 irq_type = &irq_type_iosapic_level; 654 irq_type = &irq_type_iosapic_level;
684 655
685 idesc = irq_desc + vector; 656 idesc = irq_desc + irq;
686 if (idesc->chip != irq_type) { 657 if (idesc->chip != irq_type) {
687 if (idesc->chip != &no_irq_type) 658 if (idesc->chip != &no_irq_type)
688 printk(KERN_WARNING 659 printk(KERN_WARNING
689 "%s: changing vector %d from %s to %s\n", 660 "%s: changing vector %d from %s to %s\n",
690 __FUNCTION__, vector, 661 __FUNCTION__, irq_to_vector(irq),
691 idesc->chip->name, irq_type->name); 662 idesc->chip->name, irq_type->name);
692 idesc->chip = irq_type; 663 idesc->chip = irq_type;
693 } 664 }
@@ -695,18 +666,19 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
695} 666}
696 667
697static unsigned int 668static unsigned int
698get_target_cpu (unsigned int gsi, int vector) 669get_target_cpu (unsigned int gsi, int irq)
699{ 670{
700#ifdef CONFIG_SMP 671#ifdef CONFIG_SMP
701 static int cpu = -1; 672 static int cpu = -1;
702 extern int cpe_vector; 673 extern int cpe_vector;
674 cpumask_t domain = irq_to_domain(irq);
703 675
704 /* 676 /*
705 * In case of vector shared by multiple RTEs, all RTEs that 677 * In case of vector shared by multiple RTEs, all RTEs that
706 * share the vector need to use the same destination CPU. 678 * share the vector need to use the same destination CPU.
707 */ 679 */
708 if (!list_empty(&iosapic_intr_info[vector].rtes)) 680 if (!list_empty(&iosapic_intr_info[irq].rtes))
709 return iosapic_intr_info[vector].dest; 681 return iosapic_intr_info[irq].dest;
710 682
711 /* 683 /*
712 * If the platform supports redirection via XTP, let it 684 * If the platform supports redirection via XTP, let it
@@ -723,7 +695,7 @@ get_target_cpu (unsigned int gsi, int vector)
723 return cpu_physical_id(smp_processor_id()); 695 return cpu_physical_id(smp_processor_id());
724 696
725#ifdef CONFIG_ACPI 697#ifdef CONFIG_ACPI
726 if (cpe_vector > 0 && vector == IA64_CPEP_VECTOR) 698 if (cpe_vector > 0 && irq_to_vector(irq) == IA64_CPEP_VECTOR)
727 return get_cpei_target_cpu(); 699 return get_cpei_target_cpu();
728#endif 700#endif
729 701
@@ -738,7 +710,7 @@ get_target_cpu (unsigned int gsi, int vector)
738 goto skip_numa_setup; 710 goto skip_numa_setup;
739 711
740 cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node); 712 cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node);
741 713 cpus_and(cpu_mask, cpu_mask, domain);
742 for_each_cpu_mask(numa_cpu, cpu_mask) { 714 for_each_cpu_mask(numa_cpu, cpu_mask) {
743 if (!cpu_online(numa_cpu)) 715 if (!cpu_online(numa_cpu))
744 cpu_clear(numa_cpu, cpu_mask); 716 cpu_clear(numa_cpu, cpu_mask);
@@ -749,8 +721,8 @@ get_target_cpu (unsigned int gsi, int vector)
749 if (!num_cpus) 721 if (!num_cpus)
750 goto skip_numa_setup; 722 goto skip_numa_setup;
751 723
752 /* Use vector assignment to distribute across cpus in node */ 724 /* Use irq assignment to distribute across cpus in node */
753 cpu_index = vector % num_cpus; 725 cpu_index = irq % num_cpus;
754 726
755 for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++) 727 for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++)
756 numa_cpu = next_cpu(numa_cpu, cpu_mask); 728 numa_cpu = next_cpu(numa_cpu, cpu_mask);
@@ -768,7 +740,7 @@ skip_numa_setup:
768 do { 740 do {
769 if (++cpu >= NR_CPUS) 741 if (++cpu >= NR_CPUS)
770 cpu = 0; 742 cpu = 0;
771 } while (!cpu_online(cpu)); 743 } while (!cpu_online(cpu) || !cpu_isset(cpu, domain));
772 744
773 return cpu_physical_id(cpu); 745 return cpu_physical_id(cpu);
774#else /* CONFIG_SMP */ 746#else /* CONFIG_SMP */
@@ -785,84 +757,72 @@ int
785iosapic_register_intr (unsigned int gsi, 757iosapic_register_intr (unsigned int gsi,
786 unsigned long polarity, unsigned long trigger) 758 unsigned long polarity, unsigned long trigger)
787{ 759{
788 int vector, mask = 1, err; 760 int irq, mask = 1, err;
789 unsigned int dest; 761 unsigned int dest;
790 unsigned long flags; 762 unsigned long flags;
791 struct iosapic_rte_info *rte; 763 struct iosapic_rte_info *rte;
792 u32 low32; 764 u32 low32;
793again: 765
794 /* 766 /*
795 * If this GSI has already been registered (i.e., it's a 767 * If this GSI has already been registered (i.e., it's a
796 * shared interrupt, or we lost a race to register it), 768 * shared interrupt, or we lost a race to register it),
797 * don't touch the RTE. 769 * don't touch the RTE.
798 */ 770 */
799 spin_lock_irqsave(&iosapic_lock, flags); 771 spin_lock_irqsave(&iosapic_lock, flags);
800 { 772 irq = __gsi_to_irq(gsi);
801 vector = gsi_to_vector(gsi); 773 if (irq > 0) {
802 if (vector > 0) { 774 rte = find_rte(irq, gsi);
803 rte = gsi_vector_to_rte(gsi, vector); 775 if(iosapic_intr_info[irq].count == 0) {
776 assign_irq_vector(irq);
777 dynamic_irq_init(irq);
778 } else if (rte->refcnt != NO_REF_RTE) {
804 rte->refcnt++; 779 rte->refcnt++;
805 spin_unlock_irqrestore(&iosapic_lock, flags); 780 goto unlock_iosapic_lock;
806 return vector;
807 } 781 }
808 } 782 } else
809 spin_unlock_irqrestore(&iosapic_lock, flags); 783 irq = create_irq();
810 784
811 /* If vector is running out, we try to find a sharable vector */ 785 /* If vector is running out, we try to find a sharable vector */
812 vector = assign_irq_vector(AUTO_ASSIGN); 786 if (irq < 0) {
813 if (vector < 0) { 787 irq = iosapic_find_sharable_irq(trigger, polarity);
814 vector = iosapic_find_sharable_vector(trigger, polarity); 788 if (irq < 0)
815 if (vector < 0) 789 goto unlock_iosapic_lock;
816 return -ENOSPC;
817 } 790 }
818 791
819 spin_lock_irqsave(&irq_desc[vector].lock, flags); 792 spin_lock(&irq_desc[irq].lock);
820 spin_lock(&iosapic_lock); 793 dest = get_target_cpu(gsi, irq);
821 { 794 err = register_intr(gsi, irq, IOSAPIC_LOWEST_PRIORITY,
822 if (gsi_to_vector(gsi) > 0) { 795 polarity, trigger);
823 if (list_empty(&iosapic_intr_info[vector].rtes)) 796 if (err < 0) {
824 free_irq_vector(vector); 797 irq = err;
825 spin_unlock(&iosapic_lock); 798 goto unlock_all;
826 spin_unlock_irqrestore(&irq_desc[vector].lock,
827 flags);
828 goto again;
829 }
830
831 dest = get_target_cpu(gsi, vector);
832 err = register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY,
833 polarity, trigger);
834 if (err < 0) {
835 spin_unlock(&iosapic_lock);
836 spin_unlock_irqrestore(&irq_desc[vector].lock,
837 flags);
838 return err;
839 }
840
841 /*
842 * If the vector is shared and already unmasked for
843 * other interrupt sources, don't mask it.
844 */
845 low32 = iosapic_intr_info[vector].low32;
846 if (vector_is_shared(vector) && !(low32 & IOSAPIC_MASK))
847 mask = 0;
848 set_rte(gsi, vector, dest, mask);
849 } 799 }
850 spin_unlock(&iosapic_lock); 800
851 spin_unlock_irqrestore(&irq_desc[vector].lock, flags); 801 /*
802 * If the vector is shared and already unmasked for other
803 * interrupt sources, don't mask it.
804 */
805 low32 = iosapic_intr_info[irq].low32;
806 if (irq_is_shared(irq) && !(low32 & IOSAPIC_MASK))
807 mask = 0;
808 set_rte(gsi, irq, dest, mask);
852 809
853 printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n", 810 printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n",
854 gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"), 811 gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
855 (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), 812 (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
856 cpu_logical_id(dest), dest, vector); 813 cpu_logical_id(dest), dest, irq_to_vector(irq));
857 814 unlock_all:
858 return vector; 815 spin_unlock(&irq_desc[irq].lock);
816 unlock_iosapic_lock:
817 spin_unlock_irqrestore(&iosapic_lock, flags);
818 return irq;
859} 819}
860 820
861void 821void
862iosapic_unregister_intr (unsigned int gsi) 822iosapic_unregister_intr (unsigned int gsi)
863{ 823{
864 unsigned long flags; 824 unsigned long flags;
865 int irq, vector, index; 825 int irq, index;
866 irq_desc_t *idesc; 826 irq_desc_t *idesc;
867 u32 low32; 827 u32 low32;
868 unsigned long trigger, polarity; 828 unsigned long trigger, polarity;
@@ -881,78 +841,56 @@ iosapic_unregister_intr (unsigned int gsi)
881 WARN_ON(1); 841 WARN_ON(1);
882 return; 842 return;
883 } 843 }
884 vector = irq_to_vector(irq);
885 844
886 idesc = irq_desc + irq; 845 spin_lock_irqsave(&iosapic_lock, flags);
887 spin_lock_irqsave(&idesc->lock, flags); 846 if ((rte = find_rte(irq, gsi)) == NULL) {
888 spin_lock(&iosapic_lock); 847 printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n",
889 { 848 gsi);
890 if ((rte = gsi_vector_to_rte(gsi, vector)) == NULL) { 849 WARN_ON(1);
891 printk(KERN_ERR 850 goto out;
892 "iosapic_unregister_intr(%u) unbalanced\n", 851 }
893 gsi);
894 WARN_ON(1);
895 goto out;
896 }
897 852
898 if (--rte->refcnt > 0) 853 if (--rte->refcnt > 0)
899 goto out; 854 goto out;
900 855
901 /* Mask the interrupt */ 856 idesc = irq_desc + irq;
902 low32 = iosapic_intr_info[vector].low32 | IOSAPIC_MASK; 857 rte->refcnt = NO_REF_RTE;
903 iosapic_write(rte->addr, IOSAPIC_RTE_LOW(rte->rte_index),
904 low32);
905 858
906 /* Remove the rte entry from the list */ 859 /* Mask the interrupt */
907 list_del(&rte->rte_list); 860 low32 = iosapic_intr_info[irq].low32 | IOSAPIC_MASK;
908 iosapic_intr_info[vector].count--; 861 iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte->rte_index), low32);
909 iosapic_free_rte(rte);
910 index = find_iosapic(gsi);
911 iosapic_lists[index].rtes_inuse--;
912 WARN_ON(iosapic_lists[index].rtes_inuse < 0);
913
914 trigger = iosapic_intr_info[vector].trigger;
915 polarity = iosapic_intr_info[vector].polarity;
916 dest = iosapic_intr_info[vector].dest;
917 printk(KERN_INFO
918 "GSI %u (%s, %s) -> CPU %d (0x%04x)"
919 " vector %d unregistered\n",
920 gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
921 (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
922 cpu_logical_id(dest), dest, vector);
923 862
924 if (list_empty(&iosapic_intr_info[vector].rtes)) { 863 iosapic_intr_info[irq].count--;
925 /* Sanity check */ 864 index = find_iosapic(gsi);
926 BUG_ON(iosapic_intr_info[vector].count); 865 iosapic_lists[index].rtes_inuse--;
866 WARN_ON(iosapic_lists[index].rtes_inuse < 0);
927 867
928 /* Clear the interrupt controller descriptor */ 868 trigger = iosapic_intr_info[irq].trigger;
929 idesc->chip = &no_irq_type; 869 polarity = iosapic_intr_info[irq].polarity;
870 dest = iosapic_intr_info[irq].dest;
871 printk(KERN_INFO
872 "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d unregistered\n",
873 gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
874 (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
875 cpu_logical_id(dest), dest, irq_to_vector(irq));
930 876
877 if (iosapic_intr_info[irq].count == 0) {
931#ifdef CONFIG_SMP 878#ifdef CONFIG_SMP
932 /* Clear affinity */ 879 /* Clear affinity */
933 cpus_setall(idesc->affinity); 880 cpus_setall(idesc->affinity);
934#endif 881#endif
935 882 /* Clear the interrupt information */
936 /* Clear the interrupt information */ 883 iosapic_intr_info[irq].dest = 0;
937 memset(&iosapic_intr_info[vector], 0, 884 iosapic_intr_info[irq].dmode = 0;
938 sizeof(struct iosapic_intr_info)); 885 iosapic_intr_info[irq].polarity = 0;
939 iosapic_intr_info[vector].low32 |= IOSAPIC_MASK; 886 iosapic_intr_info[irq].trigger = 0;
940 INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes); 887 iosapic_intr_info[irq].low32 |= IOSAPIC_MASK;
941 888
942 if (idesc->action) { 889 /* Destroy and reserve IRQ */
943 printk(KERN_ERR 890 destroy_and_reserve_irq(irq);
944 "interrupt handlers still exist on"
945 "IRQ %u\n", irq);
946 WARN_ON(1);
947 }
948
949 /* Free the interrupt vector */
950 free_irq_vector(vector);
951 }
952 } 891 }
953 out: 892 out:
954 spin_unlock(&iosapic_lock); 893 spin_unlock_irqrestore(&iosapic_lock, flags);
955 spin_unlock_irqrestore(&idesc->lock, flags);
956} 894}
957 895
958/* 896/*
@@ -965,27 +903,30 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
965{ 903{
966 static const char * const name[] = {"unknown", "PMI", "INIT", "CPEI"}; 904 static const char * const name[] = {"unknown", "PMI", "INIT", "CPEI"};
967 unsigned char delivery; 905 unsigned char delivery;
968 int vector, mask = 0; 906 int irq, vector, mask = 0;
969 unsigned int dest = ((id << 8) | eid) & 0xffff; 907 unsigned int dest = ((id << 8) | eid) & 0xffff;
970 908
971 switch (int_type) { 909 switch (int_type) {
972 case ACPI_INTERRUPT_PMI: 910 case ACPI_INTERRUPT_PMI:
973 vector = iosapic_vector; 911 irq = vector = iosapic_vector;
912 bind_irq_vector(irq, vector, CPU_MASK_ALL);
974 /* 913 /*
975 * since PMI vector is alloc'd by FW(ACPI) not by kernel, 914 * since PMI vector is alloc'd by FW(ACPI) not by kernel,
976 * we need to make sure the vector is available 915 * we need to make sure the vector is available
977 */ 916 */
978 iosapic_reassign_vector(vector); 917 iosapic_reassign_vector(irq);
979 delivery = IOSAPIC_PMI; 918 delivery = IOSAPIC_PMI;
980 break; 919 break;
981 case ACPI_INTERRUPT_INIT: 920 case ACPI_INTERRUPT_INIT:
982 vector = assign_irq_vector(AUTO_ASSIGN); 921 irq = create_irq();
983 if (vector < 0) 922 if (irq < 0)
984 panic("%s: out of interrupt vectors!\n", __FUNCTION__); 923 panic("%s: out of interrupt vectors!\n", __FUNCTION__);
924 vector = irq_to_vector(irq);
985 delivery = IOSAPIC_INIT; 925 delivery = IOSAPIC_INIT;
986 break; 926 break;
987 case ACPI_INTERRUPT_CPEI: 927 case ACPI_INTERRUPT_CPEI:
988 vector = IA64_CPE_VECTOR; 928 irq = vector = IA64_CPE_VECTOR;
929 BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
989 delivery = IOSAPIC_LOWEST_PRIORITY; 930 delivery = IOSAPIC_LOWEST_PRIORITY;
990 mask = 1; 931 mask = 1;
991 break; 932 break;
@@ -995,7 +936,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
995 return -1; 936 return -1;
996 } 937 }
997 938
998 register_intr(gsi, vector, delivery, polarity, trigger); 939 register_intr(gsi, irq, delivery, polarity, trigger);
999 940
1000 printk(KERN_INFO 941 printk(KERN_INFO
1001 "PLATFORM int %s (0x%x): GSI %u (%s, %s) -> CPU %d (0x%04x)" 942 "PLATFORM int %s (0x%x): GSI %u (%s, %s) -> CPU %d (0x%04x)"
@@ -1005,7 +946,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
1005 (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), 946 (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
1006 cpu_logical_id(dest), dest, vector); 947 cpu_logical_id(dest), dest, vector);
1007 948
1008 set_rte(gsi, vector, dest, mask); 949 set_rte(gsi, irq, dest, mask);
1009 return vector; 950 return vector;
1010} 951}
1011 952
@@ -1017,30 +958,32 @@ iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
1017 unsigned long polarity, 958 unsigned long polarity,
1018 unsigned long trigger) 959 unsigned long trigger)
1019{ 960{
1020 int vector; 961 int vector, irq;
1021 unsigned int dest = cpu_physical_id(smp_processor_id()); 962 unsigned int dest = cpu_physical_id(smp_processor_id());
1022 963
1023 vector = isa_irq_to_vector(isa_irq); 964 irq = vector = isa_irq_to_vector(isa_irq);
1024 965 BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
1025 register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY, polarity, trigger); 966 register_intr(gsi, irq, IOSAPIC_LOWEST_PRIORITY, polarity, trigger);
1026 967
1027 DBG("ISA: IRQ %u -> GSI %u (%s,%s) -> CPU %d (0x%04x) vector %d\n", 968 DBG("ISA: IRQ %u -> GSI %u (%s,%s) -> CPU %d (0x%04x) vector %d\n",
1028 isa_irq, gsi, trigger == IOSAPIC_EDGE ? "edge" : "level", 969 isa_irq, gsi, trigger == IOSAPIC_EDGE ? "edge" : "level",
1029 polarity == IOSAPIC_POL_HIGH ? "high" : "low", 970 polarity == IOSAPIC_POL_HIGH ? "high" : "low",
1030 cpu_logical_id(dest), dest, vector); 971 cpu_logical_id(dest), dest, vector);
1031 972
1032 set_rte(gsi, vector, dest, 1); 973 set_rte(gsi, irq, dest, 1);
1033} 974}
1034 975
1035void __init 976void __init
1036iosapic_system_init (int system_pcat_compat) 977iosapic_system_init (int system_pcat_compat)
1037{ 978{
1038 int vector; 979 int irq;
1039 980
1040 for (vector = 0; vector < IA64_NUM_VECTORS; ++vector) { 981 for (irq = 0; irq < NR_IRQS; ++irq) {
1041 iosapic_intr_info[vector].low32 = IOSAPIC_MASK; 982 iosapic_intr_info[irq].low32 = IOSAPIC_MASK;
1042 /* mark as unused */ 983 /* mark as unused */
1043 INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes); 984 INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes);
985
986 iosapic_intr_info[irq].count = 0;
1044 } 987 }
1045 988
1046 pcat_compat = system_pcat_compat; 989 pcat_compat = system_pcat_compat;
@@ -1108,31 +1051,35 @@ iosapic_init (unsigned long phys_addr, unsigned int gsi_base)
1108 unsigned long flags; 1051 unsigned long flags;
1109 1052
1110 spin_lock_irqsave(&iosapic_lock, flags); 1053 spin_lock_irqsave(&iosapic_lock, flags);
1111 { 1054 index = find_iosapic(gsi_base);
1112 addr = ioremap(phys_addr, 0); 1055 if (index >= 0) {
1113 ver = iosapic_version(addr); 1056 spin_unlock_irqrestore(&iosapic_lock, flags);
1057 return -EBUSY;
1058 }
1114 1059
1115 if ((err = iosapic_check_gsi_range(gsi_base, ver))) { 1060 addr = ioremap(phys_addr, 0);
1116 iounmap(addr); 1061 ver = iosapic_version(addr);
1117 spin_unlock_irqrestore(&iosapic_lock, flags); 1062 if ((err = iosapic_check_gsi_range(gsi_base, ver))) {
1118 return err; 1063 iounmap(addr);
1119 } 1064 spin_unlock_irqrestore(&iosapic_lock, flags);
1065 return err;
1066 }
1120 1067
1121 /* 1068 /*
1122 * The MAX_REDIR register holds the highest input pin 1069 * The MAX_REDIR register holds the highest input pin number
1123 * number (starting from 0). 1070 * (starting from 0). We add 1 so that we can use it for
1124 * We add 1 so that we can use it for number of pins (= RTEs) 1071 * number of pins (= RTEs)
1125 */ 1072 */
1126 num_rte = ((ver >> 16) & 0xff) + 1; 1073 num_rte = ((ver >> 16) & 0xff) + 1;
1127 1074
1128 index = iosapic_alloc(); 1075 index = iosapic_alloc();
1129 iosapic_lists[index].addr = addr; 1076 iosapic_lists[index].addr = addr;
1130 iosapic_lists[index].gsi_base = gsi_base; 1077 iosapic_lists[index].gsi_base = gsi_base;
1131 iosapic_lists[index].num_rte = num_rte; 1078 iosapic_lists[index].num_rte = num_rte;
1132#ifdef CONFIG_NUMA 1079#ifdef CONFIG_NUMA
1133 iosapic_lists[index].node = MAX_NUMNODES; 1080 iosapic_lists[index].node = MAX_NUMNODES;
1134#endif 1081#endif
1135 } 1082 spin_lock_init(&iosapic_lists[index].lock);
1136 spin_unlock_irqrestore(&iosapic_lock, flags); 1083 spin_unlock_irqrestore(&iosapic_lock, flags);
1137 1084
1138 if ((gsi_base == 0) && pcat_compat) { 1085 if ((gsi_base == 0) && pcat_compat) {
@@ -1157,25 +1104,22 @@ iosapic_remove (unsigned int gsi_base)
1157 unsigned long flags; 1104 unsigned long flags;
1158 1105
1159 spin_lock_irqsave(&iosapic_lock, flags); 1106 spin_lock_irqsave(&iosapic_lock, flags);
1160 { 1107 index = find_iosapic(gsi_base);
1161 index = find_iosapic(gsi_base); 1108 if (index < 0) {
1162 if (index < 0) { 1109 printk(KERN_WARNING "%s: No IOSAPIC for GSI base %u\n",
1163 printk(KERN_WARNING "%s: No IOSAPIC for GSI base %u\n", 1110 __FUNCTION__, gsi_base);
1164 __FUNCTION__, gsi_base); 1111 goto out;
1165 goto out; 1112 }
1166 }
1167
1168 if (iosapic_lists[index].rtes_inuse) {
1169 err = -EBUSY;
1170 printk(KERN_WARNING
1171 "%s: IOSAPIC for GSI base %u is busy\n",
1172 __FUNCTION__, gsi_base);
1173 goto out;
1174 }
1175 1113
1176 iounmap(iosapic_lists[index].addr); 1114 if (iosapic_lists[index].rtes_inuse) {
1177 iosapic_free(index); 1115 err = -EBUSY;
1116 printk(KERN_WARNING "%s: IOSAPIC for GSI base %u is busy\n",
1117 __FUNCTION__, gsi_base);
1118 goto out;
1178 } 1119 }
1120
1121 iounmap(iosapic_lists[index].addr);
1122 iosapic_free(index);
1179 out: 1123 out:
1180 spin_unlock_irqrestore(&iosapic_lock, flags); 1124 spin_unlock_irqrestore(&iosapic_lock, flags);
1181 return err; 1125 return err;
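
The iosapic.c changes above drop the old assumption of an identity mapping between IA-64 vectors and Linux irq numbers (the removed "XXX fix me" comment) and instead resolve a GSI to a Linux irq first, then to a hardware vector through the new per-irq configuration. A minimal sketch of that lookup path, reusing only helpers introduced in this series (__gsi_to_irq(), check_irq_used(), irq_to_vector()); the wrapper name and error handling are illustrative, not part of the commit:

	/* Illustrative wrapper: GSI -> Linux irq -> IA-64 vector under the new scheme. */
	static int example_gsi_to_hw_vector(unsigned int gsi)
	{
		int irq = __gsi_to_irq(gsi);	/* walk the per-irq RTE lists */

		if (irq < 0 || check_irq_used(irq) < 0)
			return -1;		/* GSI not registered or irq unused */

		return irq_to_vector(irq);	/* now reads irq_cfg[irq].vector */
	}
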
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 407b45870489..cc3ee4ef37af 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -35,7 +35,7 @@ void ack_bad_irq(unsigned int irq)
35#ifdef CONFIG_IA64_GENERIC 35#ifdef CONFIG_IA64_GENERIC
36unsigned int __ia64_local_vector_to_irq (ia64_vector vec) 36unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
37{ 37{
38 return (unsigned int) vec; 38 return __get_cpu_var(vector_irq)[vec];
39} 39}
40#endif 40#endif
41 41
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index bc47049f060f..91797c111162 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -46,6 +46,12 @@
46 46
47#define IRQ_DEBUG 0 47#define IRQ_DEBUG 0
48 48
49#define IRQ_VECTOR_UNASSIGNED (0)
50
51#define IRQ_UNUSED (0)
52#define IRQ_USED (1)
53#define IRQ_RSVD (2)
54
49/* These can be overridden in platform_irq_init */ 55/* These can be overridden in platform_irq_init */
50int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR; 56int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
51int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR; 57int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;
@@ -54,6 +60,8 @@ int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;
54void __iomem *ipi_base_addr = ((void __iomem *) 60void __iomem *ipi_base_addr = ((void __iomem *)
55 (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR)); 61 (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));
56 62
63static cpumask_t vector_allocation_domain(int cpu);
64
57/* 65/*
58 * Legacy IRQ to IA-64 vector translation table. 66 * Legacy IRQ to IA-64 vector translation table.
59 */ 67 */
@@ -64,46 +72,269 @@ __u8 isa_irq_to_vector_map[16] = {
64}; 72};
65EXPORT_SYMBOL(isa_irq_to_vector_map); 73EXPORT_SYMBOL(isa_irq_to_vector_map);
66 74
67static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_MAX_DEVICE_VECTORS)]; 75DEFINE_SPINLOCK(vector_lock);
76
77struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
78 [0 ... NR_IRQS - 1] = {
79 .vector = IRQ_VECTOR_UNASSIGNED,
80 .domain = CPU_MASK_NONE
81 }
82};
83
84DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
85 [0 ... IA64_NUM_VECTORS - 1] = IA64_SPURIOUS_INT_VECTOR
86};
87
88static cpumask_t vector_table[IA64_MAX_DEVICE_VECTORS] = {
89 [0 ... IA64_MAX_DEVICE_VECTORS - 1] = CPU_MASK_NONE
90};
91
92static int irq_status[NR_IRQS] = {
93 [0 ... NR_IRQS -1] = IRQ_UNUSED
94};
95
96int check_irq_used(int irq)
97{
98 if (irq_status[irq] == IRQ_USED)
99 return 1;
100
101 return -1;
102}
103
104static void reserve_irq(unsigned int irq)
105{
106 unsigned long flags;
107
108 spin_lock_irqsave(&vector_lock, flags);
109 irq_status[irq] = IRQ_RSVD;
110 spin_unlock_irqrestore(&vector_lock, flags);
111}
112
113static inline int find_unassigned_irq(void)
114{
115 int irq;
116
117 for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++)
118 if (irq_status[irq] == IRQ_UNUSED)
119 return irq;
120 return -ENOSPC;
121}
122
123static inline int find_unassigned_vector(cpumask_t domain)
124{
125 cpumask_t mask;
126 int pos;
127
128 cpus_and(mask, domain, cpu_online_map);
129 if (cpus_empty(mask))
130 return -EINVAL;
131
132 for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
133 cpus_and(mask, domain, vector_table[pos]);
134 if (!cpus_empty(mask))
135 continue;
136 return IA64_FIRST_DEVICE_VECTOR + pos;
137 }
138 return -ENOSPC;
139}
140
141static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
142{
143 cpumask_t mask;
144 int cpu, pos;
145 struct irq_cfg *cfg = &irq_cfg[irq];
146
147 cpus_and(mask, domain, cpu_online_map);
148 if (cpus_empty(mask))
149 return -EINVAL;
150 if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
151 return 0;
152 if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
153 return -EBUSY;
154 for_each_cpu_mask(cpu, mask)
155 per_cpu(vector_irq, cpu)[vector] = irq;
156 cfg->vector = vector;
157 cfg->domain = domain;
158 irq_status[irq] = IRQ_USED;
159 pos = vector - IA64_FIRST_DEVICE_VECTOR;
160 cpus_or(vector_table[pos], vector_table[pos], domain);
161 return 0;
162}
163
164int bind_irq_vector(int irq, int vector, cpumask_t domain)
165{
166 unsigned long flags;
167 int ret;
168
169 spin_lock_irqsave(&vector_lock, flags);
170 ret = __bind_irq_vector(irq, vector, domain);
171 spin_unlock_irqrestore(&vector_lock, flags);
172 return ret;
173}
174
175static void __clear_irq_vector(int irq)
176{
177 int vector, cpu, pos;
178 cpumask_t mask;
179 cpumask_t domain;
180 struct irq_cfg *cfg = &irq_cfg[irq];
181
182 BUG_ON((unsigned)irq >= NR_IRQS);
183 BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
184 vector = cfg->vector;
185 domain = cfg->domain;
186 cpus_and(mask, cfg->domain, cpu_online_map);
187 for_each_cpu_mask(cpu, mask)
188 per_cpu(vector_irq, cpu)[vector] = IA64_SPURIOUS_INT_VECTOR;
189 cfg->vector = IRQ_VECTOR_UNASSIGNED;
190 cfg->domain = CPU_MASK_NONE;
191 irq_status[irq] = IRQ_UNUSED;
192 pos = vector - IA64_FIRST_DEVICE_VECTOR;
193 cpus_andnot(vector_table[pos], vector_table[pos], domain);
194}
195
196static void clear_irq_vector(int irq)
197{
198 unsigned long flags;
199
200 spin_lock_irqsave(&vector_lock, flags);
201 __clear_irq_vector(irq);
202 spin_unlock_irqrestore(&vector_lock, flags);
203}
68 204
69int 205int
70assign_irq_vector (int irq) 206assign_irq_vector (int irq)
71{ 207{
72 int pos, vector; 208 unsigned long flags;
73 again: 209 int vector, cpu;
74 pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS); 210 cpumask_t domain;
75 vector = IA64_FIRST_DEVICE_VECTOR + pos; 211
76 if (vector > IA64_LAST_DEVICE_VECTOR) 212 vector = -ENOSPC;
77 return -ENOSPC; 213
78 if (test_and_set_bit(pos, ia64_vector_mask)) 214 spin_lock_irqsave(&vector_lock, flags);
79 goto again; 215 if (irq < 0) {
216 goto out;
217 }
218 for_each_online_cpu(cpu) {
219 domain = vector_allocation_domain(cpu);
220 vector = find_unassigned_vector(domain);
221 if (vector >= 0)
222 break;
223 }
224 if (vector < 0)
225 goto out;
226 BUG_ON(__bind_irq_vector(irq, vector, domain));
227 out:
228 spin_unlock_irqrestore(&vector_lock, flags);
80 return vector; 229 return vector;
81} 230}
82 231
83void 232void
84free_irq_vector (int vector) 233free_irq_vector (int vector)
85{ 234{
86 int pos; 235 if (vector < IA64_FIRST_DEVICE_VECTOR ||
87 236 vector > IA64_LAST_DEVICE_VECTOR)
88 if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR)
89 return; 237 return;
90 238 clear_irq_vector(vector);
91 pos = vector - IA64_FIRST_DEVICE_VECTOR;
92 if (!test_and_clear_bit(pos, ia64_vector_mask))
93 printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
94} 239}
95 240
96int 241int
97reserve_irq_vector (int vector) 242reserve_irq_vector (int vector)
98{ 243{
99 int pos;
100
101 if (vector < IA64_FIRST_DEVICE_VECTOR || 244 if (vector < IA64_FIRST_DEVICE_VECTOR ||
102 vector > IA64_LAST_DEVICE_VECTOR) 245 vector > IA64_LAST_DEVICE_VECTOR)
103 return -EINVAL; 246 return -EINVAL;
247 return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
248}
104 249
105 pos = vector - IA64_FIRST_DEVICE_VECTOR; 250/*
106 return test_and_set_bit(pos, ia64_vector_mask); 251 * Initialize vector_irq on a new cpu. This function must be called
252 * with vector_lock held.
253 */
254void __setup_vector_irq(int cpu)
255{
256 int irq, vector;
257
258 /* Clear vector_irq */
259 for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
260 per_cpu(vector_irq, cpu)[vector] = IA64_SPURIOUS_INT_VECTOR;
261 /* Mark the inuse vectors */
262 for (irq = 0; irq < NR_IRQS; ++irq) {
263 if (!cpu_isset(cpu, irq_cfg[irq].domain))
264 continue;
265 vector = irq_to_vector(irq);
266 per_cpu(vector_irq, cpu)[vector] = irq;
267 }
268}
269
270#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
271static enum vector_domain_type {
272 VECTOR_DOMAIN_NONE,
273 VECTOR_DOMAIN_PERCPU
274} vector_domain_type = VECTOR_DOMAIN_NONE;
275
276static cpumask_t vector_allocation_domain(int cpu)
277{
278 if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
279 return cpumask_of_cpu(cpu);
280 return CPU_MASK_ALL;
281}
282
283static int __init parse_vector_domain(char *arg)
284{
285 if (!arg)
286 return -EINVAL;
287 if (!strcmp(arg, "percpu")) {
288 vector_domain_type = VECTOR_DOMAIN_PERCPU;
289 no_int_routing = 1;
290 }
291 return 1;
292}
293early_param("vector", parse_vector_domain);
294#else
295static cpumask_t vector_allocation_domain(int cpu)
296{
297 return CPU_MASK_ALL;
298}
299#endif
300
301
302void destroy_and_reserve_irq(unsigned int irq)
303{
304 dynamic_irq_cleanup(irq);
305
306 clear_irq_vector(irq);
307 reserve_irq(irq);
308}
309
310static int __reassign_irq_vector(int irq, int cpu)
311{
312 struct irq_cfg *cfg = &irq_cfg[irq];
313 int vector;
314 cpumask_t domain;
315
316 if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
317 return -EINVAL;
318 if (cpu_isset(cpu, cfg->domain))
319 return 0;
320 domain = vector_allocation_domain(cpu);
321 vector = find_unassigned_vector(domain);
322 if (vector < 0)
323 return -ENOSPC;
324 __clear_irq_vector(irq);
325 BUG_ON(__bind_irq_vector(irq, vector, domain));
326 return 0;
327}
328
329int reassign_irq_vector(int irq, int cpu)
330{
331 unsigned long flags;
332 int ret;
333
334 spin_lock_irqsave(&vector_lock, flags);
335 ret = __reassign_irq_vector(irq, cpu);
336 spin_unlock_irqrestore(&vector_lock, flags);
337 return ret;
107} 338}
108 339
109/* 340/*
@@ -111,18 +342,35 @@ reserve_irq_vector (int vector)
111 */ 342 */
112int create_irq(void) 343int create_irq(void)
113{ 344{
114 int vector = assign_irq_vector(AUTO_ASSIGN); 345 unsigned long flags;
115 346 int irq, vector, cpu;
116 if (vector >= 0) 347 cpumask_t domain;
117 dynamic_irq_init(vector); 348
118 349 irq = vector = -ENOSPC;
119 return vector; 350 spin_lock_irqsave(&vector_lock, flags);
351 for_each_online_cpu(cpu) {
352 domain = vector_allocation_domain(cpu);
353 vector = find_unassigned_vector(domain);
354 if (vector >= 0)
355 break;
356 }
357 if (vector < 0)
358 goto out;
359 irq = find_unassigned_irq();
360 if (irq < 0)
361 goto out;
362 BUG_ON(__bind_irq_vector(irq, vector, domain));
363 out:
364 spin_unlock_irqrestore(&vector_lock, flags);
365 if (irq >= 0)
366 dynamic_irq_init(irq);
367 return irq;
120} 368}
121 369
122void destroy_irq(unsigned int irq) 370void destroy_irq(unsigned int irq)
123{ 371{
124 dynamic_irq_cleanup(irq); 372 dynamic_irq_cleanup(irq);
125 free_irq_vector(irq); 373 clear_irq_vector(irq);
126} 374}
127 375
128#ifdef CONFIG_SMP 376#ifdef CONFIG_SMP
@@ -301,14 +549,13 @@ register_percpu_irq (ia64_vector vec, struct irqaction *action)
301 irq_desc_t *desc; 549 irq_desc_t *desc;
302 unsigned int irq; 550 unsigned int irq;
303 551
304 for (irq = 0; irq < NR_IRQS; ++irq) 552 irq = vec;
305 if (irq_to_vector(irq) == vec) { 553 BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
306 desc = irq_desc + irq; 554 desc = irq_desc + irq;
307 desc->status |= IRQ_PER_CPU; 555 desc->status |= IRQ_PER_CPU;
308 desc->chip = &irq_type_ia64_lsapic; 556 desc->chip = &irq_type_ia64_lsapic;
309 if (action) 557 if (action)
310 setup_irq(irq, action); 558 setup_irq(irq, action);
311 }
312} 559}
313 560
314void __init 561void __init
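
The irq_ia64.c rework above is where the vector-domain model lives: each irq gets an irq_cfg with a vector and a cpumask domain, and find_unassigned_vector() only hands out a vector whose vector_table[] entry does not yet intersect the requested domain. Booting with "vector=percpu" (documented in the kernel-parameters.txt hunk) switches vector_allocation_domain() from CPU_MASK_ALL to a single-CPU mask and also sets no_int_routing, so the same vector number can be reused for different irqs on different CPUs. A standalone sketch of that allocation rule with domains reduced to plain bitmasks; the sizes and names below are illustrative, not the kernel's:

	#include <stdio.h>

	#define EXAMPLE_CPUS		8	/* stand-in for NR_CPUS */
	#define EXAMPLE_DEV_VECTORS	16	/* stand-in for the device vector range */

	/* vector_table[pos]: mask of CPUs on which device vector slot 'pos' is bound */
	static unsigned long vector_table[EXAMPLE_DEV_VECTORS];

	/* First vector slot still free on every CPU in 'domain', or -1 if none. */
	static int find_unassigned_vector(unsigned long domain)
	{
		int pos;

		for (pos = 0; pos < EXAMPLE_DEV_VECTORS; pos++)
			if (!(vector_table[pos] & domain))
				return pos;
		return -1;
	}

	int main(void)
	{
		unsigned long all = (1UL << EXAMPLE_CPUS) - 1;	/* like CPU_MASK_ALL */
		unsigned long cpu2 = 1UL << 2;			/* like cpumask_of_cpu(2) */
		int v_global, v_percpu;

		v_global = find_unassigned_vector(all);		/* global domain */
		vector_table[v_global] |= all;

		/* percpu domain: slot 0 is taken on CPU 2 as well, so the next slot is used */
		v_percpu = find_unassigned_vector(cpu2);
		vector_table[v_percpu] |= cpu2;

		printf("global domain -> slot %d, percpu domain -> slot %d\n",
		       v_global, v_percpu);
		return 0;
	}
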
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index c81080df70df..2fdbd5c3f213 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -13,6 +13,7 @@
13 13
14#define MSI_DATA_VECTOR_SHIFT 0 14#define MSI_DATA_VECTOR_SHIFT 0
15#define MSI_DATA_VECTOR(v) (((u8)v) << MSI_DATA_VECTOR_SHIFT) 15#define MSI_DATA_VECTOR(v) (((u8)v) << MSI_DATA_VECTOR_SHIFT)
16#define MSI_DATA_VECTOR_MASK 0xffffff00
16 17
17#define MSI_DATA_DELIVERY_SHIFT 8 18#define MSI_DATA_DELIVERY_SHIFT 8
18#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_SHIFT) 19#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_SHIFT)
@@ -50,17 +51,29 @@ static struct irq_chip ia64_msi_chip;
50static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask) 51static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
51{ 52{
52 struct msi_msg msg; 53 struct msi_msg msg;
53 u32 addr; 54 u32 addr, data;
55 int cpu = first_cpu(cpu_mask);
56
57 if (!cpu_online(cpu))
58 return;
59
60 if (reassign_irq_vector(irq, cpu))
61 return;
54 62
55 read_msi_msg(irq, &msg); 63 read_msi_msg(irq, &msg);
56 64
57 addr = msg.address_lo; 65 addr = msg.address_lo;
58 addr &= MSI_ADDR_DESTID_MASK; 66 addr &= MSI_ADDR_DESTID_MASK;
59 addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(first_cpu(cpu_mask))); 67 addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
60 msg.address_lo = addr; 68 msg.address_lo = addr;
61 69
70 data = msg.data;
71 data &= MSI_DATA_VECTOR_MASK;
72 data |= MSI_DATA_VECTOR(irq_to_vector(irq));
73 msg.data = data;
74
62 write_msi_msg(irq, &msg); 75 write_msi_msg(irq, &msg);
63 irq_desc[irq].affinity = cpu_mask; 76 irq_desc[irq].affinity = cpumask_of_cpu(cpu);
64} 77}
65#endif /* CONFIG_SMP */ 78#endif /* CONFIG_SMP */
66 79
@@ -69,13 +82,15 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
69 struct msi_msg msg; 82 struct msi_msg msg;
70 unsigned long dest_phys_id; 83 unsigned long dest_phys_id;
71 int irq, vector; 84 int irq, vector;
85 cpumask_t mask;
72 86
73 irq = create_irq(); 87 irq = create_irq();
74 if (irq < 0) 88 if (irq < 0)
75 return irq; 89 return irq;
76 90
77 set_irq_msi(irq, desc); 91 set_irq_msi(irq, desc);
78 dest_phys_id = cpu_physical_id(first_cpu(cpu_online_map)); 92 cpus_and(mask, irq_to_domain(irq), cpu_online_map);
93 dest_phys_id = cpu_physical_id(first_cpu(mask));
79 vector = irq_to_vector(irq); 94 vector = irq_to_vector(irq);
80 95
81 msg.address_hi = 0; 96 msg.address_hi = 0;
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 3c9d8e6089cf..9f5c90b594b9 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -395,9 +395,13 @@ smp_callin (void)
395 fix_b0_for_bsp(); 395 fix_b0_for_bsp();
396 396
397 lock_ipi_calllock(); 397 lock_ipi_calllock();
398 spin_lock(&vector_lock);
399 /* Setup the per cpu irq handling data structures */
400 __setup_vector_irq(cpuid);
398 cpu_set(cpuid, cpu_online_map); 401 cpu_set(cpuid, cpu_online_map);
399 unlock_ipi_calllock(); 402 unlock_ipi_calllock();
400 per_cpu(cpu_state, cpuid) = CPU_ONLINE; 403 per_cpu(cpu_state, cpuid) = CPU_ONLINE;
404 spin_unlock(&vector_lock);
401 405
402 smp_setup_percpu_timer(); 406 smp_setup_percpu_timer();
403 407
diff --git a/include/asm-ia64/hw_irq.h b/include/asm-ia64/hw_irq.h
index c054d7a9aaa7..efa1b8f7251d 100644
--- a/include/asm-ia64/hw_irq.h
+++ b/include/asm-ia64/hw_irq.h
@@ -90,13 +90,27 @@ enum {
90extern __u8 isa_irq_to_vector_map[16]; 90extern __u8 isa_irq_to_vector_map[16];
91#define isa_irq_to_vector(x) isa_irq_to_vector_map[(x)] 91#define isa_irq_to_vector(x) isa_irq_to_vector_map[(x)]
92 92
93struct irq_cfg {
94 ia64_vector vector;
95 cpumask_t domain;
96};
97extern spinlock_t vector_lock;
98extern struct irq_cfg irq_cfg[NR_IRQS];
99#define irq_to_domain(x) irq_cfg[(x)].domain
100DECLARE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq);
101
93extern struct hw_interrupt_type irq_type_ia64_lsapic; /* CPU-internal interrupt controller */ 102extern struct hw_interrupt_type irq_type_ia64_lsapic; /* CPU-internal interrupt controller */
94 103
104extern int bind_irq_vector(int irq, int vector, cpumask_t domain);
95extern int assign_irq_vector (int irq); /* allocate a free vector */ 105extern int assign_irq_vector (int irq); /* allocate a free vector */
96extern void free_irq_vector (int vector); 106extern void free_irq_vector (int vector);
97extern int reserve_irq_vector (int vector); 107extern int reserve_irq_vector (int vector);
108extern void __setup_vector_irq(int cpu);
109extern int reassign_irq_vector(int irq, int cpu);
98extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect); 110extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
99extern void register_percpu_irq (ia64_vector vec, struct irqaction *action); 111extern void register_percpu_irq (ia64_vector vec, struct irqaction *action);
112extern int check_irq_used (int irq);
113extern void destroy_and_reserve_irq (unsigned int irq);
100 114
101static inline void ia64_resend_irq(unsigned int vector) 115static inline void ia64_resend_irq(unsigned int vector)
102{ 116{
@@ -113,7 +127,7 @@ extern irq_desc_t irq_desc[NR_IRQS];
113static inline unsigned int 127static inline unsigned int
114__ia64_local_vector_to_irq (ia64_vector vec) 128__ia64_local_vector_to_irq (ia64_vector vec)
115{ 129{
116 return (unsigned int) vec; 130 return __get_cpu_var(vector_irq)[vec];
117} 131}
118#endif 132#endif
119 133
@@ -131,7 +145,7 @@ __ia64_local_vector_to_irq (ia64_vector vec)
131static inline ia64_vector 145static inline ia64_vector
132irq_to_vector (int irq) 146irq_to_vector (int irq)
133{ 147{
134 return (ia64_vector) irq; 148 return irq_cfg[irq].vector;
135} 149}
136 150
137/* 151/*
diff --git a/include/asm-ia64/iosapic.h b/include/asm-ia64/iosapic.h
index 421cb6b62a7c..b8f712859140 100644
--- a/include/asm-ia64/iosapic.h
+++ b/include/asm-ia64/iosapic.h
@@ -47,19 +47,21 @@
47#define IOSAPIC_MASK_SHIFT 16 47#define IOSAPIC_MASK_SHIFT 16
48#define IOSAPIC_MASK (1<<IOSAPIC_MASK_SHIFT) 48#define IOSAPIC_MASK (1<<IOSAPIC_MASK_SHIFT)
49 49
50#define IOSAPIC_VECTOR_MASK 0xffffff00
51
50#ifndef __ASSEMBLY__ 52#ifndef __ASSEMBLY__
51 53
52#ifdef CONFIG_IOSAPIC 54#ifdef CONFIG_IOSAPIC
53 55
54#define NR_IOSAPICS 256 56#define NR_IOSAPICS 256
55 57
56static inline unsigned int iosapic_read(char __iomem *iosapic, unsigned int reg) 58static inline unsigned int __iosapic_read(char __iomem *iosapic, unsigned int reg)
57{ 59{
58 writel(reg, iosapic + IOSAPIC_REG_SELECT); 60 writel(reg, iosapic + IOSAPIC_REG_SELECT);
59 return readl(iosapic + IOSAPIC_WINDOW); 61 return readl(iosapic + IOSAPIC_WINDOW);
60} 62}
61 63
62static inline void iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) 64static inline void __iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
63{ 65{
64 writel(reg, iosapic + IOSAPIC_REG_SELECT); 66 writel(reg, iosapic + IOSAPIC_REG_SELECT);
65 writel(val, iosapic + IOSAPIC_WINDOW); 67 writel(val, iosapic + IOSAPIC_WINDOW);
diff --git a/include/asm-ia64/irq.h b/include/asm-ia64/irq.h
index 67221615e317..35b360b82e43 100644
--- a/include/asm-ia64/irq.h
+++ b/include/asm-ia64/irq.h
@@ -14,8 +14,13 @@
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/cpumask.h> 15#include <linux/cpumask.h>
16 16
17#define NR_IRQS 256 17#define NR_VECTORS 256
18#define NR_IRQ_VECTORS NR_IRQS 18
19#if (NR_VECTORS + 32 * NR_CPUS) < 1024
20#define NR_IRQS (NR_VECTORS + 32 * NR_CPUS)
21#else
22#define NR_IRQS 1024
23#endif
19 24
20static __inline__ int 25static __inline__ int
21irq_canonicalize (int irq) 26irq_canonicalize (int irq)
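
Finally, the include/asm-ia64/irq.h hunk decouples the Linux irq space from the 256 hardware vectors: NR_VECTORS stays at 256, while NR_IRQS grows with NR_CPUS (capped at 1024) because per-CPU domains let the same vector back different irqs on different CPUs. A worked instance of that macro with a hypothetical NR_CPUS of 16, purely for illustration:

	#define NR_VECTORS	256
	#define NR_CPUS		16				/* example configuration, not a default */
	#if (NR_VECTORS + 32 * NR_CPUS) < 1024
	#define NR_IRQS		(NR_VECTORS + 32 * NR_CPUS)	/* 256 + 512 = 768 here */
	#else
	#define NR_IRQS		1024				/* hard cap */
	#endif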