Diffstat (limited to 'arch/x86/kernel/io_apic_32.c')
-rw-r--r-- | arch/x86/kernel/io_apic_32.c | 474
1 files changed, 235 insertions, 239 deletions
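The hunks below are mostly mechanical cleanups (spacing, switch/brace layout, kzalloc() in place of kmalloc()+memset()) plus a rework of the IRQ0 timer path: the timer_over_8254 boot options go away, a timer_through_8259 flag is introduced, and check_timer() gains a no_pin1 fallback built around the new setup_timer_IRQ0_pin(). The first hunks also replace open-coded redirection-entry masks with named constants; as a minimal sketch, inferred only from the 0x00010000/0x00008000 literals they replace (the kernel's own io_apic.h is authoritative and may define them differently):

    /* Sketch only: values inferred from the literals replaced in the hunks below. */
    #define IO_APIC_REDIR_LEVEL_TRIGGER  (1 << 15)   /* 0x00008000 */
    #define IO_APIC_REDIR_MASKED         (1 << 16)   /* 0x00010000 */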
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index a40d54fc1fdd..d4f9df2b022a 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -58,7 +58,7 @@ static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
58 | static DEFINE_SPINLOCK(ioapic_lock); | 58 | static DEFINE_SPINLOCK(ioapic_lock); |
59 | static DEFINE_SPINLOCK(vector_lock); | 59 | static DEFINE_SPINLOCK(vector_lock); |
60 | 60 | ||
61 | int timer_over_8254 __initdata = 1; | 61 | int timer_through_8259 __initdata; |
62 | 62 | ||
63 | /* | 63 | /* |
64 | * Is the SiS APIC rmw bug present ? | 64 | * Is the SiS APIC rmw bug present ? |
@@ -239,7 +239,7 @@ static void __init replace_pin_at_irq(unsigned int irq,
239 | } | 239 | } |
240 | } | 240 | } |
241 | 241 | ||
242 | static void __modify_IO_APIC_irq (unsigned int irq, unsigned long enable, unsigned long disable) | 242 | static void __modify_IO_APIC_irq(unsigned int irq, unsigned long enable, unsigned long disable) |
243 | { | 243 | { |
244 | struct irq_pin_list *entry = irq_2_pin + irq; | 244 | struct irq_pin_list *entry = irq_2_pin + irq; |
245 | unsigned int pin, reg; | 245 | unsigned int pin, reg; |
@@ -259,30 +259,32 @@ static void __modify_IO_APIC_irq (unsigned int irq, unsigned long enable, unsign
259 | } | 259 | } |
260 | 260 | ||
261 | /* mask = 1 */ | 261 | /* mask = 1 */ |
262 | static void __mask_IO_APIC_irq (unsigned int irq) | 262 | static void __mask_IO_APIC_irq(unsigned int irq) |
263 | { | 263 | { |
264 | __modify_IO_APIC_irq(irq, 0x00010000, 0); | 264 | __modify_IO_APIC_irq(irq, IO_APIC_REDIR_MASKED, 0); |
265 | } | 265 | } |
266 | 266 | ||
267 | /* mask = 0 */ | 267 | /* mask = 0 */ |
268 | static void __unmask_IO_APIC_irq (unsigned int irq) | 268 | static void __unmask_IO_APIC_irq(unsigned int irq) |
269 | { | 269 | { |
270 | __modify_IO_APIC_irq(irq, 0, 0x00010000); | 270 | __modify_IO_APIC_irq(irq, 0, IO_APIC_REDIR_MASKED); |
271 | } | 271 | } |
272 | 272 | ||
273 | /* mask = 1, trigger = 0 */ | 273 | /* mask = 1, trigger = 0 */ |
274 | static void __mask_and_edge_IO_APIC_irq (unsigned int irq) | 274 | static void __mask_and_edge_IO_APIC_irq(unsigned int irq) |
275 | { | 275 | { |
276 | __modify_IO_APIC_irq(irq, 0x00010000, 0x00008000); | 276 | __modify_IO_APIC_irq(irq, IO_APIC_REDIR_MASKED, |
277 | IO_APIC_REDIR_LEVEL_TRIGGER); | ||
277 | } | 278 | } |
278 | 279 | ||
279 | /* mask = 0, trigger = 1 */ | 280 | /* mask = 0, trigger = 1 */ |
280 | static void __unmask_and_level_IO_APIC_irq (unsigned int irq) | 281 | static void __unmask_and_level_IO_APIC_irq(unsigned int irq) |
281 | { | 282 | { |
282 | __modify_IO_APIC_irq(irq, 0x00008000, 0x00010000); | 283 | __modify_IO_APIC_irq(irq, IO_APIC_REDIR_LEVEL_TRIGGER, |
284 | IO_APIC_REDIR_MASKED); | ||
283 | } | 285 | } |
284 | 286 | ||
285 | static void mask_IO_APIC_irq (unsigned int irq) | 287 | static void mask_IO_APIC_irq(unsigned int irq) |
286 | { | 288 | { |
287 | unsigned long flags; | 289 | unsigned long flags; |
288 | 290 | ||
@@ -291,7 +293,7 @@ static void mask_IO_APIC_irq (unsigned int irq)
291 | spin_unlock_irqrestore(&ioapic_lock, flags); | 293 | spin_unlock_irqrestore(&ioapic_lock, flags); |
292 | } | 294 | } |
293 | 295 | ||
294 | static void unmask_IO_APIC_irq (unsigned int irq) | 296 | static void unmask_IO_APIC_irq(unsigned int irq) |
295 | { | 297 | { |
296 | unsigned long flags; | 298 | unsigned long flags; |
297 | 299 | ||
@@ -303,7 +305,7 @@ static void unmask_IO_APIC_irq (unsigned int irq)
303 | static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) | 305 | static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) |
304 | { | 306 | { |
305 | struct IO_APIC_route_entry entry; | 307 | struct IO_APIC_route_entry entry; |
306 | 308 | ||
307 | /* Check delivery_mode to be sure we're not clearing an SMI pin */ | 309 | /* Check delivery_mode to be sure we're not clearing an SMI pin */ |
308 | entry = ioapic_read_entry(apic, pin); | 310 | entry = ioapic_read_entry(apic, pin); |
309 | if (entry.delivery_mode == dest_SMI) | 311 | if (entry.delivery_mode == dest_SMI) |
@@ -315,7 +317,7 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
315 | ioapic_mask_entry(apic, pin); | 317 | ioapic_mask_entry(apic, pin); |
316 | } | 318 | } |
317 | 319 | ||
318 | static void clear_IO_APIC (void) | 320 | static void clear_IO_APIC(void) |
319 | { | 321 | { |
320 | int apic, pin; | 322 | int apic, pin; |
321 | 323 | ||
@@ -332,7 +334,7 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
332 | struct irq_pin_list *entry = irq_2_pin + irq; | 334 | struct irq_pin_list *entry = irq_2_pin + irq; |
333 | unsigned int apicid_value; | 335 | unsigned int apicid_value; |
334 | cpumask_t tmp; | 336 | cpumask_t tmp; |
335 | 337 | ||
336 | cpus_and(tmp, cpumask, cpu_online_map); | 338 | cpus_and(tmp, cpumask, cpu_online_map); |
337 | if (cpus_empty(tmp)) | 339 | if (cpus_empty(tmp)) |
338 | tmp = TARGET_CPUS; | 340 | tmp = TARGET_CPUS; |
@@ -361,7 +363,7 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
361 | # include <linux/kernel_stat.h> /* kstat */ | 363 | # include <linux/kernel_stat.h> /* kstat */ |
362 | # include <linux/slab.h> /* kmalloc() */ | 364 | # include <linux/slab.h> /* kmalloc() */ |
363 | # include <linux/timer.h> | 365 | # include <linux/timer.h> |
364 | 366 | ||
365 | #define IRQBALANCE_CHECK_ARCH -999 | 367 | #define IRQBALANCE_CHECK_ARCH -999 |
366 | #define MAX_BALANCED_IRQ_INTERVAL (5*HZ) | 368 | #define MAX_BALANCED_IRQ_INTERVAL (5*HZ) |
367 | #define MIN_BALANCED_IRQ_INTERVAL (HZ/2) | 369 | #define MIN_BALANCED_IRQ_INTERVAL (HZ/2) |
@@ -373,14 +375,14 @@ static int physical_balance __read_mostly;
373 | static long balanced_irq_interval __read_mostly = MAX_BALANCED_IRQ_INTERVAL; | 375 | static long balanced_irq_interval __read_mostly = MAX_BALANCED_IRQ_INTERVAL; |
374 | 376 | ||
375 | static struct irq_cpu_info { | 377 | static struct irq_cpu_info { |
376 | unsigned long * last_irq; | 378 | unsigned long *last_irq; |
377 | unsigned long * irq_delta; | 379 | unsigned long *irq_delta; |
378 | unsigned long irq; | 380 | unsigned long irq; |
379 | } irq_cpu_data[NR_CPUS]; | 381 | } irq_cpu_data[NR_CPUS]; |
380 | 382 | ||
381 | #define CPU_IRQ(cpu) (irq_cpu_data[cpu].irq) | 383 | #define CPU_IRQ(cpu) (irq_cpu_data[cpu].irq) |
382 | #define LAST_CPU_IRQ(cpu,irq) (irq_cpu_data[cpu].last_irq[irq]) | 384 | #define LAST_CPU_IRQ(cpu, irq) (irq_cpu_data[cpu].last_irq[irq]) |
383 | #define IRQ_DELTA(cpu,irq) (irq_cpu_data[cpu].irq_delta[irq]) | 385 | #define IRQ_DELTA(cpu, irq) (irq_cpu_data[cpu].irq_delta[irq]) |
384 | 386 | ||
385 | #define IDLE_ENOUGH(cpu,now) \ | 387 | #define IDLE_ENOUGH(cpu,now) \ |
386 | (idle_cpu(cpu) && ((now) - per_cpu(irq_stat, (cpu)).idle_timestamp > 1)) | 388 | (idle_cpu(cpu) && ((now) - per_cpu(irq_stat, (cpu)).idle_timestamp > 1)) |
@@ -419,8 +421,8 @@ inside:
419 | if (cpu == -1) | 421 | if (cpu == -1) |
420 | cpu = NR_CPUS-1; | 422 | cpu = NR_CPUS-1; |
421 | } | 423 | } |
422 | } while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu,allowed_mask) || | 424 | } while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu, allowed_mask) || |
423 | (search_idle && !IDLE_ENOUGH(cpu,now))); | 425 | (search_idle && !IDLE_ENOUGH(cpu, now))); |
424 | 426 | ||
425 | return cpu; | 427 | return cpu; |
426 | } | 428 | } |
@@ -430,15 +432,14 @@ static inline void balance_irq(int cpu, int irq)
430 | unsigned long now = jiffies; | 432 | unsigned long now = jiffies; |
431 | cpumask_t allowed_mask; | 433 | cpumask_t allowed_mask; |
432 | unsigned int new_cpu; | 434 | unsigned int new_cpu; |
433 | 435 | ||
434 | if (irqbalance_disabled) | 436 | if (irqbalance_disabled) |
435 | return; | 437 | return; |
436 | 438 | ||
437 | cpus_and(allowed_mask, cpu_online_map, balance_irq_affinity[irq]); | 439 | cpus_and(allowed_mask, cpu_online_map, balance_irq_affinity[irq]); |
438 | new_cpu = move(cpu, allowed_mask, now, 1); | 440 | new_cpu = move(cpu, allowed_mask, now, 1); |
439 | if (cpu != new_cpu) { | 441 | if (cpu != new_cpu) |
440 | set_pending_irq(irq, cpumask_of_cpu(new_cpu)); | 442 | set_pending_irq(irq, cpumask_of_cpu(new_cpu)); |
441 | } | ||
442 | } | 443 | } |
443 | 444 | ||
444 | static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold) | 445 | static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold) |
@@ -450,14 +451,14 @@ static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
450 | if (!irq_desc[j].action) | 451 | if (!irq_desc[j].action) |
451 | continue; | 452 | continue; |
452 | /* Is it a significant load ? */ | 453 | /* Is it a significant load ? */ |
453 | if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i),j) < | 454 | if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i), j) < |
454 | useful_load_threshold) | 455 | useful_load_threshold) |
455 | continue; | 456 | continue; |
456 | balance_irq(i, j); | 457 | balance_irq(i, j); |
457 | } | 458 | } |
458 | } | 459 | } |
459 | balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL, | 460 | balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL, |
460 | balanced_irq_interval - BALANCED_IRQ_LESS_DELTA); | 461 | balanced_irq_interval - BALANCED_IRQ_LESS_DELTA); |
461 | return; | 462 | return; |
462 | } | 463 | } |
463 | 464 | ||
@@ -486,22 +487,22 @@ static void do_irq_balance(void)
486 | /* Is this an active IRQ or balancing disabled ? */ | 487 | /* Is this an active IRQ or balancing disabled ? */ |
487 | if (!irq_desc[j].action || irq_balancing_disabled(j)) | 488 | if (!irq_desc[j].action || irq_balancing_disabled(j)) |
488 | continue; | 489 | continue; |
489 | if ( package_index == i ) | 490 | if (package_index == i) |
490 | IRQ_DELTA(package_index,j) = 0; | 491 | IRQ_DELTA(package_index, j) = 0; |
491 | /* Determine the total count per processor per IRQ */ | 492 | /* Determine the total count per processor per IRQ */ |
492 | value_now = (unsigned long) kstat_cpu(i).irqs[j]; | 493 | value_now = (unsigned long) kstat_cpu(i).irqs[j]; |
493 | 494 | ||
494 | /* Determine the activity per processor per IRQ */ | 495 | /* Determine the activity per processor per IRQ */ |
495 | delta = value_now - LAST_CPU_IRQ(i,j); | 496 | delta = value_now - LAST_CPU_IRQ(i, j); |
496 | 497 | ||
497 | /* Update last_cpu_irq[][] for the next time */ | 498 | /* Update last_cpu_irq[][] for the next time */ |
498 | LAST_CPU_IRQ(i,j) = value_now; | 499 | LAST_CPU_IRQ(i, j) = value_now; |
499 | 500 | ||
500 | /* Ignore IRQs whose rate is less than the clock */ | 501 | /* Ignore IRQs whose rate is less than the clock */ |
501 | if (delta < useful_load_threshold) | 502 | if (delta < useful_load_threshold) |
502 | continue; | 503 | continue; |
503 | /* update the load for the processor or package total */ | 504 | /* update the load for the processor or package total */ |
504 | IRQ_DELTA(package_index,j) += delta; | 505 | IRQ_DELTA(package_index, j) += delta; |
505 | 506 | ||
506 | /* Keep track of the higher numbered sibling as well */ | 507 | /* Keep track of the higher numbered sibling as well */ |
507 | if (i != package_index) | 508 | if (i != package_index) |
@@ -527,7 +528,8 @@ static void do_irq_balance(void)
527 | max_cpu_irq = ULONG_MAX; | 528 | max_cpu_irq = ULONG_MAX; |
528 | 529 | ||
529 | tryanothercpu: | 530 | tryanothercpu: |
530 | /* Look for heaviest loaded processor. | 531 | /* |
532 | * Look for heaviest loaded processor. | ||
531 | * We may come back to get the next heaviest loaded processor. | 533 | * We may come back to get the next heaviest loaded processor. |
532 | * Skip processors with trivial loads. | 534 | * Skip processors with trivial loads. |
533 | */ | 535 | */ |
@@ -536,7 +538,7 @@ tryanothercpu:
536 | for_each_online_cpu(i) { | 538 | for_each_online_cpu(i) { |
537 | if (i != CPU_TO_PACKAGEINDEX(i)) | 539 | if (i != CPU_TO_PACKAGEINDEX(i)) |
538 | continue; | 540 | continue; |
539 | if (max_cpu_irq <= CPU_IRQ(i)) | 541 | if (max_cpu_irq <= CPU_IRQ(i)) |
540 | continue; | 542 | continue; |
541 | if (tmp_cpu_irq < CPU_IRQ(i)) { | 543 | if (tmp_cpu_irq < CPU_IRQ(i)) { |
542 | tmp_cpu_irq = CPU_IRQ(i); | 544 | tmp_cpu_irq = CPU_IRQ(i); |
@@ -545,8 +547,9 @@ tryanothercpu:
545 | } | 547 | } |
546 | 548 | ||
547 | if (tmp_loaded == -1) { | 549 | if (tmp_loaded == -1) { |
548 | /* In the case of small number of heavy interrupt sources, | 550 | /* |
549 | * loading some of the cpus too much. We use Ingo's original | 551 | * In the case of small number of heavy interrupt sources, |
552 | * loading some of the cpus too much. We use Ingo's original | ||
550 | * approach to rotate them around. | 553 | * approach to rotate them around. |
551 | */ | 554 | */ |
552 | if (!first_attempt && imbalance >= useful_load_threshold) { | 555 | if (!first_attempt && imbalance >= useful_load_threshold) { |
@@ -555,13 +558,14 @@ tryanothercpu:
555 | } | 558 | } |
556 | goto not_worth_the_effort; | 559 | goto not_worth_the_effort; |
557 | } | 560 | } |
558 | 561 | ||
559 | first_attempt = 0; /* heaviest search */ | 562 | first_attempt = 0; /* heaviest search */ |
560 | max_cpu_irq = tmp_cpu_irq; /* load */ | 563 | max_cpu_irq = tmp_cpu_irq; /* load */ |
561 | max_loaded = tmp_loaded; /* processor */ | 564 | max_loaded = tmp_loaded; /* processor */ |
562 | imbalance = (max_cpu_irq - min_cpu_irq) / 2; | 565 | imbalance = (max_cpu_irq - min_cpu_irq) / 2; |
563 | 566 | ||
564 | /* if imbalance is less than approx 10% of max load, then | 567 | /* |
568 | * if imbalance is less than approx 10% of max load, then | ||
565 | * observe diminishing returns action. - quit | 569 | * observe diminishing returns action. - quit |
566 | */ | 570 | */ |
567 | if (imbalance < (max_cpu_irq >> 3)) | 571 | if (imbalance < (max_cpu_irq >> 3)) |
@@ -577,26 +581,25 @@ tryanotherirq:
577 | /* Is this an active IRQ? */ | 581 | /* Is this an active IRQ? */ |
578 | if (!irq_desc[j].action) | 582 | if (!irq_desc[j].action) |
579 | continue; | 583 | continue; |
580 | if (imbalance <= IRQ_DELTA(max_loaded,j)) | 584 | if (imbalance <= IRQ_DELTA(max_loaded, j)) |
581 | continue; | 585 | continue; |
582 | /* Try to find the IRQ that is closest to the imbalance | 586 | /* Try to find the IRQ that is closest to the imbalance |
583 | * without going over. | 587 | * without going over. |
584 | */ | 588 | */ |
585 | if (move_this_load < IRQ_DELTA(max_loaded,j)) { | 589 | if (move_this_load < IRQ_DELTA(max_loaded, j)) { |
586 | move_this_load = IRQ_DELTA(max_loaded,j); | 590 | move_this_load = IRQ_DELTA(max_loaded, j); |
587 | selected_irq = j; | 591 | selected_irq = j; |
588 | } | 592 | } |
589 | } | 593 | } |
590 | if (selected_irq == -1) { | 594 | if (selected_irq == -1) |
591 | goto tryanothercpu; | 595 | goto tryanothercpu; |
592 | } | ||
593 | 596 | ||
594 | imbalance = move_this_load; | 597 | imbalance = move_this_load; |
595 | 598 | ||
596 | /* For physical_balance case, we accumulated both load | 599 | /* For physical_balance case, we accumulated both load |
597 | * values in the one of the siblings cpu_irq[], | 600 | * values in the one of the siblings cpu_irq[], |
598 | * to use the same code for physical and logical processors | 601 | * to use the same code for physical and logical processors |
599 | * as much as possible. | 602 | * as much as possible. |
600 | * | 603 | * |
601 | * NOTE: the cpu_irq[] array holds the sum of the load for | 604 | * NOTE: the cpu_irq[] array holds the sum of the load for |
602 | * sibling A and sibling B in the slot for the lowest numbered | 605 | * sibling A and sibling B in the slot for the lowest numbered |
@@ -625,11 +628,11 @@ tryanotherirq:
625 | /* mark for change destination */ | 628 | /* mark for change destination */ |
626 | set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded)); | 629 | set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded)); |
627 | 630 | ||
628 | /* Since we made a change, come back sooner to | 631 | /* Since we made a change, come back sooner to |
629 | * check for more variation. | 632 | * check for more variation. |
630 | */ | 633 | */ |
631 | balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL, | 634 | balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL, |
632 | balanced_irq_interval - BALANCED_IRQ_LESS_DELTA); | 635 | balanced_irq_interval - BALANCED_IRQ_LESS_DELTA); |
633 | return; | 636 | return; |
634 | } | 637 | } |
635 | goto tryanotherirq; | 638 | goto tryanotherirq; |
@@ -640,7 +643,7 @@ not_worth_the_effort:
640 | * upward | 643 | * upward |
641 | */ | 644 | */ |
642 | balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL, | 645 | balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL, |
643 | balanced_irq_interval + BALANCED_IRQ_MORE_DELTA); | 646 | balanced_irq_interval + BALANCED_IRQ_MORE_DELTA); |
644 | return; | 647 | return; |
645 | } | 648 | } |
646 | 649 | ||
@@ -679,13 +682,13 @@ static int __init balanced_irq_init(void)
679 | cpumask_t tmp; | 682 | cpumask_t tmp; |
680 | 683 | ||
681 | cpus_shift_right(tmp, cpu_online_map, 2); | 684 | cpus_shift_right(tmp, cpu_online_map, 2); |
682 | c = &boot_cpu_data; | 685 | c = &boot_cpu_data; |
683 | /* When not overwritten by the command line ask subarchitecture. */ | 686 | /* When not overwritten by the command line ask subarchitecture. */ |
684 | if (irqbalance_disabled == IRQBALANCE_CHECK_ARCH) | 687 | if (irqbalance_disabled == IRQBALANCE_CHECK_ARCH) |
685 | irqbalance_disabled = NO_BALANCE_IRQ; | 688 | irqbalance_disabled = NO_BALANCE_IRQ; |
686 | if (irqbalance_disabled) | 689 | if (irqbalance_disabled) |
687 | return 0; | 690 | return 0; |
688 | 691 | ||
689 | /* disable irqbalance completely if there is only one processor online */ | 692 | /* disable irqbalance completely if there is only one processor online */ |
690 | if (num_online_cpus() < 2) { | 693 | if (num_online_cpus() < 2) { |
691 | irqbalance_disabled = 1; | 694 | irqbalance_disabled = 1; |
@@ -699,16 +702,14 @@ static int __init balanced_irq_init(void)
699 | physical_balance = 1; | 702 | physical_balance = 1; |
700 | 703 | ||
701 | for_each_online_cpu(i) { | 704 | for_each_online_cpu(i) { |
702 | irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL); | 705 | irq_cpu_data[i].irq_delta = kzalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL); |
703 | irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL); | 706 | irq_cpu_data[i].last_irq = kzalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL); |
704 | if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) { | 707 | if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) { |
705 | printk(KERN_ERR "balanced_irq_init: out of memory"); | 708 | printk(KERN_ERR "balanced_irq_init: out of memory"); |
706 | goto failed; | 709 | goto failed; |
707 | } | 710 | } |
708 | memset(irq_cpu_data[i].irq_delta,0,sizeof(unsigned long) * NR_IRQS); | ||
709 | memset(irq_cpu_data[i].last_irq,0,sizeof(unsigned long) * NR_IRQS); | ||
710 | } | 711 | } |
711 | 712 | ||
712 | printk(KERN_INFO "Starting balanced_irq\n"); | 713 | printk(KERN_INFO "Starting balanced_irq\n"); |
713 | if (!IS_ERR(kthread_run(balanced_irq, NULL, "kirqd"))) | 714 | if (!IS_ERR(kthread_run(balanced_irq, NULL, "kirqd"))) |
714 | return 0; | 715 | return 0; |
@@ -843,7 +844,7 @@ static int __init find_isa_irq_apic(int irq, int type)
843 | } | 844 | } |
844 | if (i < mp_irq_entries) { | 845 | if (i < mp_irq_entries) { |
845 | int apic; | 846 | int apic; |
846 | for(apic = 0; apic < nr_ioapics; apic++) { | 847 | for (apic = 0; apic < nr_ioapics; apic++) { |
847 | if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic) | 848 | if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic) |
848 | return apic; | 849 | return apic; |
849 | } | 850 | } |
@@ -880,7 +881,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
880 | !mp_irqs[i].mpc_irqtype && | 881 | !mp_irqs[i].mpc_irqtype && |
881 | (bus == lbus) && | 882 | (bus == lbus) && |
882 | (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) { | 883 | (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) { |
883 | int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq); | 884 | int irq = pin_2_irq(i, apic, mp_irqs[i].mpc_dstirq); |
884 | 885 | ||
885 | if (!(apic || IO_APIC_IRQ(irq))) | 886 | if (!(apic || IO_APIC_IRQ(irq))) |
886 | continue; | 887 | continue; |
@@ -900,7 +901,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
900 | EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector); | 901 | EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector); |
901 | 902 | ||
902 | /* | 903 | /* |
903 | * This function currently is only a helper for the i386 smp boot process where | 904 | * This function currently is only a helper for the i386 smp boot process where |
904 | * we need to reprogram the ioredtbls to cater for the cpus which have come online | 905 | * we need to reprogram the ioredtbls to cater for the cpus which have come online |
905 | * so mask in all cases should simply be TARGET_CPUS | 906 | * so mask in all cases should simply be TARGET_CPUS |
906 | */ | 907 | */ |
@@ -975,37 +976,36 @@ static int MPBIOS_polarity(int idx)
975 | /* | 976 | /* |
976 | * Determine IRQ line polarity (high active or low active): | 977 | * Determine IRQ line polarity (high active or low active): |
977 | */ | 978 | */ |
978 | switch (mp_irqs[idx].mpc_irqflag & 3) | 979 | switch (mp_irqs[idx].mpc_irqflag & 3) { |
980 | case 0: /* conforms, ie. bus-type dependent polarity */ | ||
979 | { | 981 | { |
980 | case 0: /* conforms, ie. bus-type dependent polarity */ | 982 | polarity = test_bit(bus, mp_bus_not_pci)? |
981 | { | 983 | default_ISA_polarity(idx): |
982 | polarity = test_bit(bus, mp_bus_not_pci)? | 984 | default_PCI_polarity(idx); |
983 | default_ISA_polarity(idx): | 985 | break; |
984 | default_PCI_polarity(idx); | 986 | } |
985 | break; | 987 | case 1: /* high active */ |
986 | } | 988 | { |
987 | case 1: /* high active */ | 989 | polarity = 0; |
988 | { | 990 | break; |
989 | polarity = 0; | 991 | } |
990 | break; | 992 | case 2: /* reserved */ |
991 | } | 993 | { |
992 | case 2: /* reserved */ | 994 | printk(KERN_WARNING "broken BIOS!!\n"); |
993 | { | 995 | polarity = 1; |
994 | printk(KERN_WARNING "broken BIOS!!\n"); | 996 | break; |
995 | polarity = 1; | 997 | } |
996 | break; | 998 | case 3: /* low active */ |
997 | } | 999 | { |
998 | case 3: /* low active */ | 1000 | polarity = 1; |
999 | { | 1001 | break; |
1000 | polarity = 1; | 1002 | } |
1001 | break; | 1003 | default: /* invalid */ |
1002 | } | 1004 | { |
1003 | default: /* invalid */ | 1005 | printk(KERN_WARNING "broken BIOS!!\n"); |
1004 | { | 1006 | polarity = 1; |
1005 | printk(KERN_WARNING "broken BIOS!!\n"); | 1007 | break; |
1006 | polarity = 1; | 1008 | } |
1007 | break; | ||
1008 | } | ||
1009 | } | 1009 | } |
1010 | return polarity; | 1010 | return polarity; |
1011 | } | 1011 | } |
@@ -1018,69 +1018,67 @@ static int MPBIOS_trigger(int idx)
1018 | /* | 1018 | /* |
1019 | * Determine IRQ trigger mode (edge or level sensitive): | 1019 | * Determine IRQ trigger mode (edge or level sensitive): |
1020 | */ | 1020 | */ |
1021 | switch ((mp_irqs[idx].mpc_irqflag>>2) & 3) | 1021 | switch ((mp_irqs[idx].mpc_irqflag>>2) & 3) { |
1022 | case 0: /* conforms, ie. bus-type dependent */ | ||
1022 | { | 1023 | { |
1023 | case 0: /* conforms, ie. bus-type dependent */ | 1024 | trigger = test_bit(bus, mp_bus_not_pci)? |
1024 | { | 1025 | default_ISA_trigger(idx): |
1025 | trigger = test_bit(bus, mp_bus_not_pci)? | 1026 | default_PCI_trigger(idx); |
1026 | default_ISA_trigger(idx): | ||
1027 | default_PCI_trigger(idx); | ||
1028 | #if defined(CONFIG_EISA) || defined(CONFIG_MCA) | 1027 | #if defined(CONFIG_EISA) || defined(CONFIG_MCA) |
1029 | switch (mp_bus_id_to_type[bus]) | 1028 | switch (mp_bus_id_to_type[bus]) { |
1030 | { | 1029 | case MP_BUS_ISA: /* ISA pin */ |
1031 | case MP_BUS_ISA: /* ISA pin */ | 1030 | { |
1032 | { | 1031 | /* set before the switch */ |
1033 | /* set before the switch */ | ||
1034 | break; | ||
1035 | } | ||
1036 | case MP_BUS_EISA: /* EISA pin */ | ||
1037 | { | ||
1038 | trigger = default_EISA_trigger(idx); | ||
1039 | break; | ||
1040 | } | ||
1041 | case MP_BUS_PCI: /* PCI pin */ | ||
1042 | { | ||
1043 | /* set before the switch */ | ||
1044 | break; | ||
1045 | } | ||
1046 | case MP_BUS_MCA: /* MCA pin */ | ||
1047 | { | ||
1048 | trigger = default_MCA_trigger(idx); | ||
1049 | break; | ||
1050 | } | ||
1051 | default: | ||
1052 | { | ||
1053 | printk(KERN_WARNING "broken BIOS!!\n"); | ||
1054 | trigger = 1; | ||
1055 | break; | ||
1056 | } | ||
1057 | } | ||
1058 | #endif | ||
1059 | break; | 1032 | break; |
1060 | } | 1033 | } |
1061 | case 1: /* edge */ | 1034 | case MP_BUS_EISA: /* EISA pin */ |
1062 | { | 1035 | { |
1063 | trigger = 0; | 1036 | trigger = default_EISA_trigger(idx); |
1064 | break; | 1037 | break; |
1065 | } | 1038 | } |
1066 | case 2: /* reserved */ | 1039 | case MP_BUS_PCI: /* PCI pin */ |
1067 | { | 1040 | { |
1068 | printk(KERN_WARNING "broken BIOS!!\n"); | 1041 | /* set before the switch */ |
1069 | trigger = 1; | ||
1070 | break; | 1042 | break; |
1071 | } | 1043 | } |
1072 | case 3: /* level */ | 1044 | case MP_BUS_MCA: /* MCA pin */ |
1073 | { | 1045 | { |
1074 | trigger = 1; | 1046 | trigger = default_MCA_trigger(idx); |
1075 | break; | 1047 | break; |
1076 | } | 1048 | } |
1077 | default: /* invalid */ | 1049 | default: |
1078 | { | 1050 | { |
1079 | printk(KERN_WARNING "broken BIOS!!\n"); | 1051 | printk(KERN_WARNING "broken BIOS!!\n"); |
1080 | trigger = 0; | 1052 | trigger = 1; |
1081 | break; | 1053 | break; |
1082 | } | 1054 | } |
1083 | } | 1055 | } |
1056 | #endif | ||
1057 | break; | ||
1058 | } | ||
1059 | case 1: /* edge */ | ||
1060 | { | ||
1061 | trigger = 0; | ||
1062 | break; | ||
1063 | } | ||
1064 | case 2: /* reserved */ | ||
1065 | { | ||
1066 | printk(KERN_WARNING "broken BIOS!!\n"); | ||
1067 | trigger = 1; | ||
1068 | break; | ||
1069 | } | ||
1070 | case 3: /* level */ | ||
1071 | { | ||
1072 | trigger = 1; | ||
1073 | break; | ||
1074 | } | ||
1075 | default: /* invalid */ | ||
1076 | { | ||
1077 | printk(KERN_WARNING "broken BIOS!!\n"); | ||
1078 | trigger = 0; | ||
1079 | break; | ||
1080 | } | ||
1081 | } | ||
1084 | return trigger; | 1082 | return trigger; |
1085 | } | 1083 | } |
1086 | 1084 | ||
@@ -1148,8 +1146,8 @@ static inline int IO_APIC_irq_trigger(int irq)
1148 | 1146 | ||
1149 | for (apic = 0; apic < nr_ioapics; apic++) { | 1147 | for (apic = 0; apic < nr_ioapics; apic++) { |
1150 | for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { | 1148 | for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { |
1151 | idx = find_irq_entry(apic,pin,mp_INT); | 1149 | idx = find_irq_entry(apic, pin, mp_INT); |
1152 | if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin))) | 1150 | if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin))) |
1153 | return irq_trigger(idx); | 1151 | return irq_trigger(idx); |
1154 | } | 1152 | } |
1155 | } | 1153 | } |
@@ -1164,7 +1162,7 @@ static u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 }
1164 | 1162 | ||
1165 | static int __assign_irq_vector(int irq) | 1163 | static int __assign_irq_vector(int irq) |
1166 | { | 1164 | { |
1167 | static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0; | 1165 | static int current_vector = FIRST_DEVICE_VECTOR, current_offset; |
1168 | int vector, offset; | 1166 | int vector, offset; |
1169 | 1167 | ||
1170 | BUG_ON((unsigned)irq >= NR_IRQ_VECTORS); | 1168 | BUG_ON((unsigned)irq >= NR_IRQ_VECTORS); |
@@ -1237,15 +1235,15 @@ static void __init setup_IO_APIC_irqs(void)
1237 | /* | 1235 | /* |
1238 | * add it to the IO-APIC irq-routing table: | 1236 | * add it to the IO-APIC irq-routing table: |
1239 | */ | 1237 | */ |
1240 | memset(&entry,0,sizeof(entry)); | 1238 | memset(&entry, 0, sizeof(entry)); |
1241 | 1239 | ||
1242 | entry.delivery_mode = INT_DELIVERY_MODE; | 1240 | entry.delivery_mode = INT_DELIVERY_MODE; |
1243 | entry.dest_mode = INT_DEST_MODE; | 1241 | entry.dest_mode = INT_DEST_MODE; |
1244 | entry.mask = 0; /* enable IRQ */ | 1242 | entry.mask = 0; /* enable IRQ */ |
1245 | entry.dest.logical.logical_dest = | 1243 | entry.dest.logical.logical_dest = |
1246 | cpu_mask_to_apicid(TARGET_CPUS); | 1244 | cpu_mask_to_apicid(TARGET_CPUS); |
1247 | 1245 | ||
1248 | idx = find_irq_entry(apic,pin,mp_INT); | 1246 | idx = find_irq_entry(apic, pin, mp_INT); |
1249 | if (idx == -1) { | 1247 | if (idx == -1) { |
1250 | if (first_notcon) { | 1248 | if (first_notcon) { |
1251 | apic_printk(APIC_VERBOSE, KERN_DEBUG | 1249 | apic_printk(APIC_VERBOSE, KERN_DEBUG |
@@ -1289,7 +1287,7 @@ static void __init setup_IO_APIC_irqs(void)
1289 | vector = assign_irq_vector(irq); | 1287 | vector = assign_irq_vector(irq); |
1290 | entry.vector = vector; | 1288 | entry.vector = vector; |
1291 | ioapic_register_intr(irq, vector, IOAPIC_AUTO); | 1289 | ioapic_register_intr(irq, vector, IOAPIC_AUTO); |
1292 | 1290 | ||
1293 | if (!apic && (irq < 16)) | 1291 | if (!apic && (irq < 16)) |
1294 | disable_8259A_irq(irq); | 1292 | disable_8259A_irq(irq); |
1295 | } | 1293 | } |
@@ -1302,25 +1300,21 @@ static void __init setup_IO_APIC_irqs(void)
1302 | } | 1300 | } |
1303 | 1301 | ||
1304 | /* | 1302 | /* |
1305 | * Set up the 8259A-master output pin: | 1303 | * Set up the timer pin, possibly with the 8259A-master behind. |
1306 | */ | 1304 | */ |
1307 | static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector) | 1305 | static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin, |
1306 | int vector) | ||
1308 | { | 1307 | { |
1309 | struct IO_APIC_route_entry entry; | 1308 | struct IO_APIC_route_entry entry; |
1310 | 1309 | ||
1311 | memset(&entry,0,sizeof(entry)); | 1310 | memset(&entry, 0, sizeof(entry)); |
1312 | |||
1313 | disable_8259A_irq(0); | ||
1314 | |||
1315 | /* mask LVT0 */ | ||
1316 | apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); | ||
1317 | 1311 | ||
1318 | /* | 1312 | /* |
1319 | * We use logical delivery to get the timer IRQ | 1313 | * We use logical delivery to get the timer IRQ |
1320 | * to the first CPU. | 1314 | * to the first CPU. |
1321 | */ | 1315 | */ |
1322 | entry.dest_mode = INT_DEST_MODE; | 1316 | entry.dest_mode = INT_DEST_MODE; |
1323 | entry.mask = 0; /* unmask IRQ now */ | 1317 | entry.mask = 1; /* mask IRQ now */ |
1324 | entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS); | 1318 | entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS); |
1325 | entry.delivery_mode = INT_DELIVERY_MODE; | 1319 | entry.delivery_mode = INT_DELIVERY_MODE; |
1326 | entry.polarity = 0; | 1320 | entry.polarity = 0; |
@@ -1329,17 +1323,14 @@ static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, in
1329 | 1323 | ||
1330 | /* | 1324 | /* |
1331 | * The timer IRQ doesn't have to know that behind the | 1325 | * The timer IRQ doesn't have to know that behind the |
1332 | * scene we have a 8259A-master in AEOI mode ... | 1326 | * scene we may have a 8259A-master in AEOI mode ... |
1333 | */ | 1327 | */ |
1334 | irq_desc[0].chip = &ioapic_chip; | 1328 | ioapic_register_intr(0, vector, IOAPIC_EDGE); |
1335 | set_irq_handler(0, handle_edge_irq); | ||
1336 | 1329 | ||
1337 | /* | 1330 | /* |
1338 | * Add it to the IO-APIC irq-routing table: | 1331 | * Add it to the IO-APIC irq-routing table: |
1339 | */ | 1332 | */ |
1340 | ioapic_write_entry(apic, pin, entry); | 1333 | ioapic_write_entry(apic, pin, entry); |
1341 | |||
1342 | enable_8259A_irq(0); | ||
1343 | } | 1334 | } |
1344 | 1335 | ||
1345 | void __init print_IO_APIC(void) | 1336 | void __init print_IO_APIC(void) |
@@ -1354,7 +1345,7 @@ void __init print_IO_APIC(void)
1354 | if (apic_verbosity == APIC_QUIET) | 1345 | if (apic_verbosity == APIC_QUIET) |
1355 | return; | 1346 | return; |
1356 | 1347 | ||
1357 | printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); | 1348 | printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); |
1358 | for (i = 0; i < nr_ioapics; i++) | 1349 | for (i = 0; i < nr_ioapics; i++) |
1359 | printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n", | 1350 | printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n", |
1360 | mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]); | 1351 | mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]); |
@@ -1459,7 +1450,7 @@ void __init print_IO_APIC(void)
1459 | 1450 | ||
1460 | #if 0 | 1451 | #if 0 |
1461 | 1452 | ||
1462 | static void print_APIC_bitfield (int base) | 1453 | static void print_APIC_bitfield(int base) |
1463 | { | 1454 | { |
1464 | unsigned int v; | 1455 | unsigned int v; |
1465 | int i, j; | 1456 | int i, j; |
@@ -1480,7 +1471,7 @@ static void print_APIC_bitfield (int base)
1480 | } | 1471 | } |
1481 | } | 1472 | } |
1482 | 1473 | ||
1483 | void /*__init*/ print_local_APIC(void * dummy) | 1474 | void /*__init*/ print_local_APIC(void *dummy) |
1484 | { | 1475 | { |
1485 | unsigned int v, ver, maxlvt; | 1476 | unsigned int v, ver, maxlvt; |
1486 | 1477 | ||
@@ -1489,6 +1480,7 @@ void /*__init*/ print_local_APIC(void * dummy)
1489 | 1480 | ||
1490 | printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n", | 1481 | printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n", |
1491 | smp_processor_id(), hard_smp_processor_id()); | 1482 | smp_processor_id(), hard_smp_processor_id()); |
1483 | v = apic_read(APIC_ID); | ||
1492 | printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, | 1484 | printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, |
1493 | GET_APIC_ID(read_apic_id())); | 1485 | GET_APIC_ID(read_apic_id())); |
1494 | v = apic_read(APIC_LVR); | 1486 | v = apic_read(APIC_LVR); |
@@ -1563,7 +1555,7 @@ void /*__init*/ print_local_APIC(void * dummy)
1563 | printk("\n"); | 1555 | printk("\n"); |
1564 | } | 1556 | } |
1565 | 1557 | ||
1566 | void print_all_local_APICs (void) | 1558 | void print_all_local_APICs(void) |
1567 | { | 1559 | { |
1568 | on_each_cpu(print_local_APIC, NULL, 1, 1); | 1560 | on_each_cpu(print_local_APIC, NULL, 1, 1); |
1569 | } | 1561 | } |
@@ -1586,11 +1578,11 @@ void /*__init*/ print_PIC(void)
1586 | v = inb(0xa0) << 8 | inb(0x20); | 1578 | v = inb(0xa0) << 8 | inb(0x20); |
1587 | printk(KERN_DEBUG "... PIC IRR: %04x\n", v); | 1579 | printk(KERN_DEBUG "... PIC IRR: %04x\n", v); |
1588 | 1580 | ||
1589 | outb(0x0b,0xa0); | 1581 | outb(0x0b, 0xa0); |
1590 | outb(0x0b,0x20); | 1582 | outb(0x0b, 0x20); |
1591 | v = inb(0xa0) << 8 | inb(0x20); | 1583 | v = inb(0xa0) << 8 | inb(0x20); |
1592 | outb(0x0a,0xa0); | 1584 | outb(0x0a, 0xa0); |
1593 | outb(0x0a,0x20); | 1585 | outb(0x0a, 0x20); |
1594 | 1586 | ||
1595 | spin_unlock_irqrestore(&i8259A_lock, flags); | 1587 | spin_unlock_irqrestore(&i8259A_lock, flags); |
1596 | 1588 | ||
@@ -1626,7 +1618,7 @@ static void __init enable_IO_APIC(void)
1626 | spin_unlock_irqrestore(&ioapic_lock, flags); | 1618 | spin_unlock_irqrestore(&ioapic_lock, flags); |
1627 | nr_ioapic_registers[apic] = reg_01.bits.entries+1; | 1619 | nr_ioapic_registers[apic] = reg_01.bits.entries+1; |
1628 | } | 1620 | } |
1629 | for(apic = 0; apic < nr_ioapics; apic++) { | 1621 | for (apic = 0; apic < nr_ioapics; apic++) { |
1630 | int pin; | 1622 | int pin; |
1631 | /* See if any of the pins is in ExtINT mode */ | 1623 | /* See if any of the pins is in ExtINT mode */ |
1632 | for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { | 1624 | for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { |
@@ -1748,7 +1740,7 @@ static void __init setup_ioapic_ids_from_mpc(void)
1748 | spin_lock_irqsave(&ioapic_lock, flags); | 1740 | spin_lock_irqsave(&ioapic_lock, flags); |
1749 | reg_00.raw = io_apic_read(apic, 0); | 1741 | reg_00.raw = io_apic_read(apic, 0); |
1750 | spin_unlock_irqrestore(&ioapic_lock, flags); | 1742 | spin_unlock_irqrestore(&ioapic_lock, flags); |
1751 | 1743 | ||
1752 | old_id = mp_ioapics[apic].mpc_apicid; | 1744 | old_id = mp_ioapics[apic].mpc_apicid; |
1753 | 1745 | ||
1754 | if (mp_ioapics[apic].mpc_apicid >= get_physical_broadcast()) { | 1746 | if (mp_ioapics[apic].mpc_apicid >= get_physical_broadcast()) { |
@@ -1800,7 +1792,7 @@ static void __init setup_ioapic_ids_from_mpc(void)
1800 | /* | 1792 | /* |
1801 | * Read the right value from the MPC table and | 1793 | * Read the right value from the MPC table and |
1802 | * write it into the ID register. | 1794 | * write it into the ID register. |
1803 | */ | 1795 | */ |
1804 | apic_printk(APIC_VERBOSE, KERN_INFO | 1796 | apic_printk(APIC_VERBOSE, KERN_INFO |
1805 | "...changing IO-APIC physical APIC ID to %d ...", | 1797 | "...changing IO-APIC physical APIC ID to %d ...", |
1806 | mp_ioapics[apic].mpc_apicid); | 1798 | mp_ioapics[apic].mpc_apicid); |
@@ -2020,7 +2012,7 @@ static void ack_apic(unsigned int irq)
2020 | ack_APIC_irq(); | 2012 | ack_APIC_irq(); |
2021 | } | 2013 | } |
2022 | 2014 | ||
2023 | static void mask_lapic_irq (unsigned int irq) | 2015 | static void mask_lapic_irq(unsigned int irq) |
2024 | { | 2016 | { |
2025 | unsigned long v; | 2017 | unsigned long v; |
2026 | 2018 | ||
@@ -2028,7 +2020,7 @@ static void mask_lapic_irq (unsigned int irq)
2028 | apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED); | 2020 | apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED); |
2029 | } | 2021 | } |
2030 | 2022 | ||
2031 | static void unmask_lapic_irq (unsigned int irq) | 2023 | static void unmask_lapic_irq(unsigned int irq) |
2032 | { | 2024 | { |
2033 | unsigned long v; | 2025 | unsigned long v; |
2034 | 2026 | ||
@@ -2037,7 +2029,7 @@ static void unmask_lapic_irq (unsigned int irq)
2037 | } | 2029 | } |
2038 | 2030 | ||
2039 | static struct irq_chip lapic_chip __read_mostly = { | 2031 | static struct irq_chip lapic_chip __read_mostly = { |
2040 | .name = "local-APIC-edge", | 2032 | .name = "local-APIC", |
2041 | .mask = mask_lapic_irq, | 2033 | .mask = mask_lapic_irq, |
2042 | .unmask = unmask_lapic_irq, | 2034 | .unmask = unmask_lapic_irq, |
2043 | .eoi = ack_apic, | 2035 | .eoi = ack_apic, |
@@ -2046,14 +2038,14 @@ static struct irq_chip lapic_chip __read_mostly = {
2046 | static void __init setup_nmi(void) | 2038 | static void __init setup_nmi(void) |
2047 | { | 2039 | { |
2048 | /* | 2040 | /* |
2049 | * Dirty trick to enable the NMI watchdog ... | 2041 | * Dirty trick to enable the NMI watchdog ... |
2050 | * We put the 8259A master into AEOI mode and | 2042 | * We put the 8259A master into AEOI mode and |
2051 | * unmask on all local APICs LVT0 as NMI. | 2043 | * unmask on all local APICs LVT0 as NMI. |
2052 | * | 2044 | * |
2053 | * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire') | 2045 | * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire') |
2054 | * is from Maciej W. Rozycki - so we do not have to EOI from | 2046 | * is from Maciej W. Rozycki - so we do not have to EOI from |
2055 | * the NMI handler or the timer interrupt. | 2047 | * the NMI handler or the timer interrupt. |
2056 | */ | 2048 | */ |
2057 | apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ..."); | 2049 | apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ..."); |
2058 | 2050 | ||
2059 | enable_NMI_through_LVT0(); | 2051 | enable_NMI_through_LVT0(); |
@@ -2129,6 +2121,7 @@ static inline void __init unlock_ExtINT_logic(void)
2129 | static inline void __init check_timer(void) | 2121 | static inline void __init check_timer(void) |
2130 | { | 2122 | { |
2131 | int apic1, pin1, apic2, pin2; | 2123 | int apic1, pin1, apic2, pin2; |
2124 | int no_pin1 = 0; | ||
2132 | int vector; | 2125 | int vector; |
2133 | unsigned int ver; | 2126 | unsigned int ver; |
2134 | unsigned long flags; | 2127 | unsigned long flags; |
@@ -2146,21 +2139,17 @@ static inline void __init check_timer(void)
2146 | set_intr_gate(vector, interrupt[0]); | 2139 | set_intr_gate(vector, interrupt[0]); |
2147 | 2140 | ||
2148 | /* | 2141 | /* |
2149 | * Subtle, code in do_timer_interrupt() expects an AEOI | 2142 | * As IRQ0 is to be enabled in the 8259A, the virtual |
2150 | * mode for the 8259A whenever interrupts are routed | 2143 | * wire has to be disabled in the local APIC. Also |
2151 | * through I/O APICs. Also IRQ0 has to be enabled in | 2144 | * timer interrupts need to be acknowledged manually in |
2152 | * the 8259A which implies the virtual wire has to be | 2145 | * the 8259A for the i82489DX when using the NMI |
2153 | * disabled in the local APIC. Finally timer interrupts | 2146 | * watchdog as that APIC treats NMIs as level-triggered. |
2154 | * need to be acknowledged manually in the 8259A for | 2147 | * The AEOI mode will finish them in the 8259A |
2155 | * timer_interrupt() and for the i82489DX when using | 2148 | * automatically. |
2156 | * the NMI watchdog. | ||
2157 | */ | 2149 | */ |
2158 | apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); | 2150 | apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); |
2159 | init_8259A(1); | 2151 | init_8259A(1); |
2160 | timer_ack = !cpu_has_tsc; | 2152 | timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver)); |
2161 | timer_ack |= (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver)); | ||
2162 | if (timer_over_8254 > 0) | ||
2163 | enable_8259A_irq(0); | ||
2164 | 2153 | ||
2165 | pin1 = find_isa_irq_pin(0, mp_INT); | 2154 | pin1 = find_isa_irq_pin(0, mp_INT); |
2166 | apic1 = find_isa_irq_apic(0, mp_INT); | 2155 | apic1 = find_isa_irq_apic(0, mp_INT); |
@@ -2170,14 +2159,33 @@ static inline void __init check_timer(void)
2170 | printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n", | 2159 | printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n", |
2171 | vector, apic1, pin1, apic2, pin2); | 2160 | vector, apic1, pin1, apic2, pin2); |
2172 | 2161 | ||
2162 | /* | ||
2163 | * Some BIOS writers are clueless and report the ExtINTA | ||
2164 | * I/O APIC input from the cascaded 8259A as the timer | ||
2165 | * interrupt input. So just in case, if only one pin | ||
2166 | * was found above, try it both directly and through the | ||
2167 | * 8259A. | ||
2168 | */ | ||
2169 | if (pin1 == -1) { | ||
2170 | pin1 = pin2; | ||
2171 | apic1 = apic2; | ||
2172 | no_pin1 = 1; | ||
2173 | } else if (pin2 == -1) { | ||
2174 | pin2 = pin1; | ||
2175 | apic2 = apic1; | ||
2176 | } | ||
2177 | |||
2173 | if (pin1 != -1) { | 2178 | if (pin1 != -1) { |
2174 | /* | 2179 | /* |
2175 | * Ok, does IRQ0 through the IOAPIC work? | 2180 | * Ok, does IRQ0 through the IOAPIC work? |
2176 | */ | 2181 | */ |
2182 | if (no_pin1) { | ||
2183 | add_pin_to_irq(0, apic1, pin1); | ||
2184 | setup_timer_IRQ0_pin(apic1, pin1, vector); | ||
2185 | } | ||
2177 | unmask_IO_APIC_irq(0); | 2186 | unmask_IO_APIC_irq(0); |
2178 | if (timer_irq_works()) { | 2187 | if (timer_irq_works()) { |
2179 | if (nmi_watchdog == NMI_IO_APIC) { | 2188 | if (nmi_watchdog == NMI_IO_APIC) { |
2180 | disable_8259A_irq(0); | ||
2181 | setup_nmi(); | 2189 | setup_nmi(); |
2182 | enable_8259A_irq(0); | 2190 | enable_8259A_irq(0); |
2183 | } | 2191 | } |
@@ -2186,43 +2194,46 @@ static inline void __init check_timer(void)
2186 | goto out; | 2194 | goto out; |
2187 | } | 2195 | } |
2188 | clear_IO_APIC_pin(apic1, pin1); | 2196 | clear_IO_APIC_pin(apic1, pin1); |
2189 | printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to " | 2197 | if (!no_pin1) |
2190 | "IO-APIC\n"); | 2198 | printk(KERN_ERR "..MP-BIOS bug: " |
2191 | } | 2199 | "8254 timer not connected to IO-APIC\n"); |
2192 | 2200 | ||
2193 | printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... "); | 2201 | printk(KERN_INFO "...trying to set up timer (IRQ0) " |
2194 | if (pin2 != -1) { | 2202 | "through the 8259A ... "); |
2195 | printk("\n..... (found pin %d) ...", pin2); | 2203 | printk("\n..... (found pin %d) ...", pin2); |
2196 | /* | 2204 | /* |
2197 | * legacy devices should be connected to IO APIC #0 | 2205 | * legacy devices should be connected to IO APIC #0 |
2198 | */ | 2206 | */ |
2199 | setup_ExtINT_IRQ0_pin(apic2, pin2, vector); | 2207 | replace_pin_at_irq(0, apic1, pin1, apic2, pin2); |
2208 | setup_timer_IRQ0_pin(apic2, pin2, vector); | ||
2209 | unmask_IO_APIC_irq(0); | ||
2210 | enable_8259A_irq(0); | ||
2200 | if (timer_irq_works()) { | 2211 | if (timer_irq_works()) { |
2201 | printk("works.\n"); | 2212 | printk("works.\n"); |
2202 | if (pin1 != -1) | 2213 | timer_through_8259 = 1; |
2203 | replace_pin_at_irq(0, apic1, pin1, apic2, pin2); | ||
2204 | else | ||
2205 | add_pin_to_irq(0, apic2, pin2); | ||
2206 | if (nmi_watchdog == NMI_IO_APIC) { | 2214 | if (nmi_watchdog == NMI_IO_APIC) { |
2215 | disable_8259A_irq(0); | ||
2207 | setup_nmi(); | 2216 | setup_nmi(); |
2217 | enable_8259A_irq(0); | ||
2208 | } | 2218 | } |
2209 | goto out; | 2219 | goto out; |
2210 | } | 2220 | } |
2211 | /* | 2221 | /* |
2212 | * Cleanup, just in case ... | 2222 | * Cleanup, just in case ... |
2213 | */ | 2223 | */ |
2224 | disable_8259A_irq(0); | ||
2214 | clear_IO_APIC_pin(apic2, pin2); | 2225 | clear_IO_APIC_pin(apic2, pin2); |
2226 | printk(" failed.\n"); | ||
2215 | } | 2227 | } |
2216 | printk(" failed.\n"); | ||
2217 | 2228 | ||
2218 | if (nmi_watchdog == NMI_IO_APIC) { | 2229 | if (nmi_watchdog == NMI_IO_APIC) { |
2219 | printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n"); | 2230 | printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n"); |
2220 | nmi_watchdog = 0; | 2231 | nmi_watchdog = NMI_NONE; |
2221 | } | 2232 | } |
2233 | timer_ack = 0; | ||
2222 | 2234 | ||
2223 | printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ..."); | 2235 | printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ..."); |
2224 | 2236 | ||
2225 | disable_8259A_irq(0); | ||
2226 | set_irq_chip_and_handler_name(0, &lapic_chip, handle_fasteoi_irq, | 2237 | set_irq_chip_and_handler_name(0, &lapic_chip, handle_fasteoi_irq, |
2227 | "fasteoi"); | 2238 | "fasteoi"); |
2228 | apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */ | 2239 | apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */ |
@@ -2232,12 +2243,12 @@ static inline void __init check_timer(void)
2232 | printk(" works.\n"); | 2243 | printk(" works.\n"); |
2233 | goto out; | 2244 | goto out; |
2234 | } | 2245 | } |
2246 | disable_8259A_irq(0); | ||
2235 | apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector); | 2247 | apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector); |
2236 | printk(" failed.\n"); | 2248 | printk(" failed.\n"); |
2237 | 2249 | ||
2238 | printk(KERN_INFO "...trying to set up timer as ExtINT IRQ..."); | 2250 | printk(KERN_INFO "...trying to set up timer as ExtINT IRQ..."); |
2239 | 2251 | ||
2240 | timer_ack = 0; | ||
2241 | init_8259A(0); | 2252 | init_8259A(0); |
2242 | make_8259A_irq(0); | 2253 | make_8259A_irq(0); |
2243 | apic_write_around(APIC_LVT0, APIC_DM_EXTINT); | 2254 | apic_write_around(APIC_LVT0, APIC_DM_EXTINT); |
@@ -2294,28 +2305,14 @@ void __init setup_IO_APIC(void)
2294 | print_IO_APIC(); | 2305 | print_IO_APIC(); |
2295 | } | 2306 | } |
2296 | 2307 | ||
2297 | static int __init setup_disable_8254_timer(char *s) | ||
2298 | { | ||
2299 | timer_over_8254 = -1; | ||
2300 | return 1; | ||
2301 | } | ||
2302 | static int __init setup_enable_8254_timer(char *s) | ||
2303 | { | ||
2304 | timer_over_8254 = 2; | ||
2305 | return 1; | ||
2306 | } | ||
2307 | |||
2308 | __setup("disable_8254_timer", setup_disable_8254_timer); | ||
2309 | __setup("enable_8254_timer", setup_enable_8254_timer); | ||
2310 | |||
2311 | /* | 2308 | /* |
2312 | * Called after all the initialization is done. If we didnt find any | 2309 | * Called after all the initialization is done. If we didnt find any |
2313 | * APIC bugs then we can allow the modify fast path | 2310 | * APIC bugs then we can allow the modify fast path |
2314 | */ | 2311 | */ |
2315 | 2312 | ||
2316 | static int __init io_apic_bug_finalize(void) | 2313 | static int __init io_apic_bug_finalize(void) |
2317 | { | 2314 | { |
2318 | if(sis_apic_bug == -1) | 2315 | if (sis_apic_bug == -1) |
2319 | sis_apic_bug = 0; | 2316 | sis_apic_bug = 0; |
2320 | return 0; | 2317 | return 0; |
2321 | } | 2318 | } |
@@ -2326,17 +2323,17 @@ struct sysfs_ioapic_data {
2326 | struct sys_device dev; | 2323 | struct sys_device dev; |
2327 | struct IO_APIC_route_entry entry[0]; | 2324 | struct IO_APIC_route_entry entry[0]; |
2328 | }; | 2325 | }; |
2329 | static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS]; | 2326 | static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS]; |
2330 | 2327 | ||
2331 | static int ioapic_suspend(struct sys_device *dev, pm_message_t state) | 2328 | static int ioapic_suspend(struct sys_device *dev, pm_message_t state) |
2332 | { | 2329 | { |
2333 | struct IO_APIC_route_entry *entry; | 2330 | struct IO_APIC_route_entry *entry; |
2334 | struct sysfs_ioapic_data *data; | 2331 | struct sysfs_ioapic_data *data; |
2335 | int i; | 2332 | int i; |
2336 | 2333 | ||
2337 | data = container_of(dev, struct sysfs_ioapic_data, dev); | 2334 | data = container_of(dev, struct sysfs_ioapic_data, dev); |
2338 | entry = data->entry; | 2335 | entry = data->entry; |
2339 | for (i = 0; i < nr_ioapic_registers[dev->id]; i ++) | 2336 | for (i = 0; i < nr_ioapic_registers[dev->id]; i++) |
2340 | entry[i] = ioapic_read_entry(dev->id, i); | 2337 | entry[i] = ioapic_read_entry(dev->id, i); |
2341 | 2338 | ||
2342 | return 0; | 2339 | return 0; |
@@ -2349,7 +2346,7 @@ static int ioapic_resume(struct sys_device *dev)
2349 | unsigned long flags; | 2346 | unsigned long flags; |
2350 | union IO_APIC_reg_00 reg_00; | 2347 | union IO_APIC_reg_00 reg_00; |
2351 | int i; | 2348 | int i; |
2352 | 2349 | ||
2353 | data = container_of(dev, struct sysfs_ioapic_data, dev); | 2350 | data = container_of(dev, struct sysfs_ioapic_data, dev); |
2354 | entry = data->entry; | 2351 | entry = data->entry; |
2355 | 2352 | ||
@@ -2360,7 +2357,7 @@ static int ioapic_resume(struct sys_device *dev)
2360 | io_apic_write(dev->id, 0, reg_00.raw); | 2357 | io_apic_write(dev->id, 0, reg_00.raw); |
2361 | } | 2358 | } |
2362 | spin_unlock_irqrestore(&ioapic_lock, flags); | 2359 | spin_unlock_irqrestore(&ioapic_lock, flags); |
2363 | for (i = 0; i < nr_ioapic_registers[dev->id]; i ++) | 2360 | for (i = 0; i < nr_ioapic_registers[dev->id]; i++) |
2364 | ioapic_write_entry(dev->id, i, entry[i]); | 2361 | ioapic_write_entry(dev->id, i, entry[i]); |
2365 | 2362 | ||
2366 | return 0; | 2363 | return 0; |
@@ -2374,24 +2371,23 @@ static struct sysdev_class ioapic_sysdev_class = {
2374 | 2371 | ||
2375 | static int __init ioapic_init_sysfs(void) | 2372 | static int __init ioapic_init_sysfs(void) |
2376 | { | 2373 | { |
2377 | struct sys_device * dev; | 2374 | struct sys_device *dev; |
2378 | int i, size, error = 0; | 2375 | int i, size, error = 0; |
2379 | 2376 | ||
2380 | error = sysdev_class_register(&ioapic_sysdev_class); | 2377 | error = sysdev_class_register(&ioapic_sysdev_class); |
2381 | if (error) | 2378 | if (error) |
2382 | return error; | 2379 | return error; |
2383 | 2380 | ||
2384 | for (i = 0; i < nr_ioapics; i++ ) { | 2381 | for (i = 0; i < nr_ioapics; i++) { |
2385 | size = sizeof(struct sys_device) + nr_ioapic_registers[i] | 2382 | size = sizeof(struct sys_device) + nr_ioapic_registers[i] |
2386 | * sizeof(struct IO_APIC_route_entry); | 2383 | * sizeof(struct IO_APIC_route_entry); |
2387 | mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL); | 2384 | mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL); |
2388 | if (!mp_ioapic_data[i]) { | 2385 | if (!mp_ioapic_data[i]) { |
2389 | printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i); | 2386 | printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i); |
2390 | continue; | 2387 | continue; |
2391 | } | 2388 | } |
2392 | memset(mp_ioapic_data[i], 0, size); | ||
2393 | dev = &mp_ioapic_data[i]->dev; | 2389 | dev = &mp_ioapic_data[i]->dev; |
2394 | dev->id = i; | 2390 | dev->id = i; |
2395 | dev->cls = &ioapic_sysdev_class; | 2391 | dev->cls = &ioapic_sysdev_class; |
2396 | error = sysdev_register(dev); | 2392 | error = sysdev_register(dev); |
2397 | if (error) { | 2393 | if (error) { |
@@ -2466,7 +2462,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
2466 | msg->address_lo = | 2462 | msg->address_lo = |
2467 | MSI_ADDR_BASE_LO | | 2463 | MSI_ADDR_BASE_LO | |
2468 | ((INT_DEST_MODE == 0) ? | 2464 | ((INT_DEST_MODE == 0) ? |
2469 | MSI_ADDR_DEST_MODE_PHYSICAL: | 2465 | MSI_ADDR_DEST_MODE_PHYSICAL: |
2470 | MSI_ADDR_DEST_MODE_LOGICAL) | | 2466 | MSI_ADDR_DEST_MODE_LOGICAL) | |
2471 | ((INT_DELIVERY_MODE != dest_LowestPrio) ? | 2467 | ((INT_DELIVERY_MODE != dest_LowestPrio) ? |
2472 | MSI_ADDR_REDIRECTION_CPU: | 2468 | MSI_ADDR_REDIRECTION_CPU: |
@@ -2477,7 +2473,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
2477 | MSI_DATA_TRIGGER_EDGE | | 2473 | MSI_DATA_TRIGGER_EDGE | |
2478 | MSI_DATA_LEVEL_ASSERT | | 2474 | MSI_DATA_LEVEL_ASSERT | |
2479 | ((INT_DELIVERY_MODE != dest_LowestPrio) ? | 2475 | ((INT_DELIVERY_MODE != dest_LowestPrio) ? |
2480 | MSI_DATA_DELIVERY_FIXED: | 2476 | MSI_DATA_DELIVERY_FIXED: |
2481 | MSI_DATA_DELIVERY_LOWPRI) | | 2477 | MSI_DATA_DELIVERY_LOWPRI) | |
2482 | MSI_DATA_VECTOR(vector); | 2478 | MSI_DATA_VECTOR(vector); |
2483 | } | 2479 | } |
@@ -2648,12 +2644,12 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
2648 | #endif /* CONFIG_HT_IRQ */ | 2644 | #endif /* CONFIG_HT_IRQ */ |
2649 | 2645 | ||
2650 | /* -------------------------------------------------------------------------- | 2646 | /* -------------------------------------------------------------------------- |
2651 | ACPI-based IOAPIC Configuration | 2647 | ACPI-based IOAPIC Configuration |
2652 | -------------------------------------------------------------------------- */ | 2648 | -------------------------------------------------------------------------- */ |
2653 | 2649 | ||
2654 | #ifdef CONFIG_ACPI | 2650 | #ifdef CONFIG_ACPI |
2655 | 2651 | ||
2656 | int __init io_apic_get_unique_id (int ioapic, int apic_id) | 2652 | int __init io_apic_get_unique_id(int ioapic, int apic_id) |
2657 | { | 2653 | { |
2658 | union IO_APIC_reg_00 reg_00; | 2654 | union IO_APIC_reg_00 reg_00; |
2659 | static physid_mask_t apic_id_map = PHYSID_MASK_NONE; | 2655 | static physid_mask_t apic_id_map = PHYSID_MASK_NONE; |
@@ -2662,10 +2658,10 @@ int __init io_apic_get_unique_id (int ioapic, int apic_id)
2662 | int i = 0; | 2658 | int i = 0; |
2663 | 2659 | ||
2664 | /* | 2660 | /* |
2665 | * The P4 platform supports up to 256 APIC IDs on two separate APIC | 2661 | * The P4 platform supports up to 256 APIC IDs on two separate APIC |
2666 | * buses (one for LAPICs, one for IOAPICs), where predecessors only | 2662 | * buses (one for LAPICs, one for IOAPICs), where predecessors only |
2667 | * supports up to 16 on one shared APIC bus. | 2663 | * supports up to 16 on one shared APIC bus. |
2668 | * | 2664 | * |
2669 | * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full | 2665 | * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full |
2670 | * advantage of new APIC bus architecture. | 2666 | * advantage of new APIC bus architecture. |
2671 | */ | 2667 | */ |
@@ -2684,7 +2680,7 @@ int __init io_apic_get_unique_id (int ioapic, int apic_id)
2684 | } | 2680 | } |
2685 | 2681 | ||
2686 | /* | 2682 | /* |
2687 | * Every APIC in a system must have a unique ID or we get lots of nice | 2683 | * Every APIC in a system must have a unique ID or we get lots of nice |
2688 | * 'stuck on smp_invalidate_needed IPI wait' messages. | 2684 | * 'stuck on smp_invalidate_needed IPI wait' messages. |
2689 | */ | 2685 | */ |
2690 | if (check_apicid_used(apic_id_map, apic_id)) { | 2686 | if (check_apicid_used(apic_id_map, apic_id)) { |
@@ -2701,7 +2697,7 @@ int __init io_apic_get_unique_id (int ioapic, int apic_id)
2701 | "trying %d\n", ioapic, apic_id, i); | 2697 | "trying %d\n", ioapic, apic_id, i); |
2702 | 2698 | ||
2703 | apic_id = i; | 2699 | apic_id = i; |
2704 | } | 2700 | } |
2705 | 2701 | ||
2706 | tmp = apicid_to_cpu_present(apic_id); | 2702 | tmp = apicid_to_cpu_present(apic_id); |
2707 | physids_or(apic_id_map, apic_id_map, tmp); | 2703 | physids_or(apic_id_map, apic_id_map, tmp); |
@@ -2728,7 +2724,7 @@ int __init io_apic_get_unique_id (int ioapic, int apic_id)
2728 | } | 2724 | } |
2729 | 2725 | ||
2730 | 2726 | ||
2731 | int __init io_apic_get_version (int ioapic) | 2727 | int __init io_apic_get_version(int ioapic) |
2732 | { | 2728 | { |
2733 | union IO_APIC_reg_01 reg_01; | 2729 | union IO_APIC_reg_01 reg_01; |
2734 | unsigned long flags; | 2730 | unsigned long flags; |
@@ -2741,7 +2737,7 @@ int __init io_apic_get_version (int ioapic)
2741 | } | 2737 | } |
2742 | 2738 | ||
2743 | 2739 | ||
2744 | int __init io_apic_get_redir_entries (int ioapic) | 2740 | int __init io_apic_get_redir_entries(int ioapic) |
2745 | { | 2741 | { |
2746 | union IO_APIC_reg_01 reg_01; | 2742 | union IO_APIC_reg_01 reg_01; |
2747 | unsigned long flags; | 2743 | unsigned long flags; |
@@ -2754,7 +2750,7 @@ int __init io_apic_get_redir_entries (int ioapic)
2754 | } | 2750 | } |
2755 | 2751 | ||
2756 | 2752 | ||
2757 | int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low) | 2753 | int io_apic_set_pci_routing(int ioapic, int pin, int irq, int edge_level, int active_high_low) |
2758 | { | 2754 | { |
2759 | struct IO_APIC_route_entry entry; | 2755 | struct IO_APIC_route_entry entry; |
2760 | 2756 | ||
@@ -2770,7 +2766,7 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
2770 | * corresponding device driver registers for this IRQ. | 2766 | * corresponding device driver registers for this IRQ. |
2771 | */ | 2767 | */ |
2772 | 2768 | ||
2773 | memset(&entry,0,sizeof(entry)); | 2769 | memset(&entry, 0, sizeof(entry)); |
2774 | 2770 | ||
2775 | entry.delivery_mode = INT_DELIVERY_MODE; | 2771 | entry.delivery_mode = INT_DELIVERY_MODE; |
2776 | entry.dest_mode = INT_DEST_MODE; | 2772 | entry.dest_mode = INT_DEST_MODE; |