author	Paolo Ciarrocchi <paolo.ciarrocchi@gmail.com>	2008-06-08 07:07:18 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-08 03:13:23 -0400
commit	360624484c81d55f88b1e5f48ce24c9243ce38e5
tree	51fe4566c35639f847c8bed96c308aac5b0b30fb /arch/x86/kernel/io_apic_32.c
parent	46b3b4ef1ea2a0892b9b38b6a0c65a3f33b504aa
x86: coding style fixes to arch/x86/kernel/io_apic_32.c
Before:
 total: 91 errors, 73 warnings, 2850 lines checked
After:
 total: 1 errors, 47 warnings, 2848 lines checked

Compile tested:
paolo@paolo-desktop:/tmp$ size io*
   text    data     bss     dec     hex filename
  13836    1756   11104   26696    6848 io_apic_32.o.after
  13836    1756   11104   26696    6848 io_apic_32.o.before

Signed-off-by: Paolo Ciarrocchi <paolo.ciarrocchi@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
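(The checkpatch and size figures above come from standard kernel tooling; the exact commands are not recorded in the commit message, so the lines below are only a sketch of the usual workflow, assuming an x86 build tree and before/after copies of the object file.)

  scripts/checkpatch.pl --file arch/x86/kernel/io_apic_32.c   # prints "total: N errors, M warnings, L lines checked"
  make arch/x86/kernel/io_apic_32.o                           # compile test of the touched file
  size io_apic_32.o.before io_apic_32.o.after                 # confirm the generated object code is unchanged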
Diffstat (limited to 'arch/x86/kernel/io_apic_32.c')
-rw-r--r--	arch/x86/kernel/io_apic_32.c	348
1 files changed, 173 insertions, 175 deletions
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index 40fbb229fe7d..9a924ebcd74c 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -239,7 +239,7 @@ static void __init replace_pin_at_irq(unsigned int irq,
239 } 239 }
240} 240}
241 241
242static void __modify_IO_APIC_irq (unsigned int irq, unsigned long enable, unsigned long disable) 242static void __modify_IO_APIC_irq(unsigned int irq, unsigned long enable, unsigned long disable)
243{ 243{
244 struct irq_pin_list *entry = irq_2_pin + irq; 244 struct irq_pin_list *entry = irq_2_pin + irq;
245 unsigned int pin, reg; 245 unsigned int pin, reg;
@@ -259,32 +259,32 @@ static void __modify_IO_APIC_irq (unsigned int irq, unsigned long enable, unsign
259} 259}
260 260
261/* mask = 1 */ 261/* mask = 1 */
262static void __mask_IO_APIC_irq (unsigned int irq) 262static void __mask_IO_APIC_irq(unsigned int irq)
263{ 263{
264 __modify_IO_APIC_irq(irq, IO_APIC_REDIR_MASKED, 0); 264 __modify_IO_APIC_irq(irq, IO_APIC_REDIR_MASKED, 0);
265} 265}
266 266
267/* mask = 0 */ 267/* mask = 0 */
268static void __unmask_IO_APIC_irq (unsigned int irq) 268static void __unmask_IO_APIC_irq(unsigned int irq)
269{ 269{
270 __modify_IO_APIC_irq(irq, 0, IO_APIC_REDIR_MASKED); 270 __modify_IO_APIC_irq(irq, 0, IO_APIC_REDIR_MASKED);
271} 271}
272 272
273/* mask = 1, trigger = 0 */ 273/* mask = 1, trigger = 0 */
274static void __mask_and_edge_IO_APIC_irq (unsigned int irq) 274static void __mask_and_edge_IO_APIC_irq(unsigned int irq)
275{ 275{
276 __modify_IO_APIC_irq(irq, IO_APIC_REDIR_MASKED, 276 __modify_IO_APIC_irq(irq, IO_APIC_REDIR_MASKED,
277 IO_APIC_REDIR_LEVEL_TRIGGER); 277 IO_APIC_REDIR_LEVEL_TRIGGER);
278} 278}
279 279
280/* mask = 0, trigger = 1 */ 280/* mask = 0, trigger = 1 */
281static void __unmask_and_level_IO_APIC_irq (unsigned int irq) 281static void __unmask_and_level_IO_APIC_irq(unsigned int irq)
282{ 282{
283 __modify_IO_APIC_irq(irq, IO_APIC_REDIR_LEVEL_TRIGGER, 283 __modify_IO_APIC_irq(irq, IO_APIC_REDIR_LEVEL_TRIGGER,
284 IO_APIC_REDIR_MASKED); 284 IO_APIC_REDIR_MASKED);
285} 285}
286 286
287static void mask_IO_APIC_irq (unsigned int irq) 287static void mask_IO_APIC_irq(unsigned int irq)
288{ 288{
289 unsigned long flags; 289 unsigned long flags;
290 290
@@ -293,7 +293,7 @@ static void mask_IO_APIC_irq (unsigned int irq)
293 spin_unlock_irqrestore(&ioapic_lock, flags); 293 spin_unlock_irqrestore(&ioapic_lock, flags);
294} 294}
295 295
296static void unmask_IO_APIC_irq (unsigned int irq) 296static void unmask_IO_APIC_irq(unsigned int irq)
297{ 297{
298 unsigned long flags; 298 unsigned long flags;
299 299
@@ -305,7 +305,7 @@ static void unmask_IO_APIC_irq (unsigned int irq)
305static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) 305static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
306{ 306{
307 struct IO_APIC_route_entry entry; 307 struct IO_APIC_route_entry entry;
308 308
309 /* Check delivery_mode to be sure we're not clearing an SMI pin */ 309 /* Check delivery_mode to be sure we're not clearing an SMI pin */
310 entry = ioapic_read_entry(apic, pin); 310 entry = ioapic_read_entry(apic, pin);
311 if (entry.delivery_mode == dest_SMI) 311 if (entry.delivery_mode == dest_SMI)
@@ -317,7 +317,7 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
317 ioapic_mask_entry(apic, pin); 317 ioapic_mask_entry(apic, pin);
318} 318}
319 319
320static void clear_IO_APIC (void) 320static void clear_IO_APIC(void)
321{ 321{
322 int apic, pin; 322 int apic, pin;
323 323
@@ -334,7 +334,7 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
334 struct irq_pin_list *entry = irq_2_pin + irq; 334 struct irq_pin_list *entry = irq_2_pin + irq;
335 unsigned int apicid_value; 335 unsigned int apicid_value;
336 cpumask_t tmp; 336 cpumask_t tmp;
337 337
338 cpus_and(tmp, cpumask, cpu_online_map); 338 cpus_and(tmp, cpumask, cpu_online_map);
339 if (cpus_empty(tmp)) 339 if (cpus_empty(tmp))
340 tmp = TARGET_CPUS; 340 tmp = TARGET_CPUS;
@@ -363,7 +363,7 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
363# include <linux/kernel_stat.h> /* kstat */ 363# include <linux/kernel_stat.h> /* kstat */
364# include <linux/slab.h> /* kmalloc() */ 364# include <linux/slab.h> /* kmalloc() */
365# include <linux/timer.h> 365# include <linux/timer.h>
366 366
367#define IRQBALANCE_CHECK_ARCH -999 367#define IRQBALANCE_CHECK_ARCH -999
368#define MAX_BALANCED_IRQ_INTERVAL (5*HZ) 368#define MAX_BALANCED_IRQ_INTERVAL (5*HZ)
369#define MIN_BALANCED_IRQ_INTERVAL (HZ/2) 369#define MIN_BALANCED_IRQ_INTERVAL (HZ/2)
@@ -375,14 +375,14 @@ static int physical_balance __read_mostly;
375static long balanced_irq_interval __read_mostly = MAX_BALANCED_IRQ_INTERVAL; 375static long balanced_irq_interval __read_mostly = MAX_BALANCED_IRQ_INTERVAL;
376 376
377static struct irq_cpu_info { 377static struct irq_cpu_info {
378 unsigned long * last_irq; 378 unsigned long *last_irq;
379 unsigned long * irq_delta; 379 unsigned long *irq_delta;
380 unsigned long irq; 380 unsigned long irq;
381} irq_cpu_data[NR_CPUS]; 381} irq_cpu_data[NR_CPUS];
382 382
383#define CPU_IRQ(cpu) (irq_cpu_data[cpu].irq) 383#define CPU_IRQ(cpu) (irq_cpu_data[cpu].irq)
384#define LAST_CPU_IRQ(cpu,irq) (irq_cpu_data[cpu].last_irq[irq]) 384#define LAST_CPU_IRQ(cpu, irq) (irq_cpu_data[cpu].last_irq[irq])
385#define IRQ_DELTA(cpu,irq) (irq_cpu_data[cpu].irq_delta[irq]) 385#define IRQ_DELTA(cpu, irq) (irq_cpu_data[cpu].irq_delta[irq])
386 386
387#define IDLE_ENOUGH(cpu,now) \ 387#define IDLE_ENOUGH(cpu,now) \
388 (idle_cpu(cpu) && ((now) - per_cpu(irq_stat, (cpu)).idle_timestamp > 1)) 388 (idle_cpu(cpu) && ((now) - per_cpu(irq_stat, (cpu)).idle_timestamp > 1))
@@ -421,8 +421,8 @@ inside:
421 if (cpu == -1) 421 if (cpu == -1)
422 cpu = NR_CPUS-1; 422 cpu = NR_CPUS-1;
423 } 423 }
424 } while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu,allowed_mask) || 424 } while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu, allowed_mask) ||
425 (search_idle && !IDLE_ENOUGH(cpu,now))); 425 (search_idle && !IDLE_ENOUGH(cpu, now)));
426 426
427 return cpu; 427 return cpu;
428} 428}
@@ -432,15 +432,14 @@ static inline void balance_irq(int cpu, int irq)
432 unsigned long now = jiffies; 432 unsigned long now = jiffies;
433 cpumask_t allowed_mask; 433 cpumask_t allowed_mask;
434 unsigned int new_cpu; 434 unsigned int new_cpu;
435 435
436 if (irqbalance_disabled) 436 if (irqbalance_disabled)
437 return; 437 return;
438 438
439 cpus_and(allowed_mask, cpu_online_map, balance_irq_affinity[irq]); 439 cpus_and(allowed_mask, cpu_online_map, balance_irq_affinity[irq]);
440 new_cpu = move(cpu, allowed_mask, now, 1); 440 new_cpu = move(cpu, allowed_mask, now, 1);
441 if (cpu != new_cpu) { 441 if (cpu != new_cpu)
442 set_pending_irq(irq, cpumask_of_cpu(new_cpu)); 442 set_pending_irq(irq, cpumask_of_cpu(new_cpu));
443 }
444} 443}
445 444
446static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold) 445static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
@@ -452,14 +451,14 @@ static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
452 if (!irq_desc[j].action) 451 if (!irq_desc[j].action)
453 continue; 452 continue;
454 /* Is it a significant load ? */ 453 /* Is it a significant load ? */
455 if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i),j) < 454 if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i), j) <
456 useful_load_threshold) 455 useful_load_threshold)
457 continue; 456 continue;
458 balance_irq(i, j); 457 balance_irq(i, j);
459 } 458 }
460 } 459 }
461 balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL, 460 balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
462 balanced_irq_interval - BALANCED_IRQ_LESS_DELTA); 461 balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
463 return; 462 return;
464} 463}
465 464
@@ -488,22 +487,22 @@ static void do_irq_balance(void)
488 /* Is this an active IRQ or balancing disabled ? */ 487 /* Is this an active IRQ or balancing disabled ? */
489 if (!irq_desc[j].action || irq_balancing_disabled(j)) 488 if (!irq_desc[j].action || irq_balancing_disabled(j))
490 continue; 489 continue;
491 if ( package_index == i ) 490 if (package_index == i)
492 IRQ_DELTA(package_index,j) = 0; 491 IRQ_DELTA(package_index, j) = 0;
493 /* Determine the total count per processor per IRQ */ 492 /* Determine the total count per processor per IRQ */
494 value_now = (unsigned long) kstat_cpu(i).irqs[j]; 493 value_now = (unsigned long) kstat_cpu(i).irqs[j];
495 494
496 /* Determine the activity per processor per IRQ */ 495 /* Determine the activity per processor per IRQ */
497 delta = value_now - LAST_CPU_IRQ(i,j); 496 delta = value_now - LAST_CPU_IRQ(i, j);
498 497
499 /* Update last_cpu_irq[][] for the next time */ 498 /* Update last_cpu_irq[][] for the next time */
500 LAST_CPU_IRQ(i,j) = value_now; 499 LAST_CPU_IRQ(i, j) = value_now;
501 500
502 /* Ignore IRQs whose rate is less than the clock */ 501 /* Ignore IRQs whose rate is less than the clock */
503 if (delta < useful_load_threshold) 502 if (delta < useful_load_threshold)
504 continue; 503 continue;
505 /* update the load for the processor or package total */ 504 /* update the load for the processor or package total */
506 IRQ_DELTA(package_index,j) += delta; 505 IRQ_DELTA(package_index, j) += delta;
507 506
508 /* Keep track of the higher numbered sibling as well */ 507 /* Keep track of the higher numbered sibling as well */
509 if (i != package_index) 508 if (i != package_index)
@@ -529,7 +528,8 @@ static void do_irq_balance(void)
529 max_cpu_irq = ULONG_MAX; 528 max_cpu_irq = ULONG_MAX;
530 529
531tryanothercpu: 530tryanothercpu:
532 /* Look for heaviest loaded processor. 531 /*
532 * Look for heaviest loaded processor.
533 * We may come back to get the next heaviest loaded processor. 533 * We may come back to get the next heaviest loaded processor.
534 * Skip processors with trivial loads. 534 * Skip processors with trivial loads.
535 */ 535 */
@@ -538,7 +538,7 @@ tryanothercpu:
538 for_each_online_cpu(i) { 538 for_each_online_cpu(i) {
539 if (i != CPU_TO_PACKAGEINDEX(i)) 539 if (i != CPU_TO_PACKAGEINDEX(i))
540 continue; 540 continue;
541 if (max_cpu_irq <= CPU_IRQ(i)) 541 if (max_cpu_irq <= CPU_IRQ(i))
542 continue; 542 continue;
543 if (tmp_cpu_irq < CPU_IRQ(i)) { 543 if (tmp_cpu_irq < CPU_IRQ(i)) {
544 tmp_cpu_irq = CPU_IRQ(i); 544 tmp_cpu_irq = CPU_IRQ(i);
@@ -547,8 +547,9 @@ tryanothercpu:
547 } 547 }
548 548
549 if (tmp_loaded == -1) { 549 if (tmp_loaded == -1) {
550 /* In the case of small number of heavy interrupt sources, 550 /*
551 * loading some of the cpus too much. We use Ingo's original 551 * In the case of small number of heavy interrupt sources,
552 * loading some of the cpus too much. We use Ingo's original
552 * approach to rotate them around. 553 * approach to rotate them around.
553 */ 554 */
554 if (!first_attempt && imbalance >= useful_load_threshold) { 555 if (!first_attempt && imbalance >= useful_load_threshold) {
@@ -557,13 +558,14 @@ tryanothercpu:
557 } 558 }
558 goto not_worth_the_effort; 559 goto not_worth_the_effort;
559 } 560 }
560 561
561 first_attempt = 0; /* heaviest search */ 562 first_attempt = 0; /* heaviest search */
562 max_cpu_irq = tmp_cpu_irq; /* load */ 563 max_cpu_irq = tmp_cpu_irq; /* load */
563 max_loaded = tmp_loaded; /* processor */ 564 max_loaded = tmp_loaded; /* processor */
564 imbalance = (max_cpu_irq - min_cpu_irq) / 2; 565 imbalance = (max_cpu_irq - min_cpu_irq) / 2;
565 566
566 /* if imbalance is less than approx 10% of max load, then 567 /*
568 * if imbalance is less than approx 10% of max load, then
567 * observe diminishing returns action. - quit 569 * observe diminishing returns action. - quit
568 */ 570 */
569 if (imbalance < (max_cpu_irq >> 3)) 571 if (imbalance < (max_cpu_irq >> 3))
@@ -579,26 +581,25 @@ tryanotherirq:
579 /* Is this an active IRQ? */ 581 /* Is this an active IRQ? */
580 if (!irq_desc[j].action) 582 if (!irq_desc[j].action)
581 continue; 583 continue;
582 if (imbalance <= IRQ_DELTA(max_loaded,j)) 584 if (imbalance <= IRQ_DELTA(max_loaded, j))
583 continue; 585 continue;
584 /* Try to find the IRQ that is closest to the imbalance 586 /* Try to find the IRQ that is closest to the imbalance
585 * without going over. 587 * without going over.
586 */ 588 */
587 if (move_this_load < IRQ_DELTA(max_loaded,j)) { 589 if (move_this_load < IRQ_DELTA(max_loaded, j)) {
588 move_this_load = IRQ_DELTA(max_loaded,j); 590 move_this_load = IRQ_DELTA(max_loaded, j);
589 selected_irq = j; 591 selected_irq = j;
590 } 592 }
591 } 593 }
592 if (selected_irq == -1) { 594 if (selected_irq == -1)
593 goto tryanothercpu; 595 goto tryanothercpu;
594 }
595 596
596 imbalance = move_this_load; 597 imbalance = move_this_load;
597 598
598 /* For physical_balance case, we accumulated both load 599 /* For physical_balance case, we accumulated both load
599 * values in the one of the siblings cpu_irq[], 600 * values in the one of the siblings cpu_irq[],
600 * to use the same code for physical and logical processors 601 * to use the same code for physical and logical processors
601 * as much as possible. 602 * as much as possible.
602 * 603 *
603 * NOTE: the cpu_irq[] array holds the sum of the load for 604 * NOTE: the cpu_irq[] array holds the sum of the load for
604 * sibling A and sibling B in the slot for the lowest numbered 605 * sibling A and sibling B in the slot for the lowest numbered
@@ -627,11 +628,11 @@ tryanotherirq:
627 /* mark for change destination */ 628 /* mark for change destination */
628 set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded)); 629 set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded));
629 630
630 /* Since we made a change, come back sooner to 631 /* Since we made a change, come back sooner to
631 * check for more variation. 632 * check for more variation.
632 */ 633 */
633 balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL, 634 balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
634 balanced_irq_interval - BALANCED_IRQ_LESS_DELTA); 635 balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
635 return; 636 return;
636 } 637 }
637 goto tryanotherirq; 638 goto tryanotherirq;
@@ -642,7 +643,7 @@ not_worth_the_effort:
642 * upward 643 * upward
643 */ 644 */
644 balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL, 645 balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL,
645 balanced_irq_interval + BALANCED_IRQ_MORE_DELTA); 646 balanced_irq_interval + BALANCED_IRQ_MORE_DELTA);
646 return; 647 return;
647} 648}
648 649
@@ -681,13 +682,13 @@ static int __init balanced_irq_init(void)
681 cpumask_t tmp; 682 cpumask_t tmp;
682 683
683 cpus_shift_right(tmp, cpu_online_map, 2); 684 cpus_shift_right(tmp, cpu_online_map, 2);
684 c = &boot_cpu_data; 685 c = &boot_cpu_data;
685 /* When not overwritten by the command line ask subarchitecture. */ 686 /* When not overwritten by the command line ask subarchitecture. */
686 if (irqbalance_disabled == IRQBALANCE_CHECK_ARCH) 687 if (irqbalance_disabled == IRQBALANCE_CHECK_ARCH)
687 irqbalance_disabled = NO_BALANCE_IRQ; 688 irqbalance_disabled = NO_BALANCE_IRQ;
688 if (irqbalance_disabled) 689 if (irqbalance_disabled)
689 return 0; 690 return 0;
690 691
691 /* disable irqbalance completely if there is only one processor online */ 692 /* disable irqbalance completely if there is only one processor online */
692 if (num_online_cpus() < 2) { 693 if (num_online_cpus() < 2) {
693 irqbalance_disabled = 1; 694 irqbalance_disabled = 1;
@@ -707,10 +708,10 @@ static int __init balanced_irq_init(void)
707 printk(KERN_ERR "balanced_irq_init: out of memory"); 708 printk(KERN_ERR "balanced_irq_init: out of memory");
708 goto failed; 709 goto failed;
709 } 710 }
710 memset(irq_cpu_data[i].irq_delta,0,sizeof(unsigned long) * NR_IRQS); 711 memset(irq_cpu_data[i].irq_delta, 0, sizeof(unsigned long) * NR_IRQS);
711 memset(irq_cpu_data[i].last_irq,0,sizeof(unsigned long) * NR_IRQS); 712 memset(irq_cpu_data[i].last_irq, 0, sizeof(unsigned long) * NR_IRQS);
712 } 713 }
713 714
714 printk(KERN_INFO "Starting balanced_irq\n"); 715 printk(KERN_INFO "Starting balanced_irq\n");
715 if (!IS_ERR(kthread_run(balanced_irq, NULL, "kirqd"))) 716 if (!IS_ERR(kthread_run(balanced_irq, NULL, "kirqd")))
716 return 0; 717 return 0;
@@ -845,7 +846,7 @@ static int __init find_isa_irq_apic(int irq, int type)
845 } 846 }
846 if (i < mp_irq_entries) { 847 if (i < mp_irq_entries) {
847 int apic; 848 int apic;
848 for(apic = 0; apic < nr_ioapics; apic++) { 849 for (apic = 0; apic < nr_ioapics; apic++) {
849 if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic) 850 if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
850 return apic; 851 return apic;
851 } 852 }
@@ -882,7 +883,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
882 !mp_irqs[i].mpc_irqtype && 883 !mp_irqs[i].mpc_irqtype &&
883 (bus == lbus) && 884 (bus == lbus) &&
884 (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) { 885 (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
885 int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq); 886 int irq = pin_2_irq(i, apic, mp_irqs[i].mpc_dstirq);
886 887
887 if (!(apic || IO_APIC_IRQ(irq))) 888 if (!(apic || IO_APIC_IRQ(irq)))
888 continue; 889 continue;
@@ -902,7 +903,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
902EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector); 903EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
903 904
904/* 905/*
905 * This function currently is only a helper for the i386 smp boot process where 906 * This function currently is only a helper for the i386 smp boot process where
906 * we need to reprogram the ioredtbls to cater for the cpus which have come online 907 * we need to reprogram the ioredtbls to cater for the cpus which have come online
907 * so mask in all cases should simply be TARGET_CPUS 908 * so mask in all cases should simply be TARGET_CPUS
908 */ 909 */
@@ -977,37 +978,36 @@ static int MPBIOS_polarity(int idx)
 	/*
 	 * Determine IRQ line polarity (high active or low active):
 	 */
-	switch (mp_irqs[idx].mpc_irqflag & 3)
-	{
-		case 0: /* conforms, ie. bus-type dependent polarity */
-		{
-			polarity = test_bit(bus, mp_bus_not_pci)?
-				default_ISA_polarity(idx):
-				default_PCI_polarity(idx);
-			break;
-		}
-		case 1: /* high active */
-		{
-			polarity = 0;
-			break;
-		}
-		case 2: /* reserved */
-		{
-			printk(KERN_WARNING "broken BIOS!!\n");
-			polarity = 1;
-			break;
-		}
-		case 3: /* low active */
-		{
-			polarity = 1;
-			break;
-		}
-		default: /* invalid */
-		{
-			printk(KERN_WARNING "broken BIOS!!\n");
-			polarity = 1;
-			break;
-		}
+	switch (mp_irqs[idx].mpc_irqflag & 3) {
+	case 0: /* conforms, ie. bus-type dependent polarity */
+	{
+		polarity = test_bit(bus, mp_bus_not_pci)?
+			default_ISA_polarity(idx):
+			default_PCI_polarity(idx);
+		break;
+	}
+	case 1: /* high active */
+	{
+		polarity = 0;
+		break;
+	}
+	case 2: /* reserved */
+	{
+		printk(KERN_WARNING "broken BIOS!!\n");
+		polarity = 1;
+		break;
+	}
+	case 3: /* low active */
+	{
+		polarity = 1;
+		break;
+	}
+	default: /* invalid */
+	{
+		printk(KERN_WARNING "broken BIOS!!\n");
+		polarity = 1;
+		break;
+	}
 	}
 	return polarity;
 }
@@ -1020,69 +1020,67 @@ static int MPBIOS_trigger(int idx)
 	/*
 	 * Determine IRQ trigger mode (edge or level sensitive):
 	 */
-	switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
-	{
-		case 0: /* conforms, ie. bus-type dependent */
-		{
-			trigger = test_bit(bus, mp_bus_not_pci)?
-				default_ISA_trigger(idx):
-				default_PCI_trigger(idx);
-#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
-			switch (mp_bus_id_to_type[bus])
-			{
-				case MP_BUS_ISA: /* ISA pin */
-				{
-					/* set before the switch */
-					break;
-				}
-				case MP_BUS_EISA: /* EISA pin */
-				{
-					trigger = default_EISA_trigger(idx);
-					break;
-				}
-				case MP_BUS_PCI: /* PCI pin */
-				{
-					/* set before the switch */
-					break;
-				}
-				case MP_BUS_MCA: /* MCA pin */
-				{
-					trigger = default_MCA_trigger(idx);
-					break;
-				}
-				default:
-				{
-					printk(KERN_WARNING "broken BIOS!!\n");
-					trigger = 1;
-					break;
-				}
-			}
-#endif
-			break;
-		}
-		case 1: /* edge */
-		{
-			trigger = 0;
-			break;
-		}
-		case 2: /* reserved */
-		{
-			printk(KERN_WARNING "broken BIOS!!\n");
-			trigger = 1;
-			break;
-		}
-		case 3: /* level */
-		{
-			trigger = 1;
-			break;
-		}
-		default: /* invalid */
-		{
-			printk(KERN_WARNING "broken BIOS!!\n");
-			trigger = 0;
-			break;
-		}
-	}
+	switch ((mp_irqs[idx].mpc_irqflag>>2) & 3) {
+	case 0: /* conforms, ie. bus-type dependent */
+	{
+		trigger = test_bit(bus, mp_bus_not_pci)?
+			default_ISA_trigger(idx):
+			default_PCI_trigger(idx);
+#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
+		switch (mp_bus_id_to_type[bus]) {
+		case MP_BUS_ISA: /* ISA pin */
+		{
+			/* set before the switch */
+			break;
+		}
+		case MP_BUS_EISA: /* EISA pin */
+		{
+			trigger = default_EISA_trigger(idx);
+			break;
+		}
+		case MP_BUS_PCI: /* PCI pin */
+		{
+			/* set before the switch */
+			break;
+		}
+		case MP_BUS_MCA: /* MCA pin */
+		{
+			trigger = default_MCA_trigger(idx);
+			break;
+		}
+		default:
+		{
+			printk(KERN_WARNING "broken BIOS!!\n");
+			trigger = 1;
+			break;
+		}
+		}
+#endif
+		break;
+	}
+	case 1: /* edge */
+	{
+		trigger = 0;
+		break;
+	}
+	case 2: /* reserved */
+	{
+		printk(KERN_WARNING "broken BIOS!!\n");
+		trigger = 1;
+		break;
+	}
+	case 3: /* level */
+	{
+		trigger = 1;
+		break;
+	}
+	default: /* invalid */
+	{
+		printk(KERN_WARNING "broken BIOS!!\n");
+		trigger = 0;
+		break;
+	}
+	}
 	return trigger;
 }
 
@@ -1150,8 +1148,8 @@ static inline int IO_APIC_irq_trigger(int irq)
1150 1148
1151 for (apic = 0; apic < nr_ioapics; apic++) { 1149 for (apic = 0; apic < nr_ioapics; apic++) {
1152 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { 1150 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1153 idx = find_irq_entry(apic,pin,mp_INT); 1151 idx = find_irq_entry(apic, pin, mp_INT);
1154 if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin))) 1152 if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
1155 return irq_trigger(idx); 1153 return irq_trigger(idx);
1156 } 1154 }
1157 } 1155 }
@@ -1166,7 +1164,7 @@ static u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 }
1166 1164
1167static int __assign_irq_vector(int irq) 1165static int __assign_irq_vector(int irq)
1168{ 1166{
1169 static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0; 1167 static int current_vector = FIRST_DEVICE_VECTOR, current_offset;
1170 int vector, offset; 1168 int vector, offset;
1171 1169
1172 BUG_ON((unsigned)irq >= NR_IRQ_VECTORS); 1170 BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
@@ -1239,15 +1237,15 @@ static void __init setup_IO_APIC_irqs(void)
1239 /* 1237 /*
1240 * add it to the IO-APIC irq-routing table: 1238 * add it to the IO-APIC irq-routing table:
1241 */ 1239 */
1242 memset(&entry,0,sizeof(entry)); 1240 memset(&entry, 0, sizeof(entry));
1243 1241
1244 entry.delivery_mode = INT_DELIVERY_MODE; 1242 entry.delivery_mode = INT_DELIVERY_MODE;
1245 entry.dest_mode = INT_DEST_MODE; 1243 entry.dest_mode = INT_DEST_MODE;
1246 entry.mask = 0; /* enable IRQ */ 1244 entry.mask = 0; /* enable IRQ */
1247 entry.dest.logical.logical_dest = 1245 entry.dest.logical.logical_dest =
1248 cpu_mask_to_apicid(TARGET_CPUS); 1246 cpu_mask_to_apicid(TARGET_CPUS);
1249 1247
1250 idx = find_irq_entry(apic,pin,mp_INT); 1248 idx = find_irq_entry(apic, pin, mp_INT);
1251 if (idx == -1) { 1249 if (idx == -1) {
1252 if (first_notcon) { 1250 if (first_notcon) {
1253 apic_printk(APIC_VERBOSE, KERN_DEBUG 1251 apic_printk(APIC_VERBOSE, KERN_DEBUG
@@ -1291,7 +1289,7 @@ static void __init setup_IO_APIC_irqs(void)
1291 vector = assign_irq_vector(irq); 1289 vector = assign_irq_vector(irq);
1292 entry.vector = vector; 1290 entry.vector = vector;
1293 ioapic_register_intr(irq, vector, IOAPIC_AUTO); 1291 ioapic_register_intr(irq, vector, IOAPIC_AUTO);
1294 1292
1295 if (!apic && (irq < 16)) 1293 if (!apic && (irq < 16))
1296 disable_8259A_irq(irq); 1294 disable_8259A_irq(irq);
1297 } 1295 }
@@ -1311,7 +1309,7 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
1311{ 1309{
1312 struct IO_APIC_route_entry entry; 1310 struct IO_APIC_route_entry entry;
1313 1311
1314 memset(&entry,0,sizeof(entry)); 1312 memset(&entry, 0, sizeof(entry));
1315 1313
1316 /* 1314 /*
1317 * We use logical delivery to get the timer IRQ 1315 * We use logical delivery to get the timer IRQ
@@ -1349,7 +1347,7 @@ void __init print_IO_APIC(void)
1349 if (apic_verbosity == APIC_QUIET) 1347 if (apic_verbosity == APIC_QUIET)
1350 return; 1348 return;
1351 1349
1352 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); 1350 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
1353 for (i = 0; i < nr_ioapics; i++) 1351 for (i = 0; i < nr_ioapics; i++)
1354 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n", 1352 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
1355 mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]); 1353 mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
@@ -1454,7 +1452,7 @@ void __init print_IO_APIC(void)
1454 1452
1455#if 0 1453#if 0
1456 1454
1457static void print_APIC_bitfield (int base) 1455static void print_APIC_bitfield(int base)
1458{ 1456{
1459 unsigned int v; 1457 unsigned int v;
1460 int i, j; 1458 int i, j;
@@ -1475,7 +1473,7 @@ static void print_APIC_bitfield (int base)
1475 } 1473 }
1476} 1474}
1477 1475
1478void /*__init*/ print_local_APIC(void * dummy) 1476void /*__init*/ print_local_APIC(void *dummy)
1479{ 1477{
1480 unsigned int v, ver, maxlvt; 1478 unsigned int v, ver, maxlvt;
1481 1479
@@ -1558,7 +1556,7 @@ void /*__init*/ print_local_APIC(void * dummy)
1558 printk("\n"); 1556 printk("\n");
1559} 1557}
1560 1558
1561void print_all_local_APICs (void) 1559void print_all_local_APICs(void)
1562{ 1560{
1563 on_each_cpu(print_local_APIC, NULL, 1, 1); 1561 on_each_cpu(print_local_APIC, NULL, 1, 1);
1564} 1562}
@@ -1581,11 +1579,11 @@ void /*__init*/ print_PIC(void)
1581 v = inb(0xa0) << 8 | inb(0x20); 1579 v = inb(0xa0) << 8 | inb(0x20);
1582 printk(KERN_DEBUG "... PIC IRR: %04x\n", v); 1580 printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
1583 1581
1584 outb(0x0b,0xa0); 1582 outb(0x0b, 0xa0);
1585 outb(0x0b,0x20); 1583 outb(0x0b, 0x20);
1586 v = inb(0xa0) << 8 | inb(0x20); 1584 v = inb(0xa0) << 8 | inb(0x20);
1587 outb(0x0a,0xa0); 1585 outb(0x0a, 0xa0);
1588 outb(0x0a,0x20); 1586 outb(0x0a, 0x20);
1589 1587
1590 spin_unlock_irqrestore(&i8259A_lock, flags); 1588 spin_unlock_irqrestore(&i8259A_lock, flags);
1591 1589
@@ -1621,7 +1619,7 @@ static void __init enable_IO_APIC(void)
1621 spin_unlock_irqrestore(&ioapic_lock, flags); 1619 spin_unlock_irqrestore(&ioapic_lock, flags);
1622 nr_ioapic_registers[apic] = reg_01.bits.entries+1; 1620 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
1623 } 1621 }
1624 for(apic = 0; apic < nr_ioapics; apic++) { 1622 for (apic = 0; apic < nr_ioapics; apic++) {
1625 int pin; 1623 int pin;
1626 /* See if any of the pins is in ExtINT mode */ 1624 /* See if any of the pins is in ExtINT mode */
1627 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { 1625 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
@@ -1743,7 +1741,7 @@ static void __init setup_ioapic_ids_from_mpc(void)
1743 spin_lock_irqsave(&ioapic_lock, flags); 1741 spin_lock_irqsave(&ioapic_lock, flags);
1744 reg_00.raw = io_apic_read(apic, 0); 1742 reg_00.raw = io_apic_read(apic, 0);
1745 spin_unlock_irqrestore(&ioapic_lock, flags); 1743 spin_unlock_irqrestore(&ioapic_lock, flags);
1746 1744
1747 old_id = mp_ioapics[apic].mpc_apicid; 1745 old_id = mp_ioapics[apic].mpc_apicid;
1748 1746
1749 if (mp_ioapics[apic].mpc_apicid >= get_physical_broadcast()) { 1747 if (mp_ioapics[apic].mpc_apicid >= get_physical_broadcast()) {
@@ -1795,7 +1793,7 @@ static void __init setup_ioapic_ids_from_mpc(void)
1795 /* 1793 /*
1796 * Read the right value from the MPC table and 1794 * Read the right value from the MPC table and
1797 * write it into the ID register. 1795 * write it into the ID register.
1798 */ 1796 */
1799 apic_printk(APIC_VERBOSE, KERN_INFO 1797 apic_printk(APIC_VERBOSE, KERN_INFO
1800 "...changing IO-APIC physical APIC ID to %d ...", 1798 "...changing IO-APIC physical APIC ID to %d ...",
1801 mp_ioapics[apic].mpc_apicid); 1799 mp_ioapics[apic].mpc_apicid);
@@ -2015,7 +2013,7 @@ static void ack_apic(unsigned int irq)
2015 ack_APIC_irq(); 2013 ack_APIC_irq();
2016} 2014}
2017 2015
2018static void mask_lapic_irq (unsigned int irq) 2016static void mask_lapic_irq(unsigned int irq)
2019{ 2017{
2020 unsigned long v; 2018 unsigned long v;
2021 2019
@@ -2023,7 +2021,7 @@ static void mask_lapic_irq (unsigned int irq)
2023 apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED); 2021 apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
2024} 2022}
2025 2023
2026static void unmask_lapic_irq (unsigned int irq) 2024static void unmask_lapic_irq(unsigned int irq)
2027{ 2025{
2028 unsigned long v; 2026 unsigned long v;
2029 2027
@@ -2041,14 +2039,14 @@ static struct irq_chip lapic_chip __read_mostly = {
2041static void __init setup_nmi(void) 2039static void __init setup_nmi(void)
2042{ 2040{
2043 /* 2041 /*
2044 * Dirty trick to enable the NMI watchdog ... 2042 * Dirty trick to enable the NMI watchdog ...
2045 * We put the 8259A master into AEOI mode and 2043 * We put the 8259A master into AEOI mode and
2046 * unmask on all local APICs LVT0 as NMI. 2044 * unmask on all local APICs LVT0 as NMI.
2047 * 2045 *
2048 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire') 2046 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
2049 * is from Maciej W. Rozycki - so we do not have to EOI from 2047 * is from Maciej W. Rozycki - so we do not have to EOI from
2050 * the NMI handler or the timer interrupt. 2048 * the NMI handler or the timer interrupt.
2051 */ 2049 */
2052 apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ..."); 2050 apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
2053 2051
2054 enable_NMI_through_LVT0(); 2052 enable_NMI_through_LVT0();
@@ -2312,10 +2310,10 @@ void __init setup_IO_APIC(void)
2312 * Called after all the initialization is done. If we didnt find any 2310 * Called after all the initialization is done. If we didnt find any
2313 * APIC bugs then we can allow the modify fast path 2311 * APIC bugs then we can allow the modify fast path
2314 */ 2312 */
2315 2313
2316static int __init io_apic_bug_finalize(void) 2314static int __init io_apic_bug_finalize(void)
2317{ 2315{
2318 if(sis_apic_bug == -1) 2316 if (sis_apic_bug == -1)
2319 sis_apic_bug = 0; 2317 sis_apic_bug = 0;
2320 return 0; 2318 return 0;
2321} 2319}
@@ -2326,17 +2324,17 @@ struct sysfs_ioapic_data {
2326 struct sys_device dev; 2324 struct sys_device dev;
2327 struct IO_APIC_route_entry entry[0]; 2325 struct IO_APIC_route_entry entry[0];
2328}; 2326};
2329static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS]; 2327static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];
2330 2328
2331static int ioapic_suspend(struct sys_device *dev, pm_message_t state) 2329static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
2332{ 2330{
2333 struct IO_APIC_route_entry *entry; 2331 struct IO_APIC_route_entry *entry;
2334 struct sysfs_ioapic_data *data; 2332 struct sysfs_ioapic_data *data;
2335 int i; 2333 int i;
2336 2334
2337 data = container_of(dev, struct sysfs_ioapic_data, dev); 2335 data = container_of(dev, struct sysfs_ioapic_data, dev);
2338 entry = data->entry; 2336 entry = data->entry;
2339 for (i = 0; i < nr_ioapic_registers[dev->id]; i ++) 2337 for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
2340 entry[i] = ioapic_read_entry(dev->id, i); 2338 entry[i] = ioapic_read_entry(dev->id, i);
2341 2339
2342 return 0; 2340 return 0;
@@ -2349,7 +2347,7 @@ static int ioapic_resume(struct sys_device *dev)
2349 unsigned long flags; 2347 unsigned long flags;
2350 union IO_APIC_reg_00 reg_00; 2348 union IO_APIC_reg_00 reg_00;
2351 int i; 2349 int i;
2352 2350
2353 data = container_of(dev, struct sysfs_ioapic_data, dev); 2351 data = container_of(dev, struct sysfs_ioapic_data, dev);
2354 entry = data->entry; 2352 entry = data->entry;
2355 2353
@@ -2360,7 +2358,7 @@ static int ioapic_resume(struct sys_device *dev)
2360 io_apic_write(dev->id, 0, reg_00.raw); 2358 io_apic_write(dev->id, 0, reg_00.raw);
2361 } 2359 }
2362 spin_unlock_irqrestore(&ioapic_lock, flags); 2360 spin_unlock_irqrestore(&ioapic_lock, flags);
2363 for (i = 0; i < nr_ioapic_registers[dev->id]; i ++) 2361 for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
2364 ioapic_write_entry(dev->id, i, entry[i]); 2362 ioapic_write_entry(dev->id, i, entry[i]);
2365 2363
2366 return 0; 2364 return 0;
@@ -2374,15 +2372,15 @@ static struct sysdev_class ioapic_sysdev_class = {
2374 2372
2375static int __init ioapic_init_sysfs(void) 2373static int __init ioapic_init_sysfs(void)
2376{ 2374{
2377 struct sys_device * dev; 2375 struct sys_device *dev;
2378 int i, size, error = 0; 2376 int i, size, error = 0;
2379 2377
2380 error = sysdev_class_register(&ioapic_sysdev_class); 2378 error = sysdev_class_register(&ioapic_sysdev_class);
2381 if (error) 2379 if (error)
2382 return error; 2380 return error;
2383 2381
2384 for (i = 0; i < nr_ioapics; i++ ) { 2382 for (i = 0; i < nr_ioapics; i++) {
2385 size = sizeof(struct sys_device) + nr_ioapic_registers[i] 2383 size = sizeof(struct sys_device) + nr_ioapic_registers[i]
2386 * sizeof(struct IO_APIC_route_entry); 2384 * sizeof(struct IO_APIC_route_entry);
2387 mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL); 2385 mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
2388 if (!mp_ioapic_data[i]) { 2386 if (!mp_ioapic_data[i]) {
@@ -2391,7 +2389,7 @@ static int __init ioapic_init_sysfs(void)
2391 } 2389 }
2392 memset(mp_ioapic_data[i], 0, size); 2390 memset(mp_ioapic_data[i], 0, size);
2393 dev = &mp_ioapic_data[i]->dev; 2391 dev = &mp_ioapic_data[i]->dev;
2394 dev->id = i; 2392 dev->id = i;
2395 dev->cls = &ioapic_sysdev_class; 2393 dev->cls = &ioapic_sysdev_class;
2396 error = sysdev_register(dev); 2394 error = sysdev_register(dev);
2397 if (error) { 2395 if (error) {
@@ -2466,7 +2464,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
2466 msg->address_lo = 2464 msg->address_lo =
2467 MSI_ADDR_BASE_LO | 2465 MSI_ADDR_BASE_LO |
2468 ((INT_DEST_MODE == 0) ? 2466 ((INT_DEST_MODE == 0) ?
2469 MSI_ADDR_DEST_MODE_PHYSICAL: 2467 MSI_ADDR_DEST_MODE_PHYSICAL:
2470 MSI_ADDR_DEST_MODE_LOGICAL) | 2468 MSI_ADDR_DEST_MODE_LOGICAL) |
2471 ((INT_DELIVERY_MODE != dest_LowestPrio) ? 2469 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
2472 MSI_ADDR_REDIRECTION_CPU: 2470 MSI_ADDR_REDIRECTION_CPU:
@@ -2477,7 +2475,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
2477 MSI_DATA_TRIGGER_EDGE | 2475 MSI_DATA_TRIGGER_EDGE |
2478 MSI_DATA_LEVEL_ASSERT | 2476 MSI_DATA_LEVEL_ASSERT |
2479 ((INT_DELIVERY_MODE != dest_LowestPrio) ? 2477 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
2480 MSI_DATA_DELIVERY_FIXED: 2478 MSI_DATA_DELIVERY_FIXED:
2481 MSI_DATA_DELIVERY_LOWPRI) | 2479 MSI_DATA_DELIVERY_LOWPRI) |
2482 MSI_DATA_VECTOR(vector); 2480 MSI_DATA_VECTOR(vector);
2483 } 2481 }
@@ -2648,12 +2646,12 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
2648#endif /* CONFIG_HT_IRQ */ 2646#endif /* CONFIG_HT_IRQ */
2649 2647
2650/* -------------------------------------------------------------------------- 2648/* --------------------------------------------------------------------------
2651 ACPI-based IOAPIC Configuration 2649 ACPI-based IOAPIC Configuration
2652 -------------------------------------------------------------------------- */ 2650 -------------------------------------------------------------------------- */
2653 2651
2654#ifdef CONFIG_ACPI 2652#ifdef CONFIG_ACPI
2655 2653
2656int __init io_apic_get_unique_id (int ioapic, int apic_id) 2654int __init io_apic_get_unique_id(int ioapic, int apic_id)
2657{ 2655{
2658 union IO_APIC_reg_00 reg_00; 2656 union IO_APIC_reg_00 reg_00;
2659 static physid_mask_t apic_id_map = PHYSID_MASK_NONE; 2657 static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
@@ -2662,10 +2660,10 @@ int __init io_apic_get_unique_id (int ioapic, int apic_id)
2662 int i = 0; 2660 int i = 0;
2663 2661
2664 /* 2662 /*
2665 * The P4 platform supports up to 256 APIC IDs on two separate APIC 2663 * The P4 platform supports up to 256 APIC IDs on two separate APIC
2666 * buses (one for LAPICs, one for IOAPICs), where predecessors only 2664 * buses (one for LAPICs, one for IOAPICs), where predecessors only
2667 * supports up to 16 on one shared APIC bus. 2665 * supports up to 16 on one shared APIC bus.
2668 * 2666 *
2669 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full 2667 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
2670 * advantage of new APIC bus architecture. 2668 * advantage of new APIC bus architecture.
2671 */ 2669 */
@@ -2684,7 +2682,7 @@ int __init io_apic_get_unique_id (int ioapic, int apic_id)
2684 } 2682 }
2685 2683
2686 /* 2684 /*
2687 * Every APIC in a system must have a unique ID or we get lots of nice 2685 * Every APIC in a system must have a unique ID or we get lots of nice
2688 * 'stuck on smp_invalidate_needed IPI wait' messages. 2686 * 'stuck on smp_invalidate_needed IPI wait' messages.
2689 */ 2687 */
2690 if (check_apicid_used(apic_id_map, apic_id)) { 2688 if (check_apicid_used(apic_id_map, apic_id)) {
@@ -2701,7 +2699,7 @@ int __init io_apic_get_unique_id (int ioapic, int apic_id)
2701 "trying %d\n", ioapic, apic_id, i); 2699 "trying %d\n", ioapic, apic_id, i);
2702 2700
2703 apic_id = i; 2701 apic_id = i;
2704 } 2702 }
2705 2703
2706 tmp = apicid_to_cpu_present(apic_id); 2704 tmp = apicid_to_cpu_present(apic_id);
2707 physids_or(apic_id_map, apic_id_map, tmp); 2705 physids_or(apic_id_map, apic_id_map, tmp);
@@ -2728,7 +2726,7 @@ int __init io_apic_get_unique_id (int ioapic, int apic_id)
2728} 2726}
2729 2727
2730 2728
2731int __init io_apic_get_version (int ioapic) 2729int __init io_apic_get_version(int ioapic)
2732{ 2730{
2733 union IO_APIC_reg_01 reg_01; 2731 union IO_APIC_reg_01 reg_01;
2734 unsigned long flags; 2732 unsigned long flags;
@@ -2741,7 +2739,7 @@ int __init io_apic_get_version (int ioapic)
2741} 2739}
2742 2740
2743 2741
2744int __init io_apic_get_redir_entries (int ioapic) 2742int __init io_apic_get_redir_entries(int ioapic)
2745{ 2743{
2746 union IO_APIC_reg_01 reg_01; 2744 union IO_APIC_reg_01 reg_01;
2747 unsigned long flags; 2745 unsigned long flags;
@@ -2754,7 +2752,7 @@ int __init io_apic_get_redir_entries (int ioapic)
2754} 2752}
2755 2753
2756 2754
2757int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low) 2755int io_apic_set_pci_routing(int ioapic, int pin, int irq, int edge_level, int active_high_low)
2758{ 2756{
2759 struct IO_APIC_route_entry entry; 2757 struct IO_APIC_route_entry entry;
2760 2758
@@ -2770,7 +2768,7 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
2770 * corresponding device driver registers for this IRQ. 2768 * corresponding device driver registers for this IRQ.
2771 */ 2769 */
2772 2770
2773 memset(&entry,0,sizeof(entry)); 2771 memset(&entry, 0, sizeof(entry));
2774 2772
2775 entry.delivery_mode = INT_DELIVERY_MODE; 2773 entry.delivery_mode = INT_DELIVERY_MODE;
2776 entry.dest_mode = INT_DEST_MODE; 2774 entry.dest_mode = INT_DEST_MODE;