aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel/apic
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86/kernel/apic')
-rw-r--r--arch/x86/kernel/apic/apic.c20
-rw-r--r--arch/x86/kernel/apic/apic_flat_64.c5
-rw-r--r--arch/x86/kernel/apic/io_apic.c103
-rw-r--r--arch/x86/kernel/apic/probe_32.c29
-rw-r--r--arch/x86/kernel/apic/probe_64.c13
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c38
6 files changed, 122 insertions, 86 deletions
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index aa57c079c98f..dfca210f6a10 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -61,12 +61,6 @@ unsigned int boot_cpu_physical_apicid = -1U;
61 61
62/* 62/*
63 * The highest APIC ID seen during enumeration. 63 * The highest APIC ID seen during enumeration.
64 *
65 * On AMD, this determines the messaging protocol we can use: if all APIC IDs
66 * are in the 0 ... 7 range, then we can use logical addressing which
67 * has some performance advantages (better broadcasting).
68 *
69 * If there's an APIC ID above 8, we use physical addressing.
70 */ 64 */
71unsigned int max_physical_apicid; 65unsigned int max_physical_apicid;
72 66
@@ -1647,9 +1641,7 @@ int __init APIC_init_uniprocessor(void)
1647#endif 1641#endif
1648 1642
1649 enable_IR_x2apic(); 1643 enable_IR_x2apic();
1650#ifdef CONFIG_X86_64
1651 default_setup_apic_routing(); 1644 default_setup_apic_routing();
1652#endif
1653 1645
1654 verify_local_APIC(); 1646 verify_local_APIC();
1655 connect_bsp_APIC(); 1647 connect_bsp_APIC();
@@ -1897,18 +1889,6 @@ void __cpuinit generic_processor_info(int apicid, int version)
1897 if (apicid > max_physical_apicid) 1889 if (apicid > max_physical_apicid)
1898 max_physical_apicid = apicid; 1890 max_physical_apicid = apicid;
1899 1891
1900#ifdef CONFIG_X86_32
1901 switch (boot_cpu_data.x86_vendor) {
1902 case X86_VENDOR_INTEL:
1903 if (num_processors > 8)
1904 def_to_bigsmp = 1;
1905 break;
1906 case X86_VENDOR_AMD:
1907 if (max_physical_apicid >= 8)
1908 def_to_bigsmp = 1;
1909 }
1910#endif
1911
1912#if defined(CONFIG_SMP) || defined(CONFIG_X86_64) 1892#if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
1913 early_per_cpu(x86_cpu_to_apicid, cpu) = apicid; 1893 early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
1914 early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid; 1894 early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index eacbd2b31d27..e3c3d820c325 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -240,6 +240,11 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
240 printk(KERN_DEBUG "system APIC only can use physical flat"); 240 printk(KERN_DEBUG "system APIC only can use physical flat");
241 return 1; 241 return 1;
242 } 242 }
243
244 if (!strncmp(oem_id, "IBM", 3) && !strncmp(oem_table_id, "EXA", 3)) {
245 printk(KERN_DEBUG "IBM Summit detected, will use apic physical");
246 return 1;
247 }
243#endif 248#endif
244 249
245 return 0; 250 return 0;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index d55e43d352b3..979589881c80 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1541,6 +1541,56 @@ static void __init setup_IO_APIC_irqs(void)
1541} 1541}
1542 1542
1543/* 1543/*
 1544 * for the gsi that is not in the first ioapic
 1545 * but cannot use acpi_register_gsi()
 1546 * like some special sci in IBM x3330
1547 */
1548void setup_IO_APIC_irq_extra(u32 gsi)
1549{
1550 int apic_id = 0, pin, idx, irq;
1551 int node = cpu_to_node(boot_cpu_id);
1552 struct irq_desc *desc;
1553 struct irq_cfg *cfg;
1554
1555 /*
1556 * Convert 'gsi' to 'ioapic.pin'.
1557 */
1558 apic_id = mp_find_ioapic(gsi);
1559 if (apic_id < 0)
1560 return;
1561
1562 pin = mp_find_ioapic_pin(apic_id, gsi);
1563 idx = find_irq_entry(apic_id, pin, mp_INT);
1564 if (idx == -1)
1565 return;
1566
1567 irq = pin_2_irq(idx, apic_id, pin);
1568#ifdef CONFIG_SPARSE_IRQ
1569 desc = irq_to_desc(irq);
1570 if (desc)
1571 return;
1572#endif
1573 desc = irq_to_desc_alloc_node(irq, node);
1574 if (!desc) {
1575 printk(KERN_INFO "can not get irq_desc for %d\n", irq);
1576 return;
1577 }
1578
1579 cfg = desc->chip_data;
1580 add_pin_to_irq_node(cfg, node, apic_id, pin);
1581
1582 if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) {
1583 pr_debug("Pin %d-%d already programmed\n",
1584 mp_ioapics[apic_id].apicid, pin);
1585 return;
1586 }
1587 set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed);
1588
1589 setup_IO_APIC_irq(apic_id, pin, irq, desc,
1590 irq_trigger(idx), irq_polarity(idx));
1591}
1592
1593/*
1544 * Set up the timer pin, possibly with the 8259A-master behind. 1594 * Set up the timer pin, possibly with the 8259A-master behind.
1545 */ 1595 */
1546static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin, 1596static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
@@ -1832,7 +1882,7 @@ __apicdebuginit(void) print_PIC(void)
1832 1882
1833 printk(KERN_DEBUG "\nprinting PIC contents\n"); 1883 printk(KERN_DEBUG "\nprinting PIC contents\n");
1834 1884
1835 spin_lock_irqsave(&i8259A_lock, flags); 1885 raw_spin_lock_irqsave(&i8259A_lock, flags);
1836 1886
1837 v = inb(0xa1) << 8 | inb(0x21); 1887 v = inb(0xa1) << 8 | inb(0x21);
1838 printk(KERN_DEBUG "... PIC IMR: %04x\n", v); 1888 printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
@@ -1846,7 +1896,7 @@ __apicdebuginit(void) print_PIC(void)
1846 outb(0x0a,0xa0); 1896 outb(0x0a,0xa0);
1847 outb(0x0a,0x20); 1897 outb(0x0a,0x20);
1848 1898
1849 spin_unlock_irqrestore(&i8259A_lock, flags); 1899 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
1850 1900
1851 printk(KERN_DEBUG "... PIC ISR: %04x\n", v); 1901 printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
1852 1902
@@ -2436,6 +2486,13 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
2436 cfg = irq_cfg(irq); 2486 cfg = irq_cfg(irq);
2437 raw_spin_lock(&desc->lock); 2487 raw_spin_lock(&desc->lock);
2438 2488
2489 /*
2490 * Check if the irq migration is in progress. If so, we
2491 * haven't received the cleanup request yet for this irq.
2492 */
2493 if (cfg->move_in_progress)
2494 goto unlock;
2495
2439 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) 2496 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
2440 goto unlock; 2497 goto unlock;
2441 2498
@@ -3223,12 +3280,9 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
3223 } 3280 }
3224 raw_spin_unlock_irqrestore(&vector_lock, flags); 3281 raw_spin_unlock_irqrestore(&vector_lock, flags);
3225 3282
3226 if (irq > 0) { 3283 if (irq > 0)
3227 dynamic_irq_init(irq); 3284 dynamic_irq_init_keep_chip_data(irq);
3228 /* restore it, in case dynamic_irq_init clear it */ 3285
3229 if (desc_new)
3230 desc_new->chip_data = cfg_new;
3231 }
3232 return irq; 3286 return irq;
3233} 3287}
3234 3288
@@ -3250,19 +3304,12 @@ int create_irq(void)
3250void destroy_irq(unsigned int irq) 3304void destroy_irq(unsigned int irq)
3251{ 3305{
3252 unsigned long flags; 3306 unsigned long flags;
3253 struct irq_cfg *cfg;
3254 struct irq_desc *desc;
3255 3307
3256 /* store it, in case dynamic_irq_cleanup clear it */ 3308 dynamic_irq_cleanup_keep_chip_data(irq);
3257 desc = irq_to_desc(irq);
3258 cfg = desc->chip_data;
3259 dynamic_irq_cleanup(irq);
3260 /* connect back irq_cfg */
3261 desc->chip_data = cfg;
3262 3309
3263 free_irte(irq); 3310 free_irte(irq);
3264 raw_spin_lock_irqsave(&vector_lock, flags); 3311 raw_spin_lock_irqsave(&vector_lock, flags);
3265 __clear_irq_vector(irq, cfg); 3312 __clear_irq_vector(irq, get_irq_chip_data(irq));
3266 raw_spin_unlock_irqrestore(&vector_lock, flags); 3313 raw_spin_unlock_irqrestore(&vector_lock, flags);
3267} 3314}
3268 3315
@@ -3829,28 +3876,6 @@ void __init probe_nr_irqs_gsi(void)
3829 printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi); 3876 printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
3830} 3877}
3831 3878
3832#ifdef CONFIG_SPARSE_IRQ
3833int __init arch_probe_nr_irqs(void)
3834{
3835 int nr;
3836
3837 if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
3838 nr_irqs = NR_VECTORS * nr_cpu_ids;
3839
3840 nr = nr_irqs_gsi + 8 * nr_cpu_ids;
3841#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
3842 /*
3843 * for MSI and HT dyn irq
3844 */
3845 nr += nr_irqs_gsi * 64;
3846#endif
3847 if (nr < nr_irqs)
3848 nr_irqs = nr;
3849
3850 return 0;
3851}
3852#endif
3853
3854static int __io_apic_set_pci_routing(struct device *dev, int irq, 3879static int __io_apic_set_pci_routing(struct device *dev, int irq,
3855 struct io_apic_irq_attr *irq_attr) 3880 struct io_apic_irq_attr *irq_attr)
3856{ 3881{
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 1a6559f6768c..99d2fe016084 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -52,7 +52,32 @@ static int __init print_ipi_mode(void)
52} 52}
53late_initcall(print_ipi_mode); 53late_initcall(print_ipi_mode);
54 54
55void default_setup_apic_routing(void) 55void __init default_setup_apic_routing(void)
56{
57 int version = apic_version[boot_cpu_physical_apicid];
58
59 if (num_possible_cpus() > 8) {
60 switch (boot_cpu_data.x86_vendor) {
61 case X86_VENDOR_INTEL:
62 if (!APIC_XAPIC(version)) {
63 def_to_bigsmp = 0;
64 break;
65 }
66 /* If P4 and above fall through */
67 case X86_VENDOR_AMD:
68 def_to_bigsmp = 1;
69 }
70 }
71
72#ifdef CONFIG_X86_BIGSMP
73 generic_bigsmp_probe();
74#endif
75
76 if (apic->setup_apic_routing)
77 apic->setup_apic_routing();
78}
79
80static void setup_apic_flat_routing(void)
56{ 81{
57#ifdef CONFIG_X86_IO_APIC 82#ifdef CONFIG_X86_IO_APIC
58 printk(KERN_INFO 83 printk(KERN_INFO
@@ -103,7 +128,7 @@ struct apic apic_default = {
103 .init_apic_ldr = default_init_apic_ldr, 128 .init_apic_ldr = default_init_apic_ldr,
104 129
105 .ioapic_phys_id_map = default_ioapic_phys_id_map, 130 .ioapic_phys_id_map = default_ioapic_phys_id_map,
106 .setup_apic_routing = default_setup_apic_routing, 131 .setup_apic_routing = setup_apic_flat_routing,
107 .multi_timer_check = NULL, 132 .multi_timer_check = NULL,
108 .apicid_to_node = default_apicid_to_node, 133 .apicid_to_node = default_apicid_to_node,
109 .cpu_to_logical_apicid = default_cpu_to_logical_apicid, 134 .cpu_to_logical_apicid = default_cpu_to_logical_apicid,
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
index c4cbd3080c1c..83e9be4778e2 100644
--- a/arch/x86/kernel/apic/probe_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -67,17 +67,8 @@ void __init default_setup_apic_routing(void)
67 } 67 }
68#endif 68#endif
69 69
70 if (apic == &apic_flat) { 70 if (apic == &apic_flat && num_possible_cpus() > 8)
71 switch (boot_cpu_data.x86_vendor) { 71 apic = &apic_physflat;
72 case X86_VENDOR_INTEL:
73 if (num_processors > 8)
74 apic = &apic_physflat;
75 break;
76 case X86_VENDOR_AMD:
77 if (max_physical_apicid >= 8)
78 apic = &apic_physflat;
79 }
80 }
81 72
82 printk(KERN_INFO "Setting APIC routing to %s\n", apic->name); 73 printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
83 74
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index d56b0efb2057..21db3cbea7dc 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -36,6 +36,8 @@ DEFINE_PER_CPU(int, x2apic_extra_bits);
36 36
37static enum uv_system_type uv_system_type; 37static enum uv_system_type uv_system_type;
38static u64 gru_start_paddr, gru_end_paddr; 38static u64 gru_start_paddr, gru_end_paddr;
39int uv_min_hub_revision_id;
40EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
39 41
40static inline bool is_GRU_range(u64 start, u64 end) 42static inline bool is_GRU_range(u64 start, u64 end)
41{ 43{
@@ -55,12 +57,19 @@ static int early_get_nodeid(void)
55 mmr = early_ioremap(UV_LOCAL_MMR_BASE | UVH_NODE_ID, sizeof(*mmr)); 57 mmr = early_ioremap(UV_LOCAL_MMR_BASE | UVH_NODE_ID, sizeof(*mmr));
56 node_id.v = *mmr; 58 node_id.v = *mmr;
57 early_iounmap(mmr, sizeof(*mmr)); 59 early_iounmap(mmr, sizeof(*mmr));
60
61 /* Currently, all blades have same revision number */
62 uv_min_hub_revision_id = node_id.s.revision;
63
58 return node_id.s.node_id; 64 return node_id.s.node_id;
59} 65}
60 66
61static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id) 67static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
62{ 68{
69 int nodeid;
70
63 if (!strcmp(oem_id, "SGI")) { 71 if (!strcmp(oem_id, "SGI")) {
72 nodeid = early_get_nodeid();
64 x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range; 73 x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
65 if (!strcmp(oem_table_id, "UVL")) 74 if (!strcmp(oem_table_id, "UVL"))
66 uv_system_type = UV_LEGACY_APIC; 75 uv_system_type = UV_LEGACY_APIC;
@@ -68,7 +77,7 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
68 uv_system_type = UV_X2APIC; 77 uv_system_type = UV_X2APIC;
69 else if (!strcmp(oem_table_id, "UVH")) { 78 else if (!strcmp(oem_table_id, "UVH")) {
70 __get_cpu_var(x2apic_extra_bits) = 79 __get_cpu_var(x2apic_extra_bits) =
71 early_get_nodeid() << (UV_APIC_PNODE_SHIFT - 1); 80 nodeid << (UV_APIC_PNODE_SHIFT - 1);
72 uv_system_type = UV_NON_UNIQUE_APIC; 81 uv_system_type = UV_NON_UNIQUE_APIC;
73 return 1; 82 return 1;
74 } 83 }
@@ -374,13 +383,13 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
374 383
375enum map_type {map_wb, map_uc}; 384enum map_type {map_wb, map_uc};
376 385
377static __init void map_high(char *id, unsigned long base, int shift, 386static __init void map_high(char *id, unsigned long base, int pshift,
378 int max_pnode, enum map_type map_type) 387 int bshift, int max_pnode, enum map_type map_type)
379{ 388{
380 unsigned long bytes, paddr; 389 unsigned long bytes, paddr;
381 390
382 paddr = base << shift; 391 paddr = base << pshift;
383 bytes = (1UL << shift) * (max_pnode + 1); 392 bytes = (1UL << bshift) * (max_pnode + 1);
384 printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, 393 printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
385 paddr + bytes); 394 paddr + bytes);
386 if (map_type == map_uc) 395 if (map_type == map_uc)
@@ -396,7 +405,7 @@ static __init void map_gru_high(int max_pnode)
396 405
397 gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR); 406 gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
398 if (gru.s.enable) { 407 if (gru.s.enable) {
399 map_high("GRU", gru.s.base, shift, max_pnode, map_wb); 408 map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb);
400 gru_start_paddr = ((u64)gru.s.base << shift); 409 gru_start_paddr = ((u64)gru.s.base << shift);
401 gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1); 410 gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1);
402 411
@@ -410,7 +419,7 @@ static __init void map_mmr_high(int max_pnode)
410 419
411 mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR); 420 mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
412 if (mmr.s.enable) 421 if (mmr.s.enable)
413 map_high("MMR", mmr.s.base, shift, max_pnode, map_uc); 422 map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc);
414} 423}
415 424
416static __init void map_mmioh_high(int max_pnode) 425static __init void map_mmioh_high(int max_pnode)
@@ -420,7 +429,8 @@ static __init void map_mmioh_high(int max_pnode)
420 429
421 mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR); 430 mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
422 if (mmioh.s.enable) 431 if (mmioh.s.enable)
423 map_high("MMIOH", mmioh.s.base, shift, max_pnode, map_uc); 432 map_high("MMIOH", mmioh.s.base, shift, mmioh.s.m_io,
433 max_pnode, map_uc);
424} 434}
425 435
426static __init void map_low_mmrs(void) 436static __init void map_low_mmrs(void)
@@ -629,8 +639,10 @@ void __init uv_system_init(void)
629 uv_rtc_init(); 639 uv_rtc_init();
630 640
631 for_each_present_cpu(cpu) { 641 for_each_present_cpu(cpu) {
642 int apicid = per_cpu(x86_cpu_to_apicid, cpu);
643
632 nid = cpu_to_node(cpu); 644 nid = cpu_to_node(cpu);
633 pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu)); 645 pnode = uv_apicid_to_pnode(apicid);
634 blade = boot_pnode_to_blade(pnode); 646 blade = boot_pnode_to_blade(pnode);
635 lcpu = uv_blade_info[blade].nr_possible_cpus; 647 lcpu = uv_blade_info[blade].nr_possible_cpus;
636 uv_blade_info[blade].nr_possible_cpus++; 648 uv_blade_info[blade].nr_possible_cpus++;
@@ -651,15 +663,13 @@ void __init uv_system_init(void)
651 uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra; 663 uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra;
652 uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base; 664 uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
653 uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id; 665 uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
654 uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu; 666 uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
655 uv_node_to_blade[nid] = blade; 667 uv_node_to_blade[nid] = blade;
656 uv_cpu_to_blade[cpu] = blade; 668 uv_cpu_to_blade[cpu] = blade;
657 max_pnode = max(pnode, max_pnode); 669 max_pnode = max(pnode, max_pnode);
658 670
659 printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, " 671 printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, lcpu %d, blade %d\n",
660 "lcpu %d, blade %d\n", 672 cpu, apicid, pnode, nid, lcpu, blade);
661 cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid,
662 lcpu, blade);
663 } 673 }
664 674
665 /* Add blade/pnode info for nodes without cpus */ 675 /* Add blade/pnode info for nodes without cpus */