diff options
Diffstat (limited to 'arch/x86/kernel/apic')
-rw-r--r--  arch/x86/kernel/apic/apic.c          | 20
-rw-r--r--  arch/x86/kernel/apic/apic_flat_64.c  |  5
-rw-r--r--  arch/x86/kernel/apic/io_apic.c       |  7
-rw-r--r--  arch/x86/kernel/apic/probe_32.c      | 29
-rw-r--r--  arch/x86/kernel/apic/probe_64.c      | 13
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c   | 26
6 files changed, 59 insertions, 41 deletions
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index aa57c079c98f..dfca210f6a10 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
| @@ -61,12 +61,6 @@ unsigned int boot_cpu_physical_apicid = -1U; | |||
| 61 | 61 | ||
| 62 | /* | 62 | /* |
| 63 | * The highest APIC ID seen during enumeration. | 63 | * The highest APIC ID seen during enumeration. |
| 64 | * | ||
| 65 | * On AMD, this determines the messaging protocol we can use: if all APIC IDs | ||
| 66 | * are in the 0 ... 7 range, then we can use logical addressing which | ||
| 67 | * has some performance advantages (better broadcasting). | ||
| 68 | * | ||
| 69 | * If there's an APIC ID above 8, we use physical addressing. | ||
| 70 | */ | 64 | */ |
| 71 | unsigned int max_physical_apicid; | 65 | unsigned int max_physical_apicid; |
| 72 | 66 | ||
| @@ -1647,9 +1641,7 @@ int __init APIC_init_uniprocessor(void) | |||
| 1647 | #endif | 1641 | #endif |
| 1648 | 1642 | ||
| 1649 | enable_IR_x2apic(); | 1643 | enable_IR_x2apic(); |
| 1650 | #ifdef CONFIG_X86_64 | ||
| 1651 | default_setup_apic_routing(); | 1644 | default_setup_apic_routing(); |
| 1652 | #endif | ||
| 1653 | 1645 | ||
| 1654 | verify_local_APIC(); | 1646 | verify_local_APIC(); |
| 1655 | connect_bsp_APIC(); | 1647 | connect_bsp_APIC(); |
| @@ -1897,18 +1889,6 @@ void __cpuinit generic_processor_info(int apicid, int version) | |||
| 1897 | if (apicid > max_physical_apicid) | 1889 | if (apicid > max_physical_apicid) |
| 1898 | max_physical_apicid = apicid; | 1890 | max_physical_apicid = apicid; |
| 1899 | 1891 | ||
| 1900 | #ifdef CONFIG_X86_32 | ||
| 1901 | switch (boot_cpu_data.x86_vendor) { | ||
| 1902 | case X86_VENDOR_INTEL: | ||
| 1903 | if (num_processors > 8) | ||
| 1904 | def_to_bigsmp = 1; | ||
| 1905 | break; | ||
| 1906 | case X86_VENDOR_AMD: | ||
| 1907 | if (max_physical_apicid >= 8) | ||
| 1908 | def_to_bigsmp = 1; | ||
| 1909 | } | ||
| 1910 | #endif | ||
| 1911 | |||
| 1912 | #if defined(CONFIG_SMP) || defined(CONFIG_X86_64) | 1892 | #if defined(CONFIG_SMP) || defined(CONFIG_X86_64) |
| 1913 | early_per_cpu(x86_cpu_to_apicid, cpu) = apicid; | 1893 | early_per_cpu(x86_cpu_to_apicid, cpu) = apicid; |
| 1914 | early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid; | 1894 | early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid; |
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c index eacbd2b31d27..e3c3d820c325 100644 --- a/arch/x86/kernel/apic/apic_flat_64.c +++ b/arch/x86/kernel/apic/apic_flat_64.c | |||
| @@ -240,6 +240,11 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
| 240 | printk(KERN_DEBUG "system APIC only can use physical flat"); | 240 | printk(KERN_DEBUG "system APIC only can use physical flat"); |
| 241 | return 1; | 241 | return 1; |
| 242 | } | 242 | } |
| 243 | |||
| 244 | if (!strncmp(oem_id, "IBM", 3) && !strncmp(oem_table_id, "EXA", 3)) { | ||
| 245 | printk(KERN_DEBUG "IBM Summit detected, will use apic physical"); | ||
| 246 | return 1; | ||
| 247 | } | ||
| 243 | #endif | 248 | #endif |
| 244 | 249 | ||
| 245 | return 0; | 250 | return 0; |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index de00c4619a55..53243ca7816d 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
| @@ -2434,6 +2434,13 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) | |||
| 2434 | cfg = irq_cfg(irq); | 2434 | cfg = irq_cfg(irq); |
| 2435 | raw_spin_lock(&desc->lock); | 2435 | raw_spin_lock(&desc->lock); |
| 2436 | 2436 | ||
| 2437 | /* | ||
| 2438 | * Check if the irq migration is in progress. If so, we | ||
| 2439 | * haven't received the cleanup request yet for this irq. | ||
| 2440 | */ | ||
| 2441 | if (cfg->move_in_progress) | ||
| 2442 | goto unlock; | ||
| 2443 | |||
| 2437 | if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) | 2444 | if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) |
| 2438 | goto unlock; | 2445 | goto unlock; |
| 2439 | 2446 | ||
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index 1a6559f6768c..99d2fe016084 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c | |||
| @@ -52,7 +52,32 @@ static int __init print_ipi_mode(void) | |||
| 52 | } | 52 | } |
| 53 | late_initcall(print_ipi_mode); | 53 | late_initcall(print_ipi_mode); |
| 54 | 54 | ||
| 55 | void default_setup_apic_routing(void) | 55 | void __init default_setup_apic_routing(void) |
| 56 | { | ||
| 57 | int version = apic_version[boot_cpu_physical_apicid]; | ||
| 58 | |||
| 59 | if (num_possible_cpus() > 8) { | ||
| 60 | switch (boot_cpu_data.x86_vendor) { | ||
| 61 | case X86_VENDOR_INTEL: | ||
| 62 | if (!APIC_XAPIC(version)) { | ||
| 63 | def_to_bigsmp = 0; | ||
| 64 | break; | ||
| 65 | } | ||
| 66 | /* If P4 and above fall through */ | ||
| 67 | case X86_VENDOR_AMD: | ||
| 68 | def_to_bigsmp = 1; | ||
| 69 | } | ||
| 70 | } | ||
| 71 | |||
| 72 | #ifdef CONFIG_X86_BIGSMP | ||
| 73 | generic_bigsmp_probe(); | ||
| 74 | #endif | ||
| 75 | |||
| 76 | if (apic->setup_apic_routing) | ||
| 77 | apic->setup_apic_routing(); | ||
| 78 | } | ||
| 79 | |||
| 80 | static void setup_apic_flat_routing(void) | ||
| 56 | { | 81 | { |
| 57 | #ifdef CONFIG_X86_IO_APIC | 82 | #ifdef CONFIG_X86_IO_APIC |
| 58 | printk(KERN_INFO | 83 | printk(KERN_INFO |
| @@ -103,7 +128,7 @@ struct apic apic_default = { | |||
| 103 | .init_apic_ldr = default_init_apic_ldr, | 128 | .init_apic_ldr = default_init_apic_ldr, |
| 104 | 129 | ||
| 105 | .ioapic_phys_id_map = default_ioapic_phys_id_map, | 130 | .ioapic_phys_id_map = default_ioapic_phys_id_map, |
| 106 | .setup_apic_routing = default_setup_apic_routing, | 131 | .setup_apic_routing = setup_apic_flat_routing, |
| 107 | .multi_timer_check = NULL, | 132 | .multi_timer_check = NULL, |
| 108 | .apicid_to_node = default_apicid_to_node, | 133 | .apicid_to_node = default_apicid_to_node, |
| 109 | .cpu_to_logical_apicid = default_cpu_to_logical_apicid, | 134 | .cpu_to_logical_apicid = default_cpu_to_logical_apicid, |
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c index c4cbd3080c1c..83e9be4778e2 100644 --- a/arch/x86/kernel/apic/probe_64.c +++ b/arch/x86/kernel/apic/probe_64.c | |||
| @@ -67,17 +67,8 @@ void __init default_setup_apic_routing(void) | |||
| 67 | } | 67 | } |
| 68 | #endif | 68 | #endif |
| 69 | 69 | ||
| 70 | if (apic == &apic_flat) { | 70 | if (apic == &apic_flat && num_possible_cpus() > 8) |
| 71 | switch (boot_cpu_data.x86_vendor) { | 71 | apic = &apic_physflat; |
| 72 | case X86_VENDOR_INTEL: | ||
| 73 | if (num_processors > 8) | ||
| 74 | apic = &apic_physflat; | ||
| 75 | break; | ||
| 76 | case X86_VENDOR_AMD: | ||
| 77 | if (max_physical_apicid >= 8) | ||
| 78 | apic = &apic_physflat; | ||
| 79 | } | ||
| 80 | } | ||
| 81 | 72 | ||
| 82 | printk(KERN_INFO "Setting APIC routing to %s\n", apic->name); | 73 | printk(KERN_INFO "Setting APIC routing to %s\n", apic->name); |
| 83 | 74 | ||
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 5f92494dab61..21db3cbea7dc 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c | |||
| @@ -36,6 +36,8 @@ DEFINE_PER_CPU(int, x2apic_extra_bits); | |||
| 36 | 36 | ||
| 37 | static enum uv_system_type uv_system_type; | 37 | static enum uv_system_type uv_system_type; |
| 38 | static u64 gru_start_paddr, gru_end_paddr; | 38 | static u64 gru_start_paddr, gru_end_paddr; |
| 39 | int uv_min_hub_revision_id; | ||
| 40 | EXPORT_SYMBOL_GPL(uv_min_hub_revision_id); | ||
| 39 | 41 | ||
| 40 | static inline bool is_GRU_range(u64 start, u64 end) | 42 | static inline bool is_GRU_range(u64 start, u64 end) |
| 41 | { | 43 | { |
| @@ -55,12 +57,19 @@ static int early_get_nodeid(void) | |||
| 55 | mmr = early_ioremap(UV_LOCAL_MMR_BASE | UVH_NODE_ID, sizeof(*mmr)); | 57 | mmr = early_ioremap(UV_LOCAL_MMR_BASE | UVH_NODE_ID, sizeof(*mmr)); |
| 56 | node_id.v = *mmr; | 58 | node_id.v = *mmr; |
| 57 | early_iounmap(mmr, sizeof(*mmr)); | 59 | early_iounmap(mmr, sizeof(*mmr)); |
| 60 | |||
| 61 | /* Currently, all blades have same revision number */ | ||
| 62 | uv_min_hub_revision_id = node_id.s.revision; | ||
| 63 | |||
| 58 | return node_id.s.node_id; | 64 | return node_id.s.node_id; |
| 59 | } | 65 | } |
| 60 | 66 | ||
| 61 | static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | 67 | static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id) |
| 62 | { | 68 | { |
| 69 | int nodeid; | ||
| 70 | |||
| 63 | if (!strcmp(oem_id, "SGI")) { | 71 | if (!strcmp(oem_id, "SGI")) { |
| 72 | nodeid = early_get_nodeid(); | ||
| 64 | x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range; | 73 | x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range; |
| 65 | if (!strcmp(oem_table_id, "UVL")) | 74 | if (!strcmp(oem_table_id, "UVL")) |
| 66 | uv_system_type = UV_LEGACY_APIC; | 75 | uv_system_type = UV_LEGACY_APIC; |
| @@ -68,7 +77,7 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
| 68 | uv_system_type = UV_X2APIC; | 77 | uv_system_type = UV_X2APIC; |
| 69 | else if (!strcmp(oem_table_id, "UVH")) { | 78 | else if (!strcmp(oem_table_id, "UVH")) { |
| 70 | __get_cpu_var(x2apic_extra_bits) = | 79 | __get_cpu_var(x2apic_extra_bits) = |
| 71 | early_get_nodeid() << (UV_APIC_PNODE_SHIFT - 1); | 80 | nodeid << (UV_APIC_PNODE_SHIFT - 1); |
| 72 | uv_system_type = UV_NON_UNIQUE_APIC; | 81 | uv_system_type = UV_NON_UNIQUE_APIC; |
| 73 | return 1; | 82 | return 1; |
| 74 | } | 83 | } |
| @@ -374,13 +383,13 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size) | |||
| 374 | 383 | ||
| 375 | enum map_type {map_wb, map_uc}; | 384 | enum map_type {map_wb, map_uc}; |
| 376 | 385 | ||
| 377 | static __init void map_high(char *id, unsigned long base, int shift, | 386 | static __init void map_high(char *id, unsigned long base, int pshift, |
| 378 | int max_pnode, enum map_type map_type) | 387 | int bshift, int max_pnode, enum map_type map_type) |
| 379 | { | 388 | { |
| 380 | unsigned long bytes, paddr; | 389 | unsigned long bytes, paddr; |
| 381 | 390 | ||
| 382 | paddr = base << shift; | 391 | paddr = base << pshift; |
| 383 | bytes = (1UL << shift) * (max_pnode + 1); | 392 | bytes = (1UL << bshift) * (max_pnode + 1); |
| 384 | printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, | 393 | printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, |
| 385 | paddr + bytes); | 394 | paddr + bytes); |
| 386 | if (map_type == map_uc) | 395 | if (map_type == map_uc) |
| @@ -396,7 +405,7 @@ static __init void map_gru_high(int max_pnode) | |||
| 396 | 405 | ||
| 397 | gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR); | 406 | gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR); |
| 398 | if (gru.s.enable) { | 407 | if (gru.s.enable) { |
| 399 | map_high("GRU", gru.s.base, shift, max_pnode, map_wb); | 408 | map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb); |
| 400 | gru_start_paddr = ((u64)gru.s.base << shift); | 409 | gru_start_paddr = ((u64)gru.s.base << shift); |
| 401 | gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1); | 410 | gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1); |
| 402 | 411 | ||
| @@ -410,7 +419,7 @@ static __init void map_mmr_high(int max_pnode) | |||
| 410 | 419 | ||
| 411 | mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR); | 420 | mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR); |
| 412 | if (mmr.s.enable) | 421 | if (mmr.s.enable) |
| 413 | map_high("MMR", mmr.s.base, shift, max_pnode, map_uc); | 422 | map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc); |
| 414 | } | 423 | } |
| 415 | 424 | ||
| 416 | static __init void map_mmioh_high(int max_pnode) | 425 | static __init void map_mmioh_high(int max_pnode) |
| @@ -420,7 +429,8 @@ static __init void map_mmioh_high(int max_pnode) | |||
| 420 | 429 | ||
| 421 | mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR); | 430 | mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR); |
| 422 | if (mmioh.s.enable) | 431 | if (mmioh.s.enable) |
| 423 | map_high("MMIOH", mmioh.s.base, shift, max_pnode, map_uc); | 432 | map_high("MMIOH", mmioh.s.base, shift, mmioh.s.m_io, |
| 433 | max_pnode, map_uc); | ||
| 424 | } | 434 | } |
| 425 | 435 | ||
| 426 | static __init void map_low_mmrs(void) | 436 | static __init void map_low_mmrs(void) |
