Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/apic.c                 |  2 +-
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c  | 15 +++++++++++++++
-rw-r--r--  arch/x86/kernel/entry_64.S             |  1 +
-rw-r--r--  arch/x86/kernel/io_apic.c              |  5 +++--
-rw-r--r--  arch/x86/kernel/irqinit_32.c           | 12 ------------
-rw-r--r--  arch/x86/mach-default/setup.c          | 12 ++++++++++++
-rw-r--r--  arch/x86/mach-voyager/setup.c          | 12 +++++++++++-
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c    | 25 ++++++++++++-------------
8 files changed, 55 insertions(+), 29 deletions(-)
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index 4b6df2469fe3..115449f869ee 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -1436,7 +1436,7 @@ static int __init detect_init_APIC(void)
 	switch (boot_cpu_data.x86_vendor) {
 	case X86_VENDOR_AMD:
 		if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
-		    (boot_cpu_data.x86 == 15))
+		    (boot_cpu_data.x86 >= 15))
 			break;
 		goto no_apic;
 	case X86_VENDOR_INTEL:
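The functional change above is a single relational operator: AMD family 6 (model > 1) and family 0xf were accepted before, but family 0x10 and later, which also carry an integrated local APIC, fell through to no_apic. A standalone sketch of the corrected predicate — illustrative only, not the kernel's code:

#include <stdbool.h>
#include <stdio.h>

/* Models the AMD arm of detect_init_APIC(): family 6 with model > 1, or
 * any family from 0xf upward, is assumed to have a local APIC. */
static bool amd_has_apic(unsigned int family, unsigned int model)
{
        return (family == 6 && model > 1) || family >= 15;
}

int main(void)
{
        /* family 0x10 (16): rejected by the old "== 15" test, accepted now */
        printf("family 0x10 -> %d\n", amd_has_apic(0x10, 2));
        return 0;
}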
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 48533d77be78..da299eb85fc0 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -36,8 +36,11 @@ static struct _cache_table cache_table[] __cpuinitdata =
 {
 	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
 	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
+	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
 	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
 	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
+	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
+	{ 0x21, LVL_2, 256 },	/* 8-way set assoc, 64 byte line size */
 	{ 0x22, LVL_3, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
 	{ 0x23, LVL_3, 1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
 	{ 0x25, LVL_3, 2048 },	/* 8-way set assoc, sectored cache, 64 byte line size */
@@ -85,6 +88,18 @@ static struct _cache_table cache_table[] __cpuinitdata =
 	{ 0x85, LVL_2, 2048 },	/* 8-way set assoc, 32 byte line size */
 	{ 0x86, LVL_2, 512 },	/* 4-way set assoc, 64 byte line size */
 	{ 0x87, LVL_2, 1024 },	/* 8-way set assoc, 64 byte line size */
+	{ 0xd0, LVL_3, 512 },	/* 4-way set assoc, 64 byte line size */
+	{ 0xd1, LVL_3, 1024 },	/* 4-way set assoc, 64 byte line size */
+	{ 0xd2, LVL_3, 2048 },	/* 4-way set assoc, 64 byte line size */
+	{ 0xd6, LVL_3, 1024 },	/* 8-way set assoc, 64 byte line size */
+	{ 0xd7, LVL_3, 2048 },	/* 8-way set assoc, 64 byte line size */
+	{ 0xd8, LVL_3, 4096 },	/* 12-way set assoc, 64 byte line size */
+	{ 0xdc, LVL_3, 2048 },	/* 12-way set assoc, 64 byte line size */
+	{ 0xdd, LVL_3, 4096 },	/* 12-way set assoc, 64 byte line size */
+	{ 0xde, LVL_3, 8192 },	/* 12-way set assoc, 64 byte line size */
+	{ 0xe2, LVL_3, 2048 },	/* 16-way set assoc, 64 byte line size */
+	{ 0xe3, LVL_3, 4096 },	/* 16-way set assoc, 64 byte line size */
+	{ 0xe4, LVL_3, 8192 },	/* 16-way set assoc, 64 byte line size */
 	{ 0x00, 0, 0}
 };
 
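For context, cache_table maps the one-byte descriptors returned by CPUID leaf 2 to a cache level and size in KB; this patch only appends rows for newer parts. A reduced userspace model of the scan the kernel performs over this zero-terminated table — the three entry values are copied from the patch, everything else is illustrative:

#include <stdio.h>

struct cache_entry {
        unsigned char descriptor;   /* byte reported by CPUID leaf 2 */
        int level;                  /* 1, 2 or 3 */
        int size_kb;
};

/* A few rows from the patch; the real table is much longer. */
static const struct cache_entry cache_table[] = {
        { 0x09, 1, 32 },    /* L1 instruction, 4-way, 64 byte line */
        { 0x21, 2, 256 },   /* L2, 8-way, 64 byte line */
        { 0xd0, 3, 512 },   /* L3, 4-way, 64 byte line */
        { 0x00, 0, 0 }      /* terminator */
};

static const struct cache_entry *lookup(unsigned char desc)
{
        const struct cache_entry *e;

        for (e = cache_table; e->descriptor != 0; e++)
                if (e->descriptor == desc)
                        return e;
        return NULL;
}

int main(void)
{
        const struct cache_entry *e = lookup(0x21);

        if (e)
                printf("L%d cache, %d KB\n", e->level, e->size_kb);
        return 0;
}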
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index e28c7a987793..a1346217e43c 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -346,6 +346,7 @@ ENTRY(save_args)
 	popq_cfi %rax			/* move return address... */
 	mov %gs:pda_irqstackptr,%rsp
 	EMPTY_FRAME 0
+	pushq_cfi %rbp			/* backlink for unwinder */
 	pushq_cfi %rax			/* ... to the new stack */
 	/*
 	 * We entered an interrupt context - irqs are off:
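The extra push matters for frame-pointer unwinding: save_args has just switched %rsp onto the per-CPU IRQ stack, and without a saved %rbp near the base of that stack the unwinder cannot walk back into the interrupted task's stack. A userspace model of the backlink idea — the types and names here are illustrative stand-ins, not kernel structures:

#include <stdio.h>

/* With frame pointers, each frame begins with the caller's saved %rbp,
 * forming a linked list the unwinder follows. */
struct frame {
        struct frame *prev;
};

/* Models the patch: the first word stored on the new (IRQ) stack is the
 * old frame pointer, so the frame chain stays connected across the
 * stack switch instead of terminating at the switch point. */
static void link_stacks(struct frame *irq_stack_base, struct frame *old_rbp)
{
        irq_stack_base->prev = old_rbp;  /* pushq %rbp: backlink for unwinder */
}

int main(void)
{
        struct frame task_frame = { NULL };
        struct frame irq_frame;

        link_stacks(&irq_frame, &task_frame);
        printf("unwind crosses stacks: %s\n",
               irq_frame.prev == &task_frame ? "yes" : "no");
        return 0;
}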
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 1c4a1302536c..9b0c480c383b 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -2528,14 +2528,15 @@ static void irq_complete_move(struct irq_desc **descp)
 
 	vector = ~get_irq_regs()->orig_ax;
 	me = smp_processor_id();
+
+	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) {
 #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
 	*descp = desc = move_irq_desc(desc, me);
 	/* get the new one */
 	cfg = desc->chip_data;
 #endif
-
-	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
 		send_cleanup_vector(cfg);
+	}
 }
 #else
 static inline void irq_complete_move(struct irq_desc **descp) {}
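The reordering is the point of this hunk: move_irq_desc() previously ran on every invocation, even when the vector/domain test then bailed out, so descriptors could be migrated for interrupts this CPU was not completing a move for. After the patch the migration sits inside the guard. A minimal userspace model of guarding the side effect — all names here are illustrative:

#include <stdio.h>

struct irq_cfg {
        int vector;
        unsigned long domain;   /* CPU bitmap, one bit per CPU */
};

/* Stand-in for move_irq_desc(): the side effect we want guarded. */
static void migrate_desc(int cpu)
{
        printf("migrating irq_desc to CPU %d\n", cpu);
}

static void irq_complete_move(struct irq_cfg *cfg, int vector, int me)
{
        /* After the patch: migrate (and send the cleanup vector) only
         * when this CPU owns the matching vector for this IRQ. */
        if (vector == cfg->vector && (cfg->domain & (1UL << me))) {
                migrate_desc(me);
                /* send_cleanup_vector(cfg); */
        }
}

int main(void)
{
        struct irq_cfg cfg = { .vector = 42, .domain = 1UL << 1 };

        irq_complete_move(&cfg, 42, 1);  /* match: migrates */
        irq_complete_move(&cfg, 42, 0);  /* wrong CPU: no side effect now */
        return 0;
}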
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c
index 1507ad4e674d..10a09c2f1828 100644
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit_32.c
@@ -78,15 +78,6 @@ void __init init_ISA_irqs(void)
 	}
 }
 
-/*
- * IRQ2 is cascade interrupt to second interrupt controller
- */
-static struct irqaction irq2 = {
-	.handler = no_action,
-	.mask = CPU_MASK_NONE,
-	.name = "cascade",
-};
-
 DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
 	[0 ... IRQ0_VECTOR - 1] = -1,
 	[IRQ0_VECTOR] = 0,
@@ -178,9 +169,6 @@ void __init native_init_IRQ(void)
 	alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
 #endif
 
-	if (!acpi_ioapic)
-		setup_irq(2, &irq2);
-
 	/* setup after call gates are initialised (usually add in
 	 * the architecture specific gates)
 	 */
diff --git a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c
index df167f265622..a265a7c63190 100644
--- a/arch/x86/mach-default/setup.c
+++ b/arch/x86/mach-default/setup.c
@@ -38,6 +38,15 @@ void __init pre_intr_init_hook(void)
 	init_ISA_irqs();
 }
 
+/*
+ * IRQ2 is cascade interrupt to second interrupt controller
+ */
+static struct irqaction irq2 = {
+	.handler = no_action,
+	.mask = CPU_MASK_NONE,
+	.name = "cascade",
+};
+
 /**
  * intr_init_hook - post gate setup interrupt initialisation
  *
@@ -53,6 +62,9 @@ void __init intr_init_hook(void)
 		if (x86_quirks->arch_intr_init())
 			return;
 	}
+	if (!acpi_ioapic)
+		setup_irq(2, &irq2);
+
 }
 
 /**
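Together with the irqinit_32.c hunks above, this simply relocates the cascade setup: IRQ2 wires the slave 8259 into the master, so it needs an owner but no real handler, and registering it becomes a mach-default decision (Voyager's setup.c already registers its own). A reduced userspace model of the no-op registration — kernel types replaced by stand-ins:

#include <stdio.h>

typedef int (*irq_handler_t)(int irq, void *dev_id);

struct irqaction {
        irq_handler_t handler;
        const char *name;
};

/* Claims the line, does nothing: the cascade never raises real work. */
static int no_action(int irq, void *dev_id)
{
        return 0;
}

static struct irqaction irq2 = {
        .handler = no_action,
        .name = "cascade",
};

/* Stand-in for the kernel's setup_irq() */
static void setup_irq(int irq, struct irqaction *action)
{
        printf("IRQ%d claimed by \"%s\"\n", irq, action->name);
}

int main(void)
{
        int acpi_ioapic = 0;    /* assume legacy PIC mode, as in the patch */

        if (!acpi_ioapic)
                setup_irq(2, &irq2);
        return 0;
}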
diff --git a/arch/x86/mach-voyager/setup.c b/arch/x86/mach-voyager/setup.c
index a580b9562e76..d914a7996a66 100644
--- a/arch/x86/mach-voyager/setup.c
+++ b/arch/x86/mach-voyager/setup.c
@@ -33,13 +33,23 @@ void __init intr_init_hook(void)
 	setup_irq(2, &irq2);
 }
 
-void __init pre_setup_arch_hook(void)
+static void voyager_disable_tsc(void)
 {
 	/* Voyagers run their CPUs from independent clocks, so disable
 	 * the TSC code because we can't sync them */
 	setup_clear_cpu_cap(X86_FEATURE_TSC);
 }
 
+void __init pre_setup_arch_hook(void)
+{
+	voyager_disable_tsc();
+}
+
+void __init pre_time_init_hook(void)
+{
+	voyager_disable_tsc();
+}
+
 void __init trap_init_hook(void)
 {
 }
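The refactor costs nothing at runtime but lets the same TSC disable run from two hooks: pre_time_init_hook now also clears the capability, presumably so it is gone before the timekeeping code first inspects it, whichever hook fires on a given boot path. The shape of the change, modeled standalone with stand-in hooks:

#include <stdbool.h>
#include <stdio.h>

static bool tsc_capable = true;

/* Voyager CPUs run from independent clocks, so their TSCs can't be
 * synced; both init hooks funnel through one helper instead of
 * duplicating the disable. */
static void voyager_disable_tsc(void)
{
        tsc_capable = false;
}

static void pre_setup_arch_hook(void) { voyager_disable_tsc(); }
static void pre_time_init_hook(void)  { voyager_disable_tsc(); }

int main(void)
{
        pre_setup_arch_hook();
        pre_time_init_hook();
        printf("tsc_capable = %d\n", tsc_capable);
        return 0;
}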
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 9840b7ec749a..7ffcdeec4631 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -81,7 +81,7 @@ static void enable_local_vic_irq(unsigned int irq);
 static void disable_local_vic_irq(unsigned int irq);
 static void before_handle_vic_irq(unsigned int irq);
 static void after_handle_vic_irq(unsigned int irq);
-static void set_vic_irq_affinity(unsigned int irq, cpumask_t mask);
+static void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask);
 static void ack_vic_irq(unsigned int irq);
 static void vic_enable_cpi(void);
 static void do_boot_cpu(__u8 cpuid);
@@ -211,8 +211,6 @@ static __u32 cpu_booted_map;
 static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 
 /* This is for the new dynamic CPU boot code */
-cpumask_t cpu_callin_map = CPU_MASK_NONE;
-cpumask_t cpu_callout_map = CPU_MASK_NONE;
 
 /* The per processor IRQ masks (these are usually kept in sync) */
 static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
@@ -378,7 +376,7 @@ void __init find_smp_config(void)
 	cpus_addr(phys_cpu_present_map)[0] |=
 	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
 				       3) << 24;
-	cpu_possible_map = phys_cpu_present_map;
+	init_cpu_possible(&phys_cpu_present_map);
 	printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n",
 	       cpus_addr(phys_cpu_present_map)[0]);
 	/* Here we set up the VIC to enable SMP */
@@ -1599,16 +1597,16 @@ static void after_handle_vic_irq(unsigned int irq)
  * change the mask and then do an interrupt enable CPI to re-enable on
  * the selected processors */
 
-void set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
+void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	/* Only extended processors handle interrupts */
 	unsigned long real_mask;
 	unsigned long irq_mask = 1 << irq;
 	int cpu;
 
-	real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;
+	real_mask = cpus_addr(*mask)[0] & voyager_extended_vic_processors;
 
-	if (cpus_addr(mask)[0] == 0)
+	if (cpus_addr(*mask)[0] == 0)
 		/* can't have no CPUs to accept the interrupt -- extremely
 		 * bad things will happen */
 		return;
@@ -1750,10 +1748,11 @@ static void __cpuinit voyager_smp_prepare_boot_cpu(void)
 	init_gdt(smp_processor_id());
 	switch_to_new_gdt();
 
-	cpu_set(smp_processor_id(), cpu_online_map);
-	cpu_set(smp_processor_id(), cpu_callout_map);
-	cpu_set(smp_processor_id(), cpu_possible_map);
-	cpu_set(smp_processor_id(), cpu_present_map);
+	cpu_online_map = cpumask_of_cpu(smp_processor_id());
+	cpu_callout_map = cpumask_of_cpu(smp_processor_id());
+	cpu_callin_map = CPU_MASK_NONE;
+	cpu_present_map = cpumask_of_cpu(smp_processor_id());
+
 }
 
 static int __cpuinit voyager_cpu_up(unsigned int cpu)
@@ -1783,9 +1782,9 @@ void __init smp_setup_processor_id(void)
 	x86_write_percpu(cpu_number, hard_smp_processor_id());
 }
 
-static void voyager_send_call_func(cpumask_t callmask)
+static void voyager_send_call_func(const struct cpumask *callmask)
 {
-	__u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id());
+	__u32 mask = cpus_addr(*callmask)[0] & ~(1 << smp_processor_id());
 	send_CPI(mask, VIC_CALL_FUNCTION_CPI);
 }
 
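Most of the voyager_smp.c churn tracks the tree-wide cpumask rework: masks are passed as const struct cpumask * rather than cpumask_t by value, avoiding a copy of a potentially large bitmap at every call (hence the cpus_addr(*mask)[0] dereferences), and cpu_possible_map is now seeded through the init_cpu_possible() accessor instead of direct assignment. A userspace sketch of the calling-convention change, with a deliberately simplified mask type:

#include <stdio.h>

#define NR_CPUS 64
struct cpumask {
        unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
};

/* Old convention: the whole bitmap is copied into the callee's frame. */
static unsigned long first_word_byval(struct cpumask mask)
{
        return mask.bits[0];
}

/* New convention: only a pointer crosses the call; the callee reads
 * through it, mirroring the cpus_addr(*mask)[0] accesses in the patch. */
static unsigned long first_word_byref(const struct cpumask *mask)
{
        return mask->bits[0];
}

int main(void)
{
        struct cpumask m = { { 0x5 } };  /* CPUs 0 and 2 set */

        printf("%lx %lx\n", first_word_byval(m), first_word_byref(&m));
        return 0;
}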