Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/acpi/boot.c        90
-rw-r--r--  arch/x86/kernel/acpi/sleep.c       11
-rw-r--r--  arch/x86/kernel/acpi/sleep.h        2
-rw-r--r--  arch/x86/kernel/acpi/wakeup_32.S    2
-rw-r--r--  arch/x86/kernel/acpi/wakeup_64.S    2
-rw-r--r--  arch/x86/kernel/apic/apic.c         8
-rw-r--r--  arch/x86/kernel/smpboot.c          21
-rw-r--r--  arch/x86/kernel/topology.c         11
8 files changed, 47 insertions(+), 100 deletions(-)
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 40c76604199f..6c0b43bd024b 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -189,24 +189,31 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
 	return 0;
 }
 
-static void acpi_register_lapic(int id, u8 enabled)
+/**
+ * acpi_register_lapic - register a local apic and generate a logical cpu number
+ * @id: local apic id to register
+ * @enabled: whether this cpu is enabled
+ *
+ * Returns the logical cpu number which maps to the local apic
+ */
+static int acpi_register_lapic(int id, u8 enabled)
 {
 	unsigned int ver = 0;
 
 	if (id >= MAX_LOCAL_APIC) {
 		printk(KERN_INFO PREFIX "skipped apicid that is too big\n");
-		return;
+		return -EINVAL;
 	}
 
 	if (!enabled) {
 		++disabled_cpus;
-		return;
+		return -EINVAL;
 	}
 
 	if (boot_cpu_physical_apicid != -1U)
 		ver = apic_version[boot_cpu_physical_apicid];
 
-	generic_processor_info(id, ver);
+	return generic_processor_info(id, ver);
 }
 
 static int __init
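
Note: the hunk above (together with the generic_processor_info() change further down in apic/apic.c) turns acpi_register_lapic() from void into int: it now reports a negative errno when the apic id is rejected or the cpu is disabled, and otherwise returns the logical cpu number that was allocated. A minimal sketch of the intended calling pattern follows; the caller name is hypothetical and not part of this patch.

	/* Hypothetical caller, shown only to illustrate the new return contract. */
	static int example_register_lapic(int apicid)
	{
		int cpu = acpi_register_lapic(apicid, ACPI_MADT_ENABLED);

		if (cpu < 0) {
			pr_err("apicid 0x%x not registered: %d\n", apicid, cpu);
			return cpu;	/* -EINVAL or -ENODEV, nothing to undo */
		}

		/* cpu is the logical cpu number that now maps to apicid */
		return cpu;
	}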
@@ -614,84 +621,27 @@ static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 #endif
 }
 
-static int _acpi_map_lsapic(acpi_handle handle, int *pcpu)
+static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
 {
-	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-	union acpi_object *obj;
-	struct acpi_madt_local_apic *lapic;
-	cpumask_var_t tmp_map, new_map;
-	u8 physid;
 	int cpu;
-	int retval = -ENOMEM;
-
-	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
-		return -EINVAL;
-
-	if (!buffer.length || !buffer.pointer)
-		return -EINVAL;
-
-	obj = buffer.pointer;
-	if (obj->type != ACPI_TYPE_BUFFER ||
-	    obj->buffer.length < sizeof(*lapic)) {
-		kfree(buffer.pointer);
-		return -EINVAL;
-	}
-
-	lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer;
-
-	if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC ||
-	    !(lapic->lapic_flags & ACPI_MADT_ENABLED)) {
-		kfree(buffer.pointer);
-		return -EINVAL;
-	}
-
-	physid = lapic->id;
-
-	kfree(buffer.pointer);
-	buffer.length = ACPI_ALLOCATE_BUFFER;
-	buffer.pointer = NULL;
-	lapic = NULL;
-
-	if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL))
-		goto out;
-
-	if (!alloc_cpumask_var(&new_map, GFP_KERNEL))
-		goto free_tmp_map;
-
-	cpumask_copy(tmp_map, cpu_present_mask);
-	acpi_register_lapic(physid, ACPI_MADT_ENABLED);
-
-	/*
-	 * If acpi_register_lapic successfully generates a new logical cpu
-	 * number, then the following will get us exactly what was mapped
-	 */
-	cpumask_andnot(new_map, cpu_present_mask, tmp_map);
-	if (cpumask_empty(new_map)) {
-		printk ("Unable to map lapic to logical cpu number\n");
-		retval = -EINVAL;
-		goto free_new_map;
+
+	cpu = acpi_register_lapic(physid, ACPI_MADT_ENABLED);
+	if (cpu < 0) {
+		pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
+		return cpu;
 	}
 
 	acpi_processor_set_pdc(handle);
-
-	cpu = cpumask_first(new_map);
 	acpi_map_cpu2node(handle, cpu, physid);
 
 	*pcpu = cpu;
-	retval = 0;
-
-free_new_map:
-	free_cpumask_var(new_map);
-free_tmp_map:
-	free_cpumask_var(tmp_map);
-out:
-	return retval;
+	return 0;
 }
 
 /* wrapper to silence section mismatch warning */
-int __ref acpi_map_lsapic(acpi_handle handle, int *pcpu)
+int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
 {
-	return _acpi_map_lsapic(handle, pcpu);
+	return _acpi_map_lsapic(handle, physid, pcpu);
 }
 EXPORT_SYMBOL(acpi_map_lsapic);
 
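
Note: with the _MAT evaluation dropped from _acpi_map_lsapic(), acpi_map_lsapic() now takes the physical apic id from its caller instead of digging it out of the MADT buffer itself. A hedged sketch of what a caller could look like under the new signature follows; the function and variable names are illustrative and not taken from this patch.

	/* Illustrative only: assumes the ACPI core has already parsed _MAT
	 * (or the MADT entry) and knows the physical apic id. */
	static int example_hot_add_cpu(acpi_handle handle, int phys_id)
	{
		int logical_cpu;
		int ret;

		ret = acpi_map_lsapic(handle, phys_id, &logical_cpu);
		if (ret < 0)
			return ret;	/* registration failed, nothing mapped */

		pr_info("physical apic id %d -> logical cpu %d\n",
			phys_id, logical_cpu);
		return logical_cpu;
	}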
@@ -745,7 +695,7 @@ static int __init acpi_parse_sbf(struct acpi_table_header *table)
 #ifdef CONFIG_HPET_TIMER
 #include <asm/hpet.h>
 
-static struct __initdata resource *hpet_res;
+static struct resource *hpet_res __initdata;
 
 static int __init acpi_parse_hpet(struct acpi_table_header *table)
 {
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 33120100ff5e..3a2ae4c88948 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -26,6 +26,17 @@ static char temp_stack[4096];
 #endif
 
 /**
+ * x86_acpi_enter_sleep_state - enter sleep state
+ * @state: Sleep state to enter.
+ *
+ * Wrapper around acpi_enter_sleep_state() to be called by assembly.
+ */
+acpi_status asmlinkage x86_acpi_enter_sleep_state(u8 state)
+{
+	return acpi_enter_sleep_state(state);
+}
+
+/**
  * x86_acpi_suspend_lowlevel - save kernel state
  *
  * Create an identity mapped page table and copy the wakeup routine to
diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h
index c9c2c982d5e4..65c7b606b606 100644
--- a/arch/x86/kernel/acpi/sleep.h
+++ b/arch/x86/kernel/acpi/sleep.h
@@ -17,3 +17,5 @@ extern void wakeup_long64(void);
 extern void do_suspend_lowlevel(void);
 
 extern int x86_acpi_suspend_lowlevel(void);
+
+acpi_status asmlinkage x86_acpi_enter_sleep_state(u8 state);
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index d1daa66ab162..665c6b7d2ea9 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -73,7 +73,7 @@ ENTRY(do_suspend_lowlevel)
 	call	save_processor_state
 	call	save_registers
 	pushl	$3
-	call	acpi_enter_sleep_state
+	call	x86_acpi_enter_sleep_state
 	addl	$4, %esp
 
 # In case of S3 failure, we'll emerge here. Jump
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 8ea5164cbd04..ae693b51ed8e 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -73,7 +73,7 @@ ENTRY(do_suspend_lowlevel)
 	addq	$8, %rsp
 	movl	$3, %edi
 	xorl	%eax, %eax
-	call	acpi_enter_sleep_state
+	call	x86_acpi_enter_sleep_state
 	/* in case something went wrong, restore the machine status and go on */
 	jmp	resume_point
 
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index a7eb82d9b012..ed165d657380 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2107,7 +2107,7 @@ void disconnect_bsp_APIC(int virt_wire_setup)
 	apic_write(APIC_LVT1, value);
 }
 
-void generic_processor_info(int apicid, int version)
+int generic_processor_info(int apicid, int version)
 {
 	int cpu, max = nr_cpu_ids;
 	bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
@@ -2127,7 +2127,7 @@ void generic_processor_info(int apicid, int version)
2127 " Processor %d/0x%x ignored.\n", max, thiscpu, apicid); 2127 " Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
2128 2128
2129 disabled_cpus++; 2129 disabled_cpus++;
2130 return; 2130 return -ENODEV;
2131 } 2131 }
2132 2132
2133 if (num_processors >= nr_cpu_ids) { 2133 if (num_processors >= nr_cpu_ids) {
@@ -2138,7 +2138,7 @@ void generic_processor_info(int apicid, int version)
2138 " Processor %d/0x%x ignored.\n", max, thiscpu, apicid); 2138 " Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
2139 2139
2140 disabled_cpus++; 2140 disabled_cpus++;
2141 return; 2141 return -EINVAL;
2142 } 2142 }
2143 2143
2144 num_processors++; 2144 num_processors++;
@@ -2183,6 +2183,8 @@ void generic_processor_info(int apicid, int version)
 #endif
 	set_cpu_possible(cpu, true);
 	set_cpu_present(cpu, true);
+
+	return cpu;
 }
 
 int hard_smp_processor_id(void)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 2a165580fa16..85dc05a3aa02 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -81,27 +81,6 @@
 /* State of each CPU */
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * We need this for trampoline_base protection from concurrent accesses when
- * off- and onlining cores wildly.
- */
-static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
-
-void cpu_hotplug_driver_lock(void)
-{
-	mutex_lock(&x86_cpu_hotplug_driver_mutex);
-}
-
-void cpu_hotplug_driver_unlock(void)
-{
-	mutex_unlock(&x86_cpu_hotplug_driver_mutex);
-}
-
-ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
-ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
-#endif
-
 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
 EXPORT_SYMBOL(smp_num_siblings);
diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c
index 6e60b5fe2244..649b010da00b 100644
--- a/arch/x86/kernel/topology.c
+++ b/arch/x86/kernel/topology.c
@@ -65,29 +65,32 @@ int __ref _debug_hotplug_cpu(int cpu, int action)
 	if (!cpu_is_hotpluggable(cpu))
 		return -EINVAL;
 
-	cpu_hotplug_driver_lock();
+	lock_device_hotplug();
 
 	switch (action) {
 	case 0:
 		ret = cpu_down(cpu);
 		if (!ret) {
 			pr_info("CPU %u is now offline\n", cpu);
+			dev->offline = true;
 			kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
 		} else
 			pr_debug("Can't offline CPU%d.\n", cpu);
 		break;
 	case 1:
 		ret = cpu_up(cpu);
-		if (!ret)
+		if (!ret) {
+			dev->offline = false;
 			kobject_uevent(&dev->kobj, KOBJ_ONLINE);
-		else
+		} else {
 			pr_debug("Can't online CPU%d.\n", cpu);
+		}
 		break;
 	default:
 		ret = -EINVAL;
 	}
 
-	cpu_hotplug_driver_unlock();
+	unlock_device_hotplug();
 
 	return ret;
 }
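
Note: the topology.c hunk swaps the x86-private cpu_hotplug_driver_lock() for the generic lock_device_hotplug() and keeps dev->offline in step with the uevent it emits. The same pattern in isolation, as a hedged sketch (the helper name is made up; error paths trimmed):

	/* Sketch of the locking/bookkeeping pattern used above: toggle a CPU's
	 * online state under the device hotplug lock and keep dev->offline
	 * consistent with the uevent that is sent. */
	static int example_set_cpu_online(struct device *dev, unsigned int cpu,
					  bool online)
	{
		int ret;

		lock_device_hotplug();
		ret = online ? cpu_up(cpu) : cpu_down(cpu);
		if (!ret) {
			dev->offline = !online;
			kobject_uevent(&dev->kobj,
				       online ? KOBJ_ONLINE : KOBJ_OFFLINE);
		}
		unlock_device_hotplug();

		return ret;
	}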