author     Thomas Gleixner <tglx@linutronix.de>    2012-05-08 08:07:48 -0400
committer  Thomas Gleixner <tglx@linutronix.de>    2012-05-08 08:07:48 -0400
commit     67ba5293f705eb1d1b98710e5ccb0f615936a6fc (patch)
tree       cdb4cfd94033b5c0f42eeb4de368802049880a12 /arch/x86
parent     86627c93b35082f7a0e4d3111546943984b932c7 (diff)
parent     d909a81b198a397593495508c4a5755fe95552fb (diff)
Merge branch 'smp/threadalloc' into smp/hotplug
Reason: Pull in the separate branch which was created so arch/tile can
base further work on it.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/thread_info.h     |  1 -
-rw-r--r--  arch/x86/kernel/apic/x2apic_phys.c     |  6 ++++++
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c  |  8 ++++----
-rw-r--r--  arch/x86/platform/mrst/mrst.c          |  4 ++--
-rw-r--r--  arch/x86/xen/enlighten.c               |  4 ++--
-rw-r--r--  arch/x86/xen/smp.c                     | 15 +++++++++++++++
-rw-r--r--  arch/x86/xen/xen-asm.S                 |  2 +-
7 files changed, 30 insertions(+), 10 deletions(-)
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index ad6df8ccd715..8692a166dd4e 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -284,6 +284,5 @@ static inline bool is_ia32_task(void)
 extern void arch_task_cache_init(void);
 extern void free_thread_info(struct thread_info *ti);
 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
-#define arch_task_cache_init arch_task_cache_init
 #endif
 #endif /* _ASM_X86_THREAD_INFO_H */
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index 8a778db45e3a..991e315f4227 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -24,6 +24,12 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
         if (x2apic_phys)
                 return x2apic_enabled();
+        else if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
+                (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) &&
+                x2apic_enabled()) {
+                printk(KERN_DEBUG "System requires x2apic physical mode\n");
+                return 1;
+        }
         else
                 return 0;
 }
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 73d08ed98a64..b8f3653dddbc 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -433,14 +433,14 @@ int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
         /* check if @slot is already used or the index is already disabled */
         ret = amd_get_l3_disable_slot(nb, slot);
         if (ret >= 0)
-                return -EINVAL;
+                return -EEXIST;
 
         if (index > nb->l3_cache.indices)
                 return -EINVAL;
 
         /* check whether the other slot has disabled the same index already */
         if (index == amd_get_l3_disable_slot(nb, !slot))
-                return -EINVAL;
+                return -EEXIST;
 
         amd_l3_disable_index(nb, cpu, slot, index);
 
@@ -468,8 +468,8 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
         err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
         if (err) {
                 if (err == -EEXIST)
-                        printk(KERN_WARNING "L3 disable slot %d in use!\n",
-                                 slot);
+                        pr_warning("L3 slot %d in use/index already disabled!\n",
+                                   slot);
                 return err;
         }
         return count;
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index e0a37233c0af..e31bcd8f2eee 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -805,7 +805,7 @@ void intel_scu_devices_create(void)
                 } else
                         i2c_register_board_info(i2c_bus[i], i2c_devs[i], 1);
         }
-        intel_scu_notifier_post(SCU_AVAILABLE, 0L);
+        intel_scu_notifier_post(SCU_AVAILABLE, NULL);
 }
 EXPORT_SYMBOL_GPL(intel_scu_devices_create);
 
@@ -814,7 +814,7 @@ void intel_scu_devices_destroy(void)
 {
         int i;
 
-        intel_scu_notifier_post(SCU_DOWN, 0L);
+        intel_scu_notifier_post(SCU_DOWN, NULL);
 
         for (i = 0; i < ipc_next_dev; i++)
                 platform_device_del(ipc_devs[i]);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 4f51bebac02c..a8f8844b8d32 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -261,7 +261,8 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
 
 static bool __init xen_check_mwait(void)
 {
-#ifdef CONFIG_ACPI
+#if defined(CONFIG_ACPI) && !defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) && \
+        !defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE)
         struct xen_platform_op op = {
                 .cmd = XENPF_set_processor_pminfo,
                 .u.set_pminfo.id = -1,
@@ -349,7 +350,6 @@ static void __init xen_init_cpuid_mask(void)
         /* Xen will set CR4.OSXSAVE if supported and not disabled by force */
         if ((cx & xsave_mask) != xsave_mask)
                 cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */
-
         if (xen_check_mwait())
                 cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32));
 }
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 8f44cc1a9291..3700945ed0d5 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -178,6 +178,7 @@ static void __init xen_fill_possible_map(void)
 static void __init xen_filter_cpu_maps(void)
 {
         int i, rc;
+        unsigned int subtract = 0;
 
         if (!xen_initial_domain())
                 return;
@@ -192,8 +193,22 @@ static void __init xen_filter_cpu_maps(void)
                 } else {
                         set_cpu_possible(i, false);
                         set_cpu_present(i, false);
+                        subtract++;
                 }
         }
+#ifdef CONFIG_HOTPLUG_CPU
+        /* This is akin to using 'nr_cpus' on the Linux command line.
+         * Which is OK as when we use 'dom0_max_vcpus=X' we can only
+         * have up to X, while nr_cpu_ids is greater than X. This
+         * normally is not a problem, except when CPU hotplugging
+         * is involved and then there might be more than X CPUs
+         * in the guest - which will not work as there is no
+         * hypercall to expand the max number of VCPUs an already
+         * running guest has. So cap it up to X. */
+        if (subtract)
+                nr_cpu_ids = nr_cpu_ids - subtract;
+#endif
+
 }
 
 static void __init xen_smp_prepare_boot_cpu(void)
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 79d7362ad6d1..3e45aa000718 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -96,7 +96,7 @@ ENTRY(xen_restore_fl_direct)
 
         /* check for unmasked and pending */
         cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
-        jz 1f
+        jnz 1f
 2:      call check_events
 1:
 ENDPATCH(xen_restore_fl_direct)