path: root/arch/x86
author    Ingo Molnar <mingo@elte.hu>    2010-08-25 07:09:58 -0400
committer Ingo Molnar <mingo@elte.hu>    2010-08-25 07:10:00 -0400
commit    7de5d895b2020260190db0021de646f3f22f755e (patch)
tree      51d012f0b76a2ec1bd3b4837690faf1087f37056 /arch/x86
parent    04fba67163a9e6132614b72b33bb2743bd33ffb3 (diff)
parent    502adf5778f4151dcba3f64dd6ed322151f3712c (diff)
Merge branch 'linus' into perf/core
Merge reason: pick up perf fixes

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                    |  9
-rw-r--r--  arch/x86/include/asm/pgtable_32.h   |  1
-rw-r--r--  arch/x86/include/asm/syscalls.h     |  5
-rw-r--r--  arch/x86/include/asm/trampoline.h   |  5
-rw-r--r--  arch/x86/kernel/apic/io_apic.c      |  2
-rw-r--r--  arch/x86/kernel/cpu/amd.c           |  2
-rw-r--r--  arch/x86/kernel/head_32.S           |  8
-rw-r--r--  arch/x86/kernel/i387.c              |  1
-rw-r--r--  arch/x86/kernel/kgdb.c              |  2
-rw-r--r--  arch/x86/kernel/kprobes.c           | 25
-rw-r--r--  arch/x86/kernel/process.c           |  5
-rw-r--r--  arch/x86/kernel/setup.c             |  2
-rw-r--r--  arch/x86/kernel/smpboot.c           | 51
-rw-r--r--  arch/x86/kernel/sys_i386_32.c       |  4
-rw-r--r--  arch/x86/kernel/trampoline.c        | 18
-rw-r--r--  arch/x86/kvm/i8254.c                |  3
-rw-r--r--  arch/x86/kvm/x86.c                  |  4
-rw-r--r--  arch/x86/xen/platform-pci-unplug.c  | 18
18 files changed, 123 insertions, 42 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a84fc34c8f77..cea0cd9a316f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -245,6 +245,11 @@ config ARCH_HWEIGHT_CFLAGS
 
 config KTIME_SCALAR
         def_bool X86_32
+
+config ARCH_CPU_PROBE_RELEASE
+        def_bool y
+        depends on HOTPLUG_CPU
+
 source "init/Kconfig"
 source "kernel/Kconfig.freezer"
 
@@ -749,11 +754,11 @@ config IOMMU_API
         def_bool (AMD_IOMMU || DMAR)
 
 config MAXSMP
-        bool "Configure Maximum number of SMP Processors and NUMA Nodes"
+        bool "Enable Maximum number of SMP Processors and NUMA Nodes"
         depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
         select CPUMASK_OFFSTACK
         ---help---
-          Configure maximum number of CPUS and NUMA Nodes for this architecture.
+          Enable maximum number of CPUS and NUMA Nodes for this architecture.
           If unsure, say N.
 
 config NR_CPUS
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 2984a25ff383..f686f49e8b7b 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -26,6 +26,7 @@ struct mm_struct;
 struct vm_area_struct;
 
 extern pgd_t swapper_pg_dir[1024];
+extern pgd_t trampoline_pg_dir[1024];
 
 static inline void pgtable_cache_init(void) { }
 static inline void check_pgt_cache(void) { }
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index feb2ff9bfc2d..f1d8b441fc77 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -23,8 +23,9 @@ long sys_iopl(unsigned int, struct pt_regs *);
 /* kernel/process.c */
 int sys_fork(struct pt_regs *);
 int sys_vfork(struct pt_regs *);
-long sys_execve(const char __user *, char __user * __user *,
-                char __user * __user *, struct pt_regs *);
+long sys_execve(const char __user *,
+                const char __user *const __user *,
+                const char __user *const __user *, struct pt_regs *);
 long sys_clone(unsigned long, unsigned long, void __user *,
                void __user *, struct pt_regs *);
 
diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h
index cb507bb05d79..4dde797c0578 100644
--- a/arch/x86/include/asm/trampoline.h
+++ b/arch/x86/include/asm/trampoline.h
@@ -13,14 +13,17 @@ extern unsigned char *trampoline_base;
 
 extern unsigned long init_rsp;
 extern unsigned long initial_code;
+extern unsigned long initial_page_table;
 extern unsigned long initial_gs;
 
 #define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
 
 extern unsigned long setup_trampoline(void);
+extern void __init setup_trampoline_page_table(void);
 extern void __init reserve_trampoline_memory(void);
 #else
-static inline void reserve_trampoline_memory(void) {};
+static inline void setup_trampoline_page_table(void) {}
+static inline void reserve_trampoline_memory(void) {}
 #endif /* CONFIG_X86_TRAMPOLINE */
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 4dc0084ec1b1..f1efebaf5510 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1728,6 +1728,8 @@ __apicdebuginit(void) print_IO_APIC(void)
                 struct irq_pin_list *entry;
 
                 cfg = desc->chip_data;
+                if (!cfg)
+                        continue;
                 entry = cfg->irq_2_pin;
                 if (!entry)
                         continue;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 60a57b13082d..ba5f62f45f01 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -669,7 +669,7 @@ bool cpu_has_amd_erratum(const int *erratum)
         }
 
         /* OSVW unavailable or ID unknown, match family-model-stepping range */
-        ms = (cpu->x86_model << 8) | cpu->x86_mask;
+        ms = (cpu->x86_model << 4) | cpu->x86_mask;
         while ((range = *erratum++))
                 if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
                     (ms >= AMD_MODEL_RANGE_START(range)) &&
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index ff4c453e13f3..fa8c1b8e09fb 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -334,7 +334,7 @@ ENTRY(startup_32_smp)
 /*
  * Enable paging
  */
-        movl $pa(swapper_pg_dir),%eax
+        movl pa(initial_page_table), %eax
         movl %eax,%cr3          /* set the page table pointer.. */
         movl %cr0,%eax
         orl $X86_CR0_PG,%eax
@@ -614,6 +614,8 @@ ignore_int:
 .align 4
 ENTRY(initial_code)
         .long i386_start_kernel
+ENTRY(initial_page_table)
+        .long pa(swapper_pg_dir)
 
 /*
  * BSS section
@@ -629,6 +631,10 @@ ENTRY(swapper_pg_dir)
 #endif
 swapper_pg_fixmap:
         .fill 1024,4,0
+#ifdef CONFIG_X86_TRAMPOLINE
+ENTRY(trampoline_pg_dir)
+        .fill 1024,4,0
+#endif
 ENTRY(empty_zero_page)
         .fill 4096,1,0
 
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 1f11f5ce668f..a46cb3522c0c 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -40,6 +40,7 @@
 
 static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
 unsigned int xstate_size;
+EXPORT_SYMBOL_GPL(xstate_size);
 unsigned int sig_xstate_ia32_size = sizeof(struct _fpstate_ia32);
 static struct i387_fxsave_struct fx_scratch __cpuinitdata;
 
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index ef10940e1af0..852b81967a37 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -194,7 +194,7 @@ static struct hw_breakpoint {
         unsigned long addr;
         int len;
         int type;
-        struct perf_event **pev;
+        struct perf_event * __percpu *pev;
 } breakinfo[HBP_NUM];
 
 static unsigned long early_dr7;
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 1bfb6cf4dd55..770ebfb349e9 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -709,6 +709,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
         struct hlist_node *node, *tmp;
         unsigned long flags, orig_ret_address = 0;
         unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+        kprobe_opcode_t *correct_ret_addr = NULL;
 
         INIT_HLIST_HEAD(&empty_rp);
         kretprobe_hash_lock(current, &head, &flags);
@@ -740,14 +741,34 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
                         /* another task is sharing our hash bucket */
                         continue;
 
+                orig_ret_address = (unsigned long)ri->ret_addr;
+
+                if (orig_ret_address != trampoline_address)
+                        /*
+                         * This is the real return address. Any other
+                         * instances associated with this task are for
+                         * other calls deeper on the call stack
+                         */
+                        break;
+        }
+
+        kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+        correct_ret_addr = ri->ret_addr;
+        hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+                if (ri->task != current)
+                        /* another task is sharing our hash bucket */
+                        continue;
+
+                orig_ret_address = (unsigned long)ri->ret_addr;
                 if (ri->rp && ri->rp->handler) {
                         __get_cpu_var(current_kprobe) = &ri->rp->kp;
                         get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+                        ri->ret_addr = correct_ret_addr;
                         ri->rp->handler(ri, regs);
                         __get_cpu_var(current_kprobe) = NULL;
                 }
 
-                orig_ret_address = (unsigned long)ri->ret_addr;
                 recycle_rp_inst(ri, &empty_rp);
 
                 if (orig_ret_address != trampoline_address)
@@ -759,8 +780,6 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
                         break;
         }
 
-        kretprobe_assert(ri, orig_ret_address, trampoline_address);
-
         kretprobe_hash_unlock(current, &flags);
 
         hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 64ecaf0af9af..57d1868a86aa 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -301,8 +301,9 @@ EXPORT_SYMBOL(kernel_thread);
 /*
  * sys_execve() executes a new program.
  */
-long sys_execve(const char __user *name, char __user * __user *argv,
-                char __user * __user *envp, struct pt_regs *regs)
+long sys_execve(const char __user *name,
+                const char __user *const __user *argv,
+                const char __user *const __user *envp, struct pt_regs *regs)
 {
         long error;
         char *filename;
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index b008e7883207..c3a4fbb2b996 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1014,6 +1014,8 @@ void __init setup_arch(char **cmdline_p)
         paging_init();
         x86_init.paging.pagetable_setup_done(swapper_pg_dir);
 
+        setup_trampoline_page_table();
+
         tboot_probe();
 
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index a5e928b0cb5f..8b3bfc4dd708 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -73,7 +73,6 @@
 
 #ifdef CONFIG_X86_32
 u8 apicid_2_node[MAX_APICID];
-static int low_mappings;
 #endif
 
 /* State of each CPU */
@@ -91,6 +90,25 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 };
 static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
 #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
 #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
+
+/*
+ * We need this for trampoline_base protection from concurrent accesses when
+ * off- and onlining cores wildly.
+ */
+static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
+
+void cpu_hotplug_driver_lock()
+{
+        mutex_lock(&x86_cpu_hotplug_driver_mutex);
+}
+
+void cpu_hotplug_driver_unlock()
+{
+        mutex_unlock(&x86_cpu_hotplug_driver_mutex);
+}
+
+ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
+ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
 #else
 static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
 #define get_idle_for_cpu(x) (idle_thread_array[(x)])
@@ -281,6 +299,18 @@ notrace static void __cpuinit start_secondary(void *unused)
          * fragile that we want to limit the things done here to the
          * most necessary things.
          */
+
+#ifdef CONFIG_X86_32
+        /*
+         * Switch away from the trampoline page-table
+         *
+         * Do this before cpu_init() because it needs to access per-cpu
+         * data which may not be mapped in the trampoline page-table.
+         */
+        load_cr3(swapper_pg_dir);
+        __flush_tlb_all();
+#endif
+
         vmi_bringup();
         cpu_init();
         preempt_disable();
@@ -299,12 +329,6 @@ notrace static void __cpuinit start_secondary(void *unused)
                 legacy_pic->chip->unmask(0);
         }
 
-#ifdef CONFIG_X86_32
-        while (low_mappings)
-                cpu_relax();
-        __flush_tlb_all();
-#endif
-
         /* This must be done before setting cpu_online_mask */
         set_cpu_sibling_map(raw_smp_processor_id());
         wmb();
@@ -750,6 +774,7 @@ do_rest:
 #ifdef CONFIG_X86_32
         /* Stack for startup_32 can be just as for start_secondary onwards */
         irq_ctx_init(cpu);
+        initial_page_table = __pa(&trampoline_pg_dir);
 #else
         clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
         initial_gs = per_cpu_offset(cpu);
@@ -897,20 +922,8 @@ int __cpuinit native_cpu_up(unsigned int cpu)
 
         per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 
-#ifdef CONFIG_X86_32
-        /* init low mem mapping */
-        clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-                min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
-        flush_tlb_all();
-        low_mappings = 1;
-
         err = do_boot_cpu(apicid, cpu);
 
-        zap_low_mappings(false);
-        low_mappings = 0;
-#else
-        err = do_boot_cpu(apicid, cpu);
-#endif
         if (err) {
                 pr_debug("do_boot_cpu failed %d\n", err);
                 return -EIO;
diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
index 196552bb412c..d5e06624e34a 100644
--- a/arch/x86/kernel/sys_i386_32.c
+++ b/arch/x86/kernel/sys_i386_32.c
@@ -28,7 +28,9 @@
  * Do a system call from kernel instead of calling sys_execve so we
  * end up with proper pt_regs.
  */
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+int kernel_execve(const char *filename,
+                  const char *const argv[],
+                  const char *const envp[])
 {
         long __res;
         asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index c652ef62742d..a874495b3673 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -1,6 +1,7 @@
 #include <linux/io.h>
 
 #include <asm/trampoline.h>
+#include <asm/pgtable.h>
 #include <asm/e820.h>
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP)
@@ -37,3 +38,20 @@ unsigned long __trampinit setup_trampoline(void)
         memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE);
         return virt_to_phys(trampoline_base);
 }
+
+void __init setup_trampoline_page_table(void)
+{
+#ifdef CONFIG_X86_32
+        /* Copy kernel address range */
+        clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
+                        swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+                        min_t(unsigned long, KERNEL_PGD_PTRS,
+                              KERNEL_PGD_BOUNDARY));
+
+        /* Initialize low mappings */
+        clone_pgd_range(trampoline_pg_dir,
+                        swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+                        min_t(unsigned long, KERNEL_PGD_PTRS,
+                              KERNEL_PGD_BOUNDARY));
+#endif
+}
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 0fd6378981f4..ddeb2314b522 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -697,6 +697,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
         pit->wq = create_singlethread_workqueue("kvm-pit-wq");
         if (!pit->wq) {
                 mutex_unlock(&pit->pit_state.lock);
+                kvm_free_irq_source_id(kvm, pit->irq_source_id);
                 kfree(pit);
                 return NULL;
         }
@@ -742,7 +743,7 @@ fail:
         kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
         kvm_unregister_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
         kvm_free_irq_source_id(kvm, pit->irq_source_id);
-
+        destroy_workqueue(pit->wq);
         kfree(pit);
         return NULL;
 }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 25f19078b321..3a09c625d526 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2387,7 +2387,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
         if (cpu_has_xsave)
                 memcpy(guest_xsave->region,
                         &vcpu->arch.guest_fpu.state->xsave,
-                        sizeof(struct xsave_struct));
+                        xstate_size);
         else {
                 memcpy(guest_xsave->region,
                         &vcpu->arch.guest_fpu.state->fxsave,
@@ -2405,7 +2405,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
 
         if (cpu_has_xsave)
                 memcpy(&vcpu->arch.guest_fpu.state->xsave,
-                        guest_xsave->region, sizeof(struct xsave_struct));
+                        guest_xsave->region, xstate_size);
         else {
                 if (xstate_bv & ~XSTATE_FPSSE)
                         return -EINVAL;
diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c
index 554c002a1e1a..0f456386cce5 100644
--- a/arch/x86/xen/platform-pci-unplug.c
+++ b/arch/x86/xen/platform-pci-unplug.c
@@ -72,13 +72,17 @@ void __init xen_unplug_emulated_devices(void)
 {
         int r;
 
+        /* user explicitly requested no unplug */
+        if (xen_emul_unplug & XEN_UNPLUG_NEVER)
+                return;
         /* check the version of the xen platform PCI device */
         r = check_platform_magic();
         /* If the version matches enable the Xen platform PCI driver.
-         * Also enable the Xen platform PCI driver if the version is really old
-         * and the user told us to ignore it. */
+         * Also enable the Xen platform PCI driver if the host does
+         * not support the unplug protocol (XEN_PLATFORM_ERR_MAGIC)
+         * but the user told us that unplugging is unnecessary. */
         if (r && !(r == XEN_PLATFORM_ERR_MAGIC &&
-                        (xen_emul_unplug & XEN_UNPLUG_IGNORE)))
+                        (xen_emul_unplug & XEN_UNPLUG_UNNECESSARY)))
                 return;
         /* Set the default value of xen_emul_unplug depending on whether or
          * not the Xen PV frontends and the Xen platform PCI driver have
@@ -99,7 +103,7 @@ void __init xen_unplug_emulated_devices(void)
                 }
         }
         /* Now unplug the emulated devices */
-        if (!(xen_emul_unplug & XEN_UNPLUG_IGNORE))
+        if (!(xen_emul_unplug & XEN_UNPLUG_UNNECESSARY))
                 outw(xen_emul_unplug, XEN_IOPORT_UNPLUG);
         xen_platform_pci_unplug = xen_emul_unplug;
 }
@@ -125,8 +129,10 @@ static int __init parse_xen_emul_unplug(char *arg)
                         xen_emul_unplug |= XEN_UNPLUG_AUX_IDE_DISKS;
                 else if (!strncmp(p, "nics", l))
                         xen_emul_unplug |= XEN_UNPLUG_ALL_NICS;
-                else if (!strncmp(p, "ignore", l))
-                        xen_emul_unplug |= XEN_UNPLUG_IGNORE;
+                else if (!strncmp(p, "unnecessary", l))
+                        xen_emul_unplug |= XEN_UNPLUG_UNNECESSARY;
+                else if (!strncmp(p, "never", l))
+                        xen_emul_unplug |= XEN_UNPLUG_NEVER;
                 else
                         printk(KERN_WARNING "unrecognised option '%s' "
                                         "in parameter 'xen_emul_unplug'\n", p);