Diffstat (limited to 'arch/x86')
 arch/x86/Kconfig                        |  9
 arch/x86/include/asm/pgtable_32.h       |  1
 arch/x86/include/asm/syscalls.h         |  5
 arch/x86/include/asm/trampoline.h       |  5
 arch/x86/include/asm/tsc.h              |  2
 arch/x86/kernel/apic/io_apic.c          |  2
 arch/x86/kernel/cpu/amd.c               |  2
 arch/x86/kernel/cpu/perf_event_intel.c  | 81
 arch/x86/kernel/cpu/perf_event_p4.c     |  2
 arch/x86/kernel/head_32.S               |  8
 arch/x86/kernel/i387.c                  |  1
 arch/x86/kernel/kgdb.c                  |  2
 arch/x86/kernel/kprobes.c               | 25
 arch/x86/kernel/process.c               |  5
 arch/x86/kernel/setup.c                 |  2
 arch/x86/kernel/smpboot.c               | 51
 arch/x86/kernel/sys_i386_32.c           |  4
 arch/x86/kernel/trampoline.c            | 18
 arch/x86/kernel/tsc.c                   | 38
 arch/x86/kvm/i8254.c                    |  3
 arch/x86/kvm/x86.c                      |  4
 arch/x86/power/cpu.c                    |  2
 arch/x86/xen/platform-pci-unplug.c      | 18
 23 files changed, 230 insertions(+), 60 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a84fc34c8f7..cea0cd9a316 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -245,6 +245,11 @@ config ARCH_HWEIGHT_CFLAGS
 
 config KTIME_SCALAR
 	def_bool X86_32
+
+config ARCH_CPU_PROBE_RELEASE
+	def_bool y
+	depends on HOTPLUG_CPU
+
 source "init/Kconfig"
 source "kernel/Kconfig.freezer"
 
@@ -749,11 +754,11 @@ config IOMMU_API
 	def_bool (AMD_IOMMU || DMAR)
 
 config MAXSMP
-	bool "Configure Maximum number of SMP Processors and NUMA Nodes"
+	bool "Enable Maximum number of SMP Processors and NUMA Nodes"
 	depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
 	select CPUMASK_OFFSTACK
 	---help---
-	  Configure maximum number of CPUS and NUMA Nodes for this architecture.
+	  Enable maximum number of CPUS and NUMA Nodes for this architecture.
 	  If unsure, say N.
 
 config NR_CPUS
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 2984a25ff38..f686f49e8b7 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -26,6 +26,7 @@ struct mm_struct;
 struct vm_area_struct;
 
 extern pgd_t swapper_pg_dir[1024];
+extern pgd_t trampoline_pg_dir[1024];
 
 static inline void pgtable_cache_init(void) { }
 static inline void check_pgt_cache(void) { }
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index feb2ff9bfc2..f1d8b441fc7 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -23,8 +23,9 @@ long sys_iopl(unsigned int, struct pt_regs *);
 /* kernel/process.c */
 int sys_fork(struct pt_regs *);
 int sys_vfork(struct pt_regs *);
-long sys_execve(const char __user *, char __user * __user *,
-		char __user * __user *, struct pt_regs *);
+long sys_execve(const char __user *,
+		const char __user *const __user *,
+		const char __user *const __user *, struct pt_regs *);
 long sys_clone(unsigned long, unsigned long, void __user *,
 	       void __user *, struct pt_regs *);
 
diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h
index cb507bb05d7..4dde797c057 100644
--- a/arch/x86/include/asm/trampoline.h
+++ b/arch/x86/include/asm/trampoline.h
@@ -13,14 +13,17 @@ extern unsigned char *trampoline_base;
 
 extern unsigned long init_rsp;
 extern unsigned long initial_code;
+extern unsigned long initial_page_table;
 extern unsigned long initial_gs;
 
 #define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
 
 extern unsigned long setup_trampoline(void);
+extern void __init setup_trampoline_page_table(void);
 extern void __init reserve_trampoline_memory(void);
 #else
-static inline void reserve_trampoline_memory(void) {};
+static inline void setup_trampoline_page_table(void) {}
+static inline void reserve_trampoline_memory(void) {}
 #endif /* CONFIG_X86_TRAMPOLINE */
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index c0427295e8f..1ca132fc0d0 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -59,5 +59,7 @@ extern void check_tsc_sync_source(int cpu);
 extern void check_tsc_sync_target(void);
 
 extern int notsc_setup(char *);
+extern void save_sched_clock_state(void);
+extern void restore_sched_clock_state(void);
 
 #endif /* _ASM_X86_TSC_H */
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 4dc0084ec1b..f1efebaf551 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1728,6 +1728,8 @@ __apicdebuginit(void) print_IO_APIC(void)
 		struct irq_pin_list *entry;
 
 		cfg = desc->chip_data;
+		if (!cfg)
+			continue;
 		entry = cfg->irq_2_pin;
 		if (!entry)
 			continue;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 60a57b13082..ba5f62f45f0 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -669,7 +669,7 @@ bool cpu_has_amd_erratum(const int *erratum)
 	}
 
 	/* OSVW unavailable or ID unknown, match family-model-stepping range */
-	ms = (cpu->x86_model << 8) | cpu->x86_mask;
+	ms = (cpu->x86_model << 4) | cpu->x86_mask;
 	while ((range = *erratum++))
 		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
 		    (ms >= AMD_MODEL_RANGE_START(range)) &&
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 214ac860ebe..d8d86d01400 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -491,33 +491,78 @@ static void intel_pmu_enable_all(int added)
  * Intel Errata AAP53 (model 30)
  * Intel Errata BD53 (model 44)
  *
- * These chips need to be 'reset' when adding counters by programming
- * the magic three (non counting) events 0x4300D2, 0x4300B1 and 0x4300B5
- * either in sequence on the same PMC or on different PMCs.
+ * The official story:
+ *   These chips need to be 'reset' when adding counters by programming the
+ *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
+ *   in sequence on the same PMC or on different PMCs.
+ *
+ * In practise it appears some of these events do in fact count, and
+ * we need to programm all 4 events.
  */
-static void intel_pmu_nhm_enable_all(int added)
+static void intel_pmu_nhm_workaround(void)
 {
-	if (added) {
-		struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-		int i;
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	static const unsigned long nhm_magic[4] = {
+		0x4300B5,
+		0x4300D2,
+		0x4300B1,
+		0x4300B1
+	};
+	struct perf_event *event;
+	int i;
+
+	/*
+	 * The Errata requires below steps:
+	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
+	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
+	 *    the corresponding PMCx;
+	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
+	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
+	 * 5) Clear 4 pairs of ERFEVTSELx and PMCx;
+	 */
+
+	/*
+	 * The real steps we choose are a little different from above.
+	 * A) To reduce MSR operations, we don't run step 1) as they
+	 *    are already cleared before this function is called;
+	 * B) Call x86_perf_event_update to save PMCx before configuring
+	 *    PERFEVTSELx with magic number;
+	 * C) With step 5), we do clear only when the PERFEVTSELx is
+	 *    not used currently.
+	 * D) Call x86_perf_event_set_period to restore PMCx;
+	 */
 
-		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 0, 0x4300D2);
-		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 1, 0x4300B1);
-		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 2, 0x4300B5);
+	/* We always operate 4 pairs of PERF Counters */
+	for (i = 0; i < 4; i++) {
+		event = cpuc->events[i];
+		if (event)
+			x86_perf_event_update(event);
+	}
 
-		wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x3);
-		wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
+	for (i = 0; i < 4; i++) {
+		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
+		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
+	}
 
-		for (i = 0; i < 3; i++) {
-			struct perf_event *event = cpuc->events[i];
+	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
+	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
 
-			if (!event)
-				continue;
+	for (i = 0; i < 4; i++) {
+		event = cpuc->events[i];
 
+		if (event) {
+			x86_perf_event_set_period(event);
 			__x86_pmu_enable_event(&event->hw,
-					ARCH_PERFMON_EVENTSEL_ENABLE);
-		}
+					ARCH_PERFMON_EVENTSEL_ENABLE);
+		} else
+			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
 	}
+}
+
+static void intel_pmu_nhm_enable_all(int added)
+{
+	if (added)
+		intel_pmu_nhm_workaround();
 	intel_pmu_enable_all(added);
 }
 
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index febb12cea79..7e578e9cc58 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -497,6 +497,8 @@ static int p4_hw_config(struct perf_event *event)
 		event->hw.config |= event->attr.config &
 			(p4_config_pack_escr(P4_ESCR_MASK_HT) |
 			 p4_config_pack_cccr(P4_CCCR_MASK_HT | P4_CCCR_RESERVED));
+
+		event->hw.config &= ~P4_CCCR_FORCE_OVF;
 	}
 
 	rc = x86_setup_perfctr(event);
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index ff4c453e13f..fa8c1b8e09f 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -334,7 +334,7 @@ ENTRY(startup_32_smp)
 /*
  * Enable paging
  */
-	movl $pa(swapper_pg_dir),%eax
+	movl pa(initial_page_table), %eax
 	movl %eax,%cr3		/* set the page table pointer.. */
 	movl %cr0,%eax
 	orl $X86_CR0_PG,%eax
@@ -614,6 +614,8 @@ ignore_int:
 	.align 4
 ENTRY(initial_code)
 	.long i386_start_kernel
+ENTRY(initial_page_table)
+	.long pa(swapper_pg_dir)
 
 /*
  * BSS section
@@ -629,6 +631,10 @@ ENTRY(swapper_pg_dir)
 #endif
 swapper_pg_fixmap:
 	.fill 1024,4,0
+#ifdef CONFIG_X86_TRAMPOLINE
+ENTRY(trampoline_pg_dir)
+	.fill 1024,4,0
+#endif
 ENTRY(empty_zero_page)
 	.fill 4096,1,0
 
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 1f11f5ce668..a46cb3522c0 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -40,6 +40,7 @@
 
 static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
 unsigned int xstate_size;
+EXPORT_SYMBOL_GPL(xstate_size);
 unsigned int sig_xstate_ia32_size = sizeof(struct _fpstate_ia32);
 static struct i387_fxsave_struct fx_scratch __cpuinitdata;
 
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index ef10940e1af..852b81967a3 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -194,7 +194,7 @@ static struct hw_breakpoint {
 	unsigned long addr;
 	int len;
 	int type;
-	struct perf_event **pev;
+	struct perf_event * __percpu *pev;
 } breakinfo[HBP_NUM];
 
 static unsigned long early_dr7;
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 1bfb6cf4dd5..770ebfb349e 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -709,6 +709,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	struct hlist_node *node, *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+	kprobe_opcode_t *correct_ret_addr = NULL;
 
 	INIT_HLIST_HEAD(&empty_rp);
 	kretprobe_hash_lock(current, &head, &flags);
@@ -740,14 +741,34 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 			/* another task is sharing our hash bucket */
 			continue;
 
+		orig_ret_address = (unsigned long)ri->ret_addr;
+
+		if (orig_ret_address != trampoline_address)
+			/*
+			 * This is the real return address. Any other
+			 * instances associated with this task are for
+			 * other calls deeper on the call stack
+			 */
+			break;
+	}
+
+	kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+	correct_ret_addr = ri->ret_addr;
+	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+		if (ri->task != current)
+			/* another task is sharing our hash bucket */
+			continue;
+
+		orig_ret_address = (unsigned long)ri->ret_addr;
 		if (ri->rp && ri->rp->handler) {
 			__get_cpu_var(current_kprobe) = &ri->rp->kp;
 			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+			ri->ret_addr = correct_ret_addr;
 			ri->rp->handler(ri, regs);
 			__get_cpu_var(current_kprobe) = NULL;
 		}
 
-		orig_ret_address = (unsigned long)ri->ret_addr;
 		recycle_rp_inst(ri, &empty_rp);
 
 		if (orig_ret_address != trampoline_address)
@@ -759,8 +780,6 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 			break;
 	}
 
-	kretprobe_assert(ri, orig_ret_address, trampoline_address);
-
 	kretprobe_hash_unlock(current, &flags);
 
 	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 64ecaf0af9a..57d1868a86a 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -301,8 +301,9 @@ EXPORT_SYMBOL(kernel_thread);
 /*
  * sys_execve() executes a new program.
  */
-long sys_execve(const char __user *name, char __user * __user *argv,
-		char __user * __user *envp, struct pt_regs *regs)
+long sys_execve(const char __user *name,
+		const char __user *const __user *argv,
+		const char __user *const __user *envp, struct pt_regs *regs)
 {
 	long error;
 	char *filename;
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index b008e788320..c3a4fbb2b99 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1014,6 +1014,8 @@ void __init setup_arch(char **cmdline_p)
 	paging_init();
 	x86_init.paging.pagetable_setup_done(swapper_pg_dir);
 
+	setup_trampoline_page_table();
+
 	tboot_probe();
 
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index a5e928b0cb5..8b3bfc4dd70 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -73,7 +73,6 @@
 
 #ifdef CONFIG_X86_32
 u8 apicid_2_node[MAX_APICID];
-static int low_mappings;
 #endif
 
 /* State of each CPU */
@@ -91,6 +90,25 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 };
 static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
 #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
 #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
+
+/*
+ * We need this for trampoline_base protection from concurrent accesses when
+ * off- and onlining cores wildly.
+ */
+static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
+
+void cpu_hotplug_driver_lock()
+{
+	mutex_lock(&x86_cpu_hotplug_driver_mutex);
+}
+
+void cpu_hotplug_driver_unlock()
+{
+	mutex_unlock(&x86_cpu_hotplug_driver_mutex);
+}
+
+ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
+ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
 #else
 static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
 #define get_idle_for_cpu(x) (idle_thread_array[(x)])
@@ -281,6 +299,18 @@ notrace static void __cpuinit start_secondary(void *unused)
 	 * fragile that we want to limit the things done here to the
 	 * most necessary things.
 	 */
+
+#ifdef CONFIG_X86_32
+	/*
+	 * Switch away from the trampoline page-table
+	 *
+	 * Do this before cpu_init() because it needs to access per-cpu
+	 * data which may not be mapped in the trampoline page-table.
+	 */
+	load_cr3(swapper_pg_dir);
+	__flush_tlb_all();
+#endif
+
 	vmi_bringup();
 	cpu_init();
 	preempt_disable();
@@ -299,12 +329,6 @@ notrace static void __cpuinit start_secondary(void *unused)
 		legacy_pic->chip->unmask(0);
 	}
 
-#ifdef CONFIG_X86_32
-	while (low_mappings)
-		cpu_relax();
-	__flush_tlb_all();
-#endif
-
 	/* This must be done before setting cpu_online_mask */
 	set_cpu_sibling_map(raw_smp_processor_id());
 	wmb();
@@ -750,6 +774,7 @@ do_rest:
 #ifdef CONFIG_X86_32
 	/* Stack for startup_32 can be just as for start_secondary onwards */
 	irq_ctx_init(cpu);
+	initial_page_table = __pa(&trampoline_pg_dir);
 #else
 	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
 	initial_gs = per_cpu_offset(cpu);
@@ -897,20 +922,8 @@ int __cpuinit native_cpu_up(unsigned int cpu)
 
 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 
-#ifdef CONFIG_X86_32
-	/* init low mem mapping */
-	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-		min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
-	flush_tlb_all();
-	low_mappings = 1;
-
 	err = do_boot_cpu(apicid, cpu);
 
-	zap_low_mappings(false);
-	low_mappings = 0;
-#else
-	err = do_boot_cpu(apicid, cpu);
-#endif
 	if (err) {
 		pr_debug("do_boot_cpu failed %d\n", err);
 		return -EIO;
diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
index 196552bb412..d5e06624e34 100644
--- a/arch/x86/kernel/sys_i386_32.c
+++ b/arch/x86/kernel/sys_i386_32.c
@@ -28,7 +28,9 @@
  * Do a system call from kernel instead of calling sys_execve so we
  * end up with proper pt_regs.
  */
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+int kernel_execve(const char *filename,
+		  const char *const argv[],
+		  const char *const envp[])
 {
 	long __res;
 	asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index c652ef62742..a874495b367 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -1,6 +1,7 @@
 #include <linux/io.h>
 
 #include <asm/trampoline.h>
+#include <asm/pgtable.h>
 #include <asm/e820.h>
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP)
@@ -37,3 +38,20 @@ unsigned long __trampinit setup_trampoline(void)
 	memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE);
 	return virt_to_phys(trampoline_base);
 }
+
+void __init setup_trampoline_page_table(void)
+{
+#ifdef CONFIG_X86_32
+	/* Copy kernel address range */
+	clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
+			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+			min_t(unsigned long, KERNEL_PGD_PTRS,
+			      KERNEL_PGD_BOUNDARY));
+
+	/* Initialize low mappings */
+	clone_pgd_range(trampoline_pg_dir,
+			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+			min_t(unsigned long, KERNEL_PGD_PTRS,
+			      KERNEL_PGD_BOUNDARY));
+#endif
+}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index ce8e5023933..d632934cb63 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -626,6 +626,44 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 	local_irq_restore(flags);
 }
 
+static unsigned long long cyc2ns_suspend;
+
+void save_sched_clock_state(void)
+{
+	if (!sched_clock_stable)
+		return;
+
+	cyc2ns_suspend = sched_clock();
+}
+
+/*
+ * Even on processors with invariant TSC, TSC gets reset in some the
+ * ACPI system sleep states. And in some systems BIOS seem to reinit TSC to
+ * arbitrary value (still sync'd across cpu's) during resume from such sleep
+ * states. To cope up with this, recompute the cyc2ns_offset for each cpu so
+ * that sched_clock() continues from the point where it was left off during
+ * suspend.
+ */
+void restore_sched_clock_state(void)
+{
+	unsigned long long offset;
+	unsigned long flags;
+	int cpu;
+
+	if (!sched_clock_stable)
+		return;
+
+	local_irq_save(flags);
+
+	get_cpu_var(cyc2ns_offset) = 0;
+	offset = cyc2ns_suspend - sched_clock();
+
+	for_each_possible_cpu(cpu)
+		per_cpu(cyc2ns_offset, cpu) = offset;
+
+	local_irq_restore(flags);
+}
+
 #ifdef CONFIG_CPU_FREQ
 
 /* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 0fd6378981f..ddeb2314b52 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -697,6 +697,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
 	pit->wq = create_singlethread_workqueue("kvm-pit-wq");
 	if (!pit->wq) {
 		mutex_unlock(&pit->pit_state.lock);
+		kvm_free_irq_source_id(kvm, pit->irq_source_id);
 		kfree(pit);
 		return NULL;
 	}
@@ -742,7 +743,7 @@ fail:
 	kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
 	kvm_unregister_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
 	kvm_free_irq_source_id(kvm, pit->irq_source_id);
-
+	destroy_workqueue(pit->wq);
 	kfree(pit);
 	return NULL;
 }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 25f19078b32..3a09c625d52 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2387,7 +2387,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
 	if (cpu_has_xsave)
 		memcpy(guest_xsave->region,
 			&vcpu->arch.guest_fpu.state->xsave,
-			sizeof(struct xsave_struct));
+			xstate_size);
 	else {
 		memcpy(guest_xsave->region,
 			&vcpu->arch.guest_fpu.state->fxsave,
@@ -2405,7 +2405,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
 
 	if (cpu_has_xsave)
 		memcpy(&vcpu->arch.guest_fpu.state->xsave,
-			guest_xsave->region, sizeof(struct xsave_struct));
+			guest_xsave->region, xstate_size);
 	else {
 		if (xstate_bv & ~XSTATE_FPSSE)
 			return -EINVAL;
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index e7e8c5f5495..87bb35e34ef 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -113,6 +113,7 @@ static void __save_processor_state(struct saved_context *ctxt)
 void save_processor_state(void)
 {
 	__save_processor_state(&saved_context);
+	save_sched_clock_state();
 }
 #ifdef CONFIG_X86_32
 EXPORT_SYMBOL(save_processor_state);
@@ -229,6 +230,7 @@ static void __restore_processor_state(struct saved_context *ctxt)
 void restore_processor_state(void)
 {
 	__restore_processor_state(&saved_context);
+	restore_sched_clock_state();
 }
 #ifdef CONFIG_X86_32
 EXPORT_SYMBOL(restore_processor_state);
diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c
index 554c002a1e1..0f456386cce 100644
--- a/arch/x86/xen/platform-pci-unplug.c
+++ b/arch/x86/xen/platform-pci-unplug.c
@@ -72,13 +72,17 @@ void __init xen_unplug_emulated_devices(void)
 {
 	int r;
 
+	/* user explicitly requested no unplug */
+	if (xen_emul_unplug & XEN_UNPLUG_NEVER)
+		return;
 	/* check the version of the xen platform PCI device */
 	r = check_platform_magic();
 	/* If the version matches enable the Xen platform PCI driver.
-	 * Also enable the Xen platform PCI driver if the version is really old
-	 * and the user told us to ignore it. */
+	 * Also enable the Xen platform PCI driver if the host does
+	 * not support the unplug protocol (XEN_PLATFORM_ERR_MAGIC)
+	 * but the user told us that unplugging is unnecessary. */
 	if (r && !(r == XEN_PLATFORM_ERR_MAGIC &&
-			(xen_emul_unplug & XEN_UNPLUG_IGNORE)))
+			(xen_emul_unplug & XEN_UNPLUG_UNNECESSARY)))
 		return;
 	/* Set the default value of xen_emul_unplug depending on whether or
 	 * not the Xen PV frontends and the Xen platform PCI driver have
@@ -99,7 +103,7 @@ void __init xen_unplug_emulated_devices(void)
 		}
 	}
 	/* Now unplug the emulated devices */
-	if (!(xen_emul_unplug & XEN_UNPLUG_IGNORE))
+	if (!(xen_emul_unplug & XEN_UNPLUG_UNNECESSARY))
 		outw(xen_emul_unplug, XEN_IOPORT_UNPLUG);
 	xen_platform_pci_unplug = xen_emul_unplug;
 }
@@ -125,8 +129,10 @@ static int __init parse_xen_emul_unplug(char *arg)
 			xen_emul_unplug |= XEN_UNPLUG_AUX_IDE_DISKS;
 		else if (!strncmp(p, "nics", l))
 			xen_emul_unplug |= XEN_UNPLUG_ALL_NICS;
-		else if (!strncmp(p, "ignore", l))
-			xen_emul_unplug |= XEN_UNPLUG_IGNORE;
+		else if (!strncmp(p, "unnecessary", l))
+			xen_emul_unplug |= XEN_UNPLUG_UNNECESSARY;
+		else if (!strncmp(p, "never", l))
+			xen_emul_unplug |= XEN_UNPLUG_NEVER;
 		else
 			printk(KERN_WARNING "unrecognised option '%s' "
 					"in parameter 'xen_emul_unplug'\n", p);