Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/apic/io_apic.c           2
-rw-r--r--  arch/x86/kernel/cpu/amd.c                2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c  81
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c      2
-rw-r--r--  arch/x86/kernel/head_32.S                8
-rw-r--r--  arch/x86/kernel/i387.c                   1
-rw-r--r--  arch/x86/kernel/kgdb.c                   2
-rw-r--r--  arch/x86/kernel/kprobes.c               25
-rw-r--r--  arch/x86/kernel/process.c                5
-rw-r--r--  arch/x86/kernel/setup.c                  2
-rw-r--r--  arch/x86/kernel/smpboot.c               51
-rw-r--r--  arch/x86/kernel/sys_i386_32.c            4
-rw-r--r--  arch/x86/kernel/trampoline.c            18
-rw-r--r--  arch/x86/kernel/tsc.c                   38
14 files changed, 195 insertions, 46 deletions
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 4dc0084ec1b1..f1efebaf5510 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1728,6 +1728,8 @@ __apicdebuginit(void) print_IO_APIC(void)
 		struct irq_pin_list *entry;
 
 		cfg = desc->chip_data;
+		if (!cfg)
+			continue;
 		entry = cfg->irq_2_pin;
 		if (!entry)
 			continue;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 60a57b13082d..ba5f62f45f01 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -669,7 +669,7 @@ bool cpu_has_amd_erratum(const int *erratum)
 	}
 
 	/* OSVW unavailable or ID unknown, match family-model-stepping range */
-	ms = (cpu->x86_model << 8) | cpu->x86_mask;
+	ms = (cpu->x86_model << 4) | cpu->x86_mask;
 	while ((range = *erratum++))
 		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
 		    (ms >= AMD_MODEL_RANGE_START(range)) &&
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 214ac860ebe0..d8d86d014008 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -491,33 +491,78 @@ static void intel_pmu_enable_all(int added)
  * Intel Errata AAP53 (model 30)
  * Intel Errata BD53 (model 44)
  *
- * These chips need to be 'reset' when adding counters by programming
- * the magic three (non counting) events 0x4300D2, 0x4300B1 and 0x4300B5
- * either in sequence on the same PMC or on different PMCs.
+ * The official story:
+ *   These chips need to be 'reset' when adding counters by programming the
+ *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
+ *   in sequence on the same PMC or on different PMCs.
+ *
+ * In practise it appears some of these events do in fact count, and
+ * we need to programm all 4 events.
  */
-static void intel_pmu_nhm_enable_all(int added)
+static void intel_pmu_nhm_workaround(void)
 {
-	if (added) {
-		struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-		int i;
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	static const unsigned long nhm_magic[4] = {
+		0x4300B5,
+		0x4300D2,
+		0x4300B1,
+		0x4300B1
+	};
+	struct perf_event *event;
+	int i;
+
+	/*
+	 * The Errata requires below steps:
+	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
+	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
+	 *    the corresponding PMCx;
+	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
+	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
+	 * 5) Clear 4 pairs of ERFEVTSELx and PMCx;
+	 */
+
+	/*
+	 * The real steps we choose are a little different from above.
+	 * A) To reduce MSR operations, we don't run step 1) as they
+	 *    are already cleared before this function is called;
+	 * B) Call x86_perf_event_update to save PMCx before configuring
+	 *    PERFEVTSELx with magic number;
+	 * C) With step 5), we do clear only when the PERFEVTSELx is
+	 *    not used currently.
+	 * D) Call x86_perf_event_set_period to restore PMCx;
+	 */
 
-		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 0, 0x4300D2);
-		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 1, 0x4300B1);
-		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 2, 0x4300B5);
+	/* We always operate 4 pairs of PERF Counters */
+	for (i = 0; i < 4; i++) {
+		event = cpuc->events[i];
+		if (event)
+			x86_perf_event_update(event);
+	}
 
-		wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x3);
-		wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
+	for (i = 0; i < 4; i++) {
+		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
+		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
+	}
 
-		for (i = 0; i < 3; i++) {
-			struct perf_event *event = cpuc->events[i];
+	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
+	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
 
-			if (!event)
-				continue;
+	for (i = 0; i < 4; i++) {
+		event = cpuc->events[i];
 
+		if (event) {
+			x86_perf_event_set_period(event);
 			__x86_pmu_enable_event(&event->hw,
 					ARCH_PERFMON_EVENTSEL_ENABLE);
-		}
+		} else
+			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
 	}
+}
+
+static void intel_pmu_nhm_enable_all(int added)
+{
+	if (added)
+		intel_pmu_nhm_workaround();
 	intel_pmu_enable_all(added);
 }
 
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index febb12cea795..7e578e9cc58b 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -497,6 +497,8 @@ static int p4_hw_config(struct perf_event *event)
 		event->hw.config |= event->attr.config &
 			(p4_config_pack_escr(P4_ESCR_MASK_HT) |
 			 p4_config_pack_cccr(P4_CCCR_MASK_HT | P4_CCCR_RESERVED));
+
+		event->hw.config &= ~P4_CCCR_FORCE_OVF;
 	}
 
 	rc = x86_setup_perfctr(event);
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index ff4c453e13f3..fa8c1b8e09fb 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -334,7 +334,7 @@ ENTRY(startup_32_smp)
 /*
  * Enable paging
  */
-	movl $pa(swapper_pg_dir),%eax
+	movl pa(initial_page_table), %eax
 	movl %eax,%cr3		/* set the page table pointer.. */
 	movl %cr0,%eax
 	orl $X86_CR0_PG,%eax
@@ -614,6 +614,8 @@ ignore_int:
 .align 4
 ENTRY(initial_code)
 	.long i386_start_kernel
+ENTRY(initial_page_table)
+	.long pa(swapper_pg_dir)
 
 /*
  * BSS section
@@ -629,6 +631,10 @@ ENTRY(swapper_pg_dir)
 #endif
 swapper_pg_fixmap:
 	.fill 1024,4,0
+#ifdef CONFIG_X86_TRAMPOLINE
+ENTRY(trampoline_pg_dir)
+	.fill 1024,4,0
+#endif
 ENTRY(empty_zero_page)
 	.fill 4096,1,0
 
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 1f11f5ce668f..a46cb3522c0c 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -40,6 +40,7 @@
 
 static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
 unsigned int xstate_size;
+EXPORT_SYMBOL_GPL(xstate_size);
 unsigned int sig_xstate_ia32_size = sizeof(struct _fpstate_ia32);
 static struct i387_fxsave_struct fx_scratch __cpuinitdata;
 
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index ef10940e1af0..852b81967a37 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -194,7 +194,7 @@ static struct hw_breakpoint {
 	unsigned long		addr;
 	int			len;
 	int			type;
-	struct perf_event	**pev;
+	struct perf_event	* __percpu *pev;
 } breakinfo[HBP_NUM];
 
 static unsigned long early_dr7;
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 1bfb6cf4dd55..770ebfb349e9 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -709,6 +709,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	struct hlist_node *node, *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+	kprobe_opcode_t *correct_ret_addr = NULL;
 
 	INIT_HLIST_HEAD(&empty_rp);
 	kretprobe_hash_lock(current, &head, &flags);
@@ -740,14 +741,34 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 			/* another task is sharing our hash bucket */
 			continue;
 
+		orig_ret_address = (unsigned long)ri->ret_addr;
+
+		if (orig_ret_address != trampoline_address)
+			/*
+			 * This is the real return address. Any other
+			 * instances associated with this task are for
+			 * other calls deeper on the call stack
+			 */
+			break;
+	}
+
+	kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+	correct_ret_addr = ri->ret_addr;
+	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+		if (ri->task != current)
+			/* another task is sharing our hash bucket */
+			continue;
+
+		orig_ret_address = (unsigned long)ri->ret_addr;
 		if (ri->rp && ri->rp->handler) {
 			__get_cpu_var(current_kprobe) = &ri->rp->kp;
 			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+			ri->ret_addr = correct_ret_addr;
 			ri->rp->handler(ri, regs);
 			__get_cpu_var(current_kprobe) = NULL;
 		}
 
-		orig_ret_address = (unsigned long)ri->ret_addr;
 		recycle_rp_inst(ri, &empty_rp);
 
 		if (orig_ret_address != trampoline_address)
@@ -759,8 +780,6 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 			break;
 	}
 
-	kretprobe_assert(ri, orig_ret_address, trampoline_address);
-
 	kretprobe_hash_unlock(current, &flags);
 
 	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 64ecaf0af9af..57d1868a86aa 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -301,8 +301,9 @@ EXPORT_SYMBOL(kernel_thread);
 /*
  * sys_execve() executes a new program.
  */
-long sys_execve(const char __user *name, char __user * __user *argv,
-		char __user * __user *envp, struct pt_regs *regs)
+long sys_execve(const char __user *name,
+		const char __user *const __user *argv,
+		const char __user *const __user *envp, struct pt_regs *regs)
 {
 	long error;
 	char *filename;
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index b008e7883207..c3a4fbb2b996 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1014,6 +1014,8 @@ void __init setup_arch(char **cmdline_p)
 	paging_init();
 	x86_init.paging.pagetable_setup_done(swapper_pg_dir);
 
+	setup_trampoline_page_table();
+
 	tboot_probe();
 
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index a5e928b0cb5f..8b3bfc4dd708 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -73,7 +73,6 @@
 
 #ifdef CONFIG_X86_32
 u8 apicid_2_node[MAX_APICID];
-static int low_mappings;
 #endif
 
 /* State of each CPU */
@@ -91,6 +90,25 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 };
 static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
 #define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
 #define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
+
+/*
+ * We need this for trampoline_base protection from concurrent accesses when
+ * off- and onlining cores wildly.
+ */
+static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
+
+void cpu_hotplug_driver_lock()
+{
+	mutex_lock(&x86_cpu_hotplug_driver_mutex);
+}
+
+void cpu_hotplug_driver_unlock()
+{
+	mutex_unlock(&x86_cpu_hotplug_driver_mutex);
+}
+
+ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
+ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
 #else
 static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
 #define get_idle_for_cpu(x)      (idle_thread_array[(x)])
@@ -281,6 +299,18 @@ notrace static void __cpuinit start_secondary(void *unused)
 	 * fragile that we want to limit the things done here to the
 	 * most necessary things.
 	 */
+
+#ifdef CONFIG_X86_32
+	/*
+	 * Switch away from the trampoline page-table
+	 *
+	 * Do this before cpu_init() because it needs to access per-cpu
+	 * data which may not be mapped in the trampoline page-table.
+	 */
+	load_cr3(swapper_pg_dir);
+	__flush_tlb_all();
+#endif
+
 	vmi_bringup();
 	cpu_init();
 	preempt_disable();
@@ -299,12 +329,6 @@ notrace static void __cpuinit start_secondary(void *unused)
 		legacy_pic->chip->unmask(0);
 	}
 
-#ifdef CONFIG_X86_32
-	while (low_mappings)
-		cpu_relax();
-	__flush_tlb_all();
-#endif
-
 	/* This must be done before setting cpu_online_mask */
 	set_cpu_sibling_map(raw_smp_processor_id());
 	wmb();
@@ -750,6 +774,7 @@ do_rest:
 #ifdef CONFIG_X86_32
 	/* Stack for startup_32 can be just as for start_secondary onwards */
 	irq_ctx_init(cpu);
+	initial_page_table = __pa(&trampoline_pg_dir);
 #else
 	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
 	initial_gs = per_cpu_offset(cpu);
@@ -897,20 +922,8 @@ int __cpuinit native_cpu_up(unsigned int cpu)
 
 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 
-#ifdef CONFIG_X86_32
-	/* init low mem mapping */
-	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-		min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
-	flush_tlb_all();
-	low_mappings = 1;
-
 	err = do_boot_cpu(apicid, cpu);
 
-	zap_low_mappings(false);
-	low_mappings = 0;
-#else
-	err = do_boot_cpu(apicid, cpu);
-#endif
 	if (err) {
 		pr_debug("do_boot_cpu failed %d\n", err);
 		return -EIO;
diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
index 196552bb412c..d5e06624e34a 100644
--- a/arch/x86/kernel/sys_i386_32.c
+++ b/arch/x86/kernel/sys_i386_32.c
@@ -28,7 +28,9 @@
  * Do a system call from kernel instead of calling sys_execve so we
  * end up with proper pt_regs.
  */
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+int kernel_execve(const char *filename,
+		  const char *const argv[],
+		  const char *const envp[])
 {
 	long __res;
 	asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index c652ef62742d..a874495b3673 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -1,6 +1,7 @@
 #include <linux/io.h>
 
 #include <asm/trampoline.h>
+#include <asm/pgtable.h>
 #include <asm/e820.h>
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP)
@@ -37,3 +38,20 @@ unsigned long __trampinit setup_trampoline(void)
 	memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE);
 	return virt_to_phys(trampoline_base);
 }
+
+void __init setup_trampoline_page_table(void)
+{
+#ifdef CONFIG_X86_32
+	/* Copy kernel address range */
+	clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
+			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+			min_t(unsigned long, KERNEL_PGD_PTRS,
+			      KERNEL_PGD_BOUNDARY));
+
+	/* Initialize low mappings */
+	clone_pgd_range(trampoline_pg_dir,
+			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+			min_t(unsigned long, KERNEL_PGD_PTRS,
+			      KERNEL_PGD_BOUNDARY));
+#endif
+}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index ce8e50239332..d632934cb638 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -626,6 +626,44 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 	local_irq_restore(flags);
 }
 
+static unsigned long long cyc2ns_suspend;
+
+void save_sched_clock_state(void)
+{
+	if (!sched_clock_stable)
+		return;
+
+	cyc2ns_suspend = sched_clock();
+}
+
+/*
+ * Even on processors with invariant TSC, TSC gets reset in some the
+ * ACPI system sleep states. And in some systems BIOS seem to reinit TSC to
+ * arbitrary value (still sync'd across cpu's) during resume from such sleep
+ * states. To cope up with this, recompute the cyc2ns_offset for each cpu so
+ * that sched_clock() continues from the point where it was left off during
+ * suspend.
+ */
+void restore_sched_clock_state(void)
+{
+	unsigned long long offset;
+	unsigned long flags;
+	int cpu;
+
+	if (!sched_clock_stable)
+		return;
+
+	local_irq_save(flags);
+
+	get_cpu_var(cyc2ns_offset) = 0;
+	offset = cyc2ns_suspend - sched_clock();
+
+	for_each_possible_cpu(cpu)
+		per_cpu(cyc2ns_offset, cpu) = offset;
+
+	local_irq_restore(flags);
+}
+
 #ifdef CONFIG_CPU_FREQ
 
 /* Frequency scaling support. Adjust the TSC based timer when the cpu frequency