Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                   |  2
-rw-r--r--  arch/x86/include/asm/msr.h         |  2
-rw-r--r--  arch/x86/include/asm/tsc.h         |  8
-rw-r--r--  arch/x86/kernel/hpet.c             |  4
-rw-r--r--  arch/x86/kernel/tsc.c              |  2
-rw-r--r--  arch/x86/oprofile/op_model_ppro.c  |  9
-rw-r--r--  arch/x86/xen/enlighten.c           |  5
-rw-r--r--  arch/x86/xen/mmu.c                 | 13
8 files changed, 26 insertions, 19 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 4cf0ab13d187..93224b569187 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -957,7 +957,7 @@ config ARCH_PHYS_ADDR_T_64BIT
 config NUMA
 	bool "Numa Memory Allocation and Scheduler Support (EXPERIMENTAL)"
 	depends on SMP
-	depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && EXPERIMENTAL)
+	depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && BROKEN)
 	default n if X86_PC
 	default y if (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP)
 	help
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 46be2fa7ac26..c2a812ebde89 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -108,9 +108,7 @@ static __always_inline unsigned long long __native_read_tsc(void)
 {
 	DECLARE_ARGS(val, low, high);
 
-	rdtsc_barrier();
 	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));
-	rdtsc_barrier();
 
 	return EAX_EDX_VAL(val, low, high);
 }
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 38ae163cc91b..9cd83a8e40d5 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -34,6 +34,8 @@ static inline cycles_t get_cycles(void)
 
 static __always_inline cycles_t vget_cycles(void)
 {
+	cycles_t cycles;
+
 	/*
 	 * We only do VDSOs on TSC capable CPUs, so this shouldnt
 	 * access boot_cpu_data (which is not VDSO-safe):
@@ -42,7 +44,11 @@ static __always_inline cycles_t vget_cycles(void)
 	if (!cpu_has_tsc)
 		return 0;
 #endif
-	return (cycles_t)__native_read_tsc();
+	rdtsc_barrier();
+	cycles = (cycles_t)__native_read_tsc();
+	rdtsc_barrier();
+
+	return cycles;
 }
 
 extern void tsc_init(void);
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 77017e834cf7..067d8de913f6 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -322,7 +322,7 @@ static int hpet_next_event(unsigned long delta,
 	 * what we wrote hit the chip before we compare it to the
 	 * counter.
 	 */
-	WARN_ON((u32)hpet_readl(HPET_T0_CMP) != cnt);
+	WARN_ON_ONCE((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt);
 
 	return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
 }
@@ -445,7 +445,7 @@ static int hpet_setup_irq(struct hpet_dev *dev)
 {
 
 	if (request_irq(dev->irq, hpet_interrupt_handler,
-			IRQF_SHARED|IRQF_NOBALANCING, dev->name, dev))
+			IRQF_DISABLED|IRQF_NOBALANCING, dev->name, dev))
 		return -1;
 
 	disable_irq(dev->irq);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 2ef80e301925..424093b157d3 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -55,7 +55,7 @@ u64 native_sched_clock(void)
 	rdtscll(this_offset);
 
 	/* return the value in ns */
-	return cycles_2_ns(this_offset);
+	return __cycles_2_ns(this_offset);
 }
 
 /* We need to define a real function for sched_clock, to override the
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 0620d6d45f7d..3f1b81a83e2e 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -27,8 +27,7 @@ static int num_counters = 2;
 static int counter_width = 32;
 
 #define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
-#define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0)
-#define CTR_OVERFLOWED(n) (!((n) & (1U<<(counter_width-1))))
+#define CTR_OVERFLOWED(n) (!((n) & (1ULL<<(counter_width-1))))
 
 #define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
 #define CTRL_READ(l, h, msrs, c) do {rdmsr((msrs->controls[(c)].addr), (l), (h)); } while (0)
@@ -124,14 +123,14 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
 static int ppro_check_ctrs(struct pt_regs * const regs,
 			   struct op_msrs const * const msrs)
 {
-	unsigned int low, high;
+	u64 val;
 	int i;
 
 	for (i = 0 ; i < num_counters; ++i) {
 		if (!reset_value[i])
 			continue;
-		CTR_READ(low, high, msrs, i);
-		if (CTR_OVERFLOWED(low)) {
+		rdmsrl(msrs->counters[i].addr, val);
+		if (CTR_OVERFLOWED(val)) {
 			oprofile_add_sample(regs, i);
 			wrmsrl(msrs->counters[i].addr, -reset_value[i]);
 		}
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index b61534c7a4c4..5e4686d70f62 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -863,15 +863,16 @@ static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned l
 	if (PagePinned(virt_to_page(mm->pgd))) {
 		SetPagePinned(page);
 
+		vm_unmap_aliases();
 		if (!PageHighMem(page)) {
 			make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
 			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
 				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
-		} else
+		} else {
 			/* make sure there are no stray mappings of
 			   this page */
 			kmap_flush_unused();
-			vm_unmap_aliases();
+		}
 	}
 }
 
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index aba77b2b7d18..688936044dc9 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -850,13 +850,16 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page,
    read-only, and can be pinned. */
 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
 {
+	vm_unmap_aliases();
+
 	xen_mc_batch();
 
 	if (xen_pgd_walk(mm, xen_pin_page, USER_LIMIT)) {
-		/* re-enable interrupts for kmap_flush_unused */
+		/* re-enable interrupts for flushing */
 		xen_mc_issue(0);
+
 		kmap_flush_unused();
-		vm_unmap_aliases();
+
 		xen_mc_batch();
 	}
 
@@ -874,7 +877,7 @@ static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
 #else /* CONFIG_X86_32 */
 #ifdef CONFIG_X86_PAE
 	/* Need to make sure unshared kernel PMD is pinnable */
-	xen_pin_page(mm, virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])),
+	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
 		     PT_PMD);
 #endif
 	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
@@ -991,7 +994,7 @@ static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
 
 #ifdef CONFIG_X86_PAE
 	/* Need to make sure unshared kernel PMD is unpinned */
-	xen_unpin_page(mm, virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])),
+	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
 		       PT_PMD);
 #endif
 