author    Linus Torvalds <torvalds@linux-foundation.org>  2016-01-14 14:57:22 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-01-14 14:57:22 -0500
commit    10a0c0f0595b20efa127a1816670c64a3d0e4965 (patch)
tree      884e10109861e4e8c05aced820c36b4f06563be9 /arch/x86/include
parent    dcd1bfd50ab6952e8c60fd99d065d5be38b4b8b4 (diff)
parent    7030a7e9321166eef44c811fe4af4d460360d424 (diff)
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Ingo Molnar:
 "Misc changes:

   - fix lguest bug

   - fix /proc/meminfo output on certain configs

   - fix pvclock bug

   - fix reboot on certain iMacs by adding new reboot quirk

   - fix bootup crash

   - fix FPU boot line option parsing

   - add more x86 self-tests

   - small cleanups, documentation improvements, etc"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/cpu/amd: Remove an unneeded condition in srat_detect_node()
  x86/vdso/pvclock: Protect STABLE check with the seqcount
  x86/mm: Improve switch_mm() barrier comments
  selftests/x86: Test __kernel_sigreturn and __kernel_rt_sigreturn
  x86/reboot/quirks: Add iMac10,1 to pci_reboot_dmi_table[]
  lguest: Map switcher text R/O
  x86/boot: Hide local labels in verify_cpu()
  x86/fpu: Disable AVX when eagerfpu is off
  x86/fpu: Disable MPX when eagerfpu is off
  x86/fpu: Disable XGETBV1 when no XSAVE
  x86/fpu: Fix early FPU command-line parsing
  x86/mm: Use PAGE_ALIGNED instead of IS_ALIGNED
  selftests/x86: Disable the ldt_gdt_64 test for now
  x86/mm/pat: Make split_page_count() check for empty levels to fix /proc/meminfo output
  x86/boot: Double BOOT_HEAP_SIZE to 64KB
  x86/mm: Add barriers and document switch_mm()-vs-flush synchronization
Diffstat (limited to 'arch/x86/include')

 -rw-r--r--  arch/x86/include/asm/boot.h          |  2
 -rw-r--r--  arch/x86/include/asm/fpu/internal.h  |  1
 -rw-r--r--  arch/x86/include/asm/fpu/xstate.h    | 11
 -rw-r--r--  arch/x86/include/asm/lguest.h        |  4
 -rw-r--r--  arch/x86/include/asm/mmu_context.h   | 34

 5 files changed, 44 insertions(+), 8 deletions(-)
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 4fa687a47a62..6b8d6e8cd449 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -27,7 +27,7 @@
 #define BOOT_HEAP_SIZE  0x400000
 #else /* !CONFIG_KERNEL_BZIP2 */
 
-#define BOOT_HEAP_SIZE  0x8000
+#define BOOT_HEAP_SIZE  0x10000
 
 #endif /* !CONFIG_KERNEL_BZIP2 */
 
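For reference, the new value matches the commit title "x86/boot: Double BOOT_HEAP_SIZE to 64KB": 0x8000 = 32768 bytes = 32 KB, and 0x10000 = 65536 bytes = 64 KB, so the non-BZIP2 decompression heap is exactly doubled.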
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index eadcdd5bb946..0fd440df63f1 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -42,6 +42,7 @@ extern void fpu__init_cpu_xstate(void);
 extern void fpu__init_system(struct cpuinfo_x86 *c);
 extern void fpu__init_check_bugs(void);
 extern void fpu__resume_cpu(void);
+extern u64 fpu__get_supported_xfeatures_mask(void);
 
 /*
  * Debugging facility:
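The definition of the newly declared helper lives outside this header diff, in arch/x86/kernel/fpu/init.c in the same series. A minimal sketch of the idea, assuming an eagerfpu mode variable as that file uses; this is an illustration of the logic, not the exact upstream body:

    /*
     * Sketch: when eager FPU switching is disabled on the command line,
     * every xfeature that requires eager state saving (now including MPX
     * and the AVX family, see the xstate.h hunk below) must be masked
     * out of the supported set.
     */
    u64 __init fpu__get_supported_xfeatures_mask(void)
    {
            if (eagerfpu != DISABLE)
                    return XCNTXT_MASK;     /* everything we support */

            /* eagerfpu=off: drop all features that need eager saving */
            return XCNTXT_MASK & ~XFEATURE_MASK_EAGER;
    }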
diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
index 3a6c89b70307..af30fdeb140d 100644
--- a/arch/x86/include/asm/fpu/xstate.h
+++ b/arch/x86/include/asm/fpu/xstate.h
@@ -20,15 +20,16 @@
 
 /* Supported features which support lazy state saving */
 #define XFEATURE_MASK_LAZY      (XFEATURE_MASK_FP | \
-                                 XFEATURE_MASK_SSE | \
+                                 XFEATURE_MASK_SSE)
+
+/* Supported features which require eager state saving */
+#define XFEATURE_MASK_EAGER     (XFEATURE_MASK_BNDREGS | \
+                                 XFEATURE_MASK_BNDCSR | \
                                  XFEATURE_MASK_YMM | \
                                  XFEATURE_MASK_OPMASK | \
                                  XFEATURE_MASK_ZMM_Hi256 | \
                                  XFEATURE_MASK_Hi16_ZMM)
 
-/* Supported features which require eager state saving */
-#define XFEATURE_MASK_EAGER     (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR)
-
 /* All currently supported features */
 #define XCNTXT_MASK     (XFEATURE_MASK_LAZY | XFEATURE_MASK_EAGER)
 
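Why the regrouping matters: after this change, booting with eagerfpu=off drops AVX, AVX-512 and MPX together, since they all now live in XFEATURE_MASK_EAGER. A standalone sketch showing how the masks compose, using the architectural XCR0 bit positions (the numeric values below come from the Intel SDM, not from this header):

    #include <stdio.h>

    /* XCR0/xfeature bit positions (architectural) */
    #define XFEATURE_MASK_FP        (1u << 0)
    #define XFEATURE_MASK_SSE       (1u << 1)
    #define XFEATURE_MASK_YMM       (1u << 2)
    #define XFEATURE_MASK_BNDREGS   (1u << 3)
    #define XFEATURE_MASK_BNDCSR    (1u << 4)
    #define XFEATURE_MASK_OPMASK    (1u << 5)
    #define XFEATURE_MASK_ZMM_Hi256 (1u << 6)
    #define XFEATURE_MASK_Hi16_ZMM  (1u << 7)

    #define XFEATURE_MASK_LAZY      (XFEATURE_MASK_FP | XFEATURE_MASK_SSE)
    #define XFEATURE_MASK_EAGER     (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR | \
                                     XFEATURE_MASK_YMM | XFEATURE_MASK_OPMASK | \
                                     XFEATURE_MASK_ZMM_Hi256 | XFEATURE_MASK_Hi16_ZMM)

    int main(void)
    {
            printf("LAZY  = 0x%02x\n", XFEATURE_MASK_LAZY);   /* 0x03 */
            printf("EAGER = 0x%02x\n", XFEATURE_MASK_EAGER);  /* 0xfc */
            /* eagerfpu=off keeps only the lazy set: x87 FP + SSE */
            printf("eagerfpu=off leaves 0x%02x\n", XFEATURE_MASK_LAZY);
            return 0;
    }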
diff --git a/arch/x86/include/asm/lguest.h b/arch/x86/include/asm/lguest.h
index 3bbc07a57a31..73d0c9b92087 100644
--- a/arch/x86/include/asm/lguest.h
+++ b/arch/x86/include/asm/lguest.h
@@ -12,7 +12,9 @@
 #define GUEST_PL 1
 
 /* Page for Switcher text itself, then two pages per cpu */
-#define TOTAL_SWITCHER_PAGES (1 + 2 * nr_cpu_ids)
+#define SWITCHER_TEXT_PAGES  (1)
+#define SWITCHER_STACK_PAGES (2 * nr_cpu_ids)
+#define TOTAL_SWITCHER_PAGES (SWITCHER_TEXT_PAGES + SWITCHER_STACK_PAGES)
 
 /* Where we map the Switcher, in both Host and Guest. */
 extern unsigned long switcher_addr;
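The split exists so that the single text page can be given different (read-only) protections from the per-cpu stack pages, per the "lguest: Map switcher text R/O" commit in this pull. Illustrative arithmetic only; nr_cpu_ids is a runtime value in the kernel, 4 is just an example:

    #include <stdio.h>

    int main(void)
    {
            unsigned int nr_cpu_ids = 4;            /* example; set at boot */
            unsigned int text  = 1;                 /* SWITCHER_TEXT_PAGES, mapped R/O */
            unsigned int stack = 2 * nr_cpu_ids;    /* SWITCHER_STACK_PAGES, mapped R/W */

            printf("TOTAL_SWITCHER_PAGES = %u\n", text + stack);    /* 9 */
            return 0;
    }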
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 379cd3658799..bfd9b2a35a0b 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -116,8 +116,36 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 #endif
         cpumask_set_cpu(cpu, mm_cpumask(next));
 
-        /* Re-load page tables */
+        /*
+         * Re-load page tables.
+         *
+         * This logic has an ordering constraint:
+         *
+         *  CPU 0: Write to a PTE for 'next'
+         *  CPU 0: load bit 1 in mm_cpumask.  if nonzero, send IPI.
+         *  CPU 1: set bit 1 in next's mm_cpumask
+         *  CPU 1: load from the PTE that CPU 0 writes (implicit)
+         *
+         * We need to prevent an outcome in which CPU 1 observes
+         * the new PTE value and CPU 0 observes bit 1 clear in
+         * mm_cpumask.  (If that occurs, then the IPI will never
+         * be sent, and CPU 0's TLB will contain a stale entry.)
+         *
+         * The bad outcome can occur if either CPU's load is
+         * reordered before that CPU's store, so both CPUs must
+         * execute full barriers to prevent this from happening.
+         *
+         * Thus, switch_mm needs a full barrier between the
+         * store to mm_cpumask and any operation that could load
+         * from next->pgd.  TLB fills are special and can happen
+         * due to instruction fetches or for no reason at all,
+         * and neither LOCK nor MFENCE orders them.
+         * Fortunately, load_cr3() is serializing and gives the
+         * ordering guarantee we need.
+         *
+         */
         load_cr3(next->pgd);
+
         trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
 
         /* Stop flush ipis for the previous mm */
@@ -156,10 +184,14 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
          * schedule, protecting us from simultaneous changes.
          */
         cpumask_set_cpu(cpu, mm_cpumask(next));
+
         /*
          * We were in lazy tlb mode and leave_mm disabled
          * tlb flush IPI delivery. We must reload CR3
          * to make sure to use no freed page tables.
+         *
+         * As above, load_cr3() is serializing and orders TLB
+         * fills with respect to the mm_cpumask write.
          */
         load_cr3(next->pgd);
         trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
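The race the new comment documents is the classic store-buffer litmus pattern: each CPU stores then loads, and if either load is reordered before its own store, both sides can miss the other's write. A user-space analogue in C11 atomics (illustrative only; needs a libc with C11 threads, e.g. glibc 2.28+, and the kernel itself relies on load_cr3() being a serializing instruction rather than on explicit fences):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <threads.h>

    /*
     * Store-buffer litmus test.  With relaxed ordering (or even plain
     * x86 stores/loads, via store buffering), r0 == 0 && r1 == 0 is a
     * permitted outcome: the analogue of "CPU 0 misses the mm_cpumask
     * bit AND CPU 1 misses the new PTE", i.e. no IPI is sent and a
     * stale TLB entry survives.  A full barrier between each thread's
     * store and load (e.g. memory_order_seq_cst) forbids it.
     */
    atomic_int pte = 0;         /* stands in for the PTE write */
    atomic_int cpumask_bit = 0; /* stands in for the mm_cpumask bit */
    int r0, r1;

    int cpu0(void *arg)
    {
            (void)arg;
            atomic_store_explicit(&pte, 1, memory_order_relaxed);
            r0 = atomic_load_explicit(&cpumask_bit, memory_order_relaxed);
            return 0;
    }

    int cpu1(void *arg)
    {
            (void)arg;
            atomic_store_explicit(&cpumask_bit, 1, memory_order_relaxed);
            r1 = atomic_load_explicit(&pte, memory_order_relaxed);
            return 0;
    }

    int main(void)
    {
            thrd_t t0, t1;

            thrd_create(&t0, cpu0, NULL);
            thrd_create(&t1, cpu1, NULL);
            thrd_join(t0, NULL);
            thrd_join(t1, NULL);
            printf("r0=%d r1=%d\n", r0, r1);  /* 0/0 is the "stale TLB" case */
            return 0;
    }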