author     Juergen Gross <jgross@suse.com>        2018-08-28 03:40:25 -0400
committer  Thomas Gleixner <tglx@linutronix.de>   2018-09-03 10:50:37 -0400
commit     fdc0269e8958a1ec95b8ac685c1d372c24c60faa (patch)
tree       5acfe960e9ea5eafe04f7bb819976fb163d12919
parent     6da63eb241a05b0e676d68975e793c0521387141 (diff)
x86/paravirt: Move the Xen-only pv_mmu_ops under the PARAVIRT_XXL umbrella
Most of the paravirt ops defined in pv_mmu_ops are for Xen PV guests
only. Define them only if CONFIG_PARAVIRT_XXL is set.
Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: xen-devel@lists.xenproject.org
Cc: virtualization@lists.linux-foundation.org
Cc: akataria@vmware.com
Cc: rusty@rustcorp.com.au
Cc: boris.ostrovsky@oracle.com
Cc: hpa@zytor.com
Link: https://lkml.kernel.org/r/20180828074026.820-15-jgross@suse.com
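
The gating applied throughout the patch below follows the usual Kconfig pattern: hooks needed by every paravirt guest stay unconditional, while the Xen-PV-only hooks and their inline wrappers are compiled in only when CONFIG_PARAVIRT_XXL is set, falling back to the native code otherwise. A rough, illustrative sketch of that idea (simplified member and helper names, not the exact kernel declarations):

```c
/* Illustrative sketch only -- trimmed down from the real pv_mmu_ops layout. */
struct pv_mmu_ops {
	/* Needed by all CONFIG_PARAVIRT guests (TLB flushing etc.). */
	void (*flush_tlb_user)(void);

#ifdef CONFIG_PARAVIRT_XXL
	/* Xen PV only: control-register and page-table hooks. */
	unsigned long (*read_cr2)(void);
	void (*write_cr2)(unsigned long cr2);
#endif
};

#ifdef CONFIG_PARAVIRT_XXL
/* Route through the paravirt op when the Xen PV hooks are built in. */
static inline unsigned long read_cr2(void)
{
	return pv_ops.mmu.read_cr2();
}
#else
/* Otherwise use the native instruction directly. */
static inline unsigned long read_cr2(void)
{
	return native_read_cr2();
}
#endif
```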
-rw-r--r--  arch/x86/include/asm/fixmap.h          |   2
-rw-r--r--  arch/x86/include/asm/mmu_context.h     |   4
-rw-r--r--  arch/x86/include/asm/paravirt.h        | 125
-rw-r--r--  arch/x86/include/asm/paravirt_types.h  |  33
-rw-r--r--  arch/x86/include/asm/pgalloc.h         |   2
-rw-r--r--  arch/x86/include/asm/pgtable.h         |   7
-rw-r--r--  arch/x86/include/asm/special_insns.h   |  11
-rw-r--r--  arch/x86/kernel/asm-offsets.c          |   4
-rw-r--r--  arch/x86/kernel/head_64.S              |   4
-rw-r--r--  arch/x86/kernel/paravirt.c             |  15
-rw-r--r--  arch/x86/kernel/paravirt_patch_32.c    |   4
-rw-r--r--  arch/x86/kernel/paravirt_patch_64.c    |   4
12 files changed, 103 insertions(+), 112 deletions(-)
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index e203169931c7..ac80e7eadc3a 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -152,7 +152,7 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
 void native_set_fixmap(enum fixed_addresses idx,
 		       phys_addr_t phys, pgprot_t flags);
 
-#ifndef CONFIG_PARAVIRT
+#ifndef CONFIG_PARAVIRT_XXL
 static inline void __set_fixmap(enum fixed_addresses idx,
 				phys_addr_t phys, pgprot_t flags)
 {
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index eeeb9289c764..0ca50611e8ce 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -16,12 +16,12 @@
 
 extern atomic64_t last_mm_ctx_id;
 
-#ifndef CONFIG_PARAVIRT
+#ifndef CONFIG_PARAVIRT_XXL
 static inline void paravirt_activate_mm(struct mm_struct *prev,
 					struct mm_struct *next)
 {
 }
-#endif /* !CONFIG_PARAVIRT */
+#endif /* !CONFIG_PARAVIRT_XXL */
 
 #ifdef CONFIG_PERF_EVENTS
 
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 512433c56c33..63ab58dc5b73 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -17,6 +17,62 @@
 #include <linux/cpumask.h>
 #include <asm/frame.h>
 
+static inline unsigned long long paravirt_sched_clock(void)
+{
+	return PVOP_CALL0(unsigned long long, time.sched_clock);
+}
+
+struct static_key;
+extern struct static_key paravirt_steal_enabled;
+extern struct static_key paravirt_steal_rq_enabled;
+
+static inline u64 paravirt_steal_clock(int cpu)
+{
+	return PVOP_CALL1(u64, time.steal_clock, cpu);
+}
+
+/* The paravirtualized I/O functions */
+static inline void slow_down_io(void)
+{
+	pv_ops.cpu.io_delay();
+#ifdef REALLY_SLOW_IO
+	pv_ops.cpu.io_delay();
+	pv_ops.cpu.io_delay();
+	pv_ops.cpu.io_delay();
+#endif
+}
+
+static inline void __flush_tlb(void)
+{
+	PVOP_VCALL0(mmu.flush_tlb_user);
+}
+
+static inline void __flush_tlb_global(void)
+{
+	PVOP_VCALL0(mmu.flush_tlb_kernel);
+}
+
+static inline void __flush_tlb_one_user(unsigned long addr)
+{
+	PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
+}
+
+static inline void flush_tlb_others(const struct cpumask *cpumask,
+				    const struct flush_tlb_info *info)
+{
+	PVOP_VCALL2(mmu.flush_tlb_others, cpumask, info);
+}
+
+static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
+{
+	PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
+}
+
+static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
+{
+	PVOP_VCALL1(mmu.exit_mmap, mm);
+}
+
 #ifdef CONFIG_PARAVIRT_XXL
 static inline void load_sp0(unsigned long sp0)
 {
@@ -52,7 +108,6 @@ static inline void write_cr0(unsigned long x)
 {
 	PVOP_VCALL1(cpu.write_cr0, x);
 }
-#endif
 
 static inline unsigned long read_cr2(void)
 {
@@ -74,7 +129,6 @@ static inline void write_cr3(unsigned long x)
 	PVOP_VCALL1(mmu.write_cr3, x);
 }
 
-#ifdef CONFIG_PARAVIRT_XXL
 static inline void __write_cr4(unsigned long x)
 {
 	PVOP_VCALL1(cpu.write_cr4, x);
@@ -172,23 +226,7 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 	*p = paravirt_read_msr_safe(msr, &err);
 	return err;
 }
-#endif
 
-static inline unsigned long long paravirt_sched_clock(void)
-{
-	return PVOP_CALL0(unsigned long long, time.sched_clock);
-}
-
-struct static_key;
-extern struct static_key paravirt_steal_enabled;
-extern struct static_key paravirt_steal_rq_enabled;
-
-static inline u64 paravirt_steal_clock(int cpu)
-{
-	return PVOP_CALL1(u64, time.steal_clock, cpu);
-}
-
-#ifdef CONFIG_PARAVIRT_XXL
 static inline unsigned long long paravirt_read_pmc(int counter)
 {
 	return PVOP_CALL1(u64, cpu.read_pmc, counter);
@@ -267,18 +305,6 @@ static inline void set_iopl_mask(unsigned mask)
 {
 	PVOP_VCALL1(cpu.set_iopl_mask, mask);
 }
-#endif
-
-/* The paravirtualized I/O functions */
-static inline void slow_down_io(void)
-{
-	pv_ops.cpu.io_delay();
-#ifdef REALLY_SLOW_IO
-	pv_ops.cpu.io_delay();
-	pv_ops.cpu.io_delay();
-	pv_ops.cpu.io_delay();
-#endif
-}
 
 static inline void paravirt_activate_mm(struct mm_struct *prev,
 					struct mm_struct *next)
@@ -292,35 +318,6 @@ static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
 	PVOP_VCALL2(mmu.dup_mmap, oldmm, mm);
 }
 
-static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
-{
-	PVOP_VCALL1(mmu.exit_mmap, mm);
-}
-
-static inline void __flush_tlb(void)
-{
-	PVOP_VCALL0(mmu.flush_tlb_user);
-}
-static inline void __flush_tlb_global(void)
-{
-	PVOP_VCALL0(mmu.flush_tlb_kernel);
-}
-static inline void __flush_tlb_one_user(unsigned long addr)
-{
-	PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
-}
-
-static inline void flush_tlb_others(const struct cpumask *cpumask,
-				    const struct flush_tlb_info *info)
-{
-	PVOP_VCALL2(mmu.flush_tlb_others, cpumask, info);
-}
-
-static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
-{
-	PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
-}
-
 static inline int paravirt_pgd_alloc(struct mm_struct *mm)
 {
 	return PVOP_CALL1(int, mmu.pgd_alloc, mm);
@@ -623,7 +620,6 @@ static inline void pmd_clear(pmd_t *pmdp)
 }
 #endif /* CONFIG_X86_PAE */
 
-#ifdef CONFIG_PARAVIRT_XXL
 #define __HAVE_ARCH_START_CONTEXT_SWITCH
 static inline void arch_start_context_switch(struct task_struct *prev)
 {
@@ -634,7 +630,6 @@ static inline void arch_end_context_switch(struct task_struct *next)
 {
 	PVOP_VCALL1(cpu.end_context_switch, next);
 }
-#endif
 
 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 static inline void arch_enter_lazy_mmu_mode(void)
@@ -657,6 +652,7 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 {
 	pv_ops.mmu.set_fixmap(idx, phys, flags);
 }
+#endif
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
@@ -948,15 +944,20 @@ extern void default_banner(void);
 #endif /* __ASSEMBLY__ */
 #else /* CONFIG_PARAVIRT */
 # define default_banner x86_init_noop
+#endif /* !CONFIG_PARAVIRT */
+
 #ifndef __ASSEMBLY__
+#ifndef CONFIG_PARAVIRT_XXL
 static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
 					  struct mm_struct *mm)
 {
 }
+#endif
 
+#ifndef CONFIG_PARAVIRT
 static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
 {
 }
+#endif
 #endif /* __ASSEMBLY__ */
-#endif /* !CONFIG_PARAVIRT */
 #endif /* _ASM_X86_PARAVIRT_H */
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index f582093b52dd..a0094aaf073e 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -91,13 +91,14 @@ struct pv_init_ops {
 			  unsigned long addr, unsigned len);
 } __no_randomize_layout;
 
-
+#ifdef CONFIG_PARAVIRT_XXL
 struct pv_lazy_ops {
 	/* Set deferred update mode, used for batching operations. */
 	void (*enter)(void);
 	void (*leave)(void);
 	void (*flush)(void);
 } __no_randomize_layout;
+#endif
 
 struct pv_time_ops {
 	unsigned long long (*sched_clock)(void);
@@ -205,31 +206,30 @@ struct pv_irq_ops {
 } __no_randomize_layout;
 
 struct pv_mmu_ops {
+	/* TLB operations */
+	void (*flush_tlb_user)(void);
+	void (*flush_tlb_kernel)(void);
+	void (*flush_tlb_one_user)(unsigned long addr);
+	void (*flush_tlb_others)(const struct cpumask *cpus,
+				 const struct flush_tlb_info *info);
+
+	void (*tlb_remove_table)(struct mmu_gather *tlb, void *table);
+
+	/* Hook for intercepting the destruction of an mm_struct. */
+	void (*exit_mmap)(struct mm_struct *mm);
+
+#ifdef CONFIG_PARAVIRT_XXL
 	unsigned long (*read_cr2)(void);
 	void (*write_cr2)(unsigned long);
 
 	unsigned long (*read_cr3)(void);
 	void (*write_cr3)(unsigned long);
 
-	/*
-	 * Hooks for intercepting the creation/use/destruction of an
-	 * mm_struct.
-	 */
+	/* Hooks for intercepting the creation/use of an mm_struct. */
 	void (*activate_mm)(struct mm_struct *prev,
 			    struct mm_struct *next);
 	void (*dup_mmap)(struct mm_struct *oldmm,
 			 struct mm_struct *mm);
-	void (*exit_mmap)(struct mm_struct *mm);
-
-
-	/* TLB operations */
-	void (*flush_tlb_user)(void);
-	void (*flush_tlb_kernel)(void);
-	void (*flush_tlb_one_user)(unsigned long addr);
-	void (*flush_tlb_others)(const struct cpumask *cpus,
-				 const struct flush_tlb_info *info);
-
-	void (*tlb_remove_table)(struct mmu_gather *tlb, void *table);
 
 	/* Hooks for allocating and freeing a pagetable top-level */
 	int (*pgd_alloc)(struct mm_struct *mm);
@@ -304,6 +304,7 @@ struct pv_mmu_ops {
 	   an mfn. We can tell which is which from the index. */
 	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
 			   phys_addr_t phys, pgprot_t flags);
+#endif
 } __no_randomize_layout;
 
 struct arch_spinlock;
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index fbd578daa66e..ec7f43327033 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -8,7 +8,7 @@
 
 static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }
 
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
 #include <asm/paravirt.h>
 #else
 #define paravirt_pgd_alloc(mm)	__paravirt_pgd_alloc(mm)
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 5bb0fe3b7e00..7b0489ca027a 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -55,9 +55,9 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
 
 extern pmdval_t early_pmd_flags;
 
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
 #include <asm/paravirt.h>
-#else  /* !CONFIG_PARAVIRT */
+#else  /* !CONFIG_PARAVIRT_XXL */
 #define set_pte(ptep, pte)		native_set_pte(ptep, pte)
 #define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
 
@@ -111,9 +111,6 @@ extern pmdval_t early_pmd_flags;
 #define pte_val(x)	native_pte_val(x)
 #define __pte(x)	native_make_pte(x)
 
-#endif	/* CONFIG_PARAVIRT */
-
-#ifndef CONFIG_PARAVIRT_XXL
 #define arch_end_context_switch(prev)	do {} while(0)
 #endif	/* CONFIG_PARAVIRT_XXL */
 
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 2aa6ce4bf159..43c029cdc3fe 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -141,11 +141,10 @@ static inline unsigned long __read_cr4(void)
 	return native_read_cr4();
 }
 
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
 #include <asm/paravirt.h>
-#endif
+#else
 
-#ifndef CONFIG_PARAVIRT_XXL
 static inline unsigned long read_cr0(void)
 {
 	return native_read_cr0();
@@ -155,9 +154,7 @@ static inline void write_cr0(unsigned long x)
 {
 	native_write_cr0(x);
 }
-#endif
 
-#ifndef CONFIG_PARAVIRT
 static inline unsigned long read_cr2(void)
 {
 	return native_read_cr2();
@@ -181,9 +178,7 @@ static inline void write_cr3(unsigned long x)
 {
 	native_write_cr3(x);
 }
-#endif
 
-#ifndef CONFIG_PARAVIRT_XXL
 static inline void __write_cr4(unsigned long x)
 {
 	native_write_cr4(x);
@@ -213,7 +208,7 @@ static inline void load_gs_index(unsigned selector)
 
 #endif
 
-#endif/* CONFIG_PARAVIRT_XXL */
+#endif /* CONFIG_PARAVIRT_XXL */
 
 static inline void clflush(volatile void *__p)
 {
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 28e7572ff74d..fc02c3cf238f 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -64,13 +64,11 @@ void common(void) {
 	OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe_ia32, uc.uc_mcontext);
 #endif
 
-#ifdef CONFIG_PARAVIRT
-	BLANK();
 #ifdef CONFIG_PARAVIRT_XXL
+	BLANK();
 	OFFSET(PV_IRQ_irq_disable, paravirt_patch_template, irq.irq_disable);
 	OFFSET(PV_IRQ_irq_enable, paravirt_patch_template, irq.irq_enable);
 	OFFSET(PV_CPU_iret, paravirt_patch_template, cpu.iret);
-#endif
 	OFFSET(PV_MMU_read_cr2, paravirt_patch_template, mmu.read_cr2);
 #endif
 
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index a5bd72a0ee1a..827bca2c2782 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -25,14 +25,12 @@
 #include <asm/export.h>
 #include <asm/nospec-branch.h>
 
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
 #include <asm/asm-offsets.h>
 #include <asm/paravirt.h>
 #define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
 #else
 #define GET_CR2_INTO(reg) movq %cr2, reg
-#endif
-#ifndef CONFIG_PARAVIRT_XXL
 #define INTERRUPT_RETURN iretq
 #endif
 
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 5e8226335eac..bbf006fe78d7 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -384,11 +384,6 @@ struct paravirt_patch_template pv_ops = {
 #endif /* CONFIG_PARAVIRT_XXL */
 
 	/* Mmu ops. */
-	.mmu.read_cr2		= native_read_cr2,
-	.mmu.write_cr2		= native_write_cr2,
-	.mmu.read_cr3		= __native_read_cr3,
-	.mmu.write_cr3		= native_write_cr3,
-
 	.mmu.flush_tlb_user	= native_flush_tlb,
 	.mmu.flush_tlb_kernel	= native_flush_tlb_global,
 	.mmu.flush_tlb_one_user	= native_flush_tlb_one_user,
@@ -396,6 +391,14 @@ struct paravirt_patch_template pv_ops = {
 	.mmu.tlb_remove_table =
 			(void (*)(struct mmu_gather *, void *))tlb_remove_page,
 
+	.mmu.exit_mmap		= paravirt_nop,
+
+#ifdef CONFIG_PARAVIRT_XXL
+	.mmu.read_cr2		= native_read_cr2,
+	.mmu.write_cr2		= native_write_cr2,
+	.mmu.read_cr3		= __native_read_cr3,
+	.mmu.write_cr3		= native_write_cr3,
+
 	.mmu.pgd_alloc		= __paravirt_pgd_alloc,
 	.mmu.pgd_free		= paravirt_nop,
 
@@ -448,7 +451,6 @@ struct paravirt_patch_template pv_ops = {
 	.mmu.make_pgd		= PTE_IDENT,
 
 	.mmu.dup_mmap		= paravirt_nop,
-	.mmu.exit_mmap		= paravirt_nop,
 	.mmu.activate_mm	= paravirt_nop,
 
 	.mmu.lazy_mode = {
@@ -458,6 +460,7 @@ struct paravirt_patch_template pv_ops = {
 	},
 
 	.mmu.set_fixmap		= native_set_fixmap,
+#endif /* CONFIG_PARAVIRT_XXL */
 
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
 	/* Lock ops. */
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index 1d44705c6528..d460cbcabcfe 100644
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -7,10 +7,10 @@ DEF_NATIVE(irq, irq_enable, "sti");
 DEF_NATIVE(irq, restore_fl, "push %eax; popf");
 DEF_NATIVE(irq, save_fl, "pushf; pop %eax");
 DEF_NATIVE(cpu, iret, "iret");
-#endif
 DEF_NATIVE(mmu, read_cr2, "mov %cr2, %eax");
 DEF_NATIVE(mmu, write_cr3, "mov %eax, %cr3");
 DEF_NATIVE(mmu, read_cr3, "mov %cr3, %eax");
+#endif
 
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
 DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%eax)");
@@ -49,10 +49,10 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len)
 		PATCH_SITE(irq, restore_fl);
 		PATCH_SITE(irq, save_fl);
 		PATCH_SITE(cpu, iret);
-#endif
 		PATCH_SITE(mmu, read_cr2);
 		PATCH_SITE(mmu, read_cr3);
 		PATCH_SITE(mmu, write_cr3);
+#endif
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
 	case PARAVIRT_PATCH(lock.queued_spin_unlock):
 		if (pv_is_native_spin_unlock()) {
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index b00937963a0f..5ad5bcda9dc6 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -8,11 +8,9 @@ DEF_NATIVE(irq, irq_disable, "cli");
 DEF_NATIVE(irq, irq_enable, "sti");
 DEF_NATIVE(irq, restore_fl, "pushq %rdi; popfq");
 DEF_NATIVE(irq, save_fl, "pushfq; popq %rax");
-#endif
 DEF_NATIVE(mmu, read_cr2, "movq %cr2, %rax");
 DEF_NATIVE(mmu, read_cr3, "movq %cr3, %rax");
 DEF_NATIVE(mmu, write_cr3, "movq %rdi, %cr3");
-#ifdef CONFIG_PARAVIRT_XXL
 DEF_NATIVE(cpu, wbinvd, "wbinvd");
 
 DEF_NATIVE(cpu, usergs_sysret64, "swapgs; sysretq");
@@ -61,10 +59,10 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len)
 		PATCH_SITE(cpu, usergs_sysret64);
 		PATCH_SITE(cpu, swapgs);
 		PATCH_SITE(cpu, wbinvd);
-#endif
 		PATCH_SITE(mmu, read_cr2);
 		PATCH_SITE(mmu, read_cr3);
 		PATCH_SITE(mmu, write_cr3);
+#endif
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
 	case PARAVIRT_PATCH(lock.queued_spin_unlock):
 		if (pv_is_native_spin_unlock()) {