Diffstat (limited to 'arch/i386/kernel/paravirt.c')

 arch/i386/kernel/paravirt.c | 522
 1 file changed, 141 insertions(+), 381 deletions(-)
diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
index 2ec331e03fa9..5c10f376bce1 100644
--- a/arch/i386/kernel/paravirt.c
+++ b/arch/i386/kernel/paravirt.c
@@ -20,6 +20,7 @@
 #include <linux/efi.h>
 #include <linux/bcd.h>
 #include <linux/start_kernel.h>
+#include <linux/highmem.h>
 
 #include <asm/bug.h>
 #include <asm/paravirt.h>
@@ -35,7 +36,7 @@
 #include <asm/timer.h>
 
 /* nop stub */
-static void native_nop(void)
+void _paravirt_nop(void)
 {
 }
 
@@ -54,331 +55,148 @@ char *memory_setup(void)
 #define DEF_NATIVE(name, code) \
         extern const char start_##name[], end_##name[]; \
         asm("start_" #name ": " code "; end_" #name ":")
-DEF_NATIVE(cli, "cli");
-DEF_NATIVE(sti, "sti");
-DEF_NATIVE(popf, "push %eax; popf");
-DEF_NATIVE(pushf, "pushf; pop %eax");
-DEF_NATIVE(pushf_cli, "pushf; pop %eax; cli");
-DEF_NATIVE(iret, "iret");
-DEF_NATIVE(sti_sysexit, "sti; sysexit");
+
+DEF_NATIVE(irq_disable, "cli");
+DEF_NATIVE(irq_enable, "sti");
+DEF_NATIVE(restore_fl, "push %eax; popf");
+DEF_NATIVE(save_fl, "pushf; pop %eax");
+DEF_NATIVE(iret, "iret");
+DEF_NATIVE(irq_enable_sysexit, "sti; sysexit");
+DEF_NATIVE(read_cr2, "mov %cr2, %eax");
+DEF_NATIVE(write_cr3, "mov %eax, %cr3");
+DEF_NATIVE(read_cr3, "mov %cr3, %eax");
+DEF_NATIVE(clts, "clts");
+DEF_NATIVE(read_tsc, "rdtsc");
 
-static const struct native_insns
-{
-        const char *start, *end;
-} native_insns[] = {
-        [PARAVIRT_IRQ_DISABLE] = { start_cli, end_cli },
-        [PARAVIRT_IRQ_ENABLE] = { start_sti, end_sti },
-        [PARAVIRT_RESTORE_FLAGS] = { start_popf, end_popf },
-        [PARAVIRT_SAVE_FLAGS] = { start_pushf, end_pushf },
-        [PARAVIRT_SAVE_FLAGS_IRQ_DISABLE] = { start_pushf_cli, end_pushf_cli },
-        [PARAVIRT_INTERRUPT_RETURN] = { start_iret, end_iret },
-        [PARAVIRT_STI_SYSEXIT] = { start_sti_sysexit, end_sti_sysexit },
-};
+DEF_NATIVE(ud2a, "ud2a");
 
 static unsigned native_patch(u8 type, u16 clobbers, void *insns, unsigned len)
 {
-        unsigned int insn_len;
-
-        /* Don't touch it if we don't have a replacement */
-        if (type >= ARRAY_SIZE(native_insns) || !native_insns[type].start)
-                return len;
-
-        insn_len = native_insns[type].end - native_insns[type].start;
-
-        /* Similarly if we can't fit replacement. */
-        if (len < insn_len)
-                return len;
+        const unsigned char *start, *end;
+        unsigned ret;
+
+        switch(type) {
+#define SITE(x) case PARAVIRT_PATCH(x): start = start_##x; end = end_##x; goto patch_site
+        SITE(irq_disable);
+        SITE(irq_enable);
+        SITE(restore_fl);
+        SITE(save_fl);
+        SITE(iret);
+        SITE(irq_enable_sysexit);
+        SITE(read_cr2);
+        SITE(read_cr3);
+        SITE(write_cr3);
+        SITE(clts);
+        SITE(read_tsc);
+#undef SITE
+
+        patch_site:
+                ret = paravirt_patch_insns(insns, len, start, end);
+                break;
 
-        memcpy(insns, native_insns[type].start, insn_len);
-        return insn_len;
-}
+        case PARAVIRT_PATCH(make_pgd):
+        case PARAVIRT_PATCH(make_pte):
+        case PARAVIRT_PATCH(pgd_val):
+        case PARAVIRT_PATCH(pte_val):
+#ifdef CONFIG_X86_PAE
+        case PARAVIRT_PATCH(make_pmd):
+        case PARAVIRT_PATCH(pmd_val):
+#endif
+                /* These functions end up returning exactly what
+                   they're passed, in the same registers. */
+                ret = paravirt_patch_nop();
+                break;
 
-static unsigned long native_get_debugreg(int regno)
-{
-        unsigned long val = 0; /* Damn you, gcc! */
-
-        switch (regno) {
-        case 0:
-                asm("movl %%db0, %0" :"=r" (val)); break;
-        case 1:
-                asm("movl %%db1, %0" :"=r" (val)); break;
-        case 2:
-                asm("movl %%db2, %0" :"=r" (val)); break;
-        case 3:
-                asm("movl %%db3, %0" :"=r" (val)); break;
-        case 6:
-                asm("movl %%db6, %0" :"=r" (val)); break;
-        case 7:
-                asm("movl %%db7, %0" :"=r" (val)); break;
         default:
-                BUG();
-        }
-        return val;
-}
-
-static void native_set_debugreg(int regno, unsigned long value)
-{
-        switch (regno) {
-        case 0:
-                asm("movl %0,%%db0" : /* no output */ :"r" (value));
-                break;
-        case 1:
-                asm("movl %0,%%db1" : /* no output */ :"r" (value));
-                break;
-        case 2:
-                asm("movl %0,%%db2" : /* no output */ :"r" (value));
+                ret = paravirt_patch_default(type, clobbers, insns, len);
                 break;
-        case 3:
-                asm("movl %0,%%db3" : /* no output */ :"r" (value));
-                break;
-        case 6:
-                asm("movl %0,%%db6" : /* no output */ :"r" (value));
-                break;
-        case 7:
-                asm("movl %0,%%db7" : /* no output */ :"r" (value));
-                break;
-        default:
-                BUG();
         }
-}
-
-void init_IRQ(void)
-{
-        paravirt_ops.init_IRQ();
-}
-
-static void native_clts(void)
-{
-        asm volatile ("clts");
-}
-
-static unsigned long native_read_cr0(void)
-{
-        unsigned long val;
-        asm volatile("movl %%cr0,%0\n\t" :"=r" (val));
-        return val;
-}
-
-static void native_write_cr0(unsigned long val)
-{
-        asm volatile("movl %0,%%cr0": :"r" (val));
-}
-
-static unsigned long native_read_cr2(void)
-{
-        unsigned long val;
-        asm volatile("movl %%cr2,%0\n\t" :"=r" (val));
-        return val;
-}
-
-static void native_write_cr2(unsigned long val)
-{
-        asm volatile("movl %0,%%cr2": :"r" (val));
-}
-
-static unsigned long native_read_cr3(void)
-{
-        unsigned long val;
-        asm volatile("movl %%cr3,%0\n\t" :"=r" (val));
-        return val;
-}
-
-static void native_write_cr3(unsigned long val)
-{
-        asm volatile("movl %0,%%cr3": :"r" (val));
-}
-
-static unsigned long native_read_cr4(void)
-{
-        unsigned long val;
-        asm volatile("movl %%cr4,%0\n\t" :"=r" (val));
-        return val;
-}
-
-static unsigned long native_read_cr4_safe(void)
-{
-        unsigned long val;
-        /* This could fault if %cr4 does not exist */
-        asm("1: movl %%cr4, %0 \n"
-            "2: \n"
-            ".section __ex_table,\"a\" \n"
-            ".long 1b,2b \n"
-            ".previous \n"
-            : "=r" (val): "0" (0));
-        return val;
-}
-
-static void native_write_cr4(unsigned long val)
-{
-        asm volatile("movl %0,%%cr4": :"r" (val));
-}
-
-static unsigned long native_save_fl(void)
-{
-        unsigned long f;
-        asm volatile("pushfl ; popl %0":"=g" (f): /* no input */);
-        return f;
-}
-
-static void native_restore_fl(unsigned long f)
-{
-        asm volatile("pushl %0 ; popfl": /* no output */
-                     :"g" (f)
-                     :"memory", "cc");
-}
-
-static void native_irq_disable(void)
-{
-        asm volatile("cli": : :"memory");
-}
-
-static void native_irq_enable(void)
-{
-        asm volatile("sti": : :"memory");
-}
-
-static void native_safe_halt(void)
-{
-        asm volatile("sti; hlt": : :"memory");
-}
 
-static void native_halt(void)
-{
-        asm volatile("hlt": : :"memory");
+        return ret;
 }
 
-static void native_wbinvd(void)
+unsigned paravirt_patch_nop(void)
 {
-        asm volatile("wbinvd": : :"memory");
+        return 0;
 }
 
-static unsigned long long native_read_msr(unsigned int msr, int *err)
+unsigned paravirt_patch_ignore(unsigned len)
 {
-        unsigned long long val;
-
-        asm volatile("2: rdmsr ; xorl %0,%0\n"
-                     "1:\n\t"
-                     ".section .fixup,\"ax\"\n\t"
-                     "3: movl %3,%0 ; jmp 1b\n\t"
-                     ".previous\n\t"
-                     ".section __ex_table,\"a\"\n"
-                     " .align 4\n\t"
-                     " .long 2b,3b\n\t"
-                     ".previous"
-                     : "=r" (*err), "=A" (val)
-                     : "c" (msr), "i" (-EFAULT));
-
-        return val;
+        return len;
 }
 
-static int native_write_msr(unsigned int msr, unsigned long long val)
+unsigned paravirt_patch_call(void *target, u16 tgt_clobbers,
+                             void *site, u16 site_clobbers,
+                             unsigned len)
 {
-        int err;
-        asm volatile("2: wrmsr ; xorl %0,%0\n"
-                     "1:\n\t"
-                     ".section .fixup,\"ax\"\n\t"
-                     "3: movl %4,%0 ; jmp 1b\n\t"
-                     ".previous\n\t"
-                     ".section __ex_table,\"a\"\n"
-                     " .align 4\n\t"
-                     " .long 2b,3b\n\t"
-                     ".previous"
-                     : "=a" (err)
-                     : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)),
-                       "i" (-EFAULT));
-        return err;
-}
+        unsigned char *call = site;
+        unsigned long delta = (unsigned long)target - (unsigned long)(call+5);
 
-static unsigned long long native_read_tsc(void)
-{
-        unsigned long long val;
-        asm volatile("rdtsc" : "=A" (val));
-        return val;
-}
+        if (tgt_clobbers & ~site_clobbers)
+                return len;     /* target would clobber too much for this site */
+        if (len < 5)
+                return len;     /* call too long for patch site */
 
-static unsigned long long native_read_pmc(void)
-{
-        unsigned long long val;
-        asm volatile("rdpmc" : "=A" (val));
-        return val;
-}
+        *call++ = 0xe8;         /* call */
+        *(unsigned long *)call = delta;
 
-static void native_load_tr_desc(void)
-{
-        asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
+        return 5;
 }
 
-static void native_load_gdt(const struct Xgt_desc_struct *dtr)
+unsigned paravirt_patch_jmp(void *target, void *site, unsigned len)
 {
-        asm volatile("lgdt %0"::"m" (*dtr));
-}
+        unsigned char *jmp = site;
+        unsigned long delta = (unsigned long)target - (unsigned long)(jmp+5);
 
-static void native_load_idt(const struct Xgt_desc_struct *dtr)
-{
-        asm volatile("lidt %0"::"m" (*dtr));
-}
+        if (len < 5)
+                return len;     /* call too long for patch site */
 
-static void native_store_gdt(struct Xgt_desc_struct *dtr)
-{
-        asm ("sgdt %0":"=m" (*dtr));
-}
+        *jmp++ = 0xe9;          /* jmp */
+        *(unsigned long *)jmp = delta;
 
-static void native_store_idt(struct Xgt_desc_struct *dtr)
-{
-        asm ("sidt %0":"=m" (*dtr));
+        return 5;
 }
 
-static unsigned long native_store_tr(void)
+unsigned paravirt_patch_default(u8 type, u16 clobbers, void *site, unsigned len)
 {
-        unsigned long tr;
-        asm ("str %0":"=r" (tr));
-        return tr;
-}
+        void *opfunc = *((void **)&paravirt_ops + type);
+        unsigned ret;
 
-static void native_load_tls(struct thread_struct *t, unsigned int cpu)
-{
-#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
-        C(0); C(1); C(2);
-#undef C
-}
+        if (opfunc == NULL)
+                /* If there's no function, patch it with a ud2a (BUG) */
+                ret = paravirt_patch_insns(site, len, start_ud2a, end_ud2a);
+        else if (opfunc == paravirt_nop)
+                /* If the operation is a nop, then nop the callsite */
+                ret = paravirt_patch_nop();
+        else if (type == PARAVIRT_PATCH(iret) ||
+                 type == PARAVIRT_PATCH(irq_enable_sysexit))
+                /* If operation requires a jmp, then jmp */
+                ret = paravirt_patch_jmp(opfunc, site, len);
+        else
+                /* Otherwise call the function; assume target could
+                   clobber any caller-save reg */
+                ret = paravirt_patch_call(opfunc, CLBR_ANY,
+                                          site, clobbers, len);
 
-static inline void native_write_dt_entry(void *dt, int entry, u32 entry_low, u32 entry_high)
-{
-        u32 *lp = (u32 *)((char *)dt + entry*8);
-        lp[0] = entry_low;
-        lp[1] = entry_high;
+        return ret;
 }
 
-static void native_write_ldt_entry(void *dt, int entrynum, u32 low, u32 high)
+unsigned paravirt_patch_insns(void *site, unsigned len,
+                              const char *start, const char *end)
 {
-        native_write_dt_entry(dt, entrynum, low, high);
-}
+        unsigned insn_len = end - start;
 
-static void native_write_gdt_entry(void *dt, int entrynum, u32 low, u32 high)
-{
-        native_write_dt_entry(dt, entrynum, low, high);
-}
-
-static void native_write_idt_entry(void *dt, int entrynum, u32 low, u32 high)
-{
-        native_write_dt_entry(dt, entrynum, low, high);
-}
+        if (insn_len > len || start == NULL)
+                insn_len = len;
+        else
+                memcpy(site, start, insn_len);
 
-static void native_load_esp0(struct tss_struct *tss,
-                             struct thread_struct *thread)
-{
-        tss->esp0 = thread->esp0;
-
-        /* This can only happen when SEP is enabled, no need to test "SEP"arately */
-        if (unlikely(tss->ss1 != thread->sysenter_cs)) {
-                tss->ss1 = thread->sysenter_cs;
-                wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
-        }
+        return insn_len;
 }
 
-static void native_io_delay(void)
+void init_IRQ(void)
 {
-        asm volatile("outb %al,$0x80");
+        paravirt_ops.init_IRQ();
 }
 
 static void native_flush_tlb(void)
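
(Editorial aside, not part of the commit: the helpers added in the hunk above patch a paravirt call site either with a literal native instruction sequence, via paravirt_patch_insns(), or with a direct 5-byte call/jmp into the backend, via paravirt_patch_call()/paravirt_patch_jmp(). The 5-byte form is the 0xe8 (or 0xe9) opcode followed by a 32-bit displacement measured from the end of the instruction. The stand-alone C sketch below shows only that encoding; the buffer, the example addresses and the helper name encode_call are made up for illustration and assume a little-endian host, as on i386.)

#include <stdio.h>
#include <string.h>

/* Encode "call target" for an instruction located at 'site' into buf:
 * 0xe8 plus a 32-bit displacement relative to the end of the 5-byte
 * instruction, i.e. delta = target - (site + 5). */
static unsigned encode_call(unsigned char *buf, unsigned long site, unsigned long target)
{
        unsigned long delta = target - (site + 5);

        buf[0] = 0xe8;                  /* call rel32 */
        memcpy(buf + 1, &delta, 4);     /* low 4 bytes, little-endian */
        return 5;
}

int main(void)
{
        unsigned char buf[5];
        unsigned i, len;

        /* Made-up example: patch site at 0xc0100000 calling a handler
         * at 0xc0200000. */
        len = encode_call(buf, 0xc0100000UL, 0xc0200000UL);

        for (i = 0; i < len; i++)
                printf("%02x ", buf[i]);
        printf("\n");                   /* prints: e8 fb ff 0f 00 */
        return 0;
}
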
@@ -395,83 +213,11 @@ static void native_flush_tlb_global(void)
         __native_flush_tlb_global();
 }
 
-static void native_flush_tlb_single(u32 addr)
+static void native_flush_tlb_single(unsigned long addr)
 {
         __native_flush_tlb_single(addr);
 }
 
-#ifndef CONFIG_X86_PAE
-static void native_set_pte(pte_t *ptep, pte_t pteval)
-{
-        *ptep = pteval;
-}
-
-static void native_set_pte_at(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pteval)
-{
-        *ptep = pteval;
-}
-
-static void native_set_pmd(pmd_t *pmdp, pmd_t pmdval)
-{
-        *pmdp = pmdval;
-}
-
-#else /* CONFIG_X86_PAE */
-
-static void native_set_pte(pte_t *ptep, pte_t pte)
-{
-        ptep->pte_high = pte.pte_high;
-        smp_wmb();
-        ptep->pte_low = pte.pte_low;
-}
-
-static void native_set_pte_at(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pte)
-{
-        ptep->pte_high = pte.pte_high;
-        smp_wmb();
-        ptep->pte_low = pte.pte_low;
-}
-
-static void native_set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
-{
-        ptep->pte_low = 0;
-        smp_wmb();
-        ptep->pte_high = pte.pte_high;
-        smp_wmb();
-        ptep->pte_low = pte.pte_low;
-}
-
-static void native_set_pte_atomic(pte_t *ptep, pte_t pteval)
-{
-        set_64bit((unsigned long long *)ptep,pte_val(pteval));
-}
-
-static void native_set_pmd(pmd_t *pmdp, pmd_t pmdval)
-{
-        set_64bit((unsigned long long *)pmdp,pmd_val(pmdval));
-}
-
-static void native_set_pud(pud_t *pudp, pud_t pudval)
-{
-        *pudp = pudval;
-}
-
-static void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-        ptep->pte_low = 0;
-        smp_wmb();
-        ptep->pte_high = 0;
-}
-
-static void native_pmd_clear(pmd_t *pmd)
-{
-        u32 *tmp = (u32 *)pmd;
-        *tmp = 0;
-        smp_wmb();
-        *(tmp + 1) = 0;
-}
-#endif /* CONFIG_X86_PAE */
-
 /* These are in entry.S */
 extern void native_iret(void);
 extern void native_irq_enable_sysexit(void);
@@ -487,10 +233,11 @@ struct paravirt_ops paravirt_ops = {
         .name = "bare hardware",
         .paravirt_enabled = 0,
         .kernel_rpl = 0,
+        .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
 
         .patch = native_patch,
         .banner = default_banner,
-        .arch_setup = native_nop,
+        .arch_setup = paravirt_nop,
         .memory_setup = machine_specific_memory_setup,
         .get_wallclock = native_get_wallclock,
         .set_wallclock = native_set_wallclock,
@@ -517,8 +264,8 @@ struct paravirt_ops paravirt_ops = {
         .safe_halt = native_safe_halt,
         .halt = native_halt,
         .wbinvd = native_wbinvd,
-        .read_msr = native_read_msr,
-        .write_msr = native_write_msr,
+        .read_msr = native_read_msr_safe,
+        .write_msr = native_write_msr_safe,
         .read_tsc = native_read_tsc,
         .read_pmc = native_read_pmc,
         .get_scheduled_cycles = native_read_tsc,
@@ -531,9 +278,9 @@ struct paravirt_ops paravirt_ops = {
         .store_idt = native_store_idt,
         .store_tr = native_store_tr,
         .load_tls = native_load_tls,
-        .write_ldt_entry = native_write_ldt_entry,
-        .write_gdt_entry = native_write_gdt_entry,
-        .write_idt_entry = native_write_idt_entry,
+        .write_ldt_entry = write_dt_entry,
+        .write_gdt_entry = write_dt_entry,
+        .write_idt_entry = write_dt_entry,
         .load_esp0 = native_load_esp0,
 
         .set_iopl_mask = native_set_iopl_mask,
@@ -545,44 +292,57 @@ struct paravirt_ops paravirt_ops = {
         .apic_read = native_apic_read,
         .setup_boot_clock = setup_boot_APIC_clock,
         .setup_secondary_clock = setup_secondary_APIC_clock,
+        .startup_ipi_hook = paravirt_nop,
 #endif
-        .set_lazy_mode = (void *)native_nop,
+        .set_lazy_mode = paravirt_nop,
+
+        .pagetable_setup_start = native_pagetable_setup_start,
+        .pagetable_setup_done = native_pagetable_setup_done,
 
         .flush_tlb_user = native_flush_tlb,
         .flush_tlb_kernel = native_flush_tlb_global,
         .flush_tlb_single = native_flush_tlb_single,
+        .flush_tlb_others = native_flush_tlb_others,
 
-        .map_pt_hook = (void *)native_nop,
-
-        .alloc_pt = (void *)native_nop,
-        .alloc_pd = (void *)native_nop,
-        .alloc_pd_clone = (void *)native_nop,
-        .release_pt = (void *)native_nop,
-        .release_pd = (void *)native_nop,
+        .alloc_pt = paravirt_nop,
+        .alloc_pd = paravirt_nop,
+        .alloc_pd_clone = paravirt_nop,
+        .release_pt = paravirt_nop,
+        .release_pd = paravirt_nop,
 
         .set_pte = native_set_pte,
         .set_pte_at = native_set_pte_at,
         .set_pmd = native_set_pmd,
-        .pte_update = (void *)native_nop,
-        .pte_update_defer = (void *)native_nop,
+        .pte_update = paravirt_nop,
+        .pte_update_defer = paravirt_nop,
+
+#ifdef CONFIG_HIGHPTE
+        .kmap_atomic_pte = kmap_atomic,
+#endif
+
 #ifdef CONFIG_X86_PAE
         .set_pte_atomic = native_set_pte_atomic,
         .set_pte_present = native_set_pte_present,
         .set_pud = native_set_pud,
         .pte_clear = native_pte_clear,
         .pmd_clear = native_pmd_clear,
+
+        .pmd_val = native_pmd_val,
+        .make_pmd = native_make_pmd,
 #endif
 
+        .pte_val = native_pte_val,
+        .pgd_val = native_pgd_val,
+
+        .make_pte = native_make_pte,
+        .make_pgd = native_make_pgd,
+
         .irq_enable_sysexit = native_irq_enable_sysexit,
         .iret = native_iret,
 
-        .startup_ipi_hook = (void *)native_nop,
+        .dup_mmap = paravirt_nop,
+        .exit_mmap = paravirt_nop,
+        .activate_mm = paravirt_nop,
 };
 
-/*
- * NOTE: CONFIG_PARAVIRT is experimental and the paravirt_ops
- * semantics are subject to change. Hence we only do this
- * internal-only export of this, until it gets sorted out and
- * all lowlevel CPU ops used by modules are separately exported.
- */
-EXPORT_SYMBOL_GPL(paravirt_ops);
+EXPORT_SYMBOL(paravirt_ops);
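
(Editorial aside, not part of the commit: paravirt_patch_default() above finds the backend function for a patch site by treating struct paravirt_ops as a flat array of pointers and indexing it with the site's type, which PARAVIRT_PATCH() derives from the field's byte offset. The stand-alone C sketch below illustrates that offset-to-slot trick with a made-up two-entry ops structure; struct ops, OPS_PATCH and the handler names are invented for the example, and the cast assumes the struct contains nothing but same-sized function pointers.)

#include <stddef.h>
#include <stdio.h>

struct ops {
        void (*irq_disable)(void);
        void (*irq_enable)(void);
};

static void my_cli(void) { puts("cli"); }
static void my_sti(void) { puts("sti"); }

static struct ops ops = { .irq_disable = my_cli, .irq_enable = my_sti };

/* Same idea as PARAVIRT_PATCH(x): slot number = byte offset of the
 * field divided by the pointer size. */
#define OPS_PATCH(field) (offsetof(struct ops, field) / sizeof(void *))

int main(void)
{
        unsigned type = OPS_PATCH(irq_enable);

        /* Treat the struct as an array of function pointers and index
         * it by slot number; this only works because every member is a
         * function pointer of the same size. */
        void (**slots)(void) = (void (**)(void))&ops;

        slots[type]();          /* prints "sti" */
        return 0;
}
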