Diffstat (limited to 'arch/x86/kernel/vmi_32.c')
-rw-r--r-- | arch/x86/kernel/vmi_32.c | 981 |
1 file changed, 981 insertions, 0 deletions
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
new file mode 100644
index 000000000000..18673e0f193b
--- /dev/null
+++ b/arch/x86/kernel/vmi_32.c
@@ -0,0 +1,981 @@
1 | /* | ||
2 | * VMI specific paravirt-ops implementation | ||
3 | * | ||
4 | * Copyright (C) 2005, VMware, Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
14 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
15 | * details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
20 | * | ||
21 | * Send feedback to zach@vmware.com | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | #include <linux/module.h> | ||
26 | #include <linux/cpu.h> | ||
27 | #include <linux/bootmem.h> | ||
28 | #include <linux/mm.h> | ||
29 | #include <linux/highmem.h> | ||
30 | #include <linux/sched.h> | ||
31 | #include <asm/vmi.h> | ||
32 | #include <asm/io.h> | ||
33 | #include <asm/fixmap.h> | ||
34 | #include <asm/apicdef.h> | ||
35 | #include <asm/apic.h> | ||
36 | #include <asm/processor.h> | ||
37 | #include <asm/timer.h> | ||
38 | #include <asm/vmi_time.h> | ||
39 | #include <asm/kmap_types.h> | ||
40 | |||
41 | /* Convenient for calling VMI functions indirectly in the ROM */ | ||
42 | typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void); | ||
43 | typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int); | ||
44 | |||
45 | #define call_vrom_func(rom,func) \ | ||
46 | (((VROMFUNC *)(rom->func))()) | ||
47 | |||
48 | #define call_vrom_long_func(rom,func,arg) \ | ||
49 | (((VROMLONGFUNC *)(rom->func)) (arg)) | ||
50 | |||
51 | static struct vrom_header *vmi_rom; | ||
52 | static int disable_pge; | ||
53 | static int disable_pse; | ||
54 | static int disable_sep; | ||
55 | static int disable_tsc; | ||
56 | static int disable_mtrr; | ||
57 | static int disable_noidle; | ||
58 | static int disable_vmi_timer; | ||
59 | |||
60 | /* Cached VMI operations */ | ||
61 | static struct { | ||
62 | void (*cpuid)(void /* non-c */); | ||
63 | void (*_set_ldt)(u32 selector); | ||
64 | void (*set_tr)(u32 selector); | ||
65 | void (*set_kernel_stack)(u32 selector, u32 esp0); | ||
66 | void (*allocate_page)(u32, u32, u32, u32, u32); | ||
67 | void (*release_page)(u32, u32); | ||
68 | void (*set_pte)(pte_t, pte_t *, unsigned); | ||
69 | void (*update_pte)(pte_t *, unsigned); | ||
70 | void (*set_linear_mapping)(int, void *, u32, u32); | ||
71 | void (*_flush_tlb)(int); | ||
72 | void (*set_initial_ap_state)(int, int); | ||
73 | void (*halt)(void); | ||
74 | void (*set_lazy_mode)(int mode); | ||
75 | } vmi_ops; | ||
76 | |||
77 | /* Cached VMI operations */ | ||
78 | struct vmi_timer_ops vmi_timer_ops; | ||
79 | |||
80 | /* | ||
81 | * VMI patching routines. | ||
82 | */ | ||
83 | #define MNEM_CALL 0xe8 | ||
84 | #define MNEM_JMP 0xe9 | ||
85 | #define MNEM_RET 0xc3 | ||
86 | |||
87 | #define IRQ_PATCH_INT_MASK 0 | ||
88 | #define IRQ_PATCH_DISABLE 5 | ||
89 | |||
90 | static inline void patch_offset(void *insnbuf, | ||
91 | unsigned long eip, unsigned long dest) | ||
92 | { | ||
93 | *(unsigned long *)(insnbuf+1) = dest-eip-5; | ||
94 | } | ||
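/*
 * The displacement math above follows the x86 near CALL (0xe8) and
 * JMP (0xe9) encodings: both are 5 bytes long, and the 32-bit
 * operand is relative to the address of the *next* instruction.  To
 * reach 'dest' from an instruction starting at 'eip', the value
 * stored at insnbuf+1 must be dest - (eip + 5), i.e. dest - eip - 5.
 */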
95 | |||
96 | static unsigned patch_internal(int call, unsigned len, void *insnbuf, | ||
97 | unsigned long eip) | ||
98 | { | ||
99 | u64 reloc; | ||
100 | struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc; | ||
101 | reloc = call_vrom_long_func(vmi_rom, get_reloc, call); | ||
102 | switch(rel->type) { | ||
103 | case VMI_RELOCATION_CALL_REL: | ||
104 | BUG_ON(len < 5); | ||
105 | *(char *)insnbuf = MNEM_CALL; | ||
106 | patch_offset(insnbuf, eip, (unsigned long)rel->eip); | ||
107 | return 5; | ||
108 | |||
109 | case VMI_RELOCATION_JUMP_REL: | ||
110 | BUG_ON(len < 5); | ||
111 | *(char *)insnbuf = MNEM_JMP; | ||
112 | patch_offset(insnbuf, eip, (unsigned long)rel->eip); | ||
113 | return 5; | ||
114 | |||
115 | case VMI_RELOCATION_NOP: | ||
116 | /* obliterate the whole thing */ | ||
117 | return 0; | ||
118 | |||
119 | case VMI_RELOCATION_NONE: | ||
120 | /* leave native code in place */ | ||
121 | break; | ||
122 | |||
123 | default: | ||
124 | BUG(); | ||
125 | } | ||
126 | return len; | ||
127 | } | ||
128 | |||
129 | /* | ||
130 | * Apply patch if appropriate, return length of new instruction | ||
131 | * sequence. The callee does nop padding for us. | ||
132 | */ | ||
133 | static unsigned vmi_patch(u8 type, u16 clobbers, void *insns, | ||
134 | unsigned long eip, unsigned len) | ||
135 | { | ||
136 | switch (type) { | ||
137 | case PARAVIRT_PATCH(irq_disable): | ||
138 | return patch_internal(VMI_CALL_DisableInterrupts, len, | ||
139 | insns, eip); | ||
140 | case PARAVIRT_PATCH(irq_enable): | ||
141 | return patch_internal(VMI_CALL_EnableInterrupts, len, | ||
142 | insns, eip); | ||
143 | case PARAVIRT_PATCH(restore_fl): | ||
144 | return patch_internal(VMI_CALL_SetInterruptMask, len, | ||
145 | insns, eip); | ||
146 | case PARAVIRT_PATCH(save_fl): | ||
147 | return patch_internal(VMI_CALL_GetInterruptMask, len, | ||
148 | insns, eip); | ||
149 | case PARAVIRT_PATCH(iret): | ||
150 | return patch_internal(VMI_CALL_IRET, len, insns, eip); | ||
151 | case PARAVIRT_PATCH(irq_enable_sysexit): | ||
152 | return patch_internal(VMI_CALL_SYSEXIT, len, insns, eip); | ||
153 | default: | ||
154 | break; | ||
155 | } | ||
156 | return len; | ||
157 | } | ||
158 | |||
159 | /* CPUID has non-C semantics, and paravirt-ops API doesn't match hardware ISA */ | ||
160 | static void vmi_cpuid(unsigned int *eax, unsigned int *ebx, | ||
161 | unsigned int *ecx, unsigned int *edx) | ||
162 | { | ||
163 | int override = 0; | ||
164 | if (*eax == 1) | ||
165 | override = 1; | ||
166 | asm volatile ("call *%6" | ||
167 | : "=a" (*eax), | ||
168 | "=b" (*ebx), | ||
169 | "=c" (*ecx), | ||
170 | "=d" (*edx) | ||
171 | : "0" (*eax), "2" (*ecx), "r" (vmi_ops.cpuid)); | ||
172 | if (override) { | ||
173 | if (disable_pse) | ||
174 | *edx &= ~X86_FEATURE_PSE; | ||
175 | if (disable_pge) | ||
176 | *edx &= ~X86_FEATURE_PGE; | ||
177 | if (disable_sep) | ||
178 | *edx &= ~X86_FEATURE_SEP; | ||
179 | if (disable_tsc) | ||
180 | *edx &= ~X86_FEATURE_TSC; | ||
181 | if (disable_mtrr) | ||
182 | *edx &= ~X86_FEATURE_MTRR; | ||
183 | } | ||
184 | } | ||
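/*
 * The ROM's CPUID entry follows the hardware register convention
 * (eax/ecx in, eax/ebx/ecx/edx out) rather than any C calling
 * convention, which is why it is reached through inline asm above.
 * Leaf 1 (*eax == 1) is special-cased so that feature bits disabled
 * on the command line can be masked out of the returned edx.
 */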
185 | |||
186 | static inline void vmi_maybe_load_tls(struct desc_struct *gdt, int nr, struct desc_struct *new) | ||
187 | { | ||
188 | if (gdt[nr].a != new->a || gdt[nr].b != new->b) | ||
189 | write_gdt_entry(gdt, nr, new->a, new->b); | ||
190 | } | ||
191 | |||
192 | static void vmi_load_tls(struct thread_struct *t, unsigned int cpu) | ||
193 | { | ||
194 | struct desc_struct *gdt = get_cpu_gdt_table(cpu); | ||
195 | vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 0, &t->tls_array[0]); | ||
196 | vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 1, &t->tls_array[1]); | ||
197 | vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 2, &t->tls_array[2]); | ||
198 | } | ||
199 | |||
200 | static void vmi_set_ldt(const void *addr, unsigned entries) | ||
201 | { | ||
202 | unsigned cpu = smp_processor_id(); | ||
203 | u32 low, high; | ||
204 | |||
205 | pack_descriptor(&low, &high, (unsigned long)addr, | ||
206 | entries * sizeof(struct desc_struct) - 1, | ||
207 | DESCTYPE_LDT, 0); | ||
208 | write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, low, high); | ||
209 | vmi_ops._set_ldt(entries ? GDT_ENTRY_LDT*sizeof(struct desc_struct) : 0); | ||
210 | } | ||
211 | |||
212 | static void vmi_set_tr(void) | ||
213 | { | ||
214 | vmi_ops.set_tr(GDT_ENTRY_TSS*sizeof(struct desc_struct)); | ||
215 | } | ||
216 | |||
217 | static void vmi_load_esp0(struct tss_struct *tss, | ||
218 | struct thread_struct *thread) | ||
219 | { | ||
220 | tss->x86_tss.esp0 = thread->esp0; | ||
221 | |||
222 | /* This can only happen when SEP is enabled, no need to test "SEP"arately */ | ||
223 | if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) { | ||
224 | tss->x86_tss.ss1 = thread->sysenter_cs; | ||
225 | wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); | ||
226 | } | ||
227 | vmi_ops.set_kernel_stack(__KERNEL_DS, tss->x86_tss.esp0); | ||
228 | } | ||
229 | |||
230 | static void vmi_flush_tlb_user(void) | ||
231 | { | ||
232 | vmi_ops._flush_tlb(VMI_FLUSH_TLB); | ||
233 | } | ||
234 | |||
235 | static void vmi_flush_tlb_kernel(void) | ||
236 | { | ||
237 | vmi_ops._flush_tlb(VMI_FLUSH_TLB | VMI_FLUSH_GLOBAL); | ||
238 | } | ||
239 | |||
240 | /* Stub to do nothing at all; used for delays and unimplemented calls */ | ||
241 | static void vmi_nop(void) | ||
242 | { | ||
243 | } | ||
244 | |||
245 | #ifdef CONFIG_DEBUG_PAGE_TYPE | ||
246 | |||
247 | #ifdef CONFIG_X86_PAE | ||
248 | #define MAX_BOOT_PTS (2048+4+1) | ||
249 | #else | ||
250 | #define MAX_BOOT_PTS (1024+1) | ||
251 | #endif | ||
252 | |||
253 | /* | ||
254 | * During boot, mem_map is not yet available in paging_init, so stash | ||
255 | * all the boot page allocations here. | ||
256 | */ | ||
257 | static struct { | ||
258 | u32 pfn; | ||
259 | int type; | ||
260 | } boot_page_allocations[MAX_BOOT_PTS]; | ||
261 | static int num_boot_page_allocations; | ||
262 | static int boot_allocations_applied; | ||
263 | |||
264 | void vmi_apply_boot_page_allocations(void) | ||
265 | { | ||
266 | int i; | ||
267 | BUG_ON(!mem_map); | ||
268 | for (i = 0; i < num_boot_page_allocations; i++) { | ||
269 | struct page *page = pfn_to_page(boot_page_allocations[i].pfn); | ||
270 | page->type = boot_page_allocations[i].type; | ||
271 | page->type = boot_page_allocations[i].type & | ||
272 | ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE); | ||
273 | } | ||
274 | boot_allocations_applied = 1; | ||
275 | } | ||
276 | |||
277 | static void record_page_type(u32 pfn, int type) | ||
278 | { | ||
279 | BUG_ON(num_boot_page_allocations >= MAX_BOOT_PTS); | ||
280 | boot_page_allocations[num_boot_page_allocations].pfn = pfn; | ||
281 | boot_page_allocations[num_boot_page_allocations].type = type; | ||
282 | num_boot_page_allocations++; | ||
283 | } | ||
284 | |||
285 | static void check_zeroed_page(u32 pfn, int type, struct page *page) | ||
286 | { | ||
287 | u32 *ptr; | ||
288 | int i; | ||
289 | int limit = PAGE_SIZE / sizeof(int); | ||
290 | |||
291 | if (page_address(page)) | ||
292 | ptr = (u32 *)page_address(page); | ||
293 | else | ||
294 | ptr = (u32 *)__va(pfn << PAGE_SHIFT); | ||
295 | /* | ||
296 | * When cloning the root in non-PAE mode, only the userspace | ||
297 | * pdes need to be zeroed. | ||
298 | */ | ||
299 | if (type & VMI_PAGE_CLONE) | ||
300 | limit = USER_PTRS_PER_PGD; | ||
301 | for (i = 0; i < limit; i++) | ||
302 | BUG_ON(ptr[i]); | ||
303 | } | ||
304 | |||
305 | /* | ||
306 | * We stash the page type into struct page so we can verify the page | ||
307 | * types are used properly. | ||
308 | */ | ||
309 | static void vmi_set_page_type(u32 pfn, int type) | ||
310 | { | ||
311 | /* PAE can have multiple roots per page - don't track */ | ||
312 | if (PTRS_PER_PMD > 1 && (type & VMI_PAGE_PDP)) | ||
313 | return; | ||
314 | |||
315 | if (boot_allocations_applied) { | ||
316 | struct page *page = pfn_to_page(pfn); | ||
317 | if (type != VMI_PAGE_NORMAL) | ||
318 | BUG_ON(page->type); | ||
319 | else | ||
320 | BUG_ON(page->type == VMI_PAGE_NORMAL); | ||
321 | page->type = type & ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE); | ||
322 | if (type & VMI_PAGE_ZEROED) | ||
323 | check_zeroed_page(pfn, type, page); | ||
324 | } else { | ||
325 | record_page_type(pfn, type); | ||
326 | } | ||
327 | } | ||
328 | |||
329 | static void vmi_check_page_type(u32 pfn, int type) | ||
330 | { | ||
331 | /* PAE can have multiple roots per page - skip checks */ | ||
332 | if (PTRS_PER_PMD > 1 && (type & VMI_PAGE_PDP)) | ||
333 | return; | ||
334 | |||
335 | type &= ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE); | ||
336 | if (boot_allocations_applied) { | ||
337 | struct page *page = pfn_to_page(pfn); | ||
338 | BUG_ON((page->type ^ type) & VMI_PAGE_PAE); | ||
339 | BUG_ON(type == VMI_PAGE_NORMAL && page->type); | ||
340 | BUG_ON((type & page->type) == 0); | ||
341 | } | ||
342 | } | ||
343 | #else | ||
344 | #define vmi_set_page_type(p,t) do { } while (0) | ||
345 | #define vmi_check_page_type(p,t) do { } while (0) | ||
346 | #endif | ||
347 | |||
348 | #ifdef CONFIG_HIGHPTE | ||
349 | static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type) | ||
350 | { | ||
351 | void *va = kmap_atomic(page, type); | ||
352 | |||
353 | /* | ||
354 | * Internally, the VMI ROM must map virtual addresses to physical | ||
355 | * addresses for processing MMU updates. By the time MMU updates | ||
356 | * are issued, this information is typically already lost. | ||
357 | * Fortunately, the VMI provides a cache of mapping slots for active | ||
358 | * page tables. | ||
359 | * | ||
360 | * We use slot zero for the linear mapping of physical memory, and | ||
361 | * in HIGHPTE kernels, slot 1 and 2 for KM_PTE0 and KM_PTE1. | ||
362 | * | ||
363 | * args: SLOT VA COUNT PFN | ||
364 | */ | ||
365 | BUG_ON(type != KM_PTE0 && type != KM_PTE1); | ||
366 | vmi_ops.set_linear_mapping((type - KM_PTE0)+1, va, 1, page_to_pfn(page)); | ||
367 | |||
368 | return va; | ||
369 | } | ||
370 | #endif | ||
371 | |||
372 | static void vmi_allocate_pt(struct mm_struct *mm, u32 pfn) | ||
373 | { | ||
374 | vmi_set_page_type(pfn, VMI_PAGE_L1); | ||
375 | vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0); | ||
376 | } | ||
377 | |||
378 | static void vmi_allocate_pd(u32 pfn) | ||
379 | { | ||
380 | /* | ||
381 | * This call comes in very early, before mem_map is setup. | ||
382 | * It is called only for swapper_pg_dir, which already has | ||
383 | * data on it. | ||
384 | */ | ||
385 | vmi_set_page_type(pfn, VMI_PAGE_L2); | ||
386 | vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0); | ||
387 | } | ||
388 | |||
389 | static void vmi_allocate_pd_clone(u32 pfn, u32 clonepfn, u32 start, u32 count) | ||
390 | { | ||
391 | vmi_set_page_type(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE); | ||
392 | vmi_check_page_type(clonepfn, VMI_PAGE_L2); | ||
393 | vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count); | ||
394 | } | ||
395 | |||
396 | static void vmi_release_pt(u32 pfn) | ||
397 | { | ||
398 | vmi_ops.release_page(pfn, VMI_PAGE_L1); | ||
399 | vmi_set_page_type(pfn, VMI_PAGE_NORMAL); | ||
400 | } | ||
401 | |||
402 | static void vmi_release_pd(u32 pfn) | ||
403 | { | ||
404 | vmi_ops.release_page(pfn, VMI_PAGE_L2); | ||
405 | vmi_set_page_type(pfn, VMI_PAGE_NORMAL); | ||
406 | } | ||
407 | |||
408 | /* | ||
409 | * Helper macros for MMU update flags. We can defer updates until a flush | ||
410 | * or page invalidation only if the update is to the current address space | ||
411 | * (otherwise, there is no flush). We must check against init_mm, since | ||
412 | * this could be a kernel update, which usually passes init_mm, although | ||
413 | * sometimes this check can be skipped if we know the particular function | ||
414 | * is only called on user mode PTEs. We could change the kernel to pass | ||
415 | * current->active_mm here, but in particular, I was unsure if changing | ||
416 | * mm/highmem.c to do this would still be correct on other architectures. | ||
417 | */ | ||
418 | #define is_current_as(mm, mustbeuser) ((mm) == current->active_mm || \ | ||
419 | (!mustbeuser && (mm) == &init_mm)) | ||
420 | #define vmi_flags_addr(mm, addr, level, user) \ | ||
421 | ((level) | (is_current_as(mm, user) ? \ | ||
422 | (VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0)) | ||
423 | #define vmi_flags_addr_defer(mm, addr, level, user) \ | ||
424 | ((level) | (is_current_as(mm, user) ? \ | ||
425 | (VMI_PAGE_DEFER | VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0)) | ||
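/*
 * For example, a PTE update at user address 'addr' in the current
 * address space expands to
 *   VMI_PAGE_PT | VMI_PAGE_CURRENT_AS | (addr & VMI_PAGE_VA_MASK)
 * (with VMI_PAGE_DEFER added by the _defer variant), so the
 * hypervisor knows which virtual address the update affects.  An
 * update to a foreign address space carries only the level flag,
 * since there is no later flush it could be deferred to.
 */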
426 | |||
427 | static void vmi_update_pte(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
428 | { | ||
429 | vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE); | ||
430 | vmi_ops.update_pte(ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0)); | ||
431 | } | ||
432 | |||
433 | static void vmi_update_pte_defer(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
434 | { | ||
435 | vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE); | ||
436 | vmi_ops.update_pte(ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 0)); | ||
437 | } | ||
438 | |||
439 | static void vmi_set_pte(pte_t *ptep, pte_t pte) | ||
440 | { | ||
441 | /* XXX because of set_pmd_pte, this can be called on PT or PD layers */ | ||
442 | vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE | VMI_PAGE_PD); | ||
443 | vmi_ops.set_pte(pte, ptep, VMI_PAGE_PT); | ||
444 | } | ||
445 | |||
446 | static void vmi_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) | ||
447 | { | ||
448 | vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE); | ||
449 | vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0)); | ||
450 | } | ||
451 | |||
452 | static void vmi_set_pmd(pmd_t *pmdp, pmd_t pmdval) | ||
453 | { | ||
454 | #ifdef CONFIG_X86_PAE | ||
455 | const pte_t pte = { pmdval.pmd, pmdval.pmd >> 32 }; | ||
456 | vmi_check_page_type(__pa(pmdp) >> PAGE_SHIFT, VMI_PAGE_PMD); | ||
457 | #else | ||
458 | const pte_t pte = { pmdval.pud.pgd.pgd }; | ||
459 | vmi_check_page_type(__pa(pmdp) >> PAGE_SHIFT, VMI_PAGE_PGD); | ||
460 | #endif | ||
461 | vmi_ops.set_pte(pte, (pte_t *)pmdp, VMI_PAGE_PD); | ||
462 | } | ||
463 | |||
464 | #ifdef CONFIG_X86_PAE | ||
465 | |||
466 | static void vmi_set_pte_atomic(pte_t *ptep, pte_t pteval) | ||
467 | { | ||
468 | /* | ||
469 | * XXX This is called from set_pmd_pte, but at both PT | ||
470 | * and PD layers so the VMI_PAGE_PT flag is wrong. But | ||
471 | * it is only called for large page mapping changes, | ||
472 | * the Xen backend doesn't support large pages, and the | ||
473 | * ESX backend doesn't depend on the flag. | ||
474 | */ | ||
475 | set_64bit((unsigned long long *)ptep,pte_val(pteval)); | ||
476 | vmi_ops.update_pte(ptep, VMI_PAGE_PT); | ||
477 | } | ||
478 | |||
479 | static void vmi_set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) | ||
480 | { | ||
481 | vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE); | ||
482 | vmi_ops.set_pte(pte, ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 1)); | ||
483 | } | ||
484 | |||
485 | static void vmi_set_pud(pud_t *pudp, pud_t pudval) | ||
486 | { | ||
487 | /* Um, eww */ | ||
488 | const pte_t pte = { pudval.pgd.pgd, pudval.pgd.pgd >> 32 }; | ||
489 | vmi_check_page_type(__pa(pudp) >> PAGE_SHIFT, VMI_PAGE_PGD); | ||
490 | vmi_ops.set_pte(pte, (pte_t *)pudp, VMI_PAGE_PDP); | ||
491 | } | ||
492 | |||
493 | static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
494 | { | ||
495 | const pte_t pte = { 0 }; | ||
496 | vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE); | ||
497 | vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0)); | ||
498 | } | ||
499 | |||
500 | static void vmi_pmd_clear(pmd_t *pmd) | ||
501 | { | ||
502 | const pte_t pte = { 0 }; | ||
503 | vmi_check_page_type(__pa(pmd) >> PAGE_SHIFT, VMI_PAGE_PMD); | ||
504 | vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD); | ||
505 | } | ||
506 | #endif | ||
507 | |||
508 | #ifdef CONFIG_SMP | ||
509 | static void __devinit | ||
510 | vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip, | ||
511 | unsigned long start_esp) | ||
512 | { | ||
513 | struct vmi_ap_state ap; | ||
514 | |||
515 | /* Default everything to zero. This is fine for most GPRs. */ | ||
516 | memset(&ap, 0, sizeof(struct vmi_ap_state)); | ||
517 | |||
518 | ap.gdtr_limit = GDT_SIZE - 1; | ||
519 | ap.gdtr_base = (unsigned long) get_cpu_gdt_table(phys_apicid); | ||
520 | |||
521 | ap.idtr_limit = IDT_ENTRIES * 8 - 1; | ||
522 | ap.idtr_base = (unsigned long) idt_table; | ||
523 | |||
524 | ap.ldtr = 0; | ||
525 | |||
526 | ap.cs = __KERNEL_CS; | ||
527 | ap.eip = (unsigned long) start_eip; | ||
528 | ap.ss = __KERNEL_DS; | ||
529 | ap.esp = (unsigned long) start_esp; | ||
530 | |||
531 | ap.ds = __USER_DS; | ||
532 | ap.es = __USER_DS; | ||
533 | ap.fs = __KERNEL_PERCPU; | ||
534 | ap.gs = 0; | ||
535 | |||
536 | ap.eflags = 0; | ||
537 | |||
538 | #ifdef CONFIG_X86_PAE | ||
539 | /* efer should match BSP efer. */ | ||
540 | if (cpu_has_nx) { | ||
541 | unsigned l, h; | ||
542 | rdmsr(MSR_EFER, l, h); | ||
543 | ap.efer = (unsigned long long) h << 32 | l; | ||
544 | } | ||
545 | #endif | ||
546 | |||
547 | ap.cr3 = __pa(swapper_pg_dir); | ||
548 | /* Protected mode, paging, AM, WP, NE, MP. */ | ||
549 | ap.cr0 = 0x80050023; | ||
550 | ap.cr4 = mmu_cr4_features; | ||
551 | vmi_ops.set_initial_ap_state((u32)&ap, phys_apicid); | ||
552 | } | ||
553 | #endif | ||
554 | |||
555 | static void vmi_set_lazy_mode(enum paravirt_lazy_mode mode) | ||
556 | { | ||
557 | static DEFINE_PER_CPU(enum paravirt_lazy_mode, lazy_mode); | ||
558 | |||
559 | if (!vmi_ops.set_lazy_mode) | ||
560 | return; | ||
561 | |||
562 | /* Modes should never nest or overlap */ | ||
563 | BUG_ON(__get_cpu_var(lazy_mode) && !(mode == PARAVIRT_LAZY_NONE || | ||
564 | mode == PARAVIRT_LAZY_FLUSH)); | ||
565 | |||
566 | if (mode == PARAVIRT_LAZY_FLUSH) { | ||
567 | vmi_ops.set_lazy_mode(0); | ||
568 | vmi_ops.set_lazy_mode(__get_cpu_var(lazy_mode)); | ||
569 | } else { | ||
570 | vmi_ops.set_lazy_mode(mode); | ||
571 | __get_cpu_var(lazy_mode) = mode; | ||
572 | } | ||
573 | } | ||
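/*
 * While a lazy mode is active the hypervisor may batch the queued
 * CPU/MMU updates and apply them when the mode is left.  The
 * PARAVIRT_LAZY_FLUSH case above drops to mode 0 and immediately
 * re-enters the recorded per-cpu mode, which in effect pushes out
 * any pending batched updates without changing the lazy state.
 */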
574 | |||
575 | static inline int __init check_vmi_rom(struct vrom_header *rom) | ||
576 | { | ||
577 | struct pci_header *pci; | ||
578 | struct pnp_header *pnp; | ||
579 | const char *manufacturer = "UNKNOWN"; | ||
580 | const char *product = "UNKNOWN"; | ||
581 | const char *license = "unspecified"; | ||
582 | |||
583 | if (rom->rom_signature != 0xaa55) | ||
584 | return 0; | ||
585 | if (rom->vrom_signature != VMI_SIGNATURE) | ||
586 | return 0; | ||
587 | if (rom->api_version_maj != VMI_API_REV_MAJOR || | ||
588 | rom->api_version_min+1 < VMI_API_REV_MINOR+1) { | ||
589 | printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n", | ||
590 | rom->api_version_maj, | ||
591 | rom->api_version_min); | ||
592 | return 0; | ||
593 | } | ||
594 | |||
595 | /* | ||
596 | * Relying on the VMI_SIGNATURE field is not 100% safe, so check | ||
597 | * the PCI header and device type to make sure this is really a | ||
598 | * VMI device. | ||
599 | */ | ||
600 | if (!rom->pci_header_offs) { | ||
601 | printk(KERN_WARNING "VMI: ROM does not contain PCI header.\n"); | ||
602 | return 0; | ||
603 | } | ||
604 | |||
605 | pci = (struct pci_header *)((char *)rom+rom->pci_header_offs); | ||
606 | if (pci->vendorID != PCI_VENDOR_ID_VMWARE || | ||
607 | pci->deviceID != PCI_DEVICE_ID_VMWARE_VMI) { | ||
608 | /* Allow it to run... anyways, but warn */ | ||
609 | printk(KERN_WARNING "VMI: ROM from unknown manufacturer\n"); | ||
610 | } | ||
611 | |||
612 | if (rom->pnp_header_offs) { | ||
613 | pnp = (struct pnp_header *)((char *)rom+rom->pnp_header_offs); | ||
614 | if (pnp->manufacturer_offset) | ||
615 | manufacturer = (const char *)rom+pnp->manufacturer_offset; | ||
616 | if (pnp->product_offset) | ||
617 | product = (const char *)rom+pnp->product_offset; | ||
618 | } | ||
619 | |||
620 | if (rom->license_offs) | ||
621 | license = (char *)rom+rom->license_offs; | ||
622 | |||
623 | printk(KERN_INFO "VMI: Found %s %s, API version %d.%d, ROM version %d.%d\n", | ||
624 | manufacturer, product, | ||
625 | rom->api_version_maj, rom->api_version_min, | ||
626 | pci->rom_version_maj, pci->rom_version_min); | ||
627 | |||
628 | /* Don't allow BSD/MIT here for now because we don't want to end up | ||
629 | with any binary only shim layers */ | ||
630 | if (strcmp(license, "GPL") && strcmp(license, "GPL v2")) { | ||
631 | printk(KERN_WARNING "VMI: Non GPL license `%s' found for ROM. Not used.\n", | ||
632 | license); | ||
633 | return 0; | ||
634 | } | ||
635 | |||
636 | return 1; | ||
637 | } | ||
638 | |||
639 | /* | ||
640 | * Probe for the VMI option ROM | ||
641 | */ | ||
642 | static inline int __init probe_vmi_rom(void) | ||
643 | { | ||
644 | unsigned long base; | ||
645 | |||
646 | /* VMI ROM is in option ROM area, check signature */ | ||
647 | for (base = 0xC0000; base < 0xE0000; base += 2048) { | ||
648 | struct vrom_header *romstart; | ||
649 | romstart = (struct vrom_header *)isa_bus_to_virt(base); | ||
650 | if (check_vmi_rom(romstart)) { | ||
651 | vmi_rom = romstart; | ||
652 | return 1; | ||
653 | } | ||
654 | } | ||
655 | return 0; | ||
656 | } | ||
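/*
 * The loop above walks the legacy option-ROM window (0xC0000-0xDFFFF)
 * in 2K steps (the alignment at which BIOS expansion ROMs may be
 * placed) and adopts the first candidate that passes all of the
 * signature, version and license checks in check_vmi_rom().
 */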
657 | |||
658 | /* | ||
659 | * VMI setup common to all processors | ||
660 | */ | ||
661 | void vmi_bringup(void) | ||
662 | { | ||
663 | /* We must establish the lowmem mapping for MMU ops to work */ | ||
664 | if (vmi_ops.set_linear_mapping) | ||
665 | vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, max_low_pfn, 0); | ||
666 | } | ||
667 | |||
668 | /* | ||
669 | * Return a pointer to a VMI function or NULL if unimplemented | ||
670 | */ | ||
671 | static void *vmi_get_function(int vmicall) | ||
672 | { | ||
673 | u64 reloc; | ||
674 | const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc; | ||
675 | reloc = call_vrom_long_func(vmi_rom, get_reloc, vmicall); | ||
676 | BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL); | ||
677 | if (rel->type == VMI_RELOCATION_CALL_REL) | ||
678 | return (void *)rel->eip; | ||
679 | else | ||
680 | return NULL; | ||
681 | } | ||
682 | |||
683 | /* | ||
684 | * Helper macro for making the VMI paravirt-ops fill code readable. | ||
685 | * For unimplemented operations, fall back to default, unless nop | ||
686 | * is returned by the ROM. | ||
687 | */ | ||
688 | #define para_fill(opname, vmicall) \ | ||
689 | do { \ | ||
690 | reloc = call_vrom_long_func(vmi_rom, get_reloc, \ | ||
691 | VMI_CALL_##vmicall); \ | ||
692 | if (rel->type == VMI_RELOCATION_CALL_REL) \ | ||
693 | paravirt_ops.opname = (void *)rel->eip; \ | ||
694 | else if (rel->type == VMI_RELOCATION_NOP) \ | ||
695 | paravirt_ops.opname = (void *)vmi_nop; \ | ||
696 | else if (rel->type != VMI_RELOCATION_NONE) \ | ||
697 | printk(KERN_WARNING "VMI: Unknown relocation " \ | ||
698 | "type %d for " #vmicall"\n",\ | ||
699 | rel->type); \ | ||
700 | } while (0) | ||
701 | |||
702 | /* | ||
703 | * Helper macro for making the VMI paravirt-ops fill code readable. | ||
704 | * For cached operations which do not match the VMI ROM ABI and must | ||
705 | * go through a translation stub. Ignore NOPs, since it is not clear | ||
706 | * a NOP VMI function corresponds to a NOP paravirt-op when the | ||
707 | * functions are not in 1-1 correspondence. | ||
708 | */ | ||
709 | #define para_wrap(opname, wrapper, cache, vmicall) \ | ||
710 | do { \ | ||
711 | reloc = call_vrom_long_func(vmi_rom, get_reloc, \ | ||
712 | VMI_CALL_##vmicall); \ | ||
713 | BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL); \ | ||
714 | if (rel->type == VMI_RELOCATION_CALL_REL) { \ | ||
715 | paravirt_ops.opname = wrapper; \ | ||
716 | vmi_ops.cache = (void *)rel->eip; \ | ||
717 | } \ | ||
718 | } while (0) | ||
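/*
 * As a concrete example, para_fill(clts, CLTS) looks up the ROM
 * relocation for VMI_CALL_CLTS and, for a CALL relocation, points
 * paravirt_ops.clts directly at the ROM entry point (or at vmi_nop
 * for a NOP relocation).  para_wrap() is used where the Linux and
 * VMI calling conventions differ: the paravirt op is set to a C
 * wrapper and the raw ROM entry point is cached in vmi_ops for that
 * wrapper to call.
 */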
719 | |||
720 | /* | ||
721 | * Activate the VMI interface and switch into paravirtualized mode | ||
722 | */ | ||
723 | static inline int __init activate_vmi(void) | ||
724 | { | ||
725 | short kernel_cs; | ||
726 | u64 reloc; | ||
727 | const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc; | ||
728 | |||
729 | if (call_vrom_func(vmi_rom, vmi_init) != 0) { | ||
730 | printk(KERN_ERR "VMI ROM failed to initialize!"); | ||
731 | return 0; | ||
732 | } | ||
733 | savesegment(cs, kernel_cs); | ||
734 | |||
735 | paravirt_ops.paravirt_enabled = 1; | ||
736 | paravirt_ops.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK; | ||
737 | |||
738 | paravirt_ops.patch = vmi_patch; | ||
739 | paravirt_ops.name = "vmi"; | ||
740 | |||
741 | /* | ||
742 | * Many of these operations are ABI compatible with VMI. | ||
743 | * This means we can fill in the paravirt-ops with direct | ||
744 | * pointers into the VMI ROM. If the calling convention for | ||
745 | * these operations changes, this code needs to be updated. | ||
746 | * | ||
747 | * Exceptions | ||
748 | * CPUID paravirt-op uses pointers, not the native ISA | ||
749 | * halt has no VMI equivalent; all VMI halts are "safe" | ||
750 | * no MSR support yet - just trap and emulate. VMI uses the | ||
751 | * same ABI as the native ISA, but Linux wants exceptions | ||
752 | * from bogus MSR read / write handled | ||
753 | * rdpmc is not yet used in Linux | ||
754 | */ | ||
755 | |||
756 | /* CPUID is special, so very special it gets wrapped like a present */ | ||
757 | para_wrap(cpuid, vmi_cpuid, cpuid, CPUID); | ||
758 | |||
759 | para_fill(clts, CLTS); | ||
760 | para_fill(get_debugreg, GetDR); | ||
761 | para_fill(set_debugreg, SetDR); | ||
762 | para_fill(read_cr0, GetCR0); | ||
763 | para_fill(read_cr2, GetCR2); | ||
764 | para_fill(read_cr3, GetCR3); | ||
765 | para_fill(read_cr4, GetCR4); | ||
766 | para_fill(write_cr0, SetCR0); | ||
767 | para_fill(write_cr2, SetCR2); | ||
768 | para_fill(write_cr3, SetCR3); | ||
769 | para_fill(write_cr4, SetCR4); | ||
770 | para_fill(save_fl, GetInterruptMask); | ||
771 | para_fill(restore_fl, SetInterruptMask); | ||
772 | para_fill(irq_disable, DisableInterrupts); | ||
773 | para_fill(irq_enable, EnableInterrupts); | ||
774 | |||
775 | para_fill(wbinvd, WBINVD); | ||
776 | para_fill(read_tsc, RDTSC); | ||
777 | |||
778 | /* The following we emulate with trap and emulate for now */ | ||
779 | /* paravirt_ops.read_msr = vmi_rdmsr */ | ||
780 | /* paravirt_ops.write_msr = vmi_wrmsr */ | ||
781 | /* paravirt_ops.rdpmc = vmi_rdpmc */ | ||
782 | |||
783 | /* TR interface doesn't pass TR value, wrap */ | ||
784 | para_wrap(load_tr_desc, vmi_set_tr, set_tr, SetTR); | ||
785 | |||
786 | /* LDT is special, too */ | ||
787 | para_wrap(set_ldt, vmi_set_ldt, _set_ldt, SetLDT); | ||
788 | |||
789 | para_fill(load_gdt, SetGDT); | ||
790 | para_fill(load_idt, SetIDT); | ||
791 | para_fill(store_gdt, GetGDT); | ||
792 | para_fill(store_idt, GetIDT); | ||
793 | para_fill(store_tr, GetTR); | ||
794 | paravirt_ops.load_tls = vmi_load_tls; | ||
795 | para_fill(write_ldt_entry, WriteLDTEntry); | ||
796 | para_fill(write_gdt_entry, WriteGDTEntry); | ||
797 | para_fill(write_idt_entry, WriteIDTEntry); | ||
798 | para_wrap(load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack); | ||
799 | para_fill(set_iopl_mask, SetIOPLMask); | ||
800 | para_fill(io_delay, IODelay); | ||
801 | para_wrap(set_lazy_mode, vmi_set_lazy_mode, set_lazy_mode, SetLazyMode); | ||
802 | |||
803 | /* user and kernel flush are just handled with different flags to FlushTLB */ | ||
804 | para_wrap(flush_tlb_user, vmi_flush_tlb_user, _flush_tlb, FlushTLB); | ||
805 | para_wrap(flush_tlb_kernel, vmi_flush_tlb_kernel, _flush_tlb, FlushTLB); | ||
806 | para_fill(flush_tlb_single, InvalPage); | ||
807 | |||
808 | /* | ||
809 | * Until a standard flag format can be agreed on, we need to | ||
810 | * implement these as wrappers in Linux. Get the VMI ROM | ||
811 | * function pointers for the two backend calls. | ||
812 | */ | ||
813 | #ifdef CONFIG_X86_PAE | ||
814 | vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxELong); | ||
815 | vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxELong); | ||
816 | #else | ||
817 | vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxE); | ||
818 | vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxE); | ||
819 | #endif | ||
820 | |||
821 | if (vmi_ops.set_pte) { | ||
822 | paravirt_ops.set_pte = vmi_set_pte; | ||
823 | paravirt_ops.set_pte_at = vmi_set_pte_at; | ||
824 | paravirt_ops.set_pmd = vmi_set_pmd; | ||
825 | #ifdef CONFIG_X86_PAE | ||
826 | paravirt_ops.set_pte_atomic = vmi_set_pte_atomic; | ||
827 | paravirt_ops.set_pte_present = vmi_set_pte_present; | ||
828 | paravirt_ops.set_pud = vmi_set_pud; | ||
829 | paravirt_ops.pte_clear = vmi_pte_clear; | ||
830 | paravirt_ops.pmd_clear = vmi_pmd_clear; | ||
831 | #endif | ||
832 | } | ||
833 | |||
834 | if (vmi_ops.update_pte) { | ||
835 | paravirt_ops.pte_update = vmi_update_pte; | ||
836 | paravirt_ops.pte_update_defer = vmi_update_pte_defer; | ||
837 | } | ||
838 | |||
839 | vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage); | ||
840 | if (vmi_ops.allocate_page) { | ||
841 | paravirt_ops.alloc_pt = vmi_allocate_pt; | ||
842 | paravirt_ops.alloc_pd = vmi_allocate_pd; | ||
843 | paravirt_ops.alloc_pd_clone = vmi_allocate_pd_clone; | ||
844 | } | ||
845 | |||
846 | vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage); | ||
847 | if (vmi_ops.release_page) { | ||
848 | paravirt_ops.release_pt = vmi_release_pt; | ||
849 | paravirt_ops.release_pd = vmi_release_pd; | ||
850 | } | ||
851 | |||
852 | /* Set linear is needed in all cases */ | ||
853 | vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping); | ||
854 | #ifdef CONFIG_HIGHPTE | ||
855 | if (vmi_ops.set_linear_mapping) | ||
856 | paravirt_ops.kmap_atomic_pte = vmi_kmap_atomic_pte; | ||
857 | #endif | ||
858 | |||
859 | /* | ||
860 | * These MUST always be patched. Don't support indirect jumps | ||
861 | * through these operations, as the VMI interface may use either | ||
862 | * a jump or a call to get to these operations, depending on | ||
863 | * the backend. They are performance critical anyway, so requiring | ||
864 | * a patch is not a big problem. | ||
865 | */ | ||
866 | paravirt_ops.irq_enable_sysexit = (void *)0xfeedbab0; | ||
867 | paravirt_ops.iret = (void *)0xbadbab0; | ||
868 | |||
869 | #ifdef CONFIG_SMP | ||
870 | para_wrap(startup_ipi_hook, vmi_startup_ipi_hook, set_initial_ap_state, SetInitialAPState); | ||
871 | #endif | ||
872 | |||
873 | #ifdef CONFIG_X86_LOCAL_APIC | ||
874 | para_fill(apic_read, APICRead); | ||
875 | para_fill(apic_write, APICWrite); | ||
876 | para_fill(apic_write_atomic, APICWrite); | ||
877 | #endif | ||
878 | |||
879 | /* | ||
880 | * Check for VMI timer functionality by probing for a cycle frequency method | ||
881 | */ | ||
882 | reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_GetCycleFrequency); | ||
883 | if (!disable_vmi_timer && rel->type != VMI_RELOCATION_NONE) { | ||
884 | vmi_timer_ops.get_cycle_frequency = (void *)rel->eip; | ||
885 | vmi_timer_ops.get_cycle_counter = | ||
886 | vmi_get_function(VMI_CALL_GetCycleCounter); | ||
887 | vmi_timer_ops.get_wallclock = | ||
888 | vmi_get_function(VMI_CALL_GetWallclockTime); | ||
889 | vmi_timer_ops.wallclock_updated = | ||
890 | vmi_get_function(VMI_CALL_WallclockUpdated); | ||
891 | vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm); | ||
892 | vmi_timer_ops.cancel_alarm = | ||
893 | vmi_get_function(VMI_CALL_CancelAlarm); | ||
894 | paravirt_ops.time_init = vmi_time_init; | ||
895 | paravirt_ops.get_wallclock = vmi_get_wallclock; | ||
896 | paravirt_ops.set_wallclock = vmi_set_wallclock; | ||
897 | #ifdef CONFIG_X86_LOCAL_APIC | ||
898 | paravirt_ops.setup_boot_clock = vmi_time_bsp_init; | ||
899 | paravirt_ops.setup_secondary_clock = vmi_time_ap_init; | ||
900 | #endif | ||
901 | paravirt_ops.sched_clock = vmi_sched_clock; | ||
902 | paravirt_ops.get_cpu_khz = vmi_cpu_khz; | ||
903 | |||
904 | /* We have true wallclock functions; disable CMOS clock sync */ | ||
905 | no_sync_cmos_clock = 1; | ||
906 | } else { | ||
907 | disable_noidle = 1; | ||
908 | disable_vmi_timer = 1; | ||
909 | } | ||
910 | |||
911 | para_fill(safe_halt, Halt); | ||
912 | |||
913 | /* | ||
914 | * Alternative instruction rewriting doesn't happen soon enough | ||
915 | * to convert VMI_IRET to a call instead of a jump; so we have | ||
916 | * to do this before IRQs get reenabled. Fortunately, it is | ||
917 | * idempotent. | ||
918 | */ | ||
919 | apply_paravirt(__parainstructions, __parainstructions_end); | ||
920 | |||
921 | vmi_bringup(); | ||
922 | |||
923 | return 1; | ||
924 | } | ||
925 | |||
926 | #undef para_fill | ||
927 | |||
928 | void __init vmi_init(void) | ||
929 | { | ||
930 | unsigned long flags; | ||
931 | |||
932 | if (!vmi_rom) | ||
933 | probe_vmi_rom(); | ||
934 | else | ||
935 | check_vmi_rom(vmi_rom); | ||
936 | |||
937 | /* In case probing for or validating the ROM failed, bail */ | ||
938 | if (!vmi_rom) | ||
939 | return; | ||
940 | |||
941 | reserve_top_address(-vmi_rom->virtual_top); | ||
942 | |||
943 | local_irq_save(flags); | ||
944 | activate_vmi(); | ||
945 | |||
946 | #ifdef CONFIG_X86_IO_APIC | ||
947 | /* This is virtual hardware; timer routing is wired correctly */ | ||
948 | no_timer_check = 1; | ||
949 | #endif | ||
950 | local_irq_restore(flags & X86_EFLAGS_IF); | ||
951 | } | ||
952 | |||
953 | static int __init parse_vmi(char *arg) | ||
954 | { | ||
955 | if (!arg) | ||
956 | return -EINVAL; | ||
957 | |||
958 | if (!strcmp(arg, "disable_pge")) { | ||
959 | clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability); | ||
960 | disable_pge = 1; | ||
961 | } else if (!strcmp(arg, "disable_pse")) { | ||
962 | clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability); | ||
963 | disable_pse = 1; | ||
964 | } else if (!strcmp(arg, "disable_sep")) { | ||
965 | clear_bit(X86_FEATURE_SEP, boot_cpu_data.x86_capability); | ||
966 | disable_sep = 1; | ||
967 | } else if (!strcmp(arg, "disable_tsc")) { | ||
968 | clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability); | ||
969 | disable_tsc = 1; | ||
970 | } else if (!strcmp(arg, "disable_mtrr")) { | ||
971 | clear_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability); | ||
972 | disable_mtrr = 1; | ||
973 | } else if (!strcmp(arg, "disable_timer")) { | ||
974 | disable_vmi_timer = 1; | ||
975 | disable_noidle = 1; | ||
976 | } else if (!strcmp(arg, "disable_noidle")) | ||
977 | disable_noidle = 1; | ||
978 | return 0; | ||
979 | } | ||
980 | |||
981 | early_param("vmi", parse_vmi); | ||
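/*
 * Boot-time usage: each "vmi=" occurrence on the kernel command line
 * is parsed separately, so several features can be disabled by
 * repeating the option (e.g. "vmi=disable_tsc vmi=disable_noidle").
 * Recognised values are disable_pge, disable_pse, disable_sep,
 * disable_tsc, disable_mtrr, disable_timer and disable_noidle.
 */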