author	Chris Wright <chrisw@sous-sol.org>	2011-11-01 20:28:47 -0400
committer	Avi Kivity <avi@redhat.com>	2011-12-27 04:17:08 -0500
commit	5202397df819d3c5a3f201bd4af6b86542115fb6 (patch)
tree	f46db8ad2eea7a5acd9dbdbfc42c8451539203bd /arch/x86/kernel/kvm.c
parent	1a214246cbb431f7430f7d0c0fb66218a6f442d2 (diff)
KVM guest: remove KVM guest pv mmu support
This has not been used for some years now. It's time to remove it.
Signed-off-by: Chris Wright <chrisw@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kernel/kvm.c')
-rw-r--r--	arch/x86/kernel/kvm.c	181
1 file changed, 0 insertions(+), 181 deletions(-)
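
For context before the patch itself: under the retired KVM_FEATURE_MMU_OP scheme the guest did not write page-table entries directly. It packed a request such as KVM_MMU_OP_WRITE_PTE into a buffer and handed the buffer's guest-physical address to the host via the KVM_HC_MMU_OP hypercall, resubmitting until the host had consumed every byte. A minimal sketch of one unbatched write, distilled from the code deleted below (write_pte_once is a hypothetical name; the structure, opcode, and hypercall are taken from the removed lines):

/*
 * Sketch (hypothetical helper): issue one KVM_MMU_OP_WRITE_PTE request.
 * The host returns how many bytes of the buffer it processed, so the
 * loop resubmits any remainder, mirroring the removed kvm_mmu_op().
 */
static void write_pte_once(u64 phys, u64 val)
{
	struct kvm_mmu_op_write_pte wpte = {
		.header.op = KVM_MMU_OP_WRITE_PTE,
		.pte_phys = phys,
		.pte_val = val,
	};
	void *buf = &wpte;
	unsigned len = sizeof(wpte);

	do {
		/* args: byte count, then low/high halves of the address */
		int r = kvm_hypercall3(KVM_HC_MMU_OP, len, __pa(buf), 0);
		buf += r;
		len -= r;
	} while (len);
}
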
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index a9c2116001d6..f0c6fd6f176b 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -39,8 +39,6 @@
 #include <asm/desc.h>
 #include <asm/tlbflush.h>
 
-#define MMU_QUEUE_SIZE 1024
-
 static int kvmapf = 1;
 
 static int parse_no_kvmapf(char *arg)
@@ -60,21 +58,10 @@ static int parse_no_stealacc(char *arg)
 
 early_param("no-steal-acc", parse_no_stealacc);
 
-struct kvm_para_state {
-	u8 mmu_queue[MMU_QUEUE_SIZE];
-	int mmu_queue_len;
-};
-
-static DEFINE_PER_CPU(struct kvm_para_state, para_state);
 static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
 static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
 static int has_steal_clock = 0;
 
-static struct kvm_para_state *kvm_para_state(void)
-{
-	return &per_cpu(para_state, raw_smp_processor_id());
-}
-
 /*
  * No need for any "IO delay" on KVM
  */
@@ -271,151 +258,6 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
 	}
 }
 
-static void kvm_mmu_op(void *buffer, unsigned len)
-{
-	int r;
-	unsigned long a1, a2;
-
-	do {
-		a1 = __pa(buffer);
-		a2 = 0;   /* on i386 __pa() always returns <4G */
-		r = kvm_hypercall3(KVM_HC_MMU_OP, len, a1, a2);
-		buffer += r;
-		len -= r;
-	} while (len);
-}
-
-static void mmu_queue_flush(struct kvm_para_state *state)
-{
-	if (state->mmu_queue_len) {
-		kvm_mmu_op(state->mmu_queue, state->mmu_queue_len);
-		state->mmu_queue_len = 0;
-	}
-}
-
-static void kvm_deferred_mmu_op(void *buffer, int len)
-{
-	struct kvm_para_state *state = kvm_para_state();
-
-	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU) {
-		kvm_mmu_op(buffer, len);
-		return;
-	}
-	if (state->mmu_queue_len + len > sizeof state->mmu_queue)
-		mmu_queue_flush(state);
-	memcpy(state->mmu_queue + state->mmu_queue_len, buffer, len);
-	state->mmu_queue_len += len;
-}
-
-static void kvm_mmu_write(void *dest, u64 val)
-{
-	__u64 pte_phys;
-	struct kvm_mmu_op_write_pte wpte;
-
-#ifdef CONFIG_HIGHPTE
-	struct page *page;
-	unsigned long dst = (unsigned long) dest;
-
-	page = kmap_atomic_to_page(dest);
-	pte_phys = page_to_pfn(page);
-	pte_phys <<= PAGE_SHIFT;
-	pte_phys += (dst & ~(PAGE_MASK));
-#else
-	pte_phys = (unsigned long)__pa(dest);
-#endif
-	wpte.header.op = KVM_MMU_OP_WRITE_PTE;
-	wpte.pte_val = val;
-	wpte.pte_phys = pte_phys;
-
-	kvm_deferred_mmu_op(&wpte, sizeof wpte);
-}
-
-/*
- * We only need to hook operations that are MMU writes.  We hook these so that
- * we can use lazy MMU mode to batch these operations.  We could probably
- * improve the performance of the host code if we used some of the information
- * here to simplify processing of batched writes.
- */
-static void kvm_set_pte(pte_t *ptep, pte_t pte)
-{
-	kvm_mmu_write(ptep, pte_val(pte));
-}
-
-static void kvm_set_pte_at(struct mm_struct *mm, unsigned long addr,
-			   pte_t *ptep, pte_t pte)
-{
-	kvm_mmu_write(ptep, pte_val(pte));
-}
-
-static void kvm_set_pmd(pmd_t *pmdp, pmd_t pmd)
-{
-	kvm_mmu_write(pmdp, pmd_val(pmd));
-}
-
-#if PAGETABLE_LEVELS >= 3
-#ifdef CONFIG_X86_PAE
-static void kvm_set_pte_atomic(pte_t *ptep, pte_t pte)
-{
-	kvm_mmu_write(ptep, pte_val(pte));
-}
-
-static void kvm_pte_clear(struct mm_struct *mm,
-			  unsigned long addr, pte_t *ptep)
-{
-	kvm_mmu_write(ptep, 0);
-}
-
-static void kvm_pmd_clear(pmd_t *pmdp)
-{
-	kvm_mmu_write(pmdp, 0);
-}
-#endif
-
-static void kvm_set_pud(pud_t *pudp, pud_t pud)
-{
-	kvm_mmu_write(pudp, pud_val(pud));
-}
-
-#if PAGETABLE_LEVELS == 4
-static void kvm_set_pgd(pgd_t *pgdp, pgd_t pgd)
-{
-	kvm_mmu_write(pgdp, pgd_val(pgd));
-}
-#endif
-#endif /* PAGETABLE_LEVELS >= 3 */
-
-static void kvm_flush_tlb(void)
-{
-	struct kvm_mmu_op_flush_tlb ftlb = {
-		.header.op = KVM_MMU_OP_FLUSH_TLB,
-	};
-
-	kvm_deferred_mmu_op(&ftlb, sizeof ftlb);
-}
-
-static void kvm_release_pt(unsigned long pfn)
-{
-	struct kvm_mmu_op_release_pt rpt = {
-		.header.op = KVM_MMU_OP_RELEASE_PT,
-		.pt_phys = (u64)pfn << PAGE_SHIFT,
-	};
-
-	kvm_mmu_op(&rpt, sizeof rpt);
-}
-
-static void kvm_enter_lazy_mmu(void)
-{
-	paravirt_enter_lazy_mmu();
-}
-
-static void kvm_leave_lazy_mmu(void)
-{
-	struct kvm_para_state *state = kvm_para_state();
-
-	mmu_queue_flush(state);
-	paravirt_leave_lazy_mmu();
-}
-
 static void __init paravirt_ops_setup(void)
 {
 	pv_info.name = "KVM";
@@ -424,29 +266,6 @@ static void __init paravirt_ops_setup(void)
 	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
 		pv_cpu_ops.io_delay = kvm_io_delay;
 
-	if (kvm_para_has_feature(KVM_FEATURE_MMU_OP)) {
-		pv_mmu_ops.set_pte = kvm_set_pte;
-		pv_mmu_ops.set_pte_at = kvm_set_pte_at;
-		pv_mmu_ops.set_pmd = kvm_set_pmd;
-#if PAGETABLE_LEVELS >= 3
-#ifdef CONFIG_X86_PAE
-		pv_mmu_ops.set_pte_atomic = kvm_set_pte_atomic;
-		pv_mmu_ops.pte_clear = kvm_pte_clear;
-		pv_mmu_ops.pmd_clear = kvm_pmd_clear;
-#endif
-		pv_mmu_ops.set_pud = kvm_set_pud;
-#if PAGETABLE_LEVELS == 4
-		pv_mmu_ops.set_pgd = kvm_set_pgd;
-#endif
-#endif
-		pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
-		pv_mmu_ops.release_pte = kvm_release_pt;
-		pv_mmu_ops.release_pmd = kvm_release_pt;
-		pv_mmu_ops.release_pud = kvm_release_pt;
-
-		pv_mmu_ops.lazy_mode.enter = kvm_enter_lazy_mmu;
-		pv_mmu_ops.lazy_mode.leave = kvm_leave_lazy_mmu;
-	}
 #ifdef CONFIG_X86_IO_APIC
 	no_timer_check = 1;
 #endif
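
The block comment deleted in the large hunk states the design: only MMU writes were hooked, so that lazy MMU mode could batch them. Concretely, while a CPU sat inside a lazy-MMU section, every hooked write was appended to the per-CPU mmu_queue, and leaving the section flushed the whole queue to the host in a single KVM_HC_MMU_OP hypercall instead of one per write. A rough usage sketch under that reading (remap_range is a hypothetical caller; arch_enter_lazy_mmu_mode()/arch_leave_lazy_mmu_mode() are the generic paravirt entry points that dispatched to the kvm_enter_lazy_mmu()/kvm_leave_lazy_mmu() hooks removed above):

/*
 * Hypothetical caller illustrating the batching this patch removes:
 * with the KVM_FEATURE_MMU_OP hooks installed, each set_pte() below
 * went through kvm_set_pte() -> kvm_mmu_write() and was queued rather
 * than hypercalled immediately; leaving lazy mode flushed the batch.
 */
static void remap_range(pte_t *ptep, const pte_t *vals, int n)
{
	int i;

	arch_enter_lazy_mmu_mode();		/* -> kvm_enter_lazy_mmu() */
	for (i = 0; i < n; i++)
		set_pte(ptep + i, vals[i]);	/* queued, not issued */
	arch_leave_lazy_mmu_mode();		/* -> mmu_queue_flush() */
}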