Diffstat (limited to 'arch/x86')
-rw-r--r--   arch/x86/kvm/mmu.c         279
-rw-r--r--   arch/x86/kvm/mmu_audit.c   297
2 files changed, 298 insertions(+), 278 deletions(-)
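
This patch moves the KVM MMU audit code out of mmu.c and into a new file, mmu_audit.c. The code is carried over essentially verbatim; mmu.c now pulls it in with #include "mmu_audit.c" under CONFIG_KVM_MMU_AUDIT, presumably so the audit helpers keep access to mmu.c's static functions without new exports.
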
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8b750ff6911a..d2dad65a45f8 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3490,282 +3490,5 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
 EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
 
 #ifdef CONFIG_KVM_MMU_AUDIT
-static const char *audit_msg;
-
-typedef void (*inspect_spte_fn) (struct kvm *kvm, u64 *sptep);
-
-static void __mmu_spte_walk(struct kvm *kvm, struct kvm_mmu_page *sp,
-			    inspect_spte_fn fn)
-{
-	int i;
-
-	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
-		u64 ent = sp->spt[i];
-
-		if (is_shadow_present_pte(ent)) {
-			if (!is_last_spte(ent, sp->role.level)) {
-				struct kvm_mmu_page *child;
-				child = page_header(ent & PT64_BASE_ADDR_MASK);
-				__mmu_spte_walk(kvm, child, fn);
-			} else
-				fn(kvm, &sp->spt[i]);
-		}
-	}
-}
-
-static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
-{
-	int i;
-	struct kvm_mmu_page *sp;
-
-	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
-		return;
-	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
-		hpa_t root = vcpu->arch.mmu.root_hpa;
-		sp = page_header(root);
-		__mmu_spte_walk(vcpu->kvm, sp, fn);
-		return;
-	}
-	for (i = 0; i < 4; ++i) {
-		hpa_t root = vcpu->arch.mmu.pae_root[i];
-
-		if (root && VALID_PAGE(root)) {
-			root &= PT64_BASE_ADDR_MASK;
-			sp = page_header(root);
-			__mmu_spte_walk(vcpu->kvm, sp, fn);
-		}
-	}
-	return;
-}
-
-static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
-				gva_t va, int level)
-{
-	u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
-	int i;
-	gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
-
-	for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
-		u64 *sptep = pt + i;
-		struct kvm_mmu_page *sp;
-		gfn_t gfn;
-		pfn_t pfn;
-		hpa_t hpa;
-
-		sp = page_header(__pa(sptep));
-
-		if (sp->unsync) {
-			if (level != PT_PAGE_TABLE_LEVEL) {
-				printk(KERN_ERR "audit: (%s) error: unsync sp: %p level = %d\n",
-				       audit_msg, sp, level);
-				return;
-			}
-
-			if (*sptep == shadow_notrap_nonpresent_pte) {
-				printk(KERN_ERR "audit: (%s) error: notrap spte in unsync sp: %p\n",
-				       audit_msg, sp);
-				return;
-			}
-		}
-
-		if (sp->role.direct && *sptep == shadow_notrap_nonpresent_pte) {
-			printk(KERN_ERR "audit: (%s) error: notrap spte in direct sp: %p\n",
-			       audit_msg, sp);
-			return;
-		}
-
-		if (!is_shadow_present_pte(*sptep) ||
-		    !is_last_spte(*sptep, level))
-			return;
-
-		gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
-		pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);
-
-		if (is_error_pfn(pfn)) {
-			kvm_release_pfn_clean(pfn);
-			return;
-		}
-
-		hpa = pfn << PAGE_SHIFT;
-
-		if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
-			printk(KERN_ERR "xx audit error: (%s) levels %d"
-			       " gva %lx pfn %llx hpa %llx ent %llx\n",
-			       audit_msg, vcpu->arch.mmu.root_level,
-			       va, pfn, hpa, *sptep);
-	}
-}
-
-static void audit_mappings(struct kvm_vcpu *vcpu)
-{
-	unsigned i;
-
-	if (vcpu->arch.mmu.root_level == 4)
-		audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
-	else
-		for (i = 0; i < 4; ++i)
-			if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
-				audit_mappings_page(vcpu,
-						    vcpu->arch.mmu.pae_root[i],
-						    i << 30,
-						    2);
-}
-
-void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
-{
-	unsigned long *rmapp;
-	struct kvm_mmu_page *rev_sp;
-	gfn_t gfn;
-
-
-	rev_sp = page_header(__pa(sptep));
-	gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);
-
-	if (!gfn_to_memslot(kvm, gfn)) {
-		if (!printk_ratelimit())
-			return;
-		printk(KERN_ERR "%s: no memslot for gfn %llx\n",
-		       audit_msg, gfn);
-		printk(KERN_ERR "%s: index %ld of sp (gfn=%llx)\n",
-		       audit_msg, (long int)(sptep - rev_sp->spt),
-		       rev_sp->gfn);
-		dump_stack();
-		return;
-	}
-
-	rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
-	if (!*rmapp) {
-		if (!printk_ratelimit())
-			return;
-		printk(KERN_ERR "%s: no rmap for writable spte %llx\n",
-		       audit_msg, *sptep);
-		dump_stack();
-	}
-}
-
-void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu)
-{
-	mmu_spte_walk(vcpu, inspect_spte_has_rmap);
-}
-
-static void check_mappings_rmap(struct kvm_vcpu *vcpu)
-{
-	struct kvm_mmu_page *sp;
-	int i;
-
-	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
-		u64 *pt = sp->spt;
-
-		if (sp->role.level != PT_PAGE_TABLE_LEVEL)
-			continue;
-
-		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
-			if (!is_rmap_spte(pt[i]))
-				continue;
-
-			inspect_spte_has_rmap(vcpu->kvm, &pt[i]);
-		}
-	}
-	return;
-}
-
-static void audit_rmap(struct kvm_vcpu *vcpu)
-{
-	check_mappings_rmap(vcpu);
-}
-
-static void audit_write_protection(struct kvm_vcpu *vcpu)
-{
-	struct kvm_mmu_page *sp;
-	struct kvm_memory_slot *slot;
-	unsigned long *rmapp;
-	u64 *spte;
-
-	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
-		if (sp->role.direct)
-			continue;
-		if (sp->unsync)
-			continue;
-		if (sp->role.invalid)
-			continue;
-
-		slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
-		rmapp = &slot->rmap[sp->gfn - slot->base_gfn];
-
-		spte = rmap_next(vcpu->kvm, rmapp, NULL);
-		while (spte) {
-			if (is_writable_pte(*spte))
-				printk(KERN_ERR "%s: (%s) shadow page has "
-				       "writable mappings: gfn %llx role %x\n",
-				       __func__, audit_msg, sp->gfn,
-				       sp->role.word);
-			spte = rmap_next(vcpu->kvm, rmapp, spte);
-		}
-	}
-}
-
-static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int audit_point)
-{
-	audit_msg = audit_point_name[audit_point];
-	audit_rmap(vcpu);
-	audit_write_protection(vcpu);
-	if (strcmp("pre pte write", audit_msg) != 0)
-		audit_mappings(vcpu);
-	audit_sptes_have_rmaps(vcpu);
-}
-
-static bool mmu_audit;
-
-static void mmu_audit_enable(void)
-{
-	int ret;
-
-	if (mmu_audit)
-		return;
-
-	ret = register_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
-	WARN_ON(ret);
-
-	mmu_audit = true;
-}
-
-static void mmu_audit_disable(void)
-{
-	if (!mmu_audit)
-		return;
-
-	unregister_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
-	tracepoint_synchronize_unregister();
-	mmu_audit = false;
-}
-
-static int mmu_audit_set(const char *val, const struct kernel_param *kp)
-{
-	int ret;
-	unsigned long enable;
-
-	ret = strict_strtoul(val, 10, &enable);
-	if (ret < 0)
-		return -EINVAL;
-
-	switch (enable) {
-	case 0:
-		mmu_audit_disable();
-		break;
-	case 1:
-		mmu_audit_enable();
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static struct kernel_param_ops audit_param_ops = {
-	.set = mmu_audit_set,
-	.get = param_get_bool,
-};
-
-module_param_cb(mmu_audit, &audit_param_ops, &mmu_audit, 0644);
+#include "mmu_audit.c"
 #endif
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
new file mode 100644
index 000000000000..fb8a461333c5
--- /dev/null
+++ b/arch/x86/kvm/mmu_audit.c
@@ -0,0 +1,297 @@
+/*
+ * mmu_audit.c:
+ *
+ * Audit code for KVM MMU
+ *
+ * Copyright (C) 2006 Qumranet, Inc.
+ * Copyright 2010 Red Hat, Inc. and/or its affiliates.
+ *
+ * Authors:
+ *   Yaniv Kamay <yaniv@qumranet.com>
+ *   Avi Kivity <avi@qumranet.com>
+ *   Marcelo Tosatti <mtosatti@redhat.com>
+ *   Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+static const char *audit_msg;
+
+typedef void (*inspect_spte_fn) (struct kvm *kvm, u64 *sptep);
+
+static void __mmu_spte_walk(struct kvm *kvm, struct kvm_mmu_page *sp,
+			    inspect_spte_fn fn)
+{
+	int i;
+
+	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
+		u64 ent = sp->spt[i];
+
+		if (is_shadow_present_pte(ent)) {
+			if (!is_last_spte(ent, sp->role.level)) {
+				struct kvm_mmu_page *child;
+				child = page_header(ent & PT64_BASE_ADDR_MASK);
+				__mmu_spte_walk(kvm, child, fn);
+			} else
+				fn(kvm, &sp->spt[i]);
+		}
+	}
+}
+
+static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
+{
+	int i;
+	struct kvm_mmu_page *sp;
+
+	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		return;
+	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+		hpa_t root = vcpu->arch.mmu.root_hpa;
+		sp = page_header(root);
+		__mmu_spte_walk(vcpu->kvm, sp, fn);
+		return;
+	}
+	for (i = 0; i < 4; ++i) {
+		hpa_t root = vcpu->arch.mmu.pae_root[i];
+
+		if (root && VALID_PAGE(root)) {
+			root &= PT64_BASE_ADDR_MASK;
+			sp = page_header(root);
+			__mmu_spte_walk(vcpu->kvm, sp, fn);
+		}
+	}
+	return;
+}
+
+static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
+				gva_t va, int level)
+{
+	u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
+	int i;
+	gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
+
+	for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
+		u64 *sptep = pt + i;
+		struct kvm_mmu_page *sp;
+		gfn_t gfn;
+		pfn_t pfn;
+		hpa_t hpa;
+
+		sp = page_header(__pa(sptep));
+
+		if (sp->unsync) {
+			if (level != PT_PAGE_TABLE_LEVEL) {
+				printk(KERN_ERR "audit: (%s) error: unsync sp: %p level = %d\n",
+				       audit_msg, sp, level);
+				return;
+			}
+
+			if (*sptep == shadow_notrap_nonpresent_pte) {
+				printk(KERN_ERR "audit: (%s) error: notrap spte in unsync sp: %p\n",
+				       audit_msg, sp);
+				return;
+			}
+		}
+
+		if (sp->role.direct && *sptep == shadow_notrap_nonpresent_pte) {
+			printk(KERN_ERR "audit: (%s) error: notrap spte in direct sp: %p\n",
+			       audit_msg, sp);
+			return;
+		}
+
+		if (!is_shadow_present_pte(*sptep) ||
+		    !is_last_spte(*sptep, level))
+			return;
+
+		gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
+		pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);
+
+		if (is_error_pfn(pfn)) {
+			kvm_release_pfn_clean(pfn);
+			return;
+		}
+
+		hpa = pfn << PAGE_SHIFT;
+
+		if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
+			printk(KERN_ERR "xx audit error: (%s) levels %d"
+			       " gva %lx pfn %llx hpa %llx ent %llx\n",
+			       audit_msg, vcpu->arch.mmu.root_level,
+			       va, pfn, hpa, *sptep);
+	}
+}
+
+static void audit_mappings(struct kvm_vcpu *vcpu)
+{
+	unsigned i;
+
+	if (vcpu->arch.mmu.root_level == 4)
+		audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
+	else
+		for (i = 0; i < 4; ++i)
+			if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
+				audit_mappings_page(vcpu,
+						    vcpu->arch.mmu.pae_root[i],
+						    i << 30,
+						    2);
+}
+
+void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
+{
+	unsigned long *rmapp;
+	struct kvm_mmu_page *rev_sp;
+	gfn_t gfn;
+
+
+	rev_sp = page_header(__pa(sptep));
+	gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);
+
+	if (!gfn_to_memslot(kvm, gfn)) {
+		if (!printk_ratelimit())
+			return;
+		printk(KERN_ERR "%s: no memslot for gfn %llx\n",
+		       audit_msg, gfn);
+		printk(KERN_ERR "%s: index %ld of sp (gfn=%llx)\n",
+		       audit_msg, (long int)(sptep - rev_sp->spt),
+		       rev_sp->gfn);
+		dump_stack();
+		return;
+	}
+
+	rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
+	if (!*rmapp) {
+		if (!printk_ratelimit())
+			return;
+		printk(KERN_ERR "%s: no rmap for writable spte %llx\n",
+		       audit_msg, *sptep);
+		dump_stack();
+	}
+}
+
+void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu)
+{
+	mmu_spte_walk(vcpu, inspect_spte_has_rmap);
+}
+
+static void check_mappings_rmap(struct kvm_vcpu *vcpu)
+{
+	struct kvm_mmu_page *sp;
+	int i;
+
+	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
+		u64 *pt = sp->spt;
+
+		if (sp->role.level != PT_PAGE_TABLE_LEVEL)
+			continue;
+
+		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
+			if (!is_rmap_spte(pt[i]))
+				continue;
+
+			inspect_spte_has_rmap(vcpu->kvm, &pt[i]);
+		}
+	}
+	return;
+}
+
+static void audit_rmap(struct kvm_vcpu *vcpu)
+{
+	check_mappings_rmap(vcpu);
+}
+
+static void audit_write_protection(struct kvm_vcpu *vcpu)
+{
+	struct kvm_mmu_page *sp;
+	struct kvm_memory_slot *slot;
+	unsigned long *rmapp;
+	u64 *spte;
+
+	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
+		if (sp->role.direct)
+			continue;
+		if (sp->unsync)
+			continue;
+		if (sp->role.invalid)
+			continue;
+
+		slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
+		rmapp = &slot->rmap[sp->gfn - slot->base_gfn];
+
+		spte = rmap_next(vcpu->kvm, rmapp, NULL);
+		while (spte) {
+			if (is_writable_pte(*spte))
+				printk(KERN_ERR "%s: (%s) shadow page has "
+				       "writable mappings: gfn %llx role %x\n",
+				       __func__, audit_msg, sp->gfn,
+				       sp->role.word);
+			spte = rmap_next(vcpu->kvm, rmapp, spte);
+		}
+	}
+}
+
+static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int audit_point)
+{
+	audit_msg = audit_point_name[audit_point];
+	audit_rmap(vcpu);
+	audit_write_protection(vcpu);
+	if (strcmp("pre pte write", audit_msg) != 0)
+		audit_mappings(vcpu);
+	audit_sptes_have_rmaps(vcpu);
+}
+
+static bool mmu_audit;
+
+static void mmu_audit_enable(void)
+{
+	int ret;
+
+	if (mmu_audit)
+		return;
+
+	ret = register_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
+	WARN_ON(ret);
+
+	mmu_audit = true;
+}
+
+static void mmu_audit_disable(void)
+{
+	if (!mmu_audit)
+		return;
+
+	unregister_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
+	tracepoint_synchronize_unregister();
+	mmu_audit = false;
+}
+
+static int mmu_audit_set(const char *val, const struct kernel_param *kp)
+{
+	int ret;
+	unsigned long enable;
+
+	ret = strict_strtoul(val, 10, &enable);
+	if (ret < 0)
+		return -EINVAL;
+
+	switch (enable) {
+	case 0:
+		mmu_audit_disable();
+		break;
+	case 1:
+		mmu_audit_enable();
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct kernel_param_ops audit_param_ops = {
+	.set = mmu_audit_set,
+	.get = param_get_bool,
+};
+
+module_param_cb(mmu_audit, &audit_param_ops, &mmu_audit, 0644);
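
With CONFIG_KVM_MMU_AUDIT enabled, module_param_cb() exposes the flag as a writable parameter, so auditing can presumably be toggled at runtime through sysfs (this code is built into the kvm module, so the file should be /sys/module/kvm/parameters/mmu_audit): writing 1 registers the kvm_mmu_audit tracepoint probe, writing 0 unregisters it.

The inspect_spte_fn typedef keeps the spte walker generic: an audit pass is just a per-leaf-spte callback handed to mmu_spte_walk(). As a minimal sketch of how a further pass could be wired in (the function names below are hypothetical, not part of this patch):

	/* Hypothetical extra pass: flag writable leaf sptes.  Reuses
	 * is_writable_pte() and the audit_msg context string above. */
	static void inspect_spte_is_writable(struct kvm *kvm, u64 *sptep)
	{
		if (is_writable_pte(*sptep))
			printk(KERN_ERR "%s: writable spte %llx\n",
			       audit_msg, *sptep);
	}

	static void audit_writable_sptes(struct kvm_vcpu *vcpu)
	{
		/* mmu_spte_walk() visits every leaf spte reachable from
		 * the current root and invokes the callback on each. */
		mmu_spte_walk(vcpu, inspect_spte_is_writable);
	}

Such a pass would then be called from kvm_mmu_audit() alongside the existing checks.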