author    Russell King <rmk@dyn-67.arm.linux.org.uk>	2009-03-28 16:29:51 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>	2009-03-28 16:29:51 -0400
commit    ed40d0c472b136682b2fcba05f89762859c7374f (patch)
tree      076b83a26bcd63d6158463735dd34c10bbc591dc /arch/x86/kvm
parent    9e495834e59ca9b29f1a1f63b9f5533bb022ac49 (diff)
parent    5d80f8e5a9dc9c9a94d4aeaa567e219a808b8a4a (diff)
Merge branch 'origin' into devel
Conflicts:
	sound/soc/pxa/pxa2xx-i2s.c
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/Kconfig        |   4
-rw-r--r--  arch/x86/kvm/i8254.c        |  21
-rw-r--r--  arch/x86/kvm/i8254.h        |   2
-rw-r--r--  arch/x86/kvm/i8259.c        |  25
-rw-r--r--  arch/x86/kvm/irq.h          |   2
-rw-r--r--  arch/x86/kvm/kvm_svm.h      |  16
-rw-r--r--  arch/x86/kvm/mmu.c          | 237
-rw-r--r--  arch/x86/kvm/mmu.h          |   2
-rw-r--r--  arch/x86/kvm/paging_tmpl.h  | 219
-rw-r--r--  arch/x86/kvm/svm.c          | 916
-rw-r--r--  arch/x86/kvm/vmx.c          | 393
-rw-r--r--  arch/x86/kvm/x86.c          | 432
-rw-r--r--  arch/x86/kvm/x86_emulate.c  |  56

13 files changed, 1660 insertions(+), 665 deletions(-)
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index b81125f0bdee..0a303c3ed11f 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -4,6 +4,10 @@
 config HAVE_KVM
 	bool
 
+config HAVE_KVM_IRQCHIP
+	bool
+	default y
+
 menuconfig VIRTUALIZATION
 	bool "Virtualization"
 	depends on HAVE_KVM || X86
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 72bd275a9b5c..c13bb92d3157 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -201,6 +201,9 @@ static int __pit_timer_fn(struct kvm_kpit_state *ps)
 	if (!atomic_inc_and_test(&pt->pending))
 		set_bit(KVM_REQ_PENDING_TIMER, &vcpu0->requests);
 
+	if (!pt->reinject)
+		atomic_set(&pt->pending, 1);
+
 	if (vcpu0 && waitqueue_active(&vcpu0->wq))
 		wake_up_interruptible(&vcpu0->wq);
 
@@ -536,6 +539,16 @@ void kvm_pit_reset(struct kvm_pit *pit)
 	pit->pit_state.irq_ack = 1;
 }
 
+static void pit_mask_notifer(struct kvm_irq_mask_notifier *kimn, bool mask)
+{
+	struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);
+
+	if (!mask) {
+		atomic_set(&pit->pit_state.pit_timer.pending, 0);
+		pit->pit_state.irq_ack = 1;
+	}
+}
+
 struct kvm_pit *kvm_create_pit(struct kvm *kvm)
 {
 	struct kvm_pit *pit;
@@ -545,9 +558,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm)
 	if (!pit)
 		return NULL;
 
-	mutex_lock(&kvm->lock);
 	pit->irq_source_id = kvm_request_irq_source_id(kvm);
-	mutex_unlock(&kvm->lock);
 	if (pit->irq_source_id < 0) {
 		kfree(pit);
 		return NULL;
@@ -580,10 +591,14 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm)
 	pit_state->irq_ack_notifier.gsi = 0;
 	pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq;
 	kvm_register_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
+	pit_state->pit_timer.reinject = true;
 	mutex_unlock(&pit->pit_state.lock);
 
 	kvm_pit_reset(pit);
 
+	pit->mask_notifier.func = pit_mask_notifer;
+	kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
+
 	return pit;
 }
 
@@ -592,6 +607,8 @@ void kvm_free_pit(struct kvm *kvm)
 	struct hrtimer *timer;
 
 	if (kvm->arch.vpit) {
+		kvm_unregister_irq_mask_notifier(kvm, 0,
+						 &kvm->arch.vpit->mask_notifier);
 		mutex_lock(&kvm->arch.vpit->pit_state.lock);
 		timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
 		hrtimer_cancel(timer);
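
The i8254.c hunks above wire the PIT into KVM's new IRQ mask-notifier machinery: a callback is registered for GSI 0 when the PIT is created and unregistered in kvm_free_pit(). Below is a minimal sketch of the same pattern for a hypothetical device; the callback signature and the register/unregister calls are the ones visible in the hunks, while struct my_dev and its fields are invented for illustration.

	/* Sketch only: hypothetical consumer of the mask-notifier API used above. */
	struct my_dev {
		struct kvm_irq_mask_notifier mask_notifier;	/* embedded, as in struct kvm_pit */
		atomic_t pending;
	};

	static void my_dev_mask_notify(struct kvm_irq_mask_notifier *kimn, bool mask)
	{
		struct my_dev *dev = container_of(kimn, struct my_dev, mask_notifier);

		if (!mask)			/* line unmasked: drop stale pending state */
			atomic_set(&dev->pending, 0);
	}

	static void my_dev_init(struct kvm *kvm, struct my_dev *dev)
	{
		dev->mask_notifier.func = my_dev_mask_notify;
		kvm_register_irq_mask_notifier(kvm, 0, &dev->mask_notifier);
	}

	static void my_dev_destroy(struct kvm *kvm, struct my_dev *dev)
	{
		/* unregister before the device is freed, as kvm_free_pit() does above */
		kvm_unregister_irq_mask_notifier(kvm, 0, &dev->mask_notifier);
	}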
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
index 4178022b97aa..6acbe4b505d5 100644
--- a/arch/x86/kvm/i8254.h
+++ b/arch/x86/kvm/i8254.h
@@ -9,6 +9,7 @@ struct kvm_kpit_timer {
 	s64 period; /* unit: ns */
 	s64 scheduled;
 	atomic_t pending;
+	bool reinject;
 };
 
 struct kvm_kpit_channel_state {
@@ -45,6 +46,7 @@ struct kvm_pit {
 	struct kvm *kvm;
 	struct kvm_kpit_state pit_state;
 	int irq_source_id;
+	struct kvm_irq_mask_notifier mask_notifier;
 };
 
 #define KVM_PIT_BASE_ADDRESS	0x40
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 179dcb0103fd..1ccb50c74f18 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -32,11 +32,13 @@
 #include <linux/kvm_host.h>
 
 static void pic_lock(struct kvm_pic *s)
+	__acquires(&s->lock)
 {
 	spin_lock(&s->lock);
 }
 
 static void pic_unlock(struct kvm_pic *s)
+	__releases(&s->lock)
 {
 	struct kvm *kvm = s->kvm;
 	unsigned acks = s->pending_acks;
@@ -49,7 +51,8 @@ static void pic_unlock(struct kvm_pic *s)
 	spin_unlock(&s->lock);
 
 	while (acks) {
-		kvm_notify_acked_irq(kvm, __ffs(acks));
+		kvm_notify_acked_irq(kvm, SELECT_PIC(__ffs(acks)),
+				     __ffs(acks));
 		acks &= acks - 1;
 	}
 
@@ -76,12 +79,13 @@ void kvm_pic_clear_isr_ack(struct kvm *kvm)
 /*
  * set irq level. If an edge is detected, then the IRR is set to 1
  */
-static inline void pic_set_irq1(struct kvm_kpic_state *s, int irq, int level)
+static inline int pic_set_irq1(struct kvm_kpic_state *s, int irq, int level)
 {
-	int mask;
+	int mask, ret = 1;
 	mask = 1 << irq;
 	if (s->elcr & mask)	/* level triggered */
 		if (level) {
+			ret = !(s->irr & mask);
 			s->irr |= mask;
 			s->last_irr |= mask;
 		} else {
@@ -90,11 +94,15 @@ static inline void pic_set_irq1(struct kvm_kpic_state *s, int irq, int level)
 		}
 	else	/* edge triggered */
 		if (level) {
-			if ((s->last_irr & mask) == 0)
+			if ((s->last_irr & mask) == 0) {
+				ret = !(s->irr & mask);
 				s->irr |= mask;
+			}
 			s->last_irr |= mask;
 		} else
 			s->last_irr &= ~mask;
+
+	return (s->imr & mask) ? -1 : ret;
 }
 
 /*
@@ -171,16 +179,19 @@ void kvm_pic_update_irq(struct kvm_pic *s)
 	pic_unlock(s);
 }
 
-void kvm_pic_set_irq(void *opaque, int irq, int level)
+int kvm_pic_set_irq(void *opaque, int irq, int level)
 {
 	struct kvm_pic *s = opaque;
+	int ret = -1;
 
 	pic_lock(s);
 	if (irq >= 0 && irq < PIC_NUM_PINS) {
-		pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
+		ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
 		pic_update_irq(s);
 	}
 	pic_unlock(s);
+
+	return ret;
 }
 
 /*
@@ -232,7 +243,7 @@ int kvm_pic_read_irq(struct kvm *kvm)
 	}
 	pic_update_irq(s);
 	pic_unlock(s);
-	kvm_notify_acked_irq(kvm, irq);
+	kvm_notify_acked_irq(kvm, SELECT_PIC(irq), irq);
 
 	return intno;
 }
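
With the i8259.c changes above, pic_set_irq1() and kvm_pic_set_irq() report what happened to the request instead of returning void. Reading the hunks: -1 means the pin is masked by the IMR, 0 means the interrupt coalesced into an already-set IRR bit, and 1 means a new interrupt was latched. A sketch of a caller using that convention follows; the wrapper function itself is hypothetical.

	/* Sketch: interpreting the return value introduced above
	 * (-1 masked, 0 coalesced, 1 newly latched). */
	static void example_set_irq(struct kvm_pic *pic, int irq, int level)
	{
		int ret = kvm_pic_set_irq(pic, irq, level);

		if (ret < 0)
			pr_debug("irq %d: masked by IMR, nothing injected\n", irq);
		else if (ret == 0)
			pr_debug("irq %d: coalesced, IRR bit already set\n", irq);
		else
			pr_debug("irq %d: newly latched\n", irq);
	}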
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index 82579ee538d0..9f593188129e 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -32,6 +32,8 @@
32#include "lapic.h" 32#include "lapic.h"
33 33
34#define PIC_NUM_PINS 16 34#define PIC_NUM_PINS 16
35#define SELECT_PIC(irq) \
36 ((irq) < 8 ? KVM_IRQCHIP_PIC_MASTER : KVM_IRQCHIP_PIC_SLAVE)
35 37
36struct kvm; 38struct kvm;
37struct kvm_vcpu; 39struct kvm_vcpu;
diff --git a/arch/x86/kvm/kvm_svm.h b/arch/x86/kvm/kvm_svm.h
index 8e5ee99551f6..ed66e4c078dc 100644
--- a/arch/x86/kvm/kvm_svm.h
+++ b/arch/x86/kvm/kvm_svm.h
@@ -18,7 +18,6 @@ static const u32 host_save_user_msrs[] = {
 };
 
 #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
-#define NUM_DB_REGS 4
 
 struct kvm_vcpu;
 
@@ -29,18 +28,23 @@ struct vcpu_svm {
 	struct svm_cpu_data *svm_data;
 	uint64_t asid_generation;
 
-	unsigned long db_regs[NUM_DB_REGS];
-
 	u64 next_rip;
 
 	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
 	u64 host_gs_base;
 	unsigned long host_cr2;
-	unsigned long host_db_regs[NUM_DB_REGS];
-	unsigned long host_dr6;
-	unsigned long host_dr7;
 
 	u32 *msrpm;
+	struct vmcb *hsave;
+	u64 hsave_msr;
+
+	u64 nested_vmcb;
+
+	/* These are the merged vectors */
+	u32 *nested_msrpm;
+
+	/* gpa pointers to the real vectors */
+	u64 nested_vmcb_msrpm;
 };
 
 #endif
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2d4477c71473..2a36f7f7c4c7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -145,11 +145,20 @@ struct kvm_rmap_desc {
 	struct kvm_rmap_desc *more;
 };
 
-struct kvm_shadow_walk {
-	int (*entry)(struct kvm_shadow_walk *walk, struct kvm_vcpu *vcpu,
-		     u64 addr, u64 *spte, int level);
+struct kvm_shadow_walk_iterator {
+	u64 addr;
+	hpa_t shadow_addr;
+	int level;
+	u64 *sptep;
+	unsigned index;
 };
 
+#define for_each_shadow_entry(_vcpu, _addr, _walker)		\
+	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
+	     shadow_walk_okay(&(_walker));			\
+	     shadow_walk_next(&(_walker)))
+
+
 struct kvm_unsync_walk {
 	int (*entry) (struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk);
 };
@@ -343,7 +352,6 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
 
 	BUG_ON(!mc->nobjs);
 	p = mc->objects[--mc->nobjs];
-	memset(p, 0, size);
 	return p;
 }
 
@@ -794,10 +802,8 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
 	INIT_LIST_HEAD(&sp->oos_link);
-	ASSERT(is_empty_shadow_page(sp->spt));
 	bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
 	sp->multimapped = 0;
-	sp->global = 1;
 	sp->parent_pte = parent_pte;
 	--vcpu->kvm->arch.n_free_mmu_pages;
 	return sp;
@@ -983,8 +989,8 @@ struct kvm_mmu_pages {
 		idx < 512;					\
 		idx = find_next_bit(bitmap, 512, idx+1))
 
-int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
-		  int idx)
+static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
+			 int idx)
 {
 	int i;
 
@@ -1059,7 +1065,7 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 	index = kvm_page_table_hashfn(gfn);
 	bucket = &kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry(sp, node, bucket, hash_link)
-		if (sp->gfn == gfn && !sp->role.metaphysical
+		if (sp->gfn == gfn && !sp->role.direct
 		    && !sp->role.invalid) {
 			pgprintk("%s: found role %x\n",
 				 __func__, sp->role.word);
@@ -1115,8 +1121,9 @@ struct mmu_page_path {
 		i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});	\
 		i = mmu_pages_next(&pvec, &parents, i))
 
-int mmu_pages_next(struct kvm_mmu_pages *pvec, struct mmu_page_path *parents,
-		   int i)
+static int mmu_pages_next(struct kvm_mmu_pages *pvec,
+			  struct mmu_page_path *parents,
+			  int i)
 {
 	int n;
 
@@ -1135,7 +1142,7 @@ int mmu_pages_next(struct kvm_mmu_pages *pvec, struct mmu_page_path *parents,
 	return n;
 }
 
-void mmu_pages_clear_parents(struct mmu_page_path *parents)
+static void mmu_pages_clear_parents(struct mmu_page_path *parents)
 {
 	struct kvm_mmu_page *sp;
 	unsigned int level = 0;
@@ -1193,7 +1200,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 					     gfn_t gfn,
 					     gva_t gaddr,
 					     unsigned level,
-					     int metaphysical,
+					     int direct,
 					     unsigned access,
 					     u64 *parent_pte)
 {
@@ -1204,10 +1211,9 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	struct kvm_mmu_page *sp;
 	struct hlist_node *node, *tmp;
 
-	role.word = 0;
-	role.glevels = vcpu->arch.mmu.root_level;
+	role = vcpu->arch.mmu.base_role;
 	role.level = level;
-	role.metaphysical = metaphysical;
+	role.direct = direct;
 	role.access = access;
 	if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
 		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
@@ -1242,8 +1248,9 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
 	sp->gfn = gfn;
 	sp->role = role;
+	sp->global = role.cr4_pge;
 	hlist_add_head(&sp->hash_link, bucket);
-	if (!metaphysical) {
+	if (!direct) {
 		if (rmap_write_protect(vcpu->kvm, gfn))
 			kvm_flush_remote_tlbs(vcpu->kvm);
 		account_shadowed(vcpu->kvm, gfn);
@@ -1255,35 +1262,35 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	return sp;
 }
 
-static int walk_shadow(struct kvm_shadow_walk *walker,
-		       struct kvm_vcpu *vcpu, u64 addr)
+static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
+			     struct kvm_vcpu *vcpu, u64 addr)
 {
-	hpa_t shadow_addr;
-	int level;
-	int r;
-	u64 *sptep;
-	unsigned index;
-
-	shadow_addr = vcpu->arch.mmu.root_hpa;
-	level = vcpu->arch.mmu.shadow_root_level;
-	if (level == PT32E_ROOT_LEVEL) {
-		shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
-		shadow_addr &= PT64_BASE_ADDR_MASK;
-		if (!shadow_addr)
-			return 1;
-		--level;
+	iterator->addr = addr;
+	iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
+	iterator->level = vcpu->arch.mmu.shadow_root_level;
+	if (iterator->level == PT32E_ROOT_LEVEL) {
+		iterator->shadow_addr
+			= vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
+		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
+		--iterator->level;
+		if (!iterator->shadow_addr)
+			iterator->level = 0;
 	}
+}
 
-	while (level >= PT_PAGE_TABLE_LEVEL) {
-		index = SHADOW_PT_INDEX(addr, level);
-		sptep = ((u64 *)__va(shadow_addr)) + index;
-		r = walker->entry(walker, vcpu, addr, sptep, level);
-		if (r)
-			return r;
-		shadow_addr = *sptep & PT64_BASE_ADDR_MASK;
-		--level;
-	}
-	return 0;
+static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
+{
+	if (iterator->level < PT_PAGE_TABLE_LEVEL)
+		return false;
+	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
+	iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
+	return true;
+}
+
+static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
+{
+	iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
+	--iterator->level;
 }
 
 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
@@ -1388,7 +1395,7 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	kvm_mmu_page_unlink_children(kvm, sp);
 	kvm_mmu_unlink_parents(kvm, sp);
 	kvm_flush_remote_tlbs(kvm);
-	if (!sp->role.invalid && !sp->role.metaphysical)
+	if (!sp->role.invalid && !sp->role.direct)
 		unaccount_shadowed(kvm, sp->gfn);
 	if (sp->unsync)
 		kvm_unlink_unsync_page(kvm, sp);
@@ -1451,7 +1458,7 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 	index = kvm_page_table_hashfn(gfn);
 	bucket = &kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
-		if (sp->gfn == gfn && !sp->role.metaphysical) {
+		if (sp->gfn == gfn && !sp->role.direct) {
 			pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
 				 sp->role.word);
 			r = 1;
@@ -1463,11 +1470,20 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 
 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 {
+	unsigned index;
+	struct hlist_head *bucket;
 	struct kvm_mmu_page *sp;
+	struct hlist_node *node, *nn;
 
-	while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
-		pgprintk("%s: zap %lx %x\n", __func__, gfn, sp->role.word);
-		kvm_mmu_zap_page(kvm, sp);
+	index = kvm_page_table_hashfn(gfn);
+	bucket = &kvm->arch.mmu_page_hash[index];
+	hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) {
+		if (sp->gfn == gfn && !sp->role.direct
+		    && !sp->role.invalid) {
+			pgprintk("%s: zap %lx %x\n",
+				 __func__, gfn, sp->role.word);
+			kvm_mmu_zap_page(kvm, sp);
+		}
 	}
 }
 
@@ -1622,7 +1638,7 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
 	/* don't unsync if pagetable is shadowed with multiple roles */
 	hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
-		if (s->gfn != sp->gfn || s->role.metaphysical)
+		if (s->gfn != sp->gfn || s->role.direct)
 			continue;
 		if (s->role.word != sp->role.word)
 			return 1;
@@ -1669,8 +1685,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 	u64 mt_mask = shadow_mt_mask;
 	struct kvm_mmu_page *sp = page_header(__pa(shadow_pte));
 
-	if (!(vcpu->arch.cr4 & X86_CR4_PGE))
-		global = 0;
 	if (!global && sp->global) {
 		sp->global = 0;
 		if (sp->unsync) {
@@ -1777,12 +1791,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 			pgprintk("hfn old %lx new %lx\n",
 				 spte_to_pfn(*shadow_pte), pfn);
 			rmap_remove(vcpu->kvm, shadow_pte);
-		} else {
-			if (largepage)
-				was_rmapped = is_large_pte(*shadow_pte);
-			else
-				was_rmapped = 1;
-		}
+		} else
+			was_rmapped = 1;
 	}
 	if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault,
 		      dirty, largepage, global, gfn, pfn, speculative, true)) {
@@ -1820,67 +1830,42 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 {
 }
 
-struct direct_shadow_walk {
-	struct kvm_shadow_walk walker;
-	pfn_t pfn;
-	int write;
-	int largepage;
-	int pt_write;
-};
-
-static int direct_map_entry(struct kvm_shadow_walk *_walk,
-			    struct kvm_vcpu *vcpu,
-			    u64 addr, u64 *sptep, int level)
+static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
+			int largepage, gfn_t gfn, pfn_t pfn)
 {
-	struct direct_shadow_walk *walk =
-		container_of(_walk, struct direct_shadow_walk, walker);
+	struct kvm_shadow_walk_iterator iterator;
 	struct kvm_mmu_page *sp;
+	int pt_write = 0;
 	gfn_t pseudo_gfn;
-	gfn_t gfn = addr >> PAGE_SHIFT;
-
-	if (level == PT_PAGE_TABLE_LEVEL
-	    || (walk->largepage && level == PT_DIRECTORY_LEVEL)) {
-		mmu_set_spte(vcpu, sptep, ACC_ALL, ACC_ALL,
-			     0, walk->write, 1, &walk->pt_write,
-			     walk->largepage, 0, gfn, walk->pfn, false);
-		++vcpu->stat.pf_fixed;
-		return 1;
-	}
 
-	if (*sptep == shadow_trap_nonpresent_pte) {
-		pseudo_gfn = (addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
-		sp = kvm_mmu_get_page(vcpu, pseudo_gfn, (gva_t)addr, level - 1,
-				      1, ACC_ALL, sptep);
-		if (!sp) {
-			pgprintk("nonpaging_map: ENOMEM\n");
-			kvm_release_pfn_clean(walk->pfn);
-			return -ENOMEM;
-		}
+	for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
+		if (iterator.level == PT_PAGE_TABLE_LEVEL
+		    || (largepage && iterator.level == PT_DIRECTORY_LEVEL)) {
+			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
+				     0, write, 1, &pt_write,
+				     largepage, 0, gfn, pfn, false);
+			++vcpu->stat.pf_fixed;
+			break;
+		}
 
-		set_shadow_pte(sptep,
-			       __pa(sp->spt)
-			       | PT_PRESENT_MASK | PT_WRITABLE_MASK
-			       | shadow_user_mask | shadow_x_mask);
-	}
-	return 0;
-}
+		if (*iterator.sptep == shadow_trap_nonpresent_pte) {
+			pseudo_gfn = (iterator.addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
+			sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
+					      iterator.level - 1,
+					      1, ACC_ALL, iterator.sptep);
+			if (!sp) {
+				pgprintk("nonpaging_map: ENOMEM\n");
+				kvm_release_pfn_clean(pfn);
+				return -ENOMEM;
+			}
 
-static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
-			int largepage, gfn_t gfn, pfn_t pfn)
-{
-	int r;
-	struct direct_shadow_walk walker = {
-		.walker = { .entry = direct_map_entry, },
-		.pfn = pfn,
-		.largepage = largepage,
-		.write = write,
-		.pt_write = 0,
-	};
-
-	r = walk_shadow(&walker.walker, vcpu, gfn << PAGE_SHIFT);
-	if (r < 0)
-		return r;
-	return walker.pt_write;
+			set_shadow_pte(iterator.sptep,
+				       __pa(sp->spt)
+				       | PT_PRESENT_MASK | PT_WRITABLE_MASK
+				       | shadow_user_mask | shadow_x_mask);
+		}
+	}
+	return pt_write;
 }
 
 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
@@ -1962,7 +1947,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 	int i;
 	gfn_t root_gfn;
 	struct kvm_mmu_page *sp;
-	int metaphysical = 0;
+	int direct = 0;
 
 	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
 
@@ -1971,18 +1956,18 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 
 		ASSERT(!VALID_PAGE(root));
 		if (tdp_enabled)
-			metaphysical = 1;
+			direct = 1;
 		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
-				      PT64_ROOT_LEVEL, metaphysical,
+				      PT64_ROOT_LEVEL, direct,
 				      ACC_ALL, NULL);
 		root = __pa(sp->spt);
 		++sp->root_count;
 		vcpu->arch.mmu.root_hpa = root;
 		return;
 	}
-	metaphysical = !is_paging(vcpu);
+	direct = !is_paging(vcpu);
 	if (tdp_enabled)
-		metaphysical = 1;
+		direct = 1;
 	for (i = 0; i < 4; ++i) {
 		hpa_t root = vcpu->arch.mmu.pae_root[i];
 
@@ -1996,7 +1981,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 		} else if (vcpu->arch.mmu.root_level == 0)
 			root_gfn = 0;
 		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
-				      PT32_ROOT_LEVEL, metaphysical,
+				      PT32_ROOT_LEVEL, direct,
 				      ACC_ALL, NULL);
 		root = __pa(sp->spt);
 		++sp->root_count;
@@ -2251,17 +2236,23 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 
 static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
 {
+	int r;
+
 	ASSERT(vcpu);
 	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
 	if (!is_paging(vcpu))
-		return nonpaging_init_context(vcpu);
+		r = nonpaging_init_context(vcpu);
 	else if (is_long_mode(vcpu))
-		return paging64_init_context(vcpu);
+		r = paging64_init_context(vcpu);
 	else if (is_pae(vcpu))
-		return paging32E_init_context(vcpu);
+		r = paging32E_init_context(vcpu);
 	else
-		return paging32_init_context(vcpu);
+		r = paging32_init_context(vcpu);
+
+	vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level;
+
+	return r;
 }
 
 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
@@ -2492,7 +2483,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	index = kvm_page_table_hashfn(gfn);
 	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
-		if (sp->gfn != gfn || sp->role.metaphysical || sp->role.invalid)
+		if (sp->gfn != gfn || sp->role.direct || sp->role.invalid)
 			continue;
 		pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
 		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
@@ -3130,7 +3121,7 @@ static void audit_write_protection(struct kvm_vcpu *vcpu)
 	gfn_t gfn;
 
 	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
-		if (sp->role.metaphysical)
+		if (sp->role.direct)
 			continue;
 
 		gfn = unalias_gfn(vcpu->kvm, sp->gfn);
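
The common thread in the mmu.c hunks is replacing the callback-driven walk_shadow() with an open-coded iterator. A minimal sketch of the new idiom, using only the names introduced above (the surrounding function is hypothetical):

	/* Sketch: the for_each_shadow_entry() idiom added by this patch. */
	static u64 *find_leaf_spte(struct kvm_vcpu *vcpu, u64 addr)
	{
		struct kvm_shadow_walk_iterator iterator;

		for_each_shadow_entry(vcpu, addr, iterator) {
			if (iterator.level == PT_PAGE_TABLE_LEVEL)
				return iterator.sptep;	/* reached the leaf */
			if (!is_shadow_present_pte(*iterator.sptep))
				break;			/* walk dead-ends here */
		}
		return NULL;
	}

Because break, continue and early return now live in the caller, __direct_map(), FNAME(fetch) and FNAME(invlpg) can drop their former per-walk callback structs, which is most of what the large hunks above and below do.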
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 258e5d56298e..eaab2145f62b 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -54,7 +54,7 @@ static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
 static inline int is_long_mode(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_X86_64
-	return vcpu->arch.shadow_efer & EFER_LME;
+	return vcpu->arch.shadow_efer & EFER_LMA;
#else
 	return 0;
 #endif
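
The one-line mmu.h fix above is subtle: EFER.LME merely requests long mode, while EFER.LMA is set by the processor once long mode is actually active (after paging is enabled), so LMA is the correct bit for is_long_mode(). For orientation, the architectural bit positions are sketched below; they are reference values, not part of this patch.

	/* Architectural EFER bits (reference only, not from this diff): */
	#define _EFER_LME	8	/* Long Mode Enable: set by software */
	#define _EFER_LMA	10	/* Long Mode Active: set by the CPU  */
	#define EFER_LME	(1ULL << _EFER_LME)
	#define EFER_LMA	(1ULL << _EFER_LMA)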
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 9fd78b6e17ad..6bd70206c561 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -25,7 +25,6 @@
 #if PTTYPE == 64
 	#define pt_element_t u64
 	#define guest_walker guest_walker64
-	#define shadow_walker shadow_walker64
 	#define FNAME(name) paging##64_##name
 	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
 	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
@@ -42,7 +41,6 @@
 #elif PTTYPE == 32
 	#define pt_element_t u32
 	#define guest_walker guest_walker32
-	#define shadow_walker shadow_walker32
 	#define FNAME(name) paging##32_##name
 	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
 	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
@@ -73,18 +71,6 @@ struct guest_walker {
 	u32 error_code;
 };
 
-struct shadow_walker {
-	struct kvm_shadow_walk walker;
-	struct guest_walker *guest_walker;
-	int user_fault;
-	int write_fault;
-	int largepage;
-	int *ptwrite;
-	pfn_t pfn;
-	u64 *sptep;
-	gpa_t pte_gpa;
-};
-
 static gfn_t gpte_to_gfn(pt_element_t gpte)
 {
 	return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
@@ -283,91 +269,79 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 /*
  * Fetch a shadow pte for a specific level in the paging hierarchy.
  */
-static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
-				    struct kvm_vcpu *vcpu, u64 addr,
-				    u64 *sptep, int level)
+static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
+			 struct guest_walker *gw,
+			 int user_fault, int write_fault, int largepage,
+			 int *ptwrite, pfn_t pfn)
 {
-	struct shadow_walker *sw =
-		container_of(_sw, struct shadow_walker, walker);
-	struct guest_walker *gw = sw->guest_walker;
 	unsigned access = gw->pt_access;
 	struct kvm_mmu_page *shadow_page;
-	u64 spte;
-	int metaphysical;
+	u64 spte, *sptep;
+	int direct;
 	gfn_t table_gfn;
 	int r;
+	int level;
 	pt_element_t curr_pte;
+	struct kvm_shadow_walk_iterator iterator;
 
-	if (level == PT_PAGE_TABLE_LEVEL
-	    || (sw->largepage && level == PT_DIRECTORY_LEVEL)) {
-		mmu_set_spte(vcpu, sptep, access, gw->pte_access & access,
-			     sw->user_fault, sw->write_fault,
-			     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
-			     sw->ptwrite, sw->largepage,
-			     gw->ptes[gw->level-1] & PT_GLOBAL_MASK,
-			     gw->gfn, sw->pfn, false);
-		sw->sptep = sptep;
-		return 1;
-	}
+	if (!is_present_pte(gw->ptes[gw->level - 1]))
+		return NULL;
 
-	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
-		return 0;
+	for_each_shadow_entry(vcpu, addr, iterator) {
+		level = iterator.level;
+		sptep = iterator.sptep;
+		if (level == PT_PAGE_TABLE_LEVEL
+		    || (largepage && level == PT_DIRECTORY_LEVEL)) {
+			mmu_set_spte(vcpu, sptep, access,
+				     gw->pte_access & access,
+				     user_fault, write_fault,
+				     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
+				     ptwrite, largepage,
+				     gw->ptes[gw->level-1] & PT_GLOBAL_MASK,
+				     gw->gfn, pfn, false);
+			break;
+		}
 
-	if (is_large_pte(*sptep)) {
-		set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
-		kvm_flush_remote_tlbs(vcpu->kvm);
-		rmap_remove(vcpu->kvm, sptep);
-	}
+		if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
+			continue;
 
-	if (level == PT_DIRECTORY_LEVEL && gw->level == PT_DIRECTORY_LEVEL) {
-		metaphysical = 1;
-		if (!is_dirty_pte(gw->ptes[level - 1]))
-			access &= ~ACC_WRITE_MASK;
-		table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
-	} else {
-		metaphysical = 0;
-		table_gfn = gw->table_gfn[level - 2];
-	}
-	shadow_page = kvm_mmu_get_page(vcpu, table_gfn, (gva_t)addr, level-1,
-				       metaphysical, access, sptep);
-	if (!metaphysical) {
-		r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2],
-					  &curr_pte, sizeof(curr_pte));
-		if (r || curr_pte != gw->ptes[level - 2]) {
-			kvm_mmu_put_page(shadow_page, sptep);
-			kvm_release_pfn_clean(sw->pfn);
-			sw->sptep = NULL;
-			return 1;
-		}
-	}
+		if (is_large_pte(*sptep)) {
+			rmap_remove(vcpu->kvm, sptep);
+			set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
+			kvm_flush_remote_tlbs(vcpu->kvm);
+		}
 
-	spte = __pa(shadow_page->spt) | PT_PRESENT_MASK | PT_ACCESSED_MASK
-		| PT_WRITABLE_MASK | PT_USER_MASK;
-	*sptep = spte;
-	return 0;
-}
-
-static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
-			 struct guest_walker *guest_walker,
-			 int user_fault, int write_fault, int largepage,
-			 int *ptwrite, pfn_t pfn)
-{
-	struct shadow_walker walker = {
-		.walker = { .entry = FNAME(shadow_walk_entry), },
-		.guest_walker = guest_walker,
-		.user_fault = user_fault,
-		.write_fault = write_fault,
-		.largepage = largepage,
-		.ptwrite = ptwrite,
-		.pfn = pfn,
-	};
-
-	if (!is_present_pte(guest_walker->ptes[guest_walker->level - 1]))
-		return NULL;
+		if (level == PT_DIRECTORY_LEVEL
+		    && gw->level == PT_DIRECTORY_LEVEL) {
+			direct = 1;
+			if (!is_dirty_pte(gw->ptes[level - 1]))
+				access &= ~ACC_WRITE_MASK;
+			table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
+		} else {
+			direct = 0;
+			table_gfn = gw->table_gfn[level - 2];
+		}
+		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
+					       direct, access, sptep);
+		if (!direct) {
+			r = kvm_read_guest_atomic(vcpu->kvm,
+						  gw->pte_gpa[level - 2],
+						  &curr_pte, sizeof(curr_pte));
+			if (r || curr_pte != gw->ptes[level - 2]) {
+				kvm_mmu_put_page(shadow_page, sptep);
+				kvm_release_pfn_clean(pfn);
+				sptep = NULL;
+				break;
+			}
+		}
 
-	walk_shadow(&walker.walker, vcpu, addr);
+		spte = __pa(shadow_page->spt)
+			| PT_PRESENT_MASK | PT_ACCESSED_MASK
+			| PT_WRITABLE_MASK | PT_USER_MASK;
+		*sptep = spte;
+	}
 
-	return walker.sptep;
+	return sptep;
 }
 
 /*
@@ -465,54 +439,56 @@ out_unlock:
 	return 0;
 }
 
-static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw,
-				      struct kvm_vcpu *vcpu, u64 addr,
-				      u64 *sptep, int level)
+static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
-	struct shadow_walker *sw =
-		container_of(_sw, struct shadow_walker, walker);
-
-	/* FIXME: properly handle invlpg on large guest pages */
-	if (level == PT_PAGE_TABLE_LEVEL ||
-	    ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) {
-		struct kvm_mmu_page *sp = page_header(__pa(sptep));
+	struct kvm_shadow_walk_iterator iterator;
+	pt_element_t gpte;
+	gpa_t pte_gpa = -1;
+	int level;
+	u64 *sptep;
+	int need_flush = 0;
 
-		sw->pte_gpa = (sp->gfn << PAGE_SHIFT);
-		sw->pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
+	spin_lock(&vcpu->kvm->mmu_lock);
 
-		if (is_shadow_present_pte(*sptep)) {
-			rmap_remove(vcpu->kvm, sptep);
-			if (is_large_pte(*sptep))
-				--vcpu->kvm->stat.lpages;
+	for_each_shadow_entry(vcpu, gva, iterator) {
+		level = iterator.level;
+		sptep = iterator.sptep;
+
+		/* FIXME: properly handle invlpg on large guest pages */
+		if (level == PT_PAGE_TABLE_LEVEL ||
+		    ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) {
+			struct kvm_mmu_page *sp = page_header(__pa(sptep));
+
+			pte_gpa = (sp->gfn << PAGE_SHIFT);
+			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
+
+			if (is_shadow_present_pte(*sptep)) {
+				rmap_remove(vcpu->kvm, sptep);
+				if (is_large_pte(*sptep))
+					--vcpu->kvm->stat.lpages;
+				need_flush = 1;
+			}
+			set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
+			break;
 		}
-		set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
-		return 1;
-	}
-	if (!is_shadow_present_pte(*sptep))
-		return 1;
-	return 0;
-}
 
-static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
-{
-	pt_element_t gpte;
-	struct shadow_walker walker = {
-		.walker = { .entry = FNAME(shadow_invlpg_entry), },
-		.pte_gpa = -1,
-	};
+		if (!is_shadow_present_pte(*sptep))
+			break;
+	}
 
-	spin_lock(&vcpu->kvm->mmu_lock);
-	walk_shadow(&walker.walker, vcpu, gva);
+	if (need_flush)
+		kvm_flush_remote_tlbs(vcpu->kvm);
 	spin_unlock(&vcpu->kvm->mmu_lock);
-	if (walker.pte_gpa == -1)
+
+	if (pte_gpa == -1)
 		return;
-	if (kvm_read_guest_atomic(vcpu->kvm, walker.pte_gpa, &gpte,
+	if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
 				  sizeof(pt_element_t)))
 		return;
 	if (is_present_pte(gpte) && (gpte & PT_ACCESSED_MASK)) {
 		if (mmu_topup_memory_caches(vcpu))
 			return;
-		kvm_mmu_pte_write(vcpu, walker.pte_gpa, (const u8 *)&gpte,
+		kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
 				  sizeof(pt_element_t), 0);
 	}
 }
@@ -540,7 +516,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
 	pt_element_t pt[256 / sizeof(pt_element_t)];
 	gpa_t pte_gpa;
 
-	if (sp->role.metaphysical
+	if (sp->role.direct
 	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
 		nonpaging_prefetch_page(vcpu, sp);
 		return;
@@ -619,7 +595,6 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
 #undef pt_element_t
 #undef guest_walker
-#undef shadow_walker
 #undef FNAME
 #undef PT_BASE_ADDR_MASK
 #undef PT_INDEX
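
For orientation: paging_tmpl.h is a preprocessor template that mmu.c includes twice, once per guest page-table width, which is why the shadow_walker #define had to be deleted from both PTTYPE blocks above. A simplified sketch of the instantiation pattern (the real #include lines live in mmu.c):

	/* Sketch, simplified from mmu.c: each inclusion stamps out one
	 * copy of every FNAME() function. */
	#define PTTYPE 64
	#include "paging_tmpl.h"	/* paging64_fetch(), paging64_invlpg(), ... */
	#undef PTTYPE

	#define PTTYPE 32
	#include "paging_tmpl.h"	/* paging32_fetch(), paging32_invlpg(), ... */
	#undef PTTYPE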
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index a9e769e4e251..1821c2078199 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -38,9 +38,6 @@ MODULE_LICENSE("GPL");
38#define IOPM_ALLOC_ORDER 2 38#define IOPM_ALLOC_ORDER 2
39#define MSRPM_ALLOC_ORDER 1 39#define MSRPM_ALLOC_ORDER 1
40 40
41#define DR7_GD_MASK (1 << 13)
42#define DR6_BD_MASK (1 << 13)
43
44#define SEG_TYPE_LDT 2 41#define SEG_TYPE_LDT 2
45#define SEG_TYPE_BUSY_TSS16 3 42#define SEG_TYPE_BUSY_TSS16 3
46 43
@@ -50,6 +47,15 @@ MODULE_LICENSE("GPL");
50 47
51#define DEBUGCTL_RESERVED_BITS (~(0x3fULL)) 48#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
52 49
50/* Turn on to get debugging output*/
51/* #define NESTED_DEBUG */
52
53#ifdef NESTED_DEBUG
54#define nsvm_printk(fmt, args...) printk(KERN_INFO fmt, ## args)
55#else
56#define nsvm_printk(fmt, args...) do {} while(0)
57#endif
58
53/* enable NPT for AMD64 and X86 with PAE */ 59/* enable NPT for AMD64 and X86 with PAE */
54#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) 60#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
55static bool npt_enabled = true; 61static bool npt_enabled = true;
@@ -60,14 +66,29 @@ static int npt = 1;
60 66
61module_param(npt, int, S_IRUGO); 67module_param(npt, int, S_IRUGO);
62 68
69static int nested = 0;
70module_param(nested, int, S_IRUGO);
71
63static void kvm_reput_irq(struct vcpu_svm *svm); 72static void kvm_reput_irq(struct vcpu_svm *svm);
64static void svm_flush_tlb(struct kvm_vcpu *vcpu); 73static void svm_flush_tlb(struct kvm_vcpu *vcpu);
65 74
75static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override);
76static int nested_svm_vmexit(struct vcpu_svm *svm);
77static int nested_svm_vmsave(struct vcpu_svm *svm, void *nested_vmcb,
78 void *arg2, void *opaque);
79static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
80 bool has_error_code, u32 error_code);
81
66static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu) 82static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
67{ 83{
68 return container_of(vcpu, struct vcpu_svm, vcpu); 84 return container_of(vcpu, struct vcpu_svm, vcpu);
69} 85}
70 86
87static inline bool is_nested(struct vcpu_svm *svm)
88{
89 return svm->nested_vmcb;
90}
91
71static unsigned long iopm_base; 92static unsigned long iopm_base;
72 93
73struct kvm_ldttss_desc { 94struct kvm_ldttss_desc {
@@ -157,32 +178,6 @@ static inline void kvm_write_cr2(unsigned long val)
157 asm volatile ("mov %0, %%cr2" :: "r" (val)); 178 asm volatile ("mov %0, %%cr2" :: "r" (val));
158} 179}
159 180
160static inline unsigned long read_dr6(void)
161{
162 unsigned long dr6;
163
164 asm volatile ("mov %%dr6, %0" : "=r" (dr6));
165 return dr6;
166}
167
168static inline void write_dr6(unsigned long val)
169{
170 asm volatile ("mov %0, %%dr6" :: "r" (val));
171}
172
173static inline unsigned long read_dr7(void)
174{
175 unsigned long dr7;
176
177 asm volatile ("mov %%dr7, %0" : "=r" (dr7));
178 return dr7;
179}
180
181static inline void write_dr7(unsigned long val)
182{
183 asm volatile ("mov %0, %%dr7" :: "r" (val));
184}
185
186static inline void force_new_asid(struct kvm_vcpu *vcpu) 181static inline void force_new_asid(struct kvm_vcpu *vcpu)
187{ 182{
188 to_svm(vcpu)->asid_generation--; 183 to_svm(vcpu)->asid_generation--;
@@ -198,7 +193,7 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
198 if (!npt_enabled && !(efer & EFER_LMA)) 193 if (!npt_enabled && !(efer & EFER_LMA))
199 efer &= ~EFER_LME; 194 efer &= ~EFER_LME;
200 195
201 to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK; 196 to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
202 vcpu->arch.shadow_efer = efer; 197 vcpu->arch.shadow_efer = efer;
203} 198}
204 199
@@ -207,6 +202,11 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
207{ 202{
208 struct vcpu_svm *svm = to_svm(vcpu); 203 struct vcpu_svm *svm = to_svm(vcpu);
209 204
205 /* If we are within a nested VM we'd better #VMEXIT and let the
206 guest handle the exception */
207 if (nested_svm_check_exception(svm, nr, has_error_code, error_code))
208 return;
209
210 svm->vmcb->control.event_inj = nr 210 svm->vmcb->control.event_inj = nr
211 | SVM_EVTINJ_VALID 211 | SVM_EVTINJ_VALID
212 | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0) 212 | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
@@ -242,7 +242,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
242 kvm_rip_write(vcpu, svm->next_rip); 242 kvm_rip_write(vcpu, svm->next_rip);
243 svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; 243 svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
244 244
245 vcpu->arch.interrupt_window_open = 1; 245 vcpu->arch.interrupt_window_open = (svm->vcpu.arch.hflags & HF_GIF_MASK);
246} 246}
247 247
248static int has_svm(void) 248static int has_svm(void)
@@ -250,7 +250,7 @@ static int has_svm(void)
250 const char *msg; 250 const char *msg;
251 251
252 if (!cpu_has_svm(&msg)) { 252 if (!cpu_has_svm(&msg)) {
253 printk(KERN_INFO "has_svn: %s\n", msg); 253 printk(KERN_INFO "has_svm: %s\n", msg);
254 return 0; 254 return 0;
255 } 255 }
256 256
@@ -292,7 +292,7 @@ static void svm_hardware_enable(void *garbage)
292 svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS); 292 svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
293 293
294 rdmsrl(MSR_EFER, efer); 294 rdmsrl(MSR_EFER, efer);
295 wrmsrl(MSR_EFER, efer | MSR_EFER_SVME_MASK); 295 wrmsrl(MSR_EFER, efer | EFER_SVME);
296 296
297 wrmsrl(MSR_VM_HSAVE_PA, 297 wrmsrl(MSR_VM_HSAVE_PA,
298 page_to_pfn(svm_data->save_area) << PAGE_SHIFT); 298 page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
@@ -417,6 +417,14 @@ static __init int svm_hardware_setup(void)
417 if (boot_cpu_has(X86_FEATURE_NX)) 417 if (boot_cpu_has(X86_FEATURE_NX))
418 kvm_enable_efer_bits(EFER_NX); 418 kvm_enable_efer_bits(EFER_NX);
419 419
420 if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
421 kvm_enable_efer_bits(EFER_FFXSR);
422
423 if (nested) {
424 printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
425 kvm_enable_efer_bits(EFER_SVME);
426 }
427
420 for_each_online_cpu(cpu) { 428 for_each_online_cpu(cpu) {
421 r = svm_cpu_init(cpu); 429 r = svm_cpu_init(cpu);
422 if (r) 430 if (r)
@@ -559,7 +567,7 @@ static void init_vmcb(struct vcpu_svm *svm)
559 init_sys_seg(&save->ldtr, SEG_TYPE_LDT); 567 init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
560 init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16); 568 init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
561 569
562 save->efer = MSR_EFER_SVME_MASK; 570 save->efer = EFER_SVME;
563 save->dr6 = 0xffff0ff0; 571 save->dr6 = 0xffff0ff0;
564 save->dr7 = 0x400; 572 save->dr7 = 0x400;
565 save->rflags = 2; 573 save->rflags = 2;
@@ -591,6 +599,9 @@ static void init_vmcb(struct vcpu_svm *svm)
591 save->cr4 = 0; 599 save->cr4 = 0;
592 } 600 }
593 force_new_asid(&svm->vcpu); 601 force_new_asid(&svm->vcpu);
602
603 svm->nested_vmcb = 0;
604 svm->vcpu.arch.hflags = HF_GIF_MASK;
594} 605}
595 606
596static int svm_vcpu_reset(struct kvm_vcpu *vcpu) 607static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
@@ -615,6 +626,8 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
615 struct vcpu_svm *svm; 626 struct vcpu_svm *svm;
616 struct page *page; 627 struct page *page;
617 struct page *msrpm_pages; 628 struct page *msrpm_pages;
629 struct page *hsave_page;
630 struct page *nested_msrpm_pages;
618 int err; 631 int err;
619 632
620 svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); 633 svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
@@ -637,14 +650,25 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
637 msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); 650 msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
638 if (!msrpm_pages) 651 if (!msrpm_pages)
639 goto uninit; 652 goto uninit;
653
654 nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
655 if (!nested_msrpm_pages)
656 goto uninit;
657
640 svm->msrpm = page_address(msrpm_pages); 658 svm->msrpm = page_address(msrpm_pages);
641 svm_vcpu_init_msrpm(svm->msrpm); 659 svm_vcpu_init_msrpm(svm->msrpm);
642 660
661 hsave_page = alloc_page(GFP_KERNEL);
662 if (!hsave_page)
663 goto uninit;
664 svm->hsave = page_address(hsave_page);
665
666 svm->nested_msrpm = page_address(nested_msrpm_pages);
667
643 svm->vmcb = page_address(page); 668 svm->vmcb = page_address(page);
644 clear_page(svm->vmcb); 669 clear_page(svm->vmcb);
645 svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT; 670 svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
646 svm->asid_generation = 0; 671 svm->asid_generation = 0;
647 memset(svm->db_regs, 0, sizeof(svm->db_regs));
648 init_vmcb(svm); 672 init_vmcb(svm);
649 673
650 fx_init(&svm->vcpu); 674 fx_init(&svm->vcpu);
@@ -669,6 +693,8 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
669 693
670 __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT)); 694 __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
671 __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER); 695 __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
696 __free_page(virt_to_page(svm->hsave));
697 __free_pages(virt_to_page(svm->nested_msrpm), MSRPM_ALLOC_ORDER);
672 kvm_vcpu_uninit(vcpu); 698 kvm_vcpu_uninit(vcpu);
673 kmem_cache_free(kvm_vcpu_cache, svm); 699 kmem_cache_free(kvm_vcpu_cache, svm);
674} 700}
@@ -718,6 +744,16 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
718 to_svm(vcpu)->vmcb->save.rflags = rflags; 744 to_svm(vcpu)->vmcb->save.rflags = rflags;
719} 745}
720 746
747static void svm_set_vintr(struct vcpu_svm *svm)
748{
749 svm->vmcb->control.intercept |= 1ULL << INTERCEPT_VINTR;
750}
751
752static void svm_clear_vintr(struct vcpu_svm *svm)
753{
754 svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
755}
756
721static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg) 757static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
722{ 758{
723 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; 759 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
@@ -760,20 +796,37 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
760 var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1; 796 var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
761 var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1; 797 var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
762 798
763 /* 799 switch (seg) {
764 * SVM always stores 0 for the 'G' bit in the CS selector in 800 case VCPU_SREG_CS:
765 * the VMCB on a VMEXIT. This hurts cross-vendor migration: 801 /*
766 * Intel's VMENTRY has a check on the 'G' bit. 802 * SVM always stores 0 for the 'G' bit in the CS selector in
767 */ 803 * the VMCB on a VMEXIT. This hurts cross-vendor migration:
768 if (seg == VCPU_SREG_CS) 804 * Intel's VMENTRY has a check on the 'G' bit.
805 */
769 var->g = s->limit > 0xfffff; 806 var->g = s->limit > 0xfffff;
770 807 break;
771 /* 808 case VCPU_SREG_TR:
772 * Work around a bug where the busy flag in the tr selector 809 /*
773 * isn't exposed 810 * Work around a bug where the busy flag in the tr selector
774 */ 811 * isn't exposed
775 if (seg == VCPU_SREG_TR) 812 */
776 var->type |= 0x2; 813 var->type |= 0x2;
814 break;
815 case VCPU_SREG_DS:
816 case VCPU_SREG_ES:
817 case VCPU_SREG_FS:
818 case VCPU_SREG_GS:
819 /*
820 * The accessed bit must always be set in the segment
821 * descriptor cache, although it can be cleared in the
822 * descriptor, the cached bit always remains at 1. Since
823 * Intel has a check on this, set it here to support
824 * cross-vendor migration.
825 */
826 if (!var->unusable)
827 var->type |= 0x1;
828 break;
829 }
777 830
778 var->unusable = !var->present; 831 var->unusable = !var->present;
779} 832}
@@ -905,9 +958,37 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
905 958
906} 959}
907 960
908static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg) 961static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
909{ 962{
910 return -EOPNOTSUPP; 963 int old_debug = vcpu->guest_debug;
964 struct vcpu_svm *svm = to_svm(vcpu);
965
966 vcpu->guest_debug = dbg->control;
967
968 svm->vmcb->control.intercept_exceptions &=
969 ~((1 << DB_VECTOR) | (1 << BP_VECTOR));
970 if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
971 if (vcpu->guest_debug &
972 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
973 svm->vmcb->control.intercept_exceptions |=
974 1 << DB_VECTOR;
975 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
976 svm->vmcb->control.intercept_exceptions |=
977 1 << BP_VECTOR;
978 } else
979 vcpu->guest_debug = 0;
980
981 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
982 svm->vmcb->save.dr7 = dbg->arch.debugreg[7];
983 else
984 svm->vmcb->save.dr7 = vcpu->arch.dr7;
985
986 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
987 svm->vmcb->save.rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
988 else if (old_debug & KVM_GUESTDBG_SINGLESTEP)
989 svm->vmcb->save.rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
990
991 return 0;
911} 992}
912 993
913static int svm_get_irq(struct kvm_vcpu *vcpu) 994static int svm_get_irq(struct kvm_vcpu *vcpu)
@@ -949,7 +1030,29 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
949 1030
950static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr) 1031static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
951{ 1032{
952 unsigned long val = to_svm(vcpu)->db_regs[dr]; 1033 struct vcpu_svm *svm = to_svm(vcpu);
1034 unsigned long val;
1035
1036 switch (dr) {
1037 case 0 ... 3:
1038 val = vcpu->arch.db[dr];
1039 break;
1040 case 6:
1041 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
1042 val = vcpu->arch.dr6;
1043 else
1044 val = svm->vmcb->save.dr6;
1045 break;
1046 case 7:
1047 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
1048 val = vcpu->arch.dr7;
1049 else
1050 val = svm->vmcb->save.dr7;
1051 break;
1052 default:
1053 val = 0;
1054 }
1055
953 KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler); 1056 KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
954 return val; 1057 return val;
955} 1058}
@@ -959,33 +1062,40 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
959{ 1062{
960 struct vcpu_svm *svm = to_svm(vcpu); 1063 struct vcpu_svm *svm = to_svm(vcpu);
961 1064
962 *exception = 0; 1065 KVMTRACE_2D(DR_WRITE, vcpu, (u32)dr, (u32)value, handler);
963 1066
964 if (svm->vmcb->save.dr7 & DR7_GD_MASK) { 1067 *exception = 0;
965 svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
966 svm->vmcb->save.dr6 |= DR6_BD_MASK;
967 *exception = DB_VECTOR;
968 return;
969 }
970 1068
971 switch (dr) { 1069 switch (dr) {
972 case 0 ... 3: 1070 case 0 ... 3:
973 svm->db_regs[dr] = value; 1071 vcpu->arch.db[dr] = value;
1072 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
1073 vcpu->arch.eff_db[dr] = value;
974 return; 1074 return;
975 case 4 ... 5: 1075 case 4 ... 5:
976 if (vcpu->arch.cr4 & X86_CR4_DE) { 1076 if (vcpu->arch.cr4 & X86_CR4_DE)
977 *exception = UD_VECTOR; 1077 *exception = UD_VECTOR;
1078 return;
1079 case 6:
1080 if (value & 0xffffffff00000000ULL) {
1081 *exception = GP_VECTOR;
978 return; 1082 return;
979 } 1083 }
980 case 7: { 1084 vcpu->arch.dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1;
981 if (value & ~((1ULL << 32) - 1)) { 1085 return;
1086 case 7:
1087 if (value & 0xffffffff00000000ULL) {
982 *exception = GP_VECTOR; 1088 *exception = GP_VECTOR;
983 return; 1089 return;
984 } 1090 }
985 svm->vmcb->save.dr7 = value; 1091 vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1;
1092 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
1093 svm->vmcb->save.dr7 = vcpu->arch.dr7;
1094 vcpu->arch.switch_db_regs = (value & DR7_BP_EN_MASK);
1095 }
986 return; 1096 return;
987 }
988 default: 1097 default:
1098 /* FIXME: Possible case? */
989 printk(KERN_DEBUG "%s: unexpected dr %u\n", 1099 printk(KERN_DEBUG "%s: unexpected dr %u\n",
990 __func__, dr); 1100 __func__, dr);
991 *exception = UD_VECTOR; 1101 *exception = UD_VECTOR;
@@ -1031,6 +1141,27 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1031 return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code); 1141 return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
1032} 1142}
1033 1143
1144static int db_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1145{
1146 if (!(svm->vcpu.guest_debug &
1147 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
1148 kvm_queue_exception(&svm->vcpu, DB_VECTOR);
1149 return 1;
1150 }
1151 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1152 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
1153 kvm_run->debug.arch.exception = DB_VECTOR;
1154 return 0;
1155}
1156
1157static int bp_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1158{
1159 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1160 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
1161 kvm_run->debug.arch.exception = BP_VECTOR;
1162 return 0;
1163}
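
The two handlers above encode a simple routing rule: a #DB is reflected back into the guest unless the host debugger requested single-stepping or hardware breakpoints, while an intercepted #BP always exits to user space (the #BP intercept is only armed when KVM_GUESTDBG_USE_SW_BP is set). As a sketch, with flag values assumed to match this patch series' kvm.h:

/* Routing rule of db_interception()/bp_interception(). */
#include <stdio.h>

#define KVM_GUESTDBG_SINGLESTEP 0x00000002 /* assumed encodings */
#define KVM_GUESTDBG_USE_HW_BP  0x00020000

static const char *route_db(unsigned int guest_debug)
{
	if (!(guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)))
		return "reinject #DB into the guest";
	return "KVM_EXIT_DEBUG to user space";
}

int main(void)
{
	printf("no debugger attached: %s\n", route_db(0));
	printf("hw breakpoints armed: %s\n",
	       route_db(KVM_GUESTDBG_USE_HW_BP));
	return 0;
}
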
1164
1034static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) 1165static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1035{ 1166{
1036 int er; 1167 int er;
@@ -1080,7 +1211,7 @@ static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1080static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) 1211static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1081{ 1212{
1082 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */ 1213 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
1083 int size, down, in, string, rep; 1214 int size, in, string;
1084 unsigned port; 1215 unsigned port;
1085 1216
1086 ++svm->vcpu.stat.io_exits; 1217 ++svm->vcpu.stat.io_exits;
@@ -1099,8 +1230,6 @@ static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1099 in = (io_info & SVM_IOIO_TYPE_MASK) != 0; 1230 in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
1100 port = io_info >> 16; 1231 port = io_info >> 16;
1101 size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT; 1232 size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
1102 rep = (io_info & SVM_IOIO_REP_MASK) != 0;
1103 down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;
1104 1233
1105 skip_emulated_instruction(&svm->vcpu); 1234 skip_emulated_instruction(&svm->vcpu);
1106 return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port); 1235 return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
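
With rep and down gone, the handler only decodes what kvm_emulate_pio() cannot derive from the instruction itself: direction, operand size, and port number, all packed into EXITINFO1. A standalone decode of a sample EXITINFO1, using the SVM_IOIO_* masks as this tree's svm.h defines them (assumed):

/* EXITINFO1 decode for an IOIO intercept, as in io_interception(). */
#include <stdint.h>
#include <stdio.h>

#define SVM_IOIO_TYPE_MASK  1           /* 1 = IN, 0 = OUT */
#define SVM_IOIO_SIZE_SHIFT 4
#define SVM_IOIO_SIZE_MASK  (7 << SVM_IOIO_SIZE_SHIFT)

int main(void)
{
	/* IN from port 0x3f8, one byte */
	uint32_t io_info = (0x3f8u << 16) | (1u << SVM_IOIO_SIZE_SHIFT) | 1u;

	int in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
	int size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
	unsigned int port = io_info >> 16;

	/* rep/down are no longer decoded here: kvm_emulate_pio() gets
	 * them from the instruction itself. */
	printf("%s port %#x, %d byte(s)\n", in ? "IN" : "OUT", port, size);
	return 0;
}
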
@@ -1139,6 +1268,567 @@ static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1139 return 1; 1268 return 1;
1140} 1269}
1141 1270
1271static int nested_svm_check_permissions(struct vcpu_svm *svm)
1272{
1273 if (!(svm->vcpu.arch.shadow_efer & EFER_SVME)
1274 || !is_paging(&svm->vcpu)) {
1275 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
1276 return 1;
1277 }
1278
1279 if (svm->vmcb->save.cpl) {
1280 kvm_inject_gp(&svm->vcpu, 0);
1281 return 1;
1282 }
1283
1284 return 0;
1285}
1286
1287static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
1288 bool has_error_code, u32 error_code)
1289{
1290 if (is_nested(svm)) {
1291 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
1292 svm->vmcb->control.exit_code_hi = 0;
1293 svm->vmcb->control.exit_info_1 = error_code;
1294 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
1295 if (nested_svm_exit_handled(svm, false)) {
1296 nsvm_printk("VMexit -> EXCP 0x%x\n", nr);
1297
1298 nested_svm_vmexit(svm);
1299 return 1;
1300 }
1301 }
1302
1303 return 0;
1304}
1305
1306static inline int nested_svm_intr(struct vcpu_svm *svm)
1307{
1308 if (is_nested(svm)) {
1309 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
1310 return 0;
1311
1312 if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
1313 return 0;
1314
1315 svm->vmcb->control.exit_code = SVM_EXIT_INTR;
1316
1317 if (nested_svm_exit_handled(svm, false)) {
1318 nsvm_printk("VMexit -> INTR\n");
1319 nested_svm_vmexit(svm);
1320 return 1;
1321 }
1322 }
1323
1324 return 0;
1325}
1326
1327static struct page *nested_svm_get_page(struct vcpu_svm *svm, u64 gpa)
1328{
1329 struct page *page;
1330
1331 down_read(&current->mm->mmap_sem);
1332 page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
1333 up_read(&current->mm->mmap_sem);
1334
1335 if (is_error_page(page)) {
1336 printk(KERN_INFO "%s: could not find page at 0x%llx\n",
1337 __func__, gpa);
1338 kvm_release_page_clean(page);
1339 kvm_inject_gp(&svm->vcpu, 0);
1340 return NULL;
1341 }
1342 return page;
1343}
1344
1345static int nested_svm_do(struct vcpu_svm *svm,
1346 u64 arg1_gpa, u64 arg2_gpa, void *opaque,
1347 int (*handler)(struct vcpu_svm *svm,
1348 void *arg1,
1349 void *arg2,
1350 void *opaque))
1351{
1352 struct page *arg1_page;
1353 struct page *arg2_page = NULL;
1354 void *arg1;
1355 void *arg2 = NULL;
1356 int retval;
1357
1358 arg1_page = nested_svm_get_page(svm, arg1_gpa);
 1359	if (arg1_page == NULL)
1360 return 1;
1361
1362 if (arg2_gpa) {
1363 arg2_page = nested_svm_get_page(svm, arg2_gpa);
 1364	if (arg2_page == NULL) {
1365 kvm_release_page_clean(arg1_page);
1366 return 1;
1367 }
1368 }
1369
1370 arg1 = kmap_atomic(arg1_page, KM_USER0);
1371 if (arg2_gpa)
1372 arg2 = kmap_atomic(arg2_page, KM_USER1);
1373
1374 retval = handler(svm, arg1, arg2, opaque);
1375
1376 kunmap_atomic(arg1, KM_USER0);
1377 if (arg2_gpa)
1378 kunmap_atomic(arg2, KM_USER1);
1379
1380 kvm_release_page_dirty(arg1_page);
1381 if (arg2_gpa)
1382 kvm_release_page_dirty(arg2_page);
1383
1384 return retval;
1385}
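
nested_svm_do() factors the map-two-guest-pages, call-handler, unmap-and-release dance out of every nested-SVM operation. A user-space analogue of the same callback pattern, with plain buffers standing in for gfn_to_page()/kmap_atomic():

/* User-space analogue of nested_svm_do(): resolve one or two "guest"
 * pages, run a handler on them with an opaque argument, then release
 * them. calloc() stands in for the page lookup and mapping. */
#include <stdio.h>
#include <stdlib.h>

typedef int (*nested_handler)(void *arg1, void *arg2, void *opaque);

static int with_pages(size_t len1, size_t len2, void *opaque,
		      nested_handler handler)
{
	void *arg1 = calloc(1, len1);
	void *arg2 = len2 ? calloc(1, len2) : NULL;
	int ret = 1; /* the kernel version returns 1 on lookup failure */

	if (arg1 && (!len2 || arg2))
		ret = handler(arg1, arg2, opaque);
	free(arg2);
	free(arg1);
	return ret;
}

static int dump(void *arg1, void *arg2, void *opaque)
{
	printf("handler: arg1=%p arg2=%p opaque=%s\n", arg1, arg2,
	       (const char *)opaque);
	return 0;
}

int main(void)
{
	char tag[] = "vmcb";
	return with_pages(4096, 0, tag, dump);
}
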
1386
1387static int nested_svm_exit_handled_real(struct vcpu_svm *svm,
1388 void *arg1,
1389 void *arg2,
1390 void *opaque)
1391{
1392 struct vmcb *nested_vmcb = (struct vmcb *)arg1;
1393 bool kvm_overrides = *(bool *)opaque;
1394 u32 exit_code = svm->vmcb->control.exit_code;
1395
1396 if (kvm_overrides) {
1397 switch (exit_code) {
1398 case SVM_EXIT_INTR:
1399 case SVM_EXIT_NMI:
1400 return 0;
 1401 /* For now we always handle NPFs when nested paging is in use */
1402 case SVM_EXIT_NPF:
1403 if (npt_enabled)
1404 return 0;
1405 break;
1406 /* When we're shadowing, trap PFs */
1407 case SVM_EXIT_EXCP_BASE + PF_VECTOR:
1408 if (!npt_enabled)
1409 return 0;
1410 break;
1411 default:
1412 break;
1413 }
1414 }
1415
1416 switch (exit_code) {
1417 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR8: {
1418 u32 cr_bits = 1 << (exit_code - SVM_EXIT_READ_CR0);
1419 if (nested_vmcb->control.intercept_cr_read & cr_bits)
1420 return 1;
1421 break;
1422 }
1423 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR8: {
1424 u32 cr_bits = 1 << (exit_code - SVM_EXIT_WRITE_CR0);
1425 if (nested_vmcb->control.intercept_cr_write & cr_bits)
1426 return 1;
1427 break;
1428 }
1429 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR7: {
1430 u32 dr_bits = 1 << (exit_code - SVM_EXIT_READ_DR0);
1431 if (nested_vmcb->control.intercept_dr_read & dr_bits)
1432 return 1;
1433 break;
1434 }
1435 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR7: {
1436 u32 dr_bits = 1 << (exit_code - SVM_EXIT_WRITE_DR0);
1437 if (nested_vmcb->control.intercept_dr_write & dr_bits)
1438 return 1;
1439 break;
1440 }
1441 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
1442 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
1443 if (nested_vmcb->control.intercept_exceptions & excp_bits)
1444 return 1;
1445 break;
1446 }
1447 default: {
1448 u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
1449 nsvm_printk("exit code: 0x%x\n", exit_code);
1450 if (nested_vmcb->control.intercept & exit_bits)
1451 return 1;
1452 }
1453 }
1454
1455 return 0;
1456}
1457
1458static int nested_svm_exit_handled_msr(struct vcpu_svm *svm,
1459 void *arg1, void *arg2,
1460 void *opaque)
1461{
1462 struct vmcb *nested_vmcb = (struct vmcb *)arg1;
1463 u8 *msrpm = (u8 *)arg2;
1464 u32 t0, t1;
1465 u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
1466 u32 param = svm->vmcb->control.exit_info_1 & 1;
1467
1468 if (!(nested_vmcb->control.intercept & (1ULL << INTERCEPT_MSR_PROT)))
1469 return 0;
1470
1471 switch(msr) {
1472 case 0 ... 0x1fff:
1473 t0 = (msr * 2) % 8;
1474 t1 = msr / 8;
1475 break;
1476 case 0xc0000000 ... 0xc0001fff:
1477 t0 = (8192 + msr - 0xc0000000) * 2;
1478 t1 = (t0 / 8);
1479 t0 %= 8;
1480 break;
1481 case 0xc0010000 ... 0xc0011fff:
1482 t0 = (16384 + msr - 0xc0010000) * 2;
1483 t1 = (t0 / 8);
1484 t0 %= 8;
1485 break;
1486 default:
 1487 return 1;
1489 }
1490 if (msrpm[t1] & ((1 << param) << t0))
1491 return 1;
1492
1493 return 0;
1494}
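
The MSR permission bitmap packs two bits per MSR (read, then write) in three 8192-entry ranges, so the lookup turns an MSR index into a bit offset, a byte index (t1) and a bit-within-byte (t0). Note that the first range's t1 = msr / 8 does not match that two-bits-per-MSR layout (msr / 4 would); the worked example below uses the self-consistent second-range arithmetic from the patch:

/* Worked example of the MSRPM lookup above, for the
 * 0xc0000000-0xc0001fff range, which starts after the 8192 entries of
 * the first range. Bit 0 of each pair = read, bit 1 = write. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t msr = 0xc0000080; /* EFER */
	uint32_t param = 1;        /* exit_info_1 bit 0: 1 = write */
	uint32_t t0, t1;

	t0 = (8192 + msr - 0xc0000000) * 2; /* bit offset into the bitmap */
	t1 = t0 / 8;                        /* byte index */
	t0 %= 8;                            /* bit within that byte */

	/* prints: byte 2080, mask 0x2 */
	printf("EFER write: byte %u, mask %#x\n", t1, (1u << param) << t0);
	return 0;
}
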
1495
1496static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override)
1497{
1498 bool k = kvm_override;
1499
1500 switch (svm->vmcb->control.exit_code) {
1501 case SVM_EXIT_MSR:
1502 return nested_svm_do(svm, svm->nested_vmcb,
1503 svm->nested_vmcb_msrpm, NULL,
1504 nested_svm_exit_handled_msr);
1505 default: break;
1506 }
1507
1508 return nested_svm_do(svm, svm->nested_vmcb, 0, &k,
1509 nested_svm_exit_handled_real);
1510}
1511
1512static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1,
1513 void *arg2, void *opaque)
1514{
1515 struct vmcb *nested_vmcb = (struct vmcb *)arg1;
1516 struct vmcb *hsave = svm->hsave;
1517 u64 nested_save[] = { nested_vmcb->save.cr0,
1518 nested_vmcb->save.cr3,
1519 nested_vmcb->save.cr4,
1520 nested_vmcb->save.efer,
1521 nested_vmcb->control.intercept_cr_read,
1522 nested_vmcb->control.intercept_cr_write,
1523 nested_vmcb->control.intercept_dr_read,
1524 nested_vmcb->control.intercept_dr_write,
1525 nested_vmcb->control.intercept_exceptions,
1526 nested_vmcb->control.intercept,
1527 nested_vmcb->control.msrpm_base_pa,
1528 nested_vmcb->control.iopm_base_pa,
1529 nested_vmcb->control.tsc_offset };
1530
1531 /* Give the current vmcb to the guest */
1532 memcpy(nested_vmcb, svm->vmcb, sizeof(struct vmcb));
1533 nested_vmcb->save.cr0 = nested_save[0];
1534 if (!npt_enabled)
1535 nested_vmcb->save.cr3 = nested_save[1];
1536 nested_vmcb->save.cr4 = nested_save[2];
1537 nested_vmcb->save.efer = nested_save[3];
1538 nested_vmcb->control.intercept_cr_read = nested_save[4];
1539 nested_vmcb->control.intercept_cr_write = nested_save[5];
1540 nested_vmcb->control.intercept_dr_read = nested_save[6];
1541 nested_vmcb->control.intercept_dr_write = nested_save[7];
1542 nested_vmcb->control.intercept_exceptions = nested_save[8];
1543 nested_vmcb->control.intercept = nested_save[9];
1544 nested_vmcb->control.msrpm_base_pa = nested_save[10];
1545 nested_vmcb->control.iopm_base_pa = nested_save[11];
1546 nested_vmcb->control.tsc_offset = nested_save[12];
1547
1548 /* We always set V_INTR_MASKING and remember the old value in hflags */
1549 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
1550 nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
1551
1552 if ((nested_vmcb->control.int_ctl & V_IRQ_MASK) &&
1553 (nested_vmcb->control.int_vector)) {
1554 nsvm_printk("WARNING: IRQ 0x%x still enabled on #VMEXIT\n",
1555 nested_vmcb->control.int_vector);
1556 }
1557
1558 /* Restore the original control entries */
1559 svm->vmcb->control = hsave->control;
1560
1561 /* Kill any pending exceptions */
 1562 if (svm->vcpu.arch.exception.pending)
1563 nsvm_printk("WARNING: Pending Exception\n");
1564 svm->vcpu.arch.exception.pending = false;
1565
1566 /* Restore selected save entries */
1567 svm->vmcb->save.es = hsave->save.es;
1568 svm->vmcb->save.cs = hsave->save.cs;
1569 svm->vmcb->save.ss = hsave->save.ss;
1570 svm->vmcb->save.ds = hsave->save.ds;
1571 svm->vmcb->save.gdtr = hsave->save.gdtr;
1572 svm->vmcb->save.idtr = hsave->save.idtr;
1573 svm->vmcb->save.rflags = hsave->save.rflags;
1574 svm_set_efer(&svm->vcpu, hsave->save.efer);
1575 svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
1576 svm_set_cr4(&svm->vcpu, hsave->save.cr4);
1577 if (npt_enabled) {
1578 svm->vmcb->save.cr3 = hsave->save.cr3;
1579 svm->vcpu.arch.cr3 = hsave->save.cr3;
1580 } else {
1581 kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
1582 }
1583 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
1584 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
1585 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
1586 svm->vmcb->save.dr7 = 0;
1587 svm->vmcb->save.cpl = 0;
1588 svm->vmcb->control.exit_int_info = 0;
1589
1590 svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
1591 /* Exit nested SVM mode */
1592 svm->nested_vmcb = 0;
1593
1594 return 0;
1595}
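
The nested_save[] array above implements a stash-copy-restore idiom: the bulk memcpy() hands the whole current VMCB back to the nested hypervisor, so the fields that hypervisor still owns are saved first and patched back afterwards. Reduced to its essence:

/* The stash-copy-restore idiom of nested_svm_vmexit_real(): a bulk
 * struct copy would clobber fields the nested hypervisor owns, so save
 * them first and write them back after the copy. */
#include <stdio.h>
#include <string.h>

struct vmcb_like { unsigned long cr3, rip; };

int main(void)
{
	struct vmcb_like nested = { .cr3 = 0x1000, .rip = 0xdead };
	struct vmcb_like current_vmcb = { .cr3 = 0x2000, .rip = 0xbeef };
	unsigned long saved_cr3 = nested.cr3;

	memcpy(&nested, &current_vmcb, sizeof(nested)); /* give state back */
	nested.cr3 = saved_cr3;          /* ...except the field it owns */

	printf("cr3=%#lx rip=%#lx\n", nested.cr3, nested.rip);
	return 0;
}
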
1596
1597static int nested_svm_vmexit(struct vcpu_svm *svm)
1598{
1599 nsvm_printk("VMexit\n");
1600 if (nested_svm_do(svm, svm->nested_vmcb, 0,
1601 NULL, nested_svm_vmexit_real))
1602 return 1;
1603
1604 kvm_mmu_reset_context(&svm->vcpu);
1605 kvm_mmu_load(&svm->vcpu);
1606
1607 return 0;
1608}
1609
1610static int nested_svm_vmrun_msrpm(struct vcpu_svm *svm, void *arg1,
1611 void *arg2, void *opaque)
1612{
1613 int i;
 1614 u32 *nested_msrpm = (u32 *)arg1;
 1615 for (i = 0; i < PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
1616 svm->nested_msrpm[i] = svm->msrpm[i] | nested_msrpm[i];
1617 svm->vmcb->control.msrpm_base_pa = __pa(svm->nested_msrpm);
1618
1619 return 0;
1620}
1621
1622static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
1623 void *arg2, void *opaque)
1624{
1625 struct vmcb *nested_vmcb = (struct vmcb *)arg1;
1626 struct vmcb *hsave = svm->hsave;
1627
1628 /* nested_vmcb is our indicator if nested SVM is activated */
1629 svm->nested_vmcb = svm->vmcb->save.rax;
1630
1631 /* Clear internal status */
1632 svm->vcpu.arch.exception.pending = false;
1633
1634 /* Save the old vmcb, so we don't need to pick what we save, but
1635 can restore everything when a VMEXIT occurs */
1636 memcpy(hsave, svm->vmcb, sizeof(struct vmcb));
1637 /* We need to remember the original CR3 in the SPT case */
1638 if (!npt_enabled)
1639 hsave->save.cr3 = svm->vcpu.arch.cr3;
1640 hsave->save.cr4 = svm->vcpu.arch.cr4;
1641 hsave->save.rip = svm->next_rip;
1642
1643 if (svm->vmcb->save.rflags & X86_EFLAGS_IF)
1644 svm->vcpu.arch.hflags |= HF_HIF_MASK;
1645 else
1646 svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
1647
1648 /* Load the nested guest state */
1649 svm->vmcb->save.es = nested_vmcb->save.es;
1650 svm->vmcb->save.cs = nested_vmcb->save.cs;
1651 svm->vmcb->save.ss = nested_vmcb->save.ss;
1652 svm->vmcb->save.ds = nested_vmcb->save.ds;
1653 svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
1654 svm->vmcb->save.idtr = nested_vmcb->save.idtr;
1655 svm->vmcb->save.rflags = nested_vmcb->save.rflags;
1656 svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
1657 svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
1658 svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
1659 if (npt_enabled) {
1660 svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
1661 svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
1662 } else {
1663 kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
1664 kvm_mmu_reset_context(&svm->vcpu);
1665 }
1666 svm->vmcb->save.cr2 = nested_vmcb->save.cr2;
1667 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
1668 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
1669 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
1670 /* In case we don't even reach vcpu_run, the fields are not updated */
1671 svm->vmcb->save.rax = nested_vmcb->save.rax;
1672 svm->vmcb->save.rsp = nested_vmcb->save.rsp;
1673 svm->vmcb->save.rip = nested_vmcb->save.rip;
1674 svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
1675 svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
1676 svm->vmcb->save.cpl = nested_vmcb->save.cpl;
1677
1678 /* We don't want a nested guest to be more powerful than the guest,
1679 so all intercepts are ORed */
1680 svm->vmcb->control.intercept_cr_read |=
1681 nested_vmcb->control.intercept_cr_read;
1682 svm->vmcb->control.intercept_cr_write |=
1683 nested_vmcb->control.intercept_cr_write;
1684 svm->vmcb->control.intercept_dr_read |=
1685 nested_vmcb->control.intercept_dr_read;
1686 svm->vmcb->control.intercept_dr_write |=
1687 nested_vmcb->control.intercept_dr_write;
1688 svm->vmcb->control.intercept_exceptions |=
1689 nested_vmcb->control.intercept_exceptions;
1690
1691 svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
1692
1693 svm->nested_vmcb_msrpm = nested_vmcb->control.msrpm_base_pa;
1694
1695 force_new_asid(&svm->vcpu);
1696 svm->vmcb->control.exit_int_info = nested_vmcb->control.exit_int_info;
1697 svm->vmcb->control.exit_int_info_err = nested_vmcb->control.exit_int_info_err;
1698 svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
1699 if (nested_vmcb->control.int_ctl & V_IRQ_MASK) {
1700 nsvm_printk("nSVM Injecting Interrupt: 0x%x\n",
1701 nested_vmcb->control.int_ctl);
1702 }
1703 if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
1704 svm->vcpu.arch.hflags |= HF_VINTR_MASK;
1705 else
1706 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
1707
1708 nsvm_printk("nSVM exit_int_info: 0x%x | int_state: 0x%x\n",
1709 nested_vmcb->control.exit_int_info,
1710 nested_vmcb->control.int_state);
1711
1712 svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
1713 svm->vmcb->control.int_state = nested_vmcb->control.int_state;
1714 svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
1715 if (nested_vmcb->control.event_inj & SVM_EVTINJ_VALID)
1716 nsvm_printk("Injecting Event: 0x%x\n",
1717 nested_vmcb->control.event_inj);
1718 svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
1719 svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
1720
1721 svm->vcpu.arch.hflags |= HF_GIF_MASK;
1722
1723 return 0;
1724}
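
ORing the intercept masks means a nested hypervisor can only add vm-exits, never remove one that KVM itself depends on. The invariant in two lines (bit values made up for the demo):

/* Union of intercepts: clearing a bit in the nested VMCB can never
 * drop an intercept KVM needs. */
#include <stdio.h>

int main(void)
{
	unsigned long long kvm_intercepts    = 1ULL << 0 | 1ULL << 60;
	unsigned long long nested_intercepts = 1ULL << 3;

	printf("effective: %#llx\n", kvm_intercepts | nested_intercepts);
	return 0;
}
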
1725
1726static int nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
1727{
1728 to_vmcb->save.fs = from_vmcb->save.fs;
1729 to_vmcb->save.gs = from_vmcb->save.gs;
1730 to_vmcb->save.tr = from_vmcb->save.tr;
1731 to_vmcb->save.ldtr = from_vmcb->save.ldtr;
1732 to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
1733 to_vmcb->save.star = from_vmcb->save.star;
1734 to_vmcb->save.lstar = from_vmcb->save.lstar;
1735 to_vmcb->save.cstar = from_vmcb->save.cstar;
1736 to_vmcb->save.sfmask = from_vmcb->save.sfmask;
1737 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
1738 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
1739 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
1740
1741 return 1;
1742}
1743
1744static int nested_svm_vmload(struct vcpu_svm *svm, void *nested_vmcb,
1745 void *arg2, void *opaque)
1746{
1747 return nested_svm_vmloadsave((struct vmcb *)nested_vmcb, svm->vmcb);
1748}
1749
1750static int nested_svm_vmsave(struct vcpu_svm *svm, void *nested_vmcb,
1751 void *arg2, void *opaque)
1752{
1753 return nested_svm_vmloadsave(svm->vmcb, (struct vmcb *)nested_vmcb);
1754}
1755
1756static int vmload_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1757{
1758 if (nested_svm_check_permissions(svm))
1759 return 1;
1760
1761 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1762 skip_emulated_instruction(&svm->vcpu);
1763
1764 nested_svm_do(svm, svm->vmcb->save.rax, 0, NULL, nested_svm_vmload);
1765
1766 return 1;
1767}
1768
1769static int vmsave_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1770{
1771 if (nested_svm_check_permissions(svm))
1772 return 1;
1773
1774 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1775 skip_emulated_instruction(&svm->vcpu);
1776
1777 nested_svm_do(svm, svm->vmcb->save.rax, 0, NULL, nested_svm_vmsave);
1778
1779 return 1;
1780}
1781
1782static int vmrun_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1783{
1784 nsvm_printk("VMrun\n");
1785 if (nested_svm_check_permissions(svm))
1786 return 1;
1787
1788 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1789 skip_emulated_instruction(&svm->vcpu);
1790
1791 if (nested_svm_do(svm, svm->vmcb->save.rax, 0,
1792 NULL, nested_svm_vmrun))
1793 return 1;
1794
1795 if (nested_svm_do(svm, svm->nested_vmcb_msrpm, 0,
1796 NULL, nested_svm_vmrun_msrpm))
1797 return 1;
1798
1799 return 1;
1800}
1801
1802static int stgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1803{
1804 if (nested_svm_check_permissions(svm))
1805 return 1;
1806
1807 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1808 skip_emulated_instruction(&svm->vcpu);
1809
1810 svm->vcpu.arch.hflags |= HF_GIF_MASK;
1811
1812 return 1;
1813}
1814
1815static int clgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1816{
1817 if (nested_svm_check_permissions(svm))
1818 return 1;
1819
1820 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1821 skip_emulated_instruction(&svm->vcpu);
1822
1823 svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
1824
 1825 /* After a CLGI no interrupts should be delivered */
1826 svm_clear_vintr(svm);
1827 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
1828
1829 return 1;
1830}
1831
1142static int invalid_op_interception(struct vcpu_svm *svm, 1832static int invalid_op_interception(struct vcpu_svm *svm,
1143 struct kvm_run *kvm_run) 1833 struct kvm_run *kvm_run)
1144{ 1834{
@@ -1250,6 +1940,15 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
1250 case MSR_IA32_LASTINTTOIP: 1940 case MSR_IA32_LASTINTTOIP:
1251 *data = svm->vmcb->save.last_excp_to; 1941 *data = svm->vmcb->save.last_excp_to;
1252 break; 1942 break;
1943 case MSR_VM_HSAVE_PA:
1944 *data = svm->hsave_msr;
1945 break;
1946 case MSR_VM_CR:
1947 *data = 0;
1948 break;
1949 case MSR_IA32_UCODE_REV:
1950 *data = 0x01000065;
1951 break;
1253 default: 1952 default:
1254 return kvm_get_msr_common(vcpu, ecx, data); 1953 return kvm_get_msr_common(vcpu, ecx, data);
1255 } 1954 }
@@ -1344,6 +2043,9 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
1344 pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", ecx, data); 2043 pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", ecx, data);
1345 2044
1346 break; 2045 break;
2046 case MSR_VM_HSAVE_PA:
2047 svm->hsave_msr = data;
2048 break;
1347 default: 2049 default:
1348 return kvm_set_msr_common(vcpu, ecx, data); 2050 return kvm_set_msr_common(vcpu, ecx, data);
1349 } 2051 }
@@ -1380,7 +2082,7 @@ static int interrupt_window_interception(struct vcpu_svm *svm,
1380{ 2082{
1381 KVMTRACE_0D(PEND_INTR, &svm->vcpu, handler); 2083 KVMTRACE_0D(PEND_INTR, &svm->vcpu, handler);
1382 2084
1383 svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR); 2085 svm_clear_vintr(svm);
1384 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK; 2086 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
1385 /* 2087 /*
1386 * If the user space waits to inject interrupts, exit as soon as 2088 * If the user space waits to inject interrupts, exit as soon as
@@ -1417,6 +2119,8 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
1417 [SVM_EXIT_WRITE_DR3] = emulate_on_interception, 2119 [SVM_EXIT_WRITE_DR3] = emulate_on_interception,
1418 [SVM_EXIT_WRITE_DR5] = emulate_on_interception, 2120 [SVM_EXIT_WRITE_DR5] = emulate_on_interception,
1419 [SVM_EXIT_WRITE_DR7] = emulate_on_interception, 2121 [SVM_EXIT_WRITE_DR7] = emulate_on_interception,
2122 [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
2123 [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
1420 [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception, 2124 [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
1421 [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception, 2125 [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
1422 [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception, 2126 [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
@@ -1436,12 +2140,12 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
1436 [SVM_EXIT_MSR] = msr_interception, 2140 [SVM_EXIT_MSR] = msr_interception,
1437 [SVM_EXIT_TASK_SWITCH] = task_switch_interception, 2141 [SVM_EXIT_TASK_SWITCH] = task_switch_interception,
1438 [SVM_EXIT_SHUTDOWN] = shutdown_interception, 2142 [SVM_EXIT_SHUTDOWN] = shutdown_interception,
1439 [SVM_EXIT_VMRUN] = invalid_op_interception, 2143 [SVM_EXIT_VMRUN] = vmrun_interception,
1440 [SVM_EXIT_VMMCALL] = vmmcall_interception, 2144 [SVM_EXIT_VMMCALL] = vmmcall_interception,
1441 [SVM_EXIT_VMLOAD] = invalid_op_interception, 2145 [SVM_EXIT_VMLOAD] = vmload_interception,
1442 [SVM_EXIT_VMSAVE] = invalid_op_interception, 2146 [SVM_EXIT_VMSAVE] = vmsave_interception,
1443 [SVM_EXIT_STGI] = invalid_op_interception, 2147 [SVM_EXIT_STGI] = stgi_interception,
1444 [SVM_EXIT_CLGI] = invalid_op_interception, 2148 [SVM_EXIT_CLGI] = clgi_interception,
1445 [SVM_EXIT_SKINIT] = invalid_op_interception, 2149 [SVM_EXIT_SKINIT] = invalid_op_interception,
1446 [SVM_EXIT_WBINVD] = emulate_on_interception, 2150 [SVM_EXIT_WBINVD] = emulate_on_interception,
1447 [SVM_EXIT_MONITOR] = invalid_op_interception, 2151 [SVM_EXIT_MONITOR] = invalid_op_interception,
@@ -1457,6 +2161,17 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1457 KVMTRACE_3D(VMEXIT, vcpu, exit_code, (u32)svm->vmcb->save.rip, 2161 KVMTRACE_3D(VMEXIT, vcpu, exit_code, (u32)svm->vmcb->save.rip,
1458 (u32)((u64)svm->vmcb->save.rip >> 32), entryexit); 2162 (u32)((u64)svm->vmcb->save.rip >> 32), entryexit);
1459 2163
2164 if (is_nested(svm)) {
2165 nsvm_printk("nested handle_exit: 0x%x | 0x%lx | 0x%lx | 0x%lx\n",
2166 exit_code, svm->vmcb->control.exit_info_1,
2167 svm->vmcb->control.exit_info_2, svm->vmcb->save.rip);
2168 if (nested_svm_exit_handled(svm, true)) {
2169 nested_svm_vmexit(svm);
2170 nsvm_printk("-> #VMEXIT\n");
2171 return 1;
2172 }
2173 }
2174
1460 if (npt_enabled) { 2175 if (npt_enabled) {
1461 int mmu_reload = 0; 2176 int mmu_reload = 0;
1462 if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) { 2177 if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
@@ -1544,6 +2259,8 @@ static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
1544{ 2259{
1545 struct vcpu_svm *svm = to_svm(vcpu); 2260 struct vcpu_svm *svm = to_svm(vcpu);
1546 2261
2262 nested_svm_intr(svm);
2263
1547 svm_inject_irq(svm, irq); 2264 svm_inject_irq(svm, irq);
1548} 2265}
1549 2266
@@ -1589,11 +2306,17 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu)
1589 if (!kvm_cpu_has_interrupt(vcpu)) 2306 if (!kvm_cpu_has_interrupt(vcpu))
1590 goto out; 2307 goto out;
1591 2308
2309 if (nested_svm_intr(svm))
2310 goto out;
2311
2312 if (!(svm->vcpu.arch.hflags & HF_GIF_MASK))
2313 goto out;
2314
1592 if (!(vmcb->save.rflags & X86_EFLAGS_IF) || 2315 if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
1593 (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) || 2316 (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
1594 (vmcb->control.event_inj & SVM_EVTINJ_VALID)) { 2317 (vmcb->control.event_inj & SVM_EVTINJ_VALID)) {
1595 /* unable to deliver irq, set pending irq */ 2318 /* unable to deliver irq, set pending irq */
1596 vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR); 2319 svm_set_vintr(svm);
1597 svm_inject_irq(svm, 0x0); 2320 svm_inject_irq(svm, 0x0);
1598 goto out; 2321 goto out;
1599 } 2322 }
@@ -1615,7 +2338,8 @@ static void kvm_reput_irq(struct vcpu_svm *svm)
1615 } 2338 }
1616 2339
1617 svm->vcpu.arch.interrupt_window_open = 2340 svm->vcpu.arch.interrupt_window_open =
1618 !(control->int_state & SVM_INTERRUPT_SHADOW_MASK); 2341 !(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
2342 (svm->vcpu.arch.hflags & HF_GIF_MASK);
1619} 2343}
1620 2344
1621static void svm_do_inject_vector(struct vcpu_svm *svm) 2345static void svm_do_inject_vector(struct vcpu_svm *svm)
@@ -1637,9 +2361,13 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu,
1637 struct vcpu_svm *svm = to_svm(vcpu); 2361 struct vcpu_svm *svm = to_svm(vcpu);
1638 struct vmcb_control_area *control = &svm->vmcb->control; 2362 struct vmcb_control_area *control = &svm->vmcb->control;
1639 2363
2364 if (nested_svm_intr(svm))
2365 return;
2366
1640 svm->vcpu.arch.interrupt_window_open = 2367 svm->vcpu.arch.interrupt_window_open =
1641 (!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) && 2368 (!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
1642 (svm->vmcb->save.rflags & X86_EFLAGS_IF)); 2369 (svm->vmcb->save.rflags & X86_EFLAGS_IF) &&
2370 (svm->vcpu.arch.hflags & HF_GIF_MASK));
1643 2371
1644 if (svm->vcpu.arch.interrupt_window_open && svm->vcpu.arch.irq_summary) 2372 if (svm->vcpu.arch.interrupt_window_open && svm->vcpu.arch.irq_summary)
1645 /* 2373 /*
@@ -1652,9 +2380,9 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu,
1652 */ 2380 */
1653 if (!svm->vcpu.arch.interrupt_window_open && 2381 if (!svm->vcpu.arch.interrupt_window_open &&
1654 (svm->vcpu.arch.irq_summary || kvm_run->request_interrupt_window)) 2382 (svm->vcpu.arch.irq_summary || kvm_run->request_interrupt_window))
1655 control->intercept |= 1ULL << INTERCEPT_VINTR; 2383 svm_set_vintr(svm);
1656 else 2384 else
1657 control->intercept &= ~(1ULL << INTERCEPT_VINTR); 2385 svm_clear_vintr(svm);
1658} 2386}
1659 2387
1660static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr) 2388static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
@@ -1662,22 +2390,6 @@ static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
1662 return 0; 2390 return 0;
1663} 2391}
1664 2392
1665static void save_db_regs(unsigned long *db_regs)
1666{
1667 asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
1668 asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
1669 asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
1670 asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
1671}
1672
1673static void load_db_regs(unsigned long *db_regs)
1674{
1675 asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
1676 asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
1677 asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
1678 asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
1679}
1680
1681static void svm_flush_tlb(struct kvm_vcpu *vcpu) 2393static void svm_flush_tlb(struct kvm_vcpu *vcpu)
1682{ 2394{
1683 force_new_asid(vcpu); 2395 force_new_asid(vcpu);
@@ -1736,19 +2448,12 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1736 gs_selector = kvm_read_gs(); 2448 gs_selector = kvm_read_gs();
1737 ldt_selector = kvm_read_ldt(); 2449 ldt_selector = kvm_read_ldt();
1738 svm->host_cr2 = kvm_read_cr2(); 2450 svm->host_cr2 = kvm_read_cr2();
1739 svm->host_dr6 = read_dr6(); 2451 if (!is_nested(svm))
1740 svm->host_dr7 = read_dr7(); 2452 svm->vmcb->save.cr2 = vcpu->arch.cr2;
1741 svm->vmcb->save.cr2 = vcpu->arch.cr2;
1742 /* required for live migration with NPT */ 2453 /* required for live migration with NPT */
1743 if (npt_enabled) 2454 if (npt_enabled)
1744 svm->vmcb->save.cr3 = vcpu->arch.cr3; 2455 svm->vmcb->save.cr3 = vcpu->arch.cr3;
1745 2456
1746 if (svm->vmcb->save.dr7 & 0xff) {
1747 write_dr7(0);
1748 save_db_regs(svm->host_db_regs);
1749 load_db_regs(svm->db_regs);
1750 }
1751
1752 clgi(); 2457 clgi();
1753 2458
1754 local_irq_enable(); 2459 local_irq_enable();
@@ -1824,16 +2529,11 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1824#endif 2529#endif
1825 ); 2530 );
1826 2531
1827 if ((svm->vmcb->save.dr7 & 0xff))
1828 load_db_regs(svm->host_db_regs);
1829
1830 vcpu->arch.cr2 = svm->vmcb->save.cr2; 2532 vcpu->arch.cr2 = svm->vmcb->save.cr2;
1831 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; 2533 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
1832 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; 2534 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
1833 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; 2535 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
1834 2536
1835 write_dr6(svm->host_dr6);
1836 write_dr7(svm->host_dr7);
1837 kvm_write_cr2(svm->host_cr2); 2537 kvm_write_cr2(svm->host_cr2);
1838 2538
1839 kvm_load_fs(fs_selector); 2539 kvm_load_fs(fs_selector);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 7611af576829..bb481330716f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -91,6 +91,7 @@ struct vcpu_vmx {
91 } rmode; 91 } rmode;
92 int vpid; 92 int vpid;
93 bool emulation_required; 93 bool emulation_required;
94 enum emulation_result invalid_state_emulation_result;
94 95
95 /* Support for vnmi-less CPUs */ 96 /* Support for vnmi-less CPUs */
96 int soft_vnmi_blocked; 97 int soft_vnmi_blocked;
@@ -189,21 +190,21 @@ static inline int is_page_fault(u32 intr_info)
189{ 190{
190 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | 191 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
191 INTR_INFO_VALID_MASK)) == 192 INTR_INFO_VALID_MASK)) ==
192 (INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK); 193 (INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
193} 194}
194 195
195static inline int is_no_device(u32 intr_info) 196static inline int is_no_device(u32 intr_info)
196{ 197{
197 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | 198 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
198 INTR_INFO_VALID_MASK)) == 199 INTR_INFO_VALID_MASK)) ==
199 (INTR_TYPE_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK); 200 (INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
200} 201}
201 202
202static inline int is_invalid_opcode(u32 intr_info) 203static inline int is_invalid_opcode(u32 intr_info)
203{ 204{
204 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | 205 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
205 INTR_INFO_VALID_MASK)) == 206 INTR_INFO_VALID_MASK)) ==
206 (INTR_TYPE_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK); 207 (INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
207} 208}
208 209
209static inline int is_external_interrupt(u32 intr_info) 210static inline int is_external_interrupt(u32 intr_info)
@@ -480,8 +481,13 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
480 eb = (1u << PF_VECTOR) | (1u << UD_VECTOR); 481 eb = (1u << PF_VECTOR) | (1u << UD_VECTOR);
481 if (!vcpu->fpu_active) 482 if (!vcpu->fpu_active)
482 eb |= 1u << NM_VECTOR; 483 eb |= 1u << NM_VECTOR;
483 if (vcpu->guest_debug.enabled) 484 if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
484 eb |= 1u << DB_VECTOR; 485 if (vcpu->guest_debug &
486 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
487 eb |= 1u << DB_VECTOR;
488 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
489 eb |= 1u << BP_VECTOR;
490 }
485 if (vcpu->arch.rmode.active) 491 if (vcpu->arch.rmode.active)
486 eb = ~0; 492 eb = ~0;
487 if (vm_need_ept()) 493 if (vm_need_ept())
@@ -747,29 +753,33 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
747 bool has_error_code, u32 error_code) 753 bool has_error_code, u32 error_code)
748{ 754{
749 struct vcpu_vmx *vmx = to_vmx(vcpu); 755 struct vcpu_vmx *vmx = to_vmx(vcpu);
756 u32 intr_info = nr | INTR_INFO_VALID_MASK;
750 757
751 if (has_error_code) 758 if (has_error_code) {
752 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); 759 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
760 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
761 }
753 762
754 if (vcpu->arch.rmode.active) { 763 if (vcpu->arch.rmode.active) {
755 vmx->rmode.irq.pending = true; 764 vmx->rmode.irq.pending = true;
756 vmx->rmode.irq.vector = nr; 765 vmx->rmode.irq.vector = nr;
757 vmx->rmode.irq.rip = kvm_rip_read(vcpu); 766 vmx->rmode.irq.rip = kvm_rip_read(vcpu);
758 if (nr == BP_VECTOR) 767 if (nr == BP_VECTOR || nr == OF_VECTOR)
759 vmx->rmode.irq.rip++; 768 vmx->rmode.irq.rip++;
760 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 769 intr_info |= INTR_TYPE_SOFT_INTR;
761 nr | INTR_TYPE_SOFT_INTR 770 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
762 | (has_error_code ? INTR_INFO_DELIVER_CODE_MASK : 0)
763 | INTR_INFO_VALID_MASK);
764 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1); 771 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
765 kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1); 772 kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
766 return; 773 return;
767 } 774 }
768 775
769 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 776 if (nr == BP_VECTOR || nr == OF_VECTOR) {
770 nr | INTR_TYPE_EXCEPTION 777 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
771 | (has_error_code ? INTR_INFO_DELIVER_CODE_MASK : 0) 778 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
772 | INTR_INFO_VALID_MASK); 779 } else
780 intr_info |= INTR_TYPE_HARD_EXCEPTION;
781
782 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
773} 783}
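
The rewrite builds VM_ENTRY_INTR_INFO_FIELD incrementally instead of in one expression, and picks the event type per vector: #BP/#OF are software exceptions (which also need an instruction length), everything else is a hard exception. A sketch of the composition, following the VM-entry interruption-information format (vector in bits 7:0, type in bits 10:8, bit 11 = deliver error code, bit 31 = valid); constant names mirror vmx.h:

/* Composing the interruption-information field as the rewritten
 * vmx_queue_exception() does. */
#include <stdint.h>
#include <stdio.h>

#define INTR_INFO_VALID_MASK        0x80000000u
#define INTR_INFO_DELIVER_CODE_MASK 0x00000800u
#define INTR_TYPE_HARD_EXCEPTION    (3u << 8)
#define INTR_TYPE_SOFT_EXCEPTION    (6u << 8)

#define BP_VECTOR 3
#define PF_VECTOR 14

static uint32_t intr_info(unsigned int nr, int has_error_code, int soft)
{
	uint32_t info = nr | INTR_INFO_VALID_MASK;

	if (has_error_code)
		info |= INTR_INFO_DELIVER_CODE_MASK;
	info |= soft ? INTR_TYPE_SOFT_EXCEPTION : INTR_TYPE_HARD_EXCEPTION;
	return info;
}

int main(void)
{
	printf("#PF: %#x\n", intr_info(PF_VECTOR, 1, 0)); /* 0x80000b0e */
	printf("#BP: %#x\n", intr_info(BP_VECTOR, 0, 1)); /* 0x80000603 */
	return 0;
}
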
774 784
775static bool vmx_exception_injected(struct kvm_vcpu *vcpu) 785static bool vmx_exception_injected(struct kvm_vcpu *vcpu)
@@ -856,11 +866,8 @@ static u64 guest_read_tsc(void)
856 * writes 'guest_tsc' into guest's timestamp counter "register" 866 * writes 'guest_tsc' into guest's timestamp counter "register"
857 * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc 867 * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
858 */ 868 */
859static void guest_write_tsc(u64 guest_tsc) 869static void guest_write_tsc(u64 guest_tsc, u64 host_tsc)
860{ 870{
861 u64 host_tsc;
862
863 rdtscll(host_tsc);
864 vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc); 871 vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
865} 872}
866 873
@@ -925,14 +932,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
925{ 932{
926 struct vcpu_vmx *vmx = to_vmx(vcpu); 933 struct vcpu_vmx *vmx = to_vmx(vcpu);
927 struct kvm_msr_entry *msr; 934 struct kvm_msr_entry *msr;
935 u64 host_tsc;
928 int ret = 0; 936 int ret = 0;
929 937
930 switch (msr_index) { 938 switch (msr_index) {
931#ifdef CONFIG_X86_64
932 case MSR_EFER: 939 case MSR_EFER:
933 vmx_load_host_state(vmx); 940 vmx_load_host_state(vmx);
934 ret = kvm_set_msr_common(vcpu, msr_index, data); 941 ret = kvm_set_msr_common(vcpu, msr_index, data);
935 break; 942 break;
943#ifdef CONFIG_X86_64
936 case MSR_FS_BASE: 944 case MSR_FS_BASE:
937 vmcs_writel(GUEST_FS_BASE, data); 945 vmcs_writel(GUEST_FS_BASE, data);
938 break; 946 break;
@@ -950,7 +958,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
950 vmcs_writel(GUEST_SYSENTER_ESP, data); 958 vmcs_writel(GUEST_SYSENTER_ESP, data);
951 break; 959 break;
952 case MSR_IA32_TIME_STAMP_COUNTER: 960 case MSR_IA32_TIME_STAMP_COUNTER:
953 guest_write_tsc(data); 961 rdtscll(host_tsc);
962 guest_write_tsc(data, host_tsc);
954 break; 963 break;
955 case MSR_P6_PERFCTR0: 964 case MSR_P6_PERFCTR0:
956 case MSR_P6_PERFCTR1: 965 case MSR_P6_PERFCTR1:
@@ -999,40 +1008,28 @@ static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
999 } 1008 }
1000} 1009}
1001 1010
1002static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg) 1011static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
1003{ 1012{
1004 unsigned long dr7 = 0x400; 1013 int old_debug = vcpu->guest_debug;
1005 int old_singlestep; 1014 unsigned long flags;
1006
1007 old_singlestep = vcpu->guest_debug.singlestep;
1008
1009 vcpu->guest_debug.enabled = dbg->enabled;
1010 if (vcpu->guest_debug.enabled) {
1011 int i;
1012 1015
1013 dr7 |= 0x200; /* exact */ 1016 vcpu->guest_debug = dbg->control;
1014 for (i = 0; i < 4; ++i) { 1017 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
1015 if (!dbg->breakpoints[i].enabled) 1018 vcpu->guest_debug = 0;
1016 continue;
1017 vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
1018 dr7 |= 2 << (i*2); /* global enable */
1019 dr7 |= 0 << (i*4+16); /* execution breakpoint */
1020 }
1021 1019
1022 vcpu->guest_debug.singlestep = dbg->singlestep; 1020 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
1023 } else 1021 vmcs_writel(GUEST_DR7, dbg->arch.debugreg[7]);
1024 vcpu->guest_debug.singlestep = 0; 1022 else
1025 1023 vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
1026 if (old_singlestep && !vcpu->guest_debug.singlestep) {
1027 unsigned long flags;
1028 1024
1029 flags = vmcs_readl(GUEST_RFLAGS); 1025 flags = vmcs_readl(GUEST_RFLAGS);
1026 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
1027 flags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
1028 else if (old_debug & KVM_GUESTDBG_SINGLESTEP)
1030 flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF); 1029 flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
1031 vmcs_writel(GUEST_RFLAGS, flags); 1030 vmcs_writel(GUEST_RFLAGS, flags);
1032 }
1033 1031
1034 update_exception_bitmap(vcpu); 1032 update_exception_bitmap(vcpu);
1035 vmcs_writel(GUEST_DR7, dr7);
1036 1033
1037 return 0; 1034 return 0;
1038} 1035}
@@ -1433,6 +1430,29 @@ continue_rmode:
1433 init_rmode(vcpu->kvm); 1430 init_rmode(vcpu->kvm);
1434} 1431}
1435 1432
1433static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
1434{
1435 struct vcpu_vmx *vmx = to_vmx(vcpu);
1436 struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
1437
1438 vcpu->arch.shadow_efer = efer;
1439 if (!msr)
1440 return;
1441 if (efer & EFER_LMA) {
1442 vmcs_write32(VM_ENTRY_CONTROLS,
1443 vmcs_read32(VM_ENTRY_CONTROLS) |
1444 VM_ENTRY_IA32E_MODE);
1445 msr->data = efer;
1446 } else {
1447 vmcs_write32(VM_ENTRY_CONTROLS,
1448 vmcs_read32(VM_ENTRY_CONTROLS) &
1449 ~VM_ENTRY_IA32E_MODE);
1450
1451 msr->data = efer & ~EFER_LME;
1452 }
1453 setup_msrs(vmx);
1454}
1455
1436#ifdef CONFIG_X86_64 1456#ifdef CONFIG_X86_64
1437 1457
1438static void enter_lmode(struct kvm_vcpu *vcpu) 1458static void enter_lmode(struct kvm_vcpu *vcpu)
@@ -1447,13 +1467,8 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
1447 (guest_tr_ar & ~AR_TYPE_MASK) 1467 (guest_tr_ar & ~AR_TYPE_MASK)
1448 | AR_TYPE_BUSY_64_TSS); 1468 | AR_TYPE_BUSY_64_TSS);
1449 } 1469 }
1450
1451 vcpu->arch.shadow_efer |= EFER_LMA; 1470 vcpu->arch.shadow_efer |= EFER_LMA;
1452 1471 vmx_set_efer(vcpu, vcpu->arch.shadow_efer);
1453 find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME;
1454 vmcs_write32(VM_ENTRY_CONTROLS,
1455 vmcs_read32(VM_ENTRY_CONTROLS)
1456 | VM_ENTRY_IA32E_MODE);
1457} 1472}
1458 1473
1459static void exit_lmode(struct kvm_vcpu *vcpu) 1474static void exit_lmode(struct kvm_vcpu *vcpu)
@@ -1612,30 +1627,6 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1612 vmcs_writel(GUEST_CR4, hw_cr4); 1627 vmcs_writel(GUEST_CR4, hw_cr4);
1613} 1628}
1614 1629
1615static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
1616{
1617 struct vcpu_vmx *vmx = to_vmx(vcpu);
1618 struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
1619
1620 vcpu->arch.shadow_efer = efer;
1621 if (!msr)
1622 return;
1623 if (efer & EFER_LMA) {
1624 vmcs_write32(VM_ENTRY_CONTROLS,
1625 vmcs_read32(VM_ENTRY_CONTROLS) |
1626 VM_ENTRY_IA32E_MODE);
1627 msr->data = efer;
1628
1629 } else {
1630 vmcs_write32(VM_ENTRY_CONTROLS,
1631 vmcs_read32(VM_ENTRY_CONTROLS) &
1632 ~VM_ENTRY_IA32E_MODE);
1633
1634 msr->data = efer & ~EFER_LME;
1635 }
1636 setup_msrs(vmx);
1637}
1638
1639static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) 1630static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
1640{ 1631{
1641 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; 1632 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
@@ -1653,7 +1644,7 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
1653 var->limit = vmcs_read32(sf->limit); 1644 var->limit = vmcs_read32(sf->limit);
1654 var->selector = vmcs_read16(sf->selector); 1645 var->selector = vmcs_read16(sf->selector);
1655 ar = vmcs_read32(sf->ar_bytes); 1646 ar = vmcs_read32(sf->ar_bytes);
1656 if (ar & AR_UNUSABLE_MASK) 1647 if ((ar & AR_UNUSABLE_MASK) && !emulate_invalid_guest_state)
1657 ar = 0; 1648 ar = 0;
1658 var->type = ar & 15; 1649 var->type = ar & 15;
1659 var->s = (ar >> 4) & 1; 1650 var->s = (ar >> 4) & 1;
@@ -1788,14 +1779,16 @@ static bool code_segment_valid(struct kvm_vcpu *vcpu)
1788 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); 1779 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
1789 cs_rpl = cs.selector & SELECTOR_RPL_MASK; 1780 cs_rpl = cs.selector & SELECTOR_RPL_MASK;
1790 1781
1782 if (cs.unusable)
1783 return false;
1791 if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK)) 1784 if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK))
1792 return false; 1785 return false;
1793 if (!cs.s) 1786 if (!cs.s)
1794 return false; 1787 return false;
1795 if (!(~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK))) { 1788 if (cs.type & AR_TYPE_WRITEABLE_MASK) {
1796 if (cs.dpl > cs_rpl) 1789 if (cs.dpl > cs_rpl)
1797 return false; 1790 return false;
1798 } else if (cs.type & AR_TYPE_CODE_MASK) { 1791 } else {
1799 if (cs.dpl != cs_rpl) 1792 if (cs.dpl != cs_rpl)
1800 return false; 1793 return false;
1801 } 1794 }
@@ -1814,7 +1807,9 @@ static bool stack_segment_valid(struct kvm_vcpu *vcpu)
1814 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); 1807 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
1815 ss_rpl = ss.selector & SELECTOR_RPL_MASK; 1808 ss_rpl = ss.selector & SELECTOR_RPL_MASK;
1816 1809
1817 if ((ss.type != 3) || (ss.type != 7)) 1810 if (ss.unusable)
1811 return true;
1812 if (ss.type != 3 && ss.type != 7)
1818 return false; 1813 return false;
1819 if (!ss.s) 1814 if (!ss.s)
1820 return false; 1815 return false;
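
The predicate fixed above is a classic De Morgan slip: with ||, (type != 3) || (type != 7) holds for every value, since no type equals both 3 and 7, so valid stack segments were always rejected. The same fix applies to the TSS type check further down. Demonstrably:

/* With ||, the old check is true for every type value; && expresses
 * the intended "neither 3 nor 7". */
#include <stdio.h>

int main(void)
{
	unsigned int type;

	for (type = 2; type <= 8; type++) {
		int buggy = (type != 3) || (type != 7); /* always 1 */
		int fixed = (type != 3) && (type != 7);
		printf("type %u: buggy=%d fixed=%d\n", type, buggy, fixed);
	}
	return 0;
}
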
@@ -1834,6 +1829,8 @@ static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
1834 vmx_get_segment(vcpu, &var, seg); 1829 vmx_get_segment(vcpu, &var, seg);
1835 rpl = var.selector & SELECTOR_RPL_MASK; 1830 rpl = var.selector & SELECTOR_RPL_MASK;
1836 1831
1832 if (var.unusable)
1833 return true;
1837 if (!var.s) 1834 if (!var.s)
1838 return false; 1835 return false;
1839 if (!var.present) 1836 if (!var.present)
@@ -1855,9 +1852,11 @@ static bool tr_valid(struct kvm_vcpu *vcpu)
1855 1852
1856 vmx_get_segment(vcpu, &tr, VCPU_SREG_TR); 1853 vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
1857 1854
1855 if (tr.unusable)
1856 return false;
1858 if (tr.selector & SELECTOR_TI_MASK) /* TI = 1 */ 1857 if (tr.selector & SELECTOR_TI_MASK) /* TI = 1 */
1859 return false; 1858 return false;
1860 if ((tr.type != 3) || (tr.type != 11)) /* TODO: Check if guest is in IA32e mode */ 1859 if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
1861 return false; 1860 return false;
1862 if (!tr.present) 1861 if (!tr.present)
1863 return false; 1862 return false;
@@ -1871,6 +1870,8 @@ static bool ldtr_valid(struct kvm_vcpu *vcpu)
1871 1870
1872 vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR); 1871 vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
1873 1872
1873 if (ldtr.unusable)
1874 return true;
1874 if (ldtr.selector & SELECTOR_TI_MASK) /* TI = 1 */ 1875 if (ldtr.selector & SELECTOR_TI_MASK) /* TI = 1 */
1875 return false; 1876 return false;
1876 if (ldtr.type != 2) 1877 if (ldtr.type != 2)
@@ -2112,7 +2113,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
2112{ 2113{
2113 u32 host_sysenter_cs, msr_low, msr_high; 2114 u32 host_sysenter_cs, msr_low, msr_high;
2114 u32 junk; 2115 u32 junk;
2115 u64 host_pat; 2116 u64 host_pat, tsc_this, tsc_base;
2116 unsigned long a; 2117 unsigned long a;
2117 struct descriptor_table dt; 2118 struct descriptor_table dt;
2118 int i; 2119 int i;
@@ -2240,6 +2241,12 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
2240 vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL); 2241 vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
2241 vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK); 2242 vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
2242 2243
2244 tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc;
2245 rdtscll(tsc_this);
2246 if (tsc_this < vmx->vcpu.kvm->arch.vm_init_tsc)
2247 tsc_base = tsc_this;
2248
2249 guest_write_tsc(0, tsc_base);
2243 2250
2244 return 0; 2251 return 0;
2245} 2252}
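
Seeding each vCPU's TSC from the VM-wide vm_init_tsc keeps SMP guests booting with synchronized counters; if this CPU's TSC is already behind that value, the current reading is used instead. guest_write_tsc(0, tsc_base) then programs TSC_OFFSET so that guest_tsc = host_tsc + offset starts near zero. A sketch of the arithmetic:

/* vCPU TSC initialization as above, with a constant standing in for
 * rdtscll(). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vm_init_tsc = 1000000;
	uint64_t tsc_this = 1234567; /* stand-in for rdtscll() */
	uint64_t tsc_base = vm_init_tsc;

	if (tsc_this < vm_init_tsc) /* host TSC not monotonic across CPUs */
		tsc_base = tsc_this;

	/* guest_write_tsc(0, tsc_base): */
	int64_t tsc_offset = 0 - (int64_t)tsc_base;
	printf("TSC_OFFSET = %lld\n", (long long)tsc_offset);
	return 0;
}
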
@@ -2319,7 +2326,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
2319 kvm_rip_write(vcpu, 0); 2326 kvm_rip_write(vcpu, 0);
2320 kvm_register_write(vcpu, VCPU_REGS_RSP, 0); 2327 kvm_register_write(vcpu, VCPU_REGS_RSP, 0);
2321 2328
2322 /* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */
2323 vmcs_writel(GUEST_DR7, 0x400); 2329 vmcs_writel(GUEST_DR7, 0x400);
2324 2330
2325 vmcs_writel(GUEST_GDTR_BASE, 0); 2331 vmcs_writel(GUEST_GDTR_BASE, 0);
@@ -2332,8 +2338,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
2332 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0); 2338 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
2333 vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0); 2339 vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
2334 2340
2335 guest_write_tsc(0);
2336
2337 /* Special registers */ 2341 /* Special registers */
2338 vmcs_write64(GUEST_IA32_DEBUGCTL, 0); 2342 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
2339 2343
@@ -2486,6 +2490,11 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu,
2486{ 2490{
2487 vmx_update_window_states(vcpu); 2491 vmx_update_window_states(vcpu);
2488 2492
2493 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
2494 vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
2495 GUEST_INTR_STATE_STI |
2496 GUEST_INTR_STATE_MOV_SS);
2497
2489 if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) { 2498 if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
2490 if (vcpu->arch.interrupt.pending) { 2499 if (vcpu->arch.interrupt.pending) {
2491 enable_nmi_window(vcpu); 2500 enable_nmi_window(vcpu);
@@ -2536,24 +2545,6 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
2536 return 0; 2545 return 0;
2537} 2546}
2538 2547
2539static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
2540{
2541 struct kvm_guest_debug *dbg = &vcpu->guest_debug;
2542
2543 set_debugreg(dbg->bp[0], 0);
2544 set_debugreg(dbg->bp[1], 1);
2545 set_debugreg(dbg->bp[2], 2);
2546 set_debugreg(dbg->bp[3], 3);
2547
2548 if (dbg->singlestep) {
2549 unsigned long flags;
2550
2551 flags = vmcs_readl(GUEST_RFLAGS);
2552 flags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
2553 vmcs_writel(GUEST_RFLAGS, flags);
2554 }
2555}
2556
2557static int handle_rmode_exception(struct kvm_vcpu *vcpu, 2548static int handle_rmode_exception(struct kvm_vcpu *vcpu,
2558 int vec, u32 err_code) 2549 int vec, u32 err_code)
2559{ 2550{
@@ -2570,9 +2561,17 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
2570 * the required debugging infrastructure rework. 2561 * the required debugging infrastructure rework.
2571 */ 2562 */
2572 switch (vec) { 2563 switch (vec) {
2573 case DE_VECTOR:
2574 case DB_VECTOR: 2564 case DB_VECTOR:
2565 if (vcpu->guest_debug &
2566 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
2567 return 0;
2568 kvm_queue_exception(vcpu, vec);
2569 return 1;
2575 case BP_VECTOR: 2570 case BP_VECTOR:
2571 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
2572 return 0;
2573 /* fall through */
2574 case DE_VECTOR:
2576 case OF_VECTOR: 2575 case OF_VECTOR:
2577 case BR_VECTOR: 2576 case BR_VECTOR:
2578 case UD_VECTOR: 2577 case UD_VECTOR:
@@ -2589,8 +2588,8 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
2589static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 2588static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2590{ 2589{
2591 struct vcpu_vmx *vmx = to_vmx(vcpu); 2590 struct vcpu_vmx *vmx = to_vmx(vcpu);
2592 u32 intr_info, error_code; 2591 u32 intr_info, ex_no, error_code;
2593 unsigned long cr2, rip; 2592 unsigned long cr2, rip, dr6;
2594 u32 vect_info; 2593 u32 vect_info;
2595 enum emulation_result er; 2594 enum emulation_result er;
2596 2595
@@ -2649,14 +2648,30 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2649 return 1; 2648 return 1;
2650 } 2649 }
2651 2650
2652 if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) == 2651 ex_no = intr_info & INTR_INFO_VECTOR_MASK;
2653 (INTR_TYPE_EXCEPTION | 1)) { 2652 switch (ex_no) {
2653 case DB_VECTOR:
2654 dr6 = vmcs_readl(EXIT_QUALIFICATION);
2655 if (!(vcpu->guest_debug &
2656 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
2657 vcpu->arch.dr6 = dr6 | DR6_FIXED_1;
2658 kvm_queue_exception(vcpu, DB_VECTOR);
2659 return 1;
2660 }
2661 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
2662 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
2663 /* fall through */
2664 case BP_VECTOR:
2654 kvm_run->exit_reason = KVM_EXIT_DEBUG; 2665 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2655 return 0; 2666 kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
2667 kvm_run->debug.arch.exception = ex_no;
2668 break;
2669 default:
2670 kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
2671 kvm_run->ex.exception = ex_no;
2672 kvm_run->ex.error_code = error_code;
2673 break;
2656 } 2674 }
2657 kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
2658 kvm_run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK;
2659 kvm_run->ex.error_code = error_code;
2660 return 0; 2675 return 0;
2661} 2676}
2662 2677
@@ -2677,7 +2692,7 @@ static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2677static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 2692static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2678{ 2693{
2679 unsigned long exit_qualification; 2694 unsigned long exit_qualification;
2680 int size, down, in, string, rep; 2695 int size, in, string;
2681 unsigned port; 2696 unsigned port;
2682 2697
2683 ++vcpu->stat.io_exits; 2698 ++vcpu->stat.io_exits;
@@ -2693,8 +2708,6 @@ static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2693 2708
2694 size = (exit_qualification & 7) + 1; 2709 size = (exit_qualification & 7) + 1;
2695 in = (exit_qualification & 8) != 0; 2710 in = (exit_qualification & 8) != 0;
2696 down = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0;
2697 rep = (exit_qualification & 32) != 0;
2698 port = exit_qualification >> 16; 2711 port = exit_qualification >> 16;
2699 2712
2700 skip_emulated_instruction(vcpu); 2713 skip_emulated_instruction(vcpu);
@@ -2795,21 +2808,44 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2795 unsigned long val; 2808 unsigned long val;
2796 int dr, reg; 2809 int dr, reg;
2797 2810
2798 /* 2811 dr = vmcs_readl(GUEST_DR7);
2799 * FIXME: this code assumes the host is debugging the guest. 2812 if (dr & DR7_GD) {
2800 * need to deal with guest debugging itself too. 2813 /*
2801 */ 2814 * As the vm-exit takes precedence over the debug trap, we
2815 * need to emulate the latter, either for the host or the
2816 * guest debugging itself.
2817 */
2818 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
2819 kvm_run->debug.arch.dr6 = vcpu->arch.dr6;
2820 kvm_run->debug.arch.dr7 = dr;
2821 kvm_run->debug.arch.pc =
2822 vmcs_readl(GUEST_CS_BASE) +
2823 vmcs_readl(GUEST_RIP);
2824 kvm_run->debug.arch.exception = DB_VECTOR;
2825 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2826 return 0;
2827 } else {
2828 vcpu->arch.dr7 &= ~DR7_GD;
2829 vcpu->arch.dr6 |= DR6_BD;
2830 vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
2831 kvm_queue_exception(vcpu, DB_VECTOR);
2832 return 1;
2833 }
2834 }
2835
2802 exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 2836 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
2803 dr = exit_qualification & 7; 2837 dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
2804 reg = (exit_qualification >> 8) & 15; 2838 reg = DEBUG_REG_ACCESS_REG(exit_qualification);
2805 if (exit_qualification & 16) { 2839 if (exit_qualification & TYPE_MOV_FROM_DR) {
2806 /* mov from dr */
2807 switch (dr) { 2840 switch (dr) {
2841 case 0 ... 3:
2842 val = vcpu->arch.db[dr];
2843 break;
2808 case 6: 2844 case 6:
2809 val = 0xffff0ff0; 2845 val = vcpu->arch.dr6;
2810 break; 2846 break;
2811 case 7: 2847 case 7:
2812 val = 0x400; 2848 val = vcpu->arch.dr7;
2813 break; 2849 break;
2814 default: 2850 default:
2815 val = 0; 2851 val = 0;
@@ -2817,7 +2853,38 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2817 kvm_register_write(vcpu, reg, val); 2853 kvm_register_write(vcpu, reg, val);
2818 KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler); 2854 KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
2819 } else { 2855 } else {
2820 /* mov to dr */ 2856 val = vcpu->arch.regs[reg];
2857 switch (dr) {
2858 case 0 ... 3:
2859 vcpu->arch.db[dr] = val;
2860 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
2861 vcpu->arch.eff_db[dr] = val;
2862 break;
2863 case 4 ... 5:
2864 if (vcpu->arch.cr4 & X86_CR4_DE)
2865 kvm_queue_exception(vcpu, UD_VECTOR);
2866 break;
2867 case 6:
2868 if (val & 0xffffffff00000000ULL) {
2869 kvm_queue_exception(vcpu, GP_VECTOR);
2870 break;
2871 }
2872 vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
2873 break;
2874 case 7:
2875 if (val & 0xffffffff00000000ULL) {
2876 kvm_queue_exception(vcpu, GP_VECTOR);
2877 break;
2878 }
2879 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
2880 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
2881 vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
2882 vcpu->arch.switch_db_regs =
2883 (val & DR7_BP_EN_MASK);
2884 }
2885 break;
2886 }
2887 KVMTRACE_2D(DR_WRITE, vcpu, (u32)dr, (u32)val, handler);
2821 } 2888 }
2822 skip_emulated_instruction(vcpu); 2889 skip_emulated_instruction(vcpu);
2823 return 1; 2890 return 1;
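
The DR7.GD block at the top of handle_dr() recreates a trap the hardware would normally raise itself: GD arms a #DB on any debug-register access, but the MOV-DR vm-exit fires first, so KVM must either hand the event to the host debugger or clear GD, set DR6.BD and inject #DB into the guest. As plain logic (GD is DR7 bit 13, BD is DR6 bit 13, per the SDM):

/* DR7.GD emulation as in the handler above. */
#include <stdint.h>
#include <stdio.h>

#define DR7_GD (1u << 13)
#define DR6_BD (1u << 13)

int main(void)
{
	uint32_t dr7 = DR7_GD, dr6 = 0;
	int host_owns_debugregs = 0; /* KVM_GUESTDBG_USE_HW_BP not set */

	if (dr7 & DR7_GD) {
		if (host_owns_debugregs) {
			puts("exit to user space with KVM_EXIT_DEBUG");
		} else {
			dr7 &= ~DR7_GD;
			dr6 |= DR6_BD;
			printf("inject #DB, dr6=%#x dr7=%#x\n", dr6, dr7);
		}
	}
	return 0;
}
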
@@ -2968,17 +3035,25 @@ static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2968 } 3035 }
2969 tss_selector = exit_qualification; 3036 tss_selector = exit_qualification;
2970 3037
2971 return kvm_task_switch(vcpu, tss_selector, reason); 3038 if (!kvm_task_switch(vcpu, tss_selector, reason))
3039 return 0;
3040
3041 /* clear all local breakpoint enable flags */
3042 vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~55);
3043
3044 /*
3045 * TODO: What about debug traps on tss switch?
3046 * Are we supposed to inject them and update dr6?
3047 */
3048
3049 return 1;
2972} 3050}
2973 3051
2974static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 3052static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2975{ 3053{
2976 u64 exit_qualification; 3054 u64 exit_qualification;
2977 enum emulation_result er;
2978 gpa_t gpa; 3055 gpa_t gpa;
2979 unsigned long hva;
2980 int gla_validity; 3056 int gla_validity;
2981 int r;
2982 3057
2983 exit_qualification = vmcs_read64(EXIT_QUALIFICATION); 3058 exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
2984 3059
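
One detail worth flagging in the handle_task_switch() hunk above: the comment says all *local* breakpoint enable flags are cleared, but the mask is decimal 55 (binary 110111). With the architectural DR7 layout, the local enables L0..L3 sit at bits 0, 2, 4 and 6, so a mask matching the comment would be 0x55. A hedged restatement:

	/* DR7 enable-bit layout (architectural):
	 *   L0=bit0 G0=bit1 L1=bit2 G1=bit3 L2=bit4 G2=bit5 L3=bit6 G3=bit7
	 * A mask clearing only the local enables (hypothetical name,
	 * not from the patch): */
	#define DR7_LOCAL_ENABLE_MASK	0x55UL
	/* i.e. vmcs_writel(GUEST_DR7,
	 *		    vmcs_readl(GUEST_DR7) & ~DR7_LOCAL_ENABLE_MASK); */
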
@@ -3001,32 +3076,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3001 } 3076 }
3002 3077
3003 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); 3078 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
3004 hva = gfn_to_hva(vcpu->kvm, gpa >> PAGE_SHIFT); 3079 return kvm_mmu_page_fault(vcpu, gpa & PAGE_MASK, 0);
3005 if (!kvm_is_error_hva(hva)) {
3006 r = kvm_mmu_page_fault(vcpu, gpa & PAGE_MASK, 0);
3007 if (r < 0) {
3008 printk(KERN_ERR "EPT: Not enough memory!\n");
3009 return -ENOMEM;
3010 }
3011 return 1;
3012 } else {
3013 /* must be MMIO */
3014 er = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
3015
3016 if (er == EMULATE_FAIL) {
3017 printk(KERN_ERR
3018 "EPT: Fail to handle EPT violation vmexit!er is %d\n",
3019 er);
3020 printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n",
3021 (long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS),
3022 (long unsigned int)vmcs_read64(GUEST_LINEAR_ADDRESS));
3023 printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
3024 (long unsigned int)exit_qualification);
3025 return -ENOTSUPP;
3026 } else if (er == EMULATE_DO_MMIO)
3027 return 0;
3028 }
3029 return 1;
3030} 3080}
3031 3081
3032static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 3082static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
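
The collapsed EPT handler leans on the return-value convention of kvm_mmu_page_fault(); my reading of how the run loop interprets the value (a sketch, not a documented contract):

	/* Assumed contract of kvm_mmu_page_fault() as relied on here:
	 *   r < 0  -> internal error; the handler's negative return is
	 *             propagated out to userspace
	 *   r == 0 -> not fixable in the MMU (e.g. MMIO); returning 0
	 *             drops back to userspace / the emulator
	 *   r > 0  -> fault resolved; returning 1 resumes the guest */
	return kvm_mmu_page_fault(vcpu, gpa & PAGE_MASK, 0);
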
@@ -3046,7 +3096,7 @@ static void handle_invalid_guest_state(struct kvm_vcpu *vcpu,
3046 struct kvm_run *kvm_run) 3096 struct kvm_run *kvm_run)
3047{ 3097{
3048 struct vcpu_vmx *vmx = to_vmx(vcpu); 3098 struct vcpu_vmx *vmx = to_vmx(vcpu);
3049 int err; 3099 enum emulation_result err = EMULATE_DONE;
3050 3100
3051 preempt_enable(); 3101 preempt_enable();
3052 local_irq_enable(); 3102 local_irq_enable();
@@ -3071,10 +3121,7 @@ static void handle_invalid_guest_state(struct kvm_vcpu *vcpu,
3071 local_irq_disable(); 3121 local_irq_disable();
3072 preempt_disable(); 3122 preempt_disable();
3073 3123
3074 /* Guest state should be valid now except if we need to 3124 vmx->invalid_state_emulation_result = err;
3075 * emulate an MMIO */
3076 if (guest_state_valid(vcpu))
3077 vmx->emulation_required = 0;
3078} 3125}
3079 3126
3080/* 3127/*
@@ -3123,8 +3170,11 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
3123 3170
3124 /* If we need to emulate an MMIO from handle_invalid_guest_state 3171 /* If we need to emulate an MMIO from handle_invalid_guest_state
3125 * we just return 0 */ 3172 * we just return 0 */
3126 if (vmx->emulation_required && emulate_invalid_guest_state) 3173 if (vmx->emulation_required && emulate_invalid_guest_state) {
3127 return 0; 3174 if (guest_state_valid(vcpu))
3175 vmx->emulation_required = 0;
3176 return vmx->invalid_state_emulation_result != EMULATE_DO_MMIO;
3177 }
3128 3178
3129 /* Accessing CR3 doesn't cause a VM exit in paging mode, so we need 3179 /* Accessing CR3 doesn't cause a VM exit in paging mode, so we need
3130 * to sync with the guest's real CR3. */ 3180 * to sync with the guest's real CR3. */
@@ -3238,7 +3288,8 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
3238 vmx->vcpu.arch.nmi_injected = false; 3288 vmx->vcpu.arch.nmi_injected = false;
3239 } 3289 }
3240 kvm_clear_exception_queue(&vmx->vcpu); 3290 kvm_clear_exception_queue(&vmx->vcpu);
3241 if (idtv_info_valid && type == INTR_TYPE_EXCEPTION) { 3291 if (idtv_info_valid && (type == INTR_TYPE_HARD_EXCEPTION ||
3292 type == INTR_TYPE_SOFT_EXCEPTION)) {
3242 if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { 3293 if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
3243 error = vmcs_read32(IDT_VECTORING_ERROR_CODE); 3294 error = vmcs_read32(IDT_VECTORING_ERROR_CODE);
3244 kvm_queue_exception_e(&vmx->vcpu, vector, error); 3295 kvm_queue_exception_e(&vmx->vcpu, vector, error);
@@ -3259,6 +3310,11 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
3259 3310
3260 vmx_update_window_states(vcpu); 3311 vmx_update_window_states(vcpu);
3261 3312
3313 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
3314 vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
3315 GUEST_INTR_STATE_STI |
3316 GUEST_INTR_STATE_MOV_SS);
3317
3262 if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) { 3318 if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
3263 if (vcpu->arch.interrupt.pending) { 3319 if (vcpu->arch.interrupt.pending) {
3264 enable_nmi_window(vcpu); 3320 enable_nmi_window(vcpu);
@@ -3347,6 +3403,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3347 */ 3403 */
3348 vmcs_writel(HOST_CR0, read_cr0()); 3404 vmcs_writel(HOST_CR0, read_cr0());
3349 3405
3406 set_debugreg(vcpu->arch.dr6, 6);
3407
3350 asm( 3408 asm(
3351 /* Store host registers */ 3409 /* Store host registers */
3352 "push %%"R"dx; push %%"R"bp;" 3410 "push %%"R"dx; push %%"R"bp;"
@@ -3441,6 +3499,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3441 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)); 3499 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
3442 vcpu->arch.regs_dirty = 0; 3500 vcpu->arch.regs_dirty = 0;
3443 3501
3502 get_debugreg(vcpu->arch.dr6, 6);
3503
3444 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); 3504 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
3445 if (vmx->rmode.irq.pending) 3505 if (vmx->rmode.irq.pending)
3446 fixup_rmode_irq(vmx); 3506 fixup_rmode_irq(vmx);
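
Together with the set_debugreg() before the entry assembly above, this read-back brackets the guest run with the usual save/load idiom, since hardware updates DR6 directly on a guest #DB. Schematically (a restatement of the two added lines, not new code):

	set_debugreg(vcpu->arch.dr6, 6);	/* load guest DR6 before entry */
	/* ... VM entry, guest runs, VM exit ... */
	get_debugreg(vcpu->arch.dr6, 6);	/* capture what the guest left */
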
@@ -3595,7 +3655,6 @@ static struct kvm_x86_ops vmx_x86_ops = {
3595 .vcpu_put = vmx_vcpu_put, 3655 .vcpu_put = vmx_vcpu_put,
3596 3656
3597 .set_guest_debug = set_guest_debug, 3657 .set_guest_debug = set_guest_debug,
3598 .guest_debug_pre = kvm_guest_debug_pre,
3599 .get_msr = vmx_get_msr, 3658 .get_msr = vmx_get_msr,
3600 .set_msr = vmx_set_msr, 3659 .set_msr = vmx_set_msr,
3601 .get_segment_base = vmx_get_segment_base, 3660 .get_segment_base = vmx_get_segment_base,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 758b7a155ae9..8ca100a9ecac 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -36,6 +36,7 @@
36#include <linux/highmem.h> 36#include <linux/highmem.h>
37#include <linux/iommu.h> 37#include <linux/iommu.h>
38#include <linux/intel-iommu.h> 38#include <linux/intel-iommu.h>
39#include <linux/cpufreq.h>
39 40
40#include <asm/uaccess.h> 41#include <asm/uaccess.h>
41#include <asm/msr.h> 42#include <asm/msr.h>
@@ -69,6 +70,8 @@ static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
69 70
70static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, 71static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
71 struct kvm_cpuid_entry2 __user *entries); 72 struct kvm_cpuid_entry2 __user *entries);
73struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
74 u32 function, u32 index);
72 75
73struct kvm_x86_ops *kvm_x86_ops; 76struct kvm_x86_ops *kvm_x86_ops;
74EXPORT_SYMBOL_GPL(kvm_x86_ops); 77EXPORT_SYMBOL_GPL(kvm_x86_ops);
@@ -173,6 +176,7 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
173 u32 error_code) 176 u32 error_code)
174{ 177{
175 ++vcpu->stat.pf_guest; 178 ++vcpu->stat.pf_guest;
179
176 if (vcpu->arch.exception.pending) { 180 if (vcpu->arch.exception.pending) {
177 if (vcpu->arch.exception.nr == PF_VECTOR) { 181 if (vcpu->arch.exception.nr == PF_VECTOR) {
178 printk(KERN_DEBUG "kvm: inject_page_fault:" 182 printk(KERN_DEBUG "kvm: inject_page_fault:"
@@ -361,6 +365,7 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
361 } 365 }
362 kvm_x86_ops->set_cr4(vcpu, cr4); 366 kvm_x86_ops->set_cr4(vcpu, cr4);
363 vcpu->arch.cr4 = cr4; 367 vcpu->arch.cr4 = cr4;
368 vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled;
364 kvm_mmu_sync_global(vcpu); 369 kvm_mmu_sync_global(vcpu);
365 kvm_mmu_reset_context(vcpu); 370 kvm_mmu_reset_context(vcpu);
366} 371}
@@ -442,6 +447,11 @@ unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
442} 447}
443EXPORT_SYMBOL_GPL(kvm_get_cr8); 448EXPORT_SYMBOL_GPL(kvm_get_cr8);
444 449
450static inline u32 bit(int bitno)
451{
452 return 1 << (bitno & 31);
453}
454
445/* 455/*
446 * List of msr numbers which we expose to userspace through KVM_GET_MSRS 456 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
447 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. 457 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
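
The bitno & 31 in bit() is what lets it take X86_FEATURE_* constants directly: those encode 32*word + bit, and the callers below only care about the position within one CPUID register word. Worked through for the SVM check added in this patch (constant value assumed from cpufeature.h of this era):

	/* X86_FEATURE_SVM == 6*32 + 2 (word 6 = CPUID 0x80000001 ECX)
	 *   194 & 31 == 2
	 *   bit(X86_FEATURE_SVM) == 1 << 2 == 0x4
	 * which is exactly CPUID.80000001H:ECX.SVM[bit 2]. */
	u32 svm_mask = bit(X86_FEATURE_SVM);	/* 0x4 */
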
@@ -456,7 +466,7 @@ static u32 msrs_to_save[] = {
456 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, 466 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
457#endif 467#endif
458 MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, 468 MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
459 MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT 469 MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
460}; 470};
461 471
462static unsigned num_msrs_to_save; 472static unsigned num_msrs_to_save;
@@ -481,6 +491,28 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
481 return; 491 return;
482 } 492 }
483 493
494 if (efer & EFER_FFXSR) {
495 struct kvm_cpuid_entry2 *feat;
496
497 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
498 if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
499 printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
500 kvm_inject_gp(vcpu, 0);
501 return;
502 }
503 }
504
505 if (efer & EFER_SVME) {
506 struct kvm_cpuid_entry2 *feat;
507
508 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
509 if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
510 printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
511 kvm_inject_gp(vcpu, 0);
512 return;
513 }
514 }
515
484 kvm_x86_ops->set_efer(vcpu, efer); 516 kvm_x86_ops->set_efer(vcpu, efer);
485 517
486 efer &= ~EFER_LMA; 518 efer &= ~EFER_LMA;
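
Both new EFER checks follow the same shape, so a generic helper is easy to imagine (a sketch, not in the patch; kvm_find_cpuid_entry() and bit() are as defined elsewhere in this diff):

	/* Sketch: refuse an EFER feature bit unless guest CPUID reports
	 * the corresponding capability bit in the given leaf/register. */
	static bool guest_has_cpuid_bit(struct kvm_vcpu *vcpu, u32 leaf,
					bool in_ecx, int feature_bit)
	{
		struct kvm_cpuid_entry2 *e = kvm_find_cpuid_entry(vcpu, leaf, 0);

		if (!e)
			return false;
		return (in_ecx ? e->ecx : e->edx) & bit(feature_bit);
	}

With that, set_efer() would reject EFER_SVME when !guest_has_cpuid_bit(vcpu, 0x80000001, true, X86_FEATURE_SVM), and likewise for EFER_FFXSR against EDX.
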
@@ -586,6 +618,8 @@ static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *
586 hv_clock->tsc_to_system_mul); 618 hv_clock->tsc_to_system_mul);
587} 619}
588 620
621static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
622
589static void kvm_write_guest_time(struct kvm_vcpu *v) 623static void kvm_write_guest_time(struct kvm_vcpu *v)
590{ 624{
591 struct timespec ts; 625 struct timespec ts;
@@ -596,9 +630,9 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
596 if ((!vcpu->time_page)) 630 if ((!vcpu->time_page))
597 return; 631 return;
598 632
599 if (unlikely(vcpu->hv_clock_tsc_khz != tsc_khz)) { 633 if (unlikely(vcpu->hv_clock_tsc_khz != __get_cpu_var(cpu_tsc_khz))) {
600 kvm_set_time_scale(tsc_khz, &vcpu->hv_clock); 634 kvm_set_time_scale(__get_cpu_var(cpu_tsc_khz), &vcpu->hv_clock);
601 vcpu->hv_clock_tsc_khz = tsc_khz; 635 vcpu->hv_clock_tsc_khz = __get_cpu_var(cpu_tsc_khz);
602 } 636 }
603 637
604 /* Keep irq disabled to prevent changes to the clock */ 638 /* Keep irq disabled to prevent changes to the clock */
@@ -629,6 +663,16 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
629 mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT); 663 mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
630} 664}
631 665
666static int kvm_request_guest_time_update(struct kvm_vcpu *v)
667{
668 struct kvm_vcpu_arch *vcpu = &v->arch;
669
670 if (!vcpu->time_page)
671 return 0;
672 set_bit(KVM_REQ_KVMCLOCK_UPDATE, &v->requests);
673 return 1;
674}
675
632static bool msr_mtrr_valid(unsigned msr) 676static bool msr_mtrr_valid(unsigned msr)
633{ 677{
634 switch (msr) { 678 switch (msr) {
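
This converts an eager clock write into the standard deferred-request pattern; the consuming side appears further down in the vcpu_enter_guest() hunk. In miniature:

	/* Sketch: producer marks the work, consumer performs it with the
	 * vcpu loaded, just before the next guest entry. */
	set_bit(KVM_REQ_KVMCLOCK_UPDATE, &v->requests);		/* producer */

	if (test_and_clear_bit(KVM_REQ_KVMCLOCK_UPDATE,		/* consumer */
			       &vcpu->requests))
		kvm_write_guest_time(vcpu);
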
@@ -722,6 +766,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
722 break; 766 break;
723 case MSR_IA32_UCODE_REV: 767 case MSR_IA32_UCODE_REV:
724 case MSR_IA32_UCODE_WRITE: 768 case MSR_IA32_UCODE_WRITE:
769 case MSR_VM_HSAVE_PA:
725 break; 770 break;
726 case 0x200 ... 0x2ff: 771 case 0x200 ... 0x2ff:
727 return set_msr_mtrr(vcpu, msr, data); 772 return set_msr_mtrr(vcpu, msr, data);
@@ -758,7 +803,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
758 vcpu->arch.time_page = NULL; 803 vcpu->arch.time_page = NULL;
759 } 804 }
760 805
761 kvm_write_guest_time(vcpu); 806 kvm_request_guest_time_update(vcpu);
762 break; 807 break;
763 } 808 }
764 default: 809 default:
@@ -843,6 +888,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
843 case MSR_IA32_LASTBRANCHTOIP: 888 case MSR_IA32_LASTBRANCHTOIP:
844 case MSR_IA32_LASTINTFROMIP: 889 case MSR_IA32_LASTINTFROMIP:
845 case MSR_IA32_LASTINTTOIP: 890 case MSR_IA32_LASTINTTOIP:
891 case MSR_VM_HSAVE_PA:
846 data = 0; 892 data = 0;
847 break; 893 break;
848 case MSR_MTRRcap: 894 case MSR_MTRRcap:
@@ -967,10 +1013,13 @@ int kvm_dev_ioctl_check_extension(long ext)
967 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: 1013 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
968 case KVM_CAP_SET_TSS_ADDR: 1014 case KVM_CAP_SET_TSS_ADDR:
969 case KVM_CAP_EXT_CPUID: 1015 case KVM_CAP_EXT_CPUID:
1016 case KVM_CAP_CLOCKSOURCE:
970 case KVM_CAP_PIT: 1017 case KVM_CAP_PIT:
971 case KVM_CAP_NOP_IO_DELAY: 1018 case KVM_CAP_NOP_IO_DELAY:
972 case KVM_CAP_MP_STATE: 1019 case KVM_CAP_MP_STATE:
973 case KVM_CAP_SYNC_MMU: 1020 case KVM_CAP_SYNC_MMU:
1021 case KVM_CAP_REINJECT_CONTROL:
1022 case KVM_CAP_IRQ_INJECT_STATUS:
974 r = 1; 1023 r = 1;
975 break; 1024 break;
976 case KVM_CAP_COALESCED_MMIO: 1025 case KVM_CAP_COALESCED_MMIO:
@@ -991,9 +1040,6 @@ int kvm_dev_ioctl_check_extension(long ext)
991 case KVM_CAP_IOMMU: 1040 case KVM_CAP_IOMMU:
992 r = iommu_found(); 1041 r = iommu_found();
993 break; 1042 break;
994 case KVM_CAP_CLOCKSOURCE:
995 r = boot_cpu_has(X86_FEATURE_CONSTANT_TSC);
996 break;
997 default: 1043 default:
998 r = 0; 1044 r = 0;
999 break; 1045 break;
@@ -1044,7 +1090,7 @@ long kvm_arch_dev_ioctl(struct file *filp,
1044 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) 1090 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1045 goto out; 1091 goto out;
1046 r = kvm_dev_ioctl_get_supported_cpuid(&cpuid, 1092 r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
1047 cpuid_arg->entries); 1093 cpuid_arg->entries);
1048 if (r) 1094 if (r)
1049 goto out; 1095 goto out;
1050 1096
@@ -1064,7 +1110,7 @@ out:
1064void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 1110void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1065{ 1111{
1066 kvm_x86_ops->vcpu_load(vcpu, cpu); 1112 kvm_x86_ops->vcpu_load(vcpu, cpu);
1067 kvm_write_guest_time(vcpu); 1113 kvm_request_guest_time_update(vcpu);
1068} 1114}
1069 1115
1070void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 1116void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -1142,8 +1188,8 @@ out:
1142} 1188}
1143 1189
1144static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu, 1190static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
1145 struct kvm_cpuid2 *cpuid, 1191 struct kvm_cpuid2 *cpuid,
1146 struct kvm_cpuid_entry2 __user *entries) 1192 struct kvm_cpuid_entry2 __user *entries)
1147{ 1193{
1148 int r; 1194 int r;
1149 1195
@@ -1162,8 +1208,8 @@ out:
1162} 1208}
1163 1209
1164static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, 1210static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
1165 struct kvm_cpuid2 *cpuid, 1211 struct kvm_cpuid2 *cpuid,
1166 struct kvm_cpuid_entry2 __user *entries) 1212 struct kvm_cpuid_entry2 __user *entries)
1167{ 1213{
1168 int r; 1214 int r;
1169 1215
@@ -1172,7 +1218,7 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
1172 goto out; 1218 goto out;
1173 r = -EFAULT; 1219 r = -EFAULT;
1174 if (copy_to_user(entries, &vcpu->arch.cpuid_entries, 1220 if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
1175 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2))) 1221 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
1176 goto out; 1222 goto out;
1177 return 0; 1223 return 0;
1178 1224
@@ -1181,18 +1227,13 @@ out:
1181 return r; 1227 return r;
1182} 1228}
1183 1229
1184static inline u32 bit(int bitno)
1185{
1186 return 1 << (bitno & 31);
1187}
1188
1189static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function, 1230static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1190 u32 index) 1231 u32 index)
1191{ 1232{
1192 entry->function = function; 1233 entry->function = function;
1193 entry->index = index; 1234 entry->index = index;
1194 cpuid_count(entry->function, entry->index, 1235 cpuid_count(entry->function, entry->index,
1195 &entry->eax, &entry->ebx, &entry->ecx, &entry->edx); 1236 &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
1196 entry->flags = 0; 1237 entry->flags = 0;
1197} 1238}
1198 1239
@@ -1222,15 +1263,17 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1222#ifdef CONFIG_X86_64 1263#ifdef CONFIG_X86_64
1223 bit(X86_FEATURE_LM) | 1264 bit(X86_FEATURE_LM) |
1224#endif 1265#endif
1266 bit(X86_FEATURE_FXSR_OPT) |
1225 bit(X86_FEATURE_MMXEXT) | 1267 bit(X86_FEATURE_MMXEXT) |
1226 bit(X86_FEATURE_3DNOWEXT) | 1268 bit(X86_FEATURE_3DNOWEXT) |
1227 bit(X86_FEATURE_3DNOW); 1269 bit(X86_FEATURE_3DNOW);
1228 const u32 kvm_supported_word3_x86_features = 1270 const u32 kvm_supported_word3_x86_features =
1229 bit(X86_FEATURE_XMM3) | bit(X86_FEATURE_CX16); 1271 bit(X86_FEATURE_XMM3) | bit(X86_FEATURE_CX16);
1230 const u32 kvm_supported_word6_x86_features = 1272 const u32 kvm_supported_word6_x86_features =
1231 bit(X86_FEATURE_LAHF_LM) | bit(X86_FEATURE_CMP_LEGACY); 1273 bit(X86_FEATURE_LAHF_LM) | bit(X86_FEATURE_CMP_LEGACY) |
1274 bit(X86_FEATURE_SVM);
1232 1275
1233 /* all func 2 cpuid_count() should be called on the same cpu */ 1276 /* all calls to cpuid_count() should be made on the same cpu */
1234 get_cpu(); 1277 get_cpu();
1235 do_cpuid_1_ent(entry, function, index); 1278 do_cpuid_1_ent(entry, function, index);
1236 ++*nent; 1279 ++*nent;
@@ -1304,7 +1347,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1304} 1347}
1305 1348
1306static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, 1349static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
1307 struct kvm_cpuid_entry2 __user *entries) 1350 struct kvm_cpuid_entry2 __user *entries)
1308{ 1351{
1309 struct kvm_cpuid_entry2 *cpuid_entries; 1352 struct kvm_cpuid_entry2 *cpuid_entries;
1310 int limit, nent = 0, r = -E2BIG; 1353 int limit, nent = 0, r = -E2BIG;
@@ -1321,7 +1364,7 @@ static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
1321 limit = cpuid_entries[0].eax; 1364 limit = cpuid_entries[0].eax;
1322 for (func = 1; func <= limit && nent < cpuid->nent; ++func) 1365 for (func = 1; func <= limit && nent < cpuid->nent; ++func)
1323 do_cpuid_ent(&cpuid_entries[nent], func, 0, 1366 do_cpuid_ent(&cpuid_entries[nent], func, 0,
1324 &nent, cpuid->nent); 1367 &nent, cpuid->nent);
1325 r = -E2BIG; 1368 r = -E2BIG;
1326 if (nent >= cpuid->nent) 1369 if (nent >= cpuid->nent)
1327 goto out_free; 1370 goto out_free;
@@ -1330,10 +1373,10 @@ static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
1330 limit = cpuid_entries[nent - 1].eax; 1373 limit = cpuid_entries[nent - 1].eax;
1331 for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func) 1374 for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
1332 do_cpuid_ent(&cpuid_entries[nent], func, 0, 1375 do_cpuid_ent(&cpuid_entries[nent], func, 0,
1333 &nent, cpuid->nent); 1376 &nent, cpuid->nent);
1334 r = -EFAULT; 1377 r = -EFAULT;
1335 if (copy_to_user(entries, cpuid_entries, 1378 if (copy_to_user(entries, cpuid_entries,
1336 nent * sizeof(struct kvm_cpuid_entry2))) 1379 nent * sizeof(struct kvm_cpuid_entry2)))
1337 goto out_free; 1380 goto out_free;
1338 cpuid->nent = nent; 1381 cpuid->nent = nent;
1339 r = 0; 1382 r = 0;
@@ -1477,7 +1520,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
1477 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) 1520 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1478 goto out; 1521 goto out;
1479 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, 1522 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
1480 cpuid_arg->entries); 1523 cpuid_arg->entries);
1481 if (r) 1524 if (r)
1482 goto out; 1525 goto out;
1483 break; 1526 break;
@@ -1490,7 +1533,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
1490 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) 1533 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1491 goto out; 1534 goto out;
1492 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, 1535 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
1493 cpuid_arg->entries); 1536 cpuid_arg->entries);
1494 if (r) 1537 if (r)
1495 goto out; 1538 goto out;
1496 r = -EFAULT; 1539 r = -EFAULT;
@@ -1710,6 +1753,15 @@ static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
1710 return r; 1753 return r;
1711} 1754}
1712 1755
1756static int kvm_vm_ioctl_reinject(struct kvm *kvm,
1757 struct kvm_reinject_control *control)
1758{
1759 if (!kvm->arch.vpit)
1760 return -ENXIO;
1761 kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
1762 return 0;
1763}
1764
1713/* 1765/*
1714 * Get (and clear) the dirty memory log for a memory slot. 1766 * Get (and clear) the dirty memory log for a memory slot.
1715 */ 1767 */
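
From userspace this is reachable through the VM ioctl; a minimal, untested caller, assuming the struct layout from the uapi header introduced alongside KVM_CAP_REINJECT_CONTROL:

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Sketch: disable PIT interrupt reinjection for a VM fd. */
	static int disable_pit_reinject(int vmfd)
	{
		struct kvm_reinject_control control = { .pit_reinject = 0 };

		return ioctl(vmfd, KVM_REINJECT_CONTROL, &control);
	}
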
@@ -1807,13 +1859,26 @@ long kvm_arch_vm_ioctl(struct file *filp,
1807 } 1859 }
1808 } else 1860 } else
1809 goto out; 1861 goto out;
1862 r = kvm_setup_default_irq_routing(kvm);
1863 if (r) {
1864 kfree(kvm->arch.vpic);
1865 kfree(kvm->arch.vioapic);
1866 goto out;
1867 }
1810 break; 1868 break;
1811 case KVM_CREATE_PIT: 1869 case KVM_CREATE_PIT:
1870 mutex_lock(&kvm->lock);
1871 r = -EEXIST;
1872 if (kvm->arch.vpit)
1873 goto create_pit_unlock;
1812 r = -ENOMEM; 1874 r = -ENOMEM;
1813 kvm->arch.vpit = kvm_create_pit(kvm); 1875 kvm->arch.vpit = kvm_create_pit(kvm);
1814 if (kvm->arch.vpit) 1876 if (kvm->arch.vpit)
1815 r = 0; 1877 r = 0;
1878 create_pit_unlock:
1879 mutex_unlock(&kvm->lock);
1816 break; 1880 break;
1881 case KVM_IRQ_LINE_STATUS:
1817 case KVM_IRQ_LINE: { 1882 case KVM_IRQ_LINE: {
1818 struct kvm_irq_level irq_event; 1883 struct kvm_irq_level irq_event;
1819 1884
@@ -1821,10 +1886,17 @@ long kvm_arch_vm_ioctl(struct file *filp,
1821 if (copy_from_user(&irq_event, argp, sizeof irq_event)) 1886 if (copy_from_user(&irq_event, argp, sizeof irq_event))
1822 goto out; 1887 goto out;
1823 if (irqchip_in_kernel(kvm)) { 1888 if (irqchip_in_kernel(kvm)) {
1889 __s32 status;
1824 mutex_lock(&kvm->lock); 1890 mutex_lock(&kvm->lock);
1825 kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1891 status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
1826 irq_event.irq, irq_event.level); 1892 irq_event.irq, irq_event.level);
1827 mutex_unlock(&kvm->lock); 1893 mutex_unlock(&kvm->lock);
1894 if (ioctl == KVM_IRQ_LINE_STATUS) {
1895 irq_event.status = status;
1896 if (copy_to_user(argp, &irq_event,
1897 sizeof irq_event))
1898 goto out;
1899 }
1828 r = 0; 1900 r = 0;
1829 } 1901 }
1830 break; 1902 break;
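
KVM_IRQ_LINE_STATUS behaves like KVM_IRQ_LINE but copies the kvm_set_irq() result back to the caller. A hedged userspace sketch (per my reading, status shares a union with the irq field, and a positive value means the interrupt was delivered rather than coalesced or masked):

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Sketch: raise an irq line and learn what became of it. */
	static int set_irq_with_status(int vmfd, unsigned int irq, int level)
	{
		struct kvm_irq_level event = { .irq = irq, .level = level };

		if (ioctl(vmfd, KVM_IRQ_LINE_STATUS, &event) < 0)
			return -1;
		return event.status;	/* filled in by the kernel */
	}
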
@@ -1907,6 +1979,17 @@ long kvm_arch_vm_ioctl(struct file *filp,
1907 r = 0; 1979 r = 0;
1908 break; 1980 break;
1909 } 1981 }
1982 case KVM_REINJECT_CONTROL: {
1983 struct kvm_reinject_control control;
1984 r = -EFAULT;
1985 if (copy_from_user(&control, argp, sizeof(control)))
1986 goto out;
1987 r = kvm_vm_ioctl_reinject(kvm, &control);
1988 if (r)
1989 goto out;
1990 r = 0;
1991 break;
1992 }
1910 default: 1993 default:
1911 ; 1994 ;
1912 } 1995 }
@@ -1960,10 +2043,38 @@ static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
1960 return dev; 2043 return dev;
1961} 2044}
1962 2045
1963int emulator_read_std(unsigned long addr, 2046static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
1964 void *val, 2047 struct kvm_vcpu *vcpu)
1965 unsigned int bytes, 2048{
1966 struct kvm_vcpu *vcpu) 2049 void *data = val;
2050 int r = X86EMUL_CONTINUE;
2051
2052 while (bytes) {
2053 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2054 unsigned offset = addr & (PAGE_SIZE-1);
2055 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
2056 int ret;
2057
2058 if (gpa == UNMAPPED_GVA) {
2059 r = X86EMUL_PROPAGATE_FAULT;
2060 goto out;
2061 }
2062 ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
2063 if (ret < 0) {
2064 r = X86EMUL_UNHANDLEABLE;
2065 goto out;
2066 }
2067
2068 bytes -= toread;
2069 data += toread;
2070 addr += toread;
2071 }
2072out:
2073 return r;
2074}
2075
2076static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
2077 struct kvm_vcpu *vcpu)
1967{ 2078{
1968 void *data = val; 2079 void *data = val;
1969 int r = X86EMUL_CONTINUE; 2080 int r = X86EMUL_CONTINUE;
@@ -1971,27 +2082,27 @@ int emulator_read_std(unsigned long addr,
1971 while (bytes) { 2082 while (bytes) {
1972 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); 2083 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
1973 unsigned offset = addr & (PAGE_SIZE-1); 2084 unsigned offset = addr & (PAGE_SIZE-1);
1974 unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset); 2085 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
1975 int ret; 2086 int ret;
1976 2087
1977 if (gpa == UNMAPPED_GVA) { 2088 if (gpa == UNMAPPED_GVA) {
1978 r = X86EMUL_PROPAGATE_FAULT; 2089 r = X86EMUL_PROPAGATE_FAULT;
1979 goto out; 2090 goto out;
1980 } 2091 }
1981 ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy); 2092 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
1982 if (ret < 0) { 2093 if (ret < 0) {
1983 r = X86EMUL_UNHANDLEABLE; 2094 r = X86EMUL_UNHANDLEABLE;
1984 goto out; 2095 goto out;
1985 } 2096 }
1986 2097
1987 bytes -= tocopy; 2098 bytes -= towrite;
1988 data += tocopy; 2099 data += towrite;
1989 addr += tocopy; 2100 addr += towrite;
1990 } 2101 }
1991out: 2102out:
1992 return r; 2103 return r;
1993} 2104}
1994EXPORT_SYMBOL_GPL(emulator_read_std); 2105
1995 2106
1996static int emulator_read_emulated(unsigned long addr, 2107static int emulator_read_emulated(unsigned long addr,
1997 void *val, 2108 void *val,
@@ -2013,8 +2124,8 @@ static int emulator_read_emulated(unsigned long addr,
2013 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 2124 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2014 goto mmio; 2125 goto mmio;
2015 2126
2016 if (emulator_read_std(addr, val, bytes, vcpu) 2127 if (kvm_read_guest_virt(addr, val, bytes, vcpu)
2017 == X86EMUL_CONTINUE) 2128 == X86EMUL_CONTINUE)
2018 return X86EMUL_CONTINUE; 2129 return X86EMUL_CONTINUE;
2019 if (gpa == UNMAPPED_GVA) 2130 if (gpa == UNMAPPED_GVA)
2020 return X86EMUL_PROPAGATE_FAULT; 2131 return X86EMUL_PROPAGATE_FAULT;
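
The chunking inside kvm_read_guest_virt()/kvm_write_guest_virt() is the standard cross-page walk: with an address at offset 0xff8 of a 4 KiB page and bytes = 16, the first iteration copies 8 bytes (PAGE_SIZE - offset) and the second copies the remaining 8 from the next translated page. The arithmetic in isolation (PAGE_SIZE_SK is a hypothetical stand-in for PAGE_SIZE):

	#define PAGE_SIZE_SK 4096u

	/* Sketch: split an (addr, bytes) guest-virtual span into per-page
	 * chunks, the way the two helpers above iterate. */
	static void for_each_chunk(unsigned long addr, unsigned int bytes)
	{
		while (bytes) {
			unsigned int offset = addr & (PAGE_SIZE_SK - 1);
			unsigned int chunk = bytes < PAGE_SIZE_SK - offset ?
					     bytes : PAGE_SIZE_SK - offset;

			/* translate addr -> gpa and copy 'chunk' bytes here */
			bytes -= chunk;
			addr += chunk;
		}
	}
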
@@ -2217,7 +2328,7 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
2217 2328
2218 rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS); 2329 rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
2219 2330
2220 emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu); 2331 kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu);
2221 2332
2222 printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n", 2333 printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
2223 context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]); 2334 context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
@@ -2225,7 +2336,7 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
2225EXPORT_SYMBOL_GPL(kvm_report_emulation_failure); 2336EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
2226 2337
2227static struct x86_emulate_ops emulate_ops = { 2338static struct x86_emulate_ops emulate_ops = {
2228 .read_std = emulator_read_std, 2339 .read_std = kvm_read_guest_virt,
2229 .read_emulated = emulator_read_emulated, 2340 .read_emulated = emulator_read_emulated,
2230 .write_emulated = emulator_write_emulated, 2341 .write_emulated = emulator_write_emulated,
2231 .cmpxchg_emulated = emulator_cmpxchg_emulated, 2342 .cmpxchg_emulated = emulator_cmpxchg_emulated,
@@ -2327,40 +2438,19 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
2327} 2438}
2328EXPORT_SYMBOL_GPL(emulate_instruction); 2439EXPORT_SYMBOL_GPL(emulate_instruction);
2329 2440
2330static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
2331{
2332 int i;
2333
2334 for (i = 0; i < ARRAY_SIZE(vcpu->arch.pio.guest_pages); ++i)
2335 if (vcpu->arch.pio.guest_pages[i]) {
2336 kvm_release_page_dirty(vcpu->arch.pio.guest_pages[i]);
2337 vcpu->arch.pio.guest_pages[i] = NULL;
2338 }
2339}
2340
2341static int pio_copy_data(struct kvm_vcpu *vcpu) 2441static int pio_copy_data(struct kvm_vcpu *vcpu)
2342{ 2442{
2343 void *p = vcpu->arch.pio_data; 2443 void *p = vcpu->arch.pio_data;
2344 void *q; 2444 gva_t q = vcpu->arch.pio.guest_gva;
2345 unsigned bytes; 2445 unsigned bytes;
2346 int nr_pages = vcpu->arch.pio.guest_pages[1] ? 2 : 1; 2446 int ret;
2347 2447
2348 q = vmap(vcpu->arch.pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
2349 PAGE_KERNEL);
2350 if (!q) {
2351 free_pio_guest_pages(vcpu);
2352 return -ENOMEM;
2353 }
2354 q += vcpu->arch.pio.guest_page_offset;
2355 bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count; 2448 bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
2356 if (vcpu->arch.pio.in) 2449 if (vcpu->arch.pio.in)
2357 memcpy(q, p, bytes); 2450 ret = kvm_write_guest_virt(q, p, bytes, vcpu);
2358 else 2451 else
2359 memcpy(p, q, bytes); 2452 ret = kvm_read_guest_virt(q, p, bytes, vcpu);
2360 q -= vcpu->arch.pio.guest_page_offset; 2453 return ret;
2361 vunmap(q);
2362 free_pio_guest_pages(vcpu);
2363 return 0;
2364} 2454}
2365 2455
2366int complete_pio(struct kvm_vcpu *vcpu) 2456int complete_pio(struct kvm_vcpu *vcpu)
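
The direction of the two helpers in pio_copy_data() is easy to misread: for a guest IN, data flows from the pio buffer into guest memory, so the kernel *writes* guest virtual memory, and vice versa for OUT. Restating the diff lines with the mapping spelled out:

	if (vcpu->arch.pio.in)
		/* guest IN:  pio_data -> guest memory */
		ret = kvm_write_guest_virt(q, p, bytes, vcpu);
	else
		/* guest OUT: guest memory -> pio_data */
		ret = kvm_read_guest_virt(q, p, bytes, vcpu);
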
@@ -2471,7 +2561,6 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2471 vcpu->arch.pio.in = in; 2561 vcpu->arch.pio.in = in;
2472 vcpu->arch.pio.string = 0; 2562 vcpu->arch.pio.string = 0;
2473 vcpu->arch.pio.down = 0; 2563 vcpu->arch.pio.down = 0;
2474 vcpu->arch.pio.guest_page_offset = 0;
2475 vcpu->arch.pio.rep = 0; 2564 vcpu->arch.pio.rep = 0;
2476 2565
2477 if (vcpu->run->io.direction == KVM_EXIT_IO_IN) 2566 if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
@@ -2499,9 +2588,7 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2499 gva_t address, int rep, unsigned port) 2588 gva_t address, int rep, unsigned port)
2500{ 2589{
2501 unsigned now, in_page; 2590 unsigned now, in_page;
2502 int i, ret = 0; 2591 int ret = 0;
2503 int nr_pages = 1;
2504 struct page *page;
2505 struct kvm_io_device *pio_dev; 2592 struct kvm_io_device *pio_dev;
2506 2593
2507 vcpu->run->exit_reason = KVM_EXIT_IO; 2594 vcpu->run->exit_reason = KVM_EXIT_IO;
@@ -2513,7 +2600,6 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2513 vcpu->arch.pio.in = in; 2600 vcpu->arch.pio.in = in;
2514 vcpu->arch.pio.string = 1; 2601 vcpu->arch.pio.string = 1;
2515 vcpu->arch.pio.down = down; 2602 vcpu->arch.pio.down = down;
2516 vcpu->arch.pio.guest_page_offset = offset_in_page(address);
2517 vcpu->arch.pio.rep = rep; 2603 vcpu->arch.pio.rep = rep;
2518 2604
2519 if (vcpu->run->io.direction == KVM_EXIT_IO_IN) 2605 if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
@@ -2533,15 +2619,8 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2533 else 2619 else
2534 in_page = offset_in_page(address) + size; 2620 in_page = offset_in_page(address) + size;
2535 now = min(count, (unsigned long)in_page / size); 2621 now = min(count, (unsigned long)in_page / size);
2536 if (!now) { 2622 if (!now)
2537 /*
2538 * String I/O straddles page boundary. Pin two guest pages
2539 * so that we satisfy atomicity constraints. Do just one
2540 * transaction to avoid complexity.
2541 */
2542 nr_pages = 2;
2543 now = 1; 2623 now = 1;
2544 }
2545 if (down) { 2624 if (down) {
2546 /* 2625 /*
2547 * String I/O in reverse. Yuck. Kill the guest, fix later. 2626 * String I/O in reverse. Yuck. Kill the guest, fix later.
@@ -2556,15 +2635,7 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2556 if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count) 2635 if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
2557 kvm_x86_ops->skip_emulated_instruction(vcpu); 2636 kvm_x86_ops->skip_emulated_instruction(vcpu);
2558 2637
2559 for (i = 0; i < nr_pages; ++i) { 2638 vcpu->arch.pio.guest_gva = address;
2560 page = gva_to_page(vcpu, address + i * PAGE_SIZE);
2561 vcpu->arch.pio.guest_pages[i] = page;
2562 if (!page) {
2563 kvm_inject_gp(vcpu, 0);
2564 free_pio_guest_pages(vcpu);
2565 return 1;
2566 }
2567 }
2568 2639
2569 pio_dev = vcpu_find_pio_dev(vcpu, port, 2640 pio_dev = vcpu_find_pio_dev(vcpu, port,
2570 vcpu->arch.pio.cur_count, 2641 vcpu->arch.pio.cur_count,
@@ -2572,7 +2643,11 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2572 if (!vcpu->arch.pio.in) { 2643 if (!vcpu->arch.pio.in) {
2573 /* string PIO write */ 2644 /* string PIO write */
2574 ret = pio_copy_data(vcpu); 2645 ret = pio_copy_data(vcpu);
2575 if (ret >= 0 && pio_dev) { 2646 if (ret == X86EMUL_PROPAGATE_FAULT) {
2647 kvm_inject_gp(vcpu, 0);
2648 return 1;
2649 }
2650 if (ret == 0 && pio_dev) {
2576 pio_string_write(pio_dev, vcpu); 2651 pio_string_write(pio_dev, vcpu);
2577 complete_pio(vcpu); 2652 complete_pio(vcpu);
2578 if (vcpu->arch.pio.count == 0) 2653 if (vcpu->arch.pio.count == 0)
@@ -2587,9 +2662,72 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2587} 2662}
2588EXPORT_SYMBOL_GPL(kvm_emulate_pio_string); 2663EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
2589 2664
2665static void bounce_off(void *info)
2666{
2667 /* nothing */
2668}
2669
2670static unsigned int ref_freq;
2671static unsigned long tsc_khz_ref;
2672
2673static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
2674 void *data)
2675{
2676 struct cpufreq_freqs *freq = data;
2677 struct kvm *kvm;
2678 struct kvm_vcpu *vcpu;
2679 int i, send_ipi = 0;
2680
2681 if (!ref_freq)
2682 ref_freq = freq->old;
2683
2684 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
2685 return 0;
2686 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
2687 return 0;
2688 per_cpu(cpu_tsc_khz, freq->cpu) = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
2689
2690 spin_lock(&kvm_lock);
2691 list_for_each_entry(kvm, &vm_list, vm_list) {
2692 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
2693 vcpu = kvm->vcpus[i];
2694 if (!vcpu)
2695 continue;
2696 if (vcpu->cpu != freq->cpu)
2697 continue;
2698 if (!kvm_request_guest_time_update(vcpu))
2699 continue;
2700 if (vcpu->cpu != smp_processor_id())
2701 send_ipi++;
2702 }
2703 }
2704 spin_unlock(&kvm_lock);
2705
2706 if (freq->old < freq->new && send_ipi) {
2707 /*
2708 * We are upscaling the frequency. We must make sure the
2709 * guest doesn't see old kvmclock values while running with
2710 * the new frequency; otherwise we risk the guest seeing
2711 * time go backwards.
2712 *
2713 * If we are updating the frequency for another cpu
2714 * (which might be in guest context), send an interrupt
2715 * to kick that cpu out of guest context. The next time
2716 * guest context is entered, kvmclock will be updated,
2717 * so the guest will not see stale values.
2718 */
2719 smp_call_function_single(freq->cpu, bounce_off, NULL, 1);
2720 }
2721 return 0;
2722}
2723
2724static struct notifier_block kvmclock_cpufreq_notifier_block = {
2725 .notifier_call = kvmclock_cpufreq_notifier
2726};
2727
2590int kvm_arch_init(void *opaque) 2728int kvm_arch_init(void *opaque)
2591{ 2729{
2592 int r; 2730 int r, cpu;
2593 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque; 2731 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
2594 2732
2595 if (kvm_x86_ops) { 2733 if (kvm_x86_ops) {
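
Before the next hunk wires the notifier up: cpufreq_scale() in the notifier above is a straight ratio (modulo rounding), so the per-cpu rate becomes tsc_khz_ref * freq->new / ref_freq. For example, a 2.4 GHz boot TSC (tsc_khz_ref = 2400000) throttled from ref_freq = 2400000 kHz to freq->new = 1600000 kHz yields 1600000 kHz. As a check:

	/* Sketch: what cpufreq_scale(tsc_khz_ref, ref_freq, new) computes. */
	static unsigned long scaled_tsc_khz(unsigned long tsc_khz_ref,
					    unsigned int ref_freq,
					    unsigned int new_freq)
	{
		return (unsigned long)(((unsigned long long)tsc_khz_ref *
					new_freq) / ref_freq);
	}
	/* scaled_tsc_khz(2400000, 2400000, 1600000) == 1600000 */
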
@@ -2620,6 +2758,15 @@ int kvm_arch_init(void *opaque)
2620 kvm_mmu_set_base_ptes(PT_PRESENT_MASK); 2758 kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
2621 kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK, 2759 kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
2622 PT_DIRTY_MASK, PT64_NX_MASK, 0, 0); 2760 PT_DIRTY_MASK, PT64_NX_MASK, 0, 0);
2761
2762 for_each_possible_cpu(cpu)
2763 per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
2764 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
2765 tsc_khz_ref = tsc_khz;
2766 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
2767 CPUFREQ_TRANSITION_NOTIFIER);
2768 }
2769
2623 return 0; 2770 return 0;
2624 2771
2625out: 2772out:
@@ -2827,25 +2974,20 @@ static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
2827 if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index) 2974 if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
2828 return 0; 2975 return 0;
2829 if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) && 2976 if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
2830 !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT)) 2977 !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
2831 return 0; 2978 return 0;
2832 return 1; 2979 return 1;
2833} 2980}
2834 2981
2835void kvm_emulate_cpuid(struct kvm_vcpu *vcpu) 2982struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
2983 u32 function, u32 index)
2836{ 2984{
2837 int i; 2985 int i;
2838 u32 function, index; 2986 struct kvm_cpuid_entry2 *best = NULL;
2839 struct kvm_cpuid_entry2 *e, *best;
2840 2987
2841 function = kvm_register_read(vcpu, VCPU_REGS_RAX);
2842 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
2843 kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
2844 kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
2845 kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
2846 kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
2847 best = NULL;
2848 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) { 2988 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
2989 struct kvm_cpuid_entry2 *e;
2990
2849 e = &vcpu->arch.cpuid_entries[i]; 2991 e = &vcpu->arch.cpuid_entries[i];
2850 if (is_matching_cpuid_entry(e, function, index)) { 2992 if (is_matching_cpuid_entry(e, function, index)) {
2851 if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) 2993 if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
@@ -2860,6 +3002,21 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
2860 if (!best || e->function > best->function) 3002 if (!best || e->function > best->function)
2861 best = e; 3003 best = e;
2862 } 3004 }
3005 return best;
3006}
3007
3008void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
3009{
3010 u32 function, index;
3011 struct kvm_cpuid_entry2 *best;
3012
3013 function = kvm_register_read(vcpu, VCPU_REGS_RAX);
3014 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
3015 kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
3016 kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
3017 kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
3018 kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
3019 best = kvm_find_cpuid_entry(vcpu, function, index);
2863 if (best) { 3020 if (best) {
2864 kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax); 3021 kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
2865 kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx); 3022 kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
@@ -2945,6 +3102,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2945 if (vcpu->requests) { 3102 if (vcpu->requests) {
2946 if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests)) 3103 if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
2947 __kvm_migrate_timers(vcpu); 3104 __kvm_migrate_timers(vcpu);
3105 if (test_and_clear_bit(KVM_REQ_KVMCLOCK_UPDATE, &vcpu->requests))
3106 kvm_write_guest_time(vcpu);
2948 if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests)) 3107 if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
2949 kvm_mmu_sync_roots(vcpu); 3108 kvm_mmu_sync_roots(vcpu);
2950 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests)) 3109 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
@@ -2979,9 +3138,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2979 goto out; 3138 goto out;
2980 } 3139 }
2981 3140
2982 if (vcpu->guest_debug.enabled)
2983 kvm_x86_ops->guest_debug_pre(vcpu);
2984
2985 vcpu->guest_mode = 1; 3141 vcpu->guest_mode = 1;
2986 /* 3142 /*
2987 * Make sure that guest_mode assignment won't happen after 3143 * Make sure that guest_mode assignment won't happen after
@@ -3002,10 +3158,34 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3002 3158
3003 kvm_guest_enter(); 3159 kvm_guest_enter();
3004 3160
3161 get_debugreg(vcpu->arch.host_dr6, 6);
3162 get_debugreg(vcpu->arch.host_dr7, 7);
3163 if (unlikely(vcpu->arch.switch_db_regs)) {
3164 get_debugreg(vcpu->arch.host_db[0], 0);
3165 get_debugreg(vcpu->arch.host_db[1], 1);
3166 get_debugreg(vcpu->arch.host_db[2], 2);
3167 get_debugreg(vcpu->arch.host_db[3], 3);
3168
3169 set_debugreg(0, 7);
3170 set_debugreg(vcpu->arch.eff_db[0], 0);
3171 set_debugreg(vcpu->arch.eff_db[1], 1);
3172 set_debugreg(vcpu->arch.eff_db[2], 2);
3173 set_debugreg(vcpu->arch.eff_db[3], 3);
3174 }
3005 3175
3006 KVMTRACE_0D(VMENTRY, vcpu, entryexit); 3176 KVMTRACE_0D(VMENTRY, vcpu, entryexit);
3007 kvm_x86_ops->run(vcpu, kvm_run); 3177 kvm_x86_ops->run(vcpu, kvm_run);
3008 3178
3179 if (unlikely(vcpu->arch.switch_db_regs)) {
3180 set_debugreg(0, 7);
3181 set_debugreg(vcpu->arch.host_db[0], 0);
3182 set_debugreg(vcpu->arch.host_db[1], 1);
3183 set_debugreg(vcpu->arch.host_db[2], 2);
3184 set_debugreg(vcpu->arch.host_db[3], 3);
3185 }
3186 set_debugreg(vcpu->arch.host_dr6, 6);
3187 set_debugreg(vcpu->arch.host_dr7, 7);
3188
3009 vcpu->guest_mode = 0; 3189 vcpu->guest_mode = 0;
3010 local_irq_enable(); 3190 local_irq_enable();
3011 3191
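
Note the set_debugreg(0, 7) before each batch of DB writes above: breakpoints are disarmed while DR0..DR3 are in flux so a half-updated set can never match, which is the usual practice when swapping debug registers. In outline (new_db0..new_dr7 are placeholder names for whichever set is being loaded):

	set_debugreg(0, 7);		/* disarm all breakpoints first  */
	set_debugreg(new_db0, 0);	/* load the four addresses...    */
	set_debugreg(new_db1, 1);
	set_debugreg(new_db2, 2);
	set_debugreg(new_db3, 3);
	set_debugreg(new_dr7, 7);	/* ...then re-arm with the config */
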
@@ -3192,7 +3372,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3192 /* 3372 /*
3193 * Don't leak debug flags in case they were set for guest debugging 3373 * Don't leak debug flags in case they were set for guest debugging
3194 */ 3374 */
3195 if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep) 3375 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
3196 regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF); 3376 regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
3197 3377
3198 vcpu_put(vcpu); 3378 vcpu_put(vcpu);
@@ -3811,15 +3991,32 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
3811 return 0; 3991 return 0;
3812} 3992}
3813 3993
3814int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, 3994int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
3815 struct kvm_debug_guest *dbg) 3995 struct kvm_guest_debug *dbg)
3816{ 3996{
3817 int r; 3997 int i, r;
3818 3998
3819 vcpu_load(vcpu); 3999 vcpu_load(vcpu);
3820 4000
4001 if ((dbg->control & (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) ==
4002 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) {
4003 for (i = 0; i < KVM_NR_DB_REGS; ++i)
4004 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
4005 vcpu->arch.switch_db_regs =
4006 (dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
4007 } else {
4008 for (i = 0; i < KVM_NR_DB_REGS; i++)
4009 vcpu->arch.eff_db[i] = vcpu->arch.db[i];
4010 vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
4011 }
4012
3821 r = kvm_x86_ops->set_guest_debug(vcpu, dbg); 4013 r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
3822 4014
4015 if (dbg->control & KVM_GUESTDBG_INJECT_DB)
4016 kvm_queue_exception(vcpu, DB_VECTOR);
4017 else if (dbg->control & KVM_GUESTDBG_INJECT_BP)
4018 kvm_queue_exception(vcpu, BP_VECTOR);
4019
3823 vcpu_put(vcpu); 4020 vcpu_put(vcpu);
3824 4021
3825 return r; 4022 return r;
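
From userspace, the new hardware-breakpoint path looks roughly like this (a sketch; the DR7 encoding — L0 at bit 0, type/len 00 for a 1-byte execute breakpoint — is architectural, and the struct layout is assumed from this series' uapi headers):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Sketch: arm one hardware execute breakpoint at 'addr'. */
	static int set_hw_bp(int vcpufd, unsigned long addr)
	{
		struct kvm_guest_debug dbg;

		memset(&dbg, 0, sizeof(dbg));
		dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
		dbg.arch.debugreg[0] = addr;
		dbg.arch.debugreg[7] = 0x1;	/* L0: local-enable DR0 */
		return ioctl(vcpufd, KVM_SET_GUEST_DEBUG, &dbg);
	}
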
@@ -4007,6 +4204,11 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
4007 vcpu->arch.nmi_pending = false; 4204 vcpu->arch.nmi_pending = false;
4008 vcpu->arch.nmi_injected = false; 4205 vcpu->arch.nmi_injected = false;
4009 4206
4207 vcpu->arch.switch_db_regs = 0;
4208 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
4209 vcpu->arch.dr6 = DR6_FIXED_1;
4210 vcpu->arch.dr7 = DR7_FIXED_1;
4211
4010 return kvm_x86_ops->vcpu_reset(vcpu); 4212 return kvm_x86_ops->vcpu_reset(vcpu);
4011} 4213}
4012 4214
@@ -4100,6 +4302,8 @@ struct kvm *kvm_arch_create_vm(void)
4100 /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ 4302 /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
4101 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); 4303 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
4102 4304
4305 rdtscll(kvm->arch.vm_init_tsc);
4306
4103 return kvm; 4307 return kvm;
4104} 4308}
4105 4309
diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c
index d174db7a3370..ca91749d2083 100644
--- a/arch/x86/kvm/x86_emulate.c
+++ b/arch/x86/kvm/x86_emulate.c
@@ -178,7 +178,7 @@ static u32 opcode_table[256] = {
178 0, ImplicitOps | Stack, 0, 0, 178 0, ImplicitOps | Stack, 0, 0,
179 ByteOp | DstMem | SrcImm | ModRM | Mov, DstMem | SrcImm | ModRM | Mov, 179 ByteOp | DstMem | SrcImm | ModRM | Mov, DstMem | SrcImm | ModRM | Mov,
180 /* 0xC8 - 0xCF */ 180 /* 0xC8 - 0xCF */
181 0, 0, 0, 0, 0, 0, 0, 0, 181 0, 0, 0, ImplicitOps | Stack, 0, 0, 0, 0,
182 /* 0xD0 - 0xD7 */ 182 /* 0xD0 - 0xD7 */
183 ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM, 183 ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
184 ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM, 184 ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
@@ -1136,18 +1136,19 @@ static inline void emulate_push(struct x86_emulate_ctxt *ctxt)
1136} 1136}
1137 1137
1138static int emulate_pop(struct x86_emulate_ctxt *ctxt, 1138static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1139 struct x86_emulate_ops *ops) 1139 struct x86_emulate_ops *ops,
1140 void *dest, int len)
1140{ 1141{
1141 struct decode_cache *c = &ctxt->decode; 1142 struct decode_cache *c = &ctxt->decode;
1142 int rc; 1143 int rc;
1143 1144
1144 rc = ops->read_emulated(register_address(c, ss_base(ctxt), 1145 rc = ops->read_emulated(register_address(c, ss_base(ctxt),
1145 c->regs[VCPU_REGS_RSP]), 1146 c->regs[VCPU_REGS_RSP]),
1146 &c->src.val, c->src.bytes, ctxt->vcpu); 1147 dest, len, ctxt->vcpu);
1147 if (rc != 0) 1148 if (rc != 0)
1148 return rc; 1149 return rc;
1149 1150
1150 register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->src.bytes); 1151 register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
1151 return rc; 1152 return rc;
1152} 1153}
1153 1154
@@ -1157,11 +1158,9 @@ static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
1157 struct decode_cache *c = &ctxt->decode; 1158 struct decode_cache *c = &ctxt->decode;
1158 int rc; 1159 int rc;
1159 1160
1160 c->src.bytes = c->dst.bytes; 1161 rc = emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
1161 rc = emulate_pop(ctxt, ops);
1162 if (rc != 0) 1162 if (rc != 0)
1163 return rc; 1163 return rc;
1164 c->dst.val = c->src.val;
1165 return 0; 1164 return 0;
1166} 1165}
1167 1166
@@ -1279,6 +1278,25 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
1279 return 0; 1278 return 0;
1280} 1279}
1281 1280
1281static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
1282 struct x86_emulate_ops *ops)
1283{
1284 struct decode_cache *c = &ctxt->decode;
1285 int rc;
1286 unsigned long cs;
1287
1288 rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
1289 if (rc)
1290 return rc;
1291 if (c->op_bytes == 4)
1292 c->eip = (u32)c->eip;
1293 rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
1294 if (rc)
1295 return rc;
1296 rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)cs, 1, VCPU_SREG_CS);
1297 return rc;
1298}
1299
1282static inline int writeback(struct x86_emulate_ctxt *ctxt, 1300static inline int writeback(struct x86_emulate_ctxt *ctxt,
1283 struct x86_emulate_ops *ops) 1301 struct x86_emulate_ops *ops)
1284{ 1302{
@@ -1467,11 +1485,9 @@ special_insn:
1467 break; 1485 break;
1468 case 0x58 ... 0x5f: /* pop reg */ 1486 case 0x58 ... 0x5f: /* pop reg */
1469 pop_instruction: 1487 pop_instruction:
1470 c->src.bytes = c->op_bytes; 1488 rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes);
1471 rc = emulate_pop(ctxt, ops);
1472 if (rc != 0) 1489 if (rc != 0)
1473 goto done; 1490 goto done;
1474 c->dst.val = c->src.val;
1475 break; 1491 break;
1476 case 0x63: /* movsxd */ 1492 case 0x63: /* movsxd */
1477 if (ctxt->mode != X86EMUL_MODE_PROT64) 1493 if (ctxt->mode != X86EMUL_MODE_PROT64)
@@ -1738,6 +1754,11 @@ special_insn:
1738 mov: 1754 mov:
1739 c->dst.val = c->src.val; 1755 c->dst.val = c->src.val;
1740 break; 1756 break;
1757 case 0xcb: /* ret far */
1758 rc = emulate_ret_far(ctxt, ops);
1759 if (rc)
1760 goto done;
1761 break;
1741 case 0xd0 ... 0xd1: /* Grp2 */ 1762 case 0xd0 ... 0xd1: /* Grp2 */
1742 c->src.val = 1; 1763 c->src.val = 1;
1743 emulate_grp2(ctxt); 1764 emulate_grp2(ctxt);
@@ -1908,11 +1929,16 @@ twobyte_insn:
1908 c->dst.type = OP_NONE; 1929 c->dst.type = OP_NONE;
1909 break; 1930 break;
1910 case 3: /* lidt/vmmcall */ 1931 case 3: /* lidt/vmmcall */
1911 if (c->modrm_mod == 3 && c->modrm_rm == 1) { 1932 if (c->modrm_mod == 3) {
1912 rc = kvm_fix_hypercall(ctxt->vcpu); 1933 switch (c->modrm_rm) {
1913 if (rc) 1934 case 1:
1914 goto done; 1935 rc = kvm_fix_hypercall(ctxt->vcpu);
1915 kvm_emulate_hypercall(ctxt->vcpu); 1936 if (rc)
1937 goto done;
1938 break;
1939 default:
1940 goto cannot_emulate;
1941 }
1916 } else { 1942 } else {
1917 rc = read_descriptor(ctxt, ops, c->src.ptr, 1943 rc = read_descriptor(ctxt, ops, c->src.ptr,
1918 &size, &address, 1944 &size, &address,