author	Xiao Guangrong <guangrong.xiao@linux.intel.com>	2016-02-24 04:51:13 -0500
committer	Paolo Bonzini <pbonzini@redhat.com>	2016-03-03 08:36:22 -0500
commit	0eb05bf290cfe8610d9680b49abef37febd1c38a (patch)
tree	c3708bc310e6d9eb63f967fe27819b724c7064f2
parent	e5691a81e830c12d396b3f219ab999be87a1208f (diff)
KVM: page track: add notifier support
A notifier list is introduced so that any node that wants to receive
track events can register to the list. Two APIs are introduced here:

- kvm_page_track_register_notifier(): register the notifier to receive
  track events
- kvm_page_track_unregister_notifier(): stop receiving track events by
  unregistering the notifier

The callback, node->track_write(), is called when a write access to a
write-tracked page happens.

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
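For illustration only, a minimal sketch of how an in-kernel consumer might use the new notifier API. The example_* identifiers are hypothetical and not part of this patch; only the kvm_page_track_* calls and the track_write() signature come from the declarations added below. Marking pages for write tracking is done separately through the kvm_slot_page_track_* helpers; the notifier only delivers the emulated-write event.

/*
 * Illustrative consumer sketch, not part of this patch.  The example_*
 * names are hypothetical.
 */
#include <linux/kvm_host.h>
#include <asm/kvm_page_track.h>

static void example_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
				const u8 *new, int bytes)
{
	/* The callback must decide by itself whether @gpa is interesting. */
	pr_debug("tracked write: gpa=0x%llx bytes=%d\n", gpa, bytes);
}

static struct kvm_page_track_notifier_node example_node = {
	.track_write = example_track_write,
};

static void example_start(struct kvm *kvm)
{
	/* Start receiving ->track_write() for writes to write-tracked pages. */
	kvm_page_track_register_notifier(kvm, &example_node);
}

static void example_stop(struct kvm *kvm)
{
	/* Stop receiving events; the callee waits for in-flight readers via SRCU. */
	kvm_page_track_unregister_notifier(kvm, &example_node);
}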
-rw-r--r--arch/x86/include/asm/kvm_host.h1
-rw-r--r--arch/x86/include/asm/kvm_page_track.h39
-rw-r--r--arch/x86/kvm/page_track.c70
-rw-r--r--arch/x86/kvm/x86.c4
4 files changed, 114 insertions, 0 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index e2fc5c0ec86a..eb68e6aca0cf 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -704,6 +704,7 @@ struct kvm_arch {
 	 */
 	struct list_head active_mmu_pages;
 	struct list_head zapped_obsolete_pages;
+	struct kvm_page_track_notifier_head track_notifier_head;
 
 	struct list_head assigned_dev_head;
 	struct iommu_domain *iommu_domain;
diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
index 5f16e2864e73..c2b8d24a235c 100644
--- a/arch/x86/include/asm/kvm_page_track.h
+++ b/arch/x86/include/asm/kvm_page_track.h
@@ -6,6 +6,36 @@ enum kvm_page_track_mode {
 	KVM_PAGE_TRACK_MAX,
 };
 
+/*
+ * The notifier represented by @kvm_page_track_notifier_node is linked into
+ * the head which will be notified when guest is triggering the track event.
+ *
+ * Write access on the head is protected by kvm->mmu_lock, read access
+ * is protected by track_srcu.
+ */
+struct kvm_page_track_notifier_head {
+	struct srcu_struct track_srcu;
+	struct hlist_head track_notifier_list;
+};
+
+struct kvm_page_track_notifier_node {
+	struct hlist_node node;
+
+	/*
+	 * It is called when guest is writing the write-tracked page
+	 * and write emulation is finished at that time.
+	 *
+	 * @vcpu: the vcpu where the write access happened.
+	 * @gpa: the physical address written by guest.
+	 * @new: the data was written to the address.
+	 * @bytes: the written length.
+	 */
+	void (*track_write)(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
+			    int bytes);
+};
+
+void kvm_page_track_init(struct kvm *kvm);
+
 void kvm_page_track_free_memslot(struct kvm_memory_slot *free,
 				 struct kvm_memory_slot *dont);
 int kvm_page_track_create_memslot(struct kvm_memory_slot *slot,
@@ -19,4 +49,13 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm,
 			      enum kvm_page_track_mode mode);
 bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
 			      enum kvm_page_track_mode mode);
+
+void
+kvm_page_track_register_notifier(struct kvm *kvm,
+				 struct kvm_page_track_notifier_node *n);
+void
+kvm_page_track_unregister_notifier(struct kvm *kvm,
+				   struct kvm_page_track_notifier_node *n);
+void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
+			  int bytes);
 #endif
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c
index f127f6d04fa1..11f76436f74f 100644
--- a/arch/x86/kvm/page_track.c
+++ b/arch/x86/kvm/page_track.c
@@ -150,3 +150,73 @@ bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
 
 	return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]);
 }
+
+void kvm_page_track_init(struct kvm *kvm)
+{
+	struct kvm_page_track_notifier_head *head;
+
+	head = &kvm->arch.track_notifier_head;
+	init_srcu_struct(&head->track_srcu);
+	INIT_HLIST_HEAD(&head->track_notifier_list);
+}
+
+/*
+ * register the notifier so that event interception for the tracked guest
+ * pages can be received.
+ */
+void
+kvm_page_track_register_notifier(struct kvm *kvm,
+				 struct kvm_page_track_notifier_node *n)
+{
+	struct kvm_page_track_notifier_head *head;
+
+	head = &kvm->arch.track_notifier_head;
+
+	spin_lock(&kvm->mmu_lock);
+	hlist_add_head_rcu(&n->node, &head->track_notifier_list);
+	spin_unlock(&kvm->mmu_lock);
+}
+
+/*
+ * stop receiving the event interception. It is the opposed operation of
+ * kvm_page_track_register_notifier().
+ */
+void
+kvm_page_track_unregister_notifier(struct kvm *kvm,
+				   struct kvm_page_track_notifier_node *n)
+{
+	struct kvm_page_track_notifier_head *head;
+
+	head = &kvm->arch.track_notifier_head;
+
+	spin_lock(&kvm->mmu_lock);
+	hlist_del_rcu(&n->node);
+	spin_unlock(&kvm->mmu_lock);
+	synchronize_srcu(&head->track_srcu);
+}
+
+/*
+ * Notify the node that write access is intercepted and write emulation is
+ * finished at this time.
+ *
+ * The node should figure out if the written page is the one that node is
+ * interested in by itself.
+ */
+void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
+			  int bytes)
+{
+	struct kvm_page_track_notifier_head *head;
+	struct kvm_page_track_notifier_node *n;
+	int idx;
+
+	head = &vcpu->kvm->arch.track_notifier_head;
+
+	if (hlist_empty(&head->track_notifier_list))
+		return;
+
+	idx = srcu_read_lock(&head->track_srcu);
+	hlist_for_each_entry_rcu(n, &head->track_notifier_list, node)
+		if (n->track_write)
+			n->track_write(vcpu, gpa, new, bytes);
+	srcu_read_unlock(&head->track_srcu, idx);
+}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7b4cfea09deb..b81c14ef1e1d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4346,6 +4346,7 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 	if (ret < 0)
 		return 0;
 	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
+	kvm_page_track_write(vcpu, gpa, val, bytes);
 	return 1;
 }
 
@@ -4604,6 +4605,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
 
 	kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
 	kvm_mmu_pte_write(vcpu, gpa, new, bytes);
+	kvm_page_track_write(vcpu, gpa, new, bytes);
 
 	return X86EMUL_CONTINUE;
 
@@ -7724,6 +7726,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
 	INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
 
+	kvm_page_track_init(kvm);
+
 	return 0;
 }
 