author     Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>  2012-08-01 05:03:28 -0400
committer  Avi Kivity <avi@redhat.com>  2012-08-06 05:47:30 -0400
commit     d89cc617b954aff4030fce178f7d86f59aaf713d
tree       d04f2ccdddd1d718044f47fdf8442054d87799f6 /arch
parent     65fbe37c42ed75604c9a770639209dcee162ebe7
KVM: Push rmap into kvm_arch_memory_slot
Two reasons:

 - x86 can integrate rmap and rmap_pde and remove the heuristics in
   __gfn_to_rmap().

 - Some architectures do not need rmap.

Since rmap is one of the most memory-consuming structures in KVM, ppc had
better restrict the allocation to Book3S HV.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Acked-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Avi Kivity <avi@redhat.com>
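A minimal illustrative sketch of the indexing scheme this change enables on
x86 (not kernel code: demo_slot, NR_LEVELS and GFN_SHIFT are names invented
for the example). With one reverse-map array per page size stored in the
slot, a single formula serves 4K, 2M and 1G mappings, so __gfn_to_rmap() no
longer needs a special case for PT_PAGE_TABLE_LEVEL:

#include <stdio.h>
#include <stdlib.h>

#define NR_LEVELS 3                             /* stands in for KVM_NR_PAGE_SIZES: 4K, 2M, 1G */
#define GFN_SHIFT(level) (9 * ((level) - 1))    /* 4K gfns covered per entry: 1, 512, 512*512 */

struct demo_slot {
        unsigned long base_gfn;                 /* first guest frame number of the slot */
        unsigned long npages;                   /* slot size in 4K pages */
        unsigned long *rmap[NR_LEVELS];         /* one rmap array per page size */
};

/* Same rule for every level; compare __gfn_to_rmap() after this patch. */
static unsigned long *demo_gfn_to_rmap(struct demo_slot *slot,
                                        unsigned long gfn, int level)
{
        unsigned long idx = (gfn >> GFN_SHIFT(level)) -
                            (slot->base_gfn >> GFN_SHIFT(level));
        return &slot->rmap[level - 1][idx];
}

int main(void)
{
        struct demo_slot slot = { .base_gfn = 0x1000, .npages = 1024 };
        int level;

        /* Allocate one zero-filled rmap array per level, sized like the slot. */
        for (level = 1; level <= NR_LEVELS; ++level)
                slot.rmap[level - 1] = calloc((slot.npages >> GFN_SHIFT(level)) + 1,
                                              sizeof(unsigned long));

        *demo_gfn_to_rmap(&slot, 0x1234, 1) = 0xdeadbeef;
        printf("rmap entry for gfn 0x1234 at the 4K level: %lx\n",
               *demo_gfn_to_rmap(&slot, 0x1234, 1));

        for (level = 1; level <= NR_LEVELS; ++level)
                free(slot.rmap[level - 1]);
        return 0;
}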
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h |  1
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c |  6
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c |  4
-rw-r--r--  arch/powerpc/kvm/powerpc.c          |  8
-rw-r--r--  arch/x86/include/asm/kvm_host.h     |  2
-rw-r--r--  arch/x86/kvm/mmu.c                  |  5
-rw-r--r--  arch/x86/kvm/x86.c                  | 55
7 files changed, 48 insertions(+), 33 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 572ad0141268..a29e0918172a 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -221,6 +221,7 @@ struct revmap_entry {
 #define KVMPPC_GOT_PAGE         0x80

 struct kvm_arch_memory_slot {
+        unsigned long *rmap;
 };

 struct kvm_arch {
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 3c635c0616b0..d95d11322a15 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -705,7 +705,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 goto out_unlock;
         hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;

-        rmap = &memslot->rmap[gfn - memslot->base_gfn];
+        rmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
         lock_rmap(rmap);

         /* Check if we might have been invalidated; let the guest retry if so */
@@ -788,7 +788,7 @@ static int kvm_handle_hva_range(struct kvm *kvm,
                 for (; gfn < gfn_end; ++gfn) {
                         gfn_t gfn_offset = gfn - memslot->base_gfn;

-                        ret = handler(kvm, &memslot->rmap[gfn_offset], gfn);
+                        ret = handler(kvm, &memslot->arch.rmap[gfn_offset], gfn);
                         retval |= ret;
                 }
         }
@@ -1036,7 +1036,7 @@ long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
         unsigned long *rmapp, *map;

         preempt_disable();
-        rmapp = memslot->rmap;
+        rmapp = memslot->arch.rmap;
         map = memslot->dirty_bitmap;
         for (i = 0; i < memslot->npages; ++i) {
                 if (kvm_test_clear_dirty(kvm, rmapp))
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 5c70d19494f9..56ac1a5d9912 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -84,7 +84,7 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index,
         if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
                 return;

-        rmap = real_vmalloc_addr(&memslot->rmap[gfn - memslot->base_gfn]);
+        rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
         lock_rmap(rmap);

         head = *rmap & KVMPPC_RMAP_INDEX;
@@ -180,7 +180,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
         if (!slot_is_aligned(memslot, psize))
                 return H_PARAMETER;
         slot_fn = gfn - memslot->base_gfn;
-        rmap = &memslot->rmap[slot_fn];
+        rmap = &memslot->arch.rmap[slot_fn];

         if (!kvm->arch.using_mmu_notifiers) {
                 physp = kvm->arch.slot_phys[memslot->id];
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 87f4dc886076..879b14a61403 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -302,10 +302,18 @@ long kvm_arch_dev_ioctl(struct file *filp,
 void kvm_arch_free_memslot(struct kvm_memory_slot *free,
                            struct kvm_memory_slot *dont)
 {
+        if (!dont || free->arch.rmap != dont->arch.rmap) {
+                vfree(free->arch.rmap);
+                free->arch.rmap = NULL;
+        }
 }

 int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
 {
+        slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
+        if (!slot->arch.rmap)
+                return -ENOMEM;
+
         return 0;
 }

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 48e713188469..1309e69b57fa 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -504,7 +504,7 @@ struct kvm_lpage_info {
 };

 struct kvm_arch_memory_slot {
-        unsigned long *rmap_pde[KVM_NR_PAGE_SIZES - 1];
+        unsigned long *rmap[KVM_NR_PAGE_SIZES];
         struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
 };

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ee768bb2367f..aa9a987ddefb 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -970,11 +970,8 @@ static unsigned long *__gfn_to_rmap(gfn_t gfn, int level,
 {
         unsigned long idx;

-        if (likely(level == PT_PAGE_TABLE_LEVEL))
-                return &slot->rmap[gfn - slot->base_gfn];
-
         idx = gfn_to_index(gfn, slot->base_gfn, level);
-        return &slot->arch.rmap_pde[level - PT_DIRECTORY_LEVEL][idx];
+        return &slot->arch.rmap[level - PT_PAGE_TABLE_LEVEL][idx];
 }

 /*
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index abc039d78428..ebf2109318e0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6303,14 +6303,18 @@ void kvm_arch_free_memslot(struct kvm_memory_slot *free,
 {
         int i;

-        for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
-                if (!dont || free->arch.rmap_pde[i] != dont->arch.rmap_pde[i]) {
-                        kvm_kvfree(free->arch.rmap_pde[i]);
-                        free->arch.rmap_pde[i] = NULL;
+        for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
+                if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) {
+                        kvm_kvfree(free->arch.rmap[i]);
+                        free->arch.rmap[i] = NULL;
                 }
-                if (!dont || free->arch.lpage_info[i] != dont->arch.lpage_info[i]) {
-                        kvm_kvfree(free->arch.lpage_info[i]);
-                        free->arch.lpage_info[i] = NULL;
+                if (i == 0)
+                        continue;
+
+                if (!dont || free->arch.lpage_info[i - 1] !=
+                     dont->arch.lpage_info[i - 1]) {
+                        kvm_kvfree(free->arch.lpage_info[i - 1]);
+                        free->arch.lpage_info[i - 1] = NULL;
                 }
         }
 }
@@ -6319,28 +6323,30 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
 {
         int i;

-        for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+        for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
                 unsigned long ugfn;
                 int lpages;
-                int level = i + 2;
+                int level = i + 1;

                 lpages = gfn_to_index(slot->base_gfn + npages - 1,
                                       slot->base_gfn, level) + 1;

-                slot->arch.rmap_pde[i] =
-                        kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap_pde[i]));
-                if (!slot->arch.rmap_pde[i])
+                slot->arch.rmap[i] =
+                        kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i]));
+                if (!slot->arch.rmap[i])
                         goto out_free;
+                if (i == 0)
+                        continue;

-                slot->arch.lpage_info[i] =
-                        kvm_kvzalloc(lpages * sizeof(*slot->arch.lpage_info[i]));
-                if (!slot->arch.lpage_info[i])
+                slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages *
+                                        sizeof(*slot->arch.lpage_info[i - 1]));
+                if (!slot->arch.lpage_info[i - 1])
                         goto out_free;

                 if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
-                        slot->arch.lpage_info[i][0].write_count = 1;
+                        slot->arch.lpage_info[i - 1][0].write_count = 1;
                 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
-                        slot->arch.lpage_info[i][lpages - 1].write_count = 1;
+                        slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1;
                 ugfn = slot->userspace_addr >> PAGE_SHIFT;
                 /*
                  * If the gfn and userspace address are not aligned wrt each
@@ -6352,18 +6358,21 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
                         unsigned long j;

                         for (j = 0; j < lpages; ++j)
-                                slot->arch.lpage_info[i][j].write_count = 1;
+                                slot->arch.lpage_info[i - 1][j].write_count = 1;
                 }
         }

         return 0;

 out_free:
-        for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
-                kvm_kvfree(slot->arch.rmap_pde[i]);
-                kvm_kvfree(slot->arch.lpage_info[i]);
-                slot->arch.rmap_pde[i] = NULL;
-                slot->arch.lpage_info[i] = NULL;
+        for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
+                kvm_kvfree(slot->arch.rmap[i]);
+                slot->arch.rmap[i] = NULL;
+                if (i == 0)
+                        continue;
+
+                kvm_kvfree(slot->arch.lpage_info[i - 1]);
+                slot->arch.lpage_info[i - 1] = NULL;
         }
         return -ENOMEM;
 }