 arch/ia64/include/asm/kvm_host.h    |  3
 arch/powerpc/include/asm/kvm_host.h |  3
 arch/s390/include/asm/kvm_host.h    |  6
 arch/x86/include/asm/kvm_host.h     | 12
 arch/x86/kvm/mmu.c                  | 30
 arch/x86/kvm/paging_tmpl.h          |  3
 include/linux/kvm_host.h            |  2
 virt/kvm/kvm_main.c                 | 56
 8 files changed, 73 insertions(+), 42 deletions(-)
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 9cf1c4b1f92f..d9b6325a9328 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -235,7 +235,8 @@ struct kvm_vm_data {
 #define KVM_REQ_PTC_G		32
 #define KVM_REQ_RESUME		33
 
-#define KVM_PAGES_PER_HPAGE	1
+#define KVM_NR_PAGE_SIZES	1
+#define KVM_PAGES_PER_HPAGE(x)	1
 
 struct kvm;
 struct kvm_vcpu;
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index d4caa6127f55..c9c930ed11d7 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -34,7 +34,8 @@
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 
 /* We don't currently support large pages. */
-#define KVM_PAGES_PER_HPAGE	(1UL << 31)
+#define KVM_NR_PAGE_SIZES	1
+#define KVM_PAGES_PER_HPAGE(x)	(1UL<<31)
 
 struct kvm;
 struct kvm_run;
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 75535d4d7a05..78e07a622b45 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -40,7 +40,11 @@ struct sca_block {
 	struct sca_entry cpu[64];
 } __attribute__((packed));
 
-#define KVM_PAGES_PER_HPAGE	256
+#define KVM_NR_PAGE_SIZES	2
+#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + ((x) - 1) * 8)
+#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
+#define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
+#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)
 
 #define CPUSTAT_HOST	0x80000000
 #define CPUSTAT_WAIT	0x10000000
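
The s390 step of 8 bits per level reproduces the old constant; a standalone sketch (not part of the patch, and assuming the usual PAGE_SHIFT of 12) checks the arithmetic:

/* Standalone sketch: verify the s390 hugepage macros by hand. */
#include <stdio.h>

#define PAGE_SHIFT 12			/* assumption: 4 KiB base pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + ((x) - 1) * 8)
#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)

int main(void)
{
	/* Level 1: shift 12 -> 4 KiB, i.e. the base page size. */
	printf("level 1: %lu KiB, %lu pages\n",
	       KVM_HPAGE_SIZE(1) >> 10, KVM_PAGES_PER_HPAGE(1));
	/* Level 2: shift 20 -> 1 MiB segment, 256 pages -- the same
	 * value as the old fixed KVM_PAGES_PER_HPAGE 256. */
	printf("level 2: %lu KiB, %lu pages\n",
	       KVM_HPAGE_SIZE(2) >> 10, KVM_PAGES_PER_HPAGE(2));
	return 0;
}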
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 19027ab20412..30b625d8e5f0 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -54,12 +54,12 @@
 #define INVALID_PAGE	(~(hpa_t)0)
 #define UNMAPPED_GVA	(~(gpa_t)0)
 
-/* shadow tables are PAE even on non-PAE hosts */
-#define KVM_HPAGE_SHIFT	21
-#define KVM_HPAGE_SIZE	(1UL << KVM_HPAGE_SHIFT)
-#define KVM_HPAGE_MASK	(~(KVM_HPAGE_SIZE - 1))
-
-#define KVM_PAGES_PER_HPAGE	(KVM_HPAGE_SIZE / PAGE_SIZE)
+/* KVM Hugepage definitions for x86 */
+#define KVM_NR_PAGE_SIZES	2
+#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + (((x) - 1) * 9))
+#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
+#define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
+#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)
 
 #define DE_VECTOR 0
 #define DB_VECTOR 1
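
On x86 the step is 9 bits per level, matching the 512-entry page tables; a similar standalone sketch (again assuming PAGE_SHIFT = 12) shows the new macros collapse to the old 2 MB constants at PT_DIRECTORY_LEVEL:

/* Standalone sketch: the level-parameterized x86 macros vs. the
 * fixed 2 MB definitions they replace. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + (((x) - 1) * 9))
#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)

int main(void)
{
	/* Level 2 (PT_DIRECTORY_LEVEL): shift 21 -> 2 MB, 512 pages,
	 * identical to the old KVM_HPAGE_SHIFT 21. */
	printf("level 2: shift %d, %lu pages\n",
	       KVM_HPAGE_SHIFT(2), KVM_PAGES_PER_HPAGE(2));
	/* Level 3 would give shift 30 -> 1 GB; on the header side,
	 * supporting it is just a matter of KVM_NR_PAGE_SIZES = 3. */
	printf("level 3: shift %d, %lu pages\n",
	       KVM_HPAGE_SHIFT(3), KVM_PAGES_PER_HPAGE(3));
	return 0;
}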
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 12974de88aa5..b67585c1ef08 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -394,9 +394,9 @@ static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
 {
 	unsigned long idx;
 
-	idx = (gfn / KVM_PAGES_PER_HPAGE) -
-	      (slot->base_gfn / KVM_PAGES_PER_HPAGE);
-	return &slot->lpage_info[idx].write_count;
+	idx = (gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL)) -
+	      (slot->base_gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL));
+	return &slot->lpage_info[0][idx].write_count;
 }
 
 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
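
The index arithmetic divides both gfn and slot base down to large-page granularity before subtracting, so a slot that starts mid large page still indexes from 0. A worked example with hypothetical values (512 being KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) on x86):

#include <stdio.h>

int main(void)
{
	/* Hypothetical memslot starting mid large page. */
	unsigned long base_gfn = 0x1100;	/* 0x1100 / 512 = 8  */
	unsigned long gfn      = 0x1500;	/* 0x1500 / 512 = 10 */
	unsigned long idx = (gfn / 512) - (base_gfn / 512);

	printf("idx = %lu\n", idx);	/* 2: third 2 MB region of the slot */
	return 0;
}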
@@ -485,10 +485,10 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
 	if (!lpage)
 		return &slot->rmap[gfn - slot->base_gfn];
 
-	idx = (gfn / KVM_PAGES_PER_HPAGE) -
-	      (slot->base_gfn / KVM_PAGES_PER_HPAGE);
+	idx = (gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL)) -
+	      (slot->base_gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL));
 
-	return &slot->lpage_info[idx].rmap_pde;
+	return &slot->lpage_info[0][idx].rmap_pde;
 }
 
 /*
@@ -731,11 +731,11 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 		end = start + (memslot->npages << PAGE_SHIFT);
 		if (hva >= start && hva < end) {
 			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
+			int idx = gfn_offset /
+				  KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL);
 			retval |= handler(kvm, &memslot->rmap[gfn_offset]);
 			retval |= handler(kvm,
-					&memslot->lpage_info[
-					gfn_offset /
-					KVM_PAGES_PER_HPAGE].rmap_pde);
+					&memslot->lpage_info[0][idx].rmap_pde);
 		}
 	}
 
@@ -1876,8 +1876,9 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 	pfn_t pfn;
 	unsigned long mmu_seq;
 
-	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
-		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
+	if (is_largepage_backed(vcpu, gfn &
+			~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1))) {
+		gfn &= ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1);
 		largepage = 1;
 	}
 
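Rounding a gfn down to its large-page frame uses the usual power-of-two mask; a tiny illustration with made-up numbers:

#include <stdio.h>

int main(void)
{
	/* KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) is 512 on x86, so
	 * the mask clears the low 9 bits of the gfn. */
	unsigned long gfn = 0x1234;
	unsigned long large_gfn = gfn & ~(512UL - 1);

	printf("0x%lx -> 0x%lx\n", gfn, large_gfn);	/* 0x1234 -> 0x1200 */
	return 0;
}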
@@ -2082,8 +2083,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
 	if (r)
 		return r;
 
-	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
-		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
+	if (is_largepage_backed(vcpu, gfn &
+			~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1))) {
+		gfn &= ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1);
 		largepage = 1;
 	}
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
@@ -2485,7 +2487,7 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
 
 	if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
-		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
+		gfn &= ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1);
 		vcpu->arch.update_pte.largepage = 1;
 	}
 	vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 322e8113aeea..53e129cec5fd 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -401,7 +401,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 
 	if (walker.level == PT_DIRECTORY_LEVEL) {
 		gfn_t large_gfn;
-		large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1);
+		large_gfn = walker.gfn &
+			    ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1);
 		if (is_largepage_backed(vcpu, large_gfn)) {
 			walker.gfn = large_gfn;
 			largepage = 1;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6988858dc56e..06af936a250a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -103,7 +103,7 @@ struct kvm_memory_slot {
 	struct {
 		unsigned long rmap_pde;
 		int write_count;
-	} *lpage_info;
+	} *lpage_info[KVM_NR_PAGE_SIZES - 1];
 	unsigned long userspace_addr;
 	int user_alloc;
 };
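
With lpage_info now an array, index 0 holds the first large-page size; level 1, the base page size, keeps using slot->rmap. A sketch of the implied mapping (the helper name is ours, not the patch's):

/* Level N (N >= 2) is tracked in slot->lpage_info[N - 2];
 * PT_DIRECTORY_LEVEL (2) therefore lands at index 0, matching the
 * lpage_info[0][idx] accesses in mmu.c above. */
static inline int lpage_info_idx(int level)
{
	return level - 2;
}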
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1da8072d61b1..8361662e7e0a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1001,19 +1001,25 @@ out:
 static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
 				  struct kvm_memory_slot *dont)
 {
+	int i;
+
 	if (!dont || free->rmap != dont->rmap)
 		vfree(free->rmap);
 
 	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
 		vfree(free->dirty_bitmap);
 
-	if (!dont || free->lpage_info != dont->lpage_info)
-		vfree(free->lpage_info);
+
+	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+		if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
+			vfree(free->lpage_info[i]);
+			free->lpage_info[i] = NULL;
+		}
+	}
 
 	free->npages = 0;
 	free->dirty_bitmap = NULL;
 	free->rmap = NULL;
-	free->lpage_info = NULL;
 }
 
 void kvm_free_physmem(struct kvm *kvm)
@@ -1087,7 +1093,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	int r;
 	gfn_t base_gfn;
 	unsigned long npages, ugfn;
-	unsigned long largepages, i;
+	int lpages;
+	unsigned long i, j;
 	struct kvm_memory_slot *memslot;
 	struct kvm_memory_slot old, new;
 
@@ -1161,33 +1168,48 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	else
 		new.userspace_addr = 0;
 	}
-	if (npages && !new.lpage_info) {
-		largepages = 1 + (base_gfn + npages - 1) / KVM_PAGES_PER_HPAGE;
-		largepages -= base_gfn / KVM_PAGES_PER_HPAGE;
+	if (!npages)
+		goto skip_lpage;
 
-		new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));
+	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+		int level = i + 2;
 
-		if (!new.lpage_info)
+		/* Avoid unused variable warning if no large pages */
+		(void)level;
+
+		if (new.lpage_info[i])
+			continue;
+
+		lpages = 1 + (base_gfn + npages - 1) /
+			     KVM_PAGES_PER_HPAGE(level);
+		lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);
+
+		new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));
+
+		if (!new.lpage_info[i])
 			goto out_free;
 
-		memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));
+		memset(new.lpage_info[i], 0,
+		       lpages * sizeof(*new.lpage_info[i]));
 
-		if (base_gfn % KVM_PAGES_PER_HPAGE)
-			new.lpage_info[0].write_count = 1;
-		if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
-			new.lpage_info[largepages-1].write_count = 1;
+		if (base_gfn % KVM_PAGES_PER_HPAGE(level))
+			new.lpage_info[i][0].write_count = 1;
+		if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE(level))
+			new.lpage_info[i][lpages - 1].write_count = 1;
 		ugfn = new.userspace_addr >> PAGE_SHIFT;
 		/*
 		 * If the gfn and userspace address are not aligned wrt each
 		 * other, or if explicitly asked to, disable large page
 		 * support for this slot
 		 */
-		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE - 1) ||
+		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
 		    !largepages_enabled)
-			for (i = 0; i < largepages; ++i)
-				new.lpage_info[i].write_count = 1;
+			for (j = 0; j < lpages; ++j)
+				new.lpage_info[i][j].write_count = 1;
 	}
 
+skip_lpage:
+
 	/* Allocate page dirty bitmap if needed */
 	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
 		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
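
To see the allocation size and boundary marking at work, a standalone sketch with hypothetical slot numbers (512 pages per large page, i.e. x86 level 2):

#include <stdio.h>

int main(void)
{
	unsigned long base_gfn = 300, npages = 1000, per_hpage = 512;
	int lpages = 1 + (base_gfn + npages - 1) / per_hpage
		       - base_gfn / per_hpage;

	/* The slot spans gfns 300..1299, touching large-page frames
	 * 0..2, so lpages = 3. */
	printf("lpages = %d\n", lpages);

	/* Head frame (gfn 0..511) and tail frame (gfn 1024..1535) are
	 * only partially covered, so their write_count starts at 1 and
	 * only the fully covered middle frame may map large. */
	printf("head partial: %d, tail partial: %d\n",
	       base_gfn % per_hpage != 0,
	       (base_gfn + npages) % per_hpage != 0);
	return 0;
}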