author    | Joerg Roedel <joerg.roedel@amd.com> | 2009-06-19 09:16:23 -0400
committer | Avi Kivity <avi@redhat.com>         | 2009-09-10 01:33:02 -0400
commit    | ec04b2604c3707a46db1d26d98f82b11d0844669
tree      | a7332c98d61b08b95ff4dc769f87c5861845ffe5 /virt
parent    | f340ca0f065ecf3e7549687e763370106dacb2c2
KVM: Prepare memslot data structures for multiple hugepage sizes
[avi: fix build on non-x86]
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
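For orientation, here is a rough sketch of the memslot shape this patch prepares for: instead of a single lpage_info array, the slot carries one array per supported hugepage level, indexed as lpage_info[level - 2] (matching the `int level = i + 2;` in the hunk below). The struct layout, field names, and the x86-style KVM_HPAGE_SHIFT/KVM_PAGES_PER_HPAGE definitions shown here are assumptions for illustration; the real arch-side definitions live outside the 'virt' diff shown on this page.

```c
/* Illustrative sketch only -- not the exact kernel definitions. */
#define PAGE_SHIFT		12
#define KVM_NR_PAGE_SIZES	2	/* assumed: 4K base pages plus one hugepage level */
#define KVM_HPAGE_SHIFT(level)	(PAGE_SHIFT + ((level) - 1) * 9)	/* level 2 -> 2M on x86 */
#define KVM_PAGES_PER_HPAGE(level) (1UL << (KVM_HPAGE_SHIFT(level) - PAGE_SHIFT))

struct kvm_lpage_info_sketch {
	int write_count;	/* non-zero: map this range with small pages only */
};

struct kvm_memory_slot_sketch {
	unsigned long base_gfn;
	unsigned long npages;
	/* one array per hugepage level: index 0 is level 2, index 1 level 3, ... */
	struct kvm_lpage_info_sketch *lpage_info[KVM_NR_PAGE_SIZES - 1];
};
```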
Diffstat (limited to 'virt')
-rw-r--r-- | virt/kvm/kvm_main.c | 56
1 file changed, 39 insertions, 17 deletions
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1da8072d61b1..8361662e7e0a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1001,19 +1001,25 @@ out:
 static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
 				  struct kvm_memory_slot *dont)
 {
+	int i;
+
 	if (!dont || free->rmap != dont->rmap)
 		vfree(free->rmap);
 
 	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
 		vfree(free->dirty_bitmap);
 
-	if (!dont || free->lpage_info != dont->lpage_info)
-		vfree(free->lpage_info);
+
+	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+		if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
+			vfree(free->lpage_info[i]);
+			free->lpage_info[i] = NULL;
+		}
+	}
 
 	free->npages = 0;
 	free->dirty_bitmap = NULL;
 	free->rmap = NULL;
-	free->lpage_info = NULL;
 }
 
 void kvm_free_physmem(struct kvm *kvm)
@@ -1087,7 +1093,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	int r;
 	gfn_t base_gfn;
 	unsigned long npages, ugfn;
-	unsigned long largepages, i;
+	int lpages;
+	unsigned long i, j;
 	struct kvm_memory_slot *memslot;
 	struct kvm_memory_slot old, new;
 
@@ -1161,33 +1168,48 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		else
 			new.userspace_addr = 0;
 	}
-	if (npages && !new.lpage_info) {
-		largepages = 1 + (base_gfn + npages - 1) / KVM_PAGES_PER_HPAGE;
-		largepages -= base_gfn / KVM_PAGES_PER_HPAGE;
+	if (!npages)
+		goto skip_lpage;
 
-		new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));
+	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+		int level = i + 2;
 
-		if (!new.lpage_info)
+		/* Avoid unused variable warning if no large pages */
+		(void)level;
+
+		if (new.lpage_info[i])
+			continue;
+
+		lpages = 1 + (base_gfn + npages - 1) /
+			     KVM_PAGES_PER_HPAGE(level);
+		lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);
+
+		new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));
+
+		if (!new.lpage_info[i])
 			goto out_free;
 
-		memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));
+		memset(new.lpage_info[i], 0,
+		       lpages * sizeof(*new.lpage_info[i]));
 
-		if (base_gfn % KVM_PAGES_PER_HPAGE)
-			new.lpage_info[0].write_count = 1;
-		if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
-			new.lpage_info[largepages-1].write_count = 1;
+		if (base_gfn % KVM_PAGES_PER_HPAGE(level))
+			new.lpage_info[i][0].write_count = 1;
+		if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE(level))
+			new.lpage_info[i][lpages - 1].write_count = 1;
 		ugfn = new.userspace_addr >> PAGE_SHIFT;
 		/*
 		 * If the gfn and userspace address are not aligned wrt each
 		 * other, or if explicitly asked to, disable large page
 		 * support for this slot
 		 */
-		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE - 1) ||
+		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
 		    !largepages_enabled)
-			for (i = 0; i < largepages; ++i)
-				new.lpage_info[i].write_count = 1;
+			for (j = 0; j < lpages; ++j)
+				new.lpage_info[i][j].write_count = 1;
 	}
 
+skip_lpage:
+
 	/* Allocate page dirty bitmap if needed */
 	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
 		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
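To make the arithmetic in the last hunk concrete, the stand-alone user-space sketch below (with made-up slot numbers; none of this is kernel code) computes how many large pages of a given level a slot touches, using the same `1 + (base_gfn + npages - 1) / pages_per_hpage - base_gfn / pages_per_hpage` formula, and shows why a slot that starts or ends in the middle of a large page gets its first or last lpage_info entry marked with write_count = 1.

```c
#include <stdio.h>

/* Assumed x86-style geometry: level 2 = 2M pages = 512 small pages. */
#define PAGES_PER_HPAGE(level) (1UL << (((level) - 1) * 9))

int main(void)
{
	unsigned long base_gfn = 0x100;	/* hypothetical slot start (gfn) */
	unsigned long npages   = 0x300;	/* hypothetical slot size in 4K pages */
	int level = 2;			/* 2M large pages */

	/* Same formula as the patch: number of large pages the slot touches. */
	unsigned long lpages = 1 + (base_gfn + npages - 1) / PAGES_PER_HPAGE(level)
				 - base_gfn / PAGES_PER_HPAGE(level);

	printf("slot spans %lu level-%d pages\n", lpages, level);

	/* Partially covered large pages at either end must stay 4K-mapped. */
	if (base_gfn % PAGES_PER_HPAGE(level))
		printf("first entry marked write_count = 1\n");
	if ((base_gfn + npages) % PAGES_PER_HPAGE(level))
		printf("last entry marked write_count = 1\n");
	return 0;
}
```

With these numbers the slot covers gfns 0x100-0x3ff, i.e. two 512-page (2M) frames; it starts mid-frame but ends on a frame boundary, so only the first entry is forced to small pages.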