path: root/tools/testing/selftests
author	Peter Xu <peterx@redhat.com>	2019-08-29 21:36:17 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2019-09-24 07:37:20 -0400
commit	338eb29876b9e571273175f167fcd58d9441ac8e (patch)
tree	5ee9b39e5fbadaa27689555277eb223723ad1801 /tools/testing/selftests
parent	12c386b2308344f2ce8819ad11aab466166f276d (diff)
KVM: selftests: Create VM earlier for dirty log test
Since the previous patch removed the dependency on the vm type, we can now create the vm much earlier. Note that to move it earlier we use an approximation of the number of extra pages needed, but it should be fine.

This prepares for the follow-up patches to finally remove the duplication of guest mode parsing.

Reviewed-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
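The approximation mentioned above works out as follows: the test dirties a 1G region, and the patch reserves extra pages for twice that range, counted in 4K pages, the smallest guest page size. A standalone sketch of the arithmetic (hypothetical program, not part of the patch):

/*
 * Sketch of the extra-pages approximation: reserve 2x the 1G dirty
 * range, counted in 4K pages (the smallest guest page size).
 */
#include <stdio.h>

#define DIRTY_MEM_BITS 30	/* 1G test region, as in the patch */
#define PAGE_SHIFT_4K  12	/* smallest supported guest page size */

int main(void)
{
	/* 2 * (1G / 4K) = 2ul << 18 = 524288 pages, i.e. 2G worth at 4K */
	unsigned long extra_pages = 2ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K);

	printf("extra pages reserved: %lu (%lu MiB at 4K)\n",
	       extra_pages, extra_pages >> 8);
	return 0;
}

Any larger guest page size (e.g. 64K) maps the same range with fewer page-table pages, so the 4K-based count is a safe upper bound for all architectures, as the comment added by the patch notes.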
Diffstat (limited to 'tools/testing/selftests')
-rw-r--r--	tools/testing/selftests/kvm/dirty_log_test.c	19
1 file changed, 16 insertions(+), 3 deletions(-)
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index 6737b26b975e..cf2099abb121 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -263,6 +263,9 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
 	return vm;
 }
 
+#define DIRTY_MEM_BITS 30 /* 1G */
+#define PAGE_SHIFT_4K  12
+
 static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 		     unsigned long interval, uint64_t phys_offset)
 {
@@ -272,6 +275,18 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 	uint64_t max_gfn;
 	unsigned long *bmap;
 
+	/*
+	 * We reserve page table for 2 times of extra dirty mem which
+	 * will definitely cover the original (1G+) test range. Here
+	 * we do the calculation with 4K page size which is the
+	 * smallest so the page number will be enough for all archs
+	 * (e.g., 64K page size guest will need even less memory for
+	 * page tables).
+	 */
+	vm = create_vm(mode, VCPU_ID,
+		       2ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K),
+		       guest_code);
+
 	switch (mode) {
 	case VM_MODE_P52V48_4K:
 		guest_pa_bits = 52;
@@ -318,7 +333,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 	 * A little more than 1G of guest page sized pages. Cover the
 	 * case where the size is not aligned to 64 pages.
 	 */
-	guest_num_pages = (1ul << (30 - guest_page_shift)) + 16;
+	guest_num_pages = (1ul << (DIRTY_MEM_BITS - guest_page_shift)) + 16;
 #ifdef __s390x__
 	/* Round up to multiple of 1M (segment size) */
 	guest_num_pages = (guest_num_pages + 0xff) & ~0xffUL;
@@ -344,8 +359,6 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 	bmap = bitmap_alloc(host_num_pages);
 	host_bmap_track = bitmap_alloc(host_num_pages);
 
-	vm = create_vm(mode, VCPU_ID, guest_num_pages, guest_code);
-
 #ifdef USE_CLEAR_DIRTY_LOG
 	struct kvm_enable_cap cap = {};
 
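The net effect of the hunks above: create_vm() now runs before the switch (mode) block, and the old call after the bitmap allocations is removed. For the constant substitution in the third hunk, a quick standalone check (the guest page shifts of 12 and 16 below are illustrative values, not taken from the patch) shows what guest_num_pages evaluates to:

#include <stdio.h>

#define DIRTY_MEM_BITS 30	/* 1G, as defined by the patch */

int main(void)
{
	/* Illustrative guest page shifts: 4K (12) and 64K (16). */
	unsigned int shifts[] = { 12, 16 };

	for (unsigned int i = 0; i < 2; i++) {
		unsigned long pages =
			(1ul << (DIRTY_MEM_BITS - shifts[i])) + 16;

		printf("page shift %u -> %lu guest pages\n",
		       shifts[i], pages);
	}
	return 0;
}

The +16 keeps the region deliberately unaligned to 64 pages, which is exactly the case the surrounding comment says the test wants to cover.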