-rw-r--r--   arch/arm/kvm/mmu.c                    3
-rw-r--r--   arch/mips/kvm/mips.c                  4
-rw-r--r--   arch/powerpc/kvm/book3s_64_mmu_hv.c   2
-rw-r--r--   arch/powerpc/kvm/book3s_hv.c          8
-rw-r--r--   arch/powerpc/kvm/book3s_pr.c          4
-rw-r--r--   arch/s390/kvm/kvm-s390.c              4
-rw-r--r--   arch/x86/kvm/x86.c                    4
-rw-r--r--   virt/kvm/kvm_main.c                  16
8 files changed, 31 insertions, 14 deletions
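
The change is mechanical: every direct read of kvm->memslots is routed through the
kvm_memslots() accessor before being handed to id_to_memslot() or
kvm_for_each_memslot(), so the SRCU-protected dereference happens in one place.
A rough before/after sketch of the call-site pattern is below; the accessor body
shown is an assumption about its typical SRCU-aware definition (the real one lives
in include/linux/kvm_host.h), and lookup_slot_example() is a hypothetical wrapper,
not a function from this patch.

/*
 * Illustrative sketch only, not a literal excerpt from this patch.
 * Assumption: kvm_memslots() wraps an SRCU-aware dereference of
 * kvm->memslots, valid under kvm->srcu or kvm->slots_lock.
 */
static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return srcu_dereference_check(kvm->memslots, &kvm->srcu,
				      lockdep_is_held(&kvm->slots_lock));
}

static void lookup_slot_example(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	/* Before: each call site reads the pointer directly. */
	memslot = id_to_memslot(kvm->memslots, log->slot);

	/* After: fetch the table once through the accessor, then index it. */
	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
}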
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 1d5accbd3dcf..6f0f8f3ac7df 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1155,7 +1155,8 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
  */
 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
 {
-	struct kvm_memory_slot *memslot = id_to_memslot(kvm->memslots, slot);
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
 	phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
 	phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
 
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index a8e660a44474..bc5ddd973b44 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -968,6 +968,7 @@ out:
 /* Get (and clear) the dirty memory log for a memory slot. */
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 {
+	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
 	unsigned long ga, ga_end;
 	int is_dirty = 0;
@@ -982,7 +983,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 
 	/* If nothing is dirty, don't bother messing with page tables. */
 	if (is_dirty) {
-		memslot = id_to_memslot(kvm->memslots, log->slot);
+		slots = kvm_memslots(kvm);
+		memslot = id_to_memslot(slots, log->slot);
 
 		ga = memslot->base_gfn << PAGE_SHIFT;
 		ga_end = ga + (memslot->npages << PAGE_SHIFT);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 1a4acf8bf4f4..dab68b7af3f2 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -650,7 +650,7 @@ static void kvmppc_rmap_reset(struct kvm *kvm)
 	int srcu_idx;
 
 	srcu_idx = srcu_read_lock(&kvm->srcu);
-	slots = kvm->memslots;
+	slots = kvm_memslots(kvm);
 	kvm_for_each_memslot(memslot, slots) {
 		/*
 		 * This assumes it is acceptable to lose reference and
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index df81caab7383..6aff5a990492 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2321,6 +2321,7 @@ static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm,
 static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
 					 struct kvm_dirty_log *log)
 {
+	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
 	int r;
 	unsigned long n;
@@ -2331,7 +2332,8 @@ static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
 	if (log->slot >= KVM_USER_MEM_SLOTS)
 		goto out;
 
-	memslot = id_to_memslot(kvm->memslots, log->slot);
+	slots = kvm_memslots(kvm);
+	memslot = id_to_memslot(slots, log->slot);
 	r = -ENOENT;
 	if (!memslot->dirty_bitmap)
 		goto out;
@@ -2384,6 +2386,7 @@ static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
 					const struct kvm_memory_slot *old)
 {
 	unsigned long npages = mem->memory_size >> PAGE_SHIFT;
+	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
 
 	if (npages && old->npages) {
@@ -2393,7 +2396,8 @@ static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
 		 * since the rmap array starts out as all zeroes,
 		 * i.e. no pages are dirty.
 		 */
-		memslot = id_to_memslot(kvm->memslots, mem->slot);
+		slots = kvm_memslots(kvm);
+		memslot = id_to_memslot(slots, mem->slot);
 		kvmppc_hv_get_dirty_log(kvm, memslot, NULL);
 	}
 }
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index f57383941d03..c01dfc798c66 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1530,6 +1530,7 @@ out:
 static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
 					 struct kvm_dirty_log *log)
 {
+	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
 	struct kvm_vcpu *vcpu;
 	ulong ga, ga_end;
@@ -1545,7 +1546,8 @@ static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
 
 	/* If nothing is dirty, don't bother messing with page tables. */
 	if (is_dirty) {
-		memslot = id_to_memslot(kvm->memslots, log->slot);
+		slots = kvm_memslots(kvm);
+		memslot = id_to_memslot(slots, log->slot);
 
 		ga = memslot->base_gfn << PAGE_SHIFT;
 		ga_end = ga + (memslot->npages << PAGE_SHIFT);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index d461f8a15c07..a05107e9b2bf 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -236,6 +236,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 {
 	int r;
 	unsigned long n;
+	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
 	int is_dirty = 0;
 
@@ -245,7 +246,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	if (log->slot >= KVM_USER_MEM_SLOTS)
 		goto out;
 
-	memslot = id_to_memslot(kvm->memslots, log->slot);
+	slots = kvm_memslots(kvm);
+	memslot = id_to_memslot(slots, log->slot);
 	r = -ENOENT;
 	if (!memslot->dirty_bitmap)
 		goto out;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d42d8ace90f1..8918e23e0e8e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7782,6 +7782,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 				const struct kvm_memory_slot *old,
 				enum kvm_mr_change change)
 {
+	struct kvm_memslots *slots;
 	struct kvm_memory_slot *new;
 	int nr_mmu_pages = 0;
 
@@ -7803,7 +7804,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
 
 	/* It's OK to get 'new' slot here as it has already been installed */
-	new = id_to_memslot(kvm->memslots, mem->slot);
+	slots = kvm_memslots(kvm);
+	new = id_to_memslot(slots, mem->slot);
 
 	/*
 	 * Dirty logging tracks sptes in 4k granularity, meaning that large
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index e299763ef744..42df724071c0 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -734,7 +734,7 @@ static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
 static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
 		struct kvm_memslots *slots)
 {
-	struct kvm_memslots *old_memslots = kvm->memslots;
+	struct kvm_memslots *old_memslots = kvm_memslots(kvm);
 
 	/*
 	 * Set the low bit in the generation, which disables SPTE caching
@@ -799,7 +799,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
 		goto out;
 
-	slot = id_to_memslot(kvm->memslots, mem->slot);
+	slot = id_to_memslot(kvm_memslots(kvm), mem->slot);
 	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
 	npages = mem->memory_size >> PAGE_SHIFT;
 
@@ -842,7 +842,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
 		/* Check for overlaps */
 		r = -EEXIST;
-		kvm_for_each_memslot(slot, kvm->memslots) {
+		kvm_for_each_memslot(slot, kvm_memslots(kvm)) {
 			if ((slot->id >= KVM_USER_MEM_SLOTS) ||
 			    (slot->id == mem->slot))
 				continue;
@@ -873,7 +873,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
 	if (!slots)
 		goto out_free;
-	memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
+	memcpy(slots, kvm_memslots(kvm), sizeof(struct kvm_memslots));
 
 	if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
 		slot = id_to_memslot(slots, mem->slot);
@@ -966,6 +966,7 @@ static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
 int kvm_get_dirty_log(struct kvm *kvm,
 			struct kvm_dirty_log *log, int *is_dirty)
 {
+	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
 	int r, i;
 	unsigned long n;
@@ -975,7 +976,8 @@ int kvm_get_dirty_log(struct kvm *kvm,
 	if (log->slot >= KVM_USER_MEM_SLOTS)
 		goto out;
 
-	memslot = id_to_memslot(kvm->memslots, log->slot);
+	slots = kvm_memslots(kvm);
+	memslot = id_to_memslot(slots, log->slot);
 	r = -ENOENT;
 	if (!memslot->dirty_bitmap)
 		goto out;
@@ -1024,6 +1026,7 @@ EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
 int kvm_get_dirty_log_protect(struct kvm *kvm,
 			struct kvm_dirty_log *log, bool *is_dirty)
 {
+	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
 	int r, i;
 	unsigned long n;
@@ -1034,7 +1037,8 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
 	if (log->slot >= KVM_USER_MEM_SLOTS)
 		goto out;
 
-	memslot = id_to_memslot(kvm->memslots, log->slot);
+	slots = kvm_memslots(kvm);
+	memslot = id_to_memslot(slots, log->slot);
 
 	dirty_bitmap = memslot->dirty_bitmap;
 	r = -ENOENT;