Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/arm/arm.c                   |   2
-rw-r--r--  virt/kvm/arm/mmu.c                   | 119
-rw-r--r--  virt/kvm/arm/vgic/vgic-kvm-device.c  |   2
3 files changed, 63 insertions(+), 60 deletions(-)
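
Every hunk below follows the same pattern: the stage-2 table walkers and limit checks now take the struct kvm pointer, so helpers such as stage2_pgd_index() or stage2_pud_offset() and limits such as kvm_phys_size() can be derived from the VM's own IPA size rather than from compile-time constants like KVM_PHYS_SIZE and S2_PGD_SIZE. As a rough sketch of the idea only (hand-rolled demo_* names for illustration, not the kernel's actual helper definitions, which live in the arch stage-2 page-table headers):

    /*
     * Illustrative sketch, not kernel code: a per-VM IPA size drives the
     * stage-2 geometry instead of a global compile-time constant.
     */
    struct demo_kvm {
            unsigned int phys_shift;        /* guest IPA size, in bits */
    };

    /* Analogous to kvm_phys_size(kvm): bytes of guest physical address space. */
    static inline unsigned long long demo_phys_size(const struct demo_kvm *kvm)
    {
            return 1ULL << kvm->phys_shift;
    }

    /* Analogous to stage2_pgd_index(kvm, addr): index scaled to this VM's PGD. */
    static inline unsigned long demo_pgd_index(const struct demo_kvm *kvm,
                                               unsigned long long addr,
                                               unsigned int pgdir_shift)
    {
            /* Number of top-level entries needed to cover this VM's IPA space. */
            unsigned long long ptrs_per_pgd = demo_phys_size(kvm) >> pgdir_shift;

            return (addr >> pgdir_shift) & (ptrs_per_pgd - 1);
    }

With that shape, two VMs configured with different phys_shift values walk differently sized stage-2 tables through the same code paths, which is why the kvm argument has to be threaded through every helper touched in this diff.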
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 327d0fd28380..43e716bc3f08 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -545,7 +545,7 @@ static void update_vttbr(struct kvm *kvm)
 
 	/* update vttbr to be used with the new vmid */
 	pgd_phys = virt_to_phys(kvm->arch.pgd);
-	BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
+	BUG_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm));
 	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
 	kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
 
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 4a285d760ce0..7e477b3cae5b 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -45,7 +45,6 @@ static phys_addr_t hyp_idmap_vector;
 
 static unsigned long io_map_base;
 
-#define S2_PGD_SIZE	(PTRS_PER_S2_PGD * sizeof(pgd_t))
 #define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
 
 #define KVM_S2PTE_FLAG_IS_IOMAP	(1UL << 0)
@@ -150,20 +149,20 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 
 static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
 {
-	pud_t *pud_table __maybe_unused = stage2_pud_offset(pgd, 0UL);
-	stage2_pgd_clear(pgd);
+	pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, pgd, 0UL);
+	stage2_pgd_clear(kvm, pgd);
 	kvm_tlb_flush_vmid_ipa(kvm, addr);
-	stage2_pud_free(pud_table);
+	stage2_pud_free(kvm, pud_table);
 	put_page(virt_to_page(pgd));
 }
 
 static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
 {
-	pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(pud, 0);
-	VM_BUG_ON(stage2_pud_huge(*pud));
-	stage2_pud_clear(pud);
+	pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(kvm, pud, 0);
+	VM_BUG_ON(stage2_pud_huge(kvm, *pud));
+	stage2_pud_clear(kvm, pud);
 	kvm_tlb_flush_vmid_ipa(kvm, addr);
-	stage2_pmd_free(pmd_table);
+	stage2_pmd_free(kvm, pmd_table);
 	put_page(virt_to_page(pud));
 }
 
@@ -252,7 +251,7 @@ static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
 		}
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 
-	if (stage2_pte_table_empty(start_pte))
+	if (stage2_pte_table_empty(kvm, start_pte))
 		clear_stage2_pmd_entry(kvm, pmd, start_addr);
 }
 
@@ -262,9 +261,9 @@ static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
 	phys_addr_t next, start_addr = addr;
 	pmd_t *pmd, *start_pmd;
 
-	start_pmd = pmd = stage2_pmd_offset(pud, addr);
+	start_pmd = pmd = stage2_pmd_offset(kvm, pud, addr);
 	do {
-		next = stage2_pmd_addr_end(addr, end);
+		next = stage2_pmd_addr_end(kvm, addr, end);
 		if (!pmd_none(*pmd)) {
 			if (pmd_thp_or_huge(*pmd)) {
 				pmd_t old_pmd = *pmd;
@@ -281,7 +280,7 @@ static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
 		}
 	} while (pmd++, addr = next, addr != end);
 
-	if (stage2_pmd_table_empty(start_pmd))
+	if (stage2_pmd_table_empty(kvm, start_pmd))
 		clear_stage2_pud_entry(kvm, pud, start_addr);
 }
 
@@ -291,14 +290,14 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
 	phys_addr_t next, start_addr = addr;
 	pud_t *pud, *start_pud;
 
-	start_pud = pud = stage2_pud_offset(pgd, addr);
+	start_pud = pud = stage2_pud_offset(kvm, pgd, addr);
 	do {
-		next = stage2_pud_addr_end(addr, end);
-		if (!stage2_pud_none(*pud)) {
-			if (stage2_pud_huge(*pud)) {
+		next = stage2_pud_addr_end(kvm, addr, end);
+		if (!stage2_pud_none(kvm, *pud)) {
+			if (stage2_pud_huge(kvm, *pud)) {
 				pud_t old_pud = *pud;
 
-				stage2_pud_clear(pud);
+				stage2_pud_clear(kvm, pud);
 				kvm_tlb_flush_vmid_ipa(kvm, addr);
 				kvm_flush_dcache_pud(old_pud);
 				put_page(virt_to_page(pud));
@@ -308,7 +307,7 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
 		}
 	} while (pud++, addr = next, addr != end);
 
-	if (stage2_pud_table_empty(start_pud))
+	if (stage2_pud_table_empty(kvm, start_pud))
 		clear_stage2_pgd_entry(kvm, pgd, start_addr);
 }
 
@@ -332,7 +331,7 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 	assert_spin_locked(&kvm->mmu_lock);
 	WARN_ON(size & ~PAGE_MASK);
 
-	pgd = kvm->arch.pgd + stage2_pgd_index(addr);
+	pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
 	do {
 		/*
 		 * Make sure the page table is still active, as another thread
@@ -341,8 +340,8 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 		 */
 		if (!READ_ONCE(kvm->arch.pgd))
 			break;
-		next = stage2_pgd_addr_end(addr, end);
-		if (!stage2_pgd_none(*pgd))
+		next = stage2_pgd_addr_end(kvm, addr, end);
+		if (!stage2_pgd_none(kvm, *pgd))
 			unmap_stage2_puds(kvm, pgd, addr, next);
 		/*
 		 * If the range is too large, release the kvm->mmu_lock
@@ -371,9 +370,9 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
 	pmd_t *pmd;
 	phys_addr_t next;
 
-	pmd = stage2_pmd_offset(pud, addr);
+	pmd = stage2_pmd_offset(kvm, pud, addr);
 	do {
-		next = stage2_pmd_addr_end(addr, end);
+		next = stage2_pmd_addr_end(kvm, addr, end);
 		if (!pmd_none(*pmd)) {
 			if (pmd_thp_or_huge(*pmd))
 				kvm_flush_dcache_pmd(*pmd);
@@ -389,11 +388,11 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
 	pud_t *pud;
 	phys_addr_t next;
 
-	pud = stage2_pud_offset(pgd, addr);
+	pud = stage2_pud_offset(kvm, pgd, addr);
 	do {
-		next = stage2_pud_addr_end(addr, end);
-		if (!stage2_pud_none(*pud)) {
-			if (stage2_pud_huge(*pud))
+		next = stage2_pud_addr_end(kvm, addr, end);
+		if (!stage2_pud_none(kvm, *pud)) {
+			if (stage2_pud_huge(kvm, *pud))
 				kvm_flush_dcache_pud(*pud);
 			else
 				stage2_flush_pmds(kvm, pud, addr, next);
@@ -409,10 +408,10 @@ static void stage2_flush_memslot(struct kvm *kvm,
 	phys_addr_t next;
 	pgd_t *pgd;
 
-	pgd = kvm->arch.pgd + stage2_pgd_index(addr);
+	pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
 	do {
-		next = stage2_pgd_addr_end(addr, end);
-		if (!stage2_pgd_none(*pgd))
+		next = stage2_pgd_addr_end(kvm, addr, end);
+		if (!stage2_pgd_none(kvm, *pgd))
 			stage2_flush_puds(kvm, pgd, addr, next);
 	} while (pgd++, addr = next, addr != end);
 }
@@ -898,7 +897,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
 	}
 
 	/* Allocate the HW PGD, making sure that each page gets its own refcount */
-	pgd = alloc_pages_exact(S2_PGD_SIZE, GFP_KERNEL | __GFP_ZERO);
+	pgd = alloc_pages_exact(stage2_pgd_size(kvm), GFP_KERNEL | __GFP_ZERO);
 	if (!pgd)
 		return -ENOMEM;
 
@@ -987,7 +986,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
 
 	spin_lock(&kvm->mmu_lock);
 	if (kvm->arch.pgd) {
-		unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+		unmap_stage2_range(kvm, 0, kvm_phys_size(kvm));
 		pgd = READ_ONCE(kvm->arch.pgd);
 		kvm->arch.pgd = NULL;
 	}
@@ -995,7 +994,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
 
 	/* Free the HW pgd, one page at a time */
 	if (pgd)
-		free_pages_exact(pgd, S2_PGD_SIZE);
+		free_pages_exact(pgd, stage2_pgd_size(kvm));
 }
 
 static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
@@ -1004,16 +1003,16 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
 	pgd_t *pgd;
 	pud_t *pud;
 
-	pgd = kvm->arch.pgd + stage2_pgd_index(addr);
-	if (stage2_pgd_none(*pgd)) {
+	pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
+	if (stage2_pgd_none(kvm, *pgd)) {
 		if (!cache)
 			return NULL;
 		pud = mmu_memory_cache_alloc(cache);
-		stage2_pgd_populate(pgd, pud);
+		stage2_pgd_populate(kvm, pgd, pud);
 		get_page(virt_to_page(pgd));
 	}
 
-	return stage2_pud_offset(pgd, addr);
+	return stage2_pud_offset(kvm, pgd, addr);
 }
 
 static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
@@ -1026,15 +1025,15 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
 	if (!pud)
 		return NULL;
 
-	if (stage2_pud_none(*pud)) {
+	if (stage2_pud_none(kvm, *pud)) {
 		if (!cache)
 			return NULL;
 		pmd = mmu_memory_cache_alloc(cache);
-		stage2_pud_populate(pud, pmd);
+		stage2_pud_populate(kvm, pud, pmd);
 		get_page(virt_to_page(pud));
 	}
 
-	return stage2_pmd_offset(pud, addr);
+	return stage2_pmd_offset(kvm, pud, addr);
 }
 
 static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
@@ -1208,8 +1207,9 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 	if (writable)
 		pte = kvm_s2pte_mkwrite(pte);
 
-	ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES,
+	ret = mmu_topup_memory_cache(&cache,
+				     kvm_mmu_cache_min_pages(kvm),
 				     KVM_NR_MEM_OBJS);
 	if (ret)
 		goto out;
 	spin_lock(&kvm->mmu_lock);
@@ -1297,19 +1297,21 @@ static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
 
 /**
  * stage2_wp_pmds - write protect PUD range
+ * kvm:		kvm instance for the VM
  * @pud:	pointer to pud entry
  * @addr:	range start address
  * @end:	range end address
  */
-static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
+static void stage2_wp_pmds(struct kvm *kvm, pud_t *pud,
+			   phys_addr_t addr, phys_addr_t end)
 {
 	pmd_t *pmd;
 	phys_addr_t next;
 
-	pmd = stage2_pmd_offset(pud, addr);
+	pmd = stage2_pmd_offset(kvm, pud, addr);
 
 	do {
-		next = stage2_pmd_addr_end(addr, end);
+		next = stage2_pmd_addr_end(kvm, addr, end);
 		if (!pmd_none(*pmd)) {
 			if (pmd_thp_or_huge(*pmd)) {
 				if (!kvm_s2pmd_readonly(pmd))
@@ -1329,18 +1331,19 @@ static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
  *
  * Process PUD entries, for a huge PUD we cause a panic.
  */
-static void stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
+static void stage2_wp_puds(struct kvm *kvm, pgd_t *pgd,
+			   phys_addr_t addr, phys_addr_t end)
 {
 	pud_t *pud;
 	phys_addr_t next;
 
-	pud = stage2_pud_offset(pgd, addr);
+	pud = stage2_pud_offset(kvm, pgd, addr);
 	do {
-		next = stage2_pud_addr_end(addr, end);
-		if (!stage2_pud_none(*pud)) {
+		next = stage2_pud_addr_end(kvm, addr, end);
+		if (!stage2_pud_none(kvm, *pud)) {
 			/* TODO:PUD not supported, revisit later if supported */
-			BUG_ON(stage2_pud_huge(*pud));
-			stage2_wp_pmds(pud, addr, next);
+			BUG_ON(stage2_pud_huge(kvm, *pud));
+			stage2_wp_pmds(kvm, pud, addr, next);
 		}
 	} while (pud++, addr = next, addr != end);
 }
@@ -1356,7 +1359,7 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
 	pgd_t *pgd;
 	phys_addr_t next;
 
-	pgd = kvm->arch.pgd + stage2_pgd_index(addr);
+	pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
 	do {
 		/*
 		 * Release kvm_mmu_lock periodically if the memory region is
@@ -1370,9 +1373,9 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
 		cond_resched_lock(&kvm->mmu_lock);
 		if (!READ_ONCE(kvm->arch.pgd))
 			break;
-		next = stage2_pgd_addr_end(addr, end);
-		if (stage2_pgd_present(*pgd))
-			stage2_wp_puds(pgd, addr, next);
+		next = stage2_pgd_addr_end(kvm, addr, end);
+		if (stage2_pgd_present(kvm, *pgd))
+			stage2_wp_puds(kvm, pgd, addr, next);
 	} while (pgd++, addr = next, addr != end);
 }
 
@@ -1521,7 +1524,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	up_read(&current->mm->mmap_sem);
 
 	/* We need minimum second+third level pages */
-	ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
+	ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
 				     KVM_NR_MEM_OBJS);
 	if (ret)
 		return ret;
@@ -1764,7 +1767,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	}
 
 	/* Userspace should not be able to register out-of-bounds IPAs */
-	VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
+	VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
 
 	if (fault_status == FSC_ACCESS) {
 		handle_access_fault(vcpu, fault_ipa);
@@ -2063,7 +2066,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	 * space addressable by the KVM guest IPA space.
 	 */
 	if (memslot->base_gfn + memslot->npages >=
-	    (KVM_PHYS_SIZE >> PAGE_SHIFT))
+	    (kvm_phys_size(kvm) >> PAGE_SHIFT))
 		return -EFAULT;
 
 	down_read(&current->mm->mmap_sem);
diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c b/virt/kvm/arm/vgic/vgic-kvm-device.c
index 6ada2432e37c..114dce9f4bf5 100644
--- a/virt/kvm/arm/vgic/vgic-kvm-device.c
+++ b/virt/kvm/arm/vgic/vgic-kvm-device.c
@@ -25,7 +25,7 @@
 int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
 		      phys_addr_t addr, phys_addr_t alignment)
 {
-	if (addr & ~KVM_PHYS_MASK)
+	if (addr & ~kvm_phys_mask(kvm))
 		return -E2BIG;
 
 	if (!IS_ALIGNED(addr, alignment))