aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm/kvm
diff options
context:
space:
mode:
authorMarc Zyngier <marc.zyngier@arm.com>2015-03-12 14:16:50 -0400
committerChristoffer Dall <christoffer.dall@linaro.org>2015-03-12 17:34:30 -0400
commit1d2ebaccc741a299abfafb848414b01d190f4e33 (patch)
tree77bc47868944f30e42ef86bc18474a5799fa05d6 /arch/arm/kvm
parent174178fed338edba66ab9580af0c5d9e1a4e5019 (diff)
arm/arm64: KVM: Allow handle_hva_to_gpa to return a value
So far, handle_hva_to_gpa was never required to return a value. As we prepare to age pages at Stage-2, we need to be able to return a value from the iterator (kvm_test_age_hva). Adapt the code to handle this situation. No semantic change. Acked-by: Christoffer Dall <christoffer.dall@linaro.org> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com> Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Diffstat (limited to 'arch/arm/kvm')
-rw-r--r--arch/arm/kvm/mmu.c23
1 file changed, 14 insertions(+), 9 deletions(-)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 3e6859bc3e11..ffa06e07eed2 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1377,15 +1377,16 @@ out_unlock:
 	return ret;
 }
 
-static void handle_hva_to_gpa(struct kvm *kvm,
-			      unsigned long start,
-			      unsigned long end,
-			      void (*handler)(struct kvm *kvm,
-					      gpa_t gpa, void *data),
-			      void *data)
+static int handle_hva_to_gpa(struct kvm *kvm,
+			     unsigned long start,
+			     unsigned long end,
+			     int (*handler)(struct kvm *kvm,
+					    gpa_t gpa, void *data),
+			     void *data)
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
+	int ret = 0;
 
 	slots = kvm_memslots(kvm);
 
@@ -1409,14 +1410,17 @@ static void handle_hva_to_gpa(struct kvm *kvm,
 
 		for (; gfn < gfn_end; ++gfn) {
 			gpa_t gpa = gfn << PAGE_SHIFT;
-			handler(kvm, gpa, data);
+			ret |= handler(kvm, gpa, data);
 		}
 	}
+
+	return ret;
 }
 
-static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
 {
 	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
+	return 0;
 }
 
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
@@ -1442,7 +1446,7 @@ int kvm_unmap_hva_range(struct kvm *kvm,
 	return 0;
 }
 
-static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
+static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
 {
 	pte_t *pte = (pte_t *)data;
 
@@ -1454,6 +1458,7 @@ static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
 	 * through this calling path.
 	 */
 	stage2_set_pte(kvm, NULL, gpa, pte, 0);
+	return 0;
 }
 
 