Diffstat (limited to 'arch/arm/kvm/mmu.c')
-rw-r--r--	arch/arm/kvm/mmu.c	134
1 file changed, 124 insertions(+), 10 deletions(-)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 5656d79c5a44..15b050d46fc9 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1330,10 +1330,51 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 
 out_unlock:
 	spin_unlock(&kvm->mmu_lock);
+	kvm_set_pfn_accessed(pfn);
 	kvm_release_pfn_clean(pfn);
 	return ret;
 }
 
+/*
+ * Resolve the access fault by making the page young again.
+ * Note that because the faulting entry is guaranteed not to be
+ * cached in the TLB, we don't need to invalidate anything.
+ */
+static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
+{
+	pmd_t *pmd;
+	pte_t *pte;
+	pfn_t pfn;
+	bool pfn_valid = false;
+
+	trace_kvm_access_fault(fault_ipa);
+
+	spin_lock(&vcpu->kvm->mmu_lock);
+
+	pmd = stage2_get_pmd(vcpu->kvm, NULL, fault_ipa);
+	if (!pmd || pmd_none(*pmd))	/* Nothing there */
+		goto out;
+
+	if (kvm_pmd_huge(*pmd)) {	/* THP, HugeTLB */
+		*pmd = pmd_mkyoung(*pmd);
+		pfn = pmd_pfn(*pmd);
+		pfn_valid = true;
+		goto out;
+	}
+
+	pte = pte_offset_kernel(pmd, fault_ipa);
+	if (pte_none(*pte))	/* Nothing there either */
+		goto out;
+
+	*pte = pte_mkyoung(*pte);	/* Just a page... */
+	pfn = pte_pfn(*pte);
+	pfn_valid = true;
+out:
+	spin_unlock(&vcpu->kvm->mmu_lock);
+	if (pfn_valid)
+		kvm_set_pfn_accessed(pfn);
+}
+
 /**
  * kvm_handle_guest_abort - handles all 2nd stage aborts
  * @vcpu:	the VCPU pointer
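The new handle_access_fault() above is the fast path for the hardware access flag (AF): a valid stage-2 descriptor whose AF bit is clear traps on first access, and resolving the fault amounts to setting the bit again. No TLB invalidation is needed because a descriptor that faults on AF is architecturally guaranteed not to be cached in the TLB. A minimal user-space toy of that idea (all names, and the choice of bit 10 for AF, are illustrative rather than kernel API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DESC_VALID	(1ull << 0)
#define DESC_AF		(1ull << 10)	/* "access flag" bit of a toy descriptor */

/* A valid mapping with AF clear is what triggers an access fault. */
static bool access_faults(uint64_t desc)
{
	return (desc & DESC_VALID) && !(desc & DESC_AF);
}

/* Analogous to pte_mkyoung()/pmd_mkyoung(): just set the flag. */
static uint64_t mkyoung(uint64_t desc)
{
	return desc | DESC_AF;
}

int main(void)
{
	uint64_t desc = DESC_VALID;	/* mapped, but marked "old" */

	if (access_faults(desc))
		desc = mkyoung(desc);	/* the whole fault resolution */

	printf("AF set: %d\n", !!(desc & DESC_AF));
	return 0;
}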
@@ -1364,7 +1405,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 	/* Check the stage-2 fault is trans. fault or write fault */
 	fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
-	if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
+	if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
+	    fault_status != FSC_ACCESS) {
 		kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
 			kvm_vcpu_trap_get_class(vcpu),
 			(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
@@ -1400,6 +1442,12 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	/* Userspace should not be able to register out-of-bounds IPAs */
 	VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
 
+	if (fault_status == FSC_ACCESS) {
+		handle_access_fault(vcpu, fault_ipa);
+		ret = 1;
+		goto out_unlock;
+	}
+
 	ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
 	if (ret == 0)
 		ret = 1;
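With this dispatch in place, an access fault never reaches user_mem_abort(): it needs neither page-table allocation nor a pfn lookup, only the young bit set. The FSC constants come from the xFSC field of the fault syndrome, whose low two bits encode the faulting table level and are already masked off by kvm_vcpu_trap_get_fault_type(). A compilable sketch of the resulting three-way split, assuming the 0x04/0x08/0x0c encodings used by this era's kvm_arm.h:

#include <stdio.h>

#define FSC_FAULT	0x04	/* translation fault: nothing mapped there */
#define FSC_ACCESS	0x08	/* access flag fault: mapped, but AF clear */
#define FSC_PERM	0x0c	/* permission fault: mapped, wrong permissions */

static const char *route(unsigned int fault_status)
{
	switch (fault_status) {
	case FSC_ACCESS:
		return "handle_access_fault()";	/* fast path, no allocation */
	case FSC_FAULT:
	case FSC_PERM:
		return "user_mem_abort()";	/* full fault handling */
	default:
		return "unsupported -> -EFAULT";
	}
}

int main(void)
{
	unsigned int fsc[] = { 0x04, 0x08, 0x0c, 0x10 };

	for (int i = 0; i < 4; i++)
		printf("xFSC %#04x -> %s\n", fsc[i], route(fsc[i]));
	return 0;
}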
@@ -1408,15 +1456,16 @@ out_unlock:
 	return ret;
 }
 
-static void handle_hva_to_gpa(struct kvm *kvm,
-			      unsigned long start,
-			      unsigned long end,
-			      void (*handler)(struct kvm *kvm,
-					      gpa_t gpa, void *data),
-			      void *data)
+static int handle_hva_to_gpa(struct kvm *kvm,
+			     unsigned long start,
+			     unsigned long end,
+			     int (*handler)(struct kvm *kvm,
+					    gpa_t gpa, void *data),
+			     void *data)
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
+	int ret = 0;
 
 	slots = kvm_memslots(kvm);
 
@@ -1440,14 +1489,17 @@ static void handle_hva_to_gpa(struct kvm *kvm,
 
 		for (; gfn < gfn_end; ++gfn) {
 			gpa_t gpa = gfn << PAGE_SHIFT;
-			handler(kvm, gpa, data);
+			ret |= handler(kvm, gpa, data);
 		}
 	}
+
+	return ret;
 }
 
-static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
 {
 	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
+	return 0;
 }
 
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
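The point of changing handle_hva_to_gpa() from void to int is visible in the loop above: per-page handler results are folded with |=, so the walker reports whether any page in the HVA range was young, which is exactly what the aging callers below need. The idiom in isolation (names illustrative):

#include <stdio.h>

typedef int (*page_handler)(unsigned long gpa, void *data);

/* Fold per-page results: one "young" page makes the whole range young. */
static int walk_range(unsigned long start, unsigned long end,
		      page_handler handler, void *data)
{
	int ret = 0;

	for (unsigned long gpa = start; gpa < end; gpa += 4096)
		ret |= handler(gpa, data);	/* never overwrites an earlier 1 */
	return ret;
}

static int fake_age(unsigned long gpa, void *data)
{
	return gpa == 8192;	/* pretend exactly one page was referenced */
}

int main(void)
{
	printf("any page young: %d\n", walk_range(0, 16384, fake_age, NULL));
	return 0;
}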
@@ -1473,7 +1525,7 @@ int kvm_unmap_hva_range(struct kvm *kvm,
 	return 0;
 }
 
-static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
+static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
 {
 	pte_t *pte = (pte_t *)data;
 
@@ -1485,6 +1537,7 @@ static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
 	 * through this calling path.
 	 */
 	stage2_set_pte(kvm, NULL, gpa, pte, 0);
+	return 0;
 }
 
 
@@ -1501,6 +1554,67 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
 }
 
+static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+{
+	pmd_t *pmd;
+	pte_t *pte;
+
+	pmd = stage2_get_pmd(kvm, NULL, gpa);
+	if (!pmd || pmd_none(*pmd))	/* Nothing there */
+		return 0;
+
+	if (kvm_pmd_huge(*pmd)) {	/* THP, HugeTLB */
+		if (pmd_young(*pmd)) {
+			*pmd = pmd_mkold(*pmd);
+			return 1;
+		}
+
+		return 0;
+	}
+
+	pte = pte_offset_kernel(pmd, gpa);
+	if (pte_none(*pte))
+		return 0;
+
+	if (pte_young(*pte)) {
+		*pte = pte_mkold(*pte);	/* Just a page... */
+		return 1;
+	}
+
+	return 0;
+}
+
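kvm_age_hva_handler() is a test-and-clear: it reports whether the entry was young and simultaneously makes it old, so the next guest access raises a fresh access fault and re-marks the page. That is what lets the host's reclaim logic sample guest references over time. The same idiom as a toy (illustrative names, not kernel API):

#include <stdint.h>
#include <stdio.h>

#define YOUNG	(1u << 10)	/* toy stand-in for the descriptor's AF bit */

/* Mirrors pmd_mkold()/pte_mkold() plus the return-value convention:
 * 1 means "was referenced since last asked", and the bit is cleared
 * so the next access must fault to set it again. */
static int test_and_clear_young(uint32_t *desc)
{
	if (*desc & YOUNG) {
		*desc &= ~YOUNG;
		return 1;
	}
	return 0;
}

int main(void)
{
	uint32_t desc = YOUNG;

	/* prints "1 0": young exactly once until re-accessed */
	printf("%d %d\n", test_and_clear_young(&desc),
	       test_and_clear_young(&desc));
	return 0;
}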
+static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+{
+	pmd_t *pmd;
+	pte_t *pte;
+
+	pmd = stage2_get_pmd(kvm, NULL, gpa);
+	if (!pmd || pmd_none(*pmd))	/* Nothing there */
+		return 0;
+
+	if (kvm_pmd_huge(*pmd))		/* THP, HugeTLB */
+		return pmd_young(*pmd);
+
+	pte = pte_offset_kernel(pmd, gpa);
+	if (!pte_none(*pte))		/* Just a page... */
+		return pte_young(*pte);
+
+	return 0;
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+	trace_kvm_age_hva(start, end);
+	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+	trace_kvm_test_age_hva(hva);
+	return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
+}
+
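kvm_age_hva() and kvm_test_age_hva() are the arch halves of KVM's MMU-notifier aging callbacks, invoked when the core mm ages the backing pages (for instance from the page_referenced() path during reclaim). In this era the generic glue in virt/kvm/kvm_main.c takes kvm->mmu_lock around the call, which is why the handlers above, unlike handle_access_fault(), take no lock themselves. A rough, non-verbatim sketch of that glue:

#include <linux/kvm_host.h>

/* Sketch only: the real callback lives in virt/kvm/kvm_main.c and is
 * wired into struct mmu_notifier_ops; shown here just to illustrate
 * the locking and TLB-flush contract around kvm_age_hva(). */
static int clear_flush_young_sketch(struct kvm *kvm,
				    unsigned long start, unsigned long end)
{
	int young;

	spin_lock(&kvm->mmu_lock);
	young = kvm_age_hva(kvm, start, end);	/* non-zero if any page was young */
	spin_unlock(&kvm->mmu_lock);

	if (young)
		kvm_flush_remote_tlbs(kvm);	/* cleared young bits may still be TLB-cached */

	return young;
}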
 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
 	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);