 arch/arm/include/asm/kvm_mmu.h        | 12 ++++++++++++
 arch/arm/include/asm/pgtable.h        |  4 ++--
 arch/arm64/include/asm/kvm_mmu.h      | 12 ++++++++++++
 arch/arm64/include/asm/pgtable-prot.h |  4 ++--
 virt/kvm/arm/mmu.c                    | 19 +++++++++++++++----
 5 files changed, 43 insertions(+), 8 deletions(-)
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index bc8d21e76637..4d7a54cbb3ab 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -85,6 +85,18 @@ static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
 	return pmd;
 }
 
+static inline pte_t kvm_s2pte_mkexec(pte_t pte)
+{
+	pte_val(pte) &= ~L_PTE_XN;
+	return pte;
+}
+
+static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
+{
+	pmd_val(pmd) &= ~PMD_SECT_XN;
+	return pmd;
+}
+
 static inline void kvm_set_s2pte_readonly(pte_t *pte)
 {
 	pte_val(*pte) = (pte_val(*pte) & ~L_PTE_S2_RDWR) | L_PTE_S2_RDONLY;
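
The two helpers added above only clear the execute-never bit. A minimal usage sketch, assuming the existing pfn_pte()/kvm_s2pte_mkwrite() helpers and the exec_fault flag introduced in the mmu.c hunks below (illustrative only, not part of the patch):

	/* PAGE_S2 now carries XN by default, so a fresh mapping starts out
	 * non-executable and only gains exec permission when the fault that
	 * created it was an instruction fetch.
	 */
	pte_t new_pte = pfn_pte(pfn, PAGE_S2);

	if (writable)
		new_pte = kvm_s2pte_mkwrite(new_pte);
	if (exec_fault)
		new_pte = kvm_s2pte_mkexec(new_pte);
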
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 150ece66ddf3..a757401129f9 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -102,8 +102,8 @@ extern pgprot_t pgprot_s2_device;
 #define PAGE_HYP_EXEC		_MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY)
 #define PAGE_HYP_RO		_MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY | L_PTE_XN)
 #define PAGE_HYP_DEVICE		_MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
-#define PAGE_S2			_MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
-#define PAGE_S2_DEVICE		_MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY)
+#define PAGE_S2			_MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY | L_PTE_XN)
+#define PAGE_S2_DEVICE		_MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY | L_PTE_XN)
 
 #define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
 #define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 56b3e03c85e7..1e1b20cb348f 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -173,6 +173,18 @@ static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
 	return pmd;
 }
 
+static inline pte_t kvm_s2pte_mkexec(pte_t pte)
+{
+	pte_val(pte) &= ~PTE_S2_XN;
+	return pte;
+}
+
+static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
+{
+	pmd_val(pmd) &= ~PMD_S2_XN;
+	return pmd;
+}
+
 static inline void kvm_set_s2pte_readonly(pte_t *pte)
 {
 	pteval_t old_pteval, pteval;
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 0a5635fb0ef9..4e12dabd342b 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -60,8 +60,8 @@
 #define PAGE_HYP_RO		__pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
 #define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
 
-#define PAGE_S2			__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
-#define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
+#define PAGE_S2			__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY | PTE_S2_XN)
+#define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_S2_XN)
 
 #define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
 #define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
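
With XN folded into PAGE_S2 and PAGE_S2_DEVICE on both architectures, any stage-2 mapping installed without an explicit kvm_s2pte_mkexec()/kvm_s2pmd_mkexec() call will fault on the first instruction fetch. A sketch of the kind of check this enables; kvm_s2pte_exec() is a hypothetical helper for illustration, not part of this patch:

	/* Hypothetical helper: true if a stage-2 PTE permits instruction
	 * fetch, i.e. its execute-never attribute is clear (arm64 naming
	 * assumed).
	 */
	static inline bool kvm_s2pte_exec(pte_t pte)
	{
		return !(pte_val(pte) & PTE_S2_XN);
	}
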
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 2174244f6317..0417c8e2a81c 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1292,7 +1292,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			  unsigned long fault_status)
 {
 	int ret;
-	bool write_fault, writable, hugetlb = false, force_pte = false;
+	bool write_fault, exec_fault, writable, hugetlb = false, force_pte = false;
 	unsigned long mmu_seq;
 	gfn_t gfn = fault_ipa >> PAGE_SHIFT;
 	struct kvm *kvm = vcpu->kvm;
@@ -1304,7 +1304,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	unsigned long flags = 0;
 
 	write_fault = kvm_is_write_fault(vcpu);
-	if (fault_status == FSC_PERM && !write_fault) {
+	exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
+	VM_BUG_ON(write_fault && exec_fault);
+
+	if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
 		kvm_err("Unexpected L2 read permission error\n");
 		return -EFAULT;
 	}
@@ -1398,7 +1401,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			kvm_set_pfn_dirty(pfn);
 		}
 		clean_dcache_guest_page(vcpu, pfn, PMD_SIZE);
-		invalidate_icache_guest_page(vcpu, pfn, PMD_SIZE);
+
+		if (exec_fault) {
+			new_pmd = kvm_s2pmd_mkexec(new_pmd);
+			invalidate_icache_guest_page(vcpu, pfn, PMD_SIZE);
+		}
 
 		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
 	} else {
@@ -1410,7 +1417,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			mark_page_dirty(kvm, gfn);
 		}
 		clean_dcache_guest_page(vcpu, pfn, PAGE_SIZE);
-		invalidate_icache_guest_page(vcpu, pfn, PAGE_SIZE);
+
+		if (exec_fault) {
+			new_pte = kvm_s2pte_mkexec(new_pte);
+			invalidate_icache_guest_page(vcpu, pfn, PAGE_SIZE);
+		}
 
 		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
 	}
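
Taken together, the mmu.c hunks make the icache maintenance lazy. A comment-only paraphrase of the resulting user_mem_abort() flow (not literal kernel code):

	/*
	 *   clean_dcache_guest_page(vcpu, pfn, size);
	 *   if (exec_fault) {
	 *           new_pte = kvm_s2pte_mkexec(new_pte);    // drop XN
	 *           invalidate_icache_guest_page(vcpu, pfn, size);
	 *   }
	 *
	 * The dcache is still cleaned unconditionally, but the icache is now
	 * invalidated only for mappings the guest actually executes from,
	 * rather than on every fault that installs a mapping.
	 */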