aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm64
diff options
context:
space:
mode:
authorMario Smarduch <m.smarduch@samsung.com>2015-01-15 18:58:59 -0500
committerChristoffer Dall <christoffer.dall@linaro.org>2015-01-16 08:42:48 -0500
commit8199ed0e7c28ece79674a9fbba3208e93395a646 (patch)
tree1d6fbae21b2cc6d7624b149da7b0d8cf3d326683 /arch/arm64
parent15a49a44fc36209e1112e9b8451d653cd07f17a8 (diff)
KVM: arm64: ARMv8 header changes for page logging
This patch adds arm64 helpers to write protect pmds/ptes and retrieve
permissions while logging dirty pages. Also adds a prototype to write
protect a memory slot, and adds a pmd define to check for read-only pmds.

Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Mario Smarduch <m.smarduch@samsung.com>
Diffstat (limited to 'arch/arm64')
-rw-r--r--arch/arm64/include/asm/kvm_asm.h1
-rw-r--r--arch/arm64/include/asm/kvm_host.h1
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h21
-rw-r--r--arch/arm64/include/asm/pgtable-hwdef.h1
4 files changed, 24 insertions, 0 deletions
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 483842180f8f..4f7310fa77f0 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -126,6 +126,7 @@ extern char __kvm_hyp_vector[];
 
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
+extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 0b7dfdb931df..012af6ce9eed 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -199,6 +199,7 @@ struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
 
 u64 kvm_call_hyp(void *hypfn, ...);
 void force_vm_exit(const cpumask_t *mask);
+void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
 
 int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		int exception_index);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 14a74f136272..66577581ce68 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -118,6 +118,27 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
 	pmd_val(*pmd) |= PMD_S2_RDWR;
 }
 
+static inline void kvm_set_s2pte_readonly(pte_t *pte)
+{
+	pte_val(*pte) = (pte_val(*pte) & ~PTE_S2_RDWR) | PTE_S2_RDONLY;
+}
+
+static inline bool kvm_s2pte_readonly(pte_t *pte)
+{
+	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
+}
+
+static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
+{
+	pmd_val(*pmd) = (pmd_val(*pmd) & ~PMD_S2_RDWR) | PMD_S2_RDONLY;
+}
+
+static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
+{
+	return (pmd_val(*pmd) & PMD_S2_RDWR) == PMD_S2_RDONLY;
+}
+
+
 #define kvm_pgd_addr_end(addr, end)	pgd_addr_end(addr, end)
 #define kvm_pud_addr_end(addr, end)	pud_addr_end(addr, end)
 #define kvm_pmd_addr_end(addr, end)	pmd_addr_end(addr, end)
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 88174e0bfafe..5f930cc9ea83 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -119,6 +119,7 @@
 #define PTE_S2_RDONLY		(_AT(pteval_t, 1) << 6)   /* HAP[2:1] */
 #define PTE_S2_RDWR		(_AT(pteval_t, 3) << 6)   /* HAP[2:1] */
 
+#define PMD_S2_RDONLY		(_AT(pmdval_t, 1) << 6)   /* HAP[2:1] */
 #define PMD_S2_RDWR		(_AT(pmdval_t, 3) << 6)   /* HAP[2:1] */
 
 /*