author	Christoffer Dall <christoffer.dall@linaro.org>	2014-05-09 17:31:31 -0400
committer	Christoffer Dall <christoffer.dall@linaro.org>	2014-07-11 07:46:51 -0400
commit	4f853a714bf16338ff5261128e6c7ae2569e9505 (patch)
tree	3726687645ce6cc69b732606ebf07ff89cea63e5 /arch
parent	9f6226a762c7ae02f6a23a3d4fc552dafa57ea23 (diff)
arm/arm64: KVM: Fix and refactor unmap_range
unmap_range() was utterly broken, to quote Marc, and broke in all sorts of
situations. It was also quite complicated to follow and didn't follow the
usual scheme of having a separate iterating function for each level of page
tables.

Address this by refactoring the code and introducing a clear_pgd_entry()
function.

Reviewed-by: Jungseok Lee <jays.lee@samsung.com>
Reviewed-by: Mario Smarduch <m.smarduch@samsung.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
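For readers unfamiliar with the scheme the message refers to, the sketch below is a minimal user-space illustration (not part of the patch) of the "separate iterating function per level" pattern the refactored code follows: the leaf level clears entries in its range, and the level above walks its own slice, recurses, and frees a lower-level table once it has become empty. Every name and type in it (l0_table, unmap_l1, and so on) is a hypothetical stand-in for the kernel's pgd/pud/pmd/pte machinery.

/*
 * Illustrative sketch only -- not part of the patch.  A two-level user-space
 * model of "one iterating function per page-table level".  All types and
 * helpers here are hypothetical stand-ins for the kernel's pgd/pud/pmd/pte
 * code; the real patch applies the same shape across four levels and also
 * handles huge mappings and TLB invalidation, which this toy model omits.
 */
#include <stdio.h>
#include <stdlib.h>

#define L0_ENTRIES 4	/* leaf slots per level-0 table (stand-in for PTEs) */
#define L1_ENTRIES 4	/* level-0 tables per level-1 table (stand-in for PMDs) */

struct l0_table { int present[L0_ENTRIES]; };
struct l1_table { struct l0_table *tables[L1_ENTRIES]; };

/* Leaf level: clear every mapping in [start, end), report whether the table is now empty. */
static int unmap_l0(struct l0_table *l0, int start, int end)
{
	int i, empty = 1;

	for (i = 0; i < L0_ENTRIES; i++) {
		if (i >= start && i < end)
			l0->present[i] = 0;	/* analogous to kvm_set_pte(pte, __pte(0)) */
		if (l0->present[i])
			empty = 0;
	}
	return empty;
}

/* Upper level: iterate its own range, recurse, and free a lower table once it is empty. */
static void unmap_l1(struct l1_table *l1, int start, int end)
{
	int idx;

	for (idx = start / L0_ENTRIES; idx <= (end - 1) / L0_ENTRIES; idx++) {
		struct l0_table *l0 = l1->tables[idx];
		int lo, hi;

		if (!l0)
			continue;	/* analogous to pmd_none() */

		lo = (start > idx * L0_ENTRIES) ? start - idx * L0_ENTRIES : 0;
		hi = (end < (idx + 1) * L0_ENTRIES) ? end - idx * L0_ENTRIES : L0_ENTRIES;

		if (unmap_l0(l0, lo, hi)) {	/* table emptied: walk back up and free it */
			free(l0);
			l1->tables[idx] = NULL;
		}
	}
}

int main(void)
{
	struct l1_table l1 = { { NULL } };
	int i;

	l1.tables[1] = calloc(1, sizeof(*l1.tables[1]));
	for (i = 0; i < L0_ENTRIES; i++)
		l1.tables[1]->present[i] = 1;

	unmap_l1(&l1, 4, 8);	/* unmap slots 4..7: empties and frees table 1 */
	printf("level-0 table 1 freed: %s\n", l1.tables[1] ? "no" : "yes");
	return 0;
}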
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/include/asm/kvm_mmu.h	12
-rw-r--r--	arch/arm/kvm/mmu.c	157
-rw-r--r--	arch/arm64/include/asm/kvm_mmu.h	15
3 files changed, 111 insertions, 73 deletions
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 5c7aa3c1519f..5cc0b0f5f72f 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -127,6 +127,18 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
 		(__boundary - 1 < (end) - 1)? __boundary: (end);	\
 })
 
+static inline bool kvm_page_empty(void *ptr)
+{
+	struct page *ptr_page = virt_to_page(ptr);
+	return page_count(ptr_page) == 1;
+}
+
+
+#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
+#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
+#define kvm_pud_table_empty(pudp) (0)
+
+
 struct kvm;
 
 #define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 16f804938b8f..23360610aeac 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -90,104 +90,115 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 	return p;
 }
 
-static bool page_empty(void *ptr)
+static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
 {
-	struct page *ptr_page = virt_to_page(ptr);
-	return page_count(ptr_page) == 1;
+	pud_t *pud_table __maybe_unused = pud_offset(pgd, 0);
+	pgd_clear(pgd);
+	kvm_tlb_flush_vmid_ipa(kvm, addr);
+	pud_free(NULL, pud_table);
+	put_page(virt_to_page(pgd));
 }
 
 static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
 {
-	if (pud_huge(*pud)) {
-		pud_clear(pud);
-		kvm_tlb_flush_vmid_ipa(kvm, addr);
-	} else {
-		pmd_t *pmd_table = pmd_offset(pud, 0);
-		pud_clear(pud);
-		kvm_tlb_flush_vmid_ipa(kvm, addr);
-		pmd_free(NULL, pmd_table);
-	}
+	pmd_t *pmd_table = pmd_offset(pud, 0);
+	VM_BUG_ON(pud_huge(*pud));
+	pud_clear(pud);
+	kvm_tlb_flush_vmid_ipa(kvm, addr);
+	pmd_free(NULL, pmd_table);
 	put_page(virt_to_page(pud));
 }
 
 static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
 {
-	if (kvm_pmd_huge(*pmd)) {
-		pmd_clear(pmd);
-		kvm_tlb_flush_vmid_ipa(kvm, addr);
-	} else {
-		pte_t *pte_table = pte_offset_kernel(pmd, 0);
-		pmd_clear(pmd);
-		kvm_tlb_flush_vmid_ipa(kvm, addr);
-		pte_free_kernel(NULL, pte_table);
-	}
+	pte_t *pte_table = pte_offset_kernel(pmd, 0);
+	VM_BUG_ON(kvm_pmd_huge(*pmd));
+	pmd_clear(pmd);
+	kvm_tlb_flush_vmid_ipa(kvm, addr);
+	pte_free_kernel(NULL, pte_table);
 	put_page(virt_to_page(pmd));
 }
 
-static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
+static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
+		       phys_addr_t addr, phys_addr_t end)
 {
-	if (pte_present(*pte)) {
-		kvm_set_pte(pte, __pte(0));
-		put_page(virt_to_page(pte));
-		kvm_tlb_flush_vmid_ipa(kvm, addr);
-	}
+	phys_addr_t start_addr = addr;
+	pte_t *pte, *start_pte;
+
+	start_pte = pte = pte_offset_kernel(pmd, addr);
+	do {
+		if (!pte_none(*pte)) {
+			kvm_set_pte(pte, __pte(0));
+			put_page(virt_to_page(pte));
+			kvm_tlb_flush_vmid_ipa(kvm, addr);
+		}
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+
+	if (kvm_pte_table_empty(start_pte))
+		clear_pmd_entry(kvm, pmd, start_addr);
 }
 
-static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
-			unsigned long long start, u64 size)
+static void unmap_pmds(struct kvm *kvm, pud_t *pud,
+		       phys_addr_t addr, phys_addr_t end)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	unsigned long long addr = start, end = start + size;
-	u64 next;
+	phys_addr_t next, start_addr = addr;
+	pmd_t *pmd, *start_pmd;
 
-	while (addr < end) {
-		pgd = pgdp + pgd_index(addr);
-		pud = pud_offset(pgd, addr);
-		pte = NULL;
-		if (pud_none(*pud)) {
-			addr = kvm_pud_addr_end(addr, end);
-			continue;
-		}
-
-		if (pud_huge(*pud)) {
-			/*
-			 * If we are dealing with a huge pud, just clear it and
-			 * move on.
-			 */
-			clear_pud_entry(kvm, pud, addr);
-			addr = kvm_pud_addr_end(addr, end);
-			continue;
+	start_pmd = pmd = pmd_offset(pud, addr);
+	do {
+		next = kvm_pmd_addr_end(addr, end);
+		if (!pmd_none(*pmd)) {
+			if (kvm_pmd_huge(*pmd)) {
+				pmd_clear(pmd);
+				kvm_tlb_flush_vmid_ipa(kvm, addr);
+				put_page(virt_to_page(pmd));
+			} else {
+				unmap_ptes(kvm, pmd, addr, next);
+			}
 		}
+	} while (pmd++, addr = next, addr != end);
 
-		pmd = pmd_offset(pud, addr);
-		if (pmd_none(*pmd)) {
-			addr = kvm_pmd_addr_end(addr, end);
-			continue;
-		}
+	if (kvm_pmd_table_empty(start_pmd))
+		clear_pud_entry(kvm, pud, start_addr);
+}
 
-		if (!kvm_pmd_huge(*pmd)) {
-			pte = pte_offset_kernel(pmd, addr);
-			clear_pte_entry(kvm, pte, addr);
-			next = addr + PAGE_SIZE;
-		}
+static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
+		       phys_addr_t addr, phys_addr_t end)
+{
+	phys_addr_t next, start_addr = addr;
+	pud_t *pud, *start_pud;
 
-		/*
-		 * If the pmd entry is to be cleared, walk back up the ladder
-		 */
-		if (kvm_pmd_huge(*pmd) || (pte && page_empty(pte))) {
-			clear_pmd_entry(kvm, pmd, addr);
-			next = kvm_pmd_addr_end(addr, end);
-			if (page_empty(pmd) && !page_empty(pud)) {
-				clear_pud_entry(kvm, pud, addr);
-				next = kvm_pud_addr_end(addr, end);
+	start_pud = pud = pud_offset(pgd, addr);
+	do {
+		next = kvm_pud_addr_end(addr, end);
+		if (!pud_none(*pud)) {
+			if (pud_huge(*pud)) {
+				pud_clear(pud);
+				kvm_tlb_flush_vmid_ipa(kvm, addr);
+				put_page(virt_to_page(pud));
+			} else {
+				unmap_pmds(kvm, pud, addr, next);
			}
 		}
+	} while (pud++, addr = next, addr != end);
 
-		addr = next;
-	}
+	if (kvm_pud_table_empty(start_pud))
+		clear_pgd_entry(kvm, pgd, start_addr);
+}
+
+
+static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
+			phys_addr_t start, u64 size)
+{
+	pgd_t *pgd;
+	phys_addr_t addr = start, end = start + size;
+	phys_addr_t next;
+
+	pgd = pgdp + pgd_index(addr);
+	do {
+		next = kvm_pgd_addr_end(addr, end);
+		unmap_puds(kvm, pgd, addr, next);
+	} while (pgd++, addr = next, addr != end);
 }
 
 static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 7d29847a893b..8e138c7c53ac 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -125,6 +125,21 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
 #define kvm_pud_addr_end(addr, end)	pud_addr_end(addr, end)
 #define kvm_pmd_addr_end(addr, end)	pmd_addr_end(addr, end)
 
+static inline bool kvm_page_empty(void *ptr)
+{
+	struct page *ptr_page = virt_to_page(ptr);
+	return page_count(ptr_page) == 1;
+}
+
+#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
+#ifndef CONFIG_ARM64_64K_PAGES
+#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
+#else
+#define kvm_pmd_table_empty(pmdp) (0)
+#endif
+#define kvm_pud_table_empty(pudp) (0)
+
+
 struct kvm;
 
 #define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))