author	Marc Zyngier <marc.zyngier@arm.com>	2013-04-12 14:12:05 -0400
committer	Christoffer Dall <cdall@cs.columbia.edu>	2013-04-29 01:23:10 -0400
commit	4f728276fbf1e043010485d7e9275082a1c3d650 (patch)
tree	f7532d81637eaa647d8045f6e81c14cd5d60a210 /arch/arm
parent	0394e1f605208706e4e1999d06a4570b9f583b7f (diff)
ARM: KVM: rework HYP page table freeing
There is no point in freeing HYP page tables differently from Stage-2. They now have the same requirements, and should be dealt with the same way.

Promote unmap_stage2_range to be The One True Way, and get rid of a number of nasty bugs in the process (good thing we never actually called free_hyp_pmds before...).

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <cdall@cs.columbia.edu>
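For context, the unified teardown in this patch hinges on using each table page's reference count as the "is this table still in use?" marker: the mapping paths now take a reference on the table page for every entry they install (the added get_page() calls), and unmap_range() drops one reference per entry it clears, freeing a table and climbing back up the ladder as soon as its count falls back to one. The following stand-alone C sketch is a toy model of that bookkeeping only; the structures and function names are invented for illustration and are not the kernel API.

/*
 * Toy model (not kernel code) of refcount-driven page table teardown:
 * every live leaf entry holds one reference on its table, so a table
 * whose count has dropped back to 1 holds no mappings and can be freed,
 * which in turn drops a reference on its parent.
 */
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES 4

struct table {
	int refcount;		/* 1 for the table itself + 1 per live entry */
	void *entry[ENTRIES];	/* leaf payloads, or child tables one level up */
};

static struct table *alloc_table(void)
{
	struct table *t = calloc(1, sizeof(*t));
	t->refcount = 1;	/* analogue of page_count() == 1 for an empty table */
	return t;
}

static void map_leaf(struct table *pte, int idx, void *payload)
{
	pte->entry[idx] = payload;
	pte->refcount++;	/* analogue of get_page() in the mapping path */
}

static void unmap_leaf(struct table *pmd, struct table *pte, int idx)
{
	if (!pte->entry[idx])
		return;
	pte->entry[idx] = NULL;
	pte->refcount--;	/* analogue of put_page() in clear_pte_entry() */

	/* "pte_empty": only the table's own reference remains, free it */
	if (pte->refcount == 1) {
		for (int i = 0; i < ENTRIES; i++)
			if (pmd->entry[i] == pte)
				pmd->entry[i] = NULL;
		free(pte);
		pmd->refcount--;	/* parent loses the reference held by this child */
	}
}

int main(void)
{
	struct table *pmd = alloc_table();
	struct table *pte = alloc_table();
	static int page_a, page_b;

	pmd->entry[0] = pte;
	pmd->refcount++;	/* installing a child table also takes a reference */

	map_leaf(pte, 0, &page_a);
	map_leaf(pte, 1, &page_b);

	unmap_leaf(pmd, pte, 0);
	printf("after first unmap:  pmd refcount = %d\n", pmd->refcount);	/* 2 */
	unmap_leaf(pmd, pte, 1);
	printf("after second unmap: pmd refcount = %d\n", pmd->refcount);	/* 1: pmd is now empty too */

	free(pmd);
	return 0;
}

In the real patch the same count is consulted by pte_empty()/pmd_empty(), which is what lets one walker (unmap_range) serve both the HYP and Stage-2 tables.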
Diffstat (limited to 'arch/arm')
-rw-r--r--	arch/arm/include/asm/kvm_mmu.h	2
-rw-r--r--	arch/arm/kvm/arm.c	2
-rw-r--r--	arch/arm/kvm/mmu.c	181
3 files changed, 82 insertions(+), 103 deletions(-)
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 3c71a1d4b7a3..92eb20d57942 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -32,7 +32,7 @@
 
 int create_hyp_mappings(void *from, void *to);
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
-void free_hyp_pmds(void);
+void free_hyp_pgds(void);
 
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 7403f884a545..16f164a5db86 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -928,7 +928,7 @@ static int init_hyp_mode(void)
 out_free_vfp:
 	free_percpu(kvm_host_vfp_state);
 out_free_mappings:
-	free_hyp_pmds();
+	free_hyp_pgds();
 out_free_stack_pages:
 	for_each_possible_cpu(cpu)
 		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index bfc59279de1b..7464824c17ef 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -72,56 +72,104 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 	return p;
 }
 
-static void free_ptes(pmd_t *pmd, unsigned long addr)
-{
-	pte_t *pte;
-	unsigned int i;
-
-	for (i = 0; i < PTRS_PER_PMD; i++, addr += PMD_SIZE) {
-		if (!pmd_none(*pmd) && pmd_table(*pmd)) {
-			pte = pte_offset_kernel(pmd, addr);
-			pte_free_kernel(NULL, pte);
-		}
-		pmd++;
-	}
-}
-
-static void free_hyp_pgd_entry(unsigned long addr)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	unsigned long hyp_addr = KERN_TO_HYP(addr);
-
-	pgd = hyp_pgd + pgd_index(hyp_addr);
-	pud = pud_offset(pgd, hyp_addr);
-
-	if (pud_none(*pud))
-		return;
-	BUG_ON(pud_bad(*pud));
-
-	pmd = pmd_offset(pud, hyp_addr);
-	free_ptes(pmd, addr);
-	pmd_free(NULL, pmd);
-	pud_clear(pud);
-}
-
-/**
- * free_hyp_pmds - free a Hyp-mode level-2 tables and child level-3 tables
- *
- * Assumes this is a page table used strictly in Hyp-mode and therefore contains
- * either mappings in the kernel memory area (above PAGE_OFFSET), or
- * device mappings in the vmalloc range (from VMALLOC_START to VMALLOC_END).
- */
-void free_hyp_pmds(void)
-{
-	unsigned long addr;
-
-	mutex_lock(&kvm_hyp_pgd_mutex);
-	for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
-		free_hyp_pgd_entry(addr);
-	for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
-		free_hyp_pgd_entry(addr);
-	mutex_unlock(&kvm_hyp_pgd_mutex);
-}
+static void clear_pud_entry(pud_t *pud)
+{
+	pmd_t *pmd_table = pmd_offset(pud, 0);
+	pud_clear(pud);
+	pmd_free(NULL, pmd_table);
+	put_page(virt_to_page(pud));
+}
+
+static void clear_pmd_entry(pmd_t *pmd)
+{
+	pte_t *pte_table = pte_offset_kernel(pmd, 0);
+	pmd_clear(pmd);
+	pte_free_kernel(NULL, pte_table);
+	put_page(virt_to_page(pmd));
+}
+
+static bool pmd_empty(pmd_t *pmd)
+{
+	struct page *pmd_page = virt_to_page(pmd);
+	return page_count(pmd_page) == 1;
+}
+
+static void clear_pte_entry(pte_t *pte)
+{
+	if (pte_present(*pte)) {
+		kvm_set_pte(pte, __pte(0));
+		put_page(virt_to_page(pte));
+	}
+}
+
+static bool pte_empty(pte_t *pte)
+{
+	struct page *pte_page = virt_to_page(pte);
+	return page_count(pte_page) == 1;
+}
+
+static void unmap_range(pgd_t *pgdp, unsigned long long start, u64 size)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	unsigned long long addr = start, end = start + size;
+	u64 range;
+
+	while (addr < end) {
+		pgd = pgdp + pgd_index(addr);
+		pud = pud_offset(pgd, addr);
+		if (pud_none(*pud)) {
+			addr += PUD_SIZE;
+			continue;
+		}
+
+		pmd = pmd_offset(pud, addr);
+		if (pmd_none(*pmd)) {
+			addr += PMD_SIZE;
+			continue;
+		}
+
+		pte = pte_offset_kernel(pmd, addr);
+		clear_pte_entry(pte);
+		range = PAGE_SIZE;
+
+		/* If we emptied the pte, walk back up the ladder */
+		if (pte_empty(pte)) {
+			clear_pmd_entry(pmd);
+			range = PMD_SIZE;
+			if (pmd_empty(pmd)) {
+				clear_pud_entry(pud);
+				range = PUD_SIZE;
+			}
+		}
+
+		addr += range;
+	}
+}
+
+/**
+ * free_hyp_pgds - free Hyp-mode page tables
+ *
+ * Assumes hyp_pgd is a page table used strictly in Hyp-mode and therefore contains
+ * either mappings in the kernel memory area (above PAGE_OFFSET), or
+ * device mappings in the vmalloc range (from VMALLOC_START to VMALLOC_END).
+ */
+void free_hyp_pgds(void)
+{
+	unsigned long addr;
+
+	mutex_lock(&kvm_hyp_pgd_mutex);
+
+	if (hyp_pgd) {
+		for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
+			unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+		for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
+			unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+		kfree(hyp_pgd);
+	}
+
+	mutex_unlock(&kvm_hyp_pgd_mutex);
+}
 
@@ -136,6 +184,7 @@ static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
 	do {
 		pte = pte_offset_kernel(pmd, addr);
 		kvm_set_pte(pte, pfn_pte(pfn, prot));
+		get_page(virt_to_page(pte));
 		pfn++;
 	} while (addr += PAGE_SIZE, addr != end);
 }
@@ -161,6 +210,7 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
 				return -ENOMEM;
 			}
 			pmd_populate_kernel(NULL, pmd, pte);
+			get_page(virt_to_page(pmd));
 		}
 
 		next = pmd_addr_end(addr, end);
@@ -197,6 +247,7 @@ static int __create_hyp_mappings(pgd_t *pgdp,
 				goto out;
 			}
 			pud_populate(NULL, pud, pmd);
+			get_page(virt_to_page(pud));
 		}
 
 		next = pgd_addr_end(addr, end);
@@ -289,42 +340,6 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
 	return 0;
 }
 
-static void clear_pud_entry(pud_t *pud)
-{
-	pmd_t *pmd_table = pmd_offset(pud, 0);
-	pud_clear(pud);
-	pmd_free(NULL, pmd_table);
-	put_page(virt_to_page(pud));
-}
-
-static void clear_pmd_entry(pmd_t *pmd)
-{
-	pte_t *pte_table = pte_offset_kernel(pmd, 0);
-	pmd_clear(pmd);
-	pte_free_kernel(NULL, pte_table);
-	put_page(virt_to_page(pmd));
-}
-
-static bool pmd_empty(pmd_t *pmd)
-{
-	struct page *pmd_page = virt_to_page(pmd);
-	return page_count(pmd_page) == 1;
-}
-
-static void clear_pte_entry(pte_t *pte)
-{
-	if (pte_present(*pte)) {
-		kvm_set_pte(pte, __pte(0));
-		put_page(virt_to_page(pte));
-	}
-}
-
-static bool pte_empty(pte_t *pte)
-{
-	struct page *pte_page = virt_to_page(pte);
-	return page_count(pte_page) == 1;
-}
-
 /**
  * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
  * @kvm: The VM pointer
@@ -338,43 +353,7 @@ static bool pte_empty(pte_t *pte)
  */
 static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	phys_addr_t addr = start, end = start + size;
-	u64 range;
-
-	while (addr < end) {
-		pgd = kvm->arch.pgd + pgd_index(addr);
-		pud = pud_offset(pgd, addr);
-		if (pud_none(*pud)) {
-			addr += PUD_SIZE;
-			continue;
-		}
-
-		pmd = pmd_offset(pud, addr);
-		if (pmd_none(*pmd)) {
-			addr += PMD_SIZE;
-			continue;
-		}
-
-		pte = pte_offset_kernel(pmd, addr);
-		clear_pte_entry(pte);
-		range = PAGE_SIZE;
-
-		/* If we emptied the pte, walk back up the ladder */
-		if (pte_empty(pte)) {
-			clear_pmd_entry(pmd);
-			range = PMD_SIZE;
-			if (pmd_empty(pmd)) {
-				clear_pud_entry(pud);
-				range = PUD_SIZE;
-			}
-		}
-
-		addr += range;
-	}
+	unmap_range(kvm->arch.pgd, start, size);
 }
 
 /**
@@ -741,7 +720,7 @@ int kvm_mmu_init(void)
 
 	return 0;
 out:
-	kfree(hyp_pgd);
+	free_hyp_pgds();
 	return err;
 }
 