author	Christoffer Dall <christoffer.dall@linaro.org>	2013-08-06 16:50:54 -0400
committer	Christoffer Dall <christoffer.dall@linaro.org>	2013-08-07 21:17:28 -0400
commit	d3840b26614d8ce3db53c98061d9fcb1b9ccb0dd (patch)
tree	af0294fc67a471ebf56404e456f2c344ef27345e /arch/arm/kvm
parent	240e99cbd00aa541b572480e3ea7ecb0d480bc79 (diff)
ARM: KVM: Fix unaligned unmap_range leak
The unmap_range function did not properly cover the case when the start
address was not aligned to PMD_SIZE or PUD_SIZE and an entire pte table
or pmd table was cleared, causing us to leak memory when incrementing
the addr.

The fix is to always move onto the next page table entry boundary
instead of adding the full size of the VA range covered by the
corresponding table level entry.

Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
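To make the leak concrete: with 2 MiB PMD entries, freeing an emptied pte
table and then doing addr += PMD_SIZE from an unaligned addr overshoots the
next table boundary by the unaligned offset, so the first pages of the next
pmd entry are never visited and their mappings are never torn down. A minimal
stand-alone sketch of the arithmetic (hypothetical addresses; this
pmd_addr_end() is a simplified rendering of the generic kernel helper, not
the literal macro):

/*
 * Sketch of the unaligned-skip arithmetic. Constants mirror LPAE's
 * 2 MiB PMD entries; addresses are made up for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define PMD_SHIFT	21
#define PMD_SIZE	(1ULL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))

/* Simplified stand-in for the kernel's pmd_addr_end() helper. */
static uint64_t pmd_addr_end(uint64_t addr, uint64_t end)
{
	uint64_t boundary = (addr + PMD_SIZE) & PMD_MASK;

	return boundary < end ? boundary : end;
}

int main(void)
{
	uint64_t addr = 0x00201000ULL;	/* 4 KiB past a PMD boundary */
	uint64_t end  = 0x00800000ULL;

	/* Old behaviour after freeing the pte table: jump a full PMD_SIZE. */
	printf("addr += PMD_SIZE        -> 0x%08llx\n",
	       (unsigned long long)(addr + PMD_SIZE));

	/* Fixed behaviour: stop at the next PMD entry boundary. */
	printf("pmd_addr_end(addr, end) -> 0x%08llx\n",
	       (unsigned long long)pmd_addr_end(addr, end));

	/*
	 * The gap between the two results (0x00400000..0x00401000 here)
	 * is the start of the next pmd entry that the old code stepped
	 * over, leaving any mappings there un-unmapped.
	 */
	return 0;
}

Run, this prints 0x00401000 for the old step and 0x00400000 for the boundary
step; the fixed walk resumes exactly at the start of the next pmd entry.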
Diffstat (limited to 'arch/arm/kvm')
-rw-r--r-- arch/arm/kvm/mmu.c | 14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index ca6bea4859b4..80a83ec4a9ae 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -132,37 +132,37 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
 	pmd_t *pmd;
 	pte_t *pte;
 	unsigned long long addr = start, end = start + size;
-	u64 range;
+	u64 next;
 
 	while (addr < end) {
 		pgd = pgdp + pgd_index(addr);
 		pud = pud_offset(pgd, addr);
 		if (pud_none(*pud)) {
-			addr += PUD_SIZE;
+			addr = pud_addr_end(addr, end);
 			continue;
 		}
 
 		pmd = pmd_offset(pud, addr);
 		if (pmd_none(*pmd)) {
-			addr += PMD_SIZE;
+			addr = pmd_addr_end(addr, end);
 			continue;
 		}
 
 		pte = pte_offset_kernel(pmd, addr);
 		clear_pte_entry(kvm, pte, addr);
-		range = PAGE_SIZE;
+		next = addr + PAGE_SIZE;
 
 		/* If we emptied the pte, walk back up the ladder */
 		if (pte_empty(pte)) {
 			clear_pmd_entry(kvm, pmd, addr);
-			range = PMD_SIZE;
+			next = pmd_addr_end(addr, end);
 			if (pmd_empty(pmd)) {
 				clear_pud_entry(kvm, pud, addr);
-				range = PUD_SIZE;
+				next = pud_addr_end(addr, end);
 			}
 		}
 
-		addr += range;
+		addr = next;
 	}
 }
 
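For reference, the pmd_addr_end()/pud_addr_end() helpers adopted above follow
the generic pattern from include/asm-generic/pgtable.h: round the address up
to the next table-entry boundary, e.g. (addr + PMD_SIZE) & PMD_MASK, and clamp
the result so it never runs past end (the generic macros compare boundary - 1
against end - 1 so that a boundary which wraps to zero still ends the walk).
That clamping is what lets the loop resume exactly at the start of the next
pmd or pud entry regardless of how start was aligned, instead of skipping part
of the following entry.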