path: root/arch/arm/kvm
author	Marc Zyngier <marc.zyngier@arm.com>	2013-04-12 14:12:02 -0400
committer	Christoffer Dall <cdall@cs.columbia.edu>	2013-04-29 01:23:08 -0400
commit	3562c76dcb9ce84853c835eec12a911bf3a8e2da (patch)
tree	2bce0997ff66f61975c04912d60318626072b526 /arch/arm/kvm
parent	6060df84cbe0d36b8e1415b68e3f67b77f27052a (diff)
ARM: KVM: fix HYP mapping limitations around zero
The current code for creating HYP mappings doesn't like to wrap around
zero, which prevents anything from being mapped into the last page of
the virtual address space.

It doesn't take much effort to remove this limitation, making the code
more consistent with the rest of the kernel in the process. A small
stand-alone sketch of the wrap-around problem follows below.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <cdall@cs.columbia.edu>
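A minimal user-space sketch (not part of the patch) of the limitation the commit message describes: when a range ends at the very top of the address space, "end" wraps to 0 and an "addr < end" loop never runs, while the do/while form with a comma-operator step, as used by the new loop in this patch, visits every page. PAGE_SIZE and the three-page range are illustrative assumptions only.

/* wrap_demo.c: why "addr < end" misses a range that ends at zero. */
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long start = 0UL - 3 * PAGE_SIZE;	/* last 3 pages of the address space */
	unsigned long end   = 0UL;			/* end wraps past the top to 0 */
	unsigned long addr;
	int pages;

	/* Old style: "start < end" is already false, so nothing is mapped. */
	pages = 0;
	for (addr = start; addr < end; addr += PAGE_SIZE)
		pages++;
	printf("for-loop visited %d pages\n", pages);	/* prints 0 */

	/* New style: runs once per page and stops when addr wraps to end. */
	pages = 0;
	addr = start;
	do {
		pages++;
	} while (addr += PAGE_SIZE, addr != end);
	printf("do-while visited %d pages\n", pages);	/* prints 3 */

	return 0;
}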
Diffstat (limited to 'arch/arm/kvm')
-rw-r--r--	arch/arm/kvm/mmu.c	21
1 file changed, 10 insertions(+), 11 deletions(-)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 30033219df84..96d61daa23ba 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -131,11 +131,12 @@ static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
 	pte_t *pte;
 	unsigned long addr;
 
-	for (addr = start; addr < end; addr += PAGE_SIZE) {
+	addr = start;
+	do {
 		pte = pte_offset_kernel(pmd, addr);
 		kvm_set_pte(pte, pfn_pte(pfn, prot));
 		pfn++;
-	}
+	} while (addr += PAGE_SIZE, addr != end);
 }
 
 static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
@@ -146,7 +147,8 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
 	pte_t *pte;
 	unsigned long addr, next;
 
-	for (addr = start; addr < end; addr = next) {
+	addr = start;
+	do {
 		pmd = pmd_offset(pud, addr);
 
 		BUG_ON(pmd_sect(*pmd));
@@ -164,7 +166,7 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
 
 		create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
 		pfn += (next - addr) >> PAGE_SHIFT;
-	}
+	} while (addr = next, addr != end);
 
 	return 0;
 }
@@ -179,11 +181,10 @@ static int __create_hyp_mappings(pgd_t *pgdp,
 	unsigned long addr, next;
 	int err = 0;
 
-	if (start >= end)
-		return -EINVAL;
-
 	mutex_lock(&kvm_hyp_pgd_mutex);
-	for (addr = start & PAGE_MASK; addr < end; addr = next) {
+	addr = start & PAGE_MASK;
+	end = PAGE_ALIGN(end);
+	do {
 		pgd = pgdp + pgd_index(addr);
 		pud = pud_offset(pgd, addr);
 
@@ -202,7 +203,7 @@ static int __create_hyp_mappings(pgd_t *pgdp,
 		if (err)
 			goto out;
 		pfn += (next - addr) >> PAGE_SHIFT;
-	}
+	} while (addr = next, addr != end);
 out:
 	mutex_unlock(&kvm_hyp_pgd_mutex);
 	return err;
@@ -216,8 +217,6 @@ out:
  * The same virtual address as the kernel virtual address is also used
  * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
  * physical pages.
- *
- * Note: Wrapping around zero in the "to" address is not supported.
  */
 int create_hyp_mappings(void *from, void *to)
 {