aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorWill Deacon <will.deacon@arm.com>2019-02-04 09:37:38 -0500
committerWill Deacon <will.deacon@arm.com>2019-02-04 09:37:38 -0500
commitd23c808c6fc6132e812690648e14c0d6b0cbe273 (patch)
treec30e67be724ecd617c383d82e041d08314cfc59a
parent8834f5600cf3c8db365e18a3d5cac2c2780c81e5 (diff)
arm64: ptdump: Don't iterate kernel page tables using PTRS_PER_PXX
When 52-bit virtual addressing is enabled for userspace (CONFIG_ARM64_USER_VA_BITS_52=y), the kernel continues to utilise 48-bit virtual addressing in TTBR1. Consequently, PTRS_PER_PGD reflects the larger page table size for userspace and the pgd pointer for kernel page tables is offset before being written to TTBR1. This means that we can't use PTRS_PER_PGD to iterate over kernel page tables unless we apply the same offset, which is fiddly to get right and leads to some non-idiomatic walking code. Instead, just follow the usual pattern when walking page tables by using a while loop driven by pXd_offset() and pXd_addr_end(). Reported-by: Qian Cai <cai@lca.pw> Tested-by: Qian Cai <cai@lca.pw> Acked-by: Steve Capper <steve.capper@arm.com> Tested-by: Steve Capper <steve.capper@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--arch/arm64/mm/dump.c59
1 file changed, 29 insertions(+), 30 deletions(-)
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index fcb1f2a6d7c6..99bb8facb5cb 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -286,74 +286,73 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
286 286
287} 287}
288 288
289static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start) 289static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start,
290 unsigned long end)
290{ 291{
291 pte_t *ptep = pte_offset_kernel(pmdp, 0UL); 292 unsigned long addr = start;
292 unsigned long addr; 293 pte_t *ptep = pte_offset_kernel(pmdp, start);
293 unsigned i;
294 294
295 for (i = 0; i < PTRS_PER_PTE; i++, ptep++) { 295 do {
296 addr = start + i * PAGE_SIZE;
297 note_page(st, addr, 4, READ_ONCE(pte_val(*ptep))); 296 note_page(st, addr, 4, READ_ONCE(pte_val(*ptep)));
298 } 297 } while (ptep++, addr += PAGE_SIZE, addr != end);
299} 298}
300 299
301static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start) 300static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start,
301 unsigned long end)
302{ 302{
303 pmd_t *pmdp = pmd_offset(pudp, 0UL); 303 unsigned long next, addr = start;
304 unsigned long addr; 304 pmd_t *pmdp = pmd_offset(pudp, start);
305 unsigned i;
306 305
307 for (i = 0; i < PTRS_PER_PMD; i++, pmdp++) { 306 do {
308 pmd_t pmd = READ_ONCE(*pmdp); 307 pmd_t pmd = READ_ONCE(*pmdp);
308 next = pmd_addr_end(addr, end);
309 309
310 addr = start + i * PMD_SIZE;
311 if (pmd_none(pmd) || pmd_sect(pmd)) { 310 if (pmd_none(pmd) || pmd_sect(pmd)) {
312 note_page(st, addr, 3, pmd_val(pmd)); 311 note_page(st, addr, 3, pmd_val(pmd));
313 } else { 312 } else {
314 BUG_ON(pmd_bad(pmd)); 313 BUG_ON(pmd_bad(pmd));
315 walk_pte(st, pmdp, addr); 314 walk_pte(st, pmdp, addr, next);
316 } 315 }
317 } 316 } while (pmdp++, addr = next, addr != end);
318} 317}
319 318
320static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start) 319static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start,
320 unsigned long end)
321{ 321{
322 pud_t *pudp = pud_offset(pgdp, 0UL); 322 unsigned long next, addr = start;
323 unsigned long addr; 323 pud_t *pudp = pud_offset(pgdp, start);
324 unsigned i;
325 324
326 for (i = 0; i < PTRS_PER_PUD; i++, pudp++) { 325 do {
327 pud_t pud = READ_ONCE(*pudp); 326 pud_t pud = READ_ONCE(*pudp);
327 next = pud_addr_end(addr, end);
328 328
329 addr = start + i * PUD_SIZE;
330 if (pud_none(pud) || pud_sect(pud)) { 329 if (pud_none(pud) || pud_sect(pud)) {
331 note_page(st, addr, 2, pud_val(pud)); 330 note_page(st, addr, 2, pud_val(pud));
332 } else { 331 } else {
333 BUG_ON(pud_bad(pud)); 332 BUG_ON(pud_bad(pud));
334 walk_pmd(st, pudp, addr); 333 walk_pmd(st, pudp, addr, next);
335 } 334 }
336 } 335 } while (pudp++, addr = next, addr != end);
337} 336}
338 337
339static void walk_pgd(struct pg_state *st, struct mm_struct *mm, 338static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
340 unsigned long start) 339 unsigned long start)
341{ 340{
342 pgd_t *pgdp = pgd_offset(mm, 0UL); 341 unsigned long end = (start < TASK_SIZE_64) ? TASK_SIZE_64 : 0;
343 unsigned i; 342 unsigned long next, addr = start;
344 unsigned long addr; 343 pgd_t *pgdp = pgd_offset(mm, start);
345 344
346 for (i = 0; i < PTRS_PER_PGD; i++, pgdp++) { 345 do {
347 pgd_t pgd = READ_ONCE(*pgdp); 346 pgd_t pgd = READ_ONCE(*pgdp);
347 next = pgd_addr_end(addr, end);
348 348
349 addr = start + i * PGDIR_SIZE;
350 if (pgd_none(pgd)) { 349 if (pgd_none(pgd)) {
351 note_page(st, addr, 1, pgd_val(pgd)); 350 note_page(st, addr, 1, pgd_val(pgd));
352 } else { 351 } else {
353 BUG_ON(pgd_bad(pgd)); 352 BUG_ON(pgd_bad(pgd));
354 walk_pud(st, pgdp, addr); 353 walk_pud(st, pgdp, addr, next);
355 } 354 }
356 } 355 } while (pgdp++, addr = next, addr != end);
357} 356}
358 357
359void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info) 358void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info)