author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2007-04-21 05:21:28 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2007-04-21 15:35:55 -0400
commit		24e6c6996fb6e0e716c1dda1def1bb023a0fe43b (patch)
tree		91e507dd9a071d7e6d68bd06120f6538c92b1cec /arch/arm/mm/mmu.c
parent		4a56c1e41f19393577bdd5c774c289c199b7269d (diff)
[ARM] mm 4: make create_mapping() more conventional
Rather than using three separate loops to set up mappings (page
mappings up to a section boundary, then section mappings, then the
remainder by page mappings), convert this to the more conventional
Linux style: a loop over each page table level.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
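For readers outside the kernel tree, the heart of the new layout is the alignment test that picks between 1MB section entries and a second-level pte table. Below is a minimal standalone sketch of that arithmetic, assuming ARM's 1MB sections; can_use_sections() is a hypothetical helper for illustration, not kernel code:

#include <stdio.h>

/* Assumed ARM values: 1MB sections, so the low 20 bits must be clear. */
#define SECTION_SHIFT	20
#define SECTION_SIZE	(1UL << SECTION_SHIFT)
#define SECTION_MASK	(~(SECTION_SIZE - 1))

/*
 * Hypothetical stand-in for the test in alloc_init_section() below:
 * section entries are only usable when the virtual start, virtual end
 * and physical start all sit on a 1MB boundary.
 */
static int can_use_sections(unsigned long addr, unsigned long end,
			    unsigned long phys)
{
	return ((addr | end | phys) & ~SECTION_MASK) == 0;
}

int main(void)
{
	/* 2MB window, everything 1MB-aligned: sections are usable. */
	printf("%d\n", can_use_sections(0xf8000000, 0xf8200000, 0x10000000));
	/* Same start, but a 4K-long mapping: must fall back to ptes. */
	printf("%d\n", can_use_sections(0xf8000000, 0xf8001000, 0x10000000));
	return 0;
}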
Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--	arch/arm/mm/mmu.c	114
1 file changed, 55 insertions(+), 59 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 32139800d939..5821e67cf8c2 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -380,45 +380,55 @@ static void __init build_mem_type_table(void)
 
 #define vectors_base()	(vectors_high() ? 0xffff0000 : 0)
 
-/*
- * Create a SECTION PGD between VIRT and PHYS in domain
- * DOMAIN with protection PROT.  This operates on half-
- * pgdir entry increments.
- */
-static inline void
-alloc_init_section(unsigned long virt, unsigned long phys, int prot)
+static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
+				  unsigned long end, unsigned long pfn,
+				  const struct mem_type *type)
 {
-	pmd_t *pmdp = pmd_off_k(virt);
+	pte_t *pte;
 
-	if (virt & (1 << 20))
-		pmdp++;
+	if (pmd_none(*pmd)) {
+		pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
+		__pmd_populate(pmd, __pa(pte) | type->prot_l1);
+	}
 
-	*pmdp = __pmd(phys | prot);
-	flush_pmd_entry(pmdp);
+	pte = pte_offset_kernel(pmd, addr);
+	do {
+		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
+		pfn++;
+	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-/*
- * Add a PAGE mapping between VIRT and PHYS in domain
- * DOMAIN with protection PROT.  Note that due to the
- * way we map the PTEs, we must allocate two PTE_SIZE'd
- * blocks - one for the Linux pte table, and one for
- * the hardware pte table.
- */
-static inline void
-alloc_init_page(unsigned long virt, unsigned long phys, const struct mem_type *type)
+static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
+				      unsigned long end, unsigned long phys,
+				      const struct mem_type *type)
 {
-	pmd_t *pmdp = pmd_off_k(virt);
-	pte_t *ptep;
+	pmd_t *pmd = pmd_offset(pgd, addr);
 
-	if (pmd_none(*pmdp)) {
-		ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
-					       sizeof(pte_t));
+	/*
+	 * Try a section mapping - end, addr and phys must all be aligned
+	 * to a section boundary.  Note that PMDs refer to the individual
+	 * L1 entries, whereas PGDs refer to a group of L1 entries making
+	 * up one logical pointer to an L2 table.
+	 */
+	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
+		pmd_t *p = pmd;
 
-		__pmd_populate(pmdp, __pa(ptep) | type->prot_l1);
-	}
-	ptep = pte_offset_kernel(pmdp, virt);
+		if (addr & SECTION_SIZE)
+			pmd++;
+
+		do {
+			*pmd = __pmd(phys | type->prot_sect);
+			phys += SECTION_SIZE;
+		} while (pmd++, addr += SECTION_SIZE, addr != end);
 
-	set_pte_ext(ptep, pfn_pte(phys >> PAGE_SHIFT, __pgprot(type->prot_pte)), 0);
+		flush_pmd_entry(p);
+	} else {
+		/*
+		 * No need to loop; pte's aren't interested in the
+		 * individual L1 entries.
+		 */
+		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
+	}
 }
 
 static void __init create_36bit_mapping(struct map_desc *md,
@@ -488,9 +498,9 @@ static void __init create_36bit_mapping(struct map_desc *md,
  */
 void __init create_mapping(struct map_desc *md)
 {
-	unsigned long virt, length;
-	unsigned long off = (u32)__pfn_to_phys(md->pfn);
+	unsigned long phys, addr, length, end;
 	const struct mem_type *type;
+	pgd_t *pgd;
 
 	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
 		printk(KERN_WARNING "BUG: not creating mapping for "
@@ -516,41 +526,27 @@ void __init create_mapping(struct map_desc *md)
 		return;
 	}
 
-	virt = md->virtual;
-	off -= virt;
-	length = md->length;
+	addr = md->virtual;
+	phys = (unsigned long)__pfn_to_phys(md->pfn);
+	length = PAGE_ALIGN(md->length);
 
-	if (type->prot_l1 == 0 &&
-	    (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
+	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
 		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
 		       "be mapped using pages, ignoring.\n",
-		       __pfn_to_phys(md->pfn), md->virtual);
+		       __pfn_to_phys(md->pfn), addr);
 		return;
 	}
 
-	while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
-		alloc_init_page(virt, virt + off, type);
-
-		virt += PAGE_SIZE;
-		length -= PAGE_SIZE;
-	}
-
-	/*
-	 * A section mapping covers half a "pgdir" entry.
-	 */
-	while (length >= (PGDIR_SIZE / 2)) {
-		alloc_init_section(virt, virt + off, type->prot_sect);
-
-		virt += (PGDIR_SIZE / 2);
-		length -= (PGDIR_SIZE / 2);
-	}
+	pgd = pgd_offset_k(addr);
+	end = addr + length;
+	do {
+		unsigned long next = pgd_addr_end(addr, end);
 
-	while (length >= PAGE_SIZE) {
-		alloc_init_page(virt, virt + off, type);
+		alloc_init_section(pgd, addr, next, phys, type);
 
-		virt += PAGE_SIZE;
-		length -= PAGE_SIZE;
-	}
+		phys += next - addr;
+		addr = next;
+	} while (pgd++, addr != end);
 }
 
 /*
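As a usage note, callers hand create_mapping() a struct map_desc, and nothing about that interface changes in this patch. A hedged board-support-style sketch follows; the addresses, the MT_DEVICE choice and the example_io_desc/example_map_io names are illustrative, not from this commit:

/* Illustrative only - addresses and names are made up. */
static struct map_desc example_io_desc[] __initdata = {
	{
		/* 1MB-aligned start, 1MB length: takes the new
		 * section-mapping branch in alloc_init_section(). */
		.virtual	= 0xf8000000,
		.pfn		= __phys_to_pfn(0x10000000),
		.length		= SZ_1M,
		.type		= MT_DEVICE,
	}, {
		/* 4K length: PAGE_ALIGN()ed, then handed down to
		 * alloc_init_pte() one page at a time. */
		.virtual	= 0xf8100000,
		.pfn		= __phys_to_pfn(0x20000000),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};

static void __init example_map_io(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(example_io_desc); i++)
		create_mapping(&example_io_desc[i]);
}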