diff options
| author | Russell King <rmk@dyn-67.arm.linux.org.uk> | 2007-04-21 05:05:32 -0400 |
|---|---|---|
| committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2007-04-21 15:35:48 -0400 |
| commit | d5c98176ef34b8b78645646593c17e10f62f53ff (patch) | |
| tree | 33860fb055b89abb538506b0fecba77709d19314 | |
| parent | 2497f0a8125e307cf1fd4222bab53f66305eba27 (diff) | |
[ARM] mm 2: clean up create_mapping()
There's now no need to carry around each protection separately.
Instead, pass around the pointer to the entry in the mem_types
array which we're interested in.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
| -rw-r--r-- | arch/arm/mm/mmu.c | 30 |
1 file changed, 13 insertions(+), 17 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 6178be0242f2..e359f3685433 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
| @@ -420,7 +420,7 @@ alloc_init_supersection(unsigned long virt, unsigned long phys, int prot) | |||
| 420 | * the hardware pte table. | 420 | * the hardware pte table. |
| 421 | */ | 421 | */ |
| 422 | static inline void | 422 | static inline void |
| 423 | alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot) | 423 | alloc_init_page(unsigned long virt, unsigned long phys, const struct mem_type *type) |
| 424 | { | 424 | { |
| 425 | pmd_t *pmdp = pmd_off_k(virt); | 425 | pmd_t *pmdp = pmd_off_k(virt); |
| 426 | pte_t *ptep; | 426 | pte_t *ptep; |
| @@ -429,11 +429,11 @@ alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pg | |||
| 429 | ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * | 429 | ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * |
| 430 | sizeof(pte_t)); | 430 | sizeof(pte_t)); |
| 431 | 431 | ||
| 432 | __pmd_populate(pmdp, __pa(ptep) | prot_l1); | 432 | __pmd_populate(pmdp, __pa(ptep) | type->prot_l1); |
| 433 | } | 433 | } |
| 434 | ptep = pte_offset_kernel(pmdp, virt); | 434 | ptep = pte_offset_kernel(pmdp, virt); |
| 435 | 435 | ||
| 436 | set_pte_ext(ptep, pfn_pte(phys >> PAGE_SHIFT, prot), 0); | 436 | set_pte_ext(ptep, pfn_pte(phys >> PAGE_SHIFT, __pgprot(type->prot_pte)), 0); |
| 437 | } | 437 | } |
| 438 | 438 | ||
| 439 | /* | 439 | /* |
| @@ -446,9 +446,8 @@ alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pg | |||
| 446 | void __init create_mapping(struct map_desc *md) | 446 | void __init create_mapping(struct map_desc *md) |
| 447 | { | 447 | { |
| 448 | unsigned long virt, length; | 448 | unsigned long virt, length; |
| 449 | int prot_sect, prot_l1, domain; | ||
| 450 | pgprot_t prot_pte; | ||
| 451 | unsigned long off = (u32)__pfn_to_phys(md->pfn); | 449 | unsigned long off = (u32)__pfn_to_phys(md->pfn); |
| 450 | const struct mem_type *type; | ||
| 452 | 451 | ||
| 453 | if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) { | 452 | if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) { |
| 454 | printk(KERN_WARNING "BUG: not creating mapping for " | 453 | printk(KERN_WARNING "BUG: not creating mapping for " |
| @@ -464,16 +463,13 @@ void __init create_mapping(struct map_desc *md) | |||
| 464 | __pfn_to_phys((u64)md->pfn), md->virtual); | 463 | __pfn_to_phys((u64)md->pfn), md->virtual); |
| 465 | } | 464 | } |
| 466 | 465 | ||
| 467 | domain = mem_types[md->type].domain; | 466 | type = &mem_types[md->type]; |
| 468 | prot_pte = __pgprot(mem_types[md->type].prot_pte); | ||
| 469 | prot_l1 = mem_types[md->type].prot_l1; | ||
| 470 | prot_sect = mem_types[md->type].prot_sect; | ||
| 471 | 467 | ||
| 472 | /* | 468 | /* |
| 473 | * Catch 36-bit addresses | 469 | * Catch 36-bit addresses |
| 474 | */ | 470 | */ |
| 475 | if(md->pfn >= 0x100000) { | 471 | if(md->pfn >= 0x100000) { |
| 476 | if(domain) { | 472 | if (type->domain) { |
| 477 | printk(KERN_ERR "MM: invalid domain in supersection " | 473 | printk(KERN_ERR "MM: invalid domain in supersection " |
| 478 | "mapping for 0x%08llx at 0x%08lx\n", | 474 | "mapping for 0x%08llx at 0x%08lx\n", |
| 479 | __pfn_to_phys((u64)md->pfn), md->virtual); | 475 | __pfn_to_phys((u64)md->pfn), md->virtual); |
| @@ -498,7 +494,7 @@ void __init create_mapping(struct map_desc *md) | |||
| 498 | off -= virt; | 494 | off -= virt; |
| 499 | length = md->length; | 495 | length = md->length; |
| 500 | 496 | ||
| 501 | if (mem_types[md->type].prot_l1 == 0 && | 497 | if (type->prot_l1 == 0 && |
| 502 | (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) { | 498 | (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) { |
| 503 | printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not " | 499 | printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not " |
| 504 | "be mapped using pages, ignoring.\n", | 500 | "be mapped using pages, ignoring.\n", |
| @@ -507,7 +503,7 @@ void __init create_mapping(struct map_desc *md) | |||
| 507 | } | 503 | } |
| 508 | 504 | ||
| 509 | while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) { | 505 | while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) { |
| 510 | alloc_init_page(virt, virt + off, prot_l1, prot_pte); | 506 | alloc_init_page(virt, virt + off, type); |
| 511 | 507 | ||
| 512 | virt += PAGE_SIZE; | 508 | virt += PAGE_SIZE; |
| 513 | length -= PAGE_SIZE; | 509 | length -= PAGE_SIZE; |
| @@ -520,7 +516,7 @@ void __init create_mapping(struct map_desc *md) | |||
| 520 | * of the actual domain assignments in use. | 516 | * of the actual domain assignments in use. |
| 521 | */ | 517 | */ |
| 522 | if ((cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3()) | 518 | if ((cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3()) |
| 523 | && domain == 0) { | 519 | && type->domain == 0) { |
| 524 | /* | 520 | /* |
| 525 | * Align to supersection boundary if !high pages. | 521 | * Align to supersection boundary if !high pages. |
| 526 | * High pages have already been checked for proper | 522 | * High pages have already been checked for proper |
| @@ -532,7 +528,7 @@ void __init create_mapping(struct map_desc *md) | |||
| 532 | while ((virt & ~SUPERSECTION_MASK || | 528 | while ((virt & ~SUPERSECTION_MASK || |
| 533 | (virt + off) & ~SUPERSECTION_MASK) && | 529 | (virt + off) & ~SUPERSECTION_MASK) && |
| 534 | length >= (PGDIR_SIZE / 2)) { | 530 | length >= (PGDIR_SIZE / 2)) { |
| 535 | alloc_init_section(virt, virt + off, prot_sect); | 531 | alloc_init_section(virt, virt + off, type->prot_sect); |
| 536 | 532 | ||
| 537 | virt += (PGDIR_SIZE / 2); | 533 | virt += (PGDIR_SIZE / 2); |
| 538 | length -= (PGDIR_SIZE / 2); | 534 | length -= (PGDIR_SIZE / 2); |
| @@ -540,7 +536,7 @@ void __init create_mapping(struct map_desc *md) | |||
| 540 | } | 536 | } |
| 541 | 537 | ||
| 542 | while (length >= SUPERSECTION_SIZE) { | 538 | while (length >= SUPERSECTION_SIZE) { |
| 543 | alloc_init_supersection(virt, virt + off, prot_sect); | 539 | alloc_init_supersection(virt, virt + off, type->prot_sect); |
| 544 | 540 | ||
| 545 | virt += SUPERSECTION_SIZE; | 541 | virt += SUPERSECTION_SIZE; |
| 546 | length -= SUPERSECTION_SIZE; | 542 | length -= SUPERSECTION_SIZE; |
| @@ -551,14 +547,14 @@ void __init create_mapping(struct map_desc *md) | |||
| 551 | * A section mapping covers half a "pgdir" entry. | 547 | * A section mapping covers half a "pgdir" entry. |
| 552 | */ | 548 | */ |
| 553 | while (length >= (PGDIR_SIZE / 2)) { | 549 | while (length >= (PGDIR_SIZE / 2)) { |
| 554 | alloc_init_section(virt, virt + off, prot_sect); | 550 | alloc_init_section(virt, virt + off, type->prot_sect); |
| 555 | 551 | ||
| 556 | virt += (PGDIR_SIZE / 2); | 552 | virt += (PGDIR_SIZE / 2); |
| 557 | length -= (PGDIR_SIZE / 2); | 553 | length -= (PGDIR_SIZE / 2); |
| 558 | } | 554 | } |
| 559 | 555 | ||
| 560 | while (length >= PAGE_SIZE) { | 556 | while (length >= PAGE_SIZE) { |
| 561 | alloc_init_page(virt, virt + off, prot_l1, prot_pte); | 557 | alloc_init_page(virt, virt + off, type); |
| 562 | 558 | ||
| 563 | virt += PAGE_SIZE; | 559 | virt += PAGE_SIZE; |
| 564 | length -= PAGE_SIZE; | 560 | length -= PAGE_SIZE; |
