author	Russell King <rmk@dyn-67.arm.linux.org.uk>	2007-04-21 05:16:48 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2007-04-21 15:35:52 -0400
commit	4a56c1e41f19393577bdd5c774c289c199b7269d (patch)
tree	a2c36198e3b4d78ccc8e373c3748112bc0645b0e /arch
parent	d5c98176ef34b8b78645646593c17e10f62f53ff (diff)
[ARM] mm 3: separate out supersection mappings, avoid for <4GB
Catalin Marinas at ARM Ltd says:

> The CPU architects in ARM intended supersections only as a way to map
> addresses >= 4GB.  Supersections are not mandated by the architecture
> and there is no easy way to detect their hardware support at run-time
> (other than checking for a specific core).  From the analysis done in
> ARM, there wasn't a clear performance gain by using supersections
> rather than sections (no significant improvement in the TLB misses).

Therefore, we should avoid using supersections unless there's a real
need (iow, we're mapping addresses >= 4GB).

This means that we can simplify create_mapping() a bit since we will
only use supersection mappings for addresses >= 4GB, which means that
the physical, virtual and length must be multiples of the supersection
mapping size.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
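For context, a supersection maps 16MB by repeating one first-level descriptor across 16 consecutive 1MB page-table slots; both the removed alloc_init_supersection() and the new create_36bit_mapping() below perform exactly this replication. A minimal userspace sketch of the idea, assuming the 16MB geometry and a plain array in place of the kernel's pmd_t (the base address is hypothetical, and PMD_SECT_SUPER is assumed to be bit 18 as in the ARMv6 descriptor encoding):

#include <stdio.h>
#include <stdint.h>

#define SLOTS_PER_SUPERSECTION 16	/* 16 x 1MB first-level entries = 16MB */
#define PMD_SECT_SUPER (1 << 18)	/* supersection bit, assumed ARMv6 encoding */

int main(void)
{
	uint32_t pmd[SLOTS_PER_SUPERSECTION];
	uint32_t base = 0x40000000;	/* hypothetical 16MB-aligned physical base */
	int i;

	/* One supersection: the identical descriptor in all 16 slots. */
	for (i = 0; i < SLOTS_PER_SUPERSECTION; i++)
		pmd[i] = base | PMD_SECT_SUPER;

	printf("slot 0 = %#x, slot 15 = %#x\n", (unsigned)pmd[0], (unsigned)pmd[15]);
	return 0;
}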
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/mm/ioremap.c	  2
-rw-r--r--	arch/arm/mm/mmu.c	130
2 files changed, 62 insertions(+), 70 deletions(-)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 0ac615c0f798..800855b2dc83 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -302,7 +302,7 @@ __ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 #ifndef CONFIG_SMP
 	if (DOMAIN_IO == 0 &&
 	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
-	       cpu_is_xsc3()) &&
+	       cpu_is_xsc3()) && pfn >= 0x100000 &&
 	    !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
 		err = remap_area_supersections(addr, pfn, size, flags);
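The added pfn >= 0x100000 condition is the ">= 4GB" test from the commit message: with 4KB pages (PAGE_SHIFT == 12 on these configurations), page frame 0x100000 is the first whose physical address no longer fits in 32 bits. A quick standalone check of the arithmetic:

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4KB pages */

int main(void)
{
	unsigned long long pfn = 0x100000;

	/* 0x100000 << 12 == 0x100000000, i.e. exactly 4GB */
	printf("first >=4GB frame starts at %#llx\n", pfn << PAGE_SHIFT);
	return 0;
}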
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e359f3685433..32139800d939 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -398,21 +398,6 @@ alloc_init_section(unsigned long virt, unsigned long phys, int prot)
 }
 
 /*
- * Create a SUPER SECTION PGD between VIRT and PHYS with protection PROT
- */
-static inline void
-alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
-{
-	int i;
-
-	for (i = 0; i < 16; i += 1) {
-		alloc_init_section(virt, phys, prot | PMD_SECT_SUPER);
-
-		virt += (PGDIR_SIZE / 2);
-	}
-}
-
-/*
  * Add a PAGE mapping between VIRT and PHYS in domain
  * DOMAIN with protection PROT.  Note that due to the
  * way we map the PTEs, we must allocate two PTE_SIZE'd
@@ -436,6 +421,64 @@ alloc_init_page(unsigned long virt, unsigned long phys, const struct mem_type *t
 	set_pte_ext(ptep, pfn_pte(phys >> PAGE_SHIFT, __pgprot(type->prot_pte)), 0);
 }
 
+static void __init create_36bit_mapping(struct map_desc *md,
+					const struct mem_type *type)
+{
+	unsigned long phys, addr, length, end;
+	pgd_t *pgd;
+
+	addr = md->virtual;
+	phys = (unsigned long)__pfn_to_phys(md->pfn);
+	length = PAGE_ALIGN(md->length);
+
+	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
+		printk(KERN_ERR "MM: CPU does not support supersection "
+		       "mapping for 0x%08llx at 0x%08lx\n",
+		       __pfn_to_phys((u64)md->pfn), addr);
+		return;
+	}
+
+	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
+	 *	Since domain assignments can in fact be arbitrary, the
+	 *	'domain == 0' check below is required to insure that ARMv6
+	 *	supersections are only allocated for domain 0 regardless
+	 *	of the actual domain assignments in use.
+	 */
+	if (type->domain) {
+		printk(KERN_ERR "MM: invalid domain in supersection "
+		       "mapping for 0x%08llx at 0x%08lx\n",
+		       __pfn_to_phys((u64)md->pfn), addr);
+		return;
+	}
+
+	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
+		printk(KERN_ERR "MM: cannot create mapping for "
+		       "0x%08llx at 0x%08lx invalid alignment\n",
+		       __pfn_to_phys((u64)md->pfn), addr);
+		return;
+	}
+
+	/*
+	 * Shift bits [35:32] of address into bits [23:20] of PMD
+	 * (See ARMv6 spec).
+	 */
+	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
+
+	pgd = pgd_offset_k(addr);
+	end = addr + length;
+	do {
+		pmd_t *pmd = pmd_offset(pgd, addr);
+		int i;
+
+		for (i = 0; i < 16; i++)
+			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);
+
+		addr += SUPERSECTION_SIZE;
+		phys += SUPERSECTION_SIZE;
+		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
+	} while (addr != end);
+}
+
 /*
  * Create the page directory entries and any necessary
  * page tables for the mapping specified by `md'.  We
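The phys |= ... line in the new function packs the supersection's extended base address: with PAGE_SHIFT == 12, md->pfn >> 20 yields physical address bits [35:32], which the ARMv6 supersection descriptor carries in bits [23:20]. A worked example with a hypothetical frame number above 4GB:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long long pfn = 0x240000;	/* phys 0x240000000: bits [35:32] = 0x2 */
	uint32_t phys;

	/* The low 32 bits of the physical address form the ordinary base field. */
	phys = (uint32_t)(pfn << PAGE_SHIFT);			/* 0x40000000 */

	/* Bits [35:32] of the address go into bits [23:20] of the descriptor. */
	phys |= ((pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20;	/* ORs in 0x00200000 */

	printf("descriptor base bits = %#x\n", (unsigned)phys);	/* 0x40200000 */
	return 0;
}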
@@ -468,26 +511,9 @@ void __init create_mapping(struct map_desc *md)
 	/*
 	 * Catch 36-bit addresses
 	 */
-	if(md->pfn >= 0x100000) {
-		if (type->domain) {
-			printk(KERN_ERR "MM: invalid domain in supersection "
-				"mapping for 0x%08llx at 0x%08lx\n",
-				__pfn_to_phys((u64)md->pfn), md->virtual);
-			return;
-		}
-		if((md->virtual | md->length | __pfn_to_phys(md->pfn))
-			& ~SUPERSECTION_MASK) {
-			printk(KERN_ERR "MM: cannot create mapping for "
-				"0x%08llx at 0x%08lx invalid alignment\n",
-				__pfn_to_phys((u64)md->pfn), md->virtual);
-			return;
-		}
-
-		/*
-		 * Shift bits [35:32] of address into bits [23:20] of PMD
-		 * (See ARMv6 spec).
-		 */
-		off |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
+	if (md->pfn >= 0x100000) {
+		create_36bit_mapping(md, type);
+		return;
 	}
 
 	virt = md->virtual;
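After this simplification, the only supersection users are >= 4GB mappings, and create_36bit_mapping() requires the virtual address, physical address, and length to all be multiples of the 16MB supersection size via the single OR-then-mask test shown earlier. A small illustration of why ORing the three values catches a misalignment in any one of them (the 16MB geometry is assumed; the addresses are hypothetical):

#include <stdio.h>
#include <stdint.h>

#define SUPERSECTION_SIZE (1UL << 24)	/* 16MB */
#define SUPERSECTION_MASK (~(SUPERSECTION_SIZE - 1))

/* Returns 1 if any of the three values has bits set below the 16MB boundary. */
static int misaligned(uint32_t virt, uint32_t phys, uint32_t length)
{
	return !!((virt | phys | length) & ~SUPERSECTION_MASK);
}

int main(void)
{
	printf("%d\n", misaligned(0xe0000000, 0x40000000, 0x02000000));	/* 0: all 16MB-aligned */
	printf("%d\n", misaligned(0xe0100000, 0x40000000, 0x02000000));	/* 1: virt off by 1MB */
	return 0;
}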
@@ -509,40 +535,6 @@ void __init create_mapping(struct map_desc *md)
 			length -= PAGE_SIZE;
 		}
 
-	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
-	 *	Since domain assignments can in fact be arbitrary, the
-	 *	'domain == 0' check below is required to insure that ARMv6
-	 *	supersections are only allocated for domain 0 regardless
-	 *	of the actual domain assignments in use.
-	 */
-	if ((cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())
-		&& type->domain == 0) {
-		/*
-		 * Align to supersection boundary if !high pages.
-		 * High pages have already been checked for proper
-		 * alignment above and they will fail the SUPSERSECTION_MASK
-		 * check because of the way the address is encoded into
-		 * offset.
-		 */
-		if (md->pfn <= 0x100000) {
-			while ((virt & ~SUPERSECTION_MASK ||
-				(virt + off) & ~SUPERSECTION_MASK) &&
-				length >= (PGDIR_SIZE / 2)) {
-				alloc_init_section(virt, virt + off, type->prot_sect);
-
-				virt   += (PGDIR_SIZE / 2);
-				length -= (PGDIR_SIZE / 2);
-			}
-		}
-
-		while (length >= SUPERSECTION_SIZE) {
-			alloc_init_supersection(virt, virt + off, type->prot_sect);
-
-			virt   += SUPERSECTION_SIZE;
-			length -= SUPERSECTION_SIZE;
-		}
-	}
-
 	/*
 	 * A section mapping covers half a "pgdir" entry.
 	 */