author     Linus Torvalds <torvalds@linux-foundation.org>  2011-03-23 23:37:26 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-03-23 23:37:26 -0400
commit     2f284c846331fa44be1300a3c2c3e85800268a00 (patch)
tree       be2704e6157613bd2cc2a278559a6c86a0b644f4 /arch/arm/mm
parent     93a72052be81823fa1584b9be037d51924f9efa4 (diff)
parent     6f82f4db80189281a8ac42f2e72396accb719b57 (diff)
Merge branch 'devel' of master.kernel.org:/home/rmk/linux-2.6-arm
* 'devel' of master.kernel.org:/home/rmk/linux-2.6-arm: (35 commits)
  ARM: Update (and cut down) mach-types
  ARM: 6771/1: vexpress: add support for multiple core tiles
  ARM: 6797/1: hw_breakpoint: Fix newlines in WARNings
  ARM: 6751/1: vexpress: select applicable errata workarounds in Kconfig
  ARM: 6753/1: omap4: Enable ARM local timers with OMAP4430 es1.0 exception
  ARM: 6759/1: smp: Select local timers vs broadcast timer support runtime
  ARM: pgtable: add pud-level code
  ARM: 6673/1: LPAE: use phys_addr_t instead of unsigned long for start of membanks
  ARM: Use long long format when printing meminfo physical addresses
  ARM: integrator: add Integrator/CP sched_clock support
  ARM: realview/vexpress: consolidate SMP bringup code
  ARM: realview/vexpress: consolidate localtimer support
  ARM: integrator/versatile: consolidate FPGA IRQ handling code
  ARM: rationalize versatile family Kconfig/Makefile
  ARM: realview: remove old AMBA device DMA definitions
  ARM: versatile: remove old AMBA device DMA definitions
  ARM: vexpress: use new init_early for clock tree and sched_clock init
  ARM: realview: use new init_early for clock tree and sched_clock init
  ARM: versatile: use new init_early for clock tree and sched_clock init
  ARM: integrator: use new init_early for clock tree init
  ...
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/dma-mapping.c   11
-rw-r--r--  arch/arm/mm/fault-armv.c     7
-rw-r--r--  arch/arm/mm/fault.c         39
-rw-r--r--  arch/arm/mm/idmap.c         35
-rw-r--r--  arch/arm/mm/init.c           6
-rw-r--r--  arch/arm/mm/mm.h             2
-rw-r--r--  arch/arm/mm/mmu.c           71
-rw-r--r--  arch/arm/mm/pgd.c           24
8 files changed, 144 insertions, 51 deletions
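
Nearly every hunk below makes the same change: an ARM page-table walk that used to go pgd -> pmd -> pte now takes a pud step between pgd and pmd (the "ARM: pgtable: add pud-level code" commit in the list above); the remaining hunks switch physical addresses to phys_addr_t and long long printing. The following is a minimal sketch, for orientation only, of the walk order the hunks converge on; walk_to_pte is a hypothetical helper name and the snippet is not compilable outside a kernel tree.

/*
 * Sketch of the pgd -> pud -> pmd -> pte walk adopted throughout this merge.
 * With the pud level folded (no LPAE yet), pud_offset() effectively hands
 * back the pgd entry, but every walker now steps through it explicitly.
 */
static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	pud = pud_offset(pgd, addr);		/* level added by this series */
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);		/* previously pmd_offset(pgd, addr) */
	if (pmd_none_or_clear_bad(pmd))
		return NULL;

	return pte_offset_map(pmd, addr);	/* caller must pte_unmap() */
}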
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4771dba61448..82a093cee09a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -149,6 +149,7 @@ static int __init consistent_init(void)
 {
 	int ret = 0;
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 	int i = 0;
@@ -156,7 +157,15 @@ static int __init consistent_init(void)
 
 	do {
 		pgd = pgd_offset(&init_mm, base);
-		pmd = pmd_alloc(&init_mm, pgd, base);
+
+		pud = pud_alloc(&init_mm, pgd, base);
+		if (!pud) {
+			printk(KERN_ERR "%s: no pud tables\n", __func__);
+			ret = -ENOMEM;
+			break;
+		}
+
+		pmd = pmd_alloc(&init_mm, pud, base);
 		if (!pmd) {
 			printk(KERN_ERR "%s: no pmd tables\n", __func__);
 			ret = -ENOMEM;
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 01210dba0221..7cab79179421 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -95,6 +95,7 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 {
 	spinlock_t *ptl;
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 	int ret;
@@ -103,7 +104,11 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	if (pgd_none_or_clear_bad(pgd))
 		return 0;
 
-	pmd = pmd_offset(pgd, address);
+	pud = pud_offset(pgd, address);
+	if (pud_none_or_clear_bad(pud))
+		return 0;
+
+	pmd = pmd_offset(pud, address);
 	if (pmd_none_or_clear_bad(pmd))
 		return 0;
 
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index f10f9bac2206..bc0e1d88fd3b 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -76,9 +76,11 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
 
 	printk(KERN_ALERT "pgd = %p\n", mm->pgd);
 	pgd = pgd_offset(mm, addr);
-	printk(KERN_ALERT "[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));
+	printk(KERN_ALERT "[%08lx] *pgd=%08llx",
+			addr, (long long)pgd_val(*pgd));
 
 	do {
+		pud_t *pud;
 		pmd_t *pmd;
 		pte_t *pte;
 
@@ -90,9 +92,21 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
 			break;
 		}
 
-		pmd = pmd_offset(pgd, addr);
+		pud = pud_offset(pgd, addr);
+		if (PTRS_PER_PUD != 1)
+			printk(", *pud=%08lx", pud_val(*pud));
+
+		if (pud_none(*pud))
+			break;
+
+		if (pud_bad(*pud)) {
+			printk("(bad)");
+			break;
+		}
+
+		pmd = pmd_offset(pud, addr);
 		if (PTRS_PER_PMD != 1)
-			printk(", *pmd=%08lx", pmd_val(*pmd));
+			printk(", *pmd=%08llx", (long long)pmd_val(*pmd));
 
 		if (pmd_none(*pmd))
 			break;
@@ -107,8 +121,9 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
 			break;
 
 		pte = pte_offset_map(pmd, addr);
-		printk(", *pte=%08lx", pte_val(*pte));
-		printk(", *ppte=%08lx", pte_val(pte[PTE_HWTABLE_PTRS]));
+		printk(", *pte=%08llx", (long long)pte_val(*pte));
+		printk(", *ppte=%08llx",
+				(long long)pte_val(pte[PTE_HWTABLE_PTRS]));
 		pte_unmap(pte);
 	} while(0);
 
@@ -388,6 +403,7 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
 {
 	unsigned int index;
 	pgd_t *pgd, *pgd_k;
+	pud_t *pud, *pud_k;
 	pmd_t *pmd, *pmd_k;
 
 	if (addr < TASK_SIZE)
@@ -406,12 +422,19 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
 
 	if (pgd_none(*pgd_k))
 		goto bad_area;
-
 	if (!pgd_present(*pgd))
 		set_pgd(pgd, *pgd_k);
 
-	pmd_k = pmd_offset(pgd_k, addr);
-	pmd = pmd_offset(pgd, addr);
+	pud = pud_offset(pgd, addr);
+	pud_k = pud_offset(pgd_k, addr);
+
+	if (pud_none(*pud_k))
+		goto bad_area;
+	if (!pud_present(*pud))
+		set_pud(pud, *pud_k);
+
+	pmd = pmd_offset(pud, addr);
+	pmd_k = pmd_offset(pud_k, addr);
 
 	/*
 	 * On ARM one Linux PGD entry contains two hardware entries (see page
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 57299446f787..2be9139a4ef3 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -4,10 +4,10 @@
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 
-static void idmap_add_pmd(pgd_t *pgd, unsigned long addr, unsigned long end,
+static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
 	unsigned long prot)
 {
-	pmd_t *pmd = pmd_offset(pgd, addr);
+	pmd_t *pmd = pmd_offset(pud, addr);
 
 	addr = (addr & PMD_MASK) | prot;
 	pmd[0] = __pmd(addr);
@@ -16,6 +16,18 @@ static void idmap_add_pmd(pgd_t *pgd, unsigned long addr, unsigned long end,
 	flush_pmd_entry(pmd);
 }
 
+static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
+	unsigned long prot)
+{
+	pud_t *pud = pud_offset(pgd, addr);
+	unsigned long next;
+
+	do {
+		next = pud_addr_end(addr, end);
+		idmap_add_pmd(pud, addr, next, prot);
+	} while (pud++, addr = next, addr != end);
+}
+
 void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
 {
 	unsigned long prot, next;
@@ -27,17 +39,28 @@ void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
 	pgd += pgd_index(addr);
 	do {
 		next = pgd_addr_end(addr, end);
-		idmap_add_pmd(pgd, addr, next, prot);
+		idmap_add_pud(pgd, addr, next, prot);
 	} while (pgd++, addr = next, addr != end);
 }
 
 #ifdef CONFIG_SMP
-static void idmap_del_pmd(pgd_t *pgd, unsigned long addr, unsigned long end)
+static void idmap_del_pmd(pud_t *pud, unsigned long addr, unsigned long end)
 {
-	pmd_t *pmd = pmd_offset(pgd, addr);
+	pmd_t *pmd = pmd_offset(pud, addr);
 	pmd_clear(pmd);
 }
 
+static void idmap_del_pud(pgd_t *pgd, unsigned long addr, unsigned long end)
+{
+	pud_t *pud = pud_offset(pgd, addr);
+	unsigned long next;
+
+	do {
+		next = pud_addr_end(addr, end);
+		idmap_del_pmd(pud, addr, next);
+	} while (pud++, addr = next, addr != end);
+}
+
 void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end)
 {
 	unsigned long next;
@@ -45,7 +68,7 @@ void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end)
 	pgd += pgd_index(addr);
 	do {
 		next = pgd_addr_end(addr, end);
-		idmap_del_pmd(pgd, addr, next);
+		idmap_del_pud(pgd, addr, next);
 	} while (pgd++, addr = next, addr != end);
 }
 #endif
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index cddd684364da..b3b0f0f5053d 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -350,7 +350,7 @@ void __init bootmem_init(void)
 	 */
 	arm_bootmem_free(min, max_low, max_high);
 
-	high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;
+	high_memory = __va(((phys_addr_t)max_low << PAGE_SHIFT) - 1) + 1;
 
 	/*
 	 * This doesn't seem to be used by the Linux memory manager any
@@ -398,8 +398,8 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn)
 	 * Convert to physical addresses, and
 	 * round start upwards and end downwards.
 	 */
-	pg = PAGE_ALIGN(__pa(start_pg));
-	pgend = __pa(end_pg) & PAGE_MASK;
+	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
+	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;
 
 	/*
 	 * If there are free pages between these,
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 36960df5fb76..d2384106af9c 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -7,7 +7,7 @@ extern pmd_t *top_pmd;
 
 static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
 {
-	return pmd_offset(pgd, virt);
+	return pmd_offset(pud_offset(pgd, virt), virt);
 }
 
 static inline pmd_t *pmd_off_k(unsigned long virt)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ff7b43b5885a..6cf76b3b68d1 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -533,7 +533,7 @@ static void __init *early_alloc(unsigned long sz)
 static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
 {
 	if (pmd_none(*pmd)) {
-		pte_t *pte = early_alloc(2 * PTRS_PER_PTE * sizeof(pte_t));
+		pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
 		__pmd_populate(pmd, __pa(pte), prot);
 	}
 	BUG_ON(pmd_bad(*pmd));
@@ -551,11 +551,11 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
+static void __init alloc_init_section(pud_t *pud, unsigned long addr,
 				      unsigned long end, phys_addr_t phys,
 				      const struct mem_type *type)
 {
-	pmd_t *pmd = pmd_offset(pgd, addr);
+	pmd_t *pmd = pmd_offset(pud, addr);
 
 	/*
 	 * Try a section mapping - end, addr and phys must all be aligned
@@ -584,6 +584,19 @@ static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
 	}
 }
 
+static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
+	unsigned long phys, const struct mem_type *type)
+{
+	pud_t *pud = pud_offset(pgd, addr);
+	unsigned long next;
+
+	do {
+		next = pud_addr_end(addr, end);
+		alloc_init_section(pud, addr, next, phys, type);
+		phys += next - addr;
+	} while (pud++, addr = next, addr != end);
+}
+
 static void __init create_36bit_mapping(struct map_desc *md,
 					const struct mem_type *type)
 {
@@ -592,13 +605,13 @@ static void __init create_36bit_mapping(struct map_desc *md,
 	pgd_t *pgd;
 
 	addr = md->virtual;
-	phys = (unsigned long)__pfn_to_phys(md->pfn);
+	phys = __pfn_to_phys(md->pfn);
 	length = PAGE_ALIGN(md->length);
 
 	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
 		printk(KERN_ERR "MM: CPU does not support supersection "
 		       "mapping for 0x%08llx at 0x%08lx\n",
-		       __pfn_to_phys((u64)md->pfn), addr);
+		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 		return;
 	}
 
@@ -611,14 +624,14 @@ static void __init create_36bit_mapping(struct map_desc *md,
 	if (type->domain) {
 		printk(KERN_ERR "MM: invalid domain in supersection "
 		       "mapping for 0x%08llx at 0x%08lx\n",
-		       __pfn_to_phys((u64)md->pfn), addr);
+		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 		return;
 	}
 
 	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
-		printk(KERN_ERR "MM: cannot create mapping for "
-		       "0x%08llx at 0x%08lx invalid alignment\n",
-		       __pfn_to_phys((u64)md->pfn), addr);
+		printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
+		       " at 0x%08lx invalid alignment\n",
+		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 		return;
 	}
 
@@ -631,7 +644,8 @@ static void __init create_36bit_mapping(struct map_desc *md,
 	pgd = pgd_offset_k(addr);
 	end = addr + length;
 	do {
-		pmd_t *pmd = pmd_offset(pgd, addr);
+		pud_t *pud = pud_offset(pgd, addr);
+		pmd_t *pmd = pmd_offset(pud, addr);
 		int i;
 
 		for (i = 0; i < 16; i++)
@@ -652,22 +666,23 @@ static void __init create_36bit_mapping(struct map_desc *md,
  */
 static void __init create_mapping(struct map_desc *md)
 {
-	unsigned long phys, addr, length, end;
+	unsigned long addr, length, end;
+	phys_addr_t phys;
 	const struct mem_type *type;
 	pgd_t *pgd;
 
 	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
-		printk(KERN_WARNING "BUG: not creating mapping for "
-		       "0x%08llx at 0x%08lx in user region\n",
-		       __pfn_to_phys((u64)md->pfn), md->virtual);
+		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
+		       " at 0x%08lx in user region\n",
+		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
 		return;
 	}
 
 	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
 	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
-		printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
-		       "overlaps vmalloc space\n",
-		       __pfn_to_phys((u64)md->pfn), md->virtual);
+		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
+		       " at 0x%08lx overlaps vmalloc space\n",
+		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
 	}
 
 	type = &mem_types[md->type];
@@ -681,13 +696,13 @@ static void __init create_mapping(struct map_desc *md)
 	}
 
 	addr = md->virtual & PAGE_MASK;
-	phys = (unsigned long)__pfn_to_phys(md->pfn);
+	phys = __pfn_to_phys(md->pfn);
 	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 
 	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
-		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
+		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
 		       "be mapped using pages, ignoring.\n",
-		       __pfn_to_phys(md->pfn), addr);
+		       (long long)__pfn_to_phys(md->pfn), addr);
 		return;
 	}
 
@@ -696,7 +711,7 @@ static void __init create_mapping(struct map_desc *md)
 	do {
 		unsigned long next = pgd_addr_end(addr, end);
 
-		alloc_init_section(pgd, addr, next, phys, type);
+		alloc_init_pud(pgd, addr, next, phys, type);
 
 		phys += next - addr;
 		addr = next;
@@ -794,9 +809,10 @@ static void __init sanity_check_meminfo(void)
 		 */
 		if (__va(bank->start) >= vmalloc_min ||
 		    __va(bank->start) < (void *)PAGE_OFFSET) {
-			printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
+			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
 			       "(vmalloc region overlap).\n",
-			       bank->start, bank->start + bank->size - 1);
+			       (unsigned long long)bank->start,
+			       (unsigned long long)bank->start + bank->size - 1);
 			continue;
 		}
 
@@ -807,10 +823,11 @@ static void __init sanity_check_meminfo(void)
 		if (__va(bank->start + bank->size) > vmalloc_min ||
 		    __va(bank->start + bank->size) < __va(bank->start)) {
 			unsigned long newsize = vmalloc_min - __va(bank->start);
-			printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
-			       "to -%.8lx (vmalloc region overlap).\n",
-			       bank->start, bank->start + bank->size - 1,
-			       bank->start + newsize - 1);
+			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
+			       "to -%.8llx (vmalloc region overlap).\n",
+			       (unsigned long long)bank->start,
+			       (unsigned long long)bank->start + bank->size - 1,
+			       (unsigned long long)bank->start + newsize - 1);
 			bank->size = newsize;
 		}
 #endif
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index 709244c66fa3..b2027c154b2a 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -23,6 +23,7 @@
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	pgd_t *new_pgd, *init_pgd;
+	pud_t *new_pud, *init_pud;
 	pmd_t *new_pmd, *init_pmd;
 	pte_t *new_pte, *init_pte;
 
@@ -46,7 +47,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 	 * On ARM, first page must always be allocated since it
 	 * contains the machine vectors.
 	 */
-	new_pmd = pmd_alloc(mm, new_pgd, 0);
+	new_pud = pud_alloc(mm, new_pgd, 0);
+	if (!new_pud)
+		goto no_pud;
+
+	new_pmd = pmd_alloc(mm, new_pud, 0);
 	if (!new_pmd)
 		goto no_pmd;
 
@@ -54,7 +59,8 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 	if (!new_pte)
 		goto no_pte;
 
-	init_pmd = pmd_offset(init_pgd, 0);
+	init_pud = pud_offset(init_pgd, 0);
+	init_pmd = pmd_offset(init_pud, 0);
 	init_pte = pte_offset_map(init_pmd, 0);
 	set_pte_ext(new_pte, *init_pte, 0);
 	pte_unmap(init_pte);
@@ -66,6 +72,8 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 no_pte:
 	pmd_free(mm, new_pmd);
 no_pmd:
+	pud_free(mm, new_pud);
+no_pud:
 	free_pages((unsigned long)new_pgd, 2);
 no_pgd:
 	return NULL;
@@ -74,6 +82,7 @@ no_pgd:
 void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
 {
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 	pgtable_t pte;
 
@@ -84,7 +93,11 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
 	if (pgd_none_or_clear_bad(pgd))
 		goto no_pgd;
 
-	pmd = pmd_offset(pgd, 0);
+	pud = pud_offset(pgd, 0);
+	if (pud_none_or_clear_bad(pud))
+		goto no_pud;
+
+	pmd = pmd_offset(pud, 0);
 	if (pmd_none_or_clear_bad(pmd))
 		goto no_pmd;
 
@@ -92,8 +105,11 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
 	pmd_clear(pmd);
 	pte_free(mm, pte);
 no_pmd:
-	pgd_clear(pgd);
+	pud_clear(pud);
 	pmd_free(mm, pmd);
+no_pud:
+	pgd_clear(pgd);
+	pud_free(mm, pud);
 no_pgd:
 	free_pages((unsigned long) pgd_base, 2);
 }