author		Russell King <rmk+kernel@arm.linux.org.uk>	2012-01-05 08:24:33 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2012-01-05 08:24:33 -0500
commit		2e0e943436912ffe0848ece58167edfe754edb96 (patch)
tree		b91919095c74742fa06e2105db6d859bee39b2b4 /arch/arm/mm
parent		a32737e1ca650504f172292dd344eb64c02311f3 (diff)
parent		ef3a0bf5bfadbace156fa2a3b9c753df2de41df2 (diff)
Merge branch 'devel-stable' into for-linus

Conflicts:
	arch/arm/kernel/setup.c
	arch/arm/mach-shmobile/board-kota2.c
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/Kconfig		|  40
-rw-r--r--	arch/arm/mm/alignment.c		|   2
-rw-r--r--	arch/arm/mm/context.c		|  19
-rw-r--r--	arch/arm/mm/fault.c		| 111
-rw-r--r--	arch/arm/mm/fault.h		|  27
-rw-r--r--	arch/arm/mm/fsr-2level.c	|  78
-rw-r--r--	arch/arm/mm/fsr-3level.c	|  68
-rw-r--r--	arch/arm/mm/idmap.c		|  93
-rw-r--r--	arch/arm/mm/init.c		|  40
-rw-r--r--	arch/arm/mm/ioremap.c		| 119
-rw-r--r--	arch/arm/mm/mm.h		|  14
-rw-r--r--	arch/arm/mm/mmu.c		|  97
-rw-r--r--	arch/arm/mm/nommu.c		|   4
-rw-r--r--	arch/arm/mm/pgd.c		|  51
-rw-r--r--	arch/arm/mm/proc-arm1020.S	|   3
-rw-r--r--	arch/arm/mm/proc-arm1020e.S	|   3
-rw-r--r--	arch/arm/mm/proc-arm1022.S	|   3
-rw-r--r--	arch/arm/mm/proc-arm1026.S	|   3
-rw-r--r--	arch/arm/mm/proc-arm6_7.S	|   4
-rw-r--r--	arch/arm/mm/proc-arm720.S	|   3
-rw-r--r--	arch/arm/mm/proc-arm740.S	|   3
-rw-r--r--	arch/arm/mm/proc-arm7tdmi.S	|   3
-rw-r--r--	arch/arm/mm/proc-arm920.S	|   3
-rw-r--r--	arch/arm/mm/proc-arm922.S	|   3
-rw-r--r--	arch/arm/mm/proc-arm925.S	|   3
-rw-r--r--	arch/arm/mm/proc-arm926.S	|   3
-rw-r--r--	arch/arm/mm/proc-arm940.S	|   3
-rw-r--r--	arch/arm/mm/proc-arm946.S	|   3
-rw-r--r--	arch/arm/mm/proc-arm9tdmi.S	|   3
-rw-r--r--	arch/arm/mm/proc-fa526.S	|   3
-rw-r--r--	arch/arm/mm/proc-feroceon.S	|   3
-rw-r--r--	arch/arm/mm/proc-macros.S	|   5
-rw-r--r--	arch/arm/mm/proc-mohawk.S	|   3
-rw-r--r--	arch/arm/mm/proc-sa110.S	|   3
-rw-r--r--	arch/arm/mm/proc-sa1100.S	|   3
-rw-r--r--	arch/arm/mm/proc-v6.S		|   3
-rw-r--r--	arch/arm/mm/proc-v7-2level.S	| 171
-rw-r--r--	arch/arm/mm/proc-v7-3level.S	| 150
-rw-r--r--	arch/arm/mm/proc-v7.S		| 179
-rw-r--r--	arch/arm/mm/proc-xsc3.S		|   3
-rw-r--r--	arch/arm/mm/proc-xscale.S	|   3
41 files changed, 945 insertions, 393 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 67f75a0b66d6..4cefb57d9ed2 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -629,6 +629,23 @@ config IO_36
 
 comment "Processor Features"
 
+config ARM_LPAE
+	bool "Support for the Large Physical Address Extension"
+	depends on MMU && CPU_V7
+	help
+	  Say Y if you have an ARMv7 processor supporting the LPAE page
+	  table format and you would like to access memory beyond the
+	  4GB limit. The resulting kernel image will not run on
+	  processors without the LPA extension.
+
+	  If unsure, say N.
+
+config ARCH_PHYS_ADDR_T_64BIT
+	def_bool ARM_LPAE
+
+config ARCH_DMA_ADDR_T_64BIT
+	bool
+
 config ARM_THUMB
 	bool "Support Thumb user binaries"
 	depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON
@@ -816,14 +833,23 @@ config CACHE_FEROCEON_L2_WRITETHROUGH
 	  Say Y here to use the Feroceon L2 cache in writethrough mode.
 	  Unless you specifically require this, say N for writeback mode.
 
+config MIGHT_HAVE_CACHE_L2X0
+	bool
+	help
+	  This option should be selected by machines which have a L2x0
+	  or PL310 cache controller, but where its use is optional.
+
+	  The only effect of this option is to make CACHE_L2X0 and
+	  related options available to the user for configuration.
+
+	  Boards or SoCs which always require the cache controller
+	  support to be present should select CACHE_L2X0 directly
+	  instead of this option, thus preventing the user from
+	  inadvertently configuring a broken kernel.
+
 config CACHE_L2X0
-	bool "Enable the L2x0 outer cache controller"
-	depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
-		   REALVIEW_EB_A9MP || ARCH_IMX_V6_V7 || MACH_REALVIEW_PBX || \
-		   ARCH_NOMADIK || ARCH_OMAP4 || ARCH_EXYNOS4 || ARCH_TEGRA || \
-		   ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || ARCH_SHMOBILE || \
-		   ARCH_PRIMA2 || ARCH_ZYNQ || ARCH_CNS3XXX || ARCH_HIGHBANK
-	default y
+	bool "Enable the L2x0 outer cache controller" if MIGHT_HAVE_CACHE_L2X0
+	default MIGHT_HAVE_CACHE_L2X0
 	select OUTER_CACHE
 	select OUTER_CACHE_SYNC
 	help
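For orientation: ARM_LPAE selects the 64-bit long-descriptor page table format, which is what lets phys_addr_t grow to 64 bits (ARCH_PHYS_ADDR_T_64BIT above) and physical addresses reach beyond 4GB. The following standalone sketch is illustrative only (not part of the patch): it shows how a 32-bit virtual address is split across the three LPAE translation levels, including the tiny 4-entry top level that drives the pgd.c changes further down.

/* Illustrative only -- not from the patch. LPAE long-descriptor layout
 * for a 32-bit virtual address: 2 + 9 + 9 index bits plus a 12-bit
 * page offset. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t va = 0xc0123456;		/* made-up kernel address */
	unsigned pgd = va >> 30;		/* level 1: 4 entries */
	unsigned pmd = (va >> 21) & 0x1ff;	/* level 2: 512 x 2MB blocks */
	unsigned pte = (va >> 12) & 0x1ff;	/* level 3: 512 x 4KB pages */

	printf("pgd=%u pmd=%u pte=%u offset=0x%03x\n",
	       pgd, pmd, pte, va & 0xfff);
	return 0;
}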
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index c335c76e0d88..caf14dc059e5 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -968,7 +968,7 @@ static int __init alignment_init(void)
 		ai_usermode = safe_usermode(ai_usermode, false);
 	}
 
-	hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN,
+	hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN,
 			"alignment exception");
 
 	/*
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 93aac068da94..ee9bb363d606 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -22,6 +22,21 @@ unsigned int cpu_last_asid = ASID_FIRST_VERSION;
 DEFINE_PER_CPU(struct mm_struct *, current_mm);
 #endif
 
+#ifdef CONFIG_ARM_LPAE
+#define cpu_set_asid(asid) {						\
+	unsigned long ttbl, ttbh;					\
+	asm volatile(							\
+	"	mrrc	p15, 0, %0, %1, c2	@ read TTBR0\n"		\
+	"	mov	%1, %2, lsl #(48 - 32)	@ set ASID\n"		\
+	"	mcrr	p15, 0, %0, %1, c2	@ set TTBR0\n"		\
+	: "=&r" (ttbl), "=&r" (ttbh)					\
+	: "r" (asid & ~ASID_MASK));					\
+}
+#else
+#define cpu_set_asid(asid) \
+	asm("	mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (asid))
+#endif
+
 /*
  * We fork()ed a process, and we need a new context for the child
  * to run in. We reserve version 0 for initial tasks so we will
@@ -37,7 +52,7 @@ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 static void flush_context(void)
 {
 	/* set the reserved ASID before flushing the TLB */
-	asm("mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (0));
+	cpu_set_asid(0);
 	isb();
 	local_flush_tlb_all();
 	if (icache_is_vivt_asid_tagged()) {
@@ -99,7 +114,7 @@ static void reset_context(void *info)
 	set_mm_context(mm, asid);
 
 	/* set the new ASID */
-	asm("mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id));
+	cpu_set_asid(mm->context.id);
 	isb();
 }
 
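The LPAE variant of cpu_set_asid() cannot simply write CONTEXTIDR: with the long-descriptor format the ASID lives in bits 55:48 of the 64-bit TTBR0, hence the mrrc/mcrr pair and the shift by (48 - 32) into the high word. A standalone sketch of the bit placement (all values made up, not from the patch; it assumes the page table base fits in 32 bits, which is why the high word can be rebuilt from the ASID alone):

/* Illustrative only: where the ASID lands in the 64-bit TTBR0. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t asid = 0x2a;			/* hypothetical context id */
	uint32_t ttbl = 0x8000406a;		/* low word: table base */
	uint32_t ttbh = asid << (48 - 32);	/* high word: ASID in 55:48 */
	uint64_t ttbr0 = ((uint64_t)ttbh << 32) | ttbl;

	printf("TTBR0 = %#018llx (ASID %u)\n",
	       (unsigned long long)ttbr0, (ttbh >> 16) & 0xff);
	return 0;
}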
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 4aabeaec25df..bb7eac381a8e 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -27,19 +27,6 @@
 
 #include "fault.h"
 
-/*
- * Fault status register encodings.  We steal bit 31 for our own purposes.
- */
-#define FSR_LNX_PF		(1 << 31)
-#define FSR_WRITE		(1 << 11)
-#define FSR_FS4			(1 << 10)
-#define FSR_FS3_0		(15)
-
-static inline int fsr_fs(unsigned int fsr)
-{
-	return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
-}
-
 #ifdef CONFIG_MMU
 
 #ifdef CONFIG_KPROBES
@@ -123,8 +110,10 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
 
 		pte = pte_offset_map(pmd, addr);
 		printk(", *pte=%08llx", (long long)pte_val(*pte));
+#ifndef CONFIG_ARM_LPAE
 		printk(", *ppte=%08llx",
 		       (long long)pte_val(pte[PTE_HWTABLE_PTRS]));
+#endif
 		pte_unmap(pte);
 	} while(0);
 
@@ -461,6 +450,12 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
 	pmd = pmd_offset(pud, addr);
 	pmd_k = pmd_offset(pud_k, addr);
 
+#ifdef CONFIG_ARM_LPAE
+	/*
+	 * Only one hardware entry per PMD with LPAE.
+	 */
+	index = 0;
+#else
 	/*
 	 * On ARM one Linux PGD entry contains two hardware entries (see page
 	 * tables layout in pgtable.h). We normally guarantee that we always
@@ -470,6 +465,7 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
 	 * for the first of pair.
 	 */
 	index = (addr >> SECTION_SHIFT) & 1;
+#endif
 	if (pmd_none(pmd_k[index]))
 		goto bad_area;
 
@@ -509,55 +505,20 @@ do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	return 1;
 }
 
-static struct fsr_info {
+struct fsr_info {
 	int	(*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
 	int	sig;
 	int	code;
 	const char *name;
-} fsr_info[] = {
-	/*
-	 * The following are the standard ARMv3 and ARMv4 aborts.  ARMv5
-	 * defines these to be "precise" aborts.
-	 */
-	{ do_bad,		SIGSEGV, 0,		"vector exception" },
-	{ do_bad,		SIGBUS,  BUS_ADRALN,	"alignment exception" },
-	{ do_bad,		SIGKILL, 0,		"terminal exception" },
-	{ do_bad,		SIGBUS,  BUS_ADRALN,	"alignment exception" },
-	{ do_bad,		SIGBUS,  0,		"external abort on linefetch" },
-	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"section translation fault" },
-	{ do_bad,		SIGBUS,  0,		"external abort on linefetch" },
-	{ do_page_fault,	SIGSEGV, SEGV_MAPERR,	"page translation fault" },
-	{ do_bad,		SIGBUS,  0,		"external abort on non-linefetch" },
-	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"section domain fault" },
-	{ do_bad,		SIGBUS,  0,		"external abort on non-linefetch" },
-	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"page domain fault" },
-	{ do_bad,		SIGBUS,  0,		"external abort on translation" },
-	{ do_sect_fault,	SIGSEGV, SEGV_ACCERR,	"section permission fault" },
-	{ do_bad,		SIGBUS,  0,		"external abort on translation" },
-	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"page permission fault" },
-	/*
-	 * The following are "imprecise" aborts, which are signalled by bit
-	 * 10 of the FSR, and may not be recoverable.  These are only
-	 * supported if the CPU abort handler supports bit 10.
-	 */
-	{ do_bad,		SIGBUS,  0,		"unknown 16" },
-	{ do_bad,		SIGBUS,  0,		"unknown 17" },
-	{ do_bad,		SIGBUS,  0,		"unknown 18" },
-	{ do_bad,		SIGBUS,  0,		"unknown 19" },
-	{ do_bad,		SIGBUS,  0,		"lock abort" }, /* xscale */
-	{ do_bad,		SIGBUS,  0,		"unknown 21" },
-	{ do_bad,		SIGBUS,  BUS_OBJERR,	"imprecise external abort" }, /* xscale */
-	{ do_bad,		SIGBUS,  0,		"unknown 23" },
-	{ do_bad,		SIGBUS,  0,		"dcache parity error" }, /* xscale */
-	{ do_bad,		SIGBUS,  0,		"unknown 25" },
-	{ do_bad,		SIGBUS,  0,		"unknown 26" },
-	{ do_bad,		SIGBUS,  0,		"unknown 27" },
-	{ do_bad,		SIGBUS,  0,		"unknown 28" },
-	{ do_bad,		SIGBUS,  0,		"unknown 29" },
-	{ do_bad,		SIGBUS,  0,		"unknown 30" },
-	{ do_bad,		SIGBUS,  0,		"unknown 31" }
 };
 
+/* FSR definition */
+#ifdef CONFIG_ARM_LPAE
+#include "fsr-3level.c"
+#else
+#include "fsr-2level.c"
+#endif
+
 void __init
 hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
 		int sig, int code, const char *name)
@@ -593,42 +554,6 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	arm_notify_die("", regs, &info, fsr, 0);
 }
 
-
-static struct fsr_info ifsr_info[] = {
-	{ do_bad,		SIGBUS,  0,		"unknown 0" },
-	{ do_bad,		SIGBUS,  0,		"unknown 1" },
-	{ do_bad,		SIGBUS,  0,		"debug event" },
-	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"section access flag fault" },
-	{ do_bad,		SIGBUS,  0,		"unknown 4" },
-	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"section translation fault" },
-	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"page access flag fault" },
-	{ do_page_fault,	SIGSEGV, SEGV_MAPERR,	"page translation fault" },
-	{ do_bad,		SIGBUS,  0,		"external abort on non-linefetch" },
-	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"section domain fault" },
-	{ do_bad,		SIGBUS,  0,		"unknown 10" },
-	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"page domain fault" },
-	{ do_bad,		SIGBUS,  0,		"external abort on translation" },
-	{ do_sect_fault,	SIGSEGV, SEGV_ACCERR,	"section permission fault" },
-	{ do_bad,		SIGBUS,  0,		"external abort on translation" },
-	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"page permission fault" },
-	{ do_bad,		SIGBUS,  0,		"unknown 16" },
-	{ do_bad,		SIGBUS,  0,		"unknown 17" },
-	{ do_bad,		SIGBUS,  0,		"unknown 18" },
-	{ do_bad,		SIGBUS,  0,		"unknown 19" },
-	{ do_bad,		SIGBUS,  0,		"unknown 20" },
-	{ do_bad,		SIGBUS,  0,		"unknown 21" },
-	{ do_bad,		SIGBUS,  0,		"unknown 22" },
-	{ do_bad,		SIGBUS,  0,		"unknown 23" },
-	{ do_bad,		SIGBUS,  0,		"unknown 24" },
-	{ do_bad,		SIGBUS,  0,		"unknown 25" },
-	{ do_bad,		SIGBUS,  0,		"unknown 26" },
-	{ do_bad,		SIGBUS,  0,		"unknown 27" },
-	{ do_bad,		SIGBUS,  0,		"unknown 28" },
-	{ do_bad,		SIGBUS,  0,		"unknown 29" },
-	{ do_bad,		SIGBUS,  0,		"unknown 30" },
-	{ do_bad,		SIGBUS,  0,		"unknown 31" },
-};
-
 void __init
 hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
 		int sig, int code, const char *name)
@@ -661,6 +586,7 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
 	arm_notify_die("", regs, &info, ifsr, 0);
 }
 
+#ifndef CONFIG_ARM_LPAE
 static int __init exceptions_init(void)
 {
 	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
@@ -683,3 +609,4 @@ static int __init exceptions_init(void)
 }
 
 arch_initcall(exceptions_init);
+#endif
diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
index 49e9e3804de4..cf08bdfbe0d6 100644
--- a/arch/arm/mm/fault.h
+++ b/arch/arm/mm/fault.h
@@ -1,3 +1,28 @@
-void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
+#ifndef __ARCH_ARM_FAULT_H
+#define __ARCH_ARM_FAULT_H
+
+/*
+ * Fault status register encodings.  We steal bit 31 for our own purposes.
+ */
+#define FSR_LNX_PF		(1 << 31)
+#define FSR_WRITE		(1 << 11)
+#define FSR_FS4			(1 << 10)
+#define FSR_FS3_0		(15)
+#define FSR_FS5_0		(0x3f)
+
+#ifdef CONFIG_ARM_LPAE
+static inline int fsr_fs(unsigned int fsr)
+{
+	return fsr & FSR_FS5_0;
+}
+#else
+static inline int fsr_fs(unsigned int fsr)
+{
+	return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
+}
+#endif
 
+void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
 unsigned long search_exception_table(unsigned long addr);
+
+#endif	/* __ARCH_ARM_FAULT_H */
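The two fsr_fs() variants reflect the different FSR layouts: the classic short-descriptor format scatters the 5-bit status across FS[3:0] and FS[4] (bit 10), while LPAE keeps a contiguous 6-bit status in FSR[5:0]. A small self-contained decode example, using a made-up FSR value:

/* Illustrative only: decoding the fault status with both variants. */
#include <stdio.h>

#define FSR_FS4		(1 << 10)
#define FSR_FS3_0	(15)
#define FSR_FS5_0	(0x3f)

static int fsr_fs_2level(unsigned int fsr)
{
	/* FS[4] sits at bit 10; shift it down next to FS[3:0] */
	return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
}

static int fsr_fs_3level(unsigned int fsr)
{
	/* LPAE keeps the whole status contiguous in FSR[5:0] */
	return fsr & FSR_FS5_0;
}

int main(void)
{
	unsigned int fsr = 0x405;	/* bit 10 set, low nibble = 5 */

	printf("2-level fs = %d\n", fsr_fs_2level(fsr));	/* 16 + 5 = 21 */
	printf("3-level fs = %d\n", fsr_fs_3level(fsr));	/* 5 */
	return 0;
}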
diff --git a/arch/arm/mm/fsr-2level.c b/arch/arm/mm/fsr-2level.c
new file mode 100644
index 000000000000..18ca74c0f341
--- /dev/null
+++ b/arch/arm/mm/fsr-2level.c
@@ -0,0 +1,78 @@
+static struct fsr_info fsr_info[] = {
+	/*
+	 * The following are the standard ARMv3 and ARMv4 aborts.  ARMv5
+	 * defines these to be "precise" aborts.
+	 */
+	{ do_bad,		SIGSEGV, 0,		"vector exception" },
+	{ do_bad,		SIGBUS,  BUS_ADRALN,	"alignment exception" },
+	{ do_bad,		SIGKILL, 0,		"terminal exception" },
+	{ do_bad,		SIGBUS,  BUS_ADRALN,	"alignment exception" },
+	{ do_bad,		SIGBUS,  0,		"external abort on linefetch" },
+	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"section translation fault" },
+	{ do_bad,		SIGBUS,  0,		"external abort on linefetch" },
+	{ do_page_fault,	SIGSEGV, SEGV_MAPERR,	"page translation fault" },
+	{ do_bad,		SIGBUS,  0,		"external abort on non-linefetch" },
+	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"section domain fault" },
+	{ do_bad,		SIGBUS,  0,		"external abort on non-linefetch" },
+	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"page domain fault" },
+	{ do_bad,		SIGBUS,  0,		"external abort on translation" },
+	{ do_sect_fault,	SIGSEGV, SEGV_ACCERR,	"section permission fault" },
+	{ do_bad,		SIGBUS,  0,		"external abort on translation" },
+	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"page permission fault" },
+	/*
+	 * The following are "imprecise" aborts, which are signalled by bit
+	 * 10 of the FSR, and may not be recoverable.  These are only
+	 * supported if the CPU abort handler supports bit 10.
+	 */
+	{ do_bad,		SIGBUS,  0,		"unknown 16" },
+	{ do_bad,		SIGBUS,  0,		"unknown 17" },
+	{ do_bad,		SIGBUS,  0,		"unknown 18" },
+	{ do_bad,		SIGBUS,  0,		"unknown 19" },
+	{ do_bad,		SIGBUS,  0,		"lock abort" }, /* xscale */
+	{ do_bad,		SIGBUS,  0,		"unknown 21" },
+	{ do_bad,		SIGBUS,  BUS_OBJERR,	"imprecise external abort" }, /* xscale */
+	{ do_bad,		SIGBUS,  0,		"unknown 23" },
+	{ do_bad,		SIGBUS,  0,		"dcache parity error" }, /* xscale */
+	{ do_bad,		SIGBUS,  0,		"unknown 25" },
+	{ do_bad,		SIGBUS,  0,		"unknown 26" },
+	{ do_bad,		SIGBUS,  0,		"unknown 27" },
+	{ do_bad,		SIGBUS,  0,		"unknown 28" },
+	{ do_bad,		SIGBUS,  0,		"unknown 29" },
+	{ do_bad,		SIGBUS,  0,		"unknown 30" },
+	{ do_bad,		SIGBUS,  0,		"unknown 31" },
+};
+
+static struct fsr_info ifsr_info[] = {
+	{ do_bad,		SIGBUS,  0,		"unknown 0" },
+	{ do_bad,		SIGBUS,  0,		"unknown 1" },
+	{ do_bad,		SIGBUS,  0,		"debug event" },
+	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"section access flag fault" },
+	{ do_bad,		SIGBUS,  0,		"unknown 4" },
+	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"section translation fault" },
+	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"page access flag fault" },
+	{ do_page_fault,	SIGSEGV, SEGV_MAPERR,	"page translation fault" },
+	{ do_bad,		SIGBUS,  0,		"external abort on non-linefetch" },
+	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"section domain fault" },
+	{ do_bad,		SIGBUS,  0,		"unknown 10" },
+	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"page domain fault" },
+	{ do_bad,		SIGBUS,  0,		"external abort on translation" },
+	{ do_sect_fault,	SIGSEGV, SEGV_ACCERR,	"section permission fault" },
+	{ do_bad,		SIGBUS,  0,		"external abort on translation" },
+	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"page permission fault" },
+	{ do_bad,		SIGBUS,  0,		"unknown 16" },
+	{ do_bad,		SIGBUS,  0,		"unknown 17" },
+	{ do_bad,		SIGBUS,  0,		"unknown 18" },
+	{ do_bad,		SIGBUS,  0,		"unknown 19" },
+	{ do_bad,		SIGBUS,  0,		"unknown 20" },
+	{ do_bad,		SIGBUS,  0,		"unknown 21" },
+	{ do_bad,		SIGBUS,  0,		"unknown 22" },
+	{ do_bad,		SIGBUS,  0,		"unknown 23" },
+	{ do_bad,		SIGBUS,  0,		"unknown 24" },
+	{ do_bad,		SIGBUS,  0,		"unknown 25" },
+	{ do_bad,		SIGBUS,  0,		"unknown 26" },
+	{ do_bad,		SIGBUS,  0,		"unknown 27" },
+	{ do_bad,		SIGBUS,  0,		"unknown 28" },
+	{ do_bad,		SIGBUS,  0,		"unknown 29" },
+	{ do_bad,		SIGBUS,  0,		"unknown 30" },
+	{ do_bad,		SIGBUS,  0,		"unknown 31" },
+};
diff --git a/arch/arm/mm/fsr-3level.c b/arch/arm/mm/fsr-3level.c
new file mode 100644
index 000000000000..05a4e9431836
--- /dev/null
+++ b/arch/arm/mm/fsr-3level.c
@@ -0,0 +1,68 @@
+static struct fsr_info fsr_info[] = {
+	{ do_bad,		SIGBUS,  0,		"unknown 0" },
+	{ do_bad,		SIGBUS,  0,		"unknown 1" },
+	{ do_bad,		SIGBUS,  0,		"unknown 2" },
+	{ do_bad,		SIGBUS,  0,		"unknown 3" },
+	{ do_bad,		SIGBUS,  0,		"reserved translation fault" },
+	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 1 translation fault" },
+	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 2 translation fault" },
+	{ do_page_fault,	SIGSEGV, SEGV_MAPERR,	"level 3 translation fault" },
+	{ do_bad,		SIGBUS,  0,		"reserved access flag fault" },
+	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"level 1 access flag fault" },
+	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"level 2 access flag fault" },
+	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 access flag fault" },
+	{ do_bad,		SIGBUS,  0,		"reserved permission fault" },
+	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"level 1 permission fault" },
+	{ do_sect_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 permission fault" },
+	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 permission fault" },
+	{ do_bad,		SIGBUS,  0,		"synchronous external abort" },
+	{ do_bad,		SIGBUS,  0,		"asynchronous external abort" },
+	{ do_bad,		SIGBUS,  0,		"unknown 18" },
+	{ do_bad,		SIGBUS,  0,		"unknown 19" },
+	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"synchronous parity error" },
+	{ do_bad,		SIGBUS,  0,		"asynchronous parity error" },
+	{ do_bad,		SIGBUS,  0,		"unknown 26" },
+	{ do_bad,		SIGBUS,  0,		"unknown 27" },
+	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"unknown 32" },
+	{ do_bad,		SIGBUS,  BUS_ADRALN,	"alignment fault" },
+	{ do_bad,		SIGBUS,  0,		"debug event" },
+	{ do_bad,		SIGBUS,  0,		"unknown 35" },
+	{ do_bad,		SIGBUS,  0,		"unknown 36" },
+	{ do_bad,		SIGBUS,  0,		"unknown 37" },
+	{ do_bad,		SIGBUS,  0,		"unknown 38" },
+	{ do_bad,		SIGBUS,  0,		"unknown 39" },
+	{ do_bad,		SIGBUS,  0,		"unknown 40" },
+	{ do_bad,		SIGBUS,  0,		"unknown 41" },
+	{ do_bad,		SIGBUS,  0,		"unknown 42" },
+	{ do_bad,		SIGBUS,  0,		"unknown 43" },
+	{ do_bad,		SIGBUS,  0,		"unknown 44" },
+	{ do_bad,		SIGBUS,  0,		"unknown 45" },
+	{ do_bad,		SIGBUS,  0,		"unknown 46" },
+	{ do_bad,		SIGBUS,  0,		"unknown 47" },
+	{ do_bad,		SIGBUS,  0,		"unknown 48" },
+	{ do_bad,		SIGBUS,  0,		"unknown 49" },
+	{ do_bad,		SIGBUS,  0,		"unknown 50" },
+	{ do_bad,		SIGBUS,  0,		"unknown 51" },
+	{ do_bad,		SIGBUS,  0,		"implementation fault (lockdown abort)" },
+	{ do_bad,		SIGBUS,  0,		"unknown 53" },
+	{ do_bad,		SIGBUS,  0,		"unknown 54" },
+	{ do_bad,		SIGBUS,  0,		"unknown 55" },
+	{ do_bad,		SIGBUS,  0,		"unknown 56" },
+	{ do_bad,		SIGBUS,  0,		"unknown 57" },
+	{ do_bad,		SIGBUS,  0,		"implementation fault (coprocessor abort)" },
+	{ do_bad,		SIGBUS,  0,		"unknown 59" },
+	{ do_bad,		SIGBUS,  0,		"unknown 60" },
+	{ do_bad,		SIGBUS,  0,		"unknown 61" },
+	{ do_bad,		SIGBUS,  0,		"unknown 62" },
+	{ do_bad,		SIGBUS,  0,		"unknown 63" },
+};
+
+#define ifsr_info	fsr_info
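Unlike the short-descriptor table, the long-descriptor status codes are structured: bits [5:2] give the fault type and bits [1:0] the failing table level, which is why the entries above repeat in blocks of four (translation, access flag, permission, ...); it also means data and prefetch aborts can share one table, hence the ifsr_info alias. An illustrative decode:

/* Illustrative only: unpacking an LPAE fault status code. */
#include <stdio.h>

int main(void)
{
	unsigned int fs = 7;		/* "level 3 translation fault" above */
	unsigned int type = fs >> 2;	/* 1 = translation, 2 = access flag,
					 * 3 = permission fault */
	unsigned int level = fs & 3;	/* failing translation level */

	printf("type=%u level=%u\n", type, level);
	return 0;
}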
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 2be9139a4ef3..feacf4c76712 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -1,9 +1,38 @@
 #include <linux/kernel.h>
 
 #include <asm/cputype.h>
+#include <asm/idmap.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
+#include <asm/sections.h>
 
+pgd_t *idmap_pgd;
+
+#ifdef CONFIG_ARM_LPAE
+static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
+	unsigned long prot)
+{
+	pmd_t *pmd;
+	unsigned long next;
+
+	if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
+		pmd = pmd_alloc_one(&init_mm, addr);
+		if (!pmd) {
+			pr_warning("Failed to allocate identity pmd.\n");
+			return;
+		}
+		pud_populate(&init_mm, pud, pmd);
+		pmd += pmd_index(addr);
+	} else
+		pmd = pmd_offset(pud, addr);
+
+	do {
+		next = pmd_addr_end(addr, end);
+		*pmd = __pmd((addr & PMD_MASK) | prot);
+		flush_pmd_entry(pmd);
+	} while (pmd++, addr = next, addr != end);
+}
+#else	/* !CONFIG_ARM_LPAE */
 static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
 	unsigned long prot)
 {
@@ -15,6 +44,7 @@ static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
 	pmd[1] = __pmd(addr);
 	flush_pmd_entry(pmd);
 }
+#endif	/* CONFIG_ARM_LPAE */
 
 static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
 	unsigned long prot)
@@ -28,11 +58,11 @@ static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
 	} while (pud++, addr = next, addr != end);
 }
 
-void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
+static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
 {
 	unsigned long prot, next;
 
-	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
+	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
 	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
 		prot |= PMD_BIT4;
 
@@ -43,48 +73,41 @@ void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
 	} while (pgd++, addr = next, addr != end);
 }
 
-#ifdef CONFIG_SMP
-static void idmap_del_pmd(pud_t *pud, unsigned long addr, unsigned long end)
-{
-	pmd_t *pmd = pmd_offset(pud, addr);
-	pmd_clear(pmd);
-}
-
-static void idmap_del_pud(pgd_t *pgd, unsigned long addr, unsigned long end)
-{
-	pud_t *pud = pud_offset(pgd, addr);
-	unsigned long next;
-
-	do {
-		next = pud_addr_end(addr, end);
-		idmap_del_pmd(pud, addr, next);
-	} while (pud++, addr = next, addr != end);
-}
-
-void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end)
-{
-	unsigned long next;
-
-	pgd += pgd_index(addr);
-	do {
-		next = pgd_addr_end(addr, end);
-		idmap_del_pud(pgd, addr, next);
-	} while (pgd++, addr = next, addr != end);
-}
-#endif
+extern char  __idmap_text_start[], __idmap_text_end[];
+
+static int __init init_static_idmap(void)
+{
+	phys_addr_t idmap_start, idmap_end;
+
+	idmap_pgd = pgd_alloc(&init_mm);
+	if (!idmap_pgd)
+		return -ENOMEM;
+
+	/* Add an identity mapping for the physical address of the section. */
+	idmap_start = virt_to_phys((void *)__idmap_text_start);
+	idmap_end = virt_to_phys((void *)__idmap_text_end);
+
+	pr_info("Setting up static identity map for 0x%llx - 0x%llx\n",
+		(long long)idmap_start, (long long)idmap_end);
+	identity_mapping_add(idmap_pgd, idmap_start, idmap_end);
+
+	return 0;
+}
+early_initcall(init_static_idmap);
 
 /*
- * In order to soft-boot, we need to insert a 1:1 mapping in place of
- * the user-mode pages.  This will then ensure that we have predictable
- * results when turning the mmu off
+ * In order to soft-boot, we need to switch to a 1:1 mapping for the
+ * cpu_reset functions. This will then ensure that we have predictable
+ * results when turning off the mmu.
  */
-void setup_mm_for_reboot(char mode)
+void setup_mm_for_reboot(void)
 {
-	/*
-	 * We need to access to user-mode page tables here. For kernel threads
-	 * we don't have any user-mode mappings so we use the context that we
-	 * "borrowed".
-	 */
-	identity_mapping_add(current->active_mm->pgd, 0, TASK_SIZE);
+	/* Clean and invalidate L1. */
+	flush_cache_all();
+
+	/* Switch to the identity mapping. */
+	cpu_switch_mm(idmap_pgd, &init_mm);
+
+	/* Flush the TLB. */
 	local_flush_tlb_all();
 }
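idmap.c now builds one permanent identity mapping at early_initcall time, covering only the .idmap.text section, and setup_mm_for_reboot() simply switches to it instead of rewriting the current mm's user mappings on every reboot. A hypothetical caller sketch — the function name arm_machine_restart() is a stand-in here, not taken from this patch:

/* Illustrative only: where setup_mm_for_reboot() fits in a restart path. */
void arm_machine_restart(unsigned long reset_vector)
{
	/* Move to the 1:1 map so the reset code keeps executing from the
	 * same physical addresses once the MMU is switched off. */
	setup_mm_for_reboot();

	/* cpu_reset() is placed in .idmap.text (see the proc-*.S changes
	 * below), so the static identity map is guaranteed to cover it. */
	cpu_reset(reset_vector);
}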
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index fbdd12ea3a58..786adddf1a86 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -20,7 +20,6 @@
 #include <linux/highmem.h>
 #include <linux/gfp.h>
 #include <linux/memblock.h>
-#include <linux/sort.h>
 
 #include <asm/mach-types.h>
 #include <asm/prom.h>
@@ -134,30 +133,18 @@ void show_mem(unsigned int filter)
 }
 
 static void __init find_limits(unsigned long *min, unsigned long *max_low,
 	unsigned long *max_high)
 {
 	struct meminfo *mi = &meminfo;
 	int i;
 
-	*min = -1UL;
-	*max_low = *max_high = 0;
-
-	for_each_bank (i, mi) {
-		struct membank *bank = &mi->bank[i];
-		unsigned long start, end;
-
-		start = bank_pfn_start(bank);
-		end = bank_pfn_end(bank);
-
-		if (*min > start)
-			*min = start;
-		if (*max_high < end)
-			*max_high = end;
-		if (bank->highmem)
-			continue;
-		if (*max_low < end)
-			*max_low = end;
-	}
+	/* This assumes the meminfo array is properly sorted */
+	*min = bank_pfn_start(&mi->bank[0]);
+	for_each_bank (i, mi)
+		if (mi->bank[i].highmem)
+			break;
+	*max_low = bank_pfn_end(&mi->bank[i - 1]);
+	*max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]);
 }
 
 static void __init arm_bootmem_init(unsigned long start_pfn,
@@ -319,19 +306,10 @@ static void arm_memory_present(void)
 }
 #endif
 
-static int __init meminfo_cmp(const void *_a, const void *_b)
-{
-	const struct membank *a = _a, *b = _b;
-	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
-	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
-}
-
 void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 {
 	int i;
 
-	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
-
 	memblock_init();
 	for (i = 0; i < mi->nr_banks; i++)
 		memblock_add(mi->bank[i].start, mi->bank[i].size);
@@ -403,8 +381,6 @@ void __init bootmem_init(void)
 	 */
 	arm_bootmem_free(min, max_low, max_high);
 
-	high_memory = __va(((phys_addr_t)max_low << PAGE_SHIFT) - 1) + 1;
-
 	/*
 	 * This doesn't seem to be used by the Linux memory manager any
 	 * more, but is used by ll_rw_block.  If we can get rid of it, we
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index bdb248c4f55c..80632e8d7538 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -36,12 +36,6 @@
 #include <asm/mach/map.h>
 #include "mm.h"
 
-/*
- * Used by ioremap() and iounmap() code to mark (super)section-mapped
- * I/O regions in vm_struct->flags field.
- */
-#define VM_ARM_SECTION_MAPPING	0x80000000
-
 int ioremap_page(unsigned long virt, unsigned long phys,
 		 const struct mem_type *mtype)
 {
@@ -64,7 +58,7 @@ void __check_kvm_seq(struct mm_struct *mm)
 	} while (seq != init_mm.context.kvm_seq);
 }
 
-#ifndef CONFIG_SMP
+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 /*
  * Section support is unsafe on SMP - If you iounmap and ioremap a region,
  * the other CPUs will not see this change until their next context switch.
@@ -79,13 +73,16 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
 {
 	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
 	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmdp;
 
 	flush_cache_vunmap(addr, end);
 	pgd = pgd_offset_k(addr);
+	pud = pud_offset(pgd, addr);
+	pmdp = pmd_offset(pud, addr);
 	do {
-		pmd_t pmd, *pmdp = pmd_offset(pgd, addr);
+		pmd_t pmd = *pmdp;
 
-		pmd = *pmdp;
 		if (!pmd_none(pmd)) {
 			/*
 			 * Clear the PMD from the page table, and
@@ -104,8 +101,8 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
 			pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
 		}
 
-		addr += PGDIR_SIZE;
-		pgd++;
+		addr += PMD_SIZE;
+		pmdp += 2;
 	} while (addr < end);
 
 	/*
@@ -124,6 +121,8 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
 {
 	unsigned long addr = virt, end = virt + size;
 	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
 
 	/*
 	 * Remove and free any PTE-based mapping, and
@@ -132,17 +131,17 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
 	unmap_area_sections(virt, size);
 
 	pgd = pgd_offset_k(addr);
+	pud = pud_offset(pgd, addr);
+	pmd = pmd_offset(pud, addr);
 	do {
-		pmd_t *pmd = pmd_offset(pgd, addr);
-
 		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
 		pfn += SZ_1M >> PAGE_SHIFT;
 		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
 		pfn += SZ_1M >> PAGE_SHIFT;
 		flush_pmd_entry(pmd);
 
-		addr += PGDIR_SIZE;
-		pgd++;
+		addr += PMD_SIZE;
+		pmd += 2;
 	} while (addr < end);
 
 	return 0;
@@ -154,6 +153,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
 {
 	unsigned long addr = virt, end = virt + size;
 	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
 
 	/*
 	 * Remove and free any PTE-based mapping, and
@@ -162,6 +163,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
 	unmap_area_sections(virt, size);
 
 	pgd = pgd_offset_k(virt);
+	pud = pud_offset(pgd, addr);
+	pmd = pmd_offset(pud, addr);
 	do {
 		unsigned long super_pmd_val, i;
 
@@ -170,14 +173,12 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
 		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;
 
 		for (i = 0; i < 8; i++) {
-			pmd_t *pmd = pmd_offset(pgd, addr);
-
 			pmd[0] = __pmd(super_pmd_val);
 			pmd[1] = __pmd(super_pmd_val);
 			flush_pmd_entry(pmd);
 
-			addr += PGDIR_SIZE;
-			pgd++;
+			addr += PMD_SIZE;
+			pmd += 2;
 		}
 
 		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
@@ -195,17 +196,13 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	unsigned long addr;
 	struct vm_struct * area;
 
+#ifndef CONFIG_ARM_LPAE
 	/*
 	 * High mappings must be supersection aligned
 	 */
 	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
 		return NULL;
-
-	/*
-	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
-	 */
-	if (WARN_ON(pfn_valid(pfn)))
-		return NULL;
+#endif
 
 	type = get_mem_type(mtype);
 	if (!type)
@@ -216,12 +213,40 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	 */
 	size = PAGE_ALIGN(offset + size);
 
+	/*
+	 * Try to reuse one of the static mapping whenever possible.
+	 */
+	read_lock(&vmlist_lock);
+	for (area = vmlist; area; area = area->next) {
+		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
+			break;
+		if (!(area->flags & VM_ARM_STATIC_MAPPING))
+			continue;
+		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
+			continue;
+		if (__phys_to_pfn(area->phys_addr) > pfn ||
+		    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
+			continue;
+		/* we can drop the lock here as we know *area is static */
+		read_unlock(&vmlist_lock);
+		addr = (unsigned long)area->addr;
+		addr += __pfn_to_phys(pfn) - area->phys_addr;
+		return (void __iomem *) (offset + addr);
+	}
+	read_unlock(&vmlist_lock);
+
+	/*
+	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
+	 */
+	if (WARN_ON(pfn_valid(pfn)))
+		return NULL;
+
 	area = get_vm_area_caller(size, VM_IOREMAP, caller);
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;
 
-#ifndef CONFIG_SMP
+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 	if (DOMAIN_IO == 0 &&
 	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
 	       cpu_is_xsc3()) && pfn >= 0x100000 &&
@@ -313,28 +338,34 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
 void __iounmap(volatile void __iomem *io_addr)
 {
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
-#ifndef CONFIG_SMP
-	struct vm_struct **p, *tmp;
+	struct vm_struct *vm;
 
-	/*
-	 * If this is a section based mapping we need to handle it
-	 * specially as the VM subsystem does not know how to handle
-	 * such a beast. We need the lock here b/c we need to clear
-	 * all the mappings before the area can be reclaimed
-	 * by someone else.
-	 */
-	write_lock(&vmlist_lock);
-	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
-		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
-			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
-				unmap_area_sections((unsigned long)tmp->addr,
-						    tmp->size);
-			}
+	read_lock(&vmlist_lock);
+	for (vm = vmlist; vm; vm = vm->next) {
+		if (vm->addr > addr)
+			break;
+		if (!(vm->flags & VM_IOREMAP))
+			continue;
+		/* If this is a static mapping we must leave it alone */
+		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
+		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
+			read_unlock(&vmlist_lock);
+			return;
+		}
+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
+		/*
+		 * If this is a section based mapping we need to handle it
+		 * specially as the VM subsystem does not know how to handle
+		 * such a beast.
+		 */
+		if ((vm->addr == addr) &&
+		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
+			unmap_area_sections((unsigned long)vm->addr, vm->size);
 			break;
 		}
-	}
-	write_unlock(&vmlist_lock);
 #endif
+	}
+	read_unlock(&vmlist_lock);
 
 	vunmap(addr);
 }
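__arm_ioremap_pfn_caller() now scans vmlist for a static mapping registered by iotable_init() (see the mmu.c changes below) before creating a new area, and __iounmap() refuses to tear such mappings down. A hedged sketch of how a board would hit this path — the addresses and UART base are hypothetical, not from the patch:

/* Illustrative only: static mapping reuse from a board file. */
static struct map_desc board_io_desc[] __initdata = {
	{
		.virtual	= 0xf8000000,
		.pfn		= __phys_to_pfn(0x10000000),
		.length		= SZ_1M,
		.type		= MT_DEVICE,
	},
};

static void __init board_map_io(void)
{
	iotable_init(board_io_desc, ARRAY_SIZE(board_io_desc));
}

static void __init board_probe(void)
{
	/* Falls entirely inside the static mapping above, so the vmlist
	 * scan hands back 0xf8009000 instead of allocating a new area;
	 * a later iounmap() on it is then a deliberate no-op. */
	void __iomem *uart = ioremap(0x10009000, SZ_4K);
	(void)uart;
}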
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index ad7cce3bc431..70f6d3ea4834 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -21,6 +21,20 @@ const struct mem_type *get_mem_type(unsigned int type);
 
 extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
 
+/*
+ * ARM specific vm_struct->flags bits.
+ */
+
+/* (super)section-mapped I/O regions used by ioremap()/iounmap() */
+#define VM_ARM_SECTION_MAPPING	0x80000000
+
+/* permanent static mappings from iotable_init() */
+#define VM_ARM_STATIC_MAPPING	0x40000000
+
+/* mapping type (attributes) for permanent static mappings */
+#define VM_ARM_MTYPE(mt)	((mt) << 20)
+#define VM_ARM_MTYPE_MASK	(0x1f << 20)
+
 #endif
 
 #ifdef CONFIG_ZONE_DMA
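VM_ARM_MTYPE()/VM_ARM_MTYPE_MASK pack the mem_types index into bits [24:20] of vm_struct->flags, so the ioremap() reuse path can check that an existing static mapping was created with a compatible memory type. A minimal sketch of the encode/decode (the index value is illustrative):

/* Illustrative only: packing a memory type into vm_struct->flags. */
#include <stdio.h>

#define VM_ARM_MTYPE(mt)	((mt) << 20)
#define VM_ARM_MTYPE_MASK	(0x1f << 20)

int main(void)
{
	unsigned int mt = 3;	/* hypothetical mem_types index */
	unsigned long flags = 0x40000000 | VM_ARM_MTYPE(mt);

	printf("stored type = %lu\n", (flags & VM_ARM_MTYPE_MASK) >> 20);
	return 0;
}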
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index dc8c550e6cbd..94c5a0c94f5e 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -15,6 +15,7 @@
 #include <linux/nodemask.h>
 #include <linux/memblock.h>
 #include <linux/fs.h>
+#include <linux/vmalloc.h>
 
 #include <asm/cputype.h>
 #include <asm/sections.h>
@@ -150,6 +151,7 @@ static int __init early_nowrite(char *__unused)
 }
 early_param("nowb", early_nowrite);
 
+#ifndef CONFIG_ARM_LPAE
 static int __init early_ecc(char *p)
 {
 	if (memcmp(p, "on", 2) == 0)
@@ -159,6 +161,7 @@ static int __init early_ecc(char *p)
 	return 0;
 }
 early_param("ecc", early_ecc);
+#endif
 
 static int __init noalign_setup(char *__unused)
 {
@@ -228,10 +231,12 @@ static struct mem_type mem_types[] = {
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
 		.domain    = DOMAIN_KERNEL,
 	},
+#ifndef CONFIG_ARM_LPAE
 	[MT_MINICLEAN] = {
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
 		.domain    = DOMAIN_KERNEL,
 	},
+#endif
 	[MT_LOW_VECTORS] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 			     L_PTE_RDONLY,
@@ -429,6 +434,7 @@ static void __init build_mem_type_table(void)
 	 * ARMv6 and above have extended page tables.
 	 */
 	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
+#ifndef CONFIG_ARM_LPAE
 		/*
 		 * Mark cache clean areas and XIP ROM read only
 		 * from SVC mode and no access from userspace.
@@ -436,6 +442,7 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+#endif
 
 		if (is_smp()) {
 			/*
@@ -474,6 +481,18 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
 	}
 
+#ifdef CONFIG_ARM_LPAE
+	/*
+	 * Do not generate access flag faults for the kernel mappings.
+	 */
+	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
+		mem_types[i].prot_pte |= PTE_EXT_AF;
+		mem_types[i].prot_sect |= PMD_SECT_AF;
+	}
+	kern_pgprot |= PTE_EXT_AF;
+	vecs_pgprot |= PTE_EXT_AF;
+#endif
+
 	for (i = 0; i < 16; i++) {
 		unsigned long v = pgprot_val(protection_map[i]);
 		protection_map[i] = __pgprot(v | user_pgprot);
@@ -529,13 +548,18 @@ EXPORT_SYMBOL(phys_mem_access_prot);
 
 #define vectors_base()	(vectors_high() ? 0xffff0000 : 0)
 
-static void __init *early_alloc(unsigned long sz)
+static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
 {
-	void *ptr = __va(memblock_alloc(sz, sz));
+	void *ptr = __va(memblock_alloc(sz, align));
 	memset(ptr, 0, sz);
 	return ptr;
 }
 
+static void __init *early_alloc(unsigned long sz)
+{
+	return early_alloc_aligned(sz, sz);
+}
+
 static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
 {
 	if (pmd_none(*pmd)) {
@@ -572,8 +596,10 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
 	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
 		pmd_t *p = pmd;
 
+#ifndef CONFIG_ARM_LPAE
 		if (addr & SECTION_SIZE)
 			pmd++;
+#endif
 
 		do {
 			*pmd = __pmd(phys | type->prot_sect);
@@ -603,6 +629,7 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
 	} while (pud++, addr = next, addr != end);
 }
 
+#ifndef CONFIG_ARM_LPAE
 static void __init create_36bit_mapping(struct map_desc *md,
 	const struct mem_type *type)
 {
@@ -662,6 +689,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
 		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
 	} while (addr != end);
 }
+#endif	/* !CONFIG_ARM_LPAE */
 
 /*
  * Create the page directory entries and any necessary
@@ -685,14 +713,16 @@ static void __init create_mapping(struct map_desc *md)
 	}
 
 	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
-	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
+	    md->virtual >= PAGE_OFFSET &&
+	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
 		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
-		       " at 0x%08lx overlaps vmalloc space\n",
+		       " at 0x%08lx out of vmalloc space\n",
 		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
 	}
 
 	type = &mem_types[md->type];
 
+#ifndef CONFIG_ARM_LPAE
 	/*
 	 * Catch 36-bit addresses
 	 */
@@ -700,6 +730,7 @@ static void __init create_mapping(struct map_desc *md)
 		create_36bit_mapping(md, type);
 		return;
 	}
+#endif
 
 	addr = md->virtual & PAGE_MASK;
 	phys = __pfn_to_phys(md->pfn);
@@ -729,18 +760,33 @@ static void __init create_mapping(struct map_desc *md)
  */
 void __init iotable_init(struct map_desc *io_desc, int nr)
 {
-	int i;
+	struct map_desc *md;
+	struct vm_struct *vm;
+
+	if (!nr)
+		return;
 
-	for (i = 0; i < nr; i++)
-		create_mapping(io_desc + i);
+	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
+
+	for (md = io_desc; nr; md++, nr--) {
+		create_mapping(md);
+		vm->addr = (void *)(md->virtual & PAGE_MASK);
+		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
+		vm->phys_addr = __pfn_to_phys(md->pfn);
+		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+		vm->flags |= VM_ARM_MTYPE(md->type);
+		vm->caller = iotable_init;
+		vm_area_add_early(vm++);
+	}
 }
 
-static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);
+static void * __initdata vmalloc_min =
+	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
 
 /*
  * vmalloc=size forces the vmalloc area to be exactly 'size'
  * bytes. This can be used to increase (or decrease) the vmalloc
- * area - the default is 128m.
+ * area - the default is 240m.
  */
 static int __init early_vmalloc(char *arg)
 {
@@ -775,6 +821,9 @@ void __init sanity_check_meminfo(void)
 		struct membank *bank = &meminfo.bank[j];
 		*bank = meminfo.bank[i];
 
+		if (bank->start > ULONG_MAX)
+			highmem = 1;
+
 #ifdef CONFIG_HIGHMEM
 		if (__va(bank->start) >= vmalloc_min ||
 		    __va(bank->start) < (void *)PAGE_OFFSET)
@@ -786,7 +835,7 @@ void __init sanity_check_meminfo(void)
 		 * Split those memory banks which are partially overlapping
 		 * the vmalloc area greatly simplifying things later.
 		 */
-		if (__va(bank->start) < vmalloc_min &&
+		if (!highmem && __va(bank->start) < vmalloc_min &&
 		    bank->size > vmalloc_min - __va(bank->start)) {
 			if (meminfo.nr_banks >= NR_BANKS) {
 				printk(KERN_CRIT "NR_BANKS too low, "
@@ -807,6 +856,17 @@ void __init sanity_check_meminfo(void)
 		bank->highmem = highmem;
 
 		/*
+		 * Highmem banks not allowed with !CONFIG_HIGHMEM.
+		 */
+		if (highmem) {
+			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
+			       "(!CONFIG_HIGHMEM).\n",
+			       (unsigned long long)bank->start,
+			       (unsigned long long)bank->start + bank->size - 1);
+			continue;
+		}
+
+		/*
 		 * Check whether this memory bank would entirely overlap
 		 * the vmalloc area.
 		 */
@@ -860,6 +920,7 @@ void __init sanity_check_meminfo(void)
 	}
 #endif
 	meminfo.nr_banks = j;
+	high_memory = __va(lowmem_limit - 1) + 1;
 	memblock_set_current_limit(lowmem_limit);
 }
 
@@ -890,14 +951,20 @@ static inline void prepare_page_table(void)
 
 	/*
 	 * Clear out all the kernel space mappings, except for the first
-	 * memory bank, up to the end of the vmalloc region.
+	 * memory bank, up to the vmalloc region.
 	 */
 	for (addr = __phys_to_virt(end);
-	     addr < VMALLOC_END; addr += PMD_SIZE)
+	     addr < VMALLOC_START; addr += PMD_SIZE)
 		pmd_clear(pmd_off_k(addr));
 }
 
+#ifdef CONFIG_ARM_LPAE
+/* the first page is reserved for pgd */
+#define SWAPPER_PG_DIR_SIZE	(PAGE_SIZE + \
+				 PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
+#else
 #define SWAPPER_PG_DIR_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
+#endif
 
 /*
  * Reserve the special regions of memory
@@ -920,8 +987,8 @@ void __init arm_mm_memblock_reserve(void)
 }
 
 /*
- * Set up device the mappings.  Since we clear out the page tables for all
- * mappings above VMALLOC_END, we will remove any debug device mappings.
+ * Set up the device mappings.  Since we clear out the page tables for all
+ * mappings above VMALLOC_START, we will remove any debug device mappings.
  * This means you have to be careful how you debug this function, or any
  * called function.  This means you can't use any function or debugging
  * method which may touch any device, otherwise the kernel _will_ crash.
@@ -936,7 +1003,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	 */
 	vectors_page = early_alloc(PAGE_SIZE);
 
-	for (addr = VMALLOC_END; addr; addr += PMD_SIZE)
+	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
 		pmd_clear(pmd_off_k(addr));
 
 	/*
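Two points worth calling out in the mmu.c changes: iotable_init() now registers each static mapping in vmlist (which is what enables the ioremap() reuse path above), and the default vmalloc window grows from 128MB to 240MB, now measured down from VMALLOC_END with VMALLOC_OFFSET subtracted as well. The arithmetic, using typical values for illustration only:

/* Illustrative only: the new default vmalloc_min with typical values. */
#include <stdio.h>

int main(void)
{
	unsigned long vmalloc_end = 0xff000000UL;	/* typical VMALLOC_END */
	unsigned long vmalloc_offset = 8UL << 20;	/* typical 8MB gap */
	unsigned long vmalloc_min =
		vmalloc_end - (240UL << 20) - vmalloc_offset;

	printf("vmalloc_min = %#lx\n", vmalloc_min);	/* 0xef800000 */
	return 0;
}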
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 941a98c9e8aa..4fc6794cca4b 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -29,6 +29,8 @@ void __init arm_mm_memblock_reserve(void)
 
 void __init sanity_check_meminfo(void)
 {
+	phys_addr_t end = bank_phys_end(&meminfo.bank[meminfo.nr_banks - 1]);
+	high_memory = __va(end - 1) + 1;
 }
 
 /*
@@ -43,7 +45,7 @@ void __init paging_init(struct machine_desc *mdesc)
 /*
  * We don't need to do anything here for nommu machines.
  */
-void setup_mm_for_reboot(char mode)
+void setup_mm_for_reboot(void)
 {
 }
 
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index b2027c154b2a..a3e78ccabd65 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -10,6 +10,7 @@
 #include <linux/mm.h>
 #include <linux/gfp.h>
 #include <linux/highmem.h>
+#include <linux/slab.h>
 
 #include <asm/pgalloc.h>
 #include <asm/page.h>
@@ -17,6 +18,14 @@
 
 #include "mm.h"
 
+#ifdef CONFIG_ARM_LPAE
+#define __pgd_alloc()	kmalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL)
+#define __pgd_free(pgd)	kfree(pgd)
+#else
+#define __pgd_alloc()	(pgd_t *)__get_free_pages(GFP_KERNEL, 2)
+#define __pgd_free(pgd)	free_pages((unsigned long)pgd, 2)
+#endif
+
 /*
  * need to get a 16k page for level 1
  */
@@ -27,7 +36,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 	pmd_t *new_pmd, *init_pmd;
 	pte_t *new_pte, *init_pte;
 
-	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
+	new_pgd = __pgd_alloc();
 	if (!new_pgd)
 		goto no_pgd;
 
@@ -42,10 +51,25 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
42 51
43 clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t)); 52 clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
44 53
54#ifdef CONFIG_ARM_LPAE
55 /*
56 * Allocate PMD table for modules and pkmap mappings.
57 */
58 new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
59 MODULES_VADDR);
60 if (!new_pud)
61 goto no_pud;
62
63 new_pmd = pmd_alloc(mm, new_pud, 0);
64 if (!new_pmd)
65 goto no_pmd;
66#endif
67
45 if (!vectors_high()) { 68 if (!vectors_high()) {
46 /* 69 /*
47 * On ARM, first page must always be allocated since it 70 * On ARM, first page must always be allocated since it
48 * contains the machine vectors. 71 * contains the machine vectors. The vectors are always high
72 * with LPAE.
49 */ 73 */
50 new_pud = pud_alloc(mm, new_pgd, 0); 74 new_pud = pud_alloc(mm, new_pgd, 0);
51 if (!new_pud) 75 if (!new_pud)
@@ -74,7 +98,7 @@ no_pte:
74no_pmd: 98no_pmd:
75 pud_free(mm, new_pud); 99 pud_free(mm, new_pud);
76no_pud: 100no_pud:
77 free_pages((unsigned long)new_pgd, 2); 101 __pgd_free(new_pgd);
78no_pgd: 102no_pgd:
79 return NULL; 103 return NULL;
80} 104}
@@ -111,5 +135,24 @@ no_pud:
111 pgd_clear(pgd); 135 pgd_clear(pgd);
112 pud_free(mm, pud); 136 pud_free(mm, pud);
113no_pgd: 137no_pgd:
114 free_pages((unsigned long) pgd_base, 2); 138#ifdef CONFIG_ARM_LPAE
139 /*
140 * Free modules/pkmap or identity pmd tables.
141 */
142 for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
143 if (pgd_none_or_clear_bad(pgd))
144 continue;
145 if (pgd_val(*pgd) & L_PGD_SWAPPER)
146 continue;
147 pud = pud_offset(pgd, 0);
148 if (pud_none_or_clear_bad(pud))
149 continue;
150 pmd = pmd_offset(pud, 0);
151 pud_clear(pud);
152 pmd_free(mm, pmd);
153 pgd_clear(pgd);
154 pud_free(mm, pud);
155 }
156#endif
157 __pgd_free(pgd_base);
115} 158}
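
The allocation split above reflects how much the top level shrinks under LPAE: the classic 2-level pgd is 16K of first-level entries that TTBR0 requires to be naturally aligned, hence the order-2 (four contiguous pages) allocation, while the LPAE pgd holds only four 8-byte entries, so a plain kmalloc() suffices. A minimal user-space sketch of the two strategies, with the sizes spelled out and all names illustrative:

	#include <stdlib.h>

	#define LPAE_PGD_BYTES		(4 * 8)		/* 4 entries x 8 bytes */
	#define CLASSIC_PGD_BYTES	(2048 * 8)	/* 16K, naturally aligned */

	static void *pgd_alloc_sketch(int lpae)
	{
		if (lpae)
			/* 32-byte table: ordinary heap allocation is enough */
			return malloc(LPAE_PGD_BYTES);
		/* classic: a naturally aligned 16K block, as an order-2
		 * page allocation provides in the kernel */
		return aligned_alloc(CLASSIC_PGD_BYTES, CLASSIC_PGD_BYTES);
	}

	int main(void)
	{
		free(pgd_alloc_sketch(1));
		free(pgd_alloc_sketch(0));
		return 0;
	}

The extra pud/pmd work in pgd_alloc() and the new loop in pgd_free() exist because, with only four pgd entries, the modules/pkmap area needs its own pmd table allocated and torn down per process.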
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 67469665d47a..234951345eb3 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -95,6 +95,7 @@ ENTRY(cpu_arm1020_proc_fin)
95 * loc: location to jump to for soft reset 95 * loc: location to jump to for soft reset
96 */ 96 */
97 .align 5 97 .align 5
98 .pushsection .idmap.text, "ax"
98ENTRY(cpu_arm1020_reset) 99ENTRY(cpu_arm1020_reset)
99 mov ip, #0 100 mov ip, #0
100 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 101 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -107,6 +108,8 @@ ENTRY(cpu_arm1020_reset)
107 bic ip, ip, #0x1100 @ ...i...s........ 108 bic ip, ip, #0x1100 @ ...i...s........
108 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 109 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
109 mov pc, r0 110 mov pc, r0
111ENDPROC(cpu_arm1020_reset)
112 .popsection
110 113
111/* 114/*
112 * cpu_arm1020_do_idle() 115 * cpu_arm1020_do_idle()
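
This .pushsection .idmap.text pattern repeats in every proc-*.S reset handler below. The reason: these stubs turn the MMU off mid-function, after which the CPU fetches from physical addresses, so the code must live at a virtual address equal to its physical one. Collecting all the stubs into one section lets the identity-mapping code cover just that window between two linker symbols. A toy illustration of the mapping step, with invented symbol values (the real work is done by arch/arm/mm/idmap.c against the actual linker symbols):

	#include <stdio.h>
	#include <stdint.h>

	#define SECTION_SIZE	(1UL << 20)	/* 1MB section granularity */

	/* invented stand-ins for the linker symbols bracketing .idmap.text */
	static uintptr_t idmap_text_start = 0x60100000;
	static uintptr_t idmap_text_end   = 0x60100400;

	int main(void)
	{
		uintptr_t addr;

		/* identity-map each section covering the reset stubs, so
		 * instruction fetch survives the MMU-off transition */
		for (addr = idmap_text_start & ~(SECTION_SIZE - 1);
		     addr < idmap_text_end; addr += SECTION_SIZE)
			printf("map %#lx -> %#lx (1:1)\n",
			       (unsigned long)addr, (unsigned long)addr);
		return 0;
	}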
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 4251421c0ed5..c244b06caac9 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -95,6 +95,7 @@ ENTRY(cpu_arm1020e_proc_fin)
95 * loc: location to jump to for soft reset 95 * loc: location to jump to for soft reset
96 */ 96 */
97 .align 5 97 .align 5
98 .pushsection .idmap.text, "ax"
98ENTRY(cpu_arm1020e_reset) 99ENTRY(cpu_arm1020e_reset)
99 mov ip, #0 100 mov ip, #0
100 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 101 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -107,6 +108,8 @@ ENTRY(cpu_arm1020e_reset)
107 bic ip, ip, #0x1100 @ ...i...s........ 108 bic ip, ip, #0x1100 @ ...i...s........
108 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 109 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
109 mov pc, r0 110 mov pc, r0
111ENDPROC(cpu_arm1020e_reset)
112 .popsection
110 113
111/* 114/*
112 * cpu_arm1020e_do_idle() 115 * cpu_arm1020e_do_idle()
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index d283cf3d06e3..38fe22efd18f 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -84,6 +84,7 @@ ENTRY(cpu_arm1022_proc_fin)
84 * loc: location to jump to for soft reset 84 * loc: location to jump to for soft reset
85 */ 85 */
86 .align 5 86 .align 5
87 .pushsection .idmap.text, "ax"
87ENTRY(cpu_arm1022_reset) 88ENTRY(cpu_arm1022_reset)
88 mov ip, #0 89 mov ip, #0
89 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 90 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -96,6 +97,8 @@ ENTRY(cpu_arm1022_reset)
96 bic ip, ip, #0x1100 @ ...i...s........ 97 bic ip, ip, #0x1100 @ ...i...s........
97 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 98 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
98 mov pc, r0 99 mov pc, r0
100ENDPROC(cpu_arm1022_reset)
101 .popsection
99 102
100/* 103/*
101 * cpu_arm1022_do_idle() 104 * cpu_arm1022_do_idle()
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 678a1ceafed2..3eb9c3c26c75 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -84,6 +84,7 @@ ENTRY(cpu_arm1026_proc_fin)
84 * loc: location to jump to for soft reset 84 * loc: location to jump to for soft reset
85 */ 85 */
86 .align 5 86 .align 5
87 .pushsection .idmap.text, "ax"
87ENTRY(cpu_arm1026_reset) 88ENTRY(cpu_arm1026_reset)
88 mov ip, #0 89 mov ip, #0
89 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 90 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -96,6 +97,8 @@ ENTRY(cpu_arm1026_reset)
96 bic ip, ip, #0x1100 @ ...i...s........ 97 bic ip, ip, #0x1100 @ ...i...s........
97 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 98 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
98 mov pc, r0 99 mov pc, r0
100ENDPROC(cpu_arm1026_reset)
101 .popsection
99 102
100/* 103/*
101 * cpu_arm1026_do_idle() 104 * cpu_arm1026_do_idle()
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S
index e5b974cddac3..4fbeb5b8e6c2 100644
--- a/arch/arm/mm/proc-arm6_7.S
+++ b/arch/arm/mm/proc-arm6_7.S
@@ -225,6 +225,7 @@ ENTRY(cpu_arm7_set_pte_ext)
225 * Params : r0 = address to jump to 225 * Params : r0 = address to jump to
226 * Notes : This sets up everything for a reset 226 * Notes : This sets up everything for a reset
227 */ 227 */
228 .pushsection .idmap.text, "ax"
228ENTRY(cpu_arm6_reset) 229ENTRY(cpu_arm6_reset)
229ENTRY(cpu_arm7_reset) 230ENTRY(cpu_arm7_reset)
230 mov r1, #0 231 mov r1, #0
@@ -235,6 +236,9 @@ ENTRY(cpu_arm7_reset)
235 mov r1, #0x30 236 mov r1, #0x30
236 mcr p15, 0, r1, c1, c0, 0 @ turn off MMU etc 237 mcr p15, 0, r1, c1, c0, 0 @ turn off MMU etc
237 mov pc, r0 238 mov pc, r0
239ENDPROC(cpu_arm6_reset)
240ENDPROC(cpu_arm7_reset)
241 .popsection
238 242
239 __CPUINIT 243 __CPUINIT
240 244
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index 55f4e290665a..0ac908c7ade1 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -101,6 +101,7 @@ ENTRY(cpu_arm720_set_pte_ext)
101 * Params : r0 = address to jump to 101 * Params : r0 = address to jump to
102 * Notes : This sets up everything for a reset 102 * Notes : This sets up everything for a reset
103 */ 103 */
104 .pushsection .idmap.text, "ax"
104ENTRY(cpu_arm720_reset) 105ENTRY(cpu_arm720_reset)
105 mov ip, #0 106 mov ip, #0
106 mcr p15, 0, ip, c7, c7, 0 @ invalidate cache 107 mcr p15, 0, ip, c7, c7, 0 @ invalidate cache
@@ -112,6 +113,8 @@ ENTRY(cpu_arm720_reset)
112 bic ip, ip, #0x2100 @ ..v....s........ 113 bic ip, ip, #0x2100 @ ..v....s........
113 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 114 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
114 mov pc, r0 115 mov pc, r0
116ENDPROC(cpu_arm720_reset)
117 .popsection
115 118
116 __CPUINIT 119 __CPUINIT
117 120
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index 4506be3adda6..dc5de5d53f20 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -49,6 +49,7 @@ ENTRY(cpu_arm740_proc_fin)
49 * Params : r0 = address to jump to 49 * Params : r0 = address to jump to
50 * Notes : This sets up everything for a reset 50 * Notes : This sets up everything for a reset
51 */ 51 */
52 .pushsection .idmap.text, "ax"
52ENTRY(cpu_arm740_reset) 53ENTRY(cpu_arm740_reset)
53 mov ip, #0 54 mov ip, #0
54 mcr p15, 0, ip, c7, c0, 0 @ invalidate cache 55 mcr p15, 0, ip, c7, c0, 0 @ invalidate cache
@@ -56,6 +57,8 @@ ENTRY(cpu_arm740_reset)
56 bic ip, ip, #0x0000000c @ ............wc.. 57 bic ip, ip, #0x0000000c @ ............wc..
57 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 58 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
58 mov pc, r0 59 mov pc, r0
60ENDPROC(cpu_arm740_reset)
61 .popsection
59 62
60 __CPUINIT 63 __CPUINIT
61 64
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index 7e0e1fe4ed4d..6ddea3e464bd 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -45,8 +45,11 @@ ENTRY(cpu_arm7tdmi_proc_fin)
45 * Params : loc(r0) address to jump to 45 * Params : loc(r0) address to jump to
46 * Purpose : Sets up everything for a reset and jump to the location for soft reset. 46 * Purpose : Sets up everything for a reset and jump to the location for soft reset.
47 */ 47 */
48 .pushsection .idmap.text, "ax"
48ENTRY(cpu_arm7tdmi_reset) 49ENTRY(cpu_arm7tdmi_reset)
49 mov pc, r0 50 mov pc, r0
51ENDPROC(cpu_arm7tdmi_reset)
52 .popsection
50 53
51 __CPUINIT 54 __CPUINIT
52 55
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 88fb3d9e0640..cb941ae95f66 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -85,6 +85,7 @@ ENTRY(cpu_arm920_proc_fin)
85 * loc: location to jump to for soft reset 85 * loc: location to jump to for soft reset
86 */ 86 */
87 .align 5 87 .align 5
88 .pushsection .idmap.text, "ax"
88ENTRY(cpu_arm920_reset) 89ENTRY(cpu_arm920_reset)
89 mov ip, #0 90 mov ip, #0
90 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 91 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -97,6 +98,8 @@ ENTRY(cpu_arm920_reset)
97 bic ip, ip, #0x1100 @ ...i...s........ 98 bic ip, ip, #0x1100 @ ...i...s........
98 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 99 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
99 mov pc, r0 100 mov pc, r0
101ENDPROC(cpu_arm920_reset)
102 .popsection
100 103
101/* 104/*
102 * cpu_arm920_do_idle() 105 * cpu_arm920_do_idle()
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 490e18833857..4ec0e074dd55 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -87,6 +87,7 @@ ENTRY(cpu_arm922_proc_fin)
87 * loc: location to jump to for soft reset 87 * loc: location to jump to for soft reset
88 */ 88 */
89 .align 5 89 .align 5
90 .pushsection .idmap.text, "ax"
90ENTRY(cpu_arm922_reset) 91ENTRY(cpu_arm922_reset)
91 mov ip, #0 92 mov ip, #0
92 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 93 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -99,6 +100,8 @@ ENTRY(cpu_arm922_reset)
99 bic ip, ip, #0x1100 @ ...i...s........ 100 bic ip, ip, #0x1100 @ ...i...s........
100 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 101 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
101 mov pc, r0 102 mov pc, r0
103ENDPROC(cpu_arm922_reset)
104 .popsection
102 105
103/* 106/*
104 * cpu_arm922_do_idle() 107 * cpu_arm922_do_idle()
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 51d494be057e..9dccd9a365b3 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -108,6 +108,7 @@ ENTRY(cpu_arm925_proc_fin)
108 * loc: location to jump to for soft reset 108 * loc: location to jump to for soft reset
109 */ 109 */
110 .align 5 110 .align 5
111 .pushsection .idmap.text, "ax"
111ENTRY(cpu_arm925_reset) 112ENTRY(cpu_arm925_reset)
112 /* Send software reset to MPU and DSP */ 113 /* Send software reset to MPU and DSP */
113 mov ip, #0xff000000 114 mov ip, #0xff000000
@@ -115,6 +116,8 @@ ENTRY(cpu_arm925_reset)
115 orr ip, ip, #0x0000ce00 116 orr ip, ip, #0x0000ce00
116 mov r4, #1 117 mov r4, #1
117 strh r4, [ip, #0x10] 118 strh r4, [ip, #0x10]
119ENDPROC(cpu_arm925_reset)
120 .popsection
118 121
119 mov ip, #0 122 mov ip, #0
120 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 123 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 9f8fd91f918a..820259b81a1f 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -77,6 +77,7 @@ ENTRY(cpu_arm926_proc_fin)
77 * loc: location to jump to for soft reset 77 * loc: location to jump to for soft reset
78 */ 78 */
79 .align 5 79 .align 5
80 .pushsection .idmap.text, "ax"
80ENTRY(cpu_arm926_reset) 81ENTRY(cpu_arm926_reset)
81 mov ip, #0 82 mov ip, #0
82 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 83 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -89,6 +90,8 @@ ENTRY(cpu_arm926_reset)
89 bic ip, ip, #0x1100 @ ...i...s........ 90 bic ip, ip, #0x1100 @ ...i...s........
90 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 91 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
91 mov pc, r0 92 mov pc, r0
93ENDPROC(cpu_arm926_reset)
94 .popsection
92 95
93/* 96/*
94 * cpu_arm926_do_idle() 97 * cpu_arm926_do_idle()
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index ac750d506153..9fdc0a170974 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -48,6 +48,7 @@ ENTRY(cpu_arm940_proc_fin)
48 * Params : r0 = address to jump to 48 * Params : r0 = address to jump to
49 * Notes : This sets up everything for a reset 49 * Notes : This sets up everything for a reset
50 */ 50 */
51 .pushsection .idmap.text, "ax"
51ENTRY(cpu_arm940_reset) 52ENTRY(cpu_arm940_reset)
52 mov ip, #0 53 mov ip, #0
53 mcr p15, 0, ip, c7, c5, 0 @ flush I cache 54 mcr p15, 0, ip, c7, c5, 0 @ flush I cache
@@ -58,6 +59,8 @@ ENTRY(cpu_arm940_reset)
58 bic ip, ip, #0x00001000 @ i-cache 59 bic ip, ip, #0x00001000 @ i-cache
59 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 60 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
60 mov pc, r0 61 mov pc, r0
62ENDPROC(cpu_arm940_reset)
63 .popsection
61 64
62/* 65/*
63 * cpu_arm940_do_idle() 66 * cpu_arm940_do_idle()
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 683af3a182b7..f684cfedcca9 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -55,6 +55,7 @@ ENTRY(cpu_arm946_proc_fin)
55 * Params : r0 = address to jump to 55 * Params : r0 = address to jump to
56 * Notes : This sets up everything for a reset 56 * Notes : This sets up everything for a reset
57 */ 57 */
58 .pushsection .idmap.text, "ax"
58ENTRY(cpu_arm946_reset) 59ENTRY(cpu_arm946_reset)
59 mov ip, #0 60 mov ip, #0
60 mcr p15, 0, ip, c7, c5, 0 @ flush I cache 61 mcr p15, 0, ip, c7, c5, 0 @ flush I cache
@@ -65,6 +66,8 @@ ENTRY(cpu_arm946_reset)
65 bic ip, ip, #0x00001000 @ i-cache 66 bic ip, ip, #0x00001000 @ i-cache
66 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 67 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
67 mov pc, r0 68 mov pc, r0
69ENDPROC(cpu_arm946_reset)
70 .popsection
68 71
69/* 72/*
70 * cpu_arm946_do_idle() 73 * cpu_arm946_do_idle()
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index 2120f9e2af7f..8881391dfb9e 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -45,8 +45,11 @@ ENTRY(cpu_arm9tdmi_proc_fin)
45 * Params : loc(r0) address to jump to 45 * Params : loc(r0) address to jump to
46 * Purpose : Sets up everything for a reset and jump to the location for soft reset. 46 * Purpose : Sets up everything for a reset and jump to the location for soft reset.
47 */ 47 */
48 .pushsection .idmap.text, "ax"
48ENTRY(cpu_arm9tdmi_reset) 49ENTRY(cpu_arm9tdmi_reset)
49 mov pc, r0 50 mov pc, r0
51ENDPROC(cpu_arm9tdmi_reset)
52 .popsection
50 53
51 __CPUINIT 54 __CPUINIT
52 55
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index 4c7a5710472b..272558a133a3 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -57,6 +57,7 @@ ENTRY(cpu_fa526_proc_fin)
57 * loc: location to jump to for soft reset 57 * loc: location to jump to for soft reset
58 */ 58 */
59 .align 4 59 .align 4
60 .pushsection .idmap.text, "ax"
60ENTRY(cpu_fa526_reset) 61ENTRY(cpu_fa526_reset)
61/* TODO: Use CP8 if possible... */ 62/* TODO: Use CP8 if possible... */
62 mov ip, #0 63 mov ip, #0
@@ -73,6 +74,8 @@ ENTRY(cpu_fa526_reset)
73 nop 74 nop
74 nop 75 nop
75 mov pc, r0 76 mov pc, r0
77ENDPROC(cpu_fa526_reset)
78 .popsection
76 79
77/* 80/*
78 * cpu_fa526_do_idle() 81 * cpu_fa526_do_idle()
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index 8a6c2f78c1c3..ba3c500584ac 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -98,6 +98,7 @@ ENTRY(cpu_feroceon_proc_fin)
98 * loc: location to jump to for soft reset 98 * loc: location to jump to for soft reset
99 */ 99 */
100 .align 5 100 .align 5
101 .pushsection .idmap.text, "ax"
101ENTRY(cpu_feroceon_reset) 102ENTRY(cpu_feroceon_reset)
102 mov ip, #0 103 mov ip, #0
103 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 104 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -110,6 +111,8 @@ ENTRY(cpu_feroceon_reset)
110 bic ip, ip, #0x1100 @ ...i...s........ 111 bic ip, ip, #0x1100 @ ...i...s........
111 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 112 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
112 mov pc, r0 113 mov pc, r0
114ENDPROC(cpu_feroceon_reset)
115 .popsection
113 116
114/* 117/*
115 * cpu_feroceon_do_idle() 118 * cpu_feroceon_do_idle()
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 307a4def8d3a..2d8ff3ad86d3 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -91,8 +91,9 @@
91#if L_PTE_SHARED != PTE_EXT_SHARED 91#if L_PTE_SHARED != PTE_EXT_SHARED
92#error PTE shared bit mismatch 92#error PTE shared bit mismatch
93#endif 93#endif
94#if (L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\ 94#if !defined (CONFIG_ARM_LPAE) && \
95 L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED 95 (L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\
96 L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED
96#error Invalid Linux PTE bit settings 97#error Invalid Linux PTE bit settings
97#endif 98#endif
98#endif /* CONFIG_MMU */ 99#endif /* CONFIG_MMU */
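
The guarded check packs a subtle invariant: on the classic 2-level layout the Linux-only software PTE bits all sit below the hardware SHARED bit, so the sum of those distinct single-bit masks must stay under L_PTE_SHARED; any collision pushes the sum past it and trips the #error. Under LPAE the Linux and hardware descriptors are the same 64-bit entry, so the invariant no longer applies and the check is compiled out. A C restatement, assuming the 2-level bit values of this era's pgtable-2level.h:

	#define L_PTE_PRESENT	(1 << 0)
	#define L_PTE_YOUNG	(1 << 1)
	#define L_PTE_FILE	(1 << 2)
	#define L_PTE_DIRTY	(1 << 6)
	#define L_PTE_RDONLY	(1 << 7)
	#define L_PTE_USER	(1 << 8)
	#define L_PTE_XN	(1 << 9)
	#define L_PTE_SHARED	(1 << 10)

	_Static_assert((L_PTE_XN + L_PTE_USER + L_PTE_RDONLY + L_PTE_DIRTY +
			L_PTE_YOUNG + L_PTE_FILE + L_PTE_PRESENT) <= L_PTE_SHARED,
		       "software PTE bits must stay below the SHARED bit");

	int main(void) { return 0; }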
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index db52b0fb14a0..cdfedc5b8ad8 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -69,6 +69,7 @@ ENTRY(cpu_mohawk_proc_fin)
69 * (same as arm926) 69 * (same as arm926)
70 */ 70 */
71 .align 5 71 .align 5
72 .pushsection .idmap.text, "ax"
72ENTRY(cpu_mohawk_reset) 73ENTRY(cpu_mohawk_reset)
73 mov ip, #0 74 mov ip, #0
74 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 75 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -79,6 +80,8 @@ ENTRY(cpu_mohawk_reset)
79 bic ip, ip, #0x1100 @ ...i...s........ 80 bic ip, ip, #0x1100 @ ...i...s........
80 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 81 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
81 mov pc, r0 82 mov pc, r0
83ENDPROC(cpu_mohawk_reset)
84 .popsection
82 85
83/* 86/*
84 * cpu_mohawk_do_idle() 87 * cpu_mohawk_do_idle()
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index d50ada26edd6..775d70fba937 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -62,6 +62,7 @@ ENTRY(cpu_sa110_proc_fin)
62 * loc: location to jump to for soft reset 62 * loc: location to jump to for soft reset
63 */ 63 */
64 .align 5 64 .align 5
65 .pushsection .idmap.text, "ax"
65ENTRY(cpu_sa110_reset) 66ENTRY(cpu_sa110_reset)
66 mov ip, #0 67 mov ip, #0
67 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 68 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -74,6 +75,8 @@ ENTRY(cpu_sa110_reset)
74 bic ip, ip, #0x1100 @ ...i...s........ 75 bic ip, ip, #0x1100 @ ...i...s........
75 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 76 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
76 mov pc, r0 77 mov pc, r0
78ENDPROC(cpu_sa110_reset)
79 .popsection
77 80
78/* 81/*
79 * cpu_sa110_do_idle(type) 82 * cpu_sa110_do_idle(type)
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 7d91545d089b..3aa0da11fd84 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -70,6 +70,7 @@ ENTRY(cpu_sa1100_proc_fin)
70 * loc: location to jump to for soft reset 70 * loc: location to jump to for soft reset
71 */ 71 */
72 .align 5 72 .align 5
73 .pushsection .idmap.text, "ax"
73ENTRY(cpu_sa1100_reset) 74ENTRY(cpu_sa1100_reset)
74 mov ip, #0 75 mov ip, #0
75 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 76 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -82,6 +83,8 @@ ENTRY(cpu_sa1100_reset)
82 bic ip, ip, #0x1100 @ ...i...s........ 83 bic ip, ip, #0x1100 @ ...i...s........
83 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 84 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
84 mov pc, r0 85 mov pc, r0
86ENDPROC(cpu_sa1100_reset)
87 .popsection
85 88
86/* 89/*
87 * cpu_sa1100_do_idle(type) 90 * cpu_sa1100_do_idle(type)
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index d061d2fa5506..5900cd520e84 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -55,6 +55,7 @@ ENTRY(cpu_v6_proc_fin)
55 * - loc - location to jump to for soft reset 55 * - loc - location to jump to for soft reset
56 */ 56 */
57 .align 5 57 .align 5
58 .pushsection .idmap.text, "ax"
58ENTRY(cpu_v6_reset) 59ENTRY(cpu_v6_reset)
59 mrc p15, 0, r1, c1, c0, 0 @ ctrl register 60 mrc p15, 0, r1, c1, c0, 0 @ ctrl register
60 bic r1, r1, #0x1 @ ...............m 61 bic r1, r1, #0x1 @ ...............m
@@ -62,6 +63,8 @@ ENTRY(cpu_v6_reset)
62 mov r1, #0 63 mov r1, #0
63 mcr p15, 0, r1, c7, c5, 4 @ ISB 64 mcr p15, 0, r1, c7, c5, 4 @ ISB
64 mov pc, r0 65 mov pc, r0
66ENDPROC(cpu_v6_reset)
67 .popsection
65 68
66/* 69/*
67 * cpu_v6_do_idle() 70 * cpu_v6_do_idle()
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
new file mode 100644
index 000000000000..3a4b3e7b888c
--- /dev/null
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -0,0 +1,171 @@
1/*
2 * arch/arm/mm/proc-v7-2level.S
3 *
4 * Copyright (C) 2001 Deep Blue Solutions Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#define TTB_S (1 << 1)
12#define TTB_RGN_NC (0 << 3)
13#define TTB_RGN_OC_WBWA (1 << 3)
14#define TTB_RGN_OC_WT (2 << 3)
15#define TTB_RGN_OC_WB (3 << 3)
16#define TTB_NOS (1 << 5)
17#define TTB_IRGN_NC ((0 << 0) | (0 << 6))
18#define TTB_IRGN_WBWA ((0 << 0) | (1 << 6))
19#define TTB_IRGN_WT ((1 << 0) | (0 << 6))
20#define TTB_IRGN_WB ((1 << 0) | (1 << 6))
21
22/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
23#define TTB_FLAGS_UP TTB_IRGN_WB|TTB_RGN_OC_WB
24#define PMD_FLAGS_UP PMD_SECT_WB
25
26/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
27#define TTB_FLAGS_SMP TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
28#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S
29
30/*
31 * cpu_v7_switch_mm(pgd_phys, tsk)
32 *
33 * Set the translation table base pointer to be pgd_phys
34 *
35 * - pgd_phys - physical address of new TTB
36 *
37 * It is assumed that:
38 * - we are not using split page tables
39 */
40ENTRY(cpu_v7_switch_mm)
41#ifdef CONFIG_MMU
42 mov r2, #0
43 ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
44 ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
45 ALT_UP(orr r0, r0, #TTB_FLAGS_UP)
46#ifdef CONFIG_ARM_ERRATA_430973
47 mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
48#endif
49#ifdef CONFIG_ARM_ERRATA_754322
50 dsb
51#endif
52 mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID
53 isb
541: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
55 isb
56#ifdef CONFIG_ARM_ERRATA_754322
57 dsb
58#endif
59 mcr p15, 0, r1, c13, c0, 1 @ set context ID
60 isb
61#endif
62 mov pc, lr
63ENDPROC(cpu_v7_switch_mm)
64
65/*
66 * cpu_v7_set_pte_ext(ptep, pte)
67 *
68 * Set a level 2 translation table entry.
69 *
70 * - ptep - pointer to level 2 translation table entry
71 * (hardware version is stored at +2048 bytes)
72 * - pte - PTE value to store
73 * - ext - value for extended PTE bits
74 */
75ENTRY(cpu_v7_set_pte_ext)
76#ifdef CONFIG_MMU
77 str r1, [r0] @ linux version
78
79 bic r3, r1, #0x000003f0
80 bic r3, r3, #PTE_TYPE_MASK
81 orr r3, r3, r2
82 orr r3, r3, #PTE_EXT_AP0 | 2
83
84 tst r1, #1 << 4
85 orrne r3, r3, #PTE_EXT_TEX(1)
86
87 eor r1, r1, #L_PTE_DIRTY
88 tst r1, #L_PTE_RDONLY | L_PTE_DIRTY
89 orrne r3, r3, #PTE_EXT_APX
90
91 tst r1, #L_PTE_USER
92 orrne r3, r3, #PTE_EXT_AP1
93#ifdef CONFIG_CPU_USE_DOMAINS
94 @ allow kernel read/write access to read-only user pages
95 tstne r3, #PTE_EXT_APX
96 bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
97#endif
98
99 tst r1, #L_PTE_XN
100 orrne r3, r3, #PTE_EXT_XN
101
102 tst r1, #L_PTE_YOUNG
103 tstne r1, #L_PTE_PRESENT
104 moveq r3, #0
105
106 ARM( str r3, [r0, #2048]! )
107 THUMB( add r0, r0, #2048 )
108 THUMB( str r3, [r0] )
109 mcr p15, 0, r0, c7, c10, 1 @ flush_pte
110#endif
111 mov pc, lr
112ENDPROC(cpu_v7_set_pte_ext)
113
114 /*
115 * Memory region attributes with SCTLR.TRE=1
116 *
117 * n = TEX[0],C,B
118 * TR = PRRR[2n+1:2n] - memory type
119 * IR = NMRR[2n+1:2n] - inner cacheable property
120 * OR = NMRR[2n+17:2n+16] - outer cacheable property
121 *
122 * n TR IR OR
123 * UNCACHED 000 00
124 * BUFFERABLE 001 10 00 00
125 * WRITETHROUGH 010 10 10 10
126 * WRITEBACK 011 10 11 11
127 * reserved 110
128 * WRITEALLOC 111 10 01 01
129 * DEV_SHARED 100 01
130 * DEV_NONSHARED 100 01
131 * DEV_WC 001 10
132 * DEV_CACHED 011 10
133 *
134 * Other attributes:
135 *
136 * DS0 = PRRR[16] = 0 - device shareable property
137 * DS1 = PRRR[17] = 1 - device shareable property
138 * NS0 = PRRR[18] = 0 - normal shareable property
139 * NS1 = PRRR[19] = 1 - normal shareable property
140 * NOS = PRRR[24+n] = 1 - not outer shareable
141 */
142.equ PRRR, 0xff0a81a8
143.equ NMRR, 0x40e040e0
144
145 /*
146 * Macro for setting up the TTBRx and TTBCR registers.
 147 * - \ttbr0 and \ttbr1 updated with the corresponding flags.
148 */
149 .macro v7_ttb_setup, zero, ttbr0, ttbr1, tmp
150 mcr p15, 0, \zero, c2, c0, 2 @ TTB control register
151 ALT_SMP(orr \ttbr0, \ttbr0, #TTB_FLAGS_SMP)
152 ALT_UP(orr \ttbr0, \ttbr0, #TTB_FLAGS_UP)
153 ALT_SMP(orr \ttbr1, \ttbr1, #TTB_FLAGS_SMP)
154 ALT_UP(orr \ttbr1, \ttbr1, #TTB_FLAGS_UP)
155 mcr p15, 0, \ttbr1, c2, c0, 1 @ load TTB1
156 .endm
157
158 __CPUINIT
159
160 /* AT
161 * TFR EV X F I D LR S
162 * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM
163 * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
164 * 1 0 110 0011 1100 .111 1101 < we want
165 */
166 .align 2
167 .type v7_crval, #object
168v7_crval:
169 crval clear=0x0120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c
170
171 .previous
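
For reference, the TTB attribute bits defined at the top of this file combine into small constants that get OR-ed into the pgd physical address before it is loaded into TTBR0. Worked values, mirroring the #defines in plain user-space C rather than including kernel headers:

	#include <stdio.h>

	#define TTB_S			(1 << 1)
	#define TTB_RGN_OC_WBWA		(1 << 3)
	#define TTB_RGN_OC_WB		(3 << 3)
	#define TTB_NOS			(1 << 5)
	#define TTB_IRGN_WBWA		((0 << 0) | (1 << 6))
	#define TTB_IRGN_WB		((1 << 0) | (1 << 6))

	int main(void)
	{
		printf("TTB_FLAGS_UP  = %#x\n",
		       TTB_IRGN_WB | TTB_RGN_OC_WB);		/* 0x59 */
		printf("TTB_FLAGS_SMP = %#x\n",
		       TTB_IRGN_WBWA | TTB_S | TTB_NOS |
		       TTB_RGN_OC_WBWA);			/* 0x6a */
		return 0;
	}

This is why cpu_v7_switch_mm() can fold the flags into r0 with a single orr per alternative.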
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
new file mode 100644
index 000000000000..8de0f1dd1549
--- /dev/null
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -0,0 +1,150 @@
1/*
2 * arch/arm/mm/proc-v7-3level.S
3 *
4 * Copyright (C) 2001 Deep Blue Solutions Ltd.
5 * Copyright (C) 2011 ARM Ltd.
6 * Author: Catalin Marinas <catalin.marinas@arm.com>
7 * based on arch/arm/mm/proc-v7-2level.S
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#define TTB_IRGN_NC (0 << 8)
24#define TTB_IRGN_WBWA (1 << 8)
25#define TTB_IRGN_WT (2 << 8)
26#define TTB_IRGN_WB (3 << 8)
27#define TTB_RGN_NC (0 << 10)
28#define TTB_RGN_OC_WBWA (1 << 10)
29#define TTB_RGN_OC_WT (2 << 10)
30#define TTB_RGN_OC_WB (3 << 10)
31#define TTB_S (3 << 12)
32#define TTB_EAE (1 << 31)
33
34/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
35#define TTB_FLAGS_UP (TTB_IRGN_WB|TTB_RGN_OC_WB)
36#define PMD_FLAGS_UP (PMD_SECT_WB)
37
38/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
39#define TTB_FLAGS_SMP (TTB_IRGN_WBWA|TTB_S|TTB_RGN_OC_WBWA)
40#define PMD_FLAGS_SMP (PMD_SECT_WBWA|PMD_SECT_S)
41
42/*
43 * cpu_v7_switch_mm(pgd_phys, tsk)
44 *
45 * Set the translation table base pointer to be pgd_phys (physical address of
46 * the new TTB).
47 */
48ENTRY(cpu_v7_switch_mm)
49#ifdef CONFIG_MMU
50 ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
51 and r3, r1, #0xff
52 mov r3, r3, lsl #(48 - 32) @ ASID
53 mcrr p15, 0, r0, r3, c2 @ set TTB 0
54 isb
55#endif
56 mov pc, lr
57ENDPROC(cpu_v7_switch_mm)
58
59/*
60 * cpu_v7_set_pte_ext(ptep, pte)
61 *
 62 * Set a level 3 translation table entry.
63 * - ptep - pointer to level 3 translation table entry
64 * - pte - PTE value to store (64-bit in r2 and r3)
65 */
66ENTRY(cpu_v7_set_pte_ext)
67#ifdef CONFIG_MMU
68 tst r2, #L_PTE_PRESENT
69 beq 1f
70 tst r3, #1 << (55 - 32) @ L_PTE_DIRTY
71 orreq r2, #L_PTE_RDONLY
721: strd r2, r3, [r0]
73 mcr p15, 0, r0, c7, c10, 1 @ flush_pte
74#endif
75 mov pc, lr
76ENDPROC(cpu_v7_set_pte_ext)
77
78 /*
79 * Memory region attributes for LPAE (defined in pgtable-3level.h):
80 *
81 * n = AttrIndx[2:0]
82 *
83 * n MAIR
84 * UNCACHED 000 00000000
85 * BUFFERABLE 001 01000100
86 * DEV_WC 001 01000100
87 * WRITETHROUGH 010 10101010
88 * WRITEBACK 011 11101110
89 * DEV_CACHED 011 11101110
90 * DEV_SHARED 100 00000100
91 * DEV_NONSHARED 100 00000100
92 * unused 101
93 * unused 110
94 * WRITEALLOC 111 11111111
95 */
96.equ PRRR, 0xeeaa4400 @ MAIR0
97.equ NMRR, 0xff000004 @ MAIR1
98
99 /*
100 * Macro for setting up the TTBRx and TTBCR registers.
101 * - \ttbr1 updated.
102 */
103 .macro v7_ttb_setup, zero, ttbr0, ttbr1, tmp
104 ldr \tmp, =swapper_pg_dir @ swapper_pg_dir virtual address
105 cmp \ttbr1, \tmp @ PHYS_OFFSET > PAGE_OFFSET? (branch below)
106 mrc p15, 0, \tmp, c2, c0, 2 @ TTB control register
107 orr \tmp, \tmp, #TTB_EAE
108 ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP)
109 ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP)
110 ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP << 16)
111 ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP << 16)
112 /*
113 * TTBR0/TTBR1 split (PAGE_OFFSET):
114 * 0x40000000: T0SZ = 2, T1SZ = 0 (not used)
115 * 0x80000000: T0SZ = 0, T1SZ = 1
116 * 0xc0000000: T0SZ = 0, T1SZ = 2
117 *
118 * Only use this feature if PHYS_OFFSET <= PAGE_OFFSET, otherwise
119 * booting secondary CPUs would end up using TTBR1 for the identity
120 * mapping set up in TTBR0.
121 */
122 bhi 9001f @ PHYS_OFFSET > PAGE_OFFSET?
123 orr \tmp, \tmp, #(((PAGE_OFFSET >> 30) - 1) << 16) @ TTBCR.T1SZ
124#if defined CONFIG_VMSPLIT_2G
125 /* PAGE_OFFSET == 0x80000000, T1SZ == 1 */
126 add \ttbr1, \ttbr1, #1 << 4 @ skip two L1 entries
127#elif defined CONFIG_VMSPLIT_3G
128 /* PAGE_OFFSET == 0xc0000000, T1SZ == 2 */
129 add \ttbr1, \ttbr1, #4096 * (1 + 3) @ only L2 used, skip pgd+3*pmd
130#endif
131 /* CONFIG_VMSPLIT_1G does not need TTBR1 adjustment */
1329001: mcr p15, 0, \tmp, c2, c0, 2 @ TTB control register
133 mcrr p15, 1, \ttbr1, \zero, c2 @ load TTBR1
134 .endm
135
136 __CPUINIT
137
138 /*
139 * AT
140 * TFR EV X F IHD LR S
141 * .EEE ..EE PUI. .TAT 4RVI ZWRS BLDP WCAM
142 * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
143 * 11 0 110 1 0011 1100 .111 1101 < we want
144 */
145 .align 2
146 .type v7_crval, #object
147v7_crval:
148 crval clear=0x0120c302, mmuset=0x30c23c7d, ucset=0x00c01c7c
149
150 .previous
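
The TTBR0/TTBR1 split in v7_ttb_setup is easiest to see with the numbers worked out: TTBCR.T1SZ = (PAGE_OFFSET >> 30) - 1 per VMSPLIT choice, and TTBR1 is then nudged into the swapper tables so the shortened TTBR1 walk lands on the right entries (the layout assumed below, one pgd page followed by four pmd pages, matches the new SWAPPER_PG_DIR_SIZE in mmu.c):

	#include <stdio.h>

	int main(void)
	{
		unsigned long splits[] =
			{ 0x40000000UL, 0x80000000UL, 0xc0000000UL };

		for (int i = 0; i < 3; i++) {
			unsigned long page_offset = splits[i];
			unsigned long t1sz = (page_offset >> 30) - 1;

			/* T1SZ=1: skip the first two 8-byte L1 entries;
			 * T1SZ=2: walk starts at L2, so point TTBR1 past
			 * the pgd page and the first three pmd pages */
			unsigned long ttbr1_off =
				t1sz == 1 ? 16 :
				t1sz == 2 ? 4096 * (1 + 3) : 0;

			printf("PAGE_OFFSET=%#lx  T1SZ=%lu  ttbr1 += %lu\n",
			       page_offset, t1sz, ttbr1_off);
		}
		return 0;
	}

Also worth noting in cpu_v7_set_pte_ext() above: a present but clean page is forced read-only, so the first write traps and lets the kernel emulate the dirty bit in software.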
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 69a98a4204a5..7e9b5bf910c1 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -19,24 +19,11 @@
19 19
20#include "proc-macros.S" 20#include "proc-macros.S"
21 21
22#define TTB_S (1 << 1) 22#ifdef CONFIG_ARM_LPAE
23#define TTB_RGN_NC (0 << 3) 23#include "proc-v7-3level.S"
24#define TTB_RGN_OC_WBWA (1 << 3) 24#else
25#define TTB_RGN_OC_WT (2 << 3) 25#include "proc-v7-2level.S"
26#define TTB_RGN_OC_WB (3 << 3) 26#endif
27#define TTB_NOS (1 << 5)
28#define TTB_IRGN_NC ((0 << 0) | (0 << 6))
29#define TTB_IRGN_WBWA ((0 << 0) | (1 << 6))
30#define TTB_IRGN_WT ((1 << 0) | (0 << 6))
31#define TTB_IRGN_WB ((1 << 0) | (1 << 6))
32
33/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
34#define TTB_FLAGS_UP TTB_IRGN_WB|TTB_RGN_OC_WB
35#define PMD_FLAGS_UP PMD_SECT_WB
36
37/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
38#define TTB_FLAGS_SMP TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
39#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S
40 27
41ENTRY(cpu_v7_proc_init) 28ENTRY(cpu_v7_proc_init)
42 mov pc, lr 29 mov pc, lr
@@ -63,6 +50,7 @@ ENDPROC(cpu_v7_proc_fin)
63 * caches disabled. 50 * caches disabled.
64 */ 51 */
65 .align 5 52 .align 5
53 .pushsection .idmap.text, "ax"
66ENTRY(cpu_v7_reset) 54ENTRY(cpu_v7_reset)
67 mrc p15, 0, r1, c1, c0, 0 @ ctrl register 55 mrc p15, 0, r1, c1, c0, 0 @ ctrl register
68 bic r1, r1, #0x1 @ ...............m 56 bic r1, r1, #0x1 @ ...............m
@@ -71,6 +59,7 @@ ENTRY(cpu_v7_reset)
71 isb 59 isb
72 mov pc, r0 60 mov pc, r0
73ENDPROC(cpu_v7_reset) 61ENDPROC(cpu_v7_reset)
62 .popsection
74 63
75/* 64/*
76 * cpu_v7_do_idle() 65 * cpu_v7_do_idle()
@@ -97,127 +86,12 @@ ENTRY(cpu_v7_dcache_clean_area)
97 mov pc, lr 86 mov pc, lr
98ENDPROC(cpu_v7_dcache_clean_area) 87ENDPROC(cpu_v7_dcache_clean_area)
99 88
100/*
101 * cpu_v7_switch_mm(pgd_phys, tsk)
102 *
103 * Set the translation table base pointer to be pgd_phys
104 *
105 * - pgd_phys - physical address of new TTB
106 *
107 * It is assumed that:
108 * - we are not using split page tables
109 */
110ENTRY(cpu_v7_switch_mm)
111#ifdef CONFIG_MMU
112 mov r2, #0
113 ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
114 ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
115 ALT_UP(orr r0, r0, #TTB_FLAGS_UP)
116#ifdef CONFIG_ARM_ERRATA_430973
117 mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
118#endif
119#ifdef CONFIG_ARM_ERRATA_754322
120 dsb
121#endif
122 mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID
123 isb
1241: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
125 isb
126#ifdef CONFIG_ARM_ERRATA_754322
127 dsb
128#endif
129 mcr p15, 0, r1, c13, c0, 1 @ set context ID
130 isb
131#endif
132 mov pc, lr
133ENDPROC(cpu_v7_switch_mm)
134
135/*
136 * cpu_v7_set_pte_ext(ptep, pte)
137 *
138 * Set a level 2 translation table entry.
139 *
140 * - ptep - pointer to level 2 translation table entry
141 * (hardware version is stored at +2048 bytes)
142 * - pte - PTE value to store
143 * - ext - value for extended PTE bits
144 */
145ENTRY(cpu_v7_set_pte_ext)
146#ifdef CONFIG_MMU
147 str r1, [r0] @ linux version
148
149 bic r3, r1, #0x000003f0
150 bic r3, r3, #PTE_TYPE_MASK
151 orr r3, r3, r2
152 orr r3, r3, #PTE_EXT_AP0 | 2
153
154 tst r1, #1 << 4
155 orrne r3, r3, #PTE_EXT_TEX(1)
156
157 eor r1, r1, #L_PTE_DIRTY
158 tst r1, #L_PTE_RDONLY | L_PTE_DIRTY
159 orrne r3, r3, #PTE_EXT_APX
160
161 tst r1, #L_PTE_USER
162 orrne r3, r3, #PTE_EXT_AP1
163#ifdef CONFIG_CPU_USE_DOMAINS
164 @ allow kernel read/write access to read-only user pages
165 tstne r3, #PTE_EXT_APX
166 bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
167#endif
168
169 tst r1, #L_PTE_XN
170 orrne r3, r3, #PTE_EXT_XN
171
172 tst r1, #L_PTE_YOUNG
173 tstne r1, #L_PTE_PRESENT
174 moveq r3, #0
175
176 ARM( str r3, [r0, #2048]! )
177 THUMB( add r0, r0, #2048 )
178 THUMB( str r3, [r0] )
179 mcr p15, 0, r0, c7, c10, 1 @ flush_pte
180#endif
181 mov pc, lr
182ENDPROC(cpu_v7_set_pte_ext)
183
184 string cpu_v7_name, "ARMv7 Processor" 89 string cpu_v7_name, "ARMv7 Processor"
185 .align 90 .align
186 91
187 /*
188 * Memory region attributes with SCTLR.TRE=1
189 *
190 * n = TEX[0],C,B
191 * TR = PRRR[2n+1:2n] - memory type
192 * IR = NMRR[2n+1:2n] - inner cacheable property
193 * OR = NMRR[2n+17:2n+16] - outer cacheable property
194 *
195 * n TR IR OR
196 * UNCACHED 000 00
197 * BUFFERABLE 001 10 00 00
198 * WRITETHROUGH 010 10 10 10
199 * WRITEBACK 011 10 11 11
200 * reserved 110
201 * WRITEALLOC 111 10 01 01
202 * DEV_SHARED 100 01
203 * DEV_NONSHARED 100 01
204 * DEV_WC 001 10
205 * DEV_CACHED 011 10
206 *
207 * Other attributes:
208 *
209 * DS0 = PRRR[16] = 0 - device shareable property
210 * DS1 = PRRR[17] = 1 - device shareable property
211 * NS0 = PRRR[18] = 0 - normal shareable property
212 * NS1 = PRRR[19] = 1 - normal shareable property
213 * NOS = PRRR[24+n] = 1 - not outer shareable
214 */
215.equ PRRR, 0xff0a81a8
216.equ NMRR, 0x40e040e0
217
218/* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */ 92/* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
219.globl cpu_v7_suspend_size 93.globl cpu_v7_suspend_size
220.equ cpu_v7_suspend_size, 4 * 7 94.equ cpu_v7_suspend_size, 4 * 8
221#ifdef CONFIG_ARM_CPU_SUSPEND 95#ifdef CONFIG_ARM_CPU_SUSPEND
222ENTRY(cpu_v7_do_suspend) 96ENTRY(cpu_v7_do_suspend)
223 stmfd sp!, {r4 - r10, lr} 97 stmfd sp!, {r4 - r10, lr}
@@ -226,10 +100,11 @@ ENTRY(cpu_v7_do_suspend)
226 stmia r0!, {r4 - r5} 100 stmia r0!, {r4 - r5}
227 mrc p15, 0, r6, c3, c0, 0 @ Domain ID 101 mrc p15, 0, r6, c3, c0, 0 @ Domain ID
228 mrc p15, 0, r7, c2, c0, 1 @ TTB 1 102 mrc p15, 0, r7, c2, c0, 1 @ TTB 1
103 mrc p15, 0, r11, c2, c0, 2 @ TTB control register
229 mrc p15, 0, r8, c1, c0, 0 @ Control register 104 mrc p15, 0, r8, c1, c0, 0 @ Control register
230 mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register 105 mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register
231 mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control 106 mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control
232 stmia r0, {r6 - r10} 107 stmia r0, {r6 - r11}
233 ldmfd sp!, {r4 - r10, pc} 108 ldmfd sp!, {r4 - r10, pc}
234ENDPROC(cpu_v7_do_suspend) 109ENDPROC(cpu_v7_do_suspend)
235 110
@@ -241,13 +116,15 @@ ENTRY(cpu_v7_do_resume)
241 ldmia r0!, {r4 - r5} 116 ldmia r0!, {r4 - r5}
242 mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID 117 mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID
243 mcr p15, 0, r5, c13, c0, 3 @ User r/o thread ID 118 mcr p15, 0, r5, c13, c0, 3 @ User r/o thread ID
244 ldmia r0, {r6 - r10} 119 ldmia r0, {r6 - r11}
245 mcr p15, 0, r6, c3, c0, 0 @ Domain ID 120 mcr p15, 0, r6, c3, c0, 0 @ Domain ID
121#ifndef CONFIG_ARM_LPAE
246 ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP) 122 ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP)
247 ALT_UP(orr r1, r1, #TTB_FLAGS_UP) 123 ALT_UP(orr r1, r1, #TTB_FLAGS_UP)
124#endif
248 mcr p15, 0, r1, c2, c0, 0 @ TTB 0 125 mcr p15, 0, r1, c2, c0, 0 @ TTB 0
249 mcr p15, 0, r7, c2, c0, 1 @ TTB 1 126 mcr p15, 0, r7, c2, c0, 1 @ TTB 1
250 mcr p15, 0, ip, c2, c0, 2 @ TTB control register 127 mcr p15, 0, r11, c2, c0, 2 @ TTB control register
251 mrc p15, 0, r4, c1, c0, 1 @ Read Auxiliary control register 128 mrc p15, 0, r4, c1, c0, 1 @ Read Auxiliary control register
252 teq r4, r9 @ Is it already set? 129 teq r4, r9 @ Is it already set?
253 mcrne p15, 0, r9, c1, c0, 1 @ No, so write it 130 mcrne p15, 0, r9, c1, c0, 1 @ No, so write it
@@ -380,12 +257,7 @@ __v7_setup:
380 dsb 257 dsb
381#ifdef CONFIG_MMU 258#ifdef CONFIG_MMU
382 mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs 259 mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
383 mcr p15, 0, r10, c2, c0, 2 @ TTB control register 260 v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup
384 ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP)
385 ALT_UP(orr r4, r4, #TTB_FLAGS_UP)
386 ALT_SMP(orr r8, r8, #TTB_FLAGS_SMP)
387 ALT_UP(orr r8, r8, #TTB_FLAGS_UP)
388 mcr p15, 0, r8, c2, c0, 1 @ load TTB1
389 ldr r5, =PRRR @ PRRR 261 ldr r5, =PRRR @ PRRR
390 ldr r6, =NMRR @ NMRR 262 ldr r6, =NMRR @ NMRR
391 mcr p15, 0, r5, c10, c2, 0 @ write PRRR 263 mcr p15, 0, r5, c10, c2, 0 @ write PRRR
@@ -407,16 +279,7 @@ __v7_setup:
407 mov pc, lr @ return to head.S:__ret 279 mov pc, lr @ return to head.S:__ret
408ENDPROC(__v7_setup) 280ENDPROC(__v7_setup)
409 281
410 /* AT 282 .align 2
411 * TFR EV X F I D LR S
412 * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM
413 * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
414 * 1 0 110 0011 1100 .111 1101 < we want
415 */
416 .type v7_crval, #object
417v7_crval:
418 crval clear=0x0120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c
419
420__v7_setup_stack: 283__v7_setup_stack:
421 .space 4 * 11 @ 11 registers 284 .space 4 * 11 @ 11 registers
422 285
@@ -438,11 +301,11 @@ __v7_setup_stack:
438 */ 301 */
439.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0 302.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0
440 ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ 303 ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
441 PMD_FLAGS_SMP | \mm_mmuflags) 304 PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags)
442 ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ 305 ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
443 PMD_FLAGS_UP | \mm_mmuflags) 306 PMD_SECT_AF | PMD_FLAGS_UP | \mm_mmuflags)
444 .long PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_AP_WRITE | \ 307 .long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | \
445 PMD_SECT_AP_READ | \io_mmuflags 308 PMD_SECT_AP_READ | PMD_SECT_AF | \io_mmuflags
446 W(b) \initfunc 309 W(b) \initfunc
447 .long cpu_arch_name 310 .long cpu_arch_name
448 .long cpu_elf_name 311 .long cpu_elf_name
@@ -455,6 +318,7 @@ __v7_setup_stack:
455 .long v7_cache_fns 318 .long v7_cache_fns
456.endm 319.endm
457 320
321#ifndef CONFIG_ARM_LPAE
458 /* 322 /*
459 * ARM Ltd. Cortex A5 processor. 323 * ARM Ltd. Cortex A5 processor.
460 */ 324 */
@@ -484,6 +348,7 @@ __v7_ca9mp_proc_info:
484 .long 0xff0ffff0 348 .long 0xff0ffff0
485 __v7_proc __v7_ca9mp_setup 349 __v7_proc __v7_ca9mp_setup
486 .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info 350 .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
351#endif /* CONFIG_ARM_LPAE */
487 352
488 /* 353 /*
489 * ARM Ltd. Cortex A15 processor. 354 * ARM Ltd. Cortex A15 processor.
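
One detail worth calling out in the suspend/resume hunks above: the save area grows from seven to eight 32-bit words (cpu_v7_suspend_size = 4 * 8) because TTBCR is now captured in r11 and restored verbatim rather than rebuilt from a fixed value; under LPAE, TTBCR carries the EAE bit and the T1SZ split, so it must come back exactly as saved. A hedged C mirror of the save-area layout, field order following the stm/ldm register numbering (struct and field names are illustrative):

	#include <stdint.h>

	struct v7_suspend_ctx {
		uint32_t fcse_pid;	/* c13,c0,0: FCSE/PID           (r4)  */
		uint32_t tpid_uro;	/* c13,c0,3: user r/o thread ID (r5)  */
		uint32_t dacr;		/* c3,c0,0:  domain access      (r6)  */
		uint32_t ttbr1;		/* c2,c0,1:  TTB 1              (r7)  */
		uint32_t sctlr;		/* c1,c0,0:  control register   (r8)  */
		uint32_t actlr;		/* c1,c0,1:  aux control        (r9)  */
		uint32_t cpacr;		/* c1,c0,2:  coproc access      (r10) */
		uint32_t ttbcr;		/* c2,c0,2:  TTB control, new   (r11) */
	};

	_Static_assert(sizeof(struct v7_suspend_ctx) == 4 * 8,
		       "matches cpu_v7_suspend_size");

	int main(void) { return 0; }

The __v7_proc hunks make a related LPAE accommodation: section mappings now carry PMD_SECT_AF, since the 3-level format faults on any entry whose access flag is clear.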
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index abf0507a08ae..b0d57869da2d 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -105,6 +105,7 @@ ENTRY(cpu_xsc3_proc_fin)
105 * loc: location to jump to for soft reset 105 * loc: location to jump to for soft reset
106 */ 106 */
107 .align 5 107 .align 5
108 .pushsection .idmap.text, "ax"
108ENTRY(cpu_xsc3_reset) 109ENTRY(cpu_xsc3_reset)
109 mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE 110 mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
110 msr cpsr_c, r1 @ reset CPSR 111 msr cpsr_c, r1 @ reset CPSR
@@ -119,6 +120,8 @@ ENTRY(cpu_xsc3_reset)
119 @ already containing those two last instructions to survive. 120 @ already containing those two last instructions to survive.
120 mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs 121 mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs
121 mov pc, r0 122 mov pc, r0
123ENDPROC(cpu_xsc3_reset)
124 .popsection
122 125
123/* 126/*
124 * cpu_xsc3_do_idle() 127 * cpu_xsc3_do_idle()
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 3277904bebaf..4ffebaa595ee 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -142,6 +142,7 @@ ENTRY(cpu_xscale_proc_fin)
142 * Beware PXA270 erratum E7. 142 * Beware PXA270 erratum E7.
143 */ 143 */
144 .align 5 144 .align 5
145 .pushsection .idmap.text, "ax"
145ENTRY(cpu_xscale_reset) 146ENTRY(cpu_xscale_reset)
146 mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE 147 mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
147 msr cpsr_c, r1 @ reset CPSR 148 msr cpsr_c, r1 @ reset CPSR
@@ -160,6 +161,8 @@ ENTRY(cpu_xscale_reset)
160 @ already containing those two last instructions to survive. 161 @ already containing those two last instructions to survive.
161 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 162 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
162 mov pc, r0 163 mov pc, r0
164ENDPROC(cpu_xscale_reset)
165 .popsection
163 166
164/* 167/*
165 * cpu_xscale_do_idle() 168 * cpu_xscale_do_idle()