author     Russell King <rmk+kernel@arm.linux.org.uk>  2011-12-08 13:02:04 -0500
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2011-12-08 13:02:04 -0500
commit     6ae25a5b9d7ba86d6ac19c403dfa57dae6caa73d (patch)
tree       41d04269f268d6162e5f1866496dd42fbc79d2a4 /arch/arm/mm
parent     3ee0fc5ca129cbae81c073756febcb1c552af446 (diff)
parent     497b7e943d0dc5743454de56dcdb67352bbf96b2 (diff)
Merge branch 'for-rmk' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux into devel-stable
Conflicts:
	arch/arm/mm/ioremap.c
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig           |  17
-rw-r--r--  arch/arm/mm/alignment.c       |   2
-rw-r--r--  arch/arm/mm/context.c         |  19
-rw-r--r--  arch/arm/mm/fault.c           | 111
-rw-r--r--  arch/arm/mm/fault.h           |  27
-rw-r--r--  arch/arm/mm/fsr-2level.c      |  78
-rw-r--r--  arch/arm/mm/fsr-3level.c      |  68
-rw-r--r--  arch/arm/mm/idmap.c           |  28
-rw-r--r--  arch/arm/mm/ioremap.c         |  39
-rw-r--r--  arch/arm/mm/mmu.c             |  46
-rw-r--r--  arch/arm/mm/pgd.c             |  51
-rw-r--r--  arch/arm/mm/proc-macros.S     |   5
-rw-r--r--  arch/arm/mm/proc-v7-2level.S  | 171
-rw-r--r--  arch/arm/mm/proc-v7-3level.S  | 150
-rw-r--r--  arch/arm/mm/proc-v7.S         | 177
15 files changed, 713 insertions, 276 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 67f75a0b66d6..5cf7922ff5e7 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -629,6 +629,23 @@ config IO_36
629 | 629 | ||
630 | comment "Processor Features" | 630 | comment "Processor Features" |
631 | 631 | ||
632 | config ARM_LPAE | ||
633 | bool "Support for the Large Physical Address Extension" | ||
634 | depends on MMU && CPU_V7 | ||
635 | help | ||
636 | Say Y if you have an ARMv7 processor supporting the LPAE page | ||
637 | table format and you would like to access memory beyond the | ||
638 | 4GB limit. The resulting kernel image will not run on | ||
639 | processors without the LPA extension. | ||
640 | |||
641 | If unsure, say N. | ||
642 | |||
643 | config ARCH_PHYS_ADDR_T_64BIT | ||
644 | def_bool ARM_LPAE | ||
645 | |||
646 | config ARCH_DMA_ADDR_T_64BIT | ||
647 | bool | ||
648 | |||
632 | config ARM_THUMB | 649 | config ARM_THUMB |
633 | bool "Support Thumb user binaries" | 650 | bool "Support Thumb user binaries" |
634 | depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON | 651 | depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON |
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index c335c76e0d88..caf14dc059e5 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -968,7 +968,7 @@ static int __init alignment_init(void)
968 | ai_usermode = safe_usermode(ai_usermode, false); | 968 | ai_usermode = safe_usermode(ai_usermode, false); |
969 | } | 969 | } |
970 | 970 | ||
971 | hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN, | 971 | hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN, |
972 | "alignment exception"); | 972 | "alignment exception"); |
973 | 973 | ||
974 | /* | 974 | /* |
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 93aac068da94..ee9bb363d606 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -22,6 +22,21 @@ unsigned int cpu_last_asid = ASID_FIRST_VERSION;
22 | DEFINE_PER_CPU(struct mm_struct *, current_mm); | 22 | DEFINE_PER_CPU(struct mm_struct *, current_mm); |
23 | #endif | 23 | #endif |
24 | 24 | ||
25 | #ifdef CONFIG_ARM_LPAE | ||
26 | #define cpu_set_asid(asid) { \ | ||
27 | unsigned long ttbl, ttbh; \ | ||
28 | asm volatile( \ | ||
29 | " mrrc p15, 0, %0, %1, c2 @ read TTBR0\n" \ | ||
30 | " mov %1, %2, lsl #(48 - 32) @ set ASID\n" \ | ||
31 | " mcrr p15, 0, %0, %1, c2 @ set TTBR0\n" \ | ||
32 | : "=&r" (ttbl), "=&r" (ttbh) \ | ||
33 | : "r" (asid & ~ASID_MASK)); \ | ||
34 | } | ||
35 | #else | ||
36 | #define cpu_set_asid(asid) \ | ||
37 | asm(" mcr p15, 0, %0, c13, c0, 1\n" : : "r" (asid)) | ||
38 | #endif | ||
39 | |||
25 | /* | 40 | /* |
26 | * We fork()ed a process, and we need a new context for the child | 41 | * We fork()ed a process, and we need a new context for the child |
27 | * to run in. We reserve version 0 for initial tasks so we will | 42 | * to run in. We reserve version 0 for initial tasks so we will |
@@ -37,7 +52,7 @@ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
37 | static void flush_context(void) | 52 | static void flush_context(void) |
38 | { | 53 | { |
39 | /* set the reserved ASID before flushing the TLB */ | 54 | /* set the reserved ASID before flushing the TLB */ |
40 | asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (0)); | 55 | cpu_set_asid(0); |
41 | isb(); | 56 | isb(); |
42 | local_flush_tlb_all(); | 57 | local_flush_tlb_all(); |
43 | if (icache_is_vivt_asid_tagged()) { | 58 | if (icache_is_vivt_asid_tagged()) { |
@@ -99,7 +114,7 @@ static void reset_context(void *info)
99 | set_mm_context(mm, asid); | 114 | set_mm_context(mm, asid); |
100 | 115 | ||
101 | /* set the new ASID */ | 116 | /* set the new ASID */ |
102 | asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id)); | 117 | cpu_set_asid(mm->context.id); |
103 | isb(); | 118 | isb(); |
104 | } | 119 | } |
105 | 120 | ||
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index aa33949fef60..eb5520fc755f 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -27,19 +27,6 @@
27 | 27 | ||
28 | #include "fault.h" | 28 | #include "fault.h" |
29 | 29 | ||
30 | /* | ||
31 | * Fault status register encodings. We steal bit 31 for our own purposes. | ||
32 | */ | ||
33 | #define FSR_LNX_PF (1 << 31) | ||
34 | #define FSR_WRITE (1 << 11) | ||
35 | #define FSR_FS4 (1 << 10) | ||
36 | #define FSR_FS3_0 (15) | ||
37 | |||
38 | static inline int fsr_fs(unsigned int fsr) | ||
39 | { | ||
40 | return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6; | ||
41 | } | ||
42 | |||
43 | #ifdef CONFIG_MMU | 30 | #ifdef CONFIG_MMU |
44 | 31 | ||
45 | #ifdef CONFIG_KPROBES | 32 | #ifdef CONFIG_KPROBES |
@@ -123,8 +110,10 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
123 | 110 | ||
124 | pte = pte_offset_map(pmd, addr); | 111 | pte = pte_offset_map(pmd, addr); |
125 | printk(", *pte=%08llx", (long long)pte_val(*pte)); | 112 | printk(", *pte=%08llx", (long long)pte_val(*pte)); |
113 | #ifndef CONFIG_ARM_LPAE | ||
126 | printk(", *ppte=%08llx", | 114 | printk(", *ppte=%08llx", |
127 | (long long)pte_val(pte[PTE_HWTABLE_PTRS])); | 115 | (long long)pte_val(pte[PTE_HWTABLE_PTRS])); |
116 | #endif | ||
128 | pte_unmap(pte); | 117 | pte_unmap(pte); |
129 | } while(0); | 118 | } while(0); |
130 | 119 | ||
@@ -441,6 +430,12 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
441 | pmd = pmd_offset(pud, addr); | 430 | pmd = pmd_offset(pud, addr); |
442 | pmd_k = pmd_offset(pud_k, addr); | 431 | pmd_k = pmd_offset(pud_k, addr); |
443 | 432 | ||
433 | #ifdef CONFIG_ARM_LPAE | ||
434 | /* | ||
435 | * Only one hardware entry per PMD with LPAE. | ||
436 | */ | ||
437 | index = 0; | ||
438 | #else | ||
444 | /* | 439 | /* |
445 | * On ARM one Linux PGD entry contains two hardware entries (see page | 440 | * On ARM one Linux PGD entry contains two hardware entries (see page |
446 | * tables layout in pgtable.h). We normally guarantee that we always | 441 | * tables layout in pgtable.h). We normally guarantee that we always |
@@ -450,6 +445,7 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
450 | * for the first of pair. | 445 | * for the first of pair. |
451 | */ | 446 | */ |
452 | index = (addr >> SECTION_SHIFT) & 1; | 447 | index = (addr >> SECTION_SHIFT) & 1; |
448 | #endif | ||
453 | if (pmd_none(pmd_k[index])) | 449 | if (pmd_none(pmd_k[index])) |
454 | goto bad_area; | 450 | goto bad_area; |
455 | 451 | ||
@@ -489,55 +485,20 @@ do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
489 | return 1; | 485 | return 1; |
490 | } | 486 | } |
491 | 487 | ||
492 | static struct fsr_info { | 488 | struct fsr_info { |
493 | int (*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs); | 489 | int (*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs); |
494 | int sig; | 490 | int sig; |
495 | int code; | 491 | int code; |
496 | const char *name; | 492 | const char *name; |
497 | } fsr_info[] = { | ||
498 | /* | ||
499 | * The following are the standard ARMv3 and ARMv4 aborts. ARMv5 | ||
500 | * defines these to be "precise" aborts. | ||
501 | */ | ||
502 | { do_bad, SIGSEGV, 0, "vector exception" }, | ||
503 | { do_bad, SIGBUS, BUS_ADRALN, "alignment exception" }, | ||
504 | { do_bad, SIGKILL, 0, "terminal exception" }, | ||
505 | { do_bad, SIGBUS, BUS_ADRALN, "alignment exception" }, | ||
506 | { do_bad, SIGBUS, 0, "external abort on linefetch" }, | ||
507 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" }, | ||
508 | { do_bad, SIGBUS, 0, "external abort on linefetch" }, | ||
509 | { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" }, | ||
510 | { do_bad, SIGBUS, 0, "external abort on non-linefetch" }, | ||
511 | { do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" }, | ||
512 | { do_bad, SIGBUS, 0, "external abort on non-linefetch" }, | ||
513 | { do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" }, | ||
514 | { do_bad, SIGBUS, 0, "external abort on translation" }, | ||
515 | { do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" }, | ||
516 | { do_bad, SIGBUS, 0, "external abort on translation" }, | ||
517 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" }, | ||
518 | /* | ||
519 | * The following are "imprecise" aborts, which are signalled by bit | ||
520 | * 10 of the FSR, and may not be recoverable. These are only | ||
521 | * supported if the CPU abort handler supports bit 10. | ||
522 | */ | ||
523 | { do_bad, SIGBUS, 0, "unknown 16" }, | ||
524 | { do_bad, SIGBUS, 0, "unknown 17" }, | ||
525 | { do_bad, SIGBUS, 0, "unknown 18" }, | ||
526 | { do_bad, SIGBUS, 0, "unknown 19" }, | ||
527 | { do_bad, SIGBUS, 0, "lock abort" }, /* xscale */ | ||
528 | { do_bad, SIGBUS, 0, "unknown 21" }, | ||
529 | { do_bad, SIGBUS, BUS_OBJERR, "imprecise external abort" }, /* xscale */ | ||
530 | { do_bad, SIGBUS, 0, "unknown 23" }, | ||
531 | { do_bad, SIGBUS, 0, "dcache parity error" }, /* xscale */ | ||
532 | { do_bad, SIGBUS, 0, "unknown 25" }, | ||
533 | { do_bad, SIGBUS, 0, "unknown 26" }, | ||
534 | { do_bad, SIGBUS, 0, "unknown 27" }, | ||
535 | { do_bad, SIGBUS, 0, "unknown 28" }, | ||
536 | { do_bad, SIGBUS, 0, "unknown 29" }, | ||
537 | { do_bad, SIGBUS, 0, "unknown 30" }, | ||
538 | { do_bad, SIGBUS, 0, "unknown 31" } | ||
539 | }; | 493 | }; |
540 | 494 | ||
495 | /* FSR definition */ | ||
496 | #ifdef CONFIG_ARM_LPAE | ||
497 | #include "fsr-3level.c" | ||
498 | #else | ||
499 | #include "fsr-2level.c" | ||
500 | #endif | ||
501 | |||
541 | void __init | 502 | void __init |
542 | hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *), | 503 | hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *), |
543 | int sig, int code, const char *name) | 504 | int sig, int code, const char *name) |
@@ -573,42 +534,6 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
573 | arm_notify_die("", regs, &info, fsr, 0); | 534 | arm_notify_die("", regs, &info, fsr, 0); |
574 | } | 535 | } |
575 | 536 | ||
576 | |||
577 | static struct fsr_info ifsr_info[] = { | ||
578 | { do_bad, SIGBUS, 0, "unknown 0" }, | ||
579 | { do_bad, SIGBUS, 0, "unknown 1" }, | ||
580 | { do_bad, SIGBUS, 0, "debug event" }, | ||
581 | { do_bad, SIGSEGV, SEGV_ACCERR, "section access flag fault" }, | ||
582 | { do_bad, SIGBUS, 0, "unknown 4" }, | ||
583 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" }, | ||
584 | { do_bad, SIGSEGV, SEGV_ACCERR, "page access flag fault" }, | ||
585 | { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" }, | ||
586 | { do_bad, SIGBUS, 0, "external abort on non-linefetch" }, | ||
587 | { do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" }, | ||
588 | { do_bad, SIGBUS, 0, "unknown 10" }, | ||
589 | { do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" }, | ||
590 | { do_bad, SIGBUS, 0, "external abort on translation" }, | ||
591 | { do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" }, | ||
592 | { do_bad, SIGBUS, 0, "external abort on translation" }, | ||
593 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" }, | ||
594 | { do_bad, SIGBUS, 0, "unknown 16" }, | ||
595 | { do_bad, SIGBUS, 0, "unknown 17" }, | ||
596 | { do_bad, SIGBUS, 0, "unknown 18" }, | ||
597 | { do_bad, SIGBUS, 0, "unknown 19" }, | ||
598 | { do_bad, SIGBUS, 0, "unknown 20" }, | ||
599 | { do_bad, SIGBUS, 0, "unknown 21" }, | ||
600 | { do_bad, SIGBUS, 0, "unknown 22" }, | ||
601 | { do_bad, SIGBUS, 0, "unknown 23" }, | ||
602 | { do_bad, SIGBUS, 0, "unknown 24" }, | ||
603 | { do_bad, SIGBUS, 0, "unknown 25" }, | ||
604 | { do_bad, SIGBUS, 0, "unknown 26" }, | ||
605 | { do_bad, SIGBUS, 0, "unknown 27" }, | ||
606 | { do_bad, SIGBUS, 0, "unknown 28" }, | ||
607 | { do_bad, SIGBUS, 0, "unknown 29" }, | ||
608 | { do_bad, SIGBUS, 0, "unknown 30" }, | ||
609 | { do_bad, SIGBUS, 0, "unknown 31" }, | ||
610 | }; | ||
611 | |||
612 | void __init | 537 | void __init |
613 | hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *), | 538 | hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *), |
614 | int sig, int code, const char *name) | 539 | int sig, int code, const char *name) |
@@ -641,6 +566,7 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
641 | arm_notify_die("", regs, &info, ifsr, 0); | 566 | arm_notify_die("", regs, &info, ifsr, 0); |
642 | } | 567 | } |
643 | 568 | ||
569 | #ifndef CONFIG_ARM_LPAE | ||
644 | static int __init exceptions_init(void) | 570 | static int __init exceptions_init(void) |
645 | { | 571 | { |
646 | if (cpu_architecture() >= CPU_ARCH_ARMv6) { | 572 | if (cpu_architecture() >= CPU_ARCH_ARMv6) { |
@@ -663,3 +589,4 @@ static int __init exceptions_init(void)
663 | } | 589 | } |
664 | 590 | ||
665 | arch_initcall(exceptions_init); | 591 | arch_initcall(exceptions_init); |
592 | #endif | ||
diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
index 49e9e3804de4..cf08bdfbe0d6 100644
--- a/arch/arm/mm/fault.h
+++ b/arch/arm/mm/fault.h
@@ -1,3 +1,28 @@
1 | void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs); | 1 | #ifndef __ARCH_ARM_FAULT_H |
2 | #define __ARCH_ARM_FAULT_H | ||
3 | |||
4 | /* | ||
5 | * Fault status register encodings. We steal bit 31 for our own purposes. | ||
6 | */ | ||
7 | #define FSR_LNX_PF (1 << 31) | ||
8 | #define FSR_WRITE (1 << 11) | ||
9 | #define FSR_FS4 (1 << 10) | ||
10 | #define FSR_FS3_0 (15) | ||
11 | #define FSR_FS5_0 (0x3f) | ||
12 | |||
13 | #ifdef CONFIG_ARM_LPAE | ||
14 | static inline int fsr_fs(unsigned int fsr) | ||
15 | { | ||
16 | return fsr & FSR_FS5_0; | ||
17 | } | ||
18 | #else | ||
19 | static inline int fsr_fs(unsigned int fsr) | ||
20 | { | ||
21 | return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6; | ||
22 | } | ||
23 | #endif | ||
2 | 24 | ||
25 | void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs); | ||
3 | unsigned long search_exception_table(unsigned long addr); | 26 | unsigned long search_exception_table(unsigned long addr); |
27 | |||
28 | #endif /* __ARCH_ARM_FAULT_H */ | ||
diff --git a/arch/arm/mm/fsr-2level.c b/arch/arm/mm/fsr-2level.c
new file mode 100644
index 000000000000..18ca74c0f341
--- /dev/null
+++ b/arch/arm/mm/fsr-2level.c
@@ -0,0 +1,78 @@
1 | static struct fsr_info fsr_info[] = { | ||
2 | /* | ||
3 | * The following are the standard ARMv3 and ARMv4 aborts. ARMv5 | ||
4 | * defines these to be "precise" aborts. | ||
5 | */ | ||
6 | { do_bad, SIGSEGV, 0, "vector exception" }, | ||
7 | { do_bad, SIGBUS, BUS_ADRALN, "alignment exception" }, | ||
8 | { do_bad, SIGKILL, 0, "terminal exception" }, | ||
9 | { do_bad, SIGBUS, BUS_ADRALN, "alignment exception" }, | ||
10 | { do_bad, SIGBUS, 0, "external abort on linefetch" }, | ||
11 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" }, | ||
12 | { do_bad, SIGBUS, 0, "external abort on linefetch" }, | ||
13 | { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" }, | ||
14 | { do_bad, SIGBUS, 0, "external abort on non-linefetch" }, | ||
15 | { do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" }, | ||
16 | { do_bad, SIGBUS, 0, "external abort on non-linefetch" }, | ||
17 | { do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" }, | ||
18 | { do_bad, SIGBUS, 0, "external abort on translation" }, | ||
19 | { do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" }, | ||
20 | { do_bad, SIGBUS, 0, "external abort on translation" }, | ||
21 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" }, | ||
22 | /* | ||
23 | * The following are "imprecise" aborts, which are signalled by bit | ||
24 | * 10 of the FSR, and may not be recoverable. These are only | ||
25 | * supported if the CPU abort handler supports bit 10. | ||
26 | */ | ||
27 | { do_bad, SIGBUS, 0, "unknown 16" }, | ||
28 | { do_bad, SIGBUS, 0, "unknown 17" }, | ||
29 | { do_bad, SIGBUS, 0, "unknown 18" }, | ||
30 | { do_bad, SIGBUS, 0, "unknown 19" }, | ||
31 | { do_bad, SIGBUS, 0, "lock abort" }, /* xscale */ | ||
32 | { do_bad, SIGBUS, 0, "unknown 21" }, | ||
33 | { do_bad, SIGBUS, BUS_OBJERR, "imprecise external abort" }, /* xscale */ | ||
34 | { do_bad, SIGBUS, 0, "unknown 23" }, | ||
35 | { do_bad, SIGBUS, 0, "dcache parity error" }, /* xscale */ | ||
36 | { do_bad, SIGBUS, 0, "unknown 25" }, | ||
37 | { do_bad, SIGBUS, 0, "unknown 26" }, | ||
38 | { do_bad, SIGBUS, 0, "unknown 27" }, | ||
39 | { do_bad, SIGBUS, 0, "unknown 28" }, | ||
40 | { do_bad, SIGBUS, 0, "unknown 29" }, | ||
41 | { do_bad, SIGBUS, 0, "unknown 30" }, | ||
42 | { do_bad, SIGBUS, 0, "unknown 31" }, | ||
43 | }; | ||
44 | |||
45 | static struct fsr_info ifsr_info[] = { | ||
46 | { do_bad, SIGBUS, 0, "unknown 0" }, | ||
47 | { do_bad, SIGBUS, 0, "unknown 1" }, | ||
48 | { do_bad, SIGBUS, 0, "debug event" }, | ||
49 | { do_bad, SIGSEGV, SEGV_ACCERR, "section access flag fault" }, | ||
50 | { do_bad, SIGBUS, 0, "unknown 4" }, | ||
51 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" }, | ||
52 | { do_bad, SIGSEGV, SEGV_ACCERR, "page access flag fault" }, | ||
53 | { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" }, | ||
54 | { do_bad, SIGBUS, 0, "external abort on non-linefetch" }, | ||
55 | { do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" }, | ||
56 | { do_bad, SIGBUS, 0, "unknown 10" }, | ||
57 | { do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" }, | ||
58 | { do_bad, SIGBUS, 0, "external abort on translation" }, | ||
59 | { do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" }, | ||
60 | { do_bad, SIGBUS, 0, "external abort on translation" }, | ||
61 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" }, | ||
62 | { do_bad, SIGBUS, 0, "unknown 16" }, | ||
63 | { do_bad, SIGBUS, 0, "unknown 17" }, | ||
64 | { do_bad, SIGBUS, 0, "unknown 18" }, | ||
65 | { do_bad, SIGBUS, 0, "unknown 19" }, | ||
66 | { do_bad, SIGBUS, 0, "unknown 20" }, | ||
67 | { do_bad, SIGBUS, 0, "unknown 21" }, | ||
68 | { do_bad, SIGBUS, 0, "unknown 22" }, | ||
69 | { do_bad, SIGBUS, 0, "unknown 23" }, | ||
70 | { do_bad, SIGBUS, 0, "unknown 24" }, | ||
71 | { do_bad, SIGBUS, 0, "unknown 25" }, | ||
72 | { do_bad, SIGBUS, 0, "unknown 26" }, | ||
73 | { do_bad, SIGBUS, 0, "unknown 27" }, | ||
74 | { do_bad, SIGBUS, 0, "unknown 28" }, | ||
75 | { do_bad, SIGBUS, 0, "unknown 29" }, | ||
76 | { do_bad, SIGBUS, 0, "unknown 30" }, | ||
77 | { do_bad, SIGBUS, 0, "unknown 31" }, | ||
78 | }; | ||
diff --git a/arch/arm/mm/fsr-3level.c b/arch/arm/mm/fsr-3level.c
new file mode 100644
index 000000000000..05a4e9431836
--- /dev/null
+++ b/arch/arm/mm/fsr-3level.c
@@ -0,0 +1,68 @@
1 | static struct fsr_info fsr_info[] = { | ||
2 | { do_bad, SIGBUS, 0, "unknown 0" }, | ||
3 | { do_bad, SIGBUS, 0, "unknown 1" }, | ||
4 | { do_bad, SIGBUS, 0, "unknown 2" }, | ||
5 | { do_bad, SIGBUS, 0, "unknown 3" }, | ||
6 | { do_bad, SIGBUS, 0, "reserved translation fault" }, | ||
7 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" }, | ||
8 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" }, | ||
9 | { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, | ||
10 | { do_bad, SIGBUS, 0, "reserved access flag fault" }, | ||
11 | { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, | ||
12 | { do_bad, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, | ||
13 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" }, | ||
14 | { do_bad, SIGBUS, 0, "reserved permission fault" }, | ||
15 | { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" }, | ||
16 | { do_sect_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" }, | ||
17 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" }, | ||
18 | { do_bad, SIGBUS, 0, "synchronous external abort" }, | ||
19 | { do_bad, SIGBUS, 0, "asynchronous external abort" }, | ||
20 | { do_bad, SIGBUS, 0, "unknown 18" }, | ||
21 | { do_bad, SIGBUS, 0, "unknown 19" }, | ||
22 | { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, | ||
23 | { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, | ||
24 | { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, | ||
25 | { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, | ||
26 | { do_bad, SIGBUS, 0, "synchronous parity error" }, | ||
27 | { do_bad, SIGBUS, 0, "asynchronous parity error" }, | ||
28 | { do_bad, SIGBUS, 0, "unknown 26" }, | ||
29 | { do_bad, SIGBUS, 0, "unknown 27" }, | ||
30 | { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, | ||
31 | { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, | ||
32 | { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, | ||
33 | { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, | ||
34 | { do_bad, SIGBUS, 0, "unknown 32" }, | ||
35 | { do_bad, SIGBUS, BUS_ADRALN, "alignment fault" }, | ||
36 | { do_bad, SIGBUS, 0, "debug event" }, | ||
37 | { do_bad, SIGBUS, 0, "unknown 35" }, | ||
38 | { do_bad, SIGBUS, 0, "unknown 36" }, | ||
39 | { do_bad, SIGBUS, 0, "unknown 37" }, | ||
40 | { do_bad, SIGBUS, 0, "unknown 38" }, | ||
41 | { do_bad, SIGBUS, 0, "unknown 39" }, | ||
42 | { do_bad, SIGBUS, 0, "unknown 40" }, | ||
43 | { do_bad, SIGBUS, 0, "unknown 41" }, | ||
44 | { do_bad, SIGBUS, 0, "unknown 42" }, | ||
45 | { do_bad, SIGBUS, 0, "unknown 43" }, | ||
46 | { do_bad, SIGBUS, 0, "unknown 44" }, | ||
47 | { do_bad, SIGBUS, 0, "unknown 45" }, | ||
48 | { do_bad, SIGBUS, 0, "unknown 46" }, | ||
49 | { do_bad, SIGBUS, 0, "unknown 47" }, | ||
50 | { do_bad, SIGBUS, 0, "unknown 48" }, | ||
51 | { do_bad, SIGBUS, 0, "unknown 49" }, | ||
52 | { do_bad, SIGBUS, 0, "unknown 50" }, | ||
53 | { do_bad, SIGBUS, 0, "unknown 51" }, | ||
54 | { do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" }, | ||
55 | { do_bad, SIGBUS, 0, "unknown 53" }, | ||
56 | { do_bad, SIGBUS, 0, "unknown 54" }, | ||
57 | { do_bad, SIGBUS, 0, "unknown 55" }, | ||
58 | { do_bad, SIGBUS, 0, "unknown 56" }, | ||
59 | { do_bad, SIGBUS, 0, "unknown 57" }, | ||
60 | { do_bad, SIGBUS, 0, "implementation fault (coprocessor abort)" }, | ||
61 | { do_bad, SIGBUS, 0, "unknown 59" }, | ||
62 | { do_bad, SIGBUS, 0, "unknown 60" }, | ||
63 | { do_bad, SIGBUS, 0, "unknown 61" }, | ||
64 | { do_bad, SIGBUS, 0, "unknown 62" }, | ||
65 | { do_bad, SIGBUS, 0, "unknown 63" }, | ||
66 | }; | ||
67 | |||
68 | #define ifsr_info fsr_info | ||
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 660f1bc68f99..feacf4c76712 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -8,6 +8,31 @@
8 | 8 | ||
9 | pgd_t *idmap_pgd; | 9 | pgd_t *idmap_pgd; |
10 | 10 | ||
11 | #ifdef CONFIG_ARM_LPAE | ||
12 | static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end, | ||
13 | unsigned long prot) | ||
14 | { | ||
15 | pmd_t *pmd; | ||
16 | unsigned long next; | ||
17 | |||
18 | if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) { | ||
19 | pmd = pmd_alloc_one(&init_mm, addr); | ||
20 | if (!pmd) { | ||
21 | pr_warning("Failed to allocate identity pmd.\n"); | ||
22 | return; | ||
23 | } | ||
24 | pud_populate(&init_mm, pud, pmd); | ||
25 | pmd += pmd_index(addr); | ||
26 | } else | ||
27 | pmd = pmd_offset(pud, addr); | ||
28 | |||
29 | do { | ||
30 | next = pmd_addr_end(addr, end); | ||
31 | *pmd = __pmd((addr & PMD_MASK) | prot); | ||
32 | flush_pmd_entry(pmd); | ||
33 | } while (pmd++, addr = next, addr != end); | ||
34 | } | ||
35 | #else /* !CONFIG_ARM_LPAE */ | ||
11 | static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end, | 36 | static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end, |
12 | unsigned long prot) | 37 | unsigned long prot) |
13 | { | 38 | { |
@@ -19,6 +44,7 @@ static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
19 | pmd[1] = __pmd(addr); | 44 | pmd[1] = __pmd(addr); |
20 | flush_pmd_entry(pmd); | 45 | flush_pmd_entry(pmd); |
21 | } | 46 | } |
47 | #endif /* CONFIG_ARM_LPAE */ | ||
22 | 48 | ||
23 | static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end, | 49 | static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end, |
24 | unsigned long prot) | 50 | unsigned long prot) |
@@ -36,7 +62,7 @@ static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long e
36 | { | 62 | { |
37 | unsigned long prot, next; | 63 | unsigned long prot, next; |
38 | 64 | ||
39 | prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE; | 65 | prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF; |
40 | if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) | 66 | if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) |
41 | prot |= PMD_BIT4; | 67 | prot |= PMD_BIT4; |
42 | 68 | ||
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 12c7ad215ce7..80632e8d7538 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -58,7 +58,7 @@ void __check_kvm_seq(struct mm_struct *mm)
58 | } while (seq != init_mm.context.kvm_seq); | 58 | } while (seq != init_mm.context.kvm_seq); |
59 | } | 59 | } |
60 | 60 | ||
61 | #ifndef CONFIG_SMP | 61 | #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) |
62 | /* | 62 | /* |
63 | * Section support is unsafe on SMP - If you iounmap and ioremap a region, | 63 | * Section support is unsafe on SMP - If you iounmap and ioremap a region, |
64 | * the other CPUs will not see this change until their next context switch. | 64 | * the other CPUs will not see this change until their next context switch. |
@@ -73,13 +73,16 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
73 | { | 73 | { |
74 | unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1)); | 74 | unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1)); |
75 | pgd_t *pgd; | 75 | pgd_t *pgd; |
76 | pud_t *pud; | ||
77 | pmd_t *pmdp; | ||
76 | 78 | ||
77 | flush_cache_vunmap(addr, end); | 79 | flush_cache_vunmap(addr, end); |
78 | pgd = pgd_offset_k(addr); | 80 | pgd = pgd_offset_k(addr); |
81 | pud = pud_offset(pgd, addr); | ||
82 | pmdp = pmd_offset(pud, addr); | ||
79 | do { | 83 | do { |
80 | pmd_t pmd, *pmdp = pmd_offset(pgd, addr); | 84 | pmd_t pmd = *pmdp; |
81 | 85 | ||
82 | pmd = *pmdp; | ||
83 | if (!pmd_none(pmd)) { | 86 | if (!pmd_none(pmd)) { |
84 | /* | 87 | /* |
85 | * Clear the PMD from the page table, and | 88 | * Clear the PMD from the page table, and |
@@ -98,8 +101,8 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
98 | pte_free_kernel(&init_mm, pmd_page_vaddr(pmd)); | 101 | pte_free_kernel(&init_mm, pmd_page_vaddr(pmd)); |
99 | } | 102 | } |
100 | 103 | ||
101 | addr += PGDIR_SIZE; | 104 | addr += PMD_SIZE; |
102 | pgd++; | 105 | pmdp += 2; |
103 | } while (addr < end); | 106 | } while (addr < end); |
104 | 107 | ||
105 | /* | 108 | /* |
@@ -118,6 +121,8 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
118 | { | 121 | { |
119 | unsigned long addr = virt, end = virt + size; | 122 | unsigned long addr = virt, end = virt + size; |
120 | pgd_t *pgd; | 123 | pgd_t *pgd; |
124 | pud_t *pud; | ||
125 | pmd_t *pmd; | ||
121 | 126 | ||
122 | /* | 127 | /* |
123 | * Remove and free any PTE-based mapping, and | 128 | * Remove and free any PTE-based mapping, and |
@@ -126,17 +131,17 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
126 | unmap_area_sections(virt, size); | 131 | unmap_area_sections(virt, size); |
127 | 132 | ||
128 | pgd = pgd_offset_k(addr); | 133 | pgd = pgd_offset_k(addr); |
134 | pud = pud_offset(pgd, addr); | ||
135 | pmd = pmd_offset(pud, addr); | ||
129 | do { | 136 | do { |
130 | pmd_t *pmd = pmd_offset(pgd, addr); | ||
131 | |||
132 | pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect); | 137 | pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect); |
133 | pfn += SZ_1M >> PAGE_SHIFT; | 138 | pfn += SZ_1M >> PAGE_SHIFT; |
134 | pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect); | 139 | pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect); |
135 | pfn += SZ_1M >> PAGE_SHIFT; | 140 | pfn += SZ_1M >> PAGE_SHIFT; |
136 | flush_pmd_entry(pmd); | 141 | flush_pmd_entry(pmd); |
137 | 142 | ||
138 | addr += PGDIR_SIZE; | 143 | addr += PMD_SIZE; |
139 | pgd++; | 144 | pmd += 2; |
140 | } while (addr < end); | 145 | } while (addr < end); |
141 | 146 | ||
142 | return 0; | 147 | return 0; |
@@ -148,6 +153,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
148 | { | 153 | { |
149 | unsigned long addr = virt, end = virt + size; | 154 | unsigned long addr = virt, end = virt + size; |
150 | pgd_t *pgd; | 155 | pgd_t *pgd; |
156 | pud_t *pud; | ||
157 | pmd_t *pmd; | ||
151 | 158 | ||
152 | /* | 159 | /* |
153 | * Remove and free any PTE-based mapping, and | 160 | * Remove and free any PTE-based mapping, and |
@@ -156,6 +163,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
156 | unmap_area_sections(virt, size); | 163 | unmap_area_sections(virt, size); |
157 | 164 | ||
158 | pgd = pgd_offset_k(virt); | 165 | pgd = pgd_offset_k(virt); |
166 | pud = pud_offset(pgd, addr); | ||
167 | pmd = pmd_offset(pud, addr); | ||
159 | do { | 168 | do { |
160 | unsigned long super_pmd_val, i; | 169 | unsigned long super_pmd_val, i; |
161 | 170 | ||
@@ -164,14 +173,12 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
164 | super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20; | 173 | super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20; |
165 | 174 | ||
166 | for (i = 0; i < 8; i++) { | 175 | for (i = 0; i < 8; i++) { |
167 | pmd_t *pmd = pmd_offset(pgd, addr); | ||
168 | |||
169 | pmd[0] = __pmd(super_pmd_val); | 176 | pmd[0] = __pmd(super_pmd_val); |
170 | pmd[1] = __pmd(super_pmd_val); | 177 | pmd[1] = __pmd(super_pmd_val); |
171 | flush_pmd_entry(pmd); | 178 | flush_pmd_entry(pmd); |
172 | 179 | ||
173 | addr += PGDIR_SIZE; | 180 | addr += PMD_SIZE; |
174 | pgd++; | 181 | pmd += 2; |
175 | } | 182 | } |
176 | 183 | ||
177 | pfn += SUPERSECTION_SIZE >> PAGE_SHIFT; | 184 | pfn += SUPERSECTION_SIZE >> PAGE_SHIFT; |
@@ -189,11 +196,13 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
189 | unsigned long addr; | 196 | unsigned long addr; |
190 | struct vm_struct * area; | 197 | struct vm_struct * area; |
191 | 198 | ||
199 | #ifndef CONFIG_ARM_LPAE | ||
192 | /* | 200 | /* |
193 | * High mappings must be supersection aligned | 201 | * High mappings must be supersection aligned |
194 | */ | 202 | */ |
195 | if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK)) | 203 | if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK)) |
196 | return NULL; | 204 | return NULL; |
205 | #endif | ||
197 | 206 | ||
198 | type = get_mem_type(mtype); | 207 | type = get_mem_type(mtype); |
199 | if (!type) | 208 | if (!type) |
@@ -237,7 +246,7 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
237 | return NULL; | 246 | return NULL; |
238 | addr = (unsigned long)area->addr; | 247 | addr = (unsigned long)area->addr; |
239 | 248 | ||
240 | #ifndef CONFIG_SMP | 249 | #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) |
241 | if (DOMAIN_IO == 0 && | 250 | if (DOMAIN_IO == 0 && |
242 | (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) || | 251 | (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) || |
243 | cpu_is_xsc3()) && pfn >= 0x100000 && | 252 | cpu_is_xsc3()) && pfn >= 0x100000 && |
@@ -343,7 +352,7 @@ void __iounmap(volatile void __iomem *io_addr)
343 | read_unlock(&vmlist_lock); | 352 | read_unlock(&vmlist_lock); |
344 | return; | 353 | return; |
345 | } | 354 | } |
346 | #ifndef CONFIG_SMP | 355 | #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) |
347 | /* | 356 | /* |
348 | * If this is a section based mapping we need to handle it | 357 | * If this is a section based mapping we need to handle it |
349 | * specially as the VM subsystem does not know how to handle | 358 | * specially as the VM subsystem does not know how to handle |
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 27e366af67f9..94c5a0c94f5e 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -151,6 +151,7 @@ static int __init early_nowrite(char *__unused)
151 | } | 151 | } |
152 | early_param("nowb", early_nowrite); | 152 | early_param("nowb", early_nowrite); |
153 | 153 | ||
154 | #ifndef CONFIG_ARM_LPAE | ||
154 | static int __init early_ecc(char *p) | 155 | static int __init early_ecc(char *p) |
155 | { | 156 | { |
156 | if (memcmp(p, "on", 2) == 0) | 157 | if (memcmp(p, "on", 2) == 0) |
@@ -160,6 +161,7 @@ static int __init early_ecc(char *p)
160 | return 0; | 161 | return 0; |
161 | } | 162 | } |
162 | early_param("ecc", early_ecc); | 163 | early_param("ecc", early_ecc); |
164 | #endif | ||
163 | 165 | ||
164 | static int __init noalign_setup(char *__unused) | 166 | static int __init noalign_setup(char *__unused) |
165 | { | 167 | { |
@@ -229,10 +231,12 @@ static struct mem_type mem_types[] = {
229 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, | 231 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, |
230 | .domain = DOMAIN_KERNEL, | 232 | .domain = DOMAIN_KERNEL, |
231 | }, | 233 | }, |
234 | #ifndef CONFIG_ARM_LPAE | ||
232 | [MT_MINICLEAN] = { | 235 | [MT_MINICLEAN] = { |
233 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE, | 236 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE, |
234 | .domain = DOMAIN_KERNEL, | 237 | .domain = DOMAIN_KERNEL, |
235 | }, | 238 | }, |
239 | #endif | ||
236 | [MT_LOW_VECTORS] = { | 240 | [MT_LOW_VECTORS] = { |
237 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | | 241 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | |
238 | L_PTE_RDONLY, | 242 | L_PTE_RDONLY, |
@@ -430,6 +434,7 @@ static void __init build_mem_type_table(void)
430 | * ARMv6 and above have extended page tables. | 434 | * ARMv6 and above have extended page tables. |
431 | */ | 435 | */ |
432 | if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) { | 436 | if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) { |
437 | #ifndef CONFIG_ARM_LPAE | ||
433 | /* | 438 | /* |
434 | * Mark cache clean areas and XIP ROM read only | 439 | * Mark cache clean areas and XIP ROM read only |
435 | * from SVC mode and no access from userspace. | 440 | * from SVC mode and no access from userspace. |
@@ -437,6 +442,7 @@ static void __init build_mem_type_table(void)
437 | mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; | 442 | mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; |
438 | mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; | 443 | mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; |
439 | mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; | 444 | mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; |
445 | #endif | ||
440 | 446 | ||
441 | if (is_smp()) { | 447 | if (is_smp()) { |
442 | /* | 448 | /* |
@@ -475,6 +481,18 @@ static void __init build_mem_type_table(void)
475 | mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE; | 481 | mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE; |
476 | } | 482 | } |
477 | 483 | ||
484 | #ifdef CONFIG_ARM_LPAE | ||
485 | /* | ||
486 | * Do not generate access flag faults for the kernel mappings. | ||
487 | */ | ||
488 | for (i = 0; i < ARRAY_SIZE(mem_types); i++) { | ||
489 | mem_types[i].prot_pte |= PTE_EXT_AF; | ||
490 | mem_types[i].prot_sect |= PMD_SECT_AF; | ||
491 | } | ||
492 | kern_pgprot |= PTE_EXT_AF; | ||
493 | vecs_pgprot |= PTE_EXT_AF; | ||
494 | #endif | ||
495 | |||
478 | for (i = 0; i < 16; i++) { | 496 | for (i = 0; i < 16; i++) { |
479 | unsigned long v = pgprot_val(protection_map[i]); | 497 | unsigned long v = pgprot_val(protection_map[i]); |
480 | protection_map[i] = __pgprot(v | user_pgprot); | 498 | protection_map[i] = __pgprot(v | user_pgprot); |
@@ -578,8 +596,10 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
578 | if (((addr | end | phys) & ~SECTION_MASK) == 0) { | 596 | if (((addr | end | phys) & ~SECTION_MASK) == 0) { |
579 | pmd_t *p = pmd; | 597 | pmd_t *p = pmd; |
580 | 598 | ||
599 | #ifndef CONFIG_ARM_LPAE | ||
581 | if (addr & SECTION_SIZE) | 600 | if (addr & SECTION_SIZE) |
582 | pmd++; | 601 | pmd++; |
602 | #endif | ||
583 | 603 | ||
584 | do { | 604 | do { |
585 | *pmd = __pmd(phys | type->prot_sect); | 605 | *pmd = __pmd(phys | type->prot_sect); |
@@ -609,6 +629,7 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
609 | } while (pud++, addr = next, addr != end); | 629 | } while (pud++, addr = next, addr != end); |
610 | } | 630 | } |
611 | 631 | ||
632 | #ifndef CONFIG_ARM_LPAE | ||
612 | static void __init create_36bit_mapping(struct map_desc *md, | 633 | static void __init create_36bit_mapping(struct map_desc *md, |
613 | const struct mem_type *type) | 634 | const struct mem_type *type) |
614 | { | 635 | { |
@@ -668,6 +689,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
668 | pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT; | 689 | pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT; |
669 | } while (addr != end); | 690 | } while (addr != end); |
670 | } | 691 | } |
692 | #endif /* !CONFIG_ARM_LPAE */ | ||
671 | 693 | ||
672 | /* | 694 | /* |
673 | * Create the page directory entries and any necessary | 695 | * Create the page directory entries and any necessary |
@@ -700,6 +722,7 @@ static void __init create_mapping(struct map_desc *md)
700 | 722 | ||
701 | type = &mem_types[md->type]; | 723 | type = &mem_types[md->type]; |
702 | 724 | ||
725 | #ifndef CONFIG_ARM_LPAE | ||
703 | /* | 726 | /* |
704 | * Catch 36-bit addresses | 727 | * Catch 36-bit addresses |
705 | */ | 728 | */ |
@@ -707,6 +730,7 @@ static void __init create_mapping(struct map_desc *md)
707 | create_36bit_mapping(md, type); | 730 | create_36bit_mapping(md, type); |
708 | return; | 731 | return; |
709 | } | 732 | } |
733 | #endif | ||
710 | 734 | ||
711 | addr = md->virtual & PAGE_MASK; | 735 | addr = md->virtual & PAGE_MASK; |
712 | phys = __pfn_to_phys(md->pfn); | 736 | phys = __pfn_to_phys(md->pfn); |
@@ -797,6 +821,9 @@ void __init sanity_check_meminfo(void)
797 | struct membank *bank = &meminfo.bank[j]; | 821 | struct membank *bank = &meminfo.bank[j]; |
798 | *bank = meminfo.bank[i]; | 822 | *bank = meminfo.bank[i]; |
799 | 823 | ||
824 | if (bank->start > ULONG_MAX) | ||
825 | highmem = 1; | ||
826 | |||
800 | #ifdef CONFIG_HIGHMEM | 827 | #ifdef CONFIG_HIGHMEM |
801 | if (__va(bank->start) >= vmalloc_min || | 828 | if (__va(bank->start) >= vmalloc_min || |
802 | __va(bank->start) < (void *)PAGE_OFFSET) | 829 | __va(bank->start) < (void *)PAGE_OFFSET) |
@@ -808,7 +835,7 @@ void __init sanity_check_meminfo(void)
808 | * Split those memory banks which are partially overlapping | 835 | * Split those memory banks which are partially overlapping |
809 | * the vmalloc area greatly simplifying things later. | 836 | * the vmalloc area greatly simplifying things later. |
810 | */ | 837 | */ |
811 | if (__va(bank->start) < vmalloc_min && | 838 | if (!highmem && __va(bank->start) < vmalloc_min && |
812 | bank->size > vmalloc_min - __va(bank->start)) { | 839 | bank->size > vmalloc_min - __va(bank->start)) { |
813 | if (meminfo.nr_banks >= NR_BANKS) { | 840 | if (meminfo.nr_banks >= NR_BANKS) { |
814 | printk(KERN_CRIT "NR_BANKS too low, " | 841 | printk(KERN_CRIT "NR_BANKS too low, " |
@@ -829,6 +856,17 @@ void __init sanity_check_meminfo(void)
829 | bank->highmem = highmem; | 856 | bank->highmem = highmem; |
830 | 857 | ||
831 | /* | 858 | /* |
859 | * Highmem banks not allowed with !CONFIG_HIGHMEM. | ||
860 | */ | ||
861 | if (highmem) { | ||
862 | printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx " | ||
863 | "(!CONFIG_HIGHMEM).\n", | ||
864 | (unsigned long long)bank->start, | ||
865 | (unsigned long long)bank->start + bank->size - 1); | ||
866 | continue; | ||
867 | } | ||
868 | |||
869 | /* | ||
832 | * Check whether this memory bank would entirely overlap | 870 | * Check whether this memory bank would entirely overlap |
833 | * the vmalloc area. | 871 | * the vmalloc area. |
834 | */ | 872 | */ |
@@ -920,7 +958,13 @@ static inline void prepare_page_table(void)
920 | pmd_clear(pmd_off_k(addr)); | 958 | pmd_clear(pmd_off_k(addr)); |
921 | } | 959 | } |
922 | 960 | ||
961 | #ifdef CONFIG_ARM_LPAE | ||
962 | /* the first page is reserved for pgd */ | ||
963 | #define SWAPPER_PG_DIR_SIZE (PAGE_SIZE + \ | ||
964 | PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t)) | ||
965 | #else | ||
923 | #define SWAPPER_PG_DIR_SIZE (PTRS_PER_PGD * sizeof(pgd_t)) | 966 | #define SWAPPER_PG_DIR_SIZE (PTRS_PER_PGD * sizeof(pgd_t)) |
967 | #endif | ||
924 | 968 | ||
925 | /* | 969 | /* |
926 | * Reserve the special regions of memory | 970 | * Reserve the special regions of memory |
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index b2027c154b2a..a3e78ccabd65 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -10,6 +10,7 @@
10 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
11 | #include <linux/gfp.h> | 11 | #include <linux/gfp.h> |
12 | #include <linux/highmem.h> | 12 | #include <linux/highmem.h> |
13 | #include <linux/slab.h> | ||
13 | 14 | ||
14 | #include <asm/pgalloc.h> | 15 | #include <asm/pgalloc.h> |
15 | #include <asm/page.h> | 16 | #include <asm/page.h> |
@@ -17,6 +18,14 @@
17 | 18 | ||
18 | #include "mm.h" | 19 | #include "mm.h" |
19 | 20 | ||
21 | #ifdef CONFIG_ARM_LPAE | ||
22 | #define __pgd_alloc() kmalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL) | ||
23 | #define __pgd_free(pgd) kfree(pgd) | ||
24 | #else | ||
25 | #define __pgd_alloc() (pgd_t *)__get_free_pages(GFP_KERNEL, 2) | ||
26 | #define __pgd_free(pgd) free_pages((unsigned long)pgd, 2) | ||
27 | #endif | ||
28 | |||
20 | /* | 29 | /* |
21 | * need to get a 16k page for level 1 | 30 | * need to get a 16k page for level 1 |
22 | */ | 31 | */ |
@@ -27,7 +36,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
27 | pmd_t *new_pmd, *init_pmd; | 36 | pmd_t *new_pmd, *init_pmd; |
28 | pte_t *new_pte, *init_pte; | 37 | pte_t *new_pte, *init_pte; |
29 | 38 | ||
30 | new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2); | 39 | new_pgd = __pgd_alloc(); |
31 | if (!new_pgd) | 40 | if (!new_pgd) |
32 | goto no_pgd; | 41 | goto no_pgd; |
33 | 42 | ||
@@ -42,10 +51,25 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
42 | 51 | ||
43 | clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t)); | 52 | clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t)); |
44 | 53 | ||
54 | #ifdef CONFIG_ARM_LPAE | ||
55 | /* | ||
56 | * Allocate PMD table for modules and pkmap mappings. | ||
57 | */ | ||
58 | new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR), | ||
59 | MODULES_VADDR); | ||
60 | if (!new_pud) | ||
61 | goto no_pud; | ||
62 | |||
63 | new_pmd = pmd_alloc(mm, new_pud, 0); | ||
64 | if (!new_pmd) | ||
65 | goto no_pmd; | ||
66 | #endif | ||
67 | |||
45 | if (!vectors_high()) { | 68 | if (!vectors_high()) { |
46 | /* | 69 | /* |
47 | * On ARM, first page must always be allocated since it | 70 | * On ARM, first page must always be allocated since it |
48 | * contains the machine vectors. | 71 | * contains the machine vectors. The vectors are always high |
72 | * with LPAE. | ||
49 | */ | 73 | */ |
50 | new_pud = pud_alloc(mm, new_pgd, 0); | 74 | new_pud = pud_alloc(mm, new_pgd, 0); |
51 | if (!new_pud) | 75 | if (!new_pud) |
@@ -74,7 +98,7 @@ no_pte:
74 | no_pmd: | 98 | no_pmd: |
75 | pud_free(mm, new_pud); | 99 | pud_free(mm, new_pud); |
76 | no_pud: | 100 | no_pud: |
77 | free_pages((unsigned long)new_pgd, 2); | 101 | __pgd_free(new_pgd); |
78 | no_pgd: | 102 | no_pgd: |
79 | return NULL; | 103 | return NULL; |
80 | } | 104 | } |
@@ -111,5 +135,24 @@ no_pud:
111 | pgd_clear(pgd); | 135 | pgd_clear(pgd); |
112 | pud_free(mm, pud); | 136 | pud_free(mm, pud); |
113 | no_pgd: | 137 | no_pgd: |
114 | free_pages((unsigned long) pgd_base, 2); | 138 | #ifdef CONFIG_ARM_LPAE |
139 | /* | ||
140 | * Free modules/pkmap or identity pmd tables. | ||
141 | */ | ||
142 | for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) { | ||
143 | if (pgd_none_or_clear_bad(pgd)) | ||
144 | continue; | ||
145 | if (pgd_val(*pgd) & L_PGD_SWAPPER) | ||
146 | continue; | ||
147 | pud = pud_offset(pgd, 0); | ||
148 | if (pud_none_or_clear_bad(pud)) | ||
149 | continue; | ||
150 | pmd = pmd_offset(pud, 0); | ||
151 | pud_clear(pud); | ||
152 | pmd_free(mm, pmd); | ||
153 | pgd_clear(pgd); | ||
154 | pud_free(mm, pud); | ||
155 | } | ||
156 | #endif | ||
157 | __pgd_free(pgd_base); | ||
115 | } | 158 | } |
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 307a4def8d3a..2d8ff3ad86d3 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -91,8 +91,9 @@
91 | #if L_PTE_SHARED != PTE_EXT_SHARED | 91 | #if L_PTE_SHARED != PTE_EXT_SHARED |
92 | #error PTE shared bit mismatch | 92 | #error PTE shared bit mismatch |
93 | #endif | 93 | #endif |
94 | #if (L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\ | 94 | #if !defined (CONFIG_ARM_LPAE) && \ |
95 | L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED | 95 | (L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\ |
96 | L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED | ||
96 | #error Invalid Linux PTE bit settings | 97 | #error Invalid Linux PTE bit settings |
97 | #endif | 98 | #endif |
98 | #endif /* CONFIG_MMU */ | 99 | #endif /* CONFIG_MMU */ |
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
new file mode 100644
index 000000000000..3a4b3e7b888c
--- /dev/null
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -0,0 +1,171 @@
1 | /* | ||
2 | * arch/arm/mm/proc-v7-2level.S | ||
3 | * | ||
4 | * Copyright (C) 2001 Deep Blue Solutions Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #define TTB_S (1 << 1) | ||
12 | #define TTB_RGN_NC (0 << 3) | ||
13 | #define TTB_RGN_OC_WBWA (1 << 3) | ||
14 | #define TTB_RGN_OC_WT (2 << 3) | ||
15 | #define TTB_RGN_OC_WB (3 << 3) | ||
16 | #define TTB_NOS (1 << 5) | ||
17 | #define TTB_IRGN_NC ((0 << 0) | (0 << 6)) | ||
18 | #define TTB_IRGN_WBWA ((0 << 0) | (1 << 6)) | ||
19 | #define TTB_IRGN_WT ((1 << 0) | (0 << 6)) | ||
20 | #define TTB_IRGN_WB ((1 << 0) | (1 << 6)) | ||
21 | |||
22 | /* PTWs cacheable, inner WB not shareable, outer WB not shareable */ | ||
23 | #define TTB_FLAGS_UP TTB_IRGN_WB|TTB_RGN_OC_WB | ||
24 | #define PMD_FLAGS_UP PMD_SECT_WB | ||
25 | |||
26 | /* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */ | ||
27 | #define TTB_FLAGS_SMP TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA | ||
28 | #define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S | ||
29 | |||
30 | /* | ||
31 | * cpu_v7_switch_mm(pgd_phys, tsk) | ||
32 | * | ||
33 | * Set the translation table base pointer to be pgd_phys | ||
34 | * | ||
35 | * - pgd_phys - physical address of new TTB | ||
36 | * | ||
37 | * It is assumed that: | ||
38 | * - we are not using split page tables | ||
39 | */ | ||
40 | ENTRY(cpu_v7_switch_mm) | ||
41 | #ifdef CONFIG_MMU | ||
42 | mov r2, #0 | ||
43 | ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id | ||
44 | ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP) | ||
45 | ALT_UP(orr r0, r0, #TTB_FLAGS_UP) | ||
46 | #ifdef CONFIG_ARM_ERRATA_430973 | ||
47 | mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB | ||
48 | #endif | ||
49 | #ifdef CONFIG_ARM_ERRATA_754322 | ||
50 | dsb | ||
51 | #endif | ||
52 | mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID | ||
53 | isb | ||
54 | 1: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 | ||
55 | isb | ||
56 | #ifdef CONFIG_ARM_ERRATA_754322 | ||
57 | dsb | ||
58 | #endif | ||
59 | mcr p15, 0, r1, c13, c0, 1 @ set context ID | ||
60 | isb | ||
61 | #endif | ||
62 | mov pc, lr | ||
63 | ENDPROC(cpu_v7_switch_mm) | ||
64 | |||
65 | /* | ||
66 | * cpu_v7_set_pte_ext(ptep, pte) | ||
67 | * | ||
68 | * Set a level 2 translation table entry. | ||
69 | * | ||
70 | * - ptep - pointer to level 2 translation table entry | ||
71 | * (hardware version is stored at +2048 bytes) | ||
72 | * - pte - PTE value to store | ||
73 | * - ext - value for extended PTE bits | ||
74 | */ | ||
75 | ENTRY(cpu_v7_set_pte_ext) | ||
76 | #ifdef CONFIG_MMU | ||
77 | str r1, [r0] @ linux version | ||
78 | |||
79 | bic r3, r1, #0x000003f0 | ||
80 | bic r3, r3, #PTE_TYPE_MASK | ||
81 | orr r3, r3, r2 | ||
82 | orr r3, r3, #PTE_EXT_AP0 | 2 | ||
83 | |||
84 | tst r1, #1 << 4 | ||
85 | orrne r3, r3, #PTE_EXT_TEX(1) | ||
86 | |||
87 | eor r1, r1, #L_PTE_DIRTY | ||
88 | tst r1, #L_PTE_RDONLY | L_PTE_DIRTY | ||
89 | orrne r3, r3, #PTE_EXT_APX | ||
90 | |||
91 | tst r1, #L_PTE_USER | ||
92 | orrne r3, r3, #PTE_EXT_AP1 | ||
93 | #ifdef CONFIG_CPU_USE_DOMAINS | ||
94 | @ allow kernel read/write access to read-only user pages | ||
95 | tstne r3, #PTE_EXT_APX | ||
96 | bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0 | ||
97 | #endif | ||
98 | |||
99 | tst r1, #L_PTE_XN | ||
100 | orrne r3, r3, #PTE_EXT_XN | ||
101 | |||
102 | tst r1, #L_PTE_YOUNG | ||
103 | tstne r1, #L_PTE_PRESENT | ||
104 | moveq r3, #0 | ||
105 | |||
106 | ARM( str r3, [r0, #2048]! ) | ||
107 | THUMB( add r0, r0, #2048 ) | ||
108 | THUMB( str r3, [r0] ) | ||
109 | mcr p15, 0, r0, c7, c10, 1 @ flush_pte | ||
110 | #endif | ||
111 | mov pc, lr | ||
112 | ENDPROC(cpu_v7_set_pte_ext) | ||
113 | |||
114 | /* | ||
115 | * Memory region attributes with SCTLR.TRE=1 | ||
116 | * | ||
117 | * n = TEX[0],C,B | ||
118 | * TR = PRRR[2n+1:2n] - memory type | ||
119 | * IR = NMRR[2n+1:2n] - inner cacheable property | ||
120 | * OR = NMRR[2n+17:2n+16] - outer cacheable property | ||
121 | * | ||
122 | * n TR IR OR | ||
123 | * UNCACHED 000 00 | ||
124 | * BUFFERABLE 001 10 00 00 | ||
125 | * WRITETHROUGH 010 10 10 10 | ||
126 | * WRITEBACK 011 10 11 11 | ||
127 | * reserved 110 | ||
128 | * WRITEALLOC 111 10 01 01 | ||
129 | * DEV_SHARED 100 01 | ||
130 | * DEV_NONSHARED 100 01 | ||
131 | * DEV_WC 001 10 | ||
132 | * DEV_CACHED 011 10 | ||
133 | * | ||
134 | * Other attributes: | ||
135 | * | ||
136 | * DS0 = PRRR[16] = 0 - device shareable property | ||
137 | * DS1 = PRRR[17] = 1 - device shareable property | ||
138 | * NS0 = PRRR[18] = 0 - normal shareable property | ||
139 | * NS1 = PRRR[19] = 1 - normal shareable property | ||
140 | * NOS = PRRR[24+n] = 1 - not outer shareable | ||
141 | */ | ||
142 | .equ PRRR, 0xff0a81a8 | ||
143 | .equ NMRR, 0x40e040e0 | ||
144 | |||
145 | /* | ||
146 | * Macro for setting up the TTBRx and TTBCR registers. | ||
147 | * - \ttb0 and \ttb1 updated with the corresponding flags. | ||
148 | */ | ||
149 | .macro v7_ttb_setup, zero, ttbr0, ttbr1, tmp | ||
150 | mcr p15, 0, \zero, c2, c0, 2 @ TTB control register | ||
151 | ALT_SMP(orr \ttbr0, \ttbr0, #TTB_FLAGS_SMP) | ||
152 | ALT_UP(orr \ttbr0, \ttbr0, #TTB_FLAGS_UP) | ||
153 | ALT_SMP(orr \ttbr1, \ttbr1, #TTB_FLAGS_SMP) | ||
154 | ALT_UP(orr \ttbr1, \ttbr1, #TTB_FLAGS_UP) | ||
155 | mcr p15, 0, \ttbr1, c2, c0, 1 @ load TTB1 | ||
156 | .endm | ||
157 | |||
158 | __CPUINIT | ||
159 | |||
160 | /* AT | ||
161 | * TFR EV X F I D LR S | ||
162 | * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM | ||
163 | * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced | ||
164 | * 1 0 110 0011 1100 .111 1101 < we want | ||
165 | */ | ||
166 | .align 2 | ||
167 | .type v7_crval, #object | ||
168 | v7_crval: | ||
169 | crval clear=0x0120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c | ||
170 | |||
171 | .previous | ||
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
new file mode 100644
index 000000000000..8de0f1dd1549
--- /dev/null
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -0,0 +1,150 @@
1 | /* | ||
2 | * arch/arm/mm/proc-v7-3level.S | ||
3 | * | ||
4 | * Copyright (C) 2001 Deep Blue Solutions Ltd. | ||
5 | * Copyright (C) 2011 ARM Ltd. | ||
6 | * Author: Catalin Marinas <catalin.marinas@arm.com> | ||
7 | * based on arch/arm/mm/proc-v7-2level.S | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
21 | */ | ||
22 | |||
23 | #define TTB_IRGN_NC (0 << 8) | ||
24 | #define TTB_IRGN_WBWA (1 << 8) | ||
25 | #define TTB_IRGN_WT (2 << 8) | ||
26 | #define TTB_IRGN_WB (3 << 8) | ||
27 | #define TTB_RGN_NC (0 << 10) | ||
28 | #define TTB_RGN_OC_WBWA (1 << 10) | ||
29 | #define TTB_RGN_OC_WT (2 << 10) | ||
30 | #define TTB_RGN_OC_WB (3 << 10) | ||
31 | #define TTB_S (3 << 12) | ||
32 | #define TTB_EAE (1 << 31) | ||
33 | |||
34 | /* PTWs cacheable, inner WB not shareable, outer WB not shareable */ | ||
35 | #define TTB_FLAGS_UP (TTB_IRGN_WB|TTB_RGN_OC_WB) | ||
36 | #define PMD_FLAGS_UP (PMD_SECT_WB) | ||
37 | |||
38 | /* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */ | ||
39 | #define TTB_FLAGS_SMP (TTB_IRGN_WBWA|TTB_S|TTB_RGN_OC_WBWA) | ||
40 | #define PMD_FLAGS_SMP (PMD_SECT_WBWA|PMD_SECT_S) | ||
41 | |||
42 | /* | ||
43 | * cpu_v7_switch_mm(pgd_phys, tsk) | ||
44 | * | ||
45 | * Set the translation table base pointer to be pgd_phys (physical address of | ||
46 | * the new TTB). | ||
47 | */ | ||
48 | ENTRY(cpu_v7_switch_mm) | ||
49 | #ifdef CONFIG_MMU | ||
50 | ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id | ||
51 | and r3, r1, #0xff | ||
52 | mov r3, r3, lsl #(48 - 32) @ ASID | ||
53 | mcrr p15, 0, r0, r3, c2 @ set TTB 0 | ||
54 | isb | ||
55 | #endif | ||
56 | mov pc, lr | ||
57 | ENDPROC(cpu_v7_switch_mm) | ||
58 | |||
59 | /* | ||
60 | * cpu_v7_set_pte_ext(ptep, pte) | ||
61 | * | ||
62 | * Set a level 2 translation table entry. | ||
63 | * - ptep - pointer to level 3 translation table entry | ||
64 | * - pte - PTE value to store (64-bit in r2 and r3) | ||
65 | */ | ||
66 | ENTRY(cpu_v7_set_pte_ext) | ||
67 | #ifdef CONFIG_MMU | ||
68 | tst r2, #L_PTE_PRESENT | ||
69 | beq 1f | ||
70 | tst r3, #1 << (55 - 32) @ L_PTE_DIRTY | ||
71 | orreq r2, #L_PTE_RDONLY | ||
72 | 1: strd r2, r3, [r0] | ||
73 | mcr p15, 0, r0, c7, c10, 1 @ flush_pte | ||
74 | #endif | ||
75 | mov pc, lr | ||
76 | ENDPROC(cpu_v7_set_pte_ext) | ||
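
The L_PTE_DIRTY test above is the usual software dirty-bit trick: a present but clean PTE is written out read-only so that the first store faults and lets the kernel mark the page dirty. A hedged C sketch of the same decision; the DIRTY position comes from the "1 << (55 - 32)" test above, while the other two bit positions are assumptions for illustration only:

    #include <stdint.h>

    #define PTE_PRESENT  (1ULL << 0)    /* stands for L_PTE_PRESENT; bit assumed */
    #define PTE_RDONLY   (1ULL << 7)    /* stands for L_PTE_RDONLY; bit assumed  */
    #define PTE_DIRTY    (1ULL << 55)   /* matches the test in the code above    */

    /* Mirror of the assembly: present but clean PTEs are stored read-only
     * so the first write traps and the fault handler can set the dirty bit.
     */
    static uint64_t adjust_pte(uint64_t pte)
    {
        if ((pte & PTE_PRESENT) && !(pte & PTE_DIRTY))
            pte |= PTE_RDONLY;
        return pte;
    }
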
77 | |||
78 | /* | ||
79 | * Memory region attributes for LPAE (defined in pgtable-3level.h): | ||
80 | * | ||
81 | * n = AttrIndx[2:0] | ||
82 | * | ||
83 | * n MAIR | ||
84 | * UNCACHED 000 00000000 | ||
85 | * BUFFERABLE 001 01000100 | ||
86 | * DEV_WC 001 01000100 | ||
87 | * WRITETHROUGH 010 10101010 | ||
88 | * WRITEBACK 011 11101110 | ||
89 | * DEV_CACHED 011 11101110 | ||
90 | * DEV_SHARED 100 00000100 | ||
91 | * DEV_NONSHARED 100 00000100 | ||
92 | * unused 101 | ||
93 | * unused 110 | ||
94 | * WRITEALLOC 111 11111111 | ||
95 | */ | ||
96 | .equ PRRR, 0xeeaa4400 @ MAIR0 | ||
97 | .equ NMRR, 0xff000004 @ MAIR1 | ||
98 | |||
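
The two .equ values follow directly from the attribute table above: each AttrIndx owns one byte, MAIR0 covering indices 0-3 and MAIR1 indices 4-7. A small stand-alone C check (illustrative only):

    #include <assert.h>
    #include <stdint.h>

    /* One attribute byte per AttrIndx, taken from the table above. */
    static const uint8_t attr[8] = {
        0x00,   /* 0: UNCACHED                   */
        0x44,   /* 1: BUFFERABLE / DEV_WC        */
        0xaa,   /* 2: WRITETHROUGH               */
        0xee,   /* 3: WRITEBACK / DEV_CACHED     */
        0x04,   /* 4: DEV_SHARED / DEV_NONSHARED */
        0x00,   /* 5: unused                     */
        0x00,   /* 6: unused                     */
        0xff,   /* 7: WRITEALLOC                 */
    };

    static uint32_t mair(int base)      /* base = 0 for MAIR0, 4 for MAIR1 */
    {
        uint32_t v = 0;
        for (int i = 0; i < 4; i++)
            v |= (uint32_t)attr[base + i] << (8 * i);
        return v;
    }

    int main(void)
    {
        assert(mair(0) == 0xeeaa4400);  /* .equ PRRR (reused as MAIR0) */
        assert(mair(4) == 0xff000004);  /* .equ NMRR (reused as MAIR1) */
        return 0;
    }
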
99 | /* | ||
100 | * Macro for setting up the TTBRx and TTBCR registers. | ||
101 | * - \ttbr1 updated. | ||
102 | */ | ||
103 | .macro v7_ttb_setup, zero, ttbr0, ttbr1, tmp | ||
104 | ldr \tmp, =swapper_pg_dir @ swapper_pg_dir virtual address | ||
105 | cmp \ttbr1, \tmp @ PHYS_OFFSET > PAGE_OFFSET? (branch below) | ||
106 | mrc p15, 0, \tmp, c2, c0, 2 @ TTB control register | ||
107 | orr \tmp, \tmp, #TTB_EAE | ||
108 | ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP) | ||
109 | ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP) | ||
110 | ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP << 16) | ||
111 | ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP << 16) | ||
112 | /* | ||
113 | * TTBR0/TTBR1 split (PAGE_OFFSET): | ||
114 | * 0x40000000: T0SZ = 2, T1SZ = 0 (not used) | ||
115 | * 0x80000000: T0SZ = 0, T1SZ = 1 | ||
116 | * 0xc0000000: T0SZ = 0, T1SZ = 2 | ||
117 | * | ||
118 | * Only use this feature if PHYS_OFFSET <= PAGE_OFFSET, otherwise | ||
119 | * booting secondary CPUs would end up using TTBR1 for the identity | ||
120 | * mapping set up in TTBR0. | ||
121 | */ | ||
122 | bhi 9001f @ PHYS_OFFSET > PAGE_OFFSET? | ||
123 | orr \tmp, \tmp, #(((PAGE_OFFSET >> 30) - 1) << 16) @ TTBCR.T1SZ | ||
124 | #if defined CONFIG_VMSPLIT_2G | ||
125 | /* PAGE_OFFSET == 0x80000000, T1SZ == 1 */ | ||
126 | add \ttbr1, \ttbr1, #1 << 4 @ skip two L1 entries | ||
127 | #elif defined CONFIG_VMSPLIT_3G | ||
128 | /* PAGE_OFFSET == 0xc0000000, T1SZ == 2 */ | ||
129 | add \ttbr1, \ttbr1, #4096 * (1 + 3) @ only L2 used, skip pgd+3*pmd | ||
130 | #endif | ||
131 | /* CONFIG_VMSPLIT_1G does not need TTBR1 adjustment */ | ||
132 | 9001: mcr p15, 0, \tmp, c2, c0, 2 @ TTB control register | ||
133 | mcrr p15, 1, \ttbr1, \zero, c2 @ load TTBR1 | ||
134 | .endm | ||
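
The TTBCR and TTBR1 arithmetic in the macro can be reproduced in plain C. The sketch below only illustrates the values the macro computes at assembly time; the helper names are invented:

    #include <assert.h>
    #include <stdint.h>

    /* TTBCR.T1SZ lives at bits [18:16]; the macro computes
     * T1SZ = (PAGE_OFFSET >> 30) - 1 for the supported splits.
     */
    static uint32_t t1sz_field(uint32_t page_offset)
    {
        return ((page_offset >> 30) - 1) << 16;
    }

    /* Offset added to swapper_pg_dir so TTBR1 points at the tables
     * covering the kernel part of the address space.
     */
    static uint32_t ttbr1_offset(uint32_t page_offset)
    {
        if (page_offset == 0x80000000)  /* VMSPLIT_2G: skip two 8-byte L1 entries */
            return 1 << 4;
        if (page_offset == 0xc0000000)  /* VMSPLIT_3G: skip pgd + 3 pmd pages     */
            return 4096 * (1 + 3);
        return 0;                       /* VMSPLIT_1G: no adjustment              */
    }

    int main(void)
    {
        assert(t1sz_field(0x80000000) == (1u << 16));   /* T1SZ = 1 */
        assert(t1sz_field(0xc0000000) == (2u << 16));   /* T1SZ = 2 */
        assert(ttbr1_offset(0xc0000000) == 0x4000);
        return 0;
    }
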
135 | |||
136 | __CPUINIT | ||
137 | |||
138 | /* | ||
139 | * AT | ||
140 | * TFR EV X F IHD LR S | ||
141 | * .EEE ..EE PUI. .TAT 4RVI ZWRS BLDP WCAM | ||
142 | * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced | ||
143 | * 11 0 110 1 0011 1100 .111 1101 < we want | ||
144 | */ | ||
145 | .align 2 | ||
146 | .type v7_crval, #object | ||
147 | v7_crval: | ||
148 | crval clear=0x0120c302, mmuset=0x30c23c7d, ucset=0x00c01c7c | ||
149 | |||
150 | .previous | ||
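
As in the 2-level case, the crval words are consumed by __v7_setup as a clear mask and a set mask for the SCTLR reset value. A hedged sketch of how the masks combine (the LPAE values are the ones defined just above):

    #include <stdint.h>

    /* Sketch of how __v7_setup applies the v7_crval words to the SCTLR:
     * bits in 'clear' are removed from the current value, then the bits
     * in 'mmuset' (MMU kernels) or 'ucset' (no-MMU builds) are ORed in.
     */
    static uint32_t apply_crval(uint32_t sctlr, uint32_t clear, uint32_t set)
    {
        return (sctlr & ~clear) | set;  /* bic r0, r0, r5 ; orr r0, r0, r6 */
    }

    /* LPAE variant from the file above:
     *   apply_crval(sctlr, 0x0120c302, 0x30c23c7d)
     */
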
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 66a185f018a0..7efa2a721d5d 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
@@ -19,24 +19,11 @@ | |||
19 | 19 | ||
20 | #include "proc-macros.S" | 20 | #include "proc-macros.S" |
21 | 21 | ||
22 | #define TTB_S (1 << 1) | 22 | #ifdef CONFIG_ARM_LPAE |
23 | #define TTB_RGN_NC (0 << 3) | 23 | #include "proc-v7-3level.S" |
24 | #define TTB_RGN_OC_WBWA (1 << 3) | 24 | #else |
25 | #define TTB_RGN_OC_WT (2 << 3) | 25 | #include "proc-v7-2level.S" |
26 | #define TTB_RGN_OC_WB (3 << 3) | 26 | #endif |
27 | #define TTB_NOS (1 << 5) | ||
28 | #define TTB_IRGN_NC ((0 << 0) | (0 << 6)) | ||
29 | #define TTB_IRGN_WBWA ((0 << 0) | (1 << 6)) | ||
30 | #define TTB_IRGN_WT ((1 << 0) | (0 << 6)) | ||
31 | #define TTB_IRGN_WB ((1 << 0) | (1 << 6)) | ||
32 | |||
33 | /* PTWs cacheable, inner WB not shareable, outer WB not shareable */ | ||
34 | #define TTB_FLAGS_UP TTB_IRGN_WB|TTB_RGN_OC_WB | ||
35 | #define PMD_FLAGS_UP PMD_SECT_WB | ||
36 | |||
37 | /* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */ | ||
38 | #define TTB_FLAGS_SMP TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA | ||
39 | #define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S | ||
40 | 27 | ||
41 | ENTRY(cpu_v7_proc_init) | 28 | ENTRY(cpu_v7_proc_init) |
42 | mov pc, lr | 29 | mov pc, lr |
@@ -99,127 +86,12 @@ ENTRY(cpu_v7_dcache_clean_area) | |||
99 | mov pc, lr | 86 | mov pc, lr |
100 | ENDPROC(cpu_v7_dcache_clean_area) | 87 | ENDPROC(cpu_v7_dcache_clean_area) |
101 | 88 | ||
102 | /* | ||
103 | * cpu_v7_switch_mm(pgd_phys, tsk) | ||
104 | * | ||
105 | * Set the translation table base pointer to be pgd_phys | ||
106 | * | ||
107 | * - pgd_phys - physical address of new TTB | ||
108 | * | ||
109 | * It is assumed that: | ||
110 | * - we are not using split page tables | ||
111 | */ | ||
112 | ENTRY(cpu_v7_switch_mm) | ||
113 | #ifdef CONFIG_MMU | ||
114 | mov r2, #0 | ||
115 | ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id | ||
116 | ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP) | ||
117 | ALT_UP(orr r0, r0, #TTB_FLAGS_UP) | ||
118 | #ifdef CONFIG_ARM_ERRATA_430973 | ||
119 | mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB | ||
120 | #endif | ||
121 | #ifdef CONFIG_ARM_ERRATA_754322 | ||
122 | dsb | ||
123 | #endif | ||
124 | mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID | ||
125 | isb | ||
126 | 1: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 | ||
127 | isb | ||
128 | #ifdef CONFIG_ARM_ERRATA_754322 | ||
129 | dsb | ||
130 | #endif | ||
131 | mcr p15, 0, r1, c13, c0, 1 @ set context ID | ||
132 | isb | ||
133 | #endif | ||
134 | mov pc, lr | ||
135 | ENDPROC(cpu_v7_switch_mm) | ||
136 | |||
137 | /* | ||
138 | * cpu_v7_set_pte_ext(ptep, pte) | ||
139 | * | ||
140 | * Set a level 2 translation table entry. | ||
141 | * | ||
142 | * - ptep - pointer to level 2 translation table entry | ||
143 | * (hardware version is stored at +2048 bytes) | ||
144 | * - pte - PTE value to store | ||
145 | * - ext - value for extended PTE bits | ||
146 | */ | ||
147 | ENTRY(cpu_v7_set_pte_ext) | ||
148 | #ifdef CONFIG_MMU | ||
149 | str r1, [r0] @ linux version | ||
150 | |||
151 | bic r3, r1, #0x000003f0 | ||
152 | bic r3, r3, #PTE_TYPE_MASK | ||
153 | orr r3, r3, r2 | ||
154 | orr r3, r3, #PTE_EXT_AP0 | 2 | ||
155 | |||
156 | tst r1, #1 << 4 | ||
157 | orrne r3, r3, #PTE_EXT_TEX(1) | ||
158 | |||
159 | eor r1, r1, #L_PTE_DIRTY | ||
160 | tst r1, #L_PTE_RDONLY | L_PTE_DIRTY | ||
161 | orrne r3, r3, #PTE_EXT_APX | ||
162 | |||
163 | tst r1, #L_PTE_USER | ||
164 | orrne r3, r3, #PTE_EXT_AP1 | ||
165 | #ifdef CONFIG_CPU_USE_DOMAINS | ||
166 | @ allow kernel read/write access to read-only user pages | ||
167 | tstne r3, #PTE_EXT_APX | ||
168 | bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0 | ||
169 | #endif | ||
170 | |||
171 | tst r1, #L_PTE_XN | ||
172 | orrne r3, r3, #PTE_EXT_XN | ||
173 | |||
174 | tst r1, #L_PTE_YOUNG | ||
175 | tstne r1, #L_PTE_PRESENT | ||
176 | moveq r3, #0 | ||
177 | |||
178 | ARM( str r3, [r0, #2048]! ) | ||
179 | THUMB( add r0, r0, #2048 ) | ||
180 | THUMB( str r3, [r0] ) | ||
181 | mcr p15, 0, r0, c7, c10, 1 @ flush_pte | ||
182 | #endif | ||
183 | mov pc, lr | ||
184 | ENDPROC(cpu_v7_set_pte_ext) | ||
185 | |||
186 | string cpu_v7_name, "ARMv7 Processor" | 89 | string cpu_v7_name, "ARMv7 Processor" |
187 | .align | 90 | .align |
188 | 91 | ||
189 | /* | ||
190 | * Memory region attributes with SCTLR.TRE=1 | ||
191 | * | ||
192 | * n = TEX[0],C,B | ||
193 | * TR = PRRR[2n+1:2n] - memory type | ||
194 | * IR = NMRR[2n+1:2n] - inner cacheable property | ||
195 | * OR = NMRR[2n+17:2n+16] - outer cacheable property | ||
196 | * | ||
197 | * n TR IR OR | ||
198 | * UNCACHED 000 00 | ||
199 | * BUFFERABLE 001 10 00 00 | ||
200 | * WRITETHROUGH 010 10 10 10 | ||
201 | * WRITEBACK 011 10 11 11 | ||
202 | * reserved 110 | ||
203 | * WRITEALLOC 111 10 01 01 | ||
204 | * DEV_SHARED 100 01 | ||
205 | * DEV_NONSHARED 100 01 | ||
206 | * DEV_WC 001 10 | ||
207 | * DEV_CACHED 011 10 | ||
208 | * | ||
209 | * Other attributes: | ||
210 | * | ||
211 | * DS0 = PRRR[16] = 0 - device shareable property | ||
212 | * DS1 = PRRR[17] = 1 - device shareable property | ||
213 | * NS0 = PRRR[18] = 0 - normal shareable property | ||
214 | * NS1 = PRRR[19] = 1 - normal shareable property | ||
215 | * NOS = PRRR[24+n] = 1 - not outer shareable | ||
216 | */ | ||
217 | .equ PRRR, 0xff0a81a8 | ||
218 | .equ NMRR, 0x40e040e0 | ||
219 | |||
220 | /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */ | 92 | /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */ |
221 | .globl cpu_v7_suspend_size | 93 | .globl cpu_v7_suspend_size |
222 | .equ cpu_v7_suspend_size, 4 * 7 | 94 | .equ cpu_v7_suspend_size, 4 * 8 |
223 | #ifdef CONFIG_ARM_CPU_SUSPEND | 95 | #ifdef CONFIG_ARM_CPU_SUSPEND |
224 | ENTRY(cpu_v7_do_suspend) | 96 | ENTRY(cpu_v7_do_suspend) |
225 | stmfd sp!, {r4 - r10, lr} | 97 | stmfd sp!, {r4 - r10, lr} |
@@ -228,10 +100,11 @@ ENTRY(cpu_v7_do_suspend) | |||
228 | stmia r0!, {r4 - r5} | 100 | stmia r0!, {r4 - r5} |
229 | mrc p15, 0, r6, c3, c0, 0 @ Domain ID | 101 | mrc p15, 0, r6, c3, c0, 0 @ Domain ID |
230 | mrc p15, 0, r7, c2, c0, 1 @ TTB 1 | 102 | mrc p15, 0, r7, c2, c0, 1 @ TTB 1 |
103 | mrc p15, 0, r11, c2, c0, 2 @ TTB control register | ||
231 | mrc p15, 0, r8, c1, c0, 0 @ Control register | 104 | mrc p15, 0, r8, c1, c0, 0 @ Control register |
232 | mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register | 105 | mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register |
233 | mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control | 106 | mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control |
234 | stmia r0, {r6 - r10} | 107 | stmia r0, {r6 - r11} |
235 | ldmfd sp!, {r4 - r10, pc} | 108 | ldmfd sp!, {r4 - r10, pc} |
236 | ENDPROC(cpu_v7_do_suspend) | 109 | ENDPROC(cpu_v7_do_suspend) |
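
With the TTB control register added to the context, the save area grows from seven to eight words, laid out in the ascending register order of the stmia above. A C view of that layout; the struct and field names are invented for illustration:

    #include <stdint.h>

    /* Layout of the cpu_v7 suspend save area implied by the stmia order
     * above (4 * 8 bytes); the struct name is illustrative only.
     */
    struct v7_suspend_ctx {
        uint32_t fcse_pid;  /* r4:  c13, c0, 0 - FCSE/PID              */
        uint32_t tpid_uro;  /* r5:  c13, c0, 3 - user r/o thread ID    */
        uint32_t dacr;      /* r6:  c3,  c0, 0 - domain ID             */
        uint32_t ttbr1;     /* r7:  c2,  c0, 1 - TTB 1                 */
        uint32_t sctlr;     /* r8:  c1,  c0, 0 - control register      */
        uint32_t actlr;     /* r9:  c1,  c0, 1 - auxiliary control     */
        uint32_t cpacr;     /* r10: c1,  c0, 2 - co-processor access   */
        uint32_t ttbcr;     /* r11: c2,  c0, 2 - TTB control (new)     */
    };

    _Static_assert(sizeof(struct v7_suspend_ctx) == 4 * 8,
                   "matches cpu_v7_suspend_size");
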
237 | 110 | ||
@@ -243,13 +116,15 @@ ENTRY(cpu_v7_do_resume) | |||
243 | ldmia r0!, {r4 - r5} | 116 | ldmia r0!, {r4 - r5} |
244 | mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID | 117 | mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID |
245 | mcr p15, 0, r5, c13, c0, 3 @ User r/o thread ID | 118 | mcr p15, 0, r5, c13, c0, 3 @ User r/o thread ID |
246 | ldmia r0, {r6 - r10} | 119 | ldmia r0, {r6 - r11} |
247 | mcr p15, 0, r6, c3, c0, 0 @ Domain ID | 120 | mcr p15, 0, r6, c3, c0, 0 @ Domain ID |
121 | #ifndef CONFIG_ARM_LPAE | ||
248 | ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP) | 122 | ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP) |
249 | ALT_UP(orr r1, r1, #TTB_FLAGS_UP) | 123 | ALT_UP(orr r1, r1, #TTB_FLAGS_UP) |
124 | #endif | ||
250 | mcr p15, 0, r1, c2, c0, 0 @ TTB 0 | 125 | mcr p15, 0, r1, c2, c0, 0 @ TTB 0 |
251 | mcr p15, 0, r7, c2, c0, 1 @ TTB 1 | 126 | mcr p15, 0, r7, c2, c0, 1 @ TTB 1 |
252 | mcr p15, 0, ip, c2, c0, 2 @ TTB control register | 127 | mcr p15, 0, r11, c2, c0, 2 @ TTB control register |
253 | mrc p15, 0, r4, c1, c0, 1 @ Read Auxiliary control register | 128 | mrc p15, 0, r4, c1, c0, 1 @ Read Auxiliary control register |
254 | teq r4, r9 @ Is it already set? | 129 | teq r4, r9 @ Is it already set? |
255 | mcrne p15, 0, r9, c1, c0, 1 @ No, so write it | 130 | mcrne p15, 0, r9, c1, c0, 1 @ No, so write it |
@@ -379,12 +254,7 @@ __v7_setup: | |||
379 | dsb | 254 | dsb |
380 | #ifdef CONFIG_MMU | 255 | #ifdef CONFIG_MMU |
381 | mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs | 256 | mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs |
382 | mcr p15, 0, r10, c2, c0, 2 @ TTB control register | 257 | v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup |
383 | ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP) | ||
384 | ALT_UP(orr r4, r4, #TTB_FLAGS_UP) | ||
385 | ALT_SMP(orr r8, r8, #TTB_FLAGS_SMP) | ||
386 | ALT_UP(orr r8, r8, #TTB_FLAGS_UP) | ||
387 | mcr p15, 0, r8, c2, c0, 1 @ load TTB1 | ||
388 | ldr r5, =PRRR @ PRRR | 258 | ldr r5, =PRRR @ PRRR |
389 | ldr r6, =NMRR @ NMRR | 259 | ldr r6, =NMRR @ NMRR |
390 | mcr p15, 0, r5, c10, c2, 0 @ write PRRR | 260 | mcr p15, 0, r5, c10, c2, 0 @ write PRRR |
@@ -406,16 +276,7 @@ __v7_setup: | |||
406 | mov pc, lr @ return to head.S:__ret | 276 | mov pc, lr @ return to head.S:__ret |
407 | ENDPROC(__v7_setup) | 277 | ENDPROC(__v7_setup) |
408 | 278 | ||
409 | /* AT | 279 | .align 2 |
410 | * TFR EV X F I D LR S | ||
411 | * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM | ||
412 | * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced | ||
413 | * 1 0 110 0011 1100 .111 1101 < we want | ||
414 | */ | ||
415 | .type v7_crval, #object | ||
416 | v7_crval: | ||
417 | crval clear=0x0120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c | ||
418 | |||
419 | __v7_setup_stack: | 280 | __v7_setup_stack: |
420 | .space 4 * 11 @ 11 registers | 281 | .space 4 * 11 @ 11 registers |
421 | 282 | ||
@@ -437,11 +298,11 @@ __v7_setup_stack: | |||
437 | */ | 298 | */ |
438 | .macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0 | 299 | .macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0 |
439 | ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ | 300 | ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ |
440 | PMD_FLAGS_SMP | \mm_mmuflags) | 301 | PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags) |
441 | ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ | 302 | ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ |
442 | PMD_FLAGS_UP | \mm_mmuflags) | 303 | PMD_SECT_AF | PMD_FLAGS_UP | \mm_mmuflags) |
443 | .long PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_AP_WRITE | \ | 304 | .long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | \ |
444 | PMD_SECT_AP_READ | \io_mmuflags | 305 | PMD_SECT_AP_READ | PMD_SECT_AF | \io_mmuflags |
445 | W(b) \initfunc | 306 | W(b) \initfunc |
446 | .long cpu_arch_name | 307 | .long cpu_arch_name |
447 | .long cpu_elf_name | 308 | .long cpu_elf_name |
@@ -454,6 +315,7 @@ __v7_setup_stack: | |||
454 | .long v7_cache_fns | 315 | .long v7_cache_fns |
455 | .endm | 316 | .endm |
456 | 317 | ||
318 | #ifndef CONFIG_ARM_LPAE | ||
457 | /* | 319 | /* |
458 | * ARM Ltd. Cortex A5 processor. | 320 | * ARM Ltd. Cortex A5 processor. |
459 | */ | 321 | */ |
@@ -473,6 +335,7 @@ __v7_ca9mp_proc_info: | |||
473 | .long 0xff0ffff0 | 335 | .long 0xff0ffff0 |
474 | __v7_proc __v7_ca9mp_setup | 336 | __v7_proc __v7_ca9mp_setup |
475 | .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info | 337 | .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info |
338 | #endif /* CONFIG_ARM_LPAE */ | ||
476 | 339 | ||
477 | /* | 340 | /* |
478 | * ARM Ltd. Cortex A15 processor. | 341 | * ARM Ltd. Cortex A15 processor. |