diff options
author | Russell King <rmk+kernel@arm.linux.org.uk> | 2013-06-18 15:11:32 -0400 |
---|---|---|
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2013-06-18 15:11:32 -0400 |
commit | 3fbd55ec21e698221ffb43526090137b07c32586 (patch) | |
tree | 421349dff22226b6b85188a5bf8b0bc6e167dfeb | |
parent | b3f288de7c8add67a3364e989b865b6537838662 (diff) | |
parent | a469abd0f868c902b75532579bf87553dcf1b360 (diff) |
Merge branch 'for-rmk/lpae' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into devel-stable
Conflicts:
arch/arm/kernel/smp.c
Please pull these miscellaneous LPAE fixes I've been collecting for a while
now for 3.11. They've been tested and reviewed by quite a few people, and most
of the patches are pretty trivial. -- Will Deacon.
-rw-r--r-- | arch/arm/include/asm/memory.h | 18 | ||||
-rw-r--r-- | arch/arm/include/asm/page.h | 2 | ||||
-rw-r--r-- | arch/arm/include/asm/pgtable-3level-hwdef.h | 20 | ||||
-rw-r--r-- | arch/arm/include/asm/pgtable-3level.h | 8 | ||||
-rw-r--r-- | arch/arm/include/asm/proc-fns.h | 26 | ||||
-rw-r--r-- | arch/arm/include/uapi/asm/hwcap.h | 2 | ||||
-rw-r--r-- | arch/arm/kernel/head.S | 10 | ||||
-rw-r--r-- | arch/arm/kernel/setup.c | 8 | ||||
-rw-r--r-- | arch/arm/kernel/smp.c | 11 | ||||
-rw-r--r-- | arch/arm/mm/context.c | 9 | ||||
-rw-r--r-- | arch/arm/mm/init.c | 19 | ||||
-rw-r--r-- | arch/arm/mm/mmu.c | 49 | ||||
-rw-r--r-- | arch/arm/mm/proc-v7-3level.S | 53 |
13 files changed, 139 insertions, 96 deletions
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 57870ab313c5..584786f740f9 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h | |||
@@ -18,6 +18,8 @@ | |||
18 | #include <linux/types.h> | 18 | #include <linux/types.h> |
19 | #include <linux/sizes.h> | 19 | #include <linux/sizes.h> |
20 | 20 | ||
21 | #include <asm/cache.h> | ||
22 | |||
21 | #ifdef CONFIG_NEED_MACH_MEMORY_H | 23 | #ifdef CONFIG_NEED_MACH_MEMORY_H |
22 | #include <mach/memory.h> | 24 | #include <mach/memory.h> |
23 | #endif | 25 | #endif |
@@ -141,6 +143,20 @@ | |||
141 | #define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page))) | 143 | #define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page))) |
142 | #define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys))) | 144 | #define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys))) |
143 | 145 | ||
146 | /* | ||
147 | * Minimum guaranteed alignment in pgd_alloc(). The page table pointers passed | ||
148 | * around in head.S and proc-*.S are shifted by this amount, in order to | ||
149 | * leave spare high bits for systems with physical address extension. This | ||
150 | * does not fully accommodate the 40-bit addressing capability of ARM LPAE, but | ||
151 | * gives us about 38-bits or so. | ||
152 | */ | ||
153 | #ifdef CONFIG_ARM_LPAE | ||
154 | #define ARCH_PGD_SHIFT L1_CACHE_SHIFT | ||
155 | #else | ||
156 | #define ARCH_PGD_SHIFT 0 | ||
157 | #endif | ||
158 | #define ARCH_PGD_MASK ((1 << ARCH_PGD_SHIFT) - 1) | ||
159 | |||
144 | #ifndef __ASSEMBLY__ | 160 | #ifndef __ASSEMBLY__ |
145 | 161 | ||
146 | /* | 162 | /* |
@@ -207,7 +223,7 @@ static inline unsigned long __phys_to_virt(unsigned long x) | |||
207 | * direct-mapped view. We assume this is the first page | 223 | * direct-mapped view. We assume this is the first page |
208 | * of RAM in the mem_map as well. | 224 | * of RAM in the mem_map as well. |
209 | */ | 225 | */ |
210 | #define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT) | 226 | #define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT)) |
211 | 227 | ||
212 | /* | 228 | /* |
213 | * These are *only* valid on the kernel direct mapped RAM memory. | 229 | * These are *only* valid on the kernel direct mapped RAM memory. |
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index 812a4944e783..6363f3d1d505 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h | |||
@@ -13,7 +13,7 @@ | |||
13 | /* PAGE_SHIFT determines the page size */ | 13 | /* PAGE_SHIFT determines the page size */ |
14 | #define PAGE_SHIFT 12 | 14 | #define PAGE_SHIFT 12 |
15 | #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) | 15 | #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) |
16 | #define PAGE_MASK (~(PAGE_SIZE-1)) | 16 | #define PAGE_MASK (~((1 << PAGE_SHIFT) - 1)) |
17 | 17 | ||
18 | #ifndef __ASSEMBLY__ | 18 | #ifndef __ASSEMBLY__ |
19 | 19 | ||
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h index f088c864c992..626989fec4d3 100644 --- a/arch/arm/include/asm/pgtable-3level-hwdef.h +++ b/arch/arm/include/asm/pgtable-3level-hwdef.h | |||
@@ -83,4 +83,24 @@ | |||
83 | #define PHYS_MASK_SHIFT (40) | 83 | #define PHYS_MASK_SHIFT (40) |
84 | #define PHYS_MASK ((1ULL << PHYS_MASK_SHIFT) - 1) | 84 | #define PHYS_MASK ((1ULL << PHYS_MASK_SHIFT) - 1) |
85 | 85 | ||
86 | /* | ||
87 | * TTBR0/TTBR1 split (PAGE_OFFSET): | ||
88 | * 0x40000000: T0SZ = 2, T1SZ = 0 (not used) | ||
89 | * 0x80000000: T0SZ = 0, T1SZ = 1 | ||
90 | * 0xc0000000: T0SZ = 0, T1SZ = 2 | ||
91 | * | ||
92 | * Only use this feature if PHYS_OFFSET <= PAGE_OFFSET, otherwise | ||
93 | * booting secondary CPUs would end up using TTBR1 for the identity | ||
94 | * mapping set up in TTBR0. | ||
95 | */ | ||
96 | #if defined CONFIG_VMSPLIT_2G | ||
97 | #define TTBR1_OFFSET 16 /* skip two L1 entries */ | ||
98 | #elif defined CONFIG_VMSPLIT_3G | ||
99 | #define TTBR1_OFFSET (4096 * (1 + 3)) /* only L2, skip pgd + 3*pmd */ | ||
100 | #else | ||
101 | #define TTBR1_OFFSET 0 | ||
102 | #endif | ||
103 | |||
104 | #define TTBR1_SIZE (((PAGE_OFFSET >> 30) - 1) << 16) | ||
105 | |||
86 | #endif | 106 | #endif |
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index 54733e5ef7a1..5689c18c85f5 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h | |||
@@ -33,7 +33,7 @@ | |||
33 | #define PTRS_PER_PMD 512 | 33 | #define PTRS_PER_PMD 512 |
34 | #define PTRS_PER_PGD 4 | 34 | #define PTRS_PER_PGD 4 |
35 | 35 | ||
36 | #define PTE_HWTABLE_PTRS (PTRS_PER_PTE) | 36 | #define PTE_HWTABLE_PTRS (0) |
37 | #define PTE_HWTABLE_OFF (0) | 37 | #define PTE_HWTABLE_OFF (0) |
38 | #define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u64)) | 38 | #define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u64)) |
39 | 39 | ||
@@ -48,16 +48,16 @@ | |||
48 | #define PMD_SHIFT 21 | 48 | #define PMD_SHIFT 21 |
49 | 49 | ||
50 | #define PMD_SIZE (1UL << PMD_SHIFT) | 50 | #define PMD_SIZE (1UL << PMD_SHIFT) |
51 | #define PMD_MASK (~(PMD_SIZE-1)) | 51 | #define PMD_MASK (~((1 << PMD_SHIFT) - 1)) |
52 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | 52 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) |
53 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | 53 | #define PGDIR_MASK (~((1 << PGDIR_SHIFT) - 1)) |
54 | 54 | ||
55 | /* | 55 | /* |
56 | * section address mask and size definitions. | 56 | * section address mask and size definitions. |
57 | */ | 57 | */ |
58 | #define SECTION_SHIFT 21 | 58 | #define SECTION_SHIFT 21 |
59 | #define SECTION_SIZE (1UL << SECTION_SHIFT) | 59 | #define SECTION_SIZE (1UL << SECTION_SHIFT) |
60 | #define SECTION_MASK (~(SECTION_SIZE-1)) | 60 | #define SECTION_MASK (~((1 << SECTION_SHIFT) - 1)) |
61 | 61 | ||
62 | #define USER_PTRS_PER_PGD (PAGE_OFFSET / PGDIR_SIZE) | 62 | #define USER_PTRS_PER_PGD (PAGE_OFFSET / PGDIR_SIZE) |
63 | 63 | ||
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h index a6c99fe62b82..5324c1112f3a 100644 --- a/arch/arm/include/asm/proc-fns.h +++ b/arch/arm/include/asm/proc-fns.h | |||
@@ -60,7 +60,7 @@ extern struct processor { | |||
60 | /* | 60 | /* |
61 | * Set the page table | 61 | * Set the page table |
62 | */ | 62 | */ |
63 | void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm); | 63 | void (*switch_mm)(phys_addr_t pgd_phys, struct mm_struct *mm); |
64 | /* | 64 | /* |
65 | * Set a possibly extended PTE. Non-extended PTEs should | 65 | * Set a possibly extended PTE. Non-extended PTEs should |
66 | * ignore 'ext'. | 66 | * ignore 'ext'. |
@@ -82,7 +82,7 @@ extern void cpu_proc_init(void); | |||
82 | extern void cpu_proc_fin(void); | 82 | extern void cpu_proc_fin(void); |
83 | extern int cpu_do_idle(void); | 83 | extern int cpu_do_idle(void); |
84 | extern void cpu_dcache_clean_area(void *, int); | 84 | extern void cpu_dcache_clean_area(void *, int); |
85 | extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); | 85 | extern void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); |
86 | #ifdef CONFIG_ARM_LPAE | 86 | #ifdef CONFIG_ARM_LPAE |
87 | extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte); | 87 | extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte); |
88 | #else | 88 | #else |
@@ -116,13 +116,25 @@ extern void cpu_resume(void); | |||
116 | #define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm) | 116 | #define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm) |
117 | 117 | ||
118 | #ifdef CONFIG_ARM_LPAE | 118 | #ifdef CONFIG_ARM_LPAE |
119 | |||
120 | #define cpu_get_ttbr(nr) \ | ||
121 | ({ \ | ||
122 | u64 ttbr; \ | ||
123 | __asm__("mrrc p15, " #nr ", %Q0, %R0, c2" \ | ||
124 | : "=r" (ttbr)); \ | ||
125 | ttbr; \ | ||
126 | }) | ||
127 | |||
128 | #define cpu_set_ttbr(nr, val) \ | ||
129 | do { \ | ||
130 | u64 ttbr = val; \ | ||
131 | __asm__("mcrr p15, " #nr ", %Q0, %R0, c2" \ | ||
132 | : : "r" (ttbr)); \ | ||
133 | } while (0) | ||
134 | |||
119 | #define cpu_get_pgd() \ | 135 | #define cpu_get_pgd() \ |
120 | ({ \ | 136 | ({ \ |
121 | unsigned long pg, pg2; \ | 137 | u64 pg = cpu_get_ttbr(0); \ |
122 | __asm__("mrrc p15, 0, %0, %1, c2" \ | ||
123 | : "=r" (pg), "=r" (pg2) \ | ||
124 | : \ | ||
125 | : "cc"); \ | ||
126 | pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1); \ | 138 | pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1); \ |
127 | (pgd_t *)phys_to_virt(pg); \ | 139 | (pgd_t *)phys_to_virt(pg); \ |
128 | }) | 140 | }) |
diff --git a/arch/arm/include/uapi/asm/hwcap.h b/arch/arm/include/uapi/asm/hwcap.h index 3688fd15a32d..6d34d080372a 100644 --- a/arch/arm/include/uapi/asm/hwcap.h +++ b/arch/arm/include/uapi/asm/hwcap.h | |||
@@ -25,6 +25,6 @@ | |||
25 | #define HWCAP_IDIVT (1 << 18) | 25 | #define HWCAP_IDIVT (1 << 18) |
26 | #define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */ | 26 | #define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */ |
27 | #define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT) | 27 | #define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT) |
28 | 28 | #define HWCAP_LPAE (1 << 20) | |
29 | 29 | ||
30 | #endif /* _UAPI__ASMARM_HWCAP_H */ | 30 | #endif /* _UAPI__ASMARM_HWCAP_H */ |
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 8bac553fe213..45e8935cae4e 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S | |||
@@ -156,7 +156,7 @@ ENDPROC(stext) | |||
156 | * | 156 | * |
157 | * Returns: | 157 | * Returns: |
158 | * r0, r3, r5-r7 corrupted | 158 | * r0, r3, r5-r7 corrupted |
159 | * r4 = physical page table address | 159 | * r4 = page table (see ARCH_PGD_SHIFT in asm/memory.h) |
160 | */ | 160 | */ |
161 | __create_page_tables: | 161 | __create_page_tables: |
162 | pgtbl r4, r8 @ page table address | 162 | pgtbl r4, r8 @ page table address |
@@ -331,6 +331,7 @@ __create_page_tables: | |||
331 | #endif | 331 | #endif |
332 | #ifdef CONFIG_ARM_LPAE | 332 | #ifdef CONFIG_ARM_LPAE |
333 | sub r4, r4, #0x1000 @ point to the PGD table | 333 | sub r4, r4, #0x1000 @ point to the PGD table |
334 | mov r4, r4, lsr #ARCH_PGD_SHIFT | ||
334 | #endif | 335 | #endif |
335 | mov pc, lr | 336 | mov pc, lr |
336 | ENDPROC(__create_page_tables) | 337 | ENDPROC(__create_page_tables) |
@@ -408,7 +409,7 @@ __secondary_data: | |||
408 | * r0 = cp#15 control register | 409 | * r0 = cp#15 control register |
409 | * r1 = machine ID | 410 | * r1 = machine ID |
410 | * r2 = atags or dtb pointer | 411 | * r2 = atags or dtb pointer |
411 | * r4 = page table pointer | 412 | * r4 = page table (see ARCH_PGD_SHIFT in asm/memory.h) |
412 | * r9 = processor ID | 413 | * r9 = processor ID |
413 | * r13 = *virtual* address to jump to upon completion | 414 | * r13 = *virtual* address to jump to upon completion |
414 | */ | 415 | */ |
@@ -427,10 +428,7 @@ __enable_mmu: | |||
427 | #ifdef CONFIG_CPU_ICACHE_DISABLE | 428 | #ifdef CONFIG_CPU_ICACHE_DISABLE |
428 | bic r0, r0, #CR_I | 429 | bic r0, r0, #CR_I |
429 | #endif | 430 | #endif |
430 | #ifdef CONFIG_ARM_LPAE | 431 | #ifndef CONFIG_ARM_LPAE |
431 | mov r5, #0 | ||
432 | mcrr p15, 0, r4, r5, c2 @ load TTBR0 | ||
433 | #else | ||
434 | mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ | 432 | mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ |
435 | domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ | 433 | domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ |
436 | domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ | 434 | domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ |
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index a1a2fbaaa31c..ca34224f891f 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c | |||
@@ -367,7 +367,7 @@ void __init early_print(const char *str, ...) | |||
367 | 367 | ||
368 | static void __init cpuid_init_hwcaps(void) | 368 | static void __init cpuid_init_hwcaps(void) |
369 | { | 369 | { |
370 | unsigned int divide_instrs; | 370 | unsigned int divide_instrs, vmsa; |
371 | 371 | ||
372 | if (cpu_architecture() < CPU_ARCH_ARMv7) | 372 | if (cpu_architecture() < CPU_ARCH_ARMv7) |
373 | return; | 373 | return; |
@@ -380,6 +380,11 @@ static void __init cpuid_init_hwcaps(void) | |||
380 | case 1: | 380 | case 1: |
381 | elf_hwcap |= HWCAP_IDIVT; | 381 | elf_hwcap |= HWCAP_IDIVT; |
382 | } | 382 | } |
383 | |||
384 | /* LPAE implies atomic ldrd/strd instructions */ | ||
385 | vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0; | ||
386 | if (vmsa >= 5) | ||
387 | elf_hwcap |= HWCAP_LPAE; | ||
383 | } | 388 | } |
384 | 389 | ||
385 | static void __init feat_v6_fixup(void) | 390 | static void __init feat_v6_fixup(void) |
@@ -892,6 +897,7 @@ static const char *hwcap_str[] = { | |||
892 | "vfpv4", | 897 | "vfpv4", |
893 | "idiva", | 898 | "idiva", |
894 | "idivt", | 899 | "idivt", |
900 | "lpae", | ||
895 | NULL | 901 | NULL |
896 | }; | 902 | }; |
897 | 903 | ||
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index e17d9346baee..32af17932a7a 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c | |||
@@ -79,6 +79,13 @@ void __init smp_set_ops(struct smp_operations *ops) | |||
79 | smp_ops = *ops; | 79 | smp_ops = *ops; |
80 | }; | 80 | }; |
81 | 81 | ||
82 | static unsigned long get_arch_pgd(pgd_t *pgd) | ||
83 | { | ||
84 | phys_addr_t pgdir = virt_to_phys(pgd); | ||
85 | BUG_ON(pgdir & ARCH_PGD_MASK); | ||
86 | return pgdir >> ARCH_PGD_SHIFT; | ||
87 | } | ||
88 | |||
82 | int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) | 89 | int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) |
83 | { | 90 | { |
84 | int ret; | 91 | int ret; |
@@ -93,8 +100,8 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) | |||
93 | #endif | 100 | #endif |
94 | 101 | ||
95 | #ifdef CONFIG_MMU | 102 | #ifdef CONFIG_MMU |
96 | secondary_data.pgdir = virt_to_phys(idmap_pgd); | 103 | secondary_data.pgdir = get_arch_pgd(idmap_pgd); |
97 | secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir); | 104 | secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir); |
98 | #endif | 105 | #endif |
99 | __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data)); | 106 | __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data)); |
100 | outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1)); | 107 | outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1)); |
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index 2ac37372ef52..3675e31473e3 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <asm/smp_plat.h> | 20 | #include <asm/smp_plat.h> |
21 | #include <asm/thread_notify.h> | 21 | #include <asm/thread_notify.h> |
22 | #include <asm/tlbflush.h> | 22 | #include <asm/tlbflush.h> |
23 | #include <asm/proc-fns.h> | ||
23 | 24 | ||
24 | /* | 25 | /* |
25 | * On ARMv6, we have the following structure in the Context ID: | 26 | * On ARMv6, we have the following structure in the Context ID: |
@@ -55,17 +56,11 @@ static cpumask_t tlb_flush_pending; | |||
55 | #ifdef CONFIG_ARM_LPAE | 56 | #ifdef CONFIG_ARM_LPAE |
56 | static void cpu_set_reserved_ttbr0(void) | 57 | static void cpu_set_reserved_ttbr0(void) |
57 | { | 58 | { |
58 | unsigned long ttbl = __pa(swapper_pg_dir); | ||
59 | unsigned long ttbh = 0; | ||
60 | |||
61 | /* | 59 | /* |
62 | * Set TTBR0 to swapper_pg_dir which contains only global entries. The | 60 | * Set TTBR0 to swapper_pg_dir which contains only global entries. The |
63 | * ASID is set to 0. | 61 | * ASID is set to 0. |
64 | */ | 62 | */ |
65 | asm volatile( | 63 | cpu_set_ttbr(0, __pa(swapper_pg_dir)); |
66 | " mcrr p15, 0, %0, %1, c2 @ set TTBR0\n" | ||
67 | : | ||
68 | : "r" (ttbl), "r" (ttbh)); | ||
69 | isb(); | 64 | isb(); |
70 | } | 65 | } |
71 | #else | 66 | #else |
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 9a5cdc01fcdf..2ffee02d1d5c 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
@@ -36,12 +36,13 @@ | |||
36 | 36 | ||
37 | #include "mm.h" | 37 | #include "mm.h" |
38 | 38 | ||
39 | static unsigned long phys_initrd_start __initdata = 0; | 39 | static phys_addr_t phys_initrd_start __initdata = 0; |
40 | static unsigned long phys_initrd_size __initdata = 0; | 40 | static unsigned long phys_initrd_size __initdata = 0; |
41 | 41 | ||
42 | static int __init early_initrd(char *p) | 42 | static int __init early_initrd(char *p) |
43 | { | 43 | { |
44 | unsigned long start, size; | 44 | phys_addr_t start; |
45 | unsigned long size; | ||
45 | char *endp; | 46 | char *endp; |
46 | 47 | ||
47 | start = memparse(p, &endp); | 48 | start = memparse(p, &endp); |
@@ -350,14 +351,14 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc) | |||
350 | #ifdef CONFIG_BLK_DEV_INITRD | 351 | #ifdef CONFIG_BLK_DEV_INITRD |
351 | if (phys_initrd_size && | 352 | if (phys_initrd_size && |
352 | !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) { | 353 | !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) { |
353 | pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n", | 354 | pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n", |
354 | phys_initrd_start, phys_initrd_size); | 355 | (u64)phys_initrd_start, phys_initrd_size); |
355 | phys_initrd_start = phys_initrd_size = 0; | 356 | phys_initrd_start = phys_initrd_size = 0; |
356 | } | 357 | } |
357 | if (phys_initrd_size && | 358 | if (phys_initrd_size && |
358 | memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) { | 359 | memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) { |
359 | pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n", | 360 | pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n", |
360 | phys_initrd_start, phys_initrd_size); | 361 | (u64)phys_initrd_start, phys_initrd_size); |
361 | phys_initrd_start = phys_initrd_size = 0; | 362 | phys_initrd_start = phys_initrd_size = 0; |
362 | } | 363 | } |
363 | if (phys_initrd_size) { | 364 | if (phys_initrd_size) { |
@@ -442,7 +443,7 @@ static inline void | |||
442 | free_memmap(unsigned long start_pfn, unsigned long end_pfn) | 443 | free_memmap(unsigned long start_pfn, unsigned long end_pfn) |
443 | { | 444 | { |
444 | struct page *start_pg, *end_pg; | 445 | struct page *start_pg, *end_pg; |
445 | unsigned long pg, pgend; | 446 | phys_addr_t pg, pgend; |
446 | 447 | ||
447 | /* | 448 | /* |
448 | * Convert start_pfn/end_pfn to a struct page pointer. | 449 | * Convert start_pfn/end_pfn to a struct page pointer. |
@@ -454,8 +455,8 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn) | |||
454 | * Convert to physical addresses, and | 455 | * Convert to physical addresses, and |
455 | * round start upwards and end downwards. | 456 | * round start upwards and end downwards. |
456 | */ | 457 | */ |
457 | pg = (unsigned long)PAGE_ALIGN(__pa(start_pg)); | 458 | pg = PAGE_ALIGN(__pa(start_pg)); |
458 | pgend = (unsigned long)__pa(end_pg) & PAGE_MASK; | 459 | pgend = __pa(end_pg) & PAGE_MASK; |
459 | 460 | ||
460 | /* | 461 | /* |
461 | * If there are free pages between these, | 462 | * If there are free pages between these, |
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index e0d8565671a6..280f91d02de2 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -673,7 +673,8 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, | |||
673 | } | 673 | } |
674 | 674 | ||
675 | static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, | 675 | static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, |
676 | unsigned long end, unsigned long phys, const struct mem_type *type) | 676 | unsigned long end, phys_addr_t phys, |
677 | const struct mem_type *type) | ||
677 | { | 678 | { |
678 | pud_t *pud = pud_offset(pgd, addr); | 679 | pud_t *pud = pud_offset(pgd, addr); |
679 | unsigned long next; | 680 | unsigned long next; |
@@ -987,27 +988,28 @@ phys_addr_t arm_lowmem_limit __initdata = 0; | |||
987 | void __init sanity_check_meminfo(void) | 988 | void __init sanity_check_meminfo(void) |
988 | { | 989 | { |
989 | int i, j, highmem = 0; | 990 | int i, j, highmem = 0; |
991 | phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1; | ||
990 | 992 | ||
991 | for (i = 0, j = 0; i < meminfo.nr_banks; i++) { | 993 | for (i = 0, j = 0; i < meminfo.nr_banks; i++) { |
992 | struct membank *bank = &meminfo.bank[j]; | 994 | struct membank *bank = &meminfo.bank[j]; |
993 | *bank = meminfo.bank[i]; | 995 | phys_addr_t size_limit; |
994 | 996 | ||
995 | if (bank->start > ULONG_MAX) | 997 | *bank = meminfo.bank[i]; |
996 | highmem = 1; | 998 | size_limit = bank->size; |
997 | 999 | ||
998 | #ifdef CONFIG_HIGHMEM | 1000 | if (bank->start >= vmalloc_limit) |
999 | if (__va(bank->start) >= vmalloc_min || | ||
1000 | __va(bank->start) < (void *)PAGE_OFFSET) | ||
1001 | highmem = 1; | 1001 | highmem = 1; |
1002 | else | ||
1003 | size_limit = vmalloc_limit - bank->start; | ||
1002 | 1004 | ||
1003 | bank->highmem = highmem; | 1005 | bank->highmem = highmem; |
1004 | 1006 | ||
1007 | #ifdef CONFIG_HIGHMEM | ||
1005 | /* | 1008 | /* |
1006 | * Split those memory banks which are partially overlapping | 1009 | * Split those memory banks which are partially overlapping |
1007 | * the vmalloc area greatly simplifying things later. | 1010 | * the vmalloc area greatly simplifying things later. |
1008 | */ | 1011 | */ |
1009 | if (!highmem && __va(bank->start) < vmalloc_min && | 1012 | if (!highmem && bank->size > size_limit) { |
1010 | bank->size > vmalloc_min - __va(bank->start)) { | ||
1011 | if (meminfo.nr_banks >= NR_BANKS) { | 1013 | if (meminfo.nr_banks >= NR_BANKS) { |
1012 | printk(KERN_CRIT "NR_BANKS too low, " | 1014 | printk(KERN_CRIT "NR_BANKS too low, " |
1013 | "ignoring high memory\n"); | 1015 | "ignoring high memory\n"); |
@@ -1016,16 +1018,14 @@ void __init sanity_check_meminfo(void) | |||
1016 | (meminfo.nr_banks - i) * sizeof(*bank)); | 1018 | (meminfo.nr_banks - i) * sizeof(*bank)); |
1017 | meminfo.nr_banks++; | 1019 | meminfo.nr_banks++; |
1018 | i++; | 1020 | i++; |
1019 | bank[1].size -= vmalloc_min - __va(bank->start); | 1021 | bank[1].size -= size_limit; |
1020 | bank[1].start = __pa(vmalloc_min - 1) + 1; | 1022 | bank[1].start = vmalloc_limit; |
1021 | bank[1].highmem = highmem = 1; | 1023 | bank[1].highmem = highmem = 1; |
1022 | j++; | 1024 | j++; |
1023 | } | 1025 | } |
1024 | bank->size = vmalloc_min - __va(bank->start); | 1026 | bank->size = size_limit; |
1025 | } | 1027 | } |
1026 | #else | 1028 | #else |
1027 | bank->highmem = highmem; | ||
1028 | |||
1029 | /* | 1029 | /* |
1030 | * Highmem banks not allowed with !CONFIG_HIGHMEM. | 1030 | * Highmem banks not allowed with !CONFIG_HIGHMEM. |
1031 | */ | 1031 | */ |
@@ -1038,31 +1038,16 @@ void __init sanity_check_meminfo(void) | |||
1038 | } | 1038 | } |
1039 | 1039 | ||
1040 | /* | 1040 | /* |
1041 | * Check whether this memory bank would entirely overlap | ||
1042 | * the vmalloc area. | ||
1043 | */ | ||
1044 | if (__va(bank->start) >= vmalloc_min || | ||
1045 | __va(bank->start) < (void *)PAGE_OFFSET) { | ||
1046 | printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx " | ||
1047 | "(vmalloc region overlap).\n", | ||
1048 | (unsigned long long)bank->start, | ||
1049 | (unsigned long long)bank->start + bank->size - 1); | ||
1050 | continue; | ||
1051 | } | ||
1052 | |||
1053 | /* | ||
1054 | * Check whether this memory bank would partially overlap | 1041 | * Check whether this memory bank would partially overlap |
1055 | * the vmalloc area. | 1042 | * the vmalloc area. |
1056 | */ | 1043 | */ |
1057 | if (__va(bank->start + bank->size - 1) >= vmalloc_min || | 1044 | if (bank->size > size_limit) { |
1058 | __va(bank->start + bank->size - 1) <= __va(bank->start)) { | ||
1059 | unsigned long newsize = vmalloc_min - __va(bank->start); | ||
1060 | printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx " | 1045 | printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx " |
1061 | "to -%.8llx (vmalloc region overlap).\n", | 1046 | "to -%.8llx (vmalloc region overlap).\n", |
1062 | (unsigned long long)bank->start, | 1047 | (unsigned long long)bank->start, |
1063 | (unsigned long long)bank->start + bank->size - 1, | 1048 | (unsigned long long)bank->start + bank->size - 1, |
1064 | (unsigned long long)bank->start + newsize - 1); | 1049 | (unsigned long long)bank->start + size_limit - 1); |
1065 | bank->size = newsize; | 1050 | bank->size = size_limit; |
1066 | } | 1051 | } |
1067 | #endif | 1052 | #endif |
1068 | if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit) | 1053 | if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit) |
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S index 363027e811d6..5ffe1956c6d9 100644 --- a/arch/arm/mm/proc-v7-3level.S +++ b/arch/arm/mm/proc-v7-3level.S | |||
@@ -39,6 +39,14 @@ | |||
39 | #define TTB_FLAGS_SMP (TTB_IRGN_WBWA|TTB_S|TTB_RGN_OC_WBWA) | 39 | #define TTB_FLAGS_SMP (TTB_IRGN_WBWA|TTB_S|TTB_RGN_OC_WBWA) |
40 | #define PMD_FLAGS_SMP (PMD_SECT_WBWA|PMD_SECT_S) | 40 | #define PMD_FLAGS_SMP (PMD_SECT_WBWA|PMD_SECT_S) |
41 | 41 | ||
42 | #ifndef __ARMEB__ | ||
43 | # define rpgdl r0 | ||
44 | # define rpgdh r1 | ||
45 | #else | ||
46 | # define rpgdl r1 | ||
47 | # define rpgdh r0 | ||
48 | #endif | ||
49 | |||
42 | /* | 50 | /* |
43 | * cpu_v7_switch_mm(pgd_phys, tsk) | 51 | * cpu_v7_switch_mm(pgd_phys, tsk) |
44 | * | 52 | * |
@@ -47,10 +55,10 @@ | |||
47 | */ | 55 | */ |
48 | ENTRY(cpu_v7_switch_mm) | 56 | ENTRY(cpu_v7_switch_mm) |
49 | #ifdef CONFIG_MMU | 57 | #ifdef CONFIG_MMU |
50 | mmid r1, r1 @ get mm->context.id | 58 | mmid r2, r2 |
51 | asid r3, r1 | 59 | asid r2, r2 |
52 | mov r3, r3, lsl #(48 - 32) @ ASID | 60 | orr rpgdh, rpgdh, r2, lsl #(48 - 32) @ upper 32-bits of pgd |
53 | mcrr p15, 0, r0, r3, c2 @ set TTB 0 | 61 | mcrr p15, 0, rpgdl, rpgdh, c2 @ set TTB 0 |
54 | isb | 62 | isb |
55 | #endif | 63 | #endif |
56 | mov pc, lr | 64 | mov pc, lr |
@@ -106,7 +114,8 @@ ENDPROC(cpu_v7_set_pte_ext) | |||
106 | */ | 114 | */ |
107 | .macro v7_ttb_setup, zero, ttbr0, ttbr1, tmp | 115 | .macro v7_ttb_setup, zero, ttbr0, ttbr1, tmp |
108 | ldr \tmp, =swapper_pg_dir @ swapper_pg_dir virtual address | 116 | ldr \tmp, =swapper_pg_dir @ swapper_pg_dir virtual address |
109 | cmp \ttbr1, \tmp @ PHYS_OFFSET > PAGE_OFFSET? (branch below) | 117 | mov \tmp, \tmp, lsr #ARCH_PGD_SHIFT |
118 | cmp \ttbr1, \tmp @ PHYS_OFFSET > PAGE_OFFSET? | ||
110 | mrc p15, 0, \tmp, c2, c0, 2 @ TTB control register | 119 | mrc p15, 0, \tmp, c2, c0, 2 @ TTB control register |
111 | orr \tmp, \tmp, #TTB_EAE | 120 | orr \tmp, \tmp, #TTB_EAE |
112 | ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP) | 121 | ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP) |
@@ -114,27 +123,21 @@ ENDPROC(cpu_v7_set_pte_ext) | |||
114 | ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP << 16) | 123 | ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP << 16) |
115 | ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP << 16) | 124 | ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP << 16) |
116 | /* | 125 | /* |
117 | * TTBR0/TTBR1 split (PAGE_OFFSET): | 126 | * Only use split TTBRs if PHYS_OFFSET <= PAGE_OFFSET (cmp above), |
118 | * 0x40000000: T0SZ = 2, T1SZ = 0 (not used) | 127 | * otherwise booting secondary CPUs would end up using TTBR1 for the |
119 | * 0x80000000: T0SZ = 0, T1SZ = 1 | 128 | * identity mapping set up in TTBR0. |
120 | * 0xc0000000: T0SZ = 0, T1SZ = 2 | ||
121 | * | ||
122 | * Only use this feature if PHYS_OFFSET <= PAGE_OFFSET, otherwise | ||
123 | * booting secondary CPUs would end up using TTBR1 for the identity | ||
124 | * mapping set up in TTBR0. | ||
125 | */ | 129 | */ |
126 | bhi 9001f @ PHYS_OFFSET > PAGE_OFFSET? | 130 | orrls \tmp, \tmp, #TTBR1_SIZE @ TTBCR.T1SZ |
127 | orr \tmp, \tmp, #(((PAGE_OFFSET >> 30) - 1) << 16) @ TTBCR.T1SZ | 131 | mcr p15, 0, \tmp, c2, c0, 2 @ TTBCR |
128 | #if defined CONFIG_VMSPLIT_2G | 132 | mov \tmp, \ttbr1, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits |
129 | /* PAGE_OFFSET == 0x80000000, T1SZ == 1 */ | 133 | mov \ttbr1, \ttbr1, lsl #ARCH_PGD_SHIFT @ lower bits |
130 | add \ttbr1, \ttbr1, #1 << 4 @ skip two L1 entries | 134 | addls \ttbr1, \ttbr1, #TTBR1_OFFSET |
131 | #elif defined CONFIG_VMSPLIT_3G | 135 | mcrr p15, 1, \ttbr1, \zero, c2 @ load TTBR1 |
132 | /* PAGE_OFFSET == 0xc0000000, T1SZ == 2 */ | 136 | mov \tmp, \ttbr0, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits |
133 | add \ttbr1, \ttbr1, #4096 * (1 + 3) @ only L2 used, skip pgd+3*pmd | 137 | mov \ttbr0, \ttbr0, lsl #ARCH_PGD_SHIFT @ lower bits |
134 | #endif | 138 | mcrr p15, 0, \ttbr0, \zero, c2 @ load TTBR0 |
135 | /* CONFIG_VMSPLIT_1G does not need TTBR1 adjustment */ | 139 | mcrr p15, 1, \ttbr1, \zero, c2 @ load TTBR1 |
136 | 9001: mcr p15, 0, \tmp, c2, c0, 2 @ TTB control register | 140 | mcrr p15, 0, \ttbr0, \zero, c2 @ load TTBR0 |
137 | mcrr p15, 1, \ttbr1, \zero, c2 @ load TTBR1 | ||
138 | .endm | 141 | .endm |
139 | 142 | ||
140 | __CPUINIT | 143 | __CPUINIT |