Diffstat (limited to 'arch/arm/mm')
 arch/arm/mm/cache-v6.S |  1
 arch/arm/mm/cache-v7.S |  2
 arch/arm/mm/context.c  | 17
 arch/arm/mm/init.c     | 15
 arch/arm/mm/mmu.c      |  9
 arch/arm/mm/proc-v6.S  |  4
 arch/arm/mm/proc-v7.S  | 14
 7 files changed, 41 insertions(+), 21 deletions(-)
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index c96fa1b3f49f..73b4a8b66a57 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -176,6 +176,7 @@ ENDPROC(v6_coherent_kern_range)
  */
 ENTRY(v6_flush_kern_dcache_area)
 	add	r1, r0, r1
+	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 1:
 #ifdef HARVARD_CACHE
 	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index dc18d81ef8ce..d32f02b61866 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -221,6 +221,8 @@ ENDPROC(v7_coherent_user_range)
 ENTRY(v7_flush_kern_dcache_area)
 	dcache_line_size r2, r3
 	add	r1, r0, r1
+	sub	r3, r2, #1
+	bic	r0, r0, r3
 1:
 	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line / unified line
 	add	r0, r0, r2
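
Both cache hunks above fix the same latent bug: v6_flush_kern_dcache_area() and v7_flush_kern_dcache_area() never aligned the start address to a cache line, so a misaligned call could step past the last line of the range and leave part of the requested area unflushed. v6 masks with the build-time D_CACHE_LINE_SIZE constant; v7 builds the mask at run time from the line size that dcache_line_size reads out of the hardware. A minimal standalone C sketch of the alignment arithmetic (line size and addresses are made up):

	#include <stdio.h>

	int main(void)
	{
		unsigned long line = 64;		/* hypothetical D-cache line size */
		unsigned long start = 0x1234, len = 0x100;
		unsigned long end = start + len;	/* "add r1, r0, r1" */

		/* "bic r0, r0, #mask": round start down so the loop's
		 * stride covers every line in [start, end) */
		start &= ~(line - 1);
		for (unsigned long p = start; p < end; p += line)
			printf("clean+invalidate line at %#lx\n", p);
		return 0;
	}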
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index b0ee9ba3cfab..8bfae964b133 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -24,9 +24,7 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm);
 
 /*
  * We fork()ed a process, and we need a new context for the child
- * to run in. We reserve version 0 for initial tasks so we will
- * always allocate an ASID. The ASID 0 is reserved for the TTBR
- * register changing sequence.
+ * to run in.
  */
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
@@ -36,8 +34,11 @@ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 
 static void flush_context(void)
 {
-	/* set the reserved ASID before flushing the TLB */
-	asm("mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (0));
+	u32 ttb;
+	/* Copy TTBR1 into TTBR0 */
+	asm volatile("mrc	p15, 0, %0, c2, c0, 1\n"
+		     "mcr	p15, 0, %0, c2, c0, 0"
+		     : "=r" (ttb));
 	isb();
 	local_flush_tlb_all();
 	if (icache_is_vivt_asid_tagged()) {
@@ -93,7 +94,7 @@ static void reset_context(void *info)
 		return;
 
 	smp_rmb();
-	asid = cpu_last_asid + cpu + 1;
+	asid = cpu_last_asid + cpu;
 
 	flush_context();
 	set_mm_context(mm, asid);
@@ -143,13 +144,13 @@ void __new_context(struct mm_struct *mm)
 	 * to start a new version and flush the TLB.
 	 */
 	if (unlikely((asid & ~ASID_MASK) == 0)) {
-		asid = cpu_last_asid + smp_processor_id() + 1;
+		asid = cpu_last_asid + smp_processor_id();
 		flush_context();
 #ifdef CONFIG_SMP
 		smp_wmb();
 		smp_call_function(reset_context, NULL, 1);
 #endif
-		cpu_last_asid += NR_CPUS;
+		cpu_last_asid += NR_CPUS - 1;
 	}
 
 	set_mm_context(mm, asid);
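
Taken together, the context.c hunks retire the reserved-ASID scheme: flush_context() now parks TTBR0 on the global-only page tables that TTBR1 references while the TLB is flushed, so ASID 0 no longer has to be kept free and the per-generation numbering loses its +1 offset. A standalone sketch of the new rollover arithmetic (hypothetical values; 8-bit ASID field, 2 CPUs):

	#include <stdio.h>

	#define ASID_BITS 8
	#define ASID_MASK (~0UL << ASID_BITS)
	#define NR_CPUS   2

	int main(void)
	{
		/* pretend we are at the very end of generation 2 */
		unsigned long cpu_last_asid = (3UL << ASID_BITS) - 1;
		unsigned long asid = ++cpu_last_asid;	/* normal allocation path */

		if ((asid & ~ASID_MASK) == 0) {		/* low bits hit 0: rollover */
			/* each CPU now takes base + cpu_id; ASID 0 is no longer skipped */
			for (int cpu = 0; cpu < NR_CPUS; cpu++)
				printf("cpu%d -> ASID %#lx\n", cpu, cpu_last_asid + cpu);
			cpu_last_asid += NR_CPUS - 1;	/* was "+= NR_CPUS" before */
		}
		return 0;
	}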
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 3f17ea146f0e..2c2cce9cd8c8 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -15,12 +15,14 @@
 #include <linux/mman.h>
 #include <linux/nodemask.h>
 #include <linux/initrd.h>
+#include <linux/of_fdt.h>
 #include <linux/highmem.h>
 #include <linux/gfp.h>
 #include <linux/memblock.h>
 #include <linux/sort.h>
 
 #include <asm/mach-types.h>
+#include <asm/prom.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/sizes.h>
@@ -71,6 +73,14 @@ static int __init parse_tag_initrd2(const struct tag *tag)
 
 __tagtable(ATAG_INITRD2, parse_tag_initrd2);
 
+#ifdef CONFIG_OF_FLATTREE
+void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end)
+{
+	phys_initrd_start = start;
+	phys_initrd_size = end - start;
+}
+#endif /* CONFIG_OF_FLATTREE */
+
 /*
  * This keeps memory configuration data used by a couple memory
  * initialization functions, as well as show_mem() for the skipping
@@ -273,13 +283,15 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
 	free_area_init_node(0, zone_size, min, zhole_size);
 }
 
-#ifndef CONFIG_SPARSEMEM
+#ifdef CONFIG_HAVE_ARCH_PFN_VALID
 int pfn_valid(unsigned long pfn)
 {
 	return memblock_is_memory(pfn << PAGE_SHIFT);
 }
 EXPORT_SYMBOL(pfn_valid);
+#endif
 
+#ifndef CONFIG_SPARSEMEM
 static void arm_memory_present(void)
 {
 }
@@ -334,6 +346,7 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 #endif
 
 	arm_mm_memblock_reserve();
+	arm_dt_memblock_reserve();
 
 	/* reserve any platform specific memblock areas */
 	if (mdesc->reserve)
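
The init.c changes are two independent pieces of wiring: the CONFIG_OF_FLATTREE hook and arm_dt_memblock_reserve() plumb in flattened-device-tree support, and pfn_valid() is now gated on CONFIG_HAVE_ARCH_PFN_VALID instead of !CONFIG_SPARSEMEM. The function itself just asks memblock whether the pfn's physical address lies in registered memory; a simplified standalone model (the bank range and PAGE_SHIFT value are made up):

	#include <stdbool.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12	/* hypothetical 4K pages */

	/* stand-in for memblock_is_memory(): one fictional DRAM bank */
	static bool memblock_is_memory(unsigned long long phys)
	{
		return phys >= 0x80000000ULL && phys < 0xa0000000ULL;
	}

	static bool pfn_valid(unsigned long pfn)
	{
		/* valid iff the pfn's physical address is covered by memblock */
		return memblock_is_memory((unsigned long long)pfn << PAGE_SHIFT);
	}

	int main(void)
	{
		printf("%d %d\n", pfn_valid(0x80000), pfn_valid(0xb0000));	/* 1 0 */
		return 0;
	}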
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 08a92368d9d3..9d9e736c2b4f 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -763,15 +763,12 @@ static void __init sanity_check_meminfo(void)
 {
 	int i, j, highmem = 0;
 
-	lowmem_limit = __pa(vmalloc_min - 1) + 1;
-	memblock_set_current_limit(lowmem_limit);
-
 	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
 		struct membank *bank = &meminfo.bank[j];
 		*bank = meminfo.bank[i];
 
 #ifdef CONFIG_HIGHMEM
-		if (__va(bank->start) > vmalloc_min ||
+		if (__va(bank->start) >= vmalloc_min ||
 		    __va(bank->start) < (void *)PAGE_OFFSET)
 			highmem = 1;
 
@@ -829,6 +826,9 @@ static void __init sanity_check_meminfo(void)
 			bank->size = newsize;
 		}
 #endif
+		if (!bank->highmem && bank->start + bank->size > lowmem_limit)
+			lowmem_limit = bank->start + bank->size;
+
 		j++;
 	}
 #ifdef CONFIG_HIGHMEM
@@ -852,6 +852,7 @@ static void __init sanity_check_meminfo(void)
 	}
 #endif
 	meminfo.nr_banks = j;
+	memblock_set_current_limit(lowmem_limit);
 }
 
 static inline void prepare_page_table(void)
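
The three mmu.c hunks relocate the lowmem_limit computation: rather than fixing it to the vmalloc boundary before the scan, sanity_check_meminfo() now derives it from the highest non-highmem bank and only caps memblock allocations once the banks are settled. As a standalone C restatement of the new loop body (struct layout and data are illustrative):

	#include <stdio.h>

	struct membank { unsigned long long start, size; int highmem; };

	/* lowmem_limit = highest end address of any non-highmem bank */
	static unsigned long long compute_lowmem_limit(const struct membank *bank, int n)
	{
		unsigned long long limit = 0;

		for (int i = 0; i < n; i++)
			if (!bank[i].highmem && bank[i].start + bank[i].size > limit)
				limit = bank[i].start + bank[i].size;
		return limit;	/* fed to memblock_set_current_limit() at the end */
	}

	int main(void)
	{
		struct membank banks[] = {
			{ 0x80000000ULL, 0x10000000ULL, 0 },	/* fictional lowmem bank */
			{ 0xa0000000ULL, 0x10000000ULL, 1 },	/* fictional highmem bank */
		};

		printf("lowmem_limit = %#llx\n", compute_lowmem_limit(banks, 2));
		return 0;
	}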
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index ab17cc0d3fa7..1d2b8451bf25 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -213,7 +213,9 @@ __v6_setup:
 	mcr	p15, 0, r0, c2, c0, 2		@ TTB control register
 	ALT_SMP(orr	r4, r4, #TTB_FLAGS_SMP)
 	ALT_UP(orr	r4, r4, #TTB_FLAGS_UP)
-	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
+	ALT_SMP(orr	r8, r8, #TTB_FLAGS_SMP)
+	ALT_UP(orr	r8, r8, #TTB_FLAGS_UP)
+	mcr	p15, 0, r8, c2, c0, 1		@ load TTB1
 #endif /* CONFIG_MMU */
 	adr	r5, v6_crval
 	ldmia	r5, {r5, r6}
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index babfba09c89f..b3b566ec83d3 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -108,18 +108,16 @@ ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_ARM_ERRATA_430973
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
 #endif
-#ifdef CONFIG_ARM_ERRATA_754322
-	dsb
-#endif
-	mcr	p15, 0, r2, c13, c0, 1		@ set reserved context ID
-	isb
-1:	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
+	mrc	p15, 0, r2, c2, c0, 1		@ load TTB 1
+	mcr	p15, 0, r2, c2, c0, 0		@ into TTB 0
 	isb
 #ifdef CONFIG_ARM_ERRATA_754322
 	dsb
 #endif
 	mcr	p15, 0, r1, c13, c0, 1		@ set context ID
 	isb
+	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
+	isb
 #endif
 	mov	pc, lr
 ENDPROC(cpu_v7_switch_mm)
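
The rewritten cpu_v7_switch_mm above is the core of the change: instead of parking the MMU on a reserved context ID while TTBR0 and the ASID are out of sync, it temporarily points TTBR0 at the tables TTBR1 references (which carry only global mappings), switches the ASID, and only then installs the new page tables. Modeled as plain C, with register writes as assignments and barriers elided (names and values hypothetical):

	#include <stdio.h>

	/* model of the coprocessor state touched by cpu_v7_switch_mm */
	struct cp15 { unsigned long ttbr0, ttbr1, contextidr; };

	static void v7_switch_mm(struct cp15 *cp, unsigned long pgd_phys, unsigned long asid)
	{
		cp->ttbr0 = cp->ttbr1;	/* 1. TTBR1 -> TTBR0: only global mappings
					      are reachable, so the stale ASID is
					      harmless during the switch */
		cp->contextidr = asid;	/* 2. switch the context ID */
		cp->ttbr0 = pgd_phys;	/* 3. install the new process's page tables */
	}

	int main(void)
	{
		struct cp15 cp = { .ttbr0 = 0x80004000, .ttbr1 = 0x80004000 };

		v7_switch_mm(&cp, 0x8abcd000, 5);	/* hypothetical pgd and ASID */
		printf("ttbr0=%#lx contextidr=%lu\n", cp.ttbr0, cp.contextidr);
		return 0;
	}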
@@ -368,7 +366,9 @@ __v7_setup:
 	mcr	p15, 0, r10, c2, c0, 2		@ TTB control register
 	ALT_SMP(orr	r4, r4, #TTB_FLAGS_SMP)
 	ALT_UP(orr	r4, r4, #TTB_FLAGS_UP)
-	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
+	ALT_SMP(orr	r8, r8, #TTB_FLAGS_SMP)
+	ALT_UP(orr	r8, r8, #TTB_FLAGS_UP)
+	mcr	p15, 0, r8, c2, c0, 1		@ load TTB1
 	ldr	r5, =PRRR			@ PRRR
 	ldr	r6, =NMRR			@ NMRR
 	mcr	p15, 0, r5, c10, c2, 0		@ write PRRR
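
Finally, both __v6_setup and __v7_setup now program TTBR1 from r8 rather than reusing the r4 value, applying the same SMP/UP table-walk flags to each register. Read alongside the context.c and cpu_v7_switch_mm changes, the intent is evidently that TTBR1 permanently reference the kernel's global-only page tables (presumably passed in r8 by the boot code, which this diff excerpt does not show), giving flush_context() and switch_mm something safe to borrow during the transition.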