Diffstat (limited to 'arch/arm/mm')

 arch/arm/mm/cache-feroceon-l2.c |   4
 arch/arm/mm/cache-xsc3l2.c      |   4
 arch/arm/mm/mmu.c               | 117
 arch/arm/mm/proc-v7.S           |  12
 arch/arm/mm/proc-xsc3.S         |   2
 5 files changed, 96 insertions, 43 deletions
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index 13cdae8b0d44..80cd207cbaea 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -150,7 +150,7 @@ static void feroceon_l2_inv_range(unsigned long start, unsigned long end)
 	/*
 	 * Clean and invalidate partial last cache line.
 	 */
-	if (end & (CACHE_LINE_SIZE - 1)) {
+	if (start < end && end & (CACHE_LINE_SIZE - 1)) {
 		l2_clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
 		end &= ~(CACHE_LINE_SIZE - 1);
 	}
@@ -158,7 +158,7 @@ static void feroceon_l2_inv_range(unsigned long start, unsigned long end)
 	/*
 	 * Invalidate all full cache lines between 'start' and 'end'.
 	 */
-	while (start != end) {
+	while (start < end) {
 		unsigned long range_end = calc_range_end(start, end);
 		l2_inv_pa_range(start, range_end - CACHE_LINE_SIZE);
 		start = range_end;
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
index 10b1bae1a258..464de893a988 100644
--- a/arch/arm/mm/cache-xsc3l2.c
+++ b/arch/arm/mm/cache-xsc3l2.c
@@ -98,7 +98,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 	/*
 	 * Clean and invalidate partial last cache line.
 	 */
-	if (end & (CACHE_LINE_SIZE - 1)) {
+	if (start < end && (end & (CACHE_LINE_SIZE - 1))) {
 		xsc3_l2_clean_pa(end & ~(CACHE_LINE_SIZE - 1));
 		xsc3_l2_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
 		end &= ~(CACHE_LINE_SIZE - 1);
@@ -107,7 +107,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 	/*
 	 * Invalidate all full cache lines between 'start' and 'end'.
 	 */
-	while (start != end) {
+	while (start < end) {
 		xsc3_l2_inv_pa(start);
 		start += CACHE_LINE_SIZE;
 	}
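Both files receive the same pair of fixes: the partial-last-line clean is now skipped unless start < end, and the full-line walk terminates on start < end instead of start != end. Below is a standalone toy model (not kernel code; a hypothetical inv_range() with printf in place of the real cache operations) of why the old bounds misbehave on a range smaller than one cache line:

    #include <stdio.h>

    #define CACHE_LINE_SIZE 32UL

    /* Toy model only: prints addresses instead of issuing cache ops. */
    static void inv_range(unsigned long start, unsigned long end)
    {
            /* Round a partial first line up, as the real functions do
             * after handling it. */
            if (start & (CACHE_LINE_SIZE - 1))
                    start = (start | (CACHE_LINE_SIZE - 1)) + 1;

            /* Fixed: without 'start < end', a sub-line range would
             * clean and truncate a line outside [start, end). */
            if (start < end && (end & (CACHE_LINE_SIZE - 1)))
                    end &= ~(CACHE_LINE_SIZE - 1);

            /* Fixed: with 'start != end' and start already rounded
             * past end, this loop would run away. */
            while (start < end) {
                    printf("invalidate line %#lx\n", start);
                    start += CACHE_LINE_SIZE;
            }
    }

    int main(void)
    {
            inv_range(0x1008, 0x1010);  /* sub-line range: nothing to do */
            inv_range(0x1000, 0x1080);  /* four full lines */
            return 0;
    }

For the first call, rounding leaves start (0x1020) beyond end (0x1010); the old start != end test would never be satisfied, while start < end exits immediately.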
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 8ba754064559..7f36c825718d 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -180,20 +180,20 @@ void adjust_cr(unsigned long mask, unsigned long set)
 #endif
 
 #define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
-#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_XN|PMD_SECT_AP_WRITE
+#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 
 static struct mem_type mem_types[] = {
 	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
 				  L_PTE_SHARED,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_UNCACHED,
+		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_TEX(2),
+		.prot_sect	= PROT_SECT_DEVICE,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
@@ -205,7 +205,13 @@ static struct mem_type mem_types[] = {
 	[MT_DEVICE_WC] = {	/* ioremap_wc */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE,
+		.prot_sect	= PROT_SECT_DEVICE,
+		.domain		= DOMAIN_IO,
+	},
+	[MT_UNCACHED] = {
+		.prot_pte	= PROT_PTE_DEVICE,
+		.prot_l1	= PMD_TYPE_TABLE,
+		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_CACHECLEAN] = {
@@ -273,22 +279,23 @@ static void __init build_mem_type_table(void)
 #endif
 
 	/*
-	 * On non-Xscale3 ARMv5-and-older systems, use CB=01
-	 * (Uncached/Buffered) for ioremap_wc() mappings. On XScale3
-	 * and ARMv6+, use TEXCB=00100 mappings (Inner/Outer Uncacheable
-	 * in xsc3 parlance, Uncached Normal in ARMv6 parlance).
+	 * Strip out features not present on earlier architectures.
+	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
+	 * without extended page tables don't have the 'Shared' bit.
 	 */
-	if (cpu_is_xsc3() || cpu_arch >= CPU_ARCH_ARMv6) {
-		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
-		mem_types[MT_DEVICE_WC].prot_sect &= ~PMD_SECT_BUFFERABLE;
-	}
+	if (cpu_arch < CPU_ARCH_ARMv5)
+		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
+	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
+		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+			mem_types[i].prot_sect &= ~PMD_SECT_S;
 
 	/*
-	 * ARMv5 and lower, bit 4 must be set for page tables.
-	 * (was: cache "update-able on write" bit on ARM610)
-	 * However, Xscale cores require this bit to be cleared.
+	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
+	 * "update-able on write" bit on ARM610).  However, Xscale and
+	 * Xscale3 require this bit to be cleared.
 	 */
-	if (cpu_is_xscale()) {
+	if (cpu_is_xscale() || cpu_is_xsc3()) {
 		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 			mem_types[i].prot_sect &= ~PMD_BIT4;
 			mem_types[i].prot_l1 &= ~PMD_BIT4;
@@ -302,6 +309,64 @@ static void __init build_mem_type_table(void)
 		}
 	}
 
+	/*
+	 * Mark the device areas according to the CPU/architecture.
+	 */
+	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
+		if (!cpu_is_xsc3()) {
+			/*
+			 * Mark device regions on ARMv6+ as execute-never
+			 * to prevent speculative instruction fetches.
+			 */
+			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
+		}
+		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
+			/*
+			 * For ARMv7 with TEX remapping,
+			 * - shared device is SXCB=1100
+			 * - nonshared device is SXCB=0100
+			 * - write combine device mem is SXCB=0001
+			 * (Uncached Normal memory)
+			 */
+			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
+			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+		} else if (cpu_is_xsc3()) {
+			/*
+			 * For Xscale3,
+			 * - shared device is TEXCB=00101
+			 * - nonshared device is TEXCB=01000
+			 * - write combine device mem is TEXCB=00100
+			 * (Inner/Outer Uncacheable in xsc3 parlance)
+			 */
+			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
+			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+		} else {
+			/*
+			 * For ARMv6 and ARMv7 without TEX remapping,
+			 * - shared device is TEXCB=00001
+			 * - nonshared device is TEXCB=01000
+			 * - write combine device mem is TEXCB=00100
+			 * (Uncached Normal in ARMv6 parlance).
+			 */
+			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
+			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+		}
+	} else {
+		/*
+		 * On others, write combining is "Uncached/Buffered"
+		 */
+		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+	}
+
+	/*
+	 * Now deal with the memory-type mappings
+	 */
 	cp = &cache_policies[cachepolicy];
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
 
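The SXCB/TEXCB values quoted in the comments above map directly onto bits of the level-1 section descriptor. A minimal sketch of that encoding, assuming the standard ARMv6/v7 short-descriptor layout (TEX at bits 14:12, C at bit 3, B at bit 2, S at bit 16); the SECT_* names here are illustrative stand-ins for the kernel's PMD_SECT_* macros, not the kernel's own definitions:

    /* Assumed short-descriptor section bits, for illustration. */
    #define SECT_B          (1UL << 2)              /* Bufferable */
    #define SECT_C          (1UL << 3)              /* Cacheable */
    #define SECT_XN         (1UL << 4)              /* eXecute Never */
    #define SECT_TEX(x)     ((unsigned long)(x) << 12)
    #define SECT_S          (1UL << 16)             /* Shared */

    /* ARMv6/v7 without TEX remap, matching the final else branch:
     * TEXCB=00001 -> shared device (S bit set separately via the
     *                mem_types[] table entry),
     * TEXCB=01000 -> nonshared device,
     * TEXCB=00100 -> normal uncached memory (write combine). */
    static const unsigned long dev_shared    = SECT_B;
    static const unsigned long dev_nonshared = SECT_TEX(2);
    static const unsigned long dev_wc        = SECT_TEX(1);

So PMD_SECT_BUFFERED alone gives TEXCB=00001, PMD_SECT_TEX(2) gives TEXCB=01000, and PMD_SECT_TEX(1) gives TEXCB=00100, which is how the three branches above spell out the encodings named in their comments.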
@@ -317,12 +382,8 @@ static void __init build_mem_type_table(void)
 	 * Enable CPU-specific coherency if supported.
 	 * (Only available on XSC3 at the moment.)
 	 */
-	if (arch_is_coherent()) {
-		if (cpu_is_xsc3()) {
-			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
-		}
-	}
+	if (arch_is_coherent() && cpu_is_xsc3())
+		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 
 	/*
 	 * ARMv6 and above have extended page tables.
@@ -336,11 +397,6 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 
-		/*
-		 * Mark the device area as "shared device"
-		 */
-		mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
-
 #ifdef CONFIG_SMP
 		/*
 		 * Mark memory with the "shared" attribute for SMP systems
@@ -360,9 +416,6 @@ static void __init build_mem_type_table(void)
 	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
 	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
 
-	if (cpu_arch < CPU_ARCH_ARMv5)
-		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
-
 	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
 	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
 				 L_PTE_DIRTY | L_PTE_WRITE |
@@ -654,7 +707,7 @@ static inline void prepare_page_table(struct meminfo *mi)
 	/*
 	 * Clear out all the mappings below the kernel image.
 	 */
-	for (addr = 0; addr < MODULE_START; addr += PGDIR_SIZE)
+	for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
 
 #ifdef CONFIG_XIP_KERNEL
@@ -766,7 +819,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	 */
 #ifdef CONFIG_XIP_KERNEL
 	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
-	map.virtual = MODULE_START;
+	map.virtual = MODULES_VADDR;
 	map.length = ((unsigned long)&_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
 	map.type = MT_ROM;
 	create_mapping(&map);
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 07f82db70945..4d3c0a73e7fb 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -115,7 +115,7 @@ ENTRY(cpu_v7_set_pte_ext)
 	orr	r3, r3, r2
 	orr	r3, r3, #PTE_EXT_AP0 | 2
 
-	tst	r2, #1 << 4
+	tst	r1, #1 << 4
 	orrne	r3, r3, #PTE_EXT_TEX(1)
 
 	tst	r1, #L_PTE_WRITE
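Under the ARM calling convention, cpu_v7_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext) receives the page-table pointer in r0, the Linux PTE value in r1, and the extension bits in r2, so the bit-4 test belongs on r1. A rough C paraphrase of the corrected lines (not kernel code; names and macro values assumed, for illustration only):

    #define PTE_EXT_AP0     (1UL << 4)      /* assumed hardware PTE bits */
    #define PTE_EXT_TEX(x)  ((unsigned long)(x) << 6)

    /* 'pte' plays the role of r1, 'ext' of r2 in the hunk above. */
    static unsigned long v7_hw_prot(unsigned long pte, unsigned long ext)
    {
            unsigned long hw = ext | PTE_EXT_AP0 | 2;   /* small page */

            if (pte & (1UL << 4))   /* fixed: test the PTE, not ext */
                    hw |= PTE_EXT_TEX(1);
            return hw;
    }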
@@ -192,11 +192,11 @@ __v7_setup:
 	mov	pc, lr				@ return to head.S:__ret
 ENDPROC(__v7_setup)
 
-	/*
-	 *         V X F   I D LR
-	 * .... ...E PUI. .T.T 4RVI ZFRS BLDP WCAM
-	 * rrrr rrrx xxx0 0101 xxxx xxxx x111 xxxx < forced
-	 *         0 110       0011 1.00 .111 1101 < we want
+	/*   AT
+	 *  TFR   EV X F   I D LR
+	 * .EEE ..EE PUI. .T.T 4RVI ZFRS BLDP WCAM
+	 * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
+	 *    1    0 110       0011 1.00 .111 1101 < we want
 	 */
 	.type	v7_crval, #object
 v7_crval:
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 04dc8b65401b..8f6cf56c11c0 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -349,7 +349,7 @@ ENTRY(cpu_xsc3_switch_mm)
 cpu_xsc3_mt_table:
 	.long	0x00				@ L_PTE_MT_UNCACHED
 	.long	PTE_EXT_TEX(1)			@ L_PTE_MT_BUFFERABLE
-	.long	PTE_CACHEABLE			@ L_PTE_MT_WRITETHROUGH
+	.long	PTE_EXT_TEX(5) | PTE_CACHEABLE	@ L_PTE_MT_WRITETHROUGH
 	.long	PTE_CACHEABLE | PTE_BUFFERABLE	@ L_PTE_MT_WRITEBACK
 	.long	PTE_EXT_TEX(1) | PTE_BUFFERABLE	@ L_PTE_MT_DEV_SHARED
 	.long	0x00				@ unused
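This table translates the L_PTE_MT field of a Linux PTE into hardware TEX/C/B bits; the writethrough entry now sets TEX(5) alongside the C bit. A sketch in C of how the set_pte_ext code consumes it (field position, mask, and bit values all assumed for illustration; the table is truncated to the rows shown above):

    #define PTE_BUFFERABLE  (1UL << 2)      /* assumed values */
    #define PTE_CACHEABLE   (1UL << 3)
    #define PTE_EXT_TEX(x)  ((unsigned long)(x) << 6)

    static const unsigned long mt_table[] = {
            0x00,                               /* L_PTE_MT_UNCACHED */
            PTE_EXT_TEX(1),                     /* L_PTE_MT_BUFFERABLE */
            PTE_EXT_TEX(5) | PTE_CACHEABLE,     /* L_PTE_MT_WRITETHROUGH */
            PTE_CACHEABLE | PTE_BUFFERABLE,     /* L_PTE_MT_WRITEBACK */
            PTE_EXT_TEX(1) | PTE_BUFFERABLE,    /* L_PTE_MT_DEV_SHARED */
            0x00,                               /* unused */
    };

    /* The L_PTE_MT_* constants are multiples of 4, so the masked
     * field doubles as a byte offset into the table (assumed mask
     * 0x3c, i.e. bits 5:2 of the Linux PTE). */
    static unsigned long mt_bits(unsigned long pte)
    {
            return mt_table[(pte & 0x3c) >> 2];
    }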