Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/include/asm/memory.h  |  12
-rw-r--r--  arch/arm/include/asm/system.h  |   4
-rw-r--r--  arch/arm/kernel/elf.c          |   6
-rw-r--r--  arch/arm/kernel/module.c       |   8
-rw-r--r--  arch/arm/mm/cache-xsc3l2.c     |   4
-rw-r--r--  arch/arm/mm/mmu.c              | 111
-rw-r--r--  arch/arm/mm/proc-v7.S          |  12
7 files changed, 106 insertions, 51 deletions
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 809ff9ab853a..77764301844b 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -44,10 +44,10 @@
  * The module space lives between the addresses given by TASK_SIZE
  * and PAGE_OFFSET - it must be within 32MB of the kernel text.
  */
-#define MODULE_END	(PAGE_OFFSET)
-#define MODULE_START	(MODULE_END - 16*1048576)
+#define MODULES_END	(PAGE_OFFSET)
+#define MODULES_VADDR	(MODULES_END - 16*1048576)
 
-#if TASK_SIZE > MODULE_START
+#if TASK_SIZE > MODULES_VADDR
 #error Top of user space clashes with start of module space
 #endif
 
@@ -56,7 +56,7 @@
  * Since we use sections to map it, this macro replaces the physical address
  * with its virtual address while keeping offset from the base section.
  */
-#define XIP_VIRT_ADDR(physaddr)  (MODULE_START + ((physaddr) & 0x000fffff))
+#define XIP_VIRT_ADDR(physaddr)  (MODULES_VADDR + ((physaddr) & 0x000fffff))
 
 /*
  * Allow 16MB-aligned ioremap pages
@@ -94,8 +94,8 @@
 /*
  * The module can be at any place in ram in nommu mode.
  */
-#define MODULE_END	(END_MEM)
-#define MODULE_START	(PHYS_OFFSET)
+#define MODULES_END	(END_MEM)
+#define MODULES_VADDR	(PHYS_OFFSET)
 
 #endif /* !CONFIG_MMU */
 
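
The rename is mechanical; the window itself is unchanged: 16MB ending at PAGE_OFFSET. A quick user-space sketch of the arithmetic, assuming the common PAGE_OFFSET of 0xC0000000 (an assumed value, not part of this diff):

	#include <stdio.h>

	#define PAGE_OFFSET   0xC0000000UL	/* assumed, board-specific */
	#define MODULES_END   (PAGE_OFFSET)
	#define MODULES_VADDR (MODULES_END - 16*1048576)

	int main(void)
	{
		/* prints: modules: 0xbf000000 - 0xc0000000 */
		printf("modules: %#lx - %#lx\n", MODULES_VADDR, MODULES_END);
		return 0;
	}
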
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 7aad78420f18..568020b34e3e 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -42,6 +42,10 @@
 #define CR_U	(1 << 22)	/* Unaligned access operation		*/
 #define CR_XP	(1 << 23)	/* Extended page tables			*/
 #define CR_VE	(1 << 24)	/* Vectored interrupts			*/
+#define CR_EE	(1 << 25)	/* Exception (Big) Endian		*/
+#define CR_TRE	(1 << 28)	/* TEX remap enable			*/
+#define CR_AFE	(1 << 29)	/* Access flag enable			*/
+#define CR_TE	(1 << 30)	/* Thumb exception enable		*/
 
 /*
  * This is used to ensure the compiler did actually allocate the register we
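
The new CR_* constants are plain c1 control-register bit masks. User space cannot read CP15, so this sketch only decodes a sample value (the cr value below is made up):

	#include <stdio.h>

	#define CR_EE  (1 << 25)	/* Exception (Big) Endian */
	#define CR_TRE (1 << 28)	/* TEX remap enable */

	int main(void)
	{
		unsigned long cr = 1UL << 28;	/* pretend only TEX remap is set */

		printf("EE=%d TRE=%d\n", !!(cr & CR_EE), !!(cr & CR_TRE));
		return 0;
	}
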
diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c
index 513f332f040d..84849098c8e8 100644
--- a/arch/arm/kernel/elf.c
+++ b/arch/arm/kernel/elf.c
@@ -21,12 +21,16 @@ int elf_check_arch(const struct elf32_hdr *x)
 
 	eflags = x->e_flags;
 	if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) {
+		unsigned int flt_fmt;
+
 		/* APCS26 is only allowed if the CPU supports it */
 		if ((eflags & EF_ARM_APCS_26) && !(elf_hwcap & HWCAP_26BIT))
 			return 0;
 
+		flt_fmt = eflags & (EF_ARM_VFP_FLOAT | EF_ARM_SOFT_FLOAT);
+
 		/* VFP requires the supporting code */
-		if ((eflags & EF_ARM_VFP_FLOAT) && !(elf_hwcap & HWCAP_VFP))
+		if (flt_fmt == EF_ARM_VFP_FLOAT && !(elf_hwcap & HWCAP_VFP))
 			return 0;
 	}
 	return 1;
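
The old test rejected any binary carrying EF_ARM_VFP_FLOAT on a VFP-less CPU, even when EF_ARM_SOFT_FLOAT was set as well; the new flt_fmt comparison rejects only when VFP is the sole float format. A sketch of the predicate, using the standard EF_ARM_* flag values from elf.h:

	#include <stdio.h>

	#define EF_ARM_SOFT_FLOAT 0x200
	#define EF_ARM_VFP_FLOAT  0x400

	/* mirrors the new check: true only for pure VFP-float binaries */
	static int needs_vfp(unsigned int eflags)
	{
		unsigned int flt_fmt =
			eflags & (EF_ARM_VFP_FLOAT | EF_ARM_SOFT_FLOAT);

		return flt_fmt == EF_ARM_VFP_FLOAT;
	}

	int main(void)
	{
		printf("%d\n", needs_vfp(EF_ARM_VFP_FLOAT));	/* 1 */
		printf("%d\n", needs_vfp(EF_ARM_VFP_FLOAT |
					 EF_ARM_SOFT_FLOAT));	/* 0 */
		return 0;
	}
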
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 9203ba7d58ee..b8d965dcd6fd 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -26,12 +26,12 @@
 /*
  * The XIP kernel text is mapped in the module area for modules and
  * some other stuff to work without any indirect relocations.
- * MODULE_START is redefined here and not in asm/memory.h to avoid
+ * MODULES_VADDR is redefined here and not in asm/memory.h to avoid
  * recompiling the whole kernel when CONFIG_XIP_KERNEL is turned on/off.
  */
 extern void _etext;
-#undef MODULE_START
-#define MODULE_START	(((unsigned long)&_etext + ~PGDIR_MASK) & PGDIR_MASK)
+#undef MODULES_VADDR
+#define MODULES_VADDR	(((unsigned long)&_etext + ~PGDIR_MASK) & PGDIR_MASK)
 #endif
 
 #ifdef CONFIG_MMU
@@ -43,7 +43,7 @@ void *module_alloc(unsigned long size)
 	if (!size)
 		return NULL;
 
-	area = __get_vm_area(size, VM_ALLOC, MODULE_START, MODULE_END);
+	area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
 	if (!area)
 		return NULL;
 
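
The redefined MODULES_VADDR rounds _etext up to the next page-directory boundary, so the XIP kernel text and the module area never share a section mapping. The rounding idiom, with ARM's PGDIR_SHIFT of 21 (2MB per pgd entry) and a made-up _etext:

	#include <stdio.h>

	#define PGDIR_SHIFT 21
	#define PGDIR_MASK  (~((1UL << PGDIR_SHIFT) - 1))

	int main(void)
	{
		unsigned long etext = 0xc0234567UL;	/* sample end of text */

		/* add (2MB - 1), then mask: rounds up to 0xc0400000 */
		printf("%#lx\n", (etext + ~PGDIR_MASK) & PGDIR_MASK);
		return 0;
	}
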
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
index 10b1bae1a258..464de893a988 100644
--- a/arch/arm/mm/cache-xsc3l2.c
+++ b/arch/arm/mm/cache-xsc3l2.c
@@ -98,7 +98,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 	/*
 	 * Clean and invalidate partial last cache line.
 	 */
-	if (end & (CACHE_LINE_SIZE - 1)) {
+	if (start < end && (end & (CACHE_LINE_SIZE - 1))) {
 		xsc3_l2_clean_pa(end & ~(CACHE_LINE_SIZE - 1));
 		xsc3_l2_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
 		end &= ~(CACHE_LINE_SIZE - 1);
@@ -107,7 +107,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 	/*
 	 * Invalidate all full cache lines between 'start' and 'end'.
 	 */
-	while (start != end) {
+	while (start < end) {
 		xsc3_l2_inv_pa(start);
 		start += CACHE_LINE_SIZE;
 	}
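
Both hunks guard against a range that collapses to nothing: when the partial-first-line handling earlier in the function rounds start up past the rounded-down end, start and end are no longer line-congruent, so "start != end" never fires and the loop walks nearly the whole address space. A compact model of the fixed flow, with xsc3's 32-byte line size and the cache operations stubbed out as prints:

	#include <stdio.h>

	#define CACHE_LINE_SIZE 32

	static void inv_range(unsigned long start, unsigned long end)
	{
		/* partial first line: round start up (as the real code does) */
		if (start & (CACHE_LINE_SIZE - 1))
			start = (start | (CACHE_LINE_SIZE - 1)) + 1;

		/* partial last line: only if any range remains (the fix) */
		if (start < end && (end & (CACHE_LINE_SIZE - 1)))
			end &= ~(CACHE_LINE_SIZE - 1);

		while (start < end) {	/* "!=" would wrap on an empty range */
			printf("inv %#lx\n", start);
			start += CACHE_LINE_SIZE;
		}
	}

	int main(void)
	{
		inv_range(0x1004, 0x101c);	/* sub-line range: now a no-op */
		inv_range(0x1000, 0x1040);	/* normal range: two lines */
		return 0;
	}
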
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 8ba754064559..e63db11f16a8 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -180,20 +180,20 @@ void adjust_cr(unsigned long mask, unsigned long set)
 #endif
 
 #define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
-#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_XN|PMD_SECT_AP_WRITE
+#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 
 static struct mem_type mem_types[] = {
 	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
 				  L_PTE_SHARED,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_UNCACHED,
+		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_TEX(2),
+		.prot_sect	= PROT_SECT_DEVICE,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
@@ -205,7 +205,7 @@ static struct mem_type mem_types[] = {
 	[MT_DEVICE_WC] = {	/* ioremap_wc */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE,
+		.prot_sect	= PROT_SECT_DEVICE,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_CACHECLEAN] = {
@@ -273,22 +273,23 @@ static void __init build_mem_type_table(void)
 #endif
 
 	/*
-	 * On non-Xscale3 ARMv5-and-older systems, use CB=01
-	 * (Uncached/Buffered) for ioremap_wc() mappings.  On XScale3
-	 * and ARMv6+, use TEXCB=00100 mappings (Inner/Outer Uncacheable
-	 * in xsc3 parlance, Uncached Normal in ARMv6 parlance).
+	 * Strip out features not present on earlier architectures.
+	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
+	 * without extended page tables don't have the 'Shared' bit.
 	 */
-	if (cpu_is_xsc3() || cpu_arch >= CPU_ARCH_ARMv6) {
-		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
-		mem_types[MT_DEVICE_WC].prot_sect &= ~PMD_SECT_BUFFERABLE;
-	}
+	if (cpu_arch < CPU_ARCH_ARMv5)
+		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
+	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
+		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+			mem_types[i].prot_sect &= ~PMD_SECT_S;
 
 	/*
-	 * ARMv5 and lower, bit 4 must be set for page tables.
-	 * (was: cache "update-able on write" bit on ARM610)
-	 * However, Xscale cores require this bit to be cleared.
+	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
+	 * "update-able on write" bit on ARM610).  However, Xscale and
+	 * Xscale3 require this bit to be cleared.
 	 */
-	if (cpu_is_xscale()) {
+	if (cpu_is_xscale() || cpu_is_xsc3()) {
 		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 			mem_types[i].prot_sect &= ~PMD_BIT4;
 			mem_types[i].prot_l1 &= ~PMD_BIT4;
@@ -302,6 +303,64 @@ static void __init build_mem_type_table(void)
 		}
 	}
 
+	/*
+	 * Mark the device areas according to the CPU/architecture.
+	 */
+	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
+		if (!cpu_is_xsc3()) {
+			/*
+			 * Mark device regions on ARMv6+ as execute-never
+			 * to prevent speculative instruction fetches.
+			 */
+			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
+		}
+		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
+			/*
+			 * For ARMv7 with TEX remapping,
+			 * - shared device is SXCB=1100
+			 * - nonshared device is SXCB=0100
+			 * - write combine device mem is SXCB=0001
+			 * (Uncached Normal memory)
+			 */
+			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
+			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+		} else if (cpu_is_xsc3()) {
+			/*
+			 * For Xscale3,
+			 * - shared device is TEXCB=00101
+			 * - nonshared device is TEXCB=01000
+			 * - write combine device mem is TEXCB=00100
+			 * (Inner/Outer Uncacheable in xsc3 parlance)
+			 */
+			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
+			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+		} else {
+			/*
+			 * For ARMv6 and ARMv7 without TEX remapping,
+			 * - shared device is TEXCB=00001
+			 * - nonshared device is TEXCB=01000
+			 * - write combine device mem is TEXCB=00100
+			 * (Uncached Normal in ARMv6 parlance).
+			 */
+			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
+			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+		}
+	} else {
+		/*
+		 * On others, write combining is "Uncached/Buffered"
+		 */
+		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+	}
+
+	/*
+	 * Now deal with the memory-type mappings
+	 */
 	cp = &cache_policies[cachepolicy];
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
 
@@ -317,12 +376,8 @@ static void __init build_mem_type_table(void)
 	 * Enable CPU-specific coherency if supported.
 	 * (Only available on XSC3 at the moment.)
 	 */
-	if (arch_is_coherent()) {
-		if (cpu_is_xsc3()) {
-			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
-		}
-	}
+	if (arch_is_coherent() && cpu_is_xsc3())
+		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 
 	/*
 	 * ARMv6 and above have extended page tables.
@@ -336,11 +391,6 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 
-		/*
-		 * Mark the device area as "shared device"
-		 */
-		mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
-
 #ifdef CONFIG_SMP
 		/*
 		 * Mark memory with the "shared" attribute for SMP systems
@@ -360,9 +410,6 @@ static void __init build_mem_type_table(void)
 	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
 	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
 
-	if (cpu_arch < CPU_ARCH_ARMv5)
-		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
-
 	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
 	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
 				 L_PTE_DIRTY | L_PTE_WRITE |
@@ -654,7 +701,7 @@ static inline void prepare_page_table(struct meminfo *mi)
 	/*
 	 * Clear out all the mappings below the kernel image.
 	 */
-	for (addr = 0; addr < MODULE_START; addr += PGDIR_SIZE)
+	for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
 
 #ifdef CONFIG_XIP_KERNEL
@@ -766,7 +813,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	 */
 #ifdef CONFIG_XIP_KERNEL
 	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
-	map.virtual = MODULE_START;
+	map.virtual = MODULES_VADDR;
 	map.length = ((unsigned long)&_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
 	map.type = MT_ROM;
 	create_mapping(&map);
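
The TEXCB encodings quoted in the new mmu.c comments map onto section-descriptor bits TEX[2:0] (bits 14:12), C (bit 3) and B (bit 2); PMD_SECT_BUFFERED is the same bit as PMD_SECT_BUFFERABLE. A decoding sketch for the ARMv6/no-TEX-remap case, using those bit positions (the show() helper is ad hoc):

	#include <stdio.h>

	#define PMD_SECT_BUFFERABLE (1u << 2)		/* B */
	#define PMD_SECT_CACHEABLE  (1u << 3)		/* C */
	#define PMD_SECT_TEX(x)     ((unsigned)(x) << 12)	/* TEX[2:0] */

	static void show(const char *name, unsigned prot)
	{
		printf("%-14s TEX=%u C=%u B=%u\n", name, (prot >> 12) & 7,
		       !!(prot & PMD_SECT_CACHEABLE),
		       !!(prot & PMD_SECT_BUFFERABLE));
	}

	int main(void)
	{
		show("shared dev", PMD_SECT_BUFFERABLE);	/* TEXCB=00001 */
		show("nonshared dev", PMD_SECT_TEX(2));		/* TEXCB=01000 */
		show("write combine", PMD_SECT_TEX(1));		/* TEXCB=00100 */
		return 0;
	}
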
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 07f82db70945..4d3c0a73e7fb 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -115,7 +115,7 @@ ENTRY(cpu_v7_set_pte_ext)
 	orr	r3, r3, r2
 	orr	r3, r3, #PTE_EXT_AP0 | 2
 
-	tst	r2, #1 << 4
+	tst	r1, #1 << 4
 	orrne	r3, r3, #PTE_EXT_TEX(1)
 
 	tst	r1, #L_PTE_WRITE
@@ -192,11 +192,11 @@ __v7_setup:
 	mov	pc, lr				@ return to head.S:__ret
 ENDPROC(__v7_setup)
 
-	/*
-	 *         V X F   I D LR
-	 * .... ...E PUI. .T.T 4RVI ZFRS BLDP WCAM
-	 * rrrr rrrx xxx0 0101 xxxx xxxx x111 xxxx < forced
-	 *         0 110 0011 1.00 .111 1101 < we want
-	 */
+	/*   AT
+	 *  TFR   EV X F   I D LR
+	 * .EEE ..EE PUI. .T.T 4RVI ZFRS BLDP WCAM
+	 * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
+	 *    1    0 110       0011 1.00 .111 1101 < we want
+	 */
 	.type	v7_crval, #object
 v7_crval:
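
For reference on how the two v7_crval words behind this comment are consumed: the setup code loads them as a clear mask and a set mask and applies them to the boot-time control register, roughly cr = (cr & ~clear) | set, which is what the "forced"/"we want" rows above describe. A sketch with illustrative masks only (not the real v7 values):

	#include <stdio.h>

	int main(void)
	{
		unsigned long cr    = 0x00000078UL;	/* made-up boot c1 value */
		unsigned long clear = 0x00000002UL;	/* crval word 0 (illustrative) */
		unsigned long set   = 0x00003001UL;	/* crval word 1 (illustrative) */

		cr = (cr & ~clear) | set;
		printf("c1 = %#lx\n", cr);
		return 0;
	}
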