Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig             |  14
-rw-r--r--  arch/arm/mm/Makefile            |   1
-rw-r--r--  arch/arm/mm/alignment.c         |   2
-rw-r--r--  arch/arm/mm/cache-feroceon-l2.c |   1
-rw-r--r--  arch/arm/mm/cache-l2x0.c        |  11
-rw-r--r--  arch/arm/mm/cache-v3.S          | 137
-rw-r--r--  arch/arm/mm/cache-v4.S          |   2
-rw-r--r--  arch/arm/mm/context.c           |   3
-rw-r--r--  arch/arm/mm/dma-mapping.c       |   5
-rw-r--r--  arch/arm/mm/mmu.c               |  92
-rw-r--r--  arch/arm/mm/proc-arm740.S       |  30
-rw-r--r--  arch/arm/mm/proc-arm920.S       |   2
-rw-r--r--  arch/arm/mm/proc-arm926.S       |   2
-rw-r--r--  arch/arm/mm/proc-mohawk.S       |   2
-rw-r--r--  arch/arm/mm/proc-sa1100.S       |   2
-rw-r--r--  arch/arm/mm/proc-syms.c         |   2
-rw-r--r--  arch/arm/mm/proc-v6.S           |   2
-rw-r--r--  arch/arm/mm/proc-v7.S           |  19
-rw-r--r--  arch/arm/mm/proc-xsc3.S         |   2
-rw-r--r--  arch/arm/mm/proc-xscale.S       |   2
-rw-r--r--  arch/arm/mm/tcm.h               |  17
21 files changed, 148 insertions, 202 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 025d17328730..35955b54944c 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -43,7 +43,7 @@ config CPU_ARM740T
 	depends on !MMU
 	select CPU_32v4T
 	select CPU_ABRT_LV4T
-	select CPU_CACHE_V3	# although the core is v4t
+	select CPU_CACHE_V4
 	select CPU_CP15_MPU
 	select CPU_PABRT_LEGACY
 	help
@@ -397,6 +397,13 @@ config CPU_V7
 	select CPU_PABRT_V7
 	select CPU_TLB_V7 if MMU
 
+config CPU_THUMBONLY
+	bool
+	# There are no CPUs available with MMU that don't implement an ARM ISA:
+	depends on !MMU
+	help
+	  Select this if your CPU doesn't support the 32 bit ARM instructions.
+
 # Figure out what processor architecture version we should be using.
 # This defines the compiler instruction set which depends on the machine type.
 config CPU_32v3
@@ -469,9 +476,6 @@ config CPU_PABRT_V7
 	bool
 
 # The cache model
-config CPU_CACHE_V3
-	bool
-
 config CPU_CACHE_V4
 	bool
 
@@ -608,7 +612,7 @@ config ARCH_DMA_ADDR_T_64BIT
 	bool
 
 config ARM_THUMB
-	bool "Support Thumb user binaries"
+	bool "Support Thumb user binaries" if !CPU_THUMBONLY
 	depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON
 	default y
 	help
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 4e333fa2756f..9e51be96f635 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -33,7 +33,6 @@ obj-$(CONFIG_CPU_PABRT_LEGACY)	+= pabort-legacy.o
 obj-$(CONFIG_CPU_PABRT_V6)	+= pabort-v6.o
 obj-$(CONFIG_CPU_PABRT_V7)	+= pabort-v7.o
 
-obj-$(CONFIG_CPU_CACHE_V3)	+= cache-v3.o
 obj-$(CONFIG_CPU_CACHE_V4)	+= cache-v4.o
 obj-$(CONFIG_CPU_CACHE_V4WT)	+= cache-v4wt.o
 obj-$(CONFIG_CPU_CACHE_V4WB)	+= cache-v4wb.o
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index db26e2e543f4..6f4585b89078 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -961,12 +961,14 @@ static int __init alignment_init(void)
 		return -ENOMEM;
 #endif
 
+#ifdef CONFIG_CPU_CP15
 	if (cpu_is_v6_unaligned()) {
 		cr_alignment &= ~CR_A;
 		cr_no_alignment &= ~CR_A;
 		set_cr(cr_alignment);
 		ai_usermode = safe_usermode(ai_usermode, false);
 	}
+#endif
 
 	hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN,
 			"alignment exception");
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index dd3d59122cc3..48bc3c0a87ce 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -343,6 +343,7 @@ void __init feroceon_l2_init(int __l2_wt_override)
 	outer_cache.inv_range = feroceon_l2_inv_range;
 	outer_cache.clean_range = feroceon_l2_clean_range;
 	outer_cache.flush_range = feroceon_l2_flush_range;
+	outer_cache.inv_all = l2_inv_all;
 
 	enable_l2();
 
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index c2f37390308a..c465faca51b0 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -299,7 +299,7 @@ static void l2x0_unlock(u32 cache_id)
 	int lockregs;
 	int i;
 
-	switch (cache_id) {
+	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
 	case L2X0_CACHE_ID_PART_L310:
 		lockregs = 8;
 		break;
@@ -333,15 +333,14 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 	if (cache_id_part_number_from_dt)
 		cache_id = cache_id_part_number_from_dt;
 	else
-		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID)
-			& L2X0_CACHE_ID_PART_MASK;
+		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
 	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
 
 	aux &= aux_mask;
 	aux |= aux_val;
 
 	/* Determine the number of ways */
-	switch (cache_id) {
+	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
 	case L2X0_CACHE_ID_PART_L310:
 		if (aux & (1 << 16))
 			ways = 16;
@@ -725,7 +724,6 @@ static const struct l2x0_of_data pl310_data = {
 		.flush_all = l2x0_flush_all,
 		.inv_all = l2x0_inv_all,
 		.disable = l2x0_disable,
-		.set_debug = pl310_set_debug,
 	},
 };
 
@@ -814,9 +812,8 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
 		data->save();
 
 	of_init = true;
-	l2x0_init(l2x0_base, aux_val, aux_mask);
-
 	memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
+	l2x0_init(l2x0_base, aux_val, aux_mask);
 
 	return 0;
 }
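Note: the cache-l2x0.c hunks above keep the raw L2X0_CACHE_ID register value and apply L2X0_CACHE_ID_PART_MASK only at the points where the part number is compared, so the revision bits stay available for later revision-specific checks. A minimal C sketch of that pattern, not taken from the patch (the helper name is invented, and it assumes the usual cache-l2x0.h layout with the part number in bits [9:6] and the RTL revision in the low bits):

	/* Hypothetical helper: dispatch on the part number only.  An unmasked
	 * compare against L2X0_CACHE_ID_PART_L310 would fail whenever the
	 * revision bits are non-zero. */
	static int l2x0_num_lock_regs(u32 cache_id)
	{
		switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
		case L2X0_CACHE_ID_PART_L310:
			return 8;	/* PL310 has 8 lockdown register pairs */
		default:
			return 1;
		}
	}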
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
deleted file mode 100644
index 8a3fadece8d3..000000000000
--- a/arch/arm/mm/cache-v3.S
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * linux/arch/arm/mm/cache-v3.S
- *
- * Copyright (C) 1997-2002 Russell king
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/page.h>
-#include "proc-macros.S"
-
-/*
- * flush_icache_all()
- *
- * Unconditionally clean and invalidate the entire icache.
- */
-ENTRY(v3_flush_icache_all)
-	mov	pc, lr
-ENDPROC(v3_flush_icache_all)
-
-/*
- * flush_user_cache_all()
- *
- * Invalidate all cache entries in a particular address
- * space.
- *
- * - mm - mm_struct describing address space
- */
-ENTRY(v3_flush_user_cache_all)
-	/* FALLTHROUGH */
-/*
- * flush_kern_cache_all()
- *
- * Clean and invalidate the entire cache.
- */
-ENTRY(v3_flush_kern_cache_all)
-	/* FALLTHROUGH */
-
-/*
- * flush_user_cache_range(start, end, flags)
- *
- * Invalidate a range of cache entries in the specified
- * address space.
- *
- * - start - start address (may not be aligned)
- * - end - end address (exclusive, may not be aligned)
- * - flags - vma_area_struct flags describing address space
- */
-ENTRY(v3_flush_user_cache_range)
-	mov	ip, #0
-	mcreq	p15, 0, ip, c7, c0, 0		@ flush ID cache
-	mov	pc, lr
-
-/*
- * coherent_kern_range(start, end)
- *
- * Ensure coherency between the Icache and the Dcache in the
- * region described by start. If you have non-snooping
- * Harvard caches, you need to implement this function.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v3_coherent_kern_range)
-	/* FALLTHROUGH */
-
-/*
- * coherent_user_range(start, end)
- *
- * Ensure coherency between the Icache and the Dcache in the
- * region described by start. If you have non-snooping
- * Harvard caches, you need to implement this function.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v3_coherent_user_range)
-	mov	r0, #0
-	mov	pc, lr
-
-/*
- * flush_kern_dcache_area(void *page, size_t size)
- *
- * Ensure no D cache aliasing occurs, either with itself or
- * the I cache
- *
- * - addr - kernel address
- * - size - region size
- */
-ENTRY(v3_flush_kern_dcache_area)
-	/* FALLTHROUGH */
-
-/*
- * dma_flush_range(start, end)
- *
- * Clean and invalidate the specified virtual address range.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v3_dma_flush_range)
-	mov	r0, #0
-	mcr	p15, 0, r0, c7, c0, 0		@ flush ID cache
-	mov	pc, lr
-
-/*
- * dma_unmap_area(start, size, dir)
- * - start - kernel virtual start address
- * - size - size of region
- * - dir - DMA direction
- */
-ENTRY(v3_dma_unmap_area)
-	teq	r2, #DMA_TO_DEVICE
-	bne	v3_dma_flush_range
-	/* FALLTHROUGH */
-
-/*
- * dma_map_area(start, size, dir)
- * - start - kernel virtual start address
- * - size - size of region
- * - dir - DMA direction
- */
-ENTRY(v3_dma_map_area)
-	mov	pc, lr
-ENDPROC(v3_dma_unmap_area)
-ENDPROC(v3_dma_map_area)
-
-	.globl	v3_flush_kern_cache_louis
-	.equ	v3_flush_kern_cache_louis, v3_flush_kern_cache_all
-
-	__INITDATA
-
-@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-	define_cache_functions v3
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 43e5d77be677..a7ba68f59f0c 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -58,7 +58,7 @@ ENTRY(v4_flush_kern_cache_all)
 ENTRY(v4_flush_user_cache_range)
 #ifdef CONFIG_CPU_CP15
 	mov	ip, #0
-	mcreq	p15, 0, ip, c7, c7, 0		@ flush ID cache
+	mcr	p15, 0, ip, c7, c7, 0		@ flush ID cache
 	mov	pc, lr
 #else
 	/* FALLTHROUGH */
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index a5a4b2bc42ba..2ac37372ef52 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -48,7 +48,7 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
 
-static DEFINE_PER_CPU(atomic64_t, active_asids);
+DEFINE_PER_CPU(atomic64_t, active_asids);
 static DEFINE_PER_CPU(u64, reserved_asids);
 static cpumask_t tlb_flush_pending;
 
@@ -215,6 +215,7 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
 		local_flush_bp_all();
 		local_flush_tlb_all();
+		dummy_flush_tlb_a15_erratum();
 	}
 
 	atomic64_set(&per_cpu(active_asids, cpu), asid);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index b47dd48d8634..ef3e0f3aac96 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -342,6 +342,7 @@ static int __init atomic_pool_init(void)
 {
 	struct dma_pool *pool = &atomic_pool;
 	pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
+	gfp_t gfp = GFP_KERNEL | GFP_DMA;
 	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
 	unsigned long *bitmap;
 	struct page *page;
@@ -361,8 +362,8 @@ static int __init atomic_pool_init(void)
 		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
 					      atomic_pool_init);
 	else
-		ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
-					   &page, atomic_pool_init);
+		ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
+					   atomic_pool_init);
 	if (ptr) {
 		int i;
 
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e95a996ab78f..e0d8565671a6 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -34,6 +34,7 @@
 #include <asm/mach/pci.h>
 
 #include "mm.h"
+#include "tcm.h"
 
 /*
  * empty_zero_page is a special page that is used for
@@ -112,6 +113,7 @@ static struct cachepolicy cache_policies[] __initdata = {
 	}
 };
 
+#ifdef CONFIG_CPU_CP15
 /*
  * These are useful for identifying cache coherency
  * problems by allowing the cache or the cache and
@@ -210,6 +212,22 @@ void adjust_cr(unsigned long mask, unsigned long set)
 }
 #endif
 
+#else /* ifdef CONFIG_CPU_CP15 */
+
+static int __init early_cachepolicy(char *p)
+{
+	pr_warning("cachepolicy kernel parameter not supported without cp15\n");
+}
+early_param("cachepolicy", early_cachepolicy);
+
+static int __init noalign_setup(char *__unused)
+{
+	pr_warning("noalign kernel parameter not supported without cp15\n");
+}
+__setup("noalign", noalign_setup);
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
 #define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
 #define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 
@@ -598,39 +616,60 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-static void __init alloc_init_section(pud_t *pud, unsigned long addr,
-				      unsigned long end, phys_addr_t phys,
-				      const struct mem_type *type)
+static void __init map_init_section(pmd_t *pmd, unsigned long addr,
+			unsigned long end, phys_addr_t phys,
+			const struct mem_type *type)
 {
-	pmd_t *pmd = pmd_offset(pud, addr);
-
+#ifndef CONFIG_ARM_LPAE
 	/*
-	 * Try a section mapping - end, addr and phys must all be aligned
-	 * to a section boundary.  Note that PMDs refer to the individual
-	 * L1 entries, whereas PGDs refer to a group of L1 entries making
-	 * up one logical pointer to an L2 table.
+	 * In classic MMU format, puds and pmds are folded in to
+	 * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
+	 * group of L1 entries making up one logical pointer to
+	 * an L2 table (2MB), where as PMDs refer to the individual
+	 * L1 entries (1MB). Hence increment to get the correct
+	 * offset for odd 1MB sections.
+	 * (See arch/arm/include/asm/pgtable-2level.h)
 	 */
-	if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
-		pmd_t *p = pmd;
-
-#ifndef CONFIG_ARM_LPAE
-		if (addr & SECTION_SIZE)
-			pmd++;
+	if (addr & SECTION_SIZE)
+		pmd++;
 #endif
+	do {
+		*pmd = __pmd(phys | type->prot_sect);
+		phys += SECTION_SIZE;
+	} while (pmd++, addr += SECTION_SIZE, addr != end);
 
-		do {
-			*pmd = __pmd(phys | type->prot_sect);
-			phys += SECTION_SIZE;
-		} while (pmd++, addr += SECTION_SIZE, addr != end);
+	flush_pmd_entry(pmd);
+}
 
-		flush_pmd_entry(p);
-	} else {
+static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
+				      unsigned long end, phys_addr_t phys,
+				      const struct mem_type *type)
+{
+	pmd_t *pmd = pmd_offset(pud, addr);
+	unsigned long next;
+
+	do {
 		/*
-		 * No need to loop; pte's aren't interested in the
-		 * individual L1 entries.
+		 * With LPAE, we must loop over to map
+		 * all the pmds for the given range.
 		 */
-		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
-	}
+		next = pmd_addr_end(addr, end);
+
+		/*
+		 * Try a section mapping - addr, next and phys must all be
+		 * aligned to a section boundary.
+		 */
+		if (type->prot_sect &&
+				((addr | next | phys) & ~SECTION_MASK) == 0) {
+			map_init_section(pmd, addr, next, phys, type);
+		} else {
+			alloc_init_pte(pmd, addr, next,
+						__phys_to_pfn(phys), type);
+		}
+
+		phys += next - addr;
+
+	} while (pmd++, addr = next, addr != end);
 }
 
 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
@@ -641,7 +680,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
 
 	do {
 		next = pud_addr_end(addr, end);
-		alloc_init_section(pud, addr, next, phys, type);
+		alloc_init_pmd(pud, addr, next, phys, type);
 		phys += next - addr;
 	} while (pud++, addr = next, addr != end);
 }
@@ -1256,6 +1295,7 @@ void __init paging_init(struct machine_desc *mdesc)
 	dma_contiguous_remap();
 	devicemaps_init(mdesc);
 	kmap_init();
+	tcm_init();
 
 	top_pmd = pmd_off_k(0xffff0000);
 
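Note: in the mmu.c change above, alloc_init_pmd() now walks the range one pmd at a time instead of handing the whole range to a single section-mapping attempt. A minimal sketch of that walk, not taken from the patch (the function name is generic; it assumes the kernel's pmd_addr_end() helper): pmd_addr_end() clamps to the next pmd boundary, so with LPAE's 2 MB pmds a large region is split into per-pmd chunks, while with the classic 2-level tables the pud/pmd levels are folded and one pass usually covers the whole range.

	static void walk_pmds(unsigned long addr, unsigned long end)
	{
		unsigned long next;

		do {
			/* next = min(next pmd boundary, end) */
			next = pmd_addr_end(addr, end);
			/* map [addr, next): use a section mapping when addr,
			 * next and phys are all section-aligned, otherwise
			 * fall back to individual ptes */
			addr = next;
		} while (addr != end);
	}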
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index dc5de5d53f20..fde2d2a794cf 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -77,24 +77,27 @@ __arm740_setup:
 	mcr	p15, 0, r0, c6, c0		@ set area 0, default
 
 	ldr	r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
-	ldr	r1, =(CONFIG_DRAM_SIZE >> 12)	@ size of RAM (must be >= 4KB)
-	mov	r2, #10				@ 11 is the minimum (4KB)
-1:	add	r2, r2, #1			@ area size *= 2
-	mov	r1, r1, lsr #1
+	ldr	r3, =(CONFIG_DRAM_SIZE >> 12)	@ size of RAM (must be >= 4KB)
+	mov	r4, #10				@ 11 is the minimum (4KB)
+1:	add	r4, r4, #1			@ area size *= 2
+	movs	r3, r3, lsr #1
 	bne	1b				@ count not zero r-shift
-	orr	r0, r0, r2, lsl #1		@ the area register value
+	orr	r0, r0, r4, lsl #1		@ the area register value
 	orr	r0, r0, #1			@ set enable bit
 	mcr	p15, 0, r0, c6, c1		@ set area 1, RAM
 
 	ldr	r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
-	ldr	r1, =(CONFIG_FLASH_SIZE >> 12)	@ size of FLASH (must be >= 4KB)
-	mov	r2, #10				@ 11 is the minimum (4KB)
-1:	add	r2, r2, #1			@ area size *= 2
-	mov	r1, r1, lsr #1
+	ldr	r3, =(CONFIG_FLASH_SIZE >> 12)	@ size of FLASH (must be >= 4KB)
+	cmp	r3, #0
+	moveq	r0, #0
+	beq	2f
+	mov	r4, #10				@ 11 is the minimum (4KB)
+1:	add	r4, r4, #1			@ area size *= 2
+	movs	r3, r3, lsr #1
 	bne	1b				@ count not zero r-shift
-	orr	r0, r0, r2, lsl #1		@ the area register value
+	orr	r0, r0, r4, lsl #1		@ the area register value
 	orr	r0, r0, #1			@ set enable bit
-	mcr	p15, 0, r0, c6, c2		@ set area 2, ROM/FLASH
+2:	mcr	p15, 0, r0, c6, c2		@ set area 2, ROM/FLASH
 
 	mov	r0, #0x06
 	mcr	p15, 0, r0, c2, c0		@ Region 1&2 cacheable
@@ -137,13 +140,14 @@ __arm740_proc_info:
 	.long	0x41807400
 	.long	0xfffffff0
 	.long	0
+	.long	0
 	b	__arm740_setup
 	.long	cpu_arch_name
 	.long	cpu_elf_name
-	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT
+	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_26BIT
 	.long	cpu_arm740_name
 	.long	arm740_processor_functions
 	.long	0
 	.long	0
-	.long	v3_cache_fns			@ cache model
+	.long	v4_cache_fns			@ cache model
 	.size	__arm740_proc_info, . - __arm740_proc_info
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 2c3b9421ab5e..2556cf1c2da1 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -387,7 +387,7 @@ ENTRY(cpu_arm920_set_pte_ext)
 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl	cpu_arm920_suspend_size
 .equ	cpu_arm920_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_arm920_do_suspend)
 	stmfd	sp!, {r4 - r6, lr}
 	mrc	p15, 0, r4, c13, c0, 0	@ PID
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index f1803f7e2972..344c8a548cc0 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -402,7 +402,7 @@ ENTRY(cpu_arm926_set_pte_ext)
 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl	cpu_arm926_suspend_size
 .equ	cpu_arm926_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_arm926_do_suspend)
 	stmfd	sp!, {r4 - r6, lr}
 	mrc	p15, 0, r4, c13, c0, 0	@ PID
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 82f9cdc751d6..0b60dd3d742a 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -350,7 +350,7 @@ ENTRY(cpu_mohawk_set_pte_ext)
 
 .globl	cpu_mohawk_suspend_size
 .equ	cpu_mohawk_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_mohawk_do_suspend)
 	stmfd	sp!, {r4 - r9, lr}
 	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 3aa0da11fd84..d92dfd081429 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -172,7 +172,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
 
 .globl	cpu_sa1100_suspend_size
 .equ	cpu_sa1100_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_sa1100_do_suspend)
 	stmfd	sp!, {r4 - r6, lr}
 	mrc	p15, 0, r4, c3, c0, 0		@ domain ID
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c
index 3e6210b4d6d4..054b491ff764 100644
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -17,7 +17,9 @@
 
 #ifndef MULTI_CPU
 EXPORT_SYMBOL(cpu_dcache_clean_area);
+#ifdef CONFIG_MMU
 EXPORT_SYMBOL(cpu_set_pte_ext);
+#endif
 #else
 EXPORT_SYMBOL(processor);
 #endif
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index a286d4712b57..919405e20b80 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -136,7 +136,7 @@ ENTRY(cpu_v6_set_pte_ext)
 /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
 .globl	cpu_v6_suspend_size
 .equ	cpu_v6_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_v6_do_suspend)
 	stmfd	sp!, {r4 - r9, lr}
 	mrc	p15, 0, r4, c13, c0, 0	@ FCSE/PID
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 4fa28acaf7f9..2c73a7301ff7 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -421,7 +421,7 @@ __v7_pj4b_proc_info:
 __v7_ca7mp_proc_info:
 	.long	0x410fc070
 	.long	0xff0ffff0
-	__v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV
+	__v7_proc __v7_ca7mp_setup
 	.size	__v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
 
 /*
@@ -431,10 +431,25 @@ __v7_ca7mp_proc_info:
 __v7_ca15mp_proc_info:
 	.long	0x410fc0f0
 	.long	0xff0ffff0
-	__v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV
+	__v7_proc __v7_ca15mp_setup
 	.size	__v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
 
 	/*
+	 * Qualcomm Inc. Krait processors.
+	 */
+	.type	__krait_proc_info, #object
+__krait_proc_info:
+	.long	0x510f0400		@ Required ID value
+	.long	0xff0ffc00		@ Mask for ID
+	/*
+	 * Some Krait processors don't indicate support for SDIV and UDIV
+	 * instructions in the ARM instruction set, even though they actually
+	 * do support them.
+	 */
+	__v7_proc __v7_setup, hwcaps = HWCAP_IDIV
+	.size	__krait_proc_info, . - __krait_proc_info
+
+	/*
 	 * Match any ARMv7 processor core.
 	 */
 	.type	__v7_proc_info, #object
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index eb93d6487f35..e8efd83b6f25 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -413,7 +413,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
 
 .globl	cpu_xsc3_suspend_size
 .equ	cpu_xsc3_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_xsc3_do_suspend)
 	stmfd	sp!, {r4 - r9, lr}
 	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 25510361aa18..e766f889bfd6 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -528,7 +528,7 @@ ENTRY(cpu_xscale_set_pte_ext)
 
 .globl	cpu_xscale_suspend_size
 .equ	cpu_xscale_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_xscale_do_suspend)
 	stmfd	sp!, {r4 - r9, lr}
 	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode
diff --git a/arch/arm/mm/tcm.h b/arch/arm/mm/tcm.h
new file mode 100644
index 000000000000..8015ad434a40
--- /dev/null
+++ b/arch/arm/mm/tcm.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2008-2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ * TCM memory handling for ARM systems
+ *
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com>
+ */
+
+#ifdef CONFIG_HAVE_TCM
+void __init tcm_init(void);
+#else
+/* No TCM support, just blank inlines to be optimized out */
+inline void tcm_init(void)
+{
+}
+#endif