author     Linus Torvalds <torvalds@linux-foundation.org>   2014-06-05 18:57:04 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-06-05 18:57:04 -0400
commit     eb3d3ec567e868c8a3bfbfdfc9465ffd52983d11 (patch)
tree       75acf38b8d73cd281e5ce4dcc941faf48e244b98 /arch
parent     c3c55a07203947f72afa50a3218460b27307c47d (diff)
parent     bd63ce27d9d62bc40a962b991cbbbe4f0dc913d2 (diff)
Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm into next
Pull ARM updates from Russell King:
- Major clean-up of the L2 cache support code. The existing mess was
becoming rather unmaintainable through all the additions that others
have done over time. This turns it into a much nicer structure, and
implements a few performance improvements as well.
- Clean up some of the CP15 control register tweaks for alignment
support, moving some code and data into alignment.c
- DMA properties for ARM, from Santosh and reviewed by DT people. This
adds DT properties to specify bus translations we can't discover
automatically, and to indicate whether devices are coherent.
- Hibernation support for ARM
- Make ftrace work with read-only text in modules
- Add suspend support for PJ4B CPUs
- Rework interrupt masking for undefined instruction handling, which
allows us to enable interrupts earlier in the handling of these
exceptions.
- Support for big endian page tables
- Fix stacktrace support to exclude stacktrace functions from the
trace, and add save_stack_trace_regs() implementation so that kprobes
can record stack traces.
- Add support for the Cortex-A17 CPU.
- Remove last vestiges of ARM710 support.
- Removal of ARM "meminfo" structure, finally converting us solely to
memblock to handle the early memory initialisation.
* 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm: (142 commits)
ARM: ensure C page table setup code follows assembly code (part II)
ARM: ensure C page table setup code follows assembly code
ARM: consolidate last remaining open-coded alignment trap enable
ARM: remove global cr_no_alignment
ARM: remove CPU_CP15 conditional from alignment.c
ARM: remove unused adjust_cr() function
ARM: move "noalign" command line option to alignment.c
ARM: provide common method to clear bits in CPU control register
ARM: 8025/1: Get rid of meminfo
ARM: 8060/1: mm: allow sub-architectures to override PCI I/O memory type
ARM: 8066/1: correction for ARM patch 8031/2
ARM: 8049/1: ftrace/add save_stack_trace_regs() implementation
ARM: 8065/1: remove last use of CONFIG_CPU_ARM710
ARM: 8062/1: Modify ldrt fixup handler to re-execute the userspace instruction
ARM: 8047/1: rwsem: use asm-generic rwsem implementation
ARM: l2c: trial at enabling some Cortex-A9 optimisations
ARM: l2c: add warnings for stuff modifying aux_ctrl register values
ARM: l2c: print a warning with L2C-310 caches if the cache size is modified
ARM: l2c: remove old .set_debug method
ARM: l2c: kill L2X0_AUX_CTRL_MASK before anyone else makes use of this
...
Diffstat (limited to 'arch')
131 files changed, 2192 insertions, 1536 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index ad89a033f17f..87b63fde06d7 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -165,12 +165,9 @@ config TRACE_IRQFLAGS_SUPPORT
         bool
         default y
 
-config RWSEM_GENERIC_SPINLOCK
-        bool
-        default y
-
 config RWSEM_XCHGADD_ALGORITHM
         bool
+        default y
 
 config ARCH_HAS_ILOG2_U32
         bool
@@ -1089,11 +1086,6 @@ source "arch/arm/firmware/Kconfig"
 
 source arch/arm/mm/Kconfig
 
-config ARM_NR_BANKS
-        int
-        default 16 if ARCH_EP93XX
-        default 8
-
 config IWMMXT
         bool "Enable iWMMXt support"
         depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4 || CPU_PJ4B
@@ -1214,19 +1206,6 @@ config ARM_ERRATA_742231
           register of the Cortex-A9 which reduces the linefill issuing
           capabilities of the processor.
 
-config PL310_ERRATA_588369
-        bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
-        depends on CACHE_L2X0
-        help
-          The PL310 L2 cache controller implements three types of Clean &
-          Invalidate maintenance operations: by Physical Address
-          (offset 0x7F0), by Index/Way (0x7F8) and by Way (0x7FC).
-          They are architecturally defined to behave as the execution of a
-          clean operation followed immediately by an invalidate operation,
-          both performing to the same memory location. This functionality
-          is not correctly implemented in PL310 as clean lines are not
-          invalidated as a result of these operations.
-
 config ARM_ERRATA_643719
         bool "ARM errata: LoUIS bit field in CLIDR register is incorrect"
         depends on CPU_V7 && SMP
@@ -1249,17 +1228,6 @@ config ARM_ERRATA_720789
           tables. The workaround changes the TLB flushing routines to invalidate
           entries regardless of the ASID.
 
-config PL310_ERRATA_727915
-        bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption"
-        depends on CACHE_L2X0
-        help
-          PL310 implements the Clean & Invalidate by Way L2 cache maintenance
-          operation (offset 0x7FC). This operation runs in background so that
-          PL310 can handle normal accesses while it is in progress. Under very
-          rare circumstances, due to this erratum, write data can be lost when
-          PL310 treats a cacheable write transaction during a Clean &
-          Invalidate by Way operation.
-
 config ARM_ERRATA_743622
         bool "ARM errata: Faulty hazard checking in the Store Buffer may lead to data corruption"
         depends on CPU_V7
@@ -1285,21 +1253,6 @@ config ARM_ERRATA_751472
           operation is received by a CPU before the ICIALLUIS has completed,
           potentially leading to corrupted entries in the cache or TLB.
 
-config PL310_ERRATA_753970
-        bool "PL310 errata: cache sync operation may be faulty"
-        depends on CACHE_PL310
-        help
-          This option enables the workaround for the 753970 PL310 (r3p0) erratum.
-
-          Under some condition the effect of cache sync operation on
-          the store buffer still remains when the operation completes.
-          This means that the store buffer is always asked to drain and
-          this prevents it from merging any further writes. The workaround
-          is to replace the normal offset of cache sync operation (0x730)
-          by another offset targeting an unmapped PL310 register 0x740.
-          This has the same effect as the cache sync operation: store buffer
-          drain and waiting for all buffers empty.
-
 config ARM_ERRATA_754322
         bool "ARM errata: possible faulty MMU translations following an ASID switch"
         depends on CPU_V7
@@ -1348,18 +1301,6 @@ config ARM_ERRATA_764369
           relevant cache maintenance functions and sets a specific bit
           in the diagnostic control register of the SCU.
 
-config PL310_ERRATA_769419
-        bool "PL310 errata: no automatic Store Buffer drain"
-        depends on CACHE_L2X0
-        help
-          On revisions of the PL310 prior to r3p2, the Store Buffer does
-          not automatically drain. This can cause normal, non-cacheable
-          writes to be retained when the memory system is idle, leading
-          to suboptimal I/O performance for drivers using coherent DMA.
-          This option adds a write barrier to the cpu_idle loop so that,
-          on systems with an outer cache, the store buffer is drained
-          explicitly.
-
 config ARM_ERRATA_775420
         bool "ARM errata: A data cache maintenance operation which aborts, might lead to deadlock"
         depends on CPU_V7
@@ -2279,6 +2220,11 @@ config ARCH_SUSPEND_POSSIBLE
 config ARM_CPU_SUSPEND
         def_bool PM_SLEEP
 
+config ARCH_HIBERNATION_POSSIBLE
+        bool
+        depends on MMU
+        default y if ARCH_SUSPEND_POSSIBLE
+
 endmenu
 
 source "net/Kconfig"
diff --git a/arch/arm/boot/compressed/atags_to_fdt.c b/arch/arm/boot/compressed/atags_to_fdt.c
index d1153c8a765a..9448aa0c6686 100644
--- a/arch/arm/boot/compressed/atags_to_fdt.c
+++ b/arch/arm/boot/compressed/atags_to_fdt.c
@@ -7,6 +7,8 @@
 #define do_extend_cmdline 0
 #endif
 
+#define NR_BANKS 16
+
 static int node_offset(void *fdt, const char *node_path)
 {
         int offset = fdt_path_offset(fdt, node_path);
diff --git a/arch/arm/boot/dts/marco.dtsi b/arch/arm/boot/dts/marco.dtsi
index 0c9647d28765..fb354225740a 100644
--- a/arch/arm/boot/dts/marco.dtsi
+++ b/arch/arm/boot/dts/marco.dtsi
@@ -36,7 +36,7 @@
                 ranges = <0x40000000 0x40000000 0xa0000000>;
 
                 l2-cache-controller@c0030000 {
-                        compatible = "sirf,marco-pl310-cache", "arm,pl310-cache";
+                        compatible = "arm,pl310-cache";
                         reg = <0xc0030000 0x1000>;
                         interrupts = <0 59 0>;
                         arm,tag-latency = <1 1 1>;
diff --git a/arch/arm/boot/dts/prima2.dtsi b/arch/arm/boot/dts/prima2.dtsi
index 3df7ba860282..963b7e54ab15 100644
--- a/arch/arm/boot/dts/prima2.dtsi
+++ b/arch/arm/boot/dts/prima2.dtsi
@@ -48,7 +48,7 @@
                 ranges = <0x40000000 0x40000000 0x80000000>;
 
                 l2-cache-controller@80040000 {
-                        compatible = "arm,pl310-cache", "sirf,prima2-pl310-cache";
+                        compatible = "arm,pl310-cache";
                         reg = <0x80040000 0x1000>;
                         interrupts = <59>;
                         arm,tag-latency = <1 1 1>;
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index 86fd60fefbc9..f91136ab447e 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -106,14 +106,14 @@ void mcpm_cpu_power_down(void)
         BUG();
 }
 
-int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster)
+int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
 {
         int ret;
 
-        if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down_finish))
+        if (WARN_ON_ONCE(!platform_ops || !platform_ops->wait_for_powerdown))
                 return -EUNATCH;
 
-        ret = platform_ops->power_down_finish(cpu, cluster);
+        ret = platform_ops->wait_for_powerdown(cpu, cluster);
         if (ret)
                 pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n",
                         __func__, cpu, cluster, ret);
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
index 177251a4dd9a..92e54d7c6f46 100644
--- a/arch/arm/common/mcpm_platsmp.c
+++ b/arch/arm/common/mcpm_platsmp.c
@@ -62,7 +62,7 @@ static int mcpm_cpu_kill(unsigned int cpu)
 
         cpu_to_pcpu(cpu, &pcpu, &pcluster);
 
-        return !mcpm_cpu_power_down_finish(pcpu, pcluster);
+        return !mcpm_wait_for_cpu_powerdown(pcpu, pcluster);
 }
 
 static int mcpm_cpu_disable(unsigned int cpu)
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 23e728ecf8ab..f5a357601983 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -21,6 +21,7 @@ generic-y += parport.h
 generic-y += poll.h
 generic-y += preempt.h
 generic-y += resource.h
+generic-y += rwsem.h
 generic-y += sections.h
 generic-y += segment.h
 generic-y += sembuf.h
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index b974184f9941..57f0584e8d97 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -312,7 +312,7 @@
  * you cannot return to the original mode.
  */
 .macro safe_svcmode_maskall reg:req
-#if __LINUX_ARM_ARCH__ >= 6
+#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
         mrs \reg , cpsr
         eor \reg, \reg, #HYP_MODE
         tst \reg, #MODE_MASK
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 8b8b61685a34..fd43f7f55b70 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -212,7 +212,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
 static inline void __flush_icache_all(void)
 {
         __flush_icache_preferred();
-        dsb();
+        dsb(ishst);
 }
 
 /*
@@ -487,4 +487,6 @@ int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
 
+void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
+                             void *kaddr, unsigned long len);
 #endif
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index 6493802f880a..c3f11524f10c 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -42,24 +42,23 @@
 #ifndef __ASSEMBLY__
 
 #if __LINUX_ARM_ARCH__ >= 4
-#define vectors_high() (cr_alignment & CR_V)
+#define vectors_high() (get_cr() & CR_V)
 #else
 #define vectors_high() (0)
 #endif
 
 #ifdef CONFIG_CPU_CP15
 
-extern unsigned long cr_no_alignment; /* defined in entry-armv.S */
 extern unsigned long cr_alignment; /* defined in entry-armv.S */
 
-static inline unsigned int get_cr(void)
+static inline unsigned long get_cr(void)
 {
-        unsigned int val;
+        unsigned long val;
         asm("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val) : : "cc");
         return val;
 }
 
-static inline void set_cr(unsigned int val)
+static inline void set_cr(unsigned long val)
 {
         asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR"
           : : "r" (val) : "cc");
@@ -80,10 +79,6 @@ static inline void set_auxcr(unsigned int val)
         isb();
 }
 
-#ifndef CONFIG_SMP
-extern void adjust_cr(unsigned long mask, unsigned long set);
-#endif
-
 #define CPACC_FULL(n)           (3 << (n * 2))
 #define CPACC_SVC(n)            (1 << (n * 2))
 #define CPACC_DISABLE(n)        (0 << (n * 2))
@@ -106,13 +101,17 @@ static inline void set_copro_access(unsigned int val)
 #else /* ifdef CONFIG_CPU_CP15 */
 
 /*
- * cr_alignment and cr_no_alignment are tightly coupled to cp15 (at least in the
- * minds of the developers). Yielding 0 for machines without a cp15 (and making
- * it read-only) is fine for most cases and saves quite some #ifdeffery.
+ * cr_alignment is tightly coupled to cp15 (at least in the minds of the
+ * developers). Yielding 0 for machines without a cp15 (and making it
+ * read-only) is fine for most cases and saves quite some #ifdeffery.
  */
-#define cr_no_alignment UL(0)
 #define cr_alignment UL(0)
 
+static inline unsigned long get_cr(void)
+{
+        return 0;
+}
+
 #endif /* ifdef CONFIG_CPU_CP15 / else */
 
 #endif /* ifndef __ASSEMBLY__ */
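With cr_no_alignment gone and get_cr()/set_cr() widened to unsigned long, callers read and rewrite the control register directly instead of going through the cached copies. A minimal sketch of the resulting pattern (illustration only, not code from this series; CR_A is the alignment-trap bit already defined in asm/cp15.h):

    /* Illustrative sketch only -- not part of this patch set. */
    #include <asm/cp15.h>

    static void clear_alignment_trap_example(void)
    {
            unsigned long cr = get_cr();    /* now returns unsigned long */

            set_cr(cr & ~CR_A);             /* clear the 'A' (alignment trap) bit */
    }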
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 4764344367d4..8c2b7321a478 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -72,6 +72,7 @@
 #define ARM_CPU_PART_CORTEX_A15 0xC0F0
 #define ARM_CPU_PART_CORTEX_A7  0xC070
 #define ARM_CPU_PART_CORTEX_A12 0xC0D0
+#define ARM_CPU_PART_CORTEX_A17 0xC0E0
 
 #define ARM_CPU_XSCALE_ARCH_MASK 0xe000
 #define ARM_CPU_XSCALE_ARCH_V1   0x2000
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index e701a4d9aa59..c45b61a4b4a5 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -58,21 +58,37 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
 #ifndef __arch_pfn_to_dma
 static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
 {
+        if (dev)
+                pfn -= dev->dma_pfn_offset;
         return (dma_addr_t)__pfn_to_bus(pfn);
 }
 
 static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
 {
-        return __bus_to_pfn(addr);
+        unsigned long pfn = __bus_to_pfn(addr);
+
+        if (dev)
+                pfn += dev->dma_pfn_offset;
+
+        return pfn;
 }
 
 static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
 {
+        if (dev) {
+                unsigned long pfn = dma_to_pfn(dev, addr);
+
+                return phys_to_virt(__pfn_to_phys(pfn));
+        }
+
         return (void *)__bus_to_virt((unsigned long)addr);
 }
 
 static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 {
+        if (dev)
+                return pfn_to_dma(dev, virt_to_pfn(addr));
+
         return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
 }
 
@@ -105,6 +121,13 @@ static inline unsigned long dma_max_pfn(struct device *dev)
 }
 #define dma_max_pfn(dev) dma_max_pfn(dev)
 
+static inline int set_arch_dma_coherent_ops(struct device *dev)
+{
+        set_dma_ops(dev, &arm_coherent_dma_ops);
+        return 0;
+}
+#define set_arch_dma_coherent_ops(dev) set_arch_dma_coherent_ops(dev)
+
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
         unsigned int offset = paddr & ~PAGE_MASK;
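The dma_pfn_offset handling added above is a plain linear PFN translation: the device's offset is subtracted when converting a CPU PFN to a bus address and added back in the other direction. A self-contained sketch of the arithmetic with a made-up 1 GiB offset (the value is purely illustrative, not taken from any platform):

    /* Standalone illustration of the dma_pfn_offset arithmetic.
     * A PFN offset of 0x40000 with 4 KiB pages corresponds to a bus that
     * sees RAM 1 GiB lower than the CPU does (assumed value). */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define DMA_PFN_OFFSET  0x40000UL       /* hypothetical dev->dma_pfn_offset */

    static uint64_t cpu_phys_to_bus(uint64_t cpu_phys)
    {
            uint64_t pfn = cpu_phys >> PAGE_SHIFT;

            /* mirrors pfn_to_dma(): subtract the per-device PFN offset */
            return (pfn - DMA_PFN_OFFSET) << PAGE_SHIFT;
    }

    int main(void)
    {
            /* RAM at 0x80000000 as the CPU sees it shows up at 0x40000000
             * on the bus for this hypothetical device. */
            printf("bus address: %#llx\n",
                   (unsigned long long)cpu_phys_to_bus(0x80000000ULL));
            return 0;
    }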
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index bbae919bceb4..74124b0d0d79 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -1,24 +1,11 @@
 #ifndef _ASM_FIXMAP_H
 #define _ASM_FIXMAP_H
 
-/*
- * Nothing too fancy for now.
- *
- * On ARM we already have well known fixed virtual addresses imposed by
- * the architecture such as the vector page which is located at 0xffff0000,
- * therefore a second level page table is already allocated covering
- * 0xfff00000 upwards.
- *
- * The cache flushing code in proc-xscale.S uses the virtual area between
- * 0xfffe0000 and 0xfffeffff.
- */
-
-#define FIXADDR_START 0xfff00000UL
-#define FIXADDR_TOP   0xfffe0000UL
+#define FIXADDR_START 0xffc00000UL
+#define FIXADDR_TOP   0xffe00000UL
 #define FIXADDR_SIZE  (FIXADDR_TOP - FIXADDR_START)
 
-#define FIX_KMAP_BEGIN 0
-#define FIX_KMAP_END   (FIXADDR_SIZE >> PAGE_SHIFT)
+#define FIX_KMAP_NR_PTES (FIXADDR_SIZE >> PAGE_SHIFT)
 
 #define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT))
 #define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT)
@@ -27,7 +14,7 @@ extern void __this_fixmap_does_not_exist(void);
 
 static inline unsigned long fix_to_virt(const unsigned int idx)
 {
-        if (idx >= FIX_KMAP_END)
+        if (idx >= FIX_KMAP_NR_PTES)
                 __this_fixmap_does_not_exist();
         return __fix_to_virt(idx);
 }
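The new window is 2 MiB (0xffc00000 to 0xffe00000), so with 4 KiB pages FIX_KMAP_NR_PTES works out to 512 slots. A quick compile-time check of that arithmetic (illustrative only, using local copies of the constants and an assumed PAGE_SHIFT of 12):

    /* Sanity check of the new fixmap window size; values copied from the
     * hunk above, 4 KiB pages assumed. */
    #define EX_FIXADDR_START  0xffc00000UL
    #define EX_FIXADDR_TOP    0xffe00000UL
    #define EX_PAGE_SHIFT     12

    _Static_assert(((EX_FIXADDR_TOP - EX_FIXADDR_START) >> EX_PAGE_SHIFT) == 512,
                   "a 2 MiB fixmap window holds 512 4 KiB slots");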
diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h
index 6b70f1b46a6e..04e18b656659 100644
--- a/arch/arm/include/asm/glue-df.h
+++ b/arch/arm/include/asm/glue-df.h
@@ -31,14 +31,6 @@
 #undef CPU_DABORT_HANDLER
 #undef MULTI_DABORT
 
-#if defined(CONFIG_CPU_ARM710)
-# ifdef CPU_DABORT_HANDLER
-#  define MULTI_DABORT 1
-# else
-#  define CPU_DABORT_HANDLER cpu_arm7_data_abort
-# endif
-#endif
-
 #ifdef CONFIG_CPU_ABRT_EV4
 # ifdef CPU_DABORT_HANDLER
 #  define MULTI_DABORT 1
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 6795ff743b3d..3a5ec1c25659 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -26,8 +26,8 @@
 #define L2X0_CACHE_TYPE 0x004
 #define L2X0_CTRL 0x100
 #define L2X0_AUX_CTRL 0x104
-#define L2X0_TAG_LATENCY_CTRL 0x108
-#define L2X0_DATA_LATENCY_CTRL 0x10C
+#define L310_TAG_LATENCY_CTRL 0x108
+#define L310_DATA_LATENCY_CTRL 0x10C
 #define L2X0_EVENT_CNT_CTRL 0x200
 #define L2X0_EVENT_CNT1_CFG 0x204
 #define L2X0_EVENT_CNT0_CFG 0x208
@@ -54,53 +54,93 @@
 #define L2X0_LOCKDOWN_WAY_D_BASE 0x900
 #define L2X0_LOCKDOWN_WAY_I_BASE 0x904
 #define L2X0_LOCKDOWN_STRIDE 0x08
-#define L2X0_ADDR_FILTER_START 0xC00
-#define L2X0_ADDR_FILTER_END 0xC04
+#define L310_ADDR_FILTER_START 0xC00
+#define L310_ADDR_FILTER_END 0xC04
 #define L2X0_TEST_OPERATION 0xF00
 #define L2X0_LINE_DATA 0xF10
 #define L2X0_LINE_TAG 0xF30
 #define L2X0_DEBUG_CTRL 0xF40
-#define L2X0_PREFETCH_CTRL 0xF60
-#define L2X0_POWER_CTRL 0xF80
-#define L2X0_DYNAMIC_CLK_GATING_EN (1 << 1)
-#define L2X0_STNDBY_MODE_EN (1 << 0)
+#define L310_PREFETCH_CTRL 0xF60
+#define L310_POWER_CTRL 0xF80
+#define L310_DYNAMIC_CLK_GATING_EN (1 << 1)
+#define L310_STNDBY_MODE_EN (1 << 0)
 
 /* Registers shifts and masks */
 #define L2X0_CACHE_ID_PART_MASK (0xf << 6)
 #define L2X0_CACHE_ID_PART_L210 (1 << 6)
+#define L2X0_CACHE_ID_PART_L220 (2 << 6)
 #define L2X0_CACHE_ID_PART_L310 (3 << 6)
 #define L2X0_CACHE_ID_RTL_MASK 0x3f
-#define L2X0_CACHE_ID_RTL_R0P0 0x0
-#define L2X0_CACHE_ID_RTL_R1P0 0x2
-#define L2X0_CACHE_ID_RTL_R2P0 0x4
-#define L2X0_CACHE_ID_RTL_R3P0 0x5
-#define L2X0_CACHE_ID_RTL_R3P1 0x6
-#define L2X0_CACHE_ID_RTL_R3P2 0x8
+#define L210_CACHE_ID_RTL_R0P2_02 0x00
+#define L210_CACHE_ID_RTL_R0P1 0x01
+#define L210_CACHE_ID_RTL_R0P2_01 0x02
+#define L210_CACHE_ID_RTL_R0P3 0x03
+#define L210_CACHE_ID_RTL_R0P4 0x0b
+#define L210_CACHE_ID_RTL_R0P5 0x0f
+#define L220_CACHE_ID_RTL_R1P7_01REL0 0x06
+#define L310_CACHE_ID_RTL_R0P0 0x00
+#define L310_CACHE_ID_RTL_R1P0 0x02
+#define L310_CACHE_ID_RTL_R2P0 0x04
+#define L310_CACHE_ID_RTL_R3P0 0x05
+#define L310_CACHE_ID_RTL_R3P1 0x06
+#define L310_CACHE_ID_RTL_R3P1_50REL0 0x07
+#define L310_CACHE_ID_RTL_R3P2 0x08
+#define L310_CACHE_ID_RTL_R3P3 0x09
 
-#define L2X0_AUX_CTRL_MASK 0xc0000fff
+/* L2C auxiliary control register - bits common to L2C-210/220/310 */
+#define L2C_AUX_CTRL_WAY_SIZE_SHIFT 17
+#define L2C_AUX_CTRL_WAY_SIZE_MASK (7 << 17)
+#define L2C_AUX_CTRL_WAY_SIZE(n) ((n) << 17)
+#define L2C_AUX_CTRL_EVTMON_ENABLE BIT(20)
+#define L2C_AUX_CTRL_PARITY_ENABLE BIT(21)
+#define L2C_AUX_CTRL_SHARED_OVERRIDE BIT(22)
+/* L2C-210/220 common bits */
 #define L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT 0
-#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK 0x7
+#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK (7 << 0)
 #define L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT 3
-#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (0x7 << 3)
+#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (7 << 3)
 #define L2X0_AUX_CTRL_TAG_LATENCY_SHIFT 6
-#define L2X0_AUX_CTRL_TAG_LATENCY_MASK (0x7 << 6)
+#define L2X0_AUX_CTRL_TAG_LATENCY_MASK (7 << 6)
 #define L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT 9
-#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (0x7 << 9)
-#define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT 16
-#define L2X0_AUX_CTRL_WAY_SIZE_SHIFT 17
-#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x7 << 17)
-#define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT 22
-#define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT 26
-#define L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT 27
-#define L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT 28
-#define L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT 29
-#define L2X0_AUX_CTRL_EARLY_BRESP_SHIFT 30
+#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (7 << 9)
+#define L2X0_AUX_CTRL_ASSOC_SHIFT 13
+#define L2X0_AUX_CTRL_ASSOC_MASK (15 << 13)
+/* L2C-210 specific bits */
+#define L210_AUX_CTRL_WRAP_DISABLE BIT(12)
+#define L210_AUX_CTRL_WA_OVERRIDE BIT(23)
+#define L210_AUX_CTRL_EXCLUSIVE_ABORT BIT(24)
+/* L2C-220 specific bits */
+#define L220_AUX_CTRL_EXCLUSIVE_CACHE BIT(12)
+#define L220_AUX_CTRL_FWA_SHIFT 23
+#define L220_AUX_CTRL_FWA_MASK (3 << 23)
+#define L220_AUX_CTRL_NS_LOCKDOWN BIT(26)
+#define L220_AUX_CTRL_NS_INT_CTRL BIT(27)
+/* L2C-310 specific bits */
+#define L310_AUX_CTRL_FULL_LINE_ZERO BIT(0)     /* R2P0+ */
+#define L310_AUX_CTRL_HIGHPRIO_SO_DEV BIT(10)   /* R2P0+ */
+#define L310_AUX_CTRL_STORE_LIMITATION BIT(11)  /* R2P0+ */
+#define L310_AUX_CTRL_EXCLUSIVE_CACHE BIT(12)
+#define L310_AUX_CTRL_ASSOCIATIVITY_16 BIT(16)
+#define L310_AUX_CTRL_CACHE_REPLACE_RR BIT(25)  /* R2P0+ */
+#define L310_AUX_CTRL_NS_LOCKDOWN BIT(26)
+#define L310_AUX_CTRL_NS_INT_CTRL BIT(27)
+#define L310_AUX_CTRL_DATA_PREFETCH BIT(28)
+#define L310_AUX_CTRL_INSTR_PREFETCH BIT(29)
+#define L310_AUX_CTRL_EARLY_BRESP BIT(30)       /* R2P0+ */
 
-#define L2X0_LATENCY_CTRL_SETUP_SHIFT 0
-#define L2X0_LATENCY_CTRL_RD_SHIFT 4
-#define L2X0_LATENCY_CTRL_WR_SHIFT 8
+#define L310_LATENCY_CTRL_SETUP(n) ((n) << 0)
+#define L310_LATENCY_CTRL_RD(n) ((n) << 4)
+#define L310_LATENCY_CTRL_WR(n) ((n) << 8)
 
-#define L2X0_ADDR_FILTER_EN 1
+#define L310_ADDR_FILTER_EN 1
+
+#define L310_PREFETCH_CTRL_OFFSET_MASK 0x1f
+#define L310_PREFETCH_CTRL_DBL_LINEFILL_INCR BIT(23)
+#define L310_PREFETCH_CTRL_PREFETCH_DROP BIT(24)
+#define L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP BIT(27)
+#define L310_PREFETCH_CTRL_DATA_PREFETCH BIT(28)
+#define L310_PREFETCH_CTRL_INSTR_PREFETCH BIT(29)
+#define L310_PREFETCH_CTRL_DBL_LINEFILL BIT(30)
 
 #define L2X0_CTRL_EN 1
 
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 91b99abe7a95..535579511ed0 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -18,6 +18,7 @@
 } while (0)
 
 extern pte_t *pkmap_page_table;
+extern pte_t *fixmap_page_table;
 
 extern void *kmap_high(struct page *page);
 extern void kunmap_high(struct page *page);
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 8aa4cca74501..3d23418cbddd 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -179,6 +179,12 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
 /* PCI fixed i/o mapping */
 #define PCI_IO_VIRT_BASE 0xfee00000
 
+#if defined(CONFIG_PCI)
+void pci_ioremap_set_mem_type(int mem_type);
+#else
+static inline void pci_ioremap_set_mem_type(int mem_type) {}
+#endif
+
 extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr);
 
 /*
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 17a3fa2979e8..060a75e99263 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -14,7 +14,6 @@
 #include <linux/reboot.h>
 
 struct tag;
-struct meminfo;
 struct pt_regs;
 struct smp_operations;
 #ifdef CONFIG_SMP
@@ -45,10 +44,12 @@ struct machine_desc {
         unsigned char reserve_lp1 :1;   /* never has lp1 */
         unsigned char reserve_lp2 :1;   /* never has lp2 */
         enum reboot_mode reboot_mode;   /* default restart mode */
+        unsigned l2c_aux_val;           /* L2 cache aux value */
+        unsigned l2c_aux_mask;          /* L2 cache aux mask */
+        void (*l2c_write_sec)(unsigned long, unsigned);
         struct smp_operations *smp;     /* SMP operations */
         bool (*smp_init)(void);
-        void (*fixup)(struct tag *, char **,
-                      struct meminfo *);
+        void (*fixup)(struct tag *, char **);
         void (*init_meminfo)(void);
         void (*reserve)(void);          /* reserve mem blocks */
         void (*map_io)(void);           /* IO mapping function */
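The three new machine_desc fields let a board request L2C auxiliary-control bits declaratively instead of programming the controller itself; the core cache code applies (aux & l2c_aux_mask) | l2c_aux_val when it brings the cache up. A hedged sketch of a hypothetical DT board file using them (the board name, compatible string and chosen bits are made up for illustration):

    /* Hypothetical board file fragment -- names and values are examples only. */
    #include <asm/mach/arch.h>
    #include <asm/hardware/cache-l2x0.h>

    static const char *const example_board_dt_compat[] = {
            "example,board",        /* made-up compatible string */
            NULL,
    };

    DT_MACHINE_START(EXAMPLE_BOARD, "Example board (Device Tree)")
            /* keep all existing aux bits (~0 mask) and OR in prefetch enables */
            .l2c_aux_val    = L310_AUX_CTRL_DATA_PREFETCH |
                              L310_AUX_CTRL_INSTR_PREFETCH,
            .l2c_aux_mask   = ~0,
            .dt_compat      = example_board_dt_compat,
            /* ... .map_io, .init_machine, etc. ... */
    MACHINE_END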
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index a5ff410dcdb6..d9702eb0b02b 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -98,14 +98,14 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
  * previously in which case the caller should take appropriate action.
  *
  * On success, the CPU is not guaranteed to be truly halted until
- * mcpm_cpu_power_down_finish() subsequently returns non-zero for the
+ * mcpm_wait_for_cpu_powerdown() subsequently returns non-zero for the
  * specified cpu. Until then, other CPUs should make sure they do not
  * trash memory the target CPU might be executing/accessing.
  */
 void mcpm_cpu_power_down(void);
 
 /**
- * mcpm_cpu_power_down_finish - wait for a specified CPU to halt, and
+ * mcpm_wait_for_cpu_powerdown - wait for a specified CPU to halt, and
  *      make sure it is powered off
  *
  * @cpu: CPU number within given cluster
@@ -127,7 +127,7 @@ void mcpm_cpu_power_down(void);
  * - zero if the CPU is in a safely parked state
  * - nonzero otherwise (e.g., timeout)
  */
-int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster);
+int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster);
 
 /**
  * mcpm_cpu_suspend - bring the calling CPU in a suspended state
@@ -171,7 +171,7 @@ int mcpm_cpu_powered_up(void);
 struct mcpm_platform_ops {
         int (*power_up)(unsigned int cpu, unsigned int cluster);
         void (*power_down)(void);
-        int (*power_down_finish)(unsigned int cpu, unsigned int cluster);
+        int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);
         void (*suspend)(u64);
         void (*powered_up)(void);
 };
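For MCPM backends the rename is mechanical: the .power_down_finish slot becomes .wait_for_powerdown with the same signature and semantics. A hedged sketch of what a hypothetical platform's ops table looks like after this series (the my_pm_* functions are placeholders, not code from the tree):

    /* Placeholder MCPM backend, for illustration only. */
    #include <asm/mcpm.h>

    static int my_pm_power_up(unsigned int cpu, unsigned int cluster)
    {
            /* switch on the power domain for cpu/cluster here */
            return 0;
    }

    static void my_pm_power_down(void)
    {
            /* flush caches, exit coherency, enter WFI here */
    }

    static int my_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
    {
            /* poll the power controller until cpu/cluster is really off */
            return 0;
    }

    static const struct mcpm_platform_ops my_pm_ops = {
            .power_up           = my_pm_power_up,
            .power_down         = my_pm_power_down,
            /* was .power_down_finish before this rename */
            .wait_for_powerdown = my_pm_wait_for_powerdown,
    };

    /* registered during init with mcpm_platform_ops_register(&my_pm_ops) */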
diff --git a/arch/arm/include/asm/memblock.h b/arch/arm/include/asm/memblock.h
index c2f5102ae659..bf47a6c110a2 100644
--- a/arch/arm/include/asm/memblock.h
+++ b/arch/arm/include/asm/memblock.h
@@ -1,10 +1,9 @@
 #ifndef _ASM_ARM_MEMBLOCK_H
 #define _ASM_ARM_MEMBLOCK_H
 
-struct meminfo;
 struct machine_desc;
 
-void arm_memblock_init(struct meminfo *, const struct machine_desc *);
+void arm_memblock_init(const struct machine_desc *);
 phys_addr_t arm_memblock_steal(phys_addr_t size, phys_addr_t align);
 
 #endif
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 02fa2558f662..2b751464d6ff 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -83,8 +83,6 @@
  */
 #define IOREMAP_MAX_ORDER 24
 
-#define CONSISTENT_END (0xffe00000UL)
-
 #else /* CONFIG_MMU */
 
 /*
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index f94784f0e3a6..891a56b35bcf 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -28,53 +28,84 @@ struct outer_cache_fns {
         void (*clean_range)(unsigned long, unsigned long);
         void (*flush_range)(unsigned long, unsigned long);
         void (*flush_all)(void);
-        void (*inv_all)(void);
         void (*disable)(void);
 #ifdef CONFIG_OUTER_CACHE_SYNC
         void (*sync)(void);
 #endif
-        void (*set_debug)(unsigned long);
         void (*resume)(void);
+
+        /* This is an ARM L2C thing */
+        void (*write_sec)(unsigned long, unsigned);
 };
 
 extern struct outer_cache_fns outer_cache;
 
 #ifdef CONFIG_OUTER_CACHE
-
+/**
+ * outer_inv_range - invalidate range of outer cache lines
+ * @start: starting physical address, inclusive
+ * @end: end physical address, exclusive
+ */
 static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
 {
         if (outer_cache.inv_range)
                 outer_cache.inv_range(start, end);
 }
+
+/**
+ * outer_clean_range - clean dirty outer cache lines
+ * @start: starting physical address, inclusive
+ * @end: end physical address, exclusive
+ */
 static inline void outer_clean_range(phys_addr_t start, phys_addr_t end)
 {
         if (outer_cache.clean_range)
                 outer_cache.clean_range(start, end);
 }
+
+/**
+ * outer_flush_range - clean and invalidate outer cache lines
+ * @start: starting physical address, inclusive
+ * @end: end physical address, exclusive
+ */
 static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
 {
         if (outer_cache.flush_range)
                 outer_cache.flush_range(start, end);
 }
 
+/**
+ * outer_flush_all - clean and invalidate all cache lines in the outer cache
+ *
+ * Note: depending on implementation, this may not be atomic - it must
+ * only be called with interrupts disabled and no other active outer
+ * cache masters.
+ *
+ * It is intended that this function is only used by implementations
+ * needing to override the outer_cache.disable() method due to security.
+ * (Some implementations perform this as a clean followed by an invalidate.)
+ */
 static inline void outer_flush_all(void)
 {
         if (outer_cache.flush_all)
                 outer_cache.flush_all();
 }
 
-static inline void outer_inv_all(void)
-{
-        if (outer_cache.inv_all)
-                outer_cache.inv_all();
-}
-
-static inline void outer_disable(void)
-{
-        if (outer_cache.disable)
-                outer_cache.disable();
-}
+/**
+ * outer_disable - clean, invalidate and disable the outer cache
+ *
+ * Disable the outer cache, ensuring that any data contained in the outer
+ * cache is pushed out to lower levels of system memory. The note and
+ * conditions above concerning outer_flush_all() applies here.
+ */
+extern void outer_disable(void);
 
+/**
+ * outer_resume - restore the cache configuration and re-enable outer cache
+ *
+ * Restore any configuration that the cache had when previously enabled,
+ * and re-enable the outer cache.
+ */
 static inline void outer_resume(void)
 {
         if (outer_cache.resume)
@@ -90,13 +121,18 @@ static inline void outer_clean_range(phys_addr_t start, phys_addr_t end)
 static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
 { }
 static inline void outer_flush_all(void) { }
-static inline void outer_inv_all(void) { }
 static inline void outer_disable(void) { }
 static inline void outer_resume(void) { }
 
 #endif
 
 #ifdef CONFIG_OUTER_CACHE_SYNC
+/**
+ * outer_sync - perform a sync point for outer cache
+ *
+ * Ensure that all outer cache operations are complete and any store
+ * buffers are drained.
+ */
 static inline void outer_sync(void)
 {
         if (outer_cache.sync)
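The kernel-doc added above spells out that these helpers take physical addresses and that outer_flush_all()/outer_disable() must only run with interrupts off and no other masters active. A hedged usage sketch for the range helpers follows; normal drivers get this behaviour from the dma_map_*() API rather than calling the outer-cache functions directly, and the function and buffer here are purely illustrative:

    /* Illustrative only: the streaming DMA API normally does this for you. */
    #include <linux/types.h>
    #include <asm/cacheflush.h>
    #include <asm/memory.h>
    #include <asm/outercache.h>

    static void hand_buffer_to_device(void *buf, size_t len)
    {
            phys_addr_t phys = virt_to_phys(buf);

            /* push dirty lines out of the inner (L1) cache ... */
            __cpuc_flush_dcache_area(buf, len);
            /* ... then clean the outer (L2C) cache by physical address */
            outer_clean_range(phys, phys + len);
    }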
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index 8d6a089dfb76..e0adb9f1bf94 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -21,34 +21,6 @@
 #define __tagtable(tag, fn) \
 static const struct tagtable __tagtable_##fn __tag = { tag, fn }
 
-/*
- * Memory map description
- */
-#define NR_BANKS CONFIG_ARM_NR_BANKS
-
-struct membank {
-        phys_addr_t start;
-        phys_addr_t size;
-        unsigned int highmem;
-};
-
-struct meminfo {
-        int nr_banks;
-        struct membank bank[NR_BANKS];
-};
-
-extern struct meminfo meminfo;
-
-#define for_each_bank(iter,mi) \
-        for (iter = 0; iter < (mi)->nr_banks; iter++)
-
-#define bank_pfn_start(bank) __phys_to_pfn((bank)->start)
-#define bank_pfn_end(bank) __phys_to_pfn((bank)->start + (bank)->size)
-#define bank_pfn_size(bank) ((bank)->size >> PAGE_SHIFT)
-#define bank_phys_start(bank) (bank)->start
-#define bank_phys_end(bank) ((bank)->start + (bank)->size)
-#define bank_phys_size(bank) (bank)->size
-
 extern int arm_add_memory(u64 start, u64 size);
 extern void early_print(const char *str, ...);
 extern void dump_machine_table(void);
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 040619c32d68..38ddd9f83d0e 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_ARTHUR)		+= arthur.o
 obj-$(CONFIG_ISA_DMA)           += dma-isa.o
 obj-$(CONFIG_PCI)               += bios32.o isa.o
 obj-$(CONFIG_ARM_CPU_SUSPEND)   += sleep.o suspend.o
+obj-$(CONFIG_HIBERNATION)       += hibernate.o
 obj-$(CONFIG_SMP)               += smp.o
 ifdef CONFIG_MMU
 obj-$(CONFIG_SMP)               += smp_tlb.o
diff --git a/arch/arm/kernel/atags_parse.c b/arch/arm/kernel/atags_parse.c
index 8c14de8180c0..7807ef58a2ab 100644
--- a/arch/arm/kernel/atags_parse.c
+++ b/arch/arm/kernel/atags_parse.c
@@ -22,6 +22,7 @@
 #include <linux/fs.h>
 #include <linux/root_dev.h>
 #include <linux/screen_info.h>
+#include <linux/memblock.h>
 
 #include <asm/setup.h>
 #include <asm/system_info.h>
@@ -222,10 +223,10 @@ setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
         }
 
         if (mdesc->fixup)
-                mdesc->fixup(tags, &from, &meminfo);
+                mdesc->fixup(tags, &from);
 
         if (tags->hdr.tag == ATAG_CORE) {
-                if (meminfo.nr_banks != 0)
+                if (memblock_phys_mem_size())
                         squash_mem_tags(tags);
                 save_atags(tags);
                 parse_tags(tags);
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index ea9ce92a4b52..e94a157ddff1 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -27,10 +27,6 @@
 #include <asm/mach/arch.h>
 #include <asm/mach-types.h>
 
-void __init early_init_dt_add_memory_arch(u64 base, u64 size)
-{
-        arm_add_memory(base, size);
-}
 
 #ifdef CONFIG_SMP
 extern struct of_cpu_method __cpu_method_of_table[];
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 1879e8dd2acc..52a949a8077d 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -344,7 +344,7 @@ ENDPROC(__pabt_svc)
         @
         @ Enable the alignment trap while in kernel mode
         @
-        alignment_trap r0
+        alignment_trap r0, .LCcralign
 
         @
         @ Clear FP to mark the first stack frame
@@ -413,6 +413,11 @@ __und_usr:
         @
         adr r9, BSYM(ret_from_exception)
 
+        @ IRQs must be enabled before attempting to read the instruction from
+        @ user space since that could cause a page/translation fault if the
+        @ page table was modified by another CPU.
+        enable_irq
+
         tst r3, #PSR_T_BIT                      @ Thumb mode?
         bne __und_usr_thumb
         sub r4, r2, #4                          @ ARM instr at LR - 4
@@ -484,7 +489,8 @@ ENDPROC(__und_usr)
  */
         .pushsection .fixup, "ax"
         .align 2
-4:      mov pc, r9
+4:      str r4, [sp, #S_PC]                     @ retry current instruction
+        mov pc, r9
         .popsection
         .pushsection __ex_table,"a"
         .long 1b, 4b
@@ -517,7 +523,7 @@ ENDPROC(__und_usr)
  * r9  = normal "successful" return address
  * r10 = this threads thread_info structure
  * lr  = unrecognised instruction return address
- * IRQs disabled, FIQs enabled.
+ * IRQs enabled, FIQs enabled.
  */
         @
         @ Fall-through from Thumb-2 __und_usr
@@ -624,7 +630,6 @@ call_fpe:
 #endif
 
 do_fpe:
-        enable_irq
         ldr r4, .LCfp
         add r10, r10, #TI_FPSTATE               @ r10 = workspace
         ldr pc, [r4]                            @ Call FP module USR entry point
@@ -652,8 +657,7 @@ __und_usr_fault_32:
         b 1f
 __und_usr_fault_16:
         mov r1, #2
-1:      enable_irq
-        mov r0, sp
+1:      mov r0, sp
         adr lr, BSYM(ret_from_exception)
         b __und_fault
 ENDPROC(__und_usr_fault_32)
@@ -1143,11 +1147,8 @@ __vectors_start:
         .data
 
         .globl cr_alignment
-        .globl cr_no_alignment
 cr_alignment:
         .space 4
-cr_no_alignment:
-        .space 4
 
 #ifdef CONFIG_MULTI_IRQ_HANDLER
         .globl handle_arch_irq
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index a2dcafdf1bc8..7139d4a7dea7 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -365,13 +365,7 @@ ENTRY(vector_swi)
         str r0, [sp, #S_OLD_R0]                 @ Save OLD_R0
 #endif
         zero_fp
-
-#ifdef CONFIG_ALIGNMENT_TRAP
-        ldr ip, __cr_alignment
-        ldr ip, [ip]
-        mcr p15, 0, ip, c1, c0                  @ update control register
-#endif
-
+        alignment_trap ip, __cr_alignment
         enable_irq
         ct_user_exit
         get_thread_info tsk
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index efb208de75ec..5d702f8900b1 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -37,9 +37,9 @@
 #endif
         .endm
 
-        .macro alignment_trap, rtemp
+        .macro alignment_trap, rtemp, label
 #ifdef CONFIG_ALIGNMENT_TRAP
-        ldr \rtemp, .LCcralign
+        ldr \rtemp, \label
         ldr \rtemp, [\rtemp]
         mcr p15, 0, \rtemp, c1, c0
 #endif
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c index c108ddcb9ba4..af9a8a927a4e 100644 --- a/arch/arm/kernel/ftrace.c +++ b/arch/arm/kernel/ftrace.c | |||
@@ -14,6 +14,7 @@ | |||
14 | 14 | ||
15 | #include <linux/ftrace.h> | 15 | #include <linux/ftrace.h> |
16 | #include <linux/uaccess.h> | 16 | #include <linux/uaccess.h> |
17 | #include <linux/module.h> | ||
17 | 18 | ||
18 | #include <asm/cacheflush.h> | 19 | #include <asm/cacheflush.h> |
19 | #include <asm/opcodes.h> | 20 | #include <asm/opcodes.h> |
@@ -63,6 +64,18 @@ static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr) | |||
63 | } | 64 | } |
64 | #endif | 65 | #endif |
65 | 66 | ||
67 | int ftrace_arch_code_modify_prepare(void) | ||
68 | { | ||
69 | set_all_modules_text_rw(); | ||
70 | return 0; | ||
71 | } | ||
72 | |||
73 | int ftrace_arch_code_modify_post_process(void) | ||
74 | { | ||
75 | set_all_modules_text_ro(); | ||
76 | return 0; | ||
77 | } | ||
78 | |||
66 | static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr) | 79 | static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr) |
67 | { | 80 | { |
68 | return arm_gen_branch_link(pc, addr); | 81 | return arm_gen_branch_link(pc, addr); |
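The two hooks added above use set_all_modules_text_rw()/set_all_modules_text_ro() to temporarily lift the write protection on module text while call sites are patched. A minimal sketch of the expected bracketing, assuming a simplified core-side wrapper (the function below is illustrative, not the actual ftrace core code):

    static int example_patch_call_sites(void)
    {
        int ret;

        ret = ftrace_arch_code_modify_prepare();        /* module text becomes RW */
        if (ret)
            return ret;

        /* ... rewrite the mcount call sites here ... */

        return ftrace_arch_code_modify_post_process();  /* module text back to RO */
    }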
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S index c96ecacb2021..572a38335c96 100644 --- a/arch/arm/kernel/head-common.S +++ b/arch/arm/kernel/head-common.S | |||
@@ -99,8 +99,7 @@ __mmap_switched: | |||
99 | str r1, [r5] @ Save machine type | 99 | str r1, [r5] @ Save machine type |
100 | str r2, [r6] @ Save atags pointer | 100 | str r2, [r6] @ Save atags pointer |
101 | cmp r7, #0 | 101 | cmp r7, #0 |
102 | bicne r4, r0, #CR_A @ Clear 'A' bit | 102 | strne r0, [r7] @ Save control register values |
103 | stmneia r7, {r0, r4} @ Save control register values | ||
104 | b start_kernel | 103 | b start_kernel |
105 | ENDPROC(__mmap_switched) | 104 | ENDPROC(__mmap_switched) |
106 | 105 | ||
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 591d6e4a6492..2c35f0ff2fdc 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S | |||
@@ -475,7 +475,7 @@ ENDPROC(__turn_mmu_on) | |||
475 | 475 | ||
476 | 476 | ||
477 | #ifdef CONFIG_SMP_ON_UP | 477 | #ifdef CONFIG_SMP_ON_UP |
478 | __INIT | 478 | __HEAD |
479 | __fixup_smp: | 479 | __fixup_smp: |
480 | and r3, r9, #0x000f0000 @ architecture version | 480 | and r3, r9, #0x000f0000 @ architecture version |
481 | teq r3, #0x000f0000 @ CPU ID supported? | 481 | teq r3, #0x000f0000 @ CPU ID supported? |
diff --git a/arch/arm/kernel/hibernate.c b/arch/arm/kernel/hibernate.c new file mode 100644 index 000000000000..bb8b79648643 --- /dev/null +++ b/arch/arm/kernel/hibernate.c | |||
@@ -0,0 +1,107 @@ | |||
1 | /* | ||
2 | * Hibernation support specific for ARM | ||
3 | * | ||
4 | * Derived from work on ARM hibernation support by: | ||
5 | * | ||
6 | * Ubuntu project, hibernation support for mach-dove | ||
7 | * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu) | ||
8 | * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.) | ||
9 | * https://lkml.org/lkml/2010/6/18/4 | ||
10 | * https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html | ||
11 | * https://patchwork.kernel.org/patch/96442/ | ||
12 | * | ||
13 | * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> | ||
14 | * | ||
15 | * License terms: GNU General Public License (GPL) version 2 | ||
16 | */ | ||
17 | |||
18 | #include <linux/mm.h> | ||
19 | #include <linux/suspend.h> | ||
20 | #include <asm/system_misc.h> | ||
21 | #include <asm/idmap.h> | ||
22 | #include <asm/suspend.h> | ||
23 | #include <asm/memory.h> | ||
24 | |||
25 | extern const void __nosave_begin, __nosave_end; | ||
26 | |||
27 | int pfn_is_nosave(unsigned long pfn) | ||
28 | { | ||
29 | unsigned long nosave_begin_pfn = virt_to_pfn(&__nosave_begin); | ||
30 | unsigned long nosave_end_pfn = virt_to_pfn(&__nosave_end - 1); | ||
31 | |||
32 | return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn); | ||
33 | } | ||
34 | |||
35 | void notrace save_processor_state(void) | ||
36 | { | ||
37 | WARN_ON(num_online_cpus() != 1); | ||
38 | local_fiq_disable(); | ||
39 | } | ||
40 | |||
41 | void notrace restore_processor_state(void) | ||
42 | { | ||
43 | local_fiq_enable(); | ||
44 | } | ||
45 | |||
46 | /* | ||
47 | * Snapshot kernel memory and reset the system. | ||
48 | * | ||
49 | * swsusp_save() is executed in the suspend finisher so that the CPU | ||
50 | * context pointer and memory are part of the saved image, which is | ||
51 | * required by the resume kernel image to restart execution from | ||
52 | * swsusp_arch_suspend(). | ||
53 | * | ||
54 | * soft_restart is not technically needed, but is used to get success | ||
55 | * returned from cpu_suspend. | ||
56 | * | ||
57 | * When soft reboot completes, the hibernation snapshot is written out. | ||
58 | */ | ||
59 | static int notrace arch_save_image(unsigned long unused) | ||
60 | { | ||
61 | int ret; | ||
62 | |||
63 | ret = swsusp_save(); | ||
64 | if (ret == 0) | ||
65 | soft_restart(virt_to_phys(cpu_resume)); | ||
66 | return ret; | ||
67 | } | ||
68 | |||
69 | /* | ||
70 | * Save the current CPU state before suspend / poweroff. | ||
71 | */ | ||
72 | int notrace swsusp_arch_suspend(void) | ||
73 | { | ||
74 | return cpu_suspend(0, arch_save_image); | ||
75 | } | ||
76 | |||
77 | /* | ||
78 | * Restore page contents for physical pages that were in use while loading | ||
79 | * the hibernation image. Switch to idmap_pgd so the physical page tables | ||
80 | * are overwritten with the same contents. | ||
81 | */ | ||
82 | static void notrace arch_restore_image(void *unused) | ||
83 | { | ||
84 | struct pbe *pbe; | ||
85 | |||
86 | cpu_switch_mm(idmap_pgd, &init_mm); | ||
87 | for (pbe = restore_pblist; pbe; pbe = pbe->next) | ||
88 | copy_page(pbe->orig_address, pbe->address); | ||
89 | |||
90 | soft_restart(virt_to_phys(cpu_resume)); | ||
91 | } | ||
92 | |||
93 | static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata; | ||
94 | |||
95 | /* | ||
96 | * Resume from the hibernation image. | ||
97 | * Due to the kernel heap / data restore, stack contents change underneath | ||
98 | * and that would make function calls impossible; switch to a temporary | ||
99 | * stack within the nosave region to avoid that problem. | ||
100 | */ | ||
101 | int swsusp_arch_resume(void) | ||
102 | { | ||
103 | extern void call_with_stack(void (*fn)(void *), void *arg, void *sp); | ||
104 | call_with_stack(arch_restore_image, 0, | ||
105 | resume_stack + ARRAY_SIZE(resume_stack)); | ||
106 | return 0; | ||
107 | } | ||
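Taken together, the comments in this new file describe the following ordering. The outline below is only a reading aid for the code above, with assumptions about how the generic hibernation core drives the two arch entry points:

    /*
     * Image creation (assumed to be invoked by the generic PM core):
     *   swsusp_arch_suspend()
     *     -> cpu_suspend(0, arch_save_image)        save CPU context
     *          -> swsusp_save()                     snapshot memory
     *          -> soft_restart(virt_to_phys(cpu_resume))
     *                                               makes cpu_suspend() return
     *                                               success so the image is written
     *
     * Image restore:
     *   swsusp_arch_resume()
     *     -> call_with_stack(arch_restore_image, 0, top of resume_stack)
     *          -> cpu_switch_mm(idmap_pgd, &init_mm)
     *          -> copy_page() for every pbe on restore_pblist
     *          -> soft_restart(virt_to_phys(cpu_resume))   enter the restored kernel
     */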
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 9723d17b8f38..2c4257604513 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/proc_fs.h> | 37 | #include <linux/proc_fs.h> |
38 | #include <linux/export.h> | 38 | #include <linux/export.h> |
39 | 39 | ||
40 | #include <asm/hardware/cache-l2x0.h> | ||
40 | #include <asm/exception.h> | 41 | #include <asm/exception.h> |
41 | #include <asm/mach/arch.h> | 42 | #include <asm/mach/arch.h> |
42 | #include <asm/mach/irq.h> | 43 | #include <asm/mach/irq.h> |
@@ -115,10 +116,21 @@ EXPORT_SYMBOL_GPL(set_irq_flags); | |||
115 | 116 | ||
116 | void __init init_IRQ(void) | 117 | void __init init_IRQ(void) |
117 | { | 118 | { |
119 | int ret; | ||
120 | |||
118 | if (IS_ENABLED(CONFIG_OF) && !machine_desc->init_irq) | 121 | if (IS_ENABLED(CONFIG_OF) && !machine_desc->init_irq) |
119 | irqchip_init(); | 122 | irqchip_init(); |
120 | else | 123 | else |
121 | machine_desc->init_irq(); | 124 | machine_desc->init_irq(); |
125 | |||
126 | if (IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_CACHE_L2X0) && | ||
127 | (machine_desc->l2c_aux_mask || machine_desc->l2c_aux_val)) { | ||
128 | outer_cache.write_sec = machine_desc->l2c_write_sec; | ||
129 | ret = l2x0_of_init(machine_desc->l2c_aux_val, | ||
130 | machine_desc->l2c_aux_mask); | ||
131 | if (ret) | ||
132 | pr_err("L2C: failed to init: %d\n", ret); | ||
133 | } | ||
122 | } | 134 | } |
123 | 135 | ||
124 | #ifdef CONFIG_MULTI_IRQ_HANDLER | 136 | #ifdef CONFIG_MULTI_IRQ_HANDLER |
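With this hunk, DT platforms can drop their open-coded l2x0_of_init() calls: init_IRQ() performs the L2C setup whenever the machine descriptor supplies an auxiliary-control value/mask (and, optionally, a secure write hook). A minimal sketch of the new-style descriptor; the board name and compatible string are invented for illustration:

    static const char *const example_dt_compat[] = { "vendor,example-soc", NULL };

    DT_MACHINE_START(EXAMPLE_DT, "Example SoC (Device Tree)")
        /* pass the L2C-310 AUX_CTRL bits through unmodified */
        .l2c_aux_val  = 0,
        .l2c_aux_mask = ~0,
        .dt_compat    = example_dt_compat,
    MACHINE_END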
diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S index 2452dd1bef53..a5599cfc43cb 100644 --- a/arch/arm/kernel/iwmmxt.S +++ b/arch/arm/kernel/iwmmxt.S | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <asm/ptrace.h> | 18 | #include <asm/ptrace.h> |
19 | #include <asm/thread_info.h> | 19 | #include <asm/thread_info.h> |
20 | #include <asm/asm-offsets.h> | 20 | #include <asm/asm-offsets.h> |
21 | #include <asm/assembler.h> | ||
21 | 22 | ||
22 | #if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B) | 23 | #if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B) |
23 | #define PJ4(code...) code | 24 | #define PJ4(code...) code |
@@ -65,17 +66,18 @@ | |||
65 | * r9 = ret_from_exception | 66 | * r9 = ret_from_exception |
66 | * lr = undefined instr exit | 67 | * lr = undefined instr exit |
67 | * | 68 | * |
68 | * called from prefetch exception handler with interrupts disabled | 69 | * called from prefetch exception handler with interrupts enabled |
69 | */ | 70 | */ |
70 | 71 | ||
71 | ENTRY(iwmmxt_task_enable) | 72 | ENTRY(iwmmxt_task_enable) |
73 | inc_preempt_count r10, r3 | ||
72 | 74 | ||
73 | XSC(mrc p15, 0, r2, c15, c1, 0) | 75 | XSC(mrc p15, 0, r2, c15, c1, 0) |
74 | PJ4(mrc p15, 0, r2, c1, c0, 2) | 76 | PJ4(mrc p15, 0, r2, c1, c0, 2) |
75 | @ CP0 and CP1 accessible? | 77 | @ CP0 and CP1 accessible? |
76 | XSC(tst r2, #0x3) | 78 | XSC(tst r2, #0x3) |
77 | PJ4(tst r2, #0xf) | 79 | PJ4(tst r2, #0xf) |
78 | movne pc, lr @ if so no business here | 80 | bne 4f @ if so no business here |
79 | @ enable access to CP0 and CP1 | 81 | @ enable access to CP0 and CP1 |
80 | XSC(orr r2, r2, #0x3) | 82 | XSC(orr r2, r2, #0x3) |
81 | XSC(mcr p15, 0, r2, c15, c1, 0) | 83 | XSC(mcr p15, 0, r2, c15, c1, 0) |
@@ -136,7 +138,7 @@ concan_dump: | |||
136 | wstrd wR15, [r1, #MMX_WR15] | 138 | wstrd wR15, [r1, #MMX_WR15] |
137 | 139 | ||
138 | 2: teq r0, #0 @ anything to load? | 140 | 2: teq r0, #0 @ anything to load? |
139 | moveq pc, lr | 141 | beq 3f |
140 | 142 | ||
141 | concan_load: | 143 | concan_load: |
142 | 144 | ||
@@ -169,8 +171,14 @@ concan_load: | |||
169 | @ clear CUP/MUP (only if r1 != 0) | 171 | @ clear CUP/MUP (only if r1 != 0) |
170 | teq r1, #0 | 172 | teq r1, #0 |
171 | mov r2, #0 | 173 | mov r2, #0 |
172 | moveq pc, lr | 174 | beq 3f |
173 | tmcr wCon, r2 | 175 | tmcr wCon, r2 |
176 | |||
177 | 3: | ||
178 | #ifdef CONFIG_PREEMPT_COUNT | ||
179 | get_thread_info r10 | ||
180 | #endif | ||
181 | 4: dec_preempt_count r10, r3 | ||
174 | mov pc, lr | 182 | mov pc, lr |
175 | 183 | ||
176 | /* | 184 | /* |
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index 51798d7854ac..a71ae1523620 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c | |||
@@ -221,6 +221,7 @@ static struct notifier_block cpu_pmu_hotplug_notifier = { | |||
221 | * PMU platform driver and devicetree bindings. | 221 | * PMU platform driver and devicetree bindings. |
222 | */ | 222 | */ |
223 | static struct of_device_id cpu_pmu_of_device_ids[] = { | 223 | static struct of_device_id cpu_pmu_of_device_ids[] = { |
224 | {.compatible = "arm,cortex-a17-pmu", .data = armv7_a17_pmu_init}, | ||
224 | {.compatible = "arm,cortex-a15-pmu", .data = armv7_a15_pmu_init}, | 225 | {.compatible = "arm,cortex-a15-pmu", .data = armv7_a15_pmu_init}, |
225 | {.compatible = "arm,cortex-a12-pmu", .data = armv7_a12_pmu_init}, | 226 | {.compatible = "arm,cortex-a12-pmu", .data = armv7_a12_pmu_init}, |
226 | {.compatible = "arm,cortex-a9-pmu", .data = armv7_a9_pmu_init}, | 227 | {.compatible = "arm,cortex-a9-pmu", .data = armv7_a9_pmu_init}, |
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index f4ef3981ed02..2037f7205987 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c | |||
@@ -1599,6 +1599,13 @@ static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu) | |||
1599 | return 0; | 1599 | return 0; |
1600 | } | 1600 | } |
1601 | 1601 | ||
1602 | static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu) | ||
1603 | { | ||
1604 | armv7_a12_pmu_init(cpu_pmu); | ||
1605 | cpu_pmu->name = "ARMv7 Cortex-A17"; | ||
1606 | return 0; | ||
1607 | } | ||
1608 | |||
1602 | /* | 1609 | /* |
1603 | * Krait Performance Monitor Region Event Selection Register (PMRESRn) | 1610 | * Krait Performance Monitor Region Event Selection Register (PMRESRn) |
1604 | * | 1611 | * |
@@ -2021,6 +2028,11 @@ static inline int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu) | |||
2021 | return -ENODEV; | 2028 | return -ENODEV; |
2022 | } | 2029 | } |
2023 | 2030 | ||
2031 | static inline int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu) | ||
2032 | { | ||
2033 | return -ENODEV; | ||
2034 | } | ||
2035 | |||
2024 | static inline int krait_pmu_init(struct arm_pmu *cpu_pmu) | 2036 | static inline int krait_pmu_init(struct arm_pmu *cpu_pmu) |
2025 | { | 2037 | { |
2026 | return -ENODEV; | 2038 | return -ENODEV; |
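The new Cortex-A17 entry reuses the Cortex-A12 event map and only overrides the reported name. A hedged sketch of how the .data pointer in cpu_pmu_of_device_ids[] is turned into an init call; the helper below is an assumption about the probing path, not a copy of it:

    static int example_init_pmu_from_node(struct device_node *node,
                                          struct arm_pmu *pmu)
    {
        const struct of_device_id *of_id;
        int (*init_fn)(struct arm_pmu *);

        of_id = of_match_node(cpu_pmu_of_device_ids, node);
        if (!of_id)
            return -ENODEV;

        init_fn = of_id->data;
        return init_fn(pmu);    /* e.g. armv7_a17_pmu_init() for "arm,cortex-a17-pmu" */
    }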
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 50e198c1e9c8..8a16ee5d8a95 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c | |||
@@ -72,6 +72,7 @@ static int __init fpe_setup(char *line) | |||
72 | __setup("fpe=", fpe_setup); | 72 | __setup("fpe=", fpe_setup); |
73 | #endif | 73 | #endif |
74 | 74 | ||
75 | extern void init_default_cache_policy(unsigned long); | ||
75 | extern void paging_init(const struct machine_desc *desc); | 76 | extern void paging_init(const struct machine_desc *desc); |
76 | extern void early_paging_init(const struct machine_desc *, | 77 | extern void early_paging_init(const struct machine_desc *, |
77 | struct proc_info_list *); | 78 | struct proc_info_list *); |
@@ -590,7 +591,7 @@ static void __init setup_processor(void) | |||
590 | 591 | ||
591 | pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n", | 592 | pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n", |
592 | cpu_name, read_cpuid_id(), read_cpuid_id() & 15, | 593 | cpu_name, read_cpuid_id(), read_cpuid_id() & 15, |
593 | proc_arch[cpu_architecture()], cr_alignment); | 594 | proc_arch[cpu_architecture()], get_cr()); |
594 | 595 | ||
595 | snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c", | 596 | snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c", |
596 | list->arch_name, ENDIANNESS); | 597 | list->arch_name, ENDIANNESS); |
@@ -603,7 +604,9 @@ static void __init setup_processor(void) | |||
603 | #ifndef CONFIG_ARM_THUMB | 604 | #ifndef CONFIG_ARM_THUMB |
604 | elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT); | 605 | elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT); |
605 | #endif | 606 | #endif |
606 | 607 | #ifdef CONFIG_MMU | |
608 | init_default_cache_policy(list->__cpu_mm_mmu_flags); | ||
609 | #endif | ||
607 | erratum_a15_798181_init(); | 610 | erratum_a15_798181_init(); |
608 | 611 | ||
609 | feat_v6_fixup(); | 612 | feat_v6_fixup(); |
@@ -628,15 +631,8 @@ void __init dump_machine_table(void) | |||
628 | 631 | ||
629 | int __init arm_add_memory(u64 start, u64 size) | 632 | int __init arm_add_memory(u64 start, u64 size) |
630 | { | 633 | { |
631 | struct membank *bank = &meminfo.bank[meminfo.nr_banks]; | ||
632 | u64 aligned_start; | 634 | u64 aligned_start; |
633 | 635 | ||
634 | if (meminfo.nr_banks >= NR_BANKS) { | ||
635 | pr_crit("NR_BANKS too low, ignoring memory at 0x%08llx\n", | ||
636 | (long long)start); | ||
637 | return -EINVAL; | ||
638 | } | ||
639 | |||
640 | /* | 636 | /* |
641 | * Ensure that start/size are aligned to a page boundary. | 637 | * Ensure that start/size are aligned to a page boundary. |
642 | * Size is appropriately rounded down, start is rounded up. | 638 | * Size is appropriately rounded down, start is rounded up. |
@@ -677,17 +673,17 @@ int __init arm_add_memory(u64 start, u64 size) | |||
677 | aligned_start = PHYS_OFFSET; | 673 | aligned_start = PHYS_OFFSET; |
678 | } | 674 | } |
679 | 675 | ||
680 | bank->start = aligned_start; | 676 | start = aligned_start; |
681 | bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1); | 677 | size = size & ~(phys_addr_t)(PAGE_SIZE - 1); |
682 | 678 | ||
683 | /* | 679 | /* |
684 | * Check whether this memory region has non-zero size or | 680 | * Check whether this memory region has non-zero size or |
685 | * invalid node number. | 681 | * invalid node number. |
686 | */ | 682 | */ |
687 | if (bank->size == 0) | 683 | if (size == 0) |
688 | return -EINVAL; | 684 | return -EINVAL; |
689 | 685 | ||
690 | meminfo.nr_banks++; | 686 | memblock_add(start, size); |
691 | return 0; | 687 | return 0; |
692 | } | 688 | } |
693 | 689 | ||
@@ -695,6 +691,7 @@ int __init arm_add_memory(u64 start, u64 size) | |||
695 | * Pick out the memory size. We look for mem=size@start, | 691 | * Pick out the memory size. We look for mem=size@start, |
696 | * where start and size are "size[KkMm]" | 692 | * where start and size are "size[KkMm]" |
697 | */ | 693 | */ |
694 | |||
698 | static int __init early_mem(char *p) | 695 | static int __init early_mem(char *p) |
699 | { | 696 | { |
700 | static int usermem __initdata = 0; | 697 | static int usermem __initdata = 0; |
@@ -709,7 +706,8 @@ static int __init early_mem(char *p) | |||
709 | */ | 706 | */ |
710 | if (usermem == 0) { | 707 | if (usermem == 0) { |
711 | usermem = 1; | 708 | usermem = 1; |
712 | meminfo.nr_banks = 0; | 709 | memblock_remove(memblock_start_of_DRAM(), |
710 | memblock_end_of_DRAM() - memblock_start_of_DRAM()); | ||
713 | } | 711 | } |
714 | 712 | ||
715 | start = PHYS_OFFSET; | 713 | start = PHYS_OFFSET; |
@@ -854,13 +852,6 @@ static void __init reserve_crashkernel(void) | |||
854 | static inline void reserve_crashkernel(void) {} | 852 | static inline void reserve_crashkernel(void) {} |
855 | #endif /* CONFIG_KEXEC */ | 853 | #endif /* CONFIG_KEXEC */ |
856 | 854 | ||
857 | static int __init meminfo_cmp(const void *_a, const void *_b) | ||
858 | { | ||
859 | const struct membank *a = _a, *b = _b; | ||
860 | long cmp = bank_pfn_start(a) - bank_pfn_start(b); | ||
861 | return cmp < 0 ? -1 : cmp > 0 ? 1 : 0; | ||
862 | } | ||
863 | |||
864 | void __init hyp_mode_check(void) | 855 | void __init hyp_mode_check(void) |
865 | { | 856 | { |
866 | #ifdef CONFIG_ARM_VIRT_EXT | 857 | #ifdef CONFIG_ARM_VIRT_EXT |
@@ -903,12 +894,10 @@ void __init setup_arch(char **cmdline_p) | |||
903 | 894 | ||
904 | parse_early_param(); | 895 | parse_early_param(); |
905 | 896 | ||
906 | sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL); | ||
907 | |||
908 | early_paging_init(mdesc, lookup_processor_type(read_cpuid_id())); | 897 | early_paging_init(mdesc, lookup_processor_type(read_cpuid_id())); |
909 | setup_dma_zone(mdesc); | 898 | setup_dma_zone(mdesc); |
910 | sanity_check_meminfo(); | 899 | sanity_check_meminfo(); |
911 | arm_memblock_init(&meminfo, mdesc); | 900 | arm_memblock_init(mdesc); |
912 | 901 | ||
913 | paging_init(mdesc); | 902 | paging_init(mdesc); |
914 | request_standard_resources(mdesc); | 903 | request_standard_resources(mdesc); |
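With struct meminfo gone, memory discovered by arm_add_memory() goes straight into memblock, and board .fixup callbacks lose their meminfo argument; the clps711x and MSM conversions later in this diff follow the same pattern. A minimal sketch of a new-style fixup, with an invented board and example addresses:

    #include <linux/memblock.h>

    static void __init fixup_example_board(struct tag *tags, char **cmdline)
    {
        /* one 64 MiB bank at the platform's DRAM base (example values only) */
        memblock_add(0xc0000000, SZ_64M);
    }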
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S index b907d9b790ab..1b880db2a033 100644 --- a/arch/arm/kernel/sleep.S +++ b/arch/arm/kernel/sleep.S | |||
@@ -127,6 +127,10 @@ ENDPROC(cpu_resume_after_mmu) | |||
127 | .align | 127 | .align |
128 | ENTRY(cpu_resume) | 128 | ENTRY(cpu_resume) |
129 | ARM_BE8(setend be) @ ensure we are in BE mode | 129 | ARM_BE8(setend be) @ ensure we are in BE mode |
130 | #ifdef CONFIG_ARM_VIRT_EXT | ||
131 | bl __hyp_stub_install_secondary | ||
132 | #endif | ||
133 | safe_svcmode_maskall r1 | ||
130 | mov r1, #0 | 134 | mov r1, #0 |
131 | ALT_SMP(mrc p15, 0, r0, c0, c0, 5) | 135 | ALT_SMP(mrc p15, 0, r0, c0, c0, 5) |
132 | ALT_UP_B(1f) | 136 | ALT_UP_B(1f) |
@@ -144,7 +148,6 @@ ARM_BE8(setend be) @ ensure we are in BE mode | |||
144 | ldr r0, [r0, #SLEEP_SAVE_SP_PHYS] | 148 | ldr r0, [r0, #SLEEP_SAVE_SP_PHYS] |
145 | ldr r0, [r0, r1, lsl #2] | 149 | ldr r0, [r0, r1, lsl #2] |
146 | 150 | ||
147 | setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set SVC, irqs off | ||
148 | @ load phys pgd, stack, resume fn | 151 | @ load phys pgd, stack, resume fn |
149 | ARM( ldmia r0!, {r1, sp, pc} ) | 152 | ARM( ldmia r0!, {r1, sp, pc} ) |
150 | THUMB( ldmia r0!, {r1, r2, r3} ) | 153 | THUMB( ldmia r0!, {r1, r2, r3} ) |
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index af4e8c8a5422..f065eb05d254 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c | |||
@@ -3,6 +3,7 @@ | |||
3 | #include <linux/stacktrace.h> | 3 | #include <linux/stacktrace.h> |
4 | 4 | ||
5 | #include <asm/stacktrace.h> | 5 | #include <asm/stacktrace.h> |
6 | #include <asm/traps.h> | ||
6 | 7 | ||
7 | #if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) | 8 | #if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) |
8 | /* | 9 | /* |
@@ -61,6 +62,7 @@ EXPORT_SYMBOL(walk_stackframe); | |||
61 | #ifdef CONFIG_STACKTRACE | 62 | #ifdef CONFIG_STACKTRACE |
62 | struct stack_trace_data { | 63 | struct stack_trace_data { |
63 | struct stack_trace *trace; | 64 | struct stack_trace *trace; |
65 | unsigned long last_pc; | ||
64 | unsigned int no_sched_functions; | 66 | unsigned int no_sched_functions; |
65 | unsigned int skip; | 67 | unsigned int skip; |
66 | }; | 68 | }; |
@@ -69,6 +71,7 @@ static int save_trace(struct stackframe *frame, void *d) | |||
69 | { | 71 | { |
70 | struct stack_trace_data *data = d; | 72 | struct stack_trace_data *data = d; |
71 | struct stack_trace *trace = data->trace; | 73 | struct stack_trace *trace = data->trace; |
74 | struct pt_regs *regs; | ||
72 | unsigned long addr = frame->pc; | 75 | unsigned long addr = frame->pc; |
73 | 76 | ||
74 | if (data->no_sched_functions && in_sched_functions(addr)) | 77 | if (data->no_sched_functions && in_sched_functions(addr)) |
@@ -80,16 +83,39 @@ static int save_trace(struct stackframe *frame, void *d) | |||
80 | 83 | ||
81 | trace->entries[trace->nr_entries++] = addr; | 84 | trace->entries[trace->nr_entries++] = addr; |
82 | 85 | ||
86 | if (trace->nr_entries >= trace->max_entries) | ||
87 | return 1; | ||
88 | |||
89 | /* | ||
90 | * in_exception_text() is designed to test if the PC is one of | ||
91 | * the functions which has an exception stack above it, but | ||
92 | * unfortunately what is in frame->pc is the return LR value, | ||
93 | * not the saved PC value. So, we need to track the previous | ||
94 | * frame PC value when doing this. | ||
95 | */ | ||
96 | addr = data->last_pc; | ||
97 | data->last_pc = frame->pc; | ||
98 | if (!in_exception_text(addr)) | ||
99 | return 0; | ||
100 | |||
101 | regs = (struct pt_regs *)frame->sp; | ||
102 | |||
103 | trace->entries[trace->nr_entries++] = regs->ARM_pc; | ||
104 | |||
83 | return trace->nr_entries >= trace->max_entries; | 105 | return trace->nr_entries >= trace->max_entries; |
84 | } | 106 | } |
85 | 107 | ||
86 | void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | 108 | /* This must be noinline so that our skip calculation works correctly */ |
109 | static noinline void __save_stack_trace(struct task_struct *tsk, | ||
110 | struct stack_trace *trace, unsigned int nosched) | ||
87 | { | 111 | { |
88 | struct stack_trace_data data; | 112 | struct stack_trace_data data; |
89 | struct stackframe frame; | 113 | struct stackframe frame; |
90 | 114 | ||
91 | data.trace = trace; | 115 | data.trace = trace; |
116 | data.last_pc = ULONG_MAX; | ||
92 | data.skip = trace->skip; | 117 | data.skip = trace->skip; |
118 | data.no_sched_functions = nosched; | ||
93 | 119 | ||
94 | if (tsk != current) { | 120 | if (tsk != current) { |
95 | #ifdef CONFIG_SMP | 121 | #ifdef CONFIG_SMP |
@@ -102,7 +128,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | |||
102 | trace->entries[trace->nr_entries++] = ULONG_MAX; | 128 | trace->entries[trace->nr_entries++] = ULONG_MAX; |
103 | return; | 129 | return; |
104 | #else | 130 | #else |
105 | data.no_sched_functions = 1; | ||
106 | frame.fp = thread_saved_fp(tsk); | 131 | frame.fp = thread_saved_fp(tsk); |
107 | frame.sp = thread_saved_sp(tsk); | 132 | frame.sp = thread_saved_sp(tsk); |
108 | frame.lr = 0; /* recovered from the stack */ | 133 | frame.lr = 0; /* recovered from the stack */ |
@@ -111,11 +136,12 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | |||
111 | } else { | 136 | } else { |
112 | register unsigned long current_sp asm ("sp"); | 137 | register unsigned long current_sp asm ("sp"); |
113 | 138 | ||
114 | data.no_sched_functions = 0; | 139 | /* We don't want this function nor the caller */ |
140 | data.skip += 2; | ||
115 | frame.fp = (unsigned long)__builtin_frame_address(0); | 141 | frame.fp = (unsigned long)__builtin_frame_address(0); |
116 | frame.sp = current_sp; | 142 | frame.sp = current_sp; |
117 | frame.lr = (unsigned long)__builtin_return_address(0); | 143 | frame.lr = (unsigned long)__builtin_return_address(0); |
118 | frame.pc = (unsigned long)save_stack_trace_tsk; | 144 | frame.pc = (unsigned long)__save_stack_trace; |
119 | } | 145 | } |
120 | 146 | ||
121 | walk_stackframe(&frame, save_trace, &data); | 147 | walk_stackframe(&frame, save_trace, &data); |
@@ -123,9 +149,33 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | |||
123 | trace->entries[trace->nr_entries++] = ULONG_MAX; | 149 | trace->entries[trace->nr_entries++] = ULONG_MAX; |
124 | } | 150 | } |
125 | 151 | ||
152 | void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) | ||
153 | { | ||
154 | struct stack_trace_data data; | ||
155 | struct stackframe frame; | ||
156 | |||
157 | data.trace = trace; | ||
158 | data.skip = trace->skip; | ||
159 | data.no_sched_functions = 0; | ||
160 | |||
161 | frame.fp = regs->ARM_fp; | ||
162 | frame.sp = regs->ARM_sp; | ||
163 | frame.lr = regs->ARM_lr; | ||
164 | frame.pc = regs->ARM_pc; | ||
165 | |||
166 | walk_stackframe(&frame, save_trace, &data); | ||
167 | if (trace->nr_entries < trace->max_entries) | ||
168 | trace->entries[trace->nr_entries++] = ULONG_MAX; | ||
169 | } | ||
170 | |||
171 | void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | ||
172 | { | ||
173 | __save_stack_trace(tsk, trace, 1); | ||
174 | } | ||
175 | |||
126 | void save_stack_trace(struct stack_trace *trace) | 176 | void save_stack_trace(struct stack_trace *trace) |
127 | { | 177 | { |
128 | save_stack_trace_tsk(current, trace); | 178 | __save_stack_trace(current, trace, 0); |
129 | } | 179 | } |
130 | EXPORT_SYMBOL_GPL(save_stack_trace); | 180 | EXPORT_SYMBOL_GPL(save_stack_trace); |
131 | #endif | 181 | #endif |
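save_stack_trace_regs() lets callers that already hold a saved register set (for example an exception or probe handler) start the unwind from those registers rather than from the current frame. A hedged usage sketch; the surrounding handler and the buffer size are assumptions:

    static void example_record_trace(struct pt_regs *regs)
    {
        static unsigned long entries[16];
        struct stack_trace trace = {
            .entries     = entries,
            .max_entries = ARRAY_SIZE(entries),
            .skip        = 0,
        };

        save_stack_trace_regs(regs, &trace);
        /* trace.nr_entries entries are now valid; the list is terminated
         * with ULONG_MAX when there was room for the sentinel */
    }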
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index 71e1fec6d31a..3997c411c140 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c | |||
@@ -91,13 +91,13 @@ static void __init parse_dt_topology(void) | |||
91 | { | 91 | { |
92 | const struct cpu_efficiency *cpu_eff; | 92 | const struct cpu_efficiency *cpu_eff; |
93 | struct device_node *cn = NULL; | 93 | struct device_node *cn = NULL; |
94 | unsigned long min_capacity = (unsigned long)(-1); | 94 | unsigned long min_capacity = ULONG_MAX; |
95 | unsigned long max_capacity = 0; | 95 | unsigned long max_capacity = 0; |
96 | unsigned long capacity = 0; | 96 | unsigned long capacity = 0; |
97 | int alloc_size, cpu = 0; | 97 | int cpu = 0; |
98 | 98 | ||
99 | alloc_size = nr_cpu_ids * sizeof(*__cpu_capacity); | 99 | __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity), |
100 | __cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT); | 100 | GFP_NOWAIT); |
101 | 101 | ||
102 | for_each_possible_cpu(cpu) { | 102 | for_each_possible_cpu(cpu) { |
103 | const u32 *rate; | 103 | const u32 *rate; |
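The allocation swap is behaviour-preserving; kcalloc() simply adds an overflow check on the element-count multiplication that the open-coded kzalloc(n * size) form lacked. For comparison (sketch only):

    /* old: caller multiplies, no overflow check */
    __cpu_capacity = kzalloc(nr_cpu_ids * sizeof(*__cpu_capacity), GFP_NOWAIT);

    /* new: kcalloc() rejects nr_cpu_ids * sizeof(...) if it would overflow */
    __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity), GFP_NOWAIT);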
diff --git a/arch/arm/kernel/uprobes.c b/arch/arm/kernel/uprobes.c index f9bacee973bf..56adf9c1fde0 100644 --- a/arch/arm/kernel/uprobes.c +++ b/arch/arm/kernel/uprobes.c | |||
@@ -113,6 +113,26 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, | |||
113 | return 0; | 113 | return 0; |
114 | } | 114 | } |
115 | 115 | ||
116 | void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, | ||
117 | void *src, unsigned long len) | ||
118 | { | ||
119 | void *xol_page_kaddr = kmap_atomic(page); | ||
120 | void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK); | ||
121 | |||
122 | preempt_disable(); | ||
123 | |||
124 | /* Initialize the slot */ | ||
125 | memcpy(dst, src, len); | ||
126 | |||
127 | /* flush caches (dcache/icache) */ | ||
128 | flush_uprobe_xol_access(page, vaddr, dst, len); | ||
129 | |||
130 | preempt_enable(); | ||
131 | |||
132 | kunmap_atomic(xol_page_kaddr); | ||
133 | } | ||
134 | |||
135 | |||
116 | int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) | 136 | int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) |
117 | { | 137 | { |
118 | struct uprobe_task *utask = current->utask; | 138 | struct uprobe_task *utask = current->utask; |
diff --git a/arch/arm/mach-bcm/bcm_5301x.c b/arch/arm/mach-bcm/bcm_5301x.c index edff69761e04..e9bcbdbce555 100644 --- a/arch/arm/mach-bcm/bcm_5301x.c +++ b/arch/arm/mach-bcm/bcm_5301x.c | |||
@@ -43,19 +43,14 @@ static void __init bcm5301x_init_early(void) | |||
43 | "imprecise external abort"); | 43 | "imprecise external abort"); |
44 | } | 44 | } |
45 | 45 | ||
46 | static void __init bcm5301x_dt_init(void) | ||
47 | { | ||
48 | l2x0_of_init(0, ~0UL); | ||
49 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); | ||
50 | } | ||
51 | |||
52 | static const char __initconst *bcm5301x_dt_compat[] = { | 46 | static const char __initconst *bcm5301x_dt_compat[] = { |
53 | "brcm,bcm4708", | 47 | "brcm,bcm4708", |
54 | NULL, | 48 | NULL, |
55 | }; | 49 | }; |
56 | 50 | ||
57 | DT_MACHINE_START(BCM5301X, "BCM5301X") | 51 | DT_MACHINE_START(BCM5301X, "BCM5301X") |
52 | .l2c_aux_val = 0, | ||
53 | .l2c_aux_mask = ~0, | ||
58 | .init_early = bcm5301x_init_early, | 54 | .init_early = bcm5301x_init_early, |
59 | .init_machine = bcm5301x_dt_init, | ||
60 | .dt_compat = bcm5301x_dt_compat, | 55 | .dt_compat = bcm5301x_dt_compat, |
61 | MACHINE_END | 56 | MACHINE_END |
diff --git a/arch/arm/mach-berlin/berlin.c b/arch/arm/mach-berlin/berlin.c index 025bcb5473eb..ac181c6797ee 100644 --- a/arch/arm/mach-berlin/berlin.c +++ b/arch/arm/mach-berlin/berlin.c | |||
@@ -18,16 +18,6 @@ | |||
18 | #include <asm/hardware/cache-l2x0.h> | 18 | #include <asm/hardware/cache-l2x0.h> |
19 | #include <asm/mach/arch.h> | 19 | #include <asm/mach/arch.h> |
20 | 20 | ||
21 | static void __init berlin_init_machine(void) | ||
22 | { | ||
23 | /* | ||
24 | * with DT probing for L2CCs, berlin_init_machine can be removed. | ||
25 | * Note: 88DE3005 (Armada 1500-mini) uses pl310 l2cc | ||
26 | */ | ||
27 | l2x0_of_init(0x70c00000, 0xfeffffff); | ||
28 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); | ||
29 | } | ||
30 | |||
31 | static const char * const berlin_dt_compat[] = { | 21 | static const char * const berlin_dt_compat[] = { |
32 | "marvell,berlin", | 22 | "marvell,berlin", |
33 | NULL, | 23 | NULL, |
@@ -35,5 +25,10 @@ static const char * const berlin_dt_compat[] = { | |||
35 | 25 | ||
36 | DT_MACHINE_START(BERLIN_DT, "Marvell Berlin") | 26 | DT_MACHINE_START(BERLIN_DT, "Marvell Berlin") |
37 | .dt_compat = berlin_dt_compat, | 27 | .dt_compat = berlin_dt_compat, |
38 | .init_machine = berlin_init_machine, | 28 | /* |
29 | * with DT probing for L2CCs, berlin_init_machine can be removed. | ||
30 | * Note: 88DE3005 (Armada 1500-mini) uses pl310 l2cc | ||
31 | */ | ||
32 | .l2c_aux_val = 0x30c00000, | ||
33 | .l2c_aux_mask = 0xfeffffff, | ||
39 | MACHINE_END | 34 | MACHINE_END |
diff --git a/arch/arm/mach-clps711x/board-clep7312.c b/arch/arm/mach-clps711x/board-clep7312.c index 221b9de32dd6..94a7add88a3f 100644 --- a/arch/arm/mach-clps711x/board-clep7312.c +++ b/arch/arm/mach-clps711x/board-clep7312.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/types.h> | 19 | #include <linux/types.h> |
20 | #include <linux/string.h> | 20 | #include <linux/string.h> |
21 | #include <linux/memblock.h> | ||
21 | 22 | ||
22 | #include <asm/setup.h> | 23 | #include <asm/setup.h> |
23 | #include <asm/mach-types.h> | 24 | #include <asm/mach-types.h> |
@@ -26,11 +27,9 @@ | |||
26 | #include "common.h" | 27 | #include "common.h" |
27 | 28 | ||
28 | static void __init | 29 | static void __init |
29 | fixup_clep7312(struct tag *tags, char **cmdline, struct meminfo *mi) | 30 | fixup_clep7312(struct tag *tags, char **cmdline) |
30 | { | 31 | { |
31 | mi->nr_banks=1; | 32 | memblock_add(0xc0000000, 0x01000000); |
32 | mi->bank[0].start = 0xc0000000; | ||
33 | mi->bank[0].size = 0x01000000; | ||
34 | } | 33 | } |
35 | 34 | ||
36 | MACHINE_START(CLEP7212, "Cirrus Logic 7212/7312") | 35 | MACHINE_START(CLEP7212, "Cirrus Logic 7212/7312") |
diff --git a/arch/arm/mach-clps711x/board-edb7211.c b/arch/arm/mach-clps711x/board-edb7211.c index 077609841f14..f9828f89972a 100644 --- a/arch/arm/mach-clps711x/board-edb7211.c +++ b/arch/arm/mach-clps711x/board-edb7211.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
17 | #include <linux/backlight.h> | 17 | #include <linux/backlight.h> |
18 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
19 | #include <linux/memblock.h> | ||
19 | 20 | ||
20 | #include <linux/mtd/physmap.h> | 21 | #include <linux/mtd/physmap.h> |
21 | #include <linux/mtd/partitions.h> | 22 | #include <linux/mtd/partitions.h> |
@@ -133,7 +134,7 @@ static void __init edb7211_reserve(void) | |||
133 | } | 134 | } |
134 | 135 | ||
135 | static void __init | 136 | static void __init |
136 | fixup_edb7211(struct tag *tags, char **cmdline, struct meminfo *mi) | 137 | fixup_edb7211(struct tag *tags, char **cmdline) |
137 | { | 138 | { |
138 | /* | 139 | /* |
139 | * Bank start addresses are not present in the information | 140 | * Bank start addresses are not present in the information |
@@ -143,11 +144,8 @@ fixup_edb7211(struct tag *tags, char **cmdline, struct meminfo *mi) | |||
143 | * Banks sizes _are_ present in the param block, but we're | 144 | * Banks sizes _are_ present in the param block, but we're |
144 | * not using that information yet. | 145 | * not using that information yet. |
145 | */ | 146 | */ |
146 | mi->bank[0].start = 0xc0000000; | 147 | memblock_add(0xc0000000, SZ_8M); |
147 | mi->bank[0].size = SZ_8M; | 148 | memblock_add(0xc1000000, SZ_8M); |
148 | mi->bank[1].start = 0xc1000000; | ||
149 | mi->bank[1].size = SZ_8M; | ||
150 | mi->nr_banks = 2; | ||
151 | } | 149 | } |
152 | 150 | ||
153 | static void __init edb7211_init(void) | 151 | static void __init edb7211_init(void) |
diff --git a/arch/arm/mach-clps711x/board-p720t.c b/arch/arm/mach-clps711x/board-p720t.c index 67b733744ed7..0cf0e51e6546 100644 --- a/arch/arm/mach-clps711x/board-p720t.c +++ b/arch/arm/mach-clps711x/board-p720t.c | |||
@@ -295,7 +295,7 @@ static struct generic_bl_info p720t_lcd_backlight_pdata = { | |||
295 | }; | 295 | }; |
296 | 296 | ||
297 | static void __init | 297 | static void __init |
298 | fixup_p720t(struct tag *tag, char **cmdline, struct meminfo *mi) | 298 | fixup_p720t(struct tag *tag, char **cmdline) |
299 | { | 299 | { |
300 | /* | 300 | /* |
301 | * Our bootloader doesn't setup any tags (yet). | 301 | * Our bootloader doesn't setup any tags (yet). |
diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c index 2ae28a69e3e5..f85449a6accd 100644 --- a/arch/arm/mach-cns3xxx/core.c +++ b/arch/arm/mach-cns3xxx/core.c | |||
@@ -272,9 +272,9 @@ void __init cns3xxx_l2x0_init(void) | |||
272 | * | 272 | * |
273 | * 1 cycle of latency for setup, read and write accesses | 273 | * 1 cycle of latency for setup, read and write accesses |
274 | */ | 274 | */ |
275 | val = readl(base + L2X0_TAG_LATENCY_CTRL); | 275 | val = readl(base + L310_TAG_LATENCY_CTRL); |
276 | val &= 0xfffff888; | 276 | val &= 0xfffff888; |
277 | writel(val, base + L2X0_TAG_LATENCY_CTRL); | 277 | writel(val, base + L310_TAG_LATENCY_CTRL); |
278 | 278 | ||
279 | /* | 279 | /* |
280 | * Data RAM Control register | 280 | * Data RAM Control register |
@@ -285,12 +285,12 @@ void __init cns3xxx_l2x0_init(void) | |||
285 | * | 285 | * |
286 | * 1 cycle of latency for setup, read and write accesses | 286 | * 1 cycle of latency for setup, read and write accesses |
287 | */ | 287 | */ |
288 | val = readl(base + L2X0_DATA_LATENCY_CTRL); | 288 | val = readl(base + L310_DATA_LATENCY_CTRL); |
289 | val &= 0xfffff888; | 289 | val &= 0xfffff888; |
290 | writel(val, base + L2X0_DATA_LATENCY_CTRL); | 290 | writel(val, base + L310_DATA_LATENCY_CTRL); |
291 | 291 | ||
292 | /* 32 KiB, 8-way, parity disable */ | 292 | /* 32 KiB, 8-way, parity disable */ |
293 | l2x0_init(base, 0x00540000, 0xfe000fff); | 293 | l2x0_init(base, 0x00500000, 0xfe0f0fff); |
294 | } | 294 | } |
295 | 295 | ||
296 | #endif /* CONFIG_CACHE_L2X0 */ | 296 | #endif /* CONFIG_CACHE_L2X0 */ |
diff --git a/arch/arm/mach-ep93xx/crunch-bits.S b/arch/arm/mach-ep93xx/crunch-bits.S index 0ec9bb48fab9..e96923a3017b 100644 --- a/arch/arm/mach-ep93xx/crunch-bits.S +++ b/arch/arm/mach-ep93xx/crunch-bits.S | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <asm/ptrace.h> | 16 | #include <asm/ptrace.h> |
17 | #include <asm/thread_info.h> | 17 | #include <asm/thread_info.h> |
18 | #include <asm/asm-offsets.h> | 18 | #include <asm/asm-offsets.h> |
19 | #include <asm/assembler.h> | ||
19 | #include <mach/ep93xx-regs.h> | 20 | #include <mach/ep93xx-regs.h> |
20 | 21 | ||
21 | /* | 22 | /* |
@@ -62,14 +63,16 @@ | |||
62 | * r9 = ret_from_exception | 63 | * r9 = ret_from_exception |
63 | * lr = undefined instr exit | 64 | * lr = undefined instr exit |
64 | * | 65 | * |
65 | * called from prefetch exception handler with interrupts disabled | 66 | * called from prefetch exception handler with interrupts enabled |
66 | */ | 67 | */ |
67 | ENTRY(crunch_task_enable) | 68 | ENTRY(crunch_task_enable) |
69 | inc_preempt_count r10, r3 | ||
70 | |||
68 | ldr r8, =(EP93XX_APB_VIRT_BASE + 0x00130000) @ syscon addr | 71 | ldr r8, =(EP93XX_APB_VIRT_BASE + 0x00130000) @ syscon addr |
69 | 72 | ||
70 | ldr r1, [r8, #0x80] | 73 | ldr r1, [r8, #0x80] |
71 | tst r1, #0x00800000 @ access to crunch enabled? | 74 | tst r1, #0x00800000 @ access to crunch enabled? |
72 | movne pc, lr @ if so no business here | 75 | bne 2f @ if so no business here |
73 | mov r3, #0xaa @ unlock syscon swlock | 76 | mov r3, #0xaa @ unlock syscon swlock |
74 | str r3, [r8, #0xc0] | 77 | str r3, [r8, #0xc0] |
75 | orr r1, r1, #0x00800000 @ enable access to crunch | 78 | orr r1, r1, #0x00800000 @ enable access to crunch |
@@ -142,7 +145,7 @@ crunch_save: | |||
142 | 145 | ||
143 | teq r0, #0 @ anything to load? | 146 | teq r0, #0 @ anything to load? |
144 | cfldr64eq mvdx0, [r1, #CRUNCH_MVDX0] @ mvdx0 was clobbered | 147 | cfldr64eq mvdx0, [r1, #CRUNCH_MVDX0] @ mvdx0 was clobbered |
145 | moveq pc, lr | 148 | beq 1f |
146 | 149 | ||
147 | crunch_load: | 150 | crunch_load: |
148 | cfldr64 mvdx0, [r0, #CRUNCH_DSPSC] @ load status word | 151 | cfldr64 mvdx0, [r0, #CRUNCH_DSPSC] @ load status word |
@@ -190,6 +193,11 @@ crunch_load: | |||
190 | cfldr64 mvdx14, [r0, #CRUNCH_MVDX14] | 193 | cfldr64 mvdx14, [r0, #CRUNCH_MVDX14] |
191 | cfldr64 mvdx15, [r0, #CRUNCH_MVDX15] | 194 | cfldr64 mvdx15, [r0, #CRUNCH_MVDX15] |
192 | 195 | ||
196 | 1: | ||
197 | #ifdef CONFIG_PREEMPT_COUNT | ||
198 | get_thread_info r10 | ||
199 | #endif | ||
200 | 2: dec_preempt_count r10, r3 | ||
193 | mov pc, lr | 201 | mov pc, lr |
194 | 202 | ||
195 | /* | 203 | /* |
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h index 80b90e346ca0..16617bdb37a9 100644 --- a/arch/arm/mach-exynos/common.h +++ b/arch/arm/mach-exynos/common.h | |||
@@ -153,7 +153,6 @@ enum sys_powerdown { | |||
153 | NUM_SYS_POWERDOWN, | 153 | NUM_SYS_POWERDOWN, |
154 | }; | 154 | }; |
155 | 155 | ||
156 | extern unsigned long l2x0_regs_phys; | ||
157 | struct exynos_pmu_conf { | 156 | struct exynos_pmu_conf { |
158 | void __iomem *reg; | 157 | void __iomem *reg; |
159 | unsigned int val[NUM_SYS_POWERDOWN]; | 158 | unsigned int val[NUM_SYS_POWERDOWN]; |
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c index a56ce45a3f90..90aab4d75d08 100644 --- a/arch/arm/mach-exynos/exynos.c +++ b/arch/arm/mach-exynos/exynos.c | |||
@@ -30,9 +30,6 @@ | |||
30 | #include "mfc.h" | 30 | #include "mfc.h" |
31 | #include "regs-pmu.h" | 31 | #include "regs-pmu.h" |
32 | 32 | ||
33 | #define L2_AUX_VAL 0x7C470001 | ||
34 | #define L2_AUX_MASK 0xC200ffff | ||
35 | |||
36 | static struct map_desc exynos4_iodesc[] __initdata = { | 33 | static struct map_desc exynos4_iodesc[] __initdata = { |
37 | { | 34 | { |
38 | .virtual = (unsigned long)S3C_VA_SYS, | 35 | .virtual = (unsigned long)S3C_VA_SYS, |
@@ -246,25 +243,6 @@ void __init exynos_init_io(void) | |||
246 | exynos_map_io(); | 243 | exynos_map_io(); |
247 | } | 244 | } |
248 | 245 | ||
249 | static int __init exynos4_l2x0_cache_init(void) | ||
250 | { | ||
251 | int ret; | ||
252 | |||
253 | if (!soc_is_exynos4()) | ||
254 | return 0; | ||
255 | |||
256 | ret = l2x0_of_init(L2_AUX_VAL, L2_AUX_MASK); | ||
257 | if (ret) | ||
258 | return ret; | ||
259 | |||
260 | if (IS_ENABLED(CONFIG_S5P_SLEEP)) { | ||
261 | l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs); | ||
262 | clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long)); | ||
263 | } | ||
264 | return 0; | ||
265 | } | ||
266 | early_initcall(exynos4_l2x0_cache_init); | ||
267 | |||
268 | static void __init exynos_dt_machine_init(void) | 246 | static void __init exynos_dt_machine_init(void) |
269 | { | 247 | { |
270 | struct device_node *i2c_np; | 248 | struct device_node *i2c_np; |
@@ -333,6 +311,8 @@ static void __init exynos_reserve(void) | |||
333 | DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)") | 311 | DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)") |
334 | /* Maintainer: Thomas Abraham <thomas.abraham@linaro.org> */ | 312 | /* Maintainer: Thomas Abraham <thomas.abraham@linaro.org> */ |
335 | /* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */ | 313 | /* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */ |
314 | .l2c_aux_val = 0x3c400001, | ||
315 | .l2c_aux_mask = 0xc20fffff, | ||
336 | .smp = smp_ops(exynos_smp_ops), | 316 | .smp = smp_ops(exynos_smp_ops), |
337 | .map_io = exynos_init_io, | 317 | .map_io = exynos_init_io, |
338 | .init_early = exynos_firmware_init, | 318 | .init_early = exynos_firmware_init, |
diff --git a/arch/arm/mach-exynos/sleep.S b/arch/arm/mach-exynos/sleep.S index a2613e944e10..108a45f4bb62 100644 --- a/arch/arm/mach-exynos/sleep.S +++ b/arch/arm/mach-exynos/sleep.S | |||
@@ -16,8 +16,6 @@ | |||
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include <linux/linkage.h> | 18 | #include <linux/linkage.h> |
19 | #include <asm/asm-offsets.h> | ||
20 | #include <asm/hardware/cache-l2x0.h> | ||
21 | 19 | ||
22 | #define CPU_MASK 0xff0ffff0 | 20 | #define CPU_MASK 0xff0ffff0 |
23 | #define CPU_CORTEX_A9 0x410fc090 | 21 | #define CPU_CORTEX_A9 0x410fc090 |
@@ -53,33 +51,7 @@ ENTRY(exynos_cpu_resume) | |||
53 | and r0, r0, r1 | 51 | and r0, r0, r1 |
54 | ldr r1, =CPU_CORTEX_A9 | 52 | ldr r1, =CPU_CORTEX_A9 |
55 | cmp r0, r1 | 53 | cmp r0, r1 |
56 | bne skip_l2_resume | 54 | bleq l2c310_early_resume |
57 | adr r0, l2x0_regs_phys | ||
58 | ldr r0, [r0] | ||
59 | cmp r0, #0 | ||
60 | beq skip_l2_resume | ||
61 | ldr r1, [r0, #L2X0_R_PHY_BASE] | ||
62 | ldr r2, [r1, #L2X0_CTRL] | ||
63 | tst r2, #0x1 | ||
64 | bne skip_l2_resume | ||
65 | ldr r2, [r0, #L2X0_R_AUX_CTRL] | ||
66 | str r2, [r1, #L2X0_AUX_CTRL] | ||
67 | ldr r2, [r0, #L2X0_R_TAG_LATENCY] | ||
68 | str r2, [r1, #L2X0_TAG_LATENCY_CTRL] | ||
69 | ldr r2, [r0, #L2X0_R_DATA_LATENCY] | ||
70 | str r2, [r1, #L2X0_DATA_LATENCY_CTRL] | ||
71 | ldr r2, [r0, #L2X0_R_PREFETCH_CTRL] | ||
72 | str r2, [r1, #L2X0_PREFETCH_CTRL] | ||
73 | ldr r2, [r0, #L2X0_R_PWR_CTRL] | ||
74 | str r2, [r1, #L2X0_POWER_CTRL] | ||
75 | mov r2, #1 | ||
76 | str r2, [r1, #L2X0_CTRL] | ||
77 | skip_l2_resume: | ||
78 | #endif | 55 | #endif |
79 | b cpu_resume | 56 | b cpu_resume |
80 | ENDPROC(exynos_cpu_resume) | 57 | ENDPROC(exynos_cpu_resume) |
81 | #ifdef CONFIG_CACHE_L2X0 | ||
82 | .globl l2x0_regs_phys | ||
83 | l2x0_regs_phys: | ||
84 | .long 0 | ||
85 | #endif | ||
diff --git a/arch/arm/mach-footbridge/cats-hw.c b/arch/arm/mach-footbridge/cats-hw.c index da0415094856..8f05489671b7 100644 --- a/arch/arm/mach-footbridge/cats-hw.c +++ b/arch/arm/mach-footbridge/cats-hw.c | |||
@@ -76,7 +76,7 @@ __initcall(cats_hw_init); | |||
76 | * hard reboots fail on early boards. | 76 | * hard reboots fail on early boards. |
77 | */ | 77 | */ |
78 | static void __init | 78 | static void __init |
79 | fixup_cats(struct tag *tags, char **cmdline, struct meminfo *mi) | 79 | fixup_cats(struct tag *tags, char **cmdline) |
80 | { | 80 | { |
81 | #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) | 81 | #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) |
82 | screen_info.orig_video_lines = 25; | 82 | screen_info.orig_video_lines = 25; |
diff --git a/arch/arm/mach-footbridge/netwinder-hw.c b/arch/arm/mach-footbridge/netwinder-hw.c index eb1fa5c84723..cdee08c6d239 100644 --- a/arch/arm/mach-footbridge/netwinder-hw.c +++ b/arch/arm/mach-footbridge/netwinder-hw.c | |||
@@ -620,7 +620,7 @@ __initcall(nw_hw_init); | |||
620 | * the parameter page. | 620 | * the parameter page. |
621 | */ | 621 | */ |
622 | static void __init | 622 | static void __init |
623 | fixup_netwinder(struct tag *tags, char **cmdline, struct meminfo *mi) | 623 | fixup_netwinder(struct tag *tags, char **cmdline) |
624 | { | 624 | { |
625 | #ifdef CONFIG_ISAPNP | 625 | #ifdef CONFIG_ISAPNP |
626 | extern int isapnp_disable; | 626 | extern int isapnp_disable; |
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c index c7de89b263dd..8c35ae4ff176 100644 --- a/arch/arm/mach-highbank/highbank.c +++ b/arch/arm/mach-highbank/highbank.c | |||
@@ -51,11 +51,13 @@ static void __init highbank_scu_map_io(void) | |||
51 | } | 51 | } |
52 | 52 | ||
53 | 53 | ||
54 | static void highbank_l2x0_disable(void) | 54 | static void highbank_l2c310_write_sec(unsigned long val, unsigned reg) |
55 | { | 55 | { |
56 | outer_flush_all(); | 56 | if (reg == L2X0_CTRL) |
57 | /* Disable PL310 L2 Cache controller */ | 57 | highbank_smc1(0x102, val); |
58 | highbank_smc1(0x102, 0x0); | 58 | else |
59 | WARN_ONCE(1, "Highbank L2C310: ignoring write to reg 0x%x\n", | ||
60 | reg); | ||
59 | } | 61 | } |
60 | 62 | ||
61 | static void __init highbank_init_irq(void) | 63 | static void __init highbank_init_irq(void) |
@@ -64,14 +66,6 @@ static void __init highbank_init_irq(void) | |||
64 | 66 | ||
65 | if (of_find_compatible_node(NULL, NULL, "arm,cortex-a9")) | 67 | if (of_find_compatible_node(NULL, NULL, "arm,cortex-a9")) |
66 | highbank_scu_map_io(); | 68 | highbank_scu_map_io(); |
67 | |||
68 | /* Enable PL310 L2 Cache controller */ | ||
69 | if (IS_ENABLED(CONFIG_CACHE_L2X0) && | ||
70 | of_find_compatible_node(NULL, NULL, "arm,pl310-cache")) { | ||
71 | highbank_smc1(0x102, 0x1); | ||
72 | l2x0_of_init(0, ~0UL); | ||
73 | outer_cache.disable = highbank_l2x0_disable; | ||
74 | } | ||
75 | } | 69 | } |
76 | 70 | ||
77 | static void highbank_power_off(void) | 71 | static void highbank_power_off(void) |
@@ -185,6 +179,9 @@ DT_MACHINE_START(HIGHBANK, "Highbank") | |||
185 | #if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE) | 179 | #if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE) |
186 | .dma_zone_size = (4ULL * SZ_1G), | 180 | .dma_zone_size = (4ULL * SZ_1G), |
187 | #endif | 181 | #endif |
182 | .l2c_aux_val = 0, | ||
183 | .l2c_aux_mask = ~0, | ||
184 | .l2c_write_sec = highbank_l2c310_write_sec, | ||
188 | .init_irq = highbank_init_irq, | 185 | .init_irq = highbank_init_irq, |
189 | .init_machine = highbank_init, | 186 | .init_machine = highbank_init, |
190 | .dt_compat = highbank_match, | 187 | .dt_compat = highbank_match, |
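Instead of hooking outer_cache.disable, Highbank now provides a write_sec callback, and init_IRQ() (see the arch/arm/kernel/irq.c hunk above) installs it before the L2C init runs. The sketch below shows the intended dispatch; it is an assumption about how the L2C core uses the hook, not a copy of cache-l2x0.c:

    /* Assumed dispatch for L2C registers that a non-secure kernel
     * cannot write directly */
    static void example_l2c_write(void __iomem *base, unsigned long val, unsigned reg)
    {
        if (outer_cache.write_sec)
            outer_cache.write_sec(val, reg);   /* e.g. highbank_smc1(0x102, val) for L2X0_CTRL */
        else
            writel_relaxed(val, base + reg);
    }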
diff --git a/arch/arm/mach-imx/mach-vf610.c b/arch/arm/mach-imx/mach-vf610.c index 2d8aef5a6efa..c44602758120 100644 --- a/arch/arm/mach-imx/mach-vf610.c +++ b/arch/arm/mach-imx/mach-vf610.c | |||
@@ -20,19 +20,14 @@ static void __init vf610_init_machine(void) | |||
20 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); | 20 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); |
21 | } | 21 | } |
22 | 22 | ||
23 | static void __init vf610_init_irq(void) | ||
24 | { | ||
25 | l2x0_of_init(0, ~0UL); | ||
26 | irqchip_init(); | ||
27 | } | ||
28 | |||
29 | static const char *vf610_dt_compat[] __initconst = { | 23 | static const char *vf610_dt_compat[] __initconst = { |
30 | "fsl,vf610", | 24 | "fsl,vf610", |
31 | NULL, | 25 | NULL, |
32 | }; | 26 | }; |
33 | 27 | ||
34 | DT_MACHINE_START(VYBRID_VF610, "Freescale Vybrid VF610 (Device Tree)") | 28 | DT_MACHINE_START(VYBRID_VF610, "Freescale Vybrid VF610 (Device Tree)") |
35 | .init_irq = vf610_init_irq, | 29 | .l2c_aux_val = 0, |
30 | .l2c_aux_mask = ~0, | ||
36 | .init_machine = vf610_init_machine, | 31 | .init_machine = vf610_init_machine, |
37 | .dt_compat = vf610_dt_compat, | 32 | .dt_compat = vf610_dt_compat, |
38 | .restart = mxc_restart, | 33 | .restart = mxc_restart, |
diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S index 20048ff05739..fe123b079c05 100644 --- a/arch/arm/mach-imx/suspend-imx6.S +++ b/arch/arm/mach-imx/suspend-imx6.S | |||
@@ -334,28 +334,10 @@ ENDPROC(imx6_suspend) | |||
334 | * turned into relative ones. | 334 | * turned into relative ones. |
335 | */ | 335 | */ |
336 | 336 | ||
337 | #ifdef CONFIG_CACHE_L2X0 | ||
338 | .macro pl310_resume | ||
339 | adr r0, l2x0_saved_regs_offset | ||
340 | ldr r2, [r0] | ||
341 | add r2, r2, r0 | ||
342 | ldr r0, [r2, #L2X0_R_PHY_BASE] @ get physical base of l2x0 | ||
343 | ldr r1, [r2, #L2X0_R_AUX_CTRL] @ get aux_ctrl value | ||
344 | str r1, [r0, #L2X0_AUX_CTRL] @ restore aux_ctrl | ||
345 | mov r1, #0x1 | ||
346 | str r1, [r0, #L2X0_CTRL] @ re-enable L2 | ||
347 | .endm | ||
348 | |||
349 | l2x0_saved_regs_offset: | ||
350 | .word l2x0_saved_regs - . | ||
351 | |||
352 | #else | ||
353 | .macro pl310_resume | ||
354 | .endm | ||
355 | #endif | ||
356 | |||
357 | ENTRY(v7_cpu_resume) | 337 | ENTRY(v7_cpu_resume) |
358 | bl v7_invalidate_l1 | 338 | bl v7_invalidate_l1 |
359 | pl310_resume | 339 | #ifdef CONFIG_CACHE_L2X0 |
340 | bl l2c310_early_resume | ||
341 | #endif | ||
360 | b cpu_resume | 342 | b cpu_resume |
361 | ENDPROC(v7_cpu_resume) | 343 | ENDPROC(v7_cpu_resume) |
diff --git a/arch/arm/mach-imx/system.c b/arch/arm/mach-imx/system.c index 5e3027d3692f..3b0733edb68c 100644 --- a/arch/arm/mach-imx/system.c +++ b/arch/arm/mach-imx/system.c | |||
@@ -124,7 +124,7 @@ void __init imx_init_l2cache(void) | |||
124 | } | 124 | } |
125 | 125 | ||
126 | /* Configure the L2 PREFETCH and POWER registers */ | 126 | /* Configure the L2 PREFETCH and POWER registers */ |
127 | val = readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL); | 127 | val = readl_relaxed(l2x0_base + L310_PREFETCH_CTRL); |
128 | val |= 0x70800000; | 128 | val |= 0x70800000; |
129 | /* | 129 | /* |
130 | * The L2 cache controller(PL310) version on the i.MX6D/Q is r3p1-50rel0 | 130 | * The L2 cache controller(PL310) version on the i.MX6D/Q is r3p1-50rel0 |
@@ -137,14 +137,12 @@ void __init imx_init_l2cache(void) | |||
137 | */ | 137 | */ |
138 | if (cpu_is_imx6q()) | 138 | if (cpu_is_imx6q()) |
139 | val &= ~(1 << 30 | 1 << 23); | 139 | val &= ~(1 << 30 | 1 << 23); |
140 | writel_relaxed(val, l2x0_base + L2X0_PREFETCH_CTRL); | 140 | writel_relaxed(val, l2x0_base + L310_PREFETCH_CTRL); |
141 | val = L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN; | ||
142 | writel_relaxed(val, l2x0_base + L2X0_POWER_CTRL); | ||
143 | 141 | ||
144 | iounmap(l2x0_base); | 142 | iounmap(l2x0_base); |
145 | of_node_put(np); | 143 | of_node_put(np); |
146 | 144 | ||
147 | out: | 145 | out: |
148 | l2x0_of_init(0, ~0UL); | 146 | l2x0_of_init(0, ~0); |
149 | } | 147 | } |
150 | #endif | 148 | #endif |
diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c index a77529887cbc..61bfe584a9d7 100644 --- a/arch/arm/mach-msm/board-halibut.c +++ b/arch/arm/mach-msm/board-halibut.c | |||
@@ -83,11 +83,6 @@ static void __init halibut_init(void) | |||
83 | platform_add_devices(devices, ARRAY_SIZE(devices)); | 83 | platform_add_devices(devices, ARRAY_SIZE(devices)); |
84 | } | 84 | } |
85 | 85 | ||
86 | static void __init halibut_fixup(struct tag *tags, char **cmdline, | ||
87 | struct meminfo *mi) | ||
88 | { | ||
89 | } | ||
90 | |||
91 | static void __init halibut_map_io(void) | 86 | static void __init halibut_map_io(void) |
92 | { | 87 | { |
93 | msm_map_common_io(); | 88 | msm_map_common_io(); |
@@ -100,7 +95,6 @@ static void __init halibut_init_late(void) | |||
100 | 95 | ||
101 | MACHINE_START(HALIBUT, "Halibut Board (QCT SURF7200A)") | 96 | MACHINE_START(HALIBUT, "Halibut Board (QCT SURF7200A)") |
102 | .atag_offset = 0x100, | 97 | .atag_offset = 0x100, |
103 | .fixup = halibut_fixup, | ||
104 | .map_io = halibut_map_io, | 98 | .map_io = halibut_map_io, |
105 | .init_early = halibut_init_early, | 99 | .init_early = halibut_init_early, |
106 | .init_irq = halibut_init_irq, | 100 | .init_irq = halibut_init_irq, |
diff --git a/arch/arm/mach-msm/board-mahimahi.c b/arch/arm/mach-msm/board-mahimahi.c index 7d9981cb400e..873c3ca3cd7e 100644 --- a/arch/arm/mach-msm/board-mahimahi.c +++ b/arch/arm/mach-msm/board-mahimahi.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/io.h> | 22 | #include <linux/io.h> |
23 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
25 | #include <linux/memblock.h> | ||
25 | 26 | ||
26 | #include <asm/mach-types.h> | 27 | #include <asm/mach-types.h> |
27 | #include <asm/mach/arch.h> | 28 | #include <asm/mach/arch.h> |
@@ -52,16 +53,10 @@ static void __init mahimahi_init(void) | |||
52 | platform_add_devices(devices, ARRAY_SIZE(devices)); | 53 | platform_add_devices(devices, ARRAY_SIZE(devices)); |
53 | } | 54 | } |
54 | 55 | ||
55 | static void __init mahimahi_fixup(struct tag *tags, char **cmdline, | 56 | static void __init mahimahi_fixup(struct tag *tags, char **cmdline) |
56 | struct meminfo *mi) | ||
57 | { | 57 | { |
58 | mi->nr_banks = 2; | 58 | memblock_add(PHYS_OFFSET, 219*SZ_1M); |
59 | mi->bank[0].start = PHYS_OFFSET; | 59 | memblock_add(MSM_HIGHMEM_BASE, MSM_HIGHMEM_SIZE); |
60 | mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET); | ||
61 | mi->bank[0].size = (219*1024*1024); | ||
62 | mi->bank[1].start = MSM_HIGHMEM_BASE; | ||
63 | mi->bank[1].node = PHYS_TO_NID(MSM_HIGHMEM_BASE); | ||
64 | mi->bank[1].size = MSM_HIGHMEM_SIZE; | ||
65 | } | 60 | } |
66 | 61 | ||
67 | static void __init mahimahi_map_io(void) | 62 | static void __init mahimahi_map_io(void) |
diff --git a/arch/arm/mach-msm/board-msm7x30.c b/arch/arm/mach-msm/board-msm7x30.c index 0c4c200e1221..245884319d2e 100644 --- a/arch/arm/mach-msm/board-msm7x30.c +++ b/arch/arm/mach-msm/board-msm7x30.c | |||
@@ -40,8 +40,7 @@ | |||
40 | #include "proc_comm.h" | 40 | #include "proc_comm.h" |
41 | #include "common.h" | 41 | #include "common.h" |
42 | 42 | ||
43 | static void __init msm7x30_fixup(struct tag *tag, char **cmdline, | 43 | static void __init msm7x30_fixup(struct tag *tag, char **cmdline) |
44 | struct meminfo *mi) | ||
45 | { | 44 | { |
46 | for (; tag->hdr.size; tag = tag_next(tag)) | 45 | for (; tag->hdr.size; tag = tag_next(tag)) |
47 | if (tag->hdr.tag == ATAG_MEM && tag->u.mem.start == 0x200000) { | 46 | if (tag->hdr.tag == ATAG_MEM && tag->u.mem.start == 0x200000) { |
diff --git a/arch/arm/mach-msm/board-sapphire.c b/arch/arm/mach-msm/board-sapphire.c index 327605174d63..e50967926dcd 100644 --- a/arch/arm/mach-msm/board-sapphire.c +++ b/arch/arm/mach-msm/board-sapphire.c | |||
@@ -35,6 +35,7 @@ | |||
35 | 35 | ||
36 | #include <linux/mtd/nand.h> | 36 | #include <linux/mtd/nand.h> |
37 | #include <linux/mtd/partitions.h> | 37 | #include <linux/mtd/partitions.h> |
38 | #include <linux/memblock.h> | ||
38 | 39 | ||
39 | #include "gpio_chip.h" | 40 | #include "gpio_chip.h" |
40 | #include "board-sapphire.h" | 41 | #include "board-sapphire.h" |
@@ -74,22 +75,18 @@ static struct map_desc sapphire_io_desc[] __initdata = { | |||
74 | } | 75 | } |
75 | }; | 76 | }; |
76 | 77 | ||
77 | static void __init sapphire_fixup(struct tag *tags, char **cmdline, | 78 | static void __init sapphire_fixup(struct tag *tags, char **cmdline) |
78 | struct meminfo *mi) | ||
79 | { | 79 | { |
80 | int smi_sz = parse_tag_smi((const struct tag *)tags); | 80 | int smi_sz = parse_tag_smi((const struct tag *)tags); |
81 | 81 | ||
82 | mi->nr_banks = 1; | ||
83 | mi->bank[0].start = PHYS_OFFSET; | ||
84 | mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET); | ||
85 | if (smi_sz == 32) { | 82 | if (smi_sz == 32) { |
86 | mi->bank[0].size = (84*1024*1024); | 83 | memblock_add(PHYS_OFFSET, 84*SZ_1M); |
87 | } else if (smi_sz == 64) { | 84 | } else if (smi_sz == 64) { |
88 | mi->bank[0].size = (101*1024*1024); | 85 | memblock_add(PHYS_OFFSET, 101*SZ_1M); |
89 | } else { | 86 | } else { |
87 | memblock_add(PHYS_OFFSET, 101*SZ_1M); | ||
90 | /* Give a default value when not get smi size */ | 88 | /* Give a default value when not get smi size */ |
91 | smi_sz = 64; | 89 | smi_sz = 64; |
92 | mi->bank[0].size = (101*1024*1024); | ||
93 | } | 90 | } |
94 | } | 91 | } |
95 | 92 | ||
diff --git a/arch/arm/mach-msm/board-trout.c b/arch/arm/mach-msm/board-trout.c index 5edfbd904d06..f72b07de2152 100644 --- a/arch/arm/mach-msm/board-trout.c +++ b/arch/arm/mach-msm/board-trout.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
21 | #include <linux/clkdev.h> | 21 | #include <linux/clkdev.h> |
22 | #include <linux/memblock.h> | ||
22 | 23 | ||
23 | #include <asm/system_info.h> | 24 | #include <asm/system_info.h> |
24 | #include <asm/mach-types.h> | 25 | #include <asm/mach-types.h> |
@@ -55,12 +56,9 @@ static void __init trout_init_irq(void) | |||
55 | msm_init_irq(); | 56 | msm_init_irq(); |
56 | } | 57 | } |
57 | 58 | ||
58 | static void __init trout_fixup(struct tag *tags, char **cmdline, | 59 | static void __init trout_fixup(struct tag *tags, char **cmdline) |
59 | struct meminfo *mi) | ||
60 | { | 60 | { |
61 | mi->nr_banks = 1; | 61 | memblock_add(PHYS_OFFSET, 101*SZ_1M); |
62 | mi->bank[0].start = PHYS_OFFSET; | ||
63 | mi->bank[0].size = (101*1024*1024); | ||
64 | } | 62 | } |
65 | 63 | ||
66 | static void __init trout_init(void) | 64 | static void __init trout_init(void) |
diff --git a/arch/arm/mach-mvebu/board-v7.c b/arch/arm/mach-mvebu/board-v7.c index 01cfce6ac20b..594262b27f56 100644 --- a/arch/arm/mach-mvebu/board-v7.c +++ b/arch/arm/mach-mvebu/board-v7.c | |||
@@ -182,6 +182,8 @@ static const char * const armada_370_xp_dt_compat[] = { | |||
182 | }; | 182 | }; |
183 | 183 | ||
184 | DT_MACHINE_START(ARMADA_370_XP_DT, "Marvell Armada 370/XP (Device Tree)") | 184 | DT_MACHINE_START(ARMADA_370_XP_DT, "Marvell Armada 370/XP (Device Tree)") |
185 | .l2c_aux_val = 0, | ||
186 | .l2c_aux_mask = ~0, | ||
185 | .smp = smp_ops(armada_xp_smp_ops), | 187 | .smp = smp_ops(armada_xp_smp_ops), |
186 | .init_machine = mvebu_dt_init, | 188 | .init_machine = mvebu_dt_init, |
187 | .init_time = mvebu_timer_and_clk_init, | 189 | .init_time = mvebu_timer_and_clk_init, |
@@ -195,6 +197,8 @@ static const char * const armada_375_dt_compat[] = { | |||
195 | }; | 197 | }; |
196 | 198 | ||
197 | DT_MACHINE_START(ARMADA_375_DT, "Marvell Armada 375 (Device Tree)") | 199 | DT_MACHINE_START(ARMADA_375_DT, "Marvell Armada 375 (Device Tree)") |
200 | .l2c_aux_val = 0, | ||
201 | .l2c_aux_mask = ~0, | ||
198 | .init_time = mvebu_timer_and_clk_init, | 202 | .init_time = mvebu_timer_and_clk_init, |
199 | .init_machine = mvebu_dt_init, | 203 | .init_machine = mvebu_dt_init, |
200 | .restart = mvebu_restart, | 204 | .restart = mvebu_restart, |
@@ -208,6 +212,8 @@ static const char * const armada_38x_dt_compat[] = { | |||
208 | }; | 212 | }; |
209 | 213 | ||
210 | DT_MACHINE_START(ARMADA_38X_DT, "Marvell Armada 380/385 (Device Tree)") | 214 | DT_MACHINE_START(ARMADA_38X_DT, "Marvell Armada 380/385 (Device Tree)") |
215 | .l2c_aux_val = 0, | ||
216 | .l2c_aux_mask = ~0, | ||
211 | .init_time = mvebu_timer_and_clk_init, | 217 | .init_time = mvebu_timer_and_clk_init, |
212 | .restart = mvebu_restart, | 218 | .restart = mvebu_restart, |
213 | .dt_compat = armada_38x_dt_compat, | 219 | .dt_compat = armada_38x_dt_compat, |
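The three Armada machine descriptors above show the other recurring change in this pull: DT platforms no longer call l2x0_of_init() from an init_machine hook. They publish an aux_ctrl value/mask pair in the machine descriptor instead and let the generic code bring the L2C-310 up (and, with no init_machine left, populate the platform bus) itself. A sketch of a minimal DT machine using the new fields; the "vendor,example-soc" compatible and the EXAMPLE_DT name are placeholders:

#include <asm/mach/arch.h>

static const char *const example_dt_compat[] = {
	"vendor,example-soc",		/* placeholder compatible string */
	NULL,
};

DT_MACHINE_START(EXAMPLE_DT, "Example SoC (Device Tree)")
	/*
	 * Value 0 with an all-ones mask means "change nothing": the core
	 * L2C code keeps whatever aux_ctrl the DT/bootloader configured
	 * and only applies its own required fix-ups.
	 */
	.l2c_aux_val	= 0,
	.l2c_aux_mask	= ~0,
	.dt_compat	= example_dt_compat,
MACHINE_END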
diff --git a/arch/arm/mach-nomadik/cpu-8815.c b/arch/arm/mach-nomadik/cpu-8815.c index 4a1065e41e9c..9116ca476d7c 100644 --- a/arch/arm/mach-nomadik/cpu-8815.c +++ b/arch/arm/mach-nomadik/cpu-8815.c | |||
@@ -143,23 +143,16 @@ static int __init cpu8815_mmcsd_init(void) | |||
143 | } | 143 | } |
144 | device_initcall(cpu8815_mmcsd_init); | 144 | device_initcall(cpu8815_mmcsd_init); |
145 | 145 | ||
146 | static void __init cpu8815_init_of(void) | ||
147 | { | ||
148 | #ifdef CONFIG_CACHE_L2X0 | ||
149 | /* At full speed latency must be >=2, so 0x249 in low bits */ | ||
150 | l2x0_of_init(0x00730249, 0xfe000fff); | ||
151 | #endif | ||
152 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); | ||
153 | } | ||
154 | |||
155 | static const char * cpu8815_board_compat[] = { | 146 | static const char * cpu8815_board_compat[] = { |
156 | "calaosystems,usb-s8815", | 147 | "calaosystems,usb-s8815", |
157 | NULL, | 148 | NULL, |
158 | }; | 149 | }; |
159 | 150 | ||
160 | DT_MACHINE_START(NOMADIK_DT, "Nomadik STn8815") | 151 | DT_MACHINE_START(NOMADIK_DT, "Nomadik STn8815") |
152 | /* At full speed latency must be >=2, so 0x249 in low bits */ | ||
153 | .l2c_aux_val = 0x00700249, | ||
154 | .l2c_aux_mask = 0xfe0fefff, | ||
161 | .map_io = cpu8815_map_io, | 155 | .map_io = cpu8815_map_io, |
162 | .init_machine = cpu8815_init_of, | ||
163 | .restart = cpu8815_restart, | 156 | .restart = cpu8815_restart, |
164 | .dt_compat = cpu8815_board_compat, | 157 | .dt_compat = cpu8815_board_compat, |
165 | MACHINE_END | 158 | MACHINE_END |
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index cb31d4390d52..0ba482638ebf 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig | |||
@@ -65,6 +65,7 @@ config SOC_AM43XX | |||
65 | select ARCH_HAS_OPP | 65 | select ARCH_HAS_OPP |
66 | select ARM_GIC | 66 | select ARM_GIC |
67 | select MACH_OMAP_GENERIC | 67 | select MACH_OMAP_GENERIC |
68 | select MIGHT_HAVE_CACHE_L2X0 | ||
68 | 69 | ||
69 | config SOC_DRA7XX | 70 | config SOC_DRA7XX |
70 | bool "TI DRA7XX" | 71 | bool "TI DRA7XX" |
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h index d88aff7baff8..ff029737c8f0 100644 --- a/arch/arm/mach-omap2/common.h +++ b/arch/arm/mach-omap2/common.h | |||
@@ -91,6 +91,7 @@ extern void omap3_sync32k_timer_init(void); | |||
91 | extern void omap3_secure_sync32k_timer_init(void); | 91 | extern void omap3_secure_sync32k_timer_init(void); |
92 | extern void omap3_gptimer_timer_init(void); | 92 | extern void omap3_gptimer_timer_init(void); |
93 | extern void omap4_local_timer_init(void); | 93 | extern void omap4_local_timer_init(void); |
94 | int omap_l2_cache_init(void); | ||
94 | extern void omap5_realtime_timer_init(void); | 95 | extern void omap5_realtime_timer_init(void); |
95 | 96 | ||
96 | void omap2420_init_early(void); | 97 | void omap2420_init_early(void); |
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index 4ec3b4a93843..8f559450c876 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c | |||
@@ -609,6 +609,7 @@ void __init am43xx_init_early(void) | |||
609 | am43xx_clockdomains_init(); | 609 | am43xx_clockdomains_init(); |
610 | am43xx_hwmod_init(); | 610 | am43xx_hwmod_init(); |
611 | omap_hwmod_init_postsetup(); | 611 | omap_hwmod_init_postsetup(); |
612 | omap_l2_cache_init(); | ||
612 | omap_clk_soc_init = am43xx_dt_clk_init; | 613 | omap_clk_soc_init = am43xx_dt_clk_init; |
613 | } | 614 | } |
614 | 615 | ||
@@ -640,6 +641,7 @@ void __init omap4430_init_early(void) | |||
640 | omap44xx_clockdomains_init(); | 641 | omap44xx_clockdomains_init(); |
641 | omap44xx_hwmod_init(); | 642 | omap44xx_hwmod_init(); |
642 | omap_hwmod_init_postsetup(); | 643 | omap_hwmod_init_postsetup(); |
644 | omap_l2_cache_init(); | ||
643 | omap_clk_soc_init = omap4xxx_dt_clk_init; | 645 | omap_clk_soc_init = omap4xxx_dt_clk_init; |
644 | } | 646 | } |
645 | 647 | ||
diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c index eb76e47091ad..4001325f90fb 100644 --- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c +++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c | |||
@@ -187,19 +187,15 @@ static void l2x0_pwrst_prepare(unsigned int cpu_id, unsigned int save_state) | |||
187 | * in every restore MPUSS OFF path. | 187 | * in every restore MPUSS OFF path. |
188 | */ | 188 | */ |
189 | #ifdef CONFIG_CACHE_L2X0 | 189 | #ifdef CONFIG_CACHE_L2X0 |
190 | static void save_l2x0_context(void) | 190 | static void __init save_l2x0_context(void) |
191 | { | 191 | { |
192 | u32 val; | 192 | writel_relaxed(l2x0_saved_regs.aux_ctrl, |
193 | void __iomem *l2x0_base = omap4_get_l2cache_base(); | 193 | sar_base + L2X0_AUXCTRL_OFFSET); |
194 | if (l2x0_base) { | 194 | writel_relaxed(l2x0_saved_regs.prefetch_ctrl, |
195 | val = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); | 195 | sar_base + L2X0_PREFETCH_CTRL_OFFSET); |
196 | writel_relaxed(val, sar_base + L2X0_AUXCTRL_OFFSET); | ||
197 | val = readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL); | ||
198 | writel_relaxed(val, sar_base + L2X0_PREFETCH_CTRL_OFFSET); | ||
199 | } | ||
200 | } | 196 | } |
201 | #else | 197 | #else |
202 | static void save_l2x0_context(void) | 198 | static void __init save_l2x0_context(void) |
203 | {} | 199 | {} |
204 | #endif | 200 | #endif |
205 | 201 | ||
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c index 99b0154493a4..326cd982a3cb 100644 --- a/arch/arm/mach-omap2/omap4-common.c +++ b/arch/arm/mach-omap2/omap4-common.c | |||
@@ -167,75 +167,57 @@ void __iomem *omap4_get_l2cache_base(void) | |||
167 | return l2cache_base; | 167 | return l2cache_base; |
168 | } | 168 | } |
169 | 169 | ||
170 | static void omap4_l2x0_disable(void) | 170 | static void omap4_l2c310_write_sec(unsigned long val, unsigned reg) |
171 | { | 171 | { |
172 | outer_flush_all(); | 172 | unsigned smc_op; |
173 | /* Disable PL310 L2 Cache controller */ | ||
174 | omap_smc1(0x102, 0x0); | ||
175 | } | ||
176 | 173 | ||
177 | static void omap4_l2x0_set_debug(unsigned long val) | 174 | switch (reg) { |
178 | { | 175 | case L2X0_CTRL: |
179 | /* Program PL310 L2 Cache controller debug register */ | 176 | smc_op = OMAP4_MON_L2X0_CTRL_INDEX; |
180 | omap_smc1(0x100, val); | 177 | break; |
178 | |||
179 | case L2X0_AUX_CTRL: | ||
180 | smc_op = OMAP4_MON_L2X0_AUXCTRL_INDEX; | ||
181 | break; | ||
182 | |||
183 | case L2X0_DEBUG_CTRL: | ||
184 | smc_op = OMAP4_MON_L2X0_DBG_CTRL_INDEX; | ||
185 | break; | ||
186 | |||
187 | case L310_PREFETCH_CTRL: | ||
188 | smc_op = OMAP4_MON_L2X0_PREFETCH_INDEX; | ||
189 | break; | ||
190 | |||
191 | default: | ||
192 | WARN_ONCE(1, "OMAP L2C310: ignoring write to reg 0x%x\n", reg); | ||
193 | return; | ||
194 | } | ||
195 | |||
196 | omap_smc1(smc_op, val); | ||
181 | } | 197 | } |
182 | 198 | ||
183 | static int __init omap_l2_cache_init(void) | 199 | int __init omap_l2_cache_init(void) |
184 | { | 200 | { |
185 | u32 aux_ctrl = 0; | 201 | u32 aux_ctrl; |
186 | |||
187 | /* | ||
188 | * To avoid code running on other OMAPs in | ||
189 | * multi-omap builds | ||
190 | */ | ||
191 | if (!cpu_is_omap44xx()) | ||
192 | return -ENODEV; | ||
193 | 202 | ||
194 | /* Static mapping, never released */ | 203 | /* Static mapping, never released */ |
195 | l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K); | 204 | l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K); |
196 | if (WARN_ON(!l2cache_base)) | 205 | if (WARN_ON(!l2cache_base)) |
197 | return -ENOMEM; | 206 | return -ENOMEM; |
198 | 207 | ||
199 | /* | 208 | /* 16-way associativity, parity disabled, way size - 64KB (es2.0 +) */ |
200 | * 16-way associativity, parity disabled | 209 | aux_ctrl = L2C_AUX_CTRL_SHARED_OVERRIDE | |
201 | * Way size - 32KB (es1.0) | 210 | L310_AUX_CTRL_DATA_PREFETCH | |
202 | * Way size - 64KB (es2.0 +) | 211 | L310_AUX_CTRL_INSTR_PREFETCH; |
203 | */ | ||
204 | aux_ctrl = ((1 << L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT) | | ||
205 | (0x1 << 25) | | ||
206 | (0x1 << L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT) | | ||
207 | (0x1 << L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT)); | ||
208 | |||
209 | if (omap_rev() == OMAP4430_REV_ES1_0) { | ||
210 | aux_ctrl |= 0x2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT; | ||
211 | } else { | ||
212 | aux_ctrl |= ((0x3 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) | | ||
213 | (1 << L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT) | | ||
214 | (1 << L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT) | | ||
215 | (1 << L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT) | | ||
216 | (1 << L2X0_AUX_CTRL_EARLY_BRESP_SHIFT)); | ||
217 | } | ||
218 | if (omap_rev() != OMAP4430_REV_ES1_0) | ||
219 | omap_smc1(0x109, aux_ctrl); | ||
220 | |||
221 | /* Enable PL310 L2 Cache controller */ | ||
222 | omap_smc1(0x102, 0x1); | ||
223 | 212 | ||
213 | outer_cache.write_sec = omap4_l2c310_write_sec; | ||
224 | if (of_have_populated_dt()) | 214 | if (of_have_populated_dt()) |
225 | l2x0_of_init(aux_ctrl, L2X0_AUX_CTRL_MASK); | 215 | l2x0_of_init(aux_ctrl, 0xcf9fffff); |
226 | else | 216 | else |
227 | l2x0_init(l2cache_base, aux_ctrl, L2X0_AUX_CTRL_MASK); | 217 | l2x0_init(l2cache_base, aux_ctrl, 0xcf9fffff); |
228 | |||
229 | /* | ||
230 | * Override default outer_cache.disable with a OMAP4 | ||
231 | * specific one | ||
232 | */ | ||
233 | outer_cache.disable = omap4_l2x0_disable; | ||
234 | outer_cache.set_debug = omap4_l2x0_set_debug; | ||
235 | 218 | ||
236 | return 0; | 219 | return 0; |
237 | } | 220 | } |
238 | omap_early_initcall(omap_l2_cache_init); | ||
239 | #endif | 221 | #endif |
240 | 222 | ||
241 | void __iomem *omap4_get_sar_ram_base(void) | 223 | void __iomem *omap4_get_sar_ram_base(void) |
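omap4_l2c310_write_sec() above replaces the old ad-hoc .disable/.set_debug overrides: on OMAP4 the PL310 control registers can only be written from the secure world, so the platform now hands the generic L2C code a single write_sec hook and lets it decide when those writes happen. A reduced sketch of the same idea for a hypothetical firmware interface — example_smc_write() and the EXAMPLE_SMC_* service IDs are made up; OMAP itself uses omap_smc1() with the OMAP4_MON_* indices shown above:

#include <linux/bug.h>
#include <linux/init.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/outercache.h>

/* Hypothetical secure-monitor call provided by platform firmware code. */
extern void example_smc_write(unsigned int service, unsigned long val);

#define EXAMPLE_SMC_L2_CTRL	0x102	/* illustrative service IDs */
#define EXAMPLE_SMC_L2_AUXCTRL	0x109

static void example_l2c310_write_sec(unsigned long val, unsigned reg)
{
	switch (reg) {
	case L2X0_CTRL:
		example_smc_write(EXAMPLE_SMC_L2_CTRL, val);
		break;
	case L2X0_AUX_CTRL:
		example_smc_write(EXAMPLE_SMC_L2_AUXCTRL, val);
		break;
	default:
		/* Anything the secure side does not expose is skipped. */
		WARN_ONCE(1, "L2C310: ignoring write to reg 0x%x\n", reg);
		break;
	}
}

static int __init example_l2_cache_init(void)
{
	/* The hook must be installed before the cache is initialised. */
	outer_cache.write_sec = example_l2c310_write_sec;
	return l2x0_of_init(0, ~0UL);
}

Compared with overriding outer_cache.disable directly, this keeps the flush/disable ordering entirely inside the core driver and removes any need for the old .set_debug method.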
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c index 3f1de1111e0f..6bbb7b55c6d1 100644 --- a/arch/arm/mach-orion5x/common.c +++ b/arch/arm/mach-orion5x/common.c | |||
@@ -365,8 +365,7 @@ void orion5x_restart(enum reboot_mode mode, const char *cmd) | |||
365 | * Many orion-based systems have buggy bootloader implementations. | 365 | * Many orion-based systems have buggy bootloader implementations. |
366 | * This is a common fixup for bogus memory tags. | 366 | * This is a common fixup for bogus memory tags. |
367 | */ | 367 | */ |
368 | void __init tag_fixup_mem32(struct tag *t, char **from, | 368 | void __init tag_fixup_mem32(struct tag *t, char **from) |
369 | struct meminfo *meminfo) | ||
370 | { | 369 | { |
371 | for (; t->hdr.size; t = tag_next(t)) | 370 | for (; t->hdr.size; t = tag_next(t)) |
372 | if (t->hdr.tag == ATAG_MEM && | 371 | if (t->hdr.tag == ATAG_MEM && |
diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h index 26d6f34b6027..cd0389c6e822 100644 --- a/arch/arm/mach-orion5x/common.h +++ b/arch/arm/mach-orion5x/common.h | |||
@@ -64,9 +64,8 @@ int orion5x_pci_sys_setup(int nr, struct pci_sys_data *sys); | |||
64 | struct pci_bus *orion5x_pci_sys_scan_bus(int nr, struct pci_sys_data *sys); | 64 | struct pci_bus *orion5x_pci_sys_scan_bus(int nr, struct pci_sys_data *sys); |
65 | int orion5x_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); | 65 | int orion5x_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); |
66 | 66 | ||
67 | struct meminfo; | ||
68 | struct tag; | 67 | struct tag; |
69 | extern void __init tag_fixup_mem32(struct tag *, char **, struct meminfo *); | 68 | extern void __init tag_fixup_mem32(struct tag *, char **); |
70 | 69 | ||
71 | #ifdef CONFIG_MACH_MSS2_DT | 70 | #ifdef CONFIG_MACH_MSS2_DT |
72 | extern void mss2_init(void); | 71 | extern void mss2_init(void); |
diff --git a/arch/arm/mach-prima2/Makefile b/arch/arm/mach-prima2/Makefile index 7a6b4a323125..8846e7d87ea5 100644 --- a/arch/arm/mach-prima2/Makefile +++ b/arch/arm/mach-prima2/Makefile | |||
@@ -2,7 +2,6 @@ obj-y += rstc.o | |||
2 | obj-y += common.o | 2 | obj-y += common.o |
3 | obj-y += rtciobrg.o | 3 | obj-y += rtciobrg.o |
4 | obj-$(CONFIG_DEBUG_LL) += lluart.o | 4 | obj-$(CONFIG_DEBUG_LL) += lluart.o |
5 | obj-$(CONFIG_CACHE_L2X0) += l2x0.o | ||
6 | obj-$(CONFIG_SUSPEND) += pm.o sleep.o | 5 | obj-$(CONFIG_SUSPEND) += pm.o sleep.o |
7 | obj-$(CONFIG_SMP) += platsmp.o headsmp.o | 6 | obj-$(CONFIG_SMP) += platsmp.o headsmp.o |
8 | obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o | 7 | obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o |
diff --git a/arch/arm/mach-prima2/common.c b/arch/arm/mach-prima2/common.c index 47c7819edb9b..a860ea27e8ae 100644 --- a/arch/arm/mach-prima2/common.c +++ b/arch/arm/mach-prima2/common.c | |||
@@ -34,6 +34,8 @@ static const char *atlas6_dt_match[] __initconst = { | |||
34 | 34 | ||
35 | DT_MACHINE_START(ATLAS6_DT, "Generic ATLAS6 (Flattened Device Tree)") | 35 | DT_MACHINE_START(ATLAS6_DT, "Generic ATLAS6 (Flattened Device Tree)") |
36 | /* Maintainer: Barry Song <baohua.song@csr.com> */ | 36 | /* Maintainer: Barry Song <baohua.song@csr.com> */ |
37 | .l2c_aux_val = 0, | ||
38 | .l2c_aux_mask = ~0, | ||
37 | .map_io = sirfsoc_map_io, | 39 | .map_io = sirfsoc_map_io, |
38 | .init_late = sirfsoc_init_late, | 40 | .init_late = sirfsoc_init_late, |
39 | .dt_compat = atlas6_dt_match, | 41 | .dt_compat = atlas6_dt_match, |
@@ -48,6 +50,8 @@ static const char *prima2_dt_match[] __initconst = { | |||
48 | 50 | ||
49 | DT_MACHINE_START(PRIMA2_DT, "Generic PRIMA2 (Flattened Device Tree)") | 51 | DT_MACHINE_START(PRIMA2_DT, "Generic PRIMA2 (Flattened Device Tree)") |
50 | /* Maintainer: Barry Song <baohua.song@csr.com> */ | 52 | /* Maintainer: Barry Song <baohua.song@csr.com> */ |
53 | .l2c_aux_val = 0, | ||
54 | .l2c_aux_mask = ~0, | ||
51 | .map_io = sirfsoc_map_io, | 55 | .map_io = sirfsoc_map_io, |
52 | .dma_zone_size = SZ_256M, | 56 | .dma_zone_size = SZ_256M, |
53 | .init_late = sirfsoc_init_late, | 57 | .init_late = sirfsoc_init_late, |
@@ -63,6 +67,8 @@ static const char *marco_dt_match[] __initconst = { | |||
63 | 67 | ||
64 | DT_MACHINE_START(MARCO_DT, "Generic MARCO (Flattened Device Tree)") | 68 | DT_MACHINE_START(MARCO_DT, "Generic MARCO (Flattened Device Tree)") |
65 | /* Maintainer: Barry Song <baohua.song@csr.com> */ | 69 | /* Maintainer: Barry Song <baohua.song@csr.com> */ |
70 | .l2c_aux_val = 0, | ||
71 | .l2c_aux_mask = ~0, | ||
66 | .smp = smp_ops(sirfsoc_smp_ops), | 72 | .smp = smp_ops(sirfsoc_smp_ops), |
67 | .map_io = sirfsoc_map_io, | 73 | .map_io = sirfsoc_map_io, |
68 | .init_late = sirfsoc_init_late, | 74 | .init_late = sirfsoc_init_late, |
diff --git a/arch/arm/mach-prima2/l2x0.c b/arch/arm/mach-prima2/l2x0.c deleted file mode 100644 index c7102539c0b0..000000000000 --- a/arch/arm/mach-prima2/l2x0.c +++ /dev/null | |||
@@ -1,49 +0,0 @@ | |||
1 | /* | ||
2 | * l2 cache initialization for CSR SiRFprimaII | ||
3 | * | ||
4 | * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. | ||
5 | * | ||
6 | * Licensed under GPLv2 or later. | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/of.h> | ||
12 | #include <asm/hardware/cache-l2x0.h> | ||
13 | |||
14 | struct l2x0_aux { | ||
15 | u32 val; | ||
16 | u32 mask; | ||
17 | }; | ||
18 | |||
19 | static const struct l2x0_aux prima2_l2x0_aux __initconst = { | ||
20 | .val = 2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT, | ||
21 | .mask = 0, | ||
22 | }; | ||
23 | |||
24 | static const struct l2x0_aux marco_l2x0_aux __initconst = { | ||
25 | .val = (2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) | | ||
26 | (1 << L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT), | ||
27 | .mask = L2X0_AUX_CTRL_MASK, | ||
28 | }; | ||
29 | |||
30 | static const struct of_device_id sirf_l2x0_ids[] __initconst = { | ||
31 | { .compatible = "sirf,prima2-pl310-cache", .data = &prima2_l2x0_aux, }, | ||
32 | { .compatible = "sirf,marco-pl310-cache", .data = &marco_l2x0_aux, }, | ||
33 | {}, | ||
34 | }; | ||
35 | |||
36 | static int __init sirfsoc_l2x0_init(void) | ||
37 | { | ||
38 | struct device_node *np; | ||
39 | const struct l2x0_aux *aux; | ||
40 | |||
41 | np = of_find_matching_node(NULL, sirf_l2x0_ids); | ||
42 | if (np) { | ||
43 | aux = of_match_node(sirf_l2x0_ids, np)->data; | ||
44 | return l2x0_of_init(aux->val, aux->mask); | ||
45 | } | ||
46 | |||
47 | return 0; | ||
48 | } | ||
49 | early_initcall(sirfsoc_l2x0_init); | ||
diff --git a/arch/arm/mach-prima2/pm.c b/arch/arm/mach-prima2/pm.c index c4525a88e5da..96e9bc102117 100644 --- a/arch/arm/mach-prima2/pm.c +++ b/arch/arm/mach-prima2/pm.c | |||
@@ -71,7 +71,6 @@ static int sirfsoc_pm_enter(suspend_state_t state) | |||
71 | case PM_SUSPEND_MEM: | 71 | case PM_SUSPEND_MEM: |
72 | sirfsoc_pre_suspend_power_off(); | 72 | sirfsoc_pre_suspend_power_off(); |
73 | 73 | ||
74 | outer_flush_all(); | ||
75 | outer_disable(); | 74 | outer_disable(); |
76 | /* go zzz */ | 75 | /* go zzz */ |
77 | cpu_suspend(0, sirfsoc_finish_suspend); | 76 | cpu_suspend(0, sirfsoc_finish_suspend); |
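The dropped outer_flush_all() before outer_disable() here is not an oversight: the reworked L2C disable path cleans and invalidates the cache itself before turning it off, so a separate flush immediately beforehand is redundant. A hedged sketch of the resulting suspend-enter shape — example_finish_suspend() stands in for the platform's real finisher such as sirfsoc_finish_suspend():

#include <linux/suspend.h>
#include <asm/outercache.h>
#include <asm/suspend.h>

/* Platform-specific "point of no return", runs with the MMU still on. */
extern int example_finish_suspend(unsigned long arg);

static int example_pm_enter(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_MEM:
		/* Flushes the outer cache internally, then disables it. */
		outer_disable();
		cpu_suspend(0, example_finish_suspend);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}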
diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c index 584439bfa59f..4d3588d26c2a 100644 --- a/arch/arm/mach-pxa/cm-x300.c +++ b/arch/arm/mach-pxa/cm-x300.c | |||
@@ -837,8 +837,7 @@ static void __init cm_x300_init(void) | |||
837 | cm_x300_init_bl(); | 837 | cm_x300_init_bl(); |
838 | } | 838 | } |
839 | 839 | ||
840 | static void __init cm_x300_fixup(struct tag *tags, char **cmdline, | 840 | static void __init cm_x300_fixup(struct tag *tags, char **cmdline) |
841 | struct meminfo *mi) | ||
842 | { | 841 | { |
843 | /* Make sure that mi->bank[0].start = PHYS_ADDR */ | 842 | /* Make sure that mi->bank[0].start = PHYS_ADDR */ |
844 | for (; tags->hdr.size; tags = tag_next(tags)) | 843 | for (; tags->hdr.size; tags = tag_next(tags)) |
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c index 57d60542f982..91dd1c7cdbcd 100644 --- a/arch/arm/mach-pxa/corgi.c +++ b/arch/arm/mach-pxa/corgi.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/input/matrix_keypad.h> | 34 | #include <linux/input/matrix_keypad.h> |
35 | #include <linux/gpio_keys.h> | 35 | #include <linux/gpio_keys.h> |
36 | #include <linux/module.h> | 36 | #include <linux/module.h> |
37 | #include <linux/memblock.h> | ||
37 | #include <video/w100fb.h> | 38 | #include <video/w100fb.h> |
38 | 39 | ||
39 | #include <asm/setup.h> | 40 | #include <asm/setup.h> |
@@ -753,16 +754,13 @@ static void __init corgi_init(void) | |||
753 | platform_add_devices(devices, ARRAY_SIZE(devices)); | 754 | platform_add_devices(devices, ARRAY_SIZE(devices)); |
754 | } | 755 | } |
755 | 756 | ||
756 | static void __init fixup_corgi(struct tag *tags, char **cmdline, | 757 | static void __init fixup_corgi(struct tag *tags, char **cmdline) |
757 | struct meminfo *mi) | ||
758 | { | 758 | { |
759 | sharpsl_save_param(); | 759 | sharpsl_save_param(); |
760 | mi->nr_banks=1; | ||
761 | mi->bank[0].start = 0xa0000000; | ||
762 | if (machine_is_corgi()) | 760 | if (machine_is_corgi()) |
763 | mi->bank[0].size = (32*1024*1024); | 761 | memblock_add(0xa0000000, SZ_32M); |
764 | else | 762 | else |
765 | mi->bank[0].size = (64*1024*1024); | 763 | memblock_add(0xa0000000, SZ_64M); |
766 | } | 764 | } |
767 | 765 | ||
768 | #ifdef CONFIG_MACH_CORGI | 766 | #ifdef CONFIG_MACH_CORGI |
diff --git a/arch/arm/mach-pxa/eseries.c b/arch/arm/mach-pxa/eseries.c index 8280ebcaab9f..cfb864173ce3 100644 --- a/arch/arm/mach-pxa/eseries.c +++ b/arch/arm/mach-pxa/eseries.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/mtd/nand.h> | 21 | #include <linux/mtd/nand.h> |
22 | #include <linux/mtd/partitions.h> | 22 | #include <linux/mtd/partitions.h> |
23 | #include <linux/usb/gpio_vbus.h> | 23 | #include <linux/usb/gpio_vbus.h> |
24 | #include <linux/memblock.h> | ||
24 | 25 | ||
25 | #include <video/w100fb.h> | 26 | #include <video/w100fb.h> |
26 | 27 | ||
@@ -41,14 +42,12 @@ | |||
41 | #include "clock.h" | 42 | #include "clock.h" |
42 | 43 | ||
43 | /* Only e800 has 128MB RAM */ | 44 | /* Only e800 has 128MB RAM */ |
44 | void __init eseries_fixup(struct tag *tags, char **cmdline, struct meminfo *mi) | 45 | void __init eseries_fixup(struct tag *tags, char **cmdline) |
45 | { | 46 | { |
46 | mi->nr_banks=1; | ||
47 | mi->bank[0].start = 0xa0000000; | ||
48 | if (machine_is_e800()) | 47 | if (machine_is_e800()) |
49 | mi->bank[0].size = (128*1024*1024); | 48 | memblock_add(0xa0000000, SZ_128M); |
50 | else | 49 | else |
51 | mi->bank[0].size = (64*1024*1024); | 50 | memblock_add(0xa0000000, SZ_64M); |
52 | } | 51 | } |
53 | 52 | ||
54 | struct gpio_vbus_mach_info e7xx_udc_info = { | 53 | struct gpio_vbus_mach_info e7xx_udc_info = { |
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c index aedf053a1de5..131991629116 100644 --- a/arch/arm/mach-pxa/poodle.c +++ b/arch/arm/mach-pxa/poodle.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/spi/ads7846.h> | 29 | #include <linux/spi/ads7846.h> |
30 | #include <linux/spi/pxa2xx_spi.h> | 30 | #include <linux/spi/pxa2xx_spi.h> |
31 | #include <linux/mtd/sharpsl.h> | 31 | #include <linux/mtd/sharpsl.h> |
32 | #include <linux/memblock.h> | ||
32 | 33 | ||
33 | #include <mach/hardware.h> | 34 | #include <mach/hardware.h> |
34 | #include <asm/mach-types.h> | 35 | #include <asm/mach-types.h> |
@@ -456,13 +457,10 @@ static void __init poodle_init(void) | |||
456 | poodle_init_spi(); | 457 | poodle_init_spi(); |
457 | } | 458 | } |
458 | 459 | ||
459 | static void __init fixup_poodle(struct tag *tags, char **cmdline, | 460 | static void __init fixup_poodle(struct tag *tags, char **cmdline) |
460 | struct meminfo *mi) | ||
461 | { | 461 | { |
462 | sharpsl_save_param(); | 462 | sharpsl_save_param(); |
463 | mi->nr_banks=1; | 463 | memblock_add(0xa0000000, SZ_32M); |
464 | mi->bank[0].start = 0xa0000000; | ||
465 | mi->bank[0].size = (32*1024*1024); | ||
466 | } | 464 | } |
467 | 465 | ||
468 | MACHINE_START(POODLE, "SHARP Poodle") | 466 | MACHINE_START(POODLE, "SHARP Poodle") |
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c index 0b11c1af51c4..840c3a48e720 100644 --- a/arch/arm/mach-pxa/spitz.c +++ b/arch/arm/mach-pxa/spitz.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/io.h> | 32 | #include <linux/io.h> |
33 | #include <linux/module.h> | 33 | #include <linux/module.h> |
34 | #include <linux/reboot.h> | 34 | #include <linux/reboot.h> |
35 | #include <linux/memblock.h> | ||
35 | 36 | ||
36 | #include <asm/setup.h> | 37 | #include <asm/setup.h> |
37 | #include <asm/mach-types.h> | 38 | #include <asm/mach-types.h> |
@@ -971,13 +972,10 @@ static void __init spitz_init(void) | |||
971 | spitz_i2c_init(); | 972 | spitz_i2c_init(); |
972 | } | 973 | } |
973 | 974 | ||
974 | static void __init spitz_fixup(struct tag *tags, char **cmdline, | 975 | static void __init spitz_fixup(struct tag *tags, char **cmdline) |
975 | struct meminfo *mi) | ||
976 | { | 976 | { |
977 | sharpsl_save_param(); | 977 | sharpsl_save_param(); |
978 | mi->nr_banks = 1; | 978 | memblock_add(0xa0000000, SZ_64M); |
979 | mi->bank[0].start = 0xa0000000; | ||
980 | mi->bank[0].size = (64*1024*1024); | ||
981 | } | 979 | } |
982 | 980 | ||
983 | #ifdef CONFIG_MACH_SPITZ | 981 | #ifdef CONFIG_MACH_SPITZ |
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index ef5557b807ed..c158a6e3e0aa 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/i2c/pxa-i2c.h> | 37 | #include <linux/i2c/pxa-i2c.h> |
38 | #include <linux/usb/gpio_vbus.h> | 38 | #include <linux/usb/gpio_vbus.h> |
39 | #include <linux/reboot.h> | 39 | #include <linux/reboot.h> |
40 | #include <linux/memblock.h> | ||
40 | 41 | ||
41 | #include <asm/setup.h> | 42 | #include <asm/setup.h> |
42 | #include <asm/mach-types.h> | 43 | #include <asm/mach-types.h> |
@@ -960,13 +961,10 @@ static void __init tosa_init(void) | |||
960 | platform_add_devices(devices, ARRAY_SIZE(devices)); | 961 | platform_add_devices(devices, ARRAY_SIZE(devices)); |
961 | } | 962 | } |
962 | 963 | ||
963 | static void __init fixup_tosa(struct tag *tags, char **cmdline, | 964 | static void __init fixup_tosa(struct tag *tags, char **cmdline) |
964 | struct meminfo *mi) | ||
965 | { | 965 | { |
966 | sharpsl_save_param(); | 966 | sharpsl_save_param(); |
967 | mi->nr_banks=1; | 967 | memblock_add(0xa0000000, SZ_64M); |
968 | mi->bank[0].start = 0xa0000000; | ||
969 | mi->bank[0].size = (64*1024*1024); | ||
970 | } | 968 | } |
971 | 969 | ||
972 | MACHINE_START(TOSA, "SHARP Tosa") | 970 | MACHINE_START(TOSA, "SHARP Tosa") |
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c index 960b8dd78c44..8c1b39a0caa0 100644 --- a/arch/arm/mach-realview/core.c +++ b/arch/arm/mach-realview/core.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/amba/mmci.h> | 31 | #include <linux/amba/mmci.h> |
32 | #include <linux/gfp.h> | 32 | #include <linux/gfp.h> |
33 | #include <linux/mtd/physmap.h> | 33 | #include <linux/mtd/physmap.h> |
34 | #include <linux/memblock.h> | ||
34 | 35 | ||
35 | #include <mach/hardware.h> | 36 | #include <mach/hardware.h> |
36 | #include <asm/irq.h> | 37 | #include <asm/irq.h> |
@@ -385,19 +386,15 @@ void __init realview_timer_init(unsigned int timer_irq) | |||
385 | /* | 386 | /* |
386 | * Setup the memory banks. | 387 | * Setup the memory banks. |
387 | */ | 388 | */ |
388 | void realview_fixup(struct tag *tags, char **from, struct meminfo *meminfo) | 389 | void realview_fixup(struct tag *tags, char **from) |
389 | { | 390 | { |
390 | /* | 391 | /* |
391 | * Most RealView platforms have 512MB contiguous RAM at 0x70000000. | 392 | * Most RealView platforms have 512MB contiguous RAM at 0x70000000. |
392 | * Half of this is mirrored at 0. | 393 | * Half of this is mirrored at 0. |
393 | */ | 394 | */ |
394 | #ifdef CONFIG_REALVIEW_HIGH_PHYS_OFFSET | 395 | #ifdef CONFIG_REALVIEW_HIGH_PHYS_OFFSET |
395 | meminfo->bank[0].start = 0x70000000; | 396 | memblock_add(0x70000000, SZ_512M); |
396 | meminfo->bank[0].size = SZ_512M; | ||
397 | meminfo->nr_banks = 1; | ||
398 | #else | 397 | #else |
399 | meminfo->bank[0].start = 0; | 398 | memblock_add(0, SZ_256M); |
400 | meminfo->bank[0].size = SZ_256M; | ||
401 | meminfo->nr_banks = 1; | ||
402 | #endif | 399 | #endif |
403 | } | 400 | } |
diff --git a/arch/arm/mach-realview/core.h b/arch/arm/mach-realview/core.h index 13dc830ef469..868ece221978 100644 --- a/arch/arm/mach-realview/core.h +++ b/arch/arm/mach-realview/core.h | |||
@@ -52,8 +52,7 @@ extern int realview_flash_register(struct resource *res, u32 num); | |||
52 | extern int realview_eth_register(const char *name, struct resource *res); | 52 | extern int realview_eth_register(const char *name, struct resource *res); |
53 | extern int realview_usb_register(struct resource *res); | 53 | extern int realview_usb_register(struct resource *res); |
54 | extern void realview_init_early(void); | 54 | extern void realview_init_early(void); |
55 | extern void realview_fixup(struct tag *tags, char **from, | 55 | extern void realview_fixup(struct tag *tags, char **from); |
56 | struct meminfo *meminfo); | ||
57 | 56 | ||
58 | extern struct smp_operations realview_smp_ops; | 57 | extern struct smp_operations realview_smp_ops; |
59 | extern void realview_cpu_die(unsigned int cpu); | 58 | extern void realview_cpu_die(unsigned int cpu); |
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c index 6bb070e80128..739d4f113097 100644 --- a/arch/arm/mach-realview/realview_eb.c +++ b/arch/arm/mach-realview/realview_eb.c | |||
@@ -442,8 +442,13 @@ static void __init realview_eb_init(void) | |||
442 | realview_eb11mp_fixup(); | 442 | realview_eb11mp_fixup(); |
443 | 443 | ||
444 | #ifdef CONFIG_CACHE_L2X0 | 444 | #ifdef CONFIG_CACHE_L2X0 |
445 | /* 1MB (128KB/way), 8-way associativity, evmon/parity/share enabled | 445 | /* |
446 | * Bits: .... ...0 0111 1001 0000 .... .... .... */ | 446 | * The PL220 needs to be manually configured as the hardware |
447 | * doesn't report the correct sizes. | ||
448 | * 1MB (128KB/way), 8-way associativity, event monitor and | ||
449 | * parity enabled, ignore share bit, no force write allocate | ||
450 | * Bits: .... ...0 0111 1001 0000 .... .... .... | ||
451 | */ | ||
447 | l2x0_init(__io_address(REALVIEW_EB11MP_L220_BASE), 0x00790000, 0xfe000fff); | 452 | l2x0_init(__io_address(REALVIEW_EB11MP_L220_BASE), 0x00790000, 0xfe000fff); |
448 | #endif | 453 | #endif |
449 | platform_device_register(&pmu_device); | 454 | platform_device_register(&pmu_device); |
diff --git a/arch/arm/mach-realview/realview_pb1176.c b/arch/arm/mach-realview/realview_pb1176.c index 173f2c15de49..b0e0dcaed944 100644 --- a/arch/arm/mach-realview/realview_pb1176.c +++ b/arch/arm/mach-realview/realview_pb1176.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/irqchip/arm-gic.h> | 32 | #include <linux/irqchip/arm-gic.h> |
33 | #include <linux/platform_data/clk-realview.h> | 33 | #include <linux/platform_data/clk-realview.h> |
34 | #include <linux/reboot.h> | 34 | #include <linux/reboot.h> |
35 | #include <linux/memblock.h> | ||
35 | 36 | ||
36 | #include <mach/hardware.h> | 37 | #include <mach/hardware.h> |
37 | #include <asm/irq.h> | 38 | #include <asm/irq.h> |
@@ -339,15 +340,12 @@ static void realview_pb1176_restart(enum reboot_mode mode, const char *cmd) | |||
339 | dsb(); | 340 | dsb(); |
340 | } | 341 | } |
341 | 342 | ||
342 | static void realview_pb1176_fixup(struct tag *tags, char **from, | 343 | static void realview_pb1176_fixup(struct tag *tags, char **from) |
343 | struct meminfo *meminfo) | ||
344 | { | 344 | { |
345 | /* | 345 | /* |
346 | * RealView PB1176 only has 128MB of RAM mapped at 0. | 346 | * RealView PB1176 only has 128MB of RAM mapped at 0. |
347 | */ | 347 | */ |
348 | meminfo->bank[0].start = 0; | 348 | memblock_add(0, SZ_128M); |
349 | meminfo->bank[0].size = SZ_128M; | ||
350 | meminfo->nr_banks = 1; | ||
351 | } | 349 | } |
352 | 350 | ||
353 | static void __init realview_pb1176_init(void) | 351 | static void __init realview_pb1176_init(void) |
@@ -355,7 +353,13 @@ static void __init realview_pb1176_init(void) | |||
355 | int i; | 353 | int i; |
356 | 354 | ||
357 | #ifdef CONFIG_CACHE_L2X0 | 355 | #ifdef CONFIG_CACHE_L2X0 |
358 | /* 128Kb (16Kb/way) 8-way associativity. evmon/parity/share enabled. */ | 356 | /* |
357 | * The PL220 needs to be manually configured as the hardware | ||
358 | * doesn't report the correct sizes. | ||
359 | * 128kB (16kB/way), 8-way associativity, event monitor and | ||
360 | * parity enabled, ignore share bit, no force write allocate | ||
361 | * Bits: .... ...0 0111 0011 0000 .... .... .... | ||
362 | */ | ||
359 | l2x0_init(__io_address(REALVIEW_PB1176_L220_BASE), 0x00730000, 0xfe000fff); | 363 | l2x0_init(__io_address(REALVIEW_PB1176_L220_BASE), 0x00730000, 0xfe000fff); |
360 | #endif | 364 | #endif |
361 | 365 | ||
diff --git a/arch/arm/mach-realview/realview_pb11mp.c b/arch/arm/mach-realview/realview_pb11mp.c index bde7e6b1fd44..47bf55fdbf27 100644 --- a/arch/arm/mach-realview/realview_pb11mp.c +++ b/arch/arm/mach-realview/realview_pb11mp.c | |||
@@ -337,8 +337,13 @@ static void __init realview_pb11mp_init(void) | |||
337 | int i; | 337 | int i; |
338 | 338 | ||
339 | #ifdef CONFIG_CACHE_L2X0 | 339 | #ifdef CONFIG_CACHE_L2X0 |
340 | /* 1MB (128KB/way), 8-way associativity, evmon/parity/share enabled | 340 | /* |
341 | * Bits: .... ...0 0111 1001 0000 .... .... .... */ | 341 | * The PL220 needs to be manually configured as the hardware |
342 | * doesn't report the correct sizes. | ||
343 | * 1MB (128KB/way), 8-way associativity, event monitor and | ||
344 | * parity enabled, ignore share bit, no force write allocate | ||
345 | * Bits: .... ...0 0111 1001 0000 .... .... .... | ||
346 | */ | ||
342 | l2x0_init(__io_address(REALVIEW_TC11MP_L220_BASE), 0x00790000, 0xfe000fff); | 347 | l2x0_init(__io_address(REALVIEW_TC11MP_L220_BASE), 0x00790000, 0xfe000fff); |
343 | #endif | 348 | #endif |
344 | 349 | ||
diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c index 72c96caebefa..d89eb4023467 100644 --- a/arch/arm/mach-realview/realview_pbx.c +++ b/arch/arm/mach-realview/realview_pbx.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/irqchip/arm-gic.h> | 29 | #include <linux/irqchip/arm-gic.h> |
30 | #include <linux/platform_data/clk-realview.h> | 30 | #include <linux/platform_data/clk-realview.h> |
31 | #include <linux/reboot.h> | 31 | #include <linux/reboot.h> |
32 | #include <linux/memblock.h> | ||
32 | 33 | ||
33 | #include <asm/irq.h> | 34 | #include <asm/irq.h> |
34 | #include <asm/mach-types.h> | 35 | #include <asm/mach-types.h> |
@@ -325,23 +326,19 @@ static void __init realview_pbx_timer_init(void) | |||
325 | realview_pbx_twd_init(); | 326 | realview_pbx_twd_init(); |
326 | } | 327 | } |
327 | 328 | ||
328 | static void realview_pbx_fixup(struct tag *tags, char **from, | 329 | static void realview_pbx_fixup(struct tag *tags, char **from) |
329 | struct meminfo *meminfo) | ||
330 | { | 330 | { |
331 | #ifdef CONFIG_SPARSEMEM | 331 | #ifdef CONFIG_SPARSEMEM |
332 | /* | 332 | /* |
333 | * Memory configuration with SPARSEMEM enabled on RealView PBX (see | 333 | * Memory configuration with SPARSEMEM enabled on RealView PBX (see |
334 | * asm/mach/memory.h for more information). | 334 | * asm/mach/memory.h for more information). |
335 | */ | 335 | */ |
336 | meminfo->bank[0].start = 0; | 336 | |
337 | meminfo->bank[0].size = SZ_256M; | 337 | memblock_add(0, SZ_256M); |
338 | meminfo->bank[1].start = 0x20000000; | 338 | memblock_add(0x20000000, SZ_512M); |
339 | meminfo->bank[1].size = SZ_512M; | 339 | memblock_add(0x80000000, SZ_256M); |
340 | meminfo->bank[2].start = 0x80000000; | ||
341 | meminfo->bank[2].size = SZ_256M; | ||
342 | meminfo->nr_banks = 3; | ||
343 | #else | 340 | #else |
344 | realview_fixup(tags, from, meminfo); | 341 | realview_fixup(tags, from); |
345 | #endif | 342 | #endif |
346 | } | 343 | } |
347 | 344 | ||
@@ -370,8 +367,8 @@ static void __init realview_pbx_init(void) | |||
370 | __io_address(REALVIEW_PBX_TILE_L220_BASE); | 367 | __io_address(REALVIEW_PBX_TILE_L220_BASE); |
371 | 368 | ||
372 | /* set RAM latencies to 1 cycle for eASIC */ | 369 | /* set RAM latencies to 1 cycle for eASIC */ |
373 | writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL); | 370 | writel(0, l2x0_base + L310_TAG_LATENCY_CTRL); |
374 | writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL); | 371 | writel(0, l2x0_base + L310_DATA_LATENCY_CTRL); |
375 | 372 | ||
376 | /* 16KB way size, 8-way associativity, parity disabled | 373 | /* 16KB way size, 8-way associativity, parity disabled |
377 | * Bits: .. 0 0 0 0 1 00 1 0 1 001 0 000 0 .... .... .... */ | 374 | * Bits: .. 0 0 0 0 1 00 1 0 1 001 0 000 0 .... .... .... */ |
diff --git a/arch/arm/mach-rockchip/rockchip.c b/arch/arm/mach-rockchip/rockchip.c index 4499b0a31a27..968cc348e624 100644 --- a/arch/arm/mach-rockchip/rockchip.c +++ b/arch/arm/mach-rockchip/rockchip.c | |||
@@ -24,12 +24,6 @@ | |||
24 | #include <asm/hardware/cache-l2x0.h> | 24 | #include <asm/hardware/cache-l2x0.h> |
25 | #include "core.h" | 25 | #include "core.h" |
26 | 26 | ||
27 | static void __init rockchip_dt_init(void) | ||
28 | { | ||
29 | l2x0_of_init(0, ~0UL); | ||
30 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); | ||
31 | } | ||
32 | |||
33 | static const char * const rockchip_board_dt_compat[] = { | 27 | static const char * const rockchip_board_dt_compat[] = { |
34 | "rockchip,rk2928", | 28 | "rockchip,rk2928", |
35 | "rockchip,rk3066a", | 29 | "rockchip,rk3066a", |
@@ -39,6 +33,7 @@ static const char * const rockchip_board_dt_compat[] = { | |||
39 | }; | 33 | }; |
40 | 34 | ||
41 | DT_MACHINE_START(ROCKCHIP_DT, "Rockchip Cortex-A9 (Device Tree)") | 35 | DT_MACHINE_START(ROCKCHIP_DT, "Rockchip Cortex-A9 (Device Tree)") |
42 | .init_machine = rockchip_dt_init, | 36 | .l2c_aux_val = 0, |
37 | .l2c_aux_mask = ~0, | ||
43 | .dt_compat = rockchip_board_dt_compat, | 38 | .dt_compat = rockchip_board_dt_compat, |
44 | MACHINE_END | 39 | MACHINE_END |
diff --git a/arch/arm/mach-s3c24xx/mach-smdk2413.c b/arch/arm/mach-s3c24xx/mach-smdk2413.c index a38f8a049e22..fb3b80e44595 100644 --- a/arch/arm/mach-s3c24xx/mach-smdk2413.c +++ b/arch/arm/mach-s3c24xx/mach-smdk2413.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/serial_s3c.h> | 22 | #include <linux/serial_s3c.h> |
23 | #include <linux/platform_device.h> | 23 | #include <linux/platform_device.h> |
24 | #include <linux/io.h> | 24 | #include <linux/io.h> |
25 | #include <linux/memblock.h> | ||
25 | 26 | ||
26 | #include <asm/mach/arch.h> | 27 | #include <asm/mach/arch.h> |
27 | #include <asm/mach/map.h> | 28 | #include <asm/mach/map.h> |
@@ -93,13 +94,10 @@ static struct platform_device *smdk2413_devices[] __initdata = { | |||
93 | &s3c2412_device_dma, | 94 | &s3c2412_device_dma, |
94 | }; | 95 | }; |
95 | 96 | ||
96 | static void __init smdk2413_fixup(struct tag *tags, char **cmdline, | 97 | static void __init smdk2413_fixup(struct tag *tags, char **cmdline) |
97 | struct meminfo *mi) | ||
98 | { | 98 | { |
99 | if (tags != phys_to_virt(S3C2410_SDRAM_PA + 0x100)) { | 99 | if (tags != phys_to_virt(S3C2410_SDRAM_PA + 0x100)) { |
100 | mi->nr_banks=1; | 100 | memblock_add(0x30000000, SZ_64M); |
101 | mi->bank[0].start = 0x30000000; | ||
102 | mi->bank[0].size = SZ_64M; | ||
103 | } | 101 | } |
104 | } | 102 | } |
105 | 103 | ||
diff --git a/arch/arm/mach-s3c24xx/mach-vstms.c b/arch/arm/mach-s3c24xx/mach-vstms.c index 6b706c915387..9104c2be36c9 100644 --- a/arch/arm/mach-s3c24xx/mach-vstms.c +++ b/arch/arm/mach-s3c24xx/mach-vstms.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/mtd/nand.h> | 23 | #include <linux/mtd/nand.h> |
24 | #include <linux/mtd/nand_ecc.h> | 24 | #include <linux/mtd/nand_ecc.h> |
25 | #include <linux/mtd/partitions.h> | 25 | #include <linux/mtd/partitions.h> |
26 | #include <linux/memblock.h> | ||
26 | 27 | ||
27 | #include <asm/mach/arch.h> | 28 | #include <asm/mach/arch.h> |
28 | #include <asm/mach/map.h> | 29 | #include <asm/mach/map.h> |
@@ -129,13 +130,10 @@ static struct platform_device *vstms_devices[] __initdata = { | |||
129 | &s3c2412_device_dma, | 130 | &s3c2412_device_dma, |
130 | }; | 131 | }; |
131 | 132 | ||
132 | static void __init vstms_fixup(struct tag *tags, char **cmdline, | 133 | static void __init vstms_fixup(struct tag *tags, char **cmdline) |
133 | struct meminfo *mi) | ||
134 | { | 134 | { |
135 | if (tags != phys_to_virt(S3C2410_SDRAM_PA + 0x100)) { | 135 | if (tags != phys_to_virt(S3C2410_SDRAM_PA + 0x100)) { |
136 | mi->nr_banks=1; | 136 | memblock_add(0x30000000, SZ_64M); |
137 | mi->bank[0].start = 0x30000000; | ||
138 | mi->bank[0].size = SZ_64M; | ||
139 | } | 137 | } |
140 | } | 138 | } |
141 | 139 | ||
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c index 8443a27bca2f..7dd894ece9ae 100644 --- a/arch/arm/mach-sa1100/assabet.c +++ b/arch/arm/mach-sa1100/assabet.c | |||
@@ -531,7 +531,7 @@ static void __init get_assabet_scr(void) | |||
531 | } | 531 | } |
532 | 532 | ||
533 | static void __init | 533 | static void __init |
534 | fixup_assabet(struct tag *tags, char **cmdline, struct meminfo *mi) | 534 | fixup_assabet(struct tag *tags, char **cmdline) |
535 | { | 535 | { |
536 | /* This must be done before any call to machine_has_neponset() */ | 536 | /* This must be done before any call to machine_has_neponset() */ |
537 | map_sa1100_gpio_regs(); | 537 | map_sa1100_gpio_regs(); |
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva-reference.c b/arch/arm/mach-shmobile/board-armadillo800eva-reference.c index 57d246eb8813..f660fbb96e0b 100644 --- a/arch/arm/mach-shmobile/board-armadillo800eva-reference.c +++ b/arch/arm/mach-shmobile/board-armadillo800eva-reference.c | |||
@@ -164,8 +164,8 @@ static void __init eva_init(void) | |||
164 | r8a7740_meram_workaround(); | 164 | r8a7740_meram_workaround(); |
165 | 165 | ||
166 | #ifdef CONFIG_CACHE_L2X0 | 166 | #ifdef CONFIG_CACHE_L2X0 |
167 | /* Early BRESP enable, Shared attribute override enable, 32K*8way */ | 167 | /* Shared attribute override enable, 32K*8way */ |
168 | l2x0_init(IOMEM(0xf0002000), 0x40440000, 0x82000fff); | 168 | l2x0_init(IOMEM(0xf0002000), 0x00400000, 0xc20f0fff); |
169 | #endif | 169 | #endif |
170 | 170 | ||
171 | r8a7740_add_standard_devices_dt(); | 171 | r8a7740_add_standard_devices_dt(); |
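All of the shmobile l2x0_init() calls in this pull change the same way: the hard-coded Early BRESP and way-size bits drop out of the value, and the widened mask leaves those fields to the hardware-reported geometry and to the Cortex-A9 handling now done by the core L2C-310 code. A hedged decode of the new arguments, restating the call above with the bit name this series uses elsewhere:

/*
 * 0x00400000 == 1 << 22 == L2C_AUX_CTRL_SHARED_OVERRIDE: the shared
 * attribute override is the only bit the board still forces.  The mask
 * 0xc20f0fff preserves (among others) the Early BRESP bit and the
 * way-size/associativity field, which the core code and the cache's own
 * ID registers now supply.
 */
l2x0_init(IOMEM(0xf0002000), L2C_AUX_CTRL_SHARED_OVERRIDE, 0xc20f0fff);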
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c index bc2cf7a89534..01f81100c330 100644 --- a/arch/arm/mach-shmobile/board-armadillo800eva.c +++ b/arch/arm/mach-shmobile/board-armadillo800eva.c | |||
@@ -1271,8 +1271,8 @@ static void __init eva_init(void) | |||
1271 | 1271 | ||
1272 | 1272 | ||
1273 | #ifdef CONFIG_CACHE_L2X0 | 1273 | #ifdef CONFIG_CACHE_L2X0 |
1274 | /* Early BRESP enable, Shared attribute override enable, 32K*8way */ | 1274 | /* Shared attribute override enable, 32K*8way */ |
1275 | l2x0_init(IOMEM(0xf0002000), 0x40440000, 0x82000fff); | 1275 | l2x0_init(IOMEM(0xf0002000), 0x00400000, 0xc20f0fff); |
1276 | #endif | 1276 | #endif |
1277 | 1277 | ||
1278 | i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices)); | 1278 | i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices)); |
diff --git a/arch/arm/mach-shmobile/board-kzm9g-reference.c b/arch/arm/mach-shmobile/board-kzm9g-reference.c index 598e32488410..a735a1d80c28 100644 --- a/arch/arm/mach-shmobile/board-kzm9g-reference.c +++ b/arch/arm/mach-shmobile/board-kzm9g-reference.c | |||
@@ -36,8 +36,8 @@ static void __init kzm_init(void) | |||
36 | sh73a0_add_standard_devices_dt(); | 36 | sh73a0_add_standard_devices_dt(); |
37 | 37 | ||
38 | #ifdef CONFIG_CACHE_L2X0 | 38 | #ifdef CONFIG_CACHE_L2X0 |
39 | /* Early BRESP enable, Shared attribute override enable, 64K*8way */ | 39 | /* Shared attribute override enable, 64K*8way */ |
40 | l2x0_init(IOMEM(0xf0100000), 0x40460000, 0x82000fff); | 40 | l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff); |
41 | #endif | 41 | #endif |
42 | } | 42 | } |
43 | 43 | ||
diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c index 03dc3ac84502..f94ec8ca42c1 100644 --- a/arch/arm/mach-shmobile/board-kzm9g.c +++ b/arch/arm/mach-shmobile/board-kzm9g.c | |||
@@ -876,8 +876,8 @@ static void __init kzm_init(void) | |||
876 | gpio_request_one(223, GPIOF_IN, NULL); /* IRQ8 */ | 876 | gpio_request_one(223, GPIOF_IN, NULL); /* IRQ8 */ |
877 | 877 | ||
878 | #ifdef CONFIG_CACHE_L2X0 | 878 | #ifdef CONFIG_CACHE_L2X0 |
879 | /* Early BRESP enable, Shared attribute override enable, 64K*8way */ | 879 | /* Shared attribute override enable, 64K*8way */ |
880 | l2x0_init(IOMEM(0xf0100000), 0x40460000, 0x82000fff); | 880 | l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff); |
881 | #endif | 881 | #endif |
882 | 882 | ||
883 | i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices)); | 883 | i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices)); |
diff --git a/arch/arm/mach-shmobile/setup-r8a7778.c b/arch/arm/mach-shmobile/setup-r8a7778.c index 8c02e24f2483..d311ef903b39 100644 --- a/arch/arm/mach-shmobile/setup-r8a7778.c +++ b/arch/arm/mach-shmobile/setup-r8a7778.c | |||
@@ -285,10 +285,10 @@ void __init r8a7778_add_dt_devices(void) | |||
285 | void __iomem *base = ioremap_nocache(0xf0100000, 0x1000); | 285 | void __iomem *base = ioremap_nocache(0xf0100000, 0x1000); |
286 | if (base) { | 286 | if (base) { |
287 | /* | 287 | /* |
288 | * Early BRESP enable, Shared attribute override enable, 64K*16way | 288 | * Shared attribute override enable, 64K*16way |
289 | * don't call iounmap(base) | 289 | * don't call iounmap(base) |
290 | */ | 290 | */ |
291 | l2x0_init(base, 0x40470000, 0x82000fff); | 291 | l2x0_init(base, 0x00400000, 0xc20f0fff); |
292 | } | 292 | } |
293 | #endif | 293 | #endif |
294 | 294 | ||
diff --git a/arch/arm/mach-shmobile/setup-r8a7779.c b/arch/arm/mach-shmobile/setup-r8a7779.c index d197b5adc886..aba4ed652d54 100644 --- a/arch/arm/mach-shmobile/setup-r8a7779.c +++ b/arch/arm/mach-shmobile/setup-r8a7779.c | |||
@@ -660,8 +660,8 @@ static struct platform_device *r8a7779_standard_devices[] __initdata = { | |||
660 | void __init r8a7779_add_standard_devices(void) | 660 | void __init r8a7779_add_standard_devices(void) |
661 | { | 661 | { |
662 | #ifdef CONFIG_CACHE_L2X0 | 662 | #ifdef CONFIG_CACHE_L2X0 |
663 | /* Early BRESP enable, Shared attribute override enable, 64K*16way */ | 663 | /* Shared attribute override enable, 64K*16way */ |
664 | l2x0_init(IOMEM(0xf0100000), 0x40470000, 0x82000fff); | 664 | l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff); |
665 | #endif | 665 | #endif |
666 | r8a7779_pm_init(); | 666 | r8a7779_pm_init(); |
667 | 667 | ||
diff --git a/arch/arm/mach-socfpga/socfpga.c b/arch/arm/mach-socfpga/socfpga.c index d86231e11b34..adbf38314ca8 100644 --- a/arch/arm/mach-socfpga/socfpga.c +++ b/arch/arm/mach-socfpga/socfpga.c | |||
@@ -98,22 +98,17 @@ static void socfpga_cyclone5_restart(enum reboot_mode mode, const char *cmd) | |||
98 | writel(temp, rst_manager_base_addr + SOCFPGA_RSTMGR_CTRL); | 98 | writel(temp, rst_manager_base_addr + SOCFPGA_RSTMGR_CTRL); |
99 | } | 99 | } |
100 | 100 | ||
101 | static void __init socfpga_cyclone5_init(void) | ||
102 | { | ||
103 | l2x0_of_init(0, ~0UL); | ||
104 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); | ||
105 | } | ||
106 | |||
107 | static const char *altera_dt_match[] = { | 101 | static const char *altera_dt_match[] = { |
108 | "altr,socfpga", | 102 | "altr,socfpga", |
109 | NULL | 103 | NULL |
110 | }; | 104 | }; |
111 | 105 | ||
112 | DT_MACHINE_START(SOCFPGA, "Altera SOCFPGA") | 106 | DT_MACHINE_START(SOCFPGA, "Altera SOCFPGA") |
107 | .l2c_aux_val = 0, | ||
108 | .l2c_aux_mask = ~0, | ||
113 | .smp = smp_ops(socfpga_smp_ops), | 109 | .smp = smp_ops(socfpga_smp_ops), |
114 | .map_io = socfpga_map_io, | 110 | .map_io = socfpga_map_io, |
115 | .init_irq = socfpga_init_irq, | 111 | .init_irq = socfpga_init_irq, |
116 | .init_machine = socfpga_cyclone5_init, | ||
117 | .restart = socfpga_cyclone5_restart, | 112 | .restart = socfpga_cyclone5_restart, |
118 | .dt_compat = altera_dt_match, | 113 | .dt_compat = altera_dt_match, |
119 | MACHINE_END | 114 | MACHINE_END |
diff --git a/arch/arm/mach-spear/platsmp.c b/arch/arm/mach-spear/platsmp.c index c19751fff2c6..fd4297713d67 100644 --- a/arch/arm/mach-spear/platsmp.c +++ b/arch/arm/mach-spear/platsmp.c | |||
@@ -20,6 +20,18 @@ | |||
20 | #include <mach/spear.h> | 20 | #include <mach/spear.h> |
21 | #include "generic.h" | 21 | #include "generic.h" |
22 | 22 | ||
23 | /* | ||
24 | * Write pen_release in a way that is guaranteed to be visible to all | ||
25 | * observers, irrespective of whether they're taking part in coherency | ||
26 | * or not. This is necessary for the hotplug code to work reliably. | ||
27 | */ | ||
28 | static void write_pen_release(int val) | ||
29 | { | ||
30 | pen_release = val; | ||
31 | smp_wmb(); | ||
32 | sync_cache_w(&pen_release); | ||
33 | } | ||
34 | |||
23 | static DEFINE_SPINLOCK(boot_lock); | 35 | static DEFINE_SPINLOCK(boot_lock); |
24 | 36 | ||
25 | static void __iomem *scu_base = IOMEM(VA_SCU_BASE); | 37 | static void __iomem *scu_base = IOMEM(VA_SCU_BASE); |
@@ -30,8 +42,7 @@ static void spear13xx_secondary_init(unsigned int cpu) | |||
30 | * let the primary processor know we're out of the | 42 | * let the primary processor know we're out of the |
31 | * pen, then head off into the C entry point | 43 | * pen, then head off into the C entry point |
32 | */ | 44 | */ |
33 | pen_release = -1; | 45 | write_pen_release(-1); |
34 | smp_wmb(); | ||
35 | 46 | ||
36 | /* | 47 | /* |
37 | * Synchronise with the boot thread. | 48 | * Synchronise with the boot thread. |
@@ -58,9 +69,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle) | |||
58 | * Note that "pen_release" is the hardware CPU ID, whereas | 69 | * Note that "pen_release" is the hardware CPU ID, whereas |
59 | * "cpu" is Linux's internal ID. | 70 | * "cpu" is Linux's internal ID. |
60 | */ | 71 | */ |
61 | pen_release = cpu; | 72 | write_pen_release(cpu); |
62 | flush_cache_all(); | ||
63 | outer_flush_all(); | ||
64 | 73 | ||
65 | timeout = jiffies + (1 * HZ); | 74 | timeout = jiffies + (1 * HZ); |
66 | while (time_before(jiffies, timeout)) { | 75 | while (time_before(jiffies, timeout)) { |
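The SPEAr change above is the pen_release pattern shared by several ARM platforms: instead of flushing the entire inner and outer caches just to make one word visible to a secondary CPU that is still running non-coherently, write_pen_release() cleans only pen_release itself with sync_cache_w(). A trimmed sketch of the calling side, assuming the pen_release, boot_lock and include context already present in platsmp.c (the timeout/poll loop is the conventional shape, not new code from this series):

static int example_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	spin_lock(&boot_lock);

	/*
	 * Publish the CPU we want out of the holding pen.  sync_cache_w()
	 * inside write_pen_release() pushes just this variable to the
	 * point of coherency, replacing flush_cache_all() +
	 * outer_flush_all().
	 */
	write_pen_release(cpu);

	/* The secondary clears pen_release from its *_secondary_init(). */
	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;
		udelay(10);
	}

	spin_unlock(&boot_lock);
	return pen_release != -1 ? -ENOSYS : 0;
}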
diff --git a/arch/arm/mach-spear/spear13xx.c b/arch/arm/mach-spear/spear13xx.c index 7aa6e8cf830f..c9897ea38980 100644 --- a/arch/arm/mach-spear/spear13xx.c +++ b/arch/arm/mach-spear/spear13xx.c | |||
@@ -38,15 +38,15 @@ void __init spear13xx_l2x0_init(void) | |||
38 | if (!IS_ENABLED(CONFIG_CACHE_L2X0)) | 38 | if (!IS_ENABLED(CONFIG_CACHE_L2X0)) |
39 | return; | 39 | return; |
40 | 40 | ||
41 | writel_relaxed(0x06, VA_L2CC_BASE + L2X0_PREFETCH_CTRL); | 41 | writel_relaxed(0x06, VA_L2CC_BASE + L310_PREFETCH_CTRL); |
42 | 42 | ||
43 | /* | 43 | /* |
44 | * Program following latencies in order to make | 44 | * Program following latencies in order to make |
45 | * SPEAr1340 work at 600 MHz | 45 | * SPEAr1340 work at 600 MHz |
46 | */ | 46 | */ |
47 | writel_relaxed(0x221, VA_L2CC_BASE + L2X0_TAG_LATENCY_CTRL); | 47 | writel_relaxed(0x221, VA_L2CC_BASE + L310_TAG_LATENCY_CTRL); |
48 | writel_relaxed(0x441, VA_L2CC_BASE + L2X0_DATA_LATENCY_CTRL); | 48 | writel_relaxed(0x441, VA_L2CC_BASE + L310_DATA_LATENCY_CTRL); |
49 | l2x0_init(VA_L2CC_BASE, 0x70A60001, 0xfe00ffff); | 49 | l2x0_init(VA_L2CC_BASE, 0x30a00001, 0xfe0fffff); |
50 | } | 50 | } |
51 | 51 | ||
52 | /* | 52 | /* |
diff --git a/arch/arm/mach-sti/board-dt.c b/arch/arm/mach-sti/board-dt.c index df731f2322fa..3cf6ef8d4317 100644 --- a/arch/arm/mach-sti/board-dt.c +++ b/arch/arm/mach-sti/board-dt.c | |||
@@ -14,25 +14,6 @@ | |||
14 | 14 | ||
15 | #include "smp.h" | 15 | #include "smp.h" |
16 | 16 | ||
17 | void __init stih41x_l2x0_init(void) | ||
18 | { | ||
19 | u32 way_size = 0x4; | ||
20 | u32 aux_ctrl; | ||
21 | /* may be this can be encoded in macros like BIT*() */ | ||
22 | aux_ctrl = (0x1 << L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT) | | ||
23 | (0x1 << L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT) | | ||
24 | (0x1 << L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT) | | ||
25 | (way_size << L2X0_AUX_CTRL_WAY_SIZE_SHIFT); | ||
26 | |||
27 | l2x0_of_init(aux_ctrl, L2X0_AUX_CTRL_MASK); | ||
28 | } | ||
29 | |||
30 | static void __init stih41x_machine_init(void) | ||
31 | { | ||
32 | stih41x_l2x0_init(); | ||
33 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); | ||
34 | } | ||
35 | |||
36 | static const char *stih41x_dt_match[] __initdata = { | 17 | static const char *stih41x_dt_match[] __initdata = { |
37 | "st,stih415", | 18 | "st,stih415", |
38 | "st,stih416", | 19 | "st,stih416", |
@@ -41,7 +22,11 @@ static const char *stih41x_dt_match[] __initdata = { | |||
41 | }; | 22 | }; |
42 | 23 | ||
43 | DT_MACHINE_START(STM, "STiH415/416 SoC with Flattened Device Tree") | 24 | DT_MACHINE_START(STM, "STiH415/416 SoC with Flattened Device Tree") |
44 | .init_machine = stih41x_machine_init, | ||
45 | .smp = smp_ops(sti_smp_ops), | ||
46 | .dt_compat = stih41x_dt_match, | 25 | .dt_compat = stih41x_dt_match, |
26 | .l2c_aux_val = L2C_AUX_CTRL_SHARED_OVERRIDE | | ||
27 | L310_AUX_CTRL_DATA_PREFETCH | | ||
28 | L310_AUX_CTRL_INSTR_PREFETCH | | ||
29 | L2C_AUX_CTRL_WAY_SIZE(4), | ||
30 | .l2c_aux_mask = 0xc0000fff, | ||
31 | .smp = smp_ops(sti_smp_ops), | ||
47 | MACHINE_END | 32 | MACHINE_END |
diff --git a/arch/arm/mach-tegra/pm.h b/arch/arm/mach-tegra/pm.h index 6e92a7c2ecbd..f4a89698e5b0 100644 --- a/arch/arm/mach-tegra/pm.h +++ b/arch/arm/mach-tegra/pm.h | |||
@@ -35,8 +35,6 @@ void tegra20_sleep_core_init(void); | |||
35 | void tegra30_lp1_iram_hook(void); | 35 | void tegra30_lp1_iram_hook(void); |
36 | void tegra30_sleep_core_init(void); | 36 | void tegra30_sleep_core_init(void); |
37 | 37 | ||
38 | extern unsigned long l2x0_saved_regs_addr; | ||
39 | |||
40 | void tegra_clear_cpu_in_lp2(void); | 38 | void tegra_clear_cpu_in_lp2(void); |
41 | bool tegra_set_cpu_in_lp2(void); | 39 | bool tegra_set_cpu_in_lp2(void); |
42 | 40 | ||
diff --git a/arch/arm/mach-tegra/reset-handler.S b/arch/arm/mach-tegra/reset-handler.S index 8c1ba4fea384..578d4d1ad648 100644 --- a/arch/arm/mach-tegra/reset-handler.S +++ b/arch/arm/mach-tegra/reset-handler.S | |||
@@ -19,7 +19,6 @@ | |||
19 | 19 | ||
20 | #include <asm/cache.h> | 20 | #include <asm/cache.h> |
21 | #include <asm/asm-offsets.h> | 21 | #include <asm/asm-offsets.h> |
22 | #include <asm/hardware/cache-l2x0.h> | ||
23 | 22 | ||
24 | #include "flowctrl.h" | 23 | #include "flowctrl.h" |
25 | #include "fuse.h" | 24 | #include "fuse.h" |
@@ -78,8 +77,10 @@ ENTRY(tegra_resume) | |||
78 | str r1, [r0] | 77 | str r1, [r0] |
79 | #endif | 78 | #endif |
80 | 79 | ||
80 | #ifdef CONFIG_CACHE_L2X0 | ||
81 | /* L2 cache resume & re-enable */ | 81 | /* L2 cache resume & re-enable */ |
82 | l2_cache_resume r0, r1, r2, l2x0_saved_regs_addr | 82 | bl l2c310_early_resume |
83 | #endif | ||
83 | end_ca9_scu_l2_resume: | 84 | end_ca9_scu_l2_resume: |
84 | mov32 r9, 0xc0f | 85 | mov32 r9, 0xc0f |
85 | cmp r8, r9 | 86 | cmp r8, r9 |
@@ -89,12 +90,6 @@ end_ca9_scu_l2_resume: | |||
89 | ENDPROC(tegra_resume) | 90 | ENDPROC(tegra_resume) |
90 | #endif | 91 | #endif |
91 | 92 | ||
92 | #ifdef CONFIG_CACHE_L2X0 | ||
93 | .globl l2x0_saved_regs_addr | ||
94 | l2x0_saved_regs_addr: | ||
95 | .long 0 | ||
96 | #endif | ||
97 | |||
98 | .align L1_CACHE_SHIFT | 93 | .align L1_CACHE_SHIFT |
99 | ENTRY(__tegra_cpu_reset_handler_start) | 94 | ENTRY(__tegra_cpu_reset_handler_start) |
100 | 95 | ||
diff --git a/arch/arm/mach-tegra/sleep.h b/arch/arm/mach-tegra/sleep.h index a4edbb3abd3d..339fe42cd6fb 100644 --- a/arch/arm/mach-tegra/sleep.h +++ b/arch/arm/mach-tegra/sleep.h | |||
@@ -120,37 +120,6 @@ | |||
120 | mov \tmp1, \tmp1, lsr #8 | 120 | mov \tmp1, \tmp1, lsr #8 |
121 | .endm | 121 | .endm |
122 | 122 | ||
123 | /* Macro to resume & re-enable L2 cache */ | ||
124 | #ifndef L2X0_CTRL_EN | ||
125 | #define L2X0_CTRL_EN 1 | ||
126 | #endif | ||
127 | |||
128 | #ifdef CONFIG_CACHE_L2X0 | ||
129 | .macro l2_cache_resume, tmp1, tmp2, tmp3, phys_l2x0_saved_regs | ||
130 | W(adr) \tmp1, \phys_l2x0_saved_regs | ||
131 | ldr \tmp1, [\tmp1] | ||
132 | ldr \tmp2, [\tmp1, #L2X0_R_PHY_BASE] | ||
133 | ldr \tmp3, [\tmp2, #L2X0_CTRL] | ||
134 | tst \tmp3, #L2X0_CTRL_EN | ||
135 | bne exit_l2_resume | ||
136 | ldr \tmp3, [\tmp1, #L2X0_R_TAG_LATENCY] | ||
137 | str \tmp3, [\tmp2, #L2X0_TAG_LATENCY_CTRL] | ||
138 | ldr \tmp3, [\tmp1, #L2X0_R_DATA_LATENCY] | ||
139 | str \tmp3, [\tmp2, #L2X0_DATA_LATENCY_CTRL] | ||
140 | ldr \tmp3, [\tmp1, #L2X0_R_PREFETCH_CTRL] | ||
141 | str \tmp3, [\tmp2, #L2X0_PREFETCH_CTRL] | ||
142 | ldr \tmp3, [\tmp1, #L2X0_R_PWR_CTRL] | ||
143 | str \tmp3, [\tmp2, #L2X0_POWER_CTRL] | ||
144 | ldr \tmp3, [\tmp1, #L2X0_R_AUX_CTRL] | ||
145 | str \tmp3, [\tmp2, #L2X0_AUX_CTRL] | ||
146 | mov \tmp3, #L2X0_CTRL_EN | ||
147 | str \tmp3, [\tmp2, #L2X0_CTRL] | ||
148 | exit_l2_resume: | ||
149 | .endm | ||
150 | #else /* CONFIG_CACHE_L2X0 */ | ||
151 | .macro l2_cache_resume, tmp1, tmp2, tmp3, phys_l2x0_saved_regs | ||
152 | .endm | ||
153 | #endif /* CONFIG_CACHE_L2X0 */ | ||
154 | #else | 123 | #else |
155 | void tegra_pen_lock(void); | 124 | void tegra_pen_lock(void); |
156 | void tegra_pen_unlock(void); | 125 | void tegra_pen_unlock(void); |
diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c index 6191603379e1..15ac9fcc96b1 100644 --- a/arch/arm/mach-tegra/tegra.c +++ b/arch/arm/mach-tegra/tegra.c | |||
@@ -70,40 +70,12 @@ u32 tegra_uart_config[3] = { | |||
70 | 0, | 70 | 0, |
71 | }; | 71 | }; |
72 | 72 | ||
73 | static void __init tegra_init_cache(void) | ||
74 | { | ||
75 | #ifdef CONFIG_CACHE_L2X0 | ||
76 | static const struct of_device_id pl310_ids[] __initconst = { | ||
77 | { .compatible = "arm,pl310-cache", }, | ||
78 | {} | ||
79 | }; | ||
80 | |||
81 | struct device_node *np; | ||
82 | int ret; | ||
83 | void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000; | ||
84 | u32 aux_ctrl, cache_type; | ||
85 | |||
86 | np = of_find_matching_node(NULL, pl310_ids); | ||
87 | if (!np) | ||
88 | return; | ||
89 | |||
90 | cache_type = readl(p + L2X0_CACHE_TYPE); | ||
91 | aux_ctrl = (cache_type & 0x700) << (17-8); | ||
92 | aux_ctrl |= 0x7C400001; | ||
93 | |||
94 | ret = l2x0_of_init(aux_ctrl, 0x8200c3fe); | ||
95 | if (!ret) | ||
96 | l2x0_saved_regs_addr = virt_to_phys(&l2x0_saved_regs); | ||
97 | #endif | ||
98 | } | ||
99 | |||
100 | static void __init tegra_init_early(void) | 73 | static void __init tegra_init_early(void) |
101 | { | 74 | { |
102 | of_register_trusted_foundations(); | 75 | of_register_trusted_foundations(); |
103 | tegra_apb_io_init(); | 76 | tegra_apb_io_init(); |
104 | tegra_init_fuse(); | 77 | tegra_init_fuse(); |
105 | tegra_cpu_reset_handler_init(); | 78 | tegra_cpu_reset_handler_init(); |
106 | tegra_init_cache(); | ||
107 | tegra_powergate_init(); | 79 | tegra_powergate_init(); |
108 | tegra_hotplug_init(); | 80 | tegra_hotplug_init(); |
109 | } | 81 | } |
@@ -191,8 +163,10 @@ static const char * const tegra_dt_board_compat[] = { | |||
191 | }; | 163 | }; |
192 | 164 | ||
193 | DT_MACHINE_START(TEGRA_DT, "NVIDIA Tegra SoC (Flattened Device Tree)") | 165 | DT_MACHINE_START(TEGRA_DT, "NVIDIA Tegra SoC (Flattened Device Tree)") |
194 | .map_io = tegra_map_common_io, | 166 | .l2c_aux_val = 0x3c400001, |
167 | .l2c_aux_mask = 0xc20fc3fe, | ||
195 | .smp = smp_ops(tegra_smp_ops), | 168 | .smp = smp_ops(tegra_smp_ops), |
169 | .map_io = tegra_map_common_io, | ||
196 | .init_early = tegra_init_early, | 170 | .init_early = tegra_init_early, |
197 | .init_irq = tegra_dt_init_irq, | 171 | .init_irq = tegra_dt_init_irq, |
198 | .init_machine = tegra_dt_init, | 172 | .init_machine = tegra_dt_init, |
diff --git a/arch/arm/mach-ux500/cache-l2x0.c b/arch/arm/mach-ux500/cache-l2x0.c index 264f894c0e3d..842ebedbdd1c 100644 --- a/arch/arm/mach-ux500/cache-l2x0.c +++ b/arch/arm/mach-ux500/cache-l2x0.c | |||
@@ -35,10 +35,16 @@ static int __init ux500_l2x0_unlock(void) | |||
35 | return 0; | 35 | return 0; |
36 | } | 36 | } |
37 | 37 | ||
38 | static int __init ux500_l2x0_init(void) | 38 | static void ux500_l2c310_write_sec(unsigned long val, unsigned reg) |
39 | { | 39 | { |
40 | u32 aux_val = 0x3e000000; | 40 | /* |
41 | * We can't write to secure registers as we are in non-secure | ||
42 | * mode, until we have some SMI service available. | ||
43 | */ | ||
44 | } | ||
41 | 45 | ||
46 | static int __init ux500_l2x0_init(void) | ||
47 | { | ||
42 | if (cpu_is_u8500_family() || cpu_is_ux540_family()) | 48 | if (cpu_is_u8500_family() || cpu_is_ux540_family()) |
43 | l2x0_base = __io_address(U8500_L2CC_BASE); | 49 | l2x0_base = __io_address(U8500_L2CC_BASE); |
44 | else | 50 | else |
@@ -48,28 +54,12 @@ static int __init ux500_l2x0_init(void) | |||
48 | /* Unlock before init */ | 54 | /* Unlock before init */ |
49 | ux500_l2x0_unlock(); | 55 | ux500_l2x0_unlock(); |
50 | 56 | ||
51 | /* DBx540's L2 has 128KB way size */ | 57 | outer_cache.write_sec = ux500_l2c310_write_sec; |
52 | if (cpu_is_ux540_family()) | ||
53 | /* 128KB way size */ | ||
54 | aux_val |= (0x4 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT); | ||
55 | else | ||
56 | /* 64KB way size */ | ||
57 | aux_val |= (0x3 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT); | ||
58 | 58 | ||
59 | /* 64KB way size, 8 way associativity, force WA */ | ||
60 | if (of_have_populated_dt()) | 59 | if (of_have_populated_dt()) |
61 | l2x0_of_init(aux_val, 0xc0000fff); | 60 | l2x0_of_init(0, ~0); |
62 | else | 61 | else |
63 | l2x0_init(l2x0_base, aux_val, 0xc0000fff); | 62 | l2x0_init(l2x0_base, 0, ~0); |
64 | |||
65 | /* | ||
66 | * We can't disable l2 as we are in non secure mode, currently | ||
67 | * this seems be called only during kexec path. So let's | ||
68 | * override outer.disable with nasty assignment until we have | ||
69 | * some SMI service available. | ||
70 | */ | ||
71 | outer_cache.disable = NULL; | ||
72 | outer_cache.set_debug = NULL; | ||
73 | 63 | ||
74 | return 0; | 64 | return 0; |
75 | } | 65 | } |
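The ux500 change above is an early user of the new outer_cache.write_sec hook: instead of NULLing out .disable and .set_debug after init, the platform declares up front that secure-only L2C registers cannot be written from non-secure mode, and the common code routes all such writes through the hook. A platform that does have a secure-monitor service could implement the hook for real; the sketch below is hypothetical, and myplat_smc()/MYPLAT_SMC_L2C_WRITE are invented names standing in for whatever SMC/SMI interface the firmware provides.

	#define MYPLAT_SMC_L2C_WRITE	0x82000101	/* made-up function ID */

	static void myplat_l2c310_write_sec(unsigned long val, unsigned reg)
	{
		/* Ask secure firmware to perform the privileged register write. */
		myplat_smc(MYPLAT_SMC_L2C_WRITE, reg, val);
	}

	static int __init myplat_l2c_init(void)
	{
		outer_cache.write_sec = myplat_l2c310_write_sec;
		return l2x0_of_init(0, ~0);
	}
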
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c index 494d70bfddad..86150d7a2e7d 100644 --- a/arch/arm/mach-vexpress/ct-ca9x4.c +++ b/arch/arm/mach-vexpress/ct-ca9x4.c | |||
@@ -45,6 +45,23 @@ static void __init ct_ca9x4_map_io(void) | |||
45 | iotable_init(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc)); | 45 | iotable_init(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc)); |
46 | } | 46 | } |
47 | 47 | ||
48 | static void __init ca9x4_l2_init(void) | ||
49 | { | ||
50 | #ifdef CONFIG_CACHE_L2X0 | ||
51 | void __iomem *l2x0_base = ioremap(CT_CA9X4_L2CC, SZ_4K); | ||
52 | |||
53 | if (l2x0_base) { | ||
54 | /* set RAM latencies to 1 cycle for this core tile. */ | ||
55 | writel(0, l2x0_base + L310_TAG_LATENCY_CTRL); | ||
56 | writel(0, l2x0_base + L310_DATA_LATENCY_CTRL); | ||
57 | |||
58 | l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff); | ||
59 | } else { | ||
60 | pr_err("L2C: unable to map L2 cache controller\n"); | ||
61 | } | ||
62 | #endif | ||
63 | } | ||
64 | |||
48 | #ifdef CONFIG_HAVE_ARM_TWD | 65 | #ifdef CONFIG_HAVE_ARM_TWD |
49 | static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, A9_MPCORE_TWD, IRQ_LOCALTIMER); | 66 | static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, A9_MPCORE_TWD, IRQ_LOCALTIMER); |
50 | 67 | ||
@@ -63,6 +80,7 @@ static void __init ct_ca9x4_init_irq(void) | |||
63 | gic_init(0, 29, ioremap(A9_MPCORE_GIC_DIST, SZ_4K), | 80 | gic_init(0, 29, ioremap(A9_MPCORE_GIC_DIST, SZ_4K), |
64 | ioremap(A9_MPCORE_GIC_CPU, SZ_256)); | 81 | ioremap(A9_MPCORE_GIC_CPU, SZ_256)); |
65 | ca9x4_twd_init(); | 82 | ca9x4_twd_init(); |
83 | ca9x4_l2_init(); | ||
66 | } | 84 | } |
67 | 85 | ||
68 | static int ct_ca9x4_clcd_setup(struct clcd_fb *fb) | 86 | static int ct_ca9x4_clcd_setup(struct clcd_fb *fb) |
@@ -146,16 +164,6 @@ static void __init ct_ca9x4_init(void) | |||
146 | { | 164 | { |
147 | int i; | 165 | int i; |
148 | 166 | ||
149 | #ifdef CONFIG_CACHE_L2X0 | ||
150 | void __iomem *l2x0_base = ioremap(CT_CA9X4_L2CC, SZ_4K); | ||
151 | |||
152 | /* set RAM latencies to 1 cycle for this core tile. */ | ||
153 | writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL); | ||
154 | writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL); | ||
155 | |||
156 | l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff); | ||
157 | #endif | ||
158 | |||
159 | for (i = 0; i < ARRAY_SIZE(ct_ca9x4_amba_devs); i++) | 167 | for (i = 0; i < ARRAY_SIZE(ct_ca9x4_amba_devs); i++) |
160 | amba_device_register(ct_ca9x4_amba_devs[i], &iomem_resource); | 168 | amba_device_register(ct_ca9x4_amba_devs[i], &iomem_resource); |
161 | 169 | ||
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c index 29e7785a54bc..b743a0ae02ce 100644 --- a/arch/arm/mach-vexpress/tc2_pm.c +++ b/arch/arm/mach-vexpress/tc2_pm.c | |||
@@ -209,7 +209,7 @@ static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster) | |||
209 | #define POLL_MSEC 10 | 209 | #define POLL_MSEC 10 |
210 | #define TIMEOUT_MSEC 1000 | 210 | #define TIMEOUT_MSEC 1000 |
211 | 211 | ||
212 | static int tc2_pm_power_down_finish(unsigned int cpu, unsigned int cluster) | 212 | static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster) |
213 | { | 213 | { |
214 | unsigned tries; | 214 | unsigned tries; |
215 | 215 | ||
@@ -290,7 +290,7 @@ static void tc2_pm_powered_up(void) | |||
290 | static const struct mcpm_platform_ops tc2_pm_power_ops = { | 290 | static const struct mcpm_platform_ops tc2_pm_power_ops = { |
291 | .power_up = tc2_pm_power_up, | 291 | .power_up = tc2_pm_power_up, |
292 | .power_down = tc2_pm_power_down, | 292 | .power_down = tc2_pm_power_down, |
293 | .power_down_finish = tc2_pm_power_down_finish, | 293 | .wait_for_powerdown = tc2_pm_wait_for_powerdown, |
294 | .suspend = tc2_pm_suspend, | 294 | .suspend = tc2_pm_suspend, |
295 | .powered_up = tc2_pm_powered_up, | 295 | .powered_up = tc2_pm_powered_up, |
296 | }; | 296 | }; |
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c index 38f4f6f37770..6ff681a24ba7 100644 --- a/arch/arm/mach-vexpress/v2m.c +++ b/arch/arm/mach-vexpress/v2m.c | |||
@@ -372,7 +372,6 @@ MACHINE_END | |||
372 | 372 | ||
373 | static void __init v2m_dt_init(void) | 373 | static void __init v2m_dt_init(void) |
374 | { | 374 | { |
375 | l2x0_of_init(0x00400000, 0xfe0fffff); | ||
376 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); | 375 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); |
377 | } | 376 | } |
378 | 377 | ||
@@ -383,6 +382,8 @@ static const char * const v2m_dt_match[] __initconst = { | |||
383 | 382 | ||
384 | DT_MACHINE_START(VEXPRESS_DT, "ARM-Versatile Express") | 383 | DT_MACHINE_START(VEXPRESS_DT, "ARM-Versatile Express") |
385 | .dt_compat = v2m_dt_match, | 384 | .dt_compat = v2m_dt_match, |
385 | .l2c_aux_val = 0x00400000, | ||
386 | .l2c_aux_mask = 0xfe0fffff, | ||
386 | .smp = smp_ops(vexpress_smp_dt_ops), | 387 | .smp = smp_ops(vexpress_smp_dt_ops), |
387 | .smp_init = smp_init_ops(vexpress_smp_init_ops), | 388 | .smp_init = smp_init_ops(vexpress_smp_init_ops), |
388 | .init_machine = v2m_dt_init, | 389 | .init_machine = v2m_dt_init, |
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c index edbd9d83f407..31a6fa40ba37 100644 --- a/arch/arm/mach-zynq/common.c +++ b/arch/arm/mach-zynq/common.c | |||
@@ -109,11 +109,6 @@ static void __init zynq_init_machine(void) | |||
109 | struct soc_device *soc_dev; | 109 | struct soc_device *soc_dev; |
110 | struct device *parent = NULL; | 110 | struct device *parent = NULL; |
111 | 111 | ||
112 | /* | ||
113 | * 64KB way size, 8-way associativity, parity disabled | ||
114 | */ | ||
115 | l2x0_of_init(0x02060000, 0xF0F0FFFF); | ||
116 | |||
117 | soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); | 112 | soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); |
118 | if (!soc_dev_attr) | 113 | if (!soc_dev_attr) |
119 | goto out; | 114 | goto out; |
@@ -202,6 +197,9 @@ static const char * const zynq_dt_match[] = { | |||
202 | }; | 197 | }; |
203 | 198 | ||
204 | DT_MACHINE_START(XILINX_EP107, "Xilinx Zynq Platform") | 199 | DT_MACHINE_START(XILINX_EP107, "Xilinx Zynq Platform") |
200 | /* 64KB way size, 8-way associativity, parity disabled */ | ||
201 | .l2c_aux_val = 0x02000000, | ||
202 | .l2c_aux_mask = 0xf0ffffff, | ||
205 | .smp = smp_ops(zynq_smp_ops), | 203 | .smp = smp_ops(zynq_smp_ops), |
206 | .map_io = zynq_map_io, | 204 | .map_io = zynq_map_io, |
207 | .init_irq = zynq_irq_init, | 205 | .init_irq = zynq_irq_init, |
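Each board conversion above supplies the same kind of value/mask pair, and __l2c_init() in the cache-l2x0.c hunk below combines them as "keep the hardware bits selected by the mask, then OR in the value", warning when the two overlap. The stand-alone worked example below uses the Versatile Express numbers from this diff; the "hardware" register value is an assumed example, not a real dump.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t hw_aux   = 0x02050000;	/* assumed boot-time AUX_CTRL */
		uint32_t aux_mask = 0xfe0fffff;	/* Versatile Express: bits kept */
		uint32_t aux_val  = 0x00400000;	/* Versatile Express: bits set */
		uint32_t aux;

		/*
		 * Overlapping value/mask bits would let the platform corrupt
		 * hardware-probed fields; __l2c_init() pr_alert()s about this.
		 */
		if (aux_val & aux_mask)
			puts("warning: aux_val and aux_mask overlap");

		aux = (hw_aux & aux_mask) | aux_val;
		printf("resulting AUX_CTRL = 0x%08x\n", aux);
		return 0;
	}
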
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 5bf7c3c3b301..eda0dd0ab97b 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig | |||
@@ -897,6 +897,57 @@ config CACHE_PL310 | |||
897 | This option enables optimisations for the PL310 cache | 897 | This option enables optimisations for the PL310 cache |
898 | controller. | 898 | controller. |
899 | 899 | ||
900 | config PL310_ERRATA_588369 | ||
901 | bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines" | ||
902 | depends on CACHE_L2X0 | ||
903 | help | ||
904 | The PL310 L2 cache controller implements three types of Clean & | ||
905 | Invalidate maintenance operations: by Physical Address | ||
906 | (offset 0x7F0), by Index/Way (0x7F8) and by Way (0x7FC). | ||
907 | They are architecturally defined to behave as the execution of a | ||
908 | clean operation followed immediately by an invalidate operation, | ||
909 | both performing to the same memory location. This functionality | ||
910 | is not correctly implemented in PL310 as clean lines are not | ||
911 | invalidated as a result of these operations. | ||
912 | |||
913 | config PL310_ERRATA_727915 | ||
914 | bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption" | ||
915 | depends on CACHE_L2X0 | ||
916 | help | ||
917 | PL310 implements the Clean & Invalidate by Way L2 cache maintenance | ||
918 | operation (offset 0x7FC). This operation runs in background so that | ||
919 | PL310 can handle normal accesses while it is in progress. Under very | ||
920 | rare circumstances, due to this erratum, write data can be lost when | ||
921 | PL310 treats a cacheable write transaction during a Clean & | ||
922 | Invalidate by Way operation. | ||
923 | |||
924 | config PL310_ERRATA_753970 | ||
925 | bool "PL310 errata: cache sync operation may be faulty" | ||
926 | depends on CACHE_PL310 | ||
927 | help | ||
928 | This option enables the workaround for the 753970 PL310 (r3p0) erratum. | ||
929 | |||
930 | Under some condition the effect of cache sync operation on | ||
931 | the store buffer still remains when the operation completes. | ||
932 | This means that the store buffer is always asked to drain and | ||
933 | this prevents it from merging any further writes. The workaround | ||
934 | is to replace the normal offset of cache sync operation (0x730) | ||
935 | by another offset targeting an unmapped PL310 register 0x740. | ||
936 | This has the same effect as the cache sync operation: store buffer | ||
937 | drain and waiting for all buffers empty. | ||
938 | |||
939 | config PL310_ERRATA_769419 | ||
940 | bool "PL310 errata: no automatic Store Buffer drain" | ||
941 | depends on CACHE_L2X0 | ||
942 | help | ||
943 | On revisions of the PL310 prior to r3p2, the Store Buffer does | ||
944 | not automatically drain. This can cause normal, non-cacheable | ||
945 | writes to be retained when the memory system is idle, leading | ||
946 | to suboptimal I/O performance for drivers using coherent DMA. | ||
947 | This option adds a write barrier to the cpu_idle loop so that, | ||
948 | on systems with an outer cache, the store buffer is drained | ||
949 | explicitly. | ||
950 | |||
900 | config CACHE_TAUROS2 | 951 | config CACHE_TAUROS2 |
901 | bool "Enable the Tauros2 L2 cache controller" | 952 | bool "Enable the Tauros2 L2 cache controller" |
902 | depends on (ARCH_DOVE || ARCH_MMP || CPU_PJ4) | 953 | depends on (ARCH_DOVE || ARCH_MMP || CPU_PJ4) |
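Of the erratum entries added above, 753970 is the one whose workaround is purely a register-offset substitution: a write to the unmapped 0x740 offset drains the store buffer just like a real cache sync, but without the lingering effect the help text describes. The cache-l2x0.c rework below applies it per revision (r3p0 only) via l2c310_fixup() rather than unconditionally at build time; the sketch here merely restates that mechanism, with offsets taken from asm/hardware/cache-l2x0.h.

	/* 0x730 is L2X0_CACHE_SYNC, 0x740 is the unmapped L2X0_DUMMY_REG. */
	static unsigned long sync_reg_offset = 0x730;

	static void pl310_753970_fixup(void)
	{
		/* On affected r3p0 parts, "sync" writes go to the dummy register. */
		sync_reg_offset = 0x740;
	}

	static void l2c_sync(void __iomem *base)
	{
		writel_relaxed(0, base + sync_reg_offset);
	}
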
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 7f39ce2f841f..91da64de440f 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile | |||
@@ -95,7 +95,8 @@ obj-$(CONFIG_CPU_V7M) += proc-v7m.o | |||
95 | AFLAGS_proc-v6.o :=-Wa,-march=armv6 | 95 | AFLAGS_proc-v6.o :=-Wa,-march=armv6 |
96 | AFLAGS_proc-v7.o :=-Wa,-march=armv7-a | 96 | AFLAGS_proc-v7.o :=-Wa,-march=armv7-a |
97 | 97 | ||
98 | obj-$(CONFIG_OUTER_CACHE) += l2c-common.o | ||
98 | obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o | 99 | obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o |
99 | obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o | 100 | obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o l2c-l2x0-resume.o |
100 | obj-$(CONFIG_CACHE_XSC3L2) += cache-xsc3l2.o | 101 | obj-$(CONFIG_CACHE_XSC3L2) += cache-xsc3l2.o |
101 | obj-$(CONFIG_CACHE_TAUROS2) += cache-tauros2.o | 102 | obj-$(CONFIG_CACHE_TAUROS2) += cache-tauros2.o |
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 924036473b16..b8cb1a2688a0 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <asm/opcodes.h> | 28 | #include <asm/opcodes.h> |
29 | 29 | ||
30 | #include "fault.h" | 30 | #include "fault.h" |
31 | #include "mm.h" | ||
31 | 32 | ||
32 | /* | 33 | /* |
33 | * 32-bit misaligned trap handler (c) 1998 San Mehat (CCC) -July 1998 | 34 | * 32-bit misaligned trap handler (c) 1998 San Mehat (CCC) -July 1998 |
@@ -81,6 +82,7 @@ static unsigned long ai_word; | |||
81 | static unsigned long ai_dword; | 82 | static unsigned long ai_dword; |
82 | static unsigned long ai_multi; | 83 | static unsigned long ai_multi; |
83 | static int ai_usermode; | 84 | static int ai_usermode; |
85 | static unsigned long cr_no_alignment; | ||
84 | 86 | ||
85 | core_param(alignment, ai_usermode, int, 0600); | 87 | core_param(alignment, ai_usermode, int, 0600); |
86 | 88 | ||
@@ -91,7 +93,7 @@ core_param(alignment, ai_usermode, int, 0600); | |||
91 | /* Return true if and only if the ARMv6 unaligned access model is in use. */ | 93 | /* Return true if and only if the ARMv6 unaligned access model is in use. */ |
92 | static bool cpu_is_v6_unaligned(void) | 94 | static bool cpu_is_v6_unaligned(void) |
93 | { | 95 | { |
94 | return cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U); | 96 | return cpu_architecture() >= CPU_ARCH_ARMv6 && get_cr() & CR_U; |
95 | } | 97 | } |
96 | 98 | ||
97 | static int safe_usermode(int new_usermode, bool warn) | 99 | static int safe_usermode(int new_usermode, bool warn) |
@@ -949,6 +951,13 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
949 | return 0; | 951 | return 0; |
950 | } | 952 | } |
951 | 953 | ||
954 | static int __init noalign_setup(char *__unused) | ||
955 | { | ||
956 | set_cr(__clear_cr(CR_A)); | ||
957 | return 1; | ||
958 | } | ||
959 | __setup("noalign", noalign_setup); | ||
960 | |||
952 | /* | 961 | /* |
953 | * This needs to be done after sysctl_init, otherwise sys/ will be | 962 | * This needs to be done after sysctl_init, otherwise sys/ will be |
954 | * overwritten. Actually, this shouldn't be in sys/ at all since | 963 | * overwritten. Actually, this shouldn't be in sys/ at all since |
@@ -966,14 +975,12 @@ static int __init alignment_init(void) | |||
966 | return -ENOMEM; | 975 | return -ENOMEM; |
967 | #endif | 976 | #endif |
968 | 977 | ||
969 | #ifdef CONFIG_CPU_CP15 | ||
970 | if (cpu_is_v6_unaligned()) { | 978 | if (cpu_is_v6_unaligned()) { |
971 | cr_alignment &= ~CR_A; | 979 | set_cr(__clear_cr(CR_A)); |
972 | cr_no_alignment &= ~CR_A; | ||
973 | set_cr(cr_alignment); | ||
974 | ai_usermode = safe_usermode(ai_usermode, false); | 980 | ai_usermode = safe_usermode(ai_usermode, false); |
975 | } | 981 | } |
976 | #endif | 982 | |
983 | cr_no_alignment = get_cr() & ~CR_A; | ||
977 | 984 | ||
978 | hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN, | 985 | hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN, |
979 | "alignment exception"); | 986 | "alignment exception"); |
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c index dc814a548056..e028a7f2ebcc 100644 --- a/arch/arm/mm/cache-feroceon-l2.c +++ b/arch/arm/mm/cache-feroceon-l2.c | |||
@@ -350,7 +350,6 @@ void __init feroceon_l2_init(int __l2_wt_override) | |||
350 | outer_cache.inv_range = feroceon_l2_inv_range; | 350 | outer_cache.inv_range = feroceon_l2_inv_range; |
351 | outer_cache.clean_range = feroceon_l2_clean_range; | 351 | outer_cache.clean_range = feroceon_l2_clean_range; |
352 | outer_cache.flush_range = feroceon_l2_flush_range; | 352 | outer_cache.flush_range = feroceon_l2_flush_range; |
353 | outer_cache.inv_all = l2_inv_all; | ||
354 | 353 | ||
355 | enable_l2(); | 354 | enable_l2(); |
356 | 355 | ||
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 7abde2ce8973..efc5cabf70e0 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c | |||
@@ -16,18 +16,33 @@ | |||
16 | * along with this program; if not, write to the Free Software | 16 | * along with this program; if not, write to the Free Software |
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
18 | */ | 18 | */ |
19 | #include <linux/cpu.h> | ||
19 | #include <linux/err.h> | 20 | #include <linux/err.h> |
20 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/smp.h> | ||
21 | #include <linux/spinlock.h> | 23 | #include <linux/spinlock.h> |
22 | #include <linux/io.h> | 24 | #include <linux/io.h> |
23 | #include <linux/of.h> | 25 | #include <linux/of.h> |
24 | #include <linux/of_address.h> | 26 | #include <linux/of_address.h> |
25 | 27 | ||
26 | #include <asm/cacheflush.h> | 28 | #include <asm/cacheflush.h> |
29 | #include <asm/cp15.h> | ||
30 | #include <asm/cputype.h> | ||
27 | #include <asm/hardware/cache-l2x0.h> | 31 | #include <asm/hardware/cache-l2x0.h> |
28 | #include "cache-tauros3.h" | 32 | #include "cache-tauros3.h" |
29 | #include "cache-aurora-l2.h" | 33 | #include "cache-aurora-l2.h" |
30 | 34 | ||
35 | struct l2c_init_data { | ||
36 | const char *type; | ||
37 | unsigned way_size_0; | ||
38 | unsigned num_lock; | ||
39 | void (*of_parse)(const struct device_node *, u32 *, u32 *); | ||
40 | void (*enable)(void __iomem *, u32, unsigned); | ||
41 | void (*fixup)(void __iomem *, u32, struct outer_cache_fns *); | ||
42 | void (*save)(void __iomem *); | ||
43 | struct outer_cache_fns outer_cache; | ||
44 | }; | ||
45 | |||
31 | #define CACHE_LINE_SIZE 32 | 46 | #define CACHE_LINE_SIZE 32 |
32 | 47 | ||
33 | static void __iomem *l2x0_base; | 48 | static void __iomem *l2x0_base; |
@@ -36,96 +51,116 @@ static u32 l2x0_way_mask; /* Bitmask of active ways */ | |||
36 | static u32 l2x0_size; | 51 | static u32 l2x0_size; |
37 | static unsigned long sync_reg_offset = L2X0_CACHE_SYNC; | 52 | static unsigned long sync_reg_offset = L2X0_CACHE_SYNC; |
38 | 53 | ||
39 | /* Aurora don't have the cache ID register available, so we have to | ||
40 | * pass it though the device tree */ | ||
41 | static u32 cache_id_part_number_from_dt; | ||
42 | |||
43 | struct l2x0_regs l2x0_saved_regs; | 54 | struct l2x0_regs l2x0_saved_regs; |
44 | 55 | ||
45 | struct l2x0_of_data { | 56 | /* |
46 | void (*setup)(const struct device_node *, u32 *, u32 *); | 57 | * Common code for all cache controllers. |
47 | void (*save)(void); | 58 | */ |
48 | struct outer_cache_fns outer_cache; | 59 | static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask) |
49 | }; | ||
50 | |||
51 | static bool of_init = false; | ||
52 | |||
53 | static inline void cache_wait_way(void __iomem *reg, unsigned long mask) | ||
54 | { | 60 | { |
55 | /* wait for cache operation by line or way to complete */ | 61 | /* wait for cache operation by line or way to complete */ |
56 | while (readl_relaxed(reg) & mask) | 62 | while (readl_relaxed(reg) & mask) |
57 | cpu_relax(); | 63 | cpu_relax(); |
58 | } | 64 | } |
59 | 65 | ||
60 | #ifdef CONFIG_CACHE_PL310 | 66 | /* |
61 | static inline void cache_wait(void __iomem *reg, unsigned long mask) | 67 | * By default, we write directly to secure registers. Platforms must |
68 | * override this if they are running non-secure. | ||
69 | */ | ||
70 | static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg) | ||
62 | { | 71 | { |
63 | /* cache operations by line are atomic on PL310 */ | 72 | if (val == readl_relaxed(base + reg)) |
73 | return; | ||
74 | if (outer_cache.write_sec) | ||
75 | outer_cache.write_sec(val, reg); | ||
76 | else | ||
77 | writel_relaxed(val, base + reg); | ||
64 | } | 78 | } |
65 | #else | ||
66 | #define cache_wait cache_wait_way | ||
67 | #endif | ||
68 | 79 | ||
69 | static inline void cache_sync(void) | 80 | /* |
81 | * This should only be called when we have a requirement that the | ||
82 | * register be written due to a work-around, as platforms running | ||
83 | * in non-secure mode may not be able to access this register. | ||
84 | */ | ||
85 | static inline void l2c_set_debug(void __iomem *base, unsigned long val) | ||
70 | { | 86 | { |
71 | void __iomem *base = l2x0_base; | 87 | l2c_write_sec(val, base, L2X0_DEBUG_CTRL); |
72 | |||
73 | writel_relaxed(0, base + sync_reg_offset); | ||
74 | cache_wait(base + L2X0_CACHE_SYNC, 1); | ||
75 | } | 88 | } |
76 | 89 | ||
77 | static inline void l2x0_clean_line(unsigned long addr) | 90 | static void __l2c_op_way(void __iomem *reg) |
78 | { | 91 | { |
79 | void __iomem *base = l2x0_base; | 92 | writel_relaxed(l2x0_way_mask, reg); |
80 | cache_wait(base + L2X0_CLEAN_LINE_PA, 1); | 93 | l2c_wait_mask(reg, l2x0_way_mask); |
81 | writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA); | ||
82 | } | 94 | } |
83 | 95 | ||
84 | static inline void l2x0_inv_line(unsigned long addr) | 96 | static inline void l2c_unlock(void __iomem *base, unsigned num) |
85 | { | 97 | { |
86 | void __iomem *base = l2x0_base; | 98 | unsigned i; |
87 | cache_wait(base + L2X0_INV_LINE_PA, 1); | 99 | |
88 | writel_relaxed(addr, base + L2X0_INV_LINE_PA); | 100 | for (i = 0; i < num; i++) { |
101 | writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE + | ||
102 | i * L2X0_LOCKDOWN_STRIDE); | ||
103 | writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE + | ||
104 | i * L2X0_LOCKDOWN_STRIDE); | ||
105 | } | ||
89 | } | 106 | } |
90 | 107 | ||
91 | #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915) | 108 | /* |
92 | static inline void debug_writel(unsigned long val) | 109 | * Enable the L2 cache controller. This function must only be |
110 | * called when the cache controller is known to be disabled. | ||
111 | */ | ||
112 | static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock) | ||
93 | { | 113 | { |
94 | if (outer_cache.set_debug) | 114 | unsigned long flags; |
95 | outer_cache.set_debug(val); | 115 | |
116 | l2c_write_sec(aux, base, L2X0_AUX_CTRL); | ||
117 | |||
118 | l2c_unlock(base, num_lock); | ||
119 | |||
120 | local_irq_save(flags); | ||
121 | __l2c_op_way(base + L2X0_INV_WAY); | ||
122 | writel_relaxed(0, base + sync_reg_offset); | ||
123 | l2c_wait_mask(base + sync_reg_offset, 1); | ||
124 | local_irq_restore(flags); | ||
125 | |||
126 | l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL); | ||
96 | } | 127 | } |
97 | 128 | ||
98 | static void pl310_set_debug(unsigned long val) | 129 | static void l2c_disable(void) |
99 | { | 130 | { |
100 | writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL); | 131 | void __iomem *base = l2x0_base; |
132 | |||
133 | outer_cache.flush_all(); | ||
134 | l2c_write_sec(0, base, L2X0_CTRL); | ||
135 | dsb(st); | ||
101 | } | 136 | } |
102 | #else | 137 | |
103 | /* Optimised out for non-errata case */ | 138 | #ifdef CONFIG_CACHE_PL310 |
104 | static inline void debug_writel(unsigned long val) | 139 | static inline void cache_wait(void __iomem *reg, unsigned long mask) |
105 | { | 140 | { |
141 | /* cache operations by line are atomic on PL310 */ | ||
106 | } | 142 | } |
107 | 143 | #else | |
108 | #define pl310_set_debug NULL | 144 | #define cache_wait l2c_wait_mask |
109 | #endif | 145 | #endif |
110 | 146 | ||
111 | #ifdef CONFIG_PL310_ERRATA_588369 | 147 | static inline void cache_sync(void) |
112 | static inline void l2x0_flush_line(unsigned long addr) | ||
113 | { | 148 | { |
114 | void __iomem *base = l2x0_base; | 149 | void __iomem *base = l2x0_base; |
115 | 150 | ||
116 | /* Clean by PA followed by Invalidate by PA */ | 151 | writel_relaxed(0, base + sync_reg_offset); |
117 | cache_wait(base + L2X0_CLEAN_LINE_PA, 1); | 152 | cache_wait(base + L2X0_CACHE_SYNC, 1); |
118 | writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA); | ||
119 | cache_wait(base + L2X0_INV_LINE_PA, 1); | ||
120 | writel_relaxed(addr, base + L2X0_INV_LINE_PA); | ||
121 | } | 153 | } |
122 | #else | ||
123 | 154 | ||
124 | static inline void l2x0_flush_line(unsigned long addr) | 155 | #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915) |
156 | static inline void debug_writel(unsigned long val) | ||
157 | { | ||
158 | l2c_set_debug(l2x0_base, val); | ||
159 | } | ||
160 | #else | ||
161 | /* Optimised out for non-errata case */ | ||
162 | static inline void debug_writel(unsigned long val) | ||
125 | { | 163 | { |
126 | void __iomem *base = l2x0_base; | ||
127 | cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); | ||
128 | writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA); | ||
129 | } | 164 | } |
130 | #endif | 165 | #endif |
131 | 166 | ||
@@ -141,8 +176,7 @@ static void l2x0_cache_sync(void) | |||
141 | static void __l2x0_flush_all(void) | 176 | static void __l2x0_flush_all(void) |
142 | { | 177 | { |
143 | debug_writel(0x03); | 178 | debug_writel(0x03); |
144 | writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY); | 179 | __l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY); |
145 | cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask); | ||
146 | cache_sync(); | 180 | cache_sync(); |
147 | debug_writel(0x00); | 181 | debug_writel(0x00); |
148 | } | 182 | } |
@@ -157,275 +191,883 @@ static void l2x0_flush_all(void) | |||
157 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); | 191 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); |
158 | } | 192 | } |
159 | 193 | ||
160 | static void l2x0_clean_all(void) | 194 | static void l2x0_disable(void) |
161 | { | 195 | { |
162 | unsigned long flags; | 196 | unsigned long flags; |
163 | 197 | ||
164 | /* clean all ways */ | ||
165 | raw_spin_lock_irqsave(&l2x0_lock, flags); | 198 | raw_spin_lock_irqsave(&l2x0_lock, flags); |
166 | writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY); | 199 | __l2x0_flush_all(); |
167 | cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask); | 200 | l2c_write_sec(0, l2x0_base, L2X0_CTRL); |
168 | cache_sync(); | 201 | dsb(st); |
169 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); | 202 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); |
170 | } | 203 | } |
171 | 204 | ||
172 | static void l2x0_inv_all(void) | 205 | static void l2c_save(void __iomem *base) |
173 | { | 206 | { |
174 | unsigned long flags; | 207 | l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); |
208 | } | ||
175 | 209 | ||
176 | /* invalidate all ways */ | 210 | /* |
177 | raw_spin_lock_irqsave(&l2x0_lock, flags); | 211 | * L2C-210 specific code. |
178 | /* Invalidating when L2 is enabled is a nono */ | 212 | * |
179 | BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN); | 213 | * The L2C-2x0 PA, set/way and sync operations are atomic, but we must |
180 | writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY); | 214 | * ensure that no background operation is running. The way operations |
181 | cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask); | 215 | * are all background tasks. |
182 | cache_sync(); | 216 | * |
183 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); | 217 | * While a background operation is in progress, any new operation is |
218 | * ignored (unspecified whether this causes an error.) Thankfully, not | ||
219 | * used on SMP. | ||
220 | * | ||
221 | * Never has a different sync register other than L2X0_CACHE_SYNC, but | ||
222 | * we use sync_reg_offset here so we can share some of this with L2C-310. | ||
223 | */ | ||
224 | static void __l2c210_cache_sync(void __iomem *base) | ||
225 | { | ||
226 | writel_relaxed(0, base + sync_reg_offset); | ||
184 | } | 227 | } |
185 | 228 | ||
186 | static void l2x0_inv_range(unsigned long start, unsigned long end) | 229 | static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start, |
230 | unsigned long end) | ||
231 | { | ||
232 | while (start < end) { | ||
233 | writel_relaxed(start, reg); | ||
234 | start += CACHE_LINE_SIZE; | ||
235 | } | ||
236 | } | ||
237 | |||
238 | static void l2c210_inv_range(unsigned long start, unsigned long end) | ||
187 | { | 239 | { |
188 | void __iomem *base = l2x0_base; | 240 | void __iomem *base = l2x0_base; |
189 | unsigned long flags; | ||
190 | 241 | ||
191 | raw_spin_lock_irqsave(&l2x0_lock, flags); | ||
192 | if (start & (CACHE_LINE_SIZE - 1)) { | 242 | if (start & (CACHE_LINE_SIZE - 1)) { |
193 | start &= ~(CACHE_LINE_SIZE - 1); | 243 | start &= ~(CACHE_LINE_SIZE - 1); |
194 | debug_writel(0x03); | 244 | writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA); |
195 | l2x0_flush_line(start); | ||
196 | debug_writel(0x00); | ||
197 | start += CACHE_LINE_SIZE; | 245 | start += CACHE_LINE_SIZE; |
198 | } | 246 | } |
199 | 247 | ||
200 | if (end & (CACHE_LINE_SIZE - 1)) { | 248 | if (end & (CACHE_LINE_SIZE - 1)) { |
201 | end &= ~(CACHE_LINE_SIZE - 1); | 249 | end &= ~(CACHE_LINE_SIZE - 1); |
202 | debug_writel(0x03); | 250 | writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA); |
203 | l2x0_flush_line(end); | ||
204 | debug_writel(0x00); | ||
205 | } | 251 | } |
206 | 252 | ||
253 | __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end); | ||
254 | __l2c210_cache_sync(base); | ||
255 | } | ||
256 | |||
257 | static void l2c210_clean_range(unsigned long start, unsigned long end) | ||
258 | { | ||
259 | void __iomem *base = l2x0_base; | ||
260 | |||
261 | start &= ~(CACHE_LINE_SIZE - 1); | ||
262 | __l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end); | ||
263 | __l2c210_cache_sync(base); | ||
264 | } | ||
265 | |||
266 | static void l2c210_flush_range(unsigned long start, unsigned long end) | ||
267 | { | ||
268 | void __iomem *base = l2x0_base; | ||
269 | |||
270 | start &= ~(CACHE_LINE_SIZE - 1); | ||
271 | __l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end); | ||
272 | __l2c210_cache_sync(base); | ||
273 | } | ||
274 | |||
275 | static void l2c210_flush_all(void) | ||
276 | { | ||
277 | void __iomem *base = l2x0_base; | ||
278 | |||
279 | BUG_ON(!irqs_disabled()); | ||
280 | |||
281 | __l2c_op_way(base + L2X0_CLEAN_INV_WAY); | ||
282 | __l2c210_cache_sync(base); | ||
283 | } | ||
284 | |||
285 | static void l2c210_sync(void) | ||
286 | { | ||
287 | __l2c210_cache_sync(l2x0_base); | ||
288 | } | ||
289 | |||
290 | static void l2c210_resume(void) | ||
291 | { | ||
292 | void __iomem *base = l2x0_base; | ||
293 | |||
294 | if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) | ||
295 | l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1); | ||
296 | } | ||
297 | |||
298 | static const struct l2c_init_data l2c210_data __initconst = { | ||
299 | .type = "L2C-210", | ||
300 | .way_size_0 = SZ_8K, | ||
301 | .num_lock = 1, | ||
302 | .enable = l2c_enable, | ||
303 | .save = l2c_save, | ||
304 | .outer_cache = { | ||
305 | .inv_range = l2c210_inv_range, | ||
306 | .clean_range = l2c210_clean_range, | ||
307 | .flush_range = l2c210_flush_range, | ||
308 | .flush_all = l2c210_flush_all, | ||
309 | .disable = l2c_disable, | ||
310 | .sync = l2c210_sync, | ||
311 | .resume = l2c210_resume, | ||
312 | }, | ||
313 | }; | ||
314 | |||
315 | /* | ||
316 | * L2C-220 specific code. | ||
317 | * | ||
318 | * All operations are background operations: they have to be waited for. | ||
319 | * Conflicting requests generate a slave error (which will cause an | ||
320 | * imprecise abort.) Never uses sync_reg_offset, so we hard-code the | ||
321 | * sync register here. | ||
322 | * | ||
323 | * However, we can re-use the l2c210_resume call. | ||
324 | */ | ||
325 | static inline void __l2c220_cache_sync(void __iomem *base) | ||
326 | { | ||
327 | writel_relaxed(0, base + L2X0_CACHE_SYNC); | ||
328 | l2c_wait_mask(base + L2X0_CACHE_SYNC, 1); | ||
329 | } | ||
330 | |||
331 | static void l2c220_op_way(void __iomem *base, unsigned reg) | ||
332 | { | ||
333 | unsigned long flags; | ||
334 | |||
335 | raw_spin_lock_irqsave(&l2x0_lock, flags); | ||
336 | __l2c_op_way(base + reg); | ||
337 | __l2c220_cache_sync(base); | ||
338 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); | ||
339 | } | ||
340 | |||
341 | static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start, | ||
342 | unsigned long end, unsigned long flags) | ||
343 | { | ||
344 | raw_spinlock_t *lock = &l2x0_lock; | ||
345 | |||
207 | while (start < end) { | 346 | while (start < end) { |
208 | unsigned long blk_end = start + min(end - start, 4096UL); | 347 | unsigned long blk_end = start + min(end - start, 4096UL); |
209 | 348 | ||
210 | while (start < blk_end) { | 349 | while (start < blk_end) { |
211 | l2x0_inv_line(start); | 350 | l2c_wait_mask(reg, 1); |
351 | writel_relaxed(start, reg); | ||
212 | start += CACHE_LINE_SIZE; | 352 | start += CACHE_LINE_SIZE; |
213 | } | 353 | } |
214 | 354 | ||
215 | if (blk_end < end) { | 355 | if (blk_end < end) { |
216 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); | 356 | raw_spin_unlock_irqrestore(lock, flags); |
217 | raw_spin_lock_irqsave(&l2x0_lock, flags); | 357 | raw_spin_lock_irqsave(lock, flags); |
218 | } | 358 | } |
219 | } | 359 | } |
220 | cache_wait(base + L2X0_INV_LINE_PA, 1); | 360 | |
221 | cache_sync(); | 361 | return flags; |
222 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); | ||
223 | } | 362 | } |
224 | 363 | ||
225 | static void l2x0_clean_range(unsigned long start, unsigned long end) | 364 | static void l2c220_inv_range(unsigned long start, unsigned long end) |
226 | { | 365 | { |
227 | void __iomem *base = l2x0_base; | 366 | void __iomem *base = l2x0_base; |
228 | unsigned long flags; | 367 | unsigned long flags; |
229 | 368 | ||
230 | if ((end - start) >= l2x0_size) { | ||
231 | l2x0_clean_all(); | ||
232 | return; | ||
233 | } | ||
234 | |||
235 | raw_spin_lock_irqsave(&l2x0_lock, flags); | 369 | raw_spin_lock_irqsave(&l2x0_lock, flags); |
236 | start &= ~(CACHE_LINE_SIZE - 1); | 370 | if ((start | end) & (CACHE_LINE_SIZE - 1)) { |
237 | while (start < end) { | 371 | if (start & (CACHE_LINE_SIZE - 1)) { |
238 | unsigned long blk_end = start + min(end - start, 4096UL); | 372 | start &= ~(CACHE_LINE_SIZE - 1); |
239 | 373 | writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA); | |
240 | while (start < blk_end) { | ||
241 | l2x0_clean_line(start); | ||
242 | start += CACHE_LINE_SIZE; | 374 | start += CACHE_LINE_SIZE; |
243 | } | 375 | } |
244 | 376 | ||
245 | if (blk_end < end) { | 377 | if (end & (CACHE_LINE_SIZE - 1)) { |
246 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); | 378 | end &= ~(CACHE_LINE_SIZE - 1); |
247 | raw_spin_lock_irqsave(&l2x0_lock, flags); | 379 | l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1); |
380 | writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA); | ||
248 | } | 381 | } |
249 | } | 382 | } |
250 | cache_wait(base + L2X0_CLEAN_LINE_PA, 1); | 383 | |
251 | cache_sync(); | 384 | flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA, |
385 | start, end, flags); | ||
386 | l2c_wait_mask(base + L2X0_INV_LINE_PA, 1); | ||
387 | __l2c220_cache_sync(base); | ||
252 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); | 388 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); |
253 | } | 389 | } |
254 | 390 | ||
255 | static void l2x0_flush_range(unsigned long start, unsigned long end) | 391 | static void l2c220_clean_range(unsigned long start, unsigned long end) |
256 | { | 392 | { |
257 | void __iomem *base = l2x0_base; | 393 | void __iomem *base = l2x0_base; |
258 | unsigned long flags; | 394 | unsigned long flags; |
259 | 395 | ||
396 | start &= ~(CACHE_LINE_SIZE - 1); | ||
260 | if ((end - start) >= l2x0_size) { | 397 | if ((end - start) >= l2x0_size) { |
261 | l2x0_flush_all(); | 398 | l2c220_op_way(base, L2X0_CLEAN_WAY); |
262 | return; | 399 | return; |
263 | } | 400 | } |
264 | 401 | ||
265 | raw_spin_lock_irqsave(&l2x0_lock, flags); | 402 | raw_spin_lock_irqsave(&l2x0_lock, flags); |
403 | flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA, | ||
404 | start, end, flags); | ||
405 | l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1); | ||
406 | __l2c220_cache_sync(base); | ||
407 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); | ||
408 | } | ||
409 | |||
410 | static void l2c220_flush_range(unsigned long start, unsigned long end) | ||
411 | { | ||
412 | void __iomem *base = l2x0_base; | ||
413 | unsigned long flags; | ||
414 | |||
266 | start &= ~(CACHE_LINE_SIZE - 1); | 415 | start &= ~(CACHE_LINE_SIZE - 1); |
416 | if ((end - start) >= l2x0_size) { | ||
417 | l2c220_op_way(base, L2X0_CLEAN_INV_WAY); | ||
418 | return; | ||
419 | } | ||
420 | |||
421 | raw_spin_lock_irqsave(&l2x0_lock, flags); | ||
422 | flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, | ||
423 | start, end, flags); | ||
424 | l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1); | ||
425 | __l2c220_cache_sync(base); | ||
426 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); | ||
427 | } | ||
428 | |||
429 | static void l2c220_flush_all(void) | ||
430 | { | ||
431 | l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY); | ||
432 | } | ||
433 | |||
434 | static void l2c220_sync(void) | ||
435 | { | ||
436 | unsigned long flags; | ||
437 | |||
438 | raw_spin_lock_irqsave(&l2x0_lock, flags); | ||
439 | __l2c220_cache_sync(l2x0_base); | ||
440 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); | ||
441 | } | ||
442 | |||
443 | static void l2c220_enable(void __iomem *base, u32 aux, unsigned num_lock) | ||
444 | { | ||
445 | /* | ||
446 | * Always enable non-secure access to the lockdown registers - | ||
447 | * we write to them as part of the L2C enable sequence so they | ||
448 | * need to be accessible. | ||
449 | */ | ||
450 | aux |= L220_AUX_CTRL_NS_LOCKDOWN; | ||
451 | |||
452 | l2c_enable(base, aux, num_lock); | ||
453 | } | ||
454 | |||
455 | static const struct l2c_init_data l2c220_data = { | ||
456 | .type = "L2C-220", | ||
457 | .way_size_0 = SZ_8K, | ||
458 | .num_lock = 1, | ||
459 | .enable = l2c220_enable, | ||
460 | .save = l2c_save, | ||
461 | .outer_cache = { | ||
462 | .inv_range = l2c220_inv_range, | ||
463 | .clean_range = l2c220_clean_range, | ||
464 | .flush_range = l2c220_flush_range, | ||
465 | .flush_all = l2c220_flush_all, | ||
466 | .disable = l2c_disable, | ||
467 | .sync = l2c220_sync, | ||
468 | .resume = l2c210_resume, | ||
469 | }, | ||
470 | }; | ||
471 | |||
472 | /* | ||
473 | * L2C-310 specific code. | ||
474 | * | ||
475 | * Very similar to L2C-210, the PA, set/way and sync operations are atomic, | ||
476 | * and the way operations are all background tasks. However, issuing an | ||
477 | * operation while a background operation is in progress results in a | ||
478 | * SLVERR response. We can reuse: | ||
479 | * | ||
480 | * __l2c210_cache_sync (using sync_reg_offset) | ||
481 | * l2c210_sync | ||
482 | * l2c210_inv_range (if 588369 is not applicable) | ||
483 | * l2c210_clean_range | ||
484 | * l2c210_flush_range (if 588369 is not applicable) | ||
485 | * l2c210_flush_all (if 727915 is not applicable) | ||
486 | * | ||
487 | * Errata: | ||
488 | * 588369: PL310 R0P0->R1P0, fixed R2P0. | ||
489 | * Affects: all clean+invalidate operations | ||
490 | * clean and invalidate skips the invalidate step, so we need to issue | ||
491 | * separate operations. We also require the above debug workaround | ||
492 | * enclosing this code fragment on affected parts. On unaffected parts, | ||
493 | * we must not use this workaround without the debug register writes | ||
494 | * to avoid exposing a problem similar to 727915. | ||
495 | * | ||
496 | * 727915: PL310 R2P0->R3P0, fixed R3P1. | ||
497 | * Affects: clean+invalidate by way | ||
498 | * clean and invalidate by way runs in the background, and a store can | ||
499 | * hit the line between the clean operation and invalidate operation, | ||
500 | * resulting in the store being lost. | ||
501 | * | ||
502 | * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2. | ||
503 | * Affects: 8x64-bit (double fill) line fetches | ||
504 | * double fill line fetches can fail to cause dirty data to be evicted | ||
505 | * from the cache before the new data overwrites the second line. | ||
506 | * | ||
507 | * 753970: PL310 R3P0, fixed R3P1. | ||
508 | * Affects: sync | ||
509 | * prevents merging writes after the sync operation, until another L2C | ||
510 | * operation is performed (or a number of other conditions.) | ||
511 | * | ||
512 | * 769419: PL310 R0P0->R3P1, fixed R3P2. | ||
513 | * Affects: store buffer | ||
514 | * store buffer is not automatically drained. | ||
515 | */ | ||
516 | static void l2c310_inv_range_erratum(unsigned long start, unsigned long end) | ||
517 | { | ||
518 | void __iomem *base = l2x0_base; | ||
519 | |||
520 | if ((start | end) & (CACHE_LINE_SIZE - 1)) { | ||
521 | unsigned long flags; | ||
522 | |||
523 | /* Erratum 588369 for both clean+invalidate operations */ | ||
524 | raw_spin_lock_irqsave(&l2x0_lock, flags); | ||
525 | l2c_set_debug(base, 0x03); | ||
526 | |||
527 | if (start & (CACHE_LINE_SIZE - 1)) { | ||
528 | start &= ~(CACHE_LINE_SIZE - 1); | ||
529 | writel_relaxed(start, base + L2X0_CLEAN_LINE_PA); | ||
530 | writel_relaxed(start, base + L2X0_INV_LINE_PA); | ||
531 | start += CACHE_LINE_SIZE; | ||
532 | } | ||
533 | |||
534 | if (end & (CACHE_LINE_SIZE - 1)) { | ||
535 | end &= ~(CACHE_LINE_SIZE - 1); | ||
536 | writel_relaxed(end, base + L2X0_CLEAN_LINE_PA); | ||
537 | writel_relaxed(end, base + L2X0_INV_LINE_PA); | ||
538 | } | ||
539 | |||
540 | l2c_set_debug(base, 0x00); | ||
541 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); | ||
542 | } | ||
543 | |||
544 | __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end); | ||
545 | __l2c210_cache_sync(base); | ||
546 | } | ||
547 | |||
548 | static void l2c310_flush_range_erratum(unsigned long start, unsigned long end) | ||
549 | { | ||
550 | raw_spinlock_t *lock = &l2x0_lock; | ||
551 | unsigned long flags; | ||
552 | void __iomem *base = l2x0_base; | ||
553 | |||
554 | raw_spin_lock_irqsave(lock, flags); | ||
267 | while (start < end) { | 555 | while (start < end) { |
268 | unsigned long blk_end = start + min(end - start, 4096UL); | 556 | unsigned long blk_end = start + min(end - start, 4096UL); |
269 | 557 | ||
270 | debug_writel(0x03); | 558 | l2c_set_debug(base, 0x03); |
271 | while (start < blk_end) { | 559 | while (start < blk_end) { |
272 | l2x0_flush_line(start); | 560 | writel_relaxed(start, base + L2X0_CLEAN_LINE_PA); |
561 | writel_relaxed(start, base + L2X0_INV_LINE_PA); | ||
273 | start += CACHE_LINE_SIZE; | 562 | start += CACHE_LINE_SIZE; |
274 | } | 563 | } |
275 | debug_writel(0x00); | 564 | l2c_set_debug(base, 0x00); |
276 | 565 | ||
277 | if (blk_end < end) { | 566 | if (blk_end < end) { |
278 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); | 567 | raw_spin_unlock_irqrestore(lock, flags); |
279 | raw_spin_lock_irqsave(&l2x0_lock, flags); | 568 | raw_spin_lock_irqsave(lock, flags); |
280 | } | 569 | } |
281 | } | 570 | } |
282 | cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); | 571 | raw_spin_unlock_irqrestore(lock, flags); |
283 | cache_sync(); | 572 | __l2c210_cache_sync(base); |
284 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); | ||
285 | } | 573 | } |
286 | 574 | ||
287 | static void l2x0_disable(void) | 575 | static void l2c310_flush_all_erratum(void) |
288 | { | 576 | { |
577 | void __iomem *base = l2x0_base; | ||
289 | unsigned long flags; | 578 | unsigned long flags; |
290 | 579 | ||
291 | raw_spin_lock_irqsave(&l2x0_lock, flags); | 580 | raw_spin_lock_irqsave(&l2x0_lock, flags); |
292 | __l2x0_flush_all(); | 581 | l2c_set_debug(base, 0x03); |
293 | writel_relaxed(0, l2x0_base + L2X0_CTRL); | 582 | __l2c_op_way(base + L2X0_CLEAN_INV_WAY); |
294 | dsb(st); | 583 | l2c_set_debug(base, 0x00); |
584 | __l2c210_cache_sync(base); | ||
295 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); | 585 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); |
296 | } | 586 | } |
297 | 587 | ||
298 | static void l2x0_unlock(u32 cache_id) | 588 | static void __init l2c310_save(void __iomem *base) |
299 | { | 589 | { |
300 | int lockregs; | 590 | unsigned revision; |
301 | int i; | ||
302 | 591 | ||
303 | switch (cache_id & L2X0_CACHE_ID_PART_MASK) { | 592 | l2c_save(base); |
304 | case L2X0_CACHE_ID_PART_L310: | 593 | |
305 | lockregs = 8; | 594 | l2x0_saved_regs.tag_latency = readl_relaxed(base + |
306 | break; | 595 | L310_TAG_LATENCY_CTRL); |
307 | case AURORA_CACHE_ID: | 596 | l2x0_saved_regs.data_latency = readl_relaxed(base + |
308 | lockregs = 4; | 597 | L310_DATA_LATENCY_CTRL); |
598 | l2x0_saved_regs.filter_end = readl_relaxed(base + | ||
599 | L310_ADDR_FILTER_END); | ||
600 | l2x0_saved_regs.filter_start = readl_relaxed(base + | ||
601 | L310_ADDR_FILTER_START); | ||
602 | |||
603 | revision = readl_relaxed(base + L2X0_CACHE_ID) & | ||
604 | L2X0_CACHE_ID_RTL_MASK; | ||
605 | |||
606 | /* From r2p0, there is Prefetch offset/control register */ | ||
607 | if (revision >= L310_CACHE_ID_RTL_R2P0) | ||
608 | l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base + | ||
609 | L310_PREFETCH_CTRL); | ||
610 | |||
611 | /* From r3p0, there is Power control register */ | ||
612 | if (revision >= L310_CACHE_ID_RTL_R3P0) | ||
613 | l2x0_saved_regs.pwr_ctrl = readl_relaxed(base + | ||
614 | L310_POWER_CTRL); | ||
615 | } | ||
616 | |||
617 | static void l2c310_resume(void) | ||
618 | { | ||
619 | void __iomem *base = l2x0_base; | ||
620 | |||
621 | if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) { | ||
622 | unsigned revision; | ||
623 | |||
624 | /* restore pl310 setup */ | ||
625 | writel_relaxed(l2x0_saved_regs.tag_latency, | ||
626 | base + L310_TAG_LATENCY_CTRL); | ||
627 | writel_relaxed(l2x0_saved_regs.data_latency, | ||
628 | base + L310_DATA_LATENCY_CTRL); | ||
629 | writel_relaxed(l2x0_saved_regs.filter_end, | ||
630 | base + L310_ADDR_FILTER_END); | ||
631 | writel_relaxed(l2x0_saved_regs.filter_start, | ||
632 | base + L310_ADDR_FILTER_START); | ||
633 | |||
634 | revision = readl_relaxed(base + L2X0_CACHE_ID) & | ||
635 | L2X0_CACHE_ID_RTL_MASK; | ||
636 | |||
637 | if (revision >= L310_CACHE_ID_RTL_R2P0) | ||
638 | l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base, | ||
639 | L310_PREFETCH_CTRL); | ||
640 | if (revision >= L310_CACHE_ID_RTL_R3P0) | ||
641 | l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base, | ||
642 | L310_POWER_CTRL); | ||
643 | |||
644 | l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8); | ||
645 | |||
646 | /* Re-enable full-line-of-zeros for Cortex-A9 */ | ||
647 | if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO) | ||
648 | set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1)); | ||
649 | } | ||
650 | } | ||
651 | |||
652 | static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, void *data) | ||
653 | { | ||
654 | switch (act & ~CPU_TASKS_FROZEN) { | ||
655 | case CPU_STARTING: | ||
656 | set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1)); | ||
309 | break; | 657 | break; |
310 | default: | 658 | case CPU_DYING: |
311 | /* L210 and unknown types */ | 659 | set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1))); |
312 | lockregs = 1; | ||
313 | break; | 660 | break; |
314 | } | 661 | } |
662 | return NOTIFY_OK; | ||
663 | } | ||
315 | 664 | ||
316 | for (i = 0; i < lockregs; i++) { | 665 | static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock) |
317 | writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE + | 666 | { |
318 | i * L2X0_LOCKDOWN_STRIDE); | 667 | unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_PART_MASK; |
319 | writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE + | 668 | bool cortex_a9 = read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9; |
320 | i * L2X0_LOCKDOWN_STRIDE); | 669 | |
670 | if (rev >= L310_CACHE_ID_RTL_R2P0) { | ||
671 | if (cortex_a9) { | ||
672 | aux |= L310_AUX_CTRL_EARLY_BRESP; | ||
673 | pr_info("L2C-310 enabling early BRESP for Cortex-A9\n"); | ||
674 | } else if (aux & L310_AUX_CTRL_EARLY_BRESP) { | ||
675 | pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n"); | ||
676 | aux &= ~L310_AUX_CTRL_EARLY_BRESP; | ||
677 | } | ||
678 | } | ||
679 | |||
680 | if (cortex_a9) { | ||
681 | u32 aux_cur = readl_relaxed(base + L2X0_AUX_CTRL); | ||
682 | u32 acr = get_auxcr(); | ||
683 | |||
684 | pr_debug("Cortex-A9 ACR=0x%08x\n", acr); | ||
685 | |||
686 | if (acr & BIT(3) && !(aux_cur & L310_AUX_CTRL_FULL_LINE_ZERO)) | ||
687 | pr_err("L2C-310: full line of zeros enabled in Cortex-A9 but not L2C-310 - invalid\n"); | ||
688 | |||
689 | if (aux & L310_AUX_CTRL_FULL_LINE_ZERO && !(acr & BIT(3))) | ||
690 | pr_err("L2C-310: enabling full line of zeros but not enabled in Cortex-A9\n"); | ||
691 | |||
692 | if (!(aux & L310_AUX_CTRL_FULL_LINE_ZERO) && !outer_cache.write_sec) { | ||
693 | aux |= L310_AUX_CTRL_FULL_LINE_ZERO; | ||
694 | pr_info("L2C-310 full line of zeros enabled for Cortex-A9\n"); | ||
695 | } | ||
696 | } else if (aux & (L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP)) { | ||
697 | pr_err("L2C-310: disabling Cortex-A9 specific feature bits\n"); | ||
698 | aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP); | ||
699 | } | ||
700 | |||
701 | if (aux & (L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH)) { | ||
702 | u32 prefetch = readl_relaxed(base + L310_PREFETCH_CTRL); | ||
703 | |||
704 | pr_info("L2C-310 %s%s prefetch enabled, offset %u lines\n", | ||
705 | aux & L310_AUX_CTRL_INSTR_PREFETCH ? "I" : "", | ||
706 | aux & L310_AUX_CTRL_DATA_PREFETCH ? "D" : "", | ||
707 | 1 + (prefetch & L310_PREFETCH_CTRL_OFFSET_MASK)); | ||
708 | } | ||
709 | |||
710 | /* r3p0 or later has power control register */ | ||
711 | if (rev >= L310_CACHE_ID_RTL_R3P0) { | ||
712 | u32 power_ctrl; | ||
713 | |||
714 | l2c_write_sec(L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN, | ||
715 | base, L310_POWER_CTRL); | ||
716 | power_ctrl = readl_relaxed(base + L310_POWER_CTRL); | ||
717 | pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n", | ||
718 | power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis", | ||
719 | power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis"); | ||
720 | } | ||
721 | |||
722 | /* | ||
723 | * Always enable non-secure access to the lockdown registers - | ||
724 | * we write to them as part of the L2C enable sequence so they | ||
725 | * need to be accessible. | ||
726 | */ | ||
727 | aux |= L310_AUX_CTRL_NS_LOCKDOWN; | ||
728 | |||
729 | l2c_enable(base, aux, num_lock); | ||
730 | |||
731 | if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) { | ||
732 | set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1)); | ||
733 | cpu_notifier(l2c310_cpu_enable_flz, 0); | ||
321 | } | 734 | } |
322 | } | 735 | } |
323 | 736 | ||
324 | void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) | 737 | static void __init l2c310_fixup(void __iomem *base, u32 cache_id, |
738 | struct outer_cache_fns *fns) | ||
325 | { | 739 | { |
326 | u32 aux; | 740 | unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK; |
327 | u32 cache_id; | 741 | const char *errata[8]; |
328 | u32 way_size = 0; | 742 | unsigned n = 0; |
329 | int ways; | 743 | |
330 | int way_size_shift = L2X0_WAY_SIZE_SHIFT; | 744 | if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) && |
331 | const char *type; | 745 | revision < L310_CACHE_ID_RTL_R2P0 && |
746 | /* For bcm compatibility */ | ||
747 | fns->inv_range == l2c210_inv_range) { | ||
748 | fns->inv_range = l2c310_inv_range_erratum; | ||
749 | fns->flush_range = l2c310_flush_range_erratum; | ||
750 | errata[n++] = "588369"; | ||
751 | } | ||
332 | 752 | ||
333 | l2x0_base = base; | 753 | if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) && |
334 | if (cache_id_part_number_from_dt) | 754 | revision >= L310_CACHE_ID_RTL_R2P0 && |
335 | cache_id = cache_id_part_number_from_dt; | 755 | revision < L310_CACHE_ID_RTL_R3P1) { |
336 | else | 756 | fns->flush_all = l2c310_flush_all_erratum; |
337 | cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); | 757 | errata[n++] = "727915"; |
338 | aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); | 758 | } |
759 | |||
760 | if (revision >= L310_CACHE_ID_RTL_R3P0 && | ||
761 | revision < L310_CACHE_ID_RTL_R3P2) { | ||
762 | u32 val = readl_relaxed(base + L310_PREFETCH_CTRL); | ||
763 | /* I don't think bit23 is required here... but iMX6 does so */ | ||
764 | if (val & (BIT(30) | BIT(23))) { | ||
765 | val &= ~(BIT(30) | BIT(23)); | ||
766 | l2c_write_sec(val, base, L310_PREFETCH_CTRL); | ||
767 | errata[n++] = "752271"; | ||
768 | } | ||
769 | } | ||
770 | |||
771 | if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) && | ||
772 | revision == L310_CACHE_ID_RTL_R3P0) { | ||
773 | sync_reg_offset = L2X0_DUMMY_REG; | ||
774 | errata[n++] = "753970"; | ||
775 | } | ||
776 | |||
777 | if (IS_ENABLED(CONFIG_PL310_ERRATA_769419)) | ||
778 | errata[n++] = "769419"; | ||
779 | |||
780 | if (n) { | ||
781 | unsigned i; | ||
339 | 782 | ||
783 | pr_info("L2C-310 errat%s", n > 1 ? "a" : "um"); | ||
784 | for (i = 0; i < n; i++) | ||
785 | pr_cont(" %s", errata[i]); | ||
786 | pr_cont(" enabled\n"); | ||
787 | } | ||
788 | } | ||
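The fixup hook collects the names of the workarounds it applied and prints them on one line, switching between "erratum" and "errata" on the count. A stand-alone sketch of a subset of that selection logic (the revision thresholds are reduced to plain enumerators here purely for illustration):

#include <stdio.h>

/* Illustrative revision ordering only: r1p0 < r2p0 < r3p0 < r3p1 < r3p2. */
enum { R1P0, R2P0, R3P0, R3P1, R3P2 };

static void report_errata(int revision)
{
	const char *errata[8];
	unsigned n = 0, i;

	if (revision < R2P0)
		errata[n++] = "588369";
	if (revision >= R2P0 && revision < R3P1)
		errata[n++] = "727915";
	if (revision == R3P0)
		errata[n++] = "753970";

	if (!n)
		return;

	printf("L2C-310 errat%s", n > 1 ? "a" : "um");
	for (i = 0; i < n; i++)
		printf(" %s", errata[i]);
	printf(" enabled\n");
}

int main(void)
{
	report_errata(R3P0);   /* prints: L2C-310 errata 727915 753970 enabled */
	report_errata(R1P0);   /* prints: L2C-310 erratum 588369 enabled */
	return 0;
}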
789 | |||
790 | static void l2c310_disable(void) | ||
791 | { | ||
792 | /* | ||
793 | * If full-line-of-zeros is enabled, we must first disable it in the | ||
794 | * Cortex-A9 auxiliary control register before disabling the L2 cache. | ||
795 | */ | ||
796 | if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO) | ||
797 | set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1))); | ||
798 | |||
799 | l2c_disable(); | ||
800 | } | ||
801 | |||
802 | static const struct l2c_init_data l2c310_init_fns __initconst = { | ||
803 | .type = "L2C-310", | ||
804 | .way_size_0 = SZ_8K, | ||
805 | .num_lock = 8, | ||
806 | .enable = l2c310_enable, | ||
807 | .fixup = l2c310_fixup, | ||
808 | .save = l2c310_save, | ||
809 | .outer_cache = { | ||
810 | .inv_range = l2c210_inv_range, | ||
811 | .clean_range = l2c210_clean_range, | ||
812 | .flush_range = l2c210_flush_range, | ||
813 | .flush_all = l2c210_flush_all, | ||
814 | .disable = l2c310_disable, | ||
815 | .sync = l2c210_sync, | ||
816 | .resume = l2c310_resume, | ||
817 | }, | ||
818 | }; | ||
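Each controller variant is now described by one read-only descriptor like the block above, and the common code only ever calls through it. A compact sketch of that method-table pattern (types and names here are invented for illustration, not the kernel's own):

#include <stdio.h>

struct cache_ops {
	const char *type;
	unsigned way_size_0;
	void (*enable)(void);
	void (*fixup)(void);          /* optional */
	void (*save)(void);           /* optional */
};

static void generic_enable(void) { puts("enable"); }
static void l310_fixup(void)     { puts("apply errata"); }

static const struct cache_ops demo_l310 = {
	.type       = "L2C-310",
	.way_size_0 = 8 * 1024,
	.enable     = generic_enable,
	.fixup      = l310_fixup,
	/* .save left NULL: callers must check before dereferencing */
};

static void cache_init(const struct cache_ops *ops)
{
	if (ops->fixup)
		ops->fixup();
	ops->enable();
	if (ops->save)
		ops->save();
	printf("%s initialised, way_size_0=%u\n", ops->type, ops->way_size_0);
}

int main(void)
{
	cache_init(&demo_l310);
	return 0;
}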
819 | |||
820 | static void __init __l2c_init(const struct l2c_init_data *data, | ||
821 | u32 aux_val, u32 aux_mask, u32 cache_id) | ||
822 | { | ||
823 | struct outer_cache_fns fns; | ||
824 | unsigned way_size_bits, ways; | ||
825 | u32 aux, old_aux; | ||
826 | |||
827 | /* | ||
828 | * Sanity check the aux values. aux_mask is the bits we preserve | ||
829 | * from reading the hardware register, and aux_val is the bits we | ||
830 | * set. | ||
831 | */ | ||
832 | if (aux_val & aux_mask) | ||
833 | pr_alert("L2C: platform provided aux values permit register corruption.\n"); | ||
834 | |||
835 | old_aux = aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); | ||
340 | aux &= aux_mask; | 836 | aux &= aux_mask; |
341 | aux |= aux_val; | 837 | aux |= aux_val; |
342 | 838 | ||
839 | if (old_aux != aux) | ||
840 | pr_warn("L2C: DT/platform modifies aux control register: 0x%08x -> 0x%08x\n", | ||
841 | old_aux, aux); | ||
842 | |||
343 | /* Determine the number of ways */ | 843 | /* Determine the number of ways */ |
344 | switch (cache_id & L2X0_CACHE_ID_PART_MASK) { | 844 | switch (cache_id & L2X0_CACHE_ID_PART_MASK) { |
345 | case L2X0_CACHE_ID_PART_L310: | 845 | case L2X0_CACHE_ID_PART_L310: |
846 | if ((aux_val | ~aux_mask) & (L2C_AUX_CTRL_WAY_SIZE_MASK | L310_AUX_CTRL_ASSOCIATIVITY_16)) | ||
847 | pr_warn("L2C: DT/platform tries to modify or specify cache size\n"); | ||
346 | if (aux & (1 << 16)) | 848 | if (aux & (1 << 16)) |
347 | ways = 16; | 849 | ways = 16; |
348 | else | 850 | else |
349 | ways = 8; | 851 | ways = 8; |
350 | type = "L310"; | ||
351 | #ifdef CONFIG_PL310_ERRATA_753970 | ||
352 | /* Unmapped register. */ | ||
353 | sync_reg_offset = L2X0_DUMMY_REG; | ||
354 | #endif | ||
355 | if ((cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0) | ||
356 | outer_cache.set_debug = pl310_set_debug; | ||
357 | break; | 852 | break; |
853 | |||
358 | case L2X0_CACHE_ID_PART_L210: | 854 | case L2X0_CACHE_ID_PART_L210: |
855 | case L2X0_CACHE_ID_PART_L220: | ||
359 | ways = (aux >> 13) & 0xf; | 856 | ways = (aux >> 13) & 0xf; |
360 | type = "L210"; | ||
361 | break; | 857 | break; |
362 | 858 | ||
363 | case AURORA_CACHE_ID: | 859 | case AURORA_CACHE_ID: |
364 | sync_reg_offset = AURORA_SYNC_REG; | ||
365 | ways = (aux >> 13) & 0xf; | 860 | ways = (aux >> 13) & 0xf; |
366 | ways = 2 << ((ways + 1) >> 2); | 861 | ways = 2 << ((ways + 1) >> 2); |
367 | way_size_shift = AURORA_WAY_SIZE_SHIFT; | ||
368 | type = "Aurora"; | ||
369 | break; | 862 | break; |
863 | |||
370 | default: | 864 | default: |
371 | /* Assume unknown chips have 8 ways */ | 865 | /* Assume unknown chips have 8 ways */ |
372 | ways = 8; | 866 | ways = 8; |
373 | type = "L2x0 series"; | ||
374 | break; | 867 | break; |
375 | } | 868 | } |
376 | 869 | ||
377 | l2x0_way_mask = (1 << ways) - 1; | 870 | l2x0_way_mask = (1 << ways) - 1; |
378 | 871 | ||
379 | /* | 872 | /* |
380 | * L2 cache Size = Way size * Number of ways | 873 | * way_size_0 is the size that a way_size value of zero would be |
874 | * given the calculation: way_size = way_size_0 << way_size_bits. | ||
875 | * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k, | ||
876 | * then way_size_0 would be 8k. | ||
877 | * | ||
878 | * L2 cache size = number of ways * way size. | ||
381 | */ | 879 | */ |
382 | way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17; | 880 | way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >> |
383 | way_size = 1 << (way_size + way_size_shift); | 881 | L2C_AUX_CTRL_WAY_SIZE_SHIFT; |
882 | l2x0_size = ways * (data->way_size_0 << way_size_bits); | ||
384 | 883 | ||
385 | l2x0_size = ways * way_size * SZ_1K; | 884 | fns = data->outer_cache; |
885 | fns.write_sec = outer_cache.write_sec; | ||
886 | if (data->fixup) | ||
887 | data->fixup(l2x0_base, cache_id, &fns); | ||
386 | 888 | ||
387 | /* | 889 | /* |
388 | * Check if l2x0 controller is already enabled. | 890 | * Check if l2x0 controller is already enabled. If we are booting |
389 | * If you are booting from non-secure mode | 891 | * in non-secure mode accessing the below registers will fault. |
390 | * accessing the below registers will fault. | ||
391 | */ | 892 | */ |
392 | if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { | 893 | if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) |
393 | /* Make sure that I&D is not locked down when starting */ | 894 | data->enable(l2x0_base, aux, data->num_lock); |
394 | l2x0_unlock(cache_id); | ||
395 | 895 | ||
396 | /* l2x0 controller is disabled */ | 896 | outer_cache = fns; |
397 | writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL); | ||
398 | 897 | ||
399 | l2x0_inv_all(); | 898 | /* |
400 | 899 | * It is strange to save the register state before initialisation, | |
401 | /* enable L2X0 */ | 900 | * but hey, this is what the DT implementations decided to do. |
402 | writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL); | 901 | */ |
403 | } | 902 | if (data->save) |
903 | data->save(l2x0_base); | ||
404 | 904 | ||
405 | /* Re-read it in case some bits are reserved. */ | 905 | /* Re-read it in case some bits are reserved. */ |
406 | aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); | 906 | aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); |
407 | 907 | ||
408 | /* Save the value for resuming. */ | 908 | pr_info("%s cache controller enabled, %d ways, %d kB\n", |
409 | l2x0_saved_regs.aux_ctrl = aux; | 909 | data->type, ways, l2x0_size >> 10); |
910 | pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n", | ||
911 | data->type, cache_id, aux); | ||
912 | } | ||
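The size computation in __l2c_init() replaces the old per-variant shift table: every controller reports a way-size field in AUX_CTRL, and the descriptor's way_size_0 anchors what a field value of zero would mean. A small worked example of that decode; the mask and shift values below are assumptions made for the example (the real ones live in cache-l2x0.h):

#include <stdio.h>

#define WAY_SIZE_SHIFT 17                    /* assumed field position */
#define WAY_SIZE_MASK  (7u << WAY_SIZE_SHIFT)
#define ASSOC_16       (1u << 16)            /* L310: 16-way when set */

static unsigned l2_size_bytes(unsigned aux, unsigned way_size_0)
{
	unsigned ways = (aux & ASSOC_16) ? 16 : 8;
	unsigned bits = (aux & WAY_SIZE_MASK) >> WAY_SIZE_SHIFT;

	return ways * (way_size_0 << bits);
}

int main(void)
{
	/* 16 ways, way-size field = 3: 8 KiB << 3 = 64 KiB per way -> 1 MiB total */
	unsigned aux = ASSOC_16 | (3u << WAY_SIZE_SHIFT);

	printf("%u KiB\n", l2_size_bytes(aux, 8 * 1024) / 1024);
	return 0;
}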
913 | |||
914 | void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) | ||
915 | { | ||
916 | const struct l2c_init_data *data; | ||
917 | u32 cache_id; | ||
918 | |||
919 | l2x0_base = base; | ||
920 | |||
921 | cache_id = readl_relaxed(base + L2X0_CACHE_ID); | ||
922 | |||
923 | switch (cache_id & L2X0_CACHE_ID_PART_MASK) { | ||
924 | default: | ||
925 | case L2X0_CACHE_ID_PART_L210: | ||
926 | data = &l2c210_data; | ||
927 | break; | ||
410 | 928 | ||
411 | if (!of_init) { | 929 | case L2X0_CACHE_ID_PART_L220: |
412 | outer_cache.inv_range = l2x0_inv_range; | 930 | data = &l2c220_data; |
413 | outer_cache.clean_range = l2x0_clean_range; | 931 | break; |
414 | outer_cache.flush_range = l2x0_flush_range; | 932 | |
415 | outer_cache.sync = l2x0_cache_sync; | 933 | case L2X0_CACHE_ID_PART_L310: |
416 | outer_cache.flush_all = l2x0_flush_all; | 934 | data = &l2c310_init_fns; |
417 | outer_cache.inv_all = l2x0_inv_all; | 935 | break; |
418 | outer_cache.disable = l2x0_disable; | ||
419 | } | 936 | } |
420 | 937 | ||
421 | pr_info("%s cache controller enabled\n", type); | 938 | __l2c_init(data, aux_val, aux_mask, cache_id); |
422 | pr_info("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d kB\n", | ||
423 | ways, cache_id, aux, l2x0_size >> 10); | ||
424 | } | 939 | } |
425 | 940 | ||
426 | #ifdef CONFIG_OF | 941 | #ifdef CONFIG_OF |
427 | static int l2_wt_override; | 942 | static int l2_wt_override; |
428 | 943 | ||
944 | /* Aurora doesn't have the cache ID register available, so we have to | ||
945 | * pass it through the device tree */ | ||
946 | static u32 cache_id_part_number_from_dt; | ||
947 | |||
948 | static void __init l2x0_of_parse(const struct device_node *np, | ||
949 | u32 *aux_val, u32 *aux_mask) | ||
950 | { | ||
951 | u32 data[2] = { 0, 0 }; | ||
952 | u32 tag = 0; | ||
953 | u32 dirty = 0; | ||
954 | u32 val = 0, mask = 0; | ||
955 | |||
956 | of_property_read_u32(np, "arm,tag-latency", &tag); | ||
957 | if (tag) { | ||
958 | mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK; | ||
959 | val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT; | ||
960 | } | ||
961 | |||
962 | of_property_read_u32_array(np, "arm,data-latency", | ||
963 | data, ARRAY_SIZE(data)); | ||
964 | if (data[0] && data[1]) { | ||
965 | mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK | | ||
966 | L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK; | ||
967 | val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) | | ||
968 | ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT); | ||
969 | } | ||
970 | |||
971 | of_property_read_u32(np, "arm,dirty-latency", &dirty); | ||
972 | if (dirty) { | ||
973 | mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK; | ||
974 | val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT; | ||
975 | } | ||
976 | |||
977 | *aux_val &= ~mask; | ||
978 | *aux_val |= val; | ||
979 | *aux_mask &= ~mask; | ||
980 | } | ||
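The parser folds the optional DT latency properties into a (value, mask) pair: each latency is programmed as cycles minus one, the touched field is removed from aux_mask so the DT value wins over the hardware default, and absent properties leave the register alone. A stand-alone sketch of that encoding (the shift and mask constants are placeholders, not the kernel's definitions):

#include <stdio.h>

#define TAG_LAT_SHIFT 6                     /* placeholder field position */
#define TAG_LAT_MASK  (7u << TAG_LAT_SHIFT)

/* Fold a "tag-latency = <N>" property (0 = not present) into val/mask. */
static void encode_tag_latency(unsigned cycles, unsigned *val, unsigned *mask)
{
	if (!cycles)
		return;                             /* property absent: keep hardware default */
	*mask |= TAG_LAT_MASK;
	*val  |= (cycles - 1) << TAG_LAT_SHIFT; /* hardware encodes latency as cycles - 1 */
}

int main(void)
{
	unsigned aux_val = 0, aux_mask = ~0u, val = 0, mask = 0;

	encode_tag_latency(2, &val, &mask);     /* DT asked for a 2-cycle tag RAM */

	aux_val  = (aux_val & ~mask) | val;     /* DT-provided bits */
	aux_mask &= ~mask;                      /* stop those bits being taken from hardware */

	printf("aux_val=%#x aux_mask=%#x\n", aux_val, aux_mask);
	return 0;
}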
981 | |||
982 | static const struct l2c_init_data of_l2c210_data __initconst = { | ||
983 | .type = "L2C-210", | ||
984 | .way_size_0 = SZ_8K, | ||
985 | .num_lock = 1, | ||
986 | .of_parse = l2x0_of_parse, | ||
987 | .enable = l2c_enable, | ||
988 | .save = l2c_save, | ||
989 | .outer_cache = { | ||
990 | .inv_range = l2c210_inv_range, | ||
991 | .clean_range = l2c210_clean_range, | ||
992 | .flush_range = l2c210_flush_range, | ||
993 | .flush_all = l2c210_flush_all, | ||
994 | .disable = l2c_disable, | ||
995 | .sync = l2c210_sync, | ||
996 | .resume = l2c210_resume, | ||
997 | }, | ||
998 | }; | ||
999 | |||
1000 | static const struct l2c_init_data of_l2c220_data __initconst = { | ||
1001 | .type = "L2C-220", | ||
1002 | .way_size_0 = SZ_8K, | ||
1003 | .num_lock = 1, | ||
1004 | .of_parse = l2x0_of_parse, | ||
1005 | .enable = l2c220_enable, | ||
1006 | .save = l2c_save, | ||
1007 | .outer_cache = { | ||
1008 | .inv_range = l2c220_inv_range, | ||
1009 | .clean_range = l2c220_clean_range, | ||
1010 | .flush_range = l2c220_flush_range, | ||
1011 | .flush_all = l2c220_flush_all, | ||
1012 | .disable = l2c_disable, | ||
1013 | .sync = l2c220_sync, | ||
1014 | .resume = l2c210_resume, | ||
1015 | }, | ||
1016 | }; | ||
1017 | |||
1018 | static void __init l2c310_of_parse(const struct device_node *np, | ||
1019 | u32 *aux_val, u32 *aux_mask) | ||
1020 | { | ||
1021 | u32 data[3] = { 0, 0, 0 }; | ||
1022 | u32 tag[3] = { 0, 0, 0 }; | ||
1023 | u32 filter[2] = { 0, 0 }; | ||
1024 | |||
1025 | of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag)); | ||
1026 | if (tag[0] && tag[1] && tag[2]) | ||
1027 | writel_relaxed( | ||
1028 | L310_LATENCY_CTRL_RD(tag[0] - 1) | | ||
1029 | L310_LATENCY_CTRL_WR(tag[1] - 1) | | ||
1030 | L310_LATENCY_CTRL_SETUP(tag[2] - 1), | ||
1031 | l2x0_base + L310_TAG_LATENCY_CTRL); | ||
1032 | |||
1033 | of_property_read_u32_array(np, "arm,data-latency", | ||
1034 | data, ARRAY_SIZE(data)); | ||
1035 | if (data[0] && data[1] && data[2]) | ||
1036 | writel_relaxed( | ||
1037 | L310_LATENCY_CTRL_RD(data[0] - 1) | | ||
1038 | L310_LATENCY_CTRL_WR(data[1] - 1) | | ||
1039 | L310_LATENCY_CTRL_SETUP(data[2] - 1), | ||
1040 | l2x0_base + L310_DATA_LATENCY_CTRL); | ||
1041 | |||
1042 | of_property_read_u32_array(np, "arm,filter-ranges", | ||
1043 | filter, ARRAY_SIZE(filter)); | ||
1044 | if (filter[1]) { | ||
1045 | writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M), | ||
1046 | l2x0_base + L310_ADDR_FILTER_END); | ||
1047 | writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L310_ADDR_FILTER_EN, | ||
1048 | l2x0_base + L310_ADDR_FILTER_START); | ||
1049 | } | ||
1050 | } | ||
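The "arm,filter-ranges" handling turns a <base size> pair from the DT into the two L310 address-filter registers: the end register is base plus size rounded up to 1 MiB, and the start register is the base rounded down to 1 MiB with the enable bit ORed in. A small sketch of that arithmetic (the 1 MiB granularity is taken from the code above; the enable bit position and the addresses are assumptions for the example):

#include <stdio.h>
#include <stdint.h>

#define SZ_1M          0x100000u
#define FILTER_EN      0x1u                      /* assumed enable bit */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uint32_t base = 0x80000000u, size = 0x40000000u;  /* example filter range from DT */

	uint32_t end   = ALIGN_UP(base + size, SZ_1M);
	uint32_t start = (base & ~(SZ_1M - 1)) | FILTER_EN;

	printf("ADDR_FILTER_START = %#x\n", start);       /* 0x80000001 */
	printf("ADDR_FILTER_END   = %#x\n", end);         /* 0xc0000000 */
	return 0;
}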
1051 | |||
1052 | static const struct l2c_init_data of_l2c310_data __initconst = { | ||
1053 | .type = "L2C-310", | ||
1054 | .way_size_0 = SZ_8K, | ||
1055 | .num_lock = 8, | ||
1056 | .of_parse = l2c310_of_parse, | ||
1057 | .enable = l2c310_enable, | ||
1058 | .fixup = l2c310_fixup, | ||
1059 | .save = l2c310_save, | ||
1060 | .outer_cache = { | ||
1061 | .inv_range = l2c210_inv_range, | ||
1062 | .clean_range = l2c210_clean_range, | ||
1063 | .flush_range = l2c210_flush_range, | ||
1064 | .flush_all = l2c210_flush_all, | ||
1065 | .disable = l2c310_disable, | ||
1066 | .sync = l2c210_sync, | ||
1067 | .resume = l2c310_resume, | ||
1068 | }, | ||
1069 | }; | ||
1070 | |||
429 | /* | 1071 | /* |
430 | * Note that the end addresses passed to Linux primitives are | 1072 | * Note that the end addresses passed to Linux primitives are |
431 | * noninclusive, while the hardware cache range operations use | 1073 | * noninclusive, while the hardware cache range operations use |
@@ -524,6 +1166,100 @@ static void aurora_flush_range(unsigned long start, unsigned long end) | |||
524 | } | 1166 | } |
525 | } | 1167 | } |
526 | 1168 | ||
1169 | static void aurora_save(void __iomem *base) | ||
1170 | { | ||
1171 | l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL); | ||
1172 | l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL); | ||
1173 | } | ||
1174 | |||
1175 | static void aurora_resume(void) | ||
1176 | { | ||
1177 | void __iomem *base = l2x0_base; | ||
1178 | |||
1179 | if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) { | ||
1180 | writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL); | ||
1181 | writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL); | ||
1182 | } | ||
1183 | } | ||
1184 | |||
1185 | /* | ||
1186 | * For Aurora cache in no outer mode, enable via the CP15 coprocessor | ||
1187 | * broadcasting of cache commands to L2. | ||
1188 | */ | ||
1189 | static void __init aurora_enable_no_outer(void __iomem *base, u32 aux, | ||
1190 | unsigned num_lock) | ||
1191 | { | ||
1192 | u32 u; | ||
1193 | |||
1194 | asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u)); | ||
1195 | u |= AURORA_CTRL_FW; /* Set the FW bit */ | ||
1196 | asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u)); | ||
1197 | |||
1198 | isb(); | ||
1199 | |||
1200 | l2c_enable(base, aux, num_lock); | ||
1201 | } | ||
1202 | |||
1203 | static void __init aurora_fixup(void __iomem *base, u32 cache_id, | ||
1204 | struct outer_cache_fns *fns) | ||
1205 | { | ||
1206 | sync_reg_offset = AURORA_SYNC_REG; | ||
1207 | } | ||
1208 | |||
1209 | static void __init aurora_of_parse(const struct device_node *np, | ||
1210 | u32 *aux_val, u32 *aux_mask) | ||
1211 | { | ||
1212 | u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU; | ||
1213 | u32 mask = AURORA_ACR_REPLACEMENT_MASK; | ||
1214 | |||
1215 | of_property_read_u32(np, "cache-id-part", | ||
1216 | &cache_id_part_number_from_dt); | ||
1217 | |||
1218 | /* Determine and save the write policy */ | ||
1219 | l2_wt_override = of_property_read_bool(np, "wt-override"); | ||
1220 | |||
1221 | if (l2_wt_override) { | ||
1222 | val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY; | ||
1223 | mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK; | ||
1224 | } | ||
1225 | |||
1226 | *aux_val &= ~mask; | ||
1227 | *aux_val |= val; | ||
1228 | *aux_mask &= ~mask; | ||
1229 | } | ||
1230 | |||
1231 | static const struct l2c_init_data of_aurora_with_outer_data __initconst = { | ||
1232 | .type = "Aurora", | ||
1233 | .way_size_0 = SZ_4K, | ||
1234 | .num_lock = 4, | ||
1235 | .of_parse = aurora_of_parse, | ||
1236 | .enable = l2c_enable, | ||
1237 | .fixup = aurora_fixup, | ||
1238 | .save = aurora_save, | ||
1239 | .outer_cache = { | ||
1240 | .inv_range = aurora_inv_range, | ||
1241 | .clean_range = aurora_clean_range, | ||
1242 | .flush_range = aurora_flush_range, | ||
1243 | .flush_all = l2x0_flush_all, | ||
1244 | .disable = l2x0_disable, | ||
1245 | .sync = l2x0_cache_sync, | ||
1246 | .resume = aurora_resume, | ||
1247 | }, | ||
1248 | }; | ||
1249 | |||
1250 | static const struct l2c_init_data of_aurora_no_outer_data __initconst = { | ||
1251 | .type = "Aurora", | ||
1252 | .way_size_0 = SZ_4K, | ||
1253 | .num_lock = 4, | ||
1254 | .of_parse = aurora_of_parse, | ||
1255 | .enable = aurora_enable_no_outer, | ||
1256 | .fixup = aurora_fixup, | ||
1257 | .save = aurora_save, | ||
1258 | .outer_cache = { | ||
1259 | .resume = aurora_resume, | ||
1260 | }, | ||
1261 | }; | ||
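In the "system cache" (no outer) configuration above, only .resume is populated: maintenance is broadcast by the CPU itself, so the generic outer-cache wrappers must simply do nothing. That works because the wrappers test each function pointer before calling it, roughly as in this simplified sketch (not the exact code of the real outer_cache helpers):

#include <stdio.h>

struct outer_cache_fns {
	void (*inv_range)(unsigned long start, unsigned long end);
	void (*resume)(void);
};

static struct outer_cache_fns outer_cache;   /* only .resume set for "no outer" mode */

static void outer_inv_range(unsigned long start, unsigned long end)
{
	if (outer_cache.inv_range)               /* unset hook: silently a no-op */
		outer_cache.inv_range(start, end);
}

static void demo_resume(void) { puts("resume"); }

int main(void)
{
	outer_cache.resume = demo_resume;
	outer_inv_range(0x1000, 0x2000);          /* nothing to do, and that is fine */
	outer_cache.resume();
	return 0;
}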
1262 | |||
527 | /* | 1263 | /* |
528 | * For certain Broadcom SoCs, depending on the address range, different offsets | 1264 | * For certain Broadcom SoCs, depending on the address range, different offsets |
529 | * need to be added to the address before passing it to L2 for | 1265 | * need to be added to the address before passing it to L2 for |
@@ -588,16 +1324,16 @@ static void bcm_inv_range(unsigned long start, unsigned long end) | |||
588 | 1324 | ||
589 | /* normal case, no cross section between start and end */ | 1325 | /* normal case, no cross section between start and end */ |
590 | if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { | 1326 | if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { |
591 | l2x0_inv_range(new_start, new_end); | 1327 | l2c210_inv_range(new_start, new_end); |
592 | return; | 1328 | return; |
593 | } | 1329 | } |
594 | 1330 | ||
595 | /* They cross sections, so it can only be a cross from section | 1331 | /* They cross sections, so it can only be a cross from section |
596 | * 2 to section 3 | 1332 | * 2 to section 3 |
597 | */ | 1333 | */ |
598 | l2x0_inv_range(new_start, | 1334 | l2c210_inv_range(new_start, |
599 | bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); | 1335 | bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); |
600 | l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), | 1336 | l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), |
601 | new_end); | 1337 | new_end); |
602 | } | 1338 | } |
603 | 1339 | ||
@@ -610,26 +1346,21 @@ static void bcm_clean_range(unsigned long start, unsigned long end) | |||
610 | if (unlikely(end <= start)) | 1346 | if (unlikely(end <= start)) |
611 | return; | 1347 | return; |
612 | 1348 | ||
613 | if ((end - start) >= l2x0_size) { | ||
614 | l2x0_clean_all(); | ||
615 | return; | ||
616 | } | ||
617 | |||
618 | new_start = bcm_l2_phys_addr(start); | 1349 | new_start = bcm_l2_phys_addr(start); |
619 | new_end = bcm_l2_phys_addr(end); | 1350 | new_end = bcm_l2_phys_addr(end); |
620 | 1351 | ||
621 | /* normal case, no cross section between start and end */ | 1352 | /* normal case, no cross section between start and end */ |
622 | if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { | 1353 | if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { |
623 | l2x0_clean_range(new_start, new_end); | 1354 | l2c210_clean_range(new_start, new_end); |
624 | return; | 1355 | return; |
625 | } | 1356 | } |
626 | 1357 | ||
627 | /* They cross sections, so it can only be a cross from section | 1358 | /* They cross sections, so it can only be a cross from section |
628 | * 2 to section 3 | 1359 | * 2 to section 3 |
629 | */ | 1360 | */ |
630 | l2x0_clean_range(new_start, | 1361 | l2c210_clean_range(new_start, |
631 | bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); | 1362 | bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); |
632 | l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), | 1363 | l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), |
633 | new_end); | 1364 | new_end); |
634 | } | 1365 | } |
635 | 1366 | ||
@@ -643,7 +1374,7 @@ static void bcm_flush_range(unsigned long start, unsigned long end) | |||
643 | return; | 1374 | return; |
644 | 1375 | ||
645 | if ((end - start) >= l2x0_size) { | 1376 | if ((end - start) >= l2x0_size) { |
646 | l2x0_flush_all(); | 1377 | outer_cache.flush_all(); |
647 | return; | 1378 | return; |
648 | } | 1379 | } |
649 | 1380 | ||
@@ -652,283 +1383,67 @@ static void bcm_flush_range(unsigned long start, unsigned long end) | |||
652 | 1383 | ||
653 | /* normal case, no cross section between start and end */ | 1384 | /* normal case, no cross section between start and end */ |
654 | if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { | 1385 | if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { |
655 | l2x0_flush_range(new_start, new_end); | 1386 | l2c210_flush_range(new_start, new_end); |
656 | return; | 1387 | return; |
657 | } | 1388 | } |
658 | 1389 | ||
659 | /* They cross sections, so it can only be a cross from section | 1390 | /* They cross sections, so it can only be a cross from section |
660 | * 2 to section 3 | 1391 | * 2 to section 3 |
661 | */ | 1392 | */ |
662 | l2x0_flush_range(new_start, | 1393 | l2c210_flush_range(new_start, |
663 | bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); | 1394 | bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); |
664 | l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), | 1395 | l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), |
665 | new_end); | 1396 | new_end); |
666 | } | 1397 | } |
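All three Broadcom wrappers share the same shape: translate both ends of the range, and if the range crosses the one problematic section boundary, split it into two back-to-back L2C-210 operations. A reduced sketch of that split (the boundary value and addresses are made up for the example, and the per-section address translation is left out):

#include <stdio.h>

#define SEC3_START 0x60000000ul               /* illustrative boundary only */

static void l2_op(unsigned long start, unsigned long end)
{
	printf("  op on [%#lx, %#lx)\n", start, end);
}

/* Apply an operation, splitting it when it straddles the section boundary. */
static void bcm_style_range(unsigned long start, unsigned long end)
{
	if (end <= SEC3_START || start >= SEC3_START) {
		l2_op(start, end);                    /* common case: no crossing */
		return;
	}
	l2_op(start, SEC3_START);                 /* first half, up to the boundary */
	l2_op(SEC3_START, end);                   /* second half, from the boundary */
}

int main(void)
{
	bcm_style_range(0x5ff00000ul, 0x60100000ul);  /* crosses: two operations */
	bcm_style_range(0x40000000ul, 0x40001000ul);  /* does not cross: one operation */
	return 0;
}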
667 | 1398 | ||
668 | static void __init l2x0_of_setup(const struct device_node *np, | 1399 | /* Broadcom L2C-310 controllers are ARM r3p2 or later, and require no fixups */ |
669 | u32 *aux_val, u32 *aux_mask) | 1400 | static const struct l2c_init_data of_bcm_l2x0_data __initconst = { |
670 | { | 1401 | .type = "BCM-L2C-310", |
671 | u32 data[2] = { 0, 0 }; | 1402 | .way_size_0 = SZ_8K, |
672 | u32 tag = 0; | 1403 | .num_lock = 8, |
673 | u32 dirty = 0; | 1404 | .of_parse = l2c310_of_parse, |
674 | u32 val = 0, mask = 0; | 1405 | .enable = l2c310_enable, |
675 | 1406 | .save = l2c310_save, | |
676 | of_property_read_u32(np, "arm,tag-latency", &tag); | 1407 | .outer_cache = { |
677 | if (tag) { | 1408 | .inv_range = bcm_inv_range, |
678 | mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK; | 1409 | .clean_range = bcm_clean_range, |
679 | val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT; | 1410 | .flush_range = bcm_flush_range, |
680 | } | 1411 | .flush_all = l2c210_flush_all, |
681 | 1412 | .disable = l2c310_disable, | |
682 | of_property_read_u32_array(np, "arm,data-latency", | 1413 | .sync = l2c210_sync, |
683 | data, ARRAY_SIZE(data)); | 1414 | .resume = l2c310_resume, |
684 | if (data[0] && data[1]) { | 1415 | }, |
685 | mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK | | 1416 | }; |
686 | L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK; | ||
687 | val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) | | ||
688 | ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT); | ||
689 | } | ||
690 | |||
691 | of_property_read_u32(np, "arm,dirty-latency", &dirty); | ||
692 | if (dirty) { | ||
693 | mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK; | ||
694 | val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT; | ||
695 | } | ||
696 | |||
697 | *aux_val &= ~mask; | ||
698 | *aux_val |= val; | ||
699 | *aux_mask &= ~mask; | ||
700 | } | ||
701 | |||
702 | static void __init pl310_of_setup(const struct device_node *np, | ||
703 | u32 *aux_val, u32 *aux_mask) | ||
704 | { | ||
705 | u32 data[3] = { 0, 0, 0 }; | ||
706 | u32 tag[3] = { 0, 0, 0 }; | ||
707 | u32 filter[2] = { 0, 0 }; | ||
708 | |||
709 | of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag)); | ||
710 | if (tag[0] && tag[1] && tag[2]) | ||
711 | writel_relaxed( | ||
712 | ((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) | | ||
713 | ((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) | | ||
714 | ((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT), | ||
715 | l2x0_base + L2X0_TAG_LATENCY_CTRL); | ||
716 | |||
717 | of_property_read_u32_array(np, "arm,data-latency", | ||
718 | data, ARRAY_SIZE(data)); | ||
719 | if (data[0] && data[1] && data[2]) | ||
720 | writel_relaxed( | ||
721 | ((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) | | ||
722 | ((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) | | ||
723 | ((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT), | ||
724 | l2x0_base + L2X0_DATA_LATENCY_CTRL); | ||
725 | |||
726 | of_property_read_u32_array(np, "arm,filter-ranges", | ||
727 | filter, ARRAY_SIZE(filter)); | ||
728 | if (filter[1]) { | ||
729 | writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M), | ||
730 | l2x0_base + L2X0_ADDR_FILTER_END); | ||
731 | writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN, | ||
732 | l2x0_base + L2X0_ADDR_FILTER_START); | ||
733 | } | ||
734 | } | ||
735 | |||
736 | static void __init pl310_save(void) | ||
737 | { | ||
738 | u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) & | ||
739 | L2X0_CACHE_ID_RTL_MASK; | ||
740 | |||
741 | l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base + | ||
742 | L2X0_TAG_LATENCY_CTRL); | ||
743 | l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base + | ||
744 | L2X0_DATA_LATENCY_CTRL); | ||
745 | l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base + | ||
746 | L2X0_ADDR_FILTER_END); | ||
747 | l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base + | ||
748 | L2X0_ADDR_FILTER_START); | ||
749 | |||
750 | if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) { | ||
751 | /* | ||
752 | * From r2p0, there is Prefetch offset/control register | ||
753 | */ | ||
754 | l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base + | ||
755 | L2X0_PREFETCH_CTRL); | ||
756 | /* | ||
757 | * From r3p0, there is Power control register | ||
758 | */ | ||
759 | if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0) | ||
760 | l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base + | ||
761 | L2X0_POWER_CTRL); | ||
762 | } | ||
763 | } | ||
764 | 1417 | ||
765 | static void aurora_save(void) | 1418 | static void __init tauros3_save(void __iomem *base) |
766 | { | 1419 | { |
767 | l2x0_saved_regs.ctrl = readl_relaxed(l2x0_base + L2X0_CTRL); | 1420 | l2c_save(base); |
768 | l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); | ||
769 | } | ||
770 | 1421 | ||
771 | static void __init tauros3_save(void) | ||
772 | { | ||
773 | l2x0_saved_regs.aux2_ctrl = | 1422 | l2x0_saved_regs.aux2_ctrl = |
774 | readl_relaxed(l2x0_base + TAUROS3_AUX2_CTRL); | 1423 | readl_relaxed(base + TAUROS3_AUX2_CTRL); |
775 | l2x0_saved_regs.prefetch_ctrl = | 1424 | l2x0_saved_regs.prefetch_ctrl = |
776 | readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL); | 1425 | readl_relaxed(base + L310_PREFETCH_CTRL); |
777 | } | ||
778 | |||
779 | static void l2x0_resume(void) | ||
780 | { | ||
781 | if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { | ||
782 | /* restore aux ctrl and enable l2 */ | ||
783 | l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID)); | ||
784 | |||
785 | writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base + | ||
786 | L2X0_AUX_CTRL); | ||
787 | |||
788 | l2x0_inv_all(); | ||
789 | |||
790 | writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL); | ||
791 | } | ||
792 | } | ||
793 | |||
794 | static void pl310_resume(void) | ||
795 | { | ||
796 | u32 l2x0_revision; | ||
797 | |||
798 | if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { | ||
799 | /* restore pl310 setup */ | ||
800 | writel_relaxed(l2x0_saved_regs.tag_latency, | ||
801 | l2x0_base + L2X0_TAG_LATENCY_CTRL); | ||
802 | writel_relaxed(l2x0_saved_regs.data_latency, | ||
803 | l2x0_base + L2X0_DATA_LATENCY_CTRL); | ||
804 | writel_relaxed(l2x0_saved_regs.filter_end, | ||
805 | l2x0_base + L2X0_ADDR_FILTER_END); | ||
806 | writel_relaxed(l2x0_saved_regs.filter_start, | ||
807 | l2x0_base + L2X0_ADDR_FILTER_START); | ||
808 | |||
809 | l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) & | ||
810 | L2X0_CACHE_ID_RTL_MASK; | ||
811 | |||
812 | if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) { | ||
813 | writel_relaxed(l2x0_saved_regs.prefetch_ctrl, | ||
814 | l2x0_base + L2X0_PREFETCH_CTRL); | ||
815 | if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0) | ||
816 | writel_relaxed(l2x0_saved_regs.pwr_ctrl, | ||
817 | l2x0_base + L2X0_POWER_CTRL); | ||
818 | } | ||
819 | } | ||
820 | |||
821 | l2x0_resume(); | ||
822 | } | ||
823 | |||
824 | static void aurora_resume(void) | ||
825 | { | ||
826 | if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { | ||
827 | writel_relaxed(l2x0_saved_regs.aux_ctrl, | ||
828 | l2x0_base + L2X0_AUX_CTRL); | ||
829 | writel_relaxed(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL); | ||
830 | } | ||
831 | } | 1426 | } |
832 | 1427 | ||
833 | static void tauros3_resume(void) | 1428 | static void tauros3_resume(void) |
834 | { | 1429 | { |
835 | if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { | 1430 | void __iomem *base = l2x0_base; |
1431 | |||
1432 | if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) { | ||
836 | writel_relaxed(l2x0_saved_regs.aux2_ctrl, | 1433 | writel_relaxed(l2x0_saved_regs.aux2_ctrl, |
837 | l2x0_base + TAUROS3_AUX2_CTRL); | 1434 | base + TAUROS3_AUX2_CTRL); |
838 | writel_relaxed(l2x0_saved_regs.prefetch_ctrl, | 1435 | writel_relaxed(l2x0_saved_regs.prefetch_ctrl, |
839 | l2x0_base + L2X0_PREFETCH_CTRL); | 1436 | base + L310_PREFETCH_CTRL); |
840 | } | ||
841 | 1437 | ||
842 | l2x0_resume(); | 1438 | l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8); |
843 | } | ||
844 | |||
845 | static void __init aurora_broadcast_l2_commands(void) | ||
846 | { | ||
847 | __u32 u; | ||
848 | /* Enable Broadcasting of cache commands to L2*/ | ||
849 | __asm__ __volatile__("mrc p15, 1, %0, c15, c2, 0" : "=r"(u)); | ||
850 | u |= AURORA_CTRL_FW; /* Set the FW bit */ | ||
851 | __asm__ __volatile__("mcr p15, 1, %0, c15, c2, 0\n" : : "r"(u)); | ||
852 | isb(); | ||
853 | } | ||
854 | |||
855 | static void __init aurora_of_setup(const struct device_node *np, | ||
856 | u32 *aux_val, u32 *aux_mask) | ||
857 | { | ||
858 | u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU; | ||
859 | u32 mask = AURORA_ACR_REPLACEMENT_MASK; | ||
860 | |||
861 | of_property_read_u32(np, "cache-id-part", | ||
862 | &cache_id_part_number_from_dt); | ||
863 | |||
864 | /* Determine and save the write policy */ | ||
865 | l2_wt_override = of_property_read_bool(np, "wt-override"); | ||
866 | |||
867 | if (l2_wt_override) { | ||
868 | val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY; | ||
869 | mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK; | ||
870 | } | 1439 | } |
871 | |||
872 | *aux_val &= ~mask; | ||
873 | *aux_val |= val; | ||
874 | *aux_mask &= ~mask; | ||
875 | } | 1440 | } |
876 | 1441 | ||
877 | static const struct l2x0_of_data pl310_data = { | 1442 | static const struct l2c_init_data of_tauros3_data __initconst = { |
878 | .setup = pl310_of_setup, | 1443 | .type = "Tauros3", |
879 | .save = pl310_save, | 1444 | .way_size_0 = SZ_8K, |
880 | .outer_cache = { | 1445 | .num_lock = 8, |
881 | .resume = pl310_resume, | 1446 | .enable = l2c_enable, |
882 | .inv_range = l2x0_inv_range, | ||
883 | .clean_range = l2x0_clean_range, | ||
884 | .flush_range = l2x0_flush_range, | ||
885 | .sync = l2x0_cache_sync, | ||
886 | .flush_all = l2x0_flush_all, | ||
887 | .inv_all = l2x0_inv_all, | ||
888 | .disable = l2x0_disable, | ||
889 | }, | ||
890 | }; | ||
891 | |||
892 | static const struct l2x0_of_data l2x0_data = { | ||
893 | .setup = l2x0_of_setup, | ||
894 | .save = NULL, | ||
895 | .outer_cache = { | ||
896 | .resume = l2x0_resume, | ||
897 | .inv_range = l2x0_inv_range, | ||
898 | .clean_range = l2x0_clean_range, | ||
899 | .flush_range = l2x0_flush_range, | ||
900 | .sync = l2x0_cache_sync, | ||
901 | .flush_all = l2x0_flush_all, | ||
902 | .inv_all = l2x0_inv_all, | ||
903 | .disable = l2x0_disable, | ||
904 | }, | ||
905 | }; | ||
906 | |||
907 | static const struct l2x0_of_data aurora_with_outer_data = { | ||
908 | .setup = aurora_of_setup, | ||
909 | .save = aurora_save, | ||
910 | .outer_cache = { | ||
911 | .resume = aurora_resume, | ||
912 | .inv_range = aurora_inv_range, | ||
913 | .clean_range = aurora_clean_range, | ||
914 | .flush_range = aurora_flush_range, | ||
915 | .sync = l2x0_cache_sync, | ||
916 | .flush_all = l2x0_flush_all, | ||
917 | .inv_all = l2x0_inv_all, | ||
918 | .disable = l2x0_disable, | ||
919 | }, | ||
920 | }; | ||
921 | |||
922 | static const struct l2x0_of_data aurora_no_outer_data = { | ||
923 | .setup = aurora_of_setup, | ||
924 | .save = aurora_save, | ||
925 | .outer_cache = { | ||
926 | .resume = aurora_resume, | ||
927 | }, | ||
928 | }; | ||
929 | |||
930 | static const struct l2x0_of_data tauros3_data = { | ||
931 | .setup = NULL, | ||
932 | .save = tauros3_save, | 1447 | .save = tauros3_save, |
933 | /* Tauros3 broadcasts L1 cache operations to L2 */ | 1448 | /* Tauros3 broadcasts L1 cache operations to L2 */ |
934 | .outer_cache = { | 1449 | .outer_cache = { |
@@ -936,43 +1451,26 @@ static const struct l2x0_of_data tauros3_data = { | |||
936 | }, | 1451 | }, |
937 | }; | 1452 | }; |
938 | 1453 | ||
939 | static const struct l2x0_of_data bcm_l2x0_data = { | 1454 | #define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns } |
940 | .setup = pl310_of_setup, | ||
941 | .save = pl310_save, | ||
942 | .outer_cache = { | ||
943 | .resume = pl310_resume, | ||
944 | .inv_range = bcm_inv_range, | ||
945 | .clean_range = bcm_clean_range, | ||
946 | .flush_range = bcm_flush_range, | ||
947 | .sync = l2x0_cache_sync, | ||
948 | .flush_all = l2x0_flush_all, | ||
949 | .inv_all = l2x0_inv_all, | ||
950 | .disable = l2x0_disable, | ||
951 | }, | ||
952 | }; | ||
953 | |||
954 | static const struct of_device_id l2x0_ids[] __initconst = { | 1455 | static const struct of_device_id l2x0_ids[] __initconst = { |
955 | { .compatible = "arm,l210-cache", .data = (void *)&l2x0_data }, | 1456 | L2C_ID("arm,l210-cache", of_l2c210_data), |
956 | { .compatible = "arm,l220-cache", .data = (void *)&l2x0_data }, | 1457 | L2C_ID("arm,l220-cache", of_l2c220_data), |
957 | { .compatible = "arm,pl310-cache", .data = (void *)&pl310_data }, | 1458 | L2C_ID("arm,pl310-cache", of_l2c310_data), |
958 | { .compatible = "bcm,bcm11351-a2-pl310-cache", /* deprecated name */ | 1459 | L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data), |
959 | .data = (void *)&bcm_l2x0_data}, | 1460 | L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data), |
960 | { .compatible = "brcm,bcm11351-a2-pl310-cache", | 1461 | L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data), |
961 | .data = (void *)&bcm_l2x0_data}, | 1462 | L2C_ID("marvell,tauros3-cache", of_tauros3_data), |
962 | { .compatible = "marvell,aurora-outer-cache", | 1463 | /* Deprecated IDs */ |
963 | .data = (void *)&aurora_with_outer_data}, | 1464 | L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data), |
964 | { .compatible = "marvell,aurora-system-cache", | ||
965 | .data = (void *)&aurora_no_outer_data}, | ||
966 | { .compatible = "marvell,tauros3-cache", | ||
967 | .data = (void *)&tauros3_data }, | ||
968 | {} | 1465 | {} |
969 | }; | 1466 | }; |
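The L2C_ID() macro just shortens the of_device_id initialisers so that each supported "compatible" string maps to one init descriptor. A stand-alone sketch of the same table-and-lookup idea (types simplified; in the kernel the matching is done by of_match_node()):

#include <stdio.h>
#include <string.h>

struct init_data { const char *type; };

struct dt_id { const char *compatible; const struct init_data *data; };

#define L2C_ID(name, fns) { .compatible = name, .data = &fns }

static const struct init_data l2c210 = { "L2C-210" };
static const struct init_data l2c310 = { "L2C-310" };

static const struct dt_id ids[] = {
	L2C_ID("arm,l210-cache",  l2c210),
	L2C_ID("arm,pl310-cache", l2c310),
	{ NULL, NULL }                            /* sentinel, as in of_device_id tables */
};

static const struct init_data *match(const char *compatible)
{
	const struct dt_id *id;

	for (id = ids; id->compatible; id++)
		if (!strcmp(id->compatible, compatible))
			return id->data;
	return NULL;
}

int main(void)
{
	const struct init_data *d = match("arm,pl310-cache");

	printf("%s\n", d ? d->type : "no match");
	return 0;
}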
970 | 1467 | ||
971 | int __init l2x0_of_init(u32 aux_val, u32 aux_mask) | 1468 | int __init l2x0_of_init(u32 aux_val, u32 aux_mask) |
972 | { | 1469 | { |
1470 | const struct l2c_init_data *data; | ||
973 | struct device_node *np; | 1471 | struct device_node *np; |
974 | const struct l2x0_of_data *data; | ||
975 | struct resource res; | 1472 | struct resource res; |
1473 | u32 cache_id, old_aux; | ||
976 | 1474 | ||
977 | np = of_find_matching_node(NULL, l2x0_ids); | 1475 | np = of_find_matching_node(NULL, l2x0_ids); |
978 | if (!np) | 1476 | if (!np) |
@@ -989,23 +1487,29 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask) | |||
989 | 1487 | ||
990 | data = of_match_node(l2x0_ids, np)->data; | 1488 | data = of_match_node(l2x0_ids, np)->data; |
991 | 1489 | ||
992 | /* L2 configuration can only be changed if the cache is disabled */ | 1490 | old_aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); |
993 | if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { | 1491 | if (old_aux != ((old_aux & aux_mask) | aux_val)) { |
994 | if (data->setup) | 1492 | pr_warn("L2C: platform modifies aux control register: 0x%08x -> 0x%08x\n", |
995 | data->setup(np, &aux_val, &aux_mask); | 1493 | old_aux, (old_aux & aux_mask) | aux_val); |
996 | 1494 | } else if (aux_mask != ~0U && aux_val != 0) { | |
997 | /* For aurora cache in no outer mode select the | 1495 | pr_alert("L2C: platform provided aux values match the hardware, so have no effect. Please remove them.\n"); |
998 | * correct mode using the coprocessor*/ | ||
999 | if (data == &aurora_no_outer_data) | ||
1000 | aurora_broadcast_l2_commands(); | ||
1001 | } | 1496 | } |
1002 | 1497 | ||
1003 | if (data->save) | 1498 | /* All L2 caches are unified, so this property should be specified */ |
1004 | data->save(); | 1499 | if (!of_property_read_bool(np, "cache-unified")) |
1500 | pr_err("L2C: device tree omits to specify unified cache\n"); | ||
1501 | |||
1502 | /* L2 configuration can only be changed if the cache is disabled */ | ||
1503 | if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) | ||
1504 | if (data->of_parse) | ||
1505 | data->of_parse(np, &aux_val, &aux_mask); | ||
1506 | |||
1507 | if (cache_id_part_number_from_dt) | ||
1508 | cache_id = cache_id_part_number_from_dt; | ||
1509 | else | ||
1510 | cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); | ||
1005 | 1511 | ||
1006 | of_init = true; | 1512 | __l2c_init(data, aux_val, aux_mask, cache_id); |
1007 | memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache)); | ||
1008 | l2x0_init(l2x0_base, aux_val, aux_mask); | ||
1009 | 1513 | ||
1010 | return 0; | 1514 | return 0; |
1011 | } | 1515 | } |
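The warning logic added to l2x0_of_init() boils down to one comparison: if applying the platform's (aux_val, aux_mask) pair to the value read back from the hardware changes nothing, the pair is redundant and can be removed; if it changes something, the platform is overriding the hardware and the warning names both values. A tiny demonstration of that check (the register values are invented for the example):

#include <stdio.h>

static void check_aux(unsigned old_aux, unsigned aux_val, unsigned aux_mask)
{
	unsigned new_aux = (old_aux & aux_mask) | aux_val;

	if (old_aux != new_aux)
		printf("platform modifies aux: %#x -> %#x\n", old_aux, new_aux);
	else if (aux_mask != ~0u && aux_val != 0)
		printf("platform aux values have no effect\n");
	else
		printf("no platform aux override\n");
}

int main(void)
{
	check_aux(0x02050000, 0x30000000, ~0x30000000u);  /* really changes two bits */
	check_aux(0x32050000, 0x30000000, ~0x30000000u);  /* bits already set: no effect */
	check_aux(0x02050000, 0x00000000, ~0u);           /* nothing requested */
	return 0;
}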
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index 778bcf88ee79..615c99e38ba1 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S | |||
@@ -59,7 +59,7 @@ ENTRY(v7_invalidate_l1) | |||
59 | bgt 2b | 59 | bgt 2b |
60 | cmp r2, #0 | 60 | cmp r2, #0 |
61 | bgt 1b | 61 | bgt 1b |
62 | dsb | 62 | dsb st |
63 | isb | 63 | isb |
64 | mov pc, lr | 64 | mov pc, lr |
65 | ENDPROC(v7_invalidate_l1) | 65 | ENDPROC(v7_invalidate_l1) |
@@ -166,7 +166,7 @@ skip: | |||
166 | finished: | 166 | finished: |
167 | mov r10, #0 @ swith back to cache level 0 | 167 | mov r10, #0 @ swith back to cache level 0 |
168 | mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr | 168 | mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr |
169 | dsb | 169 | dsb st |
170 | isb | 170 | isb |
171 | mov pc, lr | 171 | mov pc, lr |
172 | ENDPROC(v7_flush_dcache_all) | 172 | ENDPROC(v7_flush_dcache_all) |
@@ -335,7 +335,7 @@ ENTRY(v7_flush_kern_dcache_area) | |||
335 | add r0, r0, r2 | 335 | add r0, r0, r2 |
336 | cmp r0, r1 | 336 | cmp r0, r1 |
337 | blo 1b | 337 | blo 1b |
338 | dsb | 338 | dsb st |
339 | mov pc, lr | 339 | mov pc, lr |
340 | ENDPROC(v7_flush_kern_dcache_area) | 340 | ENDPROC(v7_flush_kern_dcache_area) |
341 | 341 | ||
@@ -368,7 +368,7 @@ v7_dma_inv_range: | |||
368 | add r0, r0, r2 | 368 | add r0, r0, r2 |
369 | cmp r0, r1 | 369 | cmp r0, r1 |
370 | blo 1b | 370 | blo 1b |
371 | dsb | 371 | dsb st |
372 | mov pc, lr | 372 | mov pc, lr |
373 | ENDPROC(v7_dma_inv_range) | 373 | ENDPROC(v7_dma_inv_range) |
374 | 374 | ||
@@ -390,7 +390,7 @@ v7_dma_clean_range: | |||
390 | add r0, r0, r2 | 390 | add r0, r0, r2 |
391 | cmp r0, r1 | 391 | cmp r0, r1 |
392 | blo 1b | 392 | blo 1b |
393 | dsb | 393 | dsb st |
394 | mov pc, lr | 394 | mov pc, lr |
395 | ENDPROC(v7_dma_clean_range) | 395 | ENDPROC(v7_dma_clean_range) |
396 | 396 | ||
@@ -412,7 +412,7 @@ ENTRY(v7_dma_flush_range) | |||
412 | add r0, r0, r2 | 412 | add r0, r0, r2 |
413 | cmp r0, r1 | 413 | cmp r0, r1 |
414 | blo 1b | 414 | blo 1b |
415 | dsb | 415 | dsb st |
416 | mov pc, lr | 416 | mov pc, lr |
417 | ENDPROC(v7_dma_flush_range) | 417 | ENDPROC(v7_dma_flush_range) |
418 | 418 | ||
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 5bef858568e6..4c88935654ca 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -885,7 +885,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, | |||
885 | static void __dma_page_cpu_to_dev(struct page *page, unsigned long off, | 885 | static void __dma_page_cpu_to_dev(struct page *page, unsigned long off, |
886 | size_t size, enum dma_data_direction dir) | 886 | size_t size, enum dma_data_direction dir) |
887 | { | 887 | { |
888 | unsigned long paddr; | 888 | phys_addr_t paddr; |
889 | 889 | ||
890 | dma_cache_maint_page(page, off, size, dir, dmac_map_area); | 890 | dma_cache_maint_page(page, off, size, dir, dmac_map_area); |
891 | 891 | ||
@@ -901,14 +901,15 @@ static void __dma_page_cpu_to_dev(struct page *page, unsigned long off, | |||
901 | static void __dma_page_dev_to_cpu(struct page *page, unsigned long off, | 901 | static void __dma_page_dev_to_cpu(struct page *page, unsigned long off, |
902 | size_t size, enum dma_data_direction dir) | 902 | size_t size, enum dma_data_direction dir) |
903 | { | 903 | { |
904 | unsigned long paddr = page_to_phys(page) + off; | 904 | phys_addr_t paddr = page_to_phys(page) + off; |
905 | 905 | ||
906 | /* FIXME: non-speculating: not required */ | 906 | /* FIXME: non-speculating: not required */ |
907 | /* don't bother invalidating if DMA to device */ | 907 | /* in any case, don't bother invalidating if DMA to device */ |
908 | if (dir != DMA_TO_DEVICE) | 908 | if (dir != DMA_TO_DEVICE) { |
909 | outer_inv_range(paddr, paddr + size); | 909 | outer_inv_range(paddr, paddr + size); |
910 | 910 | ||
911 | dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); | 911 | dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); |
912 | } | ||
912 | 913 | ||
913 | /* | 914 | /* |
914 | * Mark the D-cache clean for these pages to avoid extra flushing. | 915 | * Mark the D-cache clean for these pages to avoid extra flushing. |
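Besides the brace fix (dmac_unmap_area is now skipped for DMA_TO_DEVICE as well), this hunk widens paddr to phys_addr_t. On an LPAE system physical addresses can exceed 32 bits, and storing one in a 32-bit unsigned long silently truncates it, as this small host-side illustration shows (the address is made up):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t phys = 0x100000000ull + 0x8000;   /* a page just above 4 GiB (LPAE) */
	uint32_t truncated = (uint32_t)phys;       /* what a 32-bit unsigned long keeps */

	printf("phys_addr_t value : 0x%" PRIx64 "\n", phys);        /* 0x100008000 */
	printf("truncated to 32bit: 0x%" PRIx32 "\n", truncated);   /* 0x8000 */
	return 0;
}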
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 3387e60e4ea3..43d54f5b26b9 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c | |||
@@ -104,17 +104,20 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig | |||
104 | #define flush_icache_alias(pfn,vaddr,len) do { } while (0) | 104 | #define flush_icache_alias(pfn,vaddr,len) do { } while (0) |
105 | #endif | 105 | #endif |
106 | 106 | ||
107 | #define FLAG_PA_IS_EXEC 1 | ||
108 | #define FLAG_PA_CORE_IN_MM 2 | ||
109 | |||
107 | static void flush_ptrace_access_other(void *args) | 110 | static void flush_ptrace_access_other(void *args) |
108 | { | 111 | { |
109 | __flush_icache_all(); | 112 | __flush_icache_all(); |
110 | } | 113 | } |
111 | 114 | ||
112 | static | 115 | static inline |
113 | void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, | 116 | void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr, |
114 | unsigned long uaddr, void *kaddr, unsigned long len) | 117 | unsigned long len, unsigned int flags) |
115 | { | 118 | { |
116 | if (cache_is_vivt()) { | 119 | if (cache_is_vivt()) { |
117 | if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { | 120 | if (flags & FLAG_PA_CORE_IN_MM) { |
118 | unsigned long addr = (unsigned long)kaddr; | 121 | unsigned long addr = (unsigned long)kaddr; |
119 | __cpuc_coherent_kern_range(addr, addr + len); | 122 | __cpuc_coherent_kern_range(addr, addr + len); |
120 | } | 123 | } |
@@ -128,7 +131,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, | |||
128 | } | 131 | } |
129 | 132 | ||
130 | /* VIPT non-aliasing D-cache */ | 133 | /* VIPT non-aliasing D-cache */ |
131 | if (vma->vm_flags & VM_EXEC) { | 134 | if (flags & FLAG_PA_IS_EXEC) { |
132 | unsigned long addr = (unsigned long)kaddr; | 135 | unsigned long addr = (unsigned long)kaddr; |
133 | if (icache_is_vipt_aliasing()) | 136 | if (icache_is_vipt_aliasing()) |
134 | flush_icache_alias(page_to_pfn(page), uaddr, len); | 137 | flush_icache_alias(page_to_pfn(page), uaddr, len); |
@@ -140,6 +143,26 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, | |||
140 | } | 143 | } |
141 | } | 144 | } |
142 | 145 | ||
146 | static | ||
147 | void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, | ||
148 | unsigned long uaddr, void *kaddr, unsigned long len) | ||
149 | { | ||
150 | unsigned int flags = 0; | ||
151 | if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) | ||
152 | flags |= FLAG_PA_CORE_IN_MM; | ||
153 | if (vma->vm_flags & VM_EXEC) | ||
154 | flags |= FLAG_PA_IS_EXEC; | ||
155 | __flush_ptrace_access(page, uaddr, kaddr, len, flags); | ||
156 | } | ||
157 | |||
158 | void flush_uprobe_xol_access(struct page *page, unsigned long uaddr, | ||
159 | void *kaddr, unsigned long len) | ||
160 | { | ||
161 | unsigned int flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC; | ||
162 | |||
163 | __flush_ptrace_access(page, uaddr, kaddr, len, flags); | ||
164 | } | ||
165 | |||
143 | /* | 166 | /* |
144 | * Copy user data from/to a page which is mapped into a different | 167 | * Copy user data from/to a page which is mapped into a different |
145 | * processes address space. Really, we want to allow our "user | 168 | * processes address space. Really, we want to allow our "user |
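The refactoring above separates "inspect the vma" from "do the cache maintenance" so that the new flush_uprobe_xol_access() can reuse the worker without having a vma at all: it simply asserts both flags. A condensed sketch of that split (the flag values are copied from the hunk, everything else is simplified):

#include <stdio.h>

#define FLAG_PA_IS_EXEC    1
#define FLAG_PA_CORE_IN_MM 2

struct vma { int executable; int current_cpu_in_mm; };

static void do_flush(unsigned int flags)
{
	if (flags & FLAG_PA_CORE_IN_MM)
		puts("flush kernel alias on this CPU");
	if (flags & FLAG_PA_IS_EXEC)
		puts("flush/invalidate I-cache");
}

/* ptrace path: derive the flags from the vma */
static void flush_ptrace_style(const struct vma *vma)
{
	unsigned int flags = 0;

	if (vma->current_cpu_in_mm)
		flags |= FLAG_PA_CORE_IN_MM;
	if (vma->executable)
		flags |= FLAG_PA_IS_EXEC;
	do_flush(flags);
}

/* uprobes XOL path: no vma, both conditions known to hold */
static void flush_uprobe_style(void)
{
	do_flush(FLAG_PA_CORE_IN_MM | FLAG_PA_IS_EXEC);
}

int main(void)
{
	struct vma v = { .executable = 1, .current_cpu_in_mm = 0 };

	flush_ptrace_style(&v);
	flush_uprobe_style();
	return 0;
}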
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c index 21b9e1bf9b77..45aeaaca9052 100644 --- a/arch/arm/mm/highmem.c +++ b/arch/arm/mm/highmem.c | |||
@@ -18,6 +18,21 @@ | |||
18 | #include <asm/tlbflush.h> | 18 | #include <asm/tlbflush.h> |
19 | #include "mm.h" | 19 | #include "mm.h" |
20 | 20 | ||
21 | pte_t *fixmap_page_table; | ||
22 | |||
23 | static inline void set_fixmap_pte(int idx, pte_t pte) | ||
24 | { | ||
25 | unsigned long vaddr = __fix_to_virt(idx); | ||
26 | set_pte_ext(fixmap_page_table + idx, pte, 0); | ||
27 | local_flush_tlb_kernel_page(vaddr); | ||
28 | } | ||
29 | |||
30 | static inline pte_t get_fixmap_pte(unsigned long vaddr) | ||
31 | { | ||
32 | unsigned long idx = __virt_to_fix(vaddr); | ||
33 | return *(fixmap_page_table + idx); | ||
34 | } | ||
35 | |||
21 | void *kmap(struct page *page) | 36 | void *kmap(struct page *page) |
22 | { | 37 | { |
23 | might_sleep(); | 38 | might_sleep(); |
@@ -63,20 +78,20 @@ void *kmap_atomic(struct page *page) | |||
63 | type = kmap_atomic_idx_push(); | 78 | type = kmap_atomic_idx_push(); |
64 | 79 | ||
65 | idx = type + KM_TYPE_NR * smp_processor_id(); | 80 | idx = type + KM_TYPE_NR * smp_processor_id(); |
66 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | 81 | vaddr = __fix_to_virt(idx); |
67 | #ifdef CONFIG_DEBUG_HIGHMEM | 82 | #ifdef CONFIG_DEBUG_HIGHMEM |
68 | /* | 83 | /* |
69 | * With debugging enabled, kunmap_atomic forces that entry to 0. | 84 | * With debugging enabled, kunmap_atomic forces that entry to 0. |
70 | * Make sure it was indeed properly unmapped. | 85 | * Make sure it was indeed properly unmapped. |
71 | */ | 86 | */ |
72 | BUG_ON(!pte_none(get_top_pte(vaddr))); | 87 | BUG_ON(!pte_none(*(fixmap_page_table + idx))); |
73 | #endif | 88 | #endif |
74 | /* | 89 | /* |
75 | * When debugging is off, kunmap_atomic leaves the previous mapping | 90 | * When debugging is off, kunmap_atomic leaves the previous mapping |
76 | * in place, so the contained TLB flush ensures the TLB is updated | 91 | * in place, so the contained TLB flush ensures the TLB is updated |
77 | * with the new mapping. | 92 | * with the new mapping. |
78 | */ | 93 | */ |
79 | set_top_pte(vaddr, mk_pte(page, kmap_prot)); | 94 | set_fixmap_pte(idx, mk_pte(page, kmap_prot)); |
80 | 95 | ||
81 | return (void *)vaddr; | 96 | return (void *)vaddr; |
82 | } | 97 | } |
@@ -94,8 +109,8 @@ void __kunmap_atomic(void *kvaddr) | |||
94 | if (cache_is_vivt()) | 109 | if (cache_is_vivt()) |
95 | __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); | 110 | __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); |
96 | #ifdef CONFIG_DEBUG_HIGHMEM | 111 | #ifdef CONFIG_DEBUG_HIGHMEM |
97 | BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); | 112 | BUG_ON(vaddr != __fix_to_virt(idx)); |
98 | set_top_pte(vaddr, __pte(0)); | 113 | set_fixmap_pte(idx, __pte(0)); |
99 | #else | 114 | #else |
100 | (void) idx; /* to kill a warning */ | 115 | (void) idx; /* to kill a warning */ |
101 | #endif | 116 | #endif |
@@ -117,11 +132,11 @@ void *kmap_atomic_pfn(unsigned long pfn) | |||
117 | 132 | ||
118 | type = kmap_atomic_idx_push(); | 133 | type = kmap_atomic_idx_push(); |
119 | idx = type + KM_TYPE_NR * smp_processor_id(); | 134 | idx = type + KM_TYPE_NR * smp_processor_id(); |
120 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | 135 | vaddr = __fix_to_virt(idx); |
121 | #ifdef CONFIG_DEBUG_HIGHMEM | 136 | #ifdef CONFIG_DEBUG_HIGHMEM |
122 | BUG_ON(!pte_none(get_top_pte(vaddr))); | 137 | BUG_ON(!pte_none(*(fixmap_page_table + idx))); |
123 | #endif | 138 | #endif |
124 | set_top_pte(vaddr, pfn_pte(pfn, kmap_prot)); | 139 | set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot)); |
125 | 140 | ||
126 | return (void *)vaddr; | 141 | return (void *)vaddr; |
127 | } | 142 | } |
@@ -133,5 +148,5 @@ struct page *kmap_atomic_to_page(const void *ptr) | |||
133 | if (vaddr < FIXADDR_START) | 148 | if (vaddr < FIXADDR_START) |
134 | return virt_to_page(ptr); | 149 | return virt_to_page(ptr); |
135 | 150 | ||
136 | return pte_page(get_top_pte(vaddr)); | 151 | return pte_page(get_fixmap_pte(vaddr)); |
137 | } | 152 | } |
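With a dedicated fixmap page table, the kmap_atomic index now maps directly to a fixmap slot and the PTE is looked up by index rather than by walking from the virtual address. The index/address relationship is plain arithmetic, roughly as below; FIXADDR_START and PAGE_SHIFT are assumptions for the example, not quoted from the kernel headers:

#include <stdio.h>

#define PAGE_SHIFT    12
#define FIXADDR_START 0xffc00000ul            /* assumed base of the fixmap area */

static unsigned long fix_to_virt(unsigned int idx)
{
	return FIXADDR_START + ((unsigned long)idx << PAGE_SHIFT);
}

static unsigned int virt_to_fix(unsigned long vaddr)
{
	return (vaddr - FIXADDR_START) >> PAGE_SHIFT;
}

int main(void)
{
	unsigned int idx = 5;                      /* e.g. type + KM_TYPE_NR * cpu */
	unsigned long vaddr = fix_to_virt(idx);

	printf("idx %u -> vaddr %#lx -> idx %u\n", idx, vaddr, virt_to_fix(vaddr));
	return 0;
}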
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 928d596d9ab4..659c75d808dc 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/dma-contiguous.h> | 23 | #include <linux/dma-contiguous.h> |
24 | #include <linux/sizes.h> | 24 | #include <linux/sizes.h> |
25 | 25 | ||
26 | #include <asm/cp15.h> | ||
26 | #include <asm/mach-types.h> | 27 | #include <asm/mach-types.h> |
27 | #include <asm/memblock.h> | 28 | #include <asm/memblock.h> |
28 | #include <asm/prom.h> | 29 | #include <asm/prom.h> |
@@ -36,6 +37,14 @@ | |||
36 | 37 | ||
37 | #include "mm.h" | 38 | #include "mm.h" |
38 | 39 | ||
40 | #ifdef CONFIG_CPU_CP15_MMU | ||
41 | unsigned long __init __clear_cr(unsigned long mask) | ||
42 | { | ||
43 | cr_alignment = cr_alignment & ~mask; | ||
44 | return cr_alignment; | ||
45 | } | ||
46 | #endif | ||
47 | |||
39 | static phys_addr_t phys_initrd_start __initdata = 0; | 48 | static phys_addr_t phys_initrd_start __initdata = 0; |
40 | static unsigned long phys_initrd_size __initdata = 0; | 49 | static unsigned long phys_initrd_size __initdata = 0; |
41 | 50 | ||
@@ -81,24 +90,21 @@ __tagtable(ATAG_INITRD2, parse_tag_initrd2); | |||
81 | * initialization functions, as well as show_mem() for the skipping | 90 | * initialization functions, as well as show_mem() for the skipping |
82 | * of holes in the memory map. It is populated by arm_add_memory(). | 91 | * of holes in the memory map. It is populated by arm_add_memory(). |
83 | */ | 92 | */ |
84 | struct meminfo meminfo; | ||
85 | |||
86 | void show_mem(unsigned int filter) | 93 | void show_mem(unsigned int filter) |
87 | { | 94 | { |
88 | int free = 0, total = 0, reserved = 0; | 95 | int free = 0, total = 0, reserved = 0; |
89 | int shared = 0, cached = 0, slab = 0, i; | 96 | int shared = 0, cached = 0, slab = 0; |
90 | struct meminfo * mi = &meminfo; | 97 | struct memblock_region *reg; |
91 | 98 | ||
92 | printk("Mem-info:\n"); | 99 | printk("Mem-info:\n"); |
93 | show_free_areas(filter); | 100 | show_free_areas(filter); |
94 | 101 | ||
95 | for_each_bank (i, mi) { | 102 | for_each_memblock (memory, reg) { |
96 | struct membank *bank = &mi->bank[i]; | ||
97 | unsigned int pfn1, pfn2; | 103 | unsigned int pfn1, pfn2; |
98 | struct page *page, *end; | 104 | struct page *page, *end; |
99 | 105 | ||
100 | pfn1 = bank_pfn_start(bank); | 106 | pfn1 = memblock_region_memory_base_pfn(reg); |
101 | pfn2 = bank_pfn_end(bank); | 107 | pfn2 = memblock_region_memory_end_pfn(reg); |
102 | 108 | ||
103 | page = pfn_to_page(pfn1); | 109 | page = pfn_to_page(pfn1); |
104 | end = pfn_to_page(pfn2 - 1) + 1; | 110 | end = pfn_to_page(pfn2 - 1) + 1; |
@@ -115,8 +121,9 @@ void show_mem(unsigned int filter) | |||
115 | free++; | 121 | free++; |
116 | else | 122 | else |
117 | shared += page_count(page) - 1; | 123 | shared += page_count(page) - 1; |
118 | page++; | 124 | pfn1++; |
119 | } while (page < end); | 125 | page = pfn_to_page(pfn1); |
126 | } while (pfn1 < pfn2); | ||
120 | } | 127 | } |
121 | 128 | ||
122 | printk("%d pages of RAM\n", total); | 129 | printk("%d pages of RAM\n", total); |
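Two things change in this loop: the walk is now driven by memblock regions instead of the removed meminfo banks, and the struct page pointer is re-derived from the pfn on every step, because with SPARSEMEM the mem_map backing a region is not guaranteed to be contiguous across section boundaries, so a plain page++ could walk off the array. A reduced sketch of the region-to-pfn walk (region values invented; the real iteration uses for_each_memblock() and the *_pfn helpers shown above):

#include <stdio.h>

struct region { unsigned long base_pfn, end_pfn; };

static const struct region regions[] = {      /* stand-in for memblock.memory */
	{ 0x60000, 0x60004 },
	{ 0x80000, 0x80002 },
};

int main(void)
{
	unsigned long total = 0;
	unsigned i;

	for (i = 0; i < sizeof(regions) / sizeof(regions[0]); i++) {
		unsigned long pfn;

		/* in the kernel, pfn_to_page(pfn) is re-evaluated here on each step */
		for (pfn = regions[i].base_pfn; pfn < regions[i].end_pfn; pfn++)
			total++;
	}
	printf("%lu pages of RAM\n", total);       /* 6 */
	return 0;
}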
@@ -130,16 +137,9 @@ void show_mem(unsigned int filter) | |||
130 | static void __init find_limits(unsigned long *min, unsigned long *max_low, | 137 | static void __init find_limits(unsigned long *min, unsigned long *max_low, |
131 | unsigned long *max_high) | 138 | unsigned long *max_high) |
132 | { | 139 | { |
133 | struct meminfo *mi = &meminfo; | 140 | *max_low = PFN_DOWN(memblock_get_current_limit()); |
134 | int i; | 141 | *min = PFN_UP(memblock_start_of_DRAM()); |
135 | 142 | *max_high = PFN_DOWN(memblock_end_of_DRAM()); | |
136 | /* This assumes the meminfo array is properly sorted */ | ||
137 | *min = bank_pfn_start(&mi->bank[0]); | ||
138 | for_each_bank (i, mi) | ||
139 | if (mi->bank[i].highmem) | ||
140 | break; | ||
141 | *max_low = bank_pfn_end(&mi->bank[i - 1]); | ||
142 | *max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]); | ||
143 | } | 143 | } |
144 | 144 | ||
145 | #ifdef CONFIG_ZONE_DMA | 145 | #ifdef CONFIG_ZONE_DMA |
@@ -274,14 +274,8 @@ phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align) | |||
274 | return phys; | 274 | return phys; |
275 | } | 275 | } |
276 | 276 | ||
277 | void __init arm_memblock_init(struct meminfo *mi, | 277 | void __init arm_memblock_init(const struct machine_desc *mdesc) |
278 | const struct machine_desc *mdesc) | ||
279 | { | 278 | { |
280 | int i; | ||
281 | |||
282 | for (i = 0; i < mi->nr_banks; i++) | ||
283 | memblock_add(mi->bank[i].start, mi->bank[i].size); | ||
284 | |||
285 | /* Register the kernel text, kernel data and initrd with memblock. */ | 279 | /* Register the kernel text, kernel data and initrd with memblock. */ |
286 | #ifdef CONFIG_XIP_KERNEL | 280 | #ifdef CONFIG_XIP_KERNEL |
287 | memblock_reserve(__pa(_sdata), _end - _sdata); | 281 | memblock_reserve(__pa(_sdata), _end - _sdata); |
@@ -412,54 +406,53 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn) | |||
412 | /* | 406 | /* |
413 | * The mem_map array can get very big. Free the unused area of the memory map. | 407 | * The mem_map array can get very big. Free the unused area of the memory map. |
414 | */ | 408 | */ |
415 | static void __init free_unused_memmap(struct meminfo *mi) | 409 | static void __init free_unused_memmap(void) |
416 | { | 410 | { |
417 | unsigned long bank_start, prev_bank_end = 0; | 411 | unsigned long start, prev_end = 0; |
418 | unsigned int i; | 412 | struct memblock_region *reg; |
419 | 413 | ||
420 | /* | 414 | /* |
421 | * This relies on each bank being in address order. | 415 | * This relies on each bank being in address order. |
422 | * The banks are sorted previously in bootmem_init(). | 416 | * The banks are sorted previously in bootmem_init(). |
423 | */ | 417 | */ |
424 | for_each_bank(i, mi) { | 418 | for_each_memblock(memory, reg) { |
425 | struct membank *bank = &mi->bank[i]; | 419 | start = memblock_region_memory_base_pfn(reg); |
426 | |||
427 | bank_start = bank_pfn_start(bank); | ||
428 | 420 | ||
429 | #ifdef CONFIG_SPARSEMEM | 421 | #ifdef CONFIG_SPARSEMEM |
430 | /* | 422 | /* |
431 | * Take care not to free memmap entries that don't exist | 423 | * Take care not to free memmap entries that don't exist |
432 | * due to SPARSEMEM sections which aren't present. | 424 | * due to SPARSEMEM sections which aren't present. |
433 | */ | 425 | */ |
434 | bank_start = min(bank_start, | 426 | start = min(start, |
435 | ALIGN(prev_bank_end, PAGES_PER_SECTION)); | 427 | ALIGN(prev_end, PAGES_PER_SECTION)); |
436 | #else | 428 | #else |
437 | /* | 429 | /* |
438 | * Align down here since the VM subsystem insists that the | 430 | * Align down here since the VM subsystem insists that the |
439 | * memmap entries are valid from the bank start aligned to | 431 | * memmap entries are valid from the bank start aligned to |
440 | * MAX_ORDER_NR_PAGES. | 432 | * MAX_ORDER_NR_PAGES. |
441 | */ | 433 | */ |
442 | bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES); | 434 | start = round_down(start, MAX_ORDER_NR_PAGES); |
443 | #endif | 435 | #endif |
444 | /* | 436 | /* |
445 | * If we had a previous bank, and there is a space | 437 | * If we had a previous bank, and there is a space |
446 | * between the current bank and the previous, free it. | 438 | * between the current bank and the previous, free it. |
447 | */ | 439 | */ |
448 | if (prev_bank_end && prev_bank_end < bank_start) | 440 | if (prev_end && prev_end < start) |
449 | free_memmap(prev_bank_end, bank_start); | 441 | free_memmap(prev_end, start); |
450 | 442 | ||
451 | /* | 443 | /* |
452 | * Align up here since the VM subsystem insists that the | 444 | * Align up here since the VM subsystem insists that the |
453 | * memmap entries are valid from the bank end aligned to | 445 | * memmap entries are valid from the bank end aligned to |
454 | * MAX_ORDER_NR_PAGES. | 446 | * MAX_ORDER_NR_PAGES. |
455 | */ | 447 | */ |
456 | prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES); | 448 | prev_end = ALIGN(memblock_region_memory_end_pfn(reg), |
449 | MAX_ORDER_NR_PAGES); | ||
457 | } | 450 | } |
458 | 451 | ||
459 | #ifdef CONFIG_SPARSEMEM | 452 | #ifdef CONFIG_SPARSEMEM |
460 | if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION)) | 453 | if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) |
461 | free_memmap(prev_bank_end, | 454 | free_memmap(prev_end, |
462 | ALIGN(prev_bank_end, PAGES_PER_SECTION)); | 455 | ALIGN(prev_end, PAGES_PER_SECTION)); |
463 | #endif | 456 | #endif |
464 | } | 457 | } |
465 | 458 | ||
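Editor's note: a toy model of the gap-freeing logic above may help. Each region's start PFN is rounded down and its end PFN rounded up to MAX_ORDER_NR_PAGES, and only the memmap between one rounded end and the next rounded start is released. The region layout and the MAX_ORDER_NR_PAGES value (1024 pages, i.e. 4 MiB with 4 KiB pages) are assumptions for illustration:

#include <stdio.h>

#define MAX_ORDER_NR_PAGES 1024UL	/* assumed: 4 MiB worth of 4 KiB pages */
#define round_down(x, a)   ((x) & ~((a) - 1))
#define align_up(x, a)     (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* two hypothetical memory regions, expressed in PFNs */
	unsigned long regions[2][2] = { { 0x60000, 0x60800 }, { 0x70000, 0x80000 } };
	unsigned long prev_end = 0;

	for (int i = 0; i < 2; i++) {
		unsigned long start = round_down(regions[i][0], MAX_ORDER_NR_PAGES);

		if (prev_end && prev_end < start)
			printf("free memmap for pfns %#lx-%#lx\n", prev_end, start);

		prev_end = align_up(regions[i][1], MAX_ORDER_NR_PAGES);
	}
	return 0;
}

With these numbers only the memmap between the first region's aligned end (0x60800) and the second region's aligned start (0x70000) would be freed.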
@@ -535,7 +528,7 @@ void __init mem_init(void) | |||
535 | set_max_mapnr(pfn_to_page(max_pfn) - mem_map); | 528 | set_max_mapnr(pfn_to_page(max_pfn) - mem_map); |
536 | 529 | ||
537 | /* this will put all unused low memory onto the freelists */ | 530 | /* this will put all unused low memory onto the freelists */ |
538 | free_unused_memmap(&meminfo); | 531 | free_unused_memmap(); |
539 | free_all_bootmem(); | 532 | free_all_bootmem(); |
540 | 533 | ||
541 | #ifdef CONFIG_SA1111 | 534 | #ifdef CONFIG_SA1111 |
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index f9c32ba73544..d1e5ad7ab3bc 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c | |||
@@ -438,6 +438,13 @@ void __arm_iounmap(volatile void __iomem *io_addr) | |||
438 | EXPORT_SYMBOL(__arm_iounmap); | 438 | EXPORT_SYMBOL(__arm_iounmap); |
439 | 439 | ||
440 | #ifdef CONFIG_PCI | 440 | #ifdef CONFIG_PCI |
441 | static int pci_ioremap_mem_type = MT_DEVICE; | ||
442 | |||
443 | void pci_ioremap_set_mem_type(int mem_type) | ||
444 | { | ||
445 | pci_ioremap_mem_type = mem_type; | ||
446 | } | ||
447 | |||
441 | int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr) | 448 | int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr) |
442 | { | 449 | { |
443 | BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT); | 450 | BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT); |
@@ -445,7 +452,7 @@ int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr) | |||
445 | return ioremap_page_range(PCI_IO_VIRT_BASE + offset, | 452 | return ioremap_page_range(PCI_IO_VIRT_BASE + offset, |
446 | PCI_IO_VIRT_BASE + offset + SZ_64K, | 453 | PCI_IO_VIRT_BASE + offset + SZ_64K, |
447 | phys_addr, | 454 | phys_addr, |
448 | __pgprot(get_mem_type(MT_DEVICE)->prot_pte)); | 455 | __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte)); |
449 | } | 456 | } |
450 | EXPORT_SYMBOL_GPL(pci_ioremap_io); | 457 | EXPORT_SYMBOL_GPL(pci_ioremap_io); |
451 | #endif | 458 | #endif |
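Editor's note: the new hook lets a platform override the memory type before the 64K PCI I/O window is mapped. A hypothetical call site; MT_UNCACHED as the chosen type and the header carrying the declaration are assumptions, not taken from this hunk:

#include <linux/init.h>
#include <linux/types.h>
#include <asm/mach/map.h>
#include <asm/mach/pci.h>

static int __init example_pcie_io_setup(phys_addr_t io_phys)
{
	/* must run before pci_ioremap_io() so the new type is picked up */
	pci_ioremap_set_mem_type(MT_UNCACHED);

	return pci_ioremap_io(0, io_phys);	/* map the first 64K window */
}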
diff --git a/arch/arm/mm/l2c-common.c b/arch/arm/mm/l2c-common.c new file mode 100644 index 000000000000..10a3cf28c362 --- /dev/null +++ b/arch/arm/mm/l2c-common.c | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 ARM Ltd. | ||
3 | * Written by Catalin Marinas <catalin.marinas@arm.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | #include <linux/bug.h> | ||
10 | #include <linux/smp.h> | ||
11 | #include <asm/outercache.h> | ||
12 | |||
13 | void outer_disable(void) | ||
14 | { | ||
15 | WARN_ON(!irqs_disabled()); | ||
16 | WARN_ON(num_online_cpus() > 1); | ||
17 | |||
18 | if (outer_cache.disable) | ||
19 | outer_cache.disable(); | ||
20 | } | ||
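Editor's note: as the WARN_ON()s suggest, outer_disable() is meant for late shutdown or suspend paths where interrupts are already off and the other CPUs are down. A hypothetical caller; the flush sequence shown is the usual pattern, not something this file enforces:

#include <linux/irqflags.h>
#include <asm/cacheflush.h>
#include <asm/outercache.h>

static void example_cache_off(void)
{
	local_irq_disable();
	flush_cache_all();	/* clean L1 so nothing dirty is stranded */
	outer_flush_all();	/* clean+invalidate L2 while it is still on */
	outer_disable();	/* finally turn the outer cache off */
}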
diff --git a/arch/arm/mm/l2c-l2x0-resume.S b/arch/arm/mm/l2c-l2x0-resume.S new file mode 100644 index 000000000000..99b05f21a59a --- /dev/null +++ b/arch/arm/mm/l2c-l2x0-resume.S | |||
@@ -0,0 +1,58 @@ | |||
1 | /* | ||
2 | * L2C-310 early resume code. This can be used by platforms to restore | ||
3 | * the settings of their L2 cache controller before restoring the | ||
4 | * processor state. | ||
5 | * | ||
6 | * This code can only be used if you are running in the secure world. | ||
7 | */ | ||
8 | #include <linux/linkage.h> | ||
9 | #include <asm/hardware/cache-l2x0.h> | ||
10 | |||
11 | .text | ||
12 | |||
13 | ENTRY(l2c310_early_resume) | ||
14 | adr r0, 1f | ||
15 | ldr r2, [r0] | ||
16 | add r0, r2, r0 | ||
17 | |||
18 | ldmia r0, {r1, r2, r3, r4, r5, r6, r7, r8} | ||
19 | @ r1 = phys address of L2C-310 controller | ||
20 | @ r2 = aux_ctrl | ||
21 | @ r3 = tag_latency | ||
22 | @ r4 = data_latency | ||
23 | @ r5 = filter_start | ||
24 | @ r6 = filter_end | ||
25 | @ r7 = prefetch_ctrl | ||
26 | @ r8 = pwr_ctrl | ||
27 | |||
28 | @ Check that the address has been initialised | ||
29 | teq r1, #0 | ||
30 | moveq pc, lr | ||
31 | |||
32 | @ The prefetch and power control registers are revision dependent | ||
33 | @ and can be written whether or not the L2 cache is enabled | ||
34 | ldr r0, [r1, #L2X0_CACHE_ID] | ||
35 | and r0, r0, #L2X0_CACHE_ID_RTL_MASK | ||
36 | cmp r0, #L310_CACHE_ID_RTL_R2P0 | ||
37 | strcs r7, [r1, #L310_PREFETCH_CTRL] | ||
38 | cmp r0, #L310_CACHE_ID_RTL_R3P0 | ||
39 | strcs r8, [r1, #L310_POWER_CTRL] | ||
40 | |||
41 | @ Don't setup the L2 cache if it is already enabled | ||
42 | ldr r0, [r1, #L2X0_CTRL] | ||
43 | tst r0, #L2X0_CTRL_EN | ||
44 | movne pc, lr | ||
45 | |||
46 | str r3, [r1, #L310_TAG_LATENCY_CTRL] | ||
47 | str r4, [r1, #L310_DATA_LATENCY_CTRL] | ||
48 | str r6, [r1, #L310_ADDR_FILTER_END] | ||
49 | str r5, [r1, #L310_ADDR_FILTER_START] | ||
50 | |||
51 | str r2, [r1, #L2X0_AUX_CTRL] | ||
52 | mov r9, #L2X0_CTRL_EN | ||
53 | str r9, [r1, #L2X0_CTRL] | ||
54 | mov pc, lr | ||
55 | ENDPROC(l2c310_early_resume) | ||
56 | |||
57 | .align | ||
58 | 1: .long l2x0_saved_regs - . | ||
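Editor's note: the trailing literal stores the build-time distance from itself to l2x0_saved_regs; adding that offset to the literal's own run-time address (fetched with adr) resolves the variable no matter whether the code is executing from physical or virtual addresses. A rough, purely illustrative C analogue of the same self-relative trick:

#include <stdio.h>
#include <stdint.h>

static unsigned long saved_regs[8];	/* stands in for l2x0_saved_regs */

int main(void)
{
	/* the assembler computes "l2x0_saved_regs - ." at build time; here the
	 * slot is filled at run time only to demonstrate the arithmetic */
	intptr_t slot = (intptr_t)saved_regs - (intptr_t)&slot;

	unsigned long *found = (unsigned long *)((intptr_t)&slot + slot);
	printf("%s\n", found == saved_regs ? "resolved" : "mismatch");
	return 0;
}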
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index 7ea641b7aa7d..ce727d47275c 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h | |||
@@ -2,6 +2,8 @@ | |||
2 | #include <linux/list.h> | 2 | #include <linux/list.h> |
3 | #include <linux/vmalloc.h> | 3 | #include <linux/vmalloc.h> |
4 | 4 | ||
5 | #include <asm/pgtable.h> | ||
6 | |||
5 | /* the upper-most page table pointer */ | 7 | /* the upper-most page table pointer */ |
6 | extern pmd_t *top_pmd; | 8 | extern pmd_t *top_pmd; |
7 | 9 | ||
@@ -93,3 +95,5 @@ extern phys_addr_t arm_lowmem_limit; | |||
93 | void __init bootmem_init(void); | 95 | void __init bootmem_init(void); |
94 | void arm_mm_memblock_reserve(void); | 96 | void arm_mm_memblock_reserve(void); |
95 | void dma_contiguous_remap(void); | 97 | void dma_contiguous_remap(void); |
98 | |||
99 | unsigned long __clear_cr(unsigned long mask); | ||
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index b68c6b22e1c8..ab14b79b03f0 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <asm/mach/arch.h> | 35 | #include <asm/mach/arch.h> |
36 | #include <asm/mach/map.h> | 36 | #include <asm/mach/map.h> |
37 | #include <asm/mach/pci.h> | 37 | #include <asm/mach/pci.h> |
38 | #include <asm/fixmap.h> | ||
38 | 39 | ||
39 | #include "mm.h" | 40 | #include "mm.h" |
40 | #include "tcm.h" | 41 | #include "tcm.h" |
@@ -117,28 +118,54 @@ static struct cachepolicy cache_policies[] __initdata = { | |||
117 | }; | 118 | }; |
118 | 119 | ||
119 | #ifdef CONFIG_CPU_CP15 | 120 | #ifdef CONFIG_CPU_CP15 |
121 | static unsigned long initial_pmd_value __initdata = 0; | ||
122 | |||
120 | /* | 123 | /* |
121 | * These are useful for identifying cache coherency | 124 | * Initialise the cache_policy variable with the initial state specified |
122 | * problems by allowing the cache or the cache and | 125 | * via the "pmd" value. This is used to ensure that on ARMv6 and later, |
123 | * writebuffer to be turned off. (Note: the write | 126 | * the C code sets the page tables up with the same policy as the head |
124 | * buffer should not be on and the cache off). | 127 | * assembly code, which avoids an illegal state where the TLBs can get |
128 | * confused. See comments in early_cachepolicy() for more information. | ||
125 | */ | 129 | */ |
126 | static int __init early_cachepolicy(char *p) | 130 | void __init init_default_cache_policy(unsigned long pmd) |
127 | { | 131 | { |
128 | int i; | 132 | int i; |
129 | 133 | ||
134 | initial_pmd_value = pmd; | ||
135 | |||
136 | pmd &= PMD_SECT_TEX(1) | PMD_SECT_BUFFERABLE | PMD_SECT_CACHEABLE; | ||
137 | |||
138 | for (i = 0; i < ARRAY_SIZE(cache_policies); i++) | ||
139 | if (cache_policies[i].pmd == pmd) { | ||
140 | cachepolicy = i; | ||
141 | break; | ||
142 | } | ||
143 | |||
144 | if (i == ARRAY_SIZE(cache_policies)) | ||
145 | pr_err("ERROR: could not find cache policy\n"); | ||
146 | } | ||
147 | |||
148 | /* | ||
149 | * These are useful for identifying cache coherency problems by allowing | ||
150 | * the cache or the cache and writebuffer to be turned off. (Note: the | ||
151 | * write buffer should not be on and the cache off). | ||
152 | */ | ||
153 | static int __init early_cachepolicy(char *p) | ||
154 | { | ||
155 | int i, selected = -1; | ||
156 | |||
130 | for (i = 0; i < ARRAY_SIZE(cache_policies); i++) { | 157 | for (i = 0; i < ARRAY_SIZE(cache_policies); i++) { |
131 | int len = strlen(cache_policies[i].policy); | 158 | int len = strlen(cache_policies[i].policy); |
132 | 159 | ||
133 | if (memcmp(p, cache_policies[i].policy, len) == 0) { | 160 | if (memcmp(p, cache_policies[i].policy, len) == 0) { |
134 | cachepolicy = i; | 161 | selected = i; |
135 | cr_alignment &= ~cache_policies[i].cr_mask; | ||
136 | cr_no_alignment &= ~cache_policies[i].cr_mask; | ||
137 | break; | 162 | break; |
138 | } | 163 | } |
139 | } | 164 | } |
140 | if (i == ARRAY_SIZE(cache_policies)) | 165 | |
141 | printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n"); | 166 | if (selected == -1) |
167 | pr_err("ERROR: unknown or unsupported cache policy\n"); | ||
168 | |||
142 | /* | 169 | /* |
143 | * This restriction is partly to do with the way we boot; it is | 170 | * This restriction is partly to do with the way we boot; it is |
144 | * unpredictable to have memory mapped using two different sets of | 171 | * unpredictable to have memory mapped using two different sets of |
@@ -146,12 +173,18 @@ static int __init early_cachepolicy(char *p) | |||
146 | * change these attributes once the initial assembly has setup the | 173 | * change these attributes once the initial assembly has setup the |
147 | * page tables. | 174 | * page tables. |
148 | */ | 175 | */ |
149 | if (cpu_architecture() >= CPU_ARCH_ARMv6) { | 176 | if (cpu_architecture() >= CPU_ARCH_ARMv6 && selected != cachepolicy) { |
150 | printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n"); | 177 | pr_warn("Only cachepolicy=%s supported on ARMv6 and later\n", |
151 | cachepolicy = CPOLICY_WRITEBACK; | 178 | cache_policies[cachepolicy].policy); |
179 | return 0; | ||
180 | } | ||
181 | |||
182 | if (selected != cachepolicy) { | ||
183 | unsigned long cr = __clear_cr(cache_policies[selected].cr_mask); | ||
184 | cachepolicy = selected; | ||
185 | flush_cache_all(); | ||
186 | set_cr(cr); | ||
152 | } | 187 | } |
153 | flush_cache_all(); | ||
154 | set_cr(cr_alignment); | ||
155 | return 0; | 188 | return 0; |
156 | } | 189 | } |
157 | early_param("cachepolicy", early_cachepolicy); | 190 | early_param("cachepolicy", early_cachepolicy); |
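Editor's note: the "cachepolicy=" early parameter is matched by prefix against the policy names, so e.g. booting with cachepolicy=writealloc selects the write-allocate entry; on ARMv6 and later only the policy already chosen by init_default_cache_policy() is accepted, as the hunk shows. A standalone sketch of that lookup; the name list mirrors the usual ARM cache_policies table but is reproduced here from memory:

#include <stdio.h>
#include <string.h>

static const char *policies[] = { "uncached", "buffered", "writethrough",
				  "writeback", "writealloc" };

static int pick_policy(const char *arg)
{
	for (unsigned i = 0; i < sizeof(policies) / sizeof(policies[0]); i++)
		if (memcmp(arg, policies[i], strlen(policies[i])) == 0)
			return (int)i;
	return -1;	/* unknown or unsupported */
}

int main(void)
{
	printf("cachepolicy=writealloc -> index %d\n", pick_policy("writealloc"));
	return 0;
}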
@@ -186,35 +219,6 @@ static int __init early_ecc(char *p) | |||
186 | early_param("ecc", early_ecc); | 219 | early_param("ecc", early_ecc); |
187 | #endif | 220 | #endif |
188 | 221 | ||
189 | static int __init noalign_setup(char *__unused) | ||
190 | { | ||
191 | cr_alignment &= ~CR_A; | ||
192 | cr_no_alignment &= ~CR_A; | ||
193 | set_cr(cr_alignment); | ||
194 | return 1; | ||
195 | } | ||
196 | __setup("noalign", noalign_setup); | ||
197 | |||
198 | #ifndef CONFIG_SMP | ||
199 | void adjust_cr(unsigned long mask, unsigned long set) | ||
200 | { | ||
201 | unsigned long flags; | ||
202 | |||
203 | mask &= ~CR_A; | ||
204 | |||
205 | set &= mask; | ||
206 | |||
207 | local_irq_save(flags); | ||
208 | |||
209 | cr_no_alignment = (cr_no_alignment & ~mask) | set; | ||
210 | cr_alignment = (cr_alignment & ~mask) | set; | ||
211 | |||
212 | set_cr((get_cr() & ~mask) | set); | ||
213 | |||
214 | local_irq_restore(flags); | ||
215 | } | ||
216 | #endif | ||
217 | |||
218 | #else /* ifdef CONFIG_CPU_CP15 */ | 222 | #else /* ifdef CONFIG_CPU_CP15 */ |
219 | 223 | ||
220 | static int __init early_cachepolicy(char *p) | 224 | static int __init early_cachepolicy(char *p) |
@@ -414,8 +418,17 @@ static void __init build_mem_type_table(void) | |||
414 | cachepolicy = CPOLICY_WRITEBACK; | 418 | cachepolicy = CPOLICY_WRITEBACK; |
415 | ecc_mask = 0; | 419 | ecc_mask = 0; |
416 | } | 420 | } |
417 | if (is_smp()) | 421 | |
418 | cachepolicy = CPOLICY_WRITEALLOC; | 422 | if (is_smp()) { |
423 | if (cachepolicy != CPOLICY_WRITEALLOC) { | ||
424 | pr_warn("Forcing write-allocate cache policy for SMP\n"); | ||
425 | cachepolicy = CPOLICY_WRITEALLOC; | ||
426 | } | ||
427 | if (!(initial_pmd_value & PMD_SECT_S)) { | ||
428 | pr_warn("Forcing shared mappings for SMP\n"); | ||
429 | initial_pmd_value |= PMD_SECT_S; | ||
430 | } | ||
431 | } | ||
419 | 432 | ||
420 | /* | 433 | /* |
421 | * Strip out features not present on earlier architectures. | 434 | * Strip out features not present on earlier architectures. |
@@ -539,11 +552,12 @@ static void __init build_mem_type_table(void) | |||
539 | mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; | 552 | mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; |
540 | #endif | 553 | #endif |
541 | 554 | ||
542 | if (is_smp()) { | 555 | /* |
543 | /* | 556 | * If the initial page tables were created with the S bit |
544 | * Mark memory with the "shared" attribute | 557 | * set, then we need to do the same here for the same |
545 | * for SMP systems | 558 | * reasons given in early_cachepolicy(). |
546 | */ | 559 | */ |
560 | if (initial_pmd_value & PMD_SECT_S) { | ||
547 | user_pgprot |= L_PTE_SHARED; | 561 | user_pgprot |= L_PTE_SHARED; |
548 | kern_pgprot |= L_PTE_SHARED; | 562 | kern_pgprot |= L_PTE_SHARED; |
549 | vecs_pgprot |= L_PTE_SHARED; | 563 | vecs_pgprot |= L_PTE_SHARED; |
@@ -1061,74 +1075,47 @@ phys_addr_t arm_lowmem_limit __initdata = 0; | |||
1061 | void __init sanity_check_meminfo(void) | 1075 | void __init sanity_check_meminfo(void) |
1062 | { | 1076 | { |
1063 | phys_addr_t memblock_limit = 0; | 1077 | phys_addr_t memblock_limit = 0; |
1064 | int i, j, highmem = 0; | 1078 | int highmem = 0; |
1065 | phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1; | 1079 | phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1; |
1080 | struct memblock_region *reg; | ||
1066 | 1081 | ||
1067 | for (i = 0, j = 0; i < meminfo.nr_banks; i++) { | 1082 | for_each_memblock(memory, reg) { |
1068 | struct membank *bank = &meminfo.bank[j]; | 1083 | phys_addr_t block_start = reg->base; |
1069 | phys_addr_t size_limit; | 1084 | phys_addr_t block_end = reg->base + reg->size; |
1070 | 1085 | phys_addr_t size_limit = reg->size; | |
1071 | *bank = meminfo.bank[i]; | ||
1072 | size_limit = bank->size; | ||
1073 | 1086 | ||
1074 | if (bank->start >= vmalloc_limit) | 1087 | if (reg->base >= vmalloc_limit) |
1075 | highmem = 1; | 1088 | highmem = 1; |
1076 | else | 1089 | else |
1077 | size_limit = vmalloc_limit - bank->start; | 1090 | size_limit = vmalloc_limit - reg->base; |
1078 | 1091 | ||
1079 | bank->highmem = highmem; | ||
1080 | 1092 | ||
1081 | #ifdef CONFIG_HIGHMEM | 1093 | if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) { |
1082 | /* | 1094 | |
1083 | * Split those memory banks which are partially overlapping | 1095 | if (highmem) { |
1084 | * the vmalloc area greatly simplifying things later. | 1096 | pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n", |
1085 | */ | 1097 | &block_start, &block_end); |
1086 | if (!highmem && bank->size > size_limit) { | 1098 | memblock_remove(reg->base, reg->size); |
1087 | if (meminfo.nr_banks >= NR_BANKS) { | 1099 | continue; |
1088 | printk(KERN_CRIT "NR_BANKS too low, " | ||
1089 | "ignoring high memory\n"); | ||
1090 | } else { | ||
1091 | memmove(bank + 1, bank, | ||
1092 | (meminfo.nr_banks - i) * sizeof(*bank)); | ||
1093 | meminfo.nr_banks++; | ||
1094 | i++; | ||
1095 | bank[1].size -= size_limit; | ||
1096 | bank[1].start = vmalloc_limit; | ||
1097 | bank[1].highmem = highmem = 1; | ||
1098 | j++; | ||
1099 | } | 1100 | } |
1100 | bank->size = size_limit; | ||
1101 | } | ||
1102 | #else | ||
1103 | /* | ||
1104 | * Highmem banks not allowed with !CONFIG_HIGHMEM. | ||
1105 | */ | ||
1106 | if (highmem) { | ||
1107 | printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx " | ||
1108 | "(!CONFIG_HIGHMEM).\n", | ||
1109 | (unsigned long long)bank->start, | ||
1110 | (unsigned long long)bank->start + bank->size - 1); | ||
1111 | continue; | ||
1112 | } | ||
1113 | 1101 | ||
1114 | /* | 1102 | if (reg->size > size_limit) { |
1115 | * Check whether this memory bank would partially overlap | 1103 | phys_addr_t overlap_size = reg->size - size_limit; |
1116 | * the vmalloc area. | 1104 | |
1117 | */ | 1105 | pr_notice("Truncating RAM at %pa-%pa to -%pa", |
1118 | if (bank->size > size_limit) { | 1106 | &block_start, &block_end, &vmalloc_limit); |
1119 | printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx " | 1107 | memblock_remove(vmalloc_limit, overlap_size); |
1120 | "to -%.8llx (vmalloc region overlap).\n", | 1108 | block_end = vmalloc_limit; |
1121 | (unsigned long long)bank->start, | 1109 | } |
1122 | (unsigned long long)bank->start + bank->size - 1, | ||
1123 | (unsigned long long)bank->start + size_limit - 1); | ||
1124 | bank->size = size_limit; | ||
1125 | } | 1110 | } |
1126 | #endif | ||
1127 | if (!bank->highmem) { | ||
1128 | phys_addr_t bank_end = bank->start + bank->size; | ||
1129 | 1111 | ||
1130 | if (bank_end > arm_lowmem_limit) | 1112 | if (!highmem) { |
1131 | arm_lowmem_limit = bank_end; | 1113 | if (block_end > arm_lowmem_limit) { |
1114 | if (reg->size > size_limit) | ||
1115 | arm_lowmem_limit = vmalloc_limit; | ||
1116 | else | ||
1117 | arm_lowmem_limit = block_end; | ||
1118 | } | ||
1132 | 1119 | ||
1133 | /* | 1120 | /* |
1134 | * Find the first non-section-aligned page, and point | 1121 | * Find the first non-section-aligned page, and point |
@@ -1144,35 +1131,15 @@ void __init sanity_check_meminfo(void) | |||
1144 | * occurs before any free memory is mapped. | 1131 | * occurs before any free memory is mapped. |
1145 | */ | 1132 | */ |
1146 | if (!memblock_limit) { | 1133 | if (!memblock_limit) { |
1147 | if (!IS_ALIGNED(bank->start, SECTION_SIZE)) | 1134 | if (!IS_ALIGNED(block_start, SECTION_SIZE)) |
1148 | memblock_limit = bank->start; | 1135 | memblock_limit = block_start; |
1149 | else if (!IS_ALIGNED(bank_end, SECTION_SIZE)) | 1136 | else if (!IS_ALIGNED(block_end, SECTION_SIZE)) |
1150 | memblock_limit = bank_end; | 1137 | memblock_limit = arm_lowmem_limit; |
1151 | } | 1138 | } |
1152 | } | ||
1153 | j++; | ||
1154 | } | ||
1155 | #ifdef CONFIG_HIGHMEM | ||
1156 | if (highmem) { | ||
1157 | const char *reason = NULL; | ||
1158 | 1139 | ||
1159 | if (cache_is_vipt_aliasing()) { | ||
1160 | /* | ||
1161 | * Interactions between kmap and other mappings | ||
1162 | * make highmem support with aliasing VIPT caches | ||
1163 | * rather difficult. | ||
1164 | */ | ||
1165 | reason = "with VIPT aliasing cache"; | ||
1166 | } | ||
1167 | if (reason) { | ||
1168 | printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n", | ||
1169 | reason); | ||
1170 | while (j > 0 && meminfo.bank[j - 1].highmem) | ||
1171 | j--; | ||
1172 | } | 1140 | } |
1173 | } | 1141 | } |
1174 | #endif | 1142 | |
1175 | meminfo.nr_banks = j; | ||
1176 | high_memory = __va(arm_lowmem_limit - 1) + 1; | 1143 | high_memory = __va(arm_lowmem_limit - 1) + 1; |
1177 | 1144 | ||
1178 | /* | 1145 | /* |
@@ -1359,6 +1326,9 @@ static void __init kmap_init(void) | |||
1359 | #ifdef CONFIG_HIGHMEM | 1326 | #ifdef CONFIG_HIGHMEM |
1360 | pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE), | 1327 | pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE), |
1361 | PKMAP_BASE, _PAGE_KERNEL_TABLE); | 1328 | PKMAP_BASE, _PAGE_KERNEL_TABLE); |
1329 | |||
1330 | fixmap_page_table = early_pte_alloc(pmd_off_k(FIXADDR_START), | ||
1331 | FIXADDR_START, _PAGE_KERNEL_TABLE); | ||
1362 | #endif | 1332 | #endif |
1363 | } | 1333 | } |
1364 | 1334 | ||
@@ -1461,7 +1431,7 @@ void __init early_paging_init(const struct machine_desc *mdesc, | |||
1461 | * just complicate the code. | 1431 | * just complicate the code. |
1462 | */ | 1432 | */ |
1463 | flush_cache_louis(); | 1433 | flush_cache_louis(); |
1464 | dsb(); | 1434 | dsb(ishst); |
1465 | isb(); | 1435 | isb(); |
1466 | 1436 | ||
1467 | /* remap level 1 table */ | 1437 | /* remap level 1 table */ |
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 55764a7ef1f0..da1874f9f8cf 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c | |||
@@ -88,30 +88,35 @@ static unsigned long irbar_read(void) | |||
88 | void __init sanity_check_meminfo_mpu(void) | 88 | void __init sanity_check_meminfo_mpu(void) |
89 | { | 89 | { |
90 | int i; | 90 | int i; |
91 | struct membank *bank = meminfo.bank; | ||
92 | phys_addr_t phys_offset = PHYS_OFFSET; | 91 | phys_addr_t phys_offset = PHYS_OFFSET; |
93 | phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size; | 92 | phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size; |
94 | 93 | struct memblock_region *reg; | |
95 | /* Initially only use memory contiguous from PHYS_OFFSET */ | 94 | bool first = true; 
96 | if (bank_phys_start(&bank[0]) != phys_offset) | 95 | phys_addr_t mem_start; |
97 | panic("First memory bank must be contiguous from PHYS_OFFSET"); | 96 | phys_addr_t mem_end; |
98 | 97 | ||
99 | /* Banks have already been sorted by start address */ | 98 | for_each_memblock(memory, reg) { |
100 | for (i = 1; i < meminfo.nr_banks; i++) { | 99 | if (first) { |
101 | if (bank[i].start <= bank_phys_end(&bank[0]) && | 100 | /* |
102 | bank_phys_end(&bank[i]) > bank_phys_end(&bank[0])) { | 101 | * Initially only use memory contiguous from 
103 | bank[0].size = bank_phys_end(&bank[i]) - bank[0].start; | 102 | * PHYS_OFFSET */ |
103 | if (reg->base != phys_offset) | ||
104 | panic("First memory bank must be contiguous from PHYS_OFFSET"); | ||
105 | |||
106 | mem_start = reg->base; | ||
107 | mem_end = reg->base + reg->size; | ||
108 | specified_mem_size = reg->size; | ||
109 | first = false; | ||
104 | } else { | 110 | } else { |
105 | pr_notice("Ignoring RAM after 0x%.8lx. " | 111 | /* |
106 | "First non-contiguous (ignored) bank start: 0x%.8lx\n", | 112 | * memblock auto merges contiguous blocks, remove |
107 | (unsigned long)bank_phys_end(&bank[0]), | 113 | * all blocks afterwards |
108 | (unsigned long)bank_phys_start(&bank[i])); | 114 | */ |
109 | break; | 115 | pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n", |
116 | &mem_start, ®->base); | ||
117 | memblock_remove(reg->base, reg->size); | ||
110 | } | 118 | } |
111 | } | 119 | } |
112 | /* All contiguous banks are now merged in to the first bank */ | ||
113 | meminfo.nr_banks = 1; | ||
114 | specified_mem_size = bank[0].size; | ||
115 | 120 | ||
116 | /* | 121 | /* |
117 | * MPU has curious alignment requirements: Size must be power of 2, and | 122 | * MPU has curious alignment requirements: Size must be power of 2, and |
@@ -128,23 +133,24 @@ void __init sanity_check_meminfo_mpu(void) | |||
128 | */ | 133 | */ |
129 | aligned_region_size = (phys_offset - 1) ^ (phys_offset); | 134 | aligned_region_size = (phys_offset - 1) ^ (phys_offset); |
130 | /* Find the max power-of-two sized region that fits inside our bank */ | 135 | /* Find the max power-of-two sized region that fits inside our bank */ |
131 | rounded_mem_size = (1 << __fls(bank[0].size)) - 1; | 136 | rounded_mem_size = (1 << __fls(specified_mem_size)) - 1; |
132 | 137 | ||
133 | /* The actual region size is the smaller of the two */ | 138 | /* The actual region size is the smaller of the two */ |
134 | aligned_region_size = aligned_region_size < rounded_mem_size | 139 | aligned_region_size = aligned_region_size < rounded_mem_size |
135 | ? aligned_region_size + 1 | 140 | ? aligned_region_size + 1 |
136 | : rounded_mem_size + 1; | 141 | : rounded_mem_size + 1; |
137 | 142 | ||
138 | if (aligned_region_size != specified_mem_size) | 143 | if (aligned_region_size != specified_mem_size) { |
139 | pr_warn("Truncating memory from 0x%.8lx to 0x%.8lx (MPU region constraints)", | 144 | pr_warn("Truncating memory from %pa to %pa (MPU region constraints)", |
140 | (unsigned long)specified_mem_size, | 145 | &specified_mem_size, &aligned_region_size); |
141 | (unsigned long)aligned_region_size); | 146 | memblock_remove(mem_start + aligned_region_size, |
147 | specified_mem_size - aligned_region_size); | ||
148 | |||
149 | mem_end = mem_start + aligned_region_size; | ||
150 | } | ||
142 | 151 | ||
143 | meminfo.bank[0].size = aligned_region_size; | 152 | pr_debug("MPU Region from %pa size %pa (end %pa))\n", |
144 | pr_debug("MPU Region from 0x%.8lx size 0x%.8lx (end 0x%.8lx))\n", | 153 | &phys_offset, &aligned_region_size, &mem_end); |
145 | (unsigned long)phys_offset, | ||
146 | (unsigned long)aligned_region_size, | ||
147 | (unsigned long)bank_phys_end(&bank[0])); | ||
148 | 154 | ||
149 | } | 155 | } |
150 | 156 | ||
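Editor's note: the sizing arithmetic above deserves a worked example. One candidate size is derived from the alignment of PHYS_OFFSET (the XOR trick), the other is the largest power of two that fits inside the specified RAM size via __fls(); the smaller of the two becomes the MPU region size and anything beyond it is removed from memblock. With a made-up PHYS_OFFSET of 0x10000000 and 192 MiB of RAM, the RAM-size term is the binding one and the region comes out as 128 MiB:

#include <stdio.h>
#include <stdint.h>

static int fls32(uint32_t x)		/* userspace stand-in for __fls() */
{
	int n = 0;
	while (x >>= 1)
		n++;
	return n;
}

int main(void)
{
	uint32_t phys_offset        = 0x10000000;	/* made-up base */
	uint32_t specified_mem_size = 192 << 20;	/* made-up RAM size */

	uint32_t aligned_region_size = (phys_offset - 1) ^ phys_offset;
	uint32_t rounded_mem_size    = (1u << fls32(specified_mem_size)) - 1;

	aligned_region_size = aligned_region_size < rounded_mem_size
			    ? aligned_region_size + 1
			    : rounded_mem_size + 1;

	printf("MPU region: base %#x size %#x (%u MiB)\n",
	       phys_offset, aligned_region_size, aligned_region_size >> 20);
	return 0;
}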
@@ -292,7 +298,7 @@ void __init sanity_check_meminfo(void) | |||
292 | { | 298 | { |
293 | phys_addr_t end; | 299 | phys_addr_t end; |
294 | sanity_check_meminfo_mpu(); | 300 | sanity_check_meminfo_mpu(); |
295 | end = bank_phys_end(&meminfo.bank[meminfo.nr_banks - 1]); | 301 | end = memblock_end_of_DRAM(); |
296 | high_memory = __va(end - 1) + 1; | 302 | high_memory = __va(end - 1) + 1; |
297 | } | 303 | } |
298 | 304 | ||
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S index 01a719e18bb0..22e3ad63500c 100644 --- a/arch/arm/mm/proc-v7-3level.S +++ b/arch/arm/mm/proc-v7-3level.S | |||
@@ -64,6 +64,14 @@ ENTRY(cpu_v7_switch_mm) | |||
64 | mov pc, lr | 64 | mov pc, lr |
65 | ENDPROC(cpu_v7_switch_mm) | 65 | ENDPROC(cpu_v7_switch_mm) |
66 | 66 | ||
67 | #ifdef __ARMEB__ | ||
68 | #define rl r3 | ||
69 | #define rh r2 | ||
70 | #else | ||
71 | #define rl r2 | ||
72 | #define rh r3 | ||
73 | #endif | ||
74 | |||
67 | /* | 75 | /* |
68 | * cpu_v7_set_pte_ext(ptep, pte) | 76 | * cpu_v7_set_pte_ext(ptep, pte) |
69 | * | 77 | * |
@@ -73,13 +81,13 @@ ENDPROC(cpu_v7_switch_mm) | |||
73 | */ | 81 | */ |
74 | ENTRY(cpu_v7_set_pte_ext) | 82 | ENTRY(cpu_v7_set_pte_ext) |
75 | #ifdef CONFIG_MMU | 83 | #ifdef CONFIG_MMU |
76 | tst r2, #L_PTE_VALID | 84 | tst rl, #L_PTE_VALID |
77 | beq 1f | 85 | beq 1f |
78 | tst r3, #1 << (57 - 32) @ L_PTE_NONE | 86 | tst rh, #1 << (57 - 32) @ L_PTE_NONE |
79 | bicne r2, #L_PTE_VALID | 87 | bicne rl, #L_PTE_VALID |
80 | bne 1f | 88 | bne 1f |
81 | tst r3, #1 << (55 - 32) @ L_PTE_DIRTY | 89 | tst rh, #1 << (55 - 32) @ L_PTE_DIRTY |
82 | orreq r2, #L_PTE_RDONLY | 90 | orreq rl, #L_PTE_RDONLY |
83 | 1: strd r2, r3, [r0] | 91 | 1: strd r2, r3, [r0] |
84 | ALT_SMP(W(nop)) | 92 | ALT_SMP(W(nop)) |
85 | ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte | 93 | ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte |
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 195731d3813b..3db2c2f04a30 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
@@ -169,9 +169,31 @@ ENDPROC(cpu_pj4b_do_idle) | |||
169 | globl_equ cpu_pj4b_do_idle, cpu_v7_do_idle | 169 | globl_equ cpu_pj4b_do_idle, cpu_v7_do_idle |
170 | #endif | 170 | #endif |
171 | globl_equ cpu_pj4b_dcache_clean_area, cpu_v7_dcache_clean_area | 171 | globl_equ cpu_pj4b_dcache_clean_area, cpu_v7_dcache_clean_area |
172 | globl_equ cpu_pj4b_do_suspend, cpu_v7_do_suspend | 172 | #ifdef CONFIG_ARM_CPU_SUSPEND |
173 | globl_equ cpu_pj4b_do_resume, cpu_v7_do_resume | 173 | ENTRY(cpu_pj4b_do_suspend) |
174 | globl_equ cpu_pj4b_suspend_size, cpu_v7_suspend_size | 174 | stmfd sp!, {r6 - r10} |
175 | mrc p15, 1, r6, c15, c1, 0 @ save CP15 - extra features | ||
176 | mrc p15, 1, r7, c15, c2, 0 @ save CP15 - Aux Func Modes Ctrl 0 | ||
177 | mrc p15, 1, r8, c15, c1, 2 @ save CP15 - Aux Debug Modes Ctrl 2 | ||
178 | mrc p15, 1, r9, c15, c1, 1 @ save CP15 - Aux Debug Modes Ctrl 1 | ||
179 | mrc p15, 0, r10, c9, c14, 0 @ save CP15 - PMC | ||
180 | stmia r0!, {r6 - r10} | ||
181 | ldmfd sp!, {r6 - r10} | ||
182 | b cpu_v7_do_suspend | ||
183 | ENDPROC(cpu_pj4b_do_suspend) | ||
184 | |||
185 | ENTRY(cpu_pj4b_do_resume) | ||
186 | ldmia r0!, {r6 - r10} | ||
187 | mcr p15, 1, r6, c15, c1, 0 @ restore CP15 - extra features | ||
188 | mcr p15, 1, r7, c15, c2, 0 @ restore CP15 - Aux Func Modes Ctrl 0 | ||
189 | mcr p15, 1, r8, c15, c1, 2 @ restore CP15 - Aux Debug Modes Ctrl 2 | ||
190 | mcr p15, 1, r9, c15, c1, 1 @ restore CP15 - Aux Debug Modes Ctrl 1 | ||
191 | mcr p15, 0, r10, c9, c14, 0 @ restore CP15 - PMC | ||
192 | b cpu_v7_do_resume | ||
193 | ENDPROC(cpu_pj4b_do_resume) | ||
194 | #endif | ||
195 | .globl cpu_pj4b_suspend_size | ||
196 | .equ cpu_pj4b_suspend_size, 4 * 14 | ||
175 | 197 | ||
176 | #endif | 198 | #endif |
177 | 199 | ||
@@ -194,6 +216,7 @@ __v7_cr7mp_setup: | |||
194 | __v7_ca7mp_setup: | 216 | __v7_ca7mp_setup: |
195 | __v7_ca12mp_setup: | 217 | __v7_ca12mp_setup: |
196 | __v7_ca15mp_setup: | 218 | __v7_ca15mp_setup: |
219 | __v7_ca17mp_setup: | ||
197 | mov r10, #0 | 220 | mov r10, #0 |
198 | 1: | 221 | 1: |
199 | #ifdef CONFIG_SMP | 222 | #ifdef CONFIG_SMP |
@@ -505,6 +528,16 @@ __v7_ca15mp_proc_info: | |||
505 | .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info | 528 | .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info |
506 | 529 | ||
507 | /* | 530 | /* |
531 | * ARM Ltd. Cortex A17 processor. | ||
532 | */ | ||
533 | .type __v7_ca17mp_proc_info, #object | ||
534 | __v7_ca17mp_proc_info: | ||
535 | .long 0x410fc0e0 | ||
536 | .long 0xff0ffff0 | ||
537 | __v7_proc __v7_ca17mp_setup | ||
538 | .size __v7_ca17mp_proc_info, . - __v7_ca17mp_proc_info | ||
539 | |||
540 | /* | ||
508 | * Qualcomm Inc. Krait processors. | 541 | * Qualcomm Inc. Krait processors. |
509 | */ | 542 | */ |
510 | .type __krait_proc_info, #object | 543 | .type __krait_proc_info, #object |
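Editor's note: the new Cortex-A17 entry follows the usual proc_info convention, where the boot code masks the CPU's MIDR with the second word and compares the result against the first, so the variant and revision fields are ignored. A quick standalone check using a made-up Cortex-A17 r2p1 MIDR:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t val  = 0x410fc0e0;	/* implementer 0x41 (ARM), part 0xC0E */
	uint32_t mask = 0xff0ffff0;	/* ignore variant [23:20] and revision [3:0] */
	uint32_t midr = 0x412fc0e1;	/* hypothetical Cortex-A17 r2p1 */

	printf("%s\n", (midr & mask) == val ? "match" : "no match");
	return 0;
}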
diff --git a/arch/arm/plat-samsung/s5p-sleep.S b/arch/arm/plat-samsung/s5p-sleep.S index c5001659bdf8..25c68ceb9e2b 100644 --- a/arch/arm/plat-samsung/s5p-sleep.S +++ b/arch/arm/plat-samsung/s5p-sleep.S | |||
@@ -22,7 +22,6 @@ | |||
22 | */ | 22 | */ |
23 | 23 | ||
24 | #include <linux/linkage.h> | 24 | #include <linux/linkage.h> |
25 | #include <asm/asm-offsets.h> | ||
26 | 25 | ||
27 | .data | 26 | .data |
28 | .align | 27 | .align |
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S index f0759e70fb86..fe6ca574d093 100644 --- a/arch/arm/vfp/entry.S +++ b/arch/arm/vfp/entry.S | |||
@@ -22,11 +22,10 @@ | |||
22 | @ r9 = normal "successful" return address | 22 | @ r9 = normal "successful" return address |
23 | @ r10 = this threads thread_info structure | 23 | @ r10 = this threads thread_info structure |
24 | @ lr = unrecognised instruction return address | 24 | @ lr = unrecognised instruction return address |
25 | @ IRQs disabled. | 25 | @ IRQs enabled. |
26 | @ | 26 | @ |
27 | ENTRY(do_vfp) | 27 | ENTRY(do_vfp) |
28 | inc_preempt_count r10, r4 | 28 | inc_preempt_count r10, r4 |
29 | enable_irq | ||
30 | ldr r4, .LCvfp | 29 | ldr r4, .LCvfp |
31 | ldr r11, [r10, #TI_CPU] @ CPU number | 30 | ldr r11, [r10, #TI_CPU] @ CPU number |
32 | add r10, r10, #TI_VFPSTATE @ r10 = workspace | 31 | add r10, r10, #TI_VFPSTATE @ r10 = workspace |