aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/arm/memory.txt9
-rw-r--r--Documentation/devicetree/bindings/arm/pmu.txt1
-rw-r--r--arch/arm/Kconfig66
-rw-r--r--arch/arm/boot/compressed/atags_to_fdt.c2
-rw-r--r--arch/arm/boot/dts/marco.dtsi2
-rw-r--r--arch/arm/boot/dts/prima2.dtsi2
-rw-r--r--arch/arm/common/bL_switcher.c10
-rw-r--r--arch/arm/common/mcpm_entry.c6
-rw-r--r--arch/arm/common/mcpm_platsmp.c2
-rw-r--r--arch/arm/include/asm/Kbuild1
-rw-r--r--arch/arm/include/asm/assembler.h2
-rw-r--r--arch/arm/include/asm/cacheflush.h4
-rw-r--r--arch/arm/include/asm/cputype.h1
-rw-r--r--arch/arm/include/asm/fixmap.h21
-rw-r--r--arch/arm/include/asm/glue-df.h8
-rw-r--r--arch/arm/include/asm/hardware/cache-l2x0.h104
-rw-r--r--arch/arm/include/asm/highmem.h1
-rw-r--r--arch/arm/include/asm/io.h6
-rw-r--r--arch/arm/include/asm/mach/arch.h7
-rw-r--r--arch/arm/include/asm/mcpm.h8
-rw-r--r--arch/arm/include/asm/memblock.h3
-rw-r--r--arch/arm/include/asm/memory.h2
-rw-r--r--arch/arm/include/asm/outercache.h66
-rw-r--r--arch/arm/include/asm/setup.h28
-rw-r--r--arch/arm/include/asm/uaccess.h3
-rw-r--r--arch/arm/kernel/Makefile1
-rw-r--r--arch/arm/kernel/atags_parse.c5
-rw-r--r--arch/arm/kernel/devtree.c5
-rw-r--r--arch/arm/kernel/entry-armv.S14
-rw-r--r--arch/arm/kernel/entry-header.S4
-rw-r--r--arch/arm/kernel/ftrace.c13
-rw-r--r--arch/arm/kernel/head.S2
-rw-r--r--arch/arm/kernel/hibernate.c107
-rw-r--r--arch/arm/kernel/irq.c12
-rw-r--r--arch/arm/kernel/iwmmxt.S16
-rw-r--r--arch/arm/kernel/perf_event_cpu.c1
-rw-r--r--arch/arm/kernel/perf_event_v7.c12
-rw-r--r--arch/arm/kernel/setup.c30
-rw-r--r--arch/arm/kernel/sleep.S5
-rw-r--r--arch/arm/kernel/stacktrace.c60
-rw-r--r--arch/arm/kernel/topology.c8
-rw-r--r--arch/arm/kernel/unwind.c2
-rw-r--r--arch/arm/kernel/uprobes.c20
-rw-r--r--arch/arm/mach-bcm/bcm_5301x.c9
-rw-r--r--arch/arm/mach-berlin/berlin.c17
-rw-r--r--arch/arm/mach-clps711x/board-clep7312.c7
-rw-r--r--arch/arm/mach-clps711x/board-edb7211.c10
-rw-r--r--arch/arm/mach-clps711x/board-p720t.c2
-rw-r--r--arch/arm/mach-cns3xxx/core.c10
-rw-r--r--arch/arm/mach-ep93xx/crunch-bits.S14
-rw-r--r--arch/arm/mach-exynos/common.h1
-rw-r--r--arch/arm/mach-exynos/exynos.c21
-rw-r--r--arch/arm/mach-exynos/sleep.S30
-rw-r--r--arch/arm/mach-footbridge/cats-hw.c2
-rw-r--r--arch/arm/mach-footbridge/netwinder-hw.c2
-rw-r--r--arch/arm/mach-highbank/highbank.c21
-rw-r--r--arch/arm/mach-imx/mach-vf610.c9
-rw-r--r--arch/arm/mach-imx/suspend-imx6.S24
-rw-r--r--arch/arm/mach-imx/system.c8
-rw-r--r--arch/arm/mach-msm/board-halibut.c6
-rw-r--r--arch/arm/mach-msm/board-mahimahi.c13
-rw-r--r--arch/arm/mach-msm/board-msm7x30.c3
-rw-r--r--arch/arm/mach-msm/board-sapphire.c13
-rw-r--r--arch/arm/mach-msm/board-trout.c8
-rw-r--r--arch/arm/mach-mvebu/board-v7.c9
-rw-r--r--arch/arm/mach-nomadik/cpu-8815.c13
-rw-r--r--arch/arm/mach-omap2/Kconfig1
-rw-r--r--arch/arm/mach-omap2/common.h1
-rw-r--r--arch/arm/mach-omap2/io.c2
-rw-r--r--arch/arm/mach-omap2/omap-mpuss-lowpower.c16
-rw-r--r--arch/arm/mach-omap2/omap4-common.c86
-rw-r--r--arch/arm/mach-orion5x/common.c3
-rw-r--r--arch/arm/mach-orion5x/common.h3
-rw-r--r--arch/arm/mach-prima2/Makefile1
-rw-r--r--arch/arm/mach-prima2/common.c6
-rw-r--r--arch/arm/mach-prima2/l2x0.c49
-rw-r--r--arch/arm/mach-prima2/pm.c1
-rw-r--r--arch/arm/mach-pxa/cm-x300.c3
-rw-r--r--arch/arm/mach-pxa/corgi.c10
-rw-r--r--arch/arm/mach-pxa/eseries.c9
-rw-r--r--arch/arm/mach-pxa/poodle.c8
-rw-r--r--arch/arm/mach-pxa/spitz.c8
-rw-r--r--arch/arm/mach-pxa/tosa.c8
-rw-r--r--arch/arm/mach-realview/core.c11
-rw-r--r--arch/arm/mach-realview/core.h3
-rw-r--r--arch/arm/mach-realview/realview_eb.c9
-rw-r--r--arch/arm/mach-realview/realview_pb1176.c16
-rw-r--r--arch/arm/mach-realview/realview_pb11mp.c9
-rw-r--r--arch/arm/mach-realview/realview_pbx.c21
-rw-r--r--arch/arm/mach-rockchip/rockchip.c9
-rw-r--r--arch/arm/mach-s3c24xx/mach-smdk2413.c8
-rw-r--r--arch/arm/mach-s3c24xx/mach-vstms.c8
-rw-r--r--arch/arm/mach-sa1100/assabet.c2
-rw-r--r--arch/arm/mach-shmobile/board-armadillo800eva-reference.c4
-rw-r--r--arch/arm/mach-shmobile/board-armadillo800eva.c4
-rw-r--r--arch/arm/mach-shmobile/board-kzm9g-reference.c4
-rw-r--r--arch/arm/mach-shmobile/board-kzm9g.c4
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7778.c4
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7779.c4
-rw-r--r--arch/arm/mach-socfpga/socfpga.c9
-rw-r--r--arch/arm/mach-spear/platsmp.c19
-rw-r--r--arch/arm/mach-spear/spear13xx.c8
-rw-r--r--arch/arm/mach-sti/board-dt.c27
-rw-r--r--arch/arm/mach-tegra/pm.h2
-rw-r--r--arch/arm/mach-tegra/reset-handler.S11
-rw-r--r--arch/arm/mach-tegra/sleep.h31
-rw-r--r--arch/arm/mach-tegra/tegra.c32
-rw-r--r--arch/arm/mach-ux500/cache-l2x0.c32
-rw-r--r--arch/arm/mach-vexpress/ct-ca9x4.c28
-rw-r--r--arch/arm/mach-vexpress/tc2_pm.c4
-rw-r--r--arch/arm/mach-vexpress/v2m.c3
-rw-r--r--arch/arm/mach-zynq/common.c8
-rw-r--r--arch/arm/mm/Kconfig51
-rw-r--r--arch/arm/mm/Makefile3
-rw-r--r--arch/arm/mm/cache-feroceon-l2.c1
-rw-r--r--arch/arm/mm/cache-l2x0.c1498
-rw-r--r--arch/arm/mm/cache-v7.S12
-rw-r--r--arch/arm/mm/dma-mapping.c7
-rw-r--r--arch/arm/mm/flush.c33
-rw-r--r--arch/arm/mm/highmem.c33
-rw-r--r--arch/arm/mm/init.c72
-rw-r--r--arch/arm/mm/ioremap.c9
-rw-r--r--arch/arm/mm/l2c-common.c20
-rw-r--r--arch/arm/mm/l2c-l2x0-resume.S58
-rw-r--r--arch/arm/mm/mmu.c123
-rw-r--r--arch/arm/mm/nommu.c66
-rw-r--r--arch/arm/mm/proc-v7-3level.S18
-rw-r--r--arch/arm/mm/proc-v7.S39
-rw-r--r--arch/arm/mm/proc-v7m.S8
-rw-r--r--arch/arm/plat-samsung/s5p-sleep.S1
-rw-r--r--arch/arm/vfp/entry.S3
-rw-r--r--include/linux/amba/bus.h1
-rw-r--r--include/linux/suspend.h2
-rw-r--r--include/linux/uprobes.h3
-rw-r--r--kernel/events/uprobes.c25
135 files changed, 2098 insertions, 1471 deletions
diff --git a/Documentation/arm/memory.txt b/Documentation/arm/memory.txt
index 4bfb9ffbdbc1..38dc06d0a791 100644
--- a/Documentation/arm/memory.txt
+++ b/Documentation/arm/memory.txt
@@ -41,16 +41,9 @@ fffe8000 fffeffff DTCM mapping area for platforms with
41fffe0000 fffe7fff ITCM mapping area for platforms with 41fffe0000 fffe7fff ITCM mapping area for platforms with
42 ITCM mounted inside the CPU. 42 ITCM mounted inside the CPU.
43 43
44fff00000 fffdffff Fixmap mapping region. Addresses provided 44ffc00000 ffdfffff Fixmap mapping region. Addresses provided
45 by fix_to_virt() will be located here. 45 by fix_to_virt() will be located here.
46 46
47ffc00000 ffefffff DMA memory mapping region. Memory returned
48 by the dma_alloc_xxx functions will be
49 dynamically mapped here.
50
51ff000000 ffbfffff Reserved for future expansion of DMA
52 mapping region.
53
54fee00000 feffffff Mapping of PCI I/O space. This is a static 47fee00000 feffffff Mapping of PCI I/O space. This is a static
55 mapping within the vmalloc space. 48 mapping within the vmalloc space.
56 49
diff --git a/Documentation/devicetree/bindings/arm/pmu.txt b/Documentation/devicetree/bindings/arm/pmu.txt
index fe5cef8976cb..75ef91d08f3b 100644
--- a/Documentation/devicetree/bindings/arm/pmu.txt
+++ b/Documentation/devicetree/bindings/arm/pmu.txt
@@ -8,6 +8,7 @@ Required properties:
8 8
9- compatible : should be one of 9- compatible : should be one of
10 "arm,armv8-pmuv3" 10 "arm,armv8-pmuv3"
11 "arm,cortex-a17-pmu"
11 "arm,cortex-a15-pmu" 12 "arm,cortex-a15-pmu"
12 "arm,cortex-a12-pmu" 13 "arm,cortex-a12-pmu"
13 "arm,cortex-a9-pmu" 14 "arm,cortex-a9-pmu"
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index db3c5414223e..8615dfa604c4 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -165,12 +165,9 @@ config TRACE_IRQFLAGS_SUPPORT
165 bool 165 bool
166 default y 166 default y
167 167
168config RWSEM_GENERIC_SPINLOCK
169 bool
170 default y
171
172config RWSEM_XCHGADD_ALGORITHM 168config RWSEM_XCHGADD_ALGORITHM
173 bool 169 bool
170 default y
174 171
175config ARCH_HAS_ILOG2_U32 172config ARCH_HAS_ILOG2_U32
176 bool 173 bool
@@ -1105,11 +1102,6 @@ source "arch/arm/firmware/Kconfig"
1105 1102
1106source arch/arm/mm/Kconfig 1103source arch/arm/mm/Kconfig
1107 1104
1108config ARM_NR_BANKS
1109 int
1110 default 16 if ARCH_EP93XX
1111 default 8
1112
1113config IWMMXT 1105config IWMMXT
1114 bool "Enable iWMMXt support" 1106 bool "Enable iWMMXt support"
1115 depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4 || CPU_PJ4B 1107 depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4 || CPU_PJ4B
@@ -1230,19 +1222,6 @@ config ARM_ERRATA_742231
1230 register of the Cortex-A9 which reduces the linefill issuing 1222 register of the Cortex-A9 which reduces the linefill issuing
1231 capabilities of the processor. 1223 capabilities of the processor.
1232 1224
1233config PL310_ERRATA_588369
1234 bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
1235 depends on CACHE_L2X0
1236 help
1237 The PL310 L2 cache controller implements three types of Clean &
1238 Invalidate maintenance operations: by Physical Address
1239 (offset 0x7F0), by Index/Way (0x7F8) and by Way (0x7FC).
1240 They are architecturally defined to behave as the execution of a
1241 clean operation followed immediately by an invalidate operation,
1242 both performing to the same memory location. This functionality
1243 is not correctly implemented in PL310 as clean lines are not
1244 invalidated as a result of these operations.
1245
1246config ARM_ERRATA_643719 1225config ARM_ERRATA_643719
1247 bool "ARM errata: LoUIS bit field in CLIDR register is incorrect" 1226 bool "ARM errata: LoUIS bit field in CLIDR register is incorrect"
1248 depends on CPU_V7 && SMP 1227 depends on CPU_V7 && SMP
@@ -1265,17 +1244,6 @@ config ARM_ERRATA_720789
1265 tables. The workaround changes the TLB flushing routines to invalidate 1244 tables. The workaround changes the TLB flushing routines to invalidate
1266 entries regardless of the ASID. 1245 entries regardless of the ASID.
1267 1246
1268config PL310_ERRATA_727915
1269 bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption"
1270 depends on CACHE_L2X0
1271 help
1272 PL310 implements the Clean & Invalidate by Way L2 cache maintenance
1273 operation (offset 0x7FC). This operation runs in background so that
1274 PL310 can handle normal accesses while it is in progress. Under very
1275 rare circumstances, due to this erratum, write data can be lost when
1276 PL310 treats a cacheable write transaction during a Clean &
1277 Invalidate by Way operation.
1278
1279config ARM_ERRATA_743622 1247config ARM_ERRATA_743622
1280 bool "ARM errata: Faulty hazard checking in the Store Buffer may lead to data corruption" 1248 bool "ARM errata: Faulty hazard checking in the Store Buffer may lead to data corruption"
1281 depends on CPU_V7 1249 depends on CPU_V7
@@ -1301,21 +1269,6 @@ config ARM_ERRATA_751472
1301 operation is received by a CPU before the ICIALLUIS has completed, 1269 operation is received by a CPU before the ICIALLUIS has completed,
1302 potentially leading to corrupted entries in the cache or TLB. 1270 potentially leading to corrupted entries in the cache or TLB.
1303 1271
1304config PL310_ERRATA_753970
1305 bool "PL310 errata: cache sync operation may be faulty"
1306 depends on CACHE_PL310
1307 help
1308 This option enables the workaround for the 753970 PL310 (r3p0) erratum.
1309
1310 Under some condition the effect of cache sync operation on
1311 the store buffer still remains when the operation completes.
1312 This means that the store buffer is always asked to drain and
1313 this prevents it from merging any further writes. The workaround
1314 is to replace the normal offset of cache sync operation (0x730)
1315 by another offset targeting an unmapped PL310 register 0x740.
1316 This has the same effect as the cache sync operation: store buffer
1317 drain and waiting for all buffers empty.
1318
1319config ARM_ERRATA_754322 1272config ARM_ERRATA_754322
1320 bool "ARM errata: possible faulty MMU translations following an ASID switch" 1273 bool "ARM errata: possible faulty MMU translations following an ASID switch"
1321 depends on CPU_V7 1274 depends on CPU_V7
@@ -1364,18 +1317,6 @@ config ARM_ERRATA_764369
1364 relevant cache maintenance functions and sets a specific bit 1317 relevant cache maintenance functions and sets a specific bit
1365 in the diagnostic control register of the SCU. 1318 in the diagnostic control register of the SCU.
1366 1319
1367config PL310_ERRATA_769419
1368 bool "PL310 errata: no automatic Store Buffer drain"
1369 depends on CACHE_L2X0
1370 help
1371 On revisions of the PL310 prior to r3p2, the Store Buffer does
1372 not automatically drain. This can cause normal, non-cacheable
1373 writes to be retained when the memory system is idle, leading
1374 to suboptimal I/O performance for drivers using coherent DMA.
1375 This option adds a write barrier to the cpu_idle loop so that,
1376 on systems with an outer cache, the store buffer is drained
1377 explicitly.
1378
1379config ARM_ERRATA_775420 1320config ARM_ERRATA_775420
1380 bool "ARM errata: A data cache maintenance operation which aborts, might lead to deadlock" 1321 bool "ARM errata: A data cache maintenance operation which aborts, might lead to deadlock"
1381 depends on CPU_V7 1322 depends on CPU_V7
@@ -2295,6 +2236,11 @@ config ARCH_SUSPEND_POSSIBLE
2295config ARM_CPU_SUSPEND 2236config ARM_CPU_SUSPEND
2296 def_bool PM_SLEEP 2237 def_bool PM_SLEEP
2297 2238
2239config ARCH_HIBERNATION_POSSIBLE
2240 bool
2241 depends on MMU
2242 default y if ARCH_SUSPEND_POSSIBLE
2243
2298endmenu 2244endmenu
2299 2245
2300source "net/Kconfig" 2246source "net/Kconfig"
diff --git a/arch/arm/boot/compressed/atags_to_fdt.c b/arch/arm/boot/compressed/atags_to_fdt.c
index d1153c8a765a..9448aa0c6686 100644
--- a/arch/arm/boot/compressed/atags_to_fdt.c
+++ b/arch/arm/boot/compressed/atags_to_fdt.c
@@ -7,6 +7,8 @@
7#define do_extend_cmdline 0 7#define do_extend_cmdline 0
8#endif 8#endif
9 9
10#define NR_BANKS 16
11
10static int node_offset(void *fdt, const char *node_path) 12static int node_offset(void *fdt, const char *node_path)
11{ 13{
12 int offset = fdt_path_offset(fdt, node_path); 14 int offset = fdt_path_offset(fdt, node_path);
diff --git a/arch/arm/boot/dts/marco.dtsi b/arch/arm/boot/dts/marco.dtsi
index 0c9647d28765..fb354225740a 100644
--- a/arch/arm/boot/dts/marco.dtsi
+++ b/arch/arm/boot/dts/marco.dtsi
@@ -36,7 +36,7 @@
36 ranges = <0x40000000 0x40000000 0xa0000000>; 36 ranges = <0x40000000 0x40000000 0xa0000000>;
37 37
38 l2-cache-controller@c0030000 { 38 l2-cache-controller@c0030000 {
39 compatible = "sirf,marco-pl310-cache", "arm,pl310-cache"; 39 compatible = "arm,pl310-cache";
40 reg = <0xc0030000 0x1000>; 40 reg = <0xc0030000 0x1000>;
41 interrupts = <0 59 0>; 41 interrupts = <0 59 0>;
42 arm,tag-latency = <1 1 1>; 42 arm,tag-latency = <1 1 1>;
diff --git a/arch/arm/boot/dts/prima2.dtsi b/arch/arm/boot/dts/prima2.dtsi
index 1e82571d6823..0d6588d549bf 100644
--- a/arch/arm/boot/dts/prima2.dtsi
+++ b/arch/arm/boot/dts/prima2.dtsi
@@ -48,7 +48,7 @@
48 ranges = <0x40000000 0x40000000 0x80000000>; 48 ranges = <0x40000000 0x40000000 0x80000000>;
49 49
50 l2-cache-controller@80040000 { 50 l2-cache-controller@80040000 {
51 compatible = "arm,pl310-cache", "sirf,prima2-pl310-cache"; 51 compatible = "arm,pl310-cache";
52 reg = <0x80040000 0x1000>; 52 reg = <0x80040000 0x1000>;
53 interrupts = <59>; 53 interrupts = <59>;
54 arm,tag-latency = <1 1 1>; 54 arm,tag-latency = <1 1 1>;
diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c
index f01c0ee0c87e..490f3dced749 100644
--- a/arch/arm/common/bL_switcher.c
+++ b/arch/arm/common/bL_switcher.c
@@ -433,8 +433,12 @@ static void bL_switcher_restore_cpus(void)
433{ 433{
434 int i; 434 int i;
435 435
436 for_each_cpu(i, &bL_switcher_removed_logical_cpus) 436 for_each_cpu(i, &bL_switcher_removed_logical_cpus) {
437 cpu_up(i); 437 struct device *cpu_dev = get_cpu_device(i);
438 int ret = device_online(cpu_dev);
439 if (ret)
440 dev_err(cpu_dev, "switcher: unable to restore CPU\n");
441 }
438} 442}
439 443
440static int bL_switcher_halve_cpus(void) 444static int bL_switcher_halve_cpus(void)
@@ -521,7 +525,7 @@ static int bL_switcher_halve_cpus(void)
521 continue; 525 continue;
522 } 526 }
523 527
524 ret = cpu_down(i); 528 ret = device_offline(get_cpu_device(i));
525 if (ret) { 529 if (ret) {
526 bL_switcher_restore_cpus(); 530 bL_switcher_restore_cpus();
527 return ret; 531 return ret;
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index 86fd60fefbc9..f91136ab447e 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -106,14 +106,14 @@ void mcpm_cpu_power_down(void)
106 BUG(); 106 BUG();
107} 107}
108 108
109int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster) 109int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
110{ 110{
111 int ret; 111 int ret;
112 112
113 if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down_finish)) 113 if (WARN_ON_ONCE(!platform_ops || !platform_ops->wait_for_powerdown))
114 return -EUNATCH; 114 return -EUNATCH;
115 115
116 ret = platform_ops->power_down_finish(cpu, cluster); 116 ret = platform_ops->wait_for_powerdown(cpu, cluster);
117 if (ret) 117 if (ret)
118 pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n", 118 pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n",
119 __func__, cpu, cluster, ret); 119 __func__, cpu, cluster, ret);
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
index 177251a4dd9a..92e54d7c6f46 100644
--- a/arch/arm/common/mcpm_platsmp.c
+++ b/arch/arm/common/mcpm_platsmp.c
@@ -62,7 +62,7 @@ static int mcpm_cpu_kill(unsigned int cpu)
62 62
63 cpu_to_pcpu(cpu, &pcpu, &pcluster); 63 cpu_to_pcpu(cpu, &pcpu, &pcluster);
64 64
65 return !mcpm_cpu_power_down_finish(pcpu, pcluster); 65 return !mcpm_wait_for_cpu_powerdown(pcpu, pcluster);
66} 66}
67 67
68static int mcpm_cpu_disable(unsigned int cpu) 68static int mcpm_cpu_disable(unsigned int cpu)
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 23e728ecf8ab..f5a357601983 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -21,6 +21,7 @@ generic-y += parport.h
21generic-y += poll.h 21generic-y += poll.h
22generic-y += preempt.h 22generic-y += preempt.h
23generic-y += resource.h 23generic-y += resource.h
24generic-y += rwsem.h
24generic-y += sections.h 25generic-y += sections.h
25generic-y += segment.h 26generic-y += segment.h
26generic-y += sembuf.h 27generic-y += sembuf.h
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index b974184f9941..57f0584e8d97 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -312,7 +312,7 @@
312 * you cannot return to the original mode. 312 * you cannot return to the original mode.
313 */ 313 */
314.macro safe_svcmode_maskall reg:req 314.macro safe_svcmode_maskall reg:req
315#if __LINUX_ARM_ARCH__ >= 6 315#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
316 mrs \reg , cpsr 316 mrs \reg , cpsr
317 eor \reg, \reg, #HYP_MODE 317 eor \reg, \reg, #HYP_MODE
318 tst \reg, #MODE_MASK 318 tst \reg, #MODE_MASK
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 8b8b61685a34..fd43f7f55b70 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -212,7 +212,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
212static inline void __flush_icache_all(void) 212static inline void __flush_icache_all(void)
213{ 213{
214 __flush_icache_preferred(); 214 __flush_icache_preferred();
215 dsb(); 215 dsb(ishst);
216} 216}
217 217
218/* 218/*
@@ -487,4 +487,6 @@ int set_memory_rw(unsigned long addr, int numpages);
487int set_memory_x(unsigned long addr, int numpages); 487int set_memory_x(unsigned long addr, int numpages);
488int set_memory_nx(unsigned long addr, int numpages); 488int set_memory_nx(unsigned long addr, int numpages);
489 489
490void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
491 void *kaddr, unsigned long len);
490#endif 492#endif
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 4764344367d4..8c2b7321a478 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -72,6 +72,7 @@
72#define ARM_CPU_PART_CORTEX_A15 0xC0F0 72#define ARM_CPU_PART_CORTEX_A15 0xC0F0
73#define ARM_CPU_PART_CORTEX_A7 0xC070 73#define ARM_CPU_PART_CORTEX_A7 0xC070
74#define ARM_CPU_PART_CORTEX_A12 0xC0D0 74#define ARM_CPU_PART_CORTEX_A12 0xC0D0
75#define ARM_CPU_PART_CORTEX_A17 0xC0E0
75 76
76#define ARM_CPU_XSCALE_ARCH_MASK 0xe000 77#define ARM_CPU_XSCALE_ARCH_MASK 0xe000
77#define ARM_CPU_XSCALE_ARCH_V1 0x2000 78#define ARM_CPU_XSCALE_ARCH_V1 0x2000
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index bbae919bceb4..74124b0d0d79 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -1,24 +1,11 @@
1#ifndef _ASM_FIXMAP_H 1#ifndef _ASM_FIXMAP_H
2#define _ASM_FIXMAP_H 2#define _ASM_FIXMAP_H
3 3
4/* 4#define FIXADDR_START 0xffc00000UL
5 * Nothing too fancy for now. 5#define FIXADDR_TOP 0xffe00000UL
6 *
7 * On ARM we already have well known fixed virtual addresses imposed by
8 * the architecture such as the vector page which is located at 0xffff0000,
9 * therefore a second level page table is already allocated covering
10 * 0xfff00000 upwards.
11 *
12 * The cache flushing code in proc-xscale.S uses the virtual area between
13 * 0xfffe0000 and 0xfffeffff.
14 */
15
16#define FIXADDR_START 0xfff00000UL
17#define FIXADDR_TOP 0xfffe0000UL
18#define FIXADDR_SIZE (FIXADDR_TOP - FIXADDR_START) 6#define FIXADDR_SIZE (FIXADDR_TOP - FIXADDR_START)
19 7
20#define FIX_KMAP_BEGIN 0 8#define FIX_KMAP_NR_PTES (FIXADDR_SIZE >> PAGE_SHIFT)
21#define FIX_KMAP_END (FIXADDR_SIZE >> PAGE_SHIFT)
22 9
23#define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT)) 10#define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT))
24#define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT) 11#define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT)
@@ -27,7 +14,7 @@ extern void __this_fixmap_does_not_exist(void);
27 14
28static inline unsigned long fix_to_virt(const unsigned int idx) 15static inline unsigned long fix_to_virt(const unsigned int idx)
29{ 16{
30 if (idx >= FIX_KMAP_END) 17 if (idx >= FIX_KMAP_NR_PTES)
31 __this_fixmap_does_not_exist(); 18 __this_fixmap_does_not_exist();
32 return __fix_to_virt(idx); 19 return __fix_to_virt(idx);
33} 20}
diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h
index 6b70f1b46a6e..04e18b656659 100644
--- a/arch/arm/include/asm/glue-df.h
+++ b/arch/arm/include/asm/glue-df.h
@@ -31,14 +31,6 @@
31#undef CPU_DABORT_HANDLER 31#undef CPU_DABORT_HANDLER
32#undef MULTI_DABORT 32#undef MULTI_DABORT
33 33
34#if defined(CONFIG_CPU_ARM710)
35# ifdef CPU_DABORT_HANDLER
36# define MULTI_DABORT 1
37# else
38# define CPU_DABORT_HANDLER cpu_arm7_data_abort
39# endif
40#endif
41
42#ifdef CONFIG_CPU_ABRT_EV4 34#ifdef CONFIG_CPU_ABRT_EV4
43# ifdef CPU_DABORT_HANDLER 35# ifdef CPU_DABORT_HANDLER
44# define MULTI_DABORT 1 36# define MULTI_DABORT 1
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 6795ff743b3d..3a5ec1c25659 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -26,8 +26,8 @@
26#define L2X0_CACHE_TYPE 0x004 26#define L2X0_CACHE_TYPE 0x004
27#define L2X0_CTRL 0x100 27#define L2X0_CTRL 0x100
28#define L2X0_AUX_CTRL 0x104 28#define L2X0_AUX_CTRL 0x104
29#define L2X0_TAG_LATENCY_CTRL 0x108 29#define L310_TAG_LATENCY_CTRL 0x108
30#define L2X0_DATA_LATENCY_CTRL 0x10C 30#define L310_DATA_LATENCY_CTRL 0x10C
31#define L2X0_EVENT_CNT_CTRL 0x200 31#define L2X0_EVENT_CNT_CTRL 0x200
32#define L2X0_EVENT_CNT1_CFG 0x204 32#define L2X0_EVENT_CNT1_CFG 0x204
33#define L2X0_EVENT_CNT0_CFG 0x208 33#define L2X0_EVENT_CNT0_CFG 0x208
@@ -54,53 +54,93 @@
54#define L2X0_LOCKDOWN_WAY_D_BASE 0x900 54#define L2X0_LOCKDOWN_WAY_D_BASE 0x900
55#define L2X0_LOCKDOWN_WAY_I_BASE 0x904 55#define L2X0_LOCKDOWN_WAY_I_BASE 0x904
56#define L2X0_LOCKDOWN_STRIDE 0x08 56#define L2X0_LOCKDOWN_STRIDE 0x08
57#define L2X0_ADDR_FILTER_START 0xC00 57#define L310_ADDR_FILTER_START 0xC00
58#define L2X0_ADDR_FILTER_END 0xC04 58#define L310_ADDR_FILTER_END 0xC04
59#define L2X0_TEST_OPERATION 0xF00 59#define L2X0_TEST_OPERATION 0xF00
60#define L2X0_LINE_DATA 0xF10 60#define L2X0_LINE_DATA 0xF10
61#define L2X0_LINE_TAG 0xF30 61#define L2X0_LINE_TAG 0xF30
62#define L2X0_DEBUG_CTRL 0xF40 62#define L2X0_DEBUG_CTRL 0xF40
63#define L2X0_PREFETCH_CTRL 0xF60 63#define L310_PREFETCH_CTRL 0xF60
64#define L2X0_POWER_CTRL 0xF80 64#define L310_POWER_CTRL 0xF80
65#define L2X0_DYNAMIC_CLK_GATING_EN (1 << 1) 65#define L310_DYNAMIC_CLK_GATING_EN (1 << 1)
66#define L2X0_STNDBY_MODE_EN (1 << 0) 66#define L310_STNDBY_MODE_EN (1 << 0)
67 67
68/* Registers shifts and masks */ 68/* Registers shifts and masks */
69#define L2X0_CACHE_ID_PART_MASK (0xf << 6) 69#define L2X0_CACHE_ID_PART_MASK (0xf << 6)
70#define L2X0_CACHE_ID_PART_L210 (1 << 6) 70#define L2X0_CACHE_ID_PART_L210 (1 << 6)
71#define L2X0_CACHE_ID_PART_L220 (2 << 6)
71#define L2X0_CACHE_ID_PART_L310 (3 << 6) 72#define L2X0_CACHE_ID_PART_L310 (3 << 6)
72#define L2X0_CACHE_ID_RTL_MASK 0x3f 73#define L2X0_CACHE_ID_RTL_MASK 0x3f
73#define L2X0_CACHE_ID_RTL_R0P0 0x0 74#define L210_CACHE_ID_RTL_R0P2_02 0x00
74#define L2X0_CACHE_ID_RTL_R1P0 0x2 75#define L210_CACHE_ID_RTL_R0P1 0x01
75#define L2X0_CACHE_ID_RTL_R2P0 0x4 76#define L210_CACHE_ID_RTL_R0P2_01 0x02
76#define L2X0_CACHE_ID_RTL_R3P0 0x5 77#define L210_CACHE_ID_RTL_R0P3 0x03
77#define L2X0_CACHE_ID_RTL_R3P1 0x6 78#define L210_CACHE_ID_RTL_R0P4 0x0b
78#define L2X0_CACHE_ID_RTL_R3P2 0x8 79#define L210_CACHE_ID_RTL_R0P5 0x0f
80#define L220_CACHE_ID_RTL_R1P7_01REL0 0x06
81#define L310_CACHE_ID_RTL_R0P0 0x00
82#define L310_CACHE_ID_RTL_R1P0 0x02
83#define L310_CACHE_ID_RTL_R2P0 0x04
84#define L310_CACHE_ID_RTL_R3P0 0x05
85#define L310_CACHE_ID_RTL_R3P1 0x06
86#define L310_CACHE_ID_RTL_R3P1_50REL0 0x07
87#define L310_CACHE_ID_RTL_R3P2 0x08
88#define L310_CACHE_ID_RTL_R3P3 0x09
79 89
80#define L2X0_AUX_CTRL_MASK 0xc0000fff 90/* L2C auxiliary control register - bits common to L2C-210/220/310 */
91#define L2C_AUX_CTRL_WAY_SIZE_SHIFT 17
92#define L2C_AUX_CTRL_WAY_SIZE_MASK (7 << 17)
93#define L2C_AUX_CTRL_WAY_SIZE(n) ((n) << 17)
94#define L2C_AUX_CTRL_EVTMON_ENABLE BIT(20)
95#define L2C_AUX_CTRL_PARITY_ENABLE BIT(21)
96#define L2C_AUX_CTRL_SHARED_OVERRIDE BIT(22)
97/* L2C-210/220 common bits */
81#define L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT 0 98#define L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT 0
82#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK 0x7 99#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK (7 << 0)
83#define L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT 3 100#define L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT 3
84#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (0x7 << 3) 101#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (7 << 3)
85#define L2X0_AUX_CTRL_TAG_LATENCY_SHIFT 6 102#define L2X0_AUX_CTRL_TAG_LATENCY_SHIFT 6
86#define L2X0_AUX_CTRL_TAG_LATENCY_MASK (0x7 << 6) 103#define L2X0_AUX_CTRL_TAG_LATENCY_MASK (7 << 6)
87#define L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT 9 104#define L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT 9
88#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (0x7 << 9) 105#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (7 << 9)
89#define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT 16 106#define L2X0_AUX_CTRL_ASSOC_SHIFT 13
90#define L2X0_AUX_CTRL_WAY_SIZE_SHIFT 17 107#define L2X0_AUX_CTRL_ASSOC_MASK (15 << 13)
91#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x7 << 17) 108/* L2C-210 specific bits */
92#define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT 22 109#define L210_AUX_CTRL_WRAP_DISABLE BIT(12)
93#define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT 26 110#define L210_AUX_CTRL_WA_OVERRIDE BIT(23)
94#define L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT 27 111#define L210_AUX_CTRL_EXCLUSIVE_ABORT BIT(24)
95#define L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT 28 112/* L2C-220 specific bits */
96#define L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT 29 113#define L220_AUX_CTRL_EXCLUSIVE_CACHE BIT(12)
97#define L2X0_AUX_CTRL_EARLY_BRESP_SHIFT 30 114#define L220_AUX_CTRL_FWA_SHIFT 23
115#define L220_AUX_CTRL_FWA_MASK (3 << 23)
116#define L220_AUX_CTRL_NS_LOCKDOWN BIT(26)
117#define L220_AUX_CTRL_NS_INT_CTRL BIT(27)
118/* L2C-310 specific bits */
119#define L310_AUX_CTRL_FULL_LINE_ZERO BIT(0) /* R2P0+ */
120#define L310_AUX_CTRL_HIGHPRIO_SO_DEV BIT(10) /* R2P0+ */
121#define L310_AUX_CTRL_STORE_LIMITATION BIT(11) /* R2P0+ */
122#define L310_AUX_CTRL_EXCLUSIVE_CACHE BIT(12)
123#define L310_AUX_CTRL_ASSOCIATIVITY_16 BIT(16)
124#define L310_AUX_CTRL_CACHE_REPLACE_RR BIT(25) /* R2P0+ */
125#define L310_AUX_CTRL_NS_LOCKDOWN BIT(26)
126#define L310_AUX_CTRL_NS_INT_CTRL BIT(27)
127#define L310_AUX_CTRL_DATA_PREFETCH BIT(28)
128#define L310_AUX_CTRL_INSTR_PREFETCH BIT(29)
129#define L310_AUX_CTRL_EARLY_BRESP BIT(30) /* R2P0+ */
98 130
99#define L2X0_LATENCY_CTRL_SETUP_SHIFT 0 131#define L310_LATENCY_CTRL_SETUP(n) ((n) << 0)
100#define L2X0_LATENCY_CTRL_RD_SHIFT 4 132#define L310_LATENCY_CTRL_RD(n) ((n) << 4)
101#define L2X0_LATENCY_CTRL_WR_SHIFT 8 133#define L310_LATENCY_CTRL_WR(n) ((n) << 8)
102 134
103#define L2X0_ADDR_FILTER_EN 1 135#define L310_ADDR_FILTER_EN 1
136
137#define L310_PREFETCH_CTRL_OFFSET_MASK 0x1f
138#define L310_PREFETCH_CTRL_DBL_LINEFILL_INCR BIT(23)
139#define L310_PREFETCH_CTRL_PREFETCH_DROP BIT(24)
140#define L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP BIT(27)
141#define L310_PREFETCH_CTRL_DATA_PREFETCH BIT(28)
142#define L310_PREFETCH_CTRL_INSTR_PREFETCH BIT(29)
143#define L310_PREFETCH_CTRL_DBL_LINEFILL BIT(30)
104 144
105#define L2X0_CTRL_EN 1 145#define L2X0_CTRL_EN 1
106 146
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 91b99abe7a95..535579511ed0 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -18,6 +18,7 @@
18 } while (0) 18 } while (0)
19 19
20extern pte_t *pkmap_page_table; 20extern pte_t *pkmap_page_table;
21extern pte_t *fixmap_page_table;
21 22
22extern void *kmap_high(struct page *page); 23extern void *kmap_high(struct page *page);
23extern void kunmap_high(struct page *page); 24extern void kunmap_high(struct page *page);
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 8aa4cca74501..3d23418cbddd 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -179,6 +179,12 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
179/* PCI fixed i/o mapping */ 179/* PCI fixed i/o mapping */
180#define PCI_IO_VIRT_BASE 0xfee00000 180#define PCI_IO_VIRT_BASE 0xfee00000
181 181
182#if defined(CONFIG_PCI)
183void pci_ioremap_set_mem_type(int mem_type);
184#else
185static inline void pci_ioremap_set_mem_type(int mem_type) {}
186#endif
187
182extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr); 188extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr);
183 189
184/* 190/*
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 17a3fa2979e8..060a75e99263 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -14,7 +14,6 @@
14#include <linux/reboot.h> 14#include <linux/reboot.h>
15 15
16struct tag; 16struct tag;
17struct meminfo;
18struct pt_regs; 17struct pt_regs;
19struct smp_operations; 18struct smp_operations;
20#ifdef CONFIG_SMP 19#ifdef CONFIG_SMP
@@ -45,10 +44,12 @@ struct machine_desc {
45 unsigned char reserve_lp1 :1; /* never has lp1 */ 44 unsigned char reserve_lp1 :1; /* never has lp1 */
46 unsigned char reserve_lp2 :1; /* never has lp2 */ 45 unsigned char reserve_lp2 :1; /* never has lp2 */
47 enum reboot_mode reboot_mode; /* default restart mode */ 46 enum reboot_mode reboot_mode; /* default restart mode */
47 unsigned l2c_aux_val; /* L2 cache aux value */
48 unsigned l2c_aux_mask; /* L2 cache aux mask */
49 void (*l2c_write_sec)(unsigned long, unsigned);
48 struct smp_operations *smp; /* SMP operations */ 50 struct smp_operations *smp; /* SMP operations */
49 bool (*smp_init)(void); 51 bool (*smp_init)(void);
50 void (*fixup)(struct tag *, char **, 52 void (*fixup)(struct tag *, char **);
51 struct meminfo *);
52 void (*init_meminfo)(void); 53 void (*init_meminfo)(void);
53 void (*reserve)(void);/* reserve mem blocks */ 54 void (*reserve)(void);/* reserve mem blocks */
54 void (*map_io)(void);/* IO mapping function */ 55 void (*map_io)(void);/* IO mapping function */
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index a5ff410dcdb6..d9702eb0b02b 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -98,14 +98,14 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
98 * previously in which case the caller should take appropriate action. 98 * previously in which case the caller should take appropriate action.
99 * 99 *
100 * On success, the CPU is not guaranteed to be truly halted until 100 * On success, the CPU is not guaranteed to be truly halted until
101 * mcpm_cpu_power_down_finish() subsequently returns non-zero for the 101 * mcpm_wait_for_cpu_powerdown() subsequently returns non-zero for the
102 * specified cpu. Until then, other CPUs should make sure they do not 102 * specified cpu. Until then, other CPUs should make sure they do not
103 * trash memory the target CPU might be executing/accessing. 103 * trash memory the target CPU might be executing/accessing.
104 */ 104 */
105void mcpm_cpu_power_down(void); 105void mcpm_cpu_power_down(void);
106 106
107/** 107/**
108 * mcpm_cpu_power_down_finish - wait for a specified CPU to halt, and 108 * mcpm_wait_for_cpu_powerdown - wait for a specified CPU to halt, and
109 * make sure it is powered off 109 * make sure it is powered off
110 * 110 *
111 * @cpu: CPU number within given cluster 111 * @cpu: CPU number within given cluster
@@ -127,7 +127,7 @@ void mcpm_cpu_power_down(void);
127 * - zero if the CPU is in a safely parked state 127 * - zero if the CPU is in a safely parked state
128 * - nonzero otherwise (e.g., timeout) 128 * - nonzero otherwise (e.g., timeout)
129 */ 129 */
130int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster); 130int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster);
131 131
132/** 132/**
133 * mcpm_cpu_suspend - bring the calling CPU in a suspended state 133 * mcpm_cpu_suspend - bring the calling CPU in a suspended state
@@ -171,7 +171,7 @@ int mcpm_cpu_powered_up(void);
171struct mcpm_platform_ops { 171struct mcpm_platform_ops {
172 int (*power_up)(unsigned int cpu, unsigned int cluster); 172 int (*power_up)(unsigned int cpu, unsigned int cluster);
173 void (*power_down)(void); 173 void (*power_down)(void);
174 int (*power_down_finish)(unsigned int cpu, unsigned int cluster); 174 int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);
175 void (*suspend)(u64); 175 void (*suspend)(u64);
176 void (*powered_up)(void); 176 void (*powered_up)(void);
177}; 177};
diff --git a/arch/arm/include/asm/memblock.h b/arch/arm/include/asm/memblock.h
index c2f5102ae659..bf47a6c110a2 100644
--- a/arch/arm/include/asm/memblock.h
+++ b/arch/arm/include/asm/memblock.h
@@ -1,10 +1,9 @@
1#ifndef _ASM_ARM_MEMBLOCK_H 1#ifndef _ASM_ARM_MEMBLOCK_H
2#define _ASM_ARM_MEMBLOCK_H 2#define _ASM_ARM_MEMBLOCK_H
3 3
4struct meminfo;
5struct machine_desc; 4struct machine_desc;
6 5
7void arm_memblock_init(struct meminfo *, const struct machine_desc *); 6void arm_memblock_init(const struct machine_desc *);
8phys_addr_t arm_memblock_steal(phys_addr_t size, phys_addr_t align); 7phys_addr_t arm_memblock_steal(phys_addr_t size, phys_addr_t align);
9 8
10#endif 9#endif
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 02fa2558f662..2b751464d6ff 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -83,8 +83,6 @@
83 */ 83 */
84#define IOREMAP_MAX_ORDER 24 84#define IOREMAP_MAX_ORDER 24
85 85
86#define CONSISTENT_END (0xffe00000UL)
87
88#else /* CONFIG_MMU */ 86#else /* CONFIG_MMU */
89 87
90/* 88/*
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index f94784f0e3a6..891a56b35bcf 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -28,53 +28,84 @@ struct outer_cache_fns {
28 void (*clean_range)(unsigned long, unsigned long); 28 void (*clean_range)(unsigned long, unsigned long);
29 void (*flush_range)(unsigned long, unsigned long); 29 void (*flush_range)(unsigned long, unsigned long);
30 void (*flush_all)(void); 30 void (*flush_all)(void);
31 void (*inv_all)(void);
32 void (*disable)(void); 31 void (*disable)(void);
33#ifdef CONFIG_OUTER_CACHE_SYNC 32#ifdef CONFIG_OUTER_CACHE_SYNC
34 void (*sync)(void); 33 void (*sync)(void);
35#endif 34#endif
36 void (*set_debug)(unsigned long);
37 void (*resume)(void); 35 void (*resume)(void);
36
37 /* This is an ARM L2C thing */
38 void (*write_sec)(unsigned long, unsigned);
38}; 39};
39 40
40extern struct outer_cache_fns outer_cache; 41extern struct outer_cache_fns outer_cache;
41 42
42#ifdef CONFIG_OUTER_CACHE 43#ifdef CONFIG_OUTER_CACHE
43 44/**
45 * outer_inv_range - invalidate range of outer cache lines
46 * @start: starting physical address, inclusive
47 * @end: end physical address, exclusive
48 */
44static inline void outer_inv_range(phys_addr_t start, phys_addr_t end) 49static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
45{ 50{
46 if (outer_cache.inv_range) 51 if (outer_cache.inv_range)
47 outer_cache.inv_range(start, end); 52 outer_cache.inv_range(start, end);
48} 53}
54
55/**
56 * outer_clean_range - clean dirty outer cache lines
57 * @start: starting physical address, inclusive
58 * @end: end physical address, exclusive
59 */
49static inline void outer_clean_range(phys_addr_t start, phys_addr_t end) 60static inline void outer_clean_range(phys_addr_t start, phys_addr_t end)
50{ 61{
51 if (outer_cache.clean_range) 62 if (outer_cache.clean_range)
52 outer_cache.clean_range(start, end); 63 outer_cache.clean_range(start, end);
53} 64}
65
66/**
67 * outer_flush_range - clean and invalidate outer cache lines
68 * @start: starting physical address, inclusive
69 * @end: end physical address, exclusive
70 */
54static inline void outer_flush_range(phys_addr_t start, phys_addr_t end) 71static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
55{ 72{
56 if (outer_cache.flush_range) 73 if (outer_cache.flush_range)
57 outer_cache.flush_range(start, end); 74 outer_cache.flush_range(start, end);
58} 75}
59 76
77/**
78 * outer_flush_all - clean and invalidate all cache lines in the outer cache
79 *
80 * Note: depending on implementation, this may not be atomic - it must
81 * only be called with interrupts disabled and no other active outer
82 * cache masters.
83 *
84 * It is intended that this function is only used by implementations
85 * needing to override the outer_cache.disable() method due to security.
86 * (Some implementations perform this as a clean followed by an invalidate.)
87 */
60static inline void outer_flush_all(void) 88static inline void outer_flush_all(void)
61{ 89{
62 if (outer_cache.flush_all) 90 if (outer_cache.flush_all)
63 outer_cache.flush_all(); 91 outer_cache.flush_all();
64} 92}
65 93
66static inline void outer_inv_all(void) 94/**
67{ 95 * outer_disable - clean, invalidate and disable the outer cache
68 if (outer_cache.inv_all) 96 *
69 outer_cache.inv_all(); 97 * Disable the outer cache, ensuring that any data contained in the outer
70} 98 * cache is pushed out to lower levels of system memory. The note and
71 99 * conditions above concerning outer_flush_all() applies here.
72static inline void outer_disable(void) 100 */
73{ 101extern void outer_disable(void);
74 if (outer_cache.disable)
75 outer_cache.disable();
76}
77 102
103/**
104 * outer_resume - restore the cache configuration and re-enable outer cache
105 *
106 * Restore any configuration that the cache had when previously enabled,
107 * and re-enable the outer cache.
108 */
78static inline void outer_resume(void) 109static inline void outer_resume(void)
79{ 110{
80 if (outer_cache.resume) 111 if (outer_cache.resume)
@@ -90,13 +121,18 @@ static inline void outer_clean_range(phys_addr_t start, phys_addr_t end)
90static inline void outer_flush_range(phys_addr_t start, phys_addr_t end) 121static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
91{ } 122{ }
92static inline void outer_flush_all(void) { } 123static inline void outer_flush_all(void) { }
93static inline void outer_inv_all(void) { }
94static inline void outer_disable(void) { } 124static inline void outer_disable(void) { }
95static inline void outer_resume(void) { } 125static inline void outer_resume(void) { }
96 126
97#endif 127#endif
98 128
99#ifdef CONFIG_OUTER_CACHE_SYNC 129#ifdef CONFIG_OUTER_CACHE_SYNC
130/**
131 * outer_sync - perform a sync point for outer cache
132 *
133 * Ensure that all outer cache operations are complete and any store
134 * buffers are drained.
135 */
100static inline void outer_sync(void) 136static inline void outer_sync(void)
101{ 137{
102 if (outer_cache.sync) 138 if (outer_cache.sync)
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index 8d6a089dfb76..e0adb9f1bf94 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -21,34 +21,6 @@
21#define __tagtable(tag, fn) \ 21#define __tagtable(tag, fn) \
22static const struct tagtable __tagtable_##fn __tag = { tag, fn } 22static const struct tagtable __tagtable_##fn __tag = { tag, fn }
23 23
24/*
25 * Memory map description
26 */
27#define NR_BANKS CONFIG_ARM_NR_BANKS
28
29struct membank {
30 phys_addr_t start;
31 phys_addr_t size;
32 unsigned int highmem;
33};
34
35struct meminfo {
36 int nr_banks;
37 struct membank bank[NR_BANKS];
38};
39
40extern struct meminfo meminfo;
41
42#define for_each_bank(iter,mi) \
43 for (iter = 0; iter < (mi)->nr_banks; iter++)
44
45#define bank_pfn_start(bank) __phys_to_pfn((bank)->start)
46#define bank_pfn_end(bank) __phys_to_pfn((bank)->start + (bank)->size)
47#define bank_pfn_size(bank) ((bank)->size >> PAGE_SHIFT)
48#define bank_phys_start(bank) (bank)->start
49#define bank_phys_end(bank) ((bank)->start + (bank)->size)
50#define bank_phys_size(bank) (bank)->size
51
52extern int arm_add_memory(u64 start, u64 size); 24extern int arm_add_memory(u64 start, u64 size);
53extern void early_print(const char *str, ...); 25extern void early_print(const char *str, ...);
54extern void dump_machine_table(void); 26extern void dump_machine_table(void);
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 12c3a5decc60..75d95799b6e6 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -171,8 +171,9 @@ extern int __put_user_8(void *, unsigned long long);
171#define __put_user_check(x,p) \ 171#define __put_user_check(x,p) \
172 ({ \ 172 ({ \
173 unsigned long __limit = current_thread_info()->addr_limit - 1; \ 173 unsigned long __limit = current_thread_info()->addr_limit - 1; \
174 const typeof(*(p)) __user *__tmp_p = (p); \
174 register const typeof(*(p)) __r2 asm("r2") = (x); \ 175 register const typeof(*(p)) __r2 asm("r2") = (x); \
175 register const typeof(*(p)) __user *__p asm("r0") = (p);\ 176 register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
176 register unsigned long __l asm("r1") = __limit; \ 177 register unsigned long __l asm("r1") = __limit; \
177 register int __e asm("r0"); \ 178 register int __e asm("r0"); \
178 switch (sizeof(*(__p))) { \ 179 switch (sizeof(*(__p))) { \
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 040619c32d68..38ddd9f83d0e 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_ARTHUR) += arthur.o
39obj-$(CONFIG_ISA_DMA) += dma-isa.o 39obj-$(CONFIG_ISA_DMA) += dma-isa.o
40obj-$(CONFIG_PCI) += bios32.o isa.o 40obj-$(CONFIG_PCI) += bios32.o isa.o
41obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o suspend.o 41obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o suspend.o
42obj-$(CONFIG_HIBERNATION) += hibernate.o
42obj-$(CONFIG_SMP) += smp.o 43obj-$(CONFIG_SMP) += smp.o
43ifdef CONFIG_MMU 44ifdef CONFIG_MMU
44obj-$(CONFIG_SMP) += smp_tlb.o 45obj-$(CONFIG_SMP) += smp_tlb.o
diff --git a/arch/arm/kernel/atags_parse.c b/arch/arm/kernel/atags_parse.c
index 8c14de8180c0..7807ef58a2ab 100644
--- a/arch/arm/kernel/atags_parse.c
+++ b/arch/arm/kernel/atags_parse.c
@@ -22,6 +22,7 @@
22#include <linux/fs.h> 22#include <linux/fs.h>
23#include <linux/root_dev.h> 23#include <linux/root_dev.h>
24#include <linux/screen_info.h> 24#include <linux/screen_info.h>
25#include <linux/memblock.h>
25 26
26#include <asm/setup.h> 27#include <asm/setup.h>
27#include <asm/system_info.h> 28#include <asm/system_info.h>
@@ -222,10 +223,10 @@ setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
222 } 223 }
223 224
224 if (mdesc->fixup) 225 if (mdesc->fixup)
225 mdesc->fixup(tags, &from, &meminfo); 226 mdesc->fixup(tags, &from);
226 227
227 if (tags->hdr.tag == ATAG_CORE) { 228 if (tags->hdr.tag == ATAG_CORE) {
228 if (meminfo.nr_banks != 0) 229 if (memblock_phys_mem_size())
229 squash_mem_tags(tags); 230 squash_mem_tags(tags);
230 save_atags(tags); 231 save_atags(tags);
231 parse_tags(tags); 232 parse_tags(tags);
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index c7419a585ddc..679a83d470cc 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -27,11 +27,6 @@
27#include <asm/mach/arch.h> 27#include <asm/mach/arch.h>
28#include <asm/mach-types.h> 28#include <asm/mach-types.h>
29 29
30void __init early_init_dt_add_memory_arch(u64 base, u64 size)
31{
32 arm_add_memory(base, size);
33}
34
35void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) 30void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
36{ 31{
37 return memblock_virt_alloc(size, align); 32 return memblock_virt_alloc(size, align);
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index cb6fa30c22a7..52a949a8077d 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -413,6 +413,11 @@ __und_usr:
413 @ 413 @
414 adr r9, BSYM(ret_from_exception) 414 adr r9, BSYM(ret_from_exception)
415 415
416 @ IRQs must be enabled before attempting to read the instruction from
417 @ user space since that could cause a page/translation fault if the
418 @ page table was modified by another CPU.
419 enable_irq
420
416 tst r3, #PSR_T_BIT @ Thumb mode? 421 tst r3, #PSR_T_BIT @ Thumb mode?
417 bne __und_usr_thumb 422 bne __und_usr_thumb
418 sub r4, r2, #4 @ ARM instr at LR - 4 423 sub r4, r2, #4 @ ARM instr at LR - 4
@@ -484,7 +489,8 @@ ENDPROC(__und_usr)
484 */ 489 */
485 .pushsection .fixup, "ax" 490 .pushsection .fixup, "ax"
486 .align 2 491 .align 2
4874: mov pc, r9 4924: str r4, [sp, #S_PC] @ retry current instruction
493 mov pc, r9
488 .popsection 494 .popsection
489 .pushsection __ex_table,"a" 495 .pushsection __ex_table,"a"
490 .long 1b, 4b 496 .long 1b, 4b
@@ -517,7 +523,7 @@ ENDPROC(__und_usr)
517 * r9 = normal "successful" return address 523 * r9 = normal "successful" return address
518 * r10 = this threads thread_info structure 524 * r10 = this threads thread_info structure
519 * lr = unrecognised instruction return address 525 * lr = unrecognised instruction return address
520 * IRQs disabled, FIQs enabled. 526 * IRQs enabled, FIQs enabled.
521 */ 527 */
522 @ 528 @
523 @ Fall-through from Thumb-2 __und_usr 529 @ Fall-through from Thumb-2 __und_usr
@@ -624,7 +630,6 @@ call_fpe:
624#endif 630#endif
625 631
626do_fpe: 632do_fpe:
627 enable_irq
628 ldr r4, .LCfp 633 ldr r4, .LCfp
629 add r10, r10, #TI_FPSTATE @ r10 = workspace 634 add r10, r10, #TI_FPSTATE @ r10 = workspace
630 ldr pc, [r4] @ Call FP module USR entry point 635 ldr pc, [r4] @ Call FP module USR entry point
@@ -652,8 +657,7 @@ __und_usr_fault_32:
652 b 1f 657 b 1f
653__und_usr_fault_16: 658__und_usr_fault_16:
654 mov r1, #2 659 mov r1, #2
6551: enable_irq 6601: mov r0, sp
656 mov r0, sp
657 adr lr, BSYM(ret_from_exception) 661 adr lr, BSYM(ret_from_exception)
658 b __und_fault 662 b __und_fault
659ENDPROC(__und_usr_fault_32) 663ENDPROC(__und_usr_fault_32)
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 45a1df9bf759..5d702f8900b1 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -132,6 +132,10 @@
132 orrne r5, V7M_xPSR_FRAMEPTRALIGN 132 orrne r5, V7M_xPSR_FRAMEPTRALIGN
133 biceq r5, V7M_xPSR_FRAMEPTRALIGN 133 biceq r5, V7M_xPSR_FRAMEPTRALIGN
134 134
135 @ ensure bit 0 is cleared in the PC, otherwise behaviour is
136 @ unpredictable
137 bic r4, #1
138
135 @ write basic exception frame 139 @ write basic exception frame
136 stmdb r2!, {r1, r3-r5} 140 stmdb r2!, {r1, r3-r5}
137 ldmia sp, {r1, r3-r5} 141 ldmia sp, {r1, r3-r5}
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index c108ddcb9ba4..af9a8a927a4e 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -14,6 +14,7 @@
14 14
15#include <linux/ftrace.h> 15#include <linux/ftrace.h>
16#include <linux/uaccess.h> 16#include <linux/uaccess.h>
17#include <linux/module.h>
17 18
18#include <asm/cacheflush.h> 19#include <asm/cacheflush.h>
19#include <asm/opcodes.h> 20#include <asm/opcodes.h>
@@ -63,6 +64,18 @@ static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
63} 64}
64#endif 65#endif
65 66
67int ftrace_arch_code_modify_prepare(void)
68{
69 set_all_modules_text_rw();
70 return 0;
71}
72
73int ftrace_arch_code_modify_post_process(void)
74{
75 set_all_modules_text_ro();
76 return 0;
77}
78
66static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr) 79static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
67{ 80{
68 return arm_gen_branch_link(pc, addr); 81 return arm_gen_branch_link(pc, addr);
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 591d6e4a6492..2c35f0ff2fdc 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -475,7 +475,7 @@ ENDPROC(__turn_mmu_on)
475 475
476 476
477#ifdef CONFIG_SMP_ON_UP 477#ifdef CONFIG_SMP_ON_UP
478 __INIT 478 __HEAD
479__fixup_smp: 479__fixup_smp:
480 and r3, r9, #0x000f0000 @ architecture version 480 and r3, r9, #0x000f0000 @ architecture version
481 teq r3, #0x000f0000 @ CPU ID supported? 481 teq r3, #0x000f0000 @ CPU ID supported?
diff --git a/arch/arm/kernel/hibernate.c b/arch/arm/kernel/hibernate.c
new file mode 100644
index 000000000000..bb8b79648643
--- /dev/null
+++ b/arch/arm/kernel/hibernate.c
@@ -0,0 +1,107 @@
1/*
2 * Hibernation support specific for ARM
3 *
4 * Derived from work on ARM hibernation support by:
5 *
6 * Ubuntu project, hibernation support for mach-dove
7 * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
8 * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
9 * https://lkml.org/lkml/2010/6/18/4
10 * https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
11 * https://patchwork.kernel.org/patch/96442/
12 *
13 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
14 *
15 * License terms: GNU General Public License (GPL) version 2
16 */
17
18#include <linux/mm.h>
19#include <linux/suspend.h>
20#include <asm/system_misc.h>
21#include <asm/idmap.h>
22#include <asm/suspend.h>
23#include <asm/memory.h>
24
25extern const void __nosave_begin, __nosave_end;
26
27int pfn_is_nosave(unsigned long pfn)
28{
29 unsigned long nosave_begin_pfn = virt_to_pfn(&__nosave_begin);
30 unsigned long nosave_end_pfn = virt_to_pfn(&__nosave_end - 1);
31
32 return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn);
33}
34
35void notrace save_processor_state(void)
36{
37 WARN_ON(num_online_cpus() != 1);
38 local_fiq_disable();
39}
40
41void notrace restore_processor_state(void)
42{
43 local_fiq_enable();
44}
45
46/*
47 * Snapshot kernel memory and reset the system.
48 *
49 * swsusp_save() is executed in the suspend finisher so that the CPU
50 * context pointer and memory are part of the saved image, which is
51 * required by the resume kernel image to restart execution from
52 * swsusp_arch_suspend().
53 *
54 * soft_restart is not technically needed, but is used to get success
55 * returned from cpu_suspend.
56 *
57 * When soft reboot completes, the hibernation snapshot is written out.
58 */
59static int notrace arch_save_image(unsigned long unused)
60{
61 int ret;
62
63 ret = swsusp_save();
64 if (ret == 0)
65 soft_restart(virt_to_phys(cpu_resume));
66 return ret;
67}
68
69/*
70 * Save the current CPU state before suspend / poweroff.
71 */
72int notrace swsusp_arch_suspend(void)
73{
74 return cpu_suspend(0, arch_save_image);
75}
76
77/*
78 * Restore page contents for physical pages that were in use during loading
79 * hibernation image. Switch to idmap_pgd so the physical page tables
80 * are overwritten with the same contents.
81 */
82static void notrace arch_restore_image(void *unused)
83{
84 struct pbe *pbe;
85
86 cpu_switch_mm(idmap_pgd, &init_mm);
87 for (pbe = restore_pblist; pbe; pbe = pbe->next)
88 copy_page(pbe->orig_address, pbe->address);
89
90 soft_restart(virt_to_phys(cpu_resume));
91}
92
93static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata;
94
95/*
96 * Resume from the hibernation image.
97 * Due to the kernel heap / data restore, stack contents change underneath
98 * and that would make function calls impossible; switch to a temporary
99 * stack within the nosave region to avoid that problem.
100 */
101int swsusp_arch_resume(void)
102{
103 extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
104 call_with_stack(arch_restore_image, 0,
105 resume_stack + ARRAY_SIZE(resume_stack));
106 return 0;
107}
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 9723d17b8f38..2c4257604513 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -37,6 +37,7 @@
37#include <linux/proc_fs.h> 37#include <linux/proc_fs.h>
38#include <linux/export.h> 38#include <linux/export.h>
39 39
40#include <asm/hardware/cache-l2x0.h>
40#include <asm/exception.h> 41#include <asm/exception.h>
41#include <asm/mach/arch.h> 42#include <asm/mach/arch.h>
42#include <asm/mach/irq.h> 43#include <asm/mach/irq.h>
@@ -115,10 +116,21 @@ EXPORT_SYMBOL_GPL(set_irq_flags);
115 116
116void __init init_IRQ(void) 117void __init init_IRQ(void)
117{ 118{
119 int ret;
120
118 if (IS_ENABLED(CONFIG_OF) && !machine_desc->init_irq) 121 if (IS_ENABLED(CONFIG_OF) && !machine_desc->init_irq)
119 irqchip_init(); 122 irqchip_init();
120 else 123 else
121 machine_desc->init_irq(); 124 machine_desc->init_irq();
125
126 if (IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_CACHE_L2X0) &&
127 (machine_desc->l2c_aux_mask || machine_desc->l2c_aux_val)) {
128 outer_cache.write_sec = machine_desc->l2c_write_sec;
129 ret = l2x0_of_init(machine_desc->l2c_aux_val,
130 machine_desc->l2c_aux_mask);
131 if (ret)
132 pr_err("L2C: failed to init: %d\n", ret);
133 }
122} 134}
123 135
124#ifdef CONFIG_MULTI_IRQ_HANDLER 136#ifdef CONFIG_MULTI_IRQ_HANDLER
diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
index 2452dd1bef53..a5599cfc43cb 100644
--- a/arch/arm/kernel/iwmmxt.S
+++ b/arch/arm/kernel/iwmmxt.S
@@ -18,6 +18,7 @@
18#include <asm/ptrace.h> 18#include <asm/ptrace.h>
19#include <asm/thread_info.h> 19#include <asm/thread_info.h>
20#include <asm/asm-offsets.h> 20#include <asm/asm-offsets.h>
21#include <asm/assembler.h>
21 22
22#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B) 23#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
23#define PJ4(code...) code 24#define PJ4(code...) code
@@ -65,17 +66,18 @@
65 * r9 = ret_from_exception 66 * r9 = ret_from_exception
66 * lr = undefined instr exit 67 * lr = undefined instr exit
67 * 68 *
68 * called from prefetch exception handler with interrupts disabled 69 * called from prefetch exception handler with interrupts enabled
69 */ 70 */
70 71
71ENTRY(iwmmxt_task_enable) 72ENTRY(iwmmxt_task_enable)
73 inc_preempt_count r10, r3
72 74
73 XSC(mrc p15, 0, r2, c15, c1, 0) 75 XSC(mrc p15, 0, r2, c15, c1, 0)
74 PJ4(mrc p15, 0, r2, c1, c0, 2) 76 PJ4(mrc p15, 0, r2, c1, c0, 2)
75 @ CP0 and CP1 accessible? 77 @ CP0 and CP1 accessible?
76 XSC(tst r2, #0x3) 78 XSC(tst r2, #0x3)
77 PJ4(tst r2, #0xf) 79 PJ4(tst r2, #0xf)
78 movne pc, lr @ if so no business here 80 bne 4f @ if so no business here
79 @ enable access to CP0 and CP1 81 @ enable access to CP0 and CP1
80 XSC(orr r2, r2, #0x3) 82 XSC(orr r2, r2, #0x3)
81 XSC(mcr p15, 0, r2, c15, c1, 0) 83 XSC(mcr p15, 0, r2, c15, c1, 0)
@@ -136,7 +138,7 @@ concan_dump:
136 wstrd wR15, [r1, #MMX_WR15] 138 wstrd wR15, [r1, #MMX_WR15]
137 139
1382: teq r0, #0 @ anything to load? 1402: teq r0, #0 @ anything to load?
139 moveq pc, lr 141 beq 3f
140 142
141concan_load: 143concan_load:
142 144
@@ -169,8 +171,14 @@ concan_load:
169 @ clear CUP/MUP (only if r1 != 0) 171 @ clear CUP/MUP (only if r1 != 0)
170 teq r1, #0 172 teq r1, #0
171 mov r2, #0 173 mov r2, #0
172 moveq pc, lr 174 beq 3f
173 tmcr wCon, r2 175 tmcr wCon, r2
176
1773:
178#ifdef CONFIG_PREEMPT_COUNT
179 get_thread_info r10
180#endif
1814: dec_preempt_count r10, r3
174 mov pc, lr 182 mov pc, lr
175 183
176/* 184/*
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 51798d7854ac..a71ae1523620 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -221,6 +221,7 @@ static struct notifier_block cpu_pmu_hotplug_notifier = {
221 * PMU platform driver and devicetree bindings. 221 * PMU platform driver and devicetree bindings.
222 */ 222 */
223static struct of_device_id cpu_pmu_of_device_ids[] = { 223static struct of_device_id cpu_pmu_of_device_ids[] = {
224 {.compatible = "arm,cortex-a17-pmu", .data = armv7_a17_pmu_init},
224 {.compatible = "arm,cortex-a15-pmu", .data = armv7_a15_pmu_init}, 225 {.compatible = "arm,cortex-a15-pmu", .data = armv7_a15_pmu_init},
225 {.compatible = "arm,cortex-a12-pmu", .data = armv7_a12_pmu_init}, 226 {.compatible = "arm,cortex-a12-pmu", .data = armv7_a12_pmu_init},
226 {.compatible = "arm,cortex-a9-pmu", .data = armv7_a9_pmu_init}, 227 {.compatible = "arm,cortex-a9-pmu", .data = armv7_a9_pmu_init},
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index f4ef3981ed02..2037f7205987 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1599,6 +1599,13 @@ static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
1599 return 0; 1599 return 0;
1600} 1600}
1601 1601
1602static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
1603{
1604 armv7_a12_pmu_init(cpu_pmu);
1605 cpu_pmu->name = "ARMv7 Cortex-A17";
1606 return 0;
1607}
1608
1602/* 1609/*
1603 * Krait Performance Monitor Region Event Selection Register (PMRESRn) 1610 * Krait Performance Monitor Region Event Selection Register (PMRESRn)
1604 * 1611 *
@@ -2021,6 +2028,11 @@ static inline int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
2021 return -ENODEV; 2028 return -ENODEV;
2022} 2029}
2023 2030
2031static inline int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
2032{
2033 return -ENODEV;
2034}
2035
2024static inline int krait_pmu_init(struct arm_pmu *cpu_pmu) 2036static inline int krait_pmu_init(struct arm_pmu *cpu_pmu)
2025{ 2037{
2026 return -ENODEV; 2038 return -ENODEV;
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index f5120ca08671..8a16ee5d8a95 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -631,15 +631,8 @@ void __init dump_machine_table(void)
631 631
632int __init arm_add_memory(u64 start, u64 size) 632int __init arm_add_memory(u64 start, u64 size)
633{ 633{
634 struct membank *bank = &meminfo.bank[meminfo.nr_banks];
635 u64 aligned_start; 634 u64 aligned_start;
636 635
637 if (meminfo.nr_banks >= NR_BANKS) {
638 pr_crit("NR_BANKS too low, ignoring memory at 0x%08llx\n",
639 (long long)start);
640 return -EINVAL;
641 }
642
643 /* 636 /*
644 * Ensure that start/size are aligned to a page boundary. 637 * Ensure that start/size are aligned to a page boundary.
645 * Size is appropriately rounded down, start is rounded up. 638 * Size is appropriately rounded down, start is rounded up.
@@ -680,17 +673,17 @@ int __init arm_add_memory(u64 start, u64 size)
680 aligned_start = PHYS_OFFSET; 673 aligned_start = PHYS_OFFSET;
681 } 674 }
682 675
683 bank->start = aligned_start; 676 start = aligned_start;
684 bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1); 677 size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
685 678
686 /* 679 /*
687 * Check whether this memory region has non-zero size or 680 * Check whether this memory region has non-zero size or
688 * invalid node number. 681 * invalid node number.
689 */ 682 */
690 if (bank->size == 0) 683 if (size == 0)
691 return -EINVAL; 684 return -EINVAL;
692 685
693 meminfo.nr_banks++; 686 memblock_add(start, size);
694 return 0; 687 return 0;
695} 688}
696 689
@@ -698,6 +691,7 @@ int __init arm_add_memory(u64 start, u64 size)
698 * Pick out the memory size. We look for mem=size@start, 691 * Pick out the memory size. We look for mem=size@start,
699 * where start and size are "size[KkMm]" 692 * where start and size are "size[KkMm]"
700 */ 693 */
694
701static int __init early_mem(char *p) 695static int __init early_mem(char *p)
702{ 696{
703 static int usermem __initdata = 0; 697 static int usermem __initdata = 0;
@@ -712,7 +706,8 @@ static int __init early_mem(char *p)
712 */ 706 */
713 if (usermem == 0) { 707 if (usermem == 0) {
714 usermem = 1; 708 usermem = 1;
715 meminfo.nr_banks = 0; 709 memblock_remove(memblock_start_of_DRAM(),
710 memblock_end_of_DRAM() - memblock_start_of_DRAM());
716 } 711 }
717 712
718 start = PHYS_OFFSET; 713 start = PHYS_OFFSET;
@@ -857,13 +852,6 @@ static void __init reserve_crashkernel(void)
857static inline void reserve_crashkernel(void) {} 852static inline void reserve_crashkernel(void) {}
858#endif /* CONFIG_KEXEC */ 853#endif /* CONFIG_KEXEC */
859 854
860static int __init meminfo_cmp(const void *_a, const void *_b)
861{
862 const struct membank *a = _a, *b = _b;
863 long cmp = bank_pfn_start(a) - bank_pfn_start(b);
864 return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
865}
866
867void __init hyp_mode_check(void) 855void __init hyp_mode_check(void)
868{ 856{
869#ifdef CONFIG_ARM_VIRT_EXT 857#ifdef CONFIG_ARM_VIRT_EXT
@@ -906,12 +894,10 @@ void __init setup_arch(char **cmdline_p)
906 894
907 parse_early_param(); 895 parse_early_param();
908 896
909 sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
910
911 early_paging_init(mdesc, lookup_processor_type(read_cpuid_id())); 897 early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
912 setup_dma_zone(mdesc); 898 setup_dma_zone(mdesc);
913 sanity_check_meminfo(); 899 sanity_check_meminfo();
914 arm_memblock_init(&meminfo, mdesc); 900 arm_memblock_init(mdesc);
915 901
916 paging_init(mdesc); 902 paging_init(mdesc);
917 request_standard_resources(mdesc); 903 request_standard_resources(mdesc);
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index b907d9b790ab..1b880db2a033 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -127,6 +127,10 @@ ENDPROC(cpu_resume_after_mmu)
127 .align 127 .align
128ENTRY(cpu_resume) 128ENTRY(cpu_resume)
129ARM_BE8(setend be) @ ensure we are in BE mode 129ARM_BE8(setend be) @ ensure we are in BE mode
130#ifdef CONFIG_ARM_VIRT_EXT
131 bl __hyp_stub_install_secondary
132#endif
133 safe_svcmode_maskall r1
130 mov r1, #0 134 mov r1, #0
131 ALT_SMP(mrc p15, 0, r0, c0, c0, 5) 135 ALT_SMP(mrc p15, 0, r0, c0, c0, 5)
132 ALT_UP_B(1f) 136 ALT_UP_B(1f)
@@ -144,7 +148,6 @@ ARM_BE8(setend be) @ ensure we are in BE mode
144 ldr r0, [r0, #SLEEP_SAVE_SP_PHYS] 148 ldr r0, [r0, #SLEEP_SAVE_SP_PHYS]
145 ldr r0, [r0, r1, lsl #2] 149 ldr r0, [r0, r1, lsl #2]
146 150
147 setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set SVC, irqs off
148 @ load phys pgd, stack, resume fn 151 @ load phys pgd, stack, resume fn
149 ARM( ldmia r0!, {r1, sp, pc} ) 152 ARM( ldmia r0!, {r1, sp, pc} )
150THUMB( ldmia r0!, {r1, r2, r3} ) 153THUMB( ldmia r0!, {r1, r2, r3} )
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index af4e8c8a5422..f065eb05d254 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -3,6 +3,7 @@
3#include <linux/stacktrace.h> 3#include <linux/stacktrace.h>
4 4
5#include <asm/stacktrace.h> 5#include <asm/stacktrace.h>
6#include <asm/traps.h>
6 7
7#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) 8#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
8/* 9/*
@@ -61,6 +62,7 @@ EXPORT_SYMBOL(walk_stackframe);
61#ifdef CONFIG_STACKTRACE 62#ifdef CONFIG_STACKTRACE
62struct stack_trace_data { 63struct stack_trace_data {
63 struct stack_trace *trace; 64 struct stack_trace *trace;
65 unsigned long last_pc;
64 unsigned int no_sched_functions; 66 unsigned int no_sched_functions;
65 unsigned int skip; 67 unsigned int skip;
66}; 68};
@@ -69,6 +71,7 @@ static int save_trace(struct stackframe *frame, void *d)
69{ 71{
70 struct stack_trace_data *data = d; 72 struct stack_trace_data *data = d;
71 struct stack_trace *trace = data->trace; 73 struct stack_trace *trace = data->trace;
74 struct pt_regs *regs;
72 unsigned long addr = frame->pc; 75 unsigned long addr = frame->pc;
73 76
74 if (data->no_sched_functions && in_sched_functions(addr)) 77 if (data->no_sched_functions && in_sched_functions(addr))
@@ -80,16 +83,39 @@ static int save_trace(struct stackframe *frame, void *d)
80 83
81 trace->entries[trace->nr_entries++] = addr; 84 trace->entries[trace->nr_entries++] = addr;
82 85
86 if (trace->nr_entries >= trace->max_entries)
87 return 1;
88
89 /*
90 * in_exception_text() is designed to test if the PC is one of
91 * the functions which has an exception stack above it, but
92 * unfortunately what is in frame->pc is the return LR value,
93 * not the saved PC value. So, we need to track the previous
94 * frame PC value when doing this.
95 */
96 addr = data->last_pc;
97 data->last_pc = frame->pc;
98 if (!in_exception_text(addr))
99 return 0;
100
101 regs = (struct pt_regs *)frame->sp;
102
103 trace->entries[trace->nr_entries++] = regs->ARM_pc;
104
83 return trace->nr_entries >= trace->max_entries; 105 return trace->nr_entries >= trace->max_entries;
84} 106}
85 107
86void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) 108/* This must be noinline to so that our skip calculation works correctly */
109static noinline void __save_stack_trace(struct task_struct *tsk,
110 struct stack_trace *trace, unsigned int nosched)
87{ 111{
88 struct stack_trace_data data; 112 struct stack_trace_data data;
89 struct stackframe frame; 113 struct stackframe frame;
90 114
91 data.trace = trace; 115 data.trace = trace;
116 data.last_pc = ULONG_MAX;
92 data.skip = trace->skip; 117 data.skip = trace->skip;
118 data.no_sched_functions = nosched;
93 119
94 if (tsk != current) { 120 if (tsk != current) {
95#ifdef CONFIG_SMP 121#ifdef CONFIG_SMP
@@ -102,7 +128,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
102 trace->entries[trace->nr_entries++] = ULONG_MAX; 128 trace->entries[trace->nr_entries++] = ULONG_MAX;
103 return; 129 return;
104#else 130#else
105 data.no_sched_functions = 1;
106 frame.fp = thread_saved_fp(tsk); 131 frame.fp = thread_saved_fp(tsk);
107 frame.sp = thread_saved_sp(tsk); 132 frame.sp = thread_saved_sp(tsk);
108 frame.lr = 0; /* recovered from the stack */ 133 frame.lr = 0; /* recovered from the stack */
@@ -111,11 +136,12 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
111 } else { 136 } else {
112 register unsigned long current_sp asm ("sp"); 137 register unsigned long current_sp asm ("sp");
113 138
114 data.no_sched_functions = 0; 139 /* We don't want this function nor the caller */
140 data.skip += 2;
115 frame.fp = (unsigned long)__builtin_frame_address(0); 141 frame.fp = (unsigned long)__builtin_frame_address(0);
116 frame.sp = current_sp; 142 frame.sp = current_sp;
117 frame.lr = (unsigned long)__builtin_return_address(0); 143 frame.lr = (unsigned long)__builtin_return_address(0);
118 frame.pc = (unsigned long)save_stack_trace_tsk; 144 frame.pc = (unsigned long)__save_stack_trace;
119 } 145 }
120 146
121 walk_stackframe(&frame, save_trace, &data); 147 walk_stackframe(&frame, save_trace, &data);
@@ -123,9 +149,33 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
123 trace->entries[trace->nr_entries++] = ULONG_MAX; 149 trace->entries[trace->nr_entries++] = ULONG_MAX;
124} 150}
125 151
152void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
153{
154 struct stack_trace_data data;
155 struct stackframe frame;
156
157 data.trace = trace;
158 data.skip = trace->skip;
159 data.no_sched_functions = 0;
160
161 frame.fp = regs->ARM_fp;
162 frame.sp = regs->ARM_sp;
163 frame.lr = regs->ARM_lr;
164 frame.pc = regs->ARM_pc;
165
166 walk_stackframe(&frame, save_trace, &data);
167 if (trace->nr_entries < trace->max_entries)
168 trace->entries[trace->nr_entries++] = ULONG_MAX;
169}
170
171void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
172{
173 __save_stack_trace(tsk, trace, 1);
174}
175
126void save_stack_trace(struct stack_trace *trace) 176void save_stack_trace(struct stack_trace *trace)
127{ 177{
128 save_stack_trace_tsk(current, trace); 178 __save_stack_trace(current, trace, 0);
129} 179}
130EXPORT_SYMBOL_GPL(save_stack_trace); 180EXPORT_SYMBOL_GPL(save_stack_trace);
131#endif 181#endif
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 0bc94b1fd1ae..0fa8825cea04 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -91,13 +91,13 @@ static void __init parse_dt_topology(void)
91{ 91{
92 const struct cpu_efficiency *cpu_eff; 92 const struct cpu_efficiency *cpu_eff;
93 struct device_node *cn = NULL; 93 struct device_node *cn = NULL;
94 unsigned long min_capacity = (unsigned long)(-1); 94 unsigned long min_capacity = ULONG_MAX;
95 unsigned long max_capacity = 0; 95 unsigned long max_capacity = 0;
96 unsigned long capacity = 0; 96 unsigned long capacity = 0;
97 int alloc_size, cpu = 0; 97 int cpu = 0;
98 98
99 alloc_size = nr_cpu_ids * sizeof(*__cpu_capacity); 99 __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
100 __cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT); 100 GFP_NOWAIT);
101 101
102 for_each_possible_cpu(cpu) { 102 for_each_possible_cpu(cpu) {
103 const u32 *rate; 103 const u32 *rate;
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 3c217694ebec..cb791ac6a003 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -285,7 +285,7 @@ static int unwind_exec_pop_r4_to_rN(struct unwind_ctrl_block *ctrl,
285 if (unwind_pop_register(ctrl, &vsp, reg)) 285 if (unwind_pop_register(ctrl, &vsp, reg))
286 return -URC_FAILURE; 286 return -URC_FAILURE;
287 287
288 if (insn & 0x80) 288 if (insn & 0x8)
289 if (unwind_pop_register(ctrl, &vsp, 14)) 289 if (unwind_pop_register(ctrl, &vsp, 14))
290 return -URC_FAILURE; 290 return -URC_FAILURE;
291 291
diff --git a/arch/arm/kernel/uprobes.c b/arch/arm/kernel/uprobes.c
index f9bacee973bf..56adf9c1fde0 100644
--- a/arch/arm/kernel/uprobes.c
+++ b/arch/arm/kernel/uprobes.c
@@ -113,6 +113,26 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
113 return 0; 113 return 0;
114} 114}
115 115
116void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
117 void *src, unsigned long len)
118{
119 void *xol_page_kaddr = kmap_atomic(page);
120 void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK);
121
122 preempt_disable();
123
124 /* Initialize the slot */
125 memcpy(dst, src, len);
126
127 /* flush caches (dcache/icache) */
128 flush_uprobe_xol_access(page, vaddr, dst, len);
129
130 preempt_enable();
131
132 kunmap_atomic(xol_page_kaddr);
133}
134
135
116int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) 136int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
117{ 137{
118 struct uprobe_task *utask = current->utask; 138 struct uprobe_task *utask = current->utask;
diff --git a/arch/arm/mach-bcm/bcm_5301x.c b/arch/arm/mach-bcm/bcm_5301x.c
index edff69761e04..e9bcbdbce555 100644
--- a/arch/arm/mach-bcm/bcm_5301x.c
+++ b/arch/arm/mach-bcm/bcm_5301x.c
@@ -43,19 +43,14 @@ static void __init bcm5301x_init_early(void)
43 "imprecise external abort"); 43 "imprecise external abort");
44} 44}
45 45
46static void __init bcm5301x_dt_init(void)
47{
48 l2x0_of_init(0, ~0UL);
49 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
50}
51
52static const char __initconst *bcm5301x_dt_compat[] = { 46static const char __initconst *bcm5301x_dt_compat[] = {
53 "brcm,bcm4708", 47 "brcm,bcm4708",
54 NULL, 48 NULL,
55}; 49};
56 50
57DT_MACHINE_START(BCM5301X, "BCM5301X") 51DT_MACHINE_START(BCM5301X, "BCM5301X")
52 .l2c_aux_val = 0,
53 .l2c_aux_mask = ~0,
58 .init_early = bcm5301x_init_early, 54 .init_early = bcm5301x_init_early,
59 .init_machine = bcm5301x_dt_init,
60 .dt_compat = bcm5301x_dt_compat, 55 .dt_compat = bcm5301x_dt_compat,
61MACHINE_END 56MACHINE_END
diff --git a/arch/arm/mach-berlin/berlin.c b/arch/arm/mach-berlin/berlin.c
index 025bcb5473eb..ac181c6797ee 100644
--- a/arch/arm/mach-berlin/berlin.c
+++ b/arch/arm/mach-berlin/berlin.c
@@ -18,16 +18,6 @@
18#include <asm/hardware/cache-l2x0.h> 18#include <asm/hardware/cache-l2x0.h>
19#include <asm/mach/arch.h> 19#include <asm/mach/arch.h>
20 20
21static void __init berlin_init_machine(void)
22{
23 /*
24 * with DT probing for L2CCs, berlin_init_machine can be removed.
25 * Note: 88DE3005 (Armada 1500-mini) uses pl310 l2cc
26 */
27 l2x0_of_init(0x70c00000, 0xfeffffff);
28 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
29}
30
31static const char * const berlin_dt_compat[] = { 21static const char * const berlin_dt_compat[] = {
32 "marvell,berlin", 22 "marvell,berlin",
33 NULL, 23 NULL,
@@ -35,5 +25,10 @@ static const char * const berlin_dt_compat[] = {
35 25
36DT_MACHINE_START(BERLIN_DT, "Marvell Berlin") 26DT_MACHINE_START(BERLIN_DT, "Marvell Berlin")
37 .dt_compat = berlin_dt_compat, 27 .dt_compat = berlin_dt_compat,
38 .init_machine = berlin_init_machine, 28 /*
29 * with DT probing for L2CCs, berlin_init_machine can be removed.
30 * Note: 88DE3005 (Armada 1500-mini) uses pl310 l2cc
31 */
32 .l2c_aux_val = 0x30c00000,
33 .l2c_aux_mask = 0xfeffffff,
39MACHINE_END 34MACHINE_END
diff --git a/arch/arm/mach-clps711x/board-clep7312.c b/arch/arm/mach-clps711x/board-clep7312.c
index 221b9de32dd6..94a7add88a3f 100644
--- a/arch/arm/mach-clps711x/board-clep7312.c
+++ b/arch/arm/mach-clps711x/board-clep7312.c
@@ -18,6 +18,7 @@
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/types.h> 19#include <linux/types.h>
20#include <linux/string.h> 20#include <linux/string.h>
21#include <linux/memblock.h>
21 22
22#include <asm/setup.h> 23#include <asm/setup.h>
23#include <asm/mach-types.h> 24#include <asm/mach-types.h>
@@ -26,11 +27,9 @@
26#include "common.h" 27#include "common.h"
27 28
28static void __init 29static void __init
29fixup_clep7312(struct tag *tags, char **cmdline, struct meminfo *mi) 30fixup_clep7312(struct tag *tags, char **cmdline)
30{ 31{
31 mi->nr_banks=1; 32 memblock_add(0xc0000000, 0x01000000);
32 mi->bank[0].start = 0xc0000000;
33 mi->bank[0].size = 0x01000000;
34} 33}
35 34
36MACHINE_START(CLEP7212, "Cirrus Logic 7212/7312") 35MACHINE_START(CLEP7212, "Cirrus Logic 7212/7312")
diff --git a/arch/arm/mach-clps711x/board-edb7211.c b/arch/arm/mach-clps711x/board-edb7211.c
index 077609841f14..f9828f89972a 100644
--- a/arch/arm/mach-clps711x/board-edb7211.c
+++ b/arch/arm/mach-clps711x/board-edb7211.c
@@ -16,6 +16,7 @@
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/backlight.h> 17#include <linux/backlight.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/memblock.h>
19 20
20#include <linux/mtd/physmap.h> 21#include <linux/mtd/physmap.h>
21#include <linux/mtd/partitions.h> 22#include <linux/mtd/partitions.h>
@@ -133,7 +134,7 @@ static void __init edb7211_reserve(void)
133} 134}
134 135
135static void __init 136static void __init
136fixup_edb7211(struct tag *tags, char **cmdline, struct meminfo *mi) 137fixup_edb7211(struct tag *tags, char **cmdline)
137{ 138{
138 /* 139 /*
139 * Bank start addresses are not present in the information 140 * Bank start addresses are not present in the information
@@ -143,11 +144,8 @@ fixup_edb7211(struct tag *tags, char **cmdline, struct meminfo *mi)
143 * Banks sizes _are_ present in the param block, but we're 144 * Banks sizes _are_ present in the param block, but we're
144 * not using that information yet. 145 * not using that information yet.
145 */ 146 */
146 mi->bank[0].start = 0xc0000000; 147 memblock_add(0xc0000000, SZ_8M);
147 mi->bank[0].size = SZ_8M; 148 memblock_add(0xc1000000, SZ_8M);
148 mi->bank[1].start = 0xc1000000;
149 mi->bank[1].size = SZ_8M;
150 mi->nr_banks = 2;
151} 149}
152 150
153static void __init edb7211_init(void) 151static void __init edb7211_init(void)
diff --git a/arch/arm/mach-clps711x/board-p720t.c b/arch/arm/mach-clps711x/board-p720t.c
index 67b733744ed7..0cf0e51e6546 100644
--- a/arch/arm/mach-clps711x/board-p720t.c
+++ b/arch/arm/mach-clps711x/board-p720t.c
@@ -295,7 +295,7 @@ static struct generic_bl_info p720t_lcd_backlight_pdata = {
295}; 295};
296 296
297static void __init 297static void __init
298fixup_p720t(struct tag *tag, char **cmdline, struct meminfo *mi) 298fixup_p720t(struct tag *tag, char **cmdline)
299{ 299{
300 /* 300 /*
301 * Our bootloader doesn't setup any tags (yet). 301 * Our bootloader doesn't setup any tags (yet).
diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c
index 2ae28a69e3e5..f85449a6accd 100644
--- a/arch/arm/mach-cns3xxx/core.c
+++ b/arch/arm/mach-cns3xxx/core.c
@@ -272,9 +272,9 @@ void __init cns3xxx_l2x0_init(void)
272 * 272 *
273 * 1 cycle of latency for setup, read and write accesses 273 * 1 cycle of latency for setup, read and write accesses
274 */ 274 */
275 val = readl(base + L2X0_TAG_LATENCY_CTRL); 275 val = readl(base + L310_TAG_LATENCY_CTRL);
276 val &= 0xfffff888; 276 val &= 0xfffff888;
277 writel(val, base + L2X0_TAG_LATENCY_CTRL); 277 writel(val, base + L310_TAG_LATENCY_CTRL);
278 278
279 /* 279 /*
280 * Data RAM Control register 280 * Data RAM Control register
@@ -285,12 +285,12 @@ void __init cns3xxx_l2x0_init(void)
285 * 285 *
286 * 1 cycle of latency for setup, read and write accesses 286 * 1 cycle of latency for setup, read and write accesses
287 */ 287 */
288 val = readl(base + L2X0_DATA_LATENCY_CTRL); 288 val = readl(base + L310_DATA_LATENCY_CTRL);
289 val &= 0xfffff888; 289 val &= 0xfffff888;
290 writel(val, base + L2X0_DATA_LATENCY_CTRL); 290 writel(val, base + L310_DATA_LATENCY_CTRL);
291 291
292 /* 32 KiB, 8-way, parity disable */ 292 /* 32 KiB, 8-way, parity disable */
293 l2x0_init(base, 0x00540000, 0xfe000fff); 293 l2x0_init(base, 0x00500000, 0xfe0f0fff);
294} 294}
295 295
296#endif /* CONFIG_CACHE_L2X0 */ 296#endif /* CONFIG_CACHE_L2X0 */
diff --git a/arch/arm/mach-ep93xx/crunch-bits.S b/arch/arm/mach-ep93xx/crunch-bits.S
index 0ec9bb48fab9..e96923a3017b 100644
--- a/arch/arm/mach-ep93xx/crunch-bits.S
+++ b/arch/arm/mach-ep93xx/crunch-bits.S
@@ -16,6 +16,7 @@
16#include <asm/ptrace.h> 16#include <asm/ptrace.h>
17#include <asm/thread_info.h> 17#include <asm/thread_info.h>
18#include <asm/asm-offsets.h> 18#include <asm/asm-offsets.h>
19#include <asm/assembler.h>
19#include <mach/ep93xx-regs.h> 20#include <mach/ep93xx-regs.h>
20 21
21/* 22/*
@@ -62,14 +63,16 @@
62 * r9 = ret_from_exception 63 * r9 = ret_from_exception
63 * lr = undefined instr exit 64 * lr = undefined instr exit
64 * 65 *
65 * called from prefetch exception handler with interrupts disabled 66 * called from prefetch exception handler with interrupts enabled
66 */ 67 */
67ENTRY(crunch_task_enable) 68ENTRY(crunch_task_enable)
69 inc_preempt_count r10, r3
70
68 ldr r8, =(EP93XX_APB_VIRT_BASE + 0x00130000) @ syscon addr 71 ldr r8, =(EP93XX_APB_VIRT_BASE + 0x00130000) @ syscon addr
69 72
70 ldr r1, [r8, #0x80] 73 ldr r1, [r8, #0x80]
71 tst r1, #0x00800000 @ access to crunch enabled? 74 tst r1, #0x00800000 @ access to crunch enabled?
72 movne pc, lr @ if so no business here 75 bne 2f @ if so no business here
73 mov r3, #0xaa @ unlock syscon swlock 76 mov r3, #0xaa @ unlock syscon swlock
74 str r3, [r8, #0xc0] 77 str r3, [r8, #0xc0]
75 orr r1, r1, #0x00800000 @ enable access to crunch 78 orr r1, r1, #0x00800000 @ enable access to crunch
@@ -142,7 +145,7 @@ crunch_save:
142 145
143 teq r0, #0 @ anything to load? 146 teq r0, #0 @ anything to load?
144 cfldr64eq mvdx0, [r1, #CRUNCH_MVDX0] @ mvdx0 was clobbered 147 cfldr64eq mvdx0, [r1, #CRUNCH_MVDX0] @ mvdx0 was clobbered
145 moveq pc, lr 148 beq 1f
146 149
147crunch_load: 150crunch_load:
148 cfldr64 mvdx0, [r0, #CRUNCH_DSPSC] @ load status word 151 cfldr64 mvdx0, [r0, #CRUNCH_DSPSC] @ load status word
@@ -190,6 +193,11 @@ crunch_load:
190 cfldr64 mvdx14, [r0, #CRUNCH_MVDX14] 193 cfldr64 mvdx14, [r0, #CRUNCH_MVDX14]
191 cfldr64 mvdx15, [r0, #CRUNCH_MVDX15] 194 cfldr64 mvdx15, [r0, #CRUNCH_MVDX15]
192 195
1961:
197#ifdef CONFIG_PREEMPT_COUNT
198 get_thread_info r10
199#endif
2002: dec_preempt_count r10, r3
193 mov pc, lr 201 mov pc, lr
194 202
195/* 203/*
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h
index 9ef3f83efaff..88c619d1e145 100644
--- a/arch/arm/mach-exynos/common.h
+++ b/arch/arm/mach-exynos/common.h
@@ -55,7 +55,6 @@ enum sys_powerdown {
55 NUM_SYS_POWERDOWN, 55 NUM_SYS_POWERDOWN,
56}; 56};
57 57
58extern unsigned long l2x0_regs_phys;
59struct exynos_pmu_conf { 58struct exynos_pmu_conf {
60 void __iomem *reg; 59 void __iomem *reg;
61 unsigned int val[NUM_SYS_POWERDOWN]; 60 unsigned int val[NUM_SYS_POWERDOWN];
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
index b32a907d021d..a763c0862da9 100644
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -32,9 +32,6 @@
32#include "mfc.h" 32#include "mfc.h"
33#include "regs-pmu.h" 33#include "regs-pmu.h"
34 34
35#define L2_AUX_VAL 0x7C470001
36#define L2_AUX_MASK 0xC200ffff
37
38static struct map_desc exynos4_iodesc[] __initdata = { 35static struct map_desc exynos4_iodesc[] __initdata = {
39 { 36 {
40 .virtual = (unsigned long)S3C_VA_SYS, 37 .virtual = (unsigned long)S3C_VA_SYS,
@@ -319,22 +316,6 @@ static int __init exynos_core_init(void)
319} 316}
320core_initcall(exynos_core_init); 317core_initcall(exynos_core_init);
321 318
322static int __init exynos4_l2x0_cache_init(void)
323{
324 int ret;
325
326 ret = l2x0_of_init(L2_AUX_VAL, L2_AUX_MASK);
327 if (ret)
328 return ret;
329
330 if (IS_ENABLED(CONFIG_S5P_SLEEP)) {
331 l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);
332 clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
333 }
334 return 0;
335}
336early_initcall(exynos4_l2x0_cache_init);
337
338static void __init exynos_dt_machine_init(void) 319static void __init exynos_dt_machine_init(void)
339{ 320{
340 struct device_node *i2c_np; 321 struct device_node *i2c_np;
@@ -400,6 +381,8 @@ static void __init exynos_reserve(void)
400DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)") 381DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)")
401 /* Maintainer: Thomas Abraham <thomas.abraham@linaro.org> */ 382 /* Maintainer: Thomas Abraham <thomas.abraham@linaro.org> */
402 /* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */ 383 /* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
384 .l2c_aux_val = 0x3c400001,
385 .l2c_aux_mask = 0xc20fffff,
403 .smp = smp_ops(exynos_smp_ops), 386 .smp = smp_ops(exynos_smp_ops),
404 .map_io = exynos_init_io, 387 .map_io = exynos_init_io,
405 .init_early = exynos_firmware_init, 388 .init_early = exynos_firmware_init,
diff --git a/arch/arm/mach-exynos/sleep.S b/arch/arm/mach-exynos/sleep.S
index a2613e944e10..108a45f4bb62 100644
--- a/arch/arm/mach-exynos/sleep.S
+++ b/arch/arm/mach-exynos/sleep.S
@@ -16,8 +16,6 @@
16 */ 16 */
17 17
18#include <linux/linkage.h> 18#include <linux/linkage.h>
19#include <asm/asm-offsets.h>
20#include <asm/hardware/cache-l2x0.h>
21 19
22#define CPU_MASK 0xff0ffff0 20#define CPU_MASK 0xff0ffff0
23#define CPU_CORTEX_A9 0x410fc090 21#define CPU_CORTEX_A9 0x410fc090
@@ -53,33 +51,7 @@ ENTRY(exynos_cpu_resume)
53 and r0, r0, r1 51 and r0, r0, r1
54 ldr r1, =CPU_CORTEX_A9 52 ldr r1, =CPU_CORTEX_A9
55 cmp r0, r1 53 cmp r0, r1
56 bne skip_l2_resume 54 bleq l2c310_early_resume
57 adr r0, l2x0_regs_phys
58 ldr r0, [r0]
59 cmp r0, #0
60 beq skip_l2_resume
61 ldr r1, [r0, #L2X0_R_PHY_BASE]
62 ldr r2, [r1, #L2X0_CTRL]
63 tst r2, #0x1
64 bne skip_l2_resume
65 ldr r2, [r0, #L2X0_R_AUX_CTRL]
66 str r2, [r1, #L2X0_AUX_CTRL]
67 ldr r2, [r0, #L2X0_R_TAG_LATENCY]
68 str r2, [r1, #L2X0_TAG_LATENCY_CTRL]
69 ldr r2, [r0, #L2X0_R_DATA_LATENCY]
70 str r2, [r1, #L2X0_DATA_LATENCY_CTRL]
71 ldr r2, [r0, #L2X0_R_PREFETCH_CTRL]
72 str r2, [r1, #L2X0_PREFETCH_CTRL]
73 ldr r2, [r0, #L2X0_R_PWR_CTRL]
74 str r2, [r1, #L2X0_POWER_CTRL]
75 mov r2, #1
76 str r2, [r1, #L2X0_CTRL]
77skip_l2_resume:
78#endif 55#endif
79 b cpu_resume 56 b cpu_resume
80ENDPROC(exynos_cpu_resume) 57ENDPROC(exynos_cpu_resume)
81#ifdef CONFIG_CACHE_L2X0
82 .globl l2x0_regs_phys
83l2x0_regs_phys:
84 .long 0
85#endif
diff --git a/arch/arm/mach-footbridge/cats-hw.c b/arch/arm/mach-footbridge/cats-hw.c
index da0415094856..8f05489671b7 100644
--- a/arch/arm/mach-footbridge/cats-hw.c
+++ b/arch/arm/mach-footbridge/cats-hw.c
@@ -76,7 +76,7 @@ __initcall(cats_hw_init);
76 * hard reboots fail on early boards. 76 * hard reboots fail on early boards.
77 */ 77 */
78static void __init 78static void __init
79fixup_cats(struct tag *tags, char **cmdline, struct meminfo *mi) 79fixup_cats(struct tag *tags, char **cmdline)
80{ 80{
81#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) 81#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
82 screen_info.orig_video_lines = 25; 82 screen_info.orig_video_lines = 25;
diff --git a/arch/arm/mach-footbridge/netwinder-hw.c b/arch/arm/mach-footbridge/netwinder-hw.c
index eb1fa5c84723..cdee08c6d239 100644
--- a/arch/arm/mach-footbridge/netwinder-hw.c
+++ b/arch/arm/mach-footbridge/netwinder-hw.c
@@ -620,7 +620,7 @@ __initcall(nw_hw_init);
620 * the parameter page. 620 * the parameter page.
621 */ 621 */
622static void __init 622static void __init
623fixup_netwinder(struct tag *tags, char **cmdline, struct meminfo *mi) 623fixup_netwinder(struct tag *tags, char **cmdline)
624{ 624{
625#ifdef CONFIG_ISAPNP 625#ifdef CONFIG_ISAPNP
626 extern int isapnp_disable; 626 extern int isapnp_disable;
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index c7de89b263dd..8c35ae4ff176 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -51,11 +51,13 @@ static void __init highbank_scu_map_io(void)
51} 51}
52 52
53 53
54static void highbank_l2x0_disable(void) 54static void highbank_l2c310_write_sec(unsigned long val, unsigned reg)
55{ 55{
56 outer_flush_all(); 56 if (reg == L2X0_CTRL)
57 /* Disable PL310 L2 Cache controller */ 57 highbank_smc1(0x102, val);
58 highbank_smc1(0x102, 0x0); 58 else
59 WARN_ONCE(1, "Highbank L2C310: ignoring write to reg 0x%x\n",
60 reg);
59} 61}
60 62
61static void __init highbank_init_irq(void) 63static void __init highbank_init_irq(void)
@@ -64,14 +66,6 @@ static void __init highbank_init_irq(void)
64 66
65 if (of_find_compatible_node(NULL, NULL, "arm,cortex-a9")) 67 if (of_find_compatible_node(NULL, NULL, "arm,cortex-a9"))
66 highbank_scu_map_io(); 68 highbank_scu_map_io();
67
68 /* Enable PL310 L2 Cache controller */
69 if (IS_ENABLED(CONFIG_CACHE_L2X0) &&
70 of_find_compatible_node(NULL, NULL, "arm,pl310-cache")) {
71 highbank_smc1(0x102, 0x1);
72 l2x0_of_init(0, ~0UL);
73 outer_cache.disable = highbank_l2x0_disable;
74 }
75} 69}
76 70
77static void highbank_power_off(void) 71static void highbank_power_off(void)
@@ -185,6 +179,9 @@ DT_MACHINE_START(HIGHBANK, "Highbank")
185#if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE) 179#if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE)
186 .dma_zone_size = (4ULL * SZ_1G), 180 .dma_zone_size = (4ULL * SZ_1G),
187#endif 181#endif
182 .l2c_aux_val = 0,
183 .l2c_aux_mask = ~0,
184 .l2c_write_sec = highbank_l2c310_write_sec,
188 .init_irq = highbank_init_irq, 185 .init_irq = highbank_init_irq,
189 .init_machine = highbank_init, 186 .init_machine = highbank_init,
190 .dt_compat = highbank_match, 187 .dt_compat = highbank_match,
diff --git a/arch/arm/mach-imx/mach-vf610.c b/arch/arm/mach-imx/mach-vf610.c
index 2d8aef5a6efa..c44602758120 100644
--- a/arch/arm/mach-imx/mach-vf610.c
+++ b/arch/arm/mach-imx/mach-vf610.c
@@ -20,19 +20,14 @@ static void __init vf610_init_machine(void)
20 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); 20 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
21} 21}
22 22
23static void __init vf610_init_irq(void)
24{
25 l2x0_of_init(0, ~0UL);
26 irqchip_init();
27}
28
29static const char *vf610_dt_compat[] __initconst = { 23static const char *vf610_dt_compat[] __initconst = {
30 "fsl,vf610", 24 "fsl,vf610",
31 NULL, 25 NULL,
32}; 26};
33 27
34DT_MACHINE_START(VYBRID_VF610, "Freescale Vybrid VF610 (Device Tree)") 28DT_MACHINE_START(VYBRID_VF610, "Freescale Vybrid VF610 (Device Tree)")
35 .init_irq = vf610_init_irq, 29 .l2c_aux_val = 0,
30 .l2c_aux_mask = ~0,
36 .init_machine = vf610_init_machine, 31 .init_machine = vf610_init_machine,
37 .dt_compat = vf610_dt_compat, 32 .dt_compat = vf610_dt_compat,
38 .restart = mxc_restart, 33 .restart = mxc_restart,
diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S
index 20048ff05739..fe123b079c05 100644
--- a/arch/arm/mach-imx/suspend-imx6.S
+++ b/arch/arm/mach-imx/suspend-imx6.S
@@ -334,28 +334,10 @@ ENDPROC(imx6_suspend)
334 * turned into relative ones. 334 * turned into relative ones.
335 */ 335 */
336 336
337#ifdef CONFIG_CACHE_L2X0
338 .macro pl310_resume
339 adr r0, l2x0_saved_regs_offset
340 ldr r2, [r0]
341 add r2, r2, r0
342 ldr r0, [r2, #L2X0_R_PHY_BASE] @ get physical base of l2x0
343 ldr r1, [r2, #L2X0_R_AUX_CTRL] @ get aux_ctrl value
344 str r1, [r0, #L2X0_AUX_CTRL] @ restore aux_ctrl
345 mov r1, #0x1
346 str r1, [r0, #L2X0_CTRL] @ re-enable L2
347 .endm
348
349l2x0_saved_regs_offset:
350 .word l2x0_saved_regs - .
351
352#else
353 .macro pl310_resume
354 .endm
355#endif
356
357ENTRY(v7_cpu_resume) 337ENTRY(v7_cpu_resume)
358 bl v7_invalidate_l1 338 bl v7_invalidate_l1
359 pl310_resume 339#ifdef CONFIG_CACHE_L2X0
340 bl l2c310_early_resume
341#endif
360 b cpu_resume 342 b cpu_resume
361ENDPROC(v7_cpu_resume) 343ENDPROC(v7_cpu_resume)
diff --git a/arch/arm/mach-imx/system.c b/arch/arm/mach-imx/system.c
index 5e3027d3692f..3b0733edb68c 100644
--- a/arch/arm/mach-imx/system.c
+++ b/arch/arm/mach-imx/system.c
@@ -124,7 +124,7 @@ void __init imx_init_l2cache(void)
124 } 124 }
125 125
126 /* Configure the L2 PREFETCH and POWER registers */ 126 /* Configure the L2 PREFETCH and POWER registers */
127 val = readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL); 127 val = readl_relaxed(l2x0_base + L310_PREFETCH_CTRL);
128 val |= 0x70800000; 128 val |= 0x70800000;
129 /* 129 /*
130 * The L2 cache controller(PL310) version on the i.MX6D/Q is r3p1-50rel0 130 * The L2 cache controller(PL310) version on the i.MX6D/Q is r3p1-50rel0
@@ -137,14 +137,12 @@ void __init imx_init_l2cache(void)
137 */ 137 */
138 if (cpu_is_imx6q()) 138 if (cpu_is_imx6q())
139 val &= ~(1 << 30 | 1 << 23); 139 val &= ~(1 << 30 | 1 << 23);
140 writel_relaxed(val, l2x0_base + L2X0_PREFETCH_CTRL); 140 writel_relaxed(val, l2x0_base + L310_PREFETCH_CTRL);
141 val = L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN;
142 writel_relaxed(val, l2x0_base + L2X0_POWER_CTRL);
143 141
144 iounmap(l2x0_base); 142 iounmap(l2x0_base);
145 of_node_put(np); 143 of_node_put(np);
146 144
147out: 145out:
148 l2x0_of_init(0, ~0UL); 146 l2x0_of_init(0, ~0);
149} 147}
150#endif 148#endif
diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c
index a77529887cbc..61bfe584a9d7 100644
--- a/arch/arm/mach-msm/board-halibut.c
+++ b/arch/arm/mach-msm/board-halibut.c
@@ -83,11 +83,6 @@ static void __init halibut_init(void)
83 platform_add_devices(devices, ARRAY_SIZE(devices)); 83 platform_add_devices(devices, ARRAY_SIZE(devices));
84} 84}
85 85
86static void __init halibut_fixup(struct tag *tags, char **cmdline,
87 struct meminfo *mi)
88{
89}
90
91static void __init halibut_map_io(void) 86static void __init halibut_map_io(void)
92{ 87{
93 msm_map_common_io(); 88 msm_map_common_io();
@@ -100,7 +95,6 @@ static void __init halibut_init_late(void)
100 95
101MACHINE_START(HALIBUT, "Halibut Board (QCT SURF7200A)") 96MACHINE_START(HALIBUT, "Halibut Board (QCT SURF7200A)")
102 .atag_offset = 0x100, 97 .atag_offset = 0x100,
103 .fixup = halibut_fixup,
104 .map_io = halibut_map_io, 98 .map_io = halibut_map_io,
105 .init_early = halibut_init_early, 99 .init_early = halibut_init_early,
106 .init_irq = halibut_init_irq, 100 .init_irq = halibut_init_irq,
diff --git a/arch/arm/mach-msm/board-mahimahi.c b/arch/arm/mach-msm/board-mahimahi.c
index 7d9981cb400e..873c3ca3cd7e 100644
--- a/arch/arm/mach-msm/board-mahimahi.c
+++ b/arch/arm/mach-msm/board-mahimahi.c
@@ -22,6 +22,7 @@
22#include <linux/io.h> 22#include <linux/io.h>
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/memblock.h>
25 26
26#include <asm/mach-types.h> 27#include <asm/mach-types.h>
27#include <asm/mach/arch.h> 28#include <asm/mach/arch.h>
@@ -52,16 +53,10 @@ static void __init mahimahi_init(void)
52 platform_add_devices(devices, ARRAY_SIZE(devices)); 53 platform_add_devices(devices, ARRAY_SIZE(devices));
53} 54}
54 55
55static void __init mahimahi_fixup(struct tag *tags, char **cmdline, 56static void __init mahimahi_fixup(struct tag *tags, char **cmdline)
56 struct meminfo *mi)
57{ 57{
58 mi->nr_banks = 2; 58 memblock_add(PHYS_OFFSET, 219*SZ_1M);
59 mi->bank[0].start = PHYS_OFFSET; 59 memblock_add(MSM_HIGHMEM_BASE, MSM_HIGHMEM_SIZE);
60 mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET);
61 mi->bank[0].size = (219*1024*1024);
62 mi->bank[1].start = MSM_HIGHMEM_BASE;
63 mi->bank[1].node = PHYS_TO_NID(MSM_HIGHMEM_BASE);
64 mi->bank[1].size = MSM_HIGHMEM_SIZE;
65} 60}
66 61
67static void __init mahimahi_map_io(void) 62static void __init mahimahi_map_io(void)
diff --git a/arch/arm/mach-msm/board-msm7x30.c b/arch/arm/mach-msm/board-msm7x30.c
index 46de789ad3ae..b621b23a5ecc 100644
--- a/arch/arm/mach-msm/board-msm7x30.c
+++ b/arch/arm/mach-msm/board-msm7x30.c
@@ -40,8 +40,7 @@
40#include "proc_comm.h" 40#include "proc_comm.h"
41#include "common.h" 41#include "common.h"
42 42
43static void __init msm7x30_fixup(struct tag *tag, char **cmdline, 43static void __init msm7x30_fixup(struct tag *tag, char **cmdline)
44 struct meminfo *mi)
45{ 44{
46 for (; tag->hdr.size; tag = tag_next(tag)) 45 for (; tag->hdr.size; tag = tag_next(tag))
47 if (tag->hdr.tag == ATAG_MEM && tag->u.mem.start == 0x200000) { 46 if (tag->hdr.tag == ATAG_MEM && tag->u.mem.start == 0x200000) {
diff --git a/arch/arm/mach-msm/board-sapphire.c b/arch/arm/mach-msm/board-sapphire.c
index 327605174d63..e50967926dcd 100644
--- a/arch/arm/mach-msm/board-sapphire.c
+++ b/arch/arm/mach-msm/board-sapphire.c
@@ -35,6 +35,7 @@
35 35
36#include <linux/mtd/nand.h> 36#include <linux/mtd/nand.h>
37#include <linux/mtd/partitions.h> 37#include <linux/mtd/partitions.h>
38#include <linux/memblock.h>
38 39
39#include "gpio_chip.h" 40#include "gpio_chip.h"
40#include "board-sapphire.h" 41#include "board-sapphire.h"
@@ -74,22 +75,18 @@ static struct map_desc sapphire_io_desc[] __initdata = {
74 } 75 }
75}; 76};
76 77
77static void __init sapphire_fixup(struct tag *tags, char **cmdline, 78static void __init sapphire_fixup(struct tag *tags, char **cmdline)
78 struct meminfo *mi)
79{ 79{
80 int smi_sz = parse_tag_smi((const struct tag *)tags); 80 int smi_sz = parse_tag_smi((const struct tag *)tags);
81 81
82 mi->nr_banks = 1;
83 mi->bank[0].start = PHYS_OFFSET;
84 mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET);
85 if (smi_sz == 32) { 82 if (smi_sz == 32) {
86 mi->bank[0].size = (84*1024*1024); 83 memblock_add(PHYS_OFFSET, 84*SZ_1M);
87 } else if (smi_sz == 64) { 84 } else if (smi_sz == 64) {
88 mi->bank[0].size = (101*1024*1024); 85 memblock_add(PHYS_OFFSET, 101*SZ_1M);
89 } else { 86 } else {
87 memblock_add(PHYS_OFFSET, 101*SZ_1M);
90 /* Give a default value when not get smi size */ 88 /* Give a default value when not get smi size */
91 smi_sz = 64; 89 smi_sz = 64;
92 mi->bank[0].size = (101*1024*1024);
93 } 90 }
94} 91}
95 92
diff --git a/arch/arm/mach-msm/board-trout.c b/arch/arm/mach-msm/board-trout.c
index 015d544aa017..58826cfab6b0 100644
--- a/arch/arm/mach-msm/board-trout.c
+++ b/arch/arm/mach-msm/board-trout.c
@@ -19,6 +19,7 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/clkdev.h> 21#include <linux/clkdev.h>
22#include <linux/memblock.h>
22 23
23#include <asm/system_info.h> 24#include <asm/system_info.h>
24#include <asm/mach-types.h> 25#include <asm/mach-types.h>
@@ -55,12 +56,9 @@ static void __init trout_init_irq(void)
55 msm_init_irq(); 56 msm_init_irq();
56} 57}
57 58
58static void __init trout_fixup(struct tag *tags, char **cmdline, 59static void __init trout_fixup(struct tag *tags, char **cmdline)
59 struct meminfo *mi)
60{ 60{
61 mi->nr_banks = 1; 61 memblock_add(PHYS_OFFSET, 101*SZ_1M);
62 mi->bank[0].start = PHYS_OFFSET;
63 mi->bank[0].size = (101*1024*1024);
64} 62}
65 63
66static void __init trout_init(void) 64static void __init trout_init(void)
diff --git a/arch/arm/mach-mvebu/board-v7.c b/arch/arm/mach-mvebu/board-v7.c
index 333fca8fdc41..48169caa56ea 100644
--- a/arch/arm/mach-mvebu/board-v7.c
+++ b/arch/arm/mach-mvebu/board-v7.c
@@ -59,9 +59,6 @@ static void __init mvebu_timer_and_clk_init(void)
59 clocksource_of_init(); 59 clocksource_of_init();
60 coherency_init(); 60 coherency_init();
61 BUG_ON(mvebu_mbus_dt_init()); 61 BUG_ON(mvebu_mbus_dt_init());
62#ifdef CONFIG_CACHE_L2X0
63 l2x0_of_init(0, ~0UL);
64#endif
65 62
66 if (of_machine_is_compatible("marvell,armada375")) 63 if (of_machine_is_compatible("marvell,armada375"))
67 hook_fault_code(16 + 6, armada_375_external_abort_wa, SIGBUS, 0, 64 hook_fault_code(16 + 6, armada_375_external_abort_wa, SIGBUS, 0,
@@ -109,6 +106,8 @@ static const char * const armada_370_xp_dt_compat[] = {
109}; 106};
110 107
111DT_MACHINE_START(ARMADA_370_XP_DT, "Marvell Armada 370/XP (Device Tree)") 108DT_MACHINE_START(ARMADA_370_XP_DT, "Marvell Armada 370/XP (Device Tree)")
109 .l2c_aux_val = 0,
110 .l2c_aux_mask = ~0,
112 .smp = smp_ops(armada_xp_smp_ops), 111 .smp = smp_ops(armada_xp_smp_ops),
113 .init_machine = mvebu_dt_init, 112 .init_machine = mvebu_dt_init,
114 .init_time = mvebu_timer_and_clk_init, 113 .init_time = mvebu_timer_and_clk_init,
@@ -122,6 +121,8 @@ static const char * const armada_375_dt_compat[] = {
122}; 121};
123 122
124DT_MACHINE_START(ARMADA_375_DT, "Marvell Armada 375 (Device Tree)") 123DT_MACHINE_START(ARMADA_375_DT, "Marvell Armada 375 (Device Tree)")
124 .l2c_aux_val = 0,
125 .l2c_aux_mask = ~0,
125 .init_time = mvebu_timer_and_clk_init, 126 .init_time = mvebu_timer_and_clk_init,
126 .restart = mvebu_restart, 127 .restart = mvebu_restart,
127 .dt_compat = armada_375_dt_compat, 128 .dt_compat = armada_375_dt_compat,
@@ -134,6 +135,8 @@ static const char * const armada_38x_dt_compat[] = {
134}; 135};
135 136
136DT_MACHINE_START(ARMADA_38X_DT, "Marvell Armada 380/385 (Device Tree)") 137DT_MACHINE_START(ARMADA_38X_DT, "Marvell Armada 380/385 (Device Tree)")
138 .l2c_aux_val = 0,
139 .l2c_aux_mask = ~0,
137 .init_time = mvebu_timer_and_clk_init, 140 .init_time = mvebu_timer_and_clk_init,
138 .restart = mvebu_restart, 141 .restart = mvebu_restart,
139 .dt_compat = armada_38x_dt_compat, 142 .dt_compat = armada_38x_dt_compat,
diff --git a/arch/arm/mach-nomadik/cpu-8815.c b/arch/arm/mach-nomadik/cpu-8815.c
index 4a1065e41e9c..9116ca476d7c 100644
--- a/arch/arm/mach-nomadik/cpu-8815.c
+++ b/arch/arm/mach-nomadik/cpu-8815.c
@@ -143,23 +143,16 @@ static int __init cpu8815_mmcsd_init(void)
143} 143}
144device_initcall(cpu8815_mmcsd_init); 144device_initcall(cpu8815_mmcsd_init);
145 145
146static void __init cpu8815_init_of(void)
147{
148#ifdef CONFIG_CACHE_L2X0
149 /* At full speed latency must be >=2, so 0x249 in low bits */
150 l2x0_of_init(0x00730249, 0xfe000fff);
151#endif
152 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
153}
154
155static const char * cpu8815_board_compat[] = { 146static const char * cpu8815_board_compat[] = {
156 "calaosystems,usb-s8815", 147 "calaosystems,usb-s8815",
157 NULL, 148 NULL,
158}; 149};
159 150
160DT_MACHINE_START(NOMADIK_DT, "Nomadik STn8815") 151DT_MACHINE_START(NOMADIK_DT, "Nomadik STn8815")
152 /* At full speed latency must be >=2, so 0x249 in low bits */
153 .l2c_aux_val = 0x00700249,
154 .l2c_aux_mask = 0xfe0fefff,
161 .map_io = cpu8815_map_io, 155 .map_io = cpu8815_map_io,
162 .init_machine = cpu8815_init_of,
163 .restart = cpu8815_restart, 156 .restart = cpu8815_restart,
164 .dt_compat = cpu8815_board_compat, 157 .dt_compat = cpu8815_board_compat,
165MACHINE_END 158MACHINE_END
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index cb31d4390d52..0ba482638ebf 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -65,6 +65,7 @@ config SOC_AM43XX
65 select ARCH_HAS_OPP 65 select ARCH_HAS_OPP
66 select ARM_GIC 66 select ARM_GIC
67 select MACH_OMAP_GENERIC 67 select MACH_OMAP_GENERIC
68 select MIGHT_HAVE_CACHE_L2X0
68 69
69config SOC_DRA7XX 70config SOC_DRA7XX
70 bool "TI DRA7XX" 71 bool "TI DRA7XX"
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index d88aff7baff8..ff029737c8f0 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -91,6 +91,7 @@ extern void omap3_sync32k_timer_init(void);
91extern void omap3_secure_sync32k_timer_init(void); 91extern void omap3_secure_sync32k_timer_init(void);
92extern void omap3_gptimer_timer_init(void); 92extern void omap3_gptimer_timer_init(void);
93extern void omap4_local_timer_init(void); 93extern void omap4_local_timer_init(void);
94int omap_l2_cache_init(void);
94extern void omap5_realtime_timer_init(void); 95extern void omap5_realtime_timer_init(void);
95 96
96void omap2420_init_early(void); 97void omap2420_init_early(void);
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index f14f9ac2dca1..4e2df49991ad 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -609,6 +609,7 @@ void __init am43xx_init_early(void)
609 am43xx_clockdomains_init(); 609 am43xx_clockdomains_init();
610 am43xx_hwmod_init(); 610 am43xx_hwmod_init();
611 omap_hwmod_init_postsetup(); 611 omap_hwmod_init_postsetup();
612 omap_l2_cache_init();
612 omap_clk_soc_init = am43xx_dt_clk_init; 613 omap_clk_soc_init = am43xx_dt_clk_init;
613} 614}
614 615
@@ -640,6 +641,7 @@ void __init omap4430_init_early(void)
640 omap44xx_clockdomains_init(); 641 omap44xx_clockdomains_init();
641 omap44xx_hwmod_init(); 642 omap44xx_hwmod_init();
642 omap_hwmod_init_postsetup(); 643 omap_hwmod_init_postsetup();
644 omap_l2_cache_init();
643 omap_clk_soc_init = omap4xxx_dt_clk_init; 645 omap_clk_soc_init = omap4xxx_dt_clk_init;
644} 646}
645 647
diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
index 667915d236f3..61cb77f8cf12 100644
--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
@@ -187,19 +187,15 @@ static void l2x0_pwrst_prepare(unsigned int cpu_id, unsigned int save_state)
187 * in every restore MPUSS OFF path. 187 * in every restore MPUSS OFF path.
188 */ 188 */
189#ifdef CONFIG_CACHE_L2X0 189#ifdef CONFIG_CACHE_L2X0
190static void save_l2x0_context(void) 190static void __init save_l2x0_context(void)
191{ 191{
192 u32 val; 192 __raw_writel(l2x0_saved_regs.aux_ctrl,
193 void __iomem *l2x0_base = omap4_get_l2cache_base(); 193 sar_base + L2X0_AUXCTRL_OFFSET);
194 if (l2x0_base) { 194 __raw_writel(l2x0_saved_regs.prefetch_ctrl,
195 val = __raw_readl(l2x0_base + L2X0_AUX_CTRL); 195 sar_base + L2X0_PREFETCH_CTRL_OFFSET);
196 __raw_writel(val, sar_base + L2X0_AUXCTRL_OFFSET);
197 val = __raw_readl(l2x0_base + L2X0_PREFETCH_CTRL);
198 __raw_writel(val, sar_base + L2X0_PREFETCH_CTRL_OFFSET);
199 }
200} 196}
201#else 197#else
202static void save_l2x0_context(void) 198static void __init save_l2x0_context(void)
203{} 199{}
204#endif 200#endif
205 201
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 95e171a055f3..c41ff8b638e1 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -167,75 +167,57 @@ void __iomem *omap4_get_l2cache_base(void)
167 return l2cache_base; 167 return l2cache_base;
168} 168}
169 169
170static void omap4_l2x0_disable(void) 170static void omap4_l2c310_write_sec(unsigned long val, unsigned reg)
171{ 171{
172 outer_flush_all(); 172 unsigned smc_op;
173 /* Disable PL310 L2 Cache controller */
174 omap_smc1(0x102, 0x0);
175}
176 173
177static void omap4_l2x0_set_debug(unsigned long val) 174 switch (reg) {
178{ 175 case L2X0_CTRL:
179 /* Program PL310 L2 Cache controller debug register */ 176 smc_op = OMAP4_MON_L2X0_CTRL_INDEX;
180 omap_smc1(0x100, val); 177 break;
178
179 case L2X0_AUX_CTRL:
180 smc_op = OMAP4_MON_L2X0_AUXCTRL_INDEX;
181 break;
182
183 case L2X0_DEBUG_CTRL:
184 smc_op = OMAP4_MON_L2X0_DBG_CTRL_INDEX;
185 break;
186
187 case L310_PREFETCH_CTRL:
188 smc_op = OMAP4_MON_L2X0_PREFETCH_INDEX;
189 break;
190
191 default:
192 WARN_ONCE(1, "OMAP L2C310: ignoring write to reg 0x%x\n", reg);
193 return;
194 }
195
196 omap_smc1(smc_op, val);
181} 197}
182 198
183static int __init omap_l2_cache_init(void) 199int __init omap_l2_cache_init(void)
184{ 200{
185 u32 aux_ctrl = 0; 201 u32 aux_ctrl;
186
187 /*
188 * To avoid code running on other OMAPs in
189 * multi-omap builds
190 */
191 if (!cpu_is_omap44xx())
192 return -ENODEV;
193 202
194 /* Static mapping, never released */ 203 /* Static mapping, never released */
195 l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K); 204 l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
196 if (WARN_ON(!l2cache_base)) 205 if (WARN_ON(!l2cache_base))
197 return -ENOMEM; 206 return -ENOMEM;
198 207
199 /* 208 /* 16-way associativity, parity disabled, way size - 64KB (es2.0 +) */
200 * 16-way associativity, parity disabled 209 aux_ctrl = L2C_AUX_CTRL_SHARED_OVERRIDE |
201 * Way size - 32KB (es1.0) 210 L310_AUX_CTRL_DATA_PREFETCH |
202 * Way size - 64KB (es2.0 +) 211 L310_AUX_CTRL_INSTR_PREFETCH;
203 */
204 aux_ctrl = ((1 << L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT) |
205 (0x1 << 25) |
206 (0x1 << L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT) |
207 (0x1 << L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT));
208
209 if (omap_rev() == OMAP4430_REV_ES1_0) {
210 aux_ctrl |= 0x2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT;
211 } else {
212 aux_ctrl |= ((0x3 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) |
213 (1 << L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT) |
214 (1 << L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT) |
215 (1 << L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT) |
216 (1 << L2X0_AUX_CTRL_EARLY_BRESP_SHIFT));
217 }
218 if (omap_rev() != OMAP4430_REV_ES1_0)
219 omap_smc1(0x109, aux_ctrl);
220
221 /* Enable PL310 L2 Cache controller */
222 omap_smc1(0x102, 0x1);
223 212
213 outer_cache.write_sec = omap4_l2c310_write_sec;
224 if (of_have_populated_dt()) 214 if (of_have_populated_dt())
225 l2x0_of_init(aux_ctrl, L2X0_AUX_CTRL_MASK); 215 l2x0_of_init(aux_ctrl, 0xcf9fffff);
226 else 216 else
227 l2x0_init(l2cache_base, aux_ctrl, L2X0_AUX_CTRL_MASK); 217 l2x0_init(l2cache_base, aux_ctrl, 0xcf9fffff);
228
229 /*
230 * Override default outer_cache.disable with a OMAP4
231 * specific one
232 */
233 outer_cache.disable = omap4_l2x0_disable;
234 outer_cache.set_debug = omap4_l2x0_set_debug;
235 218
236 return 0; 219 return 0;
237} 220}
238omap_early_initcall(omap_l2_cache_init);
239#endif 221#endif
240 222
241void __iomem *omap4_get_sar_ram_base(void) 223void __iomem *omap4_get_sar_ram_base(void)
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index 3f1de1111e0f..6bbb7b55c6d1 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -365,8 +365,7 @@ void orion5x_restart(enum reboot_mode mode, const char *cmd)
365 * Many orion-based systems have buggy bootloader implementations. 365 * Many orion-based systems have buggy bootloader implementations.
366 * This is a common fixup for bogus memory tags. 366 * This is a common fixup for bogus memory tags.
367 */ 367 */
368void __init tag_fixup_mem32(struct tag *t, char **from, 368void __init tag_fixup_mem32(struct tag *t, char **from)
369 struct meminfo *meminfo)
370{ 369{
371 for (; t->hdr.size; t = tag_next(t)) 370 for (; t->hdr.size; t = tag_next(t))
372 if (t->hdr.tag == ATAG_MEM && 371 if (t->hdr.tag == ATAG_MEM &&
diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h
index 7548db2bfb8a..ca3803017c59 100644
--- a/arch/arm/mach-orion5x/common.h
+++ b/arch/arm/mach-orion5x/common.h
@@ -71,9 +71,8 @@ void edmini_v2_init(void);
71static inline void edmini_v2_init(void) {}; 71static inline void edmini_v2_init(void) {};
72#endif 72#endif
73 73
74struct meminfo;
75struct tag; 74struct tag;
76extern void __init tag_fixup_mem32(struct tag *, char **, struct meminfo *); 75extern void __init tag_fixup_mem32(struct tag *, char **);
77 76
78/***************************************************************************** 77/*****************************************************************************
79 * Helpers to access Orion registers 78 * Helpers to access Orion registers
diff --git a/arch/arm/mach-prima2/Makefile b/arch/arm/mach-prima2/Makefile
index 7a6b4a323125..8846e7d87ea5 100644
--- a/arch/arm/mach-prima2/Makefile
+++ b/arch/arm/mach-prima2/Makefile
@@ -2,7 +2,6 @@ obj-y += rstc.o
2obj-y += common.o 2obj-y += common.o
3obj-y += rtciobrg.o 3obj-y += rtciobrg.o
4obj-$(CONFIG_DEBUG_LL) += lluart.o 4obj-$(CONFIG_DEBUG_LL) += lluart.o
5obj-$(CONFIG_CACHE_L2X0) += l2x0.o
6obj-$(CONFIG_SUSPEND) += pm.o sleep.o 5obj-$(CONFIG_SUSPEND) += pm.o sleep.o
7obj-$(CONFIG_SMP) += platsmp.o headsmp.o 6obj-$(CONFIG_SMP) += platsmp.o headsmp.o
8obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o 7obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
diff --git a/arch/arm/mach-prima2/common.c b/arch/arm/mach-prima2/common.c
index 47c7819edb9b..a860ea27e8ae 100644
--- a/arch/arm/mach-prima2/common.c
+++ b/arch/arm/mach-prima2/common.c
@@ -34,6 +34,8 @@ static const char *atlas6_dt_match[] __initconst = {
34 34
35DT_MACHINE_START(ATLAS6_DT, "Generic ATLAS6 (Flattened Device Tree)") 35DT_MACHINE_START(ATLAS6_DT, "Generic ATLAS6 (Flattened Device Tree)")
36 /* Maintainer: Barry Song <baohua.song@csr.com> */ 36 /* Maintainer: Barry Song <baohua.song@csr.com> */
37 .l2c_aux_val = 0,
38 .l2c_aux_mask = ~0,
37 .map_io = sirfsoc_map_io, 39 .map_io = sirfsoc_map_io,
38 .init_late = sirfsoc_init_late, 40 .init_late = sirfsoc_init_late,
39 .dt_compat = atlas6_dt_match, 41 .dt_compat = atlas6_dt_match,
@@ -48,6 +50,8 @@ static const char *prima2_dt_match[] __initconst = {
48 50
49DT_MACHINE_START(PRIMA2_DT, "Generic PRIMA2 (Flattened Device Tree)") 51DT_MACHINE_START(PRIMA2_DT, "Generic PRIMA2 (Flattened Device Tree)")
50 /* Maintainer: Barry Song <baohua.song@csr.com> */ 52 /* Maintainer: Barry Song <baohua.song@csr.com> */
53 .l2c_aux_val = 0,
54 .l2c_aux_mask = ~0,
51 .map_io = sirfsoc_map_io, 55 .map_io = sirfsoc_map_io,
52 .dma_zone_size = SZ_256M, 56 .dma_zone_size = SZ_256M,
53 .init_late = sirfsoc_init_late, 57 .init_late = sirfsoc_init_late,
@@ -63,6 +67,8 @@ static const char *marco_dt_match[] __initconst = {
63 67
64DT_MACHINE_START(MARCO_DT, "Generic MARCO (Flattened Device Tree)") 68DT_MACHINE_START(MARCO_DT, "Generic MARCO (Flattened Device Tree)")
65 /* Maintainer: Barry Song <baohua.song@csr.com> */ 69 /* Maintainer: Barry Song <baohua.song@csr.com> */
70 .l2c_aux_val = 0,
71 .l2c_aux_mask = ~0,
66 .smp = smp_ops(sirfsoc_smp_ops), 72 .smp = smp_ops(sirfsoc_smp_ops),
67 .map_io = sirfsoc_map_io, 73 .map_io = sirfsoc_map_io,
68 .init_late = sirfsoc_init_late, 74 .init_late = sirfsoc_init_late,
diff --git a/arch/arm/mach-prima2/l2x0.c b/arch/arm/mach-prima2/l2x0.c
deleted file mode 100644
index c7102539c0b0..000000000000
--- a/arch/arm/mach-prima2/l2x0.c
+++ /dev/null
@@ -1,49 +0,0 @@
1/*
2 * l2 cache initialization for CSR SiRFprimaII
3 *
4 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
5 *
6 * Licensed under GPLv2 or later.
7 */
8
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/of.h>
12#include <asm/hardware/cache-l2x0.h>
13
14struct l2x0_aux {
15 u32 val;
16 u32 mask;
17};
18
19static const struct l2x0_aux prima2_l2x0_aux __initconst = {
20 .val = 2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT,
21 .mask = 0,
22};
23
24static const struct l2x0_aux marco_l2x0_aux __initconst = {
25 .val = (2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) |
26 (1 << L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT),
27 .mask = L2X0_AUX_CTRL_MASK,
28};
29
30static const struct of_device_id sirf_l2x0_ids[] __initconst = {
31 { .compatible = "sirf,prima2-pl310-cache", .data = &prima2_l2x0_aux, },
32 { .compatible = "sirf,marco-pl310-cache", .data = &marco_l2x0_aux, },
33 {},
34};
35
36static int __init sirfsoc_l2x0_init(void)
37{
38 struct device_node *np;
39 const struct l2x0_aux *aux;
40
41 np = of_find_matching_node(NULL, sirf_l2x0_ids);
42 if (np) {
43 aux = of_match_node(sirf_l2x0_ids, np)->data;
44 return l2x0_of_init(aux->val, aux->mask);
45 }
46
47 return 0;
48}
49early_initcall(sirfsoc_l2x0_init);
diff --git a/arch/arm/mach-prima2/pm.c b/arch/arm/mach-prima2/pm.c
index c4525a88e5da..96e9bc102117 100644
--- a/arch/arm/mach-prima2/pm.c
+++ b/arch/arm/mach-prima2/pm.c
@@ -71,7 +71,6 @@ static int sirfsoc_pm_enter(suspend_state_t state)
71 case PM_SUSPEND_MEM: 71 case PM_SUSPEND_MEM:
72 sirfsoc_pre_suspend_power_off(); 72 sirfsoc_pre_suspend_power_off();
73 73
74 outer_flush_all();
75 outer_disable(); 74 outer_disable();
76 /* go zzz */ 75 /* go zzz */
77 cpu_suspend(0, sirfsoc_finish_suspend); 76 cpu_suspend(0, sirfsoc_finish_suspend);
diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
index 584439bfa59f..4d3588d26c2a 100644
--- a/arch/arm/mach-pxa/cm-x300.c
+++ b/arch/arm/mach-pxa/cm-x300.c
@@ -837,8 +837,7 @@ static void __init cm_x300_init(void)
837 cm_x300_init_bl(); 837 cm_x300_init_bl();
838} 838}
839 839
840static void __init cm_x300_fixup(struct tag *tags, char **cmdline, 840static void __init cm_x300_fixup(struct tag *tags, char **cmdline)
841 struct meminfo *mi)
842{ 841{
843 /* Make sure that mi->bank[0].start = PHYS_ADDR */ 842 /* Make sure that mi->bank[0].start = PHYS_ADDR */
844 for (; tags->hdr.size; tags = tag_next(tags)) 843 for (; tags->hdr.size; tags = tag_next(tags))
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index 57d60542f982..91dd1c7cdbcd 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -34,6 +34,7 @@
34#include <linux/input/matrix_keypad.h> 34#include <linux/input/matrix_keypad.h>
35#include <linux/gpio_keys.h> 35#include <linux/gpio_keys.h>
36#include <linux/module.h> 36#include <linux/module.h>
37#include <linux/memblock.h>
37#include <video/w100fb.h> 38#include <video/w100fb.h>
38 39
39#include <asm/setup.h> 40#include <asm/setup.h>
@@ -753,16 +754,13 @@ static void __init corgi_init(void)
753 platform_add_devices(devices, ARRAY_SIZE(devices)); 754 platform_add_devices(devices, ARRAY_SIZE(devices));
754} 755}
755 756
756static void __init fixup_corgi(struct tag *tags, char **cmdline, 757static void __init fixup_corgi(struct tag *tags, char **cmdline)
757 struct meminfo *mi)
758{ 758{
759 sharpsl_save_param(); 759 sharpsl_save_param();
760 mi->nr_banks=1;
761 mi->bank[0].start = 0xa0000000;
762 if (machine_is_corgi()) 760 if (machine_is_corgi())
763 mi->bank[0].size = (32*1024*1024); 761 memblock_add(0xa0000000, SZ_32M);
764 else 762 else
765 mi->bank[0].size = (64*1024*1024); 763 memblock_add(0xa0000000, SZ_64M);
766} 764}
767 765
768#ifdef CONFIG_MACH_CORGI 766#ifdef CONFIG_MACH_CORGI
diff --git a/arch/arm/mach-pxa/eseries.c b/arch/arm/mach-pxa/eseries.c
index 8280ebcaab9f..cfb864173ce3 100644
--- a/arch/arm/mach-pxa/eseries.c
+++ b/arch/arm/mach-pxa/eseries.c
@@ -21,6 +21,7 @@
21#include <linux/mtd/nand.h> 21#include <linux/mtd/nand.h>
22#include <linux/mtd/partitions.h> 22#include <linux/mtd/partitions.h>
23#include <linux/usb/gpio_vbus.h> 23#include <linux/usb/gpio_vbus.h>
24#include <linux/memblock.h>
24 25
25#include <video/w100fb.h> 26#include <video/w100fb.h>
26 27
@@ -41,14 +42,12 @@
41#include "clock.h" 42#include "clock.h"
42 43
43/* Only e800 has 128MB RAM */ 44/* Only e800 has 128MB RAM */
44void __init eseries_fixup(struct tag *tags, char **cmdline, struct meminfo *mi) 45void __init eseries_fixup(struct tag *tags, char **cmdline)
45{ 46{
46 mi->nr_banks=1;
47 mi->bank[0].start = 0xa0000000;
48 if (machine_is_e800()) 47 if (machine_is_e800())
49 mi->bank[0].size = (128*1024*1024); 48 memblock_add(0xa0000000, SZ_128M);
50 else 49 else
51 mi->bank[0].size = (64*1024*1024); 50 memblock_add(0xa0000000, SZ_64M);
52} 51}
53 52
54struct gpio_vbus_mach_info e7xx_udc_info = { 53struct gpio_vbus_mach_info e7xx_udc_info = {
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index aedf053a1de5..131991629116 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -29,6 +29,7 @@
29#include <linux/spi/ads7846.h> 29#include <linux/spi/ads7846.h>
30#include <linux/spi/pxa2xx_spi.h> 30#include <linux/spi/pxa2xx_spi.h>
31#include <linux/mtd/sharpsl.h> 31#include <linux/mtd/sharpsl.h>
32#include <linux/memblock.h>
32 33
33#include <mach/hardware.h> 34#include <mach/hardware.h>
34#include <asm/mach-types.h> 35#include <asm/mach-types.h>
@@ -456,13 +457,10 @@ static void __init poodle_init(void)
456 poodle_init_spi(); 457 poodle_init_spi();
457} 458}
458 459
459static void __init fixup_poodle(struct tag *tags, char **cmdline, 460static void __init fixup_poodle(struct tag *tags, char **cmdline)
460 struct meminfo *mi)
461{ 461{
462 sharpsl_save_param(); 462 sharpsl_save_param();
463 mi->nr_banks=1; 463 memblock_add(0xa0000000, SZ_32M);
464 mi->bank[0].start = 0xa0000000;
465 mi->bank[0].size = (32*1024*1024);
466} 464}
467 465
468MACHINE_START(POODLE, "SHARP Poodle") 466MACHINE_START(POODLE, "SHARP Poodle")
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 0b11c1af51c4..840c3a48e720 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -32,6 +32,7 @@
32#include <linux/io.h> 32#include <linux/io.h>
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/reboot.h> 34#include <linux/reboot.h>
35#include <linux/memblock.h>
35 36
36#include <asm/setup.h> 37#include <asm/setup.h>
37#include <asm/mach-types.h> 38#include <asm/mach-types.h>
@@ -971,13 +972,10 @@ static void __init spitz_init(void)
971 spitz_i2c_init(); 972 spitz_i2c_init();
972} 973}
973 974
974static void __init spitz_fixup(struct tag *tags, char **cmdline, 975static void __init spitz_fixup(struct tag *tags, char **cmdline)
975 struct meminfo *mi)
976{ 976{
977 sharpsl_save_param(); 977 sharpsl_save_param();
978 mi->nr_banks = 1; 978 memblock_add(0xa0000000, SZ_64M);
979 mi->bank[0].start = 0xa0000000;
980 mi->bank[0].size = (64*1024*1024);
981} 979}
982 980
983#ifdef CONFIG_MACH_SPITZ 981#ifdef CONFIG_MACH_SPITZ
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index ef5557b807ed..c158a6e3e0aa 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -37,6 +37,7 @@
37#include <linux/i2c/pxa-i2c.h> 37#include <linux/i2c/pxa-i2c.h>
38#include <linux/usb/gpio_vbus.h> 38#include <linux/usb/gpio_vbus.h>
39#include <linux/reboot.h> 39#include <linux/reboot.h>
40#include <linux/memblock.h>
40 41
41#include <asm/setup.h> 42#include <asm/setup.h>
42#include <asm/mach-types.h> 43#include <asm/mach-types.h>
@@ -960,13 +961,10 @@ static void __init tosa_init(void)
960 platform_add_devices(devices, ARRAY_SIZE(devices)); 961 platform_add_devices(devices, ARRAY_SIZE(devices));
961} 962}
962 963
963static void __init fixup_tosa(struct tag *tags, char **cmdline, 964static void __init fixup_tosa(struct tag *tags, char **cmdline)
964 struct meminfo *mi)
965{ 965{
966 sharpsl_save_param(); 966 sharpsl_save_param();
967 mi->nr_banks=1; 967 memblock_add(0xa0000000, SZ_64M);
968 mi->bank[0].start = 0xa0000000;
969 mi->bank[0].size = (64*1024*1024);
970} 968}
971 969
972MACHINE_START(TOSA, "SHARP Tosa") 970MACHINE_START(TOSA, "SHARP Tosa")
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
index 1d5ee5c9a1dc..c2fae3a5aad8 100644
--- a/arch/arm/mach-realview/core.c
+++ b/arch/arm/mach-realview/core.c
@@ -31,6 +31,7 @@
31#include <linux/amba/mmci.h> 31#include <linux/amba/mmci.h>
32#include <linux/gfp.h> 32#include <linux/gfp.h>
33#include <linux/mtd/physmap.h> 33#include <linux/mtd/physmap.h>
34#include <linux/memblock.h>
34 35
35#include <mach/hardware.h> 36#include <mach/hardware.h>
36#include <asm/irq.h> 37#include <asm/irq.h>
@@ -370,19 +371,15 @@ void __init realview_timer_init(unsigned int timer_irq)
370/* 371/*
371 * Setup the memory banks. 372 * Setup the memory banks.
372 */ 373 */
373void realview_fixup(struct tag *tags, char **from, struct meminfo *meminfo) 374void realview_fixup(struct tag *tags, char **from)
374{ 375{
375 /* 376 /*
376 * Most RealView platforms have 512MB contiguous RAM at 0x70000000. 377 * Most RealView platforms have 512MB contiguous RAM at 0x70000000.
377 * Half of this is mirrored at 0. 378 * Half of this is mirrored at 0.
378 */ 379 */
379#ifdef CONFIG_REALVIEW_HIGH_PHYS_OFFSET 380#ifdef CONFIG_REALVIEW_HIGH_PHYS_OFFSET
380 meminfo->bank[0].start = 0x70000000; 381 memblock_add(0x70000000, SZ_512M);
381 meminfo->bank[0].size = SZ_512M;
382 meminfo->nr_banks = 1;
383#else 382#else
384 meminfo->bank[0].start = 0; 383 memblock_add(0, SZ_256M);
385 meminfo->bank[0].size = SZ_256M;
386 meminfo->nr_banks = 1;
387#endif 384#endif
388} 385}
diff --git a/arch/arm/mach-realview/core.h b/arch/arm/mach-realview/core.h
index 602ca5ec52c5..844946da3c66 100644
--- a/arch/arm/mach-realview/core.h
+++ b/arch/arm/mach-realview/core.h
@@ -51,8 +51,7 @@ extern int realview_flash_register(struct resource *res, u32 num);
51extern int realview_eth_register(const char *name, struct resource *res); 51extern int realview_eth_register(const char *name, struct resource *res);
52extern int realview_usb_register(struct resource *res); 52extern int realview_usb_register(struct resource *res);
53extern void realview_init_early(void); 53extern void realview_init_early(void);
54extern void realview_fixup(struct tag *tags, char **from, 54extern void realview_fixup(struct tag *tags, char **from);
55 struct meminfo *meminfo);
56 55
57extern struct smp_operations realview_smp_ops; 56extern struct smp_operations realview_smp_ops;
58extern void realview_cpu_die(unsigned int cpu); 57extern void realview_cpu_die(unsigned int cpu);
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c
index c85ddb2a0ad0..b575895037b8 100644
--- a/arch/arm/mach-realview/realview_eb.c
+++ b/arch/arm/mach-realview/realview_eb.c
@@ -442,8 +442,13 @@ static void __init realview_eb_init(void)
442 realview_eb11mp_fixup(); 442 realview_eb11mp_fixup();
443 443
444#ifdef CONFIG_CACHE_L2X0 444#ifdef CONFIG_CACHE_L2X0
445 /* 1MB (128KB/way), 8-way associativity, evmon/parity/share enabled 445 /*
446 * Bits: .... ...0 0111 1001 0000 .... .... .... */ 446 * The PL220 needs to be manually configured as the hardware
447 * doesn't report the correct sizes.
448 * 1MB (128KB/way), 8-way associativity, event monitor and
449 * parity enabled, ignore share bit, no force write allocate
450 * Bits: .... ...0 0111 1001 0000 .... .... ....
451 */
447 l2x0_init(__io_address(REALVIEW_EB11MP_L220_BASE), 0x00790000, 0xfe000fff); 452 l2x0_init(__io_address(REALVIEW_EB11MP_L220_BASE), 0x00790000, 0xfe000fff);
448#endif 453#endif
449 platform_device_register(&pmu_device); 454 platform_device_register(&pmu_device);
diff --git a/arch/arm/mach-realview/realview_pb1176.c b/arch/arm/mach-realview/realview_pb1176.c
index c5eade76461b..aad9c5a40d47 100644
--- a/arch/arm/mach-realview/realview_pb1176.c
+++ b/arch/arm/mach-realview/realview_pb1176.c
@@ -32,6 +32,7 @@
32#include <linux/irqchip/arm-gic.h> 32#include <linux/irqchip/arm-gic.h>
33#include <linux/platform_data/clk-realview.h> 33#include <linux/platform_data/clk-realview.h>
34#include <linux/reboot.h> 34#include <linux/reboot.h>
35#include <linux/memblock.h>
35 36
36#include <mach/hardware.h> 37#include <mach/hardware.h>
37#include <asm/irq.h> 38#include <asm/irq.h>
@@ -339,15 +340,12 @@ static void realview_pb1176_restart(enum reboot_mode mode, const char *cmd)
339 dsb(); 340 dsb();
340} 341}
341 342
342static void realview_pb1176_fixup(struct tag *tags, char **from, 343static void realview_pb1176_fixup(struct tag *tags, char **from)
343 struct meminfo *meminfo)
344{ 344{
345 /* 345 /*
346 * RealView PB1176 only has 128MB of RAM mapped at 0. 346 * RealView PB1176 only has 128MB of RAM mapped at 0.
347 */ 347 */
348 meminfo->bank[0].start = 0; 348 memblock_add(0, SZ_128M);
349 meminfo->bank[0].size = SZ_128M;
350 meminfo->nr_banks = 1;
351} 349}
352 350
353static void __init realview_pb1176_init(void) 351static void __init realview_pb1176_init(void)
@@ -355,7 +353,13 @@ static void __init realview_pb1176_init(void)
355 int i; 353 int i;
356 354
357#ifdef CONFIG_CACHE_L2X0 355#ifdef CONFIG_CACHE_L2X0
358 /* 128Kb (16Kb/way) 8-way associativity. evmon/parity/share enabled. */ 356 /*
357 * The PL220 needs to be manually configured as the hardware
358 * doesn't report the correct sizes.
359 * 128kB (16kB/way), 8-way associativity, event monitor and
360 * parity enabled, ignore share bit, no force write allocate
361 * Bits: .... ...0 0111 0011 0000 .... .... ....
362 */
359 l2x0_init(__io_address(REALVIEW_PB1176_L220_BASE), 0x00730000, 0xfe000fff); 363 l2x0_init(__io_address(REALVIEW_PB1176_L220_BASE), 0x00730000, 0xfe000fff);
360#endif 364#endif
361 365
diff --git a/arch/arm/mach-realview/realview_pb11mp.c b/arch/arm/mach-realview/realview_pb11mp.c
index f4b0962578fe..101deaf2630b 100644
--- a/arch/arm/mach-realview/realview_pb11mp.c
+++ b/arch/arm/mach-realview/realview_pb11mp.c
@@ -337,8 +337,13 @@ static void __init realview_pb11mp_init(void)
337 int i; 337 int i;
338 338
339#ifdef CONFIG_CACHE_L2X0 339#ifdef CONFIG_CACHE_L2X0
340 /* 1MB (128KB/way), 8-way associativity, evmon/parity/share enabled 340 /*
341 * Bits: .... ...0 0111 1001 0000 .... .... .... */ 341 * The PL220 needs to be manually configured as the hardware
342 * doesn't report the correct sizes.
343 * 1MB (128KB/way), 8-way associativity, event monitor and
344 * parity enabled, ignore share bit, no force write allocate
345 * Bits: .... ...0 0111 1001 0000 .... .... ....
346 */
342 l2x0_init(__io_address(REALVIEW_TC11MP_L220_BASE), 0x00790000, 0xfe000fff); 347 l2x0_init(__io_address(REALVIEW_TC11MP_L220_BASE), 0x00790000, 0xfe000fff);
343#endif 348#endif
344 349
diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c
index 9d75493e3f0c..535697abfd91 100644
--- a/arch/arm/mach-realview/realview_pbx.c
+++ b/arch/arm/mach-realview/realview_pbx.c
@@ -29,6 +29,7 @@
29#include <linux/irqchip/arm-gic.h> 29#include <linux/irqchip/arm-gic.h>
30#include <linux/platform_data/clk-realview.h> 30#include <linux/platform_data/clk-realview.h>
31#include <linux/reboot.h> 31#include <linux/reboot.h>
32#include <linux/memblock.h>
32 33
33#include <asm/irq.h> 34#include <asm/irq.h>
34#include <asm/mach-types.h> 35#include <asm/mach-types.h>
@@ -325,23 +326,19 @@ static void __init realview_pbx_timer_init(void)
325 realview_pbx_twd_init(); 326 realview_pbx_twd_init();
326} 327}
327 328
328static void realview_pbx_fixup(struct tag *tags, char **from, 329static void realview_pbx_fixup(struct tag *tags, char **from)
329 struct meminfo *meminfo)
330{ 330{
331#ifdef CONFIG_SPARSEMEM 331#ifdef CONFIG_SPARSEMEM
332 /* 332 /*
333 * Memory configuration with SPARSEMEM enabled on RealView PBX (see 333 * Memory configuration with SPARSEMEM enabled on RealView PBX (see
334 * asm/mach/memory.h for more information). 334 * asm/mach/memory.h for more information).
335 */ 335 */
336 meminfo->bank[0].start = 0; 336
337 meminfo->bank[0].size = SZ_256M; 337 memblock_add(0, SZ_256M);
338 meminfo->bank[1].start = 0x20000000; 338 memblock_add(0x20000000, SZ_512M);
339 meminfo->bank[1].size = SZ_512M; 339 memblock_add(0x80000000, SZ_256M);
340 meminfo->bank[2].start = 0x80000000;
341 meminfo->bank[2].size = SZ_256M;
342 meminfo->nr_banks = 3;
343#else 340#else
344 realview_fixup(tags, from, meminfo); 341 realview_fixup(tags, from);
345#endif 342#endif
346} 343}
347 344
@@ -370,8 +367,8 @@ static void __init realview_pbx_init(void)
370 __io_address(REALVIEW_PBX_TILE_L220_BASE); 367 __io_address(REALVIEW_PBX_TILE_L220_BASE);
371 368
372 /* set RAM latencies to 1 cycle for eASIC */ 369 /* set RAM latencies to 1 cycle for eASIC */
373 writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL); 370 writel(0, l2x0_base + L310_TAG_LATENCY_CTRL);
374 writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL); 371 writel(0, l2x0_base + L310_DATA_LATENCY_CTRL);
375 372
376 /* 16KB way size, 8-way associativity, parity disabled 373 /* 16KB way size, 8-way associativity, parity disabled
377 * Bits: .. 0 0 0 0 1 00 1 0 1 001 0 000 0 .... .... .... */ 374 * Bits: .. 0 0 0 0 1 00 1 0 1 001 0 000 0 .... .... .... */
diff --git a/arch/arm/mach-rockchip/rockchip.c b/arch/arm/mach-rockchip/rockchip.c
index d211d6fa0d98..138b9975313a 100644
--- a/arch/arm/mach-rockchip/rockchip.c
+++ b/arch/arm/mach-rockchip/rockchip.c
@@ -24,12 +24,6 @@
24#include <asm/hardware/cache-l2x0.h> 24#include <asm/hardware/cache-l2x0.h>
25#include "core.h" 25#include "core.h"
26 26
27static void __init rockchip_dt_init(void)
28{
29 l2x0_of_init(0, ~0UL);
30 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
31}
32
33static const char * const rockchip_board_dt_compat[] = { 27static const char * const rockchip_board_dt_compat[] = {
34 "rockchip,rk2928", 28 "rockchip,rk2928",
35 "rockchip,rk3066a", 29 "rockchip,rk3066a",
@@ -39,7 +33,8 @@ static const char * const rockchip_board_dt_compat[] = {
39}; 33};
40 34
41DT_MACHINE_START(ROCKCHIP_DT, "Rockchip Cortex-A9 (Device Tree)") 35DT_MACHINE_START(ROCKCHIP_DT, "Rockchip Cortex-A9 (Device Tree)")
36 .l2c_aux_val = 0,
37 .l2c_aux_mask = ~0,
42 .smp = smp_ops(rockchip_smp_ops), 38 .smp = smp_ops(rockchip_smp_ops),
43 .init_machine = rockchip_dt_init,
44 .dt_compat = rockchip_board_dt_compat, 39 .dt_compat = rockchip_board_dt_compat,
45MACHINE_END 40MACHINE_END
diff --git a/arch/arm/mach-s3c24xx/mach-smdk2413.c b/arch/arm/mach-s3c24xx/mach-smdk2413.c
index 233fe52d2015..a03c855ee854 100644
--- a/arch/arm/mach-s3c24xx/mach-smdk2413.c
+++ b/arch/arm/mach-s3c24xx/mach-smdk2413.c
@@ -22,6 +22,7 @@
22#include <linux/serial_s3c.h> 22#include <linux/serial_s3c.h>
23#include <linux/platform_device.h> 23#include <linux/platform_device.h>
24#include <linux/io.h> 24#include <linux/io.h>
25#include <linux/memblock.h>
25 26
26#include <asm/mach/arch.h> 27#include <asm/mach/arch.h>
27#include <asm/mach/map.h> 28#include <asm/mach/map.h>
@@ -93,13 +94,10 @@ static struct platform_device *smdk2413_devices[] __initdata = {
93 &s3c2412_device_dma, 94 &s3c2412_device_dma,
94}; 95};
95 96
96static void __init smdk2413_fixup(struct tag *tags, char **cmdline, 97static void __init smdk2413_fixup(struct tag *tags, char **cmdline)
97 struct meminfo *mi)
98{ 98{
99 if (tags != phys_to_virt(S3C2410_SDRAM_PA + 0x100)) { 99 if (tags != phys_to_virt(S3C2410_SDRAM_PA + 0x100)) {
100 mi->nr_banks=1; 100 memblock_add(0x30000000, SZ_64M);
101 mi->bank[0].start = 0x30000000;
102 mi->bank[0].size = SZ_64M;
103 } 101 }
104} 102}
105 103
diff --git a/arch/arm/mach-s3c24xx/mach-vstms.c b/arch/arm/mach-s3c24xx/mach-vstms.c
index 40868c0e0a68..a79af7843aed 100644
--- a/arch/arm/mach-s3c24xx/mach-vstms.c
+++ b/arch/arm/mach-s3c24xx/mach-vstms.c
@@ -23,6 +23,7 @@
23#include <linux/mtd/nand.h> 23#include <linux/mtd/nand.h>
24#include <linux/mtd/nand_ecc.h> 24#include <linux/mtd/nand_ecc.h>
25#include <linux/mtd/partitions.h> 25#include <linux/mtd/partitions.h>
26#include <linux/memblock.h>
26 27
27#include <asm/mach/arch.h> 28#include <asm/mach/arch.h>
28#include <asm/mach/map.h> 29#include <asm/mach/map.h>
@@ -129,13 +130,10 @@ static struct platform_device *vstms_devices[] __initdata = {
129 &s3c2412_device_dma, 130 &s3c2412_device_dma,
130}; 131};
131 132
132static void __init vstms_fixup(struct tag *tags, char **cmdline, 133static void __init vstms_fixup(struct tag *tags, char **cmdline)
133 struct meminfo *mi)
134{ 134{
135 if (tags != phys_to_virt(S3C2410_SDRAM_PA + 0x100)) { 135 if (tags != phys_to_virt(S3C2410_SDRAM_PA + 0x100)) {
136 mi->nr_banks=1; 136 memblock_add(0x30000000, SZ_64M);
137 mi->bank[0].start = 0x30000000;
138 mi->bank[0].size = SZ_64M;
139 } 137 }
140} 138}
141 139
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index 8443a27bca2f..7dd894ece9ae 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -531,7 +531,7 @@ static void __init get_assabet_scr(void)
531} 531}
532 532
533static void __init 533static void __init
534fixup_assabet(struct tag *tags, char **cmdline, struct meminfo *mi) 534fixup_assabet(struct tag *tags, char **cmdline)
535{ 535{
536 /* This must be done before any call to machine_has_neponset() */ 536 /* This must be done before any call to machine_has_neponset() */
537 map_sa1100_gpio_regs(); 537 map_sa1100_gpio_regs();
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva-reference.c b/arch/arm/mach-shmobile/board-armadillo800eva-reference.c
index 57d1a78367b6..39e11f48e8bc 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva-reference.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva-reference.c
@@ -164,8 +164,8 @@ static void __init eva_init(void)
164 r8a7740_meram_workaround(); 164 r8a7740_meram_workaround();
165 165
166#ifdef CONFIG_CACHE_L2X0 166#ifdef CONFIG_CACHE_L2X0
167 /* Early BRESP enable, Shared attribute override enable, 32K*8way */ 167 /* Shared attribute override enable, 32K*8way */
168 l2x0_init(IOMEM(0xf0002000), 0x40440000, 0x82000fff); 168 l2x0_init(IOMEM(0xf0002000), 0x00400000, 0xc20f0fff);
169#endif 169#endif
170 170
171 r8a7740_add_standard_devices_dt(); 171 r8a7740_add_standard_devices_dt();
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index 486063db2a2f..f8e25fd29b79 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -1271,8 +1271,8 @@ static void __init eva_init(void)
1271 1271
1272 1272
1273#ifdef CONFIG_CACHE_L2X0 1273#ifdef CONFIG_CACHE_L2X0
1274 /* Early BRESP enable, Shared attribute override enable, 32K*8way */ 1274 /* Shared attribute override enable, 32K*8way */
1275 l2x0_init(IOMEM(0xf0002000), 0x40440000, 0x82000fff); 1275 l2x0_init(IOMEM(0xf0002000), 0x00400000, 0xc20f0fff);
1276#endif 1276#endif
1277 1277
1278 i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices)); 1278 i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices));
diff --git a/arch/arm/mach-shmobile/board-kzm9g-reference.c b/arch/arm/mach-shmobile/board-kzm9g-reference.c
index 598e32488410..a735a1d80c28 100644
--- a/arch/arm/mach-shmobile/board-kzm9g-reference.c
+++ b/arch/arm/mach-shmobile/board-kzm9g-reference.c
@@ -36,8 +36,8 @@ static void __init kzm_init(void)
36 sh73a0_add_standard_devices_dt(); 36 sh73a0_add_standard_devices_dt();
37 37
38#ifdef CONFIG_CACHE_L2X0 38#ifdef CONFIG_CACHE_L2X0
39 /* Early BRESP enable, Shared attribute override enable, 64K*8way */ 39 /* Shared attribute override enable, 64K*8way */
40 l2x0_init(IOMEM(0xf0100000), 0x40460000, 0x82000fff); 40 l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff);
41#endif 41#endif
42} 42}
43 43
diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c
index 03dc3ac84502..f94ec8ca42c1 100644
--- a/arch/arm/mach-shmobile/board-kzm9g.c
+++ b/arch/arm/mach-shmobile/board-kzm9g.c
@@ -876,8 +876,8 @@ static void __init kzm_init(void)
876 gpio_request_one(223, GPIOF_IN, NULL); /* IRQ8 */ 876 gpio_request_one(223, GPIOF_IN, NULL); /* IRQ8 */
877 877
878#ifdef CONFIG_CACHE_L2X0 878#ifdef CONFIG_CACHE_L2X0
879 /* Early BRESP enable, Shared attribute override enable, 64K*8way */ 879 /* Shared attribute override enable, 64K*8way */
880 l2x0_init(IOMEM(0xf0100000), 0x40460000, 0x82000fff); 880 l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff);
881#endif 881#endif
882 882
883 i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices)); 883 i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices));
diff --git a/arch/arm/mach-shmobile/setup-r8a7778.c b/arch/arm/mach-shmobile/setup-r8a7778.c
index 6d694526e4ca..6dd7ddf88741 100644
--- a/arch/arm/mach-shmobile/setup-r8a7778.c
+++ b/arch/arm/mach-shmobile/setup-r8a7778.c
@@ -298,10 +298,10 @@ void __init r8a7778_add_dt_devices(void)
298 void __iomem *base = ioremap_nocache(0xf0100000, 0x1000); 298 void __iomem *base = ioremap_nocache(0xf0100000, 0x1000);
299 if (base) { 299 if (base) {
300 /* 300 /*
301 * Early BRESP enable, Shared attribute override enable, 64K*16way 301 * Shared attribute override enable, 64K*16way
302 * don't call iounmap(base) 302 * don't call iounmap(base)
303 */ 303 */
304 l2x0_init(base, 0x40470000, 0x82000fff); 304 l2x0_init(base, 0x00400000, 0xc20f0fff);
305 } 305 }
306#endif 306#endif
307 307
diff --git a/arch/arm/mach-shmobile/setup-r8a7779.c b/arch/arm/mach-shmobile/setup-r8a7779.c
index 8e860b36997a..a6630fccfc45 100644
--- a/arch/arm/mach-shmobile/setup-r8a7779.c
+++ b/arch/arm/mach-shmobile/setup-r8a7779.c
@@ -700,8 +700,8 @@ static struct platform_device *r8a7779_standard_devices[] __initdata = {
700void __init r8a7779_add_standard_devices(void) 700void __init r8a7779_add_standard_devices(void)
701{ 701{
702#ifdef CONFIG_CACHE_L2X0 702#ifdef CONFIG_CACHE_L2X0
703 /* Early BRESP enable, Shared attribute override enable, 64K*16way */ 703 /* Shared attribute override enable, 64K*16way */
704 l2x0_init(IOMEM(0xf0100000), 0x40470000, 0x82000fff); 704 l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff);
705#endif 705#endif
706 r8a7779_pm_init(); 706 r8a7779_pm_init();
707 707
diff --git a/arch/arm/mach-socfpga/socfpga.c b/arch/arm/mach-socfpga/socfpga.c
index d86231e11b34..adbf38314ca8 100644
--- a/arch/arm/mach-socfpga/socfpga.c
+++ b/arch/arm/mach-socfpga/socfpga.c
@@ -98,22 +98,17 @@ static void socfpga_cyclone5_restart(enum reboot_mode mode, const char *cmd)
98 writel(temp, rst_manager_base_addr + SOCFPGA_RSTMGR_CTRL); 98 writel(temp, rst_manager_base_addr + SOCFPGA_RSTMGR_CTRL);
99} 99}
100 100
101static void __init socfpga_cyclone5_init(void)
102{
103 l2x0_of_init(0, ~0UL);
104 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
105}
106
107static const char *altera_dt_match[] = { 101static const char *altera_dt_match[] = {
108 "altr,socfpga", 102 "altr,socfpga",
109 NULL 103 NULL
110}; 104};
111 105
112DT_MACHINE_START(SOCFPGA, "Altera SOCFPGA") 106DT_MACHINE_START(SOCFPGA, "Altera SOCFPGA")
107 .l2c_aux_val = 0,
108 .l2c_aux_mask = ~0,
113 .smp = smp_ops(socfpga_smp_ops), 109 .smp = smp_ops(socfpga_smp_ops),
114 .map_io = socfpga_map_io, 110 .map_io = socfpga_map_io,
115 .init_irq = socfpga_init_irq, 111 .init_irq = socfpga_init_irq,
116 .init_machine = socfpga_cyclone5_init,
117 .restart = socfpga_cyclone5_restart, 112 .restart = socfpga_cyclone5_restart,
118 .dt_compat = altera_dt_match, 113 .dt_compat = altera_dt_match,
119MACHINE_END 114MACHINE_END
diff --git a/arch/arm/mach-spear/platsmp.c b/arch/arm/mach-spear/platsmp.c
index c19751fff2c6..fd4297713d67 100644
--- a/arch/arm/mach-spear/platsmp.c
+++ b/arch/arm/mach-spear/platsmp.c
@@ -20,6 +20,18 @@
20#include <mach/spear.h> 20#include <mach/spear.h>
21#include "generic.h" 21#include "generic.h"
22 22
23/*
24 * Write pen_release in a way that is guaranteed to be visible to all
25 * observers, irrespective of whether they're taking part in coherency
26 * or not. This is necessary for the hotplug code to work reliably.
27 */
28static void write_pen_release(int val)
29{
30 pen_release = val;
31 smp_wmb();
32 sync_cache_w(&pen_release);
33}
34
23static DEFINE_SPINLOCK(boot_lock); 35static DEFINE_SPINLOCK(boot_lock);
24 36
25static void __iomem *scu_base = IOMEM(VA_SCU_BASE); 37static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
@@ -30,8 +42,7 @@ static void spear13xx_secondary_init(unsigned int cpu)
30 * let the primary processor know we're out of the 42 * let the primary processor know we're out of the
31 * pen, then head off into the C entry point 43 * pen, then head off into the C entry point
32 */ 44 */
33 pen_release = -1; 45 write_pen_release(-1);
34 smp_wmb();
35 46
36 /* 47 /*
37 * Synchronise with the boot thread. 48 * Synchronise with the boot thread.
@@ -58,9 +69,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
58 * Note that "pen_release" is the hardware CPU ID, whereas 69 * Note that "pen_release" is the hardware CPU ID, whereas
59 * "cpu" is Linux's internal ID. 70 * "cpu" is Linux's internal ID.
60 */ 71 */
61 pen_release = cpu; 72 write_pen_release(cpu);
62 flush_cache_all();
63 outer_flush_all();
64 73
65 timeout = jiffies + (1 * HZ); 74 timeout = jiffies + (1 * HZ);
66 while (time_before(jiffies, timeout)) { 75 while (time_before(jiffies, timeout)) {
diff --git a/arch/arm/mach-spear/spear13xx.c b/arch/arm/mach-spear/spear13xx.c
index 7aa6e8cf830f..c9897ea38980 100644
--- a/arch/arm/mach-spear/spear13xx.c
+++ b/arch/arm/mach-spear/spear13xx.c
@@ -38,15 +38,15 @@ void __init spear13xx_l2x0_init(void)
38 if (!IS_ENABLED(CONFIG_CACHE_L2X0)) 38 if (!IS_ENABLED(CONFIG_CACHE_L2X0))
39 return; 39 return;
40 40
41 writel_relaxed(0x06, VA_L2CC_BASE + L2X0_PREFETCH_CTRL); 41 writel_relaxed(0x06, VA_L2CC_BASE + L310_PREFETCH_CTRL);
42 42
43 /* 43 /*
44 * Program following latencies in order to make 44 * Program following latencies in order to make
45 * SPEAr1340 work at 600 MHz 45 * SPEAr1340 work at 600 MHz
46 */ 46 */
47 writel_relaxed(0x221, VA_L2CC_BASE + L2X0_TAG_LATENCY_CTRL); 47 writel_relaxed(0x221, VA_L2CC_BASE + L310_TAG_LATENCY_CTRL);
48 writel_relaxed(0x441, VA_L2CC_BASE + L2X0_DATA_LATENCY_CTRL); 48 writel_relaxed(0x441, VA_L2CC_BASE + L310_DATA_LATENCY_CTRL);
49 l2x0_init(VA_L2CC_BASE, 0x70A60001, 0xfe00ffff); 49 l2x0_init(VA_L2CC_BASE, 0x30a00001, 0xfe0fffff);
50} 50}
51 51
52/* 52/*
diff --git a/arch/arm/mach-sti/board-dt.c b/arch/arm/mach-sti/board-dt.c
index 1217fb598cfd..910e978dfbc0 100644
--- a/arch/arm/mach-sti/board-dt.c
+++ b/arch/arm/mach-sti/board-dt.c
@@ -14,25 +14,6 @@
14 14
15#include "smp.h" 15#include "smp.h"
16 16
17void __init stih41x_l2x0_init(void)
18{
19 u32 way_size = 0x4;
20 u32 aux_ctrl;
21 /* may be this can be encoded in macros like BIT*() */
22 aux_ctrl = (0x1 << L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT) |
23 (0x1 << L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT) |
24 (0x1 << L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT) |
25 (way_size << L2X0_AUX_CTRL_WAY_SIZE_SHIFT);
26
27 l2x0_of_init(aux_ctrl, L2X0_AUX_CTRL_MASK);
28}
29
30static void __init stih41x_machine_init(void)
31{
32 stih41x_l2x0_init();
33 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
34}
35
36static const char *stih41x_dt_match[] __initdata = { 17static const char *stih41x_dt_match[] __initdata = {
37 "st,stih415", 18 "st,stih415",
38 "st,stih416", 19 "st,stih416",
@@ -40,7 +21,11 @@ static const char *stih41x_dt_match[] __initdata = {
40}; 21};
41 22
42DT_MACHINE_START(STM, "STiH415/416 SoC with Flattened Device Tree") 23DT_MACHINE_START(STM, "STiH415/416 SoC with Flattened Device Tree")
43 .init_machine = stih41x_machine_init,
44 .smp = smp_ops(sti_smp_ops),
45 .dt_compat = stih41x_dt_match, 24 .dt_compat = stih41x_dt_match,
25 .l2c_aux_val = L2C_AUX_CTRL_SHARED_OVERRIDE |
26 L310_AUX_CTRL_DATA_PREFETCH |
27 L310_AUX_CTRL_INSTR_PREFETCH |
28 L2C_AUX_CTRL_WAY_SIZE(4),
29 .l2c_aux_mask = 0xc0000fff,
30 .smp = smp_ops(sti_smp_ops),
46MACHINE_END 31MACHINE_END
diff --git a/arch/arm/mach-tegra/pm.h b/arch/arm/mach-tegra/pm.h
index 6e92a7c2ecbd..f4a89698e5b0 100644
--- a/arch/arm/mach-tegra/pm.h
+++ b/arch/arm/mach-tegra/pm.h
@@ -35,8 +35,6 @@ void tegra20_sleep_core_init(void);
35void tegra30_lp1_iram_hook(void); 35void tegra30_lp1_iram_hook(void);
36void tegra30_sleep_core_init(void); 36void tegra30_sleep_core_init(void);
37 37
38extern unsigned long l2x0_saved_regs_addr;
39
40void tegra_clear_cpu_in_lp2(void); 38void tegra_clear_cpu_in_lp2(void);
41bool tegra_set_cpu_in_lp2(void); 39bool tegra_set_cpu_in_lp2(void);
42 40
diff --git a/arch/arm/mach-tegra/reset-handler.S b/arch/arm/mach-tegra/reset-handler.S
index 8c1ba4fea384..578d4d1ad648 100644
--- a/arch/arm/mach-tegra/reset-handler.S
+++ b/arch/arm/mach-tegra/reset-handler.S
@@ -19,7 +19,6 @@
19 19
20#include <asm/cache.h> 20#include <asm/cache.h>
21#include <asm/asm-offsets.h> 21#include <asm/asm-offsets.h>
22#include <asm/hardware/cache-l2x0.h>
23 22
24#include "flowctrl.h" 23#include "flowctrl.h"
25#include "fuse.h" 24#include "fuse.h"
@@ -78,8 +77,10 @@ ENTRY(tegra_resume)
78 str r1, [r0] 77 str r1, [r0]
79#endif 78#endif
80 79
80#ifdef CONFIG_CACHE_L2X0
81 /* L2 cache resume & re-enable */ 81 /* L2 cache resume & re-enable */
82 l2_cache_resume r0, r1, r2, l2x0_saved_regs_addr 82 bl l2c310_early_resume
83#endif
83end_ca9_scu_l2_resume: 84end_ca9_scu_l2_resume:
84 mov32 r9, 0xc0f 85 mov32 r9, 0xc0f
85 cmp r8, r9 86 cmp r8, r9
@@ -89,12 +90,6 @@ end_ca9_scu_l2_resume:
89ENDPROC(tegra_resume) 90ENDPROC(tegra_resume)
90#endif 91#endif
91 92
92#ifdef CONFIG_CACHE_L2X0
93 .globl l2x0_saved_regs_addr
94l2x0_saved_regs_addr:
95 .long 0
96#endif
97
98 .align L1_CACHE_SHIFT 93 .align L1_CACHE_SHIFT
99ENTRY(__tegra_cpu_reset_handler_start) 94ENTRY(__tegra_cpu_reset_handler_start)
100 95
diff --git a/arch/arm/mach-tegra/sleep.h b/arch/arm/mach-tegra/sleep.h
index a4edbb3abd3d..339fe42cd6fb 100644
--- a/arch/arm/mach-tegra/sleep.h
+++ b/arch/arm/mach-tegra/sleep.h
@@ -120,37 +120,6 @@
120 mov \tmp1, \tmp1, lsr #8 120 mov \tmp1, \tmp1, lsr #8
121.endm 121.endm
122 122
123/* Macro to resume & re-enable L2 cache */
124#ifndef L2X0_CTRL_EN
125#define L2X0_CTRL_EN 1
126#endif
127
128#ifdef CONFIG_CACHE_L2X0
129.macro l2_cache_resume, tmp1, tmp2, tmp3, phys_l2x0_saved_regs
130 W(adr) \tmp1, \phys_l2x0_saved_regs
131 ldr \tmp1, [\tmp1]
132 ldr \tmp2, [\tmp1, #L2X0_R_PHY_BASE]
133 ldr \tmp3, [\tmp2, #L2X0_CTRL]
134 tst \tmp3, #L2X0_CTRL_EN
135 bne exit_l2_resume
136 ldr \tmp3, [\tmp1, #L2X0_R_TAG_LATENCY]
137 str \tmp3, [\tmp2, #L2X0_TAG_LATENCY_CTRL]
138 ldr \tmp3, [\tmp1, #L2X0_R_DATA_LATENCY]
139 str \tmp3, [\tmp2, #L2X0_DATA_LATENCY_CTRL]
140 ldr \tmp3, [\tmp1, #L2X0_R_PREFETCH_CTRL]
141 str \tmp3, [\tmp2, #L2X0_PREFETCH_CTRL]
142 ldr \tmp3, [\tmp1, #L2X0_R_PWR_CTRL]
143 str \tmp3, [\tmp2, #L2X0_POWER_CTRL]
144 ldr \tmp3, [\tmp1, #L2X0_R_AUX_CTRL]
145 str \tmp3, [\tmp2, #L2X0_AUX_CTRL]
146 mov \tmp3, #L2X0_CTRL_EN
147 str \tmp3, [\tmp2, #L2X0_CTRL]
148exit_l2_resume:
149.endm
150#else /* CONFIG_CACHE_L2X0 */
151.macro l2_cache_resume, tmp1, tmp2, tmp3, phys_l2x0_saved_regs
152.endm
153#endif /* CONFIG_CACHE_L2X0 */
154#else 123#else
155void tegra_pen_lock(void); 124void tegra_pen_lock(void);
156void tegra_pen_unlock(void); 125void tegra_pen_unlock(void);
diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c
index 6191603379e1..15ac9fcc96b1 100644
--- a/arch/arm/mach-tegra/tegra.c
+++ b/arch/arm/mach-tegra/tegra.c
@@ -70,40 +70,12 @@ u32 tegra_uart_config[3] = {
70 0, 70 0,
71}; 71};
72 72
73static void __init tegra_init_cache(void)
74{
75#ifdef CONFIG_CACHE_L2X0
76 static const struct of_device_id pl310_ids[] __initconst = {
77 { .compatible = "arm,pl310-cache", },
78 {}
79 };
80
81 struct device_node *np;
82 int ret;
83 void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
84 u32 aux_ctrl, cache_type;
85
86 np = of_find_matching_node(NULL, pl310_ids);
87 if (!np)
88 return;
89
90 cache_type = readl(p + L2X0_CACHE_TYPE);
91 aux_ctrl = (cache_type & 0x700) << (17-8);
92 aux_ctrl |= 0x7C400001;
93
94 ret = l2x0_of_init(aux_ctrl, 0x8200c3fe);
95 if (!ret)
96 l2x0_saved_regs_addr = virt_to_phys(&l2x0_saved_regs);
97#endif
98}
99
100static void __init tegra_init_early(void) 73static void __init tegra_init_early(void)
101{ 74{
102 of_register_trusted_foundations(); 75 of_register_trusted_foundations();
103 tegra_apb_io_init(); 76 tegra_apb_io_init();
104 tegra_init_fuse(); 77 tegra_init_fuse();
105 tegra_cpu_reset_handler_init(); 78 tegra_cpu_reset_handler_init();
106 tegra_init_cache();
107 tegra_powergate_init(); 79 tegra_powergate_init();
108 tegra_hotplug_init(); 80 tegra_hotplug_init();
109} 81}
@@ -191,8 +163,10 @@ static const char * const tegra_dt_board_compat[] = {
191}; 163};
192 164
193DT_MACHINE_START(TEGRA_DT, "NVIDIA Tegra SoC (Flattened Device Tree)") 165DT_MACHINE_START(TEGRA_DT, "NVIDIA Tegra SoC (Flattened Device Tree)")
194 .map_io = tegra_map_common_io, 166 .l2c_aux_val = 0x3c400001,
167 .l2c_aux_mask = 0xc20fc3fe,
195 .smp = smp_ops(tegra_smp_ops), 168 .smp = smp_ops(tegra_smp_ops),
169 .map_io = tegra_map_common_io,
196 .init_early = tegra_init_early, 170 .init_early = tegra_init_early,
197 .init_irq = tegra_dt_init_irq, 171 .init_irq = tegra_dt_init_irq,
198 .init_machine = tegra_dt_init, 172 .init_machine = tegra_dt_init,
diff --git a/arch/arm/mach-ux500/cache-l2x0.c b/arch/arm/mach-ux500/cache-l2x0.c
index 264f894c0e3d..842ebedbdd1c 100644
--- a/arch/arm/mach-ux500/cache-l2x0.c
+++ b/arch/arm/mach-ux500/cache-l2x0.c
@@ -35,10 +35,16 @@ static int __init ux500_l2x0_unlock(void)
35 return 0; 35 return 0;
36} 36}
37 37
38static int __init ux500_l2x0_init(void) 38static void ux500_l2c310_write_sec(unsigned long val, unsigned reg)
39{ 39{
40 u32 aux_val = 0x3e000000; 40 /*
41 * We can't write to secure registers as we are in non-secure
42 * mode, until we have some SMI service available.
43 */
44}
41 45
46static int __init ux500_l2x0_init(void)
47{
42 if (cpu_is_u8500_family() || cpu_is_ux540_family()) 48 if (cpu_is_u8500_family() || cpu_is_ux540_family())
43 l2x0_base = __io_address(U8500_L2CC_BASE); 49 l2x0_base = __io_address(U8500_L2CC_BASE);
44 else 50 else
@@ -48,28 +54,12 @@ static int __init ux500_l2x0_init(void)
48 /* Unlock before init */ 54 /* Unlock before init */
49 ux500_l2x0_unlock(); 55 ux500_l2x0_unlock();
50 56
51 /* DBx540's L2 has 128KB way size */ 57 outer_cache.write_sec = ux500_l2c310_write_sec;
52 if (cpu_is_ux540_family())
53 /* 128KB way size */
54 aux_val |= (0x4 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT);
55 else
56 /* 64KB way size */
57 aux_val |= (0x3 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT);
58 58
59 /* 64KB way size, 8 way associativity, force WA */
60 if (of_have_populated_dt()) 59 if (of_have_populated_dt())
61 l2x0_of_init(aux_val, 0xc0000fff); 60 l2x0_of_init(0, ~0);
62 else 61 else
63 l2x0_init(l2x0_base, aux_val, 0xc0000fff); 62 l2x0_init(l2x0_base, 0, ~0);
64
65 /*
66 * We can't disable l2 as we are in non secure mode, currently
67 * this seems be called only during kexec path. So let's
68 * override outer.disable with nasty assignment until we have
69 * some SMI service available.
70 */
71 outer_cache.disable = NULL;
72 outer_cache.set_debug = NULL;
73 63
74 return 0; 64 return 0;
75} 65}
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
index 6f34497a4245..204038ef3795 100644
--- a/arch/arm/mach-vexpress/ct-ca9x4.c
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -45,6 +45,23 @@ static void __init ct_ca9x4_map_io(void)
45 iotable_init(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc)); 45 iotable_init(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc));
46} 46}
47 47
48static void __init ca9x4_l2_init(void)
49{
50#ifdef CONFIG_CACHE_L2X0
51 void __iomem *l2x0_base = ioremap(CT_CA9X4_L2CC, SZ_4K);
52
53 if (l2x0_base) {
54 /* set RAM latencies to 1 cycle for this core tile. */
55 writel(0, l2x0_base + L310_TAG_LATENCY_CTRL);
56 writel(0, l2x0_base + L310_DATA_LATENCY_CTRL);
57
58 l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
59 } else {
60 pr_err("L2C: unable to map L2 cache controller\n");
61 }
62#endif
63}
64
48#ifdef CONFIG_HAVE_ARM_TWD 65#ifdef CONFIG_HAVE_ARM_TWD
49static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, A9_MPCORE_TWD, IRQ_LOCALTIMER); 66static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, A9_MPCORE_TWD, IRQ_LOCALTIMER);
50 67
@@ -63,6 +80,7 @@ static void __init ct_ca9x4_init_irq(void)
63 gic_init(0, 29, ioremap(A9_MPCORE_GIC_DIST, SZ_4K), 80 gic_init(0, 29, ioremap(A9_MPCORE_GIC_DIST, SZ_4K),
64 ioremap(A9_MPCORE_GIC_CPU, SZ_256)); 81 ioremap(A9_MPCORE_GIC_CPU, SZ_256));
65 ca9x4_twd_init(); 82 ca9x4_twd_init();
83 ca9x4_l2_init();
66} 84}
67 85
68static int ct_ca9x4_clcd_setup(struct clcd_fb *fb) 86static int ct_ca9x4_clcd_setup(struct clcd_fb *fb)
@@ -141,16 +159,6 @@ static void __init ct_ca9x4_init(void)
141{ 159{
142 int i; 160 int i;
143 161
144#ifdef CONFIG_CACHE_L2X0
145 void __iomem *l2x0_base = ioremap(CT_CA9X4_L2CC, SZ_4K);
146
147 /* set RAM latencies to 1 cycle for this core tile. */
148 writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
149 writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);
150
151 l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
152#endif
153
154 for (i = 0; i < ARRAY_SIZE(ct_ca9x4_amba_devs); i++) 162 for (i = 0; i < ARRAY_SIZE(ct_ca9x4_amba_devs); i++)
155 amba_device_register(ct_ca9x4_amba_devs[i], &iomem_resource); 163 amba_device_register(ct_ca9x4_amba_devs[i], &iomem_resource);
156 164
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
index 29e7785a54bc..b743a0ae02ce 100644
--- a/arch/arm/mach-vexpress/tc2_pm.c
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -209,7 +209,7 @@ static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster)
209#define POLL_MSEC 10 209#define POLL_MSEC 10
210#define TIMEOUT_MSEC 1000 210#define TIMEOUT_MSEC 1000
211 211
212static int tc2_pm_power_down_finish(unsigned int cpu, unsigned int cluster) 212static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
213{ 213{
214 unsigned tries; 214 unsigned tries;
215 215
@@ -290,7 +290,7 @@ static void tc2_pm_powered_up(void)
290static const struct mcpm_platform_ops tc2_pm_power_ops = { 290static const struct mcpm_platform_ops tc2_pm_power_ops = {
291 .power_up = tc2_pm_power_up, 291 .power_up = tc2_pm_power_up,
292 .power_down = tc2_pm_power_down, 292 .power_down = tc2_pm_power_down,
293 .power_down_finish = tc2_pm_power_down_finish, 293 .wait_for_powerdown = tc2_pm_wait_for_powerdown,
294 .suspend = tc2_pm_suspend, 294 .suspend = tc2_pm_suspend,
295 .powered_up = tc2_pm_powered_up, 295 .powered_up = tc2_pm_powered_up,
296}; 296};
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index 4f8b8cb17ff5..b2fea70d412d 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -432,7 +432,6 @@ static const struct of_device_id v2m_dt_bus_match[] __initconst = {
432 432
433static void __init v2m_dt_init(void) 433static void __init v2m_dt_init(void)
434{ 434{
435 l2x0_of_init(0x00400000, 0xfe0fffff);
436 of_platform_populate(NULL, v2m_dt_bus_match, NULL, NULL); 435 of_platform_populate(NULL, v2m_dt_bus_match, NULL, NULL);
437} 436}
438 437
@@ -443,6 +442,8 @@ static const char * const v2m_dt_match[] __initconst = {
443 442
444DT_MACHINE_START(VEXPRESS_DT, "ARM-Versatile Express") 443DT_MACHINE_START(VEXPRESS_DT, "ARM-Versatile Express")
445 .dt_compat = v2m_dt_match, 444 .dt_compat = v2m_dt_match,
445 .l2c_aux_val = 0x00400000,
446 .l2c_aux_mask = 0xfe0fffff,
446 .smp = smp_ops(vexpress_smp_ops), 447 .smp = smp_ops(vexpress_smp_ops),
447 .smp_init = smp_init_ops(vexpress_smp_init_ops), 448 .smp_init = smp_init_ops(vexpress_smp_init_ops),
448 .map_io = v2m_dt_map_io, 449 .map_io = v2m_dt_map_io,
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index 6fcc584c1a11..d1e992e6403e 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -67,11 +67,6 @@ static void __init zynq_init_machine(void)
67{ 67{
68 struct platform_device_info devinfo = { .name = "cpufreq-cpu0", }; 68 struct platform_device_info devinfo = { .name = "cpufreq-cpu0", };
69 69
70 /*
71 * 64KB way size, 8-way associativity, parity disabled
72 */
73 l2x0_of_init(0x02060000, 0xF0F0FFFF);
74
75 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); 70 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
76 71
77 platform_device_register(&zynq_cpuidle_device); 72 platform_device_register(&zynq_cpuidle_device);
@@ -133,6 +128,9 @@ static const char * const zynq_dt_match[] = {
133}; 128};
134 129
135DT_MACHINE_START(XILINX_EP107, "Xilinx Zynq Platform") 130DT_MACHINE_START(XILINX_EP107, "Xilinx Zynq Platform")
131 /* 64KB way size, 8-way associativity, parity disabled */
132 .l2c_aux_val = 0x02000000,
133 .l2c_aux_mask = 0xf0ffffff,
136 .smp = smp_ops(zynq_smp_ops), 134 .smp = smp_ops(zynq_smp_ops),
137 .map_io = zynq_map_io, 135 .map_io = zynq_map_io,
138 .init_irq = zynq_irq_init, 136 .init_irq = zynq_irq_init,
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 5bf7c3c3b301..eda0dd0ab97b 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -897,6 +897,57 @@ config CACHE_PL310
897 This option enables optimisations for the PL310 cache 897 This option enables optimisations for the PL310 cache
898 controller. 898 controller.
899 899
900config PL310_ERRATA_588369
901 bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
902 depends on CACHE_L2X0
903 help
904 The PL310 L2 cache controller implements three types of Clean &
905 Invalidate maintenance operations: by Physical Address
906 (offset 0x7F0), by Index/Way (0x7F8) and by Way (0x7FC).
907 They are architecturally defined to behave as the execution of a
908 clean operation followed immediately by an invalidate operation,
909 both performing to the same memory location. This functionality
910 is not correctly implemented in PL310 as clean lines are not
911 invalidated as a result of these operations.
912
913config PL310_ERRATA_727915
914 bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption"
915 depends on CACHE_L2X0
916 help
917 PL310 implements the Clean & Invalidate by Way L2 cache maintenance
918 operation (offset 0x7FC). This operation runs in background so that
919 PL310 can handle normal accesses while it is in progress. Under very
920 rare circumstances, due to this erratum, write data can be lost when
921 PL310 treats a cacheable write transaction during a Clean &
922 Invalidate by Way operation.
923
924config PL310_ERRATA_753970
925 bool "PL310 errata: cache sync operation may be faulty"
926 depends on CACHE_PL310
927 help
928 This option enables the workaround for the 753970 PL310 (r3p0) erratum.
929
930 Under some condition the effect of cache sync operation on
931 the store buffer still remains when the operation completes.
932 This means that the store buffer is always asked to drain and
933 this prevents it from merging any further writes. The workaround
934 is to replace the normal offset of cache sync operation (0x730)
935 by another offset targeting an unmapped PL310 register 0x740.
936 This has the same effect as the cache sync operation: store buffer
937 drain and waiting for all buffers empty.
938
939config PL310_ERRATA_769419
940 bool "PL310 errata: no automatic Store Buffer drain"
941 depends on CACHE_L2X0
942 help
943 On revisions of the PL310 prior to r3p2, the Store Buffer does
944 not automatically drain. This can cause normal, non-cacheable
945 writes to be retained when the memory system is idle, leading
946 to suboptimal I/O performance for drivers using coherent DMA.
947 This option adds a write barrier to the cpu_idle loop so that,
948 on systems with an outer cache, the store buffer is drained
949 explicitly.
950
900config CACHE_TAUROS2 951config CACHE_TAUROS2
901 bool "Enable the Tauros2 L2 cache controller" 952 bool "Enable the Tauros2 L2 cache controller"
902 depends on (ARCH_DOVE || ARCH_MMP || CPU_PJ4) 953 depends on (ARCH_DOVE || ARCH_MMP || CPU_PJ4)
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 7f39ce2f841f..91da64de440f 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -95,7 +95,8 @@ obj-$(CONFIG_CPU_V7M) += proc-v7m.o
95AFLAGS_proc-v6.o :=-Wa,-march=armv6 95AFLAGS_proc-v6.o :=-Wa,-march=armv6
96AFLAGS_proc-v7.o :=-Wa,-march=armv7-a 96AFLAGS_proc-v7.o :=-Wa,-march=armv7-a
97 97
98obj-$(CONFIG_OUTER_CACHE) += l2c-common.o
98obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o 99obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o
99obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o 100obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o l2c-l2x0-resume.o
100obj-$(CONFIG_CACHE_XSC3L2) += cache-xsc3l2.o 101obj-$(CONFIG_CACHE_XSC3L2) += cache-xsc3l2.o
101obj-$(CONFIG_CACHE_TAUROS2) += cache-tauros2.o 102obj-$(CONFIG_CACHE_TAUROS2) += cache-tauros2.o
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index dc814a548056..e028a7f2ebcc 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -350,7 +350,6 @@ void __init feroceon_l2_init(int __l2_wt_override)
350 outer_cache.inv_range = feroceon_l2_inv_range; 350 outer_cache.inv_range = feroceon_l2_inv_range;
351 outer_cache.clean_range = feroceon_l2_clean_range; 351 outer_cache.clean_range = feroceon_l2_clean_range;
352 outer_cache.flush_range = feroceon_l2_flush_range; 352 outer_cache.flush_range = feroceon_l2_flush_range;
353 outer_cache.inv_all = l2_inv_all;
354 353
355 enable_l2(); 354 enable_l2();
356 355
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 7abde2ce8973..efc5cabf70e0 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -16,18 +16,33 @@
16 * along with this program; if not, write to the Free Software 16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 18 */
19#include <linux/cpu.h>
19#include <linux/err.h> 20#include <linux/err.h>
20#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/smp.h>
21#include <linux/spinlock.h> 23#include <linux/spinlock.h>
22#include <linux/io.h> 24#include <linux/io.h>
23#include <linux/of.h> 25#include <linux/of.h>
24#include <linux/of_address.h> 26#include <linux/of_address.h>
25 27
26#include <asm/cacheflush.h> 28#include <asm/cacheflush.h>
29#include <asm/cp15.h>
30#include <asm/cputype.h>
27#include <asm/hardware/cache-l2x0.h> 31#include <asm/hardware/cache-l2x0.h>
28#include "cache-tauros3.h" 32#include "cache-tauros3.h"
29#include "cache-aurora-l2.h" 33#include "cache-aurora-l2.h"
30 34
35struct l2c_init_data {
36 const char *type;
37 unsigned way_size_0;
38 unsigned num_lock;
39 void (*of_parse)(const struct device_node *, u32 *, u32 *);
40 void (*enable)(void __iomem *, u32, unsigned);
41 void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
42 void (*save)(void __iomem *);
43 struct outer_cache_fns outer_cache;
44};
45
31#define CACHE_LINE_SIZE 32 46#define CACHE_LINE_SIZE 32
32 47
33static void __iomem *l2x0_base; 48static void __iomem *l2x0_base;
@@ -36,96 +51,116 @@ static u32 l2x0_way_mask; /* Bitmask of active ways */
36static u32 l2x0_size; 51static u32 l2x0_size;
37static unsigned long sync_reg_offset = L2X0_CACHE_SYNC; 52static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
38 53
39/* Aurora don't have the cache ID register available, so we have to
40 * pass it though the device tree */
41static u32 cache_id_part_number_from_dt;
42
43struct l2x0_regs l2x0_saved_regs; 54struct l2x0_regs l2x0_saved_regs;
44 55
45struct l2x0_of_data { 56/*
46 void (*setup)(const struct device_node *, u32 *, u32 *); 57 * Common code for all cache controllers.
47 void (*save)(void); 58 */
48 struct outer_cache_fns outer_cache; 59static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
49};
50
51static bool of_init = false;
52
53static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
54{ 60{
55 /* wait for cache operation by line or way to complete */ 61 /* wait for cache operation by line or way to complete */
56 while (readl_relaxed(reg) & mask) 62 while (readl_relaxed(reg) & mask)
57 cpu_relax(); 63 cpu_relax();
58} 64}
59 65
60#ifdef CONFIG_CACHE_PL310 66/*
61static inline void cache_wait(void __iomem *reg, unsigned long mask) 67 * By default, we write directly to secure registers. Platforms must
68 * override this if they are running non-secure.
69 */
70static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg)
62{ 71{
63 /* cache operations by line are atomic on PL310 */ 72 if (val == readl_relaxed(base + reg))
73 return;
74 if (outer_cache.write_sec)
75 outer_cache.write_sec(val, reg);
76 else
77 writel_relaxed(val, base + reg);
64} 78}
65#else
66#define cache_wait cache_wait_way
67#endif
68 79
69static inline void cache_sync(void) 80/*
81 * This should only be called when we have a requirement that the
82 * register be written due to a work-around, as platforms running
83 * in non-secure mode may not be able to access this register.
84 */
85static inline void l2c_set_debug(void __iomem *base, unsigned long val)
70{ 86{
71 void __iomem *base = l2x0_base; 87 l2c_write_sec(val, base, L2X0_DEBUG_CTRL);
72
73 writel_relaxed(0, base + sync_reg_offset);
74 cache_wait(base + L2X0_CACHE_SYNC, 1);
75} 88}
76 89
77static inline void l2x0_clean_line(unsigned long addr) 90static void __l2c_op_way(void __iomem *reg)
78{ 91{
79 void __iomem *base = l2x0_base; 92 writel_relaxed(l2x0_way_mask, reg);
80 cache_wait(base + L2X0_CLEAN_LINE_PA, 1); 93 l2c_wait_mask(reg, l2x0_way_mask);
81 writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
82} 94}
83 95
84static inline void l2x0_inv_line(unsigned long addr) 96static inline void l2c_unlock(void __iomem *base, unsigned num)
85{ 97{
86 void __iomem *base = l2x0_base; 98 unsigned i;
87 cache_wait(base + L2X0_INV_LINE_PA, 1); 99
88 writel_relaxed(addr, base + L2X0_INV_LINE_PA); 100 for (i = 0; i < num; i++) {
101 writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
102 i * L2X0_LOCKDOWN_STRIDE);
103 writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
104 i * L2X0_LOCKDOWN_STRIDE);
105 }
89} 106}
90 107
91#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915) 108/*
92static inline void debug_writel(unsigned long val) 109 * Enable the L2 cache controller. This function must only be
110 * called when the cache controller is known to be disabled.
111 */
112static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
93{ 113{
94 if (outer_cache.set_debug) 114 unsigned long flags;
95 outer_cache.set_debug(val); 115
116 l2c_write_sec(aux, base, L2X0_AUX_CTRL);
117
118 l2c_unlock(base, num_lock);
119
120 local_irq_save(flags);
121 __l2c_op_way(base + L2X0_INV_WAY);
122 writel_relaxed(0, base + sync_reg_offset);
123 l2c_wait_mask(base + sync_reg_offset, 1);
124 local_irq_restore(flags);
125
126 l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
96} 127}
97 128
98static void pl310_set_debug(unsigned long val) 129static void l2c_disable(void)
99{ 130{
100 writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL); 131 void __iomem *base = l2x0_base;
132
133 outer_cache.flush_all();
134 l2c_write_sec(0, base, L2X0_CTRL);
135 dsb(st);
101} 136}
102#else 137
103/* Optimised out for non-errata case */ 138#ifdef CONFIG_CACHE_PL310
104static inline void debug_writel(unsigned long val) 139static inline void cache_wait(void __iomem *reg, unsigned long mask)
105{ 140{
141 /* cache operations by line are atomic on PL310 */
106} 142}
107 143#else
108#define pl310_set_debug NULL 144#define cache_wait l2c_wait_mask
109#endif 145#endif
110 146
111#ifdef CONFIG_PL310_ERRATA_588369 147static inline void cache_sync(void)
112static inline void l2x0_flush_line(unsigned long addr)
113{ 148{
114 void __iomem *base = l2x0_base; 149 void __iomem *base = l2x0_base;
115 150
116 /* Clean by PA followed by Invalidate by PA */ 151 writel_relaxed(0, base + sync_reg_offset);
117 cache_wait(base + L2X0_CLEAN_LINE_PA, 1); 152 cache_wait(base + L2X0_CACHE_SYNC, 1);
118 writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
119 cache_wait(base + L2X0_INV_LINE_PA, 1);
120 writel_relaxed(addr, base + L2X0_INV_LINE_PA);
121} 153}
122#else
123 154
124static inline void l2x0_flush_line(unsigned long addr) 155#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
156static inline void debug_writel(unsigned long val)
157{
158 l2c_set_debug(l2x0_base, val);
159}
160#else
161/* Optimised out for non-errata case */
162static inline void debug_writel(unsigned long val)
125{ 163{
126 void __iomem *base = l2x0_base;
127 cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
128 writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
129} 164}
130#endif 165#endif
131 166
@@ -141,8 +176,7 @@ static void l2x0_cache_sync(void)
141static void __l2x0_flush_all(void) 176static void __l2x0_flush_all(void)
142{ 177{
143 debug_writel(0x03); 178 debug_writel(0x03);
144 writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY); 179 __l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
145 cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
146 cache_sync(); 180 cache_sync();
147 debug_writel(0x00); 181 debug_writel(0x00);
148} 182}
@@ -157,275 +191,883 @@ static void l2x0_flush_all(void)
157 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 191 raw_spin_unlock_irqrestore(&l2x0_lock, flags);
158} 192}
159 193
160static void l2x0_clean_all(void) 194static void l2x0_disable(void)
161{ 195{
162 unsigned long flags; 196 unsigned long flags;
163 197
164 /* clean all ways */
165 raw_spin_lock_irqsave(&l2x0_lock, flags); 198 raw_spin_lock_irqsave(&l2x0_lock, flags);
166 writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY); 199 __l2x0_flush_all();
167 cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask); 200 l2c_write_sec(0, l2x0_base, L2X0_CTRL);
168 cache_sync(); 201 dsb(st);
169 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 202 raw_spin_unlock_irqrestore(&l2x0_lock, flags);
170} 203}
171 204
172static void l2x0_inv_all(void) 205static void l2c_save(void __iomem *base)
173{ 206{
174 unsigned long flags; 207 l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
208}
175 209
176 /* invalidate all ways */ 210/*
177 raw_spin_lock_irqsave(&l2x0_lock, flags); 211 * L2C-210 specific code.
178 /* Invalidating when L2 is enabled is a nono */ 212 *
179 BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN); 213 * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
180 writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY); 214 * ensure that no background operation is running. The way operations
181 cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask); 215 * are all background tasks.
182 cache_sync(); 216 *
183 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 217 * While a background operation is in progress, any new operation is
218 * ignored (unspecified whether this causes an error.) Thankfully, not
219 * used on SMP.
220 *
221 * Never has a different sync register other than L2X0_CACHE_SYNC, but
222 * we use sync_reg_offset here so we can share some of this with L2C-310.
223 */
224static void __l2c210_cache_sync(void __iomem *base)
225{
226 writel_relaxed(0, base + sync_reg_offset);
184} 227}
185 228
186static void l2x0_inv_range(unsigned long start, unsigned long end) 229static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
230 unsigned long end)
231{
232 while (start < end) {
233 writel_relaxed(start, reg);
234 start += CACHE_LINE_SIZE;
235 }
236}
237
238static void l2c210_inv_range(unsigned long start, unsigned long end)
187{ 239{
188 void __iomem *base = l2x0_base; 240 void __iomem *base = l2x0_base;
189 unsigned long flags;
190 241
191 raw_spin_lock_irqsave(&l2x0_lock, flags);
192 if (start & (CACHE_LINE_SIZE - 1)) { 242 if (start & (CACHE_LINE_SIZE - 1)) {
193 start &= ~(CACHE_LINE_SIZE - 1); 243 start &= ~(CACHE_LINE_SIZE - 1);
194 debug_writel(0x03); 244 writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
195 l2x0_flush_line(start);
196 debug_writel(0x00);
197 start += CACHE_LINE_SIZE; 245 start += CACHE_LINE_SIZE;
198 } 246 }
199 247
200 if (end & (CACHE_LINE_SIZE - 1)) { 248 if (end & (CACHE_LINE_SIZE - 1)) {
201 end &= ~(CACHE_LINE_SIZE - 1); 249 end &= ~(CACHE_LINE_SIZE - 1);
202 debug_writel(0x03); 250 writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
203 l2x0_flush_line(end);
204 debug_writel(0x00);
205 } 251 }
206 252
253 __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
254 __l2c210_cache_sync(base);
255}
256
257static void l2c210_clean_range(unsigned long start, unsigned long end)
258{
259 void __iomem *base = l2x0_base;
260
261 start &= ~(CACHE_LINE_SIZE - 1);
262 __l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
263 __l2c210_cache_sync(base);
264}
265
266static void l2c210_flush_range(unsigned long start, unsigned long end)
267{
268 void __iomem *base = l2x0_base;
269
270 start &= ~(CACHE_LINE_SIZE - 1);
271 __l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
272 __l2c210_cache_sync(base);
273}
274
275static void l2c210_flush_all(void)
276{
277 void __iomem *base = l2x0_base;
278
279 BUG_ON(!irqs_disabled());
280
281 __l2c_op_way(base + L2X0_CLEAN_INV_WAY);
282 __l2c210_cache_sync(base);
283}
284
285static void l2c210_sync(void)
286{
287 __l2c210_cache_sync(l2x0_base);
288}
289
290static void l2c210_resume(void)
291{
292 void __iomem *base = l2x0_base;
293
294 if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
295 l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1);
296}
297
298static const struct l2c_init_data l2c210_data __initconst = {
299 .type = "L2C-210",
300 .way_size_0 = SZ_8K,
301 .num_lock = 1,
302 .enable = l2c_enable,
303 .save = l2c_save,
304 .outer_cache = {
305 .inv_range = l2c210_inv_range,
306 .clean_range = l2c210_clean_range,
307 .flush_range = l2c210_flush_range,
308 .flush_all = l2c210_flush_all,
309 .disable = l2c_disable,
310 .sync = l2c210_sync,
311 .resume = l2c210_resume,
312 },
313};
314
315/*
316 * L2C-220 specific code.
317 *
318 * All operations are background operations: they have to be waited for.
319 * Conflicting requests generate a slave error (which will cause an
320 * imprecise abort.) Never uses sync_reg_offset, so we hard-code the
321 * sync register here.
322 *
323 * However, we can re-use the l2c210_resume call.
324 */
325static inline void __l2c220_cache_sync(void __iomem *base)
326{
327 writel_relaxed(0, base + L2X0_CACHE_SYNC);
328 l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
329}
330
331static void l2c220_op_way(void __iomem *base, unsigned reg)
332{
333 unsigned long flags;
334
335 raw_spin_lock_irqsave(&l2x0_lock, flags);
336 __l2c_op_way(base + reg);
337 __l2c220_cache_sync(base);
338 raw_spin_unlock_irqrestore(&l2x0_lock, flags);
339}
340
341static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
342 unsigned long end, unsigned long flags)
343{
344 raw_spinlock_t *lock = &l2x0_lock;
345
207 while (start < end) { 346 while (start < end) {
208 unsigned long blk_end = start + min(end - start, 4096UL); 347 unsigned long blk_end = start + min(end - start, 4096UL);
209 348
210 while (start < blk_end) { 349 while (start < blk_end) {
211 l2x0_inv_line(start); 350 l2c_wait_mask(reg, 1);
351 writel_relaxed(start, reg);
212 start += CACHE_LINE_SIZE; 352 start += CACHE_LINE_SIZE;
213 } 353 }
214 354
215 if (blk_end < end) { 355 if (blk_end < end) {
216 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 356 raw_spin_unlock_irqrestore(lock, flags);
217 raw_spin_lock_irqsave(&l2x0_lock, flags); 357 raw_spin_lock_irqsave(lock, flags);
218 } 358 }
219 } 359 }
220 cache_wait(base + L2X0_INV_LINE_PA, 1); 360
221 cache_sync(); 361 return flags;
222 raw_spin_unlock_irqrestore(&l2x0_lock, flags);
223} 362}
224 363
225static void l2x0_clean_range(unsigned long start, unsigned long end) 364static void l2c220_inv_range(unsigned long start, unsigned long end)
226{ 365{
227 void __iomem *base = l2x0_base; 366 void __iomem *base = l2x0_base;
228 unsigned long flags; 367 unsigned long flags;
229 368
230 if ((end - start) >= l2x0_size) {
231 l2x0_clean_all();
232 return;
233 }
234
235 raw_spin_lock_irqsave(&l2x0_lock, flags); 369 raw_spin_lock_irqsave(&l2x0_lock, flags);
236 start &= ~(CACHE_LINE_SIZE - 1); 370 if ((start | end) & (CACHE_LINE_SIZE - 1)) {
237 while (start < end) { 371 if (start & (CACHE_LINE_SIZE - 1)) {
238 unsigned long blk_end = start + min(end - start, 4096UL); 372 start &= ~(CACHE_LINE_SIZE - 1);
239 373 writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
240 while (start < blk_end) {
241 l2x0_clean_line(start);
242 start += CACHE_LINE_SIZE; 374 start += CACHE_LINE_SIZE;
243 } 375 }
244 376
245 if (blk_end < end) { 377 if (end & (CACHE_LINE_SIZE - 1)) {
246 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 378 end &= ~(CACHE_LINE_SIZE - 1);
247 raw_spin_lock_irqsave(&l2x0_lock, flags); 379 l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
380 writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
248 } 381 }
249 } 382 }
250 cache_wait(base + L2X0_CLEAN_LINE_PA, 1); 383
251 cache_sync(); 384 flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
385 start, end, flags);
386 l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
387 __l2c220_cache_sync(base);
252 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 388 raw_spin_unlock_irqrestore(&l2x0_lock, flags);
253} 389}
254 390
255static void l2x0_flush_range(unsigned long start, unsigned long end) 391static void l2c220_clean_range(unsigned long start, unsigned long end)
256{ 392{
257 void __iomem *base = l2x0_base; 393 void __iomem *base = l2x0_base;
258 unsigned long flags; 394 unsigned long flags;
259 395
396 start &= ~(CACHE_LINE_SIZE - 1);
260 if ((end - start) >= l2x0_size) { 397 if ((end - start) >= l2x0_size) {
261 l2x0_flush_all(); 398 l2c220_op_way(base, L2X0_CLEAN_WAY);
262 return; 399 return;
263 } 400 }
264 401
265 raw_spin_lock_irqsave(&l2x0_lock, flags); 402 raw_spin_lock_irqsave(&l2x0_lock, flags);
403 flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
404 start, end, flags);
405 l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
406 __l2c220_cache_sync(base);
407 raw_spin_unlock_irqrestore(&l2x0_lock, flags);
408}
409
410static void l2c220_flush_range(unsigned long start, unsigned long end)
411{
412 void __iomem *base = l2x0_base;
413 unsigned long flags;
414
266 start &= ~(CACHE_LINE_SIZE - 1); 415 start &= ~(CACHE_LINE_SIZE - 1);
416 if ((end - start) >= l2x0_size) {
417 l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
418 return;
419 }
420
421 raw_spin_lock_irqsave(&l2x0_lock, flags);
422 flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
423 start, end, flags);
424 l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
425 __l2c220_cache_sync(base);
426 raw_spin_unlock_irqrestore(&l2x0_lock, flags);
427}
428
429static void l2c220_flush_all(void)
430{
431 l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
432}
433
434static void l2c220_sync(void)
435{
436 unsigned long flags;
437
438 raw_spin_lock_irqsave(&l2x0_lock, flags);
439 __l2c220_cache_sync(l2x0_base);
440 raw_spin_unlock_irqrestore(&l2x0_lock, flags);
441}
442
443static void l2c220_enable(void __iomem *base, u32 aux, unsigned num_lock)
444{
445 /*
446 * Always enable non-secure access to the lockdown registers -
447 * we write to them as part of the L2C enable sequence so they
448 * need to be accessible.
449 */
450 aux |= L220_AUX_CTRL_NS_LOCKDOWN;
451
452 l2c_enable(base, aux, num_lock);
453}
454
455static const struct l2c_init_data l2c220_data = {
456 .type = "L2C-220",
457 .way_size_0 = SZ_8K,
458 .num_lock = 1,
459 .enable = l2c220_enable,
460 .save = l2c_save,
461 .outer_cache = {
462 .inv_range = l2c220_inv_range,
463 .clean_range = l2c220_clean_range,
464 .flush_range = l2c220_flush_range,
465 .flush_all = l2c220_flush_all,
466 .disable = l2c_disable,
467 .sync = l2c220_sync,
468 .resume = l2c210_resume,
469 },
470};
471
472/*
473 * L2C-310 specific code.
474 *
475 * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
476 * and the way operations are all background tasks. However, issuing an
477 * operation while a background operation is in progress results in a
478 * SLVERR response. We can reuse:
479 *
480 * __l2c210_cache_sync (using sync_reg_offset)
481 * l2c210_sync
482 * l2c210_inv_range (if 588369 is not applicable)
483 * l2c210_clean_range
484 * l2c210_flush_range (if 588369 is not applicable)
485 * l2c210_flush_all (if 727915 is not applicable)
486 *
487 * Errata:
488 * 588369: PL310 R0P0->R1P0, fixed R2P0.
489 * Affects: all clean+invalidate operations
490 * clean and invalidate skips the invalidate step, so we need to issue
491 * separate operations. We also require the above debug workaround
492 * enclosing this code fragment on affected parts. On unaffected parts,
493 * we must not use this workaround without the debug register writes
494 * to avoid exposing a problem similar to 727915.
495 *
496 * 727915: PL310 R2P0->R3P0, fixed R3P1.
497 * Affects: clean+invalidate by way
498 * clean and invalidate by way runs in the background, and a store can
499 * hit the line between the clean operation and invalidate operation,
500 * resulting in the store being lost.
501 *
502 * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2.
503 * Affects: 8x64-bit (double fill) line fetches
504 * double fill line fetches can fail to cause dirty data to be evicted
505 * from the cache before the new data overwrites the second line.
506 *
507 * 753970: PL310 R3P0, fixed R3P1.
508 * Affects: sync
509 * prevents merging writes after the sync operation, until another L2C
510 * operation is performed (or a number of other conditions.)
511 *
512 * 769419: PL310 R0P0->R3P1, fixed R3P2.
513 * Affects: store buffer
514 * store buffer is not automatically drained.
515 */
516static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
517{
518 void __iomem *base = l2x0_base;
519
520 if ((start | end) & (CACHE_LINE_SIZE - 1)) {
521 unsigned long flags;
522
523 /* Erratum 588369 for both clean+invalidate operations */
524 raw_spin_lock_irqsave(&l2x0_lock, flags);
525 l2c_set_debug(base, 0x03);
526
527 if (start & (CACHE_LINE_SIZE - 1)) {
528 start &= ~(CACHE_LINE_SIZE - 1);
529 writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
530 writel_relaxed(start, base + L2X0_INV_LINE_PA);
531 start += CACHE_LINE_SIZE;
532 }
533
534 if (end & (CACHE_LINE_SIZE - 1)) {
535 end &= ~(CACHE_LINE_SIZE - 1);
536 writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
537 writel_relaxed(end, base + L2X0_INV_LINE_PA);
538 }
539
540 l2c_set_debug(base, 0x00);
541 raw_spin_unlock_irqrestore(&l2x0_lock, flags);
542 }
543
544 __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
545 __l2c210_cache_sync(base);
546}
547
548static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
549{
550 raw_spinlock_t *lock = &l2x0_lock;
551 unsigned long flags;
552 void __iomem *base = l2x0_base;
553
554 raw_spin_lock_irqsave(lock, flags);
267 while (start < end) { 555 while (start < end) {
268 unsigned long blk_end = start + min(end - start, 4096UL); 556 unsigned long blk_end = start + min(end - start, 4096UL);
269 557
270 debug_writel(0x03); 558 l2c_set_debug(base, 0x03);
271 while (start < blk_end) { 559 while (start < blk_end) {
272 l2x0_flush_line(start); 560 writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
561 writel_relaxed(start, base + L2X0_INV_LINE_PA);
273 start += CACHE_LINE_SIZE; 562 start += CACHE_LINE_SIZE;
274 } 563 }
275 debug_writel(0x00); 564 l2c_set_debug(base, 0x00);
276 565
277 if (blk_end < end) { 566 if (blk_end < end) {
278 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 567 raw_spin_unlock_irqrestore(lock, flags);
279 raw_spin_lock_irqsave(&l2x0_lock, flags); 568 raw_spin_lock_irqsave(lock, flags);
280 } 569 }
281 } 570 }
282 cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); 571 raw_spin_unlock_irqrestore(lock, flags);
283 cache_sync(); 572 __l2c210_cache_sync(base);
284 raw_spin_unlock_irqrestore(&l2x0_lock, flags);
285} 573}
286 574
287static void l2x0_disable(void) 575static void l2c310_flush_all_erratum(void)
288{ 576{
577 void __iomem *base = l2x0_base;
289 unsigned long flags; 578 unsigned long flags;
290 579
291 raw_spin_lock_irqsave(&l2x0_lock, flags); 580 raw_spin_lock_irqsave(&l2x0_lock, flags);
292 __l2x0_flush_all(); 581 l2c_set_debug(base, 0x03);
293 writel_relaxed(0, l2x0_base + L2X0_CTRL); 582 __l2c_op_way(base + L2X0_CLEAN_INV_WAY);
294 dsb(st); 583 l2c_set_debug(base, 0x00);
584 __l2c210_cache_sync(base);
295 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 585 raw_spin_unlock_irqrestore(&l2x0_lock, flags);
296} 586}
297 587
298static void l2x0_unlock(u32 cache_id) 588static void __init l2c310_save(void __iomem *base)
299{ 589{
300 int lockregs; 590 unsigned revision;
301 int i;
302 591
303 switch (cache_id & L2X0_CACHE_ID_PART_MASK) { 592 l2c_save(base);
304 case L2X0_CACHE_ID_PART_L310: 593
305 lockregs = 8; 594 l2x0_saved_regs.tag_latency = readl_relaxed(base +
306 break; 595 L310_TAG_LATENCY_CTRL);
307 case AURORA_CACHE_ID: 596 l2x0_saved_regs.data_latency = readl_relaxed(base +
308 lockregs = 4; 597 L310_DATA_LATENCY_CTRL);
598 l2x0_saved_regs.filter_end = readl_relaxed(base +
599 L310_ADDR_FILTER_END);
600 l2x0_saved_regs.filter_start = readl_relaxed(base +
601 L310_ADDR_FILTER_START);
602
603 revision = readl_relaxed(base + L2X0_CACHE_ID) &
604 L2X0_CACHE_ID_RTL_MASK;
605
606 /* From r2p0, there is Prefetch offset/control register */
607 if (revision >= L310_CACHE_ID_RTL_R2P0)
608 l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
609 L310_PREFETCH_CTRL);
610
611 /* From r3p0, there is Power control register */
612 if (revision >= L310_CACHE_ID_RTL_R3P0)
613 l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
614 L310_POWER_CTRL);
615}
616
617static void l2c310_resume(void)
618{
619 void __iomem *base = l2x0_base;
620
621 if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
622 unsigned revision;
623
624 /* restore pl310 setup */
625 writel_relaxed(l2x0_saved_regs.tag_latency,
626 base + L310_TAG_LATENCY_CTRL);
627 writel_relaxed(l2x0_saved_regs.data_latency,
628 base + L310_DATA_LATENCY_CTRL);
629 writel_relaxed(l2x0_saved_regs.filter_end,
630 base + L310_ADDR_FILTER_END);
631 writel_relaxed(l2x0_saved_regs.filter_start,
632 base + L310_ADDR_FILTER_START);
633
634 revision = readl_relaxed(base + L2X0_CACHE_ID) &
635 L2X0_CACHE_ID_RTL_MASK;
636
637 if (revision >= L310_CACHE_ID_RTL_R2P0)
638 l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
639 L310_PREFETCH_CTRL);
640 if (revision >= L310_CACHE_ID_RTL_R3P0)
641 l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
642 L310_POWER_CTRL);
643
644 l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
645
646 /* Re-enable full-line-of-zeros for Cortex-A9 */
647 if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
648 set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
649 }
650}
651
652static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, void *data)
653{
654 switch (act & ~CPU_TASKS_FROZEN) {
655 case CPU_STARTING:
656 set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
309 break; 657 break;
310 default: 658 case CPU_DYING:
311 /* L210 and unknown types */ 659 set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
312 lockregs = 1;
313 break; 660 break;
314 } 661 }
662 return NOTIFY_OK;
663}
315 664
316 for (i = 0; i < lockregs; i++) { 665static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
317 writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE + 666{
318 i * L2X0_LOCKDOWN_STRIDE); 667 unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_PART_MASK;
319 writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE + 668 bool cortex_a9 = read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9;
320 i * L2X0_LOCKDOWN_STRIDE); 669
670 if (rev >= L310_CACHE_ID_RTL_R2P0) {
671 if (cortex_a9) {
672 aux |= L310_AUX_CTRL_EARLY_BRESP;
673 pr_info("L2C-310 enabling early BRESP for Cortex-A9\n");
674 } else if (aux & L310_AUX_CTRL_EARLY_BRESP) {
675 pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n");
676 aux &= ~L310_AUX_CTRL_EARLY_BRESP;
677 }
678 }
679
680 if (cortex_a9) {
681 u32 aux_cur = readl_relaxed(base + L2X0_AUX_CTRL);
682 u32 acr = get_auxcr();
683
684 pr_debug("Cortex-A9 ACR=0x%08x\n", acr);
685
686 if (acr & BIT(3) && !(aux_cur & L310_AUX_CTRL_FULL_LINE_ZERO))
687 pr_err("L2C-310: full line of zeros enabled in Cortex-A9 but not L2C-310 - invalid\n");
688
689 if (aux & L310_AUX_CTRL_FULL_LINE_ZERO && !(acr & BIT(3)))
690 pr_err("L2C-310: enabling full line of zeros but not enabled in Cortex-A9\n");
691
692 if (!(aux & L310_AUX_CTRL_FULL_LINE_ZERO) && !outer_cache.write_sec) {
693 aux |= L310_AUX_CTRL_FULL_LINE_ZERO;
694 pr_info("L2C-310 full line of zeros enabled for Cortex-A9\n");
695 }
696 } else if (aux & (L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP)) {
697 pr_err("L2C-310: disabling Cortex-A9 specific feature bits\n");
698 aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
699 }
700
701 if (aux & (L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH)) {
702 u32 prefetch = readl_relaxed(base + L310_PREFETCH_CTRL);
703
704 pr_info("L2C-310 %s%s prefetch enabled, offset %u lines\n",
705 aux & L310_AUX_CTRL_INSTR_PREFETCH ? "I" : "",
706 aux & L310_AUX_CTRL_DATA_PREFETCH ? "D" : "",
707 1 + (prefetch & L310_PREFETCH_CTRL_OFFSET_MASK));
708 }
709
710 /* r3p0 or later has power control register */
711 if (rev >= L310_CACHE_ID_RTL_R3P0) {
712 u32 power_ctrl;
713
714 l2c_write_sec(L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN,
715 base, L310_POWER_CTRL);
716 power_ctrl = readl_relaxed(base + L310_POWER_CTRL);
717 pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n",
718 power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis",
719 power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
720 }
721
722 /*
723 * Always enable non-secure access to the lockdown registers -
724 * we write to them as part of the L2C enable sequence so they
725 * need to be accessible.
726 */
727 aux |= L310_AUX_CTRL_NS_LOCKDOWN;
728
729 l2c_enable(base, aux, num_lock);
730
731 if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) {
732 set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
733 cpu_notifier(l2c310_cpu_enable_flz, 0);
321 } 734 }
322} 735}
323 736
324void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) 737static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
738 struct outer_cache_fns *fns)
325{ 739{
326 u32 aux; 740 unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
327 u32 cache_id; 741 const char *errata[8];
328 u32 way_size = 0; 742 unsigned n = 0;
329 int ways; 743
330 int way_size_shift = L2X0_WAY_SIZE_SHIFT; 744 if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
331 const char *type; 745 revision < L310_CACHE_ID_RTL_R2P0 &&
746 /* For bcm compatibility */
747 fns->inv_range == l2c210_inv_range) {
748 fns->inv_range = l2c310_inv_range_erratum;
749 fns->flush_range = l2c310_flush_range_erratum;
750 errata[n++] = "588369";
751 }
332 752
333 l2x0_base = base; 753 if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
334 if (cache_id_part_number_from_dt) 754 revision >= L310_CACHE_ID_RTL_R2P0 &&
335 cache_id = cache_id_part_number_from_dt; 755 revision < L310_CACHE_ID_RTL_R3P1) {
336 else 756 fns->flush_all = l2c310_flush_all_erratum;
337 cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); 757 errata[n++] = "727915";
338 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 758 }
759
760 if (revision >= L310_CACHE_ID_RTL_R3P0 &&
761 revision < L310_CACHE_ID_RTL_R3P2) {
762 u32 val = readl_relaxed(base + L310_PREFETCH_CTRL);
763 /* I don't think bit23 is required here... but iMX6 does so */
764 if (val & (BIT(30) | BIT(23))) {
765 val &= ~(BIT(30) | BIT(23));
766 l2c_write_sec(val, base, L310_PREFETCH_CTRL);
767 errata[n++] = "752271";
768 }
769 }
770
771 if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
772 revision == L310_CACHE_ID_RTL_R3P0) {
773 sync_reg_offset = L2X0_DUMMY_REG;
774 errata[n++] = "753970";
775 }
776
777 if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
778 errata[n++] = "769419";
779
780 if (n) {
781 unsigned i;
339 782
783 pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
784 for (i = 0; i < n; i++)
785 pr_cont(" %s", errata[i]);
786 pr_cont(" enabled\n");
787 }
788}
789
790static void l2c310_disable(void)
791{
792 /*
793 * If full-line-of-zeros is enabled, we must first disable it in the
794 * Cortex-A9 auxiliary control register before disabling the L2 cache.
795 */
796 if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
797 set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
798
799 l2c_disable();
800}
801
802static const struct l2c_init_data l2c310_init_fns __initconst = {
803 .type = "L2C-310",
804 .way_size_0 = SZ_8K,
805 .num_lock = 8,
806 .enable = l2c310_enable,
807 .fixup = l2c310_fixup,
808 .save = l2c310_save,
809 .outer_cache = {
810 .inv_range = l2c210_inv_range,
811 .clean_range = l2c210_clean_range,
812 .flush_range = l2c210_flush_range,
813 .flush_all = l2c210_flush_all,
814 .disable = l2c310_disable,
815 .sync = l2c210_sync,
816 .resume = l2c310_resume,
817 },
818};
819
820static void __init __l2c_init(const struct l2c_init_data *data,
821 u32 aux_val, u32 aux_mask, u32 cache_id)
822{
823 struct outer_cache_fns fns;
824 unsigned way_size_bits, ways;
825 u32 aux, old_aux;
826
827 /*
828 * Sanity check the aux values. aux_mask is the bits we preserve
829 * from reading the hardware register, and aux_val is the bits we
830 * set.
831 */
832 if (aux_val & aux_mask)
833 pr_alert("L2C: platform provided aux values permit register corruption.\n");
834
835 old_aux = aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
340 aux &= aux_mask; 836 aux &= aux_mask;
341 aux |= aux_val; 837 aux |= aux_val;
342 838
839 if (old_aux != aux)
840 pr_warn("L2C: DT/platform modifies aux control register: 0x%08x -> 0x%08x\n",
841 old_aux, aux);
842
343 /* Determine the number of ways */ 843 /* Determine the number of ways */
344 switch (cache_id & L2X0_CACHE_ID_PART_MASK) { 844 switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
345 case L2X0_CACHE_ID_PART_L310: 845 case L2X0_CACHE_ID_PART_L310:
846 if ((aux_val | ~aux_mask) & (L2C_AUX_CTRL_WAY_SIZE_MASK | L310_AUX_CTRL_ASSOCIATIVITY_16))
847 pr_warn("L2C: DT/platform tries to modify or specify cache size\n");
346 if (aux & (1 << 16)) 848 if (aux & (1 << 16))
347 ways = 16; 849 ways = 16;
348 else 850 else
349 ways = 8; 851 ways = 8;
350 type = "L310";
351#ifdef CONFIG_PL310_ERRATA_753970
352 /* Unmapped register. */
353 sync_reg_offset = L2X0_DUMMY_REG;
354#endif
355 if ((cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0)
356 outer_cache.set_debug = pl310_set_debug;
357 break; 852 break;
853
358 case L2X0_CACHE_ID_PART_L210: 854 case L2X0_CACHE_ID_PART_L210:
855 case L2X0_CACHE_ID_PART_L220:
359 ways = (aux >> 13) & 0xf; 856 ways = (aux >> 13) & 0xf;
360 type = "L210";
361 break; 857 break;
362 858
363 case AURORA_CACHE_ID: 859 case AURORA_CACHE_ID:
364 sync_reg_offset = AURORA_SYNC_REG;
365 ways = (aux >> 13) & 0xf; 860 ways = (aux >> 13) & 0xf;
366 ways = 2 << ((ways + 1) >> 2); 861 ways = 2 << ((ways + 1) >> 2);
367 way_size_shift = AURORA_WAY_SIZE_SHIFT;
368 type = "Aurora";
369 break; 862 break;
863
370 default: 864 default:
371 /* Assume unknown chips have 8 ways */ 865 /* Assume unknown chips have 8 ways */
372 ways = 8; 866 ways = 8;
373 type = "L2x0 series";
374 break; 867 break;
375 } 868 }
376 869
377 l2x0_way_mask = (1 << ways) - 1; 870 l2x0_way_mask = (1 << ways) - 1;
378 871
379 /* 872 /*
380 * L2 cache Size = Way size * Number of ways 873 * way_size_0 is the size that a way_size value of zero would be
874 * given the calculation: way_size = way_size_0 << way_size_bits.
875 * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k,
876 * then way_size_0 would be 8k.
877 *
878 * L2 cache size = number of ways * way size.
381 */ 879 */
382 way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17; 880 way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >>
383 way_size = 1 << (way_size + way_size_shift); 881 L2C_AUX_CTRL_WAY_SIZE_SHIFT;
882 l2x0_size = ways * (data->way_size_0 << way_size_bits);
384 883
385 l2x0_size = ways * way_size * SZ_1K; 884 fns = data->outer_cache;
885 fns.write_sec = outer_cache.write_sec;
886 if (data->fixup)
887 data->fixup(l2x0_base, cache_id, &fns);
386 888
387 /* 889 /*
388 * Check if l2x0 controller is already enabled. 890 * Check if l2x0 controller is already enabled. If we are booting
389 * If you are booting from non-secure mode 891 * in non-secure mode accessing the below registers will fault.
390 * accessing the below registers will fault.
391 */ 892 */
392 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { 893 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
393 /* Make sure that I&D is not locked down when starting */ 894 data->enable(l2x0_base, aux, data->num_lock);
394 l2x0_unlock(cache_id);
395 895
396 /* l2x0 controller is disabled */ 896 outer_cache = fns;
397 writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
398 897
399 l2x0_inv_all(); 898 /*
400 899 * It is strange to save the register state before initialisation,
401 /* enable L2X0 */ 900 * but hey, this is what the DT implementations decided to do.
402 writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL); 901 */
403 } 902 if (data->save)
903 data->save(l2x0_base);
404 904
405 /* Re-read it in case some bits are reserved. */ 905 /* Re-read it in case some bits are reserved. */
406 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 906 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
407 907
408 /* Save the value for resuming. */ 908 pr_info("%s cache controller enabled, %d ways, %d kB\n",
409 l2x0_saved_regs.aux_ctrl = aux; 909 data->type, ways, l2x0_size >> 10);
910 pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
911 data->type, cache_id, aux);
912}
913
914void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
915{
916 const struct l2c_init_data *data;
917 u32 cache_id;
918
919 l2x0_base = base;
920
921 cache_id = readl_relaxed(base + L2X0_CACHE_ID);
922
923 switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
924 default:
925 case L2X0_CACHE_ID_PART_L210:
926 data = &l2c210_data;
927 break;
410 928
411 if (!of_init) { 929 case L2X0_CACHE_ID_PART_L220:
412 outer_cache.inv_range = l2x0_inv_range; 930 data = &l2c220_data;
413 outer_cache.clean_range = l2x0_clean_range; 931 break;
414 outer_cache.flush_range = l2x0_flush_range; 932
415 outer_cache.sync = l2x0_cache_sync; 933 case L2X0_CACHE_ID_PART_L310:
416 outer_cache.flush_all = l2x0_flush_all; 934 data = &l2c310_init_fns;
417 outer_cache.inv_all = l2x0_inv_all; 935 break;
418 outer_cache.disable = l2x0_disable;
419 } 936 }
420 937
421 pr_info("%s cache controller enabled\n", type); 938 __l2c_init(data, aux_val, aux_mask, cache_id);
422 pr_info("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d kB\n",
423 ways, cache_id, aux, l2x0_size >> 10);
424} 939}
425 940
426#ifdef CONFIG_OF 941#ifdef CONFIG_OF
427static int l2_wt_override; 942static int l2_wt_override;
428 943
944/* Aurora don't have the cache ID register available, so we have to
945 * pass it though the device tree */
946static u32 cache_id_part_number_from_dt;
947
948static void __init l2x0_of_parse(const struct device_node *np,
949 u32 *aux_val, u32 *aux_mask)
950{
951 u32 data[2] = { 0, 0 };
952 u32 tag = 0;
953 u32 dirty = 0;
954 u32 val = 0, mask = 0;
955
956 of_property_read_u32(np, "arm,tag-latency", &tag);
957 if (tag) {
958 mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
959 val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
960 }
961
962 of_property_read_u32_array(np, "arm,data-latency",
963 data, ARRAY_SIZE(data));
964 if (data[0] && data[1]) {
965 mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
966 L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
967 val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
968 ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
969 }
970
971 of_property_read_u32(np, "arm,dirty-latency", &dirty);
972 if (dirty) {
973 mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
974 val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
975 }
976
977 *aux_val &= ~mask;
978 *aux_val |= val;
979 *aux_mask &= ~mask;
980}
981
982static const struct l2c_init_data of_l2c210_data __initconst = {
983 .type = "L2C-210",
984 .way_size_0 = SZ_8K,
985 .num_lock = 1,
986 .of_parse = l2x0_of_parse,
987 .enable = l2c_enable,
988 .save = l2c_save,
989 .outer_cache = {
990 .inv_range = l2c210_inv_range,
991 .clean_range = l2c210_clean_range,
992 .flush_range = l2c210_flush_range,
993 .flush_all = l2c210_flush_all,
994 .disable = l2c_disable,
995 .sync = l2c210_sync,
996 .resume = l2c210_resume,
997 },
998};
999
1000static const struct l2c_init_data of_l2c220_data __initconst = {
1001 .type = "L2C-220",
1002 .way_size_0 = SZ_8K,
1003 .num_lock = 1,
1004 .of_parse = l2x0_of_parse,
1005 .enable = l2c220_enable,
1006 .save = l2c_save,
1007 .outer_cache = {
1008 .inv_range = l2c220_inv_range,
1009 .clean_range = l2c220_clean_range,
1010 .flush_range = l2c220_flush_range,
1011 .flush_all = l2c220_flush_all,
1012 .disable = l2c_disable,
1013 .sync = l2c220_sync,
1014 .resume = l2c210_resume,
1015 },
1016};
1017
1018static void __init l2c310_of_parse(const struct device_node *np,
1019 u32 *aux_val, u32 *aux_mask)
1020{
1021 u32 data[3] = { 0, 0, 0 };
1022 u32 tag[3] = { 0, 0, 0 };
1023 u32 filter[2] = { 0, 0 };
1024
1025 of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
1026 if (tag[0] && tag[1] && tag[2])
1027 writel_relaxed(
1028 L310_LATENCY_CTRL_RD(tag[0] - 1) |
1029 L310_LATENCY_CTRL_WR(tag[1] - 1) |
1030 L310_LATENCY_CTRL_SETUP(tag[2] - 1),
1031 l2x0_base + L310_TAG_LATENCY_CTRL);
1032
1033 of_property_read_u32_array(np, "arm,data-latency",
1034 data, ARRAY_SIZE(data));
1035 if (data[0] && data[1] && data[2])
1036 writel_relaxed(
1037 L310_LATENCY_CTRL_RD(data[0] - 1) |
1038 L310_LATENCY_CTRL_WR(data[1] - 1) |
1039 L310_LATENCY_CTRL_SETUP(data[2] - 1),
1040 l2x0_base + L310_DATA_LATENCY_CTRL);
1041
1042 of_property_read_u32_array(np, "arm,filter-ranges",
1043 filter, ARRAY_SIZE(filter));
1044 if (filter[1]) {
1045 writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
1046 l2x0_base + L310_ADDR_FILTER_END);
1047 writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L310_ADDR_FILTER_EN,
1048 l2x0_base + L310_ADDR_FILTER_START);
1049 }
1050}
1051
1052static const struct l2c_init_data of_l2c310_data __initconst = {
1053 .type = "L2C-310",
1054 .way_size_0 = SZ_8K,
1055 .num_lock = 8,
1056 .of_parse = l2c310_of_parse,
1057 .enable = l2c310_enable,
1058 .fixup = l2c310_fixup,
1059 .save = l2c310_save,
1060 .outer_cache = {
1061 .inv_range = l2c210_inv_range,
1062 .clean_range = l2c210_clean_range,
1063 .flush_range = l2c210_flush_range,
1064 .flush_all = l2c210_flush_all,
1065 .disable = l2c310_disable,
1066 .sync = l2c210_sync,
1067 .resume = l2c310_resume,
1068 },
1069};
1070
429/* 1071/*
430 * Note that the end addresses passed to Linux primitives are 1072 * Note that the end addresses passed to Linux primitives are
431 * noninclusive, while the hardware cache range operations use 1073 * noninclusive, while the hardware cache range operations use
@@ -524,6 +1166,100 @@ static void aurora_flush_range(unsigned long start, unsigned long end)
524 } 1166 }
525} 1167}
526 1168
1169static void aurora_save(void __iomem *base)
1170{
1171 l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
1172 l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
1173}
1174
1175static void aurora_resume(void)
1176{
1177 void __iomem *base = l2x0_base;
1178
1179 if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
1180 writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL);
1181 writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL);
1182 }
1183}
1184
1185/*
1186 * For Aurora cache in no outer mode, enable via the CP15 coprocessor
1187 * broadcasting of cache commands to L2.
1188 */
1189static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
1190 unsigned num_lock)
1191{
1192 u32 u;
1193
1194 asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
1195 u |= AURORA_CTRL_FW; /* Set the FW bit */
1196 asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));
1197
1198 isb();
1199
1200 l2c_enable(base, aux, num_lock);
1201}
1202
1203static void __init aurora_fixup(void __iomem *base, u32 cache_id,
1204 struct outer_cache_fns *fns)
1205{
1206 sync_reg_offset = AURORA_SYNC_REG;
1207}
1208
1209static void __init aurora_of_parse(const struct device_node *np,
1210 u32 *aux_val, u32 *aux_mask)
1211{
1212 u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
1213 u32 mask = AURORA_ACR_REPLACEMENT_MASK;
1214
1215 of_property_read_u32(np, "cache-id-part",
1216 &cache_id_part_number_from_dt);
1217
1218 /* Determine and save the write policy */
1219 l2_wt_override = of_property_read_bool(np, "wt-override");
1220
1221 if (l2_wt_override) {
1222 val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
1223 mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
1224 }
1225
1226 *aux_val &= ~mask;
1227 *aux_val |= val;
1228 *aux_mask &= ~mask;
1229}
1230
1231static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
1232 .type = "Aurora",
1233 .way_size_0 = SZ_4K,
1234 .num_lock = 4,
1235 .of_parse = aurora_of_parse,
1236 .enable = l2c_enable,
1237 .fixup = aurora_fixup,
1238 .save = aurora_save,
1239 .outer_cache = {
1240 .inv_range = aurora_inv_range,
1241 .clean_range = aurora_clean_range,
1242 .flush_range = aurora_flush_range,
1243 .flush_all = l2x0_flush_all,
1244 .disable = l2x0_disable,
1245 .sync = l2x0_cache_sync,
1246 .resume = aurora_resume,
1247 },
1248};
1249
1250static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
1251 .type = "Aurora",
1252 .way_size_0 = SZ_4K,
1253 .num_lock = 4,
1254 .of_parse = aurora_of_parse,
1255 .enable = aurora_enable_no_outer,
1256 .fixup = aurora_fixup,
1257 .save = aurora_save,
1258 .outer_cache = {
1259 .resume = aurora_resume,
1260 },
1261};
1262
527/* 1263/*
528 * For certain Broadcom SoCs, depending on the address range, different offsets 1264 * For certain Broadcom SoCs, depending on the address range, different offsets
529 * need to be added to the address before passing it to L2 for 1265 * need to be added to the address before passing it to L2 for
@@ -588,16 +1324,16 @@ static void bcm_inv_range(unsigned long start, unsigned long end)
588 1324
589 /* normal case, no cross section between start and end */ 1325 /* normal case, no cross section between start and end */
590 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { 1326 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
591 l2x0_inv_range(new_start, new_end); 1327 l2c210_inv_range(new_start, new_end);
592 return; 1328 return;
593 } 1329 }
594 1330
595 /* They cross sections, so it can only be a cross from section 1331 /* They cross sections, so it can only be a cross from section
596 * 2 to section 3 1332 * 2 to section 3
597 */ 1333 */
598 l2x0_inv_range(new_start, 1334 l2c210_inv_range(new_start,
599 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); 1335 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
600 l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), 1336 l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
601 new_end); 1337 new_end);
602} 1338}
603 1339
@@ -610,26 +1346,21 @@ static void bcm_clean_range(unsigned long start, unsigned long end)
610 if (unlikely(end <= start)) 1346 if (unlikely(end <= start))
611 return; 1347 return;
612 1348
613 if ((end - start) >= l2x0_size) {
614 l2x0_clean_all();
615 return;
616 }
617
618 new_start = bcm_l2_phys_addr(start); 1349 new_start = bcm_l2_phys_addr(start);
619 new_end = bcm_l2_phys_addr(end); 1350 new_end = bcm_l2_phys_addr(end);
620 1351
621 /* normal case, no cross section between start and end */ 1352 /* normal case, no cross section between start and end */
622 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { 1353 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
623 l2x0_clean_range(new_start, new_end); 1354 l2c210_clean_range(new_start, new_end);
624 return; 1355 return;
625 } 1356 }
626 1357
627 /* They cross sections, so it can only be a cross from section 1358 /* They cross sections, so it can only be a cross from section
628 * 2 to section 3 1359 * 2 to section 3
629 */ 1360 */
630 l2x0_clean_range(new_start, 1361 l2c210_clean_range(new_start,
631 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); 1362 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
632 l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), 1363 l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
633 new_end); 1364 new_end);
634} 1365}
635 1366
@@ -643,7 +1374,7 @@ static void bcm_flush_range(unsigned long start, unsigned long end)
643 return; 1374 return;
644 1375
645 if ((end - start) >= l2x0_size) { 1376 if ((end - start) >= l2x0_size) {
646 l2x0_flush_all(); 1377 outer_cache.flush_all();
647 return; 1378 return;
648 } 1379 }
649 1380
@@ -652,283 +1383,67 @@ static void bcm_flush_range(unsigned long start, unsigned long end)
652 1383
653 /* normal case, no cross section between start and end */ 1384 /* normal case, no cross section between start and end */
654 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { 1385 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
655 l2x0_flush_range(new_start, new_end); 1386 l2c210_flush_range(new_start, new_end);
656 return; 1387 return;
657 } 1388 }
658 1389
659 /* They cross sections, so it can only be a cross from section 1390 /* They cross sections, so it can only be a cross from section
660 * 2 to section 3 1391 * 2 to section 3
661 */ 1392 */
662 l2x0_flush_range(new_start, 1393 l2c210_flush_range(new_start,
663 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); 1394 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
664 l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), 1395 l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
665 new_end); 1396 new_end);
666} 1397}
667 1398
668static void __init l2x0_of_setup(const struct device_node *np, 1399/* Broadcom L2C-310 start from ARMs R3P2 or later, and require no fixups */
669 u32 *aux_val, u32 *aux_mask) 1400static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
670{ 1401 .type = "BCM-L2C-310",
671 u32 data[2] = { 0, 0 }; 1402 .way_size_0 = SZ_8K,
672 u32 tag = 0; 1403 .num_lock = 8,
673 u32 dirty = 0; 1404 .of_parse = l2c310_of_parse,
674 u32 val = 0, mask = 0; 1405 .enable = l2c310_enable,
675 1406 .save = l2c310_save,
676 of_property_read_u32(np, "arm,tag-latency", &tag); 1407 .outer_cache = {
677 if (tag) { 1408 .inv_range = bcm_inv_range,
678 mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK; 1409 .clean_range = bcm_clean_range,
679 val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT; 1410 .flush_range = bcm_flush_range,
680 } 1411 .flush_all = l2c210_flush_all,
681 1412 .disable = l2c310_disable,
682 of_property_read_u32_array(np, "arm,data-latency", 1413 .sync = l2c210_sync,
683 data, ARRAY_SIZE(data)); 1414 .resume = l2c310_resume,
684 if (data[0] && data[1]) { 1415 },
685 mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK | 1416};
686 L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
687 val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
688 ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
689 }
690
691 of_property_read_u32(np, "arm,dirty-latency", &dirty);
692 if (dirty) {
693 mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
694 val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
695 }
696
697 *aux_val &= ~mask;
698 *aux_val |= val;
699 *aux_mask &= ~mask;
700}
701
702static void __init pl310_of_setup(const struct device_node *np,
703 u32 *aux_val, u32 *aux_mask)
704{
705 u32 data[3] = { 0, 0, 0 };
706 u32 tag[3] = { 0, 0, 0 };
707 u32 filter[2] = { 0, 0 };
708
709 of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
710 if (tag[0] && tag[1] && tag[2])
711 writel_relaxed(
712 ((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
713 ((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
714 ((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
715 l2x0_base + L2X0_TAG_LATENCY_CTRL);
716
717 of_property_read_u32_array(np, "arm,data-latency",
718 data, ARRAY_SIZE(data));
719 if (data[0] && data[1] && data[2])
720 writel_relaxed(
721 ((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
722 ((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
723 ((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
724 l2x0_base + L2X0_DATA_LATENCY_CTRL);
725
726 of_property_read_u32_array(np, "arm,filter-ranges",
727 filter, ARRAY_SIZE(filter));
728 if (filter[1]) {
729 writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
730 l2x0_base + L2X0_ADDR_FILTER_END);
731 writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
732 l2x0_base + L2X0_ADDR_FILTER_START);
733 }
734}
735
736static void __init pl310_save(void)
737{
738 u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
739 L2X0_CACHE_ID_RTL_MASK;
740
741 l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
742 L2X0_TAG_LATENCY_CTRL);
743 l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
744 L2X0_DATA_LATENCY_CTRL);
745 l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
746 L2X0_ADDR_FILTER_END);
747 l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
748 L2X0_ADDR_FILTER_START);
749
750 if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
751 /*
752 * From r2p0, there is Prefetch offset/control register
753 */
754 l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
755 L2X0_PREFETCH_CTRL);
756 /*
757 * From r3p0, there is Power control register
758 */
759 if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
760 l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
761 L2X0_POWER_CTRL);
762 }
763}
764 1417
765static void aurora_save(void) 1418static void __init tauros3_save(void __iomem *base)
766{ 1419{
767 l2x0_saved_regs.ctrl = readl_relaxed(l2x0_base + L2X0_CTRL); 1420 l2c_save(base);
768 l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
769}
770 1421
771static void __init tauros3_save(void)
772{
773 l2x0_saved_regs.aux2_ctrl = 1422 l2x0_saved_regs.aux2_ctrl =
774 readl_relaxed(l2x0_base + TAUROS3_AUX2_CTRL); 1423 readl_relaxed(base + TAUROS3_AUX2_CTRL);
775 l2x0_saved_regs.prefetch_ctrl = 1424 l2x0_saved_regs.prefetch_ctrl =
776 readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL); 1425 readl_relaxed(base + L310_PREFETCH_CTRL);
777}
778
779static void l2x0_resume(void)
780{
781 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
782 /* restore aux ctrl and enable l2 */
783 l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));
784
785 writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
786 L2X0_AUX_CTRL);
787
788 l2x0_inv_all();
789
790 writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
791 }
792}
793
794static void pl310_resume(void)
795{
796 u32 l2x0_revision;
797
798 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
799 /* restore pl310 setup */
800 writel_relaxed(l2x0_saved_regs.tag_latency,
801 l2x0_base + L2X0_TAG_LATENCY_CTRL);
802 writel_relaxed(l2x0_saved_regs.data_latency,
803 l2x0_base + L2X0_DATA_LATENCY_CTRL);
804 writel_relaxed(l2x0_saved_regs.filter_end,
805 l2x0_base + L2X0_ADDR_FILTER_END);
806 writel_relaxed(l2x0_saved_regs.filter_start,
807 l2x0_base + L2X0_ADDR_FILTER_START);
808
809 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
810 L2X0_CACHE_ID_RTL_MASK;
811
812 if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
813 writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
814 l2x0_base + L2X0_PREFETCH_CTRL);
815 if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
816 writel_relaxed(l2x0_saved_regs.pwr_ctrl,
817 l2x0_base + L2X0_POWER_CTRL);
818 }
819 }
820
821 l2x0_resume();
822}
823
824static void aurora_resume(void)
825{
826 if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
827 writel_relaxed(l2x0_saved_regs.aux_ctrl,
828 l2x0_base + L2X0_AUX_CTRL);
829 writel_relaxed(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL);
830 }
831} 1426}
832 1427
833static void tauros3_resume(void) 1428static void tauros3_resume(void)
834{ 1429{
835 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { 1430 void __iomem *base = l2x0_base;
1431
1432 if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
836 writel_relaxed(l2x0_saved_regs.aux2_ctrl, 1433 writel_relaxed(l2x0_saved_regs.aux2_ctrl,
837 l2x0_base + TAUROS3_AUX2_CTRL); 1434 base + TAUROS3_AUX2_CTRL);
838 writel_relaxed(l2x0_saved_regs.prefetch_ctrl, 1435 writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
839 l2x0_base + L2X0_PREFETCH_CTRL); 1436 base + L310_PREFETCH_CTRL);
840 }
841 1437
842 l2x0_resume(); 1438 l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
843}
844
845static void __init aurora_broadcast_l2_commands(void)
846{
847 __u32 u;
848 /* Enable Broadcasting of cache commands to L2*/
849 __asm__ __volatile__("mrc p15, 1, %0, c15, c2, 0" : "=r"(u));
850 u |= AURORA_CTRL_FW; /* Set the FW bit */
851 __asm__ __volatile__("mcr p15, 1, %0, c15, c2, 0\n" : : "r"(u));
852 isb();
853}
854
855static void __init aurora_of_setup(const struct device_node *np,
856 u32 *aux_val, u32 *aux_mask)
857{
858 u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
859 u32 mask = AURORA_ACR_REPLACEMENT_MASK;
860
861 of_property_read_u32(np, "cache-id-part",
862 &cache_id_part_number_from_dt);
863
864 /* Determine and save the write policy */
865 l2_wt_override = of_property_read_bool(np, "wt-override");
866
867 if (l2_wt_override) {
868 val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
869 mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
870 } 1439 }
871
872 *aux_val &= ~mask;
873 *aux_val |= val;
874 *aux_mask &= ~mask;
875} 1440}
876 1441
877static const struct l2x0_of_data pl310_data = { 1442static const struct l2c_init_data of_tauros3_data __initconst = {
878 .setup = pl310_of_setup, 1443 .type = "Tauros3",
879 .save = pl310_save, 1444 .way_size_0 = SZ_8K,
880 .outer_cache = { 1445 .num_lock = 8,
881 .resume = pl310_resume, 1446 .enable = l2c_enable,
882 .inv_range = l2x0_inv_range,
883 .clean_range = l2x0_clean_range,
884 .flush_range = l2x0_flush_range,
885 .sync = l2x0_cache_sync,
886 .flush_all = l2x0_flush_all,
887 .inv_all = l2x0_inv_all,
888 .disable = l2x0_disable,
889 },
890};
891
892static const struct l2x0_of_data l2x0_data = {
893 .setup = l2x0_of_setup,
894 .save = NULL,
895 .outer_cache = {
896 .resume = l2x0_resume,
897 .inv_range = l2x0_inv_range,
898 .clean_range = l2x0_clean_range,
899 .flush_range = l2x0_flush_range,
900 .sync = l2x0_cache_sync,
901 .flush_all = l2x0_flush_all,
902 .inv_all = l2x0_inv_all,
903 .disable = l2x0_disable,
904 },
905};
906
907static const struct l2x0_of_data aurora_with_outer_data = {
908 .setup = aurora_of_setup,
909 .save = aurora_save,
910 .outer_cache = {
911 .resume = aurora_resume,
912 .inv_range = aurora_inv_range,
913 .clean_range = aurora_clean_range,
914 .flush_range = aurora_flush_range,
915 .sync = l2x0_cache_sync,
916 .flush_all = l2x0_flush_all,
917 .inv_all = l2x0_inv_all,
918 .disable = l2x0_disable,
919 },
920};
921
922static const struct l2x0_of_data aurora_no_outer_data = {
923 .setup = aurora_of_setup,
924 .save = aurora_save,
925 .outer_cache = {
926 .resume = aurora_resume,
927 },
928};
929
930static const struct l2x0_of_data tauros3_data = {
931 .setup = NULL,
932 .save = tauros3_save, 1447 .save = tauros3_save,
933 /* Tauros3 broadcasts L1 cache operations to L2 */ 1448 /* Tauros3 broadcasts L1 cache operations to L2 */
934 .outer_cache = { 1449 .outer_cache = {
@@ -936,43 +1451,26 @@ static const struct l2x0_of_data tauros3_data = {
936 }, 1451 },
937}; 1452};
938 1453
939static const struct l2x0_of_data bcm_l2x0_data = { 1454#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
940 .setup = pl310_of_setup,
941 .save = pl310_save,
942 .outer_cache = {
943 .resume = pl310_resume,
944 .inv_range = bcm_inv_range,
945 .clean_range = bcm_clean_range,
946 .flush_range = bcm_flush_range,
947 .sync = l2x0_cache_sync,
948 .flush_all = l2x0_flush_all,
949 .inv_all = l2x0_inv_all,
950 .disable = l2x0_disable,
951 },
952};
953
954static const struct of_device_id l2x0_ids[] __initconst = { 1455static const struct of_device_id l2x0_ids[] __initconst = {
955 { .compatible = "arm,l210-cache", .data = (void *)&l2x0_data }, 1456 L2C_ID("arm,l210-cache", of_l2c210_data),
956 { .compatible = "arm,l220-cache", .data = (void *)&l2x0_data }, 1457 L2C_ID("arm,l220-cache", of_l2c220_data),
957 { .compatible = "arm,pl310-cache", .data = (void *)&pl310_data }, 1458 L2C_ID("arm,pl310-cache", of_l2c310_data),
958 { .compatible = "bcm,bcm11351-a2-pl310-cache", /* deprecated name */ 1459 L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
959 .data = (void *)&bcm_l2x0_data}, 1460 L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
960 { .compatible = "brcm,bcm11351-a2-pl310-cache", 1461 L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
961 .data = (void *)&bcm_l2x0_data}, 1462 L2C_ID("marvell,tauros3-cache", of_tauros3_data),
962 { .compatible = "marvell,aurora-outer-cache", 1463 /* Deprecated IDs */
963 .data = (void *)&aurora_with_outer_data}, 1464 L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
964 { .compatible = "marvell,aurora-system-cache",
965 .data = (void *)&aurora_no_outer_data},
966 { .compatible = "marvell,tauros3-cache",
967 .data = (void *)&tauros3_data },
968 {} 1465 {}
969}; 1466};
970 1467
971int __init l2x0_of_init(u32 aux_val, u32 aux_mask) 1468int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
972{ 1469{
1470 const struct l2c_init_data *data;
973 struct device_node *np; 1471 struct device_node *np;
974 const struct l2x0_of_data *data;
975 struct resource res; 1472 struct resource res;
1473 u32 cache_id, old_aux;
976 1474
977 np = of_find_matching_node(NULL, l2x0_ids); 1475 np = of_find_matching_node(NULL, l2x0_ids);
978 if (!np) 1476 if (!np)
@@ -989,23 +1487,29 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
989 1487
990 data = of_match_node(l2x0_ids, np)->data; 1488 data = of_match_node(l2x0_ids, np)->data;
991 1489
992 /* L2 configuration can only be changed if the cache is disabled */ 1490 old_aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
993 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { 1491 if (old_aux != ((old_aux & aux_mask) | aux_val)) {
994 if (data->setup) 1492 pr_warn("L2C: platform modifies aux control register: 0x%08x -> 0x%08x\n",
995 data->setup(np, &aux_val, &aux_mask); 1493 old_aux, (old_aux & aux_mask) | aux_val);
996 1494 } else if (aux_mask != ~0U && aux_val != 0) {
997 /* For aurora cache in no outer mode select the 1495 pr_alert("L2C: platform provided aux values match the hardware, so have no effect. Please remove them.\n");
998 * correct mode using the coprocessor*/
999 if (data == &aurora_no_outer_data)
1000 aurora_broadcast_l2_commands();
1001 } 1496 }
1002 1497
1003 if (data->save) 1498 /* All L2 caches are unified, so this property should be specified */
1004 data->save(); 1499 if (!of_property_read_bool(np, "cache-unified"))
1500 pr_err("L2C: device tree omits to specify unified cache\n");
1501
1502 /* L2 configuration can only be changed if the cache is disabled */
1503 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
1504 if (data->of_parse)
1505 data->of_parse(np, &aux_val, &aux_mask);
1506
1507 if (cache_id_part_number_from_dt)
1508 cache_id = cache_id_part_number_from_dt;
1509 else
1510 cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
1005 1511
1006 of_init = true; 1512 __l2c_init(data, aux_val, aux_mask, cache_id);
1007 memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
1008 l2x0_init(l2x0_base, aux_val, aux_mask);
1009 1513
1010 return 0; 1514 return 0;
1011} 1515}
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 778bcf88ee79..615c99e38ba1 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -59,7 +59,7 @@ ENTRY(v7_invalidate_l1)
59 bgt 2b 59 bgt 2b
60 cmp r2, #0 60 cmp r2, #0
61 bgt 1b 61 bgt 1b
62 dsb 62 dsb st
63 isb 63 isb
64 mov pc, lr 64 mov pc, lr
65ENDPROC(v7_invalidate_l1) 65ENDPROC(v7_invalidate_l1)
@@ -166,7 +166,7 @@ skip:
166finished: 166finished:
167 mov r10, #0 @ swith back to cache level 0 167 mov r10, #0 @ swith back to cache level 0
168 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr 168 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
169 dsb 169 dsb st
170 isb 170 isb
171 mov pc, lr 171 mov pc, lr
172ENDPROC(v7_flush_dcache_all) 172ENDPROC(v7_flush_dcache_all)
@@ -335,7 +335,7 @@ ENTRY(v7_flush_kern_dcache_area)
335 add r0, r0, r2 335 add r0, r0, r2
336 cmp r0, r1 336 cmp r0, r1
337 blo 1b 337 blo 1b
338 dsb 338 dsb st
339 mov pc, lr 339 mov pc, lr
340ENDPROC(v7_flush_kern_dcache_area) 340ENDPROC(v7_flush_kern_dcache_area)
341 341
@@ -368,7 +368,7 @@ v7_dma_inv_range:
368 add r0, r0, r2 368 add r0, r0, r2
369 cmp r0, r1 369 cmp r0, r1
370 blo 1b 370 blo 1b
371 dsb 371 dsb st
372 mov pc, lr 372 mov pc, lr
373ENDPROC(v7_dma_inv_range) 373ENDPROC(v7_dma_inv_range)
374 374
@@ -390,7 +390,7 @@ v7_dma_clean_range:
390 add r0, r0, r2 390 add r0, r0, r2
391 cmp r0, r1 391 cmp r0, r1
392 blo 1b 392 blo 1b
393 dsb 393 dsb st
394 mov pc, lr 394 mov pc, lr
395ENDPROC(v7_dma_clean_range) 395ENDPROC(v7_dma_clean_range)
396 396
@@ -412,7 +412,7 @@ ENTRY(v7_dma_flush_range)
412 add r0, r0, r2 412 add r0, r0, r2
413 cmp r0, r1 413 cmp r0, r1
414 blo 1b 414 blo 1b
415 dsb 415 dsb st
416 mov pc, lr 416 mov pc, lr
417ENDPROC(v7_dma_flush_range) 417ENDPROC(v7_dma_flush_range)
418 418
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 6b00be1f971e..b05e08c4734c 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -904,11 +904,12 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
904 unsigned long paddr = page_to_phys(page) + off; 904 unsigned long paddr = page_to_phys(page) + off;
905 905
906 /* FIXME: non-speculating: not required */ 906 /* FIXME: non-speculating: not required */
907 /* don't bother invalidating if DMA to device */ 907 /* in any case, don't bother invalidating if DMA to device */
908 if (dir != DMA_TO_DEVICE) 908 if (dir != DMA_TO_DEVICE) {
909 outer_inv_range(paddr, paddr + size); 909 outer_inv_range(paddr, paddr + size);
910 910
911 dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); 911 dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
912 }
912 913
913 /* 914 /*
914 * Mark the D-cache clean for these pages to avoid extra flushing. 915 * Mark the D-cache clean for these pages to avoid extra flushing.
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 3387e60e4ea3..43d54f5b26b9 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -104,17 +104,20 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
104#define flush_icache_alias(pfn,vaddr,len) do { } while (0) 104#define flush_icache_alias(pfn,vaddr,len) do { } while (0)
105#endif 105#endif
106 106
107#define FLAG_PA_IS_EXEC 1
108#define FLAG_PA_CORE_IN_MM 2
109
107static void flush_ptrace_access_other(void *args) 110static void flush_ptrace_access_other(void *args)
108{ 111{
109 __flush_icache_all(); 112 __flush_icache_all();
110} 113}
111 114
112static 115static inline
113void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, 116void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
114 unsigned long uaddr, void *kaddr, unsigned long len) 117 unsigned long len, unsigned int flags)
115{ 118{
116 if (cache_is_vivt()) { 119 if (cache_is_vivt()) {
117 if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { 120 if (flags & FLAG_PA_CORE_IN_MM) {
118 unsigned long addr = (unsigned long)kaddr; 121 unsigned long addr = (unsigned long)kaddr;
119 __cpuc_coherent_kern_range(addr, addr + len); 122 __cpuc_coherent_kern_range(addr, addr + len);
120 } 123 }
@@ -128,7 +131,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
128 } 131 }
129 132
130 /* VIPT non-aliasing D-cache */ 133 /* VIPT non-aliasing D-cache */
131 if (vma->vm_flags & VM_EXEC) { 134 if (flags & FLAG_PA_IS_EXEC) {
132 unsigned long addr = (unsigned long)kaddr; 135 unsigned long addr = (unsigned long)kaddr;
133 if (icache_is_vipt_aliasing()) 136 if (icache_is_vipt_aliasing())
134 flush_icache_alias(page_to_pfn(page), uaddr, len); 137 flush_icache_alias(page_to_pfn(page), uaddr, len);
@@ -140,6 +143,26 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
140 } 143 }
141} 144}
142 145
146static
147void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
148 unsigned long uaddr, void *kaddr, unsigned long len)
149{
150 unsigned int flags = 0;
151 if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
152 flags |= FLAG_PA_CORE_IN_MM;
153 if (vma->vm_flags & VM_EXEC)
154 flags |= FLAG_PA_IS_EXEC;
155 __flush_ptrace_access(page, uaddr, kaddr, len, flags);
156}
157
158void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
159 void *kaddr, unsigned long len)
160{
161 unsigned int flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC;
162
163 __flush_ptrace_access(page, uaddr, kaddr, len, flags);
164}
165
143/* 166/*
144 * Copy user data from/to a page which is mapped into a different 167 * Copy user data from/to a page which is mapped into a different
145 * processes address space. Really, we want to allow our "user 168 * processes address space. Really, we want to allow our "user
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 21b9e1bf9b77..45aeaaca9052 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -18,6 +18,21 @@
18#include <asm/tlbflush.h> 18#include <asm/tlbflush.h>
19#include "mm.h" 19#include "mm.h"
20 20
21pte_t *fixmap_page_table;
22
23static inline void set_fixmap_pte(int idx, pte_t pte)
24{
25 unsigned long vaddr = __fix_to_virt(idx);
26 set_pte_ext(fixmap_page_table + idx, pte, 0);
27 local_flush_tlb_kernel_page(vaddr);
28}
29
30static inline pte_t get_fixmap_pte(unsigned long vaddr)
31{
32 unsigned long idx = __virt_to_fix(vaddr);
33 return *(fixmap_page_table + idx);
34}
35
21void *kmap(struct page *page) 36void *kmap(struct page *page)
22{ 37{
23 might_sleep(); 38 might_sleep();
@@ -63,20 +78,20 @@ void *kmap_atomic(struct page *page)
63 type = kmap_atomic_idx_push(); 78 type = kmap_atomic_idx_push();
64 79
65 idx = type + KM_TYPE_NR * smp_processor_id(); 80 idx = type + KM_TYPE_NR * smp_processor_id();
66 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 81 vaddr = __fix_to_virt(idx);
67#ifdef CONFIG_DEBUG_HIGHMEM 82#ifdef CONFIG_DEBUG_HIGHMEM
68 /* 83 /*
69 * With debugging enabled, kunmap_atomic forces that entry to 0. 84 * With debugging enabled, kunmap_atomic forces that entry to 0.
70 * Make sure it was indeed properly unmapped. 85 * Make sure it was indeed properly unmapped.
71 */ 86 */
72 BUG_ON(!pte_none(get_top_pte(vaddr))); 87 BUG_ON(!pte_none(*(fixmap_page_table + idx)));
73#endif 88#endif
74 /* 89 /*
75 * When debugging is off, kunmap_atomic leaves the previous mapping 90 * When debugging is off, kunmap_atomic leaves the previous mapping
76 * in place, so the contained TLB flush ensures the TLB is updated 91 * in place, so the contained TLB flush ensures the TLB is updated
77 * with the new mapping. 92 * with the new mapping.
78 */ 93 */
79 set_top_pte(vaddr, mk_pte(page, kmap_prot)); 94 set_fixmap_pte(idx, mk_pte(page, kmap_prot));
80 95
81 return (void *)vaddr; 96 return (void *)vaddr;
82} 97}
@@ -94,8 +109,8 @@ void __kunmap_atomic(void *kvaddr)
94 if (cache_is_vivt()) 109 if (cache_is_vivt())
95 __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); 110 __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
96#ifdef CONFIG_DEBUG_HIGHMEM 111#ifdef CONFIG_DEBUG_HIGHMEM
97 BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); 112 BUG_ON(vaddr != __fix_to_virt(idx));
98 set_top_pte(vaddr, __pte(0)); 113 set_fixmap_pte(idx, __pte(0));
99#else 114#else
100 (void) idx; /* to kill a warning */ 115 (void) idx; /* to kill a warning */
101#endif 116#endif
@@ -117,11 +132,11 @@ void *kmap_atomic_pfn(unsigned long pfn)
117 132
118 type = kmap_atomic_idx_push(); 133 type = kmap_atomic_idx_push();
119 idx = type + KM_TYPE_NR * smp_processor_id(); 134 idx = type + KM_TYPE_NR * smp_processor_id();
120 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 135 vaddr = __fix_to_virt(idx);
121#ifdef CONFIG_DEBUG_HIGHMEM 136#ifdef CONFIG_DEBUG_HIGHMEM
122 BUG_ON(!pte_none(get_top_pte(vaddr))); 137 BUG_ON(!pte_none(*(fixmap_page_table + idx)));
123#endif 138#endif
124 set_top_pte(vaddr, pfn_pte(pfn, kmap_prot)); 139 set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
125 140
126 return (void *)vaddr; 141 return (void *)vaddr;
127} 142}
@@ -133,5 +148,5 @@ struct page *kmap_atomic_to_page(const void *ptr)
133 if (vaddr < FIXADDR_START) 148 if (vaddr < FIXADDR_START)
134 return virt_to_page(ptr); 149 return virt_to_page(ptr);
135 150
136 return pte_page(get_top_pte(vaddr)); 151 return pte_page(get_fixmap_pte(vaddr));
137} 152}
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 13ce33e096b5..5958ac05181e 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -90,24 +90,21 @@ __tagtable(ATAG_INITRD2, parse_tag_initrd2);
90 * initialization functions, as well as show_mem() for the skipping 90 * initialization functions, as well as show_mem() for the skipping
91 * of holes in the memory map. It is populated by arm_add_memory(). 91 * of holes in the memory map. It is populated by arm_add_memory().
92 */ 92 */
93struct meminfo meminfo;
94
95void show_mem(unsigned int filter) 93void show_mem(unsigned int filter)
96{ 94{
97 int free = 0, total = 0, reserved = 0; 95 int free = 0, total = 0, reserved = 0;
98 int shared = 0, cached = 0, slab = 0, i; 96 int shared = 0, cached = 0, slab = 0;
99 struct meminfo * mi = &meminfo; 97 struct memblock_region *reg;
100 98
101 printk("Mem-info:\n"); 99 printk("Mem-info:\n");
102 show_free_areas(filter); 100 show_free_areas(filter);
103 101
104 for_each_bank (i, mi) { 102 for_each_memblock (memory, reg) {
105 struct membank *bank = &mi->bank[i];
106 unsigned int pfn1, pfn2; 103 unsigned int pfn1, pfn2;
107 struct page *page, *end; 104 struct page *page, *end;
108 105
109 pfn1 = bank_pfn_start(bank); 106 pfn1 = memblock_region_memory_base_pfn(reg);
110 pfn2 = bank_pfn_end(bank); 107 pfn2 = memblock_region_memory_end_pfn(reg);
111 108
112 page = pfn_to_page(pfn1); 109 page = pfn_to_page(pfn1);
113 end = pfn_to_page(pfn2 - 1) + 1; 110 end = pfn_to_page(pfn2 - 1) + 1;
@@ -124,8 +121,9 @@ void show_mem(unsigned int filter)
124 free++; 121 free++;
125 else 122 else
126 shared += page_count(page) - 1; 123 shared += page_count(page) - 1;
127 page++; 124 pfn1++;
128 } while (page < end); 125 page = pfn_to_page(pfn1);
126 } while (pfn1 < pfn2);
129 } 127 }
130 128
131 printk("%d pages of RAM\n", total); 129 printk("%d pages of RAM\n", total);
@@ -139,16 +137,9 @@ void show_mem(unsigned int filter)
139static void __init find_limits(unsigned long *min, unsigned long *max_low, 137static void __init find_limits(unsigned long *min, unsigned long *max_low,
140 unsigned long *max_high) 138 unsigned long *max_high)
141{ 139{
142 struct meminfo *mi = &meminfo; 140 *max_low = PFN_DOWN(memblock_get_current_limit());
143 int i; 141 *min = PFN_UP(memblock_start_of_DRAM());
144 142 *max_high = PFN_DOWN(memblock_end_of_DRAM());
145 /* This assumes the meminfo array is properly sorted */
146 *min = bank_pfn_start(&mi->bank[0]);
147 for_each_bank (i, mi)
148 if (mi->bank[i].highmem)
149 break;
150 *max_low = bank_pfn_end(&mi->bank[i - 1]);
151 *max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]);
152} 143}
153 144
154#ifdef CONFIG_ZONE_DMA 145#ifdef CONFIG_ZONE_DMA
@@ -283,14 +274,8 @@ phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
283 return phys; 274 return phys;
284} 275}
285 276
286void __init arm_memblock_init(struct meminfo *mi, 277void __init arm_memblock_init(const struct machine_desc *mdesc)
287 const struct machine_desc *mdesc)
288{ 278{
289 int i;
290
291 for (i = 0; i < mi->nr_banks; i++)
292 memblock_add(mi->bank[i].start, mi->bank[i].size);
293
294 /* Register the kernel text, kernel data and initrd with memblock. */ 279 /* Register the kernel text, kernel data and initrd with memblock. */
295#ifdef CONFIG_XIP_KERNEL 280#ifdef CONFIG_XIP_KERNEL
296 memblock_reserve(__pa(_sdata), _end - _sdata); 281 memblock_reserve(__pa(_sdata), _end - _sdata);
@@ -422,54 +407,53 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn)
422/* 407/*
423 * The mem_map array can get very big. Free the unused area of the memory map. 408 * The mem_map array can get very big. Free the unused area of the memory map.
424 */ 409 */
425static void __init free_unused_memmap(struct meminfo *mi) 410static void __init free_unused_memmap(void)
426{ 411{
427 unsigned long bank_start, prev_bank_end = 0; 412 unsigned long start, prev_end = 0;
428 unsigned int i; 413 struct memblock_region *reg;
429 414
430 /* 415 /*
431 * This relies on each bank being in address order. 416 * This relies on each bank being in address order.
432 * The banks are sorted previously in bootmem_init(). 417 * The banks are sorted previously in bootmem_init().
433 */ 418 */
434 for_each_bank(i, mi) { 419 for_each_memblock(memory, reg) {
435 struct membank *bank = &mi->bank[i]; 420 start = memblock_region_memory_base_pfn(reg);
436
437 bank_start = bank_pfn_start(bank);
438 421
439#ifdef CONFIG_SPARSEMEM 422#ifdef CONFIG_SPARSEMEM
440 /* 423 /*
441 * Take care not to free memmap entries that don't exist 424 * Take care not to free memmap entries that don't exist
442 * due to SPARSEMEM sections which aren't present. 425 * due to SPARSEMEM sections which aren't present.
443 */ 426 */
444 bank_start = min(bank_start, 427 start = min(start,
445 ALIGN(prev_bank_end, PAGES_PER_SECTION)); 428 ALIGN(prev_end, PAGES_PER_SECTION));
446#else 429#else
447 /* 430 /*
448 * Align down here since the VM subsystem insists that the 431 * Align down here since the VM subsystem insists that the
449 * memmap entries are valid from the bank start aligned to 432 * memmap entries are valid from the bank start aligned to
450 * MAX_ORDER_NR_PAGES. 433 * MAX_ORDER_NR_PAGES.
451 */ 434 */
452 bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES); 435 start = round_down(start, MAX_ORDER_NR_PAGES);
453#endif 436#endif
454 /* 437 /*
455 * If we had a previous bank, and there is a space 438 * If we had a previous bank, and there is a space
456 * between the current bank and the previous, free it. 439 * between the current bank and the previous, free it.
457 */ 440 */
458 if (prev_bank_end && prev_bank_end < bank_start) 441 if (prev_end && prev_end < start)
459 free_memmap(prev_bank_end, bank_start); 442 free_memmap(prev_end, start);
460 443
461 /* 444 /*
462 * Align up here since the VM subsystem insists that the 445 * Align up here since the VM subsystem insists that the
463 * memmap entries are valid from the bank end aligned to 446 * memmap entries are valid from the bank end aligned to
464 * MAX_ORDER_NR_PAGES. 447 * MAX_ORDER_NR_PAGES.
465 */ 448 */
466 prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES); 449 prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
450 MAX_ORDER_NR_PAGES);
467 } 451 }
468 452
469#ifdef CONFIG_SPARSEMEM 453#ifdef CONFIG_SPARSEMEM
470 if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION)) 454 if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
471 free_memmap(prev_bank_end, 455 free_memmap(prev_end,
472 ALIGN(prev_bank_end, PAGES_PER_SECTION)); 456 ALIGN(prev_end, PAGES_PER_SECTION));
473#endif 457#endif
474} 458}
475 459
@@ -545,7 +529,7 @@ void __init mem_init(void)
545 set_max_mapnr(pfn_to_page(max_pfn) - mem_map); 529 set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
546 530
547 /* this will put all unused low memory onto the freelists */ 531 /* this will put all unused low memory onto the freelists */
548 free_unused_memmap(&meminfo); 532 free_unused_memmap();
549 free_all_bootmem(); 533 free_all_bootmem();
550 534
551#ifdef CONFIG_SA1111 535#ifdef CONFIG_SA1111
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index f9c32ba73544..d1e5ad7ab3bc 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -438,6 +438,13 @@ void __arm_iounmap(volatile void __iomem *io_addr)
438EXPORT_SYMBOL(__arm_iounmap); 438EXPORT_SYMBOL(__arm_iounmap);
439 439
440#ifdef CONFIG_PCI 440#ifdef CONFIG_PCI
441static int pci_ioremap_mem_type = MT_DEVICE;
442
443void pci_ioremap_set_mem_type(int mem_type)
444{
445 pci_ioremap_mem_type = mem_type;
446}
447
441int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr) 448int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
442{ 449{
443 BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT); 450 BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);
@@ -445,7 +452,7 @@ int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
445 return ioremap_page_range(PCI_IO_VIRT_BASE + offset, 452 return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
446 PCI_IO_VIRT_BASE + offset + SZ_64K, 453 PCI_IO_VIRT_BASE + offset + SZ_64K,
447 phys_addr, 454 phys_addr,
448 __pgprot(get_mem_type(MT_DEVICE)->prot_pte)); 455 __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
449} 456}
450EXPORT_SYMBOL_GPL(pci_ioremap_io); 457EXPORT_SYMBOL_GPL(pci_ioremap_io);
451#endif 458#endif
diff --git a/arch/arm/mm/l2c-common.c b/arch/arm/mm/l2c-common.c
new file mode 100644
index 000000000000..10a3cf28c362
--- /dev/null
+++ b/arch/arm/mm/l2c-common.c
@@ -0,0 +1,20 @@
1/*
2 * Copyright (C) 2010 ARM Ltd.
3 * Written by Catalin Marinas <catalin.marinas@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9#include <linux/bug.h>
10#include <linux/smp.h>
11#include <asm/outercache.h>
12
13void outer_disable(void)
14{
15 WARN_ON(!irqs_disabled());
16 WARN_ON(num_online_cpus() > 1);
17
18 if (outer_cache.disable)
19 outer_cache.disable();
20}
diff --git a/arch/arm/mm/l2c-l2x0-resume.S b/arch/arm/mm/l2c-l2x0-resume.S
new file mode 100644
index 000000000000..99b05f21a59a
--- /dev/null
+++ b/arch/arm/mm/l2c-l2x0-resume.S
@@ -0,0 +1,58 @@
1/*
2 * L2C-310 early resume code. This can be used by platforms to restore
3 * the settings of their L2 cache controller before restoring the
4 * processor state.
5 *
6 * This code can only be used if you are running in the secure world.
7 */
8#include <linux/linkage.h>
9#include <asm/hardware/cache-l2x0.h>
10
11 .text
12
13ENTRY(l2c310_early_resume)
14 adr r0, 1f
15 ldr r2, [r0]
16 add r0, r2, r0
17
18 ldmia r0, {r1, r2, r3, r4, r5, r6, r7, r8}
19 @ r1 = phys address of L2C-310 controller
20 @ r2 = aux_ctrl
21 @ r3 = tag_latency
22 @ r4 = data_latency
23 @ r5 = filter_start
24 @ r6 = filter_end
25 @ r7 = prefetch_ctrl
26 @ r8 = pwr_ctrl
27
28 @ Check that the address has been initialised
29 teq r1, #0
30 moveq pc, lr
31
32 @ The prefetch and power control registers are revision dependent
33 @ and can be written whether or not the L2 cache is enabled
34 ldr r0, [r1, #L2X0_CACHE_ID]
35 and r0, r0, #L2X0_CACHE_ID_RTL_MASK
36 cmp r0, #L310_CACHE_ID_RTL_R2P0
37 strcs r7, [r1, #L310_PREFETCH_CTRL]
38 cmp r0, #L310_CACHE_ID_RTL_R3P0
39 strcs r8, [r1, #L310_POWER_CTRL]
40
41 @ Don't setup the L2 cache if it is already enabled
42 ldr r0, [r1, #L2X0_CTRL]
43 tst r0, #L2X0_CTRL_EN
44 movne pc, lr
45
46 str r3, [r1, #L310_TAG_LATENCY_CTRL]
47 str r4, [r1, #L310_DATA_LATENCY_CTRL]
48 str r6, [r1, #L310_ADDR_FILTER_END]
49 str r5, [r1, #L310_ADDR_FILTER_START]
50
51 str r2, [r1, #L2X0_AUX_CTRL]
52 mov r9, #L2X0_CTRL_EN
53 str r9, [r1, #L2X0_CTRL]
54 mov pc, lr
55ENDPROC(l2c310_early_resume)
56
57 .align
581: .long l2x0_saved_regs - .
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index df875c457068..ab14b79b03f0 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -35,6 +35,7 @@
35#include <asm/mach/arch.h> 35#include <asm/mach/arch.h>
36#include <asm/mach/map.h> 36#include <asm/mach/map.h>
37#include <asm/mach/pci.h> 37#include <asm/mach/pci.h>
38#include <asm/fixmap.h>
38 39
39#include "mm.h" 40#include "mm.h"
40#include "tcm.h" 41#include "tcm.h"
@@ -1074,74 +1075,47 @@ phys_addr_t arm_lowmem_limit __initdata = 0;
1074void __init sanity_check_meminfo(void) 1075void __init sanity_check_meminfo(void)
1075{ 1076{
1076 phys_addr_t memblock_limit = 0; 1077 phys_addr_t memblock_limit = 0;
1077 int i, j, highmem = 0; 1078 int highmem = 0;
1078 phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1; 1079 phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
1080 struct memblock_region *reg;
1079 1081
1080 for (i = 0, j = 0; i < meminfo.nr_banks; i++) { 1082 for_each_memblock(memory, reg) {
1081 struct membank *bank = &meminfo.bank[j]; 1083 phys_addr_t block_start = reg->base;
1082 phys_addr_t size_limit; 1084 phys_addr_t block_end = reg->base + reg->size;
1083 1085 phys_addr_t size_limit = reg->size;
1084 *bank = meminfo.bank[i];
1085 size_limit = bank->size;
1086 1086
1087 if (bank->start >= vmalloc_limit) 1087 if (reg->base >= vmalloc_limit)
1088 highmem = 1; 1088 highmem = 1;
1089 else 1089 else
1090 size_limit = vmalloc_limit - bank->start; 1090 size_limit = vmalloc_limit - reg->base;
1091 1091
1092 bank->highmem = highmem;
1093 1092
1094#ifdef CONFIG_HIGHMEM 1093 if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
1095 /* 1094
1096 * Split those memory banks which are partially overlapping 1095 if (highmem) {
1097 * the vmalloc area greatly simplifying things later. 1096 pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
1098 */ 1097 &block_start, &block_end);
1099 if (!highmem && bank->size > size_limit) { 1098 memblock_remove(reg->base, reg->size);
1100 if (meminfo.nr_banks >= NR_BANKS) { 1099 continue;
1101 printk(KERN_CRIT "NR_BANKS too low, "
1102 "ignoring high memory\n");
1103 } else {
1104 memmove(bank + 1, bank,
1105 (meminfo.nr_banks - i) * sizeof(*bank));
1106 meminfo.nr_banks++;
1107 i++;
1108 bank[1].size -= size_limit;
1109 bank[1].start = vmalloc_limit;
1110 bank[1].highmem = highmem = 1;
1111 j++;
1112 } 1100 }
1113 bank->size = size_limit;
1114 }
1115#else
1116 /*
1117 * Highmem banks not allowed with !CONFIG_HIGHMEM.
1118 */
1119 if (highmem) {
1120 printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
1121 "(!CONFIG_HIGHMEM).\n",
1122 (unsigned long long)bank->start,
1123 (unsigned long long)bank->start + bank->size - 1);
1124 continue;
1125 }
1126 1101
1127 /* 1102 if (reg->size > size_limit) {
1128 * Check whether this memory bank would partially overlap 1103 phys_addr_t overlap_size = reg->size - size_limit;
1129 * the vmalloc area. 1104
1130 */ 1105 pr_notice("Truncating RAM at %pa-%pa to -%pa",
1131 if (bank->size > size_limit) { 1106 &block_start, &block_end, &vmalloc_limit);
1132 printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx " 1107 memblock_remove(vmalloc_limit, overlap_size);
1133 "to -%.8llx (vmalloc region overlap).\n", 1108 block_end = vmalloc_limit;
1134 (unsigned long long)bank->start, 1109 }
1135 (unsigned long long)bank->start + bank->size - 1,
1136 (unsigned long long)bank->start + size_limit - 1);
1137 bank->size = size_limit;
1138 } 1110 }
1139#endif
1140 if (!bank->highmem) {
1141 phys_addr_t bank_end = bank->start + bank->size;
1142 1111
1143 if (bank_end > arm_lowmem_limit) 1112 if (!highmem) {
1144 arm_lowmem_limit = bank_end; 1113 if (block_end > arm_lowmem_limit) {
1114 if (reg->size > size_limit)
1115 arm_lowmem_limit = vmalloc_limit;
1116 else
1117 arm_lowmem_limit = block_end;
1118 }
1145 1119
1146 /* 1120 /*
1147 * Find the first non-section-aligned page, and point 1121 * Find the first non-section-aligned page, and point
@@ -1157,35 +1131,15 @@ void __init sanity_check_meminfo(void)
1157 * occurs before any free memory is mapped. 1131 * occurs before any free memory is mapped.
1158 */ 1132 */
1159 if (!memblock_limit) { 1133 if (!memblock_limit) {
1160 if (!IS_ALIGNED(bank->start, SECTION_SIZE)) 1134 if (!IS_ALIGNED(block_start, SECTION_SIZE))
1161 memblock_limit = bank->start; 1135 memblock_limit = block_start;
1162 else if (!IS_ALIGNED(bank_end, SECTION_SIZE)) 1136 else if (!IS_ALIGNED(block_end, SECTION_SIZE))
1163 memblock_limit = bank_end; 1137 memblock_limit = arm_lowmem_limit;
1164 } 1138 }
1165 }
1166 j++;
1167 }
1168#ifdef CONFIG_HIGHMEM
1169 if (highmem) {
1170 const char *reason = NULL;
1171 1139
1172 if (cache_is_vipt_aliasing()) {
1173 /*
1174 * Interactions between kmap and other mappings
1175 * make highmem support with aliasing VIPT caches
1176 * rather difficult.
1177 */
1178 reason = "with VIPT aliasing cache";
1179 }
1180 if (reason) {
1181 printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
1182 reason);
1183 while (j > 0 && meminfo.bank[j - 1].highmem)
1184 j--;
1185 } 1140 }
1186 } 1141 }
1187#endif 1142
1188 meminfo.nr_banks = j;
1189 high_memory = __va(arm_lowmem_limit - 1) + 1; 1143 high_memory = __va(arm_lowmem_limit - 1) + 1;
1190 1144
1191 /* 1145 /*
@@ -1372,6 +1326,9 @@ static void __init kmap_init(void)
1372#ifdef CONFIG_HIGHMEM 1326#ifdef CONFIG_HIGHMEM
1373 pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE), 1327 pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
1374 PKMAP_BASE, _PAGE_KERNEL_TABLE); 1328 PKMAP_BASE, _PAGE_KERNEL_TABLE);
1329
1330 fixmap_page_table = early_pte_alloc(pmd_off_k(FIXADDR_START),
1331 FIXADDR_START, _PAGE_KERNEL_TABLE);
1375#endif 1332#endif
1376} 1333}
1377 1334
@@ -1474,7 +1431,7 @@ void __init early_paging_init(const struct machine_desc *mdesc,
1474 * just complicate the code. 1431 * just complicate the code.
1475 */ 1432 */
1476 flush_cache_louis(); 1433 flush_cache_louis();
1477 dsb(); 1434 dsb(ishst);
1478 isb(); 1435 isb();
1479 1436
1480 /* remap level 1 table */ 1437 /* remap level 1 table */
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 55764a7ef1f0..da1874f9f8cf 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -88,30 +88,35 @@ static unsigned long irbar_read(void)
88void __init sanity_check_meminfo_mpu(void) 88void __init sanity_check_meminfo_mpu(void)
89{ 89{
90 int i; 90 int i;
91 struct membank *bank = meminfo.bank;
92 phys_addr_t phys_offset = PHYS_OFFSET; 91 phys_addr_t phys_offset = PHYS_OFFSET;
93 phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size; 92 phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size;
94 93 struct memblock_region *reg;
95 /* Initially only use memory continuous from PHYS_OFFSET */ 94 bool first = true;
96 if (bank_phys_start(&bank[0]) != phys_offset) 95 phys_addr_t mem_start;
97 panic("First memory bank must be contiguous from PHYS_OFFSET"); 96 phys_addr_t mem_end;
98 97
99 /* Banks have already been sorted by start address */ 98 for_each_memblock(memory, reg) {
100 for (i = 1; i < meminfo.nr_banks; i++) { 99 if (first) {
101 if (bank[i].start <= bank_phys_end(&bank[0]) && 100 /*
102 bank_phys_end(&bank[i]) > bank_phys_end(&bank[0])) { 101 * Initially only use memory continuous from
103 bank[0].size = bank_phys_end(&bank[i]) - bank[0].start; 102 * PHYS_OFFSET */
103 if (reg->base != phys_offset)
104 panic("First memory bank must be contiguous from PHYS_OFFSET");
105
106 mem_start = reg->base;
107 mem_end = reg->base + reg->size;
108 specified_mem_size = reg->size;
109 first = false;
104 } else { 110 } else {
105 pr_notice("Ignoring RAM after 0x%.8lx. " 111 /*
106 "First non-contiguous (ignored) bank start: 0x%.8lx\n", 112 * memblock auto merges contiguous blocks, remove
107 (unsigned long)bank_phys_end(&bank[0]), 113 * all blocks afterwards
108 (unsigned long)bank_phys_start(&bank[i])); 114 */
109 break; 115 pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
116 &mem_start, &reg->base);
117 memblock_remove(reg->base, reg->size);
110 } 118 }
111 } 119 }
112 /* All contiguous banks are now merged in to the first bank */
113 meminfo.nr_banks = 1;
114 specified_mem_size = bank[0].size;
115 120
116 /* 121 /*
117 * MPU has curious alignment requirements: Size must be power of 2, and 122 * MPU has curious alignment requirements: Size must be power of 2, and
@@ -128,23 +133,24 @@ void __init sanity_check_meminfo_mpu(void)
128 */ 133 */
129 aligned_region_size = (phys_offset - 1) ^ (phys_offset); 134 aligned_region_size = (phys_offset - 1) ^ (phys_offset);
130 /* Find the max power-of-two sized region that fits inside our bank */ 135 /* Find the max power-of-two sized region that fits inside our bank */
131 rounded_mem_size = (1 << __fls(bank[0].size)) - 1; 136 rounded_mem_size = (1 << __fls(specified_mem_size)) - 1;
132 137
133 /* The actual region size is the smaller of the two */ 138 /* The actual region size is the smaller of the two */
134 aligned_region_size = aligned_region_size < rounded_mem_size 139 aligned_region_size = aligned_region_size < rounded_mem_size
135 ? aligned_region_size + 1 140 ? aligned_region_size + 1
136 : rounded_mem_size + 1; 141 : rounded_mem_size + 1;
137 142
138 if (aligned_region_size != specified_mem_size) 143 if (aligned_region_size != specified_mem_size) {
139 pr_warn("Truncating memory from 0x%.8lx to 0x%.8lx (MPU region constraints)", 144 pr_warn("Truncating memory from %pa to %pa (MPU region constraints)",
140 (unsigned long)specified_mem_size, 145 &specified_mem_size, &aligned_region_size);
141 (unsigned long)aligned_region_size); 146 memblock_remove(mem_start + aligned_region_size,
147 specified_mem_size - aligned_region_size);
148
149 mem_end = mem_start + aligned_region_size;
150 }
142 151
143 meminfo.bank[0].size = aligned_region_size; 152 pr_debug("MPU Region from %pa size %pa (end %pa))\n",
144 pr_debug("MPU Region from 0x%.8lx size 0x%.8lx (end 0x%.8lx))\n", 153 &phys_offset, &aligned_region_size, &mem_end);
145 (unsigned long)phys_offset,
146 (unsigned long)aligned_region_size,
147 (unsigned long)bank_phys_end(&bank[0]));
148 154
149} 155}
150 156
@@ -292,7 +298,7 @@ void __init sanity_check_meminfo(void)
292{ 298{
293 phys_addr_t end; 299 phys_addr_t end;
294 sanity_check_meminfo_mpu(); 300 sanity_check_meminfo_mpu();
295 end = bank_phys_end(&meminfo.bank[meminfo.nr_banks - 1]); 301 end = memblock_end_of_DRAM();
296 high_memory = __va(end - 1) + 1; 302 high_memory = __va(end - 1) + 1;
297} 303}
298 304
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 01a719e18bb0..22e3ad63500c 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -64,6 +64,14 @@ ENTRY(cpu_v7_switch_mm)
64 mov pc, lr 64 mov pc, lr
65ENDPROC(cpu_v7_switch_mm) 65ENDPROC(cpu_v7_switch_mm)
66 66
67#ifdef __ARMEB__
68#define rl r3
69#define rh r2
70#else
71#define rl r2
72#define rh r3
73#endif
74
67/* 75/*
68 * cpu_v7_set_pte_ext(ptep, pte) 76 * cpu_v7_set_pte_ext(ptep, pte)
69 * 77 *
@@ -73,13 +81,13 @@ ENDPROC(cpu_v7_switch_mm)
73 */ 81 */
74ENTRY(cpu_v7_set_pte_ext) 82ENTRY(cpu_v7_set_pte_ext)
75#ifdef CONFIG_MMU 83#ifdef CONFIG_MMU
76 tst r2, #L_PTE_VALID 84 tst rl, #L_PTE_VALID
77 beq 1f 85 beq 1f
78 tst r3, #1 << (57 - 32) @ L_PTE_NONE 86 tst rh, #1 << (57 - 32) @ L_PTE_NONE
79 bicne r2, #L_PTE_VALID 87 bicne rl, #L_PTE_VALID
80 bne 1f 88 bne 1f
81 tst r3, #1 << (55 - 32) @ L_PTE_DIRTY 89 tst rh, #1 << (55 - 32) @ L_PTE_DIRTY
82 orreq r2, #L_PTE_RDONLY 90 orreq rl, #L_PTE_RDONLY
831: strd r2, r3, [r0] 911: strd r2, r3, [r0]
84 ALT_SMP(W(nop)) 92 ALT_SMP(W(nop))
85 ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte 93 ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 195731d3813b..3db2c2f04a30 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -169,9 +169,31 @@ ENDPROC(cpu_pj4b_do_idle)
169 globl_equ cpu_pj4b_do_idle, cpu_v7_do_idle 169 globl_equ cpu_pj4b_do_idle, cpu_v7_do_idle
170#endif 170#endif
171 globl_equ cpu_pj4b_dcache_clean_area, cpu_v7_dcache_clean_area 171 globl_equ cpu_pj4b_dcache_clean_area, cpu_v7_dcache_clean_area
172 globl_equ cpu_pj4b_do_suspend, cpu_v7_do_suspend 172#ifdef CONFIG_ARM_CPU_SUSPEND
173 globl_equ cpu_pj4b_do_resume, cpu_v7_do_resume 173ENTRY(cpu_pj4b_do_suspend)
174 globl_equ cpu_pj4b_suspend_size, cpu_v7_suspend_size 174 stmfd sp!, {r6 - r10}
175 mrc p15, 1, r6, c15, c1, 0 @ save CP15 - extra features
176 mrc p15, 1, r7, c15, c2, 0 @ save CP15 - Aux Func Modes Ctrl 0
177 mrc p15, 1, r8, c15, c1, 2 @ save CP15 - Aux Debug Modes Ctrl 2
178 mrc p15, 1, r9, c15, c1, 1 @ save CP15 - Aux Debug Modes Ctrl 1
179 mrc p15, 0, r10, c9, c14, 0 @ save CP15 - PMC
180 stmia r0!, {r6 - r10}
181 ldmfd sp!, {r6 - r10}
182 b cpu_v7_do_suspend
183ENDPROC(cpu_pj4b_do_suspend)
184
185ENTRY(cpu_pj4b_do_resume)
186 ldmia r0!, {r6 - r10}
187 mcr p15, 1, r6, c15, c1, 0 @ save CP15 - extra features
188 mcr p15, 1, r7, c15, c2, 0 @ save CP15 - Aux Func Modes Ctrl 0
189 mcr p15, 1, r8, c15, c1, 2 @ save CP15 - Aux Debug Modes Ctrl 2
190 mcr p15, 1, r9, c15, c1, 1 @ save CP15 - Aux Debug Modes Ctrl 1
191 mcr p15, 0, r10, c9, c14, 0 @ save CP15 - PMC
192 b cpu_v7_do_resume
193ENDPROC(cpu_pj4b_do_resume)
194#endif
195.globl cpu_pj4b_suspend_size
196.equ cpu_pj4b_suspend_size, 4 * 14
175 197
176#endif 198#endif
177 199
@@ -194,6 +216,7 @@ __v7_cr7mp_setup:
194__v7_ca7mp_setup: 216__v7_ca7mp_setup:
195__v7_ca12mp_setup: 217__v7_ca12mp_setup:
196__v7_ca15mp_setup: 218__v7_ca15mp_setup:
219__v7_ca17mp_setup:
197 mov r10, #0 220 mov r10, #0
1981: 2211:
199#ifdef CONFIG_SMP 222#ifdef CONFIG_SMP
@@ -505,6 +528,16 @@ __v7_ca15mp_proc_info:
505 .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info 528 .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
506 529
507 /* 530 /*
531 * ARM Ltd. Cortex A17 processor.
532 */
533 .type __v7_ca17mp_proc_info, #object
534__v7_ca17mp_proc_info:
535 .long 0x410fc0e0
536 .long 0xff0ffff0
537 __v7_proc __v7_ca17mp_setup
538 .size __v7_ca17mp_proc_info, . - __v7_ca17mp_proc_info
539
540 /*
508 * Qualcomm Inc. Krait processors. 541 * Qualcomm Inc. Krait processors.
509 */ 542 */
510 .type __krait_proc_info, #object 543 .type __krait_proc_info, #object
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index 0c93588fcb91..1ca37c72f12f 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -123,6 +123,11 @@ __v7m_setup:
123 mov pc, lr 123 mov pc, lr
124ENDPROC(__v7m_setup) 124ENDPROC(__v7m_setup)
125 125
126 .align 2
127__v7m_setup_stack:
128 .space 4 * 8 @ 8 registers
129__v7m_setup_stack_top:
130
126 define_processor_functions v7m, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1 131 define_processor_functions v7m, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
127 132
128 .section ".rodata" 133 .section ".rodata"
@@ -152,6 +157,3 @@ __v7m_proc_info:
152 .long nop_cache_fns @ proc_info_list.cache 157 .long nop_cache_fns @ proc_info_list.cache
153 .size __v7m_proc_info, . - __v7m_proc_info 158 .size __v7m_proc_info, . - __v7m_proc_info
154 159
155__v7m_setup_stack:
156 .space 4 * 8 @ 8 registers
157__v7m_setup_stack_top:
diff --git a/arch/arm/plat-samsung/s5p-sleep.S b/arch/arm/plat-samsung/s5p-sleep.S
index c5001659bdf8..25c68ceb9e2b 100644
--- a/arch/arm/plat-samsung/s5p-sleep.S
+++ b/arch/arm/plat-samsung/s5p-sleep.S
@@ -22,7 +22,6 @@
22*/ 22*/
23 23
24#include <linux/linkage.h> 24#include <linux/linkage.h>
25#include <asm/asm-offsets.h>
26 25
27 .data 26 .data
28 .align 27 .align
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index f0759e70fb86..fe6ca574d093 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -22,11 +22,10 @@
22@ r9 = normal "successful" return address 22@ r9 = normal "successful" return address
23@ r10 = this threads thread_info structure 23@ r10 = this threads thread_info structure
24@ lr = unrecognised instruction return address 24@ lr = unrecognised instruction return address
25@ IRQs disabled. 25@ IRQs enabled.
26@ 26@
27ENTRY(do_vfp) 27ENTRY(do_vfp)
28 inc_preempt_count r10, r4 28 inc_preempt_count r10, r4
29 enable_irq
30 ldr r4, .LCvfp 29 ldr r4, .LCvfp
31 ldr r11, [r10, #TI_CPU] @ CPU number 30 ldr r11, [r10, #TI_CPU] @ CPU number
32 add r10, r10, #TI_VFPSTATE @ r10 = workspace 31 add r10, r10, #TI_VFPSTATE @ r10 = workspace
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 63b5eff0a80f..fdd7e1b61f60 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -47,6 +47,7 @@ struct amba_driver {
47enum amba_vendor { 47enum amba_vendor {
48 AMBA_VENDOR_ARM = 0x41, 48 AMBA_VENDOR_ARM = 0x41,
49 AMBA_VENDOR_ST = 0x80, 49 AMBA_VENDOR_ST = 0x80,
50 AMBA_VENDOR_QCOM = 0x51,
50}; 51};
51 52
52extern struct bus_type amba_bustype; 53extern struct bus_type amba_bustype;
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index f73cabf59012..38bbf95109da 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -320,6 +320,8 @@ extern unsigned long get_safe_page(gfp_t gfp_mask);
320extern void hibernation_set_ops(const struct platform_hibernation_ops *ops); 320extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
321extern int hibernate(void); 321extern int hibernate(void);
322extern bool system_entering_hibernation(void); 322extern bool system_entering_hibernation(void);
323asmlinkage int swsusp_save(void);
324extern struct pbe *restore_pblist;
323#else /* CONFIG_HIBERNATION */ 325#else /* CONFIG_HIBERNATION */
324static inline void register_nosave_region(unsigned long b, unsigned long e) {} 326static inline void register_nosave_region(unsigned long b, unsigned long e) {}
325static inline void register_nosave_region_late(unsigned long b, unsigned long e) {} 327static inline void register_nosave_region_late(unsigned long b, unsigned long e) {}
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index edff2b97b864..c52f827ba6ce 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -32,6 +32,7 @@ struct vm_area_struct;
32struct mm_struct; 32struct mm_struct;
33struct inode; 33struct inode;
34struct notifier_block; 34struct notifier_block;
35struct page;
35 36
36#define UPROBE_HANDLER_REMOVE 1 37#define UPROBE_HANDLER_REMOVE 1
37#define UPROBE_HANDLER_MASK 1 38#define UPROBE_HANDLER_MASK 1
@@ -127,6 +128,8 @@ extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned l
127extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs); 128extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
128extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs); 129extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
129extern bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs); 130extern bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
131extern void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
132 void *src, unsigned long len);
130#else /* !CONFIG_UPROBES */ 133#else /* !CONFIG_UPROBES */
131struct uprobes_state { 134struct uprobes_state {
132}; 135};
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 04709b66369d..4968213c63fa 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1296,14 +1296,8 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
1296 if (unlikely(!xol_vaddr)) 1296 if (unlikely(!xol_vaddr))
1297 return 0; 1297 return 0;
1298 1298
1299 /* Initialize the slot */ 1299 arch_uprobe_copy_ixol(area->page, xol_vaddr,
1300 copy_to_page(area->page, xol_vaddr, 1300 &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
1301 &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
1302 /*
1303 * We probably need flush_icache_user_range() but it needs vma.
1304 * This should work on supported architectures too.
1305 */
1306 flush_dcache_page(area->page);
1307 1301
1308 return xol_vaddr; 1302 return xol_vaddr;
1309} 1303}
@@ -1346,6 +1340,21 @@ static void xol_free_insn_slot(struct task_struct *tsk)
1346 } 1340 }
1347} 1341}
1348 1342
1343void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
1344 void *src, unsigned long len)
1345{
1346 /* Initialize the slot */
1347 copy_to_page(page, vaddr, src, len);
1348
1349 /*
1350 * We probably need flush_icache_user_range() but it needs vma.
1351 * This should work on most of architectures by default. If
1352 * architecture needs to do something different it can define
1353 * its own version of the function.
1354 */
1355 flush_dcache_page(page);
1356}
1357
1349/** 1358/**
1350 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs 1359 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
1351 * @regs: Reflects the saved state of the task after it has hit a breakpoint 1360 * @regs: Reflects the saved state of the task after it has hit a breakpoint