-rw-r--r--  Documentation/devicetree/bindings/arm/pmu.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt | 7
-rw-r--r--  arch/arm/Kconfig | 6
-rw-r--r--  arch/arm/Makefile | 10
-rw-r--r--  arch/arm/boot/compressed/head.S | 52
-rw-r--r--  arch/arm/include/asm/Kbuild | 1
-rw-r--r--  arch/arm/include/asm/assembler.h | 3
-rw-r--r--  arch/arm/include/asm/auxvec.h | 1
-rw-r--r--  arch/arm/include/asm/cputype.h | 16
-rw-r--r--  arch/arm/include/asm/elf.h | 11
-rw-r--r--  arch/arm/include/asm/futex.h | 2
-rw-r--r--  arch/arm/include/asm/mmu.h | 3
-rw-r--r--  arch/arm/include/asm/pmu.h | 1
-rw-r--r--  arch/arm/include/asm/smp_plat.h | 1
-rw-r--r--  arch/arm/include/asm/uaccess.h | 10
-rw-r--r--  arch/arm/include/asm/unified.h | 8
-rw-r--r--  arch/arm/include/asm/vdso.h | 32
-rw-r--r--  arch/arm/include/asm/vdso_datapage.h | 60
-rw-r--r--  arch/arm/include/asm/word-at-a-time.h | 2
-rw-r--r--  arch/arm/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/arm/include/uapi/asm/auxvec.h | 7
-rw-r--r--  arch/arm/kernel/Makefile | 5
-rw-r--r--  arch/arm/kernel/asm-offsets.c | 5
-rw-r--r--  arch/arm/kernel/bios32.c | 10
-rw-r--r--  arch/arm/kernel/entry-armv.S | 2
-rw-r--r--  arch/arm/kernel/head.S | 14
-rw-r--r--  arch/arm/kernel/hibernate.c | 6
-rw-r--r--  arch/arm/kernel/machine_kexec.c | 3
-rw-r--r--  arch/arm/kernel/module.c | 38
-rw-r--r--  arch/arm/kernel/perf_event.c | 21
-rw-r--r--  arch/arm/kernel/perf_event_cpu.c | 71
-rw-r--r--  arch/arm/kernel/perf_event_v7.c | 525
-rw-r--r--  arch/arm/kernel/process.c | 159
-rw-r--r--  arch/arm/kernel/psci-call.S | 31
-rw-r--r--  arch/arm/kernel/psci.c | 39
-rw-r--r--  arch/arm/kernel/reboot.c | 155
-rw-r--r--  arch/arm/kernel/reboot.h | 7
-rw-r--r--  arch/arm/kernel/return_address.c | 4
-rw-r--r--  arch/arm/kernel/setup.c | 44
-rw-r--r--  arch/arm/kernel/sleep.S | 15
-rw-r--r--  arch/arm/kernel/smp.c | 5
-rw-r--r--  arch/arm/kernel/swp_emulate.c | 2
-rw-r--r--  arch/arm/kernel/vdso.c | 337
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S | 7
-rw-r--r--  arch/arm/lib/clear_user.S | 2
-rw-r--r--  arch/arm/lib/copy_to_user.S | 2
-rw-r--r--  arch/arm/lib/csumpartialcopyuser.S | 2
-rw-r--r--  arch/arm/lib/delay.c | 6
-rw-r--r--  arch/arm/mach-exynos/sleep.S | 31
-rw-r--r--  arch/arm/mach-s5pv210/sleep.S | 2
-rw-r--r--  arch/arm/mach-vexpress/Kconfig | 1
-rw-r--r--  arch/arm/mm/Kconfig | 16
-rw-r--r--  arch/arm/mm/alignment.c | 6
-rw-r--r--  arch/arm/mm/cache-l2x0.c | 7
-rw-r--r--  arch/arm/mm/cache-v7.S | 38
-rw-r--r--  arch/arm/mm/dma-mapping.c | 116
-rw-r--r--  arch/arm/mm/init.c | 49
-rw-r--r--  arch/arm/mm/proc-arm1020.S | 4
-rw-r--r--  arch/arm/mm/proc-arm1020e.S | 4
-rw-r--r--  arch/arm/mm/proc-arm1022.S | 4
-rw-r--r--  arch/arm/mm/proc-arm1026.S | 4
-rw-r--r--  arch/arm/mm/proc-arm720.S | 4
-rw-r--r--  arch/arm/mm/proc-arm740.S | 4
-rw-r--r--  arch/arm/mm/proc-arm7tdmi.S | 4
-rw-r--r--  arch/arm/mm/proc-arm920.S | 4
-rw-r--r--  arch/arm/mm/proc-arm922.S | 4
-rw-r--r--  arch/arm/mm/proc-arm925.S | 4
-rw-r--r--  arch/arm/mm/proc-arm926.S | 4
-rw-r--r--  arch/arm/mm/proc-arm940.S | 30
-rw-r--r--  arch/arm/mm/proc-arm946.S | 26
-rw-r--r--  arch/arm/mm/proc-arm9tdmi.S | 4
-rw-r--r--  arch/arm/mm/proc-fa526.S | 4
-rw-r--r--  arch/arm/mm/proc-feroceon.S | 5
-rw-r--r--  arch/arm/mm/proc-macros.S | 28
-rw-r--r--  arch/arm/mm/proc-mohawk.S | 4
-rw-r--r--  arch/arm/mm/proc-sa110.S | 4
-rw-r--r--  arch/arm/mm/proc-sa1100.S | 4
-rw-r--r--  arch/arm/mm/proc-v6.S | 4
-rw-r--r--  arch/arm/mm/proc-v7-2level.S | 12
-rw-r--r--  arch/arm/mm/proc-v7.S | 56
-rw-r--r--  arch/arm/mm/proc-v7m.S | 4
-rw-r--r--  arch/arm/mm/proc-xsc3.S | 4
-rw-r--r--  arch/arm/mm/proc-xscale.S | 4
-rw-r--r--  arch/arm/nwfpe/entry.S | 2
-rw-r--r--  arch/arm/vdso/.gitignore | 1
-rw-r--r--  arch/arm/vdso/Makefile | 74
-rw-r--r--  arch/arm/vdso/datapage.S | 15
-rw-r--r--  arch/arm/vdso/vdso.S | 35
-rw-r--r--  arch/arm/vdso/vdso.lds.S | 87
-rw-r--r--  arch/arm/vdso/vdsomunge.c | 201
-rw-r--r--  arch/arm/vdso/vgettimeofday.c | 282
-rw-r--r--  drivers/amba/tegra-ahb.c | 78
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 2
93 files changed, 2429 insertions(+), 604 deletions(-)
diff --git a/Documentation/devicetree/bindings/arm/pmu.txt b/Documentation/devicetree/bindings/arm/pmu.txt
index 75ef91d08f3b..6e54a9d88b7a 100644
--- a/Documentation/devicetree/bindings/arm/pmu.txt
+++ b/Documentation/devicetree/bindings/arm/pmu.txt
@@ -18,6 +18,8 @@ Required properties:
 	"arm,arm11mpcore-pmu"
 	"arm,arm1176-pmu"
 	"arm,arm1136-pmu"
+	"qcom,scorpion-pmu"
+	"qcom,scorpion-mp-pmu"
 	"qcom,krait-pmu"
 - interrupts : 1 combined interrupt or 1 per core. If the interrupt is a per-cpu
   interrupt (PPI) then 1 interrupt should be specified.
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt
index 067c9790062f..9a4295b54539 100644
--- a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt
+++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt
@@ -5,9 +5,12 @@ Required properties:
   Tegra30, must contain "nvidia,tegra30-ahb".  Otherwise, must contain
   '"nvidia,<chip>-ahb", "nvidia,tegra30-ahb"' where <chip> is tegra124,
   tegra132, or tegra210.
-- reg : Should contain 1 register ranges(address and length)
+- reg : Should contain 1 register ranges(address and length). For
+  Tegra20, Tegra30, and Tegra114 chips, the value must be <0x6000c004
+  0x10c>. For Tegra124, Tegra132 and Tegra210 chips, the value should
+  be <0x6000c000 0x150>.
 
-Example:
+Example (for a Tegra20 chip):
 	ahb: ahb@6000c004 {
 		compatible = "nvidia,tegra20-ahb";
 		reg = <0x6000c004 0x10c>; /* AHB Arbitration + Gizmo Controller */
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 4b62f4caf0ce..da1266c53c13 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -21,6 +21,7 @@ config ARM
 	select GENERIC_IDLE_POLL_SETUP
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
+	select GENERIC_IRQ_SHOW_LEVEL
 	select GENERIC_PCI_IOMAP
 	select GENERIC_SCHED_CLOCK
 	select GENERIC_SMP_IDLE_THREAD
@@ -1063,7 +1064,7 @@ config ARM_ERRATA_430973
 	depends on CPU_V7
 	help
 	  This option enables the workaround for the 430973 Cortex-A8
-	  (r1p0..r1p2) erratum. If a code sequence containing an ARM/Thumb
+	  r1p* erratum. If a code sequence containing an ARM/Thumb
 	  interworking branch is replaced with another code sequence at the
 	  same virtual address, whether due to self-modifying code or virtual
 	  to physical address re-mapping, Cortex-A8 does not recover from the
@@ -1132,6 +1133,7 @@ config ARM_ERRATA_742231
 config ARM_ERRATA_643719
 	bool "ARM errata: LoUIS bit field in CLIDR register is incorrect"
 	depends on CPU_V7 && SMP
+	default y
 	help
 	  This option enables the workaround for the 643719 Cortex-A9 (prior to
 	  r1p0) erratum. On affected cores the LoUIS bit field of the CLIDR
@@ -1349,7 +1351,7 @@ config SMP
 	  If you don't know what to do here, say N.
 
 config SMP_ON_UP
-	bool "Allow booting SMP kernel on uniprocessor systems (EXPERIMENTAL)"
+	bool "Allow booting SMP kernel on uniprocessor systems"
 	depends on SMP && !XIP_KERNEL && MMU
 	default y
 	help
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index eb7bb511f853..5575d9fa8806 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -13,7 +13,7 @@
 # Ensure linker flags are correct
 LDFLAGS		:=
 
-LDFLAGS_vmlinux	:=-p --no-undefined -X
+LDFLAGS_vmlinux	:=-p --no-undefined -X --pic-veneer
 ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
 LDFLAGS_vmlinux	+= --be8
 LDFLAGS_MODULE	+= --be8
@@ -264,6 +264,7 @@ core-$(CONFIG_FPE_FASTFPE) += $(FASTFPE_OBJ)
 core-$(CONFIG_VFP)		+= arch/arm/vfp/
 core-$(CONFIG_XEN)		+= arch/arm/xen/
 core-$(CONFIG_KVM_ARM_HOST)	+= arch/arm/kvm/
+core-$(CONFIG_VDSO)		+= arch/arm/vdso/
 
 # If we have a machine-specific directory, then include it in the build.
 core-y				+= arch/arm/kernel/ arch/arm/mm/ arch/arm/common/
@@ -321,6 +322,12 @@ dtbs: prepare scripts
 dtbs_install:
 	$(Q)$(MAKE) $(dtbinst)=$(boot)/dts
 
+PHONY += vdso_install
+vdso_install:
+ifeq ($(CONFIG_VDSO),y)
+	$(Q)$(MAKE) $(build)=arch/arm/vdso $@
+endif
+
 # We use MRPROPER_FILES and CLEAN_FILES now
 archclean:
 	$(Q)$(MAKE) $(clean)=$(boot)
@@ -345,4 +352,5 @@ define archhelp
   echo  '  Install using (your) ~/bin/$(INSTALLKERNEL) or'
   echo  '  (distribution) /sbin/$(INSTALLKERNEL) or'
   echo  '  install to $$(INSTALL_PATH) and run lilo'
+  echo  '  vdso_install  - Install unstripped vdso.so to $$(INSTALL_MOD_PATH)/vdso'
 endef
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index c41a793b519c..2c45b5709fa4 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -10,8 +10,11 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/v7m.h>
+
+ AR_CLASS(	.arch	armv7-a	)
+ M_CLASS(	.arch	armv7-m	)
 
-	.arch	armv7-a
 /*
  * Debugging stuff
  *
@@ -114,7 +117,12 @@
  * sort out different calling conventions
  */
 		.align
-		.arm				@ Always enter in ARM state
+		/*
+		 * Always enter in ARM state for CPUs that support the ARM ISA.
+		 * As of today (2014) that's exactly the members of the A and R
+		 * classes.
+		 */
+ AR_CLASS(	.arm	)
 start:
 		.type	start,#function
 		.rept	7
@@ -132,14 +140,15 @@ start:
 
 THUMB(		.thumb			)
 1:
 ARM_BE8(	setend	be		)	@ go BE8 if compiled for BE8
-		mrs	r9, cpsr
+ AR_CLASS(	mrs	r9, cpsr	)
 #ifdef CONFIG_ARM_VIRT_EXT
 		bl	__hyp_stub_install	@ get into SVC mode, reversibly
 #endif
 		mov	r7, r1			@ save architecture ID
 		mov	r8, r2			@ save atags pointer
 
+#ifndef CONFIG_CPU_V7M
 	/*
 	 * Booting from Angel - need to enter SVC mode and disable
 	 * FIQs/IRQs (numeric definitions from angel arm.h source).
@@ -155,6 +164,7 @@ not_angel:
 		safe_svcmode_maskall r0
 		msr	spsr_cxsf, r9		@ Save the CPU boot mode in
 						@ SPSR
+#endif
 	/*
 	 * Note that some cache flushing and other stuff may
 	 * be needed here - is there an Angel SWI call for this?
@@ -168,9 +178,26 @@ not_angel:
 		.text
 
 #ifdef CONFIG_AUTO_ZRELADDR
-		@ determine final kernel image address
+		/*
+		 * Find the start of physical memory.  As we are executing
+		 * without the MMU on, we are in the physical address space.
+		 * We just need to get rid of any offset by aligning the
+		 * address.
+		 *
+		 * This alignment is a balance between the requirements of
+		 * different platforms - we have chosen 128MB to allow
+		 * platforms which align the start of their physical memory
+		 * to 128MB to use this feature, while allowing the zImage
+		 * to be placed within the first 128MB of memory on other
+		 * platforms.  Increasing the alignment means we place
+		 * stricter alignment requirements on the start of physical
+		 * memory, but relaxing it means that we break people who
+		 * are already placing their zImage in (eg) the top 64MB
+		 * of this range.
+		 */
 		mov	r4, pc
 		and	r4, r4, #0xf8000000
+		/* Determine final kernel image address. */
 		add	r4, r4, #TEXT_OFFSET
 #else
 		ldr	r4, =zreladdr
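
For illustration, the rounding performed by the two instructions above can be reproduced standalone in C (a sketch only; the execution address used is a made-up example, not something from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pc = 0x82008000u;		/* hypothetical zImage execution address */
	uint32_t base = pc & 0xf8000000u;	/* same mask as 'and r4, r4, #0xf8000000' */

	printf("inferred start of RAM: %#x\n", base);	/* prints 0x80000000 */
	return 0;
}
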
@@ -810,6 +837,16 @@ __common_mmu_cache_on:
 call_cache_fn:	adr	r12, proc_types
 #ifdef CONFIG_CPU_CP15
 		mrc	p15, 0, r9, c0, c0	@ get processor ID
+#elif defined(CONFIG_CPU_V7M)
+		/*
+		 * On v7-M the processor id is located in the V7M_SCB_CPUID
+		 * register, but as cache handling is IMPLEMENTATION DEFINED on
+		 * v7-M (if existant at all) we just return early here.
+		 * If V7M_SCB_CPUID were used the cpu ID functions (i.e.
+		 * __armv7_mmu_cache_{on,off,flush}) would be selected which
+		 * use cp15 registers that are not implemented on v7-M.
+		 */
+		bx	lr
 #else
 		ldr	r9, =CONFIG_PROCESSOR_ID
 #endif
@@ -1310,8 +1347,9 @@ __hyp_reentry_vectors:
 
 __enter_kernel:
 		mov	r0, #0			@ must be 0
 ARM(		mov	pc, r4		)	@ call kernel
-THUMB(		bx	r4	)		@ entry point is always ARM
+ M_CLASS(	add	r4, r4, #1	)	@ enter in Thumb mode for M class
+ THUMB(		bx	r4	)		@ entry point is always ARM for A/R classes
 
 reloc_code_end:
 
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index fe74c0d1e485..eb0f43f3e3f1 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -1,6 +1,5 @@
 
 
-generic-y += auxvec.h
 generic-y += bitsperlong.h
 generic-y += cputime.h
 generic-y += current.h
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index f67fd3afebdf..186270b3e194 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -237,6 +237,9 @@
 	.pushsection ".alt.smp.init", "a"		;\
 	.long	9998b					;\
 9997:	instr						;\
+	.if . - 9997b == 2				;\
+		nop					;\
+	.endif						;\
 	.if . - 9997b != 4				;\
 		.error "ALT_UP() content must assemble to exactly 4 bytes";\
 	.endif						;\
diff --git a/arch/arm/include/asm/auxvec.h b/arch/arm/include/asm/auxvec.h
new file mode 100644
index 000000000000..fbd388c46299
--- /dev/null
+++ b/arch/arm/include/asm/auxvec.h
@@ -0,0 +1 @@
+#include <uapi/asm/auxvec.h>
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 819777d0e91f..85e374f873ac 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -253,4 +253,20 @@ static inline int cpu_is_pj4(void)
 #else
 #define cpu_is_pj4()	0
 #endif
+
+static inline int __attribute_const__ cpuid_feature_extract_field(u32 features,
+								   int field)
+{
+	int feature = (features >> field) & 15;
+
+	/* feature registers are signed values */
+	if (feature > 8)
+		feature -= 16;
+
+	return feature;
+}
+
+#define cpuid_feature_extract(reg, field) \
+	cpuid_feature_extract_field(read_cpuid_ext(reg), field)
+
 #endif
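
The sign handling in the new helper can be exercised in isolation; here is a minimal userspace re-implementation (illustrative only, with a made-up feature-register value) showing how the signed four-bit fields decode:

#include <stdint.h>
#include <stdio.h>

static int feature_extract_field(uint32_t features, int field)
{
	int feature = (features >> field) & 15;

	if (feature > 8)	/* same sign handling as cpuid_feature_extract_field() */
		feature -= 16;
	return feature;
}

int main(void)
{
	uint32_t reg = 0x000f1231u;	/* hypothetical feature register */

	printf("%d\n", feature_extract_field(reg, 16));	/* 0xf decodes to -1 (not implemented) */
	printf("%d\n", feature_extract_field(reg, 8));	/* 0x2 decodes to 2 */
	return 0;
}
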
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index c1ff8ab12914..d2315ffd8f12 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -1,7 +1,9 @@
 #ifndef __ASMARM_ELF_H
 #define __ASMARM_ELF_H
 
+#include <asm/auxvec.h>
 #include <asm/hwcap.h>
+#include <asm/vdso_datapage.h>
 
 /*
  * ELF register definitions..
@@ -115,7 +117,7 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
    the loader.  We need to make sure that it is out of the way of the program
    that it will "exec", and that there is sufficient room for the brk. */
 
-#define ELF_ET_DYN_BASE	(2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE	(TASK_SIZE / 3 * 2)
 
 /* When the program starts, a1 contains a pointer to a function to be
    registered with atexit, as per the SVR4 ABI.  A value of 0 means we
@@ -126,6 +128,13 @@ extern void elf_set_personality(const struct elf32_hdr *);
 #define SET_PERSONALITY(ex)	elf_set_personality(&(ex))
 
 #ifdef CONFIG_MMU
+#ifdef CONFIG_VDSO
+#define ARCH_DLINFO						\
+do {								\
+	NEW_AUX_ENT(AT_SYSINFO_EHDR,				\
+		    (elf_addr_t)current->mm->context.vdso);	\
+} while (0)
+#endif
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
 struct linux_binprm;
 int arch_setup_additional_pages(struct linux_binprm *, int);
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 53e69dae796f..4e78065a16aa 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -13,7 +13,7 @@
 	"	.align	3\n"					\
 	"	.long	1b, 4f, 2b, 4f\n"			\
 	"	.popsection\n"					\
-	"	.pushsection .fixup,\"ax\"\n"			\
+	"	.pushsection .text.fixup,\"ax\"\n"		\
 	"	.align	2\n"					\
 	"4:	mov	%0, " err_reg "\n"			\
 	"	b	3b\n"					\
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 64fd15159b7d..a5b47421059d 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -11,6 +11,9 @@ typedef struct {
 #endif
 	unsigned int	vmalloc_seq;
 	unsigned long	sigpage;
+#ifdef CONFIG_VDSO
+	unsigned long	vdso;
+#endif
 } mm_context_t;
 
 #ifdef CONFIG_CPU_HAS_ASID
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index b1596bd59129..675e4ab79f68 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -92,6 +92,7 @@ struct pmu_hw_events {
 struct arm_pmu {
 	struct pmu	pmu;
 	cpumask_t	active_irqs;
+	int		*irq_affinity;
 	char		*name;
 	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
 	void		(*enable)(struct perf_event *event);
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 0ad7d490ee6f..993e5224d8f7 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -104,6 +104,7 @@ static inline u32 mpidr_hash_size(void)
 	return 1 << mpidr_hash.bits;
 }
 
+extern int platform_can_secondary_boot(void);
 extern int platform_can_cpu_hotplug(void);
 
 #endif
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index ce0786efd26c..74b17d09ef7a 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -315,7 +315,7 @@ do { \
 	__asm__ __volatile__(					\
 	"1:	" TUSER(ldrb) "	%1,[%2],#0\n"			\
 	"2:\n"							\
-	"	.pushsection .fixup,\"ax\"\n"			\
+	"	.pushsection .text.fixup,\"ax\"\n"		\
 	"	.align	2\n"					\
 	"3:	mov	%0, %3\n"				\
 	"	mov	%1, #0\n"				\
@@ -351,7 +351,7 @@ do { \
 	__asm__ __volatile__(					\
 	"1:	" TUSER(ldr) "	%1,[%2],#0\n"			\
 	"2:\n"							\
-	"	.pushsection .fixup,\"ax\"\n"			\
+	"	.pushsection .text.fixup,\"ax\"\n"		\
 	"	.align	2\n"					\
 	"3:	mov	%0, %3\n"				\
 	"	mov	%1, #0\n"				\
@@ -397,7 +397,7 @@ do { \
 	__asm__ __volatile__(					\
 	"1:	" TUSER(strb) "	%1,[%2],#0\n"			\
 	"2:\n"							\
-	"	.pushsection .fixup,\"ax\"\n"			\
+	"	.pushsection .text.fixup,\"ax\"\n"		\
 	"	.align	2\n"					\
 	"3:	mov	%0, %3\n"				\
 	"	b	2b\n"					\
@@ -430,7 +430,7 @@ do { \
 	__asm__ __volatile__(					\
 	"1:	" TUSER(str) "	%1,[%2],#0\n"			\
 	"2:\n"							\
-	"	.pushsection .fixup,\"ax\"\n"			\
+	"	.pushsection .text.fixup,\"ax\"\n"		\
 	"	.align	2\n"					\
 	"3:	mov	%0, %3\n"				\
 	"	b	2b\n"					\
@@ -458,7 +458,7 @@ do { \
 	THUMB(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1]\n"	)	\
 	THUMB(	"2:	" TUSER(str) "	" __reg_oper0 ", [%1, #4]\n"	)	\
 	"3:\n"							\
-	"	.pushsection .fixup,\"ax\"\n"			\
+	"	.pushsection .text.fixup,\"ax\"\n"		\
 	"	.align	2\n"					\
 	"4:	mov	%0, %3\n"				\
 	"	b	3b\n"					\
diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h
index b88beaba6b4a..200f9a7cd623 100644
--- a/arch/arm/include/asm/unified.h
+++ b/arch/arm/include/asm/unified.h
@@ -24,6 +24,14 @@
 	.syntax unified
 #endif
 
+#ifdef CONFIG_CPU_V7M
+#define AR_CLASS(x...)
+#define M_CLASS(x...)	x
+#else
+#define AR_CLASS(x...)	x
+#define M_CLASS(x...)
+#endif
+
 #ifdef CONFIG_THUMB2_KERNEL
 
 #if __GNUC__ < 4
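
A minimal sketch of how the new class-selection macros behave (CONFIG_CPU_V7M is normally supplied by Kconfig; defining it by hand is only for illustration here):

#include <stdio.h>

#ifdef CONFIG_CPU_V7M
#define AR_CLASS(x...)
#define M_CLASS(x...)	x
#else
#define AR_CLASS(x...)	x
#define M_CLASS(x...)
#endif

int main(void)
{
	/* exactly one of these survives preprocessing */
	AR_CLASS(puts("A/R-class build");)
	M_CLASS(puts("M-class build");)
	return 0;
}
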
diff --git a/arch/arm/include/asm/vdso.h b/arch/arm/include/asm/vdso.h
new file mode 100644
index 000000000000..d0295f1dd1a3
--- /dev/null
+++ b/arch/arm/include/asm/vdso.h
@@ -0,0 +1,32 @@
+#ifndef __ASM_VDSO_H
+#define __ASM_VDSO_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+struct mm_struct;
+
+#ifdef CONFIG_VDSO
+
+void arm_install_vdso(struct mm_struct *mm, unsigned long addr);
+
+extern char vdso_start, vdso_end;
+
+extern unsigned int vdso_total_pages;
+
+#else /* CONFIG_VDSO */
+
+static inline void arm_install_vdso(struct mm_struct *mm, unsigned long addr)
+{
+}
+
+#define vdso_total_pages 0
+
+#endif /* CONFIG_VDSO */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_VDSO_H */
diff --git a/arch/arm/include/asm/vdso_datapage.h b/arch/arm/include/asm/vdso_datapage.h
new file mode 100644
index 000000000000..9be259442fca
--- /dev/null
+++ b/arch/arm/include/asm/vdso_datapage.h
@@ -0,0 +1,60 @@
+/*
+ * Adapted from arm64 version.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_VDSO_DATAPAGE_H
+#define __ASM_VDSO_DATAPAGE_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <asm/page.h>
+
+/* Try to be cache-friendly on systems that don't implement the
+ * generic timer: fit the unconditionally updated fields in the first
+ * 32 bytes.
+ */
+struct vdso_data {
+	u32 seq_count;		/* sequence count - odd during updates */
+	u16 tk_is_cntvct;	/* fall back to syscall if false */
+	u16 cs_shift;		/* clocksource shift */
+	u32 xtime_coarse_sec;	/* coarse time */
+	u32 xtime_coarse_nsec;
+
+	u32 wtm_clock_sec;	/* wall to monotonic offset */
+	u32 wtm_clock_nsec;
+	u32 xtime_clock_sec;	/* CLOCK_REALTIME - seconds */
+	u32 cs_mult;		/* clocksource multiplier */
+
+	u64 cs_cycle_last;	/* last cycle value */
+	u64 cs_mask;		/* clocksource mask */
+
+	u64 xtime_clock_snsec;	/* CLOCK_REALTIME sub-ns base */
+	u32 tz_minuteswest;	/* timezone info for gettimeofday(2) */
+	u32 tz_dsttime;
+};
+
+union vdso_data_store {
+	struct vdso_data data;
+	u8 page[PAGE_SIZE];
+};
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_VDSO_DATAPAGE_H */
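
The seq_count field implies the usual lock-free read protocol: the updater makes the count odd before touching the page and even again afterwards, so a reader retries until it sees the same even value around its reads. A hedged sketch of the reader side (read_seq_begin() is an illustrative name, not a helper from this patch, and a real reader also needs memory barriers around the data accesses):

#include <stdint.h>

static uint32_t read_seq_begin(const volatile uint32_t *seq_count)
{
	uint32_t seq;

	do {
		seq = *seq_count;	/* odd means an update is in flight */
	} while (seq & 1);
	return seq;			/* caller re-checks this value after reading */
}
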
diff --git a/arch/arm/include/asm/word-at-a-time.h b/arch/arm/include/asm/word-at-a-time.h
index a6d0a29861e7..5831dce4b51c 100644
--- a/arch/arm/include/asm/word-at-a-time.h
+++ b/arch/arm/include/asm/word-at-a-time.h
@@ -71,7 +71,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
 	asm(
 	"1:	ldr	%0, [%2]\n"
 	"2:\n"
-	"	.pushsection .fixup,\"ax\"\n"
+	"	.pushsection .text.fixup,\"ax\"\n"
 	"	.align 2\n"
 	"3:	and	%1, %2, #0x3\n"
 	"	bic	%2, %2, #0x3\n"
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild
index 70a1c9da30ca..a1c05f93d920 100644
--- a/arch/arm/include/uapi/asm/Kbuild
+++ b/arch/arm/include/uapi/asm/Kbuild
@@ -1,6 +1,7 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+header-y += auxvec.h
 header-y += byteorder.h
 header-y += fcntl.h
 header-y += hwcap.h
diff --git a/arch/arm/include/uapi/asm/auxvec.h b/arch/arm/include/uapi/asm/auxvec.h
new file mode 100644
index 000000000000..cb02a767a500
--- /dev/null
+++ b/arch/arm/include/uapi/asm/auxvec.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_AUXVEC_H
+#define __ASM_AUXVEC_H
+
+/* VDSO location */
+#define AT_SYSINFO_EHDR	33
+
+#endif
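
Userspace can retrieve this auxiliary-vector entry with the standard glibc helper, for example:

#include <stdio.h>
#include <sys/auxv.h>		/* getauxval(), glibc 2.16+ */
#include <elf.h>		/* AT_SYSINFO_EHDR */

int main(void)
{
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);	/* 0 if no vDSO is mapped */

	printf("vDSO ELF header at %#lx\n", vdso);
	return 0;
}
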
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 902397dd1000..ba5f83226011 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -16,7 +16,7 @@ CFLAGS_REMOVE_return_address.o = -pg
 # Object file lists.
 
 obj-y		:= elf.o entry-common.o irq.o opcodes.o \
-		   process.o ptrace.o return_address.o \
+		   process.o ptrace.o reboot.o return_address.o \
 		   setup.o signal.o sigreturn_codes.o \
 		   stacktrace.o sys_arm.o time.o traps.o
 
@@ -75,6 +75,7 @@ obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o perf_event_cpu.o
 CFLAGS_pj4-cp0.o	:= -marm
 AFLAGS_iwmmxt.o		:= -Wa,-mcpu=iwmmxt
 obj-$(CONFIG_ARM_CPU_TOPOLOGY)	+= topology.o
+obj-$(CONFIG_VDSO)		+= vdso.o
 
 ifneq ($(CONFIG_ARCH_EBSA110),y)
   obj-y		+= io.o
@@ -86,7 +87,7 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
 
 obj-$(CONFIG_ARM_VIRT_EXT)	+= hyp-stub.o
 ifeq ($(CONFIG_ARM_PSCI),y)
-obj-y		+= psci.o
+obj-y		+= psci.o psci-call.o
 obj-$(CONFIG_SMP)		+= psci_smp.o
 endif
 
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 488eaac56028..61bb5a65eb37 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -25,6 +25,7 @@
 #include <asm/memory.h>
 #include <asm/procinfo.h>
 #include <asm/suspend.h>
+#include <asm/vdso_datapage.h>
 #include <asm/hardware/cache-l2x0.h>
 #include <linux/kbuild.h>
 
@@ -206,5 +207,9 @@ int main(void)
   DEFINE(KVM_VGIC_VCTRL,	offsetof(struct kvm, arch.vgic.vctrl_base));
   DEFINE(KVM_VTTBR,		offsetof(struct kvm, arch.vttbr));
 #endif
+  BLANK();
+#ifdef CONFIG_VDSO
+  DEFINE(VDSO_DATA_SIZE,	sizeof(union vdso_data_store));
+#endif
   return 0;
 }
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index ab19b7c03423..fcbbbb1b9e95 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -618,21 +618,15 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 			enum pci_mmap_state mmap_state, int write_combine)
 {
-	struct pci_sys_data *root = dev->sysdata;
-	unsigned long phys;
-
-	if (mmap_state == pci_mmap_io) {
+	if (mmap_state == pci_mmap_io)
 		return -EINVAL;
-	} else {
-		phys = vma->vm_pgoff + (root->mem_offset >> PAGE_SHIFT);
-	}
 
 	/*
 	 * Mark this as IO
 	 */
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-	if (remap_pfn_range(vma, vma->vm_start, phys,
+	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 			     vma->vm_end - vma->vm_start,
 			     vma->vm_page_prot))
 		return -EAGAIN;
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 672b21942fff..570306c49406 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -545,7 +545,7 @@ ENDPROC(__und_usr)
 /*
  * The out of line fixup for the ldrt instructions above.
  */
-	.pushsection .fixup, "ax"
+	.pushsection .text.fixup, "ax"
 	.align	2
 4:	str	r4, [sp, #S_PC]			@ retry current instruction
 	ret	r9
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 01963273c07a..3637973a9708 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -138,9 +138,9 @@ ENTRY(stext)
 	@ mmu has been enabled
 	adr	lr, BSYM(1f)			@ return (PIC) address
 	mov	r8, r4				@ set TTBR1 to swapper_pg_dir
- ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
- THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
- THUMB(	ret	r12				)
+	ldr	r12, [r10, #PROCINFO_INITFUNC]
+	add	r12, r12, r10
+	ret	r12
 1:	b	__enable_mmu
 ENDPROC(stext)
 	.ltorg
@@ -386,10 +386,10 @@ ENTRY(secondary_startup)
 	ldr	r8, [r7, lr]			@ get secondary_data.swapper_pg_dir
 	adr	lr, BSYM(__enable_mmu)		@ return address
 	mov	r13, r12			@ __secondary_switched address
- ARM(	add	pc, r10, #PROCINFO_INITFUNC	) @ initialise processor
-						  @ (return control reg)
- THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
- THUMB(	ret	r12				)
+	ldr	r12, [r10, #PROCINFO_INITFUNC]
+	add	r12, r12, r10			@ initialise processor
+						@ (return control reg)
+	ret	r12
 ENDPROC(secondary_startup)
 ENDPROC(secondary_startup_arm)
 
diff --git a/arch/arm/kernel/hibernate.c b/arch/arm/kernel/hibernate.c
index c4cc50e58c13..a71501ff6f18 100644
--- a/arch/arm/kernel/hibernate.c
+++ b/arch/arm/kernel/hibernate.c
@@ -22,6 +22,7 @@
 #include <asm/suspend.h>
 #include <asm/memory.h>
 #include <asm/sections.h>
+#include "reboot.h"
 
 int pfn_is_nosave(unsigned long pfn)
 {
@@ -61,7 +62,7 @@ static int notrace arch_save_image(unsigned long unused)
 
 	ret = swsusp_save();
 	if (ret == 0)
-		soft_restart(virt_to_phys(cpu_resume));
+		_soft_restart(virt_to_phys(cpu_resume), false);
 	return ret;
 }
 
@@ -86,7 +87,7 @@ static void notrace arch_restore_image(void *unused)
 	for (pbe = restore_pblist; pbe; pbe = pbe->next)
 		copy_page(pbe->orig_address, pbe->address);
 
-	soft_restart(virt_to_phys(cpu_resume));
+	_soft_restart(virt_to_phys(cpu_resume), false);
 }
 
 static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata;
@@ -99,7 +100,6 @@ static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata;
  */
 int swsusp_arch_resume(void)
 {
-	extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
 	call_with_stack(arch_restore_image, 0,
 		resume_stack + ARRAY_SIZE(resume_stack));
 	return 0;
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index de2b085ad753..8bf3b7c09888 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -46,7 +46,8 @@ int machine_kexec_prepare(struct kimage *image)
 	 * and implements CPU hotplug for the current HW. If not, we won't be
 	 * able to kexec reliably, so fail the prepare operation.
 	 */
-	if (num_possible_cpus() > 1 && !platform_can_cpu_hotplug())
+	if (num_possible_cpus() > 1 && platform_can_secondary_boot() &&
+	    !platform_can_cpu_hotplug())
 		return -EINVAL;
 
 	/*
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 2e11961f65ae..af791f4a6205 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -98,14 +98,19 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 	case R_ARM_PC24:
 	case R_ARM_CALL:
 	case R_ARM_JUMP24:
+		if (sym->st_value & 3) {
+			pr_err("%s: section %u reloc %u sym '%s': unsupported interworking call (ARM -> Thumb)\n",
+			       module->name, relindex, i, symname);
+			return -ENOEXEC;
+		}
+
 		offset = __mem_to_opcode_arm(*(u32 *)loc);
 		offset = (offset & 0x00ffffff) << 2;
 		if (offset & 0x02000000)
 			offset -= 0x04000000;
 
 		offset += sym->st_value - loc;
-		if (offset & 3 ||
-		    offset <= (s32)0xfe000000 ||
+		if (offset <= (s32)0xfe000000 ||
 		    offset >= (s32)0x02000000) {
 			pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
 			       module->name, relindex, i, symname,
@@ -155,6 +160,22 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 #ifdef CONFIG_THUMB2_KERNEL
 	case R_ARM_THM_CALL:
 	case R_ARM_THM_JUMP24:
+		/*
+		 * For function symbols, only Thumb addresses are
+		 * allowed (no interworking).
+		 *
+		 * For non-function symbols, the destination
+		 * has no specific ARM/Thumb disposition, so
+		 * the branch is resolved under the assumption
+		 * that interworking is not required.
+		 */
+		if (ELF32_ST_TYPE(sym->st_info) == STT_FUNC &&
+		    !(sym->st_value & 1)) {
+			pr_err("%s: section %u reloc %u sym '%s': unsupported interworking call (Thumb -> ARM)\n",
+			       module->name, relindex, i, symname);
+			return -ENOEXEC;
+		}
+
 		upper = __mem_to_opcode_thumb16(*(u16 *)loc);
 		lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2));
 
@@ -182,18 +203,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 			offset -= 0x02000000;
 		offset += sym->st_value - loc;
 
-		/*
-		 * For function symbols, only Thumb addresses are
-		 * allowed (no interworking).
-		 *
-		 * For non-function symbols, the destination
-		 * has no specific ARM/Thumb disposition, so
-		 * the branch is resolved under the assumption
-		 * that interworking is not required.
-		 */
-		if ((ELF32_ST_TYPE(sym->st_info) == STT_FUNC &&
-		     !(offset & 1)) ||
-		    offset <= (s32)0xff000000 ||
+		if (offset <= (s32)0xff000000 ||
 		    offset >= (s32)0x01000000) {
 			pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
 			       module->name, relindex, i, symname,
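
For reference, the 24-bit branch decode used in the R_ARM_PC24/R_ARM_CALL/R_ARM_JUMP24 case above can be reproduced standalone (the sample opcode is a made-up example, not from the patch):

#include <stdint.h>
#include <stdio.h>

static int32_t decode_arm_branch_offset(uint32_t insn)
{
	int32_t offset = (insn & 0x00ffffff) << 2;	/* imm24, scaled by 4 */

	if (offset & 0x02000000)			/* sign-extend from bit 25 */
		offset -= 0x04000000;
	return offset;
}

int main(void)
{
	/* 0xebfffffe encodes "bl ." - a branch to itself, i.e. offset -8 */
	printf("%d\n", decode_arm_branch_offset(0xebfffffeu));
	return 0;
}
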
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 557e128e4df0..4a86a0133ac3 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -259,20 +259,29 @@ out:
 }
 
 static int
-validate_event(struct pmu_hw_events *hw_events,
+validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
 	       struct perf_event *event)
 {
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct arm_pmu *armpmu;
 
 	if (is_software_event(event))
 		return 1;
 
+	/*
+	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
+	 * core perf code won't check that the pmu->ctx == leader->ctx
+	 * until after pmu->event_init(event).
+	 */
+	if (event->pmu != pmu)
+		return 0;
+
 	if (event->state < PERF_EVENT_STATE_OFF)
 		return 1;
 
 	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
 		return 1;
 
+	armpmu = to_arm_pmu(event->pmu);
 	return armpmu->get_event_idx(hw_events, event) >= 0;
 }
 
@@ -288,15 +297,15 @@ validate_group(struct perf_event *event)
 	 */
 	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));
 
-	if (!validate_event(&fake_pmu, leader))
+	if (!validate_event(event->pmu, &fake_pmu, leader))
 		return -EINVAL;
 
 	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
-		if (!validate_event(&fake_pmu, sibling))
+		if (!validate_event(event->pmu, &fake_pmu, sibling))
 			return -EINVAL;
 	}
 
-	if (!validate_event(&fake_pmu, event))
+	if (!validate_event(event->pmu, &fake_pmu, event))
 		return -EINVAL;
 
 	return 0;
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 61b53c46edfa..91c7ba182dcd 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -92,11 +92,16 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 		free_percpu_irq(irq, &hw_events->percpu_pmu);
 	} else {
 		for (i = 0; i < irqs; ++i) {
-			if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
+			int cpu = i;
+
+			if (cpu_pmu->irq_affinity)
+				cpu = cpu_pmu->irq_affinity[i];
+
+			if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
 				continue;
 			irq = platform_get_irq(pmu_device, i);
 			if (irq >= 0)
-				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, i));
+				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
 		}
 	}
 }
@@ -128,32 +133,37 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
 	} else {
 		for (i = 0; i < irqs; ++i) {
+			int cpu = i;
+
 			err = 0;
 			irq = platform_get_irq(pmu_device, i);
 			if (irq < 0)
 				continue;
 
+			if (cpu_pmu->irq_affinity)
+				cpu = cpu_pmu->irq_affinity[i];
+
 			/*
 			 * If we have a single PMU interrupt that we can't shift,
 			 * assume that we're running on a uniprocessor machine and
 			 * continue. Otherwise, continue without this interrupt.
 			 */
-			if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+			if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
 				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
-					irq, i);
+					irq, cpu);
 				continue;
 			}
 
 			err = request_irq(irq, handler,
 					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
-					  per_cpu_ptr(&hw_events->percpu_pmu, i));
+					  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
 			if (err) {
 				pr_err("unable to request IRQ%d for ARM PMU counters\n",
 					irq);
 				return err;
 			}
 
-			cpumask_set_cpu(i, &cpu_pmu->active_irqs);
+			cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
 		}
 	}
 
@@ -243,6 +253,8 @@ static const struct of_device_id cpu_pmu_of_device_ids[] = {
 	{.compatible = "arm,arm1176-pmu",	.data = armv6_1176_pmu_init},
 	{.compatible = "arm,arm1136-pmu",	.data = armv6_1136_pmu_init},
 	{.compatible = "qcom,krait-pmu",	.data = krait_pmu_init},
+	{.compatible = "qcom,scorpion-pmu",	.data = scorpion_pmu_init},
+	{.compatible = "qcom,scorpion-mp-pmu",	.data = scorpion_mp_pmu_init},
 	{},
 };
 
@@ -289,6 +301,48 @@ static int probe_current_pmu(struct arm_pmu *pmu)
 	return ret;
 }
 
+static int of_pmu_irq_cfg(struct platform_device *pdev)
+{
+	int i;
+	int *irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
+
+	if (!irqs)
+		return -ENOMEM;
+
+	for (i = 0; i < pdev->num_resources; ++i) {
+		struct device_node *dn;
+		int cpu;
+
+		dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity",
+				      i);
+		if (!dn) {
+			pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
+				of_node_full_name(dn), i);
+			break;
+		}
+
+		for_each_possible_cpu(cpu)
+			if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
+				break;
+
+		of_node_put(dn);
+		if (cpu >= nr_cpu_ids) {
+			pr_warn("Failed to find logical CPU for %s\n",
+				dn->name);
+			break;
+		}
+
+		irqs[i] = cpu;
+	}
+
+	if (i == pdev->num_resources)
+		cpu_pmu->irq_affinity = irqs;
+	else
+		kfree(irqs);
+
+	return 0;
+}
+
 static int cpu_pmu_device_probe(struct platform_device *pdev)
 {
 	const struct of_device_id *of_id;
@@ -313,7 +367,10 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
 
 	if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
 		init_fn = of_id->data;
-		ret = init_fn(pmu);
+
+		ret = of_pmu_irq_cfg(pdev);
+		if (!ret)
+			ret = init_fn(pmu);
 	} else {
 		ret = probe_current_pmu(pmu);
 	}
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 8993770c47de..f4207a4dcb01 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -140,6 +140,23 @@ enum krait_perf_types {
 	KRAIT_PERFCTR_L1_DTLB_ACCESS	= 0x12210,
 };
 
+/* ARMv7 Scorpion specific event types */
+enum scorpion_perf_types {
+	SCORPION_LPM0_GROUP0		= 0x4c,
+	SCORPION_LPM1_GROUP0		= 0x50,
+	SCORPION_LPM2_GROUP0		= 0x54,
+	SCORPION_L2LPM_GROUP0		= 0x58,
+	SCORPION_VLPM_GROUP0		= 0x5c,
+
+	SCORPION_ICACHE_ACCESS		= 0x10053,
+	SCORPION_ICACHE_MISS		= 0x10052,
+
+	SCORPION_DTLB_ACCESS		= 0x12013,
+	SCORPION_DTLB_MISS		= 0x12012,
+
+	SCORPION_ITLB_MISS		= 0x12021,
+};
+
 /*
  * Cortex-A8 HW events mapping
  *
@@ -482,6 +499,49 @@ static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 };
 
 /*
+ * Scorpion HW events mapping
+ */
+static const unsigned scorpion_perf_map[PERF_COUNT_HW_MAX] = {
+	PERF_MAP_ALL_UNSUPPORTED,
+	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
+};
+
+static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+					    [PERF_COUNT_HW_CACHE_OP_MAX]
+					    [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	PERF_CACHE_MAP_ALL_UNSUPPORTED,
+	/*
+	 * The performance counters don't differentiate between read and write
+	 * accesses/misses so this isn't strictly correct, but it's the best we
+	 * can do. Writes and reads get combined.
+	 */
+	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_ICACHE_ACCESS,
+	[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ICACHE_MISS,
+	/*
+	 * Only ITLB misses and DTLB refills are supported.  If users want the
+	 * DTLB refills misses a raw counter must be used.
+	 */
+	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
+	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
+	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
+	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
+	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
+	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
+	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+};
+
+/*
  * Perf Events' indices
  */
 #define ARMV7_IDX_CYCLE_COUNTER	0
@@ -976,6 +1036,12 @@ static int krait_map_event_no_branch(struct perf_event *event)
 			       &krait_perf_cache_map, 0xFFFFF);
 }
 
+static int scorpion_map_event(struct perf_event *event)
+{
+	return armpmu_map_event(event, &scorpion_perf_map,
+				&scorpion_perf_cache_map, 0xFFFFF);
+}
+
 static void armv7pmu_init(struct arm_pmu *cpu_pmu)
 {
 	cpu_pmu->handle_irq	= armv7pmu_handle_irq;
@@ -1103,6 +1169,12 @@ static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
 #define KRAIT_EVENT_MASK	(KRAIT_EVENT | VENUM_EVENT)
 #define PMRESRn_EN		BIT(31)
 
+#define EVENT_REGION(event)	(((event) >> 12) & 0xf)		/* R */
+#define EVENT_GROUP(event)	((event) & 0xf)			/* G */
+#define EVENT_CODE(event)	(((event) >> 4) & 0xff)		/* CC */
+#define EVENT_VENUM(event)	(!!(event & VENUM_EVENT))	/* N=2 */
+#define EVENT_CPU(event)	(!!(event & KRAIT_EVENT))	/* N=1 */
+
 static u32 krait_read_pmresrn(int n)
 {
 	u32 val;
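
A worked example of the field layout the new EVENT_* macros decode, using KRAIT_PERFCTR_L1_DTLB_ACCESS (0x12210) from the enum at the top of this file (standalone illustration only):

#include <stdio.h>

int main(void)
{
	unsigned int event = 0x12210;

	printf("R  = %#x\n", (event >> 12) & 0xf);	/* region: 0x2 */
	printf("CC = %#x\n", (event >> 4) & 0xff);	/* code:   0x21 */
	printf("G  = %#x\n", event & 0xf);		/* group:  0x0 */
	return 0;
}
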
@@ -1141,19 +1213,19 @@ static void krait_write_pmresrn(int n, u32 val)
 	}
 }
 
-static u32 krait_read_vpmresr0(void)
+static u32 venum_read_pmresr(void)
 {
 	u32 val;
 	asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
 	return val;
 }
 
-static void krait_write_vpmresr0(u32 val)
+static void venum_write_pmresr(u32 val)
 {
 	asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
 }
 
-static void krait_pre_vpmresr0(u32 *venum_orig_val, u32 *fp_orig_val)
+static void venum_pre_pmresr(u32 *venum_orig_val, u32 *fp_orig_val)
 {
 	u32 venum_new_val;
 	u32 fp_new_val;
@@ -1170,7 +1242,7 @@ static void krait_pre_vpmresr0(u32 *venum_orig_val, u32 *fp_orig_val)
 	fmxr(FPEXC, fp_new_val);
 }
 
-static void krait_post_vpmresr0(u32 venum_orig_val, u32 fp_orig_val)
+static void venum_post_pmresr(u32 venum_orig_val, u32 fp_orig_val)
 {
 	BUG_ON(preemptible());
 	/* Restore FPEXC */
@@ -1193,16 +1265,11 @@ static void krait_evt_setup(int idx, u32 config_base)
 	u32 val;
 	u32 mask;
 	u32 vval, fval;
-	unsigned int region;
-	unsigned int group;
-	unsigned int code;
+	unsigned int region = EVENT_REGION(config_base);
+	unsigned int group = EVENT_GROUP(config_base);
+	unsigned int code = EVENT_CODE(config_base);
 	unsigned int group_shift;
-	bool venum_event;
-
-	venum_event = !!(config_base & VENUM_EVENT);
-	region = (config_base >> 12) & 0xf;
-	code = (config_base >> 4) & 0xff;
-	group = (config_base >> 0) & 0xf;
+	bool venum_event = EVENT_VENUM(config_base);
 
 	group_shift = group * 8;
 	mask = 0xff << group_shift;
@@ -1217,16 +1284,14 @@ static void krait_evt_setup(int idx, u32 config_base)
 	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
 	armv7_pmnc_write_evtsel(idx, val);
 
-	asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
-
 	if (venum_event) {
-		krait_pre_vpmresr0(&vval, &fval);
-		val = krait_read_vpmresr0();
+		venum_pre_pmresr(&vval, &fval);
+		val = venum_read_pmresr();
 		val &= ~mask;
 		val |= code << group_shift;
 		val |= PMRESRn_EN;
-		krait_write_vpmresr0(val);
-		krait_post_vpmresr0(vval, fval);
+		venum_write_pmresr(val);
+		venum_post_pmresr(vval, fval);
 	} else {
 		val = krait_read_pmresrn(region);
 		val &= ~mask;
@@ -1236,7 +1301,7 @@ static void krait_evt_setup(int idx, u32 config_base)
 	}
 }
 
-static u32 krait_clear_pmresrn_group(u32 val, int group)
+static u32 clear_pmresrn_group(u32 val, int group)
 {
 	u32 mask;
 	int group_shift;
@@ -1256,23 +1321,19 @@ static void krait_clearpmu(u32 config_base)
 {
 	u32 val;
 	u32 vval, fval;
-	unsigned int region;
-	unsigned int group;
-	bool venum_event;
-
-	venum_event = !!(config_base & VENUM_EVENT);
-	region = (config_base >> 12) & 0xf;
-	group = (config_base >> 0) & 0xf;
+	unsigned int region = EVENT_REGION(config_base);
+	unsigned int group = EVENT_GROUP(config_base);
+	bool venum_event = EVENT_VENUM(config_base);
 
 	if (venum_event) {
-		krait_pre_vpmresr0(&vval, &fval);
-		val = krait_read_vpmresr0();
-		val = krait_clear_pmresrn_group(val, group);
-		krait_write_vpmresr0(val);
-		krait_post_vpmresr0(vval, fval);
+		venum_pre_pmresr(&vval, &fval);
+		val = venum_read_pmresr();
+		val = clear_pmresrn_group(val, group);
+		venum_write_pmresr(val);
+		venum_post_pmresr(vval, fval);
 	} else {
 		val = krait_read_pmresrn(region);
-		val = krait_clear_pmresrn_group(val, group);
+		val = clear_pmresrn_group(val, group);
 		krait_write_pmresrn(region, val);
 	}
 }
@@ -1342,6 +1403,8 @@ static void krait_pmu_enable_event(struct perf_event *event)
 static void krait_pmu_reset(void *info)
 {
 	u32 vval, fval;
+	struct arm_pmu *cpu_pmu = info;
+	u32 idx, nb_cnt = cpu_pmu->num_events;
 
 	armv7pmu_reset(info);
 
@@ -1350,9 +1413,16 @@ static void krait_pmu_reset(void *info)
1350 krait_write_pmresrn(1, 0); 1413 krait_write_pmresrn(1, 0);
1351 krait_write_pmresrn(2, 0); 1414 krait_write_pmresrn(2, 0);
1352 1415
1353 krait_pre_vpmresr0(&vval, &fval); 1416 venum_pre_pmresr(&vval, &fval);
1354 krait_write_vpmresr0(0); 1417 venum_write_pmresr(0);
1355 krait_post_vpmresr0(vval, fval); 1418 venum_post_pmresr(vval, fval);
1419
1420 /* Reset PMxEVNCTCR to sane default */
1421 for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1422 armv7_pmnc_select_counter(idx);
1423 asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1424 }
1425
1356} 1426}
1357 1427
1358static int krait_event_to_bit(struct perf_event *event, unsigned int region, 1428static int krait_event_to_bit(struct perf_event *event, unsigned int region,
@@ -1386,26 +1456,18 @@ static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1386{ 1456{
1387 int idx; 1457 int idx;
1388 int bit = -1; 1458 int bit = -1;
1389 unsigned int prefix;
1390 unsigned int region;
1391 unsigned int code;
1392 unsigned int group;
1393 bool krait_event;
1394 struct hw_perf_event *hwc = &event->hw; 1459 struct hw_perf_event *hwc = &event->hw;
1460 unsigned int region = EVENT_REGION(hwc->config_base);
1461 unsigned int code = EVENT_CODE(hwc->config_base);
1462 unsigned int group = EVENT_GROUP(hwc->config_base);
1463 bool venum_event = EVENT_VENUM(hwc->config_base);
1464 bool krait_event = EVENT_CPU(hwc->config_base);
1395 1465
1396 region = (hwc->config_base >> 12) & 0xf; 1466 if (venum_event || krait_event) {
1397 code = (hwc->config_base >> 4) & 0xff;
1398 group = (hwc->config_base >> 0) & 0xf;
1399 krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK);
1400
1401 if (krait_event) {
1402 /* Ignore invalid events */ 1467 /* Ignore invalid events */
1403 if (group > 3 || region > 2) 1468 if (group > 3 || region > 2)
1404 return -EINVAL; 1469 return -EINVAL;
1405 prefix = hwc->config_base & KRAIT_EVENT_MASK; 1470 if (venum_event && (code & 0xe0))
1406 if (prefix != KRAIT_EVENT && prefix != VENUM_EVENT)
1407 return -EINVAL;
1408 if (prefix == VENUM_EVENT && (code & 0xe0))
1409 return -EINVAL; 1471 return -EINVAL;
1410 1472
1411 bit = krait_event_to_bit(event, region, group); 1473 bit = krait_event_to_bit(event, region, group);
@@ -1425,15 +1487,12 @@ static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1425{ 1487{
1426 int bit; 1488 int bit;
1427 struct hw_perf_event *hwc = &event->hw; 1489 struct hw_perf_event *hwc = &event->hw;
1428 unsigned int region; 1490 unsigned int region = EVENT_REGION(hwc->config_base);
1429 unsigned int group; 1491 unsigned int group = EVENT_GROUP(hwc->config_base);
1430 bool krait_event; 1492 bool venum_event = EVENT_VENUM(hwc->config_base);
1493 bool krait_event = EVENT_CPU(hwc->config_base);
1431 1494
1432 region = (hwc->config_base >> 12) & 0xf; 1495 if (venum_event || krait_event) {
1433 group = (hwc->config_base >> 0) & 0xf;
1434 krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK);
1435
1436 if (krait_event) {
1437 bit = krait_event_to_bit(event, region, group); 1496 bit = krait_event_to_bit(event, region, group);
1438 clear_bit(bit, cpuc->used_mask); 1497 clear_bit(bit, cpuc->used_mask);
1439 } 1498 }
@@ -1458,6 +1517,344 @@ static int krait_pmu_init(struct arm_pmu *cpu_pmu)
1458 cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx; 1517 cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
1459 return 0; 1518 return 0;
1460} 1519}
1520
1521/*
1522 * Scorpion Local Performance Monitor Register (LPMn)
1523 *
1524 * 31 30 24 16 8 0
1525 * +--------------------------------+
1526 * LPM0 | EN | CC | CC | CC | CC | N = 1, R = 0
1527 * +--------------------------------+
1528 * LPM1 | EN | CC | CC | CC | CC | N = 1, R = 1
1529 * +--------------------------------+
1530 * LPM2 | EN | CC | CC | CC | CC | N = 1, R = 2
1531 * +--------------------------------+
1532 * L2LPM | EN | CC | CC | CC | CC | N = 1, R = 3
1533 * +--------------------------------+
1534 * VLPM | EN | CC | CC | CC | CC | N = 2, R = ?
1535 * +--------------------------------+
1536 * EN | G=3 | G=2 | G=1 | G=0
1537 *
1538 *
1539 * Event Encoding:
1540 *
1541 * hwc->config_base = 0xNRCCG
1542 *
1543 * N = prefix, 1 for Scorpion CPU (LPMn/L2LPM), 2 for Venum VFP (VLPM)
1544 * R = region register
1545 * CC = class of events the group G is choosing from
1546 * G = group or particular event
1547 *
1548 * Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2
1549 *
1550 * A region (R) corresponds to a piece of the CPU (execution unit, instruction
1551 * unit, etc.) while the event code (CC) corresponds to a particular class of
1552 * events (interrupts for example). An event code is broken down into
1553 * groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
1554 * example).
1555 */
1556
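A minimal decode sketch for the encoding above, assuming the EVENT_* helpers introduced by this patch mirror the open-coded shifts they replace in the Krait paths earlier in this file:

#include <stdio.h>

/* Assumed definitions, matching the open-coded shifts removed above. */
#define EVENT_REGION(x) (((x) >> 12) & 0xf)  /* R */
#define EVENT_CODE(x)   (((x) >> 4) & 0xff)  /* CC */
#define EVENT_GROUP(x)  ((x) & 0xf)          /* G */

int main(void)
{
	unsigned int cfg = 0x12021; /* the example from the comment */

	printf("region %u, code %#x, group %u\n",
	       EVENT_REGION(cfg), EVENT_CODE(cfg), EVENT_GROUP(cfg));
	/* prints: region 2, code 0x2, group 1 */
	return 0;
}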
1557static u32 scorpion_read_pmresrn(int n)
1558{
1559 u32 val;
1560
1561 switch (n) {
1562 case 0:
1563 asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val));
1564 break;
1565 case 1:
1566 asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
1567 break;
1568 case 2:
1569 asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val));
1570 break;
1571 case 3:
1572 asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val));
1573 break;
1574 default:
1575 BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
1576 }
1577
1578 return val;
1579}
1580
1581static void scorpion_write_pmresrn(int n, u32 val)
1582{
1583 switch (n) {
1584 case 0:
1585 asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val));
1586 break;
1587 case 1:
1588 asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val));
1589 break;
1590 case 2:
1591 asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val));
1592 break;
1593 case 3:
1594 asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val));
1595 break;
1596 default:
1597 BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
1598 }
1599}
1600
1601static u32 scorpion_get_pmresrn_event(unsigned int region)
1602{
1603 static const u32 pmresrn_table[] = { SCORPION_LPM0_GROUP0,
1604 SCORPION_LPM1_GROUP0,
1605 SCORPION_LPM2_GROUP0,
1606 SCORPION_L2LPM_GROUP0 };
1607 return pmresrn_table[region];
1608}
1609
1610static void scorpion_evt_setup(int idx, u32 config_base)
1611{
1612 u32 val;
1613 u32 mask;
1614 u32 vval, fval;
1615 unsigned int region = EVENT_REGION(config_base);
1616 unsigned int group = EVENT_GROUP(config_base);
1617 unsigned int code = EVENT_CODE(config_base);
1618 unsigned int group_shift;
1619 bool venum_event = EVENT_VENUM(config_base);
1620
1621 group_shift = group * 8;
1622 mask = 0xff << group_shift;
1623
1624 /* Configure evtsel for the region and group */
1625 if (venum_event)
1626 val = SCORPION_VLPM_GROUP0;
1627 else
1628 val = scorpion_get_pmresrn_event(region);
1629 val += group;
1630 /* Mix in mode-exclusion bits */
1631 val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
1632 armv7_pmnc_write_evtsel(idx, val);
1633
1634 asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1635
1636 if (venum_event) {
1637 venum_pre_pmresr(&vval, &fval);
1638 val = venum_read_pmresr();
1639 val &= ~mask;
1640 val |= code << group_shift;
1641 val |= PMRESRn_EN;
1642 venum_write_pmresr(val);
1643 venum_post_pmresr(vval, fval);
1644 } else {
1645 val = scorpion_read_pmresrn(region);
1646 val &= ~mask;
1647 val |= code << group_shift;
1648 val |= PMRESRn_EN;
1649 scorpion_write_pmresrn(region, val);
1650 }
1651}
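The read-modify-write above slots one 8-bit event code per group into the 32-bit PMRESR register; a standalone sketch of that arithmetic, assuming PMRESRn_EN is bit 31 (the EN column in the layout diagram):

#include <stdio.h>

#define PMRESRn_EN (1u << 31)

int main(void)
{
	unsigned int val = 0x00aa0000 | PMRESRn_EN; /* group 2 holds 0xaa */
	unsigned int group = 1, code = 0x02;
	unsigned int shift = group * 8;
	unsigned int mask = 0xffu << shift;

	val = (val & ~mask) | (code << shift) | PMRESRn_EN;
	printf("%#x\n", val); /* 0x80aa0200: group 1 now counts code 2 */
	return 0;
}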
1652
1653static void scorpion_clearpmu(u32 config_base)
1654{
1655 u32 val;
1656 u32 vval, fval;
1657 unsigned int region = EVENT_REGION(config_base);
1658 unsigned int group = EVENT_GROUP(config_base);
1659 bool venum_event = EVENT_VENUM(config_base);
1660
1661 if (venum_event) {
1662 venum_pre_pmresr(&vval, &fval);
1663 val = venum_read_pmresr();
1664 val = clear_pmresrn_group(val, group);
1665 venum_write_pmresr(val);
1666 venum_post_pmresr(vval, fval);
1667 } else {
1668 val = scorpion_read_pmresrn(region);
1669 val = clear_pmresrn_group(val, group);
1670 scorpion_write_pmresrn(region, val);
1671 }
1672}
1673
1674static void scorpion_pmu_disable_event(struct perf_event *event)
1675{
1676 unsigned long flags;
1677 struct hw_perf_event *hwc = &event->hw;
1678 int idx = hwc->idx;
1679 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1680 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1681
1682 /* Disable counter and interrupt */
1683 raw_spin_lock_irqsave(&events->pmu_lock, flags);
1684
1685 /* Disable counter */
1686 armv7_pmnc_disable_counter(idx);
1687
1688 /*
1689 * Clear pmresr code (if destined for PMNx counters)
1690 */
1691 if (hwc->config_base & KRAIT_EVENT_MASK)
1692 scorpion_clearpmu(hwc->config_base);
1693
1694 /* Disable interrupt for this counter */
1695 armv7_pmnc_disable_intens(idx);
1696
1697 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1698}
1699
1700static void scorpion_pmu_enable_event(struct perf_event *event)
1701{
1702 unsigned long flags;
1703 struct hw_perf_event *hwc = &event->hw;
1704 int idx = hwc->idx;
1705 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1706 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1707
1708 /*
1709 * Enable counter and interrupt, and set the counter to count
1710 * the event that we're interested in.
1711 */
1712 raw_spin_lock_irqsave(&events->pmu_lock, flags);
1713
1714 /* Disable counter */
1715 armv7_pmnc_disable_counter(idx);
1716
1717 /*
1718 * Set event (if destined for PMNx counters)
1719 * We don't set the event for the cycle counter because we
1720 * don't have the ability to perform event filtering.
1721 */
1722 if (hwc->config_base & KRAIT_EVENT_MASK)
1723 scorpion_evt_setup(idx, hwc->config_base);
1724 else if (idx != ARMV7_IDX_CYCLE_COUNTER)
1725 armv7_pmnc_write_evtsel(idx, hwc->config_base);
1726
1727 /* Enable interrupt for this counter */
1728 armv7_pmnc_enable_intens(idx);
1729
1730 /* Enable counter */
1731 armv7_pmnc_enable_counter(idx);
1732
1733 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1734}
1735
1736static void scorpion_pmu_reset(void *info)
1737{
1738 u32 vval, fval;
1739 struct arm_pmu *cpu_pmu = info;
1740 u32 idx, nb_cnt = cpu_pmu->num_events;
1741
1742 armv7pmu_reset(info);
1743
1744 /* Clear all pmresrs */
1745 scorpion_write_pmresrn(0, 0);
1746 scorpion_write_pmresrn(1, 0);
1747 scorpion_write_pmresrn(2, 0);
1748 scorpion_write_pmresrn(3, 0);
1749
1750 venum_pre_pmresr(&vval, &fval);
1751 venum_write_pmresr(0);
1752 venum_post_pmresr(vval, fval);
1753
1754 /* Reset PMxEVNCTCR to sane default */
1755 for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1756 armv7_pmnc_select_counter(idx);
1757 asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1758 }
1759}
1760
1761static int scorpion_event_to_bit(struct perf_event *event, unsigned int region,
1762 unsigned int group)
1763{
1764 int bit;
1765 struct hw_perf_event *hwc = &event->hw;
1766 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1767
1768 if (hwc->config_base & VENUM_EVENT)
1769 bit = SCORPION_VLPM_GROUP0;
1770 else
1771 bit = scorpion_get_pmresrn_event(region);
1772 bit -= scorpion_get_pmresrn_event(0);
1773 bit += group;
1774 /*
1775 * Lower bits are reserved for use by the counters (see
1776 * armv7pmu_get_event_idx() for more info)
1777 */
1778 bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
1779
1780 return bit;
1781}
1782
1783/*
1784 * We check for column exclusion constraints here.
1785 * Two events can't use the same group within a pmresr register.
1786 */
1787static int scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1788 struct perf_event *event)
1789{
1790 int idx;
1791 int bit = -1;
1792 struct hw_perf_event *hwc = &event->hw;
1793 unsigned int region = EVENT_REGION(hwc->config_base);
1794 unsigned int group = EVENT_GROUP(hwc->config_base);
1795 bool venum_event = EVENT_VENUM(hwc->config_base);
1796 bool scorpion_event = EVENT_CPU(hwc->config_base);
1797
1798 if (venum_event || scorpion_event) {
1799 /* Ignore invalid events */
1800 if (group > 3 || region > 3)
1801 return -EINVAL;
1802
1803 bit = scorpion_event_to_bit(event, region, group);
1804 if (test_and_set_bit(bit, cpuc->used_mask))
1805 return -EAGAIN;
1806 }
1807
1808 idx = armv7pmu_get_event_idx(cpuc, event);
1809 if (idx < 0 && bit >= 0)
1810 clear_bit(bit, cpuc->used_mask);
1811
1812 return idx;
1813}
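A toy model of the column-exclusion bookkeeping: each (region, group) pair owns one bit above the counter bits in used_mask, so a second event naming the same pair is refused. Constants below are illustrative, not the kernel's:

#include <stdio.h>

#define NUM_COUNTER_BITS 5 /* illustrative ARMV7_IDX_COUNTER_LAST + 1 */

static unsigned long used_mask;

static int claim(unsigned int region, unsigned int group)
{
	int bit = NUM_COUNTER_BITS + region * 4 + group;

	if (used_mask & (1UL << bit))
		return -1;          /* the kernel returns -EAGAIN here */
	used_mask |= 1UL << bit;
	return bit;
}

int main(void)
{
	printf("%d\n", claim(2, 1)); /* first claim succeeds: bit 14 */
	printf("%d\n", claim(2, 1)); /* same column: refused (-1) */
	return 0;
}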
1814
1815static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1816 struct perf_event *event)
1817{
1818 int bit;
1819 struct hw_perf_event *hwc = &event->hw;
1820 unsigned int region = EVENT_REGION(hwc->config_base);
1821 unsigned int group = EVENT_GROUP(hwc->config_base);
1822 bool venum_event = EVENT_VENUM(hwc->config_base);
1823 bool scorpion_event = EVENT_CPU(hwc->config_base);
1824
1825 if (venum_event || scorpion_event) {
1826 bit = scorpion_event_to_bit(event, region, group);
1827 clear_bit(bit, cpuc->used_mask);
1828 }
1829}
1830
1831static int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
1832{
1833 armv7pmu_init(cpu_pmu);
1834 cpu_pmu->name = "armv7_scorpion";
1835 cpu_pmu->map_event = scorpion_map_event;
1836 cpu_pmu->num_events = armv7_read_num_pmnc_events();
1837 cpu_pmu->reset = scorpion_pmu_reset;
1838 cpu_pmu->enable = scorpion_pmu_enable_event;
1839 cpu_pmu->disable = scorpion_pmu_disable_event;
1840 cpu_pmu->get_event_idx = scorpion_pmu_get_event_idx;
1841 cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
1842 return 0;
1843}
1844
1845static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
1846{
1847 armv7pmu_init(cpu_pmu);
1848 cpu_pmu->name = "armv7_scorpion_mp";
1849 cpu_pmu->map_event = scorpion_map_event;
1850 cpu_pmu->num_events = armv7_read_num_pmnc_events();
1851 cpu_pmu->reset = scorpion_pmu_reset;
1852 cpu_pmu->enable = scorpion_pmu_enable_event;
1853 cpu_pmu->disable = scorpion_pmu_disable_event;
1854 cpu_pmu->get_event_idx = scorpion_pmu_get_event_idx;
1855 cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
1856 return 0;
1857}
1461#else 1858#else
1462static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu) 1859static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
1463{ 1860{
@@ -1498,4 +1895,14 @@ static inline int krait_pmu_init(struct arm_pmu *cpu_pmu)
1498{ 1895{
1499 return -ENODEV; 1896 return -ENODEV;
1500} 1897}
1898
1899static inline int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
1900{
1901 return -ENODEV;
1902}
1903
1904static inline int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
1905{
1906 return -ENODEV;
1907}
1501#endif /* CONFIG_CPU_V7 */ 1908#endif /* CONFIG_CPU_V7 */
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index fdfa3a78ec8c..f192a2a41719 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -17,12 +17,9 @@
17#include <linux/stddef.h> 17#include <linux/stddef.h>
18#include <linux/unistd.h> 18#include <linux/unistd.h>
19#include <linux/user.h> 19#include <linux/user.h>
20#include <linux/delay.h>
21#include <linux/reboot.h>
22#include <linux/interrupt.h> 20#include <linux/interrupt.h>
23#include <linux/kallsyms.h> 21#include <linux/kallsyms.h>
24#include <linux/init.h> 22#include <linux/init.h>
25#include <linux/cpu.h>
26#include <linux/elfcore.h> 23#include <linux/elfcore.h>
27#include <linux/pm.h> 24#include <linux/pm.h>
28#include <linux/tick.h> 25#include <linux/tick.h>
@@ -31,16 +28,14 @@
31#include <linux/random.h> 28#include <linux/random.h>
32#include <linux/hw_breakpoint.h> 29#include <linux/hw_breakpoint.h>
33#include <linux/leds.h> 30#include <linux/leds.h>
34#include <linux/reboot.h>
35 31
36#include <asm/cacheflush.h>
37#include <asm/idmap.h>
38#include <asm/processor.h> 32#include <asm/processor.h>
39#include <asm/thread_notify.h> 33#include <asm/thread_notify.h>
40#include <asm/stacktrace.h> 34#include <asm/stacktrace.h>
41#include <asm/system_misc.h> 35#include <asm/system_misc.h>
42#include <asm/mach/time.h> 36#include <asm/mach/time.h>
43#include <asm/tls.h> 37#include <asm/tls.h>
38#include <asm/vdso.h>
44 39
45#ifdef CONFIG_CC_STACKPROTECTOR 40#ifdef CONFIG_CC_STACKPROTECTOR
46#include <linux/stackprotector.h> 41#include <linux/stackprotector.h>
@@ -59,69 +54,6 @@ static const char *isa_modes[] __maybe_unused = {
59 "ARM" , "Thumb" , "Jazelle", "ThumbEE" 54 "ARM" , "Thumb" , "Jazelle", "ThumbEE"
60}; 55};
61 56
62extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
63typedef void (*phys_reset_t)(unsigned long);
64
65/*
66 * A temporary stack to use for CPU reset. This is static so that we
67 * don't clobber it with the identity mapping. When running with this
68 * stack, any references to the current task *will not work* so you
69 * should really do as little as possible before jumping to your reset
70 * code.
71 */
72static u64 soft_restart_stack[16];
73
74static void __soft_restart(void *addr)
75{
76 phys_reset_t phys_reset;
77
78 /* Take out a flat memory mapping. */
79 setup_mm_for_reboot();
80
81 /* Clean and invalidate caches */
82 flush_cache_all();
83
84 /* Turn off caching */
85 cpu_proc_fin();
86
87 /* Push out any further dirty data, and ensure cache is empty */
88 flush_cache_all();
89
90 /* Switch to the identity mapping. */
91 phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
92 phys_reset((unsigned long)addr);
93
94 /* Should never get here. */
95 BUG();
96}
97
98void soft_restart(unsigned long addr)
99{
100 u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);
101
102 /* Disable interrupts first */
103 raw_local_irq_disable();
104 local_fiq_disable();
105
106 /* Disable the L2 if we're the last man standing. */
107 if (num_online_cpus() == 1)
108 outer_disable();
109
110 /* Change to the new stack and continue with the reset. */
111 call_with_stack(__soft_restart, (void *)addr, (void *)stack);
112
113 /* Should never get here. */
114 BUG();
115}
116
117/*
118 * Function pointers to optional machine specific functions
119 */
120void (*pm_power_off)(void);
121EXPORT_SYMBOL(pm_power_off);
122
123void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
124
125/* 57/*
126 * This is our default idle handler. 58 * This is our default idle handler.
127 */ 59 */
@@ -166,79 +98,6 @@ void arch_cpu_idle_dead(void)
166} 98}
167#endif 99#endif
168 100
169/*
170 * Called by kexec, immediately prior to machine_kexec().
171 *
172 * This must completely disable all secondary CPUs; simply causing those CPUs
173 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
174 * kexec'd kernel to use any and all RAM as it sees fit, without having to
175 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
176 * functionality embodied in disable_nonboot_cpus() to achieve this.
177 */
178void machine_shutdown(void)
179{
180 disable_nonboot_cpus();
181}
182
183/*
184 * Halting simply requires that the secondary CPUs stop performing any
185 * activity (executing tasks, handling interrupts). smp_send_stop()
186 * achieves this.
187 */
188void machine_halt(void)
189{
190 local_irq_disable();
191 smp_send_stop();
192
193 local_irq_disable();
194 while (1);
195}
196
197/*
198 * Power-off simply requires that the secondary CPUs stop performing any
199 * activity (executing tasks, handling interrupts). smp_send_stop()
200 * achieves this. When the system power is turned off, it will take all CPUs
201 * with it.
202 */
203void machine_power_off(void)
204{
205 local_irq_disable();
206 smp_send_stop();
207
208 if (pm_power_off)
209 pm_power_off();
210}
211
212/*
213 * Restart requires that the secondary CPUs stop performing any activity
214 * while the primary CPU resets the system. Systems with a single CPU can
215 * use soft_restart() as their machine descriptor's .restart hook, since that
216 * will cause the only available CPU to reset. Systems with multiple CPUs must
217 * provide a HW restart implementation, to ensure that all CPUs reset at once.
218 * This is required so that any code running after reset on the primary CPU
219 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
220 * executing pre-reset code, and using RAM that the primary CPU's code wishes
221 * to use. Implementing such co-ordination would be essentially impossible.
222 */
223void machine_restart(char *cmd)
224{
225 local_irq_disable();
226 smp_send_stop();
227
228 if (arm_pm_restart)
229 arm_pm_restart(reboot_mode, cmd);
230 else
231 do_kernel_restart(cmd);
232
233 /* Give a grace period for failure to restart of 1s */
234 mdelay(1000);
235
236 /* Whoops - the platform was unable to reboot. Tell the user! */
237 printk("Reboot failed -- System halted\n");
238 local_irq_disable();
239 while (1);
240}
241
242void __show_regs(struct pt_regs *regs) 101void __show_regs(struct pt_regs *regs)
243{ 102{
244 unsigned long flags; 103 unsigned long flags;
@@ -475,7 +334,7 @@ const char *arch_vma_name(struct vm_area_struct *vma)
475} 334}
476 335
477/* If possible, provide a placement hint at a random offset from the 336/* If possible, provide a placement hint at a random offset from the
478 * stack for the signal page. 337 * stack for the sigpage and vdso pages.
479 */ 338 */
480static unsigned long sigpage_addr(const struct mm_struct *mm, 339static unsigned long sigpage_addr(const struct mm_struct *mm,
481 unsigned int npages) 340 unsigned int npages)
@@ -519,6 +378,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
519{ 378{
520 struct mm_struct *mm = current->mm; 379 struct mm_struct *mm = current->mm;
521 struct vm_area_struct *vma; 380 struct vm_area_struct *vma;
381 unsigned long npages;
522 unsigned long addr; 382 unsigned long addr;
523 unsigned long hint; 383 unsigned long hint;
524 int ret = 0; 384 int ret = 0;
@@ -528,9 +388,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
528 if (!signal_page) 388 if (!signal_page)
529 return -ENOMEM; 389 return -ENOMEM;
530 390
391 npages = 1; /* for sigpage */
392 npages += vdso_total_pages;
393
531 down_write(&mm->mmap_sem); 394 down_write(&mm->mmap_sem);
532 hint = sigpage_addr(mm, 1); 395 hint = sigpage_addr(mm, npages);
533 addr = get_unmapped_area(NULL, hint, PAGE_SIZE, 0, 0); 396 addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
534 if (IS_ERR_VALUE(addr)) { 397 if (IS_ERR_VALUE(addr)) {
535 ret = addr; 398 ret = addr;
536 goto up_fail; 399 goto up_fail;
@@ -547,6 +410,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
547 410
548 mm->context.sigpage = addr; 411 mm->context.sigpage = addr;
549 412
413 /* Unlike the sigpage, failure to install the vdso is unlikely
414 * to be fatal to the process, so no error check needed
415 * here.
416 */
417 arm_install_vdso(mm, addr + PAGE_SIZE);
418
550 up_fail: 419 up_fail:
551 up_write(&mm->mmap_sem); 420 up_write(&mm->mmap_sem);
552 return ret; 421 return ret;
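With this change a single get_unmapped_area() hint covers the sigpage, the vdso data page, and the vdso text, laid out back to back; a small sketch of the resulting addresses (values are made up):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long addr = 0x7f551000UL;  /* illustrative hint result */
	unsigned long vdso_total_pages = 3; /* vvar page + 2 text pages */

	printf("sigpage: %#lx (1 page)\n", addr);
	printf("vvar:    %#lx (1 page)\n", addr + PAGE_SIZE);
	printf("vdso:    %#lx (%lu pages)\n", addr + 2 * PAGE_SIZE,
	       vdso_total_pages - 1);
	return 0;
}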
diff --git a/arch/arm/kernel/psci-call.S b/arch/arm/kernel/psci-call.S
new file mode 100644
index 000000000000..a78e9e1e206d
--- /dev/null
+++ b/arch/arm/kernel/psci-call.S
@@ -0,0 +1,31 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * Copyright (C) 2015 ARM Limited
12 *
13 * Author: Mark Rutland <mark.rutland@arm.com>
14 */
15
16#include <linux/linkage.h>
17
18#include <asm/opcodes-sec.h>
19#include <asm/opcodes-virt.h>
20
21/* int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */
22ENTRY(__invoke_psci_fn_hvc)
23 __HVC(0)
24 bx lr
25ENDPROC(__invoke_psci_fn_hvc)
26
27/* int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */
28ENTRY(__invoke_psci_fn_smc)
29 __SMC(0)
30 bx lr
31ENDPROC(__invoke_psci_fn_smc)
diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
index f73891b6b730..f90fdf4ce7c7 100644
--- a/arch/arm/kernel/psci.c
+++ b/arch/arm/kernel/psci.c
@@ -23,8 +23,6 @@
23 23
24#include <asm/compiler.h> 24#include <asm/compiler.h>
25#include <asm/errno.h> 25#include <asm/errno.h>
26#include <asm/opcodes-sec.h>
27#include <asm/opcodes-virt.h>
28#include <asm/psci.h> 26#include <asm/psci.h>
29#include <asm/system_misc.h> 27#include <asm/system_misc.h>
30 28
@@ -33,6 +31,9 @@ struct psci_operations psci_ops;
33static int (*invoke_psci_fn)(u32, u32, u32, u32); 31static int (*invoke_psci_fn)(u32, u32, u32, u32);
34typedef int (*psci_initcall_t)(const struct device_node *); 32typedef int (*psci_initcall_t)(const struct device_node *);
35 33
34asmlinkage int __invoke_psci_fn_hvc(u32, u32, u32, u32);
35asmlinkage int __invoke_psci_fn_smc(u32, u32, u32, u32);
36
36enum psci_function { 37enum psci_function {
37 PSCI_FN_CPU_SUSPEND, 38 PSCI_FN_CPU_SUSPEND,
38 PSCI_FN_CPU_ON, 39 PSCI_FN_CPU_ON,
@@ -71,40 +72,6 @@ static u32 psci_power_state_pack(struct psci_power_state state)
71 & PSCI_0_2_POWER_STATE_AFFL_MASK); 72 & PSCI_0_2_POWER_STATE_AFFL_MASK);
72} 73}
73 74
74/*
75 * The following two functions are invoked via the invoke_psci_fn pointer
76 * and will not be inlined, allowing us to piggyback on the AAPCS.
77 */
78static noinline int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1,
79 u32 arg2)
80{
81 asm volatile(
82 __asmeq("%0", "r0")
83 __asmeq("%1", "r1")
84 __asmeq("%2", "r2")
85 __asmeq("%3", "r3")
86 __HVC(0)
87 : "+r" (function_id)
88 : "r" (arg0), "r" (arg1), "r" (arg2));
89
90 return function_id;
91}
92
93static noinline int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1,
94 u32 arg2)
95{
96 asm volatile(
97 __asmeq("%0", "r0")
98 __asmeq("%1", "r1")
99 __asmeq("%2", "r2")
100 __asmeq("%3", "r3")
101 __SMC(0)
102 : "+r" (function_id)
103 : "r" (arg0), "r" (arg1), "r" (arg2));
104
105 return function_id;
106}
107
108static int psci_get_version(void) 75static int psci_get_version(void)
109{ 76{
110 int err; 77 int err;
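The assembly entry points work because the AAPCS already places the four u32 arguments in r0-r3, so a plain C call reaches __HVC/__SMC with the registers set up; the old inline-asm versions had to enforce this with __asmeq. A hedged sketch of how invoke_psci_fn is bound, with stubs standing in for the real conduits (the DT "method" strings and the PSCI 0.2 CPU_ON function ID below are assumptions, not shown in this hunk):

#include <stdio.h>
#include <string.h>

typedef int (*psci_fn_t)(unsigned int, unsigned int,
			 unsigned int, unsigned int);

static int fake_hvc(unsigned int fn, unsigned int a0,
		    unsigned int a1, unsigned int a2)
{
	(void)a0; (void)a1; (void)a2;
	printf("HVC %#x\n", fn);
	return 0;
}

static int fake_smc(unsigned int fn, unsigned int a0,
		    unsigned int a1, unsigned int a2)
{
	(void)a0; (void)a1; (void)a2;
	printf("SMC %#x\n", fn);
	return 0;
}

int main(void)
{
	const char *method = "smc"; /* from the DT "method" property */
	psci_fn_t invoke_psci_fn =
		strcmp(method, "hvc") ? fake_smc : fake_hvc;

	/* 0x84000003 is PSCI 0.2 CPU_ON (assumed, for illustration) */
	invoke_psci_fn(0x84000003, 1, 0x80008000, 0);
	return 0;
}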
diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
new file mode 100644
index 000000000000..1a4d232796be
--- /dev/null
+++ b/arch/arm/kernel/reboot.c
@@ -0,0 +1,155 @@
1/*
2 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
3 * Original Copyright (C) 1995 Linus Torvalds
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9#include <linux/cpu.h>
10#include <linux/delay.h>
11#include <linux/reboot.h>
12
13#include <asm/cacheflush.h>
14#include <asm/idmap.h>
15
16#include "reboot.h"
17
18typedef void (*phys_reset_t)(unsigned long);
19
20/*
21 * Function pointers to optional machine specific functions
22 */
23void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
24void (*pm_power_off)(void);
25EXPORT_SYMBOL(pm_power_off);
26
27/*
28 * A temporary stack to use for CPU reset. This is static so that we
29 * don't clobber it with the identity mapping. When running with this
30 * stack, any references to the current task *will not work* so you
31 * should really do as little as possible before jumping to your reset
32 * code.
33 */
34static u64 soft_restart_stack[16];
35
36static void __soft_restart(void *addr)
37{
38 phys_reset_t phys_reset;
39
40 /* Take out a flat memory mapping. */
41 setup_mm_for_reboot();
42
43 /* Clean and invalidate caches */
44 flush_cache_all();
45
46 /* Turn off caching */
47 cpu_proc_fin();
48
49 /* Push out any further dirty data, and ensure cache is empty */
50 flush_cache_all();
51
52 /* Switch to the identity mapping. */
53 phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
54 phys_reset((unsigned long)addr);
55
56 /* Should never get here. */
57 BUG();
58}
59
60void _soft_restart(unsigned long addr, bool disable_l2)
61{
62 u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);
63
64 /* Disable interrupts first */
65 raw_local_irq_disable();
66 local_fiq_disable();
67
68 /* Disable the L2 if we're the last man standing. */
69 if (disable_l2)
70 outer_disable();
71
72 /* Change to the new stack and continue with the reset. */
73 call_with_stack(__soft_restart, (void *)addr, (void *)stack);
74
75 /* Should never get here. */
76 BUG();
77}
78
79void soft_restart(unsigned long addr)
80{
81 _soft_restart(addr, num_online_cpus() == 1);
82}
83
84/*
85 * Called by kexec, immediately prior to machine_kexec().
86 *
87 * This must completely disable all secondary CPUs; simply causing those CPUs
88 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
89 * kexec'd kernel to use any and all RAM as it sees fit, without having to
90 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
91 * functionality embodied in disable_nonboot_cpus() is used to achieve this.
92 */
93void machine_shutdown(void)
94{
95 disable_nonboot_cpus();
96}
97
98/*
99 * Halting simply requires that the secondary CPUs stop performing any
100 * activity (executing tasks, handling interrupts). smp_send_stop()
101 * achieves this.
102 */
103void machine_halt(void)
104{
105 local_irq_disable();
106 smp_send_stop();
107
108 local_irq_disable();
109 while (1);
110}
111
112/*
113 * Power-off simply requires that the secondary CPUs stop performing any
114 * activity (executing tasks, handling interrupts). smp_send_stop()
115 * achieves this. When the system power is turned off, it will take all CPUs
116 * with it.
117 */
118void machine_power_off(void)
119{
120 local_irq_disable();
121 smp_send_stop();
122
123 if (pm_power_off)
124 pm_power_off();
125}
126
127/*
128 * Restart requires that the secondary CPUs stop performing any activity
129 * while the primary CPU resets the system. Systems with a single CPU can
130 * use soft_restart() as their machine descriptor's .restart hook, since that
131 * will cause the only available CPU to reset. Systems with multiple CPUs must
132 * provide a HW restart implementation, to ensure that all CPUs reset at once.
133 * This is required so that any code running after reset on the primary CPU
134 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
135 * executing pre-reset code, and using RAM that the primary CPU's code wishes
136 * to use. Implementing such co-ordination would be essentially impossible.
137 */
138void machine_restart(char *cmd)
139{
140 local_irq_disable();
141 smp_send_stop();
142
143 if (arm_pm_restart)
144 arm_pm_restart(reboot_mode, cmd);
145 else
146 do_kernel_restart(cmd);
147
148 /* Give the restart a 1s grace period to take effect */
149 mdelay(1000);
150
151 /* Whoops - the platform was unable to reboot. Tell the user! */
152 printk("Reboot failed -- System halted\n");
153 local_irq_disable();
154 while (1);
155}
diff --git a/arch/arm/kernel/reboot.h b/arch/arm/kernel/reboot.h
new file mode 100644
index 000000000000..bf7a0b1f076e
--- /dev/null
+++ b/arch/arm/kernel/reboot.h
@@ -0,0 +1,7 @@
1#ifndef REBOOT_H
2#define REBOOT_H
3
4extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
5extern void _soft_restart(unsigned long addr, bool disable_l2);
6
7#endif
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
index 24b4a04846eb..36ed35073289 100644
--- a/arch/arm/kernel/return_address.c
+++ b/arch/arm/kernel/return_address.c
@@ -56,8 +56,6 @@ void *return_address(unsigned int level)
56 return NULL; 56 return NULL;
57} 57}
58 58
59#else /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */ 59#endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */
60
61#endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) / else */
62 60
63EXPORT_SYMBOL_GPL(return_address); 61EXPORT_SYMBOL_GPL(return_address);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 1d60bebea4b8..6c777e908a24 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -372,30 +372,48 @@ void __init early_print(const char *str, ...)
372 372
373static void __init cpuid_init_hwcaps(void) 373static void __init cpuid_init_hwcaps(void)
374{ 374{
375 unsigned int divide_instrs, vmsa; 375 int block;
376 u32 isar5;
376 377
377 if (cpu_architecture() < CPU_ARCH_ARMv7) 378 if (cpu_architecture() < CPU_ARCH_ARMv7)
378 return; 379 return;
379 380
380 divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24; 381 block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
381 382 if (block >= 2)
382 switch (divide_instrs) {
383 case 2:
384 elf_hwcap |= HWCAP_IDIVA; 383 elf_hwcap |= HWCAP_IDIVA;
385 case 1: 384 if (block >= 1)
386 elf_hwcap |= HWCAP_IDIVT; 385 elf_hwcap |= HWCAP_IDIVT;
387 }
388 386
389 /* LPAE implies atomic ldrd/strd instructions */ 387 /* LPAE implies atomic ldrd/strd instructions */
390 vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0; 388 block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
391 if (vmsa >= 5) 389 if (block >= 5)
392 elf_hwcap |= HWCAP_LPAE; 390 elf_hwcap |= HWCAP_LPAE;
391
392 /* check for supported v8 Crypto instructions */
393 isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);
394
395 block = cpuid_feature_extract_field(isar5, 4);
396 if (block >= 2)
397 elf_hwcap2 |= HWCAP2_PMULL;
398 if (block >= 1)
399 elf_hwcap2 |= HWCAP2_AES;
400
401 block = cpuid_feature_extract_field(isar5, 8);
402 if (block >= 1)
403 elf_hwcap2 |= HWCAP2_SHA1;
404
405 block = cpuid_feature_extract_field(isar5, 12);
406 if (block >= 1)
407 elf_hwcap2 |= HWCAP2_SHA2;
408
409 block = cpuid_feature_extract_field(isar5, 16);
410 if (block >= 1)
411 elf_hwcap2 |= HWCAP2_CRC32;
393} 412}
394 413
395static void __init elf_hwcap_fixup(void) 414static void __init elf_hwcap_fixup(void)
396{ 415{
397 unsigned id = read_cpuid_id(); 416 unsigned id = read_cpuid_id();
398 unsigned sync_prim;
399 417
400 /* 418 /*
401 * HWCAP_TLS is available only on 1136 r1p0 and later, 419 * HWCAP_TLS is available only on 1136 r1p0 and later,
@@ -416,9 +434,9 @@ static void __init elf_hwcap_fixup(void)
416 * avoid advertising SWP; it may not be atomic with 434 * avoid advertising SWP; it may not be atomic with
417 * multiprocessing cores. 435 * multiprocessing cores.
418 */ 436 */
419 sync_prim = ((read_cpuid_ext(CPUID_EXT_ISAR3) >> 8) & 0xf0) | 437 if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
420 ((read_cpuid_ext(CPUID_EXT_ISAR4) >> 20) & 0x0f); 438 (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
421 if (sync_prim >= 0x13) 439 cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3))
422 elf_hwcap &= ~HWCAP_SWP; 440 elf_hwcap &= ~HWCAP_SWP;
423} 441}
424 442
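The cpuid_feature_extract() helpers are not shown in this hunk; a sketch of the behaviour the callers appear to assume, treating each 4-bit ID-register field as a signed value so that 0xf reads as -1 ("feature removed") rather than 15. The real helper lives in asm/cputype.h; this is an assumption for illustration:

#include <stdio.h>

static int cpuid_feature_extract_field(unsigned int features, int field)
{
	int feature = (features >> field) & 15;

	if (feature >= 8)       /* two's-complement sign extension */
		feature -= 16;

	return feature;
}

int main(void)
{
	unsigned int isar0 = 0x02101110; /* illustrative ISAR0 value */

	/* bits [27:24] = 2 -> both HWCAP_IDIVA and HWCAP_IDIVT above */
	printf("divide field: %d\n", cpuid_feature_extract_field(isar0, 24));
	return 0;
}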
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index e1e60e5a7a27..7d37bfc50830 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -116,14 +116,7 @@ cpu_resume_after_mmu:
116 ldmfd sp!, {r4 - r11, pc} 116 ldmfd sp!, {r4 - r11, pc}
117ENDPROC(cpu_resume_after_mmu) 117ENDPROC(cpu_resume_after_mmu)
118 118
119/* 119 .text
120 * Note: Yes, part of the following code is located into the .data section.
121 * This is to allow sleep_save_sp to be accessed with a relative load
122 * while we can't rely on any MMU translation. We could have put
123 * sleep_save_sp in the .text section as well, but some setups might
124 * insist on it to be truly read-only.
125 */
126 .data
127 .align 120 .align
128ENTRY(cpu_resume) 121ENTRY(cpu_resume)
129ARM_BE8(setend be) @ ensure we are in BE mode 122ARM_BE8(setend be) @ ensure we are in BE mode
@@ -145,6 +138,8 @@ ARM_BE8(setend be) @ ensure we are in BE mode
145 compute_mpidr_hash r1, r4, r5, r6, r0, r3 138 compute_mpidr_hash r1, r4, r5, r6, r0, r3
1461: 1391:
147 adr r0, _sleep_save_sp 140 adr r0, _sleep_save_sp
141 ldr r2, [r0]
142 add r0, r0, r2
148 ldr r0, [r0, #SLEEP_SAVE_SP_PHYS] 143 ldr r0, [r0, #SLEEP_SAVE_SP_PHYS]
149 ldr r0, [r0, r1, lsl #2] 144 ldr r0, [r0, r1, lsl #2]
150 145
@@ -156,10 +151,12 @@ THUMB( bx r3 )
156ENDPROC(cpu_resume) 151ENDPROC(cpu_resume)
157 152
158 .align 2 153 .align 2
154_sleep_save_sp:
155 .long sleep_save_sp - .
159mpidr_hash_ptr: 156mpidr_hash_ptr:
160 .long mpidr_hash - . @ mpidr_hash struct offset 157 .long mpidr_hash - . @ mpidr_hash struct offset
161 158
159 .data
162 .type sleep_save_sp, #object 160 .type sleep_save_sp, #object
163ENTRY(sleep_save_sp) 161ENTRY(sleep_save_sp)
164_sleep_save_sp:
165 .space SLEEP_SAVE_SP_SZ @ struct sleep_save_sp 162 .space SLEEP_SAVE_SP_SZ @ struct sleep_save_sp
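The relocation trick above stores "sleep_save_sp - ." in a literal next to the code, and the resume path adds the literal's own address back at run time, so the pointer resolves before any MMU translation is available. The same idea sketched in C, with ordinary variables standing in for the assembly symbols:

#include <stdint.h>
#include <stdio.h>

static int32_t sleep_save_sp_demo;  /* stand-in for sleep_save_sp */
static intptr_t literal;            /* stand-in for _sleep_save_sp */

int main(void)
{
	/* link time: store target minus the literal's own address */
	literal = (intptr_t)&sleep_save_sp_demo - (intptr_t)&literal;

	/* run time: ldr r2, [r0]; add r0, r0, r2 */
	void *target = (void *)((intptr_t)&literal + literal);

	printf("resolved %p, expected %p\n",
	       target, (void *)&sleep_save_sp_demo);
	return 0;
}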
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 86ef244c5a24..cca5b8758185 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -145,6 +145,11 @@ void __init smp_init_cpus(void)
145 smp_ops.smp_init_cpus(); 145 smp_ops.smp_init_cpus();
146} 146}
147 147
148int platform_can_secondary_boot(void)
149{
150 return !!smp_ops.smp_boot_secondary;
151}
152
148int platform_can_cpu_hotplug(void) 153int platform_can_cpu_hotplug(void)
149{ 154{
150#ifdef CONFIG_HOTPLUG_CPU 155#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index afdd51e30bec..1361756782c7 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -42,7 +42,7 @@
42 " cmp %0, #0\n" \ 42 " cmp %0, #0\n" \
43 " movne %0, %4\n" \ 43 " movne %0, %4\n" \
44 "2:\n" \ 44 "2:\n" \
45 " .section .fixup,\"ax\"\n" \ 45 " .section .text.fixup,\"ax\"\n" \
46 " .align 2\n" \ 46 " .align 2\n" \
47 "3: mov %0, %5\n" \ 47 "3: mov %0, %5\n" \
48 " b 2b\n" \ 48 " b 2b\n" \
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
new file mode 100644
index 000000000000..efe17dd9b921
--- /dev/null
+++ b/arch/arm/kernel/vdso.c
@@ -0,0 +1,337 @@
1/*
2 * Adapted from arm64 version.
3 *
4 * Copyright (C) 2012 ARM Limited
5 * Copyright (C) 2015 Mentor Graphics Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/elf.h>
21#include <linux/err.h>
22#include <linux/kernel.h>
23#include <linux/mm.h>
24#include <linux/of.h>
25#include <linux/printk.h>
26#include <linux/slab.h>
27#include <linux/timekeeper_internal.h>
28#include <linux/vmalloc.h>
29#include <asm/arch_timer.h>
30#include <asm/barrier.h>
31#include <asm/cacheflush.h>
32#include <asm/page.h>
33#include <asm/vdso.h>
34#include <asm/vdso_datapage.h>
35#include <clocksource/arm_arch_timer.h>
36
37#define MAX_SYMNAME 64
38
39static struct page **vdso_text_pagelist;
40
41/* Total number of pages needed for the data and text portions of the VDSO. */
42unsigned int vdso_total_pages __read_mostly;
43
44/*
45 * The VDSO data page.
46 */
47static union vdso_data_store vdso_data_store __page_aligned_data;
48static struct vdso_data *vdso_data = &vdso_data_store.data;
49
50static struct page *vdso_data_page;
51static struct vm_special_mapping vdso_data_mapping = {
52 .name = "[vvar]",
53 .pages = &vdso_data_page,
54};
55
56static struct vm_special_mapping vdso_text_mapping = {
57 .name = "[vdso]",
58};
59
60struct elfinfo {
61 Elf32_Ehdr *hdr; /* ptr to ELF */
62 Elf32_Sym *dynsym; /* ptr to .dynsym section */
63 unsigned long dynsymsize; /* size of .dynsym section */
64 char *dynstr; /* ptr to .dynstr section */
65};
66
67/* Cached result of boot-time check for whether the arch timer exists,
68 * and if so, whether the virtual counter is useable.
69 */
70static bool cntvct_ok __read_mostly;
71
72static bool __init cntvct_functional(void)
73{
74 struct device_node *np;
75 bool ret = false;
76
77 if (!IS_ENABLED(CONFIG_ARM_ARCH_TIMER))
78 goto out;
79
80 /* The arm_arch_timer core should export
81 * arch_timer_use_virtual or similar so we don't have to do
82 * this.
83 */
84 np = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
85 if (!np)
86 goto out_put;
87
88 if (of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
89 goto out_put;
90
91 ret = true;
92
93out_put:
94 of_node_put(np);
95out:
96 return ret;
97}
98
99static void * __init find_section(Elf32_Ehdr *ehdr, const char *name,
100 unsigned long *size)
101{
102 Elf32_Shdr *sechdrs;
103 unsigned int i;
104 char *secnames;
105
106 /* Grab section headers and strings so we can tell who is who */
107 sechdrs = (void *)ehdr + ehdr->e_shoff;
108 secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;
109
110 /* Find the section they want */
111 for (i = 1; i < ehdr->e_shnum; i++) {
112 if (strcmp(secnames + sechdrs[i].sh_name, name) == 0) {
113 if (size)
114 *size = sechdrs[i].sh_size;
115 return (void *)ehdr + sechdrs[i].sh_offset;
116 }
117 }
118
119 if (size)
120 *size = 0;
121 return NULL;
122}
123
124static Elf32_Sym * __init find_symbol(struct elfinfo *lib, const char *symname)
125{
126 unsigned int i;
127
128 for (i = 0; i < (lib->dynsymsize / sizeof(Elf32_Sym)); i++) {
129 char name[MAX_SYMNAME], *c;
130
131 if (lib->dynsym[i].st_name == 0)
132 continue;
133 strlcpy(name, lib->dynstr + lib->dynsym[i].st_name,
134 MAX_SYMNAME);
135 c = strchr(name, '@');
136 if (c)
137 *c = 0;
138 if (strcmp(symname, name) == 0)
139 return &lib->dynsym[i];
140 }
141 return NULL;
142}
143
144static void __init vdso_nullpatch_one(struct elfinfo *lib, const char *symname)
145{
146 Elf32_Sym *sym;
147
148 sym = find_symbol(lib, symname);
149 if (!sym)
150 return;
151
152 sym->st_name = 0;
153}
154
155static void __init patch_vdso(void *ehdr)
156{
157 struct elfinfo einfo;
158
159 einfo = (struct elfinfo) {
160 .hdr = ehdr,
161 };
162
163 einfo.dynsym = find_section(einfo.hdr, ".dynsym", &einfo.dynsymsize);
164 einfo.dynstr = find_section(einfo.hdr, ".dynstr", NULL);
165
166 /* If the virtual counter is absent or non-functional we don't
167 * want programs to incur the slight additional overhead of
168 * dispatching through the VDSO only to fall back to syscalls.
169 */
170 if (!cntvct_ok) {
171 vdso_nullpatch_one(&einfo, "__vdso_gettimeofday");
172 vdso_nullpatch_one(&einfo, "__vdso_clock_gettime");
173 }
174}
175
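Zeroing st_name hides a symbol because the scan in find_symbol() skips entries whose name offset is 0, so a nullpatched __vdso_* function can never be resolved and the C library falls back to the real syscall. A toy model, with a two-entry table standing in for .dynsym/.dynstr:

#include <stdio.h>
#include <string.h>

struct sym { unsigned int st_name; }; /* offset into a string table */

static const char strtab[] = "\0__vdso_gettimeofday";
static struct sym dynsym[] = { { 0 }, { 1 } };

static struct sym *find(const char *name)
{
	unsigned int i;

	for (i = 0; i < sizeof(dynsym) / sizeof(dynsym[0]); i++) {
		if (dynsym[i].st_name == 0)
			continue; /* unnamed (or nullpatched): invisible */
		if (strcmp(strtab + dynsym[i].st_name, name) == 0)
			return &dynsym[i];
	}
	return NULL;
}

int main(void)
{
	struct sym *s = find("__vdso_gettimeofday");

	printf("before patch: %s\n", s ? "found" : "hidden");
	if (s)
		s->st_name = 0; /* what vdso_nullpatch_one() does */
	printf("after patch:  %s\n",
	       find("__vdso_gettimeofday") ? "found" : "hidden");
	return 0;
}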
176static int __init vdso_init(void)
177{
178 unsigned int text_pages;
179 int i;
180
181 if (memcmp(&vdso_start, "\177ELF", 4)) {
182 pr_err("VDSO is not a valid ELF object!\n");
183 return -ENOEXEC;
184 }
185
186 text_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
187 pr_debug("vdso: %i text pages at base %p\n", text_pages, &vdso_start);
188
189 /* Allocate the VDSO text pagelist */
190 vdso_text_pagelist = kcalloc(text_pages, sizeof(struct page *),
191 GFP_KERNEL);
192 if (vdso_text_pagelist == NULL)
193 return -ENOMEM;
194
195 /* Grab the VDSO data page. */
196 vdso_data_page = virt_to_page(vdso_data);
197
198 /* Grab the VDSO text pages. */
199 for (i = 0; i < text_pages; i++) {
200 struct page *page;
201
202 page = virt_to_page(&vdso_start + i * PAGE_SIZE);
203 vdso_text_pagelist[i] = page;
204 }
205
206 vdso_text_mapping.pages = vdso_text_pagelist;
207
208 vdso_total_pages = 1; /* for the data/vvar page */
209 vdso_total_pages += text_pages;
210
211 cntvct_ok = cntvct_functional();
212
213 patch_vdso(&vdso_start);
214
215 return 0;
216}
217arch_initcall(vdso_init);
218
219static int install_vvar(struct mm_struct *mm, unsigned long addr)
220{
221 struct vm_area_struct *vma;
222
223 vma = _install_special_mapping(mm, addr, PAGE_SIZE,
224 VM_READ | VM_MAYREAD,
225 &vdso_data_mapping);
226
227 return IS_ERR(vma) ? PTR_ERR(vma) : 0;
228}
229
230/* assumes mmap_sem is write-locked */
231void arm_install_vdso(struct mm_struct *mm, unsigned long addr)
232{
233 struct vm_area_struct *vma;
234 unsigned long len;
235
236 mm->context.vdso = 0;
237
238 if (vdso_text_pagelist == NULL)
239 return;
240
241 if (install_vvar(mm, addr))
242 return;
243
244 /* Account for vvar page. */
245 addr += PAGE_SIZE;
246 len = (vdso_total_pages - 1) << PAGE_SHIFT;
247
248 vma = _install_special_mapping(mm, addr, len,
249 VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
250 &vdso_text_mapping);
251
252 if (!IS_ERR(vma))
253 mm->context.vdso = addr;
254}
255
256static void vdso_write_begin(struct vdso_data *vdata)
257{
258 ++vdso_data->seq_count;
259 smp_wmb(); /* Pairs with smp_rmb in vdso_read_retry */
260}
261
262static void vdso_write_end(struct vdso_data *vdata)
263{
264 smp_wmb(); /* Pairs with smp_rmb in vdso_read_begin */
265 ++vdso_data->seq_count;
266}
267
268static bool tk_is_cntvct(const struct timekeeper *tk)
269{
270 if (!IS_ENABLED(CONFIG_ARM_ARCH_TIMER))
271 return false;
272
273 if (strcmp(tk->tkr_mono.clock->name, "arch_sys_counter") != 0)
274 return false;
275
276 return true;
277}
278
279/**
280 * update_vsyscall - update the vdso data page
281 *
282 * Increment the sequence counter, making it odd, indicating to
283 * userspace that an update is in progress. Update the fields used
284 * for coarse clocks and, if the architected system timer is in use,
285 * the fields used for high precision clocks. Increment the sequence
286 * counter again, making it even, indicating to userspace that the
287 * update is finished.
288 *
289 * Userspace is expected to sample seq_count before reading any other
290 * fields from the data page. If seq_count is odd, userspace is
291 * expected to wait until it becomes even. After copying data from
292 * the page, userspace must sample seq_count again; if it has changed
293 * from its previous value, userspace must retry the whole sequence.
294 *
295 * Calls to update_vsyscall are serialized by the timekeeping core.
296 */
297void update_vsyscall(struct timekeeper *tk)
298{
299 struct timespec xtime_coarse;
300 struct timespec64 *wtm = &tk->wall_to_monotonic;
301
302 if (!cntvct_ok) {
303 /* The entry points have been zeroed, so there is no
304 * point in updating the data page.
305 */
306 return;
307 }
308
309 vdso_write_begin(vdso_data);
310
311 xtime_coarse = __current_kernel_time();
312 vdso_data->tk_is_cntvct = tk_is_cntvct(tk);
313 vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
314 vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
315 vdso_data->wtm_clock_sec = wtm->tv_sec;
316 vdso_data->wtm_clock_nsec = wtm->tv_nsec;
317
318 if (vdso_data->tk_is_cntvct) {
319 vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
320 vdso_data->xtime_clock_sec = tk->xtime_sec;
321 vdso_data->xtime_clock_snsec = tk->tkr_mono.xtime_nsec;
322 vdso_data->cs_mult = tk->tkr_mono.mult;
323 vdso_data->cs_shift = tk->tkr_mono.shift;
324 vdso_data->cs_mask = tk->tkr_mono.mask;
325 }
326
327 vdso_write_end(vdso_data);
328
329 flush_dcache_page(virt_to_page(vdso_data));
330}
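For completeness, a sketch of the userspace read side that the comment above implies; the struct is a stand-in for struct vdso_data with only the coarse-clock fields, and a compiler barrier models smp_rmb():

#include <stdint.h>

struct data {
	uint32_t seq_count;
	uint32_t xtime_coarse_sec;
	uint32_t xtime_coarse_nsec;
};

void read_coarse(volatile struct data *d, uint32_t *sec, uint32_t *nsec)
{
	uint32_t seq;

	do {
		while ((seq = d->seq_count) & 1)
			;                              /* odd: update in progress */
		__asm__ volatile("" ::: "memory");     /* stands in for smp_rmb() */
		*sec  = d->xtime_coarse_sec;
		*nsec = d->xtime_coarse_nsec;
		__asm__ volatile("" ::: "memory");
	} while (d->seq_count != seq);                 /* changed: retry */
}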
331
332void update_vsyscall_tz(void)
333{
334 vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
335 vdso_data->tz_dsttime = sys_tz.tz_dsttime;
336 flush_dcache_page(virt_to_page(vdso_data));
337}
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index b31aa73e8076..7a301be9ac67 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -74,7 +74,7 @@ SECTIONS
74 ARM_EXIT_DISCARD(EXIT_DATA) 74 ARM_EXIT_DISCARD(EXIT_DATA)
75 EXIT_CALL 75 EXIT_CALL
76#ifndef CONFIG_MMU 76#ifndef CONFIG_MMU
77 *(.fixup) 77 *(.text.fixup)
78 *(__ex_table) 78 *(__ex_table)
79#endif 79#endif
80#ifndef CONFIG_SMP_ON_UP 80#ifndef CONFIG_SMP_ON_UP
@@ -100,6 +100,7 @@ SECTIONS
100 100
101 .text : { /* Real text segment */ 101 .text : { /* Real text segment */
102 _stext = .; /* Text and read-only data */ 102 _stext = .; /* Text and read-only data */
103 IDMAP_TEXT
103 __exception_text_start = .; 104 __exception_text_start = .;
104 *(.exception.text) 105 *(.exception.text)
105 __exception_text_end = .; 106 __exception_text_end = .;
@@ -108,10 +109,6 @@ SECTIONS
108 SCHED_TEXT 109 SCHED_TEXT
109 LOCK_TEXT 110 LOCK_TEXT
110 KPROBES_TEXT 111 KPROBES_TEXT
111 IDMAP_TEXT
112#ifdef CONFIG_MMU
113 *(.fixup)
114#endif
115 *(.gnu.warning) 112 *(.gnu.warning)
116 *(.glue_7) 113 *(.glue_7)
117 *(.glue_7t) 114 *(.glue_7t)
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index 14a0d988c82c..1710fd7db2d5 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -47,7 +47,7 @@ USER( strnebt r2, [r0])
47ENDPROC(__clear_user) 47ENDPROC(__clear_user)
48ENDPROC(__clear_user_std) 48ENDPROC(__clear_user_std)
49 49
50 .pushsection .fixup,"ax" 50 .pushsection .text.fixup,"ax"
51 .align 0 51 .align 0
529001: ldmfd sp!, {r0, pc} 529001: ldmfd sp!, {r0, pc}
53 .popsection 53 .popsection
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index a9d3db16ecb5..9648b0675a3e 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -100,7 +100,7 @@ WEAK(__copy_to_user)
100ENDPROC(__copy_to_user) 100ENDPROC(__copy_to_user)
101ENDPROC(__copy_to_user_std) 101ENDPROC(__copy_to_user_std)
102 102
103 .pushsection .fixup,"ax" 103 .pushsection .text.fixup,"ax"
104 .align 0 104 .align 0
105 copy_abort_preamble 105 copy_abort_preamble
106 ldmfd sp!, {r1, r2, r3} 106 ldmfd sp!, {r1, r2, r3}
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 7d08b43d2c0e..1d0957e61f89 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -68,7 +68,7 @@
68 * so properly, we would have to add in whatever registers were loaded before 68 * so properly, we would have to add in whatever registers were loaded before
69 * the fault, which, with the current asm above is not predictable. 69 * the fault, which, with the current asm above is not predictable.
70 */ 70 */
71 .pushsection .fixup,"ax" 71 .pushsection .text.fixup,"ax"
72 .align 4 72 .align 4
739001: mov r4, #-EFAULT 739001: mov r4, #-EFAULT
74 ldr r5, [sp, #8*4] @ *err_ptr 74 ldr r5, [sp, #8*4] @ *err_ptr
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
index 312d43eb686a..8044591dca72 100644
--- a/arch/arm/lib/delay.c
+++ b/arch/arm/lib/delay.c
@@ -83,6 +83,12 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
83 NSEC_PER_SEC, 3600); 83 NSEC_PER_SEC, 3600);
84 res = cyc_to_ns(1ULL, new_mult, new_shift); 84 res = cyc_to_ns(1ULL, new_mult, new_shift);
85 85
86 if (res > 1000) {
87 pr_err("Ignoring delay timer %ps, which has insufficient resolution of %lluns\n",
88 timer, res);
89 return;
90 }
91
86 if (!delay_calibrated && (!delay_res || (res < delay_res))) { 92 if (!delay_calibrated && (!delay_res || (res < delay_res))) {
87 pr_info("Switching to timer-based delay loop, resolution %lluns\n", res); 93 pr_info("Switching to timer-based delay loop, resolution %lluns\n", res);
88 delay_timer = timer; 94 delay_timer = timer;
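The new check converts one timer cycle to nanoseconds with the freshly computed mult/shift pair and refuses anything coarser than 1000ns, i.e. timers slower than 1MHz. A sketch with illustrative mult/shift values (the kernel derives them via clocks_calc_mult_shift()):

#include <stdio.h>

static unsigned long long cyc_to_ns(unsigned long long cyc,
				    unsigned int mult, unsigned int shift)
{
	return (cyc * mult) >> shift;
}

int main(void)
{
	/* 24 MHz timer: ~41ns per cycle -> accepted (res <= 1000) */
	printf("24MHz: %lluns\n", cyc_to_ns(1, 42667, 10));
	/* 32.768 kHz timer: ~30517ns per cycle -> now ignored */
	printf("32kHz: %lluns\n", cyc_to_ns(1, 31250000, 10));
	return 0;
}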
diff --git a/arch/arm/mach-exynos/sleep.S b/arch/arm/mach-exynos/sleep.S
index 31d25834b9c4..cf950790fbdc 100644
--- a/arch/arm/mach-exynos/sleep.S
+++ b/arch/arm/mach-exynos/sleep.S
@@ -23,14 +23,7 @@
23#define CPU_MASK 0xff0ffff0 23#define CPU_MASK 0xff0ffff0
24#define CPU_CORTEX_A9 0x410fc090 24#define CPU_CORTEX_A9 0x410fc090
25 25
26 /* 26 .text
27 * The following code is located into the .data section. This is to
28 * allow l2x0_regs_phys to be accessed with a relative load while we
29 * can't rely on any MMU translation. We could have put l2x0_regs_phys
30 * in the .text section as well, but some setups might insist on it to
31 * be truly read-only. (Reference from: arch/arm/kernel/sleep.S)
32 */
33 .data
34 .align 27 .align
35 28
36 /* 29 /*
@@ -69,10 +62,12 @@ ENTRY(exynos_cpu_resume_ns)
69 cmp r0, r1 62 cmp r0, r1
70 bne skip_cp15 63 bne skip_cp15
71 64
72 adr r0, cp15_save_power 65 adr r0, _cp15_save_power
73 ldr r1, [r0] 66 ldr r1, [r0]
74 adr r0, cp15_save_diag 67 ldr r1, [r0, r1]
68 adr r0, _cp15_save_diag
75 ldr r2, [r0] 69 ldr r2, [r0]
70 ldr r2, [r0, r2]
76 mov r0, #SMC_CMD_C15RESUME 71 mov r0, #SMC_CMD_C15RESUME
77 dsb 72 dsb
78 smc #0 73 smc #0
@@ -118,14 +113,20 @@ skip_l2x0:
118skip_cp15: 113skip_cp15:
119 b cpu_resume 114 b cpu_resume
120ENDPROC(exynos_cpu_resume_ns) 115ENDPROC(exynos_cpu_resume_ns)
116
117 .align
118_cp15_save_power:
119 .long cp15_save_power - .
120_cp15_save_diag:
121 .long cp15_save_diag - .
122#ifdef CONFIG_CACHE_L2X0
1231: .long l2x0_saved_regs - .
124#endif /* CONFIG_CACHE_L2X0 */
125
126 .data
121 .globl cp15_save_diag 127 .globl cp15_save_diag
122cp15_save_diag: 128cp15_save_diag:
123 .long 0 @ cp15 diagnostic 129 .long 0 @ cp15 diagnostic
124 .globl cp15_save_power 130 .globl cp15_save_power
125cp15_save_power: 131cp15_save_power:
126 .long 0 @ cp15 power control 132 .long 0 @ cp15 power control
127
128#ifdef CONFIG_CACHE_L2X0
129 .align
1301: .long l2x0_saved_regs - .
131#endif /* CONFIG_CACHE_L2X0 */
diff --git a/arch/arm/mach-s5pv210/sleep.S b/arch/arm/mach-s5pv210/sleep.S
index 7c43ddd33ba8..dfbfc0f7f8b8 100644
--- a/arch/arm/mach-s5pv210/sleep.S
+++ b/arch/arm/mach-s5pv210/sleep.S
@@ -14,7 +14,7 @@
14 14
15#include <linux/linkage.h> 15#include <linux/linkage.h>
16 16
17 .data 17 .text
18 .align 18 .align
19 19
20 /* 20 /*
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index 3c2509b4b694..4be537977040 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -42,6 +42,7 @@ if ARCH_VEXPRESS
42config ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA 42config ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA
43 bool "Enable A5 and A9 only errata work-arounds" 43 bool "Enable A5 and A9 only errata work-arounds"
44 default y 44 default y
45 select ARM_ERRATA_643719 if SMP
45 select ARM_ERRATA_720789 46 select ARM_ERRATA_720789
46 select PL310_ERRATA_753970 if CACHE_L2X0 47 select PL310_ERRATA_753970 if CACHE_L2X0
47 help 48 help
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 9b4f29e595a4..b7644310236b 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -738,7 +738,7 @@ config CPU_ICACHE_DISABLE
738 738
739config CPU_DCACHE_DISABLE 739config CPU_DCACHE_DISABLE
740 bool "Disable D-Cache (C-bit)" 740 bool "Disable D-Cache (C-bit)"
741 depends on CPU_CP15 741 depends on CPU_CP15 && !SMP
742 help 742 help
743 Say Y here to disable the processor data cache. Unless 743 Say Y here to disable the processor data cache. Unless
744 you have a reason not to or are unsure, say N. 744 you have a reason not to or are unsure, say N.
@@ -825,6 +825,20 @@ config KUSER_HELPERS
825 Say N here only if you are absolutely certain that you do not 825 Say N here only if you are absolutely certain that you do not
826 need these helpers; otherwise, the safe option is to say Y. 826 need these helpers; otherwise, the safe option is to say Y.
827 827
828config VDSO
829 bool "Enable VDSO for acceleration of some system calls"
830 depends on AEABI && MMU
831 default y if ARM_ARCH_TIMER
832 select GENERIC_TIME_VSYSCALL
833 help
834 Place in the process address space an ELF shared object
835 providing fast implementations of gettimeofday and
836 clock_gettime. Systems that implement the ARM architected
837 timer will receive maximum benefit.
838
839 You must have glibc 2.22 or later for programs to seamlessly
840 take advantage of this.
841
828config DMA_CACHE_RWFO 842config DMA_CACHE_RWFO
829 bool "Enable read/write for ownership DMA cache maintenance" 843 bool "Enable read/write for ownership DMA cache maintenance"
830 depends on CPU_V6K && SMP 844 depends on CPU_V6K && SMP
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 2c0c541c60ca..9769f1eefe3b 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -201,7 +201,7 @@ union offset_union {
 	THUMB(	"1:	"ins"	%1, [%2]\n"	)		\
 	THUMB(	"	add	%2, %2, #1\n"	)		\
 	"2:\n"							\
-	"	.pushsection .fixup,\"ax\"\n"			\
+	"	.pushsection .text.fixup,\"ax\"\n"		\
 	"	.align	2\n"					\
 	"3:	mov	%0, #1\n"				\
 	"	b	2b\n"					\
@@ -261,7 +261,7 @@ union offset_union {
 	"	mov	%1, %1, "NEXT_BYTE"\n"			\
 	"2:	"ins"	%1, [%2]\n"				\
 	"3:\n"							\
-	"	.pushsection .fixup,\"ax\"\n"			\
+	"	.pushsection .text.fixup,\"ax\"\n"		\
 	"	.align	2\n"					\
 	"4:	mov	%0, #1\n"				\
 	"	b	3b\n"					\
@@ -301,7 +301,7 @@ union offset_union {
 	"	mov	%1, %1, "NEXT_BYTE"\n"			\
 	"4:	"ins"	%1, [%2]\n"				\
 	"5:\n"							\
-	"	.pushsection .fixup,\"ax\"\n"			\
+	"	.pushsection .text.fixup,\"ax\"\n"		\
 	"	.align	2\n"					\
 	"6:	mov	%0, #1\n"				\
 	"	b	5b\n"					\
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 8f15f70622a6..e309c8f35af5 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -1647,6 +1647,7 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
 	struct device_node *np;
 	struct resource res;
 	u32 cache_id, old_aux;
+	u32 cache_level = 2;
 
 	np = of_find_matching_node(NULL, l2x0_ids);
 	if (!np)
@@ -1679,6 +1680,12 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
 	if (!of_property_read_bool(np, "cache-unified"))
 		pr_err("L2C: device tree omits to specify unified cache\n");
 
+	if (of_property_read_u32(np, "cache-level", &cache_level))
+		pr_err("L2C: device tree omits to specify cache-level\n");
+
+	if (cache_level != 2)
+		pr_err("L2C: device tree specifies invalid cache level\n");
+
 	/* Read back current (default) hardware configuration */
 	if (data->save)
 		data->save(l2x0_base);
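
The cache-level hunk relies on of_property_read_u32() leaving its output untouched on failure, which is what makes presetting cache_level = 2 a safe default. A hedged sketch of the same idiom as a stand-alone helper (dt_u32_default is hypothetical, not part of this patch):

	#include <linux/of.h>

	/* Hypothetical helper: read an optional u32 DT property with a
	 * fallback. Works because of_property_read_u32() does not write
	 * to *val on error. */
	static u32 dt_u32_default(struct device_node *np, const char *prop,
				  u32 def)
	{
		u32 val = def;

		of_property_read_u32(np, prop, &val);
		return val;
	}
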
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index b966656d2c2d..a134d8a13d00 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -36,10 +36,10 @@ ENTRY(v7_invalidate_l1)
 	mcr	p15, 2, r0, c0, c0, 0
 	mrc	p15, 1, r0, c0, c0, 0
 
-	ldr	r1, =0x7fff
+	movw	r1, #0x7fff
 	and	r2, r1, r0, lsr #13
 
-	ldr	r1, =0x3ff
+	movw	r1, #0x3ff
 
 	and	r3, r1, r0, lsr #3	@ NumWays - 1
 	add	r2, r2, #1		@ NumSets
@@ -90,21 +90,20 @@ ENDPROC(v7_flush_icache_all)
 ENTRY(v7_flush_dcache_louis)
 	dmb					@ ensure ordering with previous memory accesses
 	mrc	p15, 1, r0, c0, c0, 1		@ read clidr, r0 = clidr
-	ALT_SMP(ands	r3, r0, #(7 << 21))	@ extract LoUIS from clidr
-	ALT_UP(ands	r3, r0, #(7 << 27))	@ extract LoUU from clidr
+ALT_SMP(mov	r3, r0, lsr #20)		@ move LoUIS into position
+ALT_UP(	mov	r3, r0, lsr #26)		@ move LoUU into position
+	ands	r3, r3, #7 << 1			@ extract LoU*2 field from clidr
+	bne	start_flush_levels		@ LoU != 0, start flushing
 #ifdef CONFIG_ARM_ERRATA_643719
-	ALT_SMP(mrceq	p15, 0, r2, c0, c0, 0)	@ read main ID register
-	ALT_UP(reteq	lr)			@ LoUU is zero, so nothing to do
-	ldreq	r1, =0x410fc090			@ ID of ARM Cortex A9 r0p?
-	biceq	r2, r2, #0x0000000f		@ clear minor revision number
-	teqeq	r2, r1				@ test for errata affected core and if so...
-	orreqs	r3, #(1 << 21)			@ fix LoUIS value (and set flags state to 'ne')
+ALT_SMP(mrc	p15, 0, r2, c0, c0, 0)		@ read main ID register
+ALT_UP(	ret	lr)				@ LoUU is zero, so nothing to do
+	movw	r1, #:lower16:(0x410fc090 >> 4)	@ ID of ARM Cortex A9 r0p?
+	movt	r1, #:upper16:(0x410fc090 >> 4)
+	teq	r1, r2, lsr #4			@ test for errata affected core and if so...
+	moveq	r3, #1 << 1			@ fix LoUIS value
+	beq	start_flush_levels		@ start flushing cache levels
 #endif
-	ALT_SMP(mov	r3, r3, lsr #20)	@ r3 = LoUIS * 2
-	ALT_UP(mov	r3, r3, lsr #26)	@ r3 = LoUU * 2
-	reteq	lr				@ return if level == 0
-	mov	r10, #0				@ r10 (starting level) = 0
-	b	flush_levels			@ start flushing cache levels
+	ret	lr
 ENDPROC(v7_flush_dcache_louis)
 
 /*
@@ -119,9 +118,10 @@ ENDPROC(v7_flush_dcache_louis)
 ENTRY(v7_flush_dcache_all)
 	dmb					@ ensure ordering with previous memory accesses
 	mrc	p15, 1, r0, c0, c0, 1		@ read clidr
-	ands	r3, r0, #0x7000000		@ extract loc from clidr
-	mov	r3, r3, lsr #23			@ left align loc bit field
+	mov	r3, r0, lsr #23			@ move LoC into position
+	ands	r3, r3, #7 << 1			@ extract LoC*2 from clidr
 	beq	finished			@ if loc is 0, then no need to clean
+start_flush_levels:
 	mov	r10, #0				@ start clean at cache level 0
 flush_levels:
 	add	r2, r10, r10, lsr #1		@ work out 3x current cache level
@@ -140,10 +140,10 @@ flush_levels:
 #endif
 	and	r2, r1, #7			@ extract the length of the cache lines
 	add	r2, r2, #4			@ add 4 (line length offset)
-	ldr	r4, =0x3ff
+	movw	r4, #0x3ff
 	ands	r4, r4, r1, lsr #3		@ find maximum number on the way size
 	clz	r5, r4				@ find bit position of way size increment
-	ldr	r7, =0x7fff
+	movw	r7, #0x7fff
 	ands	r7, r7, r1, lsr #13		@ extract max number of the index size
 loop1:
 	mov	r9, r7				@ create working copy of max index
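
The movw constants above mirror the CCSIDR layout: LineSize sits in bits [2:0], Associativity-1 in [12:3] (hence 0x3ff), and NumSets-1 in [27:13] (hence 0x7fff). A rough C rendering of the same decode, assuming ccsidr was already read via CP15:

	/* Sketch only: decode CCSIDR the way the set/way loops do. */
	static void ccsidr_decode(u32 ccsidr)
	{
		u32 line_shift = (ccsidr & 0x7) + 4;	/* log2(bytes per line) */
		u32 max_way = (ccsidr >> 3) & 0x3ff;	/* associativity - 1 */
		u32 max_set = (ccsidr >> 13) & 0x7fff;	/* number of sets - 1 */

		pr_info("line=%u bytes, %u ways, %u sets\n",
			1U << line_shift, max_way + 1, max_set + 1);
	}
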
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e315dfe3af1b..09c5fe3d30c2 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -289,11 +289,11 @@ static void __dma_free_buffer(struct page *page, size_t size)
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
 				     pgprot_t prot, struct page **ret_page,
-				     const void *caller);
+				     const void *caller, bool want_vaddr);
 
 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
 				  pgprot_t prot, struct page **ret_page,
-				  const void *caller);
+				  const void *caller, bool want_vaddr);
 
 static void *
 __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
@@ -357,10 +357,10 @@ static int __init atomic_pool_init(void)
 
 	if (dev_get_cma_area(NULL))
 		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
-					      &page, atomic_pool_init);
+					      &page, atomic_pool_init, true);
 	else
 		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
-					   &page, atomic_pool_init);
+					   &page, atomic_pool_init, true);
 	if (ptr) {
 		int ret;
 
@@ -467,13 +467,15 @@ static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
 
 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
 				  pgprot_t prot, struct page **ret_page,
-				  const void *caller)
+				  const void *caller, bool want_vaddr)
 {
 	struct page *page;
-	void *ptr;
+	void *ptr = NULL;
 	page = __dma_alloc_buffer(dev, size, gfp);
 	if (!page)
 		return NULL;
+	if (!want_vaddr)
+		goto out;
 
 	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
 	if (!ptr) {
@@ -481,6 +483,7 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
 		return NULL;
 	}
 
+ out:
 	*ret_page = page;
 	return ptr;
 }
@@ -523,12 +526,12 @@ static int __free_from_pool(void *start, size_t size)
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
 				     pgprot_t prot, struct page **ret_page,
-				     const void *caller)
+				     const void *caller, bool want_vaddr)
 {
 	unsigned long order = get_order(size);
 	size_t count = size >> PAGE_SHIFT;
 	struct page *page;
-	void *ptr;
+	void *ptr = NULL;
 
 	page = dma_alloc_from_contiguous(dev, count, order);
 	if (!page)
@@ -536,6 +539,9 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
 
 	__dma_clear_buffer(page, size);
 
+	if (!want_vaddr)
+		goto out;
+
 	if (PageHighMem(page)) {
 		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
 		if (!ptr) {
@@ -546,17 +552,21 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
 		__dma_remap(page, size, prot);
 		ptr = page_address(page);
 	}
+
+ out:
 	*ret_page = page;
 	return ptr;
 }
 
 static void __free_from_contiguous(struct device *dev, struct page *page,
-				   void *cpu_addr, size_t size)
+				   void *cpu_addr, size_t size, bool want_vaddr)
 {
-	if (PageHighMem(page))
-		__dma_free_remap(cpu_addr, size);
-	else
-		__dma_remap(page, size, PAGE_KERNEL);
+	if (want_vaddr) {
+		if (PageHighMem(page))
+			__dma_free_remap(cpu_addr, size);
+		else
+			__dma_remap(page, size, PAGE_KERNEL);
+	}
 	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
 }
 
@@ -574,12 +584,12 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
 
 #define nommu() 1
 
 #define __get_dma_pgprot(attrs, prot)				__pgprot(0)
-#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)	NULL
+#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv)	NULL
 #define __alloc_from_pool(size, ret_page)			NULL
-#define __alloc_from_contiguous(dev, size, prot, ret, c)	NULL
+#define __alloc_from_contiguous(dev, size, prot, ret, c, wv)	NULL
 #define __free_from_pool(cpu_addr, size)			0
-#define __free_from_contiguous(dev, page, cpu_addr, size)	do { } while (0)
+#define __free_from_contiguous(dev, page, cpu_addr, size, wv)	do { } while (0)
 #define __dma_free_remap(cpu_addr, size)			do { } while (0)
 
 #endif	/* CONFIG_MMU */
@@ -599,11 +609,13 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
 
 
 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
-			 gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
+			 gfp_t gfp, pgprot_t prot, bool is_coherent,
+			 struct dma_attrs *attrs, const void *caller)
 {
 	u64 mask = get_coherent_dma_mask(dev);
 	struct page *page = NULL;
 	void *addr;
+	bool want_vaddr;
 
 #ifdef CONFIG_DMA_API_DEBUG
 	u64 limit = (mask + 1) & ~mask;
@@ -631,20 +643,21 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 
 	*handle = DMA_ERROR_CODE;
 	size = PAGE_ALIGN(size);
+	want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
 
 	if (is_coherent || nommu())
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
 	else if (!(gfp & __GFP_WAIT))
 		addr = __alloc_from_pool(size, &page);
 	else if (!dev_get_cma_area(dev))
-		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
+		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr);
 	else
-		addr = __alloc_from_contiguous(dev, size, prot, &page, caller);
+		addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr);
 
-	if (addr)
+	if (page)
 		*handle = pfn_to_dma(dev, page_to_pfn(page));
 
-	return addr;
+	return want_vaddr ? addr : page;
 }
 
 /*
@@ -661,7 +674,7 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 		return memory;
 
 	return __dma_alloc(dev, size, handle, gfp, prot, false,
-			   __builtin_return_address(0));
+			   attrs, __builtin_return_address(0));
 }
 
 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
@@ -674,7 +687,7 @@ static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
 		return memory;
 
 	return __dma_alloc(dev, size, handle, gfp, prot, true,
-			   __builtin_return_address(0));
+			   attrs, __builtin_return_address(0));
 }
 
 /*
@@ -715,6 +728,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 			  bool is_coherent)
 {
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+	bool want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
 
 	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
 		return;
@@ -726,14 +740,15 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 	} else if (__free_from_pool(cpu_addr, size)) {
 		return;
 	} else if (!dev_get_cma_area(dev)) {
-		__dma_free_remap(cpu_addr, size);
+		if (want_vaddr)
+			__dma_free_remap(cpu_addr, size);
 		__dma_free_buffer(page, size);
 	} else {
 		/*
 		 * Non-atomic allocations cannot be freed with IRQs disabled
 		 */
 		WARN_ON(irqs_disabled());
-		__free_from_contiguous(dev, page, cpu_addr, size);
+		__free_from_contiguous(dev, page, cpu_addr, size, want_vaddr);
 	}
 }
 
@@ -1135,13 +1150,28 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
 	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
 
 	while (count) {
-		int j, order = __fls(count);
+		int j, order;
+
+		for (order = __fls(count); order > 0; --order) {
+			/*
+			 * We do not want OOM killer to be invoked as long
+			 * as we can fall back to single pages, so we force
+			 * __GFP_NORETRY for orders higher than zero.
+			 */
+			pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);
+			if (pages[i])
+				break;
+		}
 
-		pages[i] = alloc_pages(gfp, order);
-		while (!pages[i] && order)
-			pages[i] = alloc_pages(gfp, --order);
-		if (!pages[i])
-			goto error;
+		if (!pages[i]) {
+			/*
+			 * Fall back to single page allocation.
+			 * Might invoke OOM killer as last resort.
+			 */
+			pages[i] = alloc_pages(gfp, 0);
+			if (!pages[i])
+				goto error;
+		}
 
 		if (order) {
 			split_page(pages[i], order);
@@ -1206,7 +1236,7 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
 static dma_addr_t
 __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
 {
-	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	dma_addr_t dma_addr, iova;
 	int i, ret = DMA_ERROR_CODE;
@@ -1242,7 +1272,7 @@ fail:
 
 static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
 {
-	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 
 	/*
 	 * add optional in-page offset from iova to size and align
@@ -1457,7 +1487,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 			  enum dma_data_direction dir, struct dma_attrs *attrs,
 			  bool is_coherent)
 {
-	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova, iova_base;
 	int ret = 0;
 	unsigned int count;
@@ -1678,7 +1708,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     struct dma_attrs *attrs)
 {
-	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t dma_addr;
 	int ret, prot, len = PAGE_ALIGN(size + offset);
 
@@ -1731,7 +1761,7 @@ static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
-	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova = handle & PAGE_MASK;
 	int offset = handle & ~PAGE_MASK;
 	int len = PAGE_ALIGN(size + offset);
@@ -1756,7 +1786,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
-	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova = handle & PAGE_MASK;
 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
 	int offset = handle & ~PAGE_MASK;
@@ -1775,7 +1805,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 static void arm_iommu_sync_single_for_cpu(struct device *dev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova = handle & PAGE_MASK;
 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
 	unsigned int offset = handle & ~PAGE_MASK;
@@ -1789,7 +1819,7 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
 static void arm_iommu_sync_single_for_device(struct device *dev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova = handle & PAGE_MASK;
 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
 	unsigned int offset = handle & ~PAGE_MASK;
@@ -1950,7 +1980,7 @@ static int __arm_iommu_attach_device(struct device *dev,
 		return err;
 
 	kref_get(&mapping->kref);
-	dev->archdata.mapping = mapping;
+	to_dma_iommu_mapping(dev) = mapping;
 
 	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
 	return 0;
@@ -1995,7 +2025,7 @@ static void __arm_iommu_detach_device(struct device *dev)
 
 	iommu_detach_device(mapping->domain, dev);
 	kref_put(&mapping->kref, release_iommu_mapping);
-	dev->archdata.mapping = NULL;
+	to_dma_iommu_mapping(dev) = NULL;
 
 	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
 }
@@ -2053,7 +2083,7 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
 
 static void arm_teardown_iommu_dma_ops(struct device *dev)
 {
-	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 
 	if (!mapping)
 		return;
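
For the want_vaddr plumbing above, the consumer side looks roughly like this: a driver passes DMA_ATTR_NO_KERNEL_MAPPING through the dma_attrs API and treats the returned pointer purely as a cookie for the matching free. A sketch under this kernel's struct dma_attrs interface (not taken from this patch):

	static int no_kmap_alloc_example(struct device *dev)
	{
		DEFINE_DMA_ATTRS(attrs);
		dma_addr_t dma;
		void *cookie;

		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
		cookie = dma_alloc_attrs(dev, SZ_1M, &dma, GFP_KERNEL, &attrs);
		if (!cookie)
			return -ENOMEM;
		/* program the device with 'dma'; never dereference 'cookie' */
		dma_free_attrs(dev, SZ_1M, cookie, dma, &attrs);
		return 0;
	}
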
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 3d0e9aed4b40..be92fa0f2f35 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -86,55 +86,6 @@ static int __init parse_tag_initrd2(const struct tag *tag)
 
 __tagtable(ATAG_INITRD2, parse_tag_initrd2);
 
-/*
- * This keeps memory configuration data used by a couple memory
- * initialization functions, as well as show_mem() for the skipping
- * of holes in the memory map. It is populated by arm_add_memory().
- */
-void show_mem(unsigned int filter)
-{
-	int free = 0, total = 0, reserved = 0;
-	int shared = 0, cached = 0, slab = 0;
-	struct memblock_region *reg;
-
-	printk("Mem-info:\n");
-	show_free_areas(filter);
-
-	for_each_memblock(memory, reg) {
-		unsigned int pfn1, pfn2;
-		struct page *page, *end;
-
-		pfn1 = memblock_region_memory_base_pfn(reg);
-		pfn2 = memblock_region_memory_end_pfn(reg);
-
-		page = pfn_to_page(pfn1);
-		end = pfn_to_page(pfn2 - 1) + 1;
-
-		do {
-			total++;
-			if (PageReserved(page))
-				reserved++;
-			else if (PageSwapCache(page))
-				cached++;
-			else if (PageSlab(page))
-				slab++;
-			else if (!page_count(page))
-				free++;
-			else
-				shared += page_count(page) - 1;
-			pfn1++;
-			page = pfn_to_page(pfn1);
-		} while (pfn1 < pfn2);
-	}
-
-	printk("%d pages of RAM\n", total);
-	printk("%d free pages\n", free);
-	printk("%d reserved pages\n", reserved);
-	printk("%d slab pages\n", slab);
-	printk("%d pages shared\n", shared);
-	printk("%d pages swap cached\n", cached);
-}
-
 static void __init find_limits(unsigned long *min, unsigned long *max_low,
 			       unsigned long *max_high)
 {
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 86ee5d47ce3c..aa0519eed698 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -507,7 +507,7 @@ cpu_arm1020_name:
 
 	.align
 
-	.section ".proc.info.init", #alloc, #execinstr
+	.section ".proc.info.init", #alloc
 
 	.type	__arm1020_proc_info,#object
 __arm1020_proc_info:
@@ -519,7 +519,7 @@ __arm1020_proc_info:
 	.long   PMD_TYPE_SECT | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ
-	b	__arm1020_setup
+	initfn	__arm1020_setup, __arm1020_proc_info
 	.long	cpu_arch_name
 	.long	cpu_elf_name
 	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
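
The initfn change replaces the absolute branch embedded in each proc_info record with a position-independent offset (see the initfn macro added to proc-macros.S below), so the section no longer needs execute rights or runtime relocation. The boot code recovers the setup-function address by adding the entry's run-time address back on; roughly, in C (helper name hypothetical):

	/* Sketch: turning the stored '.long \func - \base' back into a
	 * callable address. 'entry' is the run-time address of the
	 * proc_info record the offset was taken against. */
	typedef void (*cpu_setup_fn)(void);

	static cpu_setup_fn proc_info_initfn(long stored_offset,
					     unsigned long entry)
	{
		return (cpu_setup_fn)(entry + stored_offset);
	}
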
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index a6331d78601f..bff4c7f70fd6 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -465,7 +465,7 @@ arm1020e_crval:
 
 	.align
 
-	.section ".proc.info.init", #alloc, #execinstr
+	.section ".proc.info.init", #alloc
 
 	.type	__arm1020e_proc_info,#object
 __arm1020e_proc_info:
@@ -479,7 +479,7 @@ __arm1020e_proc_info:
 		PMD_BIT4 | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ
-	b	__arm1020e_setup
+	initfn	__arm1020e_setup, __arm1020e_proc_info
 	.long	cpu_arch_name
 	.long	cpu_elf_name
 	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_EDSP
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index a126b7a59928..dbb2413fe04d 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -448,7 +448,7 @@ arm1022_crval:
 
 	.align
 
-	.section ".proc.info.init", #alloc, #execinstr
+	.section ".proc.info.init", #alloc
 
 	.type	__arm1022_proc_info,#object
 __arm1022_proc_info:
@@ -462,7 +462,7 @@ __arm1022_proc_info:
 		PMD_BIT4 | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ
-	b	__arm1022_setup
+	initfn	__arm1022_setup, __arm1022_proc_info
 	.long	cpu_arch_name
 	.long	cpu_elf_name
 	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_EDSP
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index fc294067e977..0b37b2cef9d3 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -442,7 +442,7 @@ arm1026_crval:
 	string	cpu_arm1026_name, "ARM1026EJ-S"
 	.align
 
-	.section ".proc.info.init", #alloc, #execinstr
+	.section ".proc.info.init", #alloc
 
 	.type	__arm1026_proc_info,#object
 __arm1026_proc_info:
@@ -456,7 +456,7 @@ __arm1026_proc_info:
 		PMD_BIT4 | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ
-	b	__arm1026_setup
+	initfn	__arm1026_setup, __arm1026_proc_info
 	.long	cpu_arch_name
 	.long	cpu_elf_name
 	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index 2baa66b3ac9b..3651cd70e418 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -186,7 +186,7 @@ arm720_crval:
  * See <asm/procinfo.h> for a definition of this structure.
  */
 
-	.section ".proc.info.init", #alloc, #execinstr
+	.section ".proc.info.init", #alloc
 
 .macro arm720_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cpu_flush:req
 	.type	__\name\()_proc_info,#object
@@ -203,7 +203,7 @@ __\name\()_proc_info:
 		PMD_BIT4 | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ
-	b	\cpu_flush				@ cpu_flush
+	initfn	\cpu_flush, __\name\()_proc_info	@ cpu_flush
 	.long	cpu_arch_name				@ arch_name
 	.long	cpu_elf_name				@ elf_name
 	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB	@ elf_hwcap
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index ac1ea6b3bce4..024fb7732407 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -132,14 +132,14 @@ __arm740_setup:
 
 	.align
 
-	.section ".proc.info.init", #alloc, #execinstr
+	.section ".proc.info.init", #alloc
 	.type	__arm740_proc_info,#object
 __arm740_proc_info:
 	.long	0x41807400
 	.long	0xfffffff0
 	.long	0
 	.long	0
-	b	__arm740_setup
+	initfn	__arm740_setup, __arm740_proc_info
 	.long	cpu_arch_name
 	.long	cpu_elf_name
 	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_26BIT
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index bf6ba4bc30ff..25472d94426d 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -76,7 +76,7 @@ __arm7tdmi_setup:
 
 	.align
 
-	.section ".proc.info.init", #alloc, #execinstr
+	.section ".proc.info.init", #alloc
 
 .macro arm7tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, \
 	extra_hwcaps=0
@@ -86,7 +86,7 @@ __\name\()_proc_info:
 	.long	\cpu_mask
 	.long	0
 	.long	0
-	b	__arm7tdmi_setup
+	initfn	__arm7tdmi_setup, __\name\()_proc_info
 	.long	cpu_arch_name
 	.long	cpu_elf_name
 	.long	HWCAP_SWP | HWCAP_26BIT | ( \extra_hwcaps )
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 22bf8dde4f84..7a14bd4414c9 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -448,7 +448,7 @@ arm920_crval:
 
 	.align
 
-	.section ".proc.info.init", #alloc, #execinstr
+	.section ".proc.info.init", #alloc
 
 	.type	__arm920_proc_info,#object
 __arm920_proc_info:
@@ -464,7 +464,7 @@ __arm920_proc_info:
 		PMD_BIT4 | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ
-	b	__arm920_setup
+	initfn	__arm920_setup, __arm920_proc_info
 	.long	cpu_arch_name
 	.long	cpu_elf_name
 	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 0c6d5ac5a6d4..edccfcdcd551 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -426,7 +426,7 @@ arm922_crval:
 
 	.align
 
-	.section ".proc.info.init", #alloc, #execinstr
+	.section ".proc.info.init", #alloc
 
 	.type	__arm922_proc_info,#object
 __arm922_proc_info:
@@ -442,7 +442,7 @@ __arm922_proc_info:
 		PMD_BIT4 | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ
-	b	__arm922_setup
+	initfn	__arm922_setup, __arm922_proc_info
 	.long	cpu_arch_name
 	.long	cpu_elf_name
 	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index c32d073282ea..ede8c54ab4aa 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -494,7 +494,7 @@ arm925_crval:
 
 	.align
 
-	.section ".proc.info.init", #alloc, #execinstr
+	.section ".proc.info.init", #alloc
 
 .macro arm925_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
 	.type	__\name\()_proc_info,#object
@@ -510,7 +510,7 @@ __\name\()_proc_info:
 		PMD_BIT4 | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ
-	b	__arm925_setup
+	initfn	__arm925_setup, __\name\()_proc_info
 	.long	cpu_arch_name
 	.long	cpu_elf_name
 	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 252b2503038d..fb827c633693 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -474,7 +474,7 @@ arm926_crval:
 
 	.align
 
-	.section ".proc.info.init", #alloc, #execinstr
+	.section ".proc.info.init", #alloc
 
 	.type	__arm926_proc_info,#object
 __arm926_proc_info:
@@ -490,7 +490,7 @@ __arm926_proc_info:
 		PMD_BIT4 | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ
-	b	__arm926_setup
+	initfn	__arm926_setup, __arm926_proc_info
 	.long	cpu_arch_name
 	.long	cpu_elf_name
 	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index e5212d489377..ee5b66f847c4 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -297,26 +297,16 @@ __arm940_setup:
 	mcr	p15, 0, r0, c6, c0, 1
 
 	ldr	r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
-	ldr	r1, =(CONFIG_DRAM_SIZE >> 12)	@ size of RAM (must be >= 4KB)
-	mov	r2, #10				@ 11 is the minimum (4KB)
-1:	add	r2, r2, #1			@ area size *= 2
-	mov	r1, r1, lsr #1
-	bne	1b				@ count not zero r-shift
-	orr	r0, r0, r2, lsl #1		@ the area register value
-	orr	r0, r0, #1			@ set enable bit
-	mcr	p15, 0, r0, c6, c1, 0		@ set area 1, RAM
-	mcr	p15, 0, r0, c6, c1, 1
+	ldr	r7, =CONFIG_DRAM_SIZE >> 12	@ size of RAM (must be >= 4KB)
+	pr_val	r3, r0, r7, #1
+	mcr	p15, 0, r3, c6, c1, 0		@ set area 1, RAM
+	mcr	p15, 0, r3, c6, c1, 1
 
 	ldr	r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
-	ldr	r1, =(CONFIG_FLASH_SIZE >> 12)	@ size of FLASH (must be >= 4KB)
-	mov	r2, #10				@ 11 is the minimum (4KB)
-1:	add	r2, r2, #1			@ area size *= 2
-	mov	r1, r1, lsr #1
-	bne	1b				@ count not zero r-shift
-	orr	r0, r0, r2, lsl #1		@ the area register value
-	orr	r0, r0, #1			@ set enable bit
-	mcr	p15, 0, r0, c6, c2, 0		@ set area 2, ROM/FLASH
-	mcr	p15, 0, r0, c6, c2, 1
+	ldr	r7, =CONFIG_FLASH_SIZE		@ size of FLASH (must be >= 4KB)
+	pr_val	r3, r0, r6, #1
+	mcr	p15, 0, r3, c6, c2, 0		@ set area 2, ROM/FLASH
+	mcr	p15, 0, r3, c6, c2, 1
 
 	mov	r0, #0x06
 	mcr	p15, 0, r0, c2, c0, 0		@ Region 1&2 cacheable
@@ -354,14 +344,14 @@ __arm940_setup:
 
 	.align
 
-	.section ".proc.info.init", #alloc, #execinstr
+	.section ".proc.info.init", #alloc
 
 	.type	__arm940_proc_info,#object
 __arm940_proc_info:
 	.long	0x41009400
 	.long	0xff00fff0
 	.long	0
-	b	__arm940_setup
+	initfn	__arm940_setup, __arm940_proc_info
 	.long	cpu_arch_name
 	.long	cpu_elf_name
 	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index b3dd9b2d0b8e..7361837edc31 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -343,24 +343,14 @@ __arm946_setup:
 	mcr	p15, 0, r0, c6, c0, 0		@ set region 0, default
 
 	ldr	r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
-	ldr	r1, =(CONFIG_DRAM_SIZE >> 12)	@ size of RAM (must be >= 4KB)
-	mov	r2, #10				@ 11 is the minimum (4KB)
-1:	add	r2, r2, #1			@ area size *= 2
-	mov	r1, r1, lsr #1
-	bne	1b				@ count not zero r-shift
-	orr	r0, r0, r2, lsl #1		@ the region register value
-	orr	r0, r0, #1			@ set enable bit
-	mcr	p15, 0, r0, c6, c1, 0		@ set region 1, RAM
+	ldr	r7, =CONFIG_DRAM_SIZE		@ size of RAM (must be >= 4KB)
+	pr_val	r3, r0, r7, #1
+	mcr	p15, 0, r3, c6, c1, 0
 
 	ldr	r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
-	ldr	r1, =(CONFIG_FLASH_SIZE >> 12)	@ size of FLASH (must be >= 4KB)
-	mov	r2, #10				@ 11 is the minimum (4KB)
-1:	add	r2, r2, #1			@ area size *= 2
-	mov	r1, r1, lsr #1
-	bne	1b				@ count not zero r-shift
-	orr	r0, r0, r2, lsl #1		@ the region register value
-	orr	r0, r0, #1			@ set enable bit
-	mcr	p15, 0, r0, c6, c2, 0		@ set region 2, ROM/FLASH
+	ldr	r7, =CONFIG_FLASH_SIZE		@ size of FLASH (must be >= 4KB)
+	pr_val	r3, r0, r7, #1
+	mcr	p15, 0, r3, c6, c2, 0
 
 	mov	r0, #0x06
 	mcr	p15, 0, r0, c2, c0, 0		@ region 1,2 d-cacheable
@@ -409,14 +399,14 @@ __arm946_setup:
 
 	.align
 
-	.section ".proc.info.init", #alloc, #execinstr
+	.section ".proc.info.init", #alloc
 	.type	__arm946_proc_info,#object
 __arm946_proc_info:
 	.long	0x41009460
 	.long	0xff00fff0
 	.long	0
 	.long	0
-	b	__arm946_setup
+	initfn	__arm946_setup, __arm946_proc_info
 	.long	cpu_arch_name
 	.long	cpu_elf_name
 	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index 8227322bbb8f..7fac8c612134 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -70,7 +70,7 @@ __arm9tdmi_setup:
 
 	.align
 
-	.section ".proc.info.init", #alloc, #execinstr
+	.section ".proc.info.init", #alloc
 
 .macro arm9tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req
 	.type	__\name\()_proc_info, #object
@@ -79,7 +79,7 @@ __\name\()_proc_info:
 	.long	\cpu_mask
 	.long	0
 	.long	0
-	b	__arm9tdmi_setup
+	initfn	__arm9tdmi_setup, __\name\()_proc_info
 	.long	cpu_arch_name
 	.long	cpu_elf_name
 	.long	HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index c494886892ba..4001b73af4ee 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -190,7 +190,7 @@ fa526_cr1_set:
 
 	.align
 
-	.section ".proc.info.init", #alloc, #execinstr
+	.section ".proc.info.init", #alloc
 
 	.type	__fa526_proc_info,#object
 __fa526_proc_info:
@@ -206,7 +206,7 @@ __fa526_proc_info:
 		PMD_BIT4 | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ
-	b	__fa526_setup
+	initfn	__fa526_setup, __fa526_proc_info
 	.long	cpu_arch_name
 	.long	cpu_elf_name
 	.long	HWCAP_SWP | HWCAP_HALF
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index 03a1b75f2e16..e494d6d6acbe 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -584,7 +584,7 @@ feroceon_crval:
 
 	.align
 
-	.section ".proc.info.init", #alloc, #execinstr
+	.section ".proc.info.init", #alloc
 
 .macro feroceon_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache:req
 	.type	__\name\()_proc_info,#object
@@ -601,7 +601,8 @@ __\name\()_proc_info:
 		PMD_BIT4 | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ
-	b	__feroceon_setup
+	initfn	__feroceon_setup, __\name\()_proc_info
+	.long	__feroceon_setup
 	.long	cpu_arch_name
 	.long	cpu_elf_name
 	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 082b9f2f7e90..c671f345266a 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -331,3 +331,31 @@ ENTRY(\name\()_tlb_fns)
 	.globl	\x
 	.equ	\x, \y
 .endm
+
+.macro	initfn, func, base
+	.long	\func - \base
+.endm
+
+	/*
+	 * Macro to calculate the log2 size for the protection region
+	 * registers. This calculates rd = log2(size) - 1.  tmp must
+	 * not be the same register as rd.
+	 */
+.macro	pr_sz, rd, size, tmp
+	mov	\tmp, \size, lsr #12
+	mov	\rd, #11
+1:	movs	\tmp, \tmp, lsr #1
+	addne	\rd, \rd, #1
+	bne	1b
+.endm
+
+	/*
+	 * Macro to generate a protection region register value
+	 * given a pre-masked address, size, and enable bit.
+	 * Corrupts size.
+	 */
+.macro	pr_val, dest, addr, size, enable
+	pr_sz	\dest, \size, \size		@ calculate log2(size) - 1
+	orr	\dest, \addr, \dest, lsl #1	@ mask in the region size
+	orr	\dest, \dest, \enable
+.endm
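
In C terms, pr_sz/pr_val build the ARM940/946 protection-region register value: base | (log2(size) - 1) << 1 | enable, valid for power-of-two sizes of at least 4KB. A sketch of the equivalent arithmetic:

	/* Sketch: C equivalent of the pr_sz/pr_val macros above. */
	static u32 pr_val(u32 base, u32 size, u32 enable)
	{
		u32 sz = 11;		/* log2(4KB) - 1, the minimum */
		u32 tmp = size >> 12;

		while (tmp >>= 1)	/* one step per doubling over 4KB */
			sz++;
		return base | (sz << 1) | enable;
	}
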
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 53d393455f13..d65edf717bf7 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -427,7 +427,7 @@ mohawk_crval:
 
 	.align
 
-	.section ".proc.info.init", #alloc, #execinstr
+	.section ".proc.info.init", #alloc
 
 	.type	__88sv331x_proc_info,#object
 __88sv331x_proc_info:
@@ -443,7 +443,7 @@ __88sv331x_proc_info:
 		PMD_BIT4 | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ
-	b	__mohawk_setup
+	initfn	__mohawk_setup, __88sv331x_proc_info
 	.long	cpu_arch_name
 	.long	cpu_elf_name
 	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index 8008a0461cf5..ee2ce496239f 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -199,7 +199,7 @@ sa110_crval:
 
 	.align
 
-	.section ".proc.info.init", #alloc, #execinstr
+	.section ".proc.info.init", #alloc
 
 	.type	__sa110_proc_info,#object
 __sa110_proc_info:
@@ -213,7 +213,7 @@ __sa110_proc_info:
 	.long	PMD_TYPE_SECT | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ
-	b	__sa110_setup
+	initfn	__sa110_setup, __sa110_proc_info
 	.long	cpu_arch_name
 	.long	cpu_elf_name
 	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 89f97ac648a9..222d5836f666 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -242,7 +242,7 @@ sa1100_crval:
 
 	.align
 
-	.section ".proc.info.init", #alloc, #execinstr
+	.section ".proc.info.init", #alloc
 
 .macro sa1100_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req
 	.type	__\name\()_proc_info,#object
@@ -257,7 +257,7 @@ __\name\()_proc_info:
 	.long	PMD_TYPE_SECT | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ
-	b	__sa1100_setup
+	initfn	__sa1100_setup, __\name\()_proc_info
 	.long	cpu_arch_name
 	.long	cpu_elf_name
 	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index d0390f4b3f18..06d890a2342b 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -264,7 +264,7 @@ v6_crval:
 	string	cpu_elf_name, "v6"
 	.align
 
-	.section ".proc.info.init", #alloc, #execinstr
+	.section ".proc.info.init", #alloc
 
 	/*
 	 * Match any ARMv6 processor core.
@@ -287,7 +287,7 @@ __v6_proc_info:
 		PMD_SECT_XN | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ
-	b	__v6_setup
+	initfn	__v6_setup, __v6_proc_info
 	.long	cpu_arch_name
 	.long	cpu_elf_name
 	/* See also feat_v6_fixup() for HWCAP_TLS */
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index ed448d8a596b..10405b8d31af 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -37,15 +37,18 @@
  * It is assumed that:
  *	- we are not using split page tables
  */
-ENTRY(cpu_v7_switch_mm)
+ENTRY(cpu_ca8_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
-	mmid	r1, r1				@ get mm->context.id
-	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
-	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
 #ifdef CONFIG_ARM_ERRATA_430973
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
 #endif
+#endif
+ENTRY(cpu_v7_switch_mm)
+#ifdef CONFIG_MMU
+	mmid	r1, r1				@ get mm->context.id
+	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
+	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
 #ifdef CONFIG_PID_IN_CONTEXTIDR
 	mrc	p15, 0, r2, c13, c0, 1		@ read current context ID
 	lsr	r2, r2, #8			@ extract the PID
@@ -61,6 +64,7 @@ ENTRY(cpu_v7_switch_mm)
 #endif
 	bx	lr
 ENDPROC(cpu_v7_switch_mm)
+ENDPROC(cpu_ca8_switch_mm)
 
 /*
  * cpu_v7_set_pte_ext(ptep, pte)
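
cpu_ca8_switch_mm is deliberately placed so that after the erratum BTB flush it falls straight through into cpu_v7_switch_mm. A rough C analogue of that control flow (names mirror the assembly labels; flush_btac_btb() is a stand-in for the mcr, illustrative only):

	/* The assembly achieves the 'call' by falling through from one
	 * ENTRY to the next; this sketch makes the sharing explicit. */
	static void flush_btac_btb(void)
	{
		/* mcr p15, 0, r2, c7, c5, 6 in the real code */
	}

	static void cpu_v7_switch_mm_c(unsigned long pgd_phys, unsigned int asid)
	{
		/* write TTBR0/CONTEXTIDR as in the common path */
	}

	static void cpu_ca8_switch_mm_c(unsigned long pgd_phys, unsigned int asid)
	{
		flush_btac_btb();			/* ARM_ERRATA_430973 only */
		cpu_v7_switch_mm_c(pgd_phys, asid);	/* shared tail */
	}
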
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 8b4ee5e81c14..3d1054f11a8a 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -153,6 +153,21 @@ ENDPROC(cpu_v7_do_resume)
 #endif
 
 /*
+ * Cortex-A8
+ */
+	globl_equ	cpu_ca8_proc_init,	cpu_v7_proc_init
+	globl_equ	cpu_ca8_proc_fin,	cpu_v7_proc_fin
+	globl_equ	cpu_ca8_reset,		cpu_v7_reset
+	globl_equ	cpu_ca8_do_idle,	cpu_v7_do_idle
+	globl_equ	cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area
+	globl_equ	cpu_ca8_set_pte_ext,	cpu_v7_set_pte_ext
+	globl_equ	cpu_ca8_suspend_size,	cpu_v7_suspend_size
+#ifdef CONFIG_ARM_CPU_SUSPEND
+	globl_equ	cpu_ca8_do_suspend,	cpu_v7_do_suspend
+	globl_equ	cpu_ca8_do_resume,	cpu_v7_do_resume
+#endif
+
+/*
  * Cortex-A9 processor functions
  */
 	globl_equ	cpu_ca9mp_proc_init,	cpu_v7_proc_init
@@ -451,7 +466,10 @@ __v7_setup_stack:
 
 	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
 	define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
+#ifndef CONFIG_ARM_LPAE
+	define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
 	define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
+#endif
 #ifdef CONFIG_CPU_PJ4B
 	define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
 #endif
@@ -462,19 +480,19 @@ __v7_setup_stack:
 	string	cpu_elf_name, "v7"
 	.align
 
-	.section ".proc.info.init", #alloc, #execinstr
+	.section ".proc.info.init", #alloc
 
 	/*
 	 * Standard v7 proc info content
 	 */
-.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0, proc_fns = v7_processor_functions
+.macro __v7_proc name, initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0, proc_fns = v7_processor_functions
 	ALT_SMP(.long	PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
 			PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags)
 	ALT_UP(.long	PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
 			PMD_SECT_AF | PMD_FLAGS_UP | \mm_mmuflags)
 	.long	PMD_TYPE_SECT | PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ | PMD_SECT_AF | \io_mmuflags
-	W(b)	\initfunc
+	initfn	\initfunc, \name
 	.long	cpu_arch_name
 	.long	cpu_elf_name
 	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \
@@ -494,7 +512,7 @@ __v7_setup_stack:
 __v7_ca5mp_proc_info:
 	.long	0x410fc050
 	.long	0xff0ffff0
-	__v7_proc __v7_ca5mp_setup
+	__v7_proc __v7_ca5mp_proc_info, __v7_ca5mp_setup
 	.size	__v7_ca5mp_proc_info, . - __v7_ca5mp_proc_info
 
 	/*
@@ -504,9 +522,19 @@ __v7_ca5mp_proc_info:
 __v7_ca9mp_proc_info:
 	.long	0x410fc090
 	.long	0xff0ffff0
-	__v7_proc __v7_ca9mp_setup, proc_fns = ca9mp_processor_functions
+	__v7_proc __v7_ca9mp_proc_info, __v7_ca9mp_setup, proc_fns = ca9mp_processor_functions
 	.size	__v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
 
+	/*
+	 * ARM Ltd. Cortex A8 processor.
+	 */
+	.type	__v7_ca8_proc_info, #object
+__v7_ca8_proc_info:
+	.long	0x410fc080
+	.long	0xff0ffff0
+	__v7_proc __v7_ca8_proc_info, __v7_setup, proc_fns = ca8_processor_functions
+	.size	__v7_ca8_proc_info, . - __v7_ca8_proc_info
+
 #endif	/* CONFIG_ARM_LPAE */
 
 	/*
@@ -517,7 +545,7 @@ __v7_ca9mp_proc_info:
 __v7_pj4b_proc_info:
 	.long	0x560f5800
 	.long	0xff0fff00
-	__v7_proc __v7_pj4b_setup, proc_fns = pj4b_processor_functions
+	__v7_proc __v7_pj4b_proc_info, __v7_pj4b_setup, proc_fns = pj4b_processor_functions
 	.size	__v7_pj4b_proc_info, . - __v7_pj4b_proc_info
 #endif
 
@@ -528,7 +556,7 @@ __v7_pj4b_proc_info:
 __v7_cr7mp_proc_info:
 	.long	0x410fc170
 	.long	0xff0ffff0
-	__v7_proc __v7_cr7mp_setup
+	__v7_proc __v7_cr7mp_proc_info, __v7_cr7mp_setup
 	.size	__v7_cr7mp_proc_info, . - __v7_cr7mp_proc_info
 
 	/*
@@ -538,7 +566,7 @@ __v7_cr7mp_proc_info:
 __v7_ca7mp_proc_info:
 	.long	0x410fc070
 	.long	0xff0ffff0
-	__v7_proc __v7_ca7mp_setup
+	__v7_proc __v7_ca7mp_proc_info, __v7_ca7mp_setup
 	.size	__v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
 
 	/*
@@ -548,7 +576,7 @@ __v7_ca7mp_proc_info:
 __v7_ca12mp_proc_info:
 	.long	0x410fc0d0
 	.long	0xff0ffff0
-	__v7_proc __v7_ca12mp_setup
+	__v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup
 	.size	__v7_ca12mp_proc_info, . - __v7_ca12mp_proc_info
 
 	/*
@@ -558,7 +586,7 @@ __v7_ca12mp_proc_info:
 __v7_ca15mp_proc_info:
 	.long	0x410fc0f0
 	.long	0xff0ffff0
-	__v7_proc __v7_ca15mp_setup
+	__v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup
 	.size	__v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
 
 	/*
@@ -568,7 +596,7 @@ __v7_ca15mp_proc_info:
568__v7_b15mp_proc_info: 596__v7_b15mp_proc_info:
569 .long 0x420f00f0 597 .long 0x420f00f0
570 .long 0xff0ffff0 598 .long 0xff0ffff0
571 __v7_proc __v7_b15mp_setup 599 __v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup
572 .size __v7_b15mp_proc_info, . - __v7_b15mp_proc_info 600 .size __v7_b15mp_proc_info, . - __v7_b15mp_proc_info
573 601
574 /* 602 /*
@@ -578,7 +606,7 @@ __v7_b15mp_proc_info:
578__v7_ca17mp_proc_info: 606__v7_ca17mp_proc_info:
579 .long 0x410fc0e0 607 .long 0x410fc0e0
580 .long 0xff0ffff0 608 .long 0xff0ffff0
581 __v7_proc __v7_ca17mp_setup 609 __v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup
582 .size __v7_ca17mp_proc_info, . - __v7_ca17mp_proc_info 610 .size __v7_ca17mp_proc_info, . - __v7_ca17mp_proc_info
583 611
584 /* 612 /*
@@ -594,7 +622,7 @@ __krait_proc_info:
594 * do support them. They also don't indicate support for fused multiply 622 * do support them. They also don't indicate support for fused multiply
595 * instructions even though they actually do support them. 623 * instructions even though they actually do support them.
596 */ 624 */
597 __v7_proc __v7_setup, hwcaps = HWCAP_IDIV | HWCAP_VFPv4 625 __v7_proc __krait_proc_info, __v7_setup, hwcaps = HWCAP_IDIV | HWCAP_VFPv4
598 .size __krait_proc_info, . - __krait_proc_info 626 .size __krait_proc_info, . - __krait_proc_info
599 627
600 /* 628 /*
@@ -604,5 +632,5 @@ __krait_proc_info:
604__v7_proc_info: 632__v7_proc_info:
605 .long 0x000f0000 @ Required ID value 633 .long 0x000f0000 @ Required ID value
606 .long 0x000f0000 @ Mask for ID 634 .long 0x000f0000 @ Mask for ID
607 __v7_proc __v7_setup 635 __v7_proc __v7_proc_info, __v7_setup
608 .size __v7_proc_info, . - __v7_proc_info 636 .size __v7_proc_info, . - __v7_proc_info
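The Krait comment in the hunk above explains why that proc_info entry forces HWCAP_IDIV and HWCAP_VFPv4: the CPU's ID registers understate what the hardware supports. A userspace sketch (ARM only; it assumes installed kernel headers provide the HWCAP_* bits via <asm/hwcap.h>) showing where those forced bits end up, namely the auxiliary vector consulted by ld.so and applications:

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	/* HWCAP_IDIV in the kernel is the pair IDIVA | IDIVT */
	printf("idiv(arm/thumb): %s/%s  vfpv4: %s\n",
	       (hwcap & HWCAP_IDIVA) ? "yes" : "no",
	       (hwcap & HWCAP_IDIVT) ? "yes" : "no",
	       (hwcap & HWCAP_VFPv4) ? "yes" : "no");
	return 0;
}

On a Krait with this patch applied, all three should report "yes" even though the CPU's feature registers claim otherwise.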
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index d1e68b553d3b..e08e1f2bab76 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -135,7 +135,7 @@ __v7m_setup_stack_top:
135 string cpu_elf_name "v7m" 135 string cpu_elf_name "v7m"
136 string cpu_v7m_name "ARMv7-M" 136 string cpu_v7m_name "ARMv7-M"
137 137
138 .section ".proc.info.init", #alloc, #execinstr 138 .section ".proc.info.init", #alloc
139 139
140 /* 140 /*
141 * Match any ARMv7-M processor core. 141 * Match any ARMv7-M processor core.
@@ -146,7 +146,7 @@ __v7m_proc_info:
146 .long 0x000f0000 @ Mask for ID 146 .long 0x000f0000 @ Mask for ID
147 .long 0 @ proc_info_list.__cpu_mm_mmu_flags 147 .long 0 @ proc_info_list.__cpu_mm_mmu_flags
148 .long 0 @ proc_info_list.__cpu_io_mmu_flags 148 .long 0 @ proc_info_list.__cpu_io_mmu_flags
149 b __v7m_setup @ proc_info_list.__cpu_flush 149 initfn __v7m_setup, __v7m_proc_info @ proc_info_list.__cpu_flush
150 .long cpu_arch_name 150 .long cpu_arch_name
151 .long cpu_elf_name 151 .long cpu_elf_name
152 .long HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT 152 .long HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index f8acdfece036..293dcc2c441f 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -499,7 +499,7 @@ xsc3_crval:
499 499
500 .align 500 .align
501 501
502 .section ".proc.info.init", #alloc, #execinstr 502 .section ".proc.info.init", #alloc
503 503
504.macro xsc3_proc_info name:req, cpu_val:req, cpu_mask:req 504.macro xsc3_proc_info name:req, cpu_val:req, cpu_mask:req
505 .type __\name\()_proc_info,#object 505 .type __\name\()_proc_info,#object
@@ -514,7 +514,7 @@ __\name\()_proc_info:
514 .long PMD_TYPE_SECT | \ 514 .long PMD_TYPE_SECT | \
515 PMD_SECT_AP_WRITE | \ 515 PMD_SECT_AP_WRITE | \
516 PMD_SECT_AP_READ 516 PMD_SECT_AP_READ
517 b __xsc3_setup 517 initfn __xsc3_setup, __\name\()_proc_info
518 .long cpu_arch_name 518 .long cpu_arch_name
519 .long cpu_elf_name 519 .long cpu_elf_name
520 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP 520 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index afa2b3c4df4a..b6bbfdb6dfdc 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -612,7 +612,7 @@ xscale_crval:
612 612
613 .align 613 .align
614 614
615 .section ".proc.info.init", #alloc, #execinstr 615 .section ".proc.info.init", #alloc
616 616
617.macro xscale_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache 617.macro xscale_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
618 .type __\name\()_proc_info,#object 618 .type __\name\()_proc_info,#object
@@ -627,7 +627,7 @@ __\name\()_proc_info:
627 .long PMD_TYPE_SECT | \ 627 .long PMD_TYPE_SECT | \
628 PMD_SECT_AP_WRITE | \ 628 PMD_SECT_AP_WRITE | \
629 PMD_SECT_AP_READ 629 PMD_SECT_AP_READ
630 b __xscale_setup 630 initfn __xscale_setup, __\name\()_proc_info
631 .long cpu_arch_name 631 .long cpu_arch_name
632 .long cpu_elf_name 632 .long cpu_elf_name
633 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP 633 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
diff --git a/arch/arm/nwfpe/entry.S b/arch/arm/nwfpe/entry.S
index 5d65be1f1e8a..71df43547659 100644
--- a/arch/arm/nwfpe/entry.S
+++ b/arch/arm/nwfpe/entry.S
@@ -113,7 +113,7 @@ next:
113 @ to fault. Emit the appropriate exception gunk to fix things up. 113 @ to fault. Emit the appropriate exception gunk to fix things up.
114 @ ??? For some reason, faults can happen at .Lx2 even with a 114 @ ??? For some reason, faults can happen at .Lx2 even with a
115 @ plain LDR instruction. Weird, but it seems harmless. 115 @ plain LDR instruction. Weird, but it seems harmless.
116 .pushsection .fixup,"ax" 116 .pushsection .text.fixup,"ax"
117 .align 2 117 .align 2
118.Lfix: ret r9 @ let the user eat segfaults 118.Lfix: ret r9 @ let the user eat segfaults
119 .popsection 119 .popsection
diff --git a/arch/arm/vdso/.gitignore b/arch/arm/vdso/.gitignore
new file mode 100644
index 000000000000..f8b69d84238e
--- /dev/null
+++ b/arch/arm/vdso/.gitignore
@@ -0,0 +1 @@
vdso.lds
diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile
new file mode 100644
index 000000000000..bab0a8be7924
--- /dev/null
+++ b/arch/arm/vdso/Makefile
@@ -0,0 +1,74 @@
1hostprogs-y := vdsomunge
2
3obj-vdso := vgettimeofday.o datapage.o
4
5# Build rules
6targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.so.raw vdso.lds
7obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
8
9ccflags-y := -shared -fPIC -fno-common -fno-builtin -fno-stack-protector
10ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 -DDISABLE_BRANCH_PROFILING
11ccflags-y += -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
12
13obj-y += vdso.o
14extra-y += vdso.lds
15CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
16
17CFLAGS_REMOVE_vdso.o = -pg
18
19# Force -O2 to avoid libgcc dependencies
20CFLAGS_REMOVE_vgettimeofday.o = -pg -Os
21CFLAGS_vgettimeofday.o = -O2
22
23# Disable gcov profiling for VDSO code
24GCOV_PROFILE := n
25
26# Force dependency
27$(obj)/vdso.o : $(obj)/vdso.so
28
29# Link rule for the .so file
30$(obj)/vdso.so.raw: $(src)/vdso.lds $(obj-vdso) FORCE
31 $(call if_changed,vdsold)
32
33$(obj)/vdso.so.dbg: $(obj)/vdso.so.raw $(obj)/vdsomunge FORCE
34 $(call if_changed,vdsomunge)
35
36# Strip rule for the .so file
37$(obj)/%.so: OBJCOPYFLAGS := -S
38$(obj)/%.so: $(obj)/%.so.dbg FORCE
39 $(call if_changed,objcopy)
40
41# Actual build commands
42quiet_cmd_vdsold = VDSO $@
43 cmd_vdsold = $(CC) $(c_flags) -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) \
44 $(call cc-ldoption, -Wl$(comma)--build-id) \
45 -Wl,-Bsymbolic -Wl,-z,max-page-size=4096 \
46 -Wl,-z,common-page-size=4096 -o $@
47
48quiet_cmd_vdsomunge = MUNGE $@
49 cmd_vdsomunge = $(objtree)/$(obj)/vdsomunge $< $@
50
51#
52# Install the unstripped copy of vdso.so.dbg. If our toolchain
53# supports build-id, install .build-id links as well.
54#
55# Cribbed from arch/x86/vdso/Makefile.
56#
57quiet_cmd_vdso_install = INSTALL $<
58define cmd_vdso_install
59 cp $< "$(MODLIB)/vdso/vdso.so"; \
60 if readelf -n $< | grep -q 'Build ID'; then \
61 buildid=`readelf -n $< |grep 'Build ID' |sed -e 's/^.*Build ID: \(.*\)$$/\1/'`; \
62 first=`echo $$buildid | cut -b-2`; \
63 last=`echo $$buildid | cut -b3-`; \
64 mkdir -p "$(MODLIB)/vdso/.build-id/$$first"; \
65 ln -sf "../../vdso.so" "$(MODLIB)/vdso/.build-id/$$first/$$last.debug"; \
66 fi
67endef
68
69$(MODLIB)/vdso: FORCE
70 @mkdir -p $(MODLIB)/vdso
71
72PHONY += vdso_install
73vdso_install: $(obj)/vdso.so.dbg $(MODLIB)/vdso FORCE
74 $(call cmd,vdso_install)
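The install rule above splits a build-id into a two-character directory and a remainder filename, the layout debuggers use to locate detached debug info. A small C sketch of the same path construction (the example id is made up, not a real build-id):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *buildid = "8d7a6e02f2356ba8786e115a5ee9b0a0b4b298e4";
	char first[3], path[128];

	/* first two hex digits become the directory, as in `cut -b-2` */
	strncpy(first, buildid, 2);
	first[2] = '\0';
	snprintf(path, sizeof(path), ".build-id/%s/%s.debug",
		 first, buildid + 2);
	printf("%s\n", path);	/* .build-id/8d/7a6e...e4.debug */
	return 0;
}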
diff --git a/arch/arm/vdso/datapage.S b/arch/arm/vdso/datapage.S
new file mode 100644
index 000000000000..a2e60367931b
--- /dev/null
+++ b/arch/arm/vdso/datapage.S
@@ -0,0 +1,15 @@
1#include <linux/linkage.h>
2#include <asm/asm-offsets.h>
3
4 .align 2
5.L_vdso_data_ptr:
6 .long _start - . - VDSO_DATA_SIZE
7
8ENTRY(__get_datapage)
9 .fnstart
10 adr r0, .L_vdso_data_ptr
11 ldr r1, [r0]
12 add r0, r0, r1
13 bx lr
14 .fnend
15ENDPROC(__get_datapage)
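The trick in __get_datapage is that the literal word stores a link-time distance from itself to the data page, which the kernel maps one VDSO_DATA_SIZE below the start of the vDSO text; adding the literal's own runtime address to that constant lands on the data page wherever the vDSO was mapped, with no relocation needed. A self-contained C sketch of the same arithmetic (the 0x40 literal placement and the fake mapping are invented for illustration):

#include <stdio.h>

#define PAGE_SIZE	4096
#define VDSO_DATA_SIZE	PAGE_SIZE

/* Fake mapping laid out as the kernel maps it: data page, then text. */
static char vdso_map[2 * PAGE_SIZE];

int main(void)
{
	char *vdso_start = vdso_map + VDSO_DATA_SIZE;	/* "_start" */
	char *literal = vdso_start + 0x40;	/* where .L_vdso_data_ptr lands */

	/* the link-time constant: _start - . - VDSO_DATA_SIZE */
	long stored = (vdso_start - literal) - VDSO_DATA_SIZE;

	/* the run-time computation: adr r0; ldr r1; add r0, r0, r1 */
	char *datapage = literal + stored;

	printf("datapage is mapping start: %d\n", datapage == vdso_map); /* 1 */
	return 0;
}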
diff --git a/arch/arm/vdso/vdso.S b/arch/arm/vdso/vdso.S
new file mode 100644
index 000000000000..b2b97e3e7bab
--- /dev/null
+++ b/arch/arm/vdso/vdso.S
@@ -0,0 +1,35 @@
1/*
2 * Adapted from arm64 version.
3 *
4 * Copyright (C) 2012 ARM Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
18 * Author: Will Deacon <will.deacon@arm.com>
19 */
20
21#include <linux/init.h>
22#include <linux/linkage.h>
23#include <linux/const.h>
24#include <asm/page.h>
25
26 __PAGE_ALIGNED_DATA
27
28 .globl vdso_start, vdso_end
29 .balign PAGE_SIZE
30vdso_start:
31 .incbin "arch/arm/vdso/vdso.so"
32 .balign PAGE_SIZE
33vdso_end:
34
35 .previous
diff --git a/arch/arm/vdso/vdso.lds.S b/arch/arm/vdso/vdso.lds.S
new file mode 100644
index 000000000000..89ca89f12d23
--- /dev/null
+++ b/arch/arm/vdso/vdso.lds.S
@@ -0,0 +1,87 @@
1/*
2 * Adapted from arm64 version.
3 *
4 * GNU linker script for the VDSO library.
5 *
6 * Copyright (C) 2012 ARM Limited
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 *
20 * Author: Will Deacon <will.deacon@arm.com>
21 * Heavily based on the vDSO linker scripts for other archs.
22 */
23
24#include <linux/const.h>
25#include <asm/page.h>
26#include <asm/vdso.h>
27
28OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm", "elf32-littlearm")
29OUTPUT_ARCH(arm)
30
31SECTIONS
32{
33 PROVIDE(_start = .);
34
35 . = SIZEOF_HEADERS;
36
37 .hash : { *(.hash) } :text
38 .gnu.hash : { *(.gnu.hash) }
39 .dynsym : { *(.dynsym) }
40 .dynstr : { *(.dynstr) }
41 .gnu.version : { *(.gnu.version) }
42 .gnu.version_d : { *(.gnu.version_d) }
43 .gnu.version_r : { *(.gnu.version_r) }
44
45 .note : { *(.note.*) } :text :note
46
47
48 .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
49 .eh_frame : { KEEP (*(.eh_frame)) } :text
50
51 .dynamic : { *(.dynamic) } :text :dynamic
52
53 .rodata : { *(.rodata*) } :text
54
55 .text : { *(.text*) } :text =0xe7f001f2
56
57 .got : { *(.got) }
58 .rel.plt : { *(.rel.plt) }
59
60 /DISCARD/ : {
61 *(.note.GNU-stack)
62 *(.data .data.* .gnu.linkonce.d.* .sdata*)
63 *(.bss .sbss .dynbss .dynsbss)
64 }
65}
66
67/*
68 * We must supply the ELF program headers explicitly to get just one
69 * PT_LOAD segment, and set the flags explicitly to make segments read-only.
70 */
71PHDRS
72{
73 text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
74 dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
75 note PT_NOTE FLAGS(4); /* PF_R */
76 eh_frame_hdr PT_GNU_EH_FRAME;
77}
78
79VERSION
80{
81 LINUX_2.6 {
82 global:
83 __vdso_clock_gettime;
84 __vdso_gettimeofday;
85 local: *;
86 };
87}
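The FLAGS() values in the PHDRS block above are raw ELF p_flags bit masks. A quick check against the standard PF_* constants, confirming the comment's intent that text is read+execute, everything else read-only, and nothing writable:

#include <assert.h>
#include <elf.h>

int main(void)
{
	/* PF_X = 0x1, PF_W = 0x2, PF_R = 0x4 */
	assert((PF_R | PF_X) == 5);	/* text segment: FLAGS(5) */
	assert(PF_R == 4);		/* dynamic and note segments: FLAGS(4) */
	return 0;
}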
diff --git a/arch/arm/vdso/vdsomunge.c b/arch/arm/vdso/vdsomunge.c
new file mode 100644
index 000000000000..9005b07296c8
--- /dev/null
+++ b/arch/arm/vdso/vdsomunge.c
@@ -0,0 +1,201 @@
1/*
2 * Copyright 2015 Mentor Graphics Corporation.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; version 2 of the
7 * License.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 *
17 *
18 * vdsomunge - Host program which produces a shared object
19 * architecturally specified to be usable by both soft- and hard-float
20 * programs.
21 *
22 * The Procedure Call Standard for the ARM Architecture (ARM IHI
23 * 0042E) says:
24 *
25 * 6.4.1 VFP and Base Standard Compatibility
26 *
27 * Code compiled for the VFP calling standard is compatible with
28 * the base standard (and vice-versa) if no floating-point or
29 * containerized vector arguments or results are used.
30 *
31 * And ELF for the ARM Architecture (ARM IHI 0044E) (Table 4-2) says:
32 *
33 * If both EF_ARM_ABI_FLOAT_XXXX bits are clear, conformance to the
34 * base procedure-call standard is implied.
35 *
36 * The VDSO is built with -msoft-float, as with the rest of the ARM
37 * kernel, and uses no floating point arguments or results. The build
38 * process will produce a shared object that may or may not have the
39 * EF_ARM_ABI_FLOAT_SOFT flag set (it seems to depend on the binutils
40 * version; binutils starting with 2.24 appears to set it). The
41 * EF_ARM_ABI_FLOAT_HARD flag should definitely not be set, and this
42 * program will error out if it is.
43 *
44 * If the soft-float flag is set, this program clears it. That's all
45 * it does.
46 */
47
48#define _GNU_SOURCE
49
50#include <byteswap.h>
51#include <elf.h>
52#include <errno.h>
53#include <error.h>
54#include <fcntl.h>
55#include <stdbool.h>
56#include <stdio.h>
57#include <stdlib.h>
58#include <string.h>
59#include <sys/mman.h>
60#include <sys/stat.h>
61#include <sys/types.h>
62#include <unistd.h>
63
64#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
65#define HOST_ORDER ELFDATA2LSB
66#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
67#define HOST_ORDER ELFDATA2MSB
68#endif
69
70/* Some of the ELF constants we'd like to use were added to <elf.h>
71 * relatively recently.
72 */
73#ifndef EF_ARM_EABI_VER5
74#define EF_ARM_EABI_VER5 0x05000000
75#endif
76
77#ifndef EF_ARM_ABI_FLOAT_SOFT
78#define EF_ARM_ABI_FLOAT_SOFT 0x200
79#endif
80
81#ifndef EF_ARM_ABI_FLOAT_HARD
82#define EF_ARM_ABI_FLOAT_HARD 0x400
83#endif
84
85static const char *outfile;
86
87static void cleanup(void)
88{
89 if (error_message_count > 0 && outfile != NULL)
90 unlink(outfile);
91}
92
93static Elf32_Word read_elf_word(Elf32_Word word, bool swap)
94{
95 return swap ? bswap_32(word) : word;
96}
97
98static Elf32_Half read_elf_half(Elf32_Half half, bool swap)
99{
100 return swap ? bswap_16(half) : half;
101}
102
103static void write_elf_word(Elf32_Word val, Elf32_Word *dst, bool swap)
104{
105 *dst = swap ? bswap_32(val) : val;
106}
107
108int main(int argc, char **argv)
109{
110 const Elf32_Ehdr *inhdr;
111 bool clear_soft_float;
112 const char *infile;
113 Elf32_Word e_flags;
114 const void *inbuf;
115 struct stat stat;
116 void *outbuf;
117 bool swap;
118 int outfd;
119 int infd;
120
121 atexit(cleanup);
122
123 if (argc != 3)
124 error(EXIT_FAILURE, 0, "Usage: %s [infile] [outfile]", argv[0]);
125
126 infile = argv[1];
127 outfile = argv[2];
128
129 infd = open(infile, O_RDONLY);
130 if (infd < 0)
131 error(EXIT_FAILURE, errno, "Cannot open %s", infile);
132
133 if (fstat(infd, &stat) != 0)
134 error(EXIT_FAILURE, errno, "Failed stat for %s", infile);
135
136 inbuf = mmap(NULL, stat.st_size, PROT_READ, MAP_PRIVATE, infd, 0);
137 if (inbuf == MAP_FAILED)
138 error(EXIT_FAILURE, errno, "Failed to map %s", infile);
139
140 close(infd);
141
142 inhdr = inbuf;
143
144 if (memcmp(&inhdr->e_ident, ELFMAG, SELFMAG) != 0)
145 error(EXIT_FAILURE, 0, "Not an ELF file");
146
147 if (inhdr->e_ident[EI_CLASS] != ELFCLASS32)
148 error(EXIT_FAILURE, 0, "Unsupported ELF class");
149
150 swap = inhdr->e_ident[EI_DATA] != HOST_ORDER;
151
152 if (read_elf_half(inhdr->e_type, swap) != ET_DYN)
153 error(EXIT_FAILURE, 0, "Not a shared object");
154
155 if (read_elf_half(inhdr->e_machine, swap) != EM_ARM) {
156 error(EXIT_FAILURE, 0, "Unsupported architecture %#x",
157 inhdr->e_machine);
158 }
159
160 e_flags = read_elf_word(inhdr->e_flags, swap);
161
162 if (EF_ARM_EABI_VERSION(e_flags) != EF_ARM_EABI_VER5) {
163 error(EXIT_FAILURE, 0, "Unsupported EABI version %#x",
164 EF_ARM_EABI_VERSION(e_flags));
165 }
166
167 if (e_flags & EF_ARM_ABI_FLOAT_HARD)
168 error(EXIT_FAILURE, 0,
169 "Unexpected hard-float flag set in e_flags");
170
171 clear_soft_float = !!(e_flags & EF_ARM_ABI_FLOAT_SOFT);
172
173 outfd = open(outfile, O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
174 if (outfd < 0)
175 error(EXIT_FAILURE, errno, "Cannot open %s", outfile);
176
177 if (ftruncate(outfd, stat.st_size) != 0)
178 error(EXIT_FAILURE, errno, "Cannot truncate %s", outfile);
179
180 outbuf = mmap(NULL, stat.st_size, PROT_READ | PROT_WRITE, MAP_SHARED,
181 outfd, 0);
182 if (outbuf == MAP_FAILED)
183 error(EXIT_FAILURE, errno, "Failed to map %s", outfile);
184
185 close(outfd);
186
187 memcpy(outbuf, inbuf, stat.st_size);
188
189 if (clear_soft_float) {
190 Elf32_Ehdr *outhdr;
191
192 outhdr = outbuf;
193 e_flags &= ~EF_ARM_ABI_FLOAT_SOFT;
194 write_elf_word(e_flags, &outhdr->e_flags, swap);
195 }
196
197 if (msync(outbuf, stat.st_size, MS_SYNC) != 0)
198 error(EXIT_FAILURE, errno, "Failed to sync %s", outfile);
199
200 return EXIT_SUCCESS;
201}
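As the header comment in vdsomunge.c describes, the whole transformation is one bit-clear on e_flags. A worked example of the values involved: the input is what a binutils >= 2.24 link of the soft-float vDSO carries (EABI v5 plus EF_ARM_ABI_FLOAT_SOFT), and the output keeps the EABI version while dropping the float-ABI claim, making the object acceptable to both soft- and hard-float dynamic linkers:

#include <assert.h>
#include <stdio.h>

#define EF_ARM_EABI_VER5	0x05000000
#define EF_ARM_ABI_FLOAT_SOFT	0x200

int main(void)
{
	unsigned int in  = EF_ARM_EABI_VER5 | EF_ARM_ABI_FLOAT_SOFT;
	unsigned int out = in & ~EF_ARM_ABI_FLOAT_SOFT;

	printf("e_flags: %#x -> %#x\n", in, out); /* 0x5000200 -> 0x5000000 */
	assert(out == EF_ARM_EABI_VER5);
	return 0;
}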
diff --git a/arch/arm/vdso/vgettimeofday.c b/arch/arm/vdso/vgettimeofday.c
new file mode 100644
index 000000000000..79214d5ff097
--- /dev/null
+++ b/arch/arm/vdso/vgettimeofday.c
@@ -0,0 +1,282 @@
1/*
2 * Copyright 2015 Mentor Graphics Corporation.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; version 2 of the
7 * License.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/compiler.h>
19#include <linux/hrtimer.h>
20#include <linux/time.h>
21#include <asm/arch_timer.h>
22#include <asm/barrier.h>
23#include <asm/bug.h>
24#include <asm/page.h>
25#include <asm/unistd.h>
26#include <asm/vdso_datapage.h>
27
28#ifndef CONFIG_AEABI
29#error This code depends on AEABI system call conventions
30#endif
31
32extern struct vdso_data *__get_datapage(void);
33
34static notrace u32 __vdso_read_begin(const struct vdso_data *vdata)
35{
36 u32 seq;
37repeat:
38 seq = ACCESS_ONCE(vdata->seq_count);
39 if (seq & 1) {
40 cpu_relax();
41 goto repeat;
42 }
43 return seq;
44}
45
46static notrace u32 vdso_read_begin(const struct vdso_data *vdata)
47{
48 u32 seq;
49
50 seq = __vdso_read_begin(vdata);
51
52 smp_rmb(); /* Pairs with smp_wmb in vdso_write_end */
53 return seq;
54}
55
56static notrace int vdso_read_retry(const struct vdso_data *vdata, u32 start)
57{
58 smp_rmb(); /* Pairs with smp_wmb in vdso_write_begin */
59 return vdata->seq_count != start;
60}
61
62static notrace long clock_gettime_fallback(clockid_t _clkid,
63 struct timespec *_ts)
64{
65 register struct timespec *ts asm("r1") = _ts;
66 register clockid_t clkid asm("r0") = _clkid;
67 register long ret asm ("r0");
68 register long nr asm("r7") = __NR_clock_gettime;
69
70 asm volatile(
71 " swi #0\n"
72 : "=r" (ret)
73 : "r" (clkid), "r" (ts), "r" (nr)
74 : "memory");
75
76 return ret;
77}
78
79static notrace int do_realtime_coarse(struct timespec *ts,
80 struct vdso_data *vdata)
81{
82 u32 seq;
83
84 do {
85 seq = vdso_read_begin(vdata);
86
87 ts->tv_sec = vdata->xtime_coarse_sec;
88 ts->tv_nsec = vdata->xtime_coarse_nsec;
89
90 } while (vdso_read_retry(vdata, seq));
91
92 return 0;
93}
94
95static notrace int do_monotonic_coarse(struct timespec *ts,
96 struct vdso_data *vdata)
97{
98 struct timespec tomono;
99 u32 seq;
100
101 do {
102 seq = vdso_read_begin(vdata);
103
104 ts->tv_sec = vdata->xtime_coarse_sec;
105 ts->tv_nsec = vdata->xtime_coarse_nsec;
106
107 tomono.tv_sec = vdata->wtm_clock_sec;
108 tomono.tv_nsec = vdata->wtm_clock_nsec;
109
110 } while (vdso_read_retry(vdata, seq));
111
112 ts->tv_sec += tomono.tv_sec;
113 timespec_add_ns(ts, tomono.tv_nsec);
114
115 return 0;
116}
117
118#ifdef CONFIG_ARM_ARCH_TIMER
119
120static notrace u64 get_ns(struct vdso_data *vdata)
121{
122 u64 cycle_delta;
123 u64 cycle_now;
124 u64 nsec;
125
126 cycle_now = arch_counter_get_cntvct();
127
128 cycle_delta = (cycle_now - vdata->cs_cycle_last) & vdata->cs_mask;
129
130 nsec = (cycle_delta * vdata->cs_mult) + vdata->xtime_clock_snsec;
131 nsec >>= vdata->cs_shift;
132
133 return nsec;
134}
135
136static notrace int do_realtime(struct timespec *ts, struct vdso_data *vdata)
137{
138 u64 nsecs;
139 u32 seq;
140
141 do {
142 seq = vdso_read_begin(vdata);
143
144 if (!vdata->tk_is_cntvct)
145 return -1;
146
147 ts->tv_sec = vdata->xtime_clock_sec;
148 nsecs = get_ns(vdata);
149
150 } while (vdso_read_retry(vdata, seq));
151
152 ts->tv_nsec = 0;
153 timespec_add_ns(ts, nsecs);
154
155 return 0;
156}
157
158static notrace int do_monotonic(struct timespec *ts, struct vdso_data *vdata)
159{
160 struct timespec tomono;
161 u64 nsecs;
162 u32 seq;
163
164 do {
165 seq = vdso_read_begin(vdata);
166
167 if (!vdata->tk_is_cntvct)
168 return -1;
169
170 ts->tv_sec = vdata->xtime_clock_sec;
171 nsecs = get_ns(vdata);
172
173 tomono.tv_sec = vdata->wtm_clock_sec;
174 tomono.tv_nsec = vdata->wtm_clock_nsec;
175
176 } while (vdso_read_retry(vdata, seq));
177
178 ts->tv_sec += tomono.tv_sec;
179 ts->tv_nsec = 0;
180 timespec_add_ns(ts, nsecs + tomono.tv_nsec);
181
182 return 0;
183}
184
185#else /* CONFIG_ARM_ARCH_TIMER */
186
187static notrace int do_realtime(struct timespec *ts, struct vdso_data *vdata)
188{
189 return -1;
190}
191
192static notrace int do_monotonic(struct timespec *ts, struct vdso_data *vdata)
193{
194 return -1;
195}
196
197#endif /* CONFIG_ARM_ARCH_TIMER */
198
199notrace int __vdso_clock_gettime(clockid_t clkid, struct timespec *ts)
200{
201 struct vdso_data *vdata;
202 int ret = -1;
203
204 vdata = __get_datapage();
205
206 switch (clkid) {
207 case CLOCK_REALTIME_COARSE:
208 ret = do_realtime_coarse(ts, vdata);
209 break;
210 case CLOCK_MONOTONIC_COARSE:
211 ret = do_monotonic_coarse(ts, vdata);
212 break;
213 case CLOCK_REALTIME:
214 ret = do_realtime(ts, vdata);
215 break;
216 case CLOCK_MONOTONIC:
217 ret = do_monotonic(ts, vdata);
218 break;
219 default:
220 break;
221 }
222
223 if (ret)
224 ret = clock_gettime_fallback(clkid, ts);
225
226 return ret;
227}
228
229static notrace long gettimeofday_fallback(struct timeval *_tv,
230 struct timezone *_tz)
231{
232 register struct timezone *tz asm("r1") = _tz;
233 register struct timeval *tv asm("r0") = _tv;
234 register long ret asm ("r0");
235 register long nr asm("r7") = __NR_gettimeofday;
236
237 asm volatile(
238 " swi #0\n"
239 : "=r" (ret)
240 : "r" (tv), "r" (tz), "r" (nr)
241 : "memory");
242
243 return ret;
244}
245
246notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
247{
248 struct timespec ts;
249 struct vdso_data *vdata;
250 int ret;
251
252 vdata = __get_datapage();
253
254 ret = do_realtime(&ts, vdata);
255 if (ret)
256 return gettimeofday_fallback(tv, tz);
257
258 if (tv) {
259 tv->tv_sec = ts.tv_sec;
260 tv->tv_usec = ts.tv_nsec / 1000;
261 }
262 if (tz) {
263 tz->tz_minuteswest = vdata->tz_minuteswest;
264 tz->tz_dsttime = vdata->tz_dsttime;
265 }
266
267 return ret;
268}
269
270/* Avoid unresolved references emitted by GCC */
271
272void __aeabi_unwind_cpp_pr0(void)
273{
274}
275
276void __aeabi_unwind_cpp_pr1(void)
277{
278}
279
280void __aeabi_unwind_cpp_pr2(void)
281{
282}
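Two ideas carry vgettimeofday.c: a lockless seqcount reader (vdso_read_begin/vdso_read_retry spin while the sequence word is odd and retry if it changed) and the fixed-point conversion in get_ns(), where mult and shift are chosen by the kernel so that mult / 2^shift equals the nanoseconds per counter tick. A single-threaded userspace mock of both (the data and the mult/shift pair are invented; a 19.2 MHz counter gives roughly 52.08 ns per tick, and the real code additionally needs the smp_rmb() barriers shown above):

#include <stdint.h>
#include <stdio.h>

struct mock_vdata {
	uint32_t seq_count;	/* odd while the writer is mid-update */
	uint64_t cycle_last;
	uint32_t mult;
	uint32_t shift;
};

static uint64_t cycles_to_ns(const struct mock_vdata *v, uint64_t now)
{
	return ((now - v->cycle_last) * v->mult) >> v->shift;
}

int main(void)
{
	struct mock_vdata v = {
		.seq_count = 2,		/* even: no update in progress */
		.cycle_last = 1000,
		.mult = 873813333,	/* ~52.08 ns/tick, i.e. ~19.2 MHz */
		.shift = 24,
	};
	uint64_t now = 1960;		/* 960 ticks after cycle_last */
	uint32_t seq;
	uint64_t ns;

	do {
		while ((seq = v.seq_count) & 1)
			;		/* writer active: spin, like the cpu_relax() loop */
		ns = cycles_to_ns(&v, now);
	} while (v.seq_count != seq);	/* retry if a writer raced us */

	printf("%llu ns\n", (unsigned long long)ns);	/* 49999, ~50 us */
	return 0;
}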
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
index c6dc3548e5d1..b0b688c481e8 100644
--- a/drivers/amba/tegra-ahb.c
+++ b/drivers/amba/tegra-ahb.c
@@ -25,49 +25,50 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/platform_device.h> 26#include <linux/platform_device.h>
27#include <linux/io.h> 27#include <linux/io.h>
28#include <linux/of.h>
28 29
29#include <soc/tegra/ahb.h> 30#include <soc/tegra/ahb.h>
30 31
31#define DRV_NAME "tegra-ahb" 32#define DRV_NAME "tegra-ahb"
32 33
33#define AHB_ARBITRATION_DISABLE 0x00 34#define AHB_ARBITRATION_DISABLE 0x04
34#define AHB_ARBITRATION_PRIORITY_CTRL 0x04 35#define AHB_ARBITRATION_PRIORITY_CTRL 0x08
35#define AHB_PRIORITY_WEIGHT(x) (((x) & 0x7) << 29) 36#define AHB_PRIORITY_WEIGHT(x) (((x) & 0x7) << 29)
36#define PRIORITY_SELECT_USB BIT(6) 37#define PRIORITY_SELECT_USB BIT(6)
37#define PRIORITY_SELECT_USB2 BIT(18) 38#define PRIORITY_SELECT_USB2 BIT(18)
38#define PRIORITY_SELECT_USB3 BIT(17) 39#define PRIORITY_SELECT_USB3 BIT(17)
39 40
40#define AHB_GIZMO_AHB_MEM 0x0c 41#define AHB_GIZMO_AHB_MEM 0x10
41#define ENB_FAST_REARBITRATE BIT(2) 42#define ENB_FAST_REARBITRATE BIT(2)
42#define DONT_SPLIT_AHB_WR BIT(7) 43#define DONT_SPLIT_AHB_WR BIT(7)
43 44
44#define AHB_GIZMO_APB_DMA 0x10 45#define AHB_GIZMO_APB_DMA 0x14
45#define AHB_GIZMO_IDE 0x18 46#define AHB_GIZMO_IDE 0x1c
46#define AHB_GIZMO_USB 0x1c 47#define AHB_GIZMO_USB 0x20
47#define AHB_GIZMO_AHB_XBAR_BRIDGE 0x20 48#define AHB_GIZMO_AHB_XBAR_BRIDGE 0x24
48#define AHB_GIZMO_CPU_AHB_BRIDGE 0x24 49#define AHB_GIZMO_CPU_AHB_BRIDGE 0x28
49#define AHB_GIZMO_COP_AHB_BRIDGE 0x28 50#define AHB_GIZMO_COP_AHB_BRIDGE 0x2c
50#define AHB_GIZMO_XBAR_APB_CTLR 0x2c 51#define AHB_GIZMO_XBAR_APB_CTLR 0x30
51#define AHB_GIZMO_VCP_AHB_BRIDGE 0x30 52#define AHB_GIZMO_VCP_AHB_BRIDGE 0x34
52#define AHB_GIZMO_NAND 0x3c 53#define AHB_GIZMO_NAND 0x40
53#define AHB_GIZMO_SDMMC4 0x44 54#define AHB_GIZMO_SDMMC4 0x48
54#define AHB_GIZMO_XIO 0x48 55#define AHB_GIZMO_XIO 0x4c
55#define AHB_GIZMO_BSEV 0x60 56#define AHB_GIZMO_BSEV 0x64
56#define AHB_GIZMO_BSEA 0x70 57#define AHB_GIZMO_BSEA 0x74
57#define AHB_GIZMO_NOR 0x74 58#define AHB_GIZMO_NOR 0x78
58#define AHB_GIZMO_USB2 0x78 59#define AHB_GIZMO_USB2 0x7c
59#define AHB_GIZMO_USB3 0x7c 60#define AHB_GIZMO_USB3 0x80
60#define IMMEDIATE BIT(18) 61#define IMMEDIATE BIT(18)
61 62
62#define AHB_GIZMO_SDMMC1 0x80 63#define AHB_GIZMO_SDMMC1 0x84
63#define AHB_GIZMO_SDMMC2 0x84 64#define AHB_GIZMO_SDMMC2 0x88
64#define AHB_GIZMO_SDMMC3 0x88 65#define AHB_GIZMO_SDMMC3 0x8c
65#define AHB_MEM_PREFETCH_CFG_X 0xd8 66#define AHB_MEM_PREFETCH_CFG_X 0xdc
66#define AHB_ARBITRATION_XBAR_CTRL 0xdc 67#define AHB_ARBITRATION_XBAR_CTRL 0xe0
67#define AHB_MEM_PREFETCH_CFG3 0xe0 68#define AHB_MEM_PREFETCH_CFG3 0xe4
68#define AHB_MEM_PREFETCH_CFG4 0xe4 69#define AHB_MEM_PREFETCH_CFG4 0xe8
69#define AHB_MEM_PREFETCH_CFG1 0xec 70#define AHB_MEM_PREFETCH_CFG1 0xf0
70#define AHB_MEM_PREFETCH_CFG2 0xf0 71#define AHB_MEM_PREFETCH_CFG2 0xf4
71#define PREFETCH_ENB BIT(31) 72#define PREFETCH_ENB BIT(31)
72#define MST_ID(x) (((x) & 0x1f) << 26) 73#define MST_ID(x) (((x) & 0x1f) << 26)
73#define AHBDMA_MST_ID MST_ID(5) 74#define AHBDMA_MST_ID MST_ID(5)
@@ -77,10 +78,20 @@
77#define ADDR_BNDRY(x) (((x) & 0xf) << 21) 78#define ADDR_BNDRY(x) (((x) & 0xf) << 21)
78#define INACTIVITY_TIMEOUT(x) (((x) & 0xffff) << 0) 79#define INACTIVITY_TIMEOUT(x) (((x) & 0xffff) << 0)
79 80
80#define AHB_ARBITRATION_AHB_MEM_WRQUE_MST_ID 0xf8 81#define AHB_ARBITRATION_AHB_MEM_WRQUE_MST_ID 0xfc
81 82
82#define AHB_ARBITRATION_XBAR_CTRL_SMMU_INIT_DONE BIT(17) 83#define AHB_ARBITRATION_XBAR_CTRL_SMMU_INIT_DONE BIT(17)
83 84
85/*
86 * INCORRECT_BASE_ADDR_LOW_BYTE: Legacy kernel DT files for Tegra SoCs
87 * prior to Tegra124 generally use a physical base address ending in
88 * 0x4 for the AHB IP block. According to the TRM, the low byte
89 * should be 0x0. During device probing, this macro is used to detect
90 * whether the passed-in physical address is incorrect, and if so, to
91 * correct it.
92 */
93#define INCORRECT_BASE_ADDR_LOW_BYTE 0x4
94
84static struct platform_driver tegra_ahb_driver; 95static struct platform_driver tegra_ahb_driver;
85 96
86static const u32 tegra_ahb_gizmo[] = { 97static const u32 tegra_ahb_gizmo[] = {
@@ -257,6 +268,15 @@ static int tegra_ahb_probe(struct platform_device *pdev)
257 return -ENOMEM; 268 return -ENOMEM;
258 269
259 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 270 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
271
272 /* Correct the IP block base address if necessary */
273 if (res &&
274 (res->start & INCORRECT_BASE_ADDR_LOW_BYTE) ==
275 INCORRECT_BASE_ADDR_LOW_BYTE) {
276 dev_warn(&pdev->dev, "incorrect AHB base address in DT data - enabling workaround\n");
277 res->start -= INCORRECT_BASE_ADDR_LOW_BYTE;
278 }
279
260 ahb->regs = devm_ioremap_resource(&pdev->dev, res); 280 ahb->regs = devm_ioremap_resource(&pdev->dev, res);
261 if (IS_ERR(ahb->regs)) 281 if (IS_ERR(ahb->regs))
262 return PTR_ERR(ahb->regs); 282 return PTR_ERR(ahb->regs);
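The INCORRECT_BASE_ADDR_LOW_BYTE comment above is also why every register offset in this file grew by four: the probe-time workaround shrinks a legacy DT base address by the same amount, so absolute register addresses are unchanged. A minimal check of that invariant (0x6000c004 as the legacy Tegra20 base is an assumption used only for illustration):

#include <assert.h>

int main(void)
{
	unsigned long legacy_base = 0x6000c004;		/* DT value ending in 0x4 */
	unsigned long fixed_base  = legacy_base - 0x4;	/* after the workaround */

	unsigned long old_gizmo_ahb_mem = 0x0c;	/* offset before this patch */
	unsigned long new_gizmo_ahb_mem = 0x10;	/* offset after this patch */

	assert(legacy_base + old_gizmo_ahb_mem ==
	       fixed_base + new_gizmo_ahb_mem);
	return 0;
}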
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 392c74c40056..5c48c58514e5 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -405,7 +405,7 @@
405#define TEXT_TEXT \ 405#define TEXT_TEXT \
406 ALIGN_FUNCTION(); \ 406 ALIGN_FUNCTION(); \
407 *(.text.hot) \ 407 *(.text.hot) \
408 *(.text) \ 408 *(.text .text.fixup) \
409 *(.ref.text) \ 409 *(.ref.text) \
410 MEM_KEEP(init.text) \ 410 MEM_KEEP(init.text) \
411 MEM_KEEP(exit.text) \ 411 MEM_KEEP(exit.text) \