author    Linus Torvalds <torvalds@linux-foundation.org>    2014-08-05 13:05:29 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-08-05 13:05:29 -0400
commit    c489d98c8c81a898cfed6bec193cca2006f956aa (patch)
tree      4cc9b571c9bb2380e6b11828cc843f3ceeb5dcf4
parent    f67d251a87ccb288a3a164c5226c6ee9ce8ea53d (diff)
parent    f15bdfe4fb264ac30d9c176f898cbd52cfd1ffa9 (diff)
Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm
Pull ARM updates from Russell King:
 "Included in this update:

  - perf updates from Will Deacon:

    The main changes are callchain stability fixes from Jean Pihet and
    event mapping and PMU name rework from Mark Rutland.  The latter is
    preparatory work for enabling some code re-use with arm64 in the
    future.

  - updates for nommu from Uwe Kleine-König:

    Two different fixes for the same problem making some ARM nommu
    configurations not boot since 3.6-rc1.  The problem is that
    user_addr_max returned the biggest available RAM address, which
    makes some copy_from_user variants fail to read from XIP memory.

  - deprecate the legacy OMAP DMA API, in preparation for its removal.
    The popular drivers have been converted over, leaving a very small
    number of rarely used drivers, which hopefully can be converted
    during the next cycle with a bit more visibility (and hopefully
    people popping out of the woodwork to help test).

  - more tweaks for BE systems, particularly with the kernel image
    format.  In connection with this, I've cleaned up the way we
    generate the linker script for the decompressor.

  - removal of hard-coded assumptions about the kernel stack size,
    making everywhere depend on the value of THREAD_SIZE_ORDER.

  - MCPM updates from Nicolas Pitre.

  - make it easier to do proper CPU part number checks (which should
    always include the vendor field).

  - assembly code optimisation: use the "bx" instruction when returning
    from a function on ARMv6+ rather than "mov pc, reg".

  - save the last kernel misaligned fault location and report it via
    the procfs alignment file.

  - clean up the way we create the initial stack frame, which was a
    repeated pattern in several different locations.

  - support for 8-byte get_user(), needed for some DRM implementations.

  - MCS locking from Will Deacon.

  - save and restore a few more Cortex-A9 registers (for errata
    workarounds).

  - fix various aspects of the SWP emulation, and the ELF hwcap for the
    SWP instruction.

  - update the LPAE logic for pte_write and pmd_write to make it more
    correct.

  - support for Broadcom Brahma15 CPU cores.

  - ARM assembly crypto updates from Ard Biesheuvel"

* 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm: (53 commits)
  ARM: add comments to the early page table remap code
  ARM: 8122/1: smp_scu: enable SCU standby support
  ARM: 8121/1: smp_scu: use macro for SCU enable bit
  ARM: 8120/1: crypto: sha512: add ARM NEON implementation
  ARM: 8119/1: crypto: sha1: add ARM NEON implementation
  ARM: 8118/1: crypto: sha1/make use of common SHA-1 structures
  ARM: 8113/1: remove remaining definitions of PLAT_PHYS_OFFSET from <mach/memory.h>
  ARM: 8111/1: Enable erratum 798181 for Broadcom Brahma-B15
  ARM: 8110/1: do CPU-specific init for Broadcom Brahma15 cores
  ARM: 8109/1: mm: Modify pte_write and pmd_write logic for LPAE
  ARM: 8108/1: mm: Introduce {pte,pmd}_isset and {pte,pmd}_isclear
  ARM: hwcap: disable HWCAP_SWP if the CPU advertises it has exclusives
  ARM: SWP emulation: only initialise on ARMv7 CPUs
  ARM: SWP emulation: always enable when SMP is enabled
  ARM: 8103/1: save/restore Cortex-A9 CP15 registers on suspend/resume
  ARM: 8098/1: mcs lock: implement wfe-based polling for MCS locking
  ARM: 8091/2: add get_user() support for 8 byte types
  ARM: 8097/1: unistd.h: relocate comments back to place
  ARM: 8096/1: Describe required sort order for textofs-y (TEXT_OFFSET)
  ARM: 8090/1: add revision info for PL310 errata 588369 and 727915
  ...
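Of the items above, the 8-byte get_user() support (ARM: 8091/2) is easiest to see from the caller's side. A minimal sketch of the usage it enables follows; the function and variable names are illustrative only, not from the patch:

#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical example: fetch a 64-bit value from userspace in one
 * call.  Before this series, ARM's get_user() handled only 1-, 2- and
 * 4-byte types, so a u64 (as some DRM ioctls pass) needed
 * copy_from_user() instead.
 */
static int read_user_u64(const u64 __user *uptr, u64 *val)
{
        return get_user(*val, uptr);    /* 0 on success, -EFAULT on fault */
}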
-rw-r--r--  arch/arm/Kconfig  17
-rw-r--r--  arch/arm/Makefile  3
-rw-r--r--  arch/arm/boot/compressed/Makefile  5
-rw-r--r--  arch/arm/boot/compressed/head.S  8
-rw-r--r--  arch/arm/boot/compressed/vmlinux.lds.S (renamed from arch/arm/boot/compressed/vmlinux.lds.in)  17
-rw-r--r--  arch/arm/common/mcpm_entry.c  52
-rw-r--r--  arch/arm/crypto/Makefile  4
-rw-r--r--  arch/arm/crypto/aes-armv4.S  3
-rw-r--r--  arch/arm/crypto/sha1-armv7-neon.S  634
-rw-r--r--  arch/arm/crypto/sha1_glue.c  58
-rw-r--r--  arch/arm/crypto/sha1_neon_glue.c  197
-rw-r--r--  arch/arm/crypto/sha512-armv7-neon.S  455
-rw-r--r--  arch/arm/crypto/sha512_neon_glue.c  305
-rw-r--r--  arch/arm/include/asm/assembler.h  29
-rw-r--r--  arch/arm/include/asm/cputype.h  37
-rw-r--r--  arch/arm/include/asm/crypto/sha1.h  10
-rw-r--r--  arch/arm/include/asm/entry-macro-multi.S  2
-rw-r--r--  arch/arm/include/asm/glue-proc.h  18
-rw-r--r--  arch/arm/include/asm/mcpm.h  16
-rw-r--r--  arch/arm/include/asm/mcs_spinlock.h  23
-rw-r--r--  arch/arm/include/asm/memory.h  10
-rw-r--r--  arch/arm/include/asm/perf_event.h  9
-rw-r--r--  arch/arm/include/asm/pgtable-3level-hwdef.h  3
-rw-r--r--  arch/arm/include/asm/pgtable-3level.h  49
-rw-r--r--  arch/arm/include/asm/pgtable.h  18
-rw-r--r--  arch/arm/include/asm/pmu.h  19
-rw-r--r--  arch/arm/include/asm/ptrace.h  6
-rw-r--r--  arch/arm/include/asm/smp_scu.h  2
-rw-r--r--  arch/arm/include/asm/stacktrace.h  15
-rw-r--r--  arch/arm/include/asm/thread_info.h  3
-rw-r--r--  arch/arm/include/asm/uaccess.h  22
-rw-r--r--  arch/arm/include/asm/unistd.h  10
-rw-r--r--  arch/arm/include/uapi/asm/unistd.h  11
-rw-r--r--  arch/arm/kernel/debug.S  10
-rw-r--r--  arch/arm/kernel/entry-armv.S  42
-rw-r--r--  arch/arm/kernel/entry-common.S  13
-rw-r--r--  arch/arm/kernel/entry-header.S  14
-rw-r--r--  arch/arm/kernel/fiqasm.S  4
-rw-r--r--  arch/arm/kernel/head-common.S  7
-rw-r--r--  arch/arm/kernel/head-nommu.S  8
-rw-r--r--  arch/arm/kernel/head.S  18
-rw-r--r--  arch/arm/kernel/hyp-stub.S  6
-rw-r--r--  arch/arm/kernel/iwmmxt.S  16
-rw-r--r--  arch/arm/kernel/perf_event.c  18
-rw-r--r--  arch/arm/kernel/perf_event_cpu.c  66
-rw-r--r--  arch/arm/kernel/perf_event_v6.c  307
-rw-r--r--  arch/arm/kernel/perf_event_v7.c  967
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c  121
-rw-r--r--  arch/arm/kernel/relocate_kernel.S  3
-rw-r--r--  arch/arm/kernel/setup.c  29
-rw-r--r--  arch/arm/kernel/sleep.S  2
-rw-r--r--  arch/arm/kernel/smp_scu.c  12
-rw-r--r--  arch/arm/kernel/smp_tlb.c  20
-rw-r--r--  arch/arm/kernel/swp_emulate.c  4
-rw-r--r--  arch/arm/kernel/time.c  5
-rw-r--r--  arch/arm/kernel/traps.c  6
-rw-r--r--  arch/arm/kernel/unwind.c  8
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S  1
-rw-r--r--  arch/arm/kvm/guest.c  8
-rw-r--r--  arch/arm/kvm/init.S  3
-rw-r--r--  arch/arm/lib/ashldi3.S  3
-rw-r--r--  arch/arm/lib/ashrdi3.S  3
-rw-r--r--  arch/arm/lib/backtrace.S  2
-rw-r--r--  arch/arm/lib/bitops.h  5
-rw-r--r--  arch/arm/lib/bswapsdi2.S  5
-rw-r--r--  arch/arm/lib/call_with_stack.S  4
-rw-r--r--  arch/arm/lib/csumpartial.S  2
-rw-r--r--  arch/arm/lib/csumpartialcopygeneric.S  5
-rw-r--r--  arch/arm/lib/delay-loop.S  18
-rw-r--r--  arch/arm/lib/div64.S  13
-rw-r--r--  arch/arm/lib/findbit.S  10
-rw-r--r--  arch/arm/lib/getuser.S  45
-rw-r--r--  arch/arm/lib/io-readsb.S  2
-rw-r--r--  arch/arm/lib/io-readsl.S  6
-rw-r--r--  arch/arm/lib/io-readsw-armv3.S  4
-rw-r--r--  arch/arm/lib/io-readsw-armv4.S  2
-rw-r--r--  arch/arm/lib/io-writesb.S  2
-rw-r--r--  arch/arm/lib/io-writesl.S  10
-rw-r--r--  arch/arm/lib/io-writesw-armv3.S  4
-rw-r--r--  arch/arm/lib/io-writesw-armv4.S  4
-rw-r--r--  arch/arm/lib/lib1funcs.S  26
-rw-r--r--  arch/arm/lib/lshrdi3.S  3
-rw-r--r--  arch/arm/lib/memchr.S  2
-rw-r--r--  arch/arm/lib/memset.S  2
-rw-r--r--  arch/arm/lib/memzero.S  2
-rw-r--r--  arch/arm/lib/muldi3.S  3
-rw-r--r--  arch/arm/lib/putuser.S  10
-rw-r--r--  arch/arm/lib/strchr.S  2
-rw-r--r--  arch/arm/lib/strrchr.S  2
-rw-r--r--  arch/arm/lib/ucmpdi2.S  5
-rw-r--r--  arch/arm/mach-davinci/sleep.S  2
-rw-r--r--  arch/arm/mach-ebsa110/include/mach/memory.h  5
-rw-r--r--  arch/arm/mach-ep93xx/crunch-bits.S  6
-rw-r--r--  arch/arm/mach-ep93xx/include/mach/memory.h  22
-rw-r--r--  arch/arm/mach-exynos/Kconfig  1
-rw-r--r--  arch/arm/mach-exynos/mcpm-exynos.c  17
-rw-r--r--  arch/arm/mach-exynos/platsmp.c  4
-rw-r--r--  arch/arm/mach-exynos/pm.c  11
-rw-r--r--  arch/arm/mach-footbridge/include/mach/memory.h  5
-rw-r--r--  arch/arm/mach-imx/suspend-imx6.S  5
-rw-r--r--  arch/arm/mach-integrator/include/mach/memory.h  5
-rw-r--r--  arch/arm/mach-iop13xx/include/mach/iop13xx.h  2
-rw-r--r--  arch/arm/mach-iop13xx/include/mach/memory.h  5
-rw-r--r--  arch/arm/mach-iop13xx/setup.c  1
-rw-r--r--  arch/arm/mach-ks8695/include/mach/memory.h  5
-rw-r--r--  arch/arm/mach-mvebu/coherency_ll.S  10
-rw-r--r--  arch/arm/mach-mvebu/headsmp-a9.S  2
-rw-r--r--  arch/arm/mach-omap1/include/mach/memory.h  5
-rw-r--r--  arch/arm/mach-omap2/sleep44xx.S  3
-rw-r--r--  arch/arm/mach-omap2/sram242x.S  6
-rw-r--r--  arch/arm/mach-omap2/sram243x.S  6
-rw-r--r--  arch/arm/mach-pxa/mioa701_bootresume.S  2
-rw-r--r--  arch/arm/mach-pxa/standby.S  4
-rw-r--r--  arch/arm/mach-realview/include/mach/memory.h  9
-rw-r--r--  arch/arm/mach-rpc/include/mach/memory.h  5
-rw-r--r--  arch/arm/mach-s3c24xx/sleep-s3c2410.S  2
-rw-r--r--  arch/arm/mach-s3c24xx/sleep-s3c2412.S  2
-rw-r--r--  arch/arm/mach-s5pv210/include/mach/memory.h  2
-rw-r--r--  arch/arm/mach-sa1100/include/mach/memory.h  5
-rw-r--r--  arch/arm/mach-shmobile/headsmp.S  3
-rw-r--r--  arch/arm/mach-tegra/sleep-tegra20.S  24
-rw-r--r--  arch/arm/mach-tegra/sleep-tegra30.S  14
-rw-r--r--  arch/arm/mach-tegra/sleep.S  8
-rw-r--r--  arch/arm/mach-vexpress/tc2_pm.c  19
-rw-r--r--  arch/arm/mm/Kconfig  9
-rw-r--r--  arch/arm/mm/alignment.c  4
-rw-r--r--  arch/arm/mm/cache-fa.S  19
-rw-r--r--  arch/arm/mm/cache-l2x0.c  2
-rw-r--r--  arch/arm/mm/cache-nop.S  5
-rw-r--r--  arch/arm/mm/cache-v4.S  13
-rw-r--r--  arch/arm/mm/cache-v4wb.S  15
-rw-r--r--  arch/arm/mm/cache-v4wt.S  13
-rw-r--r--  arch/arm/mm/cache-v6.S  20
-rw-r--r--  arch/arm/mm/cache-v7.S  30
-rw-r--r--  arch/arm/mm/dump.c  4
-rw-r--r--  arch/arm/mm/l2c-l2x0-resume.S  7
-rw-r--r--  arch/arm/mm/mmu.c  45
-rw-r--r--  arch/arm/mm/proc-arm1020.S  34
-rw-r--r--  arch/arm/mm/proc-arm1020e.S  34
-rw-r--r--  arch/arm/mm/proc-arm1022.S  34
-rw-r--r--  arch/arm/mm/proc-arm1026.S  34
-rw-r--r--  arch/arm/mm/proc-arm720.S  16
-rw-r--r--  arch/arm/mm/proc-arm740.S  8
-rw-r--r--  arch/arm/mm/proc-arm7tdmi.S  8
-rw-r--r--  arch/arm/mm/proc-arm920.S  34
-rw-r--r--  arch/arm/mm/proc-arm922.S  34
-rw-r--r--  arch/arm/mm/proc-arm925.S  34
-rw-r--r--  arch/arm/mm/proc-arm926.S  34
-rw-r--r--  arch/arm/mm/proc-arm940.S  24
-rw-r--r--  arch/arm/mm/proc-arm946.S  30
-rw-r--r--  arch/arm/mm/proc-arm9tdmi.S  8
-rw-r--r--  arch/arm/mm/proc-fa526.S  16
-rw-r--r--  arch/arm/mm/proc-feroceon.S  44
-rw-r--r--  arch/arm/mm/proc-mohawk.S  34
-rw-r--r--  arch/arm/mm/proc-sa110.S  16
-rw-r--r--  arch/arm/mm/proc-sa1100.S  16
-rw-r--r--  arch/arm/mm/proc-v6.S  16
-rw-r--r--  arch/arm/mm/proc-v7-2level.S  4
-rw-r--r--  arch/arm/mm/proc-v7-3level.S  14
-rw-r--r--  arch/arm/mm/proc-v7.S  74
-rw-r--r--  arch/arm/mm/proc-v7m.S  18
-rw-r--r--  arch/arm/mm/proc-xsc3.S  32
-rw-r--r--  arch/arm/mm/proc-xscale.S  34
-rw-r--r--  arch/arm/mm/tlb-fa.S  7
-rw-r--r--  arch/arm/mm/tlb-v4.S  5
-rw-r--r--  arch/arm/mm/tlb-v4wb.S  7
-rw-r--r--  arch/arm/mm/tlb-v4wbi.S  7
-rw-r--r--  arch/arm/mm/tlb-v6.S  5
-rw-r--r--  arch/arm/mm/tlb-v7.S  4
-rw-r--r--  arch/arm/nwfpe/entry.S  8
-rw-r--r--  arch/arm/oprofile/common.c  19
-rw-r--r--  arch/arm/plat-omap/dma.c  2
-rw-r--r--  arch/arm/vfp/entry.S  4
-rw-r--r--  arch/arm/vfp/vfphw.S  26
-rw-r--r--  arch/arm/xen/hypercall.S  6
-rw-r--r--  crypto/Kconfig  26
-rw-r--r--  drivers/clocksource/arm_global_timer.c  2
177 files changed, 3213 insertions, 2006 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 290f02ee0157..1e14b9068a39 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -263,8 +263,22 @@ config NEED_MACH_MEMORY_H
 
 config PHYS_OFFSET
 	hex "Physical address of main memory" if MMU
-	depends on !ARM_PATCH_PHYS_VIRT && !NEED_MACH_MEMORY_H
+	depends on !ARM_PATCH_PHYS_VIRT
 	default DRAM_BASE if !MMU
+	default 0x00000000 if ARCH_EBSA110 || \
+			EP93XX_SDCE3_SYNC_PHYS_OFFSET || \
+			ARCH_FOOTBRIDGE || \
+			ARCH_INTEGRATOR || \
+			ARCH_IOP13XX || \
+			ARCH_KS8695 || \
+			(ARCH_REALVIEW && !REALVIEW_HIGH_PHYS_OFFSET)
+	default 0x10000000 if ARCH_OMAP1 || ARCH_RPC
+	default 0x20000000 if ARCH_S5PV210
+	default 0x70000000 if REALVIEW_HIGH_PHYS_OFFSET
+	default 0xc0000000 if EP93XX_SDCE0_PHYS_OFFSET || ARCH_SA1100
+	default 0xd0000000 if EP93XX_SDCE1_PHYS_OFFSET
+	default 0xe0000000 if EP93XX_SDCE2_PHYS_OFFSET
+	default 0xf0000000 if EP93XX_SDCE3_ASYNC_PHYS_OFFSET
 	help
 	  Please provide the physical address corresponding to the
 	  location of main memory in your system.
@@ -436,7 +450,6 @@ config ARCH_EP93XX
 	select ARM_VIC
 	select CLKDEV_LOOKUP
 	select CPU_ARM920T
-	select NEED_MACH_MEMORY_H
 	help
 	  This enables support for the Cirrus EP93xx series of CPUs.
 
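Moving these per-platform defaults into Kconfig is what lets the <mach/memory.h> headers in the diffstat above shrink or disappear. The value matters because, with ARM_PATCH_PHYS_VIRT disabled, the kernel's linear-map translation is a constant offset. A simplified sketch of that relationship (not the kernel's exact macros, which also have a runtime-patched variant):

/* Simplified sketch: PAGE_OFFSET is the virtual start of the linear
 * map and PHYS_OFFSET the physical start of RAM (both from
 * asm/memory.h).  A wrong PHYS_OFFSET breaks every
 * virt_to_phys()/phys_to_virt() conversion.
 */
static inline unsigned long example_virt_to_phys(unsigned long vaddr)
{
        return vaddr - PAGE_OFFSET + PHYS_OFFSET;
}

static inline unsigned long example_phys_to_virt(unsigned long paddr)
{
        return paddr - PHYS_OFFSET + PAGE_OFFSET;
}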
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 6721fab13734..718913dfe815 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -127,6 +127,9 @@ CHECKFLAGS += -D__arm__
 
 #Default value
 head-y		:= arch/arm/kernel/head$(MMUEXT).o
+
+# Text offset. This list is sorted numerically by address in order to
+# provide a means to avoid/resolve conflicts in multi-arch kernels.
 textofs-y	:= 0x00008000
 textofs-$(CONFIG_ARCH_CLPS711X) := 0x00028000
 # We don't want the htc bootloader to corrupt kernel during resume
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 68c918362b79..76a50ecae1c3 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -81,7 +81,7 @@ ZTEXTADDR := 0
 ZBSSADDR	:= ALIGN(8)
 endif
 
-SEDFLAGS	= s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/
+CPPFLAGS_vmlinux.lds := -DTEXT_START="$(ZTEXTADDR)" -DBSS_START="$(ZBSSADDR)"
 
 suffix_$(CONFIG_KERNEL_GZIP) = gzip
 suffix_$(CONFIG_KERNEL_LZO)  = lzo
@@ -199,8 +199,5 @@ CFLAGS_font.o := -Dstatic=
 $(obj)/font.c: $(FONTC)
 	$(call cmd,shipped)
 
-$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG)
-	@sed "$(SEDFLAGS)" < $< > $@
-
 $(obj)/hyp-stub.S: $(srctree)/arch/$(SRCARCH)/kernel/hyp-stub.S
 	$(call cmd,shipped)
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 3a8b32df6b31..413fd94b5301 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -125,9 +125,11 @@ start:
 		THUMB(	adr	r12, BSYM(1f)	)
 		THUMB(	bx	r12		)
 
-		.word	0x016f2818		@ Magic numbers to help the loader
-		.word	start			@ absolute load/run zImage address
-		.word	_edata			@ zImage end address
+		.word	_magic_sig		@ Magic numbers to help the loader
+		.word	_magic_start		@ absolute load/run zImage address
+		.word	_magic_end		@ zImage end address
+		.word	0x04030201		@ endianness flag
+
 		THUMB(	.thumb			)
 1:
 		ARM_BE8(	setend	be )	@ go BE8 if compiled for BE8
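With this change the three loader-visible words are computed by the linker (and byte-swapped for BE8 by ZIMAGE_MAGIC() in the linker script below), and a fourth word advertises the image's endianness. A hypothetical loader-side check, assuming the traditional header offsets (magic at 0x24); this layout is inferred from the hunk, not copied from the patch:

#include <stdint.h>

/* Sketch of a boot loader inspecting the zImage header words emitted
 * above.  The struct layout (magic at offset 0x24, then start, end,
 * and the new endianness word) is an assumption for illustration.
 */
struct zimage_header {
        uint32_t code[9];       /* entry instructions, offsets 0x00-0x20 */
        uint32_t magic;         /* _magic_sig: 0x016f2818 */
        uint32_t start;         /* absolute load/run address */
        uint32_t end;           /* image end address */
        uint32_t endian;        /* reads 0x04030201 in same-endian tools */
};

static int zimage_other_endian(const struct zimage_header *h)
{
        /* A byte-swapped view of 0x04030201 is 0x01020304. */
        return h->endian == 0x01020304u;
}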
diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.S
index 4919f2ac8b89..2b60b843ac5e 100644
--- a/arch/arm/boot/compressed/vmlinux.lds.in
+++ b/arch/arm/boot/compressed/vmlinux.lds.S
@@ -1,12 +1,20 @@
 /*
- * linux/arch/arm/boot/compressed/vmlinux.lds.in
- *
  * Copyright (C) 2000 Russell King
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+
+#ifdef CONFIG_CPU_ENDIAN_BE8
+#define ZIMAGE_MAGIC(x) ( (((x) >> 24) & 0x000000ff) | \
+			  (((x) >>  8) & 0x0000ff00) | \
+			  (((x) <<  8) & 0x00ff0000) | \
+			  (((x) << 24) & 0xff000000) )
+#else
+#define ZIMAGE_MAGIC(x) (x)
+#endif
+
 OUTPUT_ARCH(arm)
 ENTRY(_start)
 SECTIONS
@@ -57,6 +65,10 @@ SECTIONS
  .pad			: { BYTE(0); . = ALIGN(8); }
  _edata = .;
 
+ _magic_sig = ZIMAGE_MAGIC(0x016f2818);
+ _magic_start = ZIMAGE_MAGIC(_start);
+ _magic_end = ZIMAGE_MAGIC(_edata);
+
  . = BSS_START;
  __bss_start = .;
  .bss			: { *(.bss) }
@@ -73,4 +85,3 @@ SECTIONS
  .stab.indexstr 0	: { *(.stab.indexstr) }
  .comment 0		: { *(.comment) }
 }
-
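ZIMAGE_MAGIC() above is a plain 32-bit byte reversal, applied at link time so that a BE8 kernel's header words still read correctly to little-endian tools and boot loaders. The same operation in C, for reference:

#include <stdint.h>

/* C equivalent of the ZIMAGE_MAGIC() linker-script macro: reverse the
 * four bytes of a 32-bit word.  On BE8 the header constants are stored
 * pre-swapped, so a little-endian reader still finds 0x016f2818.
 */
static inline uint32_t zimage_swab32(uint32_t x)
{
        return ((x >> 24) & 0x000000ffu) |
               ((x >>  8) & 0x0000ff00u) |
               ((x <<  8) & 0x00ff0000u) |
               ((x << 24) & 0xff000000u);
}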
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index f91136ab447e..3c165fc2dce2 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -12,11 +12,13 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/irqflags.h>
+#include <linux/cpu_pm.h>
 
 #include <asm/mcpm.h>
 #include <asm/cacheflush.h>
 #include <asm/idmap.h>
 #include <asm/cputype.h>
+#include <asm/suspend.h>
 
 extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
 
@@ -146,6 +148,56 @@ int mcpm_cpu_powered_up(void)
 	return 0;
 }
 
+#ifdef CONFIG_ARM_CPU_SUSPEND
+
+static int __init nocache_trampoline(unsigned long _arg)
+{
+	void (*cache_disable)(void) = (void *)_arg;
+	unsigned int mpidr = read_cpuid_mpidr();
+	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+	phys_reset_t phys_reset;
+
+	mcpm_set_entry_vector(cpu, cluster, cpu_resume);
+	setup_mm_for_reboot();
+
+	__mcpm_cpu_going_down(cpu, cluster);
+	BUG_ON(!__mcpm_outbound_enter_critical(cpu, cluster));
+	cache_disable();
+	__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
+	__mcpm_cpu_down(cpu, cluster);
+
+	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+	phys_reset(virt_to_phys(mcpm_entry_point));
+	BUG();
+}
+
+int __init mcpm_loopback(void (*cache_disable)(void))
+{
+	int ret;
+
+	/*
+	 * We're going to soft-restart the current CPU through the
+	 * low-level MCPM code by leveraging the suspend/resume
+	 * infrastructure. Let's play it safe by using cpu_pm_enter()
+	 * in case the CPU init code path resets the VFP or similar.
+	 */
+	local_irq_disable();
+	local_fiq_disable();
+	ret = cpu_pm_enter();
+	if (!ret) {
+		ret = cpu_suspend((unsigned long)cache_disable, nocache_trampoline);
+		cpu_pm_exit();
+	}
+	local_fiq_enable();
+	local_irq_enable();
+	if (ret)
+		pr_err("%s returned %d\n", __func__, ret);
+	return ret;
+}
+
+#endif
+
 struct sync_struct mcpm_sync;
 
 /*
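mcpm_loopback() exists so a platform backend can run the boot CPU once through the full MCPM power-down/power-up path during init, leaving the low-level cluster state machine in a known state. A hedged sketch of how a backend might use it; my_pm_ops and the init function are hypothetical, while mcpm_platform_register() and v7_exit_coherency_flush() are real kernel interfaces:

#include <asm/cacheflush.h>
#include <asm/mcpm.h>

/* Hypothetical platform ops table; a real backend fills in
 * .power_up, .power_down and friends.
 */
static const struct mcpm_platform_ops my_pm_ops;

static void my_cache_disable(void)
{
        /* Flush own caches and drop out of coherency, as the MCPM
         * contract requires; this macro is the stock ARMv7 helper.
         */
        v7_exit_coherency_flush(louis);
}

static int __init my_mcpm_init(void)
{
        int ret = mcpm_platform_register(&my_pm_ops);

        if (!ret)
                ret = mcpm_loopback(my_cache_disable);
        return ret;
}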
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index 81cda39860c5..b48fa341648d 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -5,10 +5,14 @@
 obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o
 obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o
 obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o
+obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
+obj-$(CONFIG_CRYPTO_SHA512_ARM_NEON) += sha512-arm-neon.o
 
 aes-arm-y	:= aes-armv4.o aes_glue.o
 aes-arm-bs-y	:= aesbs-core.o aesbs-glue.o
 sha1-arm-y	:= sha1-armv4-large.o sha1_glue.o
+sha1-arm-neon-y	:= sha1-armv7-neon.o sha1_neon_glue.o
+sha512-arm-neon-y := sha512-armv7-neon.o sha512_neon_glue.o
 
 quiet_cmd_perl = PERL $@
       cmd_perl = $(PERL) $(<) > $(@)
diff --git a/arch/arm/crypto/aes-armv4.S b/arch/arm/crypto/aes-armv4.S
index 3a14ea8fe97e..ebb9761fb572 100644
--- a/arch/arm/crypto/aes-armv4.S
+++ b/arch/arm/crypto/aes-armv4.S
@@ -35,6 +35,7 @@
 @ that is being targetted.
 
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 
 .text
 
@@ -648,7 +649,7 @@ _armv4_AES_set_encrypt_key:
 
 .Ldone:	mov	r0,#0
 	ldmia	sp!,{r4-r12,lr}
-.Labrt:	mov	pc,lr
+.Labrt:	ret	lr
 ENDPROC(private_AES_set_encrypt_key)
 
 .align	5
diff --git a/arch/arm/crypto/sha1-armv7-neon.S b/arch/arm/crypto/sha1-armv7-neon.S
new file mode 100644
index 000000000000..50013c0e2864
--- /dev/null
+++ b/arch/arm/crypto/sha1-armv7-neon.S
@@ -0,0 +1,634 @@
1/* sha1-armv7-neon.S - ARM/NEON accelerated SHA-1 transform function
2 *
3 * Copyright © 2013-2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the Free
7 * Software Foundation; either version 2 of the License, or (at your option)
8 * any later version.
9 */
10
11#include <linux/linkage.h>
12
13
14.syntax unified
15.code 32
16.fpu neon
17
18.text
19
20
21/* Context structure */
22
23#define state_h0 0
24#define state_h1 4
25#define state_h2 8
26#define state_h3 12
27#define state_h4 16
28
29
30/* Constants */
31
32#define K1 0x5A827999
33#define K2 0x6ED9EBA1
34#define K3 0x8F1BBCDC
35#define K4 0xCA62C1D6
36.align 4
37.LK_VEC:
38.LK1: .long K1, K1, K1, K1
39.LK2: .long K2, K2, K2, K2
40.LK3: .long K3, K3, K3, K3
41.LK4: .long K4, K4, K4, K4
42
43
44/* Register macros */
45
46#define RSTATE r0
47#define RDATA r1
48#define RNBLKS r2
49#define ROLDSTACK r3
50#define RWK lr
51
52#define _a r4
53#define _b r5
54#define _c r6
55#define _d r7
56#define _e r8
57
58#define RT0 r9
59#define RT1 r10
60#define RT2 r11
61#define RT3 r12
62
63#define W0 q0
64#define W1 q1
65#define W2 q2
66#define W3 q3
67#define W4 q4
68#define W5 q5
69#define W6 q6
70#define W7 q7
71
72#define tmp0 q8
73#define tmp1 q9
74#define tmp2 q10
75#define tmp3 q11
76
77#define qK1 q12
78#define qK2 q13
79#define qK3 q14
80#define qK4 q15
81
82
83/* Round function macros. */
84
85#define WK_offs(i) (((i) & 15) * 4)
86
87#define _R_F1(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
88 W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
89 ldr RT3, [sp, WK_offs(i)]; \
90 pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
91 bic RT0, d, b; \
92 add e, e, a, ror #(32 - 5); \
93 and RT1, c, b; \
94 pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
95 add RT0, RT0, RT3; \
96 add e, e, RT1; \
97 ror b, #(32 - 30); \
98 pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
99 add e, e, RT0;
100
101#define _R_F2(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
102 W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
103 ldr RT3, [sp, WK_offs(i)]; \
104 pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
105 eor RT0, d, b; \
106 add e, e, a, ror #(32 - 5); \
107 eor RT0, RT0, c; \
108 pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
109 add e, e, RT3; \
110 ror b, #(32 - 30); \
111 pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
112 add e, e, RT0; \
113
114#define _R_F3(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
115 W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
116 ldr RT3, [sp, WK_offs(i)]; \
117 pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
118 eor RT0, b, c; \
119 and RT1, b, c; \
120 add e, e, a, ror #(32 - 5); \
121 pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
122 and RT0, RT0, d; \
123 add RT1, RT1, RT3; \
124 add e, e, RT0; \
125 ror b, #(32 - 30); \
126 pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
127 add e, e, RT1;
128
129#define _R_F4(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
130 W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
131 _R_F2(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
132 W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28)
133
134#define _R(a,b,c,d,e,f,i,pre1,pre2,pre3,i16,\
135 W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
136 _R_##f(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
137 W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28)
138
139#define R(a,b,c,d,e,f,i) \
140 _R_##f(a,b,c,d,e,i,dummy,dummy,dummy,i16,\
141 W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28)
142
143#define dummy(...)
144
145
146/* Input expansion macros. */
147
148/********* Precalc macros for rounds 0-15 *************************************/
149
150#define W_PRECALC_00_15() \
151 add RWK, sp, #(WK_offs(0)); \
152 \
153 vld1.32 {tmp0, tmp1}, [RDATA]!; \
154 vrev32.8 W0, tmp0; /* big => little */ \
155 vld1.32 {tmp2, tmp3}, [RDATA]!; \
156 vadd.u32 tmp0, W0, curK; \
157 vrev32.8 W7, tmp1; /* big => little */ \
158 vrev32.8 W6, tmp2; /* big => little */ \
159 vadd.u32 tmp1, W7, curK; \
160 vrev32.8 W5, tmp3; /* big => little */ \
161 vadd.u32 tmp2, W6, curK; \
162 vst1.32 {tmp0, tmp1}, [RWK]!; \
163 vadd.u32 tmp3, W5, curK; \
164 vst1.32 {tmp2, tmp3}, [RWK]; \
165
166#define WPRECALC_00_15_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
167 vld1.32 {tmp0, tmp1}, [RDATA]!; \
168
169#define WPRECALC_00_15_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
170 add RWK, sp, #(WK_offs(0)); \
171
172#define WPRECALC_00_15_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
173 vrev32.8 W0, tmp0; /* big => little */ \
174
175#define WPRECALC_00_15_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
176 vld1.32 {tmp2, tmp3}, [RDATA]!; \
177
178#define WPRECALC_00_15_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
179 vadd.u32 tmp0, W0, curK; \
180
181#define WPRECALC_00_15_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
182 vrev32.8 W7, tmp1; /* big => little */ \
183
184#define WPRECALC_00_15_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
185 vrev32.8 W6, tmp2; /* big => little */ \
186
187#define WPRECALC_00_15_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
188 vadd.u32 tmp1, W7, curK; \
189
190#define WPRECALC_00_15_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
191 vrev32.8 W5, tmp3; /* big => little */ \
192
193#define WPRECALC_00_15_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
194 vadd.u32 tmp2, W6, curK; \
195
196#define WPRECALC_00_15_10(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
197 vst1.32 {tmp0, tmp1}, [RWK]!; \
198
199#define WPRECALC_00_15_11(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
200 vadd.u32 tmp3, W5, curK; \
201
202#define WPRECALC_00_15_12(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
203 vst1.32 {tmp2, tmp3}, [RWK]; \
204
205
206/********* Precalc macros for rounds 16-31 ************************************/
207
208#define WPRECALC_16_31_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
209 veor tmp0, tmp0; \
210 vext.8 W, W_m16, W_m12, #8; \
211
212#define WPRECALC_16_31_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
213 add RWK, sp, #(WK_offs(i)); \
214 vext.8 tmp0, W_m04, tmp0, #4; \
215
216#define WPRECALC_16_31_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
217 veor tmp0, tmp0, W_m16; \
218 veor.32 W, W, W_m08; \
219
220#define WPRECALC_16_31_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
221 veor tmp1, tmp1; \
222 veor W, W, tmp0; \
223
224#define WPRECALC_16_31_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
225 vshl.u32 tmp0, W, #1; \
226
227#define WPRECALC_16_31_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
228 vext.8 tmp1, tmp1, W, #(16-12); \
229 vshr.u32 W, W, #31; \
230
231#define WPRECALC_16_31_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
232 vorr tmp0, tmp0, W; \
233 vshr.u32 W, tmp1, #30; \
234
235#define WPRECALC_16_31_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
236 vshl.u32 tmp1, tmp1, #2; \
237
238#define WPRECALC_16_31_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
239 veor tmp0, tmp0, W; \
240
241#define WPRECALC_16_31_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
242 veor W, tmp0, tmp1; \
243
244#define WPRECALC_16_31_10(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
245 vadd.u32 tmp0, W, curK; \
246
247#define WPRECALC_16_31_11(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
248 vst1.32 {tmp0}, [RWK];
249
250
251/********* Precalc macros for rounds 32-79 ************************************/
252
253#define WPRECALC_32_79_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
254 veor W, W_m28; \
255
256#define WPRECALC_32_79_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
257 vext.8 tmp0, W_m08, W_m04, #8; \
258
259#define WPRECALC_32_79_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
260 veor W, W_m16; \
261
262#define WPRECALC_32_79_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
263 veor W, tmp0; \
264
265#define WPRECALC_32_79_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
266 add RWK, sp, #(WK_offs(i&~3)); \
267
268#define WPRECALC_32_79_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
269 vshl.u32 tmp1, W, #2; \
270
271#define WPRECALC_32_79_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
272 vshr.u32 tmp0, W, #30; \
273
274#define WPRECALC_32_79_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
275 vorr W, tmp0, tmp1; \
276
277#define WPRECALC_32_79_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
278 vadd.u32 tmp0, W, curK; \
279
280#define WPRECALC_32_79_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
281 vst1.32 {tmp0}, [RWK];
282
283
284/*
285 * Transform nblks*64 bytes (nblks*16 32-bit words) at DATA.
286 *
287 * unsigned int
288 * sha1_transform_neon (void *ctx, const unsigned char *data,
289 * unsigned int nblks)
290 */
291.align 3
292ENTRY(sha1_transform_neon)
293 /* input:
294 * r0: ctx, CTX
295 * r1: data (64*nblks bytes)
296 * r2: nblks
297 */
298
299 cmp RNBLKS, #0;
300 beq .Ldo_nothing;
301
302 push {r4-r12, lr};
303 /*vpush {q4-q7};*/
304
305 adr RT3, .LK_VEC;
306
307 mov ROLDSTACK, sp;
308
309 /* Align stack. */
310 sub RT0, sp, #(16*4);
311 and RT0, #(~(16-1));
312 mov sp, RT0;
313
314 vld1.32 {qK1-qK2}, [RT3]!; /* Load K1,K2 */
315
316 /* Get the values of the chaining variables. */
317 ldm RSTATE, {_a-_e};
318
319 vld1.32 {qK3-qK4}, [RT3]; /* Load K3,K4 */
320
321#undef curK
322#define curK qK1
323 /* Precalc 0-15. */
324 W_PRECALC_00_15();
325
326.Loop:
327 /* Transform 0-15 + Precalc 16-31. */
328 _R( _a, _b, _c, _d, _e, F1, 0,
329 WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 16,
330 W4, W5, W6, W7, W0, _, _, _ );
331 _R( _e, _a, _b, _c, _d, F1, 1,
332 WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 16,
333 W4, W5, W6, W7, W0, _, _, _ );
334 _R( _d, _e, _a, _b, _c, F1, 2,
335 WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 16,
336 W4, W5, W6, W7, W0, _, _, _ );
337 _R( _c, _d, _e, _a, _b, F1, 3,
338 WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,16,
339 W4, W5, W6, W7, W0, _, _, _ );
340
341#undef curK
342#define curK qK2
343 _R( _b, _c, _d, _e, _a, F1, 4,
344 WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 20,
345 W3, W4, W5, W6, W7, _, _, _ );
346 _R( _a, _b, _c, _d, _e, F1, 5,
347 WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 20,
348 W3, W4, W5, W6, W7, _, _, _ );
349 _R( _e, _a, _b, _c, _d, F1, 6,
350 WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 20,
351 W3, W4, W5, W6, W7, _, _, _ );
352 _R( _d, _e, _a, _b, _c, F1, 7,
353 WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,20,
354 W3, W4, W5, W6, W7, _, _, _ );
355
356 _R( _c, _d, _e, _a, _b, F1, 8,
357 WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 24,
358 W2, W3, W4, W5, W6, _, _, _ );
359 _R( _b, _c, _d, _e, _a, F1, 9,
360 WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 24,
361 W2, W3, W4, W5, W6, _, _, _ );
362 _R( _a, _b, _c, _d, _e, F1, 10,
363 WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 24,
364 W2, W3, W4, W5, W6, _, _, _ );
365 _R( _e, _a, _b, _c, _d, F1, 11,
366 WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,24,
367 W2, W3, W4, W5, W6, _, _, _ );
368
369 _R( _d, _e, _a, _b, _c, F1, 12,
370 WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 28,
371 W1, W2, W3, W4, W5, _, _, _ );
372 _R( _c, _d, _e, _a, _b, F1, 13,
373 WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 28,
374 W1, W2, W3, W4, W5, _, _, _ );
375 _R( _b, _c, _d, _e, _a, F1, 14,
376 WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 28,
377 W1, W2, W3, W4, W5, _, _, _ );
378 _R( _a, _b, _c, _d, _e, F1, 15,
379 WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,28,
380 W1, W2, W3, W4, W5, _, _, _ );
381
382 /* Transform 16-63 + Precalc 32-79. */
383 _R( _e, _a, _b, _c, _d, F1, 16,
384 WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 32,
385 W0, W1, W2, W3, W4, W5, W6, W7);
386 _R( _d, _e, _a, _b, _c, F1, 17,
387 WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 32,
388 W0, W1, W2, W3, W4, W5, W6, W7);
389 _R( _c, _d, _e, _a, _b, F1, 18,
390 WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 32,
391 W0, W1, W2, W3, W4, W5, W6, W7);
392 _R( _b, _c, _d, _e, _a, F1, 19,
393 WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 32,
394 W0, W1, W2, W3, W4, W5, W6, W7);
395
396 _R( _a, _b, _c, _d, _e, F2, 20,
397 WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 36,
398 W7, W0, W1, W2, W3, W4, W5, W6);
399 _R( _e, _a, _b, _c, _d, F2, 21,
400 WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 36,
401 W7, W0, W1, W2, W3, W4, W5, W6);
402 _R( _d, _e, _a, _b, _c, F2, 22,
403 WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 36,
404 W7, W0, W1, W2, W3, W4, W5, W6);
405 _R( _c, _d, _e, _a, _b, F2, 23,
406 WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 36,
407 W7, W0, W1, W2, W3, W4, W5, W6);
408
409#undef curK
410#define curK qK3
411 _R( _b, _c, _d, _e, _a, F2, 24,
412 WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 40,
413 W6, W7, W0, W1, W2, W3, W4, W5);
414 _R( _a, _b, _c, _d, _e, F2, 25,
415 WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 40,
416 W6, W7, W0, W1, W2, W3, W4, W5);
417 _R( _e, _a, _b, _c, _d, F2, 26,
418 WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 40,
419 W6, W7, W0, W1, W2, W3, W4, W5);
420 _R( _d, _e, _a, _b, _c, F2, 27,
421 WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 40,
422 W6, W7, W0, W1, W2, W3, W4, W5);
423
424 _R( _c, _d, _e, _a, _b, F2, 28,
425 WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 44,
426 W5, W6, W7, W0, W1, W2, W3, W4);
427 _R( _b, _c, _d, _e, _a, F2, 29,
428 WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 44,
429 W5, W6, W7, W0, W1, W2, W3, W4);
430 _R( _a, _b, _c, _d, _e, F2, 30,
431 WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 44,
432 W5, W6, W7, W0, W1, W2, W3, W4);
433 _R( _e, _a, _b, _c, _d, F2, 31,
434 WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 44,
435 W5, W6, W7, W0, W1, W2, W3, W4);
436
437 _R( _d, _e, _a, _b, _c, F2, 32,
438 WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 48,
439 W4, W5, W6, W7, W0, W1, W2, W3);
440 _R( _c, _d, _e, _a, _b, F2, 33,
441 WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 48,
442 W4, W5, W6, W7, W0, W1, W2, W3);
443 _R( _b, _c, _d, _e, _a, F2, 34,
444 WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 48,
445 W4, W5, W6, W7, W0, W1, W2, W3);
446 _R( _a, _b, _c, _d, _e, F2, 35,
447 WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 48,
448 W4, W5, W6, W7, W0, W1, W2, W3);
449
450 _R( _e, _a, _b, _c, _d, F2, 36,
451 WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 52,
452 W3, W4, W5, W6, W7, W0, W1, W2);
453 _R( _d, _e, _a, _b, _c, F2, 37,
454 WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 52,
455 W3, W4, W5, W6, W7, W0, W1, W2);
456 _R( _c, _d, _e, _a, _b, F2, 38,
457 WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 52,
458 W3, W4, W5, W6, W7, W0, W1, W2);
459 _R( _b, _c, _d, _e, _a, F2, 39,
460 WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 52,
461 W3, W4, W5, W6, W7, W0, W1, W2);
462
463 _R( _a, _b, _c, _d, _e, F3, 40,
464 WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 56,
465 W2, W3, W4, W5, W6, W7, W0, W1);
466 _R( _e, _a, _b, _c, _d, F3, 41,
467 WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 56,
468 W2, W3, W4, W5, W6, W7, W0, W1);
469 _R( _d, _e, _a, _b, _c, F3, 42,
470 WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 56,
471 W2, W3, W4, W5, W6, W7, W0, W1);
472 _R( _c, _d, _e, _a, _b, F3, 43,
473 WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 56,
474 W2, W3, W4, W5, W6, W7, W0, W1);
475
476#undef curK
477#define curK qK4
478 _R( _b, _c, _d, _e, _a, F3, 44,
479 WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 60,
480 W1, W2, W3, W4, W5, W6, W7, W0);
481 _R( _a, _b, _c, _d, _e, F3, 45,
482 WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 60,
483 W1, W2, W3, W4, W5, W6, W7, W0);
484 _R( _e, _a, _b, _c, _d, F3, 46,
485 WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 60,
486 W1, W2, W3, W4, W5, W6, W7, W0);
487 _R( _d, _e, _a, _b, _c, F3, 47,
488 WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 60,
489 W1, W2, W3, W4, W5, W6, W7, W0);
490
491 _R( _c, _d, _e, _a, _b, F3, 48,
492 WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 64,
493 W0, W1, W2, W3, W4, W5, W6, W7);
494 _R( _b, _c, _d, _e, _a, F3, 49,
495 WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 64,
496 W0, W1, W2, W3, W4, W5, W6, W7);
497 _R( _a, _b, _c, _d, _e, F3, 50,
498 WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 64,
499 W0, W1, W2, W3, W4, W5, W6, W7);
500 _R( _e, _a, _b, _c, _d, F3, 51,
501 WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 64,
502 W0, W1, W2, W3, W4, W5, W6, W7);
503
504 _R( _d, _e, _a, _b, _c, F3, 52,
505 WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 68,
506 W7, W0, W1, W2, W3, W4, W5, W6);
507 _R( _c, _d, _e, _a, _b, F3, 53,
508 WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 68,
509 W7, W0, W1, W2, W3, W4, W5, W6);
510 _R( _b, _c, _d, _e, _a, F3, 54,
511 WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 68,
512 W7, W0, W1, W2, W3, W4, W5, W6);
513 _R( _a, _b, _c, _d, _e, F3, 55,
514 WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 68,
515 W7, W0, W1, W2, W3, W4, W5, W6);
516
517 _R( _e, _a, _b, _c, _d, F3, 56,
518 WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 72,
519 W6, W7, W0, W1, W2, W3, W4, W5);
520 _R( _d, _e, _a, _b, _c, F3, 57,
521 WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 72,
522 W6, W7, W0, W1, W2, W3, W4, W5);
523 _R( _c, _d, _e, _a, _b, F3, 58,
524 WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 72,
525 W6, W7, W0, W1, W2, W3, W4, W5);
526 _R( _b, _c, _d, _e, _a, F3, 59,
527 WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 72,
528 W6, W7, W0, W1, W2, W3, W4, W5);
529
530 subs RNBLKS, #1;
531
532 _R( _a, _b, _c, _d, _e, F4, 60,
533 WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 76,
534 W5, W6, W7, W0, W1, W2, W3, W4);
535 _R( _e, _a, _b, _c, _d, F4, 61,
536 WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 76,
537 W5, W6, W7, W0, W1, W2, W3, W4);
538 _R( _d, _e, _a, _b, _c, F4, 62,
539 WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 76,
540 W5, W6, W7, W0, W1, W2, W3, W4);
541 _R( _c, _d, _e, _a, _b, F4, 63,
542 WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 76,
543 W5, W6, W7, W0, W1, W2, W3, W4);
544
545 beq .Lend;
546
547 /* Transform 64-79 + Precalc 0-15 of next block. */
548#undef curK
549#define curK qK1
550 _R( _b, _c, _d, _e, _a, F4, 64,
551 WPRECALC_00_15_0, dummy, dummy, _, _, _, _, _, _, _, _, _ );
552 _R( _a, _b, _c, _d, _e, F4, 65,
553 WPRECALC_00_15_1, dummy, dummy, _, _, _, _, _, _, _, _, _ );
554 _R( _e, _a, _b, _c, _d, F4, 66,
555 WPRECALC_00_15_2, dummy, dummy, _, _, _, _, _, _, _, _, _ );
556 _R( _d, _e, _a, _b, _c, F4, 67,
557 WPRECALC_00_15_3, dummy, dummy, _, _, _, _, _, _, _, _, _ );
558
559 _R( _c, _d, _e, _a, _b, F4, 68,
560 dummy, dummy, dummy, _, _, _, _, _, _, _, _, _ );
561 _R( _b, _c, _d, _e, _a, F4, 69,
562 dummy, dummy, dummy, _, _, _, _, _, _, _, _, _ );
563 _R( _a, _b, _c, _d, _e, F4, 70,
564 WPRECALC_00_15_4, dummy, dummy, _, _, _, _, _, _, _, _, _ );
565 _R( _e, _a, _b, _c, _d, F4, 71,
566 WPRECALC_00_15_5, dummy, dummy, _, _, _, _, _, _, _, _, _ );
567
568 _R( _d, _e, _a, _b, _c, F4, 72,
569 dummy, dummy, dummy, _, _, _, _, _, _, _, _, _ );
570 _R( _c, _d, _e, _a, _b, F4, 73,
571 dummy, dummy, dummy, _, _, _, _, _, _, _, _, _ );
572 _R( _b, _c, _d, _e, _a, F4, 74,
573 WPRECALC_00_15_6, dummy, dummy, _, _, _, _, _, _, _, _, _ );
574 _R( _a, _b, _c, _d, _e, F4, 75,
575 WPRECALC_00_15_7, dummy, dummy, _, _, _, _, _, _, _, _, _ );
576
577 _R( _e, _a, _b, _c, _d, F4, 76,
578 WPRECALC_00_15_8, dummy, dummy, _, _, _, _, _, _, _, _, _ );
579 _R( _d, _e, _a, _b, _c, F4, 77,
580 WPRECALC_00_15_9, dummy, dummy, _, _, _, _, _, _, _, _, _ );
581 _R( _c, _d, _e, _a, _b, F4, 78,
582 WPRECALC_00_15_10, dummy, dummy, _, _, _, _, _, _, _, _, _ );
583 _R( _b, _c, _d, _e, _a, F4, 79,
584 WPRECALC_00_15_11, dummy, WPRECALC_00_15_12, _, _, _, _, _, _, _, _, _ );
585
586 /* Update the chaining variables. */
587 ldm RSTATE, {RT0-RT3};
588 add _a, RT0;
589 ldr RT0, [RSTATE, #state_h4];
590 add _b, RT1;
591 add _c, RT2;
592 add _d, RT3;
593 add _e, RT0;
594 stm RSTATE, {_a-_e};
595
596 b .Loop;
597
598.Lend:
599 /* Transform 64-79 */
600 R( _b, _c, _d, _e, _a, F4, 64 );
601 R( _a, _b, _c, _d, _e, F4, 65 );
602 R( _e, _a, _b, _c, _d, F4, 66 );
603 R( _d, _e, _a, _b, _c, F4, 67 );
604 R( _c, _d, _e, _a, _b, F4, 68 );
605 R( _b, _c, _d, _e, _a, F4, 69 );
606 R( _a, _b, _c, _d, _e, F4, 70 );
607 R( _e, _a, _b, _c, _d, F4, 71 );
608 R( _d, _e, _a, _b, _c, F4, 72 );
609 R( _c, _d, _e, _a, _b, F4, 73 );
610 R( _b, _c, _d, _e, _a, F4, 74 );
611 R( _a, _b, _c, _d, _e, F4, 75 );
612 R( _e, _a, _b, _c, _d, F4, 76 );
613 R( _d, _e, _a, _b, _c, F4, 77 );
614 R( _c, _d, _e, _a, _b, F4, 78 );
615 R( _b, _c, _d, _e, _a, F4, 79 );
616
617 mov sp, ROLDSTACK;
618
619 /* Update the chaining variables. */
620 ldm RSTATE, {RT0-RT3};
621 add _a, RT0;
622 ldr RT0, [RSTATE, #state_h4];
623 add _b, RT1;
624 add _c, RT2;
625 add _d, RT3;
626 /*vpop {q4-q7};*/
627 add _e, RT0;
628 stm RSTATE, {_a-_e};
629
630 pop {r4-r12, pc};
631
632.Ldo_nothing:
633 bx lr
634ENDPROC(sha1_transform_neon)
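The _R_F1.._R_F4 macros above are the standard SHA-1 round functions expressed in integer instructions. Reference versions in C, to make the bit tricks explicit (these mirror the macros; they are not part of the patch):

#include <stdint.h>

/* F1 is Ch(b,c,d).  The macro computes it as (d & ~b) + (c & b): the
 * two terms can never set the same bit (one needs b=0, the other b=1),
 * so ADD equals OR and no separate MVN is needed.
 */
static inline uint32_t sha1_f1(uint32_t b, uint32_t c, uint32_t d)
{
        return (d & ~b) | (c & b);
}

/* F2 (rounds 20-39) and F4 (rounds 60-79) are the same parity
 * function, which is why _R_F4 simply expands to _R_F2.
 */
static inline uint32_t sha1_f2(uint32_t b, uint32_t c, uint32_t d)
{
        return b ^ c ^ d;
}

/* F3 is Maj(b,c,d), computed as (b & c) + ((b ^ c) & d); the two
 * terms are again bitwise disjoint, so the macro can use ADDs and
 * fold in the round constant along the way.
 */
static inline uint32_t sha1_f3(uint32_t b, uint32_t c, uint32_t d)
{
        return (b & c) | ((b ^ c) & d);
}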
diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c
index 76cd976230bc..84f2a756588b 100644
--- a/arch/arm/crypto/sha1_glue.c
+++ b/arch/arm/crypto/sha1_glue.c
@@ -23,32 +23,27 @@
 #include <linux/types.h>
 #include <crypto/sha.h>
 #include <asm/byteorder.h>
+#include <asm/crypto/sha1.h>
 
-struct SHA1_CTX {
-	uint32_t h0,h1,h2,h3,h4;
-	u64 count;
-	u8 data[SHA1_BLOCK_SIZE];
-};
 
-asmlinkage void sha1_block_data_order(struct SHA1_CTX *digest,
+asmlinkage void sha1_block_data_order(u32 *digest,
 		const unsigned char *data, unsigned int rounds);
 
 
 static int sha1_init(struct shash_desc *desc)
 {
-	struct SHA1_CTX *sctx = shash_desc_ctx(desc);
-	memset(sctx, 0, sizeof(*sctx));
-	sctx->h0 = SHA1_H0;
-	sctx->h1 = SHA1_H1;
-	sctx->h2 = SHA1_H2;
-	sctx->h3 = SHA1_H3;
-	sctx->h4 = SHA1_H4;
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+
+	*sctx = (struct sha1_state){
+		.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
+	};
+
 	return 0;
 }
 
 
-static int __sha1_update(struct SHA1_CTX *sctx, const u8 *data,
+static int __sha1_update(struct sha1_state *sctx, const u8 *data,
 			 unsigned int len, unsigned int partial)
 {
 	unsigned int done = 0;
 
@@ -56,43 +51,44 @@ static int __sha1_update(struct SHA1_CTX *sctx, const u8 *data,
 
 	if (partial) {
 		done = SHA1_BLOCK_SIZE - partial;
-		memcpy(sctx->data + partial, data, done);
-		sha1_block_data_order(sctx, sctx->data, 1);
+		memcpy(sctx->buffer + partial, data, done);
+		sha1_block_data_order(sctx->state, sctx->buffer, 1);
 	}
 
 	if (len - done >= SHA1_BLOCK_SIZE) {
 		const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE;
-		sha1_block_data_order(sctx, data + done, rounds);
+		sha1_block_data_order(sctx->state, data + done, rounds);
 		done += rounds * SHA1_BLOCK_SIZE;
 	}
 
-	memcpy(sctx->data, data + done, len - done);
+	memcpy(sctx->buffer, data + done, len - done);
 	return 0;
 }
 
 
-static int sha1_update(struct shash_desc *desc, const u8 *data,
+int sha1_update_arm(struct shash_desc *desc, const u8 *data,
 		       unsigned int len)
 {
-	struct SHA1_CTX *sctx = shash_desc_ctx(desc);
+	struct sha1_state *sctx = shash_desc_ctx(desc);
 	unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
 	int res;
 
 	/* Handle the fast case right here */
 	if (partial + len < SHA1_BLOCK_SIZE) {
 		sctx->count += len;
-		memcpy(sctx->data + partial, data, len);
+		memcpy(sctx->buffer + partial, data, len);
 		return 0;
 	}
 	res = __sha1_update(sctx, data, len, partial);
 	return res;
 }
+EXPORT_SYMBOL_GPL(sha1_update_arm);
 
 
 /* Add padding and return the message digest. */
 static int sha1_final(struct shash_desc *desc, u8 *out)
 {
-	struct SHA1_CTX *sctx = shash_desc_ctx(desc);
+	struct sha1_state *sctx = shash_desc_ctx(desc);
 	unsigned int i, index, padlen;
 	__be32 *dst = (__be32 *)out;
 	__be64 bits;
@@ -106,7 +102,7 @@ static int sha1_final(struct shash_desc *desc, u8 *out)
 	/* We need to fill a whole block for __sha1_update() */
 	if (padlen <= 56) {
 		sctx->count += padlen;
-		memcpy(sctx->data + index, padding, padlen);
+		memcpy(sctx->buffer + index, padding, padlen);
 	} else {
 		__sha1_update(sctx, padding, padlen, index);
 	}
@@ -114,7 +110,7 @@ static int sha1_final(struct shash_desc *desc, u8 *out)
 
 	/* Store state in digest */
 	for (i = 0; i < 5; i++)
-		dst[i] = cpu_to_be32(((u32 *)sctx)[i]);
+		dst[i] = cpu_to_be32(sctx->state[i]);
 
 	/* Wipe context */
 	memset(sctx, 0, sizeof(*sctx));
@@ -124,7 +120,7 @@ static int sha1_final(struct shash_desc *desc, u8 *out)
 
 static int sha1_export(struct shash_desc *desc, void *out)
 {
-	struct SHA1_CTX *sctx = shash_desc_ctx(desc);
+	struct sha1_state *sctx = shash_desc_ctx(desc);
 	memcpy(out, sctx, sizeof(*sctx));
 	return 0;
 }
@@ -132,7 +128,7 @@ static int sha1_export(struct shash_desc *desc, void *out)
 
 static int sha1_import(struct shash_desc *desc, const void *in)
 {
-	struct SHA1_CTX *sctx = shash_desc_ctx(desc);
+	struct sha1_state *sctx = shash_desc_ctx(desc);
 	memcpy(sctx, in, sizeof(*sctx));
 	return 0;
 }
@@ -141,12 +137,12 @@ static int sha1_import(struct shash_desc *desc, const void *in)
 static struct shash_alg alg = {
 	.digestsize	=	SHA1_DIGEST_SIZE,
 	.init		=	sha1_init,
-	.update		=	sha1_update,
+	.update		=	sha1_update_arm,
 	.final		=	sha1_final,
 	.export		=	sha1_export,
 	.import		=	sha1_import,
-	.descsize	=	sizeof(struct SHA1_CTX),
-	.statesize	=	sizeof(struct SHA1_CTX),
+	.descsize	=	sizeof(struct sha1_state),
+	.statesize	=	sizeof(struct sha1_state),
 	.base		=	{
 		.cra_name	=	"sha1",
 		.cra_driver_name=	"sha1-asm",
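Nothing calls this driver directly; consumers go through the generic shash API, and the crypto core picks the highest-priority "sha1" implementation (the NEON driver below registers with cra_priority 250, above this driver's). A hedged sketch of such a caller, assuming process context where a GFP_KERNEL allocation is fine:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Illustrative consumer: hash a buffer with whatever "sha1"
 * implementation the crypto core selects.  out must hold
 * SHA1_DIGEST_SIZE (20) bytes.
 */
static int sha1_digest_example(const u8 *data, unsigned int len, u8 *out)
{
        struct crypto_shash *tfm;
        struct shash_desc *desc;
        int ret;

        tfm = crypto_alloc_shash("sha1", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
                       GFP_KERNEL);
        if (!desc) {
                crypto_free_shash(tfm);
                return -ENOMEM;
        }
        desc->tfm = tfm;
        desc->flags = 0;

        ret = crypto_shash_digest(desc, data, len, out);

        kfree(desc);
        crypto_free_shash(tfm);
        return ret;
}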
diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c
new file mode 100644
index 000000000000..6f1b411b1d55
--- /dev/null
+++ b/arch/arm/crypto/sha1_neon_glue.c
@@ -0,0 +1,197 @@
1/*
2 * Glue code for the SHA1 Secure Hash Algorithm assembler implementation using
3 * ARM NEON instructions.
4 *
5 * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
6 *
7 * This file is based on sha1_generic.c and sha1_ssse3_glue.c:
8 * Copyright (c) Alan Smithee.
9 * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
10 * Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
11 * Copyright (c) Mathias Krause <minipli@googlemail.com>
12 * Copyright (c) Chandramouli Narayanan <mouli@linux.intel.com>
13 *
14 * This program is free software; you can redistribute it and/or modify it
15 * under the terms of the GNU General Public License as published by the Free
16 * Software Foundation; either version 2 of the License, or (at your option)
17 * any later version.
18 *
19 */
20
21#include <crypto/internal/hash.h>
22#include <linux/init.h>
23#include <linux/module.h>
24#include <linux/mm.h>
25#include <linux/cryptohash.h>
26#include <linux/types.h>
27#include <crypto/sha.h>
28#include <asm/byteorder.h>
29#include <asm/neon.h>
30#include <asm/simd.h>
31#include <asm/crypto/sha1.h>
32
33
34asmlinkage void sha1_transform_neon(void *state_h, const char *data,
35 unsigned int rounds);
36
37
38static int sha1_neon_init(struct shash_desc *desc)
39{
40 struct sha1_state *sctx = shash_desc_ctx(desc);
41
42 *sctx = (struct sha1_state){
43 .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
44 };
45
46 return 0;
47}
48
49static int __sha1_neon_update(struct shash_desc *desc, const u8 *data,
50 unsigned int len, unsigned int partial)
51{
52 struct sha1_state *sctx = shash_desc_ctx(desc);
53 unsigned int done = 0;
54
55 sctx->count += len;
56
57 if (partial) {
58 done = SHA1_BLOCK_SIZE - partial;
59 memcpy(sctx->buffer + partial, data, done);
60 sha1_transform_neon(sctx->state, sctx->buffer, 1);
61 }
62
63 if (len - done >= SHA1_BLOCK_SIZE) {
64 const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE;
65
66 sha1_transform_neon(sctx->state, data + done, rounds);
67 done += rounds * SHA1_BLOCK_SIZE;
68 }
69
70 memcpy(sctx->buffer, data + done, len - done);
71
72 return 0;
73}
74
75static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
76 unsigned int len)
77{
78 struct sha1_state *sctx = shash_desc_ctx(desc);
79 unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
80 int res;
81
82 /* Handle the fast case right here */
83 if (partial + len < SHA1_BLOCK_SIZE) {
84 sctx->count += len;
85 memcpy(sctx->buffer + partial, data, len);
86
87 return 0;
88 }
89
90 if (!may_use_simd()) {
91 res = sha1_update_arm(desc, data, len);
92 } else {
93 kernel_neon_begin();
94 res = __sha1_neon_update(desc, data, len, partial);
95 kernel_neon_end();
96 }
97
98 return res;
99}
100
101
102/* Add padding and return the message digest. */
103static int sha1_neon_final(struct shash_desc *desc, u8 *out)
104{
105 struct sha1_state *sctx = shash_desc_ctx(desc);
106 unsigned int i, index, padlen;
107 __be32 *dst = (__be32 *)out;
108 __be64 bits;
109 static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };
110
111 bits = cpu_to_be64(sctx->count << 3);
112
113 /* Pad out to 56 mod 64 and append length */
114 index = sctx->count % SHA1_BLOCK_SIZE;
115 padlen = (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE+56) - index);
116 if (!may_use_simd()) {
117 sha1_update_arm(desc, padding, padlen);
118 sha1_update_arm(desc, (const u8 *)&bits, sizeof(bits));
119 } else {
120 kernel_neon_begin();
121 /* We need to fill a whole block for __sha1_neon_update() */
122 if (padlen <= 56) {
123 sctx->count += padlen;
124 memcpy(sctx->buffer + index, padding, padlen);
125 } else {
126 __sha1_neon_update(desc, padding, padlen, index);
127 }
128 __sha1_neon_update(desc, (const u8 *)&bits, sizeof(bits), 56);
129 kernel_neon_end();
130 }
131
132 /* Store state in digest */
133 for (i = 0; i < 5; i++)
134 dst[i] = cpu_to_be32(sctx->state[i]);
135
136 /* Wipe context */
137 memset(sctx, 0, sizeof(*sctx));
138
139 return 0;
140}
141
142static int sha1_neon_export(struct shash_desc *desc, void *out)
143{
144 struct sha1_state *sctx = shash_desc_ctx(desc);
145
146 memcpy(out, sctx, sizeof(*sctx));
147
148 return 0;
149}
150
151static int sha1_neon_import(struct shash_desc *desc, const void *in)
152{
153 struct sha1_state *sctx = shash_desc_ctx(desc);
154
155 memcpy(sctx, in, sizeof(*sctx));
156
157 return 0;
158}
159
160static struct shash_alg alg = {
161 .digestsize = SHA1_DIGEST_SIZE,
162 .init = sha1_neon_init,
163 .update = sha1_neon_update,
164 .final = sha1_neon_final,
165 .export = sha1_neon_export,
166 .import = sha1_neon_import,
167 .descsize = sizeof(struct sha1_state),
168 .statesize = sizeof(struct sha1_state),
169 .base = {
170 .cra_name = "sha1",
171 .cra_driver_name = "sha1-neon",
172 .cra_priority = 250,
173 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
174 .cra_blocksize = SHA1_BLOCK_SIZE,
175 .cra_module = THIS_MODULE,
176 }
177};
178
179static int __init sha1_neon_mod_init(void)
180{
181 if (!cpu_has_neon())
182 return -ENODEV;
183
184 return crypto_register_shash(&alg);
185}
186
187static void __exit sha1_neon_mod_fini(void)
188{
189 crypto_unregister_shash(&alg);
190}
191
192module_init(sha1_neon_mod_init);
193module_exit(sha1_neon_mod_fini);
194
195MODULE_LICENSE("GPL");
196MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, NEON accelerated");
197MODULE_ALIAS("sha1");
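The structure of this file is the standard pattern for NEON use in kernel code: NEON shares the FP register file, which is only saved and restored for process context, so every NEON path checks may_use_simd() and keeps a scalar fallback (here sha1_update_arm()) for contexts where SIMD is unavailable. Distilled, with hypothetical do_neon()/do_scalar() helpers standing in for the real transforms:

#include <linux/types.h>
#include <asm/neon.h>
#include <asm/simd.h>

void do_neon(const u8 *data, unsigned int len);         /* hypothetical */
void do_scalar(const u8 *data, unsigned int len);       /* hypothetical */

/* The may_use_simd()/kernel_neon_begin() pattern used above, reduced
 * to its skeleton.
 */
static void process_block(const u8 *data, unsigned int len)
{
        if (may_use_simd()) {
                kernel_neon_begin();    /* claim the NEON register file */
                do_neon(data, len);
                kernel_neon_end();
        } else {
                do_scalar(data, len);   /* e.g. the sha1-asm code path */
        }
}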
diff --git a/arch/arm/crypto/sha512-armv7-neon.S b/arch/arm/crypto/sha512-armv7-neon.S
new file mode 100644
index 000000000000..fe99472e507c
--- /dev/null
+++ b/arch/arm/crypto/sha512-armv7-neon.S
@@ -0,0 +1,455 @@
1/* sha512-armv7-neon.S - ARM/NEON assembly implementation of SHA-512 transform
2 *
3 * Copyright © 2013-2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the Free
7 * Software Foundation; either version 2 of the License, or (at your option)
8 * any later version.
9 */
10
11#include <linux/linkage.h>
12
13
14.syntax unified
15.code 32
16.fpu neon
17
18.text
19
20/* structure of SHA512_CONTEXT */
21#define hd_a 0
22#define hd_b ((hd_a) + 8)
23#define hd_c ((hd_b) + 8)
24#define hd_d ((hd_c) + 8)
25#define hd_e ((hd_d) + 8)
26#define hd_f ((hd_e) + 8)
27#define hd_g ((hd_f) + 8)
28
29/* register macros */
30#define RK %r2
31
32#define RA d0
33#define RB d1
34#define RC d2
35#define RD d3
36#define RE d4
37#define RF d5
38#define RG d6
39#define RH d7
40
41#define RT0 d8
42#define RT1 d9
43#define RT2 d10
44#define RT3 d11
45#define RT4 d12
46#define RT5 d13
47#define RT6 d14
48#define RT7 d15
49
50#define RT01q q4
51#define RT23q q5
52#define RT45q q6
53#define RT67q q7
54
55#define RW0 d16
56#define RW1 d17
57#define RW2 d18
58#define RW3 d19
59#define RW4 d20
60#define RW5 d21
61#define RW6 d22
62#define RW7 d23
63#define RW8 d24
64#define RW9 d25
65#define RW10 d26
66#define RW11 d27
67#define RW12 d28
68#define RW13 d29
69#define RW14 d30
70#define RW15 d31
71
72#define RW01q q8
73#define RW23q q9
74#define RW45q q10
75#define RW67q q11
76#define RW89q q12
77#define RW1011q q13
78#define RW1213q q14
79#define RW1415q q15
80
81/***********************************************************************
82 * ARM assembly implementation of sha512 transform
83 ***********************************************************************/
84#define rounds2_0_63(ra, rb, rc, rd, re, rf, rg, rh, rw0, rw1, rw01q, rw2, \
85 rw23q, rw1415q, rw9, rw10, interleave_op, arg1) \
86 /* t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[t]; */ \
87 vshr.u64 RT2, re, #14; \
88 vshl.u64 RT3, re, #64 - 14; \
89 interleave_op(arg1); \
90 vshr.u64 RT4, re, #18; \
91 vshl.u64 RT5, re, #64 - 18; \
92 vld1.64 {RT0}, [RK]!; \
93 veor.64 RT23q, RT23q, RT45q; \
94 vshr.u64 RT4, re, #41; \
95 vshl.u64 RT5, re, #64 - 41; \
96 vadd.u64 RT0, RT0, rw0; \
97 veor.64 RT23q, RT23q, RT45q; \
98 vmov.64 RT7, re; \
99 veor.64 RT1, RT2, RT3; \
100 vbsl.64 RT7, rf, rg; \
101 \
102 vadd.u64 RT1, RT1, rh; \
103 vshr.u64 RT2, ra, #28; \
104 vshl.u64 RT3, ra, #64 - 28; \
105 vadd.u64 RT1, RT1, RT0; \
106 vshr.u64 RT4, ra, #34; \
107 vshl.u64 RT5, ra, #64 - 34; \
108 vadd.u64 RT1, RT1, RT7; \
109 \
110 /* h = Sum0 (a) + Maj (a, b, c); */ \
111 veor.64 RT23q, RT23q, RT45q; \
112 vshr.u64 RT4, ra, #39; \
113 vshl.u64 RT5, ra, #64 - 39; \
114 veor.64 RT0, ra, rb; \
115 veor.64 RT23q, RT23q, RT45q; \
116 vbsl.64 RT0, rc, rb; \
117 vadd.u64 rd, rd, RT1; /* d+=t1; */ \
118 veor.64 rh, RT2, RT3; \
119 \
120 /* t1 = g + Sum1 (d) + Ch (d, e, f) + k[t] + w[t]; */ \
121 vshr.u64 RT2, rd, #14; \
122 vshl.u64 RT3, rd, #64 - 14; \
123 vadd.u64 rh, rh, RT0; \
124 vshr.u64 RT4, rd, #18; \
125 vshl.u64 RT5, rd, #64 - 18; \
126 vadd.u64 rh, rh, RT1; /* h+=t1; */ \
127 vld1.64 {RT0}, [RK]!; \
128 veor.64 RT23q, RT23q, RT45q; \
129 vshr.u64 RT4, rd, #41; \
130 vshl.u64 RT5, rd, #64 - 41; \
131 vadd.u64 RT0, RT0, rw1; \
132 veor.64 RT23q, RT23q, RT45q; \
133 vmov.64 RT7, rd; \
134 veor.64 RT1, RT2, RT3; \
135 vbsl.64 RT7, re, rf; \
136 \
137 vadd.u64 RT1, RT1, rg; \
138 vshr.u64 RT2, rh, #28; \
139 vshl.u64 RT3, rh, #64 - 28; \
140 vadd.u64 RT1, RT1, RT0; \
141 vshr.u64 RT4, rh, #34; \
142 vshl.u64 RT5, rh, #64 - 34; \
143 vadd.u64 RT1, RT1, RT7; \
144 \
145 /* g = Sum0 (h) + Maj (h, a, b); */ \
146 veor.64 RT23q, RT23q, RT45q; \
147 vshr.u64 RT4, rh, #39; \
148 vshl.u64 RT5, rh, #64 - 39; \
149 veor.64 RT0, rh, ra; \
150 veor.64 RT23q, RT23q, RT45q; \
151 vbsl.64 RT0, rb, ra; \
152 vadd.u64 rc, rc, RT1; /* c+=t1; */ \
153 veor.64 rg, RT2, RT3; \
154 \
155 /* w[0] += S1 (w[14]) + w[9] + S0 (w[1]); */ \
156 /* w[1] += S1 (w[15]) + w[10] + S0 (w[2]); */ \
157 \
158 /**** S0(w[1:2]) */ \
159 \
160 /* w[0:1] += w[9:10] */ \
161 /* RT23q = rw1:rw2 */ \
162 vext.u64 RT23q, rw01q, rw23q, #1; \
163 vadd.u64 rw0, rw9; \
164 vadd.u64 rg, rg, RT0; \
165 vadd.u64 rw1, rw10;\
166 vadd.u64 rg, rg, RT1; /* g+=t1; */ \
167 \
168 vshr.u64 RT45q, RT23q, #1; \
169 vshl.u64 RT67q, RT23q, #64 - 1; \
170 vshr.u64 RT01q, RT23q, #8; \
171 veor.u64 RT45q, RT45q, RT67q; \
172 vshl.u64 RT67q, RT23q, #64 - 8; \
173 veor.u64 RT45q, RT45q, RT01q; \
174 vshr.u64 RT01q, RT23q, #7; \
175 veor.u64 RT45q, RT45q, RT67q; \
176 \
177 /**** S1(w[14:15]) */ \
178 vshr.u64 RT23q, rw1415q, #6; \
179 veor.u64 RT01q, RT01q, RT45q; \
180 vshr.u64 RT45q, rw1415q, #19; \
181 vshl.u64 RT67q, rw1415q, #64 - 19; \
182 veor.u64 RT23q, RT23q, RT45q; \
183 vshr.u64 RT45q, rw1415q, #61; \
184 veor.u64 RT23q, RT23q, RT67q; \
185 vshl.u64 RT67q, rw1415q, #64 - 61; \
186 veor.u64 RT23q, RT23q, RT45q; \
187	vadd.u64 rw01q, RT01q; /* w[0:1] += S0(w[1:2]) */ \
188 veor.u64 RT01q, RT23q, RT67q;
189#define vadd_RT01q(rw01q) \
190	/* w[0:1] += S1(w[14:15]) */ \
191 vadd.u64 rw01q, RT01q;
192
193#define dummy(_) /*_*/
194
195#define rounds2_64_79(ra, rb, rc, rd, re, rf, rg, rh, rw0, rw1, \
196 interleave_op1, arg1, interleave_op2, arg2) \
197 /* t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[t]; */ \
198 vshr.u64 RT2, re, #14; \
199 vshl.u64 RT3, re, #64 - 14; \
200 interleave_op1(arg1); \
201 vshr.u64 RT4, re, #18; \
202 vshl.u64 RT5, re, #64 - 18; \
203 interleave_op2(arg2); \
204 vld1.64 {RT0}, [RK]!; \
205 veor.64 RT23q, RT23q, RT45q; \
206 vshr.u64 RT4, re, #41; \
207 vshl.u64 RT5, re, #64 - 41; \
208 vadd.u64 RT0, RT0, rw0; \
209 veor.64 RT23q, RT23q, RT45q; \
210 vmov.64 RT7, re; \
211 veor.64 RT1, RT2, RT3; \
212 vbsl.64 RT7, rf, rg; \
213 \
214 vadd.u64 RT1, RT1, rh; \
215 vshr.u64 RT2, ra, #28; \
216 vshl.u64 RT3, ra, #64 - 28; \
217 vadd.u64 RT1, RT1, RT0; \
218 vshr.u64 RT4, ra, #34; \
219 vshl.u64 RT5, ra, #64 - 34; \
220 vadd.u64 RT1, RT1, RT7; \
221 \
222 /* h = Sum0 (a) + Maj (a, b, c); */ \
223 veor.64 RT23q, RT23q, RT45q; \
224 vshr.u64 RT4, ra, #39; \
225 vshl.u64 RT5, ra, #64 - 39; \
226 veor.64 RT0, ra, rb; \
227 veor.64 RT23q, RT23q, RT45q; \
228 vbsl.64 RT0, rc, rb; \
229 vadd.u64 rd, rd, RT1; /* d+=t1; */ \
230 veor.64 rh, RT2, RT3; \
231 \
232 /* t1 = g + Sum1 (d) + Ch (d, e, f) + k[t] + w[t]; */ \
233 vshr.u64 RT2, rd, #14; \
234 vshl.u64 RT3, rd, #64 - 14; \
235 vadd.u64 rh, rh, RT0; \
236 vshr.u64 RT4, rd, #18; \
237 vshl.u64 RT5, rd, #64 - 18; \
238 vadd.u64 rh, rh, RT1; /* h+=t1; */ \
239 vld1.64 {RT0}, [RK]!; \
240 veor.64 RT23q, RT23q, RT45q; \
241 vshr.u64 RT4, rd, #41; \
242 vshl.u64 RT5, rd, #64 - 41; \
243 vadd.u64 RT0, RT0, rw1; \
244 veor.64 RT23q, RT23q, RT45q; \
245 vmov.64 RT7, rd; \
246 veor.64 RT1, RT2, RT3; \
247 vbsl.64 RT7, re, rf; \
248 \
249 vadd.u64 RT1, RT1, rg; \
250 vshr.u64 RT2, rh, #28; \
251 vshl.u64 RT3, rh, #64 - 28; \
252 vadd.u64 RT1, RT1, RT0; \
253 vshr.u64 RT4, rh, #34; \
254 vshl.u64 RT5, rh, #64 - 34; \
255 vadd.u64 RT1, RT1, RT7; \
256 \
257 /* g = Sum0 (h) + Maj (h, a, b); */ \
258 veor.64 RT23q, RT23q, RT45q; \
259 vshr.u64 RT4, rh, #39; \
260 vshl.u64 RT5, rh, #64 - 39; \
261 veor.64 RT0, rh, ra; \
262 veor.64 RT23q, RT23q, RT45q; \
263 vbsl.64 RT0, rb, ra; \
264 vadd.u64 rc, rc, RT1; /* c+=t1; */ \
265 veor.64 rg, RT2, RT3;
266#define vadd_rg_RT0(rg) \
267 vadd.u64 rg, rg, RT0;
268#define vadd_rg_RT1(rg) \
269 vadd.u64 rg, rg, RT1; /* g+=t1; */
270
271.align 3
272ENTRY(sha512_transform_neon)
273 /* Input:
274 * %r0: SHA512_CONTEXT
275 * %r1: data
276 * %r2: u64 k[] constants
277 * %r3: nblks
278 */
279 push {%lr};
280
281 mov %lr, #0;
282
283 /* Load context to d0-d7 */
284 vld1.64 {RA-RD}, [%r0]!;
285 vld1.64 {RE-RH}, [%r0];
286 sub %r0, #(4*8);
287
288	/* Load input into w[0..15], d16-d31 */
289 /* NOTE: Assumes that on ARMv7 unaligned accesses are always allowed. */
290 vld1.64 {RW0-RW3}, [%r1]!;
291 vld1.64 {RW4-RW7}, [%r1]!;
292 vld1.64 {RW8-RW11}, [%r1]!;
293 vld1.64 {RW12-RW15}, [%r1]!;
294#ifdef __ARMEL__
295 /* byteswap */
296 vrev64.8 RW01q, RW01q;
297 vrev64.8 RW23q, RW23q;
298 vrev64.8 RW45q, RW45q;
299 vrev64.8 RW67q, RW67q;
300 vrev64.8 RW89q, RW89q;
301 vrev64.8 RW1011q, RW1011q;
302 vrev64.8 RW1213q, RW1213q;
303 vrev64.8 RW1415q, RW1415q;
304#endif
305
306 /* EABI says that d8-d15 must be preserved by callee. */
307 /*vpush {RT0-RT7};*/
308
309.Loop:
310 rounds2_0_63(RA, RB, RC, RD, RE, RF, RG, RH, RW0, RW1, RW01q, RW2,
311 RW23q, RW1415q, RW9, RW10, dummy, _);
312 b .Lenter_rounds;
313
314.Loop_rounds:
315 rounds2_0_63(RA, RB, RC, RD, RE, RF, RG, RH, RW0, RW1, RW01q, RW2,
316 RW23q, RW1415q, RW9, RW10, vadd_RT01q, RW1415q);
317.Lenter_rounds:
318 rounds2_0_63(RG, RH, RA, RB, RC, RD, RE, RF, RW2, RW3, RW23q, RW4,
319 RW45q, RW01q, RW11, RW12, vadd_RT01q, RW01q);
320 rounds2_0_63(RE, RF, RG, RH, RA, RB, RC, RD, RW4, RW5, RW45q, RW6,
321 RW67q, RW23q, RW13, RW14, vadd_RT01q, RW23q);
322 rounds2_0_63(RC, RD, RE, RF, RG, RH, RA, RB, RW6, RW7, RW67q, RW8,
323 RW89q, RW45q, RW15, RW0, vadd_RT01q, RW45q);
324 rounds2_0_63(RA, RB, RC, RD, RE, RF, RG, RH, RW8, RW9, RW89q, RW10,
325 RW1011q, RW67q, RW1, RW2, vadd_RT01q, RW67q);
326 rounds2_0_63(RG, RH, RA, RB, RC, RD, RE, RF, RW10, RW11, RW1011q, RW12,
327 RW1213q, RW89q, RW3, RW4, vadd_RT01q, RW89q);
328 add %lr, #16;
329 rounds2_0_63(RE, RF, RG, RH, RA, RB, RC, RD, RW12, RW13, RW1213q, RW14,
330 RW1415q, RW1011q, RW5, RW6, vadd_RT01q, RW1011q);
331 cmp %lr, #64;
332 rounds2_0_63(RC, RD, RE, RF, RG, RH, RA, RB, RW14, RW15, RW1415q, RW0,
333 RW01q, RW1213q, RW7, RW8, vadd_RT01q, RW1213q);
334 bne .Loop_rounds;
335
336 subs %r3, #1;
337
338 rounds2_64_79(RA, RB, RC, RD, RE, RF, RG, RH, RW0, RW1,
339 vadd_RT01q, RW1415q, dummy, _);
340 rounds2_64_79(RG, RH, RA, RB, RC, RD, RE, RF, RW2, RW3,
341 vadd_rg_RT0, RG, vadd_rg_RT1, RG);
342 beq .Lhandle_tail;
343 vld1.64 {RW0-RW3}, [%r1]!;
344 rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW4, RW5,
345 vadd_rg_RT0, RE, vadd_rg_RT1, RE);
346 rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW6, RW7,
347 vadd_rg_RT0, RC, vadd_rg_RT1, RC);
348#ifdef __ARMEL__
349 vrev64.8 RW01q, RW01q;
350 vrev64.8 RW23q, RW23q;
351#endif
352 vld1.64 {RW4-RW7}, [%r1]!;
353 rounds2_64_79(RA, RB, RC, RD, RE, RF, RG, RH, RW8, RW9,
354 vadd_rg_RT0, RA, vadd_rg_RT1, RA);
355 rounds2_64_79(RG, RH, RA, RB, RC, RD, RE, RF, RW10, RW11,
356 vadd_rg_RT0, RG, vadd_rg_RT1, RG);
357#ifdef __ARMEL__
358 vrev64.8 RW45q, RW45q;
359 vrev64.8 RW67q, RW67q;
360#endif
361 vld1.64 {RW8-RW11}, [%r1]!;
362 rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW12, RW13,
363 vadd_rg_RT0, RE, vadd_rg_RT1, RE);
364 rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW14, RW15,
365 vadd_rg_RT0, RC, vadd_rg_RT1, RC);
366#ifdef __ARMEL__
367 vrev64.8 RW89q, RW89q;
368 vrev64.8 RW1011q, RW1011q;
369#endif
370 vld1.64 {RW12-RW15}, [%r1]!;
371 vadd_rg_RT0(RA);
372 vadd_rg_RT1(RA);
373
374 /* Load context */
375 vld1.64 {RT0-RT3}, [%r0]!;
376 vld1.64 {RT4-RT7}, [%r0];
377 sub %r0, #(4*8);
378
379#ifdef __ARMEL__
380 vrev64.8 RW1213q, RW1213q;
381 vrev64.8 RW1415q, RW1415q;
382#endif
383
384 vadd.u64 RA, RT0;
385 vadd.u64 RB, RT1;
386 vadd.u64 RC, RT2;
387 vadd.u64 RD, RT3;
388 vadd.u64 RE, RT4;
389 vadd.u64 RF, RT5;
390 vadd.u64 RG, RT6;
391 vadd.u64 RH, RT7;
392
393 /* Store the first half of context */
394 vst1.64 {RA-RD}, [%r0]!;
395 sub RK, $(8*80);
396 vst1.64 {RE-RH}, [%r0]; /* Store the last half of context */
397 mov %lr, #0;
398 sub %r0, #(4*8);
399
400 b .Loop;
401
402.Lhandle_tail:
403 rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW4, RW5,
404 vadd_rg_RT0, RE, vadd_rg_RT1, RE);
405 rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW6, RW7,
406 vadd_rg_RT0, RC, vadd_rg_RT1, RC);
407 rounds2_64_79(RA, RB, RC, RD, RE, RF, RG, RH, RW8, RW9,
408 vadd_rg_RT0, RA, vadd_rg_RT1, RA);
409 rounds2_64_79(RG, RH, RA, RB, RC, RD, RE, RF, RW10, RW11,
410 vadd_rg_RT0, RG, vadd_rg_RT1, RG);
411 rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW12, RW13,
412 vadd_rg_RT0, RE, vadd_rg_RT1, RE);
413 rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW14, RW15,
414 vadd_rg_RT0, RC, vadd_rg_RT1, RC);
415
416 /* Load context to d16-d23 */
417 vld1.64 {RW0-RW3}, [%r0]!;
418 vadd_rg_RT0(RA);
419 vld1.64 {RW4-RW7}, [%r0];
420 vadd_rg_RT1(RA);
421 sub %r0, #(4*8);
422
423 vadd.u64 RA, RW0;
424 vadd.u64 RB, RW1;
425 vadd.u64 RC, RW2;
426 vadd.u64 RD, RW3;
427 vadd.u64 RE, RW4;
428 vadd.u64 RF, RW5;
429 vadd.u64 RG, RW6;
430 vadd.u64 RH, RW7;
431
432 /* Store the first half of context */
433 vst1.64 {RA-RD}, [%r0]!;
434
435 /* Clear used registers */
436 /* d16-d31 */
437 veor.u64 RW01q, RW01q;
438 veor.u64 RW23q, RW23q;
439 veor.u64 RW45q, RW45q;
440 veor.u64 RW67q, RW67q;
441 vst1.64 {RE-RH}, [%r0]; /* Store the last half of context */
442 veor.u64 RW89q, RW89q;
443 veor.u64 RW1011q, RW1011q;
444 veor.u64 RW1213q, RW1213q;
445 veor.u64 RW1415q, RW1415q;
446 /* d8-d15 */
447 /*vpop {RT0-RT7};*/
448 /* d0-d7 (q0-q3) */
449 veor.u64 %q0, %q0;
450 veor.u64 %q1, %q1;
451 veor.u64 %q2, %q2;
452 veor.u64 %q3, %q3;
453
454 pop {%pc};
455ENDPROC(sha512_transform_neon)
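
For readability, here is a minimal C sketch of the single SHA-512 round that the
rounds2_0_63/rounds2_64_79 macros above compute twice per invocation; the a..h
rotation is handled by passing the macro arguments in rotated order, and the
helper names below are the usual FIPS 180 ones, not symbols from this patch.

	#include <stdint.h>

	#define ROR64(x, n)	(((x) >> (n)) | ((x) << (64 - (n))))
	#define Ch(e, f, g)	(((e) & (f)) ^ (~(e) & (g)))
	#define Maj(a, b, c)	(((a) & (b)) ^ ((a) & (c)) ^ ((b) & (c)))
	#define Sum0(a)		(ROR64(a, 28) ^ ROR64(a, 34) ^ ROR64(a, 39))
	#define Sum1(e)		(ROR64(e, 14) ^ ROR64(e, 18) ^ ROR64(e, 41))

	/* s[] = { a, b, c, d, e, f, g, h }; k and w are K[t] and W[t] */
	static void sha512_round(uint64_t s[8], uint64_t k, uint64_t w)
	{
		uint64_t t1 = s[7] + Sum1(s[4]) + Ch(s[4], s[5], s[6]) + k + w;

		s[3] += t1;					/* d += t1      */
		s[7]  = Sum0(s[0]) + Maj(s[0], s[1], s[2]) + t1; /* h = t2 + t1 */
	}

The 0..63 variant additionally advances the message schedule,
w[t] += S1(w[t+14]) + w[t+9] + S0(w[t+1]) with all indices mod 16, as the
inline comments note.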
diff --git a/arch/arm/crypto/sha512_neon_glue.c b/arch/arm/crypto/sha512_neon_glue.c
new file mode 100644
index 000000000000..0d2758ff5e12
--- /dev/null
+++ b/arch/arm/crypto/sha512_neon_glue.c
@@ -0,0 +1,305 @@
1/*
2 * Glue code for the SHA512 Secure Hash Algorithm assembly implementation
3 * using NEON instructions.
4 *
5 * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
6 *
7 * This file is based on sha512_ssse3_glue.c:
8 * Copyright (C) 2013 Intel Corporation
9 * Author: Tim Chen <tim.c.chen@linux.intel.com>
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the Free
13 * Software Foundation; either version 2 of the License, or (at your option)
14 * any later version.
15 *
16 */
17
18#include <crypto/internal/hash.h>
19#include <linux/init.h>
20#include <linux/module.h>
21#include <linux/mm.h>
22#include <linux/cryptohash.h>
23#include <linux/types.h>
24#include <linux/string.h>
25#include <crypto/sha.h>
26#include <asm/byteorder.h>
27#include <asm/simd.h>
28#include <asm/neon.h>
29
30
31static const u64 sha512_k[] = {
32 0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL,
33 0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL,
34 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL,
35 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL,
36 0xd807aa98a3030242ULL, 0x12835b0145706fbeULL,
37 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
38 0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL,
39 0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL,
40 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL,
41 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL,
42 0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL,
43 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL,
44 0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL,
45 0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL,
46 0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL,
47 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL,
48 0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL,
49 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL,
50 0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL,
51 0x81c2c92e47edaee6ULL, 0x92722c851482353bULL,
52 0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL,
53 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL,
54 0xd192e819d6ef5218ULL, 0xd69906245565a910ULL,
55 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL,
56 0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL,
57 0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL,
58 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL,
59 0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL,
60 0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL,
61 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
62 0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL,
63 0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL,
64 0xca273eceea26619cULL, 0xd186b8c721c0c207ULL,
65 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL,
66 0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL,
67 0x113f9804bef90daeULL, 0x1b710b35131c471bULL,
68 0x28db77f523047d84ULL, 0x32caab7b40c72493ULL,
69 0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL,
70 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL,
71 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL
72};
73
74
75asmlinkage void sha512_transform_neon(u64 *digest, const void *data,
76 const u64 k[], unsigned int num_blks);
77
78
79static int sha512_neon_init(struct shash_desc *desc)
80{
81 struct sha512_state *sctx = shash_desc_ctx(desc);
82
83 sctx->state[0] = SHA512_H0;
84 sctx->state[1] = SHA512_H1;
85 sctx->state[2] = SHA512_H2;
86 sctx->state[3] = SHA512_H3;
87 sctx->state[4] = SHA512_H4;
88 sctx->state[5] = SHA512_H5;
89 sctx->state[6] = SHA512_H6;
90 sctx->state[7] = SHA512_H7;
91 sctx->count[0] = sctx->count[1] = 0;
92
93 return 0;
94}
95
96static int __sha512_neon_update(struct shash_desc *desc, const u8 *data,
97 unsigned int len, unsigned int partial)
98{
99 struct sha512_state *sctx = shash_desc_ctx(desc);
100 unsigned int done = 0;
101
102 sctx->count[0] += len;
103 if (sctx->count[0] < len)
104 sctx->count[1]++;
105
106 if (partial) {
107 done = SHA512_BLOCK_SIZE - partial;
108 memcpy(sctx->buf + partial, data, done);
109 sha512_transform_neon(sctx->state, sctx->buf, sha512_k, 1);
110 }
111
112 if (len - done >= SHA512_BLOCK_SIZE) {
113 const unsigned int rounds = (len - done) / SHA512_BLOCK_SIZE;
114
115 sha512_transform_neon(sctx->state, data + done, sha512_k,
116 rounds);
117
118 done += rounds * SHA512_BLOCK_SIZE;
119 }
120
121 memcpy(sctx->buf, data + done, len - done);
122
123 return 0;
124}
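
The byte count is kept as a 128-bit value split across count[0]/count[1]; the
unsigned-wrap comparison above is the standard carry-detection idiom. A minimal
stand-alone illustration (a sketch, not code from this patch):

	#include <stdint.h>

	static void count128_add(uint64_t count[2], uint64_t len)
	{
		count[0] += len;
		if (count[0] < len)	/* unsigned wrap implies a carry out */
			count[1]++;
	}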
125
126static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
127 unsigned int len)
128{
129 struct sha512_state *sctx = shash_desc_ctx(desc);
130 unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
131 int res;
132
133 /* Handle the fast case right here */
134 if (partial + len < SHA512_BLOCK_SIZE) {
135 sctx->count[0] += len;
136 if (sctx->count[0] < len)
137 sctx->count[1]++;
138 memcpy(sctx->buf + partial, data, len);
139
140 return 0;
141 }
142
143 if (!may_use_simd()) {
144 res = crypto_sha512_update(desc, data, len);
145 } else {
146 kernel_neon_begin();
147 res = __sha512_neon_update(desc, data, len, partial);
148 kernel_neon_end();
149 }
150
151 return res;
152}
153
154
155/* Add padding and return the message digest. */
156static int sha512_neon_final(struct shash_desc *desc, u8 *out)
157{
158 struct sha512_state *sctx = shash_desc_ctx(desc);
159 unsigned int i, index, padlen;
160 __be64 *dst = (__be64 *)out;
161 __be64 bits[2];
162 static const u8 padding[SHA512_BLOCK_SIZE] = { 0x80, };
163
164 /* save number of bits */
165 bits[1] = cpu_to_be64(sctx->count[0] << 3);
166 bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
167
168 /* Pad out to 112 mod 128 and append length */
169 index = sctx->count[0] & 0x7f;
170 padlen = (index < 112) ? (112 - index) : ((128+112) - index);
171
172 if (!may_use_simd()) {
173 crypto_sha512_update(desc, padding, padlen);
174 crypto_sha512_update(desc, (const u8 *)&bits, sizeof(bits));
175 } else {
176 kernel_neon_begin();
177 /* We need to fill a whole block for __sha512_neon_update() */
178 if (padlen <= 112) {
179 sctx->count[0] += padlen;
180 if (sctx->count[0] < padlen)
181 sctx->count[1]++;
182 memcpy(sctx->buf + index, padding, padlen);
183 } else {
184 __sha512_neon_update(desc, padding, padlen, index);
185 }
186 __sha512_neon_update(desc, (const u8 *)&bits,
187 sizeof(bits), 112);
188 kernel_neon_end();
189 }
190
191 /* Store state in digest */
192 for (i = 0; i < 8; i++)
193 dst[i] = cpu_to_be64(sctx->state[i]);
194
195 /* Wipe context */
196 memset(sctx, 0, sizeof(*sctx));
197
198 return 0;
199}
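
The pad length above is chosen so that index + padlen leaves exactly 16 bytes
of room for the length words, i.e. (index + padlen) % 128 == 112. A quick
sketch of the two branches:

	/* index = count[0] & 0x7f, i.e. bytes already in the partial block */
	unsigned int sha512_padlen(unsigned int index)
	{
		return (index < 112) ? (112 - index) : ((128 + 112) - index);
	}
	/* index 0   -> 112 bytes of padding, length fits in the same block */
	/* index 112 -> 128 bytes: padding spills into one extra block      */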
200
201static int sha512_neon_export(struct shash_desc *desc, void *out)
202{
203 struct sha512_state *sctx = shash_desc_ctx(desc);
204
205 memcpy(out, sctx, sizeof(*sctx));
206
207 return 0;
208}
209
210static int sha512_neon_import(struct shash_desc *desc, const void *in)
211{
212 struct sha512_state *sctx = shash_desc_ctx(desc);
213
214 memcpy(sctx, in, sizeof(*sctx));
215
216 return 0;
217}
218
219static int sha384_neon_init(struct shash_desc *desc)
220{
221 struct sha512_state *sctx = shash_desc_ctx(desc);
222
223 sctx->state[0] = SHA384_H0;
224 sctx->state[1] = SHA384_H1;
225 sctx->state[2] = SHA384_H2;
226 sctx->state[3] = SHA384_H3;
227 sctx->state[4] = SHA384_H4;
228 sctx->state[5] = SHA384_H5;
229 sctx->state[6] = SHA384_H6;
230 sctx->state[7] = SHA384_H7;
231
232 sctx->count[0] = sctx->count[1] = 0;
233
234 return 0;
235}
236
237static int sha384_neon_final(struct shash_desc *desc, u8 *hash)
238{
239 u8 D[SHA512_DIGEST_SIZE];
240
241 sha512_neon_final(desc, D);
242
243 memcpy(hash, D, SHA384_DIGEST_SIZE);
244 memset(D, 0, SHA512_DIGEST_SIZE);
245
246 return 0;
247}
248
249static struct shash_alg algs[] = { {
250 .digestsize = SHA512_DIGEST_SIZE,
251 .init = sha512_neon_init,
252 .update = sha512_neon_update,
253 .final = sha512_neon_final,
254 .export = sha512_neon_export,
255 .import = sha512_neon_import,
256 .descsize = sizeof(struct sha512_state),
257 .statesize = sizeof(struct sha512_state),
258 .base = {
259 .cra_name = "sha512",
260 .cra_driver_name = "sha512-neon",
261 .cra_priority = 250,
262 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
263 .cra_blocksize = SHA512_BLOCK_SIZE,
264 .cra_module = THIS_MODULE,
265 }
266}, {
267 .digestsize = SHA384_DIGEST_SIZE,
268 .init = sha384_neon_init,
269 .update = sha512_neon_update,
270 .final = sha384_neon_final,
271 .export = sha512_neon_export,
272 .import = sha512_neon_import,
273 .descsize = sizeof(struct sha512_state),
274 .statesize = sizeof(struct sha512_state),
275 .base = {
276 .cra_name = "sha384",
277 .cra_driver_name = "sha384-neon",
278 .cra_priority = 250,
279 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
280 .cra_blocksize = SHA384_BLOCK_SIZE,
281 .cra_module = THIS_MODULE,
282 }
283} };
284
285static int __init sha512_neon_mod_init(void)
286{
287 if (!cpu_has_neon())
288 return -ENODEV;
289
290 return crypto_register_shashes(algs, ARRAY_SIZE(algs));
291}
292
293static void __exit sha512_neon_mod_fini(void)
294{
295 crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
296}
297
298module_init(sha512_neon_mod_init);
299module_exit(sha512_neon_mod_fini);
300
301MODULE_LICENSE("GPL");
302MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, NEON accelerated");
303
304MODULE_ALIAS("sha512");
305MODULE_ALIAS("sha384");
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 57f0584e8d97..f67fd3afebdf 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -24,6 +24,8 @@
24#include <asm/domain.h> 24#include <asm/domain.h>
25#include <asm/opcodes-virt.h> 25#include <asm/opcodes-virt.h>
26#include <asm/asm-offsets.h> 26#include <asm/asm-offsets.h>
27#include <asm/page.h>
28#include <asm/thread_info.h>
27 29
28#define IOMEM(x) (x) 30#define IOMEM(x) (x)
29 31
@@ -179,10 +181,10 @@
179 * Get current thread_info. 181 * Get current thread_info.
180 */ 182 */
181 .macro get_thread_info, rd 183 .macro get_thread_info, rd
182 ARM( mov \rd, sp, lsr #13 ) 184 ARM( mov \rd, sp, lsr #THREAD_SIZE_ORDER + PAGE_SHIFT )
183 THUMB( mov \rd, sp ) 185 THUMB( mov \rd, sp )
184 THUMB( lsr \rd, \rd, #13 ) 186 THUMB( lsr \rd, \rd, #THREAD_SIZE_ORDER + PAGE_SHIFT )
185 mov \rd, \rd, lsl #13 187 mov \rd, \rd, lsl #THREAD_SIZE_ORDER + PAGE_SHIFT
186 .endm 188 .endm
187 189
188/* 190/*
@@ -425,4 +427,25 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
425#endif 427#endif
426 .endm 428 .endm
427 429
430 .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
431 .macro ret\c, reg
432#if __LINUX_ARM_ARCH__ < 6
433 mov\c pc, \reg
434#else
435 .ifeqs "\reg", "lr"
436 bx\c \reg
437 .else
438 mov\c pc, \reg
439 .endif
440#endif
441 .endm
442 .endr
443
444 .macro ret.w, reg
445 ret \reg
446#ifdef CONFIG_THUMB2_KERNEL
447 nop
448#endif
449 .endm
450
428#endif /* __ASM_ASSEMBLER_H__ */ 451#endif /* __ASM_ASSEMBLER_H__ */
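
The effect of the new ret/ret.w macros above, by architecture and operand:

	ret lr     ARMv6+:   bx<cond> lr       (predictable/interworking return)
	ret r3     ARMv6+:   mov<cond> pc, r3  (bx is only used for lr)
	ret <reg>  pre-v6:   mov<cond> pc, <reg>
	ret.w      as above, padded with a nop on Thumb-2 kernels so the
	           entry fills a full 32-bit slot in branch tables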
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 8c2b7321a478..963a2515906d 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -62,17 +62,18 @@
62#define ARM_CPU_IMP_ARM 0x41 62#define ARM_CPU_IMP_ARM 0x41
63#define ARM_CPU_IMP_INTEL 0x69 63#define ARM_CPU_IMP_INTEL 0x69
64 64
65#define ARM_CPU_PART_ARM1136 0xB360 65/* ARM implemented processors */
66#define ARM_CPU_PART_ARM1156 0xB560 66#define ARM_CPU_PART_ARM1136 0x4100b360
67#define ARM_CPU_PART_ARM1176 0xB760 67#define ARM_CPU_PART_ARM1156 0x4100b560
68#define ARM_CPU_PART_ARM11MPCORE 0xB020 68#define ARM_CPU_PART_ARM1176 0x4100b760
69#define ARM_CPU_PART_CORTEX_A8 0xC080 69#define ARM_CPU_PART_ARM11MPCORE 0x4100b020
70#define ARM_CPU_PART_CORTEX_A9 0xC090 70#define ARM_CPU_PART_CORTEX_A8 0x4100c080
71#define ARM_CPU_PART_CORTEX_A5 0xC050 71#define ARM_CPU_PART_CORTEX_A9 0x4100c090
72#define ARM_CPU_PART_CORTEX_A15 0xC0F0 72#define ARM_CPU_PART_CORTEX_A5 0x4100c050
73#define ARM_CPU_PART_CORTEX_A7 0xC070 73#define ARM_CPU_PART_CORTEX_A7 0x4100c070
74#define ARM_CPU_PART_CORTEX_A12 0xC0D0 74#define ARM_CPU_PART_CORTEX_A12 0x4100c0d0
75#define ARM_CPU_PART_CORTEX_A17 0xC0E0 75#define ARM_CPU_PART_CORTEX_A17 0x4100c0e0
76#define ARM_CPU_PART_CORTEX_A15 0x4100c0f0
76 77
77#define ARM_CPU_XSCALE_ARCH_MASK 0xe000 78#define ARM_CPU_XSCALE_ARCH_MASK 0xe000
78#define ARM_CPU_XSCALE_ARCH_V1 0x2000 79#define ARM_CPU_XSCALE_ARCH_V1 0x2000
@@ -171,14 +172,24 @@ static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
171 return (read_cpuid_id() & 0xFF000000) >> 24; 172 return (read_cpuid_id() & 0xFF000000) >> 24;
172} 173}
173 174
174static inline unsigned int __attribute_const__ read_cpuid_part_number(void) 175/*
176 * The CPU part number is meaningless without referring to the CPU
177 * implementer: implementers are free to define their own part numbers
178 * which are permitted to clash with other implementer part numbers.
179 */
180static inline unsigned int __attribute_const__ read_cpuid_part(void)
181{
182 return read_cpuid_id() & 0xff00fff0;
183}
184
185static inline unsigned int __attribute_const__ __deprecated read_cpuid_part_number(void)
175{ 186{
176 return read_cpuid_id() & 0xFFF0; 187 return read_cpuid_id() & 0xFFF0;
177} 188}
178 189
179static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void) 190static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void)
180{ 191{
181 return read_cpuid_part_number() & ARM_CPU_XSCALE_ARCH_MASK; 192 return read_cpuid_id() & ARM_CPU_XSCALE_ARCH_MASK;
182} 193}
183 194
184static inline unsigned int __attribute_const__ read_cpuid_cachetype(void) 195static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
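
A short sketch of the intended use: part numbers are now only compared together
with the implementer bits, as the smp_scu.h change below already does
(configure_a9_errata() is a hypothetical callee):

	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
		configure_a9_errata();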
diff --git a/arch/arm/include/asm/crypto/sha1.h b/arch/arm/include/asm/crypto/sha1.h
new file mode 100644
index 000000000000..75e6a417416b
--- /dev/null
+++ b/arch/arm/include/asm/crypto/sha1.h
@@ -0,0 +1,10 @@
1#ifndef ASM_ARM_CRYPTO_SHA1_H
2#define ASM_ARM_CRYPTO_SHA1_H
3
4#include <linux/crypto.h>
5#include <crypto/sha.h>
6
7extern int sha1_update_arm(struct shash_desc *desc, const u8 *data,
8 unsigned int len);
9
10#endif
diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S
index 88d61815f0c0..469a2b30fa27 100644
--- a/arch/arm/include/asm/entry-macro-multi.S
+++ b/arch/arm/include/asm/entry-macro-multi.S
@@ -35,5 +35,5 @@
35\symbol_name: 35\symbol_name:
36 mov r8, lr 36 mov r8, lr
37 arch_irq_handler_default 37 arch_irq_handler_default
38 mov pc, r8 38 ret r8
39 .endm 39 .endm
diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h
index 74a8b84f3cb1..74be7c22035a 100644
--- a/arch/arm/include/asm/glue-proc.h
+++ b/arch/arm/include/asm/glue-proc.h
@@ -221,15 +221,6 @@
221# endif 221# endif
222#endif 222#endif
223 223
224#ifdef CONFIG_CPU_V7
225# ifdef CPU_NAME
226# undef MULTI_CPU
227# define MULTI_CPU
228# else
229# define CPU_NAME cpu_v7
230# endif
231#endif
232
233#ifdef CONFIG_CPU_V7M 224#ifdef CONFIG_CPU_V7M
234# ifdef CPU_NAME 225# ifdef CPU_NAME
235# undef MULTI_CPU 226# undef MULTI_CPU
@@ -248,6 +239,15 @@
248# endif 239# endif
249#endif 240#endif
250 241
242#ifdef CONFIG_CPU_V7
243/*
244 * Cortex-A9 needs a different suspend/resume function, so we need
245 * multiple CPU support for ARMv7 anyway.
246 */
247# undef MULTI_CPU
248# define MULTI_CPU
249#endif
250
251#ifndef MULTI_CPU 251#ifndef MULTI_CPU
252#define cpu_proc_init __glue(CPU_NAME,_proc_init) 252#define cpu_proc_init __glue(CPU_NAME,_proc_init)
253#define cpu_proc_fin __glue(CPU_NAME,_proc_fin) 253#define cpu_proc_fin __glue(CPU_NAME,_proc_fin)
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index 94060adba174..57ff7f2a3084 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -217,6 +217,22 @@ int __mcpm_cluster_state(unsigned int cluster);
217int __init mcpm_sync_init( 217int __init mcpm_sync_init(
218 void (*power_up_setup)(unsigned int affinity_level)); 218 void (*power_up_setup)(unsigned int affinity_level));
219 219
220/**
221 * mcpm_loopback - make a run through the MCPM low-level code
222 *
223 * @cache_disable: pointer to function performing cache disabling
224 *
225 * This exercises the MCPM machinery by soft resetting the CPU and branching
226 * to the MCPM low-level entry code before returning to the caller.
227 * The @cache_disable function must do the necessary cache disabling to
228 * let the regular kernel init code turn it back on as if the CPU was
229 * hotplugged in. The MCPM state machine is set as if the cluster was
230 * initialized, meaning the power_up_setup callback passed to mcpm_sync_init()
231 * will be invoked for all affinity levels. This may be useful to initialize
232 * some resources such as enabling the CCI that requires the cache to be off,
 * or simply for testing purposes.
233 */
234int __init mcpm_loopback(void (*cache_disable)(void));
235
220void __init mcpm_smp_set_ops(void); 236void __init mcpm_smp_set_ops(void);
221 237
222#else 238#else
diff --git a/arch/arm/include/asm/mcs_spinlock.h b/arch/arm/include/asm/mcs_spinlock.h
new file mode 100644
index 000000000000..f652ad65840a
--- /dev/null
+++ b/arch/arm/include/asm/mcs_spinlock.h
@@ -0,0 +1,23 @@
1#ifndef __ASM_MCS_LOCK_H
2#define __ASM_MCS_LOCK_H
3
4#ifdef CONFIG_SMP
5#include <asm/spinlock.h>
6
7/* MCS spin-locking. */
8#define arch_mcs_spin_lock_contended(lock) \
9do { \
10 /* Ensure prior stores are observed before we enter wfe. */ \
11 smp_mb(); \
12 while (!(smp_load_acquire(lock))) \
13 wfe(); \
14} while (0) \
15
16#define arch_mcs_spin_unlock_contended(lock) \
17do { \
18 smp_store_release(lock, 1); \
19 dsb_sev(); \
20} while (0)
21
22#endif /* CONFIG_SMP */
23#endif /* __ASM_MCS_LOCK_H */
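
A sketch of how the two macros pair up during a contended handoff; node and
next stand for the hypothetical per-CPU queue entries the generic MCS code
supplies:

	/* waiter: parks in wfe() until the flag becomes non-zero */
	arch_mcs_spin_lock_contended(&node->locked);

	/* holder: store-release publishes the flag, dsb_sev() wakes the wfe() */
	arch_mcs_spin_unlock_contended(&next->locked);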
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 2b751464d6ff..e731018869a7 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -91,9 +91,7 @@
91 * of this define that was meant to. 91 * of this define that was meant to.
92 * Fortunately, there is no reference for this in noMMU mode, for now. 92 * Fortunately, there is no reference for this in noMMU mode, for now.
93 */ 93 */
94#ifndef TASK_SIZE 94#define TASK_SIZE UL(0xffffffff)
95#define TASK_SIZE (CONFIG_DRAM_SIZE)
96#endif
97 95
98#ifndef TASK_UNMAPPED_BASE 96#ifndef TASK_UNMAPPED_BASE
99#define TASK_UNMAPPED_BASE UL(0x00000000) 97#define TASK_UNMAPPED_BASE UL(0x00000000)
@@ -150,13 +148,11 @@
150 148
151/* 149/*
152 * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical 150 * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
153 * memory. This is used for XIP and NoMMU kernels, or by kernels which 151 * memory. This is used for XIP and NoMMU kernels, and on platforms that don't
154 * have their own mach/memory.h. Assembly code must always use 152 * have CONFIG_ARM_PATCH_PHYS_VIRT. Assembly code must always use
155 * PLAT_PHYS_OFFSET and not PHYS_OFFSET. 153 * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
156 */ 154 */
157#ifndef PLAT_PHYS_OFFSET
158#define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET) 155#define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
159#endif
160 156
161#ifndef __ASSEMBLY__ 157#ifndef __ASSEMBLY__
162 158
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 755877527cf9..c3a83691af8e 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -12,15 +12,6 @@
12#ifndef __ARM_PERF_EVENT_H__ 12#ifndef __ARM_PERF_EVENT_H__
13#define __ARM_PERF_EVENT_H__ 13#define __ARM_PERF_EVENT_H__
14 14
15/*
16 * The ARMv7 CPU PMU supports up to 32 event counters.
17 */
18#define ARMPMU_MAX_HWEVENTS 32
19
20#define HW_OP_UNSUPPORTED 0xFFFF
21#define C(_x) PERF_COUNT_HW_CACHE_##_x
22#define CACHE_OP_UNSUPPORTED 0xFFFF
23
24#ifdef CONFIG_HW_PERF_EVENTS 15#ifdef CONFIG_HW_PERF_EVENTS
25struct pt_regs; 16struct pt_regs;
26extern unsigned long perf_instruction_pointer(struct pt_regs *regs); 17extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
index 626989fec4d3..9fd61c72a33a 100644
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -43,7 +43,7 @@
43#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2) 43#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
44#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3) 44#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
45#define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */ 45#define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */
46#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */ 46#define PMD_SECT_AP2 (_AT(pmdval_t, 1) << 7) /* read only */
47#define PMD_SECT_S (_AT(pmdval_t, 3) << 8) 47#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
48#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10) 48#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
49#define PMD_SECT_nG (_AT(pmdval_t, 1) << 11) 49#define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
@@ -72,6 +72,7 @@
72#define PTE_TABLE_BIT (_AT(pteval_t, 1) << 1) 72#define PTE_TABLE_BIT (_AT(pteval_t, 1) << 1)
73#define PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) /* AttrIndx[0] */ 73#define PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) /* AttrIndx[0] */
74#define PTE_CACHEABLE (_AT(pteval_t, 1) << 3) /* AttrIndx[1] */ 74#define PTE_CACHEABLE (_AT(pteval_t, 1) << 3) /* AttrIndx[1] */
75#define PTE_AP2 (_AT(pteval_t, 1) << 7) /* AP[2] */
75#define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ 76#define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
76#define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */ 77#define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
77#define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */ 78#define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 85c60adc8b60..06e0bc0f8b00 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -79,18 +79,19 @@
79#define L_PTE_PRESENT (_AT(pteval_t, 3) << 0) /* Present */ 79#define L_PTE_PRESENT (_AT(pteval_t, 3) << 0) /* Present */
80#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */ 80#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */
81#define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */ 81#define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
82#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
83#define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ 82#define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
84#define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */ 83#define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
85#define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */ 84#define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
86#define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */ 85#define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
87#define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */ 86#define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
88#define L_PTE_NONE (_AT(pteval_t, 1) << 57) /* PROT_NONE */ 87#define L_PTE_NONE (_AT(pteval_t, 1) << 57) /* PROT_NONE */
88#define L_PTE_RDONLY (_AT(pteval_t, 1) << 58) /* READ ONLY */
89 89
90#define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0) 90#define L_PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
91#define PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55) 91#define L_PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55)
92#define PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56) 92#define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
93#define PMD_SECT_NONE (_AT(pmdval_t, 1) << 57) 93#define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
94#define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
94 95
95/* 96/*
96 * To be used in assembly code with the upper page attributes. 97 * To be used in assembly code with the upper page attributes.
@@ -207,27 +208,32 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
207#define pte_huge(pte) (pte_val(pte) && !(pte_val(pte) & PTE_TABLE_BIT)) 208#define pte_huge(pte) (pte_val(pte) && !(pte_val(pte) & PTE_TABLE_BIT))
208#define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT)) 209#define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT))
209 210
210#define pmd_young(pmd) (pmd_val(pmd) & PMD_SECT_AF) 211#define pmd_isset(pmd, val) ((u32)(val) == (val) ? pmd_val(pmd) & (val) \
212 : !!(pmd_val(pmd) & (val)))
213#define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val)))
214
215#define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF))
211 216
212#define __HAVE_ARCH_PMD_WRITE 217#define __HAVE_ARCH_PMD_WRITE
213#define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY)) 218#define pmd_write(pmd) (pmd_isclear((pmd), L_PMD_SECT_RDONLY))
219#define pmd_dirty(pmd) (pmd_isset((pmd), L_PMD_SECT_DIRTY))
214 220
215#define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd)) 221#define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd))
216#define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd)) 222#define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd))
217 223
218#ifdef CONFIG_TRANSPARENT_HUGEPAGE 224#ifdef CONFIG_TRANSPARENT_HUGEPAGE
219#define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT)) 225#define pmd_trans_huge(pmd) (pmd_val(pmd) && !pmd_table(pmd))
220#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING) 226#define pmd_trans_splitting(pmd) (pmd_isset((pmd), L_PMD_SECT_SPLITTING))
221#endif 227#endif
222 228
223#define PMD_BIT_FUNC(fn,op) \ 229#define PMD_BIT_FUNC(fn,op) \
224static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; } 230static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
225 231
226PMD_BIT_FUNC(wrprotect, |= PMD_SECT_RDONLY); 232PMD_BIT_FUNC(wrprotect, |= L_PMD_SECT_RDONLY);
227PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF); 233PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF);
228PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING); 234PMD_BIT_FUNC(mksplitting, |= L_PMD_SECT_SPLITTING);
229PMD_BIT_FUNC(mkwrite, &= ~PMD_SECT_RDONLY); 235PMD_BIT_FUNC(mkwrite, &= ~L_PMD_SECT_RDONLY);
230PMD_BIT_FUNC(mkdirty, |= PMD_SECT_DIRTY); 236PMD_BIT_FUNC(mkdirty, |= L_PMD_SECT_DIRTY);
231PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF); 237PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
232 238
233#define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT)) 239#define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
@@ -241,8 +247,8 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
241 247
242static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) 248static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
243{ 249{
244 const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | PMD_SECT_RDONLY | 250 const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | L_PMD_SECT_RDONLY |
245 PMD_SECT_VALID | PMD_SECT_NONE; 251 L_PMD_SECT_VALID | L_PMD_SECT_NONE;
246 pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask); 252 pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
247 return pmd; 253 return pmd;
248} 254}
@@ -253,8 +259,13 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
253 BUG_ON(addr >= TASK_SIZE); 259 BUG_ON(addr >= TASK_SIZE);
254 260
255 /* create a faulting entry if PROT_NONE protected */ 261 /* create a faulting entry if PROT_NONE protected */
256 if (pmd_val(pmd) & PMD_SECT_NONE) 262 if (pmd_val(pmd) & L_PMD_SECT_NONE)
257 pmd_val(pmd) &= ~PMD_SECT_VALID; 263 pmd_val(pmd) &= ~L_PMD_SECT_VALID;
264
265 if (pmd_write(pmd) && pmd_dirty(pmd))
266 pmd_val(pmd) &= ~PMD_SECT_AP2;
267 else
268 pmd_val(pmd) |= PMD_SECT_AP2;
258 269
259 *pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG); 270 *pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG);
260 flush_pmd_entry(pmdp); 271 flush_pmd_entry(pmdp);
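
The new AP2 handling in set_pmd_at() derives the hardware read-only bit from
the software write/dirty state, emulating a dirty bit for sections; the cases
work out as:

	pmd_write  pmd_dirty  PMD_SECT_AP2  result
	    1          1           0        writable, already dirty
	    1          0           1        first write faults, dirty gets set
	    0          -           1        genuinely read-only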
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 5478e5d6ad89..01baef07cd0c 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -214,18 +214,22 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
214 214
215#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0) 215#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)
216 216
217#define pte_isset(pte, val) ((u32)(val) == (val) ? pte_val(pte) & (val) \
218 : !!(pte_val(pte) & (val)))
219#define pte_isclear(pte, val) (!(pte_val(pte) & (val)))
220
217#define pte_none(pte) (!pte_val(pte)) 221#define pte_none(pte) (!pte_val(pte))
218#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT) 222#define pte_present(pte) (pte_isset((pte), L_PTE_PRESENT))
219#define pte_valid(pte) (pte_val(pte) & L_PTE_VALID) 223#define pte_valid(pte) (pte_isset((pte), L_PTE_VALID))
220#define pte_accessible(mm, pte) (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte)) 224#define pte_accessible(mm, pte) (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
221#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY)) 225#define pte_write(pte) (pte_isclear((pte), L_PTE_RDONLY))
222#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) 226#define pte_dirty(pte) (pte_isset((pte), L_PTE_DIRTY))
223#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) 227#define pte_young(pte) (pte_isset((pte), L_PTE_YOUNG))
224#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN)) 228#define pte_exec(pte) (pte_isclear((pte), L_PTE_XN))
225#define pte_special(pte) (0) 229#define pte_special(pte) (0)
226 230
227#define pte_valid_user(pte) \ 231#define pte_valid_user(pte) \
228 (pte_valid(pte) && (pte_val(pte) & L_PTE_USER) && pte_young(pte)) 232 (pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte))
229 233
230#if __LINUX_ARM_ARCH__ < 6 234#if __LINUX_ARM_ARCH__ < 6
231static inline void __sync_icache_dcache(pte_t pteval) 235static inline void __sync_icache_dcache(pte_t pteval)
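
The (u32)(val) == (val) test in pte_isset()/pmd_isset() is a compile-time
constant: flags that fit in 32 bits keep the cheap mask-only form, while LPAE
flags above bit 31 (L_PTE_XN at bit 54, the new L_PTE_RDONLY at bit 58, ...)
are normalised with !! so nothing is lost if the result is narrowed. A sketch
of the hazard being avoided, assuming a 64-bit pteval_t:

	uint64_t pte = 1ULL << 54;		/* L_PTE_XN set            */
	int bad  = (int)(pte & (1ULL << 54));	/* truncates to 0: wrong   */
	int good = !!(pte & (1ULL << 54));	/* normalised to 1: correct */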
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index ae1919be8f98..0b648c541293 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -42,6 +42,25 @@ struct arm_pmu_platdata {
42 42
43#ifdef CONFIG_HW_PERF_EVENTS 43#ifdef CONFIG_HW_PERF_EVENTS
44 44
45/*
46 * The ARMv7 CPU PMU supports up to 32 event counters.
47 */
48#define ARMPMU_MAX_HWEVENTS 32
49
50#define HW_OP_UNSUPPORTED 0xFFFF
51#define C(_x) PERF_COUNT_HW_CACHE_##_x
52#define CACHE_OP_UNSUPPORTED 0xFFFF
53
54#define PERF_MAP_ALL_UNSUPPORTED \
55 [0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED
56
57#define PERF_CACHE_MAP_ALL_UNSUPPORTED \
58[0 ... C(MAX) - 1] = { \
59 [0 ... C(OP_MAX) - 1] = { \
60 [0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED, \
61 }, \
62}
63
45/* The events for a given PMU register set. */ 64/* The events for a given PMU register set. */
46struct pmu_hw_events { 65struct pmu_hw_events {
47 /* 66 /*
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index c877654fe3bf..601264d983fa 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -84,6 +84,12 @@ static inline long regs_return_value(struct pt_regs *regs)
84 84
85#define instruction_pointer(regs) (regs)->ARM_pc 85#define instruction_pointer(regs) (regs)->ARM_pc
86 86
87#ifdef CONFIG_THUMB2_KERNEL
88#define frame_pointer(regs) (regs)->ARM_r7
89#else
90#define frame_pointer(regs) (regs)->ARM_fp
91#endif
92
87static inline void instruction_pointer_set(struct pt_regs *regs, 93static inline void instruction_pointer_set(struct pt_regs *regs,
88 unsigned long val) 94 unsigned long val)
89{ 95{
diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h
index 0393fbab8dd5..bfe163c40024 100644
--- a/arch/arm/include/asm/smp_scu.h
+++ b/arch/arm/include/asm/smp_scu.h
@@ -11,7 +11,7 @@
11 11
12static inline bool scu_a9_has_base(void) 12static inline bool scu_a9_has_base(void)
13{ 13{
14 return read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9; 14 return read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
15} 15}
16 16
17static inline unsigned long scu_a9_get_base(void) 17static inline unsigned long scu_a9_get_base(void)
diff --git a/arch/arm/include/asm/stacktrace.h b/arch/arm/include/asm/stacktrace.h
index 4d0a16441b29..7722201ead19 100644
--- a/arch/arm/include/asm/stacktrace.h
+++ b/arch/arm/include/asm/stacktrace.h
@@ -1,13 +1,28 @@
1#ifndef __ASM_STACKTRACE_H 1#ifndef __ASM_STACKTRACE_H
2#define __ASM_STACKTRACE_H 2#define __ASM_STACKTRACE_H
3 3
4#include <asm/ptrace.h>
5
4struct stackframe { 6struct stackframe {
7 /*
8 * FP member should hold R7 when CONFIG_THUMB2_KERNEL is enabled
9 * and R11 otherwise.
10 */
5 unsigned long fp; 11 unsigned long fp;
6 unsigned long sp; 12 unsigned long sp;
7 unsigned long lr; 13 unsigned long lr;
8 unsigned long pc; 14 unsigned long pc;
9}; 15};
10 16
17static __always_inline
18void arm_get_current_stackframe(struct pt_regs *regs, struct stackframe *frame)
19{
20 frame->fp = frame_pointer(regs);
21 frame->sp = regs->ARM_sp;
22 frame->lr = regs->ARM_lr;
23 frame->pc = regs->ARM_pc;
24}
25
11extern int unwind_frame(struct stackframe *frame); 26extern int unwind_frame(struct stackframe *frame);
12extern void walk_stackframe(struct stackframe *frame, 27extern void walk_stackframe(struct stackframe *frame,
13 int (*fn)(struct stackframe *, void *), void *data); 28 int (*fn)(struct stackframe *, void *), void *data);
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index e4e4208a9130..fc44d3761f9e 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -14,9 +14,10 @@
14 14
15#include <linux/compiler.h> 15#include <linux/compiler.h>
16#include <asm/fpstate.h> 16#include <asm/fpstate.h>
17#include <asm/page.h>
17 18
18#define THREAD_SIZE_ORDER 1 19#define THREAD_SIZE_ORDER 1
19#define THREAD_SIZE 8192 20#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
20#define THREAD_START_SP (THREAD_SIZE - 8) 21#define THREAD_START_SP (THREAD_SIZE - 8)
21 22
22#ifndef __ASSEMBLY__ 23#ifndef __ASSEMBLY__
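
With the usual 4K pages this is identity-preserving, and it is what lets the
get_thread_info change in assembler.h above drop the hard-coded #13:

	THREAD_SIZE = PAGE_SIZE << THREAD_SIZE_ORDER = 4096 << 1 = 8192
	sp shift    = THREAD_SIZE_ORDER + PAGE_SHIFT = 1 + 12    = 13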
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 75d95799b6e6..a4cd7af475e9 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -107,6 +107,8 @@ static inline void set_fs(mm_segment_t fs)
107extern int __get_user_1(void *); 107extern int __get_user_1(void *);
108extern int __get_user_2(void *); 108extern int __get_user_2(void *);
109extern int __get_user_4(void *); 109extern int __get_user_4(void *);
110extern int __get_user_lo8(void *);
111extern int __get_user_8(void *);
110 112
111#define __GUP_CLOBBER_1 "lr", "cc" 113#define __GUP_CLOBBER_1 "lr", "cc"
112#ifdef CONFIG_CPU_USE_DOMAINS 114#ifdef CONFIG_CPU_USE_DOMAINS
@@ -115,6 +117,8 @@ extern int __get_user_4(void *);
115#define __GUP_CLOBBER_2 "lr", "cc" 117#define __GUP_CLOBBER_2 "lr", "cc"
116#endif 118#endif
117#define __GUP_CLOBBER_4 "lr", "cc" 119#define __GUP_CLOBBER_4 "lr", "cc"
120#define __GUP_CLOBBER_lo8 "lr", "cc"
121#define __GUP_CLOBBER_8 "lr", "cc"
118 122
119#define __get_user_x(__r2,__p,__e,__l,__s) \ 123#define __get_user_x(__r2,__p,__e,__l,__s) \
120 __asm__ __volatile__ ( \ 124 __asm__ __volatile__ ( \
@@ -125,11 +129,19 @@ extern int __get_user_4(void *);
125 : "0" (__p), "r" (__l) \ 129 : "0" (__p), "r" (__l) \
126 : __GUP_CLOBBER_##__s) 130 : __GUP_CLOBBER_##__s)
127 131
132/* narrowing a double-word get into a single 32-bit word register: */
133#ifdef __ARMEB__
134#define __get_user_xb(__r2, __p, __e, __l, __s) \
135 __get_user_x(__r2, __p, __e, __l, lo8)
136#else
137#define __get_user_xb __get_user_x
138#endif
139
128#define __get_user_check(x,p) \ 140#define __get_user_check(x,p) \
129 ({ \ 141 ({ \
130 unsigned long __limit = current_thread_info()->addr_limit - 1; \ 142 unsigned long __limit = current_thread_info()->addr_limit - 1; \
131 register const typeof(*(p)) __user *__p asm("r0") = (p);\ 143 register const typeof(*(p)) __user *__p asm("r0") = (p);\
132 register unsigned long __r2 asm("r2"); \ 144 register typeof(x) __r2 asm("r2"); \
133 register unsigned long __l asm("r1") = __limit; \ 145 register unsigned long __l asm("r1") = __limit; \
134 register int __e asm("r0"); \ 146 register int __e asm("r0"); \
135 switch (sizeof(*(__p))) { \ 147 switch (sizeof(*(__p))) { \
@@ -142,6 +154,12 @@ extern int __get_user_4(void *);
142 case 4: \ 154 case 4: \
143 __get_user_x(__r2, __p, __e, __l, 4); \ 155 __get_user_x(__r2, __p, __e, __l, 4); \
144 break; \ 156 break; \
157 case 8: \
158 if (sizeof((x)) < 8) \
159 __get_user_xb(__r2, __p, __e, __l, 4); \
160 else \
161 __get_user_x(__r2, __p, __e, __l, 8); \
162 break; \
145 default: __e = __get_user_bad(); break; \ 163 default: __e = __get_user_bad(); break; \
146 } \ 164 } \
147 x = (typeof(*(p))) __r2; \ 165 x = (typeof(*(p))) __r2; \
@@ -224,7 +242,7 @@ static inline void set_fs(mm_segment_t fs)
224#define access_ok(type,addr,size) (__range_ok(addr,size) == 0) 242#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
225 243
226#define user_addr_max() \ 244#define user_addr_max() \
227 (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) 245 (segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs())
228 246
229/* 247/*
230 * The "__xxx" versions of the user access functions do not verify the 248 * The "__xxx" versions of the user access functions do not verify the
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 43876245fc57..21ca0cebcab0 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -15,7 +15,17 @@
15 15
16#include <uapi/asm/unistd.h> 16#include <uapi/asm/unistd.h>
17 17
18/*
19 * This may need to be greater than __NR_last_syscall+1 in order to
20 * account for the padding in the syscall table
21 */
18#define __NR_syscalls (384) 22#define __NR_syscalls (384)
23
24/*
25 * *NOTE*: This is a ghost syscall private to the kernel. Only the
26 * __kuser_cmpxchg code in entry-armv.S should be aware of its
27 * existence. Don't ever use this from user code.
28 */
19#define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0) 29#define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0)
20 30
21#define __ARCH_WANT_STAT64 31#define __ARCH_WANT_STAT64
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index ba94446c72d9..acd5b66ea3aa 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -411,11 +411,6 @@
411#define __NR_renameat2 (__NR_SYSCALL_BASE+382) 411#define __NR_renameat2 (__NR_SYSCALL_BASE+382)
412 412
413/* 413/*
414 * This may need to be greater than __NR_last_syscall+1 in order to
415 * account for the padding in the syscall table
416 */
417
418/*
419 * The following SWIs are ARM private. 414 * The following SWIs are ARM private.
420 */ 415 */
421#define __ARM_NR_BASE (__NR_SYSCALL_BASE+0x0f0000) 416#define __ARM_NR_BASE (__NR_SYSCALL_BASE+0x0f0000)
@@ -426,12 +421,6 @@
426#define __ARM_NR_set_tls (__ARM_NR_BASE+5) 421#define __ARM_NR_set_tls (__ARM_NR_BASE+5)
427 422
428/* 423/*
429 * *NOTE*: This is a ghost syscall private to the kernel. Only the
430 * __kuser_cmpxchg code in entry-armv.S should be aware of its
431 * existence. Don't ever use this from user code.
432 */
433
434/*
435 * The following syscalls are obsolete and no longer available for EABI. 424 * The following syscalls are obsolete and no longer available for EABI.
436 */ 425 */
437#if !defined(__KERNEL__) 426#if !defined(__KERNEL__)
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index 14f7c3b14632..78c91b5f97d4 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -90,7 +90,7 @@ ENTRY(printascii)
90 ldrneb r1, [r0], #1 90 ldrneb r1, [r0], #1
91 teqne r1, #0 91 teqne r1, #0
92 bne 1b 92 bne 1b
93 mov pc, lr 93 ret lr
94ENDPROC(printascii) 94ENDPROC(printascii)
95 95
96ENTRY(printch) 96ENTRY(printch)
@@ -105,7 +105,7 @@ ENTRY(debug_ll_addr)
105 addruart r2, r3, ip 105 addruart r2, r3, ip
106 str r2, [r0] 106 str r2, [r0]
107 str r3, [r1] 107 str r3, [r1]
108 mov pc, lr 108 ret lr
109ENDPROC(debug_ll_addr) 109ENDPROC(debug_ll_addr)
110#endif 110#endif
111 111
@@ -116,7 +116,7 @@ ENTRY(printascii)
116 mov r0, #0x04 @ SYS_WRITE0 116 mov r0, #0x04 @ SYS_WRITE0
117 ARM( svc #0x123456 ) 117 ARM( svc #0x123456 )
118 THUMB( svc #0xab ) 118 THUMB( svc #0xab )
119 mov pc, lr 119 ret lr
120ENDPROC(printascii) 120ENDPROC(printascii)
121 121
122ENTRY(printch) 122ENTRY(printch)
@@ -125,14 +125,14 @@ ENTRY(printch)
125 mov r0, #0x03 @ SYS_WRITEC 125 mov r0, #0x03 @ SYS_WRITEC
126 ARM( svc #0x123456 ) 126 ARM( svc #0x123456 )
127 THUMB( svc #0xab ) 127 THUMB( svc #0xab )
128 mov pc, lr 128 ret lr
129ENDPROC(printch) 129ENDPROC(printch)
130 130
131ENTRY(debug_ll_addr) 131ENTRY(debug_ll_addr)
132 mov r2, #0 132 mov r2, #0
133 str r2, [r0] 133 str r2, [r0]
134 str r2, [r1] 134 str r2, [r1]
135 mov pc, lr 135 ret lr
136ENDPROC(debug_ll_addr) 136ENDPROC(debug_ll_addr)
137 137
138#endif 138#endif
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 52a949a8077d..36276cdccfbc 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -224,7 +224,7 @@ svc_preempt:
2241: bl preempt_schedule_irq @ irq en/disable is done inside 2241: bl preempt_schedule_irq @ irq en/disable is done inside
225 ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS 225 ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
226 tst r0, #_TIF_NEED_RESCHED 226 tst r0, #_TIF_NEED_RESCHED
227 moveq pc, r8 @ go again 227 reteq r8 @ go again
228 b 1b 228 b 1b
229#endif 229#endif
230 230
@@ -490,7 +490,7 @@ ENDPROC(__und_usr)
490 .pushsection .fixup, "ax" 490 .pushsection .fixup, "ax"
491 .align 2 491 .align 2
4924: str r4, [sp, #S_PC] @ retry current instruction 4924: str r4, [sp, #S_PC] @ retry current instruction
493 mov pc, r9 493 ret r9
494 .popsection 494 .popsection
495 .pushsection __ex_table,"a" 495 .pushsection __ex_table,"a"
496 .long 1b, 4b 496 .long 1b, 4b
@@ -552,7 +552,7 @@ call_fpe:
552#endif 552#endif
553 tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27 553 tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27
554 tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2 554 tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2
555 moveq pc, lr 555 reteq lr
556 and r8, r0, #0x00000f00 @ mask out CP number 556 and r8, r0, #0x00000f00 @ mask out CP number
557 THUMB( lsr r8, r8, #8 ) 557 THUMB( lsr r8, r8, #8 )
558 mov r7, #1 558 mov r7, #1
@@ -571,33 +571,33 @@ call_fpe:
571 THUMB( add pc, r8 ) 571 THUMB( add pc, r8 )
572 nop 572 nop
573 573
574 movw_pc lr @ CP#0 574 ret.w lr @ CP#0
575 W(b) do_fpe @ CP#1 (FPE) 575 W(b) do_fpe @ CP#1 (FPE)
576 W(b) do_fpe @ CP#2 (FPE) 576 W(b) do_fpe @ CP#2 (FPE)
577 movw_pc lr @ CP#3 577 ret.w lr @ CP#3
578#ifdef CONFIG_CRUNCH 578#ifdef CONFIG_CRUNCH
579 b crunch_task_enable @ CP#4 (MaverickCrunch) 579 b crunch_task_enable @ CP#4 (MaverickCrunch)
580 b crunch_task_enable @ CP#5 (MaverickCrunch) 580 b crunch_task_enable @ CP#5 (MaverickCrunch)
581 b crunch_task_enable @ CP#6 (MaverickCrunch) 581 b crunch_task_enable @ CP#6 (MaverickCrunch)
582#else 582#else
583 movw_pc lr @ CP#4 583 ret.w lr @ CP#4
584 movw_pc lr @ CP#5 584 ret.w lr @ CP#5
585 movw_pc lr @ CP#6 585 ret.w lr @ CP#6
586#endif 586#endif
587 movw_pc lr @ CP#7 587 ret.w lr @ CP#7
588 movw_pc lr @ CP#8 588 ret.w lr @ CP#8
589 movw_pc lr @ CP#9 589 ret.w lr @ CP#9
590#ifdef CONFIG_VFP 590#ifdef CONFIG_VFP
591 W(b) do_vfp @ CP#10 (VFP) 591 W(b) do_vfp @ CP#10 (VFP)
592 W(b) do_vfp @ CP#11 (VFP) 592 W(b) do_vfp @ CP#11 (VFP)
593#else 593#else
594 movw_pc lr @ CP#10 (VFP) 594 ret.w lr @ CP#10 (VFP)
595 movw_pc lr @ CP#11 (VFP) 595 ret.w lr @ CP#11 (VFP)
596#endif 596#endif
597 movw_pc lr @ CP#12 597 ret.w lr @ CP#12
598 movw_pc lr @ CP#13 598 ret.w lr @ CP#13
599 movw_pc lr @ CP#14 (Debug) 599 ret.w lr @ CP#14 (Debug)
600 movw_pc lr @ CP#15 (Control) 600 ret.w lr @ CP#15 (Control)
601 601
602#ifdef NEED_CPU_ARCHITECTURE 602#ifdef NEED_CPU_ARCHITECTURE
603 .align 2 603 .align 2
@@ -649,7 +649,7 @@ ENTRY(fp_enter)
649 .popsection 649 .popsection
650 650
651ENTRY(no_fp) 651ENTRY(no_fp)
652 mov pc, lr 652 ret lr
653ENDPROC(no_fp) 653ENDPROC(no_fp)
654 654
655__und_usr_fault_32: 655__und_usr_fault_32:
@@ -745,7 +745,7 @@ ENDPROC(__switch_to)
745#ifdef CONFIG_ARM_THUMB 745#ifdef CONFIG_ARM_THUMB
746 bx \reg 746 bx \reg
747#else 747#else
748 mov pc, \reg 748 ret \reg
749#endif 749#endif
750 .endm 750 .endm
751 751
@@ -837,7 +837,7 @@ kuser_cmpxchg64_fixup:
837#if __LINUX_ARM_ARCH__ < 6 837#if __LINUX_ARM_ARCH__ < 6
838 bcc kuser_cmpxchg32_fixup 838 bcc kuser_cmpxchg32_fixup
839#endif 839#endif
840 mov pc, lr 840 ret lr
841 .previous 841 .previous
842 842
843#else 843#else
@@ -905,7 +905,7 @@ kuser_cmpxchg32_fixup:
905 subs r8, r4, r7 905 subs r8, r4, r7
906 rsbcss r8, r8, #(2b - 1b) 906 rsbcss r8, r8, #(2b - 1b)
907 strcs r7, [sp, #S_PC] 907 strcs r7, [sp, #S_PC]
908 mov pc, lr 908 ret lr
909 .previous 909 .previous
910 910
911#else 911#else
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 7139d4a7dea7..e52fe5a2d843 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -8,6 +8,7 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11#include <asm/assembler.h>
11#include <asm/unistd.h> 12#include <asm/unistd.h>
12#include <asm/ftrace.h> 13#include <asm/ftrace.h>
13#include <asm/unwind.h> 14#include <asm/unwind.h>
@@ -88,7 +89,7 @@ ENTRY(ret_from_fork)
88 cmp r5, #0 89 cmp r5, #0
89 movne r0, r4 90 movne r0, r4
90 adrne lr, BSYM(1f) 91 adrne lr, BSYM(1f)
91 movne pc, r5 92 retne r5
921: get_thread_info tsk 931: get_thread_info tsk
93 b ret_slow_syscall 94 b ret_slow_syscall
94ENDPROC(ret_from_fork) 95ENDPROC(ret_from_fork)
@@ -290,7 +291,7 @@ ENDPROC(ftrace_graph_caller_old)
290 291
291.macro mcount_exit 292.macro mcount_exit
292 ldmia sp!, {r0-r3, ip, lr} 293 ldmia sp!, {r0-r3, ip, lr}
293 mov pc, ip 294 ret ip
294.endm 295.endm
295 296
296ENTRY(__gnu_mcount_nc) 297ENTRY(__gnu_mcount_nc)
@@ -298,7 +299,7 @@ UNWIND(.fnstart)
298#ifdef CONFIG_DYNAMIC_FTRACE 299#ifdef CONFIG_DYNAMIC_FTRACE
299 mov ip, lr 300 mov ip, lr
300 ldmia sp!, {lr} 301 ldmia sp!, {lr}
301 mov pc, ip 302 ret ip
302#else 303#else
303 __mcount 304 __mcount
304#endif 305#endif
@@ -333,12 +334,12 @@ return_to_handler:
333 bl ftrace_return_to_handler 334 bl ftrace_return_to_handler
334 mov lr, r0 @ r0 has real ret addr 335 mov lr, r0 @ r0 has real ret addr
335 ldmia sp!, {r0-r3} 336 ldmia sp!, {r0-r3}
336 mov pc, lr 337 ret lr
337#endif 338#endif
338 339
339ENTRY(ftrace_stub) 340ENTRY(ftrace_stub)
340.Lftrace_stub: 341.Lftrace_stub:
341 mov pc, lr 342 ret lr
342ENDPROC(ftrace_stub) 343ENDPROC(ftrace_stub)
343 344
344#endif /* CONFIG_FUNCTION_TRACER */ 345#endif /* CONFIG_FUNCTION_TRACER */
@@ -561,7 +562,7 @@ sys_mmap2:
561 streq r5, [sp, #4] 562 streq r5, [sp, #4]
562 beq sys_mmap_pgoff 563 beq sys_mmap_pgoff
563 mov r0, #-EINVAL 564 mov r0, #-EINVAL
564 mov pc, lr 565 ret lr
565#else 566#else
566 str r5, [sp, #4] 567 str r5, [sp, #4]
567 b sys_mmap_pgoff 568 b sys_mmap_pgoff
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 5d702f8900b1..8db307d0954b 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -240,12 +240,6 @@
240 movs pc, lr @ return & move spsr_svc into cpsr 240 movs pc, lr @ return & move spsr_svc into cpsr
241 .endm 241 .endm
242 242
243 @
244 @ 32-bit wide "mov pc, reg"
245 @
246 .macro movw_pc, reg
247 mov pc, \reg
248 .endm
249#else /* CONFIG_THUMB2_KERNEL */ 243#else /* CONFIG_THUMB2_KERNEL */
250 .macro svc_exit, rpsr, irq = 0 244 .macro svc_exit, rpsr, irq = 0
251 .if \irq != 0 245 .if \irq != 0
@@ -304,14 +298,6 @@
304 movs pc, lr @ return & move spsr_svc into cpsr 298 movs pc, lr @ return & move spsr_svc into cpsr
305 .endm 299 .endm
306#endif /* ifdef CONFIG_CPU_V7M / else */ 300#endif /* ifdef CONFIG_CPU_V7M / else */
307
308 @
309 @ 32-bit wide "mov pc, reg"
310 @
311 .macro movw_pc, reg
312 mov pc, \reg
313 nop
314 .endm
315#endif /* !CONFIG_THUMB2_KERNEL */ 301#endif /* !CONFIG_THUMB2_KERNEL */
316 302
317/* 303/*
diff --git a/arch/arm/kernel/fiqasm.S b/arch/arm/kernel/fiqasm.S
index 207f9d652010..8dd26e1a9bd6 100644
--- a/arch/arm/kernel/fiqasm.S
+++ b/arch/arm/kernel/fiqasm.S
@@ -32,7 +32,7 @@ ENTRY(__set_fiq_regs)
32 ldr lr, [r0] 32 ldr lr, [r0]
33 msr cpsr_c, r1 @ return to SVC mode 33 msr cpsr_c, r1 @ return to SVC mode
34 mov r0, r0 @ avoid hazard prior to ARMv4 34 mov r0, r0 @ avoid hazard prior to ARMv4
35 mov pc, lr 35 ret lr
36ENDPROC(__set_fiq_regs) 36ENDPROC(__set_fiq_regs)
37 37
38ENTRY(__get_fiq_regs) 38ENTRY(__get_fiq_regs)
@@ -45,5 +45,5 @@ ENTRY(__get_fiq_regs)
45 str lr, [r0] 45 str lr, [r0]
46 msr cpsr_c, r1 @ return to SVC mode 46 msr cpsr_c, r1 @ return to SVC mode
47 mov r0, r0 @ avoid hazard prior to ARMv4 47 mov r0, r0 @ avoid hazard prior to ARMv4
48 mov pc, lr 48 ret lr
49ENDPROC(__get_fiq_regs) 49ENDPROC(__get_fiq_regs)
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 572a38335c96..8733012d231f 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -10,6 +10,7 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 * 11 *
12 */ 12 */
13#include <asm/assembler.h>
13 14
14#define ATAG_CORE 0x54410001 15#define ATAG_CORE 0x54410001
15#define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2) 16#define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2)
@@ -61,10 +62,10 @@ __vet_atags:
61 cmp r5, r6 62 cmp r5, r6
62 bne 1f 63 bne 1f
63 64
642: mov pc, lr @ atag/dtb pointer is ok 652: ret lr @ atag/dtb pointer is ok
65 66
661: mov r2, #0 671: mov r2, #0
67 mov pc, lr 68 ret lr
68ENDPROC(__vet_atags) 69ENDPROC(__vet_atags)
69 70
70/* 71/*
@@ -162,7 +163,7 @@ __lookup_processor_type:
162 cmp r5, r6 163 cmp r5, r6
163 blo 1b 164 blo 1b
164 mov r5, #0 @ unknown processor 165 mov r5, #0 @ unknown processor
1652: mov pc, lr 1662: ret lr
166ENDPROC(__lookup_processor_type) 167ENDPROC(__lookup_processor_type)
167 168
168/* 169/*
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 716249cc2ee1..cc176b67c134 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -82,7 +82,7 @@ ENTRY(stext)
82 adr lr, BSYM(1f) @ return (PIC) address 82 adr lr, BSYM(1f) @ return (PIC) address
83 ARM( add pc, r10, #PROCINFO_INITFUNC ) 83 ARM( add pc, r10, #PROCINFO_INITFUNC )
84 THUMB( add r12, r10, #PROCINFO_INITFUNC ) 84 THUMB( add r12, r10, #PROCINFO_INITFUNC )
85 THUMB( mov pc, r12 ) 85 THUMB( ret r12 )
86 1: b __after_proc_init 86 1: b __after_proc_init
87ENDPROC(stext) 87ENDPROC(stext)
88 88
@@ -119,7 +119,7 @@ ENTRY(secondary_startup)
119 mov r13, r12 @ __secondary_switched address 119 mov r13, r12 @ __secondary_switched address
120 ARM( add pc, r10, #PROCINFO_INITFUNC ) 120 ARM( add pc, r10, #PROCINFO_INITFUNC )
121 THUMB( add r12, r10, #PROCINFO_INITFUNC ) 121 THUMB( add r12, r10, #PROCINFO_INITFUNC )
122 THUMB( mov pc, r12 ) 122 THUMB( ret r12 )
123ENDPROC(secondary_startup) 123ENDPROC(secondary_startup)
124 124
125ENTRY(__secondary_switched) 125ENTRY(__secondary_switched)
@@ -164,7 +164,7 @@ __after_proc_init:
164#endif 164#endif
165 mcr p15, 0, r0, c1, c0, 0 @ write control reg 165 mcr p15, 0, r0, c1, c0, 0 @ write control reg
166#endif /* CONFIG_CPU_CP15 */ 166#endif /* CONFIG_CPU_CP15 */
167 mov pc, r13 167 ret r13
168ENDPROC(__after_proc_init) 168ENDPROC(__after_proc_init)
169 .ltorg 169 .ltorg
170 170
@@ -254,7 +254,7 @@ ENTRY(__setup_mpu)
254 orr r0, r0, #CR_M @ Set SCTRL.M (MPU on) 254 orr r0, r0, #CR_M @ Set SCTRL.M (MPU on)
255 mcr p15, 0, r0, c1, c0, 0 @ Enable MPU 255 mcr p15, 0, r0, c1, c0, 0 @ Enable MPU
256 isb 256 isb
257 mov pc,lr 257 ret lr
258ENDPROC(__setup_mpu) 258ENDPROC(__setup_mpu)
259#endif 259#endif
260#include "head-common.S" 260#include "head-common.S"
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 2c35f0ff2fdc..664eee8c4a26 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -140,7 +140,7 @@ ENTRY(stext)
140 mov r8, r4 @ set TTBR1 to swapper_pg_dir 140 mov r8, r4 @ set TTBR1 to swapper_pg_dir
141 ARM( add pc, r10, #PROCINFO_INITFUNC ) 141 ARM( add pc, r10, #PROCINFO_INITFUNC )
142 THUMB( add r12, r10, #PROCINFO_INITFUNC ) 142 THUMB( add r12, r10, #PROCINFO_INITFUNC )
143 THUMB( mov pc, r12 ) 143 THUMB( ret r12 )
1441: b __enable_mmu 1441: b __enable_mmu
145ENDPROC(stext) 145ENDPROC(stext)
146 .ltorg 146 .ltorg
@@ -335,7 +335,7 @@ __create_page_tables:
335 sub r4, r4, #0x1000 @ point to the PGD table 335 sub r4, r4, #0x1000 @ point to the PGD table
336 mov r4, r4, lsr #ARCH_PGD_SHIFT 336 mov r4, r4, lsr #ARCH_PGD_SHIFT
337#endif 337#endif
338 mov pc, lr 338 ret lr
339ENDPROC(__create_page_tables) 339ENDPROC(__create_page_tables)
340 .ltorg 340 .ltorg
341 .align 341 .align
@@ -383,7 +383,7 @@ ENTRY(secondary_startup)
383 ARM( add pc, r10, #PROCINFO_INITFUNC ) @ initialise processor 383 ARM( add pc, r10, #PROCINFO_INITFUNC ) @ initialise processor
384 @ (return control reg) 384 @ (return control reg)
385 THUMB( add r12, r10, #PROCINFO_INITFUNC ) 385 THUMB( add r12, r10, #PROCINFO_INITFUNC )
386 THUMB( mov pc, r12 ) 386 THUMB( ret r12 )
387ENDPROC(secondary_startup) 387ENDPROC(secondary_startup)
388 388
389 /* 389 /*
@@ -468,7 +468,7 @@ ENTRY(__turn_mmu_on)
468 instr_sync 468 instr_sync
469 mov r3, r3 469 mov r3, r3
470 mov r3, r13 470 mov r3, r13
471 mov pc, r3 471 ret r3
472__turn_mmu_on_end: 472__turn_mmu_on_end:
473ENDPROC(__turn_mmu_on) 473ENDPROC(__turn_mmu_on)
474 .popsection 474 .popsection
@@ -487,7 +487,7 @@ __fixup_smp:
487 orr r4, r4, #0x0000b000 487 orr r4, r4, #0x0000b000
488 orr r4, r4, #0x00000020 @ val 0x4100b020 488 orr r4, r4, #0x00000020 @ val 0x4100b020
489 teq r3, r4 @ ARM 11MPCore? 489 teq r3, r4 @ ARM 11MPCore?
490 moveq pc, lr @ yes, assume SMP 490 reteq lr @ yes, assume SMP
491 491
492 mrc p15, 0, r0, c0, c0, 5 @ read MPIDR 492 mrc p15, 0, r0, c0, c0, 5 @ read MPIDR
493 and r0, r0, #0xc0000000 @ multiprocessing extensions and 493 and r0, r0, #0xc0000000 @ multiprocessing extensions and
@@ -500,7 +500,7 @@ __fixup_smp:
500 orr r4, r4, #0x0000c000 500 orr r4, r4, #0x0000c000
501 orr r4, r4, #0x00000090 501 orr r4, r4, #0x00000090
502 teq r3, r4 @ Check for ARM Cortex-A9 502 teq r3, r4 @ Check for ARM Cortex-A9
503 movne pc, lr @ Not ARM Cortex-A9, 503 retne lr @ Not ARM Cortex-A9,
504 504
505 @ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the 505 @ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
506 @ below address check will need to be #ifdef'd or equivalent 506 @ below address check will need to be #ifdef'd or equivalent
@@ -512,7 +512,7 @@ __fixup_smp:
512ARM_BE8(rev r0, r0) @ byteswap if big endian 512ARM_BE8(rev r0, r0) @ byteswap if big endian
513 and r0, r0, #0x3 @ number of CPUs 513 and r0, r0, #0x3 @ number of CPUs
514 teq r0, #0x0 @ is 1? 514 teq r0, #0x0 @ is 1?
515 movne pc, lr 515 retne lr
516 516
517__fixup_smp_on_up: 517__fixup_smp_on_up:
518 adr r0, 1f 518 adr r0, 1f
@@ -539,7 +539,7 @@ smp_on_up:
539 .text 539 .text
540__do_fixup_smp_on_up: 540__do_fixup_smp_on_up:
541 cmp r4, r5 541 cmp r4, r5
542 movhs pc, lr 542 reths lr
543 ldmia r4!, {r0, r6} 543 ldmia r4!, {r0, r6}
544 ARM( str r6, [r0, r3] ) 544 ARM( str r6, [r0, r3] )
545 THUMB( add r0, r0, r3 ) 545 THUMB( add r0, r0, r3 )
@@ -672,7 +672,7 @@ ARM_BE8(rev16 ip, ip)
6722: cmp r4, r5 6722: cmp r4, r5
673 ldrcc r7, [r4], #4 @ use branch for delay slot 673 ldrcc r7, [r4], #4 @ use branch for delay slot
674 bcc 1b 674 bcc 1b
675 mov pc, lr 675 ret lr
676#endif 676#endif
677ENDPROC(__fixup_a_pv_table) 677ENDPROC(__fixup_a_pv_table)
678 678
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 797b1a6a4906..56ce6290c831 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -99,7 +99,7 @@ ENTRY(__hyp_stub_install_secondary)
99 * immediately. 99 * immediately.
100 */ 100 */
101 compare_cpu_mode_with_primary r4, r5, r6, r7 101 compare_cpu_mode_with_primary r4, r5, r6, r7
102 movne pc, lr 102 retne lr
103 103
104 /* 104 /*
105 * Once we have given up on one CPU, we do not try to install the 105 * Once we have given up on one CPU, we do not try to install the
@@ -111,7 +111,7 @@ ENTRY(__hyp_stub_install_secondary)
111 */ 111 */
112 112
113 cmp r4, #HYP_MODE 113 cmp r4, #HYP_MODE
114 movne pc, lr @ give up if the CPU is not in HYP mode 114 retne lr @ give up if the CPU is not in HYP mode
115 115
116/* 116/*
117 * Configure HSCTLR to set correct exception endianness/instruction set 117 * Configure HSCTLR to set correct exception endianness/instruction set
@@ -201,7 +201,7 @@ ENDPROC(__hyp_get_vectors)
201 @ fall through 201 @ fall through
202ENTRY(__hyp_set_vectors) 202ENTRY(__hyp_set_vectors)
203 __HVC(0) 203 __HVC(0)
204 mov pc, lr 204 ret lr
205ENDPROC(__hyp_set_vectors) 205ENDPROC(__hyp_set_vectors)
206 206
207#ifndef ZIMAGE 207#ifndef ZIMAGE
diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
index 2b32978ae905..ad58e565fe98 100644
--- a/arch/arm/kernel/iwmmxt.S
+++ b/arch/arm/kernel/iwmmxt.S
@@ -100,7 +100,7 @@ ENTRY(iwmmxt_task_enable)
100 get_thread_info r10 100 get_thread_info r10
101#endif 101#endif
1024: dec_preempt_count r10, r3 1024: dec_preempt_count r10, r3
103 mov pc, r9 @ normal exit from exception 103 ret r9 @ normal exit from exception
104 104
105concan_save: 105concan_save:
106 106
@@ -144,7 +144,7 @@ concan_dump:
144 wstrd wR15, [r1, #MMX_WR15] 144 wstrd wR15, [r1, #MMX_WR15]
145 145
1462: teq r0, #0 @ anything to load? 1462: teq r0, #0 @ anything to load?
147 moveq pc, lr @ if not, return 147 reteq lr @ if not, return
148 148
149concan_load: 149concan_load:
150 150
@@ -177,10 +177,10 @@ concan_load:
177 @ clear CUP/MUP (only if r1 != 0) 177 @ clear CUP/MUP (only if r1 != 0)
178 teq r1, #0 178 teq r1, #0
179 mov r2, #0 179 mov r2, #0
180 moveq pc, lr 180 reteq lr
181 181
182 tmcr wCon, r2 182 tmcr wCon, r2
183 mov pc, lr 183 ret lr
184 184
185/* 185/*
186 * Back up Concan regs to save area and disable access to them 186 * Back up Concan regs to save area and disable access to them
@@ -266,7 +266,7 @@ ENTRY(iwmmxt_task_copy)
266 mov r3, lr @ preserve return address 266 mov r3, lr @ preserve return address
267 bl concan_dump 267 bl concan_dump
268 msr cpsr_c, ip @ restore interrupt mode 268 msr cpsr_c, ip @ restore interrupt mode
269 mov pc, r3 269 ret r3
270 270
271/* 271/*
272 * Restore Concan state from given memory address 272 * Restore Concan state from given memory address
@@ -302,7 +302,7 @@ ENTRY(iwmmxt_task_restore)
302 mov r3, lr @ preserve return address 302 mov r3, lr @ preserve return address
303 bl concan_load 303 bl concan_load
304 msr cpsr_c, ip @ restore interrupt mode 304 msr cpsr_c, ip @ restore interrupt mode
305 mov pc, r3 305 ret r3
306 306
307/* 307/*
308 * Concan handling on task switch 308 * Concan handling on task switch
@@ -324,7 +324,7 @@ ENTRY(iwmmxt_task_switch)
324 add r3, r0, #TI_IWMMXT_STATE @ get next task Concan save area 324 add r3, r0, #TI_IWMMXT_STATE @ get next task Concan save area
325 ldr r2, [r2] @ get current Concan owner 325 ldr r2, [r2] @ get current Concan owner
326 teq r2, r3 @ next task owns it? 326 teq r2, r3 @ next task owns it?
327 movne pc, lr @ no: leave Concan disabled 327 retne lr @ no: leave Concan disabled
328 328
3291: @ flip Concan access 3291: @ flip Concan access
330 XSC(eor r1, r1, #0x3) 330 XSC(eor r1, r1, #0x3)
@@ -351,7 +351,7 @@ ENTRY(iwmmxt_task_release)
351 eors r0, r0, r1 @ if equal... 351 eors r0, r0, r1 @ if equal...
352 streq r0, [r3] @ then clear ownership 352 streq r0, [r3] @ then clear ownership
353 msr cpsr_c, r2 @ restore interrupts 353 msr cpsr_c, r2 @ restore interrupts
354 mov pc, lr 354 ret lr
355 355
356 .data 356 .data
357concan_owner: 357concan_owner:
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 4238bcba9d60..266cba46db3e 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -560,11 +560,16 @@ user_backtrace(struct frame_tail __user *tail,
560 struct perf_callchain_entry *entry) 560 struct perf_callchain_entry *entry)
561{ 561{
562 struct frame_tail buftail; 562 struct frame_tail buftail;
563 unsigned long err;
563 564
564 /* Also check accessibility of one struct frame_tail beyond */
565 if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) 565 if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
566 return NULL; 566 return NULL;
567 if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail))) 567
568 pagefault_disable();
569 err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
570 pagefault_enable();
571
572 if (err)
568 return NULL; 573 return NULL;
569 574
570 perf_callchain_store(entry, buftail.lr); 575 perf_callchain_store(entry, buftail.lr);
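
The pagefault_disable()/pagefault_enable() pair added above matters because callchain sampling runs from the PMU interrupt, where servicing a page fault could sleep; with faults disabled, __copy_from_user_inatomic() simply fails and the unwind stops at that frame. The same pattern in isolation, as a minimal sketch (read_user_word is a hypothetical helper, not part of this patch):

	static int read_user_word(const unsigned long __user *p, unsigned long *val)
	{
		unsigned long err;

		if (!access_ok(VERIFY_READ, p, sizeof(*p)))
			return -EFAULT;		/* not plausibly user memory */

		pagefault_disable();		/* faults fail fast, never sleep */
		err = __copy_from_user_inatomic(val, p, sizeof(*val));
		pagefault_enable();

		return err ? -EFAULT : 0;
	}
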
@@ -590,6 +595,10 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
590 } 595 }
591 596
592 perf_callchain_store(entry, regs->ARM_pc); 597 perf_callchain_store(entry, regs->ARM_pc);
598
599 if (!current->mm)
600 return;
601
593 tail = (struct frame_tail __user *)regs->ARM_fp - 1; 602 tail = (struct frame_tail __user *)regs->ARM_fp - 1;
594 603
595 while ((entry->nr < PERF_MAX_STACK_DEPTH) && 604 while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
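
The new !current->mm test bails out when the sample lands in a kernel thread: kernel threads own no user address space of their own (they run on a borrowed active_mm), so regs->ARM_fp is not a user frame pointer and following it would be meaningless. A sketch of the unwind loop the guard protects, reconstructed from the context lines above (the alignment test completing the truncated while condition is an assumption):

	tail = (struct frame_tail __user *)regs->ARM_fp - 1;
	while (entry->nr < PERF_MAX_STACK_DEPTH &&
	       tail && !((unsigned long)tail & 0x3))	/* non-NULL, word-aligned */
		tail = user_backtrace(tail, entry);
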
@@ -621,10 +630,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
621 return; 630 return;
622 } 631 }
623 632
624 fr.fp = regs->ARM_fp; 633 arm_get_current_stackframe(regs, &fr);
625 fr.sp = regs->ARM_sp;
626 fr.lr = regs->ARM_lr;
627 fr.pc = regs->ARM_pc;
628 walk_stackframe(&fr, callchain_trace, entry); 634 walk_stackframe(&fr, callchain_trace, entry);
629} 635}
630 636
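
perf_callchain_kernel() now fills its struct stackframe through arm_get_current_stackframe() rather than copying four registers by hand; the same four assignments recur in several unwinder call sites, so centralising them keeps the frame-pointer choice consistent for Thumb-2 builds. Assuming the helper simply packages those assignments, its shape is roughly this (the real definition lives in <asm/ptrace.h>):

	static inline void arm_get_current_stackframe(struct pt_regs *regs,
						      struct stackframe *frame)
	{
		frame->fp = frame_pointer(regs);	/* ARM_fp, or r7 on Thumb-2 */
		frame->sp = regs->ARM_sp;
		frame->lr = regs->ARM_lr;
		frame->pc = regs->ARM_pc;
	}
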
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index af9e35e8836f..e6a6edbec613 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -233,14 +233,17 @@ static struct of_device_id cpu_pmu_of_device_ids[] = {
233 {.compatible = "arm,cortex-a7-pmu", .data = armv7_a7_pmu_init}, 233 {.compatible = "arm,cortex-a7-pmu", .data = armv7_a7_pmu_init},
234 {.compatible = "arm,cortex-a5-pmu", .data = armv7_a5_pmu_init}, 234 {.compatible = "arm,cortex-a5-pmu", .data = armv7_a5_pmu_init},
235 {.compatible = "arm,arm11mpcore-pmu", .data = armv6mpcore_pmu_init}, 235 {.compatible = "arm,arm11mpcore-pmu", .data = armv6mpcore_pmu_init},
236 {.compatible = "arm,arm1176-pmu", .data = armv6pmu_init}, 236 {.compatible = "arm,arm1176-pmu", .data = armv6_1176_pmu_init},
237 {.compatible = "arm,arm1136-pmu", .data = armv6pmu_init}, 237 {.compatible = "arm,arm1136-pmu", .data = armv6_1136_pmu_init},
238 {.compatible = "qcom,krait-pmu", .data = krait_pmu_init}, 238 {.compatible = "qcom,krait-pmu", .data = krait_pmu_init},
239 {}, 239 {},
240}; 240};
241 241
242static struct platform_device_id cpu_pmu_plat_device_ids[] = { 242static struct platform_device_id cpu_pmu_plat_device_ids[] = {
243 {.name = "arm-pmu"}, 243 {.name = "arm-pmu"},
244 {.name = "armv6-pmu"},
245 {.name = "armv7-pmu"},
246 {.name = "xscale-pmu"},
244 {}, 247 {},
245}; 248};
246 249
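
The platform_device_id table gains per-architecture names alongside the generic "arm-pmu", so legacy (non-DT) platforms can register a PMU under a name that records which driver flavour they expect. A hypothetical board file using one of the new ids (the IRQ number is invented):

	#include <linux/init.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>

	static struct resource pmu_resources[] = {
		{
			.start	= 100,			/* invented PMU IRQ */
			.end	= 100,
			.flags	= IORESOURCE_IRQ,
		},
	};

	static struct platform_device pmu_device = {
		.name		= "armv7-pmu",		/* matches the new id table */
		.id		= -1,
		.resource	= pmu_resources,
		.num_resources	= ARRAY_SIZE(pmu_resources),
	};

	static int __init board_pmu_init(void)
	{
		return platform_device_register(&pmu_device);
	}
	device_initcall(board_pmu_init);
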
@@ -250,40 +253,43 @@ static struct platform_device_id cpu_pmu_plat_device_ids[] = {
250static int probe_current_pmu(struct arm_pmu *pmu) 253static int probe_current_pmu(struct arm_pmu *pmu)
251{ 254{
252 int cpu = get_cpu(); 255 int cpu = get_cpu();
253 unsigned long implementor = read_cpuid_implementor();
254 unsigned long part_number = read_cpuid_part_number();
255 int ret = -ENODEV; 256 int ret = -ENODEV;
256 257
257 pr_info("probing PMU on CPU %d\n", cpu); 258 pr_info("probing PMU on CPU %d\n", cpu);
258 259
260 switch (read_cpuid_part()) {
259 /* ARM Ltd CPUs. */ 261 /* ARM Ltd CPUs. */
260 if (implementor == ARM_CPU_IMP_ARM) { 262 case ARM_CPU_PART_ARM1136:
261 switch (part_number) { 263 ret = armv6_1136_pmu_init(pmu);
262 case ARM_CPU_PART_ARM1136: 264 break;
263 case ARM_CPU_PART_ARM1156: 265 case ARM_CPU_PART_ARM1156:
264 case ARM_CPU_PART_ARM1176: 266 ret = armv6_1156_pmu_init(pmu);
265 ret = armv6pmu_init(pmu); 267 break;
266 break; 268 case ARM_CPU_PART_ARM1176:
267 case ARM_CPU_PART_ARM11MPCORE: 269 ret = armv6_1176_pmu_init(pmu);
268 ret = armv6mpcore_pmu_init(pmu); 270 break;
269 break; 271 case ARM_CPU_PART_ARM11MPCORE:
270 case ARM_CPU_PART_CORTEX_A8: 272 ret = armv6mpcore_pmu_init(pmu);
271 ret = armv7_a8_pmu_init(pmu); 273 break;
272 break; 274 case ARM_CPU_PART_CORTEX_A8:
273 case ARM_CPU_PART_CORTEX_A9: 275 ret = armv7_a8_pmu_init(pmu);
274 ret = armv7_a9_pmu_init(pmu); 276 break;
275 break; 277 case ARM_CPU_PART_CORTEX_A9:
276 } 278 ret = armv7_a9_pmu_init(pmu);
277 /* Intel CPUs [xscale]. */ 279 break;
278 } else if (implementor == ARM_CPU_IMP_INTEL) { 280
279 switch (xscale_cpu_arch_version()) { 281 default:
280 case ARM_CPU_XSCALE_ARCH_V1: 282 if (read_cpuid_implementor() == ARM_CPU_IMP_INTEL) {
281 ret = xscale1pmu_init(pmu); 283 switch (xscale_cpu_arch_version()) {
282 break; 284 case ARM_CPU_XSCALE_ARCH_V1:
283 case ARM_CPU_XSCALE_ARCH_V2: 285 ret = xscale1pmu_init(pmu);
284 ret = xscale2pmu_init(pmu); 286 break;
285 break; 287 case ARM_CPU_XSCALE_ARCH_V2:
288 ret = xscale2pmu_init(pmu);
289 break;
290 }
286 } 291 }
292 break;
287 } 293 }
288 294
289 put_cpu(); 295 put_cpu();
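
probe_current_pmu() now switches on read_cpuid_part(), which keeps the implementor byte inside the compared constant, so a bare part-number match can no longer mistake, say, an ARM Ltd part for an identically numbered part from another vendor; only the XScale leg still inspects the implementor explicitly. A sketch of the accessor, assuming the usual MIDR layout (implementor in bits [31:24], part number in bits [15:4]; the real definitions are in <asm/cputype.h>):

	#define ARM_CPU_PART_MASK	0xff00fff0	/* implementor | part number */
	#define ARM_CPU_PART_CORTEX_A9	0x4100c090	/* 0x41 = ARM Ltd, part 0xc09 */

	static inline unsigned int read_cpuid_part(void)
	{
		return read_cpuid_id() & ARM_CPU_PART_MASK;
	}
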
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 03664b0e8fa4..abfeb04f3213 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -65,13 +65,11 @@ enum armv6_counters {
65 * accesses/misses in hardware. 65 * accesses/misses in hardware.
66 */ 66 */
67static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = { 67static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
68 PERF_MAP_ALL_UNSUPPORTED,
68 [PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES, 69 [PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES,
69 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC, 70 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC,
70 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
71 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
72 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC, 71 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
73 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT, 72 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT,
74 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
75 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6_PERFCTR_IBUF_STALL, 73 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6_PERFCTR_IBUF_STALL,
76 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV6_PERFCTR_LSU_FULL_STALL, 74 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV6_PERFCTR_LSU_FULL_STALL,
77}; 75};
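
PERF_MAP_ALL_UNSUPPORTED leans on GCC's designated-initialiser ranges: it pre-fills every slot of the map with HW_OP_UNSUPPORTED, and the explicit entries that follow in the same initialiser override the defaults, so only genuinely supported events need spelling out. Assuming the obvious definitions (the real macros live in the ARM perf code), they look roughly like:

	#define PERF_MAP_ALL_UNSUPPORTED \
		[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED

	#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
		[0 ... C(MAX) - 1] = {						\
			[0 ... C(OP_MAX) - 1] = {				\
				[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED, \
			},							\
		}
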
@@ -79,116 +77,31 @@ static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
79static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] 77static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
80 [PERF_COUNT_HW_CACHE_OP_MAX] 78 [PERF_COUNT_HW_CACHE_OP_MAX]
81 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 79 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
82 [C(L1D)] = { 80 PERF_CACHE_MAP_ALL_UNSUPPORTED,
83 /* 81
84 * The performance counters don't differentiate between read 82 /*
85 * and write accesses/misses so this isn't strictly correct, 83 * The performance counters don't differentiate between read and write
86 * but it's the best we can do. Writes and reads get 84 * accesses/misses so this isn't strictly correct, but it's the best we
87 * combined. 85 * can do. Writes and reads get combined.
88 */ 86 */
89 [C(OP_READ)] = { 87 [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
90 [C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS, 88 [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
91 [C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS, 89 [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
92 }, 90 [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
93 [C(OP_WRITE)] = { 91
94 [C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS, 92 [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,
95 [C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS, 93
96 }, 94 /*
97 [C(OP_PREFETCH)] = { 95 * The ARM performance counters can count micro DTLB misses, micro ITLB
98 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 96 * misses and main TLB misses. There isn't an event for TLB misses, so
99 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 97 * use the micro misses here and if users want the main TLB misses they
100 }, 98 * can use a raw counter.
101 }, 99 */
102 [C(L1I)] = { 100 [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
103 [C(OP_READ)] = { 101 [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
104 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 102
105 [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS, 103 [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
106 }, 104 [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
107 [C(OP_WRITE)] = {
108 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
109 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
110 },
111 [C(OP_PREFETCH)] = {
112 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
113 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
114 },
115 },
116 [C(LL)] = {
117 [C(OP_READ)] = {
118 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
119 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
120 },
121 [C(OP_WRITE)] = {
122 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
123 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
124 },
125 [C(OP_PREFETCH)] = {
126 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
127 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
128 },
129 },
130 [C(DTLB)] = {
131 /*
132 * The ARM performance counters can count micro DTLB misses,
133 * micro ITLB misses and main TLB misses. There isn't an event
134 * for TLB misses, so use the micro misses here and if users
135 * want the main TLB misses they can use a raw counter.
136 */
137 [C(OP_READ)] = {
138 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
139 [C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
140 },
141 [C(OP_WRITE)] = {
142 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
143 [C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
144 },
145 [C(OP_PREFETCH)] = {
146 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
147 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
148 },
149 },
150 [C(ITLB)] = {
151 [C(OP_READ)] = {
152 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
153 [C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
154 },
155 [C(OP_WRITE)] = {
156 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
157 [C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
158 },
159 [C(OP_PREFETCH)] = {
160 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
161 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
162 },
163 },
164 [C(BPU)] = {
165 [C(OP_READ)] = {
166 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
167 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
168 },
169 [C(OP_WRITE)] = {
170 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
171 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
172 },
173 [C(OP_PREFETCH)] = {
174 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
175 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
176 },
177 },
178 [C(NODE)] = {
179 [C(OP_READ)] = {
180 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
181 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
182 },
183 [C(OP_WRITE)] = {
184 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
185 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
186 },
187 [C(OP_PREFETCH)] = {
188 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
189 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
190 },
191 },
192}; 105};
193 106
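
The comment retained in the condensed table points users at raw counters for main TLB misses, since no generic event maps to them. From userspace that means opening a PERF_TYPE_RAW event; a hypothetical example (the 0x44 event number is invented, not taken from this patch):

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int open_raw_tlb_counter(void)
	{
		struct perf_event_attr attr = {
			.type	= PERF_TYPE_RAW,
			.size	= sizeof(attr),
			.config	= 0x44,		/* invented main-TLB-miss number */
		};

		/* count on the calling task, any CPU */
		return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	}
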
194enum armv6mpcore_perf_types { 107enum armv6mpcore_perf_types {
@@ -220,13 +133,11 @@ enum armv6mpcore_perf_types {
220 * accesses/misses in hardware. 133 * accesses/misses in hardware.
221 */ 134 */
222static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = { 135static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
136 PERF_MAP_ALL_UNSUPPORTED,
223 [PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES, 137 [PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
224 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC, 138 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
225 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
226 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
227 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC, 139 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
228 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT, 140 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
229 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
230 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6MPCORE_PERFCTR_IBUF_STALL, 141 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6MPCORE_PERFCTR_IBUF_STALL,
231 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV6MPCORE_PERFCTR_LSU_FULL_STALL, 142 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV6MPCORE_PERFCTR_LSU_FULL_STALL,
232}; 143};
@@ -234,114 +145,26 @@ static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
234static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] 145static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
235 [PERF_COUNT_HW_CACHE_OP_MAX] 146 [PERF_COUNT_HW_CACHE_OP_MAX]
236 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 147 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
237 [C(L1D)] = { 148 PERF_CACHE_MAP_ALL_UNSUPPORTED,
238 [C(OP_READ)] = { 149
239 [C(RESULT_ACCESS)] = 150 [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
240 ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS, 151 [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
241 [C(RESULT_MISS)] = 152 [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
242 ARMV6MPCORE_PERFCTR_DCACHE_RDMISS, 153 [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,
243 }, 154
244 [C(OP_WRITE)] = { 155 [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
245 [C(RESULT_ACCESS)] = 156
246 ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS, 157 /*
247 [C(RESULT_MISS)] = 158 * The ARM performance counters can count micro DTLB misses, micro ITLB
248 ARMV6MPCORE_PERFCTR_DCACHE_WRMISS, 159 * misses and main TLB misses. There isn't an event for TLB misses, so
249 }, 160 * use the micro misses here and if users want the main TLB misses they
250 [C(OP_PREFETCH)] = { 161 * can use a raw counter.
251 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 162 */
252 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 163 [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
253 }, 164 [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
254 }, 165
255 [C(L1I)] = { 166 [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
256 [C(OP_READ)] = { 167 [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
257 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
258 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
259 },
260 [C(OP_WRITE)] = {
261 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
262 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
263 },
264 [C(OP_PREFETCH)] = {
265 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
266 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
267 },
268 },
269 [C(LL)] = {
270 [C(OP_READ)] = {
271 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
272 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
273 },
274 [C(OP_WRITE)] = {
275 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
276 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
277 },
278 [C(OP_PREFETCH)] = {
279 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
280 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
281 },
282 },
283 [C(DTLB)] = {
284 /*
285 * The ARM performance counters can count micro DTLB misses,
286 * micro ITLB misses and main TLB misses. There isn't an event
287 * for TLB misses, so use the micro misses here and if users
288 * want the main TLB misses they can use a raw counter.
289 */
290 [C(OP_READ)] = {
291 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
292 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
293 },
294 [C(OP_WRITE)] = {
295 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
296 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
297 },
298 [C(OP_PREFETCH)] = {
299 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
300 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
301 },
302 },
303 [C(ITLB)] = {
304 [C(OP_READ)] = {
305 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
306 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
307 },
308 [C(OP_WRITE)] = {
309 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
310 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
311 },
312 [C(OP_PREFETCH)] = {
313 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
314 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
315 },
316 },
317 [C(BPU)] = {
318 [C(OP_READ)] = {
319 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
320 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
321 },
322 [C(OP_WRITE)] = {
323 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
324 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
325 },
326 [C(OP_PREFETCH)] = {
327 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
328 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
329 },
330 },
331 [C(NODE)] = {
332 [C(OP_READ)] = {
333 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
334 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
335 },
336 [C(OP_WRITE)] = {
337 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
338 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
339 },
340 [C(OP_PREFETCH)] = {
341 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
342 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
343 },
344 },
345}; 168};
346 169
347static inline unsigned long 170static inline unsigned long
@@ -653,9 +476,8 @@ static int armv6_map_event(struct perf_event *event)
653 &armv6_perf_cache_map, 0xFF); 476 &armv6_perf_cache_map, 0xFF);
654} 477}
655 478
656static int armv6pmu_init(struct arm_pmu *cpu_pmu) 479static void armv6pmu_init(struct arm_pmu *cpu_pmu)
657{ 480{
658 cpu_pmu->name = "v6";
659 cpu_pmu->handle_irq = armv6pmu_handle_irq; 481 cpu_pmu->handle_irq = armv6pmu_handle_irq;
660 cpu_pmu->enable = armv6pmu_enable_event; 482 cpu_pmu->enable = armv6pmu_enable_event;
661 cpu_pmu->disable = armv6pmu_disable_event; 483 cpu_pmu->disable = armv6pmu_disable_event;
@@ -667,7 +489,26 @@ static int armv6pmu_init(struct arm_pmu *cpu_pmu)
667 cpu_pmu->map_event = armv6_map_event; 489 cpu_pmu->map_event = armv6_map_event;
668 cpu_pmu->num_events = 3; 490 cpu_pmu->num_events = 3;
669 cpu_pmu->max_period = (1LLU << 32) - 1; 491 cpu_pmu->max_period = (1LLU << 32) - 1;
492}
493
494static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
495{
496 armv6pmu_init(cpu_pmu);
497 cpu_pmu->name = "armv6_1136";
498 return 0;
499}
670 500
501static int armv6_1156_pmu_init(struct arm_pmu *cpu_pmu)
502{
503 armv6pmu_init(cpu_pmu);
504 cpu_pmu->name = "armv6_1156";
505 return 0;
506}
507
508static int armv6_1176_pmu_init(struct arm_pmu *cpu_pmu)
509{
510 armv6pmu_init(cpu_pmu);
511 cpu_pmu->name = "armv6_1176";
671 return 0; 512 return 0;
672} 513}
673 514
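
Splitting armv6pmu_init() into a common body plus one thin wrapper per CPU gives each PMU a distinct name, and that name is what perf exposes to users (e.g. /sys/bus/event_source/devices/armv6_1136). The patch writes the three wrappers out long-hand; an equivalent, more compact formulation would be a generator macro, sketched here for comparison rather than taken from the patch:

	#define ARMV6_PMU_INIT(_fn, _name)		\
	static int _fn(struct arm_pmu *cpu_pmu)	\
	{						\
		armv6pmu_init(cpu_pmu);			\
		cpu_pmu->name = _name;			\
		return 0;				\
	}

	ARMV6_PMU_INIT(armv6_1136_pmu_init, "armv6_1136")
	ARMV6_PMU_INIT(armv6_1156_pmu_init, "armv6_1156")
	ARMV6_PMU_INIT(armv6_1176_pmu_init, "armv6_1176")
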
@@ -687,7 +528,7 @@ static int armv6mpcore_map_event(struct perf_event *event)
687 528
688static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu) 529static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
689{ 530{
690 cpu_pmu->name = "v6mpcore"; 531 cpu_pmu->name = "armv6_11mpcore";
691 cpu_pmu->handle_irq = armv6pmu_handle_irq; 532 cpu_pmu->handle_irq = armv6pmu_handle_irq;
692 cpu_pmu->enable = armv6pmu_enable_event; 533 cpu_pmu->enable = armv6pmu_enable_event;
693 cpu_pmu->disable = armv6mpcore_pmu_disable_event; 534 cpu_pmu->disable = armv6mpcore_pmu_disable_event;
@@ -703,7 +544,17 @@ static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
703 return 0; 544 return 0;
704} 545}
705#else 546#else
706static int armv6pmu_init(struct arm_pmu *cpu_pmu) 547static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
548{
549 return -ENODEV;
550}
551
552static int armv6_1156_pmu_init(struct arm_pmu *cpu_pmu)
553{
554 return -ENODEV;
555}
556
557static int armv6_1176_pmu_init(struct arm_pmu *cpu_pmu)
707{ 558{
708 return -ENODEV; 559 return -ENODEV;
709} 560}
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 1d37568c547a..116758b77f93 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -148,137 +148,62 @@ enum krait_perf_types {
148 * accesses/misses in hardware. 148 * accesses/misses in hardware.
149 */ 149 */
150static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = { 150static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
151 PERF_MAP_ALL_UNSUPPORTED,
151 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, 152 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
152 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, 153 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
153 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, 154 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
154 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, 155 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
155 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, 156 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
156 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, 157 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
157 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
158 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE, 158 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE,
159 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
160}; 159};
161 160
162static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] 161static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
163 [PERF_COUNT_HW_CACHE_OP_MAX] 162 [PERF_COUNT_HW_CACHE_OP_MAX]
164 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 163 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
165 [C(L1D)] = { 164 PERF_CACHE_MAP_ALL_UNSUPPORTED,
166 /* 165
167 * The performance counters don't differentiate between read 166 /*
168 * and write accesses/misses so this isn't strictly correct, 167 * The performance counters don't differentiate between read and write
169 * but it's the best we can do. Writes and reads get 168 * accesses/misses so this isn't strictly correct, but it's the best we
170 * combined. 169 * can do. Writes and reads get combined.
171 */ 170 */
172 [C(OP_READ)] = { 171 [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
173 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, 172 [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
174 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, 173 [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
175 }, 174 [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
176 [C(OP_WRITE)] = { 175
177 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, 176 [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
178 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, 177 [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
179 }, 178
180 [C(OP_PREFETCH)] = { 179 [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
181 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 180 [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
182 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 181 [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
183 }, 182 [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
184 }, 183
185 [C(L1I)] = { 184 [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
186 [C(OP_READ)] = { 185 [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
187 [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS, 186
188 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 187 [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
189 }, 188 [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
190 [C(OP_WRITE)] = { 189
191 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 190 [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
192 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 191 [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
193 }, 192 [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
194 [C(OP_PREFETCH)] = { 193 [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
195 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
196 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
197 },
198 },
199 [C(LL)] = {
200 [C(OP_READ)] = {
201 [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
202 [C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
203 },
204 [C(OP_WRITE)] = {
205 [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
206 [C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
207 },
208 [C(OP_PREFETCH)] = {
209 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
210 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
211 },
212 },
213 [C(DTLB)] = {
214 [C(OP_READ)] = {
215 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
216 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
217 },
218 [C(OP_WRITE)] = {
219 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
220 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
221 },
222 [C(OP_PREFETCH)] = {
223 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
224 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
225 },
226 },
227 [C(ITLB)] = {
228 [C(OP_READ)] = {
229 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
230 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
231 },
232 [C(OP_WRITE)] = {
233 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
234 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
235 },
236 [C(OP_PREFETCH)] = {
237 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
238 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
239 },
240 },
241 [C(BPU)] = {
242 [C(OP_READ)] = {
243 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
244 [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
245 },
246 [C(OP_WRITE)] = {
247 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
248 [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
249 },
250 [C(OP_PREFETCH)] = {
251 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
252 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
253 },
254 },
255 [C(NODE)] = {
256 [C(OP_READ)] = {
257 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
258 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
259 },
260 [C(OP_WRITE)] = {
261 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
262 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
263 },
264 [C(OP_PREFETCH)] = {
265 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
266 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
267 },
268 },
269}; 194};
270 195
271/* 196/*
272 * Cortex-A9 HW events mapping 197 * Cortex-A9 HW events mapping
273 */ 198 */
274static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = { 199static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
200 PERF_MAP_ALL_UNSUPPORTED,
275 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, 201 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
276 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME, 202 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
277 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, 203 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
278 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, 204 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
279 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, 205 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
280 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, 206 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
281 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
282 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE, 207 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE,
283 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV7_A9_PERFCTR_STALL_DISPATCH, 208 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV7_A9_PERFCTR_STALL_DISPATCH,
284}; 209};
@@ -286,238 +211,83 @@ static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
286static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] 211static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
287 [PERF_COUNT_HW_CACHE_OP_MAX] 212 [PERF_COUNT_HW_CACHE_OP_MAX]
288 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 213 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
289 [C(L1D)] = { 214 PERF_CACHE_MAP_ALL_UNSUPPORTED,
290 /* 215
291 * The performance counters don't differentiate between read 216 /*
292 * and write accesses/misses so this isn't strictly correct, 217 * The performance counters don't differentiate between read and write
293 * but it's the best we can do. Writes and reads get 218 * accesses/misses so this isn't strictly correct, but it's the best we
294 * combined. 219 * can do. Writes and reads get combined.
295 */ 220 */
296 [C(OP_READ)] = { 221 [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
297 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, 222 [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
298 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, 223 [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
299 }, 224 [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
300 [C(OP_WRITE)] = { 225
301 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, 226 [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
302 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, 227
303 }, 228 [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
304 [C(OP_PREFETCH)] = { 229 [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
305 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 230
306 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 231 [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
307 }, 232 [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
308 }, 233
309 [C(L1I)] = { 234 [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
310 [C(OP_READ)] = { 235 [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
311 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 236 [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
312 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 237 [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
313 },
314 [C(OP_WRITE)] = {
315 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
316 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
317 },
318 [C(OP_PREFETCH)] = {
319 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
320 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
321 },
322 },
323 [C(LL)] = {
324 [C(OP_READ)] = {
325 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
326 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
327 },
328 [C(OP_WRITE)] = {
329 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
330 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
331 },
332 [C(OP_PREFETCH)] = {
333 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
334 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
335 },
336 },
337 [C(DTLB)] = {
338 [C(OP_READ)] = {
339 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
340 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
341 },
342 [C(OP_WRITE)] = {
343 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
344 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
345 },
346 [C(OP_PREFETCH)] = {
347 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
348 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
349 },
350 },
351 [C(ITLB)] = {
352 [C(OP_READ)] = {
353 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
354 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
355 },
356 [C(OP_WRITE)] = {
357 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
358 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
359 },
360 [C(OP_PREFETCH)] = {
361 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
362 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
363 },
364 },
365 [C(BPU)] = {
366 [C(OP_READ)] = {
367 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
368 [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
369 },
370 [C(OP_WRITE)] = {
371 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
372 [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
373 },
374 [C(OP_PREFETCH)] = {
375 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
376 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
377 },
378 },
379 [C(NODE)] = {
380 [C(OP_READ)] = {
381 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
382 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
383 },
384 [C(OP_WRITE)] = {
385 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
386 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
387 },
388 [C(OP_PREFETCH)] = {
389 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
390 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
391 },
392 },
393}; 238};
394 239
395/* 240/*
396 * Cortex-A5 HW events mapping 241 * Cortex-A5 HW events mapping
397 */ 242 */
398static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = { 243static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
244 PERF_MAP_ALL_UNSUPPORTED,
399 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, 245 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
400 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, 246 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
401 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, 247 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
402 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, 248 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
403 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, 249 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
404 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, 250 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
405 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
406 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
407 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
408}; 251};
409 252
410static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] 253static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
411 [PERF_COUNT_HW_CACHE_OP_MAX] 254 [PERF_COUNT_HW_CACHE_OP_MAX]
412 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 255 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
413 [C(L1D)] = { 256 PERF_CACHE_MAP_ALL_UNSUPPORTED,
414 [C(OP_READ)] = { 257
415 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, 258 [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
416 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, 259 [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
417 }, 260 [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
418 [C(OP_WRITE)] = { 261 [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
419 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, 262 [C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
420 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, 263 [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
421 }, 264
422 [C(OP_PREFETCH)] = { 265 [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
423 [C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL, 266 [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
424 [C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP, 267 /*
425 }, 268 * The prefetch counters don't differentiate between the I side and the
426 }, 269 * D side.
427 [C(L1I)] = { 270 */
428 [C(OP_READ)] = { 271 [C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
429 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, 272 [C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
430 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 273
431 }, 274 [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
432 [C(OP_WRITE)] = { 275 [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
433 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 276
434 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 277 [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
435 }, 278 [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
436 /* 279
437 * The prefetch counters don't differentiate between the I 280 [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
438 * side and the D side. 281 [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
439 */ 282 [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
440 [C(OP_PREFETCH)] = { 283 [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
441 [C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
442 [C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
443 },
444 },
445 [C(LL)] = {
446 [C(OP_READ)] = {
447 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
448 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
449 },
450 [C(OP_WRITE)] = {
451 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
452 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
453 },
454 [C(OP_PREFETCH)] = {
455 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
456 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
457 },
458 },
459 [C(DTLB)] = {
460 [C(OP_READ)] = {
461 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
462 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
463 },
464 [C(OP_WRITE)] = {
465 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
466 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
467 },
468 [C(OP_PREFETCH)] = {
469 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
470 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
471 },
472 },
473 [C(ITLB)] = {
474 [C(OP_READ)] = {
475 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
476 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
477 },
478 [C(OP_WRITE)] = {
479 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
480 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
481 },
482 [C(OP_PREFETCH)] = {
483 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
484 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
485 },
486 },
487 [C(BPU)] = {
488 [C(OP_READ)] = {
489 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
490 [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
491 },
492 [C(OP_WRITE)] = {
493 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
494 [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
495 },
496 [C(OP_PREFETCH)] = {
497 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
498 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
499 },
500 },
501 [C(NODE)] = {
502 [C(OP_READ)] = {
503 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
504 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
505 },
506 [C(OP_WRITE)] = {
507 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
508 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
509 },
510 [C(OP_PREFETCH)] = {
511 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
512 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
513 },
514 },
515}; 284};
516 285
517/* 286/*
518 * Cortex-A15 HW events mapping 287 * Cortex-A15 HW events mapping
519 */ 288 */
520static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = { 289static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
290 PERF_MAP_ALL_UNSUPPORTED,
521 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, 291 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
522 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, 292 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
 	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
@@ -525,123 +295,48 @@ static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
 	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
-	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
-	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 };
 
 static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 					[PERF_COUNT_HW_CACHE_OP_MAX]
 					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
-	[C(L1D)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
-			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
-			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(L1I)] = {
-		/*
-		 * Not all performance counters differentiate between read
-		 * and write accesses/misses so we're not always strictly
-		 * correct, but it's the best we can do. Writes and reads get
-		 * combined in these cases.
-		 */
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(LL)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
-			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
-			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(DTLB)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(ITLB)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(BPU)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(NODE)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
+	PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
+	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
+
+	/*
+	 * Not all performance counters differentiate between read and write
+	 * accesses/misses so we're not always strictly correct, but it's the
+	 * best we can do. Writes and reads get combined in these cases.
+	 */
+	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
+
+	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
+	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
+	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
+	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
+
+	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
+	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
+
+	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
+	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
+
+	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 };
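The conversion above leans on C99 designated initializers: a macro that names every slot pre-fills the table with "unsupported", and the sparse entries that follow override it, so each per-CPU map only spells out the events that really exist. A minimal, compilable sketch of the idiom (the macro body and names here are illustrative stand-ins, not the kernel's PERF_CACHE_MAP_ALL_UNSUPPORTED):

#include <stdio.h>

#define HW_OP_UNSUPPORTED	0xffff
/* GNU range initializer as a stand-in for the kernel's "all unsupported" macro */
#define MAP_ALL_UNSUPPORTED	[0 ... 3] = HW_OP_UNSUPPORTED

static const unsigned demo_map[4] = {
	MAP_ALL_UNSUPPORTED,
	[1] = 0x11,		/* a later designated initializer wins over the default */
};

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("%x ", demo_map[i]);	/* prints: ffff 11 ffff ffff */
	printf("\n");
	return 0;
}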
 
 /*
  * Cortex-A7 HW events mapping
  */
 static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
+	PERF_MAP_ALL_UNSUPPORTED,
 	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
@@ -649,123 +344,48 @@ static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
-	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
-	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 };
 
 static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 					[PERF_COUNT_HW_CACHE_OP_MAX]
 					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
-	[C(L1D)] = {
-		/*
-		 * The performance counters don't differentiate between read
-		 * and write accesses/misses so this isn't strictly correct,
-		 * but it's the best we can do. Writes and reads get
-		 * combined.
-		 */
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(L1I)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(LL)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(DTLB)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(ITLB)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(BPU)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(NODE)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
+	PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+	/*
+	 * The performance counters don't differentiate between read and write
+	 * accesses/misses so this isn't strictly correct, but it's the best we
+	 * can do. Writes and reads get combined.
+	 */
+	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
+
+	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
+
+	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
+	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
+	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
+	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
+
+	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
+	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
+
+	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
+	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
+
+	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 };
 
 /*
  * Cortex-A12 HW events mapping
  */
 static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
+	PERF_MAP_ALL_UNSUPPORTED,
 	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
@@ -773,138 +393,60 @@ static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A12_PERFCTR_PC_WRITE_SPEC,
 	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
-	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
-	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 };
 
 static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 					[PERF_COUNT_HW_CACHE_OP_MAX]
 					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
-	[C(L1D)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(L1I)] = {
-		/*
-		 * Not all performance counters differentiate between read
-		 * and write accesses/misses so we're not always strictly
-		 * correct, but it's the best we can do. Writes and reads get
-		 * combined in these cases.
-		 */
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(LL)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(DTLB)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= ARMV7_A12_PERFCTR_PF_TLB_REFILL,
-		},
-	},
-	[C(ITLB)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(BPU)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(NODE)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
+	PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
+	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
+
+	/*
+	 * Not all performance counters differentiate between read and write
+	 * accesses/misses so we're not always strictly correct, but it's the
+	 * best we can do. Writes and reads get combined in these cases.
+	 */
+	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
+
+	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
+	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
+	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
+	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
+
+	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
+	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
+	[C(DTLB)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A12_PERFCTR_PF_TLB_REFILL,
+
+	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
+	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
+
+	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 };
 
 /*
  * Krait HW events mapping
  */
 static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = {
+	PERF_MAP_ALL_UNSUPPORTED,
 	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
-	[PERF_COUNT_HW_CACHE_REFERENCES]	= HW_OP_UNSUPPORTED,
-	[PERF_COUNT_HW_CACHE_MISSES]		= HW_OP_UNSUPPORTED,
 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_CLOCK_CYCLES,
 };
 
 static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
+	PERF_MAP_ALL_UNSUPPORTED,
 	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
-	[PERF_COUNT_HW_CACHE_REFERENCES]	= HW_OP_UNSUPPORTED,
-	[PERF_COUNT_HW_CACHE_MISSES]		= HW_OP_UNSUPPORTED,
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= HW_OP_UNSUPPORTED,
 	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_CLOCK_CYCLES,
 };
@@ -912,110 +454,31 @@ static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
 static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 					[PERF_COUNT_HW_CACHE_OP_MAX]
 					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
-	[C(L1D)] = {
-		/*
-		 * The performance counters don't differentiate between read
-		 * and write accesses/misses so this isn't strictly correct,
-		 * but it's the best we can do. Writes and reads get
-		 * combined.
-		 */
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(L1I)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ICACHE_ACCESS,
-			[C(RESULT_MISS)]	= KRAIT_PERFCTR_L1_ICACHE_MISS,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(LL)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(DTLB)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(ITLB)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(BPU)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
-			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(NODE)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
+	PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+	/*
+	 * The performance counters don't differentiate between read and write
+	 * accesses/misses so this isn't strictly correct, but it's the best we
+	 * can do. Writes and reads get combined.
+	 */
+	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
+
+	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ICACHE_ACCESS,
+	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= KRAIT_PERFCTR_L1_ICACHE_MISS,
+
+	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,
+	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,
+
+	[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,
+	[C(ITLB)][C(OP_WRITE)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,
+
+	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 };
 
 /*
@@ -1545,7 +1008,7 @@ static u32 armv7_read_num_pmnc_events(void)
 static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
 {
 	armv7pmu_init(cpu_pmu);
-	cpu_pmu->name		= "ARMv7 Cortex-A8";
+	cpu_pmu->name		= "armv7_cortex_a8";
 	cpu_pmu->map_event	= armv7_a8_map_event;
 	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
 	return 0;
@@ -1554,7 +1017,7 @@ static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
 static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
 {
 	armv7pmu_init(cpu_pmu);
-	cpu_pmu->name		= "ARMv7 Cortex-A9";
+	cpu_pmu->name		= "armv7_cortex_a9";
 	cpu_pmu->map_event	= armv7_a9_map_event;
 	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
 	return 0;
@@ -1563,7 +1026,7 @@ static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
 static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
 {
 	armv7pmu_init(cpu_pmu);
-	cpu_pmu->name		= "ARMv7 Cortex-A5";
+	cpu_pmu->name		= "armv7_cortex_a5";
 	cpu_pmu->map_event	= armv7_a5_map_event;
 	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
 	return 0;
@@ -1572,7 +1035,7 @@ static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
 static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
 {
 	armv7pmu_init(cpu_pmu);
-	cpu_pmu->name		= "ARMv7 Cortex-A15";
+	cpu_pmu->name		= "armv7_cortex_a15";
 	cpu_pmu->map_event	= armv7_a15_map_event;
 	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
 	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
@@ -1582,7 +1045,7 @@ static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
 static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
 {
 	armv7pmu_init(cpu_pmu);
-	cpu_pmu->name		= "ARMv7 Cortex-A7";
+	cpu_pmu->name		= "armv7_cortex_a7";
 	cpu_pmu->map_event	= armv7_a7_map_event;
 	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
 	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
@@ -1592,7 +1055,7 @@ static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
 static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
 {
 	armv7pmu_init(cpu_pmu);
-	cpu_pmu->name		= "ARMv7 Cortex-A12";
+	cpu_pmu->name		= "armv7_cortex_a12";
 	cpu_pmu->map_event	= armv7_a12_map_event;
 	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
 	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
@@ -1602,7 +1065,7 @@ static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
 static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
 {
 	armv7_a12_pmu_init(cpu_pmu);
-	cpu_pmu->name		= "ARMv7 Cortex-A17";
+	cpu_pmu->name		= "armv7_cortex_a17";
 	return 0;
 }
 
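The renames in these hunks swap human-readable strings for lowercase, underscore-separated names. A hedged sketch of why that matters: the arm_pmu core hands cpu_pmu->name to the generic perf layer when the PMU is registered, and that string becomes the event-source name visible to tooling (e.g. under /sys/bus/event_source/devices/), where spaces and capitals are awkward. The registration path below is illustrative, not copied from this patch:

/* Sketch only; assumes the arm_pmu embedding used elsewhere in this file. */
#include <linux/perf_event.h>

static int example_register(struct arm_pmu *cpu_pmu)
{
	/* the name set in the init functions above becomes the perf
	 * event source name, e.g. "armv7_cortex_a15" */
	return perf_pmu_register(&cpu_pmu->pmu, cpu_pmu->name, -1);
}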
@@ -1823,6 +1286,7 @@ static void krait_pmu_disable_event(struct perf_event *event)
 	unsigned long flags;
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	/* Disable counter and interrupt */
@@ -1848,6 +1312,7 @@ static void krait_pmu_enable_event(struct perf_event *event)
 	unsigned long flags;
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	/*
@@ -1981,7 +1446,7 @@ static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
 static int krait_pmu_init(struct arm_pmu *cpu_pmu)
 {
 	armv7pmu_init(cpu_pmu);
-	cpu_pmu->name		= "ARMv7 Krait";
+	cpu_pmu->name		= "armv7_krait";
 	/* Some early versions of Krait don't support PC write events */
 	if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
 				  "qcom,no-pc-write"))
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 63990c42fac9..08da0af550b7 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -48,118 +48,31 @@ enum xscale_counters {
 };
 
 static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
+	PERF_MAP_ALL_UNSUPPORTED,
 	[PERF_COUNT_HW_CPU_CYCLES]		= XSCALE_PERFCTR_CCNT,
 	[PERF_COUNT_HW_INSTRUCTIONS]		= XSCALE_PERFCTR_INSTRUCTION,
-	[PERF_COUNT_HW_CACHE_REFERENCES]	= HW_OP_UNSUPPORTED,
-	[PERF_COUNT_HW_CACHE_MISSES]		= HW_OP_UNSUPPORTED,
 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= XSCALE_PERFCTR_BRANCH,
 	[PERF_COUNT_HW_BRANCH_MISSES]		= XSCALE_PERFCTR_BRANCH_MISS,
-	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
 	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= XSCALE_PERFCTR_ICACHE_NO_DELIVER,
-	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 };
 
 static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 					[PERF_COUNT_HW_CACHE_OP_MAX]
 					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
-	[C(L1D)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
-			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
-			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(L1I)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ICACHE_MISS,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(LL)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(DTLB)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(ITLB)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(BPU)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(NODE)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
+	PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
+	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,
+
+	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_ICACHE_MISS,
+
+	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,
+	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,
+
+	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
+	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
 };
 
 #define	XSCALE_PMU_ENABLE	0x001
@@ -442,7 +355,7 @@ static int xscale_map_event(struct perf_event *event)
 
 static int xscale1pmu_init(struct arm_pmu *cpu_pmu)
 {
-	cpu_pmu->name		= "xscale1";
+	cpu_pmu->name		= "armv5_xscale1";
 	cpu_pmu->handle_irq	= xscale1pmu_handle_irq;
 	cpu_pmu->enable		= xscale1pmu_enable_event;
 	cpu_pmu->disable	= xscale1pmu_disable_event;
@@ -812,7 +725,7 @@ static inline void xscale2pmu_write_counter(struct perf_event *event, u32 val)
 
 static int xscale2pmu_init(struct arm_pmu *cpu_pmu)
 {
-	cpu_pmu->name		= "xscale2";
+	cpu_pmu->name		= "armv5_xscale2";
 	cpu_pmu->handle_irq	= xscale2pmu_handle_irq;
 	cpu_pmu->enable		= xscale2pmu_enable_event;
 	cpu_pmu->disable	= xscale2pmu_disable_event;
diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
index 95858966d84e..35e72585ec1d 100644
--- a/arch/arm/kernel/relocate_kernel.S
+++ b/arch/arm/kernel/relocate_kernel.S
@@ -3,6 +3,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/kexec.h>
 
 	.align	3	/* not needed for this code, but keeps fncpy() happy */
@@ -59,7 +60,7 @@ ENTRY(relocate_new_kernel)
 	mov r0,#0
 	ldr r1,kexec_mach_type
 	ldr r2,kexec_boot_atags
- ARM(	mov pc, lr	)
+ ARM(	ret lr	)
 THUMB(	bx lr		)
 
 	.align
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 8a16ee5d8a95..84db893dedc2 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -393,19 +393,34 @@ static void __init cpuid_init_hwcaps(void)
 		elf_hwcap |= HWCAP_LPAE;
 }
 
-static void __init feat_v6_fixup(void)
+static void __init elf_hwcap_fixup(void)
 {
-	int id = read_cpuid_id();
-
-	if ((id & 0xff0f0000) != 0x41070000)
-		return;
+	unsigned id = read_cpuid_id();
+	unsigned sync_prim;
 
 	/*
 	 * HWCAP_TLS is available only on 1136 r1p0 and later,
 	 * see also kuser_get_tls_init.
 	 */
-	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
+	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
+	    ((id >> 20) & 3) == 0) {
 		elf_hwcap &= ~HWCAP_TLS;
+		return;
+	}
+
+	/* Verify if CPUID scheme is implemented */
+	if ((id & 0x000f0000) != 0x000f0000)
+		return;
+
+	/*
+	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
+	 * avoid advertising SWP; it may not be atomic with
+	 * multiprocessing cores.
+	 */
+	sync_prim = ((read_cpuid_ext(CPUID_EXT_ISAR3) >> 8) & 0xf0) |
+		    ((read_cpuid_ext(CPUID_EXT_ISAR4) >> 20) & 0x0f);
+	if (sync_prim >= 0x13)
+		elf_hwcap &= ~HWCAP_SWP;
 }
 
 /*
@@ -609,7 +624,7 @@ static void __init setup_processor(void)
 #endif
 	erratum_a15_798181_init();
 
-	feat_v6_fixup();
+	elf_hwcap_fixup();
 
 	cacheid_init();
 	cpu_init();
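The userspace-visible effect of the hunk above is that HWCAP_SWP disappears from the auxiliary vector on CPUs with exclusive-access primitives. A hedged usage sketch showing how a program would observe that (standard glibc/uapi interfaces, nothing from this patch):

#include <stdio.h>
#include <sys/auxv.h>		/* getauxval(), glibc 2.16+ */
#include <asm/hwcap.h>		/* HWCAP_SWP, ARM uapi header */

int main(void)
{
	unsigned long hwcaps = getauxval(AT_HWCAP);

	if (hwcaps & HWCAP_SWP)
		printf("kernel advertises SWP\n");
	else
		printf("SWP not advertised; prefer __atomic builtins\n");
	return 0;
}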
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 1b880db2a033..e1e60e5a7a27 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -107,7 +107,7 @@ ENTRY(cpu_resume_mmu)
 	instr_sync
 	mov	r0, r0
 	mov	r0, r0
-	mov	pc, r3			@ jump to virtual address
+	ret	r3			@ jump to virtual address
 ENDPROC(cpu_resume_mmu)
 	.popsection
 cpu_resume_after_mmu:
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index 1aafa0d785eb..72f9241ad5db 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -17,6 +17,8 @@
 #include <asm/cputype.h>
 
 #define SCU_CTRL		0x00
+#define SCU_ENABLE		(1 << 0)
+#define SCU_STANDBY_ENABLE	(1 << 5)
 #define SCU_CONFIG		0x04
 #define SCU_CPU_STATUS		0x08
 #define SCU_INVALIDATE		0x0c
@@ -50,10 +52,16 @@ void scu_enable(void __iomem *scu_base)
 
 	scu_ctrl = readl_relaxed(scu_base + SCU_CTRL);
 	/* already enabled? */
-	if (scu_ctrl & 1)
+	if (scu_ctrl & SCU_ENABLE)
 		return;
 
-	scu_ctrl |= 1;
+	scu_ctrl |= SCU_ENABLE;
+
+	/* Cortex-A9 earlier than r2p0 has no standby bit in SCU */
+	if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc090 &&
+	    (read_cpuid_id() & 0x00f0000f) >= 0x00200000)
+		scu_ctrl |= SCU_STANDBY_ENABLE;
+
 	writel_relaxed(scu_ctrl, scu_base + SCU_CTRL);
 
 	/*
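The magic numbers in the standby check decode as MIDR fields. A sketch of that decoding (the field layout is architecturally defined; the helper name and structure here are mine):

#include <stdbool.h>

/* equivalent, spelled out, to the masked comparisons in scu_enable() */
static bool cortex_a9_r2p0_or_later(unsigned int midr)
{
	unsigned int implementer = (midr >> 24) & 0xff;	/* 0x41 = ARM */
	unsigned int part	 = (midr >> 4) & 0xfff;	/* 0xc09 = Cortex-A9 */
	unsigned int variant	 = (midr >> 20) & 0xf;	/* major revision, rN */

	return implementer == 0x41 && part == 0xc09 && variant >= 2;
}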
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 95d063620b76..2e72be4f623e 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -92,15 +92,19 @@ void erratum_a15_798181_init(void)
 	unsigned int midr = read_cpuid_id();
 	unsigned int revidr = read_cpuid(CPUID_REVIDR);
 
-	/* Cortex-A15 r0p0..r3p2 w/o ECO fix affected */
-	if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2 ||
-	    (revidr & 0x210) == 0x210) {
-		return;
-	}
-	if (revidr & 0x10)
-		erratum_a15_798181_handler = erratum_a15_798181_partial;
-	else
+	/* Brahma-B15 r0p0..r0p2 affected
+	 * Cortex-A15 r0p0..r3p2 w/o ECO fix affected */
+	if ((midr & 0xff0ffff0) == 0x420f00f0 && midr <= 0x420f00f2)
 		erratum_a15_798181_handler = erratum_a15_798181_broadcast;
+	else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr <= 0x413fc0f2 &&
+		 (revidr & 0x210) != 0x210) {
+		if (revidr & 0x10)
+			erratum_a15_798181_handler =
+				erratum_a15_798181_partial;
+		else
+			erratum_a15_798181_handler =
+				erratum_a15_798181_broadcast;
+	}
 }
 #endif
 
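For reference, my decoding of the constants in that hunk (not part of the patch itself): both comparisons mask MIDR down to implementer plus part number, so the vendor field is always checked, in line with the merge description's point about proper CPU part number checks.

#define MIDR_IMPLEMENTER(m)	(((m) >> 24) & 0xff)	/* 0x41 = ARM, 0x42 = Broadcom */
#define MIDR_PARTNUM(m)		(((m) >> 4) & 0xfff)

/* 0x420f00f0 -> implementer 0x42, part 0x00f: Broadcom Brahma-B15 */
/* 0x410fc0f0 -> implementer 0x41, part 0xc0f: ARM Cortex-A15      */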
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index b1b89882b113..67ca8578c6d8 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -27,6 +27,7 @@
27#include <linux/perf_event.h> 27#include <linux/perf_event.h>
28 28
29#include <asm/opcodes.h> 29#include <asm/opcodes.h>
30#include <asm/system_info.h>
30#include <asm/traps.h> 31#include <asm/traps.h>
31#include <asm/uaccess.h> 32#include <asm/uaccess.h>
32 33
@@ -266,6 +267,9 @@ static struct undef_hook swp_hook = {
266 */ 267 */
267static int __init swp_emulation_init(void) 268static int __init swp_emulation_init(void)
268{ 269{
270 if (cpu_architecture() < CPU_ARCH_ARMv7)
271 return 0;
272
269#ifdef CONFIG_PROC_FS 273#ifdef CONFIG_PROC_FS
270 if (!proc_create("cpu/swp_emulation", S_IRUGO, NULL, &proc_status_fops)) 274 if (!proc_create("cpu/swp_emulation", S_IRUGO, NULL, &proc_status_fops))
271 return -ENOMEM; 275 return -ENOMEM;
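Context for the new early return, as a hedged sketch: swp_hook is an undef_hook claiming the SWP/SWPB encodings, so each trapped instruction is emulated in the kernel. Gating on ARMv7 makes sense because earlier cores still execute SWP natively and never trap into the hook. The field values below are illustrative, not copied from the file:

#include <asm/traps.h>

static int swp_handler(struct pt_regs *regs, unsigned int instr);

static struct undef_hook example_swp_hook = {
	.instr_mask	= 0x0fb00ff0,	/* match SWP{B} encodings (illustrative) */
	.instr_val	= 0x01000090,
	.cpsr_mask	= MODE_MASK | PSR_T_BIT,	/* user-mode ARM state only */
	.cpsr_val	= USR_MODE,
	.fn		= swp_handler,	/* emulation entry point */
};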
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 829a96d4a179..0cc7e58c47cc 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -50,10 +50,7 @@ unsigned long profile_pc(struct pt_regs *regs)
 	if (!in_lock_functions(regs->ARM_pc))
 		return regs->ARM_pc;
 
-	frame.fp = regs->ARM_fp;
-	frame.sp = regs->ARM_sp;
-	frame.lr = regs->ARM_lr;
-	frame.pc = regs->ARM_pc;
+	arm_get_current_stackframe(regs, &frame);
 	do {
 		int ret = unwind_frame(&frame);
 		if (ret < 0)
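This is the "clean up the way we create the initial stack frame" item from the merge description: the four hand-rolled assignments become one helper. Judging from the removed lines here and the frame_pointer() conversions in traps.c below, the helper plausibly amounts to the following (a sketch, not the header's verbatim definition):

#include <asm/stacktrace.h>	/* struct stackframe */
#include <asm/ptrace.h>		/* frame_pointer() */

static inline void example_get_current_stackframe(struct pt_regs *regs,
						  struct stackframe *frame)
{
	frame->fp = frame_pointer(regs);	/* ARM_fp, or ARM_r7 on Thumb-2 */
	frame->sp = regs->ARM_sp;
	frame->lr = regs->ARM_lr;
	frame->pc = regs->ARM_pc;
}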
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index abd2fc067736..c8e4bb714944 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -31,11 +31,13 @@
 #include <asm/exception.h>
 #include <asm/unistd.h>
 #include <asm/traps.h>
+#include <asm/ptrace.h>
 #include <asm/unwind.h>
 #include <asm/tls.h>
 #include <asm/system_misc.h>
 #include <asm/opcodes.h>
 
+
 static const char *handler[]= {
 	"prefetch abort",
 	"data abort",
@@ -184,7 +186,7 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 		tsk = current;
 
 	if (regs) {
-		fp = regs->ARM_fp;
+		fp = frame_pointer(regs);
 		mode = processor_mode(regs);
 	} else if (tsk != current) {
 		fp = thread_saved_fp(tsk);
@@ -719,7 +721,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 		dump_instr("", regs);
 		if (user_mode(regs)) {
 			__show_regs(regs);
-			c_backtrace(regs->ARM_fp, processor_mode(regs));
+			c_backtrace(frame_pointer(regs), processor_mode(regs));
 		}
 	}
 #endif
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index e67682f02cb2..a61a1dfbb0db 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -479,12 +479,10 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 		tsk = current;
 
 	if (regs) {
-		frame.fp = regs->ARM_fp;
-		frame.sp = regs->ARM_sp;
-		frame.lr = regs->ARM_lr;
+		arm_get_current_stackframe(regs, &frame);
 		/* PC might be corrupted, use LR in that case. */
-		frame.pc = kernel_text_address(regs->ARM_pc)
-			 ? regs->ARM_pc : regs->ARM_lr;
+		if (!kernel_text_address(regs->ARM_pc))
+			frame.pc = regs->ARM_lr;
 	} else if (tsk == current) {
 		frame.fp = (unsigned long)__builtin_frame_address(0);
 		frame.sp = current_sp;
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 7bcee5c9b604..6f57cb94367f 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -318,7 +318,6 @@ SECTIONS
 	_end = .;
 
 	STABS_DEBUG
-	.comment 0 : { *(.comment) }
 }
 
 /*
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index b23a59c1c522..70bf49b8b244 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -274,13 +274,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
 int __attribute_const__ kvm_target_cpu(void)
 {
-	unsigned long implementor = read_cpuid_implementor();
-	unsigned long part_number = read_cpuid_part_number();
-
-	if (implementor != ARM_CPU_IMP_ARM)
-		return -EINVAL;
-
-	switch (part_number) {
+	switch (read_cpuid_part()) {
 	case ARM_CPU_PART_CORTEX_A7:
 		return KVM_ARM_TARGET_CORTEX_A7;
 	case ARM_CPU_PART_CORTEX_A15:
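A sketch of the helper this hunk switches to, inferred from the merge description ("CPU part number checks ... should always include the vendor field"): read_cpuid_part() keeps the implementer bits, so the part constants now encode the vendor too and the separate ARM_CPU_IMP_ARM test becomes unnecessary. Mask value and body below are my reconstruction, not quoted from the patch:

#define EXAMPLE_CPU_PART_MASK	0xff00fff0	/* implementer + part number */

static inline unsigned int example_read_cpuid_part(void)
{
	/* compare against constants like ARM_CPU_PART_CORTEX_A15,
	 * which now carry the implementer in their top byte */
	return read_cpuid_id() & EXAMPLE_CPU_PART_MASK;
}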
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index 1b9844d369cc..b2d229f09c07 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -17,6 +17,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/unified.h>
 #include <asm/asm-offsets.h>
 #include <asm/kvm_asm.h>
@@ -134,7 +135,7 @@ phase2:
 	ldr	r0, =TRAMPOLINE_VA
 	adr	r1, target
 	bfi	r0, r1, #0, #PAGE_SHIFT
-	mov	pc, r0
+	ret	r0
 
 target: @ We're now in the trampoline code, switch page tables
 	mcrr	p15, 4, r2, r3, c2
diff --git a/arch/arm/lib/ashldi3.S b/arch/arm/lib/ashldi3.S
index 638deb13da1c..b05e95840651 100644
--- a/arch/arm/lib/ashldi3.S
+++ b/arch/arm/lib/ashldi3.S
@@ -27,6 +27,7 @@ Boston, MA 02110-1301, USA.  */
 
 
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 
 #ifdef __ARMEB__
 #define al r1
@@ -47,7 +48,7 @@ ENTRY(__aeabi_llsl)
 THUMB(	lsrmi	r3, al, ip		)
 THUMB(	orrmi	ah, ah, r3		)
 	mov	al, al, lsl r2
-	mov	pc, lr
+	ret	lr
 
 ENDPROC(__ashldi3)
 ENDPROC(__aeabi_llsl)
diff --git a/arch/arm/lib/ashrdi3.S b/arch/arm/lib/ashrdi3.S
index 015e8aa5a1d1..275d7d2341a4 100644
--- a/arch/arm/lib/ashrdi3.S
+++ b/arch/arm/lib/ashrdi3.S
@@ -27,6 +27,7 @@ Boston, MA 02110-1301, USA.  */
 
 
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 
 #ifdef __ARMEB__
 #define al r1
@@ -47,7 +48,7 @@ ENTRY(__aeabi_lasr)
 THUMB(	lslmi	r3, ah, ip		)
 THUMB(	orrmi	al, al, r3		)
 	mov	ah, ah, asr r2
-	mov	pc, lr
+	ret	lr
 
 ENDPROC(__ashrdi3)
 ENDPROC(__aeabi_lasr)
diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S
index 4102be617fce..fab5a50503ae 100644
--- a/arch/arm/lib/backtrace.S
+++ b/arch/arm/lib/backtrace.S
@@ -25,7 +25,7 @@
 ENTRY(c_backtrace)
 
 #if !defined(CONFIG_FRAME_POINTER) || !defined(CONFIG_PRINTK)
-	mov	pc, lr
+	ret	lr
 ENDPROC(c_backtrace)
 #else
 	stmfd	sp!, {r4 - r8, lr}	@ Save an extra register so we have a location...
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index 9f12ed1eea86..7d807cfd8ef5 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -1,3 +1,4 @@
+#include <asm/assembler.h>
 #include <asm/unwind.h>
 
 #if __LINUX_ARM_ARCH__ >= 6
@@ -70,7 +71,7 @@ UNWIND(	.fnstart	)
 	\instr	r2, r2, r3
 	str	r2, [r1, r0, lsl #2]
 	restore_irqs ip
-	mov	pc, lr
+	ret	lr
 UNWIND(	.fnend		)
 ENDPROC(\name		)
 	.endm
@@ -98,7 +99,7 @@ UNWIND(	.fnstart	)
 	\store	r2, [r1]
 	moveq	r0, #0
 	restore_irqs ip
-	mov	pc, lr
+	ret	lr
 UNWIND(	.fnend		)
 ENDPROC(\name		)
 	.endm
diff --git a/arch/arm/lib/bswapsdi2.S b/arch/arm/lib/bswapsdi2.S
index 9fcdd154eff9..07cda737bb11 100644
--- a/arch/arm/lib/bswapsdi2.S
+++ b/arch/arm/lib/bswapsdi2.S
@@ -1,4 +1,5 @@
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 
 #if __LINUX_ARM_ARCH__ >= 6
 ENTRY(__bswapsi2)
@@ -18,7 +19,7 @@ ENTRY(__bswapsi2)
 	mov r3, r3, lsr #8
 	bic r3, r3, #0xff00
 	eor r0, r3, r0, ror #8
-	mov pc, lr
+	ret lr
 ENDPROC(__bswapsi2)
 
 ENTRY(__bswapdi2)
@@ -31,6 +32,6 @@ ENTRY(__bswapdi2)
 	bic r1, r1, #0xff00
 	eor r1, r1, r0, ror #8
 	eor r0, r3, ip, ror #8
-	mov pc, lr
+	ret lr
 ENDPROC(__bswapdi2)
 #endif
diff --git a/arch/arm/lib/call_with_stack.S b/arch/arm/lib/call_with_stack.S
index 916c80f13ae7..ed1a421813cb 100644
--- a/arch/arm/lib/call_with_stack.S
+++ b/arch/arm/lib/call_with_stack.S
@@ -36,9 +36,9 @@ ENTRY(call_with_stack)
 	mov	r0, r1
 
 	adr	lr, BSYM(1f)
-	mov	pc, r2
+	ret	r2
 
 1:	ldr	lr, [sp]
 	ldr	sp, [sp, #4]
-	mov	pc, lr
+	ret	lr
 ENDPROC(call_with_stack)
diff --git a/arch/arm/lib/csumpartial.S b/arch/arm/lib/csumpartial.S
index 31d3cb34740d..984e0f29d548 100644
--- a/arch/arm/lib/csumpartial.S
+++ b/arch/arm/lib/csumpartial.S
@@ -97,7 +97,7 @@ td3	.req	lr
 #endif
 #endif
 	adcnes	sum, sum, td0		@ update checksum
-	mov	pc, lr
+	ret	lr
 
 ENTRY(csum_partial)
 	stmfd	sp!, {buf, lr}
diff --git a/arch/arm/lib/csumpartialcopygeneric.S b/arch/arm/lib/csumpartialcopygeneric.S
index d6e742d24007..10b45909610c 100644
--- a/arch/arm/lib/csumpartialcopygeneric.S
+++ b/arch/arm/lib/csumpartialcopygeneric.S
@@ -7,6 +7,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <asm/assembler.h>
 
 /*
  * unsigned int
@@ -40,7 +41,7 @@ sum	.req	r3
 		adcs	sum, sum, ip, put_byte_1	@ update checksum
 		strb	ip, [dst], #1
 		tst	dst, #2
-		moveq	pc, lr			@ dst is now 32bit aligned
+		reteq	lr			@ dst is now 32bit aligned
 
 .Ldst_16bit:	load2b	r8, ip
 		sub	len, len, #2
@@ -48,7 +49,7 @@ sum	.req	r3
 		strb	r8, [dst], #1
 		adcs	sum, sum, ip, put_byte_1
 		strb	ip, [dst], #1
-		mov	pc, lr			@ dst is now 32bit aligned
+		ret	lr			@ dst is now 32bit aligned
 
 		/*
 		 * Handle 0 to 7 bytes, with any alignment of source and
diff --git a/arch/arm/lib/delay-loop.S b/arch/arm/lib/delay-loop.S
index bc1033b897b4..518bf6e93f78 100644
--- a/arch/arm/lib/delay-loop.S
+++ b/arch/arm/lib/delay-loop.S
@@ -35,7 +35,7 @@ ENTRY(__loop_const_udelay)			@ 0 <= r0 <= 0x7fffff06
 	mul	r0, r2, r0		@ max = 2^32-1
 	add	r0, r0, r1, lsr #32-6
 	movs	r0, r0, lsr #6
-	moveq	pc, lr
+	reteq	lr
 
 /*
  * loops = r0 * HZ * loops_per_jiffy / 1000000
@@ -46,23 +46,23 @@ ENTRY(__loop_const_udelay)			@ 0 <= r0 <= 0x7fffff06
 ENTRY(__loop_delay)
 	subs	r0, r0, #1
 #if 0
-	movls	pc, lr
+	retls	lr
 	subs	r0, r0, #1
-	movls	pc, lr
+	retls	lr
 	subs	r0, r0, #1
-	movls	pc, lr
+	retls	lr
 	subs	r0, r0, #1
-	movls	pc, lr
+	retls	lr
 	subs	r0, r0, #1
-	movls	pc, lr
+	retls	lr
 	subs	r0, r0, #1
-	movls	pc, lr
+	retls	lr
 	subs	r0, r0, #1
-	movls	pc, lr
+	retls	lr
 	subs	r0, r0, #1
 #endif
 	bhi	__loop_delay
-	mov	pc, lr
+	ret	lr
 ENDPROC(__loop_udelay)
 ENDPROC(__loop_const_udelay)
 ENDPROC(__loop_delay)
diff --git a/arch/arm/lib/div64.S b/arch/arm/lib/div64.S
index e55c4842c290..a9eafe4981eb 100644
--- a/arch/arm/lib/div64.S
+++ b/arch/arm/lib/div64.S
@@ -13,6 +13,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/unwind.h>
 
 #ifdef __ARMEB__
@@ -97,7 +98,7 @@ UNWIND(.fnstart)
 	mov	yl, #0
 	cmpeq	xl, r4
 	movlo	xh, xl
-	movlo	pc, lr
+	retlo	lr
 
 	@ The division loop for lower bit positions.
 	@ Here we shift remainer bits leftwards rather than moving the
@@ -111,14 +112,14 @@ UNWIND(.fnstart)
 	subcs	xh, xh, r4
 	movs	ip, ip, lsr #1
 	bne	4b
-	mov	pc, lr
+	ret	lr
 
 	@ The top part of remainder became zero.  If carry is set
 	@ (the 33th bit) this is a false positive so resume the loop.
 	@ Otherwise, if lower part is also null then we are done.
 6:	bcs	5b
 	cmp	xl, #0
-	moveq	pc, lr
+	reteq	lr
 
 	@ We still have remainer bits in the low part.  Bring them up.
 
@@ -144,7 +145,7 @@ UNWIND(.fnstart)
 	movs	ip, ip, lsr #1
 	mov	xh, #1
 	bne	4b
-	mov	pc, lr
+	ret	lr
 
 8:	@ Division by a power of 2: determine what that divisor order is
 	@ then simply shift values around
@@ -184,13 +185,13 @@ UNWIND(.fnstart)
 THUMB(	orr	yl, yl, xh	)
 	mov	xh, xl, lsl ip
 	mov	xh, xh, lsr ip
-	mov	pc, lr
+	ret	lr
 
 	@ eq -> division by 1: obvious enough...
 9:	moveq	yl, xl
 	moveq	yh, xh
 	moveq	xh, #0
-	moveq	pc, lr
+	reteq	lr
 UNWIND(.fnend)
 
 UNWIND(.fnstart)
diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S
index 64f6bc1a9132..7848780e8834 100644
--- a/arch/arm/lib/findbit.S
+++ b/arch/arm/lib/findbit.S
@@ -35,7 +35,7 @@ ENTRY(_find_first_zero_bit_le)
352: cmp r2, r1 @ any more? 352: cmp r2, r1 @ any more?
36 blo 1b 36 blo 1b
373: mov r0, r1 @ no free bits 373: mov r0, r1 @ no free bits
38 mov pc, lr 38 ret lr
39ENDPROC(_find_first_zero_bit_le) 39ENDPROC(_find_first_zero_bit_le)
40 40
41/* 41/*
@@ -76,7 +76,7 @@ ENTRY(_find_first_bit_le)
762: cmp r2, r1 @ any more? 762: cmp r2, r1 @ any more?
77 blo 1b 77 blo 1b
783: mov r0, r1 @ no free bits 783: mov r0, r1 @ no free bits
79 mov pc, lr 79 ret lr
80ENDPROC(_find_first_bit_le) 80ENDPROC(_find_first_bit_le)
81 81
82/* 82/*
@@ -114,7 +114,7 @@ ENTRY(_find_first_zero_bit_be)
1142: cmp r2, r1 @ any more? 1142: cmp r2, r1 @ any more?
115 blo 1b 115 blo 1b
1163: mov r0, r1 @ no free bits 1163: mov r0, r1 @ no free bits
117 mov pc, lr 117 ret lr
118ENDPROC(_find_first_zero_bit_be) 118ENDPROC(_find_first_zero_bit_be)
119 119
120ENTRY(_find_next_zero_bit_be) 120ENTRY(_find_next_zero_bit_be)
@@ -148,7 +148,7 @@ ENTRY(_find_first_bit_be)
1482: cmp r2, r1 @ any more? 1482: cmp r2, r1 @ any more?
149 blo 1b 149 blo 1b
1503: mov r0, r1 @ no free bits 1503: mov r0, r1 @ no free bits
151 mov pc, lr 151 ret lr
152ENDPROC(_find_first_bit_be) 152ENDPROC(_find_first_bit_be)
153 153
154ENTRY(_find_next_bit_be) 154ENTRY(_find_next_bit_be)
@@ -192,5 +192,5 @@ ENDPROC(_find_next_bit_be)
192#endif 192#endif
193 cmp r1, r0 @ Clamp to maxbit 193 cmp r1, r0 @ Clamp to maxbit
194 movlo r0, r1 194 movlo r0, r1
195 mov pc, lr 195 ret lr
196 196
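
These find-bit routines back the generic bitmap API on ARM; only their return sequences change here. A minimal usage sketch (the allocator shape is illustrative):

	#include <linux/bitops.h>
	#include <linux/errno.h>

	static int alloc_slot(unsigned long *map, unsigned int nbits)
	{
		unsigned int bit = find_first_zero_bit(map, nbits);

		if (bit >= nbits)	/* returns nbits when no zero bit exists */
			return -ENOSPC;
		set_bit(bit, map);
		return bit;
	}
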
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 9b06bb41fca6..938600098b88 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -18,7 +18,7 @@
18 * Inputs: r0 contains the address 18 * Inputs: r0 contains the address
19 * r1 contains the address limit, which must be preserved 19 * r1 contains the address limit, which must be preserved
20 * Outputs: r0 is the error code 20 * Outputs: r0 is the error code
21 * r2 contains the zero-extended value 21 * r2, r3 contain the zero-extended value
22 * lr corrupted 22 * lr corrupted
23 * 23 *
24 * No other registers must be altered. (see <asm/uaccess.h> 24 * No other registers must be altered. (see <asm/uaccess.h>
@@ -36,7 +36,7 @@ ENTRY(__get_user_1)
36 check_uaccess r0, 1, r1, r2, __get_user_bad 36 check_uaccess r0, 1, r1, r2, __get_user_bad
371: TUSER(ldrb) r2, [r0] 371: TUSER(ldrb) r2, [r0]
38 mov r0, #0 38 mov r0, #0
39 mov pc, lr 39 ret lr
40ENDPROC(__get_user_1) 40ENDPROC(__get_user_1)
41 41
42ENTRY(__get_user_2) 42ENTRY(__get_user_2)
@@ -56,25 +56,60 @@ rb .req r0
56 orr r2, rb, r2, lsl #8 56 orr r2, rb, r2, lsl #8
57#endif 57#endif
58 mov r0, #0 58 mov r0, #0
59 mov pc, lr 59 ret lr
60ENDPROC(__get_user_2) 60ENDPROC(__get_user_2)
61 61
62ENTRY(__get_user_4) 62ENTRY(__get_user_4)
63 check_uaccess r0, 4, r1, r2, __get_user_bad 63 check_uaccess r0, 4, r1, r2, __get_user_bad
644: TUSER(ldr) r2, [r0] 644: TUSER(ldr) r2, [r0]
65 mov r0, #0 65 mov r0, #0
66 mov pc, lr 66 ret lr
67ENDPROC(__get_user_4) 67ENDPROC(__get_user_4)
68 68
69ENTRY(__get_user_8)
70 check_uaccess r0, 8, r1, r2, __get_user_bad
71#ifdef CONFIG_THUMB2_KERNEL
725: TUSER(ldr) r2, [r0]
736: TUSER(ldr) r3, [r0, #4]
74#else
755: TUSER(ldr) r2, [r0], #4
766: TUSER(ldr) r3, [r0]
77#endif
78 mov r0, #0
79 ret lr
80ENDPROC(__get_user_8)
81
82#ifdef __ARMEB__
83ENTRY(__get_user_lo8)
84 check_uaccess r0, 8, r1, r2, __get_user_bad
85#ifdef CONFIG_CPU_USE_DOMAINS
86 add r0, r0, #4
877: ldrt r2, [r0]
88#else
897: ldr r2, [r0, #4]
90#endif
91 mov r0, #0
92 ret lr
93ENDPROC(__get_user_lo8)
94#endif
95
96__get_user_bad8:
97 mov r3, #0
69__get_user_bad: 98__get_user_bad:
70 mov r2, #0 99 mov r2, #0
71 mov r0, #-EFAULT 100 mov r0, #-EFAULT
72 mov pc, lr 101 ret lr
73ENDPROC(__get_user_bad) 102ENDPROC(__get_user_bad)
103ENDPROC(__get_user_bad8)
74 104
75.pushsection __ex_table, "a" 105.pushsection __ex_table, "a"
76 .long 1b, __get_user_bad 106 .long 1b, __get_user_bad
77 .long 2b, __get_user_bad 107 .long 2b, __get_user_bad
78 .long 3b, __get_user_bad 108 .long 3b, __get_user_bad
79 .long 4b, __get_user_bad 109 .long 4b, __get_user_bad
110 .long 5b, __get_user_bad8
111 .long 6b, __get_user_bad8
112#ifdef __ARMEB__
113 .long 7b, __get_user_bad
114#endif
80.popsection 115.popsection
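
Besides the ret conversions, this hunk adds the 8-byte __get_user_8 (returning the value in r2/r3, per the updated comment) plus exception-table entries, so a fault on either word lands in __get_user_bad8/__get_user_bad with both result registers cleared and -EFAULT returned. What that enables at the C level, as a sketch (the function is illustrative):

	#include <linux/types.h>
	#include <linux/uaccess.h>

	static int read_user_u64(u64 __user *uptr, u64 *out)
	{
		u64 val;

		if (get_user(val, uptr))	/* -EFAULT on a bad pointer */
			return -EFAULT;
		*out = val;
		return 0;
	}
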
diff --git a/arch/arm/lib/io-readsb.S b/arch/arm/lib/io-readsb.S
index 9f4238987fe9..c31b2f3153f1 100644
--- a/arch/arm/lib/io-readsb.S
+++ b/arch/arm/lib/io-readsb.S
@@ -25,7 +25,7 @@
25 25
26ENTRY(__raw_readsb) 26ENTRY(__raw_readsb)
27 teq r2, #0 @ do we have to check for the zero len? 27 teq r2, #0 @ do we have to check for the zero len?
28 moveq pc, lr 28 reteq lr
29 ands ip, r1, #3 29 ands ip, r1, #3
30 bne .Linsb_align 30 bne .Linsb_align
31 31
diff --git a/arch/arm/lib/io-readsl.S b/arch/arm/lib/io-readsl.S
index 7a7430950c79..2ed86fa5465f 100644
--- a/arch/arm/lib/io-readsl.S
+++ b/arch/arm/lib/io-readsl.S
@@ -12,7 +12,7 @@
12 12
13ENTRY(__raw_readsl) 13ENTRY(__raw_readsl)
14 teq r2, #0 @ do we have to check for the zero len? 14 teq r2, #0 @ do we have to check for the zero len?
15 moveq pc, lr 15 reteq lr
16 ands ip, r1, #3 16 ands ip, r1, #3
17 bne 3f 17 bne 3f
18 18
@@ -33,7 +33,7 @@ ENTRY(__raw_readsl)
33 stmcsia r1!, {r3, ip} 33 stmcsia r1!, {r3, ip}
34 ldrne r3, [r0, #0] 34 ldrne r3, [r0, #0]
35 strne r3, [r1, #0] 35 strne r3, [r1, #0]
36 mov pc, lr 36 ret lr
37 37
383: ldr r3, [r0] 383: ldr r3, [r0]
39 cmp ip, #2 39 cmp ip, #2
@@ -75,5 +75,5 @@ ENTRY(__raw_readsl)
75 strb r3, [r1, #1] 75 strb r3, [r1, #1]
768: mov r3, ip, get_byte_0 768: mov r3, ip, get_byte_0
77 strb r3, [r1, #0] 77 strb r3, [r1, #0]
78 mov pc, lr 78 ret lr
79ENDPROC(__raw_readsl) 79ENDPROC(__raw_readsl)
diff --git a/arch/arm/lib/io-readsw-armv3.S b/arch/arm/lib/io-readsw-armv3.S
index 88487c8c4f23..413da9914529 100644
--- a/arch/arm/lib/io-readsw-armv3.S
+++ b/arch/arm/lib/io-readsw-armv3.S
@@ -27,11 +27,11 @@
27 strb r3, [r1], #1 27 strb r3, [r1], #1
28 28
29 subs r2, r2, #1 29 subs r2, r2, #1
30 moveq pc, lr 30 reteq lr
31 31
32ENTRY(__raw_readsw) 32ENTRY(__raw_readsw)
33 teq r2, #0 @ do we have to check for the zero len? 33 teq r2, #0 @ do we have to check for the zero len?
34 moveq pc, lr 34 reteq lr
35 tst r1, #3 35 tst r1, #3
36 bne .Linsw_align 36 bne .Linsw_align
37 37
diff --git a/arch/arm/lib/io-readsw-armv4.S b/arch/arm/lib/io-readsw-armv4.S
index 1f393d42593d..d9a45e9692ae 100644
--- a/arch/arm/lib/io-readsw-armv4.S
+++ b/arch/arm/lib/io-readsw-armv4.S
@@ -26,7 +26,7 @@
26 26
27ENTRY(__raw_readsw) 27ENTRY(__raw_readsw)
28 teq r2, #0 28 teq r2, #0
29 moveq pc, lr 29 reteq lr
30 tst r1, #3 30 tst r1, #3
31 bne .Linsw_align 31 bne .Linsw_align
32 32
diff --git a/arch/arm/lib/io-writesb.S b/arch/arm/lib/io-writesb.S
index 68b92f4acaeb..a46bbc9b168b 100644
--- a/arch/arm/lib/io-writesb.S
+++ b/arch/arm/lib/io-writesb.S
@@ -45,7 +45,7 @@
45 45
46ENTRY(__raw_writesb) 46ENTRY(__raw_writesb)
47 teq r2, #0 @ do we have to check for the zero len? 47 teq r2, #0 @ do we have to check for the zero len?
48 moveq pc, lr 48 reteq lr
49 ands ip, r1, #3 49 ands ip, r1, #3
50 bne .Loutsb_align 50 bne .Loutsb_align
51 51
diff --git a/arch/arm/lib/io-writesl.S b/arch/arm/lib/io-writesl.S
index d0d104a0dd11..4ea2435988c1 100644
--- a/arch/arm/lib/io-writesl.S
+++ b/arch/arm/lib/io-writesl.S
@@ -12,7 +12,7 @@
12 12
13ENTRY(__raw_writesl) 13ENTRY(__raw_writesl)
14 teq r2, #0 @ do we have to check for the zero len? 14 teq r2, #0 @ do we have to check for the zero len?
15 moveq pc, lr 15 reteq lr
16 ands ip, r1, #3 16 ands ip, r1, #3
17 bne 3f 17 bne 3f
18 18
@@ -33,7 +33,7 @@ ENTRY(__raw_writesl)
33 ldrne r3, [r1, #0] 33 ldrne r3, [r1, #0]
34 strcs ip, [r0, #0] 34 strcs ip, [r0, #0]
35 strne r3, [r0, #0] 35 strne r3, [r0, #0]
36 mov pc, lr 36 ret lr
37 37
383: bic r1, r1, #3 383: bic r1, r1, #3
39 ldr r3, [r1], #4 39 ldr r3, [r1], #4
@@ -47,7 +47,7 @@ ENTRY(__raw_writesl)
47 orr ip, ip, r3, lspush #16 47 orr ip, ip, r3, lspush #16
48 str ip, [r0] 48 str ip, [r0]
49 bne 4b 49 bne 4b
50 mov pc, lr 50 ret lr
51 51
525: mov ip, r3, lspull #8 525: mov ip, r3, lspull #8
53 ldr r3, [r1], #4 53 ldr r3, [r1], #4
@@ -55,7 +55,7 @@ ENTRY(__raw_writesl)
55 orr ip, ip, r3, lspush #24 55 orr ip, ip, r3, lspush #24
56 str ip, [r0] 56 str ip, [r0]
57 bne 5b 57 bne 5b
58 mov pc, lr 58 ret lr
59 59
606: mov ip, r3, lspull #24 606: mov ip, r3, lspull #24
61 ldr r3, [r1], #4 61 ldr r3, [r1], #4
@@ -63,5 +63,5 @@ ENTRY(__raw_writesl)
63 orr ip, ip, r3, lspush #8 63 orr ip, ip, r3, lspush #8
64 str ip, [r0] 64 str ip, [r0]
65 bne 6b 65 bne 6b
66 mov pc, lr 66 ret lr
67ENDPROC(__raw_writesl) 67ENDPROC(__raw_writesl)
diff --git a/arch/arm/lib/io-writesw-armv3.S b/arch/arm/lib/io-writesw-armv3.S
index 49b800419e32..121789eb6802 100644
--- a/arch/arm/lib/io-writesw-armv3.S
+++ b/arch/arm/lib/io-writesw-armv3.S
@@ -28,11 +28,11 @@
28 orr r3, r3, r3, lsl #16 28 orr r3, r3, r3, lsl #16
29 str r3, [r0] 29 str r3, [r0]
30 subs r2, r2, #1 30 subs r2, r2, #1
31 moveq pc, lr 31 reteq lr
32 32
33ENTRY(__raw_writesw) 33ENTRY(__raw_writesw)
34 teq r2, #0 @ do we have to check for the zero len? 34 teq r2, #0 @ do we have to check for the zero len?
35 moveq pc, lr 35 reteq lr
36 tst r1, #3 36 tst r1, #3
37 bne .Loutsw_align 37 bne .Loutsw_align
38 38
diff --git a/arch/arm/lib/io-writesw-armv4.S b/arch/arm/lib/io-writesw-armv4.S
index ff4f71b579ee..269f90c51ad2 100644
--- a/arch/arm/lib/io-writesw-armv4.S
+++ b/arch/arm/lib/io-writesw-armv4.S
@@ -31,7 +31,7 @@
31 31
32ENTRY(__raw_writesw) 32ENTRY(__raw_writesw)
33 teq r2, #0 33 teq r2, #0
34 moveq pc, lr 34 reteq lr
35 ands r3, r1, #3 35 ands r3, r1, #3
36 bne .Loutsw_align 36 bne .Loutsw_align
37 37
@@ -96,5 +96,5 @@ ENTRY(__raw_writesw)
96 tst r2, #1 96 tst r2, #1
973: movne ip, r3, lsr #8 973: movne ip, r3, lsr #8
98 strneh ip, [r0] 98 strneh ip, [r0]
99 mov pc, lr 99 ret lr
100ENDPROC(__raw_writesw) 100ENDPROC(__raw_writesw)
diff --git a/arch/arm/lib/lib1funcs.S b/arch/arm/lib/lib1funcs.S
index c562f649734c..947567ff67f9 100644
--- a/arch/arm/lib/lib1funcs.S
+++ b/arch/arm/lib/lib1funcs.S
@@ -210,7 +210,7 @@ ENTRY(__aeabi_uidiv)
210UNWIND(.fnstart) 210UNWIND(.fnstart)
211 211
212 subs r2, r1, #1 212 subs r2, r1, #1
213 moveq pc, lr 213 reteq lr
214 bcc Ldiv0 214 bcc Ldiv0
215 cmp r0, r1 215 cmp r0, r1
216 bls 11f 216 bls 11f
@@ -220,16 +220,16 @@ UNWIND(.fnstart)
220 ARM_DIV_BODY r0, r1, r2, r3 220 ARM_DIV_BODY r0, r1, r2, r3
221 221
222 mov r0, r2 222 mov r0, r2
223 mov pc, lr 223 ret lr
224 224
22511: moveq r0, #1 22511: moveq r0, #1
226 movne r0, #0 226 movne r0, #0
227 mov pc, lr 227 ret lr
228 228
22912: ARM_DIV2_ORDER r1, r2 22912: ARM_DIV2_ORDER r1, r2
230 230
231 mov r0, r0, lsr r2 231 mov r0, r0, lsr r2
232 mov pc, lr 232 ret lr
233 233
234UNWIND(.fnend) 234UNWIND(.fnend)
235ENDPROC(__udivsi3) 235ENDPROC(__udivsi3)
@@ -244,11 +244,11 @@ UNWIND(.fnstart)
244 moveq r0, #0 244 moveq r0, #0
245 tsthi r1, r2 @ see if divisor is power of 2 245 tsthi r1, r2 @ see if divisor is power of 2
246 andeq r0, r0, r2 246 andeq r0, r0, r2
247 movls pc, lr 247 retls lr
248 248
249 ARM_MOD_BODY r0, r1, r2, r3 249 ARM_MOD_BODY r0, r1, r2, r3
250 250
251 mov pc, lr 251 ret lr
252 252
253UNWIND(.fnend) 253UNWIND(.fnend)
254ENDPROC(__umodsi3) 254ENDPROC(__umodsi3)
@@ -274,23 +274,23 @@ UNWIND(.fnstart)
274 274
275 cmp ip, #0 275 cmp ip, #0
276 rsbmi r0, r0, #0 276 rsbmi r0, r0, #0
277 mov pc, lr 277 ret lr
278 278
27910: teq ip, r0 @ same sign ? 27910: teq ip, r0 @ same sign ?
280 rsbmi r0, r0, #0 280 rsbmi r0, r0, #0
281 mov pc, lr 281 ret lr
282 282
28311: movlo r0, #0 28311: movlo r0, #0
284 moveq r0, ip, asr #31 284 moveq r0, ip, asr #31
285 orreq r0, r0, #1 285 orreq r0, r0, #1
286 mov pc, lr 286 ret lr
287 287
28812: ARM_DIV2_ORDER r1, r2 28812: ARM_DIV2_ORDER r1, r2
289 289
290 cmp ip, #0 290 cmp ip, #0
291 mov r0, r3, lsr r2 291 mov r0, r3, lsr r2
292 rsbmi r0, r0, #0 292 rsbmi r0, r0, #0
293 mov pc, lr 293 ret lr
294 294
295UNWIND(.fnend) 295UNWIND(.fnend)
296ENDPROC(__divsi3) 296ENDPROC(__divsi3)
@@ -315,7 +315,7 @@ UNWIND(.fnstart)
315 315
31610: cmp ip, #0 31610: cmp ip, #0
317 rsbmi r0, r0, #0 317 rsbmi r0, r0, #0
318 mov pc, lr 318 ret lr
319 319
320UNWIND(.fnend) 320UNWIND(.fnend)
321ENDPROC(__modsi3) 321ENDPROC(__modsi3)
@@ -331,7 +331,7 @@ UNWIND(.save {r0, r1, ip, lr} )
331 ldmfd sp!, {r1, r2, ip, lr} 331 ldmfd sp!, {r1, r2, ip, lr}
332 mul r3, r0, r2 332 mul r3, r0, r2
333 sub r1, r1, r3 333 sub r1, r1, r3
334 mov pc, lr 334 ret lr
335 335
336UNWIND(.fnend) 336UNWIND(.fnend)
337ENDPROC(__aeabi_uidivmod) 337ENDPROC(__aeabi_uidivmod)
@@ -344,7 +344,7 @@ UNWIND(.save {r0, r1, ip, lr} )
344 ldmfd sp!, {r1, r2, ip, lr} 344 ldmfd sp!, {r1, r2, ip, lr}
345 mul r3, r0, r2 345 mul r3, r0, r2
346 sub r1, r1, r3 346 sub r1, r1, r3
347 mov pc, lr 347 ret lr
348 348
349UNWIND(.fnend) 349UNWIND(.fnend)
350ENDPROC(__aeabi_idivmod) 350ENDPROC(__aeabi_idivmod)
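
Nothing in this file is called directly from C: the compiler lowers 32-bit division and modulo to these __aeabi_* helpers on cores without a hardware divider. An illustrative sketch of code that would end up in __aeabi_uidivmod:

	/* both / and % below may compile to one __aeabi_uidivmod call */
	static unsigned int split(unsigned int total, unsigned int chunk,
				  unsigned int *rem)
	{
		*rem = total % chunk;
		return total / chunk;
	}
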
diff --git a/arch/arm/lib/lshrdi3.S b/arch/arm/lib/lshrdi3.S
index f83d449141f7..922dcd88b02b 100644
--- a/arch/arm/lib/lshrdi3.S
+++ b/arch/arm/lib/lshrdi3.S
@@ -27,6 +27,7 @@ Boston, MA 02110-1301, USA. */
27 27
28 28
29#include <linux/linkage.h> 29#include <linux/linkage.h>
30#include <asm/assembler.h>
30 31
31#ifdef __ARMEB__ 32#ifdef __ARMEB__
32#define al r1 33#define al r1
@@ -47,7 +48,7 @@ ENTRY(__aeabi_llsr)
47 THUMB( lslmi r3, ah, ip ) 48 THUMB( lslmi r3, ah, ip )
48 THUMB( orrmi al, al, r3 ) 49 THUMB( orrmi al, al, r3 )
49 mov ah, ah, lsr r2 50 mov ah, ah, lsr r2
50 mov pc, lr 51 ret lr
51 52
52ENDPROC(__lshrdi3) 53ENDPROC(__lshrdi3)
53ENDPROC(__aeabi_llsr) 54ENDPROC(__aeabi_llsr)
diff --git a/arch/arm/lib/memchr.S b/arch/arm/lib/memchr.S
index 1da86991d700..74a5bed6d999 100644
--- a/arch/arm/lib/memchr.S
+++ b/arch/arm/lib/memchr.S
@@ -22,5 +22,5 @@ ENTRY(memchr)
22 bne 1b 22 bne 1b
23 sub r0, r0, #1 23 sub r0, r0, #1
242: movne r0, #0 242: movne r0, #0
25 mov pc, lr 25 ret lr
26ENDPROC(memchr) 26ENDPROC(memchr)
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index 94b0650ea98f..671455c854fa 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -110,7 +110,7 @@ ENTRY(memset)
110 strneb r1, [ip], #1 110 strneb r1, [ip], #1
111 tst r2, #1 111 tst r2, #1
112 strneb r1, [ip], #1 112 strneb r1, [ip], #1
113 mov pc, lr 113 ret lr
114 114
1156: subs r2, r2, #4 @ 1 do we have enough 1156: subs r2, r2, #4 @ 1 do we have enough
116 blt 5b @ 1 bytes to align with? 116 blt 5b @ 1 bytes to align with?
diff --git a/arch/arm/lib/memzero.S b/arch/arm/lib/memzero.S
index 3fbdef5f802a..385ccb306fa2 100644
--- a/arch/arm/lib/memzero.S
+++ b/arch/arm/lib/memzero.S
@@ -121,5 +121,5 @@ ENTRY(__memzero)
121 strneb r2, [r0], #1 @ 1 121 strneb r2, [r0], #1 @ 1
122 tst r1, #1 @ 1 a byte left over 122 tst r1, #1 @ 1 a byte left over
123 strneb r2, [r0], #1 @ 1 123 strneb r2, [r0], #1 @ 1
124 mov pc, lr @ 1 124 ret lr @ 1
125ENDPROC(__memzero) 125ENDPROC(__memzero)
diff --git a/arch/arm/lib/muldi3.S b/arch/arm/lib/muldi3.S
index 36c91b4957e2..204305956925 100644
--- a/arch/arm/lib/muldi3.S
+++ b/arch/arm/lib/muldi3.S
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <asm/assembler.h>
14 15
15#ifdef __ARMEB__ 16#ifdef __ARMEB__
16#define xh r0 17#define xh r0
@@ -41,7 +42,7 @@ ENTRY(__aeabi_lmul)
41 adc xh, xh, yh, lsr #16 42 adc xh, xh, yh, lsr #16
42 adds xl, xl, ip, lsl #16 43 adds xl, xl, ip, lsl #16
43 adc xh, xh, ip, lsr #16 44 adc xh, xh, ip, lsr #16
44 mov pc, lr 45 ret lr
45 46
46ENDPROC(__muldi3) 47ENDPROC(__muldi3)
47ENDPROC(__aeabi_lmul) 48ENDPROC(__aeabi_lmul)
diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S
index 3d73dcb959b0..38d660d3705f 100644
--- a/arch/arm/lib/putuser.S
+++ b/arch/arm/lib/putuser.S
@@ -36,7 +36,7 @@ ENTRY(__put_user_1)
36 check_uaccess r0, 1, r1, ip, __put_user_bad 36 check_uaccess r0, 1, r1, ip, __put_user_bad
371: TUSER(strb) r2, [r0] 371: TUSER(strb) r2, [r0]
38 mov r0, #0 38 mov r0, #0
39 mov pc, lr 39 ret lr
40ENDPROC(__put_user_1) 40ENDPROC(__put_user_1)
41 41
42ENTRY(__put_user_2) 42ENTRY(__put_user_2)
@@ -60,14 +60,14 @@ ENTRY(__put_user_2)
60#endif 60#endif
61#endif /* CONFIG_THUMB2_KERNEL */ 61#endif /* CONFIG_THUMB2_KERNEL */
62 mov r0, #0 62 mov r0, #0
63 mov pc, lr 63 ret lr
64ENDPROC(__put_user_2) 64ENDPROC(__put_user_2)
65 65
66ENTRY(__put_user_4) 66ENTRY(__put_user_4)
67 check_uaccess r0, 4, r1, ip, __put_user_bad 67 check_uaccess r0, 4, r1, ip, __put_user_bad
684: TUSER(str) r2, [r0] 684: TUSER(str) r2, [r0]
69 mov r0, #0 69 mov r0, #0
70 mov pc, lr 70 ret lr
71ENDPROC(__put_user_4) 71ENDPROC(__put_user_4)
72 72
73ENTRY(__put_user_8) 73ENTRY(__put_user_8)
@@ -80,12 +80,12 @@ ENTRY(__put_user_8)
806: TUSER(str) r3, [r0] 806: TUSER(str) r3, [r0]
81#endif 81#endif
82 mov r0, #0 82 mov r0, #0
83 mov pc, lr 83 ret lr
84ENDPROC(__put_user_8) 84ENDPROC(__put_user_8)
85 85
86__put_user_bad: 86__put_user_bad:
87 mov r0, #-EFAULT 87 mov r0, #-EFAULT
88 mov pc, lr 88 ret lr
89ENDPROC(__put_user_bad) 89ENDPROC(__put_user_bad)
90 90
91.pushsection __ex_table, "a" 91.pushsection __ex_table, "a"
diff --git a/arch/arm/lib/strchr.S b/arch/arm/lib/strchr.S
index d8f2a1c1aea4..013d64c71e8d 100644
--- a/arch/arm/lib/strchr.S
+++ b/arch/arm/lib/strchr.S
@@ -23,5 +23,5 @@ ENTRY(strchr)
23 teq r2, r1 23 teq r2, r1
24 movne r0, #0 24 movne r0, #0
25 subeq r0, r0, #1 25 subeq r0, r0, #1
26 mov pc, lr 26 ret lr
27ENDPROC(strchr) 27ENDPROC(strchr)
diff --git a/arch/arm/lib/strrchr.S b/arch/arm/lib/strrchr.S
index 302f20cd2423..3cec1c7482c4 100644
--- a/arch/arm/lib/strrchr.S
+++ b/arch/arm/lib/strrchr.S
@@ -22,5 +22,5 @@ ENTRY(strrchr)
22 teq r2, #0 22 teq r2, #0
23 bne 1b 23 bne 1b
24 mov r0, r3 24 mov r0, r3
25 mov pc, lr 25 ret lr
26ENDPROC(strrchr) 26ENDPROC(strrchr)
diff --git a/arch/arm/lib/ucmpdi2.S b/arch/arm/lib/ucmpdi2.S
index f0df6a91db04..ad4a6309141a 100644
--- a/arch/arm/lib/ucmpdi2.S
+++ b/arch/arm/lib/ucmpdi2.S
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <asm/assembler.h>
14 15
15#ifdef __ARMEB__ 16#ifdef __ARMEB__
16#define xh r0 17#define xh r0
@@ -31,7 +32,7 @@ ENTRY(__ucmpdi2)
31 movlo r0, #0 32 movlo r0, #0
32 moveq r0, #1 33 moveq r0, #1
33 movhi r0, #2 34 movhi r0, #2
34 mov pc, lr 35 ret lr
35 36
36ENDPROC(__ucmpdi2) 37ENDPROC(__ucmpdi2)
37 38
@@ -44,7 +45,7 @@ ENTRY(__aeabi_ulcmp)
44 movlo r0, #-1 45 movlo r0, #-1
45 moveq r0, #0 46 moveq r0, #0
46 movhi r0, #1 47 movhi r0, #1
47 mov pc, lr 48 ret lr
48 49
49ENDPROC(__aeabi_ulcmp) 50ENDPROC(__aeabi_ulcmp)
50 51
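
The two comparison helpers use different return-value conventions, which the conditional moves above encode. Reference semantics in C, for illustration only (gcc emits calls to __ucmpdi2, the EABI runtime to __aeabi_ulcmp):

	static int ucmpdi2_ref(unsigned long long a, unsigned long long b)
	{
		return (a < b) ? 0 : (a == b) ? 1 : 2;
	}

	static int aeabi_ulcmp_ref(unsigned long long a, unsigned long long b)
	{
		return (a < b) ? -1 : (a == b) ? 0 : 1;
	}
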
diff --git a/arch/arm/mach-davinci/sleep.S b/arch/arm/mach-davinci/sleep.S
index d4e9316ecacb..a5336a5e2739 100644
--- a/arch/arm/mach-davinci/sleep.S
+++ b/arch/arm/mach-davinci/sleep.S
@@ -213,7 +213,7 @@ ddr2clk_stop_done:
213 cmp ip, r0 213 cmp ip, r0
214 bne ddr2clk_stop_done 214 bne ddr2clk_stop_done
215 215
216 mov pc, lr 216 ret lr
217ENDPROC(davinci_ddr_psc_config) 217ENDPROC(davinci_ddr_psc_config)
218 218
219CACHE_FLUSH: 219CACHE_FLUSH:
diff --git a/arch/arm/mach-ebsa110/include/mach/memory.h b/arch/arm/mach-ebsa110/include/mach/memory.h
index 8e49066ad850..866f8a1c6ff7 100644
--- a/arch/arm/mach-ebsa110/include/mach/memory.h
+++ b/arch/arm/mach-ebsa110/include/mach/memory.h
@@ -17,11 +17,6 @@
17#define __ASM_ARCH_MEMORY_H 17#define __ASM_ARCH_MEMORY_H
18 18
19/* 19/*
20 * Physical DRAM offset.
21 */
22#define PLAT_PHYS_OFFSET UL(0x00000000)
23
24/*
25 * Cache flushing area - SRAM 20 * Cache flushing area - SRAM
26 */ 21 */
27#define FLUSH_BASE_PHYS 0x40000000 22#define FLUSH_BASE_PHYS 0x40000000
diff --git a/arch/arm/mach-ep93xx/crunch-bits.S b/arch/arm/mach-ep93xx/crunch-bits.S
index e96923a3017b..ee0be2af5c61 100644
--- a/arch/arm/mach-ep93xx/crunch-bits.S
+++ b/arch/arm/mach-ep93xx/crunch-bits.S
@@ -198,7 +198,7 @@ crunch_load:
198 get_thread_info r10 198 get_thread_info r10
199#endif 199#endif
2002: dec_preempt_count r10, r3 2002: dec_preempt_count r10, r3
201 mov pc, lr 201 ret lr
202 202
203/* 203/*
204 * Back up crunch regs to save area and disable access to them 204 * Back up crunch regs to save area and disable access to them
@@ -277,7 +277,7 @@ ENTRY(crunch_task_copy)
277 mov r3, lr @ preserve return address 277 mov r3, lr @ preserve return address
278 bl crunch_save 278 bl crunch_save
279 msr cpsr_c, ip @ restore interrupt mode 279 msr cpsr_c, ip @ restore interrupt mode
280 mov pc, r3 280 ret r3
281 281
282/* 282/*
283 * Restore crunch state from given memory address 283 * Restore crunch state from given memory address
@@ -310,4 +310,4 @@ ENTRY(crunch_task_restore)
310 mov r3, lr @ preserve return address 310 mov r3, lr @ preserve return address
311 bl crunch_load 311 bl crunch_load
312 msr cpsr_c, ip @ restore interrupt mode 312 msr cpsr_c, ip @ restore interrupt mode
313 mov pc, r3 313 ret r3
diff --git a/arch/arm/mach-ep93xx/include/mach/memory.h b/arch/arm/mach-ep93xx/include/mach/memory.h
deleted file mode 100644
index c9400cf0051c..000000000000
--- a/arch/arm/mach-ep93xx/include/mach/memory.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * arch/arm/mach-ep93xx/include/mach/memory.h
3 */
4
5#ifndef __ASM_ARCH_MEMORY_H
6#define __ASM_ARCH_MEMORY_H
7
8#if defined(CONFIG_EP93XX_SDCE3_SYNC_PHYS_OFFSET)
9#define PLAT_PHYS_OFFSET UL(0x00000000)
10#elif defined(CONFIG_EP93XX_SDCE0_PHYS_OFFSET)
11#define PLAT_PHYS_OFFSET UL(0xc0000000)
12#elif defined(CONFIG_EP93XX_SDCE1_PHYS_OFFSET)
13#define PLAT_PHYS_OFFSET UL(0xd0000000)
14#elif defined(CONFIG_EP93XX_SDCE2_PHYS_OFFSET)
15#define PLAT_PHYS_OFFSET UL(0xe0000000)
16#elif defined(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET)
17#define PLAT_PHYS_OFFSET UL(0xf0000000)
18#else
19#error "Kconfig bug: No EP93xx PHYS_OFFSET set"
20#endif
21
22#endif
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 8f9b66c4ac78..5d4ff6571dcd 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -119,6 +119,7 @@ config EXYNOS5420_MCPM
119 bool "Exynos5420 Multi-Cluster PM support" 119 bool "Exynos5420 Multi-Cluster PM support"
120 depends on MCPM && SOC_EXYNOS5420 120 depends on MCPM && SOC_EXYNOS5420
121 select ARM_CCI 121 select ARM_CCI
122 select ARM_CPU_SUSPEND
122 help 123 help
123 This is needed to provide CPU and cluster power management 124 This is needed to provide CPU and cluster power management
124 on Exynos5420 implementing big.LITTLE. 125 on Exynos5420 implementing big.LITTLE.
diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c
index ace0ed617476..a96b78f93f2b 100644
--- a/arch/arm/mach-exynos/mcpm-exynos.c
+++ b/arch/arm/mach-exynos/mcpm-exynos.c
@@ -196,7 +196,7 @@ static void exynos_power_down(void)
196 if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) { 196 if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
197 arch_spin_unlock(&exynos_mcpm_lock); 197 arch_spin_unlock(&exynos_mcpm_lock);
198 198
199 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) { 199 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
200 /* 200 /*
201 * On the Cortex-A15 we need to disable 201 * On the Cortex-A15 we need to disable
202 * L2 prefetching before flushing the cache. 202 * L2 prefetching before flushing the cache.
@@ -289,6 +289,19 @@ static void __naked exynos_pm_power_up_setup(unsigned int affinity_level)
289 "b cci_enable_port_for_self"); 289 "b cci_enable_port_for_self");
290} 290}
291 291
292static void __init exynos_cache_off(void)
293{
294 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
295 /* disable L2 prefetching on the Cortex-A15 */
296 asm volatile(
297 "mcr p15, 1, %0, c15, c0, 3\n\t"
298 "isb\n\t"
299 "dsb"
300 : : "r" (0x400));
301 }
302 exynos_v7_exit_coherency_flush(all);
303}
304
292static const struct of_device_id exynos_dt_mcpm_match[] = { 305static const struct of_device_id exynos_dt_mcpm_match[] = {
293 { .compatible = "samsung,exynos5420" }, 306 { .compatible = "samsung,exynos5420" },
294 { .compatible = "samsung,exynos5800" }, 307 { .compatible = "samsung,exynos5800" },
@@ -332,6 +345,8 @@ static int __init exynos_mcpm_init(void)
332 ret = mcpm_platform_register(&exynos_power_ops); 345 ret = mcpm_platform_register(&exynos_power_ops);
333 if (!ret) 346 if (!ret)
334 ret = mcpm_sync_init(exynos_pm_power_up_setup); 347 ret = mcpm_sync_init(exynos_pm_power_up_setup);
348 if (!ret)
349 ret = mcpm_loopback(exynos_cache_off); /* turn on the CCI */
335 if (ret) { 350 if (ret) {
336 iounmap(ns_sram_base_addr); 351 iounmap(ns_sram_base_addr);
337 return ret; 352 return ret;
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index 50b9aad5e27b..70d1e65a51d8 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -190,7 +190,7 @@ static void __init exynos_smp_init_cpus(void)
190 void __iomem *scu_base = scu_base_addr(); 190 void __iomem *scu_base = scu_base_addr();
191 unsigned int i, ncores; 191 unsigned int i, ncores;
192 192
193 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9) 193 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
194 ncores = scu_base ? scu_get_core_count(scu_base) : 1; 194 ncores = scu_base ? scu_get_core_count(scu_base) : 1;
195 else 195 else
196 /* 196 /*
@@ -216,7 +216,7 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
216 216
217 exynos_sysram_init(); 217 exynos_sysram_init();
218 218
219 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9) 219 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
220 scu_enable(scu_base_addr()); 220 scu_enable(scu_base_addr());
221 221
222 /* 222 /*
diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
index 202ca73e49c4..67d383de614f 100644
--- a/arch/arm/mach-exynos/pm.c
+++ b/arch/arm/mach-exynos/pm.c
@@ -300,7 +300,7 @@ static int exynos_pm_suspend(void)
300 tmp = (S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFE0); 300 tmp = (S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFE0);
301 __raw_writel(tmp, S5P_CENTRAL_SEQ_OPTION); 301 __raw_writel(tmp, S5P_CENTRAL_SEQ_OPTION);
302 302
303 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9) 303 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
304 exynos_cpu_save_register(); 304 exynos_cpu_save_register();
305 305
306 return 0; 306 return 0;
@@ -334,7 +334,7 @@ static void exynos_pm_resume(void)
334 if (exynos_pm_central_resume()) 334 if (exynos_pm_central_resume())
335 goto early_wakeup; 335 goto early_wakeup;
336 336
337 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9) 337 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
338 exynos_cpu_restore_register(); 338 exynos_cpu_restore_register();
339 339
340 /* For release retention */ 340 /* For release retention */
@@ -353,7 +353,7 @@ static void exynos_pm_resume(void)
353 353
354 s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save)); 354 s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save));
355 355
356 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9) 356 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
357 scu_enable(S5P_VA_SCU); 357 scu_enable(S5P_VA_SCU);
358 358
359early_wakeup: 359early_wakeup:
@@ -440,15 +440,14 @@ static int exynos_cpu_pm_notifier(struct notifier_block *self,
440 case CPU_PM_ENTER: 440 case CPU_PM_ENTER:
441 if (cpu == 0) { 441 if (cpu == 0) {
442 exynos_pm_central_suspend(); 442 exynos_pm_central_suspend();
443 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9) 443 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
444 exynos_cpu_save_register(); 444 exynos_cpu_save_register();
445 } 445 }
446 break; 446 break;
447 447
448 case CPU_PM_EXIT: 448 case CPU_PM_EXIT:
449 if (cpu == 0) { 449 if (cpu == 0) {
450 if (read_cpuid_part_number() == 450 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) {
451 ARM_CPU_PART_CORTEX_A9) {
452 scu_enable(S5P_VA_SCU); 451 scu_enable(S5P_VA_SCU);
453 exynos_cpu_restore_register(); 452 exynos_cpu_restore_register();
454 } 453 }
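
The read_cpuid_part() conversions in these Exynos files (and in tc2_pm.c below) follow the new rule that part-number checks include the implementer field, so the ARM_CPU_PART_* constants cannot falsely match another vendor's core that reuses the same part number. The idiom, as a sketch:

	#include <linux/types.h>
	#include <asm/cputype.h>

	static bool is_cortex_a9(void)
	{
		/* compares implementer and part fields together */
		return read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
	}
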
diff --git a/arch/arm/mach-footbridge/include/mach/memory.h b/arch/arm/mach-footbridge/include/mach/memory.h
index 5c6df377f969..6f2ecccdf323 100644
--- a/arch/arm/mach-footbridge/include/mach/memory.h
+++ b/arch/arm/mach-footbridge/include/mach/memory.h
@@ -59,11 +59,6 @@ extern unsigned long __bus_to_pfn(unsigned long);
59 */ 59 */
60#define FLUSH_BASE 0xf9000000 60#define FLUSH_BASE 0xf9000000
61 61
62/*
63 * Physical DRAM offset.
64 */
65#define PLAT_PHYS_OFFSET UL(0x00000000)
66
67#define FLUSH_BASE_PHYS 0x50000000 62#define FLUSH_BASE_PHYS 0x50000000
68 63
69#endif 64#endif
diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S
index fe123b079c05..74b50f1982db 100644
--- a/arch/arm/mach-imx/suspend-imx6.S
+++ b/arch/arm/mach-imx/suspend-imx6.S
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/linkage.h> 12#include <linux/linkage.h>
13#include <asm/assembler.h>
13#include <asm/asm-offsets.h> 14#include <asm/asm-offsets.h>
14#include <asm/hardware/cache-l2x0.h> 15#include <asm/hardware/cache-l2x0.h>
15#include "hardware.h" 16#include "hardware.h"
@@ -301,7 +302,7 @@ rbc_loop:
301 resume_mmdc 302 resume_mmdc
302 303
303 /* return to suspend finish */ 304 /* return to suspend finish */
304 mov pc, lr 305 ret lr
305 306
306resume: 307resume:
307 /* invalidate L1 I-cache first */ 308 /* invalidate L1 I-cache first */
@@ -325,7 +326,7 @@ resume:
325 mov r5, #0x1 326 mov r5, #0x1
326 resume_mmdc 327 resume_mmdc
327 328
328 mov pc, lr 329 ret lr
329ENDPROC(imx6_suspend) 330ENDPROC(imx6_suspend)
330 331
331/* 332/*
diff --git a/arch/arm/mach-integrator/include/mach/memory.h b/arch/arm/mach-integrator/include/mach/memory.h
index 334d5e271889..7268cb50ded0 100644
--- a/arch/arm/mach-integrator/include/mach/memory.h
+++ b/arch/arm/mach-integrator/include/mach/memory.h
@@ -20,11 +20,6 @@
20#ifndef __ASM_ARCH_MEMORY_H 20#ifndef __ASM_ARCH_MEMORY_H
21#define __ASM_ARCH_MEMORY_H 21#define __ASM_ARCH_MEMORY_H
22 22
23/*
24 * Physical DRAM offset.
25 */
26#define PLAT_PHYS_OFFSET UL(0x00000000)
27
28#define BUS_OFFSET UL(0x80000000) 23#define BUS_OFFSET UL(0x80000000)
29#define __virt_to_bus(x) ((x) - PAGE_OFFSET + BUS_OFFSET) 24#define __virt_to_bus(x) ((x) - PAGE_OFFSET + BUS_OFFSET)
30#define __bus_to_virt(x) ((x) - BUS_OFFSET + PAGE_OFFSET) 25#define __bus_to_virt(x) ((x) - BUS_OFFSET + PAGE_OFFSET)
diff --git a/arch/arm/mach-iop13xx/include/mach/iop13xx.h b/arch/arm/mach-iop13xx/include/mach/iop13xx.h
index 17b40279e0a4..9311ee2126d6 100644
--- a/arch/arm/mach-iop13xx/include/mach/iop13xx.h
+++ b/arch/arm/mach-iop13xx/include/mach/iop13xx.h
@@ -3,7 +3,7 @@
3 3
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5 5
6#include <linux/reboot.h> 6enum reboot_mode;
7 7
8/* The ATU offsets can change based on the strapping */ 8/* The ATU offsets can change based on the strapping */
9extern u32 iop13xx_atux_pmmr_offset; 9extern u32 iop13xx_atux_pmmr_offset;
diff --git a/arch/arm/mach-iop13xx/include/mach/memory.h b/arch/arm/mach-iop13xx/include/mach/memory.h
index 7c032d0ab24a..59307e787588 100644
--- a/arch/arm/mach-iop13xx/include/mach/memory.h
+++ b/arch/arm/mach-iop13xx/include/mach/memory.h
@@ -3,11 +3,6 @@
3 3
4#include <mach/hardware.h> 4#include <mach/hardware.h>
5 5
6/*
7 * Physical DRAM offset.
8 */
9#define PLAT_PHYS_OFFSET UL(0x00000000)
10
11#ifndef __ASSEMBLY__ 6#ifndef __ASSEMBLY__
12 7
13#if defined(CONFIG_ARCH_IOP13XX) 8#if defined(CONFIG_ARCH_IOP13XX)
diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c
index bca96f433495..53c316f7301e 100644
--- a/arch/arm/mach-iop13xx/setup.c
+++ b/arch/arm/mach-iop13xx/setup.c
@@ -20,6 +20,7 @@
20#include <linux/dma-mapping.h> 20#include <linux/dma-mapping.h>
21#include <linux/serial_8250.h> 21#include <linux/serial_8250.h>
22#include <linux/io.h> 22#include <linux/io.h>
23#include <linux/reboot.h>
23#ifdef CONFIG_MTD_PHYSMAP 24#ifdef CONFIG_MTD_PHYSMAP
24#include <linux/mtd/physmap.h> 25#include <linux/mtd/physmap.h>
25#endif 26#endif
diff --git a/arch/arm/mach-ks8695/include/mach/memory.h b/arch/arm/mach-ks8695/include/mach/memory.h
index 95e731a7ed6a..ab0d27fa8969 100644
--- a/arch/arm/mach-ks8695/include/mach/memory.h
+++ b/arch/arm/mach-ks8695/include/mach/memory.h
@@ -15,11 +15,6 @@
15 15
16#include <mach/hardware.h> 16#include <mach/hardware.h>
17 17
18/*
19 * Physical SRAM offset.
20 */
21#define PLAT_PHYS_OFFSET KS8695_SDRAM_PA
22
23#ifndef __ASSEMBLY__ 18#ifndef __ASSEMBLY__
24 19
25#ifdef CONFIG_PCI 20#ifdef CONFIG_PCI
diff --git a/arch/arm/mach-mvebu/coherency_ll.S b/arch/arm/mach-mvebu/coherency_ll.S
index 510c29e079ca..f5d881b5d0f7 100644
--- a/arch/arm/mach-mvebu/coherency_ll.S
+++ b/arch/arm/mach-mvebu/coherency_ll.S
@@ -46,7 +46,7 @@ ENTRY(ll_get_coherency_base)
46 ldr r1, =coherency_base 46 ldr r1, =coherency_base
47 ldr r1, [r1] 47 ldr r1, [r1]
482: 482:
49 mov pc, lr 49 ret lr
50ENDPROC(ll_get_coherency_base) 50ENDPROC(ll_get_coherency_base)
51 51
52/* 52/*
@@ -63,7 +63,7 @@ ENTRY(ll_get_coherency_cpumask)
63 mov r2, #(1 << 24) 63 mov r2, #(1 << 24)
64 lsl r3, r2, r3 64 lsl r3, r2, r3
65ARM_BE8(rev r3, r3) 65ARM_BE8(rev r3, r3)
66 mov pc, lr 66 ret lr
67ENDPROC(ll_get_coherency_cpumask) 67ENDPROC(ll_get_coherency_cpumask)
68 68
69/* 69/*
@@ -94,7 +94,7 @@ ENTRY(ll_add_cpu_to_smp_group)
94 strex r1, r2, [r0] 94 strex r1, r2, [r0]
95 cmp r1, #0 95 cmp r1, #0
96 bne 1b 96 bne 1b
97 mov pc, lr 97 ret lr
98ENDPROC(ll_add_cpu_to_smp_group) 98ENDPROC(ll_add_cpu_to_smp_group)
99 99
100ENTRY(ll_enable_coherency) 100ENTRY(ll_enable_coherency)
@@ -118,7 +118,7 @@ ENTRY(ll_enable_coherency)
118 bne 1b 118 bne 1b
119 dsb 119 dsb
120 mov r0, #0 120 mov r0, #0
121 mov pc, lr 121 ret lr
122ENDPROC(ll_enable_coherency) 122ENDPROC(ll_enable_coherency)
123 123
124ENTRY(ll_disable_coherency) 124ENTRY(ll_disable_coherency)
@@ -141,7 +141,7 @@ ENTRY(ll_disable_coherency)
141 cmp r1, #0 141 cmp r1, #0
142 bne 1b 142 bne 1b
143 dsb 143 dsb
144 mov pc, lr 144 ret lr
145ENDPROC(ll_disable_coherency) 145ENDPROC(ll_disable_coherency)
146 146
147 .align 2 147 .align 2
diff --git a/arch/arm/mach-mvebu/headsmp-a9.S b/arch/arm/mach-mvebu/headsmp-a9.S
index da5bb292b91c..2c3c7fc65e28 100644
--- a/arch/arm/mach-mvebu/headsmp-a9.S
+++ b/arch/arm/mach-mvebu/headsmp-a9.S
@@ -29,7 +29,7 @@ ARM_BE8(setend be)
29 ldr r0, [r0] 29 ldr r0, [r0]
30 ldr r1, [r0] 30 ldr r1, [r0]
31ARM_BE8(rev r1, r1) 31ARM_BE8(rev r1, r1)
32 mov pc, r1 32 ret r1
331: 331:
34 .word CPU_RESUME_ADDR_REG 34 .word CPU_RESUME_ADDR_REG
35armada_375_smp_cpu1_enable_code_end: 35armada_375_smp_cpu1_enable_code_end:
diff --git a/arch/arm/mach-omap1/include/mach/memory.h b/arch/arm/mach-omap1/include/mach/memory.h
index 3c2530523111..058a4f7d44c5 100644
--- a/arch/arm/mach-omap1/include/mach/memory.h
+++ b/arch/arm/mach-omap1/include/mach/memory.h
@@ -6,11 +6,6 @@
6#define __ASM_ARCH_MEMORY_H 6#define __ASM_ARCH_MEMORY_H
7 7
8/* 8/*
9 * Physical DRAM offset.
10 */
11#define PLAT_PHYS_OFFSET UL(0x10000000)
12
13/*
14 * Bus address is physical address, except for OMAP-1510 Local Bus. 9 * Bus address is physical address, except for OMAP-1510 Local Bus.
15 * OMAP-1510 bus address is translated into a Local Bus address if the 10 * OMAP-1510 bus address is translated into a Local Bus address if the
16 * OMAP bus type is lbus. We do the address translation based on the 11 * OMAP bus type is lbus. We do the address translation based on the
diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
index 9086ce03ae12..b84a0122d823 100644
--- a/arch/arm/mach-omap2/sleep44xx.S
+++ b/arch/arm/mach-omap2/sleep44xx.S
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/linkage.h> 12#include <linux/linkage.h>
13#include <asm/assembler.h>
13#include <asm/smp_scu.h> 14#include <asm/smp_scu.h>
14#include <asm/memory.h> 15#include <asm/memory.h>
15#include <asm/hardware/cache-l2x0.h> 16#include <asm/hardware/cache-l2x0.h>
@@ -334,7 +335,7 @@ ENDPROC(omap4_cpu_resume)
334 335
335#ifndef CONFIG_OMAP4_ERRATA_I688 336#ifndef CONFIG_OMAP4_ERRATA_I688
336ENTRY(omap_bus_sync) 337ENTRY(omap_bus_sync)
337 mov pc, lr 338 ret lr
338ENDPROC(omap_bus_sync) 339ENDPROC(omap_bus_sync)
339#endif 340#endif
340 341
diff --git a/arch/arm/mach-omap2/sram242x.S b/arch/arm/mach-omap2/sram242x.S
index 680a7c56cc3e..2c88ff2d0236 100644
--- a/arch/arm/mach-omap2/sram242x.S
+++ b/arch/arm/mach-omap2/sram242x.S
@@ -101,7 +101,7 @@ i_dll_wait:
101i_dll_delay: 101i_dll_delay:
102 subs r4, r4, #0x1 102 subs r4, r4, #0x1
103 bne i_dll_delay 103 bne i_dll_delay
104 mov pc, lr 104 ret lr
105 105
106 /* 106 /*
107 * shift up or down voltage, use R9 as input to tell level. 107 * shift up or down voltage, use R9 as input to tell level.
@@ -125,7 +125,7 @@ volt_delay:
125 ldr r7, [r3] @ get timer value 125 ldr r7, [r3] @ get timer value
126 cmp r5, r7 @ time up? 126 cmp r5, r7 @ time up?
127 bhi volt_delay @ not yet->branch 127 bhi volt_delay @ not yet->branch
128 mov pc, lr @ back to caller. 128 ret lr @ back to caller.
129 129
130omap242x_sdi_cm_clksel2_pll: 130omap242x_sdi_cm_clksel2_pll:
131 .word OMAP2420_CM_REGADDR(PLL_MOD, CM_CLKSEL2) 131 .word OMAP2420_CM_REGADDR(PLL_MOD, CM_CLKSEL2)
@@ -220,7 +220,7 @@ volt_delay_c:
220 ldr r7, [r10] @ get timer value 220 ldr r7, [r10] @ get timer value
221 cmp r8, r7 @ time up? 221 cmp r8, r7 @ time up?
222 bhi volt_delay_c @ not yet->branch 222 bhi volt_delay_c @ not yet->branch
223 mov pc, lr @ back to caller 223 ret lr @ back to caller
224 224
225omap242x_srs_cm_clksel2_pll: 225omap242x_srs_cm_clksel2_pll:
226 .word OMAP2420_CM_REGADDR(PLL_MOD, CM_CLKSEL2) 226 .word OMAP2420_CM_REGADDR(PLL_MOD, CM_CLKSEL2)
diff --git a/arch/arm/mach-omap2/sram243x.S b/arch/arm/mach-omap2/sram243x.S
index a1e9edd673f4..d5deb9761fc7 100644
--- a/arch/arm/mach-omap2/sram243x.S
+++ b/arch/arm/mach-omap2/sram243x.S
@@ -101,7 +101,7 @@ i_dll_wait:
101i_dll_delay: 101i_dll_delay:
102 subs r4, r4, #0x1 102 subs r4, r4, #0x1
103 bne i_dll_delay 103 bne i_dll_delay
104 mov pc, lr 104 ret lr
105 105
106 /* 106 /*
107 * shift up or down voltage, use R9 as input to tell level. 107 * shift up or down voltage, use R9 as input to tell level.
@@ -125,7 +125,7 @@ volt_delay:
125 ldr r7, [r3] @ get timer value 125 ldr r7, [r3] @ get timer value
126 cmp r5, r7 @ time up? 126 cmp r5, r7 @ time up?
127 bhi volt_delay @ not yet->branch 127 bhi volt_delay @ not yet->branch
128 mov pc, lr @ back to caller. 128 ret lr @ back to caller.
129 129
130omap243x_sdi_cm_clksel2_pll: 130omap243x_sdi_cm_clksel2_pll:
131 .word OMAP2430_CM_REGADDR(PLL_MOD, CM_CLKSEL2) 131 .word OMAP2430_CM_REGADDR(PLL_MOD, CM_CLKSEL2)
@@ -220,7 +220,7 @@ volt_delay_c:
220 ldr r7, [r10] @ get timer value 220 ldr r7, [r10] @ get timer value
221 cmp r8, r7 @ time up? 221 cmp r8, r7 @ time up?
222 bhi volt_delay_c @ not yet->branch 222 bhi volt_delay_c @ not yet->branch
223 mov pc, lr @ back to caller 223 ret lr @ back to caller
224 224
225omap243x_srs_cm_clksel2_pll: 225omap243x_srs_cm_clksel2_pll:
226 .word OMAP2430_CM_REGADDR(PLL_MOD, CM_CLKSEL2) 226 .word OMAP2430_CM_REGADDR(PLL_MOD, CM_CLKSEL2)
diff --git a/arch/arm/mach-pxa/mioa701_bootresume.S b/arch/arm/mach-pxa/mioa701_bootresume.S
index 324d25a48c85..81591491ab94 100644
--- a/arch/arm/mach-pxa/mioa701_bootresume.S
+++ b/arch/arm/mach-pxa/mioa701_bootresume.S
@@ -29,7 +29,7 @@ ENTRY(mioa701_jumpaddr)
29 str r1, [r0] @ Early disable resume for next boot 29 str r1, [r0] @ Early disable resume for next boot
30 ldr r0, mioa701_jumpaddr @ (Murphy's Law) 30 ldr r0, mioa701_jumpaddr @ (Murphy's Law)
31 ldr r0, [r0] 31 ldr r0, [r0]
32 mov pc, r0 32 ret r0
332: 332:
34 34
35ENTRY(mioa701_bootstrap_lg) 35ENTRY(mioa701_bootstrap_lg)
diff --git a/arch/arm/mach-pxa/standby.S b/arch/arm/mach-pxa/standby.S
index 29f5f5c180b7..eab1645bb4ad 100644
--- a/arch/arm/mach-pxa/standby.S
+++ b/arch/arm/mach-pxa/standby.S
@@ -29,7 +29,7 @@ ENTRY(pxa_cpu_standby)
29 .align 5 29 .align 5
301: mcr p14, 0, r2, c7, c0, 0 @ put the system into Standby 301: mcr p14, 0, r2, c7, c0, 0 @ put the system into Standby
31 str r1, [r0] @ make sure PSSR_PH/STS are clear 31 str r1, [r0] @ make sure PSSR_PH/STS are clear
32 mov pc, lr 32 ret lr
33 33
34#endif 34#endif
35 35
@@ -108,7 +108,7 @@ ENTRY(pm_enter_standby_start)
108 bic r0, r0, #0x20000000 108 bic r0, r0, #0x20000000
109 str r0, [r1, #PXA3_DMCIER] 109 str r0, [r1, #PXA3_DMCIER]
110 110
111 mov pc, lr 111 ret lr
112ENTRY(pm_enter_standby_end) 112ENTRY(pm_enter_standby_end)
113 113
114#endif 114#endif
diff --git a/arch/arm/mach-realview/include/mach/memory.h b/arch/arm/mach-realview/include/mach/memory.h
index db09170e3832..23e7a313f75d 100644
--- a/arch/arm/mach-realview/include/mach/memory.h
+++ b/arch/arm/mach-realview/include/mach/memory.h
@@ -20,15 +20,6 @@
20#ifndef __ASM_ARCH_MEMORY_H 20#ifndef __ASM_ARCH_MEMORY_H
21#define __ASM_ARCH_MEMORY_H 21#define __ASM_ARCH_MEMORY_H
22 22
23/*
24 * Physical DRAM offset.
25 */
26#ifdef CONFIG_REALVIEW_HIGH_PHYS_OFFSET
27#define PLAT_PHYS_OFFSET UL(0x70000000)
28#else
29#define PLAT_PHYS_OFFSET UL(0x00000000)
30#endif
31
32#ifdef CONFIG_SPARSEMEM 23#ifdef CONFIG_SPARSEMEM
33 24
34/* 25/*
diff --git a/arch/arm/mach-rpc/include/mach/memory.h b/arch/arm/mach-rpc/include/mach/memory.h
index 18a221093bf5..b7e49571417d 100644
--- a/arch/arm/mach-rpc/include/mach/memory.h
+++ b/arch/arm/mach-rpc/include/mach/memory.h
@@ -19,11 +19,6 @@
19#define __ASM_ARCH_MEMORY_H 19#define __ASM_ARCH_MEMORY_H
20 20
21/* 21/*
22 * Physical DRAM offset.
23 */
24#define PLAT_PHYS_OFFSET UL(0x10000000)
25
26/*
27 * Cache flushing area - ROM 22 * Cache flushing area - ROM
28 */ 23 */
29#define FLUSH_BASE_PHYS 0x00000000 24#define FLUSH_BASE_PHYS 0x00000000
diff --git a/arch/arm/mach-s3c24xx/sleep-s3c2410.S b/arch/arm/mach-s3c24xx/sleep-s3c2410.S
index c9b91223697c..875ba8911127 100644
--- a/arch/arm/mach-s3c24xx/sleep-s3c2410.S
+++ b/arch/arm/mach-s3c24xx/sleep-s3c2410.S
@@ -66,4 +66,4 @@ s3c2410_do_sleep:
66 streq r8, [r5] @ SDRAM power-down config 66 streq r8, [r5] @ SDRAM power-down config
67 streq r9, [r6] @ CPU sleep 67 streq r9, [r6] @ CPU sleep
681: beq 1b 681: beq 1b
69 mov pc, r14 69 ret lr
diff --git a/arch/arm/mach-s3c24xx/sleep-s3c2412.S b/arch/arm/mach-s3c24xx/sleep-s3c2412.S
index 5adaceb7da13..6bf5b4d8743c 100644
--- a/arch/arm/mach-s3c24xx/sleep-s3c2412.S
+++ b/arch/arm/mach-s3c24xx/sleep-s3c2412.S
@@ -65,4 +65,4 @@ s3c2412_sleep_enter1:
65 strne r9, [r3] 65 strne r9, [r3]
66 bne s3c2412_sleep_enter1 66 bne s3c2412_sleep_enter1
67 67
68 mov pc, r14 68 ret lr
diff --git a/arch/arm/mach-s5pv210/include/mach/memory.h b/arch/arm/mach-s5pv210/include/mach/memory.h
index 2d3cfa221d5f..d584fac9156b 100644
--- a/arch/arm/mach-s5pv210/include/mach/memory.h
+++ b/arch/arm/mach-s5pv210/include/mach/memory.h
@@ -13,8 +13,6 @@
13#ifndef __ASM_ARCH_MEMORY_H 13#ifndef __ASM_ARCH_MEMORY_H
14#define __ASM_ARCH_MEMORY_H 14#define __ASM_ARCH_MEMORY_H
15 15
16#define PLAT_PHYS_OFFSET UL(0x20000000)
17
18/* 16/*
19 * Sparsemem support 17 * Sparsemem support
20 * Physical memory can be located from 0x20000000 to 0x7fffffff, 18 * Physical memory can be located from 0x20000000 to 0x7fffffff,
diff --git a/arch/arm/mach-sa1100/include/mach/memory.h b/arch/arm/mach-sa1100/include/mach/memory.h
index 12d376795abc..2054051eb797 100644
--- a/arch/arm/mach-sa1100/include/mach/memory.h
+++ b/arch/arm/mach-sa1100/include/mach/memory.h
@@ -10,11 +10,6 @@
10#include <asm/sizes.h> 10#include <asm/sizes.h>
11 11
12/* 12/*
13 * Physical DRAM offset is 0xc0000000 on the SA1100
14 */
15#define PLAT_PHYS_OFFSET UL(0xc0000000)
16
17/*
18 * Because of the wide memory address space between physical RAM banks on the 13 * Because of the wide memory address space between physical RAM banks on the
19 * SA1100, it's more convenient to use Linux's SparseMEM support to implement 14 * SA1100, it's more convenient to use Linux's SparseMEM support to implement
20 * our memory map representation. Assuming all memory nodes have equal access 15 * our memory map representation. Assuming all memory nodes have equal access
diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S
index e5be5c88644b..293007579b8e 100644
--- a/arch/arm/mach-shmobile/headsmp.S
+++ b/arch/arm/mach-shmobile/headsmp.S
@@ -12,6 +12,7 @@
12 */ 12 */
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <asm/assembler.h>
15#include <asm/memory.h> 16#include <asm/memory.h>
16 17
17ENTRY(shmobile_invalidate_start) 18ENTRY(shmobile_invalidate_start)
@@ -75,7 +76,7 @@ shmobile_smp_boot_next:
75 76
76shmobile_smp_boot_found: 77shmobile_smp_boot_found:
77 ldr r0, [r7, r1, lsl #2] 78 ldr r0, [r7, r1, lsl #2]
78 mov pc, r9 79 ret r9
79ENDPROC(shmobile_smp_boot) 80ENDPROC(shmobile_smp_boot)
80 81
81ENTRY(shmobile_smp_sleep) 82ENTRY(shmobile_smp_sleep)
diff --git a/arch/arm/mach-tegra/sleep-tegra20.S b/arch/arm/mach-tegra/sleep-tegra20.S
index aaaf3abd2688..be4bc5f853f5 100644
--- a/arch/arm/mach-tegra/sleep-tegra20.S
+++ b/arch/arm/mach-tegra/sleep-tegra20.S
@@ -78,7 +78,7 @@ ENTRY(tegra20_hotplug_shutdown)
78 /* Put this CPU down */ 78 /* Put this CPU down */
79 cpu_id r0 79 cpu_id r0
80 bl tegra20_cpu_shutdown 80 bl tegra20_cpu_shutdown
81 mov pc, lr @ should never get here 81 ret lr @ should never get here
82ENDPROC(tegra20_hotplug_shutdown) 82ENDPROC(tegra20_hotplug_shutdown)
83 83
84/* 84/*
@@ -96,7 +96,7 @@ ENDPROC(tegra20_hotplug_shutdown)
96 */ 96 */
97ENTRY(tegra20_cpu_shutdown) 97ENTRY(tegra20_cpu_shutdown)
98 cmp r0, #0 98 cmp r0, #0
99 moveq pc, lr @ must not be called for CPU 0 99 reteq lr @ must not be called for CPU 0
100 mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41 100 mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
101 mov r12, #CPU_RESETTABLE 101 mov r12, #CPU_RESETTABLE
102 str r12, [r1] 102 str r12, [r1]
@@ -117,7 +117,7 @@ ENTRY(tegra20_cpu_shutdown)
117 cpu_id r3 117 cpu_id r3
118 cmp r3, r0 118 cmp r3, r0
119 beq . 119 beq .
120 mov pc, lr 120 ret lr
121ENDPROC(tegra20_cpu_shutdown) 121ENDPROC(tegra20_cpu_shutdown)
122#endif 122#endif
123 123
@@ -164,7 +164,7 @@ ENTRY(tegra_pen_lock)
164 cmpeq r12, r0 @ !turn == cpu? 164 cmpeq r12, r0 @ !turn == cpu?
165 beq 1b @ while !turn == cpu && flag[!cpu] == 1 165 beq 1b @ while !turn == cpu && flag[!cpu] == 1
166 166
167 mov pc, lr @ locked 167 ret lr @ locked
168ENDPROC(tegra_pen_lock) 168ENDPROC(tegra_pen_lock)
169 169
170ENTRY(tegra_pen_unlock) 170ENTRY(tegra_pen_unlock)
@@ -176,7 +176,7 @@ ENTRY(tegra_pen_unlock)
176 addne r2, r3, #PMC_SCRATCH39 176 addne r2, r3, #PMC_SCRATCH39
177 mov r12, #0 177 mov r12, #0
178 str r12, [r2] 178 str r12, [r2]
179 mov pc, lr 179 ret lr
180ENDPROC(tegra_pen_unlock) 180ENDPROC(tegra_pen_unlock)
181 181
182/* 182/*
@@ -189,7 +189,7 @@ ENTRY(tegra20_cpu_clear_resettable)
189 mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41 189 mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
190 mov r12, #CPU_NOT_RESETTABLE 190 mov r12, #CPU_NOT_RESETTABLE
191 str r12, [r1] 191 str r12, [r1]
192 mov pc, lr 192 ret lr
193ENDPROC(tegra20_cpu_clear_resettable) 193ENDPROC(tegra20_cpu_clear_resettable)
194 194
195/* 195/*
@@ -202,7 +202,7 @@ ENTRY(tegra20_cpu_set_resettable_soon)
202 mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41 202 mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
203 mov r12, #CPU_RESETTABLE_SOON 203 mov r12, #CPU_RESETTABLE_SOON
204 str r12, [r1] 204 str r12, [r1]
205 mov pc, lr 205 ret lr
206ENDPROC(tegra20_cpu_set_resettable_soon) 206ENDPROC(tegra20_cpu_set_resettable_soon)
207 207
208/* 208/*
@@ -217,7 +217,7 @@ ENTRY(tegra20_cpu_is_resettable_soon)
217 cmp r12, #CPU_RESETTABLE_SOON 217 cmp r12, #CPU_RESETTABLE_SOON
218 moveq r0, #1 218 moveq r0, #1
219 movne r0, #0 219 movne r0, #0
220 mov pc, lr 220 ret lr
221ENDPROC(tegra20_cpu_is_resettable_soon) 221ENDPROC(tegra20_cpu_is_resettable_soon)
222 222
223/* 223/*
@@ -239,7 +239,7 @@ ENTRY(tegra20_sleep_core_finish)
239 mov32 r1, TEGRA_IRAM_LPx_RESUME_AREA 239 mov32 r1, TEGRA_IRAM_LPx_RESUME_AREA
240 add r0, r0, r1 240 add r0, r0, r1
241 241
242 mov pc, r3 242 ret r3
243ENDPROC(tegra20_sleep_core_finish) 243ENDPROC(tegra20_sleep_core_finish)
244 244
245/* 245/*
@@ -402,7 +402,7 @@ exit_selfrefresh_loop:
402 402
403 mov32 r0, TEGRA_PMC_BASE 403 mov32 r0, TEGRA_PMC_BASE
404 ldr r0, [r0, #PMC_SCRATCH41] 404 ldr r0, [r0, #PMC_SCRATCH41]
405 mov pc, r0 @ jump to tegra_resume 405 ret r0 @ jump to tegra_resume
406ENDPROC(tegra20_lp1_reset) 406ENDPROC(tegra20_lp1_reset)
407 407
408/* 408/*
@@ -455,7 +455,7 @@ tegra20_switch_cpu_to_clk32k:
455 mov r0, #0 /* burst policy = 32KHz */ 455 mov r0, #0 /* burst policy = 32KHz */
456 str r0, [r5, #CLK_RESET_SCLK_BURST] 456 str r0, [r5, #CLK_RESET_SCLK_BURST]
457 457
458 mov pc, lr 458 ret lr
459 459
460/* 460/*
461 * tegra20_enter_sleep 461 * tegra20_enter_sleep
@@ -535,7 +535,7 @@ padsave_done:
535 adr r2, tegra20_sclk_save 535 adr r2, tegra20_sclk_save
536 str r0, [r2] 536 str r0, [r2]
537 dsb 537 dsb
538 mov pc, lr 538 ret lr
539 539
540tegra20_sdram_pad_address: 540tegra20_sdram_pad_address:
541 .word TEGRA_APB_MISC_BASE + APB_MISC_XM2CFGCPADCTRL 541 .word TEGRA_APB_MISC_BASE + APB_MISC_XM2CFGCPADCTRL
diff --git a/arch/arm/mach-tegra/sleep-tegra30.S b/arch/arm/mach-tegra/sleep-tegra30.S
index b16d4a57fa59..09cad9b071de 100644
--- a/arch/arm/mach-tegra/sleep-tegra30.S
+++ b/arch/arm/mach-tegra/sleep-tegra30.S
@@ -142,7 +142,7 @@ ENTRY(tegra30_hotplug_shutdown)
142 /* Powergate this CPU */ 142 /* Powergate this CPU */
143 mov r0, #TEGRA30_POWER_HOTPLUG_SHUTDOWN 143 mov r0, #TEGRA30_POWER_HOTPLUG_SHUTDOWN
144 bl tegra30_cpu_shutdown 144 bl tegra30_cpu_shutdown
145 mov pc, lr @ should never get here 145 ret lr @ should never get here
146ENDPROC(tegra30_hotplug_shutdown) 146ENDPROC(tegra30_hotplug_shutdown)
147 147
148/* 148/*
@@ -161,7 +161,7 @@ ENTRY(tegra30_cpu_shutdown)
161 bne _no_cpu0_chk @ It's not Tegra30 161 bne _no_cpu0_chk @ It's not Tegra30
162 162
163 cmp r3, #0 163 cmp r3, #0
164 moveq pc, lr @ Must never be called for CPU 0 164 reteq lr @ Must never be called for CPU 0
165_no_cpu0_chk: 165_no_cpu0_chk:
166 166
167 ldr r12, =TEGRA_FLOW_CTRL_VIRT 167 ldr r12, =TEGRA_FLOW_CTRL_VIRT
@@ -266,7 +266,7 @@ ENTRY(tegra30_sleep_core_finish)
266 mov32 r1, TEGRA_IRAM_LPx_RESUME_AREA 266 mov32 r1, TEGRA_IRAM_LPx_RESUME_AREA
267 add r0, r0, r1 267 add r0, r0, r1
268 268
269 mov pc, r3 269 ret r3
270ENDPROC(tegra30_sleep_core_finish) 270ENDPROC(tegra30_sleep_core_finish)
271 271
272/* 272/*
@@ -285,7 +285,7 @@ ENTRY(tegra30_sleep_cpu_secondary_finish)
285 mov r0, #0 @ power mode flags (!hotplug) 285 mov r0, #0 @ power mode flags (!hotplug)
286 bl tegra30_cpu_shutdown 286 bl tegra30_cpu_shutdown
287 mov r0, #1 @ never return here 287 mov r0, #1 @ never return here
288 mov pc, r7 288 ret r7
289ENDPROC(tegra30_sleep_cpu_secondary_finish) 289ENDPROC(tegra30_sleep_cpu_secondary_finish)
290 290
291/* 291/*
@@ -529,7 +529,7 @@ __no_dual_emc_chanl:
529 529
530 mov32 r0, TEGRA_PMC_BASE 530 mov32 r0, TEGRA_PMC_BASE
531 ldr r0, [r0, #PMC_SCRATCH41] 531 ldr r0, [r0, #PMC_SCRATCH41]
532 mov pc, r0 @ jump to tegra_resume 532 ret r0 @ jump to tegra_resume
533ENDPROC(tegra30_lp1_reset) 533ENDPROC(tegra30_lp1_reset)
534 534
535 .align L1_CACHE_SHIFT 535 .align L1_CACHE_SHIFT
@@ -659,7 +659,7 @@ _no_pll_in_iddq:
659 mov r0, #0 /* burst policy = 32KHz */ 659 mov r0, #0 /* burst policy = 32KHz */
660 str r0, [r5, #CLK_RESET_SCLK_BURST] 660 str r0, [r5, #CLK_RESET_SCLK_BURST]
661 661
662 mov pc, lr 662 ret lr
663 663
664/* 664/*
665 * tegra30_enter_sleep 665 * tegra30_enter_sleep
@@ -819,7 +819,7 @@ pmc_io_dpd_skip:
819 819
820 dsb 820 dsb
821 821
822 mov pc, lr 822 ret lr
823 823
824 .ltorg 824 .ltorg
825/* dummy symbol for end of IRAM */ 825/* dummy symbol for end of IRAM */
diff --git a/arch/arm/mach-tegra/sleep.S b/arch/arm/mach-tegra/sleep.S
index 8d06213fbc47..f024a5109e8e 100644
--- a/arch/arm/mach-tegra/sleep.S
+++ b/arch/arm/mach-tegra/sleep.S
@@ -87,7 +87,7 @@ ENTRY(tegra_init_l2_for_a15)
87 mcrne p15, 0x1, r0, c9, c0, 2 87 mcrne p15, 0x1, r0, c9, c0, 2
88_exit_init_l2_a15: 88_exit_init_l2_a15:
89 89
90 mov pc, lr 90 ret lr
91ENDPROC(tegra_init_l2_for_a15) 91ENDPROC(tegra_init_l2_for_a15)
92 92
93/* 93/*
@@ -111,7 +111,7 @@ ENTRY(tegra_sleep_cpu_finish)
111 add r3, r3, r0 111 add r3, r3, r0
112 mov r0, r1 112 mov r0, r1
113 113
114 mov pc, r3 114 ret r3
115ENDPROC(tegra_sleep_cpu_finish) 115ENDPROC(tegra_sleep_cpu_finish)
116 116
117/* 117/*
@@ -139,7 +139,7 @@ ENTRY(tegra_shut_off_mmu)
139 moveq r3, #0 139 moveq r3, #0
140 streq r3, [r2, #L2X0_CTRL] 140 streq r3, [r2, #L2X0_CTRL]
141#endif 141#endif
142 mov pc, r0 142 ret r0
143ENDPROC(tegra_shut_off_mmu) 143ENDPROC(tegra_shut_off_mmu)
144 .popsection 144 .popsection
145 145
@@ -156,6 +156,6 @@ ENTRY(tegra_switch_cpu_to_pllp)
156 str r0, [r5, #CLK_RESET_CCLK_BURST] 156 str r0, [r5, #CLK_RESET_CCLK_BURST]
157 mov r0, #0 157 mov r0, #0
158 str r0, [r5, #CLK_RESET_CCLK_DIVIDER] 158 str r0, [r5, #CLK_RESET_CCLK_DIVIDER]
159 mov pc, lr 159 ret lr
160ENDPROC(tegra_switch_cpu_to_pllp) 160ENDPROC(tegra_switch_cpu_to_pllp)
161#endif 161#endif
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
index b743a0ae02ce..2fb78b4648cb 100644
--- a/arch/arm/mach-vexpress/tc2_pm.c
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -152,7 +152,7 @@ static void tc2_pm_down(u64 residency)
152 if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) { 152 if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
153 arch_spin_unlock(&tc2_pm_lock); 153 arch_spin_unlock(&tc2_pm_lock);
154 154
155 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) { 155 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
156 /* 156 /*
157 * On the Cortex-A15 we need to disable 157 * On the Cortex-A15 we need to disable
158 * L2 prefetching before flushing the cache. 158 * L2 prefetching before flushing the cache.
@@ -323,6 +323,21 @@ static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
323" b cci_enable_port_for_self "); 323" b cci_enable_port_for_self ");
324} 324}
325 325
326static void __init tc2_cache_off(void)
327{
328 pr_info("TC2: disabling cache during MCPM loopback test\n");
329 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
330 /* disable L2 prefetching on the Cortex-A15 */
331 asm volatile(
332 "mcr p15, 1, %0, c15, c0, 3 \n\t"
333 "isb \n\t"
334 "dsb "
335 : : "r" (0x400) );
336 }
337 v7_exit_coherency_flush(all);
338 cci_disable_port_by_cpu(read_cpuid_mpidr());
339}
340
326static int __init tc2_pm_init(void) 341static int __init tc2_pm_init(void)
327{ 342{
328 int ret, irq; 343 int ret, irq;
@@ -370,6 +385,8 @@ static int __init tc2_pm_init(void)
370 ret = mcpm_platform_register(&tc2_pm_power_ops); 385 ret = mcpm_platform_register(&tc2_pm_power_ops);
371 if (!ret) { 386 if (!ret) {
372 mcpm_sync_init(tc2_pm_power_up_setup); 387 mcpm_sync_init(tc2_pm_power_up_setup);
388 /* test if we can (re)enable the CCI on our own */
389 BUG_ON(mcpm_loopback(tc2_cache_off) != 0);
373 pr_info("TC2 power management initialized\n"); 390 pr_info("TC2 power management initialized\n");
374 } 391 }
375 return ret; 392 return ret;
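
Both TC2 here and Exynos earlier now run mcpm_loopback() with a cache-disable callback after mcpm_sync_init(), taking the boot CPU through a soft power-down/up cycle so the MCPM path itself re-enables the CCI port for the boot cluster. The registration order, sketched with illustrative names (only the mcpm_* calls are the real API):

	#include <linux/init.h>
	#include <asm/mcpm.h>

	static const struct mcpm_platform_ops my_power_ops;	/* illustrative */
	static void my_power_up_setup(unsigned int affinity_level);
	static void my_cache_off(void);

	static int __init my_pm_init(void)
	{
		int ret = mcpm_platform_register(&my_power_ops);

		if (!ret)
			ret = mcpm_sync_init(my_power_up_setup);
		if (!ret)
			/* cycle the boot CPU through MCPM to enable the CCI */
			ret = mcpm_loopback(my_cache_off);
		return ret;
	}
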
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c348eaee7ee2..577039a3f6e5 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -669,7 +669,7 @@ config ARM_VIRT_EXT
669 details. 669 details.
670 670
671config SWP_EMULATE 671config SWP_EMULATE
672 bool "Emulate SWP/SWPB instructions" 672 bool "Emulate SWP/SWPB instructions" if !SMP
673 depends on CPU_V7 673 depends on CPU_V7
674 default y if SMP 674 default y if SMP
675 select HAVE_PROC_CPU if PROC_FS 675 select HAVE_PROC_CPU if PROC_FS
@@ -907,8 +907,8 @@ config PL310_ERRATA_588369
907 They are architecturally defined to behave as the execution of a 907 They are architecturally defined to behave as the execution of a
908 clean operation followed immediately by an invalidate operation, 908 clean operation followed immediately by an invalidate operation,
909 both performing to the same memory location. This functionality 909 both performing to the same memory location. This functionality
910 is not correctly implemented in PL310 as clean lines are not 910 is not correctly implemented in PL310 prior to r2p0 (fixed in r2p0)
911 invalidated as a result of these operations. 911 as clean lines are not invalidated as a result of these operations.
912 912
913config PL310_ERRATA_727915 913config PL310_ERRATA_727915
914 bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption" 914 bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption"
@@ -918,7 +918,8 @@ config PL310_ERRATA_727915
918 PL310 can handle normal accesses while it is in progress. Under very 918 PL310 can handle normal accesses while it is in progress. Under very
919 rare circumstances, due to this erratum, write data can be lost when 919 rare circumstances, due to this erratum, write data can be lost when
920 PL310 treats a cacheable write transaction during a Clean & 920 PL310 treats a cacheable write transaction during a Clean &
921 Invalidate by Way operation. 921 Invalidate by Way operation. Revisions prior to r3p1 are affected by
922 this erratum (fixed in r3p1).
922 923
923config PL310_ERRATA_753970 924config PL310_ERRATA_753970
924 bool "PL310 errata: cache sync operation may be faulty" 925 bool "PL310 errata: cache sync operation may be faulty"
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index b8cb1a2688a0..0c1ab49e5f7b 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -76,6 +76,7 @@
76 76
77static unsigned long ai_user; 77static unsigned long ai_user;
78static unsigned long ai_sys; 78static unsigned long ai_sys;
79static void *ai_sys_last_pc;
79static unsigned long ai_skipped; 80static unsigned long ai_skipped;
80static unsigned long ai_half; 81static unsigned long ai_half;
81static unsigned long ai_word; 82static unsigned long ai_word;
@@ -130,7 +131,7 @@ static const char *usermode_action[] = {
130static int alignment_proc_show(struct seq_file *m, void *v) 131static int alignment_proc_show(struct seq_file *m, void *v)
131{ 132{
132 seq_printf(m, "User:\t\t%lu\n", ai_user); 133 seq_printf(m, "User:\t\t%lu\n", ai_user);
133 seq_printf(m, "System:\t\t%lu\n", ai_sys); 134 seq_printf(m, "System:\t\t%lu (%pF)\n", ai_sys, ai_sys_last_pc);
134 seq_printf(m, "Skipped:\t%lu\n", ai_skipped); 135 seq_printf(m, "Skipped:\t%lu\n", ai_skipped);
135 seq_printf(m, "Half:\t\t%lu\n", ai_half); 136 seq_printf(m, "Half:\t\t%lu\n", ai_half);
136 seq_printf(m, "Word:\t\t%lu\n", ai_word); 137 seq_printf(m, "Word:\t\t%lu\n", ai_word);
@@ -794,6 +795,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
794 goto user; 795 goto user;
795 796
796 ai_sys += 1; 797 ai_sys += 1;
798 ai_sys_last_pc = (void *)instruction_pointer(regs);
797 799
798 fixup: 800 fixup:
799 801
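Taken together, the two alignment.c hunks implement a simple pattern: remember the PC of the most recent kernel-mode misaligned access, then report it through /proc/cpu/alignment with the %pF printk specifier, which resolves the address to symbol+offset. Condensed sketch, using the same names as the hunks above:

	static unsigned long ai_sys;
	static void *ai_sys_last_pc;

	/* in the alignment fault path */
	ai_sys += 1;
	ai_sys_last_pc = (void *)instruction_pointer(regs);

	/* in the seq_file show routine */
	seq_printf(m, "System:\t\t%lu (%pF)\n", ai_sys, ai_sys_last_pc);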
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index e505befe51b5..2f0c58836ae7 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -15,6 +15,7 @@
15 */ 15 */
16#include <linux/linkage.h> 16#include <linux/linkage.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <asm/assembler.h>
18#include <asm/memory.h> 19#include <asm/memory.h>
19#include <asm/page.h> 20#include <asm/page.h>
20 21
@@ -45,7 +46,7 @@
45ENTRY(fa_flush_icache_all) 46ENTRY(fa_flush_icache_all)
46 mov r0, #0 47 mov r0, #0
47 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 48 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
48 mov pc, lr 49 ret lr
49ENDPROC(fa_flush_icache_all) 50ENDPROC(fa_flush_icache_all)
50 51
51/* 52/*
@@ -71,7 +72,7 @@ __flush_whole_cache:
71 mcrne p15, 0, ip, c7, c5, 6 @ invalidate BTB 72 mcrne p15, 0, ip, c7, c5, 6 @ invalidate BTB
72 mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer 73 mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer
73 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush 74 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush
74 mov pc, lr 75 ret lr
75 76
76/* 77/*
77 * flush_user_cache_range(start, end, flags) 78 * flush_user_cache_range(start, end, flags)
@@ -99,7 +100,7 @@ ENTRY(fa_flush_user_cache_range)
99 mcrne p15, 0, ip, c7, c5, 6 @ invalidate BTB 100 mcrne p15, 0, ip, c7, c5, 6 @ invalidate BTB
100 mcrne p15, 0, ip, c7, c10, 4 @ data write barrier 101 mcrne p15, 0, ip, c7, c10, 4 @ data write barrier
101 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush 102 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush
102 mov pc, lr 103 ret lr
103 104
104/* 105/*
105 * coherent_kern_range(start, end) 106 * coherent_kern_range(start, end)
@@ -135,7 +136,7 @@ ENTRY(fa_coherent_user_range)
135 mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB 136 mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB
136 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 137 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
137 mcr p15, 0, r0, c7, c5, 4 @ prefetch flush 138 mcr p15, 0, r0, c7, c5, 4 @ prefetch flush
138 mov pc, lr 139 ret lr
139 140
140/* 141/*
141 * flush_kern_dcache_area(void *addr, size_t size) 142 * flush_kern_dcache_area(void *addr, size_t size)
@@ -155,7 +156,7 @@ ENTRY(fa_flush_kern_dcache_area)
155 mov r0, #0 156 mov r0, #0
156 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 157 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
157 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 158 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
158 mov pc, lr 159 ret lr
159 160
160/* 161/*
161 * dma_inv_range(start, end) 162 * dma_inv_range(start, end)
@@ -181,7 +182,7 @@ fa_dma_inv_range:
181 blo 1b 182 blo 1b
182 mov r0, #0 183 mov r0, #0
183 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 184 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
184 mov pc, lr 185 ret lr
185 186
186/* 187/*
187 * dma_clean_range(start, end) 188 * dma_clean_range(start, end)
@@ -199,7 +200,7 @@ fa_dma_clean_range:
199 blo 1b 200 blo 1b
200 mov r0, #0 201 mov r0, #0
201 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 202 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
202 mov pc, lr 203 ret lr
203 204
204/* 205/*
205 * dma_flush_range(start,end) 206 * dma_flush_range(start,end)
@@ -214,7 +215,7 @@ ENTRY(fa_dma_flush_range)
214 blo 1b 215 blo 1b
215 mov r0, #0 216 mov r0, #0
216 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 217 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
217 mov pc, lr 218 ret lr
218 219
219/* 220/*
220 * dma_map_area(start, size, dir) 221 * dma_map_area(start, size, dir)
@@ -237,7 +238,7 @@ ENDPROC(fa_dma_map_area)
237 * - dir - DMA direction 238 * - dir - DMA direction
238 */ 239 */
239ENTRY(fa_dma_unmap_area) 240ENTRY(fa_dma_unmap_area)
240 mov pc, lr 241 ret lr
241ENDPROC(fa_dma_unmap_area) 242ENDPROC(fa_dma_unmap_area)
242 243
243 .globl fa_flush_kern_cache_louis 244 .globl fa_flush_kern_cache_louis
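Every mov pc, lr / mov pc, r0 rewrite in this file, and in the many files that follow, relies on the ret/reteq/retne assembler macros this series adds to <asm/assembler.h> (which is why each file gains the new include). On ARMv6 and later the macro emits bx, the interworking-safe and preferred return instruction; on older architectures it falls back to mov pc. A close paraphrase of the macro, iterated over the condition codes to generate the conditional variants:

	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
	mov\c	pc, \reg
#else
	.ifeqs	"\reg", "lr"
	bx\c	\reg
	.else
	mov\c	pc, \reg
	.endif
#endif
	.endm
	.endr

Note that bx is only substituted for returns through lr; a computed branch such as ret r0 (used by the cpu_*_reset routines below) keeps the mov pc, reg encoding.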
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 7c3fb41a462e..5f2c988a06ac 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -665,7 +665,7 @@ static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, v
665static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock) 665static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
666{ 666{
667 unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK; 667 unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK;
668 bool cortex_a9 = read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9; 668 bool cortex_a9 = read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
669 669
670 if (rev >= L310_CACHE_ID_RTL_R2P0) { 670 if (rev >= L310_CACHE_ID_RTL_R2P0) {
671 if (cortex_a9) { 671 if (cortex_a9) {
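read_cpuid_part() supersedes read_cpuid_part_number() as part of this series' push to always include the implementer (vendor) field in CPU part checks, so that an ARM part number cannot accidentally match another vendor's. A hedged sketch of the accessor and its use; the mask keeps the MIDR's implementer and part-number fields, and the authoritative definition lives in <asm/cputype.h>:

	static inline unsigned int read_cpuid_part(void)
	{
		/* implementer [31:24] and part number [15:4] */
		return read_cpuid_id() & 0xff00fff0;
	}

	bool cortex_a9 = read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;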
diff --git a/arch/arm/mm/cache-nop.S b/arch/arm/mm/cache-nop.S
index 8e12ddca0031..f1cc9861031f 100644
--- a/arch/arm/mm/cache-nop.S
+++ b/arch/arm/mm/cache-nop.S
@@ -5,11 +5,12 @@
5 */ 5 */
6#include <linux/linkage.h> 6#include <linux/linkage.h>
7#include <linux/init.h> 7#include <linux/init.h>
8#include <asm/assembler.h>
8 9
9#include "proc-macros.S" 10#include "proc-macros.S"
10 11
11ENTRY(nop_flush_icache_all) 12ENTRY(nop_flush_icache_all)
12 mov pc, lr 13 ret lr
13ENDPROC(nop_flush_icache_all) 14ENDPROC(nop_flush_icache_all)
14 15
15 .globl nop_flush_kern_cache_all 16 .globl nop_flush_kern_cache_all
@@ -29,7 +30,7 @@ ENDPROC(nop_flush_icache_all)
29 30
30ENTRY(nop_coherent_user_range) 31ENTRY(nop_coherent_user_range)
31 mov r0, 0 32 mov r0, 0
32 mov pc, lr 33 ret lr
33ENDPROC(nop_coherent_user_range) 34ENDPROC(nop_coherent_user_range)
34 35
35 .globl nop_flush_kern_dcache_area 36 .globl nop_flush_kern_dcache_area
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index a7ba68f59f0c..91e3adf155cb 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -9,6 +9,7 @@
9 */ 9 */
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <asm/assembler.h>
12#include <asm/page.h> 13#include <asm/page.h>
13#include "proc-macros.S" 14#include "proc-macros.S"
14 15
@@ -18,7 +19,7 @@
18 * Unconditionally clean and invalidate the entire icache. 19 * Unconditionally clean and invalidate the entire icache.
19 */ 20 */
20ENTRY(v4_flush_icache_all) 21ENTRY(v4_flush_icache_all)
21 mov pc, lr 22 ret lr
22ENDPROC(v4_flush_icache_all) 23ENDPROC(v4_flush_icache_all)
23 24
24/* 25/*
@@ -40,7 +41,7 @@ ENTRY(v4_flush_kern_cache_all)
40#ifdef CONFIG_CPU_CP15 41#ifdef CONFIG_CPU_CP15
41 mov r0, #0 42 mov r0, #0
42 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache 43 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
43 mov pc, lr 44 ret lr
44#else 45#else
45 /* FALLTHROUGH */ 46 /* FALLTHROUGH */
46#endif 47#endif
@@ -59,7 +60,7 @@ ENTRY(v4_flush_user_cache_range)
59#ifdef CONFIG_CPU_CP15 60#ifdef CONFIG_CPU_CP15
60 mov ip, #0 61 mov ip, #0
61 mcr p15, 0, ip, c7, c7, 0 @ flush ID cache 62 mcr p15, 0, ip, c7, c7, 0 @ flush ID cache
62 mov pc, lr 63 ret lr
63#else 64#else
64 /* FALLTHROUGH */ 65 /* FALLTHROUGH */
65#endif 66#endif
@@ -89,7 +90,7 @@ ENTRY(v4_coherent_kern_range)
89 */ 90 */
90ENTRY(v4_coherent_user_range) 91ENTRY(v4_coherent_user_range)
91 mov r0, #0 92 mov r0, #0
92 mov pc, lr 93 ret lr
93 94
94/* 95/*
95 * flush_kern_dcache_area(void *addr, size_t size) 96 * flush_kern_dcache_area(void *addr, size_t size)
@@ -116,7 +117,7 @@ ENTRY(v4_dma_flush_range)
116 mov r0, #0 117 mov r0, #0
117 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache 118 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
118#endif 119#endif
119 mov pc, lr 120 ret lr
120 121
121/* 122/*
122 * dma_unmap_area(start, size, dir) 123 * dma_unmap_area(start, size, dir)
@@ -136,7 +137,7 @@ ENTRY(v4_dma_unmap_area)
136 * - dir - DMA direction 137 * - dir - DMA direction
137 */ 138 */
138ENTRY(v4_dma_map_area) 139ENTRY(v4_dma_map_area)
139 mov pc, lr 140 ret lr
140ENDPROC(v4_dma_unmap_area) 141ENDPROC(v4_dma_unmap_area)
141ENDPROC(v4_dma_map_area) 142ENDPROC(v4_dma_map_area)
142 143
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index cd4945321407..2522f8c8fbb1 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -9,6 +9,7 @@
9 */ 9 */
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <asm/assembler.h>
12#include <asm/memory.h> 13#include <asm/memory.h>
13#include <asm/page.h> 14#include <asm/page.h>
14#include "proc-macros.S" 15#include "proc-macros.S"
@@ -58,7 +59,7 @@ flush_base:
58ENTRY(v4wb_flush_icache_all) 59ENTRY(v4wb_flush_icache_all)
59 mov r0, #0 60 mov r0, #0
60 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 61 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
61 mov pc, lr 62 ret lr
62ENDPROC(v4wb_flush_icache_all) 63ENDPROC(v4wb_flush_icache_all)
63 64
64/* 65/*
@@ -94,7 +95,7 @@ __flush_whole_cache:
94 blo 1b 95 blo 1b
95#endif 96#endif
96 mcr p15, 0, ip, c7, c10, 4 @ drain write buffer 97 mcr p15, 0, ip, c7, c10, 4 @ drain write buffer
97 mov pc, lr 98 ret lr
98 99
99/* 100/*
100 * flush_user_cache_range(start, end, flags) 101 * flush_user_cache_range(start, end, flags)
@@ -122,7 +123,7 @@ ENTRY(v4wb_flush_user_cache_range)
122 blo 1b 123 blo 1b
123 tst r2, #VM_EXEC 124 tst r2, #VM_EXEC
124 mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer 125 mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer
125 mov pc, lr 126 ret lr
126 127
127/* 128/*
128 * flush_kern_dcache_area(void *addr, size_t size) 129 * flush_kern_dcache_area(void *addr, size_t size)
@@ -170,7 +171,7 @@ ENTRY(v4wb_coherent_user_range)
170 mov r0, #0 171 mov r0, #0
171 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 172 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
172 mcr p15, 0, r0, c7, c10, 4 @ drain WB 173 mcr p15, 0, r0, c7, c10, 4 @ drain WB
173 mov pc, lr 174 ret lr
174 175
175 176
176/* 177/*
@@ -195,7 +196,7 @@ v4wb_dma_inv_range:
195 cmp r0, r1 196 cmp r0, r1
196 blo 1b 197 blo 1b
197 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 198 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
198 mov pc, lr 199 ret lr
199 200
200/* 201/*
201 * dma_clean_range(start, end) 202 * dma_clean_range(start, end)
@@ -212,7 +213,7 @@ v4wb_dma_clean_range:
212 cmp r0, r1 213 cmp r0, r1
213 blo 1b 214 blo 1b
214 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 215 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
215 mov pc, lr 216 ret lr
216 217
217/* 218/*
218 * dma_flush_range(start, end) 219 * dma_flush_range(start, end)
@@ -248,7 +249,7 @@ ENDPROC(v4wb_dma_map_area)
248 * - dir - DMA direction 249 * - dir - DMA direction
249 */ 250 */
250ENTRY(v4wb_dma_unmap_area) 251ENTRY(v4wb_dma_unmap_area)
251 mov pc, lr 252 ret lr
252ENDPROC(v4wb_dma_unmap_area) 253ENDPROC(v4wb_dma_unmap_area)
253 254
254 .globl v4wb_flush_kern_cache_louis 255 .globl v4wb_flush_kern_cache_louis
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index 11e5e5838bc5..a0982ce49007 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -13,6 +13,7 @@
13 */ 13 */
14#include <linux/linkage.h> 14#include <linux/linkage.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <asm/assembler.h>
16#include <asm/page.h> 17#include <asm/page.h>
17#include "proc-macros.S" 18#include "proc-macros.S"
18 19
@@ -48,7 +49,7 @@
48ENTRY(v4wt_flush_icache_all) 49ENTRY(v4wt_flush_icache_all)
49 mov r0, #0 50 mov r0, #0
50 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 51 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
51 mov pc, lr 52 ret lr
52ENDPROC(v4wt_flush_icache_all) 53ENDPROC(v4wt_flush_icache_all)
53 54
54/* 55/*
@@ -71,7 +72,7 @@ __flush_whole_cache:
71 tst r2, #VM_EXEC 72 tst r2, #VM_EXEC
72 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 73 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
73 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache 74 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
74 mov pc, lr 75 ret lr
75 76
76/* 77/*
77 * flush_user_cache_range(start, end, flags) 78 * flush_user_cache_range(start, end, flags)
@@ -94,7 +95,7 @@ ENTRY(v4wt_flush_user_cache_range)
94 add r0, r0, #CACHE_DLINESIZE 95 add r0, r0, #CACHE_DLINESIZE
95 cmp r0, r1 96 cmp r0, r1
96 blo 1b 97 blo 1b
97 mov pc, lr 98 ret lr
98 99
99/* 100/*
100 * coherent_kern_range(start, end) 101 * coherent_kern_range(start, end)
@@ -126,7 +127,7 @@ ENTRY(v4wt_coherent_user_range)
126 cmp r0, r1 127 cmp r0, r1
127 blo 1b 128 blo 1b
128 mov r0, #0 129 mov r0, #0
129 mov pc, lr 130 ret lr
130 131
131/* 132/*
132 * flush_kern_dcache_area(void *addr, size_t size) 133 * flush_kern_dcache_area(void *addr, size_t size)
@@ -160,7 +161,7 @@ v4wt_dma_inv_range:
160 add r0, r0, #CACHE_DLINESIZE 161 add r0, r0, #CACHE_DLINESIZE
161 cmp r0, r1 162 cmp r0, r1
162 blo 1b 163 blo 1b
163 mov pc, lr 164 ret lr
164 165
165/* 166/*
166 * dma_flush_range(start, end) 167 * dma_flush_range(start, end)
@@ -192,7 +193,7 @@ ENTRY(v4wt_dma_unmap_area)
192 * - dir - DMA direction 193 * - dir - DMA direction
193 */ 194 */
194ENTRY(v4wt_dma_map_area) 195ENTRY(v4wt_dma_map_area)
195 mov pc, lr 196 ret lr
196ENDPROC(v4wt_dma_unmap_area) 197ENDPROC(v4wt_dma_unmap_area)
197ENDPROC(v4wt_dma_map_area) 198ENDPROC(v4wt_dma_map_area)
198 199
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index d8fd4d4bd3d4..24659952c278 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -51,7 +51,7 @@ ENTRY(v6_flush_icache_all)
51#else 51#else
52 mcr p15, 0, r0, c7, c5, 0 @ invalidate I-cache 52 mcr p15, 0, r0, c7, c5, 0 @ invalidate I-cache
53#endif 53#endif
54 mov pc, lr 54 ret lr
55ENDPROC(v6_flush_icache_all) 55ENDPROC(v6_flush_icache_all)
56 56
57/* 57/*
@@ -73,7 +73,7 @@ ENTRY(v6_flush_kern_cache_all)
73#else 73#else
74 mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate 74 mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate
75#endif 75#endif
76 mov pc, lr 76 ret lr
77 77
78/* 78/*
79 * v6_flush_cache_all() 79 * v6_flush_cache_all()
@@ -98,7 +98,7 @@ ENTRY(v6_flush_user_cache_all)
98 * - we have a VIPT cache. 98 * - we have a VIPT cache.
99 */ 99 */
100ENTRY(v6_flush_user_cache_range) 100ENTRY(v6_flush_user_cache_range)
101 mov pc, lr 101 ret lr
102 102
103/* 103/*
104 * v6_coherent_kern_range(start,end) 104 * v6_coherent_kern_range(start,end)
@@ -150,7 +150,7 @@ ENTRY(v6_coherent_user_range)
150#else 150#else
151 mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB 151 mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB
152#endif 152#endif
153 mov pc, lr 153 ret lr
154 154
155/* 155/*
156 * Fault handling for the cache operation above. If the virtual address in r0 156 * Fault handling for the cache operation above. If the virtual address in r0
@@ -158,7 +158,7 @@ ENTRY(v6_coherent_user_range)
158 */ 158 */
1599001: 1599001:
160 mov r0, #-EFAULT 160 mov r0, #-EFAULT
161 mov pc, lr 161 ret lr
162 UNWIND(.fnend ) 162 UNWIND(.fnend )
163ENDPROC(v6_coherent_user_range) 163ENDPROC(v6_coherent_user_range)
164ENDPROC(v6_coherent_kern_range) 164ENDPROC(v6_coherent_kern_range)
@@ -188,7 +188,7 @@ ENTRY(v6_flush_kern_dcache_area)
188 mov r0, #0 188 mov r0, #0
189 mcr p15, 0, r0, c7, c10, 4 189 mcr p15, 0, r0, c7, c10, 4
190#endif 190#endif
191 mov pc, lr 191 ret lr
192 192
193 193
194/* 194/*
@@ -239,7 +239,7 @@ v6_dma_inv_range:
239 blo 1b 239 blo 1b
240 mov r0, #0 240 mov r0, #0
241 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 241 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
242 mov pc, lr 242 ret lr
243 243
244/* 244/*
245 * v6_dma_clean_range(start,end) 245 * v6_dma_clean_range(start,end)
@@ -262,7 +262,7 @@ v6_dma_clean_range:
262 blo 1b 262 blo 1b
263 mov r0, #0 263 mov r0, #0
264 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 264 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
265 mov pc, lr 265 ret lr
266 266
267/* 267/*
268 * v6_dma_flush_range(start,end) 268 * v6_dma_flush_range(start,end)
@@ -290,7 +290,7 @@ ENTRY(v6_dma_flush_range)
290 blo 1b 290 blo 1b
291 mov r0, #0 291 mov r0, #0
292 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 292 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
293 mov pc, lr 293 ret lr
294 294
295/* 295/*
296 * dma_map_area(start, size, dir) 296 * dma_map_area(start, size, dir)
@@ -323,7 +323,7 @@ ENTRY(v6_dma_unmap_area)
323 teq r2, #DMA_TO_DEVICE 323 teq r2, #DMA_TO_DEVICE
324 bne v6_dma_inv_range 324 bne v6_dma_inv_range
325#endif 325#endif
326 mov pc, lr 326 ret lr
327ENDPROC(v6_dma_unmap_area) 327ENDPROC(v6_dma_unmap_area)
328 328
329 .globl v6_flush_kern_cache_louis 329 .globl v6_flush_kern_cache_louis
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 615c99e38ba1..b966656d2c2d 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -61,7 +61,7 @@ ENTRY(v7_invalidate_l1)
61 bgt 1b 61 bgt 1b
62 dsb st 62 dsb st
63 isb 63 isb
64 mov pc, lr 64 ret lr
65ENDPROC(v7_invalidate_l1) 65ENDPROC(v7_invalidate_l1)
66 66
67/* 67/*
@@ -76,7 +76,7 @@ ENTRY(v7_flush_icache_all)
76 mov r0, #0 76 mov r0, #0
77 ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable 77 ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable
78 ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate 78 ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
79 mov pc, lr 79 ret lr
80ENDPROC(v7_flush_icache_all) 80ENDPROC(v7_flush_icache_all)
81 81
82 /* 82 /*
@@ -94,7 +94,7 @@ ENTRY(v7_flush_dcache_louis)
94 ALT_UP(ands r3, r0, #(7 << 27)) @ extract LoUU from clidr 94 ALT_UP(ands r3, r0, #(7 << 27)) @ extract LoUU from clidr
95#ifdef CONFIG_ARM_ERRATA_643719 95#ifdef CONFIG_ARM_ERRATA_643719
96 ALT_SMP(mrceq p15, 0, r2, c0, c0, 0) @ read main ID register 96 ALT_SMP(mrceq p15, 0, r2, c0, c0, 0) @ read main ID register
97 ALT_UP(moveq pc, lr) @ LoUU is zero, so nothing to do 97 ALT_UP(reteq lr) @ LoUU is zero, so nothing to do
98 ldreq r1, =0x410fc090 @ ID of ARM Cortex A9 r0p? 98 ldreq r1, =0x410fc090 @ ID of ARM Cortex A9 r0p?
99 biceq r2, r2, #0x0000000f @ clear minor revision number 99 biceq r2, r2, #0x0000000f @ clear minor revision number
100 teqeq r2, r1 @ test for errata affected core and if so... 100 teqeq r2, r1 @ test for errata affected core and if so...
@@ -102,7 +102,7 @@ ENTRY(v7_flush_dcache_louis)
102#endif 102#endif
103 ALT_SMP(mov r3, r3, lsr #20) @ r3 = LoUIS * 2 103 ALT_SMP(mov r3, r3, lsr #20) @ r3 = LoUIS * 2
104 ALT_UP(mov r3, r3, lsr #26) @ r3 = LoUU * 2 104 ALT_UP(mov r3, r3, lsr #26) @ r3 = LoUU * 2
105 moveq pc, lr @ return if level == 0 105 reteq lr @ return if level == 0
106 mov r10, #0 @ r10 (starting level) = 0 106 mov r10, #0 @ r10 (starting level) = 0
107 b flush_levels @ start flushing cache levels 107 b flush_levels @ start flushing cache levels
108ENDPROC(v7_flush_dcache_louis) 108ENDPROC(v7_flush_dcache_louis)
@@ -168,7 +168,7 @@ finished:
168 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr 168 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
169 dsb st 169 dsb st
170 isb 170 isb
171 mov pc, lr 171 ret lr
172ENDPROC(v7_flush_dcache_all) 172ENDPROC(v7_flush_dcache_all)
173 173
174/* 174/*
@@ -191,7 +191,7 @@ ENTRY(v7_flush_kern_cache_all)
191 ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate 191 ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
192 ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} ) 192 ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
193 THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} ) 193 THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
194 mov pc, lr 194 ret lr
195ENDPROC(v7_flush_kern_cache_all) 195ENDPROC(v7_flush_kern_cache_all)
196 196
197 /* 197 /*
@@ -209,7 +209,7 @@ ENTRY(v7_flush_kern_cache_louis)
209 ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate 209 ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
210 ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} ) 210 ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
211 THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} ) 211 THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
212 mov pc, lr 212 ret lr
213ENDPROC(v7_flush_kern_cache_louis) 213ENDPROC(v7_flush_kern_cache_louis)
214 214
215/* 215/*
@@ -235,7 +235,7 @@ ENTRY(v7_flush_user_cache_all)
235 * - we have a VIPT cache. 235 * - we have a VIPT cache.
236 */ 236 */
237ENTRY(v7_flush_user_cache_range) 237ENTRY(v7_flush_user_cache_range)
238 mov pc, lr 238 ret lr
239ENDPROC(v7_flush_user_cache_all) 239ENDPROC(v7_flush_user_cache_all)
240ENDPROC(v7_flush_user_cache_range) 240ENDPROC(v7_flush_user_cache_range)
241 241
@@ -296,7 +296,7 @@ ENTRY(v7_coherent_user_range)
296 ALT_UP(mcr p15, 0, r0, c7, c5, 6) @ invalidate BTB 296 ALT_UP(mcr p15, 0, r0, c7, c5, 6) @ invalidate BTB
297 dsb ishst 297 dsb ishst
298 isb 298 isb
299 mov pc, lr 299 ret lr
300 300
301/* 301/*
302 * Fault handling for the cache operation above. If the virtual address in r0 302 * Fault handling for the cache operation above. If the virtual address in r0
@@ -307,7 +307,7 @@ ENTRY(v7_coherent_user_range)
307 dsb 307 dsb
308#endif 308#endif
309 mov r0, #-EFAULT 309 mov r0, #-EFAULT
310 mov pc, lr 310 ret lr
311 UNWIND(.fnend ) 311 UNWIND(.fnend )
312ENDPROC(v7_coherent_kern_range) 312ENDPROC(v7_coherent_kern_range)
313ENDPROC(v7_coherent_user_range) 313ENDPROC(v7_coherent_user_range)
@@ -336,7 +336,7 @@ ENTRY(v7_flush_kern_dcache_area)
336 cmp r0, r1 336 cmp r0, r1
337 blo 1b 337 blo 1b
338 dsb st 338 dsb st
339 mov pc, lr 339 ret lr
340ENDPROC(v7_flush_kern_dcache_area) 340ENDPROC(v7_flush_kern_dcache_area)
341 341
342/* 342/*
@@ -369,7 +369,7 @@ v7_dma_inv_range:
369 cmp r0, r1 369 cmp r0, r1
370 blo 1b 370 blo 1b
371 dsb st 371 dsb st
372 mov pc, lr 372 ret lr
373ENDPROC(v7_dma_inv_range) 373ENDPROC(v7_dma_inv_range)
374 374
375/* 375/*
@@ -391,7 +391,7 @@ v7_dma_clean_range:
391 cmp r0, r1 391 cmp r0, r1
392 blo 1b 392 blo 1b
393 dsb st 393 dsb st
394 mov pc, lr 394 ret lr
395ENDPROC(v7_dma_clean_range) 395ENDPROC(v7_dma_clean_range)
396 396
397/* 397/*
@@ -413,7 +413,7 @@ ENTRY(v7_dma_flush_range)
413 cmp r0, r1 413 cmp r0, r1
414 blo 1b 414 blo 1b
415 dsb st 415 dsb st
416 mov pc, lr 416 ret lr
417ENDPROC(v7_dma_flush_range) 417ENDPROC(v7_dma_flush_range)
418 418
419/* 419/*
@@ -439,7 +439,7 @@ ENTRY(v7_dma_unmap_area)
439 add r1, r1, r0 439 add r1, r1, r0
440 teq r2, #DMA_TO_DEVICE 440 teq r2, #DMA_TO_DEVICE
441 bne v7_dma_inv_range 441 bne v7_dma_inv_range
442 mov pc, lr 442 ret lr
443ENDPROC(v7_dma_unmap_area) 443ENDPROC(v7_dma_unmap_area)
444 444
445 __INITDATA 445 __INITDATA
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index c508f41a43bc..59424937e52b 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -126,8 +126,8 @@ static const struct prot_bits section_bits[] = {
126 .val = PMD_SECT_USER, 126 .val = PMD_SECT_USER,
127 .set = "USR", 127 .set = "USR",
128 }, { 128 }, {
129 .mask = PMD_SECT_RDONLY, 129 .mask = L_PMD_SECT_RDONLY,
130 .val = PMD_SECT_RDONLY, 130 .val = L_PMD_SECT_RDONLY,
131 .set = "ro", 131 .set = "ro",
132 .clear = "RW", 132 .clear = "RW",
133#elif __LINUX_ARM_ARCH__ >= 6 133#elif __LINUX_ARM_ARCH__ >= 6
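This dump.c change follows from "ARM: 8108/1" and "ARM: 8109/1" in the merge above: on LPAE the read-only state of kernel section mappings is tracked in a Linux-specific software bit (L_PMD_SECT_RDONLY) rather than the hardware PMD_SECT_RDONLY, and the write-permission tests are phrased through isset/isclear helpers. A simplified sketch of the idea; the in-tree macros are more careful about 64-bit pte values under LPAE:

	#define pte_isset(pte, val)	(!!(pte_val(pte) & (val)))
	#define pte_isclear(pte, val)	(!(pte_val(pte) & (val)))
	/* pmd_isset()/pmd_isclear() are the analogous pmd_val() forms */

	#define pte_write(pte)		pte_isclear(pte, L_PTE_RDONLY)
	#define pmd_write(pmd)		pmd_isclear(pmd, L_PMD_SECT_RDONLY)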
diff --git a/arch/arm/mm/l2c-l2x0-resume.S b/arch/arm/mm/l2c-l2x0-resume.S
index 99b05f21a59a..fda415e4ca8f 100644
--- a/arch/arm/mm/l2c-l2x0-resume.S
+++ b/arch/arm/mm/l2c-l2x0-resume.S
@@ -6,6 +6,7 @@

6 * This code can only be used if you are running in the secure world. 6 * This code can only be used if you are running in the secure world.
7 */ 7 */
8#include <linux/linkage.h> 8#include <linux/linkage.h>
9#include <asm/assembler.h>
9#include <asm/hardware/cache-l2x0.h> 10#include <asm/hardware/cache-l2x0.h>
10 11
11 .text 12 .text
@@ -27,7 +28,7 @@ ENTRY(l2c310_early_resume)
27 28
28 @ Check that the address has been initialised 29 @ Check that the address has been initialised
29 teq r1, #0 30 teq r1, #0
30 moveq pc, lr 31 reteq lr
31 32
32 @ The prefetch and power control registers are revision dependent 33 @ The prefetch and power control registers are revision dependent
33 @ and can be written whether or not the L2 cache is enabled 34 @ and can be written whether or not the L2 cache is enabled
@@ -41,7 +42,7 @@ ENTRY(l2c310_early_resume)
41 @ Don't setup the L2 cache if it is already enabled 42 @ Don't setup the L2 cache if it is already enabled
42 ldr r0, [r1, #L2X0_CTRL] 43 ldr r0, [r1, #L2X0_CTRL]
43 tst r0, #L2X0_CTRL_EN 44 tst r0, #L2X0_CTRL_EN
44 movne pc, lr 45 retne lr
45 46
46 str r3, [r1, #L310_TAG_LATENCY_CTRL] 47 str r3, [r1, #L310_TAG_LATENCY_CTRL]
47 str r4, [r1, #L310_DATA_LATENCY_CTRL] 48 str r4, [r1, #L310_DATA_LATENCY_CTRL]
@@ -51,7 +52,7 @@ ENTRY(l2c310_early_resume)
51 str r2, [r1, #L2X0_AUX_CTRL] 52 str r2, [r1, #L2X0_AUX_CTRL]
52 mov r9, #L2X0_CTRL_EN 53 mov r9, #L2X0_CTRL_EN
53 str r9, [r1, #L2X0_CTRL] 54 str r9, [r1, #L2X0_CTRL]
54 mov pc, lr 55 ret lr
55ENDPROC(l2c310_early_resume) 56ENDPROC(l2c310_early_resume)
56 57
57 .align 58 .align
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 6e3ba8d112a2..8348ed6b2efe 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1434,23 +1434,64 @@ void __init early_paging_init(const struct machine_desc *mdesc,
1434 dsb(ishst); 1434 dsb(ishst);
1435 isb(); 1435 isb();
1436 1436
1437 /* remap level 1 table */ 1437 /*
1438 * FIXME: This code is not architecturally compliant: we modify
1439 * the mappings in-place, indeed while they are in use by this
1440 * very same code. This may lead to unpredictable behaviour of
1441 * the CPU.
1442 *
1443 * Even modifying the mappings in a separate page table does
1444 * not resolve this.
1445 *
1446 * The architecture strongly recommends that when a mapping is
1448 * changed, it is changed by first going via an invalid
1448 * mapping and back to the new mapping. This is to ensure that
1449 * no TLB conflicts (caused by the TLB having more than one TLB
1450 * entry match a translation) can occur. However, doing that
1451 * here will result in unmapping the code we are running.
1452 */
1453 pr_warn("WARNING: unsafe modification of in-place page tables - tainting kernel\n");
1454 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
1455
1456 /*
1457 * Remap level 1 table. This changes the physical addresses
1458 * used to refer to the level 2 page tables to the high
1459 * physical address alias, leaving everything else the same.
1460 */
1438 for (i = 0; i < PTRS_PER_PGD; pud0++, i++) { 1461 for (i = 0; i < PTRS_PER_PGD; pud0++, i++) {
1439 set_pud(pud0, 1462 set_pud(pud0,
1440 __pud(__pa(pmd0) | PMD_TYPE_TABLE | L_PGD_SWAPPER)); 1463 __pud(__pa(pmd0) | PMD_TYPE_TABLE | L_PGD_SWAPPER));
1441 pmd0 += PTRS_PER_PMD; 1464 pmd0 += PTRS_PER_PMD;
1442 } 1465 }
1443 1466
1444 /* remap pmds for kernel mapping */ 1467 /*
1468 * Remap the level 2 table, pointing the mappings at the high
1469 * physical address alias of these pages.
1470 */
1445 phys = __pa(map_start); 1471 phys = __pa(map_start);
1446 do { 1472 do {
1447 *pmdk++ = __pmd(phys | pmdprot); 1473 *pmdk++ = __pmd(phys | pmdprot);
1448 phys += PMD_SIZE; 1474 phys += PMD_SIZE;
1449 } while (phys < map_end); 1475 } while (phys < map_end);
1450 1476
1477 /*
1478 * Ensure that the above updates are flushed out of the cache.
1479 * This is not strictly correct; on a system where the caches
1480 * are coherent with each other, but the MMU page table walks
1481 * may not be coherent, flush_cache_all() may be a no-op, and
1482 * this will fail.
1483 */
1451 flush_cache_all(); 1484 flush_cache_all();
1485
1486 /*
1487 * Re-write the TTBR values to point them at the high physical
1488 * alias of the page tables. We expect __va() will work on
1489 * cpu_get_pgd(), which returns the value of TTBR0.
1490 */
1452 cpu_switch_mm(pgd0, &init_mm); 1491 cpu_switch_mm(pgd0, &init_mm);
1453 cpu_set_ttbr(1, __pa(pgd0) + TTBR1_OFFSET); 1492 cpu_set_ttbr(1, __pa(pgd0) + TTBR1_OFFSET);
1493
1494 /* Finally flush any stale TLB values. */
1454 local_flush_bp_all(); 1495 local_flush_bp_all();
1455 local_flush_tlb_all(); 1496 local_flush_tlb_all();
1456} 1497}
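Stripped of the new commentary, the switch-over sequence that early_paging_init() now documents is short. A condensed restatement (the remap_* names are illustrative shorthand for the two loops above; everything else is the kernel's own API):

	remap_level1_table();		/* PGD entries -> high-alias PMD tables */
	remap_kernel_pmds();		/* kernel PMD sections -> high alias */
	flush_cache_all();		/* push the table updates past the caches */
	cpu_switch_mm(pgd0, &init_mm);	/* reload TTBR0 through the high alias */
	cpu_set_ttbr(1, __pa(pgd0) + TTBR1_OFFSET);
	local_flush_bp_all();		/* finally, drop stale predictor state */
	local_flush_tlb_all();		/* and stale TLB entries */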
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index d1a2d05971e0..86ee5d47ce3c 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -73,7 +73,7 @@
73 * cpu_arm1020_proc_init() 73 * cpu_arm1020_proc_init()
74 */ 74 */
75ENTRY(cpu_arm1020_proc_init) 75ENTRY(cpu_arm1020_proc_init)
76 mov pc, lr 76 ret lr
77 77
78/* 78/*
79 * cpu_arm1020_proc_fin() 79 * cpu_arm1020_proc_fin()
@@ -83,7 +83,7 @@ ENTRY(cpu_arm1020_proc_fin)
83 bic r0, r0, #0x1000 @ ...i............ 83 bic r0, r0, #0x1000 @ ...i............
84 bic r0, r0, #0x000e @ ............wca. 84 bic r0, r0, #0x000e @ ............wca.
85 mcr p15, 0, r0, c1, c0, 0 @ disable caches 85 mcr p15, 0, r0, c1, c0, 0 @ disable caches
86 mov pc, lr 86 ret lr
87 87
88/* 88/*
89 * cpu_arm1020_reset(loc) 89 * cpu_arm1020_reset(loc)
@@ -107,7 +107,7 @@ ENTRY(cpu_arm1020_reset)
107 bic ip, ip, #0x000f @ ............wcam 107 bic ip, ip, #0x000f @ ............wcam
108 bic ip, ip, #0x1100 @ ...i...s........ 108 bic ip, ip, #0x1100 @ ...i...s........
109 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 109 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
110 mov pc, r0 110 ret r0
111ENDPROC(cpu_arm1020_reset) 111ENDPROC(cpu_arm1020_reset)
112 .popsection 112 .popsection
113 113
@@ -117,7 +117,7 @@ ENDPROC(cpu_arm1020_reset)
117 .align 5 117 .align 5
118ENTRY(cpu_arm1020_do_idle) 118ENTRY(cpu_arm1020_do_idle)
119 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 119 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
120 mov pc, lr 120 ret lr
121 121
122/* ================================= CACHE ================================ */ 122/* ================================= CACHE ================================ */
123 123
@@ -133,7 +133,7 @@ ENTRY(arm1020_flush_icache_all)
133 mov r0, #0 133 mov r0, #0
134 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 134 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
135#endif 135#endif
136 mov pc, lr 136 ret lr
137ENDPROC(arm1020_flush_icache_all) 137ENDPROC(arm1020_flush_icache_all)
138 138
139/* 139/*
@@ -169,7 +169,7 @@ __flush_whole_cache:
169 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 169 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
170#endif 170#endif
171 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 171 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
172 mov pc, lr 172 ret lr
173 173
174/* 174/*
175 * flush_user_cache_range(start, end, flags) 175 * flush_user_cache_range(start, end, flags)
@@ -200,7 +200,7 @@ ENTRY(arm1020_flush_user_cache_range)
200 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 200 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
201#endif 201#endif
202 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 202 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
203 mov pc, lr 203 ret lr
204 204
205/* 205/*
206 * coherent_kern_range(start, end) 206 * coherent_kern_range(start, end)
@@ -242,7 +242,7 @@ ENTRY(arm1020_coherent_user_range)
242 blo 1b 242 blo 1b
243 mcr p15, 0, ip, c7, c10, 4 @ drain WB 243 mcr p15, 0, ip, c7, c10, 4 @ drain WB
244 mov r0, #0 244 mov r0, #0
245 mov pc, lr 245 ret lr
246 246
247/* 247/*
248 * flush_kern_dcache_area(void *addr, size_t size) 248 * flush_kern_dcache_area(void *addr, size_t size)
@@ -264,7 +264,7 @@ ENTRY(arm1020_flush_kern_dcache_area)
264 blo 1b 264 blo 1b
265#endif 265#endif
266 mcr p15, 0, ip, c7, c10, 4 @ drain WB 266 mcr p15, 0, ip, c7, c10, 4 @ drain WB
267 mov pc, lr 267 ret lr
268 268
269/* 269/*
270 * dma_inv_range(start, end) 270 * dma_inv_range(start, end)
@@ -297,7 +297,7 @@ arm1020_dma_inv_range:
297 blo 1b 297 blo 1b
298#endif 298#endif
299 mcr p15, 0, ip, c7, c10, 4 @ drain WB 299 mcr p15, 0, ip, c7, c10, 4 @ drain WB
300 mov pc, lr 300 ret lr
301 301
302/* 302/*
303 * dma_clean_range(start, end) 303 * dma_clean_range(start, end)
@@ -320,7 +320,7 @@ arm1020_dma_clean_range:
320 blo 1b 320 blo 1b
321#endif 321#endif
322 mcr p15, 0, ip, c7, c10, 4 @ drain WB 322 mcr p15, 0, ip, c7, c10, 4 @ drain WB
323 mov pc, lr 323 ret lr
324 324
325/* 325/*
326 * dma_flush_range(start, end) 326 * dma_flush_range(start, end)
@@ -342,7 +342,7 @@ ENTRY(arm1020_dma_flush_range)
342 blo 1b 342 blo 1b
343#endif 343#endif
344 mcr p15, 0, ip, c7, c10, 4 @ drain WB 344 mcr p15, 0, ip, c7, c10, 4 @ drain WB
345 mov pc, lr 345 ret lr
346 346
347/* 347/*
348 * dma_map_area(start, size, dir) 348 * dma_map_area(start, size, dir)
@@ -365,7 +365,7 @@ ENDPROC(arm1020_dma_map_area)
365 * - dir - DMA direction 365 * - dir - DMA direction
366 */ 366 */
367ENTRY(arm1020_dma_unmap_area) 367ENTRY(arm1020_dma_unmap_area)
368 mov pc, lr 368 ret lr
369ENDPROC(arm1020_dma_unmap_area) 369ENDPROC(arm1020_dma_unmap_area)
370 370
371 .globl arm1020_flush_kern_cache_louis 371 .globl arm1020_flush_kern_cache_louis
@@ -384,7 +384,7 @@ ENTRY(cpu_arm1020_dcache_clean_area)
384 subs r1, r1, #CACHE_DLINESIZE 384 subs r1, r1, #CACHE_DLINESIZE
385 bhi 1b 385 bhi 1b
386#endif 386#endif
387 mov pc, lr 387 ret lr
388 388
389/* =============================== PageTable ============================== */ 389/* =============================== PageTable ============================== */
390 390
@@ -423,7 +423,7 @@ ENTRY(cpu_arm1020_switch_mm)
423 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 423 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
424 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs 424 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
425#endif /* CONFIG_MMU */ 425#endif /* CONFIG_MMU */
426 mov pc, lr 426 ret lr
427 427
428/* 428/*
429 * cpu_arm1020_set_pte(ptep, pte) 429 * cpu_arm1020_set_pte(ptep, pte)
@@ -441,7 +441,7 @@ ENTRY(cpu_arm1020_set_pte_ext)
441#endif 441#endif
442 mcr p15, 0, r0, c7, c10, 4 @ drain WB 442 mcr p15, 0, r0, c7, c10, 4 @ drain WB
443#endif /* CONFIG_MMU */ 443#endif /* CONFIG_MMU */
444 mov pc, lr 444 ret lr
445 445
446 .type __arm1020_setup, #function 446 .type __arm1020_setup, #function
447__arm1020_setup: 447__arm1020_setup:
@@ -460,7 +460,7 @@ __arm1020_setup:
460#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN 460#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
461 orr r0, r0, #0x4000 @ .R.. .... .... .... 461 orr r0, r0, #0x4000 @ .R.. .... .... ....
462#endif 462#endif
463 mov pc, lr 463 ret lr
464 .size __arm1020_setup, . - __arm1020_setup 464 .size __arm1020_setup, . - __arm1020_setup
465 465
466 /* 466 /*
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 9d89405c3d03..a6331d78601f 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -73,7 +73,7 @@
73 * cpu_arm1020e_proc_init() 73 * cpu_arm1020e_proc_init()
74 */ 74 */
75ENTRY(cpu_arm1020e_proc_init) 75ENTRY(cpu_arm1020e_proc_init)
76 mov pc, lr 76 ret lr
77 77
78/* 78/*
79 * cpu_arm1020e_proc_fin() 79 * cpu_arm1020e_proc_fin()
@@ -83,7 +83,7 @@ ENTRY(cpu_arm1020e_proc_fin)
83 bic r0, r0, #0x1000 @ ...i............ 83 bic r0, r0, #0x1000 @ ...i............
84 bic r0, r0, #0x000e @ ............wca. 84 bic r0, r0, #0x000e @ ............wca.
85 mcr p15, 0, r0, c1, c0, 0 @ disable caches 85 mcr p15, 0, r0, c1, c0, 0 @ disable caches
86 mov pc, lr 86 ret lr
87 87
88/* 88/*
89 * cpu_arm1020e_reset(loc) 89 * cpu_arm1020e_reset(loc)
@@ -107,7 +107,7 @@ ENTRY(cpu_arm1020e_reset)
107 bic ip, ip, #0x000f @ ............wcam 107 bic ip, ip, #0x000f @ ............wcam
108 bic ip, ip, #0x1100 @ ...i...s........ 108 bic ip, ip, #0x1100 @ ...i...s........
109 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 109 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
110 mov pc, r0 110 ret r0
111ENDPROC(cpu_arm1020e_reset) 111ENDPROC(cpu_arm1020e_reset)
112 .popsection 112 .popsection
113 113
@@ -117,7 +117,7 @@ ENDPROC(cpu_arm1020e_reset)
117 .align 5 117 .align 5
118ENTRY(cpu_arm1020e_do_idle) 118ENTRY(cpu_arm1020e_do_idle)
119 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 119 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
120 mov pc, lr 120 ret lr
121 121
122/* ================================= CACHE ================================ */ 122/* ================================= CACHE ================================ */
123 123
@@ -133,7 +133,7 @@ ENTRY(arm1020e_flush_icache_all)
133 mov r0, #0 133 mov r0, #0
134 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 134 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
135#endif 135#endif
136 mov pc, lr 136 ret lr
137ENDPROC(arm1020e_flush_icache_all) 137ENDPROC(arm1020e_flush_icache_all)
138 138
139/* 139/*
@@ -168,7 +168,7 @@ __flush_whole_cache:
168 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 168 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
169#endif 169#endif
170 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 170 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
171 mov pc, lr 171 ret lr
172 172
173/* 173/*
174 * flush_user_cache_range(start, end, flags) 174 * flush_user_cache_range(start, end, flags)
@@ -197,7 +197,7 @@ ENTRY(arm1020e_flush_user_cache_range)
197 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 197 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
198#endif 198#endif
199 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 199 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
200 mov pc, lr 200 ret lr
201 201
202/* 202/*
203 * coherent_kern_range(start, end) 203 * coherent_kern_range(start, end)
@@ -236,7 +236,7 @@ ENTRY(arm1020e_coherent_user_range)
236 blo 1b 236 blo 1b
237 mcr p15, 0, ip, c7, c10, 4 @ drain WB 237 mcr p15, 0, ip, c7, c10, 4 @ drain WB
238 mov r0, #0 238 mov r0, #0
239 mov pc, lr 239 ret lr
240 240
241/* 241/*
242 * flush_kern_dcache_area(void *addr, size_t size) 242 * flush_kern_dcache_area(void *addr, size_t size)
@@ -257,7 +257,7 @@ ENTRY(arm1020e_flush_kern_dcache_area)
257 blo 1b 257 blo 1b
258#endif 258#endif
259 mcr p15, 0, ip, c7, c10, 4 @ drain WB 259 mcr p15, 0, ip, c7, c10, 4 @ drain WB
260 mov pc, lr 260 ret lr
261 261
262/* 262/*
263 * dma_inv_range(start, end) 263 * dma_inv_range(start, end)
@@ -286,7 +286,7 @@ arm1020e_dma_inv_range:
286 blo 1b 286 blo 1b
287#endif 287#endif
288 mcr p15, 0, ip, c7, c10, 4 @ drain WB 288 mcr p15, 0, ip, c7, c10, 4 @ drain WB
289 mov pc, lr 289 ret lr
290 290
291/* 291/*
292 * dma_clean_range(start, end) 292 * dma_clean_range(start, end)
@@ -308,7 +308,7 @@ arm1020e_dma_clean_range:
308 blo 1b 308 blo 1b
309#endif 309#endif
310 mcr p15, 0, ip, c7, c10, 4 @ drain WB 310 mcr p15, 0, ip, c7, c10, 4 @ drain WB
311 mov pc, lr 311 ret lr
312 312
313/* 313/*
314 * dma_flush_range(start, end) 314 * dma_flush_range(start, end)
@@ -328,7 +328,7 @@ ENTRY(arm1020e_dma_flush_range)
328 blo 1b 328 blo 1b
329#endif 329#endif
330 mcr p15, 0, ip, c7, c10, 4 @ drain WB 330 mcr p15, 0, ip, c7, c10, 4 @ drain WB
331 mov pc, lr 331 ret lr
332 332
333/* 333/*
334 * dma_map_area(start, size, dir) 334 * dma_map_area(start, size, dir)
@@ -351,7 +351,7 @@ ENDPROC(arm1020e_dma_map_area)
351 * - dir - DMA direction 351 * - dir - DMA direction
352 */ 352 */
353ENTRY(arm1020e_dma_unmap_area) 353ENTRY(arm1020e_dma_unmap_area)
354 mov pc, lr 354 ret lr
355ENDPROC(arm1020e_dma_unmap_area) 355ENDPROC(arm1020e_dma_unmap_area)
356 356
357 .globl arm1020e_flush_kern_cache_louis 357 .globl arm1020e_flush_kern_cache_louis
@@ -369,7 +369,7 @@ ENTRY(cpu_arm1020e_dcache_clean_area)
369 subs r1, r1, #CACHE_DLINESIZE 369 subs r1, r1, #CACHE_DLINESIZE
370 bhi 1b 370 bhi 1b
371#endif 371#endif
372 mov pc, lr 372 ret lr
373 373
374/* =============================== PageTable ============================== */ 374/* =============================== PageTable ============================== */
375 375
@@ -407,7 +407,7 @@ ENTRY(cpu_arm1020e_switch_mm)
407 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 407 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
408 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs 408 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
409#endif 409#endif
410 mov pc, lr 410 ret lr
411 411
412/* 412/*
413 * cpu_arm1020e_set_pte(ptep, pte) 413 * cpu_arm1020e_set_pte(ptep, pte)
@@ -423,7 +423,7 @@ ENTRY(cpu_arm1020e_set_pte_ext)
423 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 423 mcr p15, 0, r0, c7, c10, 1 @ clean D entry
424#endif 424#endif
425#endif /* CONFIG_MMU */ 425#endif /* CONFIG_MMU */
426 mov pc, lr 426 ret lr
427 427
428 .type __arm1020e_setup, #function 428 .type __arm1020e_setup, #function
429__arm1020e_setup: 429__arm1020e_setup:
@@ -441,7 +441,7 @@ __arm1020e_setup:
441#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN 441#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
442 orr r0, r0, #0x4000 @ .R.. .... .... .... 442 orr r0, r0, #0x4000 @ .R.. .... .... ....
443#endif 443#endif
444 mov pc, lr 444 ret lr
445 .size __arm1020e_setup, . - __arm1020e_setup 445 .size __arm1020e_setup, . - __arm1020e_setup
446 446
447 /* 447 /*
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 6f01a0ae3b30..a126b7a59928 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -62,7 +62,7 @@
62 * cpu_arm1022_proc_init() 62 * cpu_arm1022_proc_init()
63 */ 63 */
64ENTRY(cpu_arm1022_proc_init) 64ENTRY(cpu_arm1022_proc_init)
65 mov pc, lr 65 ret lr
66 66
67/* 67/*
68 * cpu_arm1022_proc_fin() 68 * cpu_arm1022_proc_fin()
@@ -72,7 +72,7 @@ ENTRY(cpu_arm1022_proc_fin)
72 bic r0, r0, #0x1000 @ ...i............ 72 bic r0, r0, #0x1000 @ ...i............
73 bic r0, r0, #0x000e @ ............wca. 73 bic r0, r0, #0x000e @ ............wca.
74 mcr p15, 0, r0, c1, c0, 0 @ disable caches 74 mcr p15, 0, r0, c1, c0, 0 @ disable caches
75 mov pc, lr 75 ret lr
76 76
77/* 77/*
78 * cpu_arm1022_reset(loc) 78 * cpu_arm1022_reset(loc)
@@ -96,7 +96,7 @@ ENTRY(cpu_arm1022_reset)
96 bic ip, ip, #0x000f @ ............wcam 96 bic ip, ip, #0x000f @ ............wcam
97 bic ip, ip, #0x1100 @ ...i...s........ 97 bic ip, ip, #0x1100 @ ...i...s........
98 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 98 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
99 mov pc, r0 99 ret r0
100ENDPROC(cpu_arm1022_reset) 100ENDPROC(cpu_arm1022_reset)
101 .popsection 101 .popsection
102 102
@@ -106,7 +106,7 @@ ENDPROC(cpu_arm1022_reset)
106 .align 5 106 .align 5
107ENTRY(cpu_arm1022_do_idle) 107ENTRY(cpu_arm1022_do_idle)
108 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 108 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
109 mov pc, lr 109 ret lr
110 110
111/* ================================= CACHE ================================ */ 111/* ================================= CACHE ================================ */
112 112
@@ -122,7 +122,7 @@ ENTRY(arm1022_flush_icache_all)
122 mov r0, #0 122 mov r0, #0
123 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 123 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
124#endif 124#endif
125 mov pc, lr 125 ret lr
126ENDPROC(arm1022_flush_icache_all) 126ENDPROC(arm1022_flush_icache_all)
127 127
128/* 128/*
@@ -156,7 +156,7 @@ __flush_whole_cache:
156 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 156 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
157#endif 157#endif
158 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 158 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
159 mov pc, lr 159 ret lr
160 160
161/* 161/*
162 * flush_user_cache_range(start, end, flags) 162 * flush_user_cache_range(start, end, flags)
@@ -185,7 +185,7 @@ ENTRY(arm1022_flush_user_cache_range)
185 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 185 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
186#endif 186#endif
187 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 187 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
188 mov pc, lr 188 ret lr
189 189
190/* 190/*
191 * coherent_kern_range(start, end) 191 * coherent_kern_range(start, end)
@@ -225,7 +225,7 @@ ENTRY(arm1022_coherent_user_range)
225 blo 1b 225 blo 1b
226 mcr p15, 0, ip, c7, c10, 4 @ drain WB 226 mcr p15, 0, ip, c7, c10, 4 @ drain WB
227 mov r0, #0 227 mov r0, #0
228 mov pc, lr 228 ret lr
229 229
230/* 230/*
231 * flush_kern_dcache_area(void *addr, size_t size) 231 * flush_kern_dcache_area(void *addr, size_t size)
@@ -246,7 +246,7 @@ ENTRY(arm1022_flush_kern_dcache_area)
246 blo 1b 246 blo 1b
247#endif 247#endif
248 mcr p15, 0, ip, c7, c10, 4 @ drain WB 248 mcr p15, 0, ip, c7, c10, 4 @ drain WB
249 mov pc, lr 249 ret lr
250 250
251/* 251/*
252 * dma_inv_range(start, end) 252 * dma_inv_range(start, end)
@@ -275,7 +275,7 @@ arm1022_dma_inv_range:
275 blo 1b 275 blo 1b
276#endif 276#endif
277 mcr p15, 0, ip, c7, c10, 4 @ drain WB 277 mcr p15, 0, ip, c7, c10, 4 @ drain WB
278 mov pc, lr 278 ret lr
279 279
280/* 280/*
281 * dma_clean_range(start, end) 281 * dma_clean_range(start, end)
@@ -297,7 +297,7 @@ arm1022_dma_clean_range:
297 blo 1b 297 blo 1b
298#endif 298#endif
299 mcr p15, 0, ip, c7, c10, 4 @ drain WB 299 mcr p15, 0, ip, c7, c10, 4 @ drain WB
300 mov pc, lr 300 ret lr
301 301
302/* 302/*
303 * dma_flush_range(start, end) 303 * dma_flush_range(start, end)
@@ -317,7 +317,7 @@ ENTRY(arm1022_dma_flush_range)
317 blo 1b 317 blo 1b
318#endif 318#endif
319 mcr p15, 0, ip, c7, c10, 4 @ drain WB 319 mcr p15, 0, ip, c7, c10, 4 @ drain WB
320 mov pc, lr 320 ret lr
321 321
322/* 322/*
323 * dma_map_area(start, size, dir) 323 * dma_map_area(start, size, dir)
@@ -340,7 +340,7 @@ ENDPROC(arm1022_dma_map_area)
340 * - dir - DMA direction 340 * - dir - DMA direction
341 */ 341 */
342ENTRY(arm1022_dma_unmap_area) 342ENTRY(arm1022_dma_unmap_area)
343 mov pc, lr 343 ret lr
344ENDPROC(arm1022_dma_unmap_area) 344ENDPROC(arm1022_dma_unmap_area)
345 345
346 .globl arm1022_flush_kern_cache_louis 346 .globl arm1022_flush_kern_cache_louis
@@ -358,7 +358,7 @@ ENTRY(cpu_arm1022_dcache_clean_area)
358 subs r1, r1, #CACHE_DLINESIZE 358 subs r1, r1, #CACHE_DLINESIZE
359 bhi 1b 359 bhi 1b
360#endif 360#endif
361 mov pc, lr 361 ret lr
362 362
363/* =============================== PageTable ============================== */ 363/* =============================== PageTable ============================== */
364 364
@@ -389,7 +389,7 @@ ENTRY(cpu_arm1022_switch_mm)
389 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 389 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
390 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs 390 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
391#endif 391#endif
392 mov pc, lr 392 ret lr
393 393
394/* 394/*
395 * cpu_arm1022_set_pte_ext(ptep, pte, ext) 395 * cpu_arm1022_set_pte_ext(ptep, pte, ext)
@@ -405,7 +405,7 @@ ENTRY(cpu_arm1022_set_pte_ext)
405 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 405 mcr p15, 0, r0, c7, c10, 1 @ clean D entry
406#endif 406#endif
407#endif /* CONFIG_MMU */ 407#endif /* CONFIG_MMU */
408 mov pc, lr 408 ret lr
409 409
410 .type __arm1022_setup, #function 410 .type __arm1022_setup, #function
411__arm1022_setup: 411__arm1022_setup:
@@ -423,7 +423,7 @@ __arm1022_setup:
423#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN 423#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
424 orr r0, r0, #0x4000 @ .R.............. 424 orr r0, r0, #0x4000 @ .R..............
425#endif 425#endif
426 mov pc, lr 426 ret lr
427 .size __arm1022_setup, . - __arm1022_setup 427 .size __arm1022_setup, . - __arm1022_setup
428 428
429 /* 429 /*
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 4799a24b43e6..fc294067e977 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -62,7 +62,7 @@
62 * cpu_arm1026_proc_init() 62 * cpu_arm1026_proc_init()
63 */ 63 */
64ENTRY(cpu_arm1026_proc_init) 64ENTRY(cpu_arm1026_proc_init)
65 mov pc, lr 65 ret lr
66 66
67/* 67/*
68 * cpu_arm1026_proc_fin() 68 * cpu_arm1026_proc_fin()
@@ -72,7 +72,7 @@ ENTRY(cpu_arm1026_proc_fin)
72 bic r0, r0, #0x1000 @ ...i............ 72 bic r0, r0, #0x1000 @ ...i............
73 bic r0, r0, #0x000e @ ............wca. 73 bic r0, r0, #0x000e @ ............wca.
74 mcr p15, 0, r0, c1, c0, 0 @ disable caches 74 mcr p15, 0, r0, c1, c0, 0 @ disable caches
75 mov pc, lr 75 ret lr
76 76
77/* 77/*
78 * cpu_arm1026_reset(loc) 78 * cpu_arm1026_reset(loc)
@@ -96,7 +96,7 @@ ENTRY(cpu_arm1026_reset)
96 bic ip, ip, #0x000f @ ............wcam 96 bic ip, ip, #0x000f @ ............wcam
97 bic ip, ip, #0x1100 @ ...i...s........ 97 bic ip, ip, #0x1100 @ ...i...s........
98 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 98 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
99 mov pc, r0 99 ret r0
100ENDPROC(cpu_arm1026_reset) 100ENDPROC(cpu_arm1026_reset)
101 .popsection 101 .popsection
102 102
@@ -106,7 +106,7 @@ ENDPROC(cpu_arm1026_reset)
106 .align 5 106 .align 5
107ENTRY(cpu_arm1026_do_idle) 107ENTRY(cpu_arm1026_do_idle)
108 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 108 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
109 mov pc, lr 109 ret lr
110 110
111/* ================================= CACHE ================================ */ 111/* ================================= CACHE ================================ */
112 112
@@ -122,7 +122,7 @@ ENTRY(arm1026_flush_icache_all)
122 mov r0, #0 122 mov r0, #0
123 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 123 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
124#endif 124#endif
125 mov pc, lr 125 ret lr
126ENDPROC(arm1026_flush_icache_all) 126ENDPROC(arm1026_flush_icache_all)
127 127
128/* 128/*
@@ -151,7 +151,7 @@ __flush_whole_cache:
151 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 151 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
152#endif 152#endif
153 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 153 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
154 mov pc, lr 154 ret lr
155 155
156/* 156/*
157 * flush_user_cache_range(start, end, flags) 157 * flush_user_cache_range(start, end, flags)
@@ -180,7 +180,7 @@ ENTRY(arm1026_flush_user_cache_range)
180 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 180 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
181#endif 181#endif
182 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 182 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
183 mov pc, lr 183 ret lr
184 184
185/* 185/*
186 * coherent_kern_range(start, end) 186 * coherent_kern_range(start, end)
@@ -219,7 +219,7 @@ ENTRY(arm1026_coherent_user_range)
219 blo 1b 219 blo 1b
220 mcr p15, 0, ip, c7, c10, 4 @ drain WB 220 mcr p15, 0, ip, c7, c10, 4 @ drain WB
221 mov r0, #0 221 mov r0, #0
222 mov pc, lr 222 ret lr
223 223
224/* 224/*
225 * flush_kern_dcache_area(void *addr, size_t size) 225 * flush_kern_dcache_area(void *addr, size_t size)
@@ -240,7 +240,7 @@ ENTRY(arm1026_flush_kern_dcache_area)
240 blo 1b 240 blo 1b
241#endif 241#endif
242 mcr p15, 0, ip, c7, c10, 4 @ drain WB 242 mcr p15, 0, ip, c7, c10, 4 @ drain WB
243 mov pc, lr 243 ret lr
244 244
245/* 245/*
246 * dma_inv_range(start, end) 246 * dma_inv_range(start, end)
@@ -269,7 +269,7 @@ arm1026_dma_inv_range:
269 blo 1b 269 blo 1b
270#endif 270#endif
271 mcr p15, 0, ip, c7, c10, 4 @ drain WB 271 mcr p15, 0, ip, c7, c10, 4 @ drain WB
272 mov pc, lr 272 ret lr
273 273
274/* 274/*
275 * dma_clean_range(start, end) 275 * dma_clean_range(start, end)
@@ -291,7 +291,7 @@ arm1026_dma_clean_range:
291 blo 1b 291 blo 1b
292#endif 292#endif
293 mcr p15, 0, ip, c7, c10, 4 @ drain WB 293 mcr p15, 0, ip, c7, c10, 4 @ drain WB
294 mov pc, lr 294 ret lr
295 295
296/* 296/*
297 * dma_flush_range(start, end) 297 * dma_flush_range(start, end)
@@ -311,7 +311,7 @@ ENTRY(arm1026_dma_flush_range)
311 blo 1b 311 blo 1b
312#endif 312#endif
313 mcr p15, 0, ip, c7, c10, 4 @ drain WB 313 mcr p15, 0, ip, c7, c10, 4 @ drain WB
314 mov pc, lr 314 ret lr
315 315
316/* 316/*
317 * dma_map_area(start, size, dir) 317 * dma_map_area(start, size, dir)
@@ -334,7 +334,7 @@ ENDPROC(arm1026_dma_map_area)
334 * - dir - DMA direction 334 * - dir - DMA direction
335 */ 335 */
336ENTRY(arm1026_dma_unmap_area) 336ENTRY(arm1026_dma_unmap_area)
337 mov pc, lr 337 ret lr
338ENDPROC(arm1026_dma_unmap_area) 338ENDPROC(arm1026_dma_unmap_area)
339 339
340 .globl arm1026_flush_kern_cache_louis 340 .globl arm1026_flush_kern_cache_louis
@@ -352,7 +352,7 @@ ENTRY(cpu_arm1026_dcache_clean_area)
352 subs r1, r1, #CACHE_DLINESIZE 352 subs r1, r1, #CACHE_DLINESIZE
353 bhi 1b 353 bhi 1b
354#endif 354#endif
355 mov pc, lr 355 ret lr
356 356
357/* =============================== PageTable ============================== */ 357/* =============================== PageTable ============================== */
358 358
@@ -378,7 +378,7 @@ ENTRY(cpu_arm1026_switch_mm)
378 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 378 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
379 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs 379 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
380#endif 380#endif
381 mov pc, lr 381 ret lr
382 382
383/* 383/*
384 * cpu_arm1026_set_pte_ext(ptep, pte, ext) 384 * cpu_arm1026_set_pte_ext(ptep, pte, ext)
@@ -394,7 +394,7 @@ ENTRY(cpu_arm1026_set_pte_ext)
394 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 394 mcr p15, 0, r0, c7, c10, 1 @ clean D entry
395#endif 395#endif
396#endif /* CONFIG_MMU */ 396#endif /* CONFIG_MMU */
397 mov pc, lr 397 ret lr
398 398
399 .type __arm1026_setup, #function 399 .type __arm1026_setup, #function
400__arm1026_setup: 400__arm1026_setup:
@@ -417,7 +417,7 @@ __arm1026_setup:
417#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN 417#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
418 orr r0, r0, #0x4000 @ .R.. .... .... .... 418 orr r0, r0, #0x4000 @ .R.. .... .... ....
419#endif 419#endif
420 mov pc, lr 420 ret lr
421 .size __arm1026_setup, . - __arm1026_setup 421 .size __arm1026_setup, . - __arm1026_setup
422 422
423 /* 423 /*
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index d42c37f9f5bc..2baa66b3ac9b 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -51,14 +51,14 @@
51 */ 51 */
52ENTRY(cpu_arm720_dcache_clean_area) 52ENTRY(cpu_arm720_dcache_clean_area)
53ENTRY(cpu_arm720_proc_init) 53ENTRY(cpu_arm720_proc_init)
54 mov pc, lr 54 ret lr
55 55
56ENTRY(cpu_arm720_proc_fin) 56ENTRY(cpu_arm720_proc_fin)
57 mrc p15, 0, r0, c1, c0, 0 57 mrc p15, 0, r0, c1, c0, 0
58 bic r0, r0, #0x1000 @ ...i............ 58 bic r0, r0, #0x1000 @ ...i............
59 bic r0, r0, #0x000e @ ............wca. 59 bic r0, r0, #0x000e @ ............wca.
60 mcr p15, 0, r0, c1, c0, 0 @ disable caches 60 mcr p15, 0, r0, c1, c0, 0 @ disable caches
61 mov pc, lr 61 ret lr
62 62
63/* 63/*
64 * Function: arm720_proc_do_idle(void) 64 * Function: arm720_proc_do_idle(void)
@@ -66,7 +66,7 @@ ENTRY(cpu_arm720_proc_fin)
66 * Purpose : put the processor in proper idle mode 66 * Purpose : put the processor in proper idle mode
67 */ 67 */
68ENTRY(cpu_arm720_do_idle) 68ENTRY(cpu_arm720_do_idle)
69 mov pc, lr 69 ret lr
70 70
71/* 71/*
72 * Function: arm720_switch_mm(unsigned long pgd_phys) 72 * Function: arm720_switch_mm(unsigned long pgd_phys)
@@ -81,7 +81,7 @@ ENTRY(cpu_arm720_switch_mm)
81 mcr p15, 0, r0, c2, c0, 0 @ update page table ptr 81 mcr p15, 0, r0, c2, c0, 0 @ update page table ptr
82 mcr p15, 0, r1, c8, c7, 0 @ flush TLB (v4) 82 mcr p15, 0, r1, c8, c7, 0 @ flush TLB (v4)
83#endif 83#endif
84 mov pc, lr 84 ret lr
85 85
86/* 86/*
87 * Function: arm720_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext) 87 * Function: arm720_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext)
@@ -94,7 +94,7 @@ ENTRY(cpu_arm720_set_pte_ext)
94#ifdef CONFIG_MMU 94#ifdef CONFIG_MMU
95 armv3_set_pte_ext wc_disable=0 95 armv3_set_pte_ext wc_disable=0
96#endif 96#endif
97 mov pc, lr 97 ret lr
98 98
99/* 99/*
100 * Function: arm720_reset 100 * Function: arm720_reset
@@ -112,7 +112,7 @@ ENTRY(cpu_arm720_reset)
112 bic ip, ip, #0x000f @ ............wcam 112 bic ip, ip, #0x000f @ ............wcam
113 bic ip, ip, #0x2100 @ ..v....s........ 113 bic ip, ip, #0x2100 @ ..v....s........
114 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 114 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
115 mov pc, r0 115 ret r0
116ENDPROC(cpu_arm720_reset) 116ENDPROC(cpu_arm720_reset)
117 .popsection 117 .popsection
118 118
@@ -128,7 +128,7 @@ __arm710_setup:
128 bic r0, r0, r5 128 bic r0, r0, r5
129 ldr r5, arm710_cr1_set 129 ldr r5, arm710_cr1_set
130 orr r0, r0, r5 130 orr r0, r0, r5
131 mov pc, lr @ __ret (head.S) 131 ret lr @ __ret (head.S)
132 .size __arm710_setup, . - __arm710_setup 132 .size __arm710_setup, . - __arm710_setup
133 133
134 /* 134 /*
@@ -156,7 +156,7 @@ __arm720_setup:
156 mrc p15, 0, r0, c1, c0 @ get control register 156 mrc p15, 0, r0, c1, c0 @ get control register
157 bic r0, r0, r5 157 bic r0, r0, r5
158 orr r0, r0, r6 158 orr r0, r0, r6
159 mov pc, lr @ __ret (head.S) 159 ret lr @ __ret (head.S)
160 .size __arm720_setup, . - __arm720_setup 160 .size __arm720_setup, . - __arm720_setup
161 161
162 /* 162 /*
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index 9b0ae90cbf17..ac1ea6b3bce4 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -32,7 +32,7 @@ ENTRY(cpu_arm740_proc_init)
 ENTRY(cpu_arm740_do_idle)
 ENTRY(cpu_arm740_dcache_clean_area)
 ENTRY(cpu_arm740_switch_mm)
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm740_proc_fin()
@@ -42,7 +42,7 @@ ENTRY(cpu_arm740_proc_fin)
 	bic r0, r0, #0x3f000000 @ bank/f/lock/s
 	bic r0, r0, #0x0000000c @ w-buffer/cache
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm740_reset(loc)
@@ -56,7 +56,7 @@ ENTRY(cpu_arm740_reset)
 	mrc p15, 0, ip, c1, c0, 0 @ get ctrl register
 	bic ip, ip, #0x0000000c @ ............wc..
 	mcr p15, 0, ip, c1, c0, 0 @ ctrl register
-	mov pc, r0
+	ret r0
 ENDPROC(cpu_arm740_reset)
 	.popsection

@@ -115,7 +115,7 @@ __arm740_setup:
 					@ need some benchmark
 	orr r0, r0, #0x0000000d @ MPU/Cache/WB

-	mov pc, lr
+	ret lr

 	.size __arm740_setup, . - __arm740_setup

diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index f6cc3f63ce39..bf6ba4bc30ff 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -32,13 +32,13 @@ ENTRY(cpu_arm7tdmi_proc_init)
 ENTRY(cpu_arm7tdmi_do_idle)
 ENTRY(cpu_arm7tdmi_dcache_clean_area)
 ENTRY(cpu_arm7tdmi_switch_mm)
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm7tdmi_proc_fin()
  */
 ENTRY(cpu_arm7tdmi_proc_fin)
-	mov pc, lr
+	ret lr

 /*
  * Function: cpu_arm7tdmi_reset(loc)
@@ -47,13 +47,13 @@ ENTRY(cpu_arm7tdmi_proc_fin)
  */
 	.pushsection .idmap.text, "ax"
 ENTRY(cpu_arm7tdmi_reset)
-	mov pc, r0
+	ret r0
 ENDPROC(cpu_arm7tdmi_reset)
 	.popsection

 	.type __arm7tdmi_setup, #function
 __arm7tdmi_setup:
-	mov pc, lr
+	ret lr
 	.size __arm7tdmi_setup, . - __arm7tdmi_setup

 	__INITDATA
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 549557df6d57..22bf8dde4f84 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -63,7 +63,7 @@
  * cpu_arm920_proc_init()
  */
 ENTRY(cpu_arm920_proc_init)
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm920_proc_fin()
@@ -73,7 +73,7 @@ ENTRY(cpu_arm920_proc_fin)
 	bic r0, r0, #0x1000 @ ...i............
 	bic r0, r0, #0x000e @ ............wca.
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm920_reset(loc)
@@ -97,7 +97,7 @@ ENTRY(cpu_arm920_reset)
 	bic ip, ip, #0x000f @ ............wcam
 	bic ip, ip, #0x1100 @ ...i...s........
 	mcr p15, 0, ip, c1, c0, 0 @ ctrl register
-	mov pc, r0
+	ret r0
 ENDPROC(cpu_arm920_reset)
 	.popsection

@@ -107,7 +107,7 @@ ENDPROC(cpu_arm920_reset)
 	.align 5
 ENTRY(cpu_arm920_do_idle)
 	mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
-	mov pc, lr
+	ret lr


 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -120,7 +120,7 @@ ENTRY(cpu_arm920_do_idle)
 ENTRY(arm920_flush_icache_all)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
-	mov pc, lr
+	ret lr
 ENDPROC(arm920_flush_icache_all)

 /*
@@ -151,7 +151,7 @@ __flush_whole_cache:
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * flush_user_cache_range(start, end, flags)
@@ -177,7 +177,7 @@ ENTRY(arm920_flush_user_cache_range)
 	blo 1b
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * coherent_kern_range(start, end)
@@ -211,7 +211,7 @@ ENTRY(arm920_coherent_user_range)
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 	mov r0, #0
-	mov pc, lr
+	ret lr

 /*
  * flush_kern_dcache_area(void *addr, size_t size)
@@ -231,7 +231,7 @@ ENTRY(arm920_flush_kern_dcache_area)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_inv_range(start, end)
@@ -257,7 +257,7 @@ arm920_dma_inv_range:
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_clean_range(start, end)
@@ -276,7 +276,7 @@ arm920_dma_clean_range:
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_flush_range(start, end)
@@ -293,7 +293,7 @@ ENTRY(arm920_dma_flush_range)
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_map_area(start, size, dir)
@@ -316,7 +316,7 @@ ENDPROC(arm920_dma_map_area)
  * - dir - DMA direction
  */
 ENTRY(arm920_dma_unmap_area)
-	mov pc, lr
+	ret lr
 ENDPROC(arm920_dma_unmap_area)

 	.globl arm920_flush_kern_cache_louis
@@ -332,7 +332,7 @@ ENTRY(cpu_arm920_dcache_clean_area)
 	add r0, r0, #CACHE_DLINESIZE
 	subs r1, r1, #CACHE_DLINESIZE
 	bhi 1b
-	mov pc, lr
+	ret lr

 /* =============================== PageTable ============================== */

@@ -367,7 +367,7 @@ ENTRY(cpu_arm920_switch_mm)
 	mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
 	mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
 #endif
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm920_set_pte(ptep, pte, ext)
@@ -382,7 +382,7 @@ ENTRY(cpu_arm920_set_pte_ext)
 	mcr p15, 0, r0, c7, c10, 1 @ clean D entry
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 #endif
-	mov pc, lr
+	ret lr

 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl cpu_arm920_suspend_size
@@ -423,7 +423,7 @@ __arm920_setup:
 	mrc p15, 0, r0, c1, c0 @ get control register v4
 	bic r0, r0, r5
 	orr r0, r0, r6
-	mov pc, lr
+	ret lr
 	.size __arm920_setup, . - __arm920_setup

 	/*
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 2a758b06c6f6..0c6d5ac5a6d4 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -65,7 +65,7 @@
  * cpu_arm922_proc_init()
  */
 ENTRY(cpu_arm922_proc_init)
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm922_proc_fin()
@@ -75,7 +75,7 @@ ENTRY(cpu_arm922_proc_fin)
 	bic r0, r0, #0x1000 @ ...i............
 	bic r0, r0, #0x000e @ ............wca.
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm922_reset(loc)
@@ -99,7 +99,7 @@ ENTRY(cpu_arm922_reset)
 	bic ip, ip, #0x000f @ ............wcam
 	bic ip, ip, #0x1100 @ ...i...s........
 	mcr p15, 0, ip, c1, c0, 0 @ ctrl register
-	mov pc, r0
+	ret r0
 ENDPROC(cpu_arm922_reset)
 	.popsection

@@ -109,7 +109,7 @@ ENDPROC(cpu_arm922_reset)
 	.align 5
 ENTRY(cpu_arm922_do_idle)
 	mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
-	mov pc, lr
+	ret lr


 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -122,7 +122,7 @@ ENTRY(cpu_arm922_do_idle)
 ENTRY(arm922_flush_icache_all)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
-	mov pc, lr
+	ret lr
 ENDPROC(arm922_flush_icache_all)

 /*
@@ -153,7 +153,7 @@ __flush_whole_cache:
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * flush_user_cache_range(start, end, flags)
@@ -179,7 +179,7 @@ ENTRY(arm922_flush_user_cache_range)
 	blo 1b
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * coherent_kern_range(start, end)
@@ -213,7 +213,7 @@ ENTRY(arm922_coherent_user_range)
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 	mov r0, #0
-	mov pc, lr
+	ret lr

 /*
  * flush_kern_dcache_area(void *addr, size_t size)
@@ -233,7 +233,7 @@ ENTRY(arm922_flush_kern_dcache_area)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_inv_range(start, end)
@@ -259,7 +259,7 @@ arm922_dma_inv_range:
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_clean_range(start, end)
@@ -278,7 +278,7 @@ arm922_dma_clean_range:
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_flush_range(start, end)
@@ -295,7 +295,7 @@ ENTRY(arm922_dma_flush_range)
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_map_area(start, size, dir)
@@ -318,7 +318,7 @@ ENDPROC(arm922_dma_map_area)
  * - dir - DMA direction
  */
 ENTRY(arm922_dma_unmap_area)
-	mov pc, lr
+	ret lr
 ENDPROC(arm922_dma_unmap_area)

 	.globl arm922_flush_kern_cache_louis
@@ -336,7 +336,7 @@ ENTRY(cpu_arm922_dcache_clean_area)
 	subs r1, r1, #CACHE_DLINESIZE
 	bhi 1b
 #endif
-	mov pc, lr
+	ret lr

 /* =============================== PageTable ============================== */

@@ -371,7 +371,7 @@ ENTRY(cpu_arm922_switch_mm)
 	mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
 	mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
 #endif
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm922_set_pte_ext(ptep, pte, ext)
@@ -386,7 +386,7 @@ ENTRY(cpu_arm922_set_pte_ext)
 	mcr p15, 0, r0, c7, c10, 1 @ clean D entry
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 #endif /* CONFIG_MMU */
-	mov pc, lr
+	ret lr

 	.type __arm922_setup, #function
 __arm922_setup:
@@ -401,7 +401,7 @@ __arm922_setup:
 	mrc p15, 0, r0, c1, c0 @ get control register v4
 	bic r0, r0, r5
 	orr r0, r0, r6
-	mov pc, lr
+	ret lr
 	.size __arm922_setup, . - __arm922_setup

 	/*
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index ba0d58e1a2a2..c32d073282ea 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -86,7 +86,7 @@
  * cpu_arm925_proc_init()
  */
 ENTRY(cpu_arm925_proc_init)
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm925_proc_fin()
@@ -96,7 +96,7 @@ ENTRY(cpu_arm925_proc_fin)
 	bic r0, r0, #0x1000 @ ...i............
 	bic r0, r0, #0x000e @ ............wca.
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm925_reset(loc)
@@ -129,7 +129,7 @@ ENDPROC(cpu_arm925_reset)
 	bic ip, ip, #0x000f @ ............wcam
 	bic ip, ip, #0x1100 @ ...i...s........
 	mcr p15, 0, ip, c1, c0, 0 @ ctrl register
-	mov pc, r0
+	ret r0

 /*
  * cpu_arm925_do_idle()
@@ -145,7 +145,7 @@ ENTRY(cpu_arm925_do_idle)
 	mcr p15, 0, r2, c1, c0, 0 @ Disable I cache
 	mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
 	mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable
-	mov pc, lr
+	ret lr

 /*
  * flush_icache_all()
@@ -155,7 +155,7 @@ ENTRY(cpu_arm925_do_idle)
 ENTRY(arm925_flush_icache_all)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
-	mov pc, lr
+	ret lr
 ENDPROC(arm925_flush_icache_all)

 /*
@@ -188,7 +188,7 @@ __flush_whole_cache:
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * flush_user_cache_range(start, end, flags)
@@ -225,7 +225,7 @@ ENTRY(arm925_flush_user_cache_range)
 	blo 1b
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * coherent_kern_range(start, end)
@@ -259,7 +259,7 @@ ENTRY(arm925_coherent_user_range)
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 	mov r0, #0
-	mov pc, lr
+	ret lr

 /*
  * flush_kern_dcache_area(void *addr, size_t size)
@@ -279,7 +279,7 @@ ENTRY(arm925_flush_kern_dcache_area)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_inv_range(start, end)
@@ -307,7 +307,7 @@ arm925_dma_inv_range:
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_clean_range(start, end)
@@ -328,7 +328,7 @@ arm925_dma_clean_range:
 	blo 1b
 #endif
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_flush_range(start, end)
@@ -350,7 +350,7 @@ ENTRY(arm925_dma_flush_range)
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_map_area(start, size, dir)
@@ -373,7 +373,7 @@ ENDPROC(arm925_dma_map_area)
  * - dir - DMA direction
  */
 ENTRY(arm925_dma_unmap_area)
-	mov pc, lr
+	ret lr
 ENDPROC(arm925_dma_unmap_area)

 	.globl arm925_flush_kern_cache_louis
@@ -390,7 +390,7 @@ ENTRY(cpu_arm925_dcache_clean_area)
 	bhi 1b
 #endif
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /* =============================== PageTable ============================== */

@@ -419,7 +419,7 @@ ENTRY(cpu_arm925_switch_mm)
 	mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
 	mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
 #endif
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm925_set_pte_ext(ptep, pte, ext)
@@ -436,7 +436,7 @@ ENTRY(cpu_arm925_set_pte_ext)
 #endif
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 #endif /* CONFIG_MMU */
-	mov pc, lr
+	ret lr

 	.type __arm925_setup, #function
 __arm925_setup:
@@ -469,7 +469,7 @@ __arm925_setup:
 #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
 	orr r0, r0, #0x4000 @ .1.. .... .... ....
 #endif
-	mov pc, lr
+	ret lr
 	.size __arm925_setup, . - __arm925_setup

 	/*
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 0f098f407c9f..252b2503038d 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -55,7 +55,7 @@
  * cpu_arm926_proc_init()
  */
 ENTRY(cpu_arm926_proc_init)
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm926_proc_fin()
@@ -65,7 +65,7 @@ ENTRY(cpu_arm926_proc_fin)
 	bic r0, r0, #0x1000 @ ...i............
 	bic r0, r0, #0x000e @ ............wca.
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm926_reset(loc)
@@ -89,7 +89,7 @@ ENTRY(cpu_arm926_reset)
 	bic ip, ip, #0x000f @ ............wcam
 	bic ip, ip, #0x1100 @ ...i...s........
 	mcr p15, 0, ip, c1, c0, 0 @ ctrl register
-	mov pc, r0
+	ret r0
 ENDPROC(cpu_arm926_reset)
 	.popsection

@@ -111,7 +111,7 @@ ENTRY(cpu_arm926_do_idle)
 	mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
 	mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable
 	msr cpsr_c, r3 @ Restore FIQ state
-	mov pc, lr
+	ret lr

 /*
  * flush_icache_all()
@@ -121,7 +121,7 @@ ENTRY(cpu_arm926_do_idle)
 ENTRY(arm926_flush_icache_all)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
-	mov pc, lr
+	ret lr
 ENDPROC(arm926_flush_icache_all)

 /*
@@ -151,7 +151,7 @@ __flush_whole_cache:
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * flush_user_cache_range(start, end, flags)
@@ -188,7 +188,7 @@ ENTRY(arm926_flush_user_cache_range)
 	blo 1b
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * coherent_kern_range(start, end)
@@ -222,7 +222,7 @@ ENTRY(arm926_coherent_user_range)
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 	mov r0, #0
-	mov pc, lr
+	ret lr

 /*
  * flush_kern_dcache_area(void *addr, size_t size)
@@ -242,7 +242,7 @@ ENTRY(arm926_flush_kern_dcache_area)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_inv_range(start, end)
@@ -270,7 +270,7 @@ arm926_dma_inv_range:
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_clean_range(start, end)
@@ -291,7 +291,7 @@ arm926_dma_clean_range:
 	blo 1b
 #endif
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_flush_range(start, end)
@@ -313,7 +313,7 @@ ENTRY(arm926_dma_flush_range)
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_map_area(start, size, dir)
@@ -336,7 +336,7 @@ ENDPROC(arm926_dma_map_area)
  * - dir - DMA direction
  */
 ENTRY(arm926_dma_unmap_area)
-	mov pc, lr
+	ret lr
 ENDPROC(arm926_dma_unmap_area)

 	.globl arm926_flush_kern_cache_louis
@@ -353,7 +353,7 @@ ENTRY(cpu_arm926_dcache_clean_area)
 	bhi 1b
 #endif
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /* =============================== PageTable ============================== */

@@ -380,7 +380,7 @@ ENTRY(cpu_arm926_switch_mm)
 	mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
 	mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
 #endif
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm926_set_pte_ext(ptep, pte, ext)
@@ -397,7 +397,7 @@ ENTRY(cpu_arm926_set_pte_ext)
 #endif
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 #endif
-	mov pc, lr
+	ret lr

 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl cpu_arm926_suspend_size
@@ -448,7 +448,7 @@ __arm926_setup:
 #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
 	orr r0, r0, #0x4000 @ .1.. .... .... ....
 #endif
-	mov pc, lr
+	ret lr
 	.size __arm926_setup, . - __arm926_setup

 	/*
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 1c39a704ff6e..e5212d489377 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -31,7 +31,7 @@
  */
 ENTRY(cpu_arm940_proc_init)
 ENTRY(cpu_arm940_switch_mm)
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm940_proc_fin()
@@ -41,7 +41,7 @@ ENTRY(cpu_arm940_proc_fin)
 	bic r0, r0, #0x00001000 @ i-cache
 	bic r0, r0, #0x00000004 @ d-cache
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm940_reset(loc)
@@ -58,7 +58,7 @@ ENTRY(cpu_arm940_reset)
 	bic ip, ip, #0x00000005 @ .............c.p
 	bic ip, ip, #0x00001000 @ i-cache
 	mcr p15, 0, ip, c1, c0, 0 @ ctrl register
-	mov pc, r0
+	ret r0
 ENDPROC(cpu_arm940_reset)
 	.popsection

@@ -68,7 +68,7 @@ ENDPROC(cpu_arm940_reset)
 	.align 5
 ENTRY(cpu_arm940_do_idle)
 	mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
-	mov pc, lr
+	ret lr

 /*
  * flush_icache_all()
@@ -78,7 +78,7 @@ ENTRY(cpu_arm940_do_idle)
 ENTRY(arm940_flush_icache_all)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
-	mov pc, lr
+	ret lr
 ENDPROC(arm940_flush_icache_all)

 /*
@@ -122,7 +122,7 @@ ENTRY(arm940_flush_user_cache_range)
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * coherent_kern_range(start, end)
@@ -170,7 +170,7 @@ ENTRY(arm940_flush_kern_dcache_area)
 	bcs 1b @ segments 7 to 0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_inv_range(start, end)
@@ -191,7 +191,7 @@ arm940_dma_inv_range:
 	subs r1, r1, #1 << 4
 	bcs 1b @ segments 7 to 0
 	mcr p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_clean_range(start, end)
@@ -215,7 +215,7 @@ ENTRY(cpu_arm940_dcache_clean_area)
 	bcs 1b @ segments 7 to 0
 #endif
 	mcr p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_flush_range(start, end)
@@ -241,7 +241,7 @@ ENTRY(arm940_dma_flush_range)
 	subs r1, r1, #1 << 4
 	bcs 1b @ segments 7 to 0
 	mcr p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_map_area(start, size, dir)
@@ -264,7 +264,7 @@ ENDPROC(arm940_dma_map_area)
  * - dir - DMA direction
  */
 ENTRY(arm940_dma_unmap_area)
-	mov pc, lr
+	ret lr
 ENDPROC(arm940_dma_unmap_area)

 	.globl arm940_flush_kern_cache_louis
@@ -337,7 +337,7 @@ __arm940_setup:
 	orr r0, r0, #0x00001000 @ I-cache
 	orr r0, r0, #0x00000005 @ MPU/D-cache

-	mov pc, lr
+	ret lr

 	.size __arm940_setup, . - __arm940_setup

diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 0289cd905e73..b3dd9b2d0b8e 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -38,7 +38,7 @@
  */
 ENTRY(cpu_arm946_proc_init)
 ENTRY(cpu_arm946_switch_mm)
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm946_proc_fin()
@@ -48,7 +48,7 @@ ENTRY(cpu_arm946_proc_fin)
 	bic r0, r0, #0x00001000 @ i-cache
 	bic r0, r0, #0x00000004 @ d-cache
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm946_reset(loc)
@@ -65,7 +65,7 @@ ENTRY(cpu_arm946_reset)
 	bic ip, ip, #0x00000005 @ .............c.p
 	bic ip, ip, #0x00001000 @ i-cache
 	mcr p15, 0, ip, c1, c0, 0 @ ctrl register
-	mov pc, r0
+	ret r0
 ENDPROC(cpu_arm946_reset)
 	.popsection

@@ -75,7 +75,7 @@ ENDPROC(cpu_arm946_reset)
 	.align 5
 ENTRY(cpu_arm946_do_idle)
 	mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
-	mov pc, lr
+	ret lr

 /*
  * flush_icache_all()
@@ -85,7 +85,7 @@ ENTRY(cpu_arm946_do_idle)
 ENTRY(arm946_flush_icache_all)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
-	mov pc, lr
+	ret lr
 ENDPROC(arm946_flush_icache_all)

 /*
@@ -117,7 +117,7 @@ __flush_whole_cache:
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c5, 0 @ flush I cache
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * flush_user_cache_range(start, end, flags)
@@ -156,7 +156,7 @@ ENTRY(arm946_flush_user_cache_range)
 	blo 1b
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * coherent_kern_range(start, end)
@@ -191,7 +191,7 @@ ENTRY(arm946_coherent_user_range)
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 	mov r0, #0
-	mov pc, lr
+	ret lr

 /*
  * flush_kern_dcache_area(void *addr, size_t size)
@@ -212,7 +212,7 @@ ENTRY(arm946_flush_kern_dcache_area)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_inv_range(start, end)
@@ -239,7 +239,7 @@ arm946_dma_inv_range:
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_clean_range(start, end)
@@ -260,7 +260,7 @@ arm946_dma_clean_range:
 	blo 1b
 #endif
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_flush_range(start, end)
@@ -284,7 +284,7 @@ ENTRY(arm946_dma_flush_range)
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_map_area(start, size, dir)
@@ -307,7 +307,7 @@ ENDPROC(arm946_dma_map_area)
  * - dir - DMA direction
  */
 ENTRY(arm946_dma_unmap_area)
-	mov pc, lr
+	ret lr
 ENDPROC(arm946_dma_unmap_area)

 	.globl arm946_flush_kern_cache_louis
@@ -324,7 +324,7 @@ ENTRY(cpu_arm946_dcache_clean_area)
 	bhi 1b
 #endif
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 	.type __arm946_setup, #function
 __arm946_setup:
@@ -392,7 +392,7 @@ __arm946_setup:
 #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
 	orr r0, r0, #0x00004000 @ .1.. .... .... ....
 #endif
-	mov pc, lr
+	ret lr

 	.size __arm946_setup, . - __arm946_setup

diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index f51197ba754a..8227322bbb8f 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -32,13 +32,13 @@ ENTRY(cpu_arm9tdmi_proc_init)
 ENTRY(cpu_arm9tdmi_do_idle)
 ENTRY(cpu_arm9tdmi_dcache_clean_area)
 ENTRY(cpu_arm9tdmi_switch_mm)
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm9tdmi_proc_fin()
  */
 ENTRY(cpu_arm9tdmi_proc_fin)
-	mov pc, lr
+	ret lr

 /*
  * Function: cpu_arm9tdmi_reset(loc)
@@ -47,13 +47,13 @@ ENTRY(cpu_arm9tdmi_proc_fin)
  */
 	.pushsection .idmap.text, "ax"
 ENTRY(cpu_arm9tdmi_reset)
-	mov pc, r0
+	ret r0
 ENDPROC(cpu_arm9tdmi_reset)
 	.popsection

 	.type __arm9tdmi_setup, #function
 __arm9tdmi_setup:
-	mov pc, lr
+	ret lr
 	.size __arm9tdmi_setup, . - __arm9tdmi_setup

 	__INITDATA
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index 2dfc0f1d3bfd..c494886892ba 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -32,7 +32,7 @@
  * cpu_fa526_proc_init()
  */
 ENTRY(cpu_fa526_proc_init)
-	mov pc, lr
+	ret lr

 /*
  * cpu_fa526_proc_fin()
@@ -44,7 +44,7 @@ ENTRY(cpu_fa526_proc_fin)
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
 	nop
 	nop
-	mov pc, lr
+	ret lr

 /*
  * cpu_fa526_reset(loc)
@@ -72,7 +72,7 @@ ENTRY(cpu_fa526_reset)
 	mcr p15, 0, ip, c1, c0, 0 @ ctrl register
 	nop
 	nop
-	mov pc, r0
+	ret r0
 ENDPROC(cpu_fa526_reset)
 	.popsection

@@ -81,7 +81,7 @@ ENDPROC(cpu_fa526_reset)
  */
 	.align 4
 ENTRY(cpu_fa526_do_idle)
-	mov pc, lr
+	ret lr


 ENTRY(cpu_fa526_dcache_clean_area)
@@ -90,7 +90,7 @@ ENTRY(cpu_fa526_dcache_clean_area)
 	subs r1, r1, #CACHE_DLINESIZE
 	bhi 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /* =============================== PageTable ============================== */

@@ -117,7 +117,7 @@ ENTRY(cpu_fa526_switch_mm)
 	mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
 	mcr p15, 0, ip, c8, c7, 0 @ invalidate UTLB
 #endif
-	mov pc, lr
+	ret lr

 /*
  * cpu_fa526_set_pte_ext(ptep, pte, ext)
@@ -133,7 +133,7 @@ ENTRY(cpu_fa526_set_pte_ext)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 #endif
-	mov pc, lr
+	ret lr

 	.type __fa526_setup, #function
 __fa526_setup:
@@ -162,7 +162,7 @@ __fa526_setup:
 	bic r0, r0, r5
 	ldr r5, fa526_cr1_set
 	orr r0, r0, r5
-	mov pc, lr
+	ret lr
 	.size __fa526_setup, . - __fa526_setup

 	/*
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index db79b62c92fb..03a1b75f2e16 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -69,7 +69,7 @@ ENTRY(cpu_feroceon_proc_init)
 	movne r2, r2, lsr #2 @ turned into # of sets
 	sub r2, r2, #(1 << 5)
 	stmia r1, {r2, r3}
-	mov pc, lr
+	ret lr

 /*
  * cpu_feroceon_proc_fin()
@@ -86,7 +86,7 @@ ENTRY(cpu_feroceon_proc_fin)
 	bic r0, r0, #0x1000 @ ...i............
 	bic r0, r0, #0x000e @ ............wca.
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
-	mov pc, lr
+	ret lr

 /*
  * cpu_feroceon_reset(loc)
@@ -110,7 +110,7 @@ ENTRY(cpu_feroceon_reset)
 	bic ip, ip, #0x000f @ ............wcam
 	bic ip, ip, #0x1100 @ ...i...s........
 	mcr p15, 0, ip, c1, c0, 0 @ ctrl register
-	mov pc, r0
+	ret r0
 ENDPROC(cpu_feroceon_reset)
 	.popsection

@@ -124,7 +124,7 @@ ENTRY(cpu_feroceon_do_idle)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
 	mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
-	mov pc, lr
+	ret lr

 /*
  * flush_icache_all()
@@ -134,7 +134,7 @@ ENTRY(cpu_feroceon_do_idle)
 ENTRY(feroceon_flush_icache_all)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
-	mov pc, lr
+	ret lr
 ENDPROC(feroceon_flush_icache_all)

 /*
@@ -169,7 +169,7 @@ __flush_whole_cache:
 	mov ip, #0
 	mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * flush_user_cache_range(start, end, flags)
@@ -198,7 +198,7 @@ ENTRY(feroceon_flush_user_cache_range)
 	tst r2, #VM_EXEC
 	mov ip, #0
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * coherent_kern_range(start, end)
@@ -233,7 +233,7 @@ ENTRY(feroceon_coherent_user_range)
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 	mov r0, #0
-	mov pc, lr
+	ret lr

 /*
  * flush_kern_dcache_area(void *addr, size_t size)
@@ -254,7 +254,7 @@ ENTRY(feroceon_flush_kern_dcache_area)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 	.align 5
 ENTRY(feroceon_range_flush_kern_dcache_area)
@@ -268,7 +268,7 @@ ENTRY(feroceon_range_flush_kern_dcache_area)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_inv_range(start, end)
@@ -295,7 +295,7 @@ feroceon_dma_inv_range:
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 	.align 5
 feroceon_range_dma_inv_range:
@@ -311,7 +311,7 @@ feroceon_range_dma_inv_range:
 	mcr p15, 5, r0, c15, c14, 0 @ D inv range start
 	mcr p15, 5, r1, c15, c14, 1 @ D inv range top
 	msr cpsr_c, r2 @ restore interrupts
-	mov pc, lr
+	ret lr

 /*
  * dma_clean_range(start, end)
@@ -331,7 +331,7 @@ feroceon_dma_clean_range:
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 	.align 5
 feroceon_range_dma_clean_range:
@@ -344,7 +344,7 @@ feroceon_range_dma_clean_range:
 	mcr p15, 5, r1, c15, c13, 1 @ D clean range top
 	msr cpsr_c, r2 @ restore interrupts
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_flush_range(start, end)
@@ -362,7 +362,7 @@ ENTRY(feroceon_dma_flush_range)
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 	.align 5
 ENTRY(feroceon_range_dma_flush_range)
@@ -375,7 +375,7 @@ ENTRY(feroceon_range_dma_flush_range)
 	mcr p15, 5, r1, c15, c15, 1 @ D clean/inv range top
 	msr cpsr_c, r2 @ restore interrupts
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_map_area(start, size, dir)
@@ -412,7 +412,7 @@ ENDPROC(feroceon_range_dma_map_area)
  * - dir - DMA direction
  */
 ENTRY(feroceon_dma_unmap_area)
-	mov pc, lr
+	ret lr
 ENDPROC(feroceon_dma_unmap_area)

 	.globl feroceon_flush_kern_cache_louis
@@ -461,7 +461,7 @@ ENTRY(cpu_feroceon_dcache_clean_area)
 	bhi 1b
 #endif
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /* =============================== PageTable ============================== */

@@ -490,9 +490,9 @@ ENTRY(cpu_feroceon_switch_mm)

 	mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
 	mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
-	mov pc, r2
+	ret r2
 #else
-	mov pc, lr
+	ret lr
 #endif

 /*
@@ -512,7 +512,7 @@ ENTRY(cpu_feroceon_set_pte_ext)
 #endif
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 #endif
-	mov pc, lr
+	ret lr

 /* Suspend/resume support: taken from arch/arm/mm/proc-arm926.S */
 .globl cpu_feroceon_suspend_size
@@ -554,7 +554,7 @@ __feroceon_setup:
 	mrc p15, 0, r0, c1, c0 @ get control register v4
 	bic r0, r0, r5
 	orr r0, r0, r6
-	mov pc, lr
+	ret lr
 	.size __feroceon_setup, . - __feroceon_setup

 	/*
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 40acba595731..53d393455f13 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -45,7 +45,7 @@
  * cpu_mohawk_proc_init()
  */
 ENTRY(cpu_mohawk_proc_init)
-	mov pc, lr
+	ret lr

 /*
  * cpu_mohawk_proc_fin()
@@ -55,7 +55,7 @@ ENTRY(cpu_mohawk_proc_fin)
 	bic r0, r0, #0x1800 @ ...iz...........
 	bic r0, r0, #0x0006 @ .............ca.
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
-	mov pc, lr
+	ret lr

 /*
  * cpu_mohawk_reset(loc)
@@ -79,7 +79,7 @@ ENTRY(cpu_mohawk_reset)
 	bic ip, ip, #0x0007 @ .............cam
 	bic ip, ip, #0x1100 @ ...i...s........
 	mcr p15, 0, ip, c1, c0, 0 @ ctrl register
-	mov pc, r0
+	ret r0
 ENDPROC(cpu_mohawk_reset)
 	.popsection

@@ -93,7 +93,7 @@ ENTRY(cpu_mohawk_do_idle)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
 	mcr p15, 0, r0, c7, c0, 4 @ wait for interrupt
-	mov pc, lr
+	ret lr

 /*
  * flush_icache_all()
@@ -103,7 +103,7 @@ ENTRY(cpu_mohawk_do_idle)
 ENTRY(mohawk_flush_icache_all)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
-	mov pc, lr
+	ret lr
 ENDPROC(mohawk_flush_icache_all)

 /*
@@ -128,7 +128,7 @@ __flush_whole_cache:
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
 	mcrne p15, 0, ip, c7, c10, 0 @ drain write buffer
-	mov pc, lr
+	ret lr

 /*
  * flush_user_cache_range(start, end, flags)
@@ -158,7 +158,7 @@ ENTRY(mohawk_flush_user_cache_range)
 	blo 1b
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * coherent_kern_range(start, end)
@@ -194,7 +194,7 @@ ENTRY(mohawk_coherent_user_range)
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 	mov r0, #0
-	mov pc, lr
+	ret lr

 /*
  * flush_kern_dcache_area(void *addr, size_t size)
@@ -214,7 +214,7 @@ ENTRY(mohawk_flush_kern_dcache_area)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_inv_range(start, end)
@@ -240,7 +240,7 @@ mohawk_dma_inv_range:
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_clean_range(start, end)
@@ -259,7 +259,7 @@ mohawk_dma_clean_range:
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_flush_range(start, end)
@@ -277,7 +277,7 @@ ENTRY(mohawk_dma_flush_range)
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_map_area(start, size, dir)
@@ -300,7 +300,7 @@ ENDPROC(mohawk_dma_map_area)
  * - dir - DMA direction
  */
 ENTRY(mohawk_dma_unmap_area)
-	mov pc, lr
+	ret lr
 ENDPROC(mohawk_dma_unmap_area)

 	.globl mohawk_flush_kern_cache_louis
@@ -315,7 +315,7 @@ ENTRY(cpu_mohawk_dcache_clean_area)
 	subs r1, r1, #CACHE_DLINESIZE
 	bhi 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * cpu_mohawk_switch_mm(pgd)
@@ -333,7 +333,7 @@ ENTRY(cpu_mohawk_switch_mm)
 	orr r0, r0, #0x18 @ cache the page table in L2
 	mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
 	mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
-	mov pc, lr
+	ret lr

 /*
  * cpu_mohawk_set_pte_ext(ptep, pte, ext)
@@ -346,7 +346,7 @@ ENTRY(cpu_mohawk_set_pte_ext)
 	mov r0, r0
 	mcr p15, 0, r0, c7, c10, 1 @ clean D entry
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 .globl cpu_mohawk_suspend_size
 .equ cpu_mohawk_suspend_size, 4 * 6
@@ -400,7 +400,7 @@ __mohawk_setup:
 	mrc p15, 0, r0, c1, c0 @ get control register
 	bic r0, r0, r5
 	orr r0, r0, r6
-	mov pc, lr
+	ret lr

 	.size __mohawk_setup, . - __mohawk_setup

diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index c45319c8f1d9..8008a0461cf5 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -38,7 +38,7 @@
 ENTRY(cpu_sa110_proc_init)
 	mov r0, #0
 	mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching
-	mov pc, lr
+	ret lr

 /*
  * cpu_sa110_proc_fin()
@@ -50,7 +50,7 @@ ENTRY(cpu_sa110_proc_fin)
 	bic r0, r0, #0x1000 @ ...i............
 	bic r0, r0, #0x000e @ ............wca.
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
-	mov pc, lr
+	ret lr

 /*
  * cpu_sa110_reset(loc)
@@ -74,7 +74,7 @@ ENTRY(cpu_sa110_reset)
 	bic ip, ip, #0x000f @ ............wcam
 	bic ip, ip, #0x1100 @ ...i...s........
 	mcr p15, 0, ip, c1, c0, 0 @ ctrl register
-	mov pc, r0
+	ret r0
 ENDPROC(cpu_sa110_reset)
 	.popsection

@@ -103,7 +103,7 @@ ENTRY(cpu_sa110_do_idle)
 	mov r0, r0 @ safety
 	mov r0, r0 @ safety
 	mcr p15, 0, r0, c15, c1, 2 @ enable clock switching
-	mov pc, lr
+	ret lr

 /* ================================= CACHE ================================ */

@@ -121,7 +121,7 @@ ENTRY(cpu_sa110_dcache_clean_area)
 	add r0, r0, #DCACHELINESIZE
 	subs r1, r1, #DCACHELINESIZE
 	bhi 1b
-	mov pc, lr
+	ret lr

 /* =============================== PageTable ============================== */

@@ -141,7 +141,7 @@ ENTRY(cpu_sa110_switch_mm)
 	mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
 	ldr pc, [sp], #4
 #else
-	mov pc, lr
+	ret lr
 #endif

 /*
@@ -157,7 +157,7 @@ ENTRY(cpu_sa110_set_pte_ext)
 	mcr p15, 0, r0, c7, c10, 1 @ clean D entry
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 #endif
-	mov pc, lr
+	ret lr

 	.type __sa110_setup, #function
 __sa110_setup:
@@ -173,7 +173,7 @@ __sa110_setup:
 	mrc p15, 0, r0, c1, c0 @ get control register v4
 	bic r0, r0, r5
 	orr r0, r0, r6
-	mov pc, lr
+	ret lr
 	.size __sa110_setup, . - __sa110_setup

 	/*
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 09d241ae2dbe..89f97ac648a9 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -43,7 +43,7 @@ ENTRY(cpu_sa1100_proc_init)
43 mov r0, #0 43 mov r0, #0
44 mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching 44 mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching
45 mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland 45 mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland
46 mov pc, lr 46 ret lr
47 47
48/* 48/*
49 * cpu_sa1100_proc_fin() 49 * cpu_sa1100_proc_fin()
@@ -58,7 +58,7 @@ ENTRY(cpu_sa1100_proc_fin)
58 bic r0, r0, #0x1000 @ ...i............ 58 bic r0, r0, #0x1000 @ ...i............
59 bic r0, r0, #0x000e @ ............wca. 59 bic r0, r0, #0x000e @ ............wca.
60 mcr p15, 0, r0, c1, c0, 0 @ disable caches 60 mcr p15, 0, r0, c1, c0, 0 @ disable caches
61 mov pc, lr 61 ret lr
62 62
63/* 63/*
64 * cpu_sa1100_reset(loc) 64 * cpu_sa1100_reset(loc)
@@ -82,7 +82,7 @@ ENTRY(cpu_sa1100_reset)
82 bic ip, ip, #0x000f @ ............wcam 82 bic ip, ip, #0x000f @ ............wcam
83 bic ip, ip, #0x1100 @ ...i...s........ 83 bic ip, ip, #0x1100 @ ...i...s........
84 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 84 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
85 mov pc, r0 85 ret r0
86ENDPROC(cpu_sa1100_reset) 86ENDPROC(cpu_sa1100_reset)
87 .popsection 87 .popsection
88 88
@@ -113,7 +113,7 @@ ENTRY(cpu_sa1100_do_idle)
113 mcr p15, 0, r0, c15, c8, 2 @ wait for interrupt 113 mcr p15, 0, r0, c15, c8, 2 @ wait for interrupt
114 mov r0, r0 @ safety 114 mov r0, r0 @ safety
115 mcr p15, 0, r0, c15, c1, 2 @ enable clock switching 115 mcr p15, 0, r0, c15, c1, 2 @ enable clock switching
116 mov pc, lr 116 ret lr
117 117
118/* ================================= CACHE ================================ */ 118/* ================================= CACHE ================================ */
119 119
@@ -131,7 +131,7 @@ ENTRY(cpu_sa1100_dcache_clean_area)
131 add r0, r0, #DCACHELINESIZE 131 add r0, r0, #DCACHELINESIZE
132 subs r1, r1, #DCACHELINESIZE 132 subs r1, r1, #DCACHELINESIZE
133 bhi 1b 133 bhi 1b
134 mov pc, lr 134 ret lr
135 135
136/* =============================== PageTable ============================== */ 136/* =============================== PageTable ============================== */
137 137
@@ -152,7 +152,7 @@ ENTRY(cpu_sa1100_switch_mm)
152 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 152 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
153 ldr pc, [sp], #4 153 ldr pc, [sp], #4
154#else 154#else
155 mov pc, lr 155 ret lr
156#endif 156#endif
157 157
158/* 158/*
@@ -168,7 +168,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
168 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 168 mcr p15, 0, r0, c7, c10, 1 @ clean D entry
169 mcr p15, 0, r0, c7, c10, 4 @ drain WB 169 mcr p15, 0, r0, c7, c10, 4 @ drain WB
170#endif 170#endif
171 mov pc, lr 171 ret lr
172 172
173.globl cpu_sa1100_suspend_size 173.globl cpu_sa1100_suspend_size
174.equ cpu_sa1100_suspend_size, 4 * 3 174.equ cpu_sa1100_suspend_size, 4 * 3
@@ -211,7 +211,7 @@ __sa1100_setup:
211 mrc p15, 0, r0, c1, c0 @ get control register v4 211 mrc p15, 0, r0, c1, c0 @ get control register v4
212 bic r0, r0, r5 212 bic r0, r0, r5
213 orr r0, r0, r6 213 orr r0, r0, r6
214 mov pc, lr 214 ret lr
215 .size __sa1100_setup, . - __sa1100_setup 215 .size __sa1100_setup, . - __sa1100_setup
216 216
217 /* 217 /*
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 32b3558321c4..d0390f4b3f18 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -36,14 +36,14 @@
36#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S 36#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S
37 37
38ENTRY(cpu_v6_proc_init) 38ENTRY(cpu_v6_proc_init)
39 mov pc, lr 39 ret lr
40 40
41ENTRY(cpu_v6_proc_fin) 41ENTRY(cpu_v6_proc_fin)
42 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 42 mrc p15, 0, r0, c1, c0, 0 @ ctrl register
43 bic r0, r0, #0x1000 @ ...i............ 43 bic r0, r0, #0x1000 @ ...i............
44 bic r0, r0, #0x0006 @ .............ca. 44 bic r0, r0, #0x0006 @ .............ca.
45 mcr p15, 0, r0, c1, c0, 0 @ disable caches 45 mcr p15, 0, r0, c1, c0, 0 @ disable caches
46 mov pc, lr 46 ret lr
47 47
48/* 48/*
49 * cpu_v6_reset(loc) 49 * cpu_v6_reset(loc)
@@ -62,7 +62,7 @@ ENTRY(cpu_v6_reset)
62 mcr p15, 0, r1, c1, c0, 0 @ disable MMU 62 mcr p15, 0, r1, c1, c0, 0 @ disable MMU
63 mov r1, #0 63 mov r1, #0
64 mcr p15, 0, r1, c7, c5, 4 @ ISB 64 mcr p15, 0, r1, c7, c5, 4 @ ISB
65 mov pc, r0 65 ret r0
66ENDPROC(cpu_v6_reset) 66ENDPROC(cpu_v6_reset)
67 .popsection 67 .popsection
68 68
@@ -77,14 +77,14 @@ ENTRY(cpu_v6_do_idle)
77 mov r1, #0 77 mov r1, #0
78 mcr p15, 0, r1, c7, c10, 4 @ DWB - WFI may enter a low-power mode 78 mcr p15, 0, r1, c7, c10, 4 @ DWB - WFI may enter a low-power mode
79 mcr p15, 0, r1, c7, c0, 4 @ wait for interrupt 79 mcr p15, 0, r1, c7, c0, 4 @ wait for interrupt
80 mov pc, lr 80 ret lr
81 81
82ENTRY(cpu_v6_dcache_clean_area) 82ENTRY(cpu_v6_dcache_clean_area)
831: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 831: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
84 add r0, r0, #D_CACHE_LINE_SIZE 84 add r0, r0, #D_CACHE_LINE_SIZE
85 subs r1, r1, #D_CACHE_LINE_SIZE 85 subs r1, r1, #D_CACHE_LINE_SIZE
86 bhi 1b 86 bhi 1b
87 mov pc, lr 87 ret lr
88 88
89/* 89/*
90 * cpu_v6_switch_mm(pgd_phys, tsk) 90 * cpu_v6_switch_mm(pgd_phys, tsk)
@@ -113,7 +113,7 @@ ENTRY(cpu_v6_switch_mm)
113#endif 113#endif
114 mcr p15, 0, r1, c13, c0, 1 @ set context ID 114 mcr p15, 0, r1, c13, c0, 1 @ set context ID
115#endif 115#endif
116 mov pc, lr 116 ret lr
117 117
118/* 118/*
119 * cpu_v6_set_pte_ext(ptep, pte, ext) 119 * cpu_v6_set_pte_ext(ptep, pte, ext)
@@ -131,7 +131,7 @@ ENTRY(cpu_v6_set_pte_ext)
131#ifdef CONFIG_MMU 131#ifdef CONFIG_MMU
132 armv6_set_pte_ext cpu_v6 132 armv6_set_pte_ext cpu_v6
133#endif 133#endif
134 mov pc, lr 134 ret lr
135 135
136/* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */ 136/* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
137.globl cpu_v6_suspend_size 137.globl cpu_v6_suspend_size
@@ -241,7 +241,7 @@ __v6_setup:
241 mcreq p15, 0, r5, c1, c0, 1 @ write aux control reg 241 mcreq p15, 0, r5, c1, c0, 1 @ write aux control reg
242 orreq r0, r0, #(1 << 21) @ low interrupt latency configuration 242 orreq r0, r0, #(1 << 21) @ low interrupt latency configuration
243#endif 243#endif
244 mov pc, lr @ return to head.S:__ret 244 ret lr @ return to head.S:__ret
245 245
246 /* 246 /*
247 * V X F I D LR 247 * V X F I D LR
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 1f52915f2b28..ed448d8a596b 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -59,7 +59,7 @@ ENTRY(cpu_v7_switch_mm)
59 mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 59 mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
60 isb 60 isb
61#endif 61#endif
62 mov pc, lr 62 bx lr
63ENDPROC(cpu_v7_switch_mm) 63ENDPROC(cpu_v7_switch_mm)
64 64
65/* 65/*
@@ -106,7 +106,7 @@ ENTRY(cpu_v7_set_pte_ext)
106 ALT_SMP(W(nop)) 106 ALT_SMP(W(nop))
107 ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte 107 ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
108#endif 108#endif
109 mov pc, lr 109 bx lr
110ENDPROC(cpu_v7_set_pte_ext) 110ENDPROC(cpu_v7_set_pte_ext)
111 111
112 /* 112 /*
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 22e3ad63500c..e4c8acfc1323 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -19,6 +19,7 @@
19 * along with this program; if not, write to the Free Software 19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */ 21 */
22#include <asm/assembler.h>
22 23
23#define TTB_IRGN_NC (0 << 8) 24#define TTB_IRGN_NC (0 << 8)
24#define TTB_IRGN_WBWA (1 << 8) 25#define TTB_IRGN_WBWA (1 << 8)
@@ -61,7 +62,7 @@ ENTRY(cpu_v7_switch_mm)
61 mcrr p15, 0, rpgdl, rpgdh, c2 @ set TTB 0 62 mcrr p15, 0, rpgdl, rpgdh, c2 @ set TTB 0
62 isb 63 isb
63#endif 64#endif
64 mov pc, lr 65 ret lr
65ENDPROC(cpu_v7_switch_mm) 66ENDPROC(cpu_v7_switch_mm)
66 67
67#ifdef __ARMEB__ 68#ifdef __ARMEB__
@@ -86,13 +87,18 @@ ENTRY(cpu_v7_set_pte_ext)
86 tst rh, #1 << (57 - 32) @ L_PTE_NONE 87 tst rh, #1 << (57 - 32) @ L_PTE_NONE
87 bicne rl, #L_PTE_VALID 88 bicne rl, #L_PTE_VALID
88 bne 1f 89 bne 1f
89 tst rh, #1 << (55 - 32) @ L_PTE_DIRTY 90
90 orreq rl, #L_PTE_RDONLY 91 eor ip, rh, #1 << (55 - 32) @ toggle L_PTE_DIRTY in temp reg to
92 @ test for !L_PTE_DIRTY || L_PTE_RDONLY
93 tst ip, #1 << (55 - 32) | 1 << (58 - 32)
94 orrne rl, #PTE_AP2
95 biceq rl, #PTE_AP2
96
911: strd r2, r3, [r0] 971: strd r2, r3, [r0]
92 ALT_SMP(W(nop)) 98 ALT_SMP(W(nop))
93 ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte 99 ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
94#endif 100#endif
95 mov pc, lr 101 ret lr
96ENDPROC(cpu_v7_set_pte_ext) 102ENDPROC(cpu_v7_set_pte_ext)
97 103
98 /* 104 /*
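The eor/tst sequence added to cpu_v7_set_pte_ext above computes "!L_PTE_DIRTY || L_PTE_RDONLY" without a second conditional: toggling the dirty bit in a temporary lets a single tst test both bits at once, and the hardware read-only bit is then set for clean or read-only pages and cleared only for dirty, writable ones, so the first store to a clean page faults and the kernel can mark it dirty. A standalone C restatement of the condition; the L_PTE_DIRTY/L_PTE_RDONLY positions come from the hunk's comments, while the PTE_AP2 position is an illustrative assumption:

#include <stdint.h>

#define L_PTE_DIRTY	(1ULL << 55)	/* from the hunk above */
#define L_PTE_RDONLY	(1ULL << 58)
#define PTE_AP2		(1ULL << 7)	/* assumed AP[2] position */

/* orrne/biceq equivalent: hardware read-only unless dirty and writable */
static uint64_t apply_ap2(uint64_t pte)
{
	if (!(pte & L_PTE_DIRTY) || (pte & L_PTE_RDONLY))
		pte |= PTE_AP2;
	else
		pte &= ~PTE_AP2;
	return pte;
}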
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 3db2c2f04a30..b5d67db20897 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -26,7 +26,7 @@
26#endif 26#endif
27 27
28ENTRY(cpu_v7_proc_init) 28ENTRY(cpu_v7_proc_init)
29 mov pc, lr 29 ret lr
30ENDPROC(cpu_v7_proc_init) 30ENDPROC(cpu_v7_proc_init)
31 31
32ENTRY(cpu_v7_proc_fin) 32ENTRY(cpu_v7_proc_fin)
@@ -34,7 +34,7 @@ ENTRY(cpu_v7_proc_fin)
34 bic r0, r0, #0x1000 @ ...i............ 34 bic r0, r0, #0x1000 @ ...i............
35 bic r0, r0, #0x0006 @ .............ca. 35 bic r0, r0, #0x0006 @ .............ca.
36 mcr p15, 0, r0, c1, c0, 0 @ disable caches 36 mcr p15, 0, r0, c1, c0, 0 @ disable caches
37 mov pc, lr 37 ret lr
38ENDPROC(cpu_v7_proc_fin) 38ENDPROC(cpu_v7_proc_fin)
39 39
40/* 40/*
@@ -71,20 +71,20 @@ ENDPROC(cpu_v7_reset)
71ENTRY(cpu_v7_do_idle) 71ENTRY(cpu_v7_do_idle)
72 dsb @ WFI may enter a low-power mode 72 dsb @ WFI may enter a low-power mode
73 wfi 73 wfi
74 mov pc, lr 74 ret lr
75ENDPROC(cpu_v7_do_idle) 75ENDPROC(cpu_v7_do_idle)
76 76
77ENTRY(cpu_v7_dcache_clean_area) 77ENTRY(cpu_v7_dcache_clean_area)
78 ALT_SMP(W(nop)) @ MP extensions imply L1 PTW 78 ALT_SMP(W(nop)) @ MP extensions imply L1 PTW
79 ALT_UP_B(1f) 79 ALT_UP_B(1f)
80 mov pc, lr 80 ret lr
811: dcache_line_size r2, r3 811: dcache_line_size r2, r3
822: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 822: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
83 add r0, r0, r2 83 add r0, r0, r2
84 subs r1, r1, r2 84 subs r1, r1, r2
85 bhi 2b 85 bhi 2b
86 dsb ishst 86 dsb ishst
87 mov pc, lr 87 ret lr
88ENDPROC(cpu_v7_dcache_clean_area) 88ENDPROC(cpu_v7_dcache_clean_area)
89 89
90 string cpu_v7_name, "ARMv7 Processor" 90 string cpu_v7_name, "ARMv7 Processor"
@@ -152,6 +152,40 @@ ENTRY(cpu_v7_do_resume)
152ENDPROC(cpu_v7_do_resume) 152ENDPROC(cpu_v7_do_resume)
153#endif 153#endif
154 154
155/*
156 * Cortex-A9 processor functions
157 */
158 globl_equ cpu_ca9mp_proc_init, cpu_v7_proc_init
159 globl_equ cpu_ca9mp_proc_fin, cpu_v7_proc_fin
160 globl_equ cpu_ca9mp_reset, cpu_v7_reset
161 globl_equ cpu_ca9mp_do_idle, cpu_v7_do_idle
162 globl_equ cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area
163 globl_equ cpu_ca9mp_switch_mm, cpu_v7_switch_mm
164 globl_equ cpu_ca9mp_set_pte_ext, cpu_v7_set_pte_ext
165.globl cpu_ca9mp_suspend_size
166.equ cpu_ca9mp_suspend_size, cpu_v7_suspend_size + 4 * 2
167#ifdef CONFIG_ARM_CPU_SUSPEND
168ENTRY(cpu_ca9mp_do_suspend)
169 stmfd sp!, {r4 - r5}
170 mrc p15, 0, r4, c15, c0, 1 @ Diagnostic register
171 mrc p15, 0, r5, c15, c0, 0 @ Power register
172 stmia r0!, {r4 - r5}
173 ldmfd sp!, {r4 - r5}
174 b cpu_v7_do_suspend
175ENDPROC(cpu_ca9mp_do_suspend)
176
177ENTRY(cpu_ca9mp_do_resume)
178 ldmia r0!, {r4 - r5}
179 mrc p15, 0, r10, c15, c0, 1 @ Read Diagnostic register
180 teq r4, r10 @ Already restored?
181 mcrne p15, 0, r4, c15, c0, 1 @ No, so restore it
182 mrc p15, 0, r10, c15, c0, 0 @ Read Power register
183 teq r5, r10 @ Already restored?
184 mcrne p15, 0, r5, c15, c0, 0 @ No, so restore it
185 b cpu_v7_do_resume
186ENDPROC(cpu_ca9mp_do_resume)
187#endif
188
155#ifdef CONFIG_CPU_PJ4B 189#ifdef CONFIG_CPU_PJ4B
156 globl_equ cpu_pj4b_switch_mm, cpu_v7_switch_mm 190 globl_equ cpu_pj4b_switch_mm, cpu_v7_switch_mm
157 globl_equ cpu_pj4b_set_pte_ext, cpu_v7_set_pte_ext 191 globl_equ cpu_pj4b_set_pte_ext, cpu_v7_set_pte_ext
@@ -163,7 +197,7 @@ ENTRY(cpu_pj4b_do_idle)
163 dsb @ WFI may enter a low-power mode 197 dsb @ WFI may enter a low-power mode
164 wfi 198 wfi
165 dsb @barrier 199 dsb @barrier
166 mov pc, lr 200 ret lr
167ENDPROC(cpu_pj4b_do_idle) 201ENDPROC(cpu_pj4b_do_idle)
168#else 202#else
169 globl_equ cpu_pj4b_do_idle, cpu_v7_do_idle 203 globl_equ cpu_pj4b_do_idle, cpu_v7_do_idle
@@ -184,16 +218,16 @@ ENDPROC(cpu_pj4b_do_suspend)
184 218
185ENTRY(cpu_pj4b_do_resume) 219ENTRY(cpu_pj4b_do_resume)
186 ldmia r0!, {r6 - r10} 220 ldmia r0!, {r6 - r10}
187 mcr p15, 1, r6, c15, c1, 0 @ save CP15 - extra features 221 mcr p15, 1, r6, c15, c1, 0 @ restore CP15 - extra features
188 mcr p15, 1, r7, c15, c2, 0 @ save CP15 - Aux Func Modes Ctrl 0 222 mcr p15, 1, r7, c15, c2, 0 @ restore CP15 - Aux Func Modes Ctrl 0
189 mcr p15, 1, r8, c15, c1, 2 @ save CP15 - Aux Debug Modes Ctrl 2 223 mcr p15, 1, r8, c15, c1, 2 @ restore CP15 - Aux Debug Modes Ctrl 2
190 mcr p15, 1, r9, c15, c1, 1 @ save CP15 - Aux Debug Modes Ctrl 1 224 mcr p15, 1, r9, c15, c1, 1 @ restore CP15 - Aux Debug Modes Ctrl 1
191 mcr p15, 0, r10, c9, c14, 0 @ save CP15 - PMC 225 mcr p15, 0, r10, c9, c14, 0 @ restore CP15 - PMC
192 b cpu_v7_do_resume 226 b cpu_v7_do_resume
193ENDPROC(cpu_pj4b_do_resume) 227ENDPROC(cpu_pj4b_do_resume)
194#endif 228#endif
195.globl cpu_pj4b_suspend_size 229.globl cpu_pj4b_suspend_size
196.equ cpu_pj4b_suspend_size, 4 * 14 230.equ cpu_pj4b_suspend_size, cpu_v7_suspend_size + 4 * 5
197 231
198#endif 232#endif
199 233
@@ -216,6 +250,7 @@ __v7_cr7mp_setup:
216__v7_ca7mp_setup: 250__v7_ca7mp_setup:
217__v7_ca12mp_setup: 251__v7_ca12mp_setup:
218__v7_ca15mp_setup: 252__v7_ca15mp_setup:
253__v7_b15mp_setup:
219__v7_ca17mp_setup: 254__v7_ca17mp_setup:
220 mov r10, #0 255 mov r10, #0
2211: 2561:
@@ -407,7 +442,7 @@ __v7_setup:
407 bic r0, r0, r5 @ clear the bits 442 bic r0, r0, r5 @ clear the bits
408 orr r0, r0, r6 @ set them 443 orr r0, r0, r6 @ set them
409 THUMB( orr r0, r0, #1 << 30 ) @ Thumb exceptions 444 THUMB( orr r0, r0, #1 << 30 ) @ Thumb exceptions
410 mov pc, lr @ return to head.S:__ret 445 ret lr @ return to head.S:__ret
411ENDPROC(__v7_setup) 446ENDPROC(__v7_setup)
412 447
413 .align 2 448 .align 2
@@ -418,6 +453,7 @@ __v7_setup_stack:
418 453
419 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) 454 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
420 define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 455 define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
456 define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
421#ifdef CONFIG_CPU_PJ4B 457#ifdef CONFIG_CPU_PJ4B
422 define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 458 define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
423#endif 459#endif
@@ -470,7 +506,7 @@ __v7_ca5mp_proc_info:
470__v7_ca9mp_proc_info: 506__v7_ca9mp_proc_info:
471 .long 0x410fc090 507 .long 0x410fc090
472 .long 0xff0ffff0 508 .long 0xff0ffff0
473 __v7_proc __v7_ca9mp_setup 509 __v7_proc __v7_ca9mp_setup, proc_fns = ca9mp_processor_functions
474 .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info 510 .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
475 511
476#endif /* CONFIG_ARM_LPAE */ 512#endif /* CONFIG_ARM_LPAE */
@@ -528,6 +564,16 @@ __v7_ca15mp_proc_info:
528 .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info 564 .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
529 565
530 /* 566 /*
567 * Broadcom Corporation Brahma-B15 processor.
568 */
569 .type __v7_b15mp_proc_info, #object
570__v7_b15mp_proc_info:
571 .long 0x420f00f0
572 .long 0xff0ffff0
573 __v7_proc __v7_b15mp_setup, hwcaps = HWCAP_IDIV
574 .size __v7_b15mp_proc_info, . - __v7_b15mp_proc_info
575
576 /*
531 * ARM Ltd. Cortex A17 processor. 577 * ARM Ltd. Cortex A17 processor.
532 */ 578 */
533 .type __v7_ca17mp_proc_info, #object 579 .type __v7_ca17mp_proc_info, #object
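The Cortex-A9 block added to proc-v7.S above splits the A9 off onto its own processor functions so that suspend/resume can carry two extra CP15 registers (Diagnostic and Power Control, used by errata workarounds) in front of the generic v7 state; cpu_ca9mp_suspend_size is accordingly cpu_v7_suspend_size plus two words. The implied save-buffer layout, as a C sketch (the v7 word count is a placeholder, not a value from this patch):

#include <stdint.h>

#define V7_SUSPEND_WORDS 9	/* placeholder for cpu_v7_suspend_size / 4 */

struct ca9mp_suspend_state {
	uint32_t diag;			/* CP15 c15, c0, 1: Diagnostic */
	uint32_t power;			/* CP15 c15, c0, 0: Power Control */
	uint32_t v7[V7_SUSPEND_WORDS];	/* filled by cpu_v7_do_suspend */
};

On resume, the teq/mcrne pairs write each register back only when the saved value differs from the current one, then fall through to cpu_v7_do_resume for the rest.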
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index 1ca37c72f12f..d1e68b553d3b 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -16,11 +16,11 @@
16#include "proc-macros.S" 16#include "proc-macros.S"
17 17
18ENTRY(cpu_v7m_proc_init) 18ENTRY(cpu_v7m_proc_init)
19 mov pc, lr 19 ret lr
20ENDPROC(cpu_v7m_proc_init) 20ENDPROC(cpu_v7m_proc_init)
21 21
22ENTRY(cpu_v7m_proc_fin) 22ENTRY(cpu_v7m_proc_fin)
23 mov pc, lr 23 ret lr
24ENDPROC(cpu_v7m_proc_fin) 24ENDPROC(cpu_v7m_proc_fin)
25 25
26/* 26/*
@@ -34,7 +34,7 @@ ENDPROC(cpu_v7m_proc_fin)
34 */ 34 */
35 .align 5 35 .align 5
36ENTRY(cpu_v7m_reset) 36ENTRY(cpu_v7m_reset)
37 mov pc, r0 37 ret r0
38ENDPROC(cpu_v7m_reset) 38ENDPROC(cpu_v7m_reset)
39 39
40/* 40/*
@@ -46,18 +46,18 @@ ENDPROC(cpu_v7m_reset)
46 */ 46 */
47ENTRY(cpu_v7m_do_idle) 47ENTRY(cpu_v7m_do_idle)
48 wfi 48 wfi
49 mov pc, lr 49 ret lr
50ENDPROC(cpu_v7m_do_idle) 50ENDPROC(cpu_v7m_do_idle)
51 51
52ENTRY(cpu_v7m_dcache_clean_area) 52ENTRY(cpu_v7m_dcache_clean_area)
53 mov pc, lr 53 ret lr
54ENDPROC(cpu_v7m_dcache_clean_area) 54ENDPROC(cpu_v7m_dcache_clean_area)
55 55
56/* 56/*
57 * There is no MMU, so here is nothing to do. 57 * There is no MMU, so here is nothing to do.
58 */ 58 */
59ENTRY(cpu_v7m_switch_mm) 59ENTRY(cpu_v7m_switch_mm)
60 mov pc, lr 60 ret lr
61ENDPROC(cpu_v7m_switch_mm) 61ENDPROC(cpu_v7m_switch_mm)
62 62
63.globl cpu_v7m_suspend_size 63.globl cpu_v7m_suspend_size
@@ -65,11 +65,11 @@ ENDPROC(cpu_v7m_switch_mm)
65 65
66#ifdef CONFIG_ARM_CPU_SUSPEND 66#ifdef CONFIG_ARM_CPU_SUSPEND
67ENTRY(cpu_v7m_do_suspend) 67ENTRY(cpu_v7m_do_suspend)
68 mov pc, lr 68 ret lr
69ENDPROC(cpu_v7m_do_suspend) 69ENDPROC(cpu_v7m_do_suspend)
70 70
71ENTRY(cpu_v7m_do_resume) 71ENTRY(cpu_v7m_do_resume)
72 mov pc, lr 72 ret lr
73ENDPROC(cpu_v7m_do_resume) 73ENDPROC(cpu_v7m_do_resume)
74#endif 74#endif
75 75
@@ -120,7 +120,7 @@ __v7m_setup:
120 ldr r12, [r0, V7M_SCB_CCR] @ system control register 120 ldr r12, [r0, V7M_SCB_CCR] @ system control register
121 orr r12, #V7M_SCB_CCR_STKALIGN 121 orr r12, #V7M_SCB_CCR_STKALIGN
122 str r12, [r0, V7M_SCB_CCR] 122 str r12, [r0, V7M_SCB_CCR]
123 mov pc, lr 123 ret lr
124ENDPROC(__v7m_setup) 124ENDPROC(__v7m_setup)
125 125
126 .align 2 126 .align 2
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index dc1645890042..f8acdfece036 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -83,7 +83,7 @@
83 * Nothing too exciting at the moment 83 * Nothing too exciting at the moment
84 */ 84 */
85ENTRY(cpu_xsc3_proc_init) 85ENTRY(cpu_xsc3_proc_init)
86 mov pc, lr 86 ret lr
87 87
88/* 88/*
89 * cpu_xsc3_proc_fin() 89 * cpu_xsc3_proc_fin()
@@ -93,7 +93,7 @@ ENTRY(cpu_xsc3_proc_fin)
93 bic r0, r0, #0x1800 @ ...IZ........... 93 bic r0, r0, #0x1800 @ ...IZ...........
94 bic r0, r0, #0x0006 @ .............CA. 94 bic r0, r0, #0x0006 @ .............CA.
95 mcr p15, 0, r0, c1, c0, 0 @ disable caches 95 mcr p15, 0, r0, c1, c0, 0 @ disable caches
96 mov pc, lr 96 ret lr
97 97
98/* 98/*
99 * cpu_xsc3_reset(loc) 99 * cpu_xsc3_reset(loc)
@@ -119,7 +119,7 @@ ENTRY(cpu_xsc3_reset)
119 @ CAUTION: MMU turned off from this point. We count on the pipeline 119 @ CAUTION: MMU turned off from this point. We count on the pipeline
120 @ already containing those two last instructions to survive. 120 @ already containing those two last instructions to survive.
121 mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs 121 mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs
122 mov pc, r0 122 ret r0
123ENDPROC(cpu_xsc3_reset) 123ENDPROC(cpu_xsc3_reset)
124 .popsection 124 .popsection
125 125
@@ -138,7 +138,7 @@ ENDPROC(cpu_xsc3_reset)
138ENTRY(cpu_xsc3_do_idle) 138ENTRY(cpu_xsc3_do_idle)
139 mov r0, #1 139 mov r0, #1
140 mcr p14, 0, r0, c7, c0, 0 @ go to idle 140 mcr p14, 0, r0, c7, c0, 0 @ go to idle
141 mov pc, lr 141 ret lr
142 142
143/* ================================= CACHE ================================ */ 143/* ================================= CACHE ================================ */
144 144
@@ -150,7 +150,7 @@ ENTRY(cpu_xsc3_do_idle)
150ENTRY(xsc3_flush_icache_all) 150ENTRY(xsc3_flush_icache_all)
151 mov r0, #0 151 mov r0, #0
152 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 152 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
153 mov pc, lr 153 ret lr
154ENDPROC(xsc3_flush_icache_all) 154ENDPROC(xsc3_flush_icache_all)
155 155
156/* 156/*
@@ -176,7 +176,7 @@ __flush_whole_cache:
176 mcrne p15, 0, ip, c7, c5, 0 @ invalidate L1 I cache and BTB 176 mcrne p15, 0, ip, c7, c5, 0 @ invalidate L1 I cache and BTB
177 mcrne p15, 0, ip, c7, c10, 4 @ data write barrier 177 mcrne p15, 0, ip, c7, c10, 4 @ data write barrier
178 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush 178 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush
179 mov pc, lr 179 ret lr
180 180
181/* 181/*
182 * flush_user_cache_range(start, end, vm_flags) 182 * flush_user_cache_range(start, end, vm_flags)
@@ -205,7 +205,7 @@ ENTRY(xsc3_flush_user_cache_range)
205 mcrne p15, 0, ip, c7, c5, 6 @ invalidate BTB 205 mcrne p15, 0, ip, c7, c5, 6 @ invalidate BTB
206 mcrne p15, 0, ip, c7, c10, 4 @ data write barrier 206 mcrne p15, 0, ip, c7, c10, 4 @ data write barrier
207 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush 207 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush
208 mov pc, lr 208 ret lr
209 209
210/* 210/*
211 * coherent_kern_range(start, end) 211 * coherent_kern_range(start, end)
@@ -232,7 +232,7 @@ ENTRY(xsc3_coherent_user_range)
232 mcr p15, 0, r0, c7, c5, 0 @ invalidate L1 I cache and BTB 232 mcr p15, 0, r0, c7, c5, 0 @ invalidate L1 I cache and BTB
233 mcr p15, 0, r0, c7, c10, 4 @ data write barrier 233 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
234 mcr p15, 0, r0, c7, c5, 4 @ prefetch flush 234 mcr p15, 0, r0, c7, c5, 4 @ prefetch flush
235 mov pc, lr 235 ret lr
236 236
237/* 237/*
238 * flush_kern_dcache_area(void *addr, size_t size) 238 * flush_kern_dcache_area(void *addr, size_t size)
@@ -253,7 +253,7 @@ ENTRY(xsc3_flush_kern_dcache_area)
253 mcr p15, 0, r0, c7, c5, 0 @ invalidate L1 I cache and BTB 253 mcr p15, 0, r0, c7, c5, 0 @ invalidate L1 I cache and BTB
254 mcr p15, 0, r0, c7, c10, 4 @ data write barrier 254 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
255 mcr p15, 0, r0, c7, c5, 4 @ prefetch flush 255 mcr p15, 0, r0, c7, c5, 4 @ prefetch flush
256 mov pc, lr 256 ret lr
257 257
258/* 258/*
259 * dma_inv_range(start, end) 259 * dma_inv_range(start, end)
@@ -277,7 +277,7 @@ xsc3_dma_inv_range:
277 cmp r0, r1 277 cmp r0, r1
278 blo 1b 278 blo 1b
279 mcr p15, 0, r0, c7, c10, 4 @ data write barrier 279 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
280 mov pc, lr 280 ret lr
281 281
282/* 282/*
283 * dma_clean_range(start, end) 283 * dma_clean_range(start, end)
@@ -294,7 +294,7 @@ xsc3_dma_clean_range:
294 cmp r0, r1 294 cmp r0, r1
295 blo 1b 295 blo 1b
296 mcr p15, 0, r0, c7, c10, 4 @ data write barrier 296 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
297 mov pc, lr 297 ret lr
298 298
299/* 299/*
300 * dma_flush_range(start, end) 300 * dma_flush_range(start, end)
@@ -311,7 +311,7 @@ ENTRY(xsc3_dma_flush_range)
311 cmp r0, r1 311 cmp r0, r1
312 blo 1b 312 blo 1b
313 mcr p15, 0, r0, c7, c10, 4 @ data write barrier 313 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
314 mov pc, lr 314 ret lr
315 315
316/* 316/*
317 * dma_map_area(start, size, dir) 317 * dma_map_area(start, size, dir)
@@ -334,7 +334,7 @@ ENDPROC(xsc3_dma_map_area)
334 * - dir - DMA direction 334 * - dir - DMA direction
335 */ 335 */
336ENTRY(xsc3_dma_unmap_area) 336ENTRY(xsc3_dma_unmap_area)
337 mov pc, lr 337 ret lr
338ENDPROC(xsc3_dma_unmap_area) 338ENDPROC(xsc3_dma_unmap_area)
339 339
340 .globl xsc3_flush_kern_cache_louis 340 .globl xsc3_flush_kern_cache_louis
@@ -348,7 +348,7 @@ ENTRY(cpu_xsc3_dcache_clean_area)
348 add r0, r0, #CACHELINESIZE 348 add r0, r0, #CACHELINESIZE
349 subs r1, r1, #CACHELINESIZE 349 subs r1, r1, #CACHELINESIZE
350 bhi 1b 350 bhi 1b
351 mov pc, lr 351 ret lr
352 352
353/* =============================== PageTable ============================== */ 353/* =============================== PageTable ============================== */
354 354
@@ -406,7 +406,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
406 orr r2, r2, ip 406 orr r2, r2, ip
407 407
408 xscale_set_pte_ext_epilogue 408 xscale_set_pte_ext_epilogue
409 mov pc, lr 409 ret lr
410 410
411 .ltorg 411 .ltorg
412 .align 412 .align
@@ -478,7 +478,7 @@ __xsc3_setup:
478 bic r0, r0, r5 @ ..V. ..R. .... ..A. 478 bic r0, r0, r5 @ ..V. ..R. .... ..A.
479 orr r0, r0, r6 @ ..VI Z..S .... .C.M (mmu) 479 orr r0, r0, r6 @ ..VI Z..S .... .C.M (mmu)
480 @ ...I Z..S .... .... (uc) 480 @ ...I Z..S .... .... (uc)
481 mov pc, lr 481 ret lr
482 482
483 .size __xsc3_setup, . - __xsc3_setup 483 .size __xsc3_setup, . - __xsc3_setup
484 484
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index d19b1cfcad91..23259f104c66 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -118,7 +118,7 @@ ENTRY(cpu_xscale_proc_init)
118 mrc p15, 0, r1, c1, c0, 1 118 mrc p15, 0, r1, c1, c0, 1
119 bic r1, r1, #1 119 bic r1, r1, #1
120 mcr p15, 0, r1, c1, c0, 1 120 mcr p15, 0, r1, c1, c0, 1
121 mov pc, lr 121 ret lr
122 122
123/* 123/*
124 * cpu_xscale_proc_fin() 124 * cpu_xscale_proc_fin()
@@ -128,7 +128,7 @@ ENTRY(cpu_xscale_proc_fin)
128 bic r0, r0, #0x1800 @ ...IZ........... 128 bic r0, r0, #0x1800 @ ...IZ...........
129 bic r0, r0, #0x0006 @ .............CA. 129 bic r0, r0, #0x0006 @ .............CA.
130 mcr p15, 0, r0, c1, c0, 0 @ disable caches 130 mcr p15, 0, r0, c1, c0, 0 @ disable caches
131 mov pc, lr 131 ret lr
132 132
133/* 133/*
134 * cpu_xscale_reset(loc) 134 * cpu_xscale_reset(loc)
@@ -160,7 +160,7 @@ ENTRY(cpu_xscale_reset)
160 @ CAUTION: MMU turned off from this point. We count on the pipeline 160 @ CAUTION: MMU turned off from this point. We count on the pipeline
161 @ already containing those two last instructions to survive. 161 @ already containing those two last instructions to survive.
162 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 162 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
163 mov pc, r0 163 ret r0
164ENDPROC(cpu_xscale_reset) 164ENDPROC(cpu_xscale_reset)
165 .popsection 165 .popsection
166 166
@@ -179,7 +179,7 @@ ENDPROC(cpu_xscale_reset)
179ENTRY(cpu_xscale_do_idle) 179ENTRY(cpu_xscale_do_idle)
180 mov r0, #1 180 mov r0, #1
181 mcr p14, 0, r0, c7, c0, 0 @ Go to IDLE 181 mcr p14, 0, r0, c7, c0, 0 @ Go to IDLE
182 mov pc, lr 182 ret lr
183 183
184/* ================================= CACHE ================================ */ 184/* ================================= CACHE ================================ */
185 185
@@ -191,7 +191,7 @@ ENTRY(cpu_xscale_do_idle)
191ENTRY(xscale_flush_icache_all) 191ENTRY(xscale_flush_icache_all)
192 mov r0, #0 192 mov r0, #0
193 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 193 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
194 mov pc, lr 194 ret lr
195ENDPROC(xscale_flush_icache_all) 195ENDPROC(xscale_flush_icache_all)
196 196
197/* 197/*
@@ -216,7 +216,7 @@ __flush_whole_cache:
216 tst r2, #VM_EXEC 216 tst r2, #VM_EXEC
217 mcrne p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB 217 mcrne p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB
218 mcrne p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer 218 mcrne p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
219 mov pc, lr 219 ret lr
220 220
221/* 221/*
222 * flush_user_cache_range(start, end, vm_flags) 222 * flush_user_cache_range(start, end, vm_flags)
@@ -245,7 +245,7 @@ ENTRY(xscale_flush_user_cache_range)
245 tst r2, #VM_EXEC 245 tst r2, #VM_EXEC
246 mcrne p15, 0, ip, c7, c5, 6 @ Invalidate BTB 246 mcrne p15, 0, ip, c7, c5, 6 @ Invalidate BTB
247 mcrne p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer 247 mcrne p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
248 mov pc, lr 248 ret lr
249 249
250/* 250/*
251 * coherent_kern_range(start, end) 251 * coherent_kern_range(start, end)
@@ -269,7 +269,7 @@ ENTRY(xscale_coherent_kern_range)
269 mov r0, #0 269 mov r0, #0
270 mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB 270 mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB
271 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer 271 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
272 mov pc, lr 272 ret lr
273 273
274/* 274/*
275 * coherent_user_range(start, end) 275 * coherent_user_range(start, end)
@@ -291,7 +291,7 @@ ENTRY(xscale_coherent_user_range)
291 mov r0, #0 291 mov r0, #0
292 mcr p15, 0, r0, c7, c5, 6 @ Invalidate BTB 292 mcr p15, 0, r0, c7, c5, 6 @ Invalidate BTB
293 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer 293 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
294 mov pc, lr 294 ret lr
295 295
296/* 296/*
297 * flush_kern_dcache_area(void *addr, size_t size) 297 * flush_kern_dcache_area(void *addr, size_t size)
@@ -312,7 +312,7 @@ ENTRY(xscale_flush_kern_dcache_area)
312 mov r0, #0 312 mov r0, #0
313 mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB 313 mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB
314 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer 314 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
315 mov pc, lr 315 ret lr
316 316
317/* 317/*
318 * dma_inv_range(start, end) 318 * dma_inv_range(start, end)
@@ -336,7 +336,7 @@ xscale_dma_inv_range:
336 cmp r0, r1 336 cmp r0, r1
337 blo 1b 337 blo 1b
338 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer 338 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
339 mov pc, lr 339 ret lr
340 340
341/* 341/*
342 * dma_clean_range(start, end) 342 * dma_clean_range(start, end)
@@ -353,7 +353,7 @@ xscale_dma_clean_range:
353 cmp r0, r1 353 cmp r0, r1
354 blo 1b 354 blo 1b
355 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer 355 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
356 mov pc, lr 356 ret lr
357 357
358/* 358/*
359 * dma_flush_range(start, end) 359 * dma_flush_range(start, end)
@@ -371,7 +371,7 @@ ENTRY(xscale_dma_flush_range)
371 cmp r0, r1 371 cmp r0, r1
372 blo 1b 372 blo 1b
373 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer 373 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
374 mov pc, lr 374 ret lr
375 375
376/* 376/*
377 * dma_map_area(start, size, dir) 377 * dma_map_area(start, size, dir)
@@ -407,7 +407,7 @@ ENDPROC(xscale_80200_A0_A1_dma_map_area)
407 * - dir - DMA direction 407 * - dir - DMA direction
408 */ 408 */
409ENTRY(xscale_dma_unmap_area) 409ENTRY(xscale_dma_unmap_area)
410 mov pc, lr 410 ret lr
411ENDPROC(xscale_dma_unmap_area) 411ENDPROC(xscale_dma_unmap_area)
412 412
413 .globl xscale_flush_kern_cache_louis 413 .globl xscale_flush_kern_cache_louis
@@ -458,7 +458,7 @@ ENTRY(cpu_xscale_dcache_clean_area)
458 add r0, r0, #CACHELINESIZE 458 add r0, r0, #CACHELINESIZE
459 subs r1, r1, #CACHELINESIZE 459 subs r1, r1, #CACHELINESIZE
460 bhi 1b 460 bhi 1b
461 mov pc, lr 461 ret lr
462 462
463/* =============================== PageTable ============================== */ 463/* =============================== PageTable ============================== */
464 464
@@ -521,7 +521,7 @@ ENTRY(cpu_xscale_set_pte_ext)
521 orr r2, r2, ip 521 orr r2, r2, ip
522 522
523 xscale_set_pte_ext_epilogue 523 xscale_set_pte_ext_epilogue
524 mov pc, lr 524 ret lr
525 525
526 .ltorg 526 .ltorg
527 .align 527 .align
@@ -572,7 +572,7 @@ __xscale_setup:
572 mrc p15, 0, r0, c1, c0, 0 @ get control register 572 mrc p15, 0, r0, c1, c0, 0 @ get control register
573 bic r0, r0, r5 573 bic r0, r0, r5
574 orr r0, r0, r6 574 orr r0, r0, r6
575 mov pc, lr 575 ret lr
576 .size __xscale_setup, . - __xscale_setup 576 .size __xscale_setup, . - __xscale_setup
577 577
578 /* 578 /*
diff --git a/arch/arm/mm/tlb-fa.S b/arch/arm/mm/tlb-fa.S
index d3ddcf9a76ca..d2d9ecbe0aac 100644
--- a/arch/arm/mm/tlb-fa.S
+++ b/arch/arm/mm/tlb-fa.S
@@ -18,6 +18,7 @@
18 */ 18 */
19#include <linux/linkage.h> 19#include <linux/linkage.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <asm/assembler.h>
21#include <asm/asm-offsets.h> 22#include <asm/asm-offsets.h>
22#include <asm/tlbflush.h> 23#include <asm/tlbflush.h>
23#include "proc-macros.S" 24#include "proc-macros.S"
@@ -37,7 +38,7 @@ ENTRY(fa_flush_user_tlb_range)
37 vma_vm_mm ip, r2 38 vma_vm_mm ip, r2
38 act_mm r3 @ get current->active_mm 39 act_mm r3 @ get current->active_mm
39 eors r3, ip, r3 @ == mm ? 40 eors r3, ip, r3 @ == mm ?
 40 movne pc, lr @ no, we don't do anything 41 retne lr @ no, we don't do anything
41 mov r3, #0 42 mov r3, #0
42 mcr p15, 0, r3, c7, c10, 4 @ drain WB 43 mcr p15, 0, r3, c7, c10, 4 @ drain WB
43 bic r0, r0, #0x0ff 44 bic r0, r0, #0x0ff
@@ -47,7 +48,7 @@ ENTRY(fa_flush_user_tlb_range)
47 cmp r0, r1 48 cmp r0, r1
48 blo 1b 49 blo 1b
49 mcr p15, 0, r3, c7, c10, 4 @ data write barrier 50 mcr p15, 0, r3, c7, c10, 4 @ data write barrier
50 mov pc, lr 51 ret lr
51 52
52 53
53ENTRY(fa_flush_kern_tlb_range) 54ENTRY(fa_flush_kern_tlb_range)
@@ -61,7 +62,7 @@ ENTRY(fa_flush_kern_tlb_range)
61 blo 1b 62 blo 1b
62 mcr p15, 0, r3, c7, c10, 4 @ data write barrier 63 mcr p15, 0, r3, c7, c10, 4 @ data write barrier
63 mcr p15, 0, r3, c7, c5, 4 @ prefetch flush (isb) 64 mcr p15, 0, r3, c7, c5, 4 @ prefetch flush (isb)
64 mov pc, lr 65 ret lr
65 66
66 __INITDATA 67 __INITDATA
67 68
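The retne lr conversion in fa_flush_user_tlb_range above (and in the tlb-v4* files below) preserves the early-out in these flushes: eors compares the vma's mm with current->active_mm and the routine returns at once when they differ, since a TLB range flush is only needed for the mm that is live on this CPU. The same test in standalone C, with stand-in types for illustration:

#include <stdbool.h>

struct mm_struct { int id; };
struct vm_area_struct { struct mm_struct *vm_mm; };

/* eors r3, ip, r3 ; retne lr: only flush the active mm */
static bool tlb_range_needs_flush(const struct vm_area_struct *vma,
				  const struct mm_struct *active_mm)
{
	return vma->vm_mm == active_mm;
}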
diff --git a/arch/arm/mm/tlb-v4.S b/arch/arm/mm/tlb-v4.S
index 17a025ade573..a2b5dca42048 100644
--- a/arch/arm/mm/tlb-v4.S
+++ b/arch/arm/mm/tlb-v4.S
@@ -14,6 +14,7 @@
14 */ 14 */
15#include <linux/linkage.h> 15#include <linux/linkage.h>
16#include <linux/init.h> 16#include <linux/init.h>
17#include <asm/assembler.h>
17#include <asm/asm-offsets.h> 18#include <asm/asm-offsets.h>
18#include <asm/tlbflush.h> 19#include <asm/tlbflush.h>
19#include "proc-macros.S" 20#include "proc-macros.S"
@@ -33,7 +34,7 @@ ENTRY(v4_flush_user_tlb_range)
33 vma_vm_mm ip, r2 34 vma_vm_mm ip, r2
34 act_mm r3 @ get current->active_mm 35 act_mm r3 @ get current->active_mm
35 eors r3, ip, r3 @ == mm ? 36 eors r3, ip, r3 @ == mm ?
 36 movne pc, lr @ no, we don't do anything 37 retne lr @ no, we don't do anything
37.v4_flush_kern_tlb_range: 38.v4_flush_kern_tlb_range:
38 bic r0, r0, #0x0ff 39 bic r0, r0, #0x0ff
39 bic r0, r0, #0xf00 40 bic r0, r0, #0xf00
@@ -41,7 +42,7 @@ ENTRY(v4_flush_user_tlb_range)
41 add r0, r0, #PAGE_SZ 42 add r0, r0, #PAGE_SZ
42 cmp r0, r1 43 cmp r0, r1
43 blo 1b 44 blo 1b
44 mov pc, lr 45 ret lr
45 46
46/* 47/*
47 * v4_flush_kern_tlb_range(start, end) 48 * v4_flush_kern_tlb_range(start, end)
diff --git a/arch/arm/mm/tlb-v4wb.S b/arch/arm/mm/tlb-v4wb.S
index c04598fa4d4a..5a093b458dbc 100644
--- a/arch/arm/mm/tlb-v4wb.S
+++ b/arch/arm/mm/tlb-v4wb.S
@@ -14,6 +14,7 @@
14 */ 14 */
15#include <linux/linkage.h> 15#include <linux/linkage.h>
16#include <linux/init.h> 16#include <linux/init.h>
17#include <asm/assembler.h>
17#include <asm/asm-offsets.h> 18#include <asm/asm-offsets.h>
18#include <asm/tlbflush.h> 19#include <asm/tlbflush.h>
19#include "proc-macros.S" 20#include "proc-macros.S"
@@ -33,7 +34,7 @@ ENTRY(v4wb_flush_user_tlb_range)
33 vma_vm_mm ip, r2 34 vma_vm_mm ip, r2
34 act_mm r3 @ get current->active_mm 35 act_mm r3 @ get current->active_mm
35 eors r3, ip, r3 @ == mm ? 36 eors r3, ip, r3 @ == mm ?
 36 movne pc, lr @ no, we don't do anything 37 retne lr @ no, we don't do anything
37 vma_vm_flags r2, r2 38 vma_vm_flags r2, r2
38 mcr p15, 0, r3, c7, c10, 4 @ drain WB 39 mcr p15, 0, r3, c7, c10, 4 @ drain WB
39 tst r2, #VM_EXEC 40 tst r2, #VM_EXEC
@@ -44,7 +45,7 @@ ENTRY(v4wb_flush_user_tlb_range)
44 add r0, r0, #PAGE_SZ 45 add r0, r0, #PAGE_SZ
45 cmp r0, r1 46 cmp r0, r1
46 blo 1b 47 blo 1b
47 mov pc, lr 48 ret lr
48 49
49/* 50/*
50 * v4_flush_kern_tlb_range(start, end) 51 * v4_flush_kern_tlb_range(start, end)
@@ -65,7 +66,7 @@ ENTRY(v4wb_flush_kern_tlb_range)
65 add r0, r0, #PAGE_SZ 66 add r0, r0, #PAGE_SZ
66 cmp r0, r1 67 cmp r0, r1
67 blo 1b 68 blo 1b
68 mov pc, lr 69 ret lr
69 70
70 __INITDATA 71 __INITDATA
71 72
diff --git a/arch/arm/mm/tlb-v4wbi.S b/arch/arm/mm/tlb-v4wbi.S
index 1f6062b6c1c1..058861548f68 100644
--- a/arch/arm/mm/tlb-v4wbi.S
+++ b/arch/arm/mm/tlb-v4wbi.S
@@ -14,6 +14,7 @@
14 */ 14 */
15#include <linux/linkage.h> 15#include <linux/linkage.h>
16#include <linux/init.h> 16#include <linux/init.h>
17#include <asm/assembler.h>
17#include <asm/asm-offsets.h> 18#include <asm/asm-offsets.h>
18#include <asm/tlbflush.h> 19#include <asm/tlbflush.h>
19#include "proc-macros.S" 20#include "proc-macros.S"
@@ -32,7 +33,7 @@ ENTRY(v4wbi_flush_user_tlb_range)
32 vma_vm_mm ip, r2 33 vma_vm_mm ip, r2
33 act_mm r3 @ get current->active_mm 34 act_mm r3 @ get current->active_mm
34 eors r3, ip, r3 @ == mm ? 35 eors r3, ip, r3 @ == mm ?
 35 movne pc, lr @ no, we don't do anything 36 retne lr @ no, we don't do anything
36 mov r3, #0 37 mov r3, #0
37 mcr p15, 0, r3, c7, c10, 4 @ drain WB 38 mcr p15, 0, r3, c7, c10, 4 @ drain WB
38 vma_vm_flags r2, r2 39 vma_vm_flags r2, r2
@@ -44,7 +45,7 @@ ENTRY(v4wbi_flush_user_tlb_range)
44 add r0, r0, #PAGE_SZ 45 add r0, r0, #PAGE_SZ
45 cmp r0, r1 46 cmp r0, r1
46 blo 1b 47 blo 1b
47 mov pc, lr 48 ret lr
48 49
49ENTRY(v4wbi_flush_kern_tlb_range) 50ENTRY(v4wbi_flush_kern_tlb_range)
50 mov r3, #0 51 mov r3, #0
@@ -56,7 +57,7 @@ ENTRY(v4wbi_flush_kern_tlb_range)
56 add r0, r0, #PAGE_SZ 57 add r0, r0, #PAGE_SZ
57 cmp r0, r1 58 cmp r0, r1
58 blo 1b 59 blo 1b
59 mov pc, lr 60 ret lr
60 61
61 __INITDATA 62 __INITDATA
62 63
diff --git a/arch/arm/mm/tlb-v6.S b/arch/arm/mm/tlb-v6.S
index eca07f550a0b..6f689be638bd 100644
--- a/arch/arm/mm/tlb-v6.S
+++ b/arch/arm/mm/tlb-v6.S
@@ -13,6 +13,7 @@
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/linkage.h> 14#include <linux/linkage.h>
15#include <asm/asm-offsets.h> 15#include <asm/asm-offsets.h>
16#include <asm/assembler.h>
16#include <asm/page.h> 17#include <asm/page.h>
17#include <asm/tlbflush.h> 18#include <asm/tlbflush.h>
18#include "proc-macros.S" 19#include "proc-macros.S"
@@ -55,7 +56,7 @@ ENTRY(v6wbi_flush_user_tlb_range)
55 cmp r0, r1 56 cmp r0, r1
56 blo 1b 57 blo 1b
57 mcr p15, 0, ip, c7, c10, 4 @ data synchronization barrier 58 mcr p15, 0, ip, c7, c10, 4 @ data synchronization barrier
58 mov pc, lr 59 ret lr
59 60
60/* 61/*
61 * v6wbi_flush_kern_tlb_range(start,end) 62 * v6wbi_flush_kern_tlb_range(start,end)
@@ -84,7 +85,7 @@ ENTRY(v6wbi_flush_kern_tlb_range)
84 blo 1b 85 blo 1b
85 mcr p15, 0, r2, c7, c10, 4 @ data synchronization barrier 86 mcr p15, 0, r2, c7, c10, 4 @ data synchronization barrier
86 mcr p15, 0, r2, c7, c5, 4 @ prefetch flush (isb) 87 mcr p15, 0, r2, c7, c5, 4 @ prefetch flush (isb)
87 mov pc, lr 88 ret lr
88 89
89 __INIT 90 __INIT
90 91
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index 355308767bae..e5101a3bc57c 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -57,7 +57,7 @@ ENTRY(v7wbi_flush_user_tlb_range)
57 cmp r0, r1 57 cmp r0, r1
58 blo 1b 58 blo 1b
59 dsb ish 59 dsb ish
60 mov pc, lr 60 ret lr
61ENDPROC(v7wbi_flush_user_tlb_range) 61ENDPROC(v7wbi_flush_user_tlb_range)
62 62
63/* 63/*
@@ -86,7 +86,7 @@ ENTRY(v7wbi_flush_kern_tlb_range)
86 blo 1b 86 blo 1b
87 dsb ish 87 dsb ish
88 isb 88 isb
89 mov pc, lr 89 ret lr
90ENDPROC(v7wbi_flush_kern_tlb_range) 90ENDPROC(v7wbi_flush_kern_tlb_range)
91 91
92 __INIT 92 __INIT
diff --git a/arch/arm/nwfpe/entry.S b/arch/arm/nwfpe/entry.S
index d18dde95b8aa..5d65be1f1e8a 100644
--- a/arch/arm/nwfpe/entry.S
+++ b/arch/arm/nwfpe/entry.S
@@ -19,7 +19,7 @@
19 along with this program; if not, write to the Free Software 19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 20 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21*/ 21*/
22 22#include <asm/assembler.h>
23#include <asm/opcodes.h> 23#include <asm/opcodes.h>
24 24
25/* This is the kernel's entry point into the floating point emulator. 25/* This is the kernel's entry point into the floating point emulator.
@@ -92,7 +92,7 @@ emulate:
92 mov r0, r6 @ prepare for EmulateAll() 92 mov r0, r6 @ prepare for EmulateAll()
93 bl EmulateAll @ emulate the instruction 93 bl EmulateAll @ emulate the instruction
94 cmp r0, #0 @ was emulation successful 94 cmp r0, #0 @ was emulation successful
95 moveq pc, r4 @ no, return failure 95 reteq r4 @ no, return failure
96 96
97next: 97next:
98.Lx1: ldrt r6, [r5], #4 @ get the next instruction and 98.Lx1: ldrt r6, [r5], #4 @ get the next instruction and
@@ -102,7 +102,7 @@ next:
102 teq r2, #0x0C000000 102 teq r2, #0x0C000000
103 teqne r2, #0x0D000000 103 teqne r2, #0x0D000000
104 teqne r2, #0x0E000000 104 teqne r2, #0x0E000000
105 movne pc, r9 @ return ok if not a fp insn 105 retne r9 @ return ok if not a fp insn
106 106
107 str r5, [sp, #S_PC] @ update PC copy in regs 107 str r5, [sp, #S_PC] @ update PC copy in regs
108 108
@@ -115,7 +115,7 @@ next:
115 @ plain LDR instruction. Weird, but it seems harmless. 115 @ plain LDR instruction. Weird, but it seems harmless.
116 .pushsection .fixup,"ax" 116 .pushsection .fixup,"ax"
117 .align 2 117 .align 2
118.Lfix: mov pc, r9 @ let the user eat segfaults 118.Lfix: ret r9 @ let the user eat segfaults
119 .popsection 119 .popsection
120 120
121 .pushsection __ex_table,"a" 121 .pushsection __ex_table,"a"
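In the nwfpe loop above, the teq/teqne chain followed by retne r9 bails out of emulation as soon as the next instruction is not coprocessor-class: only classes 0xC and 0xD (coprocessor load/store) and 0xE (CDP/MCR/MRC) can be floating-point work. Assuming r2 holds the instruction masked with 0x0f000000 (the masking itself is outside this hunk), the test is:

#include <stdbool.h>
#include <stdint.h>

static bool maybe_fp_insn(uint32_t insn)
{
	uint32_t class = insn & 0x0f000000;	/* bits 27:24 */

	return class == 0x0c000000 ||		/* LDC/STC */
	       class == 0x0d000000 ||
	       class == 0x0e000000;		/* CDP/MCR/MRC */
}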
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
index 99c63d4b6af8..cc649a1e46da 100644
--- a/arch/arm/oprofile/common.c
+++ b/arch/arm/oprofile/common.c
@@ -33,12 +33,14 @@ static struct op_perf_name {
33 char *perf_name; 33 char *perf_name;
34 char *op_name; 34 char *op_name;
35} op_perf_name_map[] = { 35} op_perf_name_map[] = {
36 { "xscale1", "arm/xscale1" }, 36 { "armv5_xscale1", "arm/xscale1" },
37 { "xscale1", "arm/xscale2" }, 37 { "armv5_xscale2", "arm/xscale2" },
38 { "v6", "arm/armv6" }, 38 { "armv6_1136", "arm/armv6" },
39 { "v6mpcore", "arm/mpcore" }, 39 { "armv6_1156", "arm/armv6" },
40 { "ARMv7 Cortex-A8", "arm/armv7" }, 40 { "armv6_1176", "arm/armv6" },
41 { "ARMv7 Cortex-A9", "arm/armv7-ca9" }, 41 { "armv6_11mpcore", "arm/mpcore" },
42 { "armv7_cortex_a8", "arm/armv7" },
43 { "armv7_cortex_a9", "arm/armv7-ca9" },
42}; 44};
43 45
44char *op_name_from_perf_id(void) 46char *op_name_from_perf_id(void)
@@ -107,10 +109,7 @@ static void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
107 109
108 if (!user_mode(regs)) { 110 if (!user_mode(regs)) {
109 struct stackframe frame; 111 struct stackframe frame;
110 frame.fp = regs->ARM_fp; 112 arm_get_current_stackframe(regs, &frame);
111 frame.sp = regs->ARM_sp;
112 frame.lr = regs->ARM_lr;
113 frame.pc = regs->ARM_pc;
114 walk_stackframe(&frame, report_trace, &depth); 113 walk_stackframe(&frame, report_trace, &depth);
115 return; 114 return;
116 } 115 }
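The op_perf_name_map update above tracks the perf PMU renaming and also drops the old table's duplicated "xscale1" key (it used to map both xscale entries). op_name_from_perf_id presumably walks this table to translate the running PMU's name into an oprofile name, along these lines (a sketch, not the function's actual body):

#include <stddef.h>
#include <string.h>

struct op_perf_name {
	char *perf_name;
	char *op_name;
};

static struct op_perf_name map[] = {
	{ "armv7_cortex_a8", "arm/armv7" },
	{ "armv7_cortex_a9", "arm/armv7-ca9" },
};

static char *lookup_op_name(const char *pmu_name)
{
	size_t i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (!strcmp(map[i].perf_name, pmu_name))
			return map[i].op_name;
	return NULL;	/* unknown PMU: oprofile cannot attach */
}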
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index b5608b1f9fbd..1c98659bbf89 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -698,6 +698,8 @@ int omap_request_dma(int dev_id, const char *dev_name,
698 unsigned long flags; 698 unsigned long flags;
699 struct omap_dma_lch *chan; 699 struct omap_dma_lch *chan;
700 700
701 WARN(strcmp(dev_name, "DMA engine"), "Using deprecated platform DMA API - please update to DMA engine");
702
701 spin_lock_irqsave(&dma_chan_lock, flags); 703 spin_lock_irqsave(&dma_chan_lock, flags);
702 for (ch = 0; ch < dma_chan_count; ch++) { 704 for (ch = 0; ch < dma_chan_count; ch++) {
703 if (free_ch == -1 && dma_chan[ch].dev_id == -1) { 705 if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
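The WARN added to omap_request_dma() fires only for legacy callers: the dmaengine backend requests its channels with dev_name "DMA engine", making the strcmp zero on that path and non-zero for any remaining direct user of the platform API. A standalone illustration of the guard, with printf standing in for WARN:

#include <stdio.h>
#include <string.h>

static void warn_if_legacy(const char *dev_name)
{
	/* strcmp() != 0 means the caller is not the dmaengine glue */
	if (strcmp(dev_name, "DMA engine"))
		printf("WARNING: deprecated platform DMA API (%s)\n",
		       dev_name);
}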
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index fe6ca574d093..2e78760f3495 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -34,7 +34,7 @@ ENDPROC(do_vfp)
34 34
35ENTRY(vfp_null_entry) 35ENTRY(vfp_null_entry)
36 dec_preempt_count_ti r10, r4 36 dec_preempt_count_ti r10, r4
37 mov pc, lr 37 ret lr
38ENDPROC(vfp_null_entry) 38ENDPROC(vfp_null_entry)
39 39
40 .align 2 40 .align 2
@@ -49,7 +49,7 @@ ENTRY(vfp_testing_entry)
49 dec_preempt_count_ti r10, r4 49 dec_preempt_count_ti r10, r4
50 ldr r0, VFP_arch_address 50 ldr r0, VFP_arch_address
51 str r0, [r0] @ set to non-zero value 51 str r0, [r0] @ set to non-zero value
52 mov pc, r9 @ we have handled the fault 52 ret r9 @ we have handled the fault
53ENDPROC(vfp_testing_entry) 53ENDPROC(vfp_testing_entry)
54 54
55 .align 2 55 .align 2
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index be807625ed8c..cda654cbf2c2 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -183,7 +183,7 @@ vfp_hw_state_valid:
183 @ always subtract 4 from the following 183 @ always subtract 4 from the following
184 @ instruction address. 184 @ instruction address.
185 dec_preempt_count_ti r10, r4 185 dec_preempt_count_ti r10, r4
186 mov pc, r9 @ we think we have handled things 186 ret r9 @ we think we have handled things
187 187
188 188
189look_for_VFP_exceptions: 189look_for_VFP_exceptions:
@@ -202,7 +202,7 @@ look_for_VFP_exceptions:
202 202
203 DBGSTR "not VFP" 203 DBGSTR "not VFP"
204 dec_preempt_count_ti r10, r4 204 dec_preempt_count_ti r10, r4
205 mov pc, lr 205 ret lr
206 206
207process_exception: 207process_exception:
208 DBGSTR "bounce" 208 DBGSTR "bounce"
@@ -234,7 +234,7 @@ ENTRY(vfp_save_state)
234 VFPFMRX r12, FPINST2 @ FPINST2 if needed (and present) 234 VFPFMRX r12, FPINST2 @ FPINST2 if needed (and present)
2351: 2351:
236 stmia r0, {r1, r2, r3, r12} @ save FPEXC, FPSCR, FPINST, FPINST2 236 stmia r0, {r1, r2, r3, r12} @ save FPEXC, FPSCR, FPINST, FPINST2
237 mov pc, lr 237 ret lr
238ENDPROC(vfp_save_state) 238ENDPROC(vfp_save_state)
239 239
240 .align 240 .align
@@ -245,7 +245,7 @@ vfp_current_hw_state_address:
245#ifdef CONFIG_THUMB2_KERNEL 245#ifdef CONFIG_THUMB2_KERNEL
246 adr \tmp, 1f 246 adr \tmp, 1f
247 add \tmp, \tmp, \base, lsl \shift 247 add \tmp, \tmp, \base, lsl \shift
248 mov pc, \tmp 248 ret \tmp
249#else 249#else
250 add pc, pc, \base, lsl \shift 250 add pc, pc, \base, lsl \shift
251 mov r0, r0 251 mov r0, r0
@@ -257,10 +257,10 @@ ENTRY(vfp_get_float)
257 tbl_branch r0, r3, #3 257 tbl_branch r0, r3, #3
258 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 258 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
2591: mrc p10, 0, r0, c\dr, c0, 0 @ fmrs r0, s0 2591: mrc p10, 0, r0, c\dr, c0, 0 @ fmrs r0, s0
260 mov pc, lr 260 ret lr
261 .org 1b + 8 261 .org 1b + 8
2621: mrc p10, 0, r0, c\dr, c0, 4 @ fmrs r0, s1 2621: mrc p10, 0, r0, c\dr, c0, 4 @ fmrs r0, s1
263 mov pc, lr 263 ret lr
264 .org 1b + 8 264 .org 1b + 8
265 .endr 265 .endr
266ENDPROC(vfp_get_float) 266ENDPROC(vfp_get_float)
@@ -269,10 +269,10 @@ ENTRY(vfp_put_float)
269 tbl_branch r1, r3, #3 269 tbl_branch r1, r3, #3
270 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 270 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
2711: mcr p10, 0, r0, c\dr, c0, 0 @ fmsr r0, s0 2711: mcr p10, 0, r0, c\dr, c0, 0 @ fmsr r0, s0
272 mov pc, lr 272 ret lr
273 .org 1b + 8 273 .org 1b + 8
2741: mcr p10, 0, r0, c\dr, c0, 4 @ fmsr r0, s1 2741: mcr p10, 0, r0, c\dr, c0, 4 @ fmsr r0, s1
275 mov pc, lr 275 ret lr
276 .org 1b + 8 276 .org 1b + 8
277 .endr 277 .endr
278ENDPROC(vfp_put_float) 278ENDPROC(vfp_put_float)
@@ -281,14 +281,14 @@ ENTRY(vfp_get_double)
281 tbl_branch r0, r3, #3 281 tbl_branch r0, r3, #3
282 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 282 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
2831: fmrrd r0, r1, d\dr 2831: fmrrd r0, r1, d\dr
284 mov pc, lr 284 ret lr
285 .org 1b + 8 285 .org 1b + 8
286 .endr 286 .endr
287#ifdef CONFIG_VFPv3 287#ifdef CONFIG_VFPv3
288 @ d16 - d31 registers 288 @ d16 - d31 registers
289 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 289 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
2901: mrrc p11, 3, r0, r1, c\dr @ fmrrd r0, r1, d\dr 2901: mrrc p11, 3, r0, r1, c\dr @ fmrrd r0, r1, d\dr
291 mov pc, lr 291 ret lr
292 .org 1b + 8 292 .org 1b + 8
293 .endr 293 .endr
294#endif 294#endif
@@ -296,21 +296,21 @@ ENTRY(vfp_get_double)
296 @ virtual register 16 (or 32 if VFPv3) for compare with zero 296 @ virtual register 16 (or 32 if VFPv3) for compare with zero
297 mov r0, #0 297 mov r0, #0
298 mov r1, #0 298 mov r1, #0
299 mov pc, lr 299 ret lr
300ENDPROC(vfp_get_double) 300ENDPROC(vfp_get_double)
301 301
302ENTRY(vfp_put_double) 302ENTRY(vfp_put_double)
303 tbl_branch r2, r3, #3 303 tbl_branch r2, r3, #3
304 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 304 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
3051: fmdrr d\dr, r0, r1 3051: fmdrr d\dr, r0, r1
306 mov pc, lr 306 ret lr
307 .org 1b + 8 307 .org 1b + 8
308 .endr 308 .endr
309#ifdef CONFIG_VFPv3 309#ifdef CONFIG_VFPv3
310 @ d16 - d31 registers 310 @ d16 - d31 registers
311 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 311 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
3121: mcrr p11, 3, r0, r1, c\dr @ fmdrr r0, r1, d\dr 3121: mcrr p11, 3, r0, r1, c\dr @ fmdrr r0, r1, d\dr
313 mov pc, lr 313 ret lr
314 .org 1b + 8 314 .org 1b + 8
315 .endr 315 .endr
316#endif 316#endif
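The vfp_get_float/vfp_put_float/vfp_get_double/vfp_put_double conversions above all sit inside dispatch tables built with tbl_branch and .org 1b + 8: a VFP register number cannot be supplied at run time (it has to be encoded into the mrc/mcr instruction itself), so there is one exactly-8-byte entry per register, an access instruction plus a return, and the entry point branches to base + (regnum << 3). Were the register bank an indexable array, the whole table would reduce to (illustrative C):

#include <stdint.h>

static uint32_t vfp_get_float_c(const uint32_t s[32], unsigned int num)
{
	return s[num];	/* the asm jump table emulates this indexing */
}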
diff --git a/arch/arm/xen/hypercall.S b/arch/arm/xen/hypercall.S
index 44e3a5f10c4c..f00e08075938 100644
--- a/arch/arm/xen/hypercall.S
+++ b/arch/arm/xen/hypercall.S
@@ -58,7 +58,7 @@
58ENTRY(HYPERVISOR_##hypercall) \ 58ENTRY(HYPERVISOR_##hypercall) \
59 mov r12, #__HYPERVISOR_##hypercall; \ 59 mov r12, #__HYPERVISOR_##hypercall; \
60 __HVC(XEN_IMM); \ 60 __HVC(XEN_IMM); \
61 mov pc, lr; \ 61 ret lr; \
62ENDPROC(HYPERVISOR_##hypercall) 62ENDPROC(HYPERVISOR_##hypercall)
63 63
64#define HYPERCALL0 HYPERCALL_SIMPLE 64#define HYPERCALL0 HYPERCALL_SIMPLE
@@ -74,7 +74,7 @@ ENTRY(HYPERVISOR_##hypercall) \
74 mov r12, #__HYPERVISOR_##hypercall; \ 74 mov r12, #__HYPERVISOR_##hypercall; \
75 __HVC(XEN_IMM); \ 75 __HVC(XEN_IMM); \
76 ldm sp!, {r4} \ 76 ldm sp!, {r4} \
77 mov pc, lr \ 77 ret lr \
78ENDPROC(HYPERVISOR_##hypercall) 78ENDPROC(HYPERVISOR_##hypercall)
79 79
80 .text 80 .text
@@ -101,5 +101,5 @@ ENTRY(privcmd_call)
101 ldr r4, [sp, #4] 101 ldr r4, [sp, #4]
102 __HVC(XEN_IMM) 102 __HVC(XEN_IMM)
103 ldm sp!, {r4} 103 ldm sp!, {r4}
104 mov pc, lr 104 ret lr
105ENDPROC(privcmd_call); 105ENDPROC(privcmd_call);
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 6345c470650d..00b5906f57b7 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -541,6 +541,17 @@ config CRYPTO_SHA1_ARM
541 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented 541 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
542 using optimized ARM assembler. 542 using optimized ARM assembler.
543 543
544config CRYPTO_SHA1_ARM_NEON
545 tristate "SHA1 digest algorithm (ARM NEON)"
546 depends on ARM && KERNEL_MODE_NEON && !CPU_BIG_ENDIAN
547 select CRYPTO_SHA1_ARM
548 select CRYPTO_SHA1
549 select CRYPTO_HASH
550 help
551 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
552 using optimized ARM NEON assembly, when NEON instructions are
553 available.
554
544config CRYPTO_SHA1_PPC 555config CRYPTO_SHA1_PPC
545 tristate "SHA1 digest algorithm (powerpc)" 556 tristate "SHA1 digest algorithm (powerpc)"
546 depends on PPC 557 depends on PPC
@@ -590,6 +601,21 @@ config CRYPTO_SHA512_SPARC64
590 SHA-512 secure hash standard (DFIPS 180-2) implemented 601 SHA-512 secure hash standard (DFIPS 180-2) implemented
591 using sparc64 crypto instructions, when available. 602 using sparc64 crypto instructions, when available.
592 603
604config CRYPTO_SHA512_ARM_NEON
605 tristate "SHA384 and SHA512 digest algorithm (ARM NEON)"
606 depends on ARM && KERNEL_MODE_NEON && !CPU_BIG_ENDIAN
607 select CRYPTO_SHA512
608 select CRYPTO_HASH
609 help
610 SHA-512 secure hash standard (DFIPS 180-2) implemented
611 using ARM NEON instructions, when available.
612
613 This version of SHA implements a 512 bit hash with 256 bits of
614 security against collision attacks.
615
616 This code also includes SHA-384, a 384 bit hash with 192 bits
617 of security against collision attacks.
618
593config CRYPTO_TGR192 619config CRYPTO_TGR192
594 tristate "Tiger digest algorithms" 620 tristate "Tiger digest algorithms"
595 select CRYPTO_HASH 621 select CRYPTO_HASH
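With CRYPTO_SHA1_ARM_NEON enabled, the NEON code registers as another "sha1" provider and the crypto API selects implementations by priority, so in-kernel users need no changes. Whichever sha1 wins can be exercised from userspace through AF_ALG; a usage sketch (error handling omitted, not part of the patch):

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sha1",
	};
	unsigned char digest[20];
	int i, tfm, op;

	tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
	op = accept(tfm, NULL, NULL);
	write(op, "abc", 3);		/* hash the message "abc" */
	read(op, digest, sizeof(digest));
	for (i = 0; i < 20; i++)
		printf("%02x", digest[i]);
	putchar('\n');
	return 0;
}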
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index 60e5a170c4d2..e6833771a716 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -250,7 +250,7 @@ static void __init global_timer_of_register(struct device_node *np)
250 * fire when the timer value is greater than or equal to. In previous 250 * fire when the timer value is greater than or equal to. In previous
251 * revisions the comparators fired when the timer value was equal to. 251 * revisions the comparators fired when the timer value was equal to.
252 */ 252 */
253 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9 253 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9
254 && (read_cpuid_id() & 0xf0000f) < 0x200000) { 254 && (read_cpuid_id() & 0xf0000f) < 0x200000) {
 255 pr_warn("global-timer: no support for this cpu version.\n"); 255 pr_warn("global-timer: no support for this cpu version.\n");
256 return; 256 return;
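The read_cpuid_part() conversion keeps the revision gate unchanged: in the MIDR, bits [23:20] are the variant (the N of rNpM) and bits [3:0] the revision (the M), so (read_cpuid_id() & 0xf0000f) < 0x200000 holds exactly for r0pX and r1pX parts, the Cortex-A9 revisions whose comparators fire only on equality. Decoded explicitly:

#include <stdbool.h>
#include <stdint.h>

/* equivalent to (midr & 0xf0000f) < 0x200000 */
static bool comparator_fires_on_equal_only(uint32_t midr)
{
	unsigned int variant = (midr >> 20) & 0xf;	/* N in rNpM */

	return variant < 2;	/* r0pX / r1pX, any revision */
}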