-rw-r--r--  arch/arm/crypto/aes-armv4.S              |  3
-rw-r--r--  arch/arm/include/asm/assembler.h         | 21
-rw-r--r--  arch/arm/include/asm/entry-macro-multi.S |  2
-rw-r--r--  arch/arm/kernel/debug.S                  | 10
-rw-r--r--  arch/arm/kernel/entry-armv.S             | 42
-rw-r--r--  arch/arm/kernel/entry-common.S           | 13
-rw-r--r--  arch/arm/kernel/entry-header.S           | 14
-rw-r--r--  arch/arm/kernel/fiqasm.S                 |  4
-rw-r--r--  arch/arm/kernel/head-common.S            |  7
-rw-r--r--  arch/arm/kernel/head-nommu.S             |  8
-rw-r--r--  arch/arm/kernel/head.S                   | 18
-rw-r--r--  arch/arm/kernel/hyp-stub.S               |  6
-rw-r--r--  arch/arm/kernel/iwmmxt.S                 | 10
-rw-r--r--  arch/arm/kernel/relocate_kernel.S        |  3
-rw-r--r--  arch/arm/kernel/sleep.S                  |  2
-rw-r--r--  arch/arm/kvm/init.S                      |  3
-rw-r--r--  arch/arm/lib/ashldi3.S                   |  3
-rw-r--r--  arch/arm/lib/ashrdi3.S                   |  3
-rw-r--r--  arch/arm/lib/backtrace.S                 |  2
-rw-r--r--  arch/arm/lib/bitops.h                    |  5
-rw-r--r--  arch/arm/lib/bswapsdi2.S                 |  5
-rw-r--r--  arch/arm/lib/call_with_stack.S           |  4
-rw-r--r--  arch/arm/lib/csumpartial.S               |  2
-rw-r--r--  arch/arm/lib/csumpartialcopygeneric.S    |  5
-rw-r--r--  arch/arm/lib/delay-loop.S                | 18
-rw-r--r--  arch/arm/lib/div64.S                     | 13
-rw-r--r--  arch/arm/lib/findbit.S                   | 10
-rw-r--r--  arch/arm/lib/getuser.S                   |  8
-rw-r--r--  arch/arm/lib/io-readsb.S                 |  2
-rw-r--r--  arch/arm/lib/io-readsl.S                 |  6
-rw-r--r--  arch/arm/lib/io-readsw-armv3.S           |  4
-rw-r--r--  arch/arm/lib/io-readsw-armv4.S           |  2
-rw-r--r--  arch/arm/lib/io-writesb.S                |  2
-rw-r--r--  arch/arm/lib/io-writesl.S                | 10
-rw-r--r--  arch/arm/lib/io-writesw-armv3.S          |  4
-rw-r--r--  arch/arm/lib/io-writesw-armv4.S          |  4
-rw-r--r--  arch/arm/lib/lib1funcs.S                 | 26
-rw-r--r--  arch/arm/lib/lshrdi3.S                   |  3
-rw-r--r--  arch/arm/lib/memchr.S                    |  2
-rw-r--r--  arch/arm/lib/memset.S                    |  2
-rw-r--r--  arch/arm/lib/memzero.S                   |  2
-rw-r--r--  arch/arm/lib/muldi3.S                    |  3
-rw-r--r--  arch/arm/lib/putuser.S                   | 10
-rw-r--r--  arch/arm/lib/strchr.S                    |  2
-rw-r--r--  arch/arm/lib/strrchr.S                   |  2
-rw-r--r--  arch/arm/lib/ucmpdi2.S                   |  5
-rw-r--r--  arch/arm/mach-davinci/sleep.S            |  2
-rw-r--r--  arch/arm/mach-ep93xx/crunch-bits.S       |  6
-rw-r--r--  arch/arm/mach-imx/suspend-imx6.S         |  5
-rw-r--r--  arch/arm/mach-mvebu/coherency_ll.S       | 10
-rw-r--r--  arch/arm/mach-mvebu/headsmp-a9.S         |  3
-rw-r--r--  arch/arm/mach-omap2/sleep44xx.S          |  3
-rw-r--r--  arch/arm/mach-omap2/sram242x.S           |  6
-rw-r--r--  arch/arm/mach-omap2/sram243x.S           |  6
-rw-r--r--  arch/arm/mach-pxa/mioa701_bootresume.S   |  2
-rw-r--r--  arch/arm/mach-pxa/standby.S              |  4
-rw-r--r--  arch/arm/mach-s3c24xx/sleep-s3c2410.S    |  2
-rw-r--r--  arch/arm/mach-s3c24xx/sleep-s3c2412.S    |  2
-rw-r--r--  arch/arm/mach-shmobile/headsmp.S         |  3
-rw-r--r--  arch/arm/mach-tegra/sleep-tegra20.S      | 24
-rw-r--r--  arch/arm/mach-tegra/sleep-tegra30.S      | 14
-rw-r--r--  arch/arm/mach-tegra/sleep.S              |  8
-rw-r--r--  arch/arm/mm/cache-fa.S                   | 19
-rw-r--r--  arch/arm/mm/cache-nop.S                  |  5
-rw-r--r--  arch/arm/mm/cache-v4.S                   | 13
-rw-r--r--  arch/arm/mm/cache-v4wb.S                 | 15
-rw-r--r--  arch/arm/mm/cache-v4wt.S                 | 13
-rw-r--r--  arch/arm/mm/cache-v6.S                   | 20
-rw-r--r--  arch/arm/mm/cache-v7.S                   | 30
-rw-r--r--  arch/arm/mm/l2c-l2x0-resume.S            |  7
-rw-r--r--  arch/arm/mm/proc-arm1020.S               | 34
-rw-r--r--  arch/arm/mm/proc-arm1020e.S              | 34
-rw-r--r--  arch/arm/mm/proc-arm1022.S               | 34
-rw-r--r--  arch/arm/mm/proc-arm1026.S               | 34
-rw-r--r--  arch/arm/mm/proc-arm720.S                | 16
-rw-r--r--  arch/arm/mm/proc-arm740.S                |  8
-rw-r--r--  arch/arm/mm/proc-arm7tdmi.S              |  8
-rw-r--r--  arch/arm/mm/proc-arm920.S                | 34
-rw-r--r--  arch/arm/mm/proc-arm922.S                | 34
-rw-r--r--  arch/arm/mm/proc-arm925.S                | 34
-rw-r--r--  arch/arm/mm/proc-arm926.S                | 34
-rw-r--r--  arch/arm/mm/proc-arm940.S                | 24
-rw-r--r--  arch/arm/mm/proc-arm946.S                | 30
-rw-r--r--  arch/arm/mm/proc-arm9tdmi.S              |  8
-rw-r--r--  arch/arm/mm/proc-fa526.S                 | 16
-rw-r--r--  arch/arm/mm/proc-feroceon.S              | 44
-rw-r--r--  arch/arm/mm/proc-mohawk.S                | 34
-rw-r--r--  arch/arm/mm/proc-sa110.S                 | 16
-rw-r--r--  arch/arm/mm/proc-sa1100.S                | 16
-rw-r--r--  arch/arm/mm/proc-v6.S                    | 16
-rw-r--r--  arch/arm/mm/proc-v7-2level.S             |  4
-rw-r--r--  arch/arm/mm/proc-v7-3level.S             |  5
-rw-r--r--  arch/arm/mm/proc-v7.S                    | 14
-rw-r--r--  arch/arm/mm/proc-v7m.S                   | 18
-rw-r--r--  arch/arm/mm/proc-xsc3.S                  | 32
-rw-r--r--  arch/arm/mm/proc-xscale.S                | 34
-rw-r--r--  arch/arm/mm/tlb-fa.S                     |  7
-rw-r--r--  arch/arm/mm/tlb-v4.S                     |  5
-rw-r--r--  arch/arm/mm/tlb-v4wb.S                   |  7
-rw-r--r--  arch/arm/mm/tlb-v4wbi.S                  |  7
-rw-r--r--  arch/arm/mm/tlb-v6.S                     |  5
-rw-r--r--  arch/arm/mm/tlb-v7.S                     |  4
-rw-r--r--  arch/arm/nwfpe/entry.S                   |  8
-rw-r--r--  arch/arm/vfp/entry.S                     |  4
-rw-r--r--  arch/arm/vfp/vfphw.S                     | 26
-rw-r--r--  arch/arm/xen/hypercall.S                 |  6
106 files changed, 644 insertions(+), 607 deletions(-)
diff --git a/arch/arm/crypto/aes-armv4.S b/arch/arm/crypto/aes-armv4.S
index 3a14ea8fe97e..ebb9761fb572 100644
--- a/arch/arm/crypto/aes-armv4.S
+++ b/arch/arm/crypto/aes-armv4.S
@@ -35,6 +35,7 @@
 @ that is being targetted.
 
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 
 .text
 
@@ -648,7 +649,7 @@ _armv4_AES_set_encrypt_key:
 
 .Ldone:	mov	r0,#0
 	ldmia	sp!,{r4-r12,lr}
-.Labrt:	mov	pc,lr
+.Labrt:	ret	lr
 ENDPROC(private_AES_set_encrypt_key)
 
 .align	5
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 906703a5b564..f67fd3afebdf 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -427,4 +427,25 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
 #endif
 	.endm
 
+	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
+	.macro	ret\c, reg
+#if __LINUX_ARM_ARCH__ < 6
+	mov\c	pc, \reg
+#else
+	.ifeqs	"\reg", "lr"
+	bx\c	\reg
+	.else
+	mov\c	pc, \reg
+	.endif
+#endif
+	.endm
+	.endr
+
+	.macro	ret.w, reg
+	ret	\reg
+#ifdef CONFIG_THUMB2_KERNEL
+	nop
+#endif
+	.endm
+
 #endif /* __ASM_ASSEMBLER_H__ */
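
For reference, the ret\c macros added above pick the return instruction by
target architecture: builds for anything below ARMv6 always get "mov pc, <reg>",
while ARMv6+ builds emit "bx lr" for a plain link-register return (so ARM/Thumb
interworking state is honoured) and keep "mov pc, <reg>" for any other register.
A rough sketch of the expansions, assuming an ARMv7 build (illustration only,
not part of the patch):

	ret	lr		@ assembles to: bx lr
	reteq	lr		@ assembles to: bxeq lr (the conditional forms
				@ come from the .irp over the condition codes)
	ret	r3		@ assembles to: mov pc, r3
	ret.w	lr		@ "ret lr", padded with a nop under
				@ CONFIG_THUMB2_KERNEL so the slot stays 32-bit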
diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S
index 88d61815f0c0..469a2b30fa27 100644
--- a/arch/arm/include/asm/entry-macro-multi.S
+++ b/arch/arm/include/asm/entry-macro-multi.S
@@ -35,5 +35,5 @@
 \symbol_name:
 	mov	r8, lr
 	arch_irq_handler_default
-	mov	pc, r8
+	ret	r8
 	.endm
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index 14f7c3b14632..78c91b5f97d4 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -90,7 +90,7 @@ ENTRY(printascii)
 	ldrneb	r1, [r0], #1
 	teqne	r1, #0
 	bne	1b
-	mov	pc, lr
+	ret	lr
 ENDPROC(printascii)
 
 ENTRY(printch)
@@ -105,7 +105,7 @@ ENTRY(debug_ll_addr)
 	addruart r2, r3, ip
 	str	r2, [r0]
 	str	r3, [r1]
-	mov	pc, lr
+	ret	lr
 ENDPROC(debug_ll_addr)
 #endif
 
@@ -116,7 +116,7 @@ ENTRY(printascii)
 	mov	r0, #0x04		@ SYS_WRITE0
 ARM(	svc	#0x123456	)
 THUMB(	svc	#0xab		)
-	mov	pc, lr
+	ret	lr
 ENDPROC(printascii)
 
 ENTRY(printch)
@@ -125,14 +125,14 @@ ENTRY(printch)
 	mov	r0, #0x03		@ SYS_WRITEC
 ARM(	svc	#0x123456	)
 THUMB(	svc	#0xab		)
-	mov	pc, lr
+	ret	lr
 ENDPROC(printch)
 
 ENTRY(debug_ll_addr)
 	mov	r2, #0
 	str	r2, [r0]
 	str	r2, [r1]
-	mov	pc, lr
+	ret	lr
 ENDPROC(debug_ll_addr)
 
 #endif
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 52a949a8077d..36276cdccfbc 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -224,7 +224,7 @@ svc_preempt:
 1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
 	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
 	tst	r0, #_TIF_NEED_RESCHED
-	moveq	pc, r8				@ go again
+	reteq	r8				@ go again
 	b	1b
 #endif
 
@@ -490,7 +490,7 @@ ENDPROC(__und_usr)
 	.pushsection .fixup, "ax"
 	.align	2
 4:	str	r4, [sp, #S_PC]			@ retry current instruction
-	mov	pc, r9
+	ret	r9
 	.popsection
 	.pushsection __ex_table,"a"
 	.long	1b, 4b
@@ -552,7 +552,7 @@ call_fpe:
 #endif
 	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
 	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
-	moveq	pc, lr
+	reteq	lr
 	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
 	mov	r7, #1
@@ -571,33 +571,33 @@ call_fpe:
 THUMB(	add	pc, r8			)
 	nop
 
-	movw_pc	lr				@ CP#0
+	ret.w	lr				@ CP#0
 	W(b)	do_fpe				@ CP#1 (FPE)
 	W(b)	do_fpe				@ CP#2 (FPE)
-	movw_pc	lr				@ CP#3
+	ret.w	lr				@ CP#3
 #ifdef CONFIG_CRUNCH
 	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
 	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
 	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
 #else
-	movw_pc	lr				@ CP#4
-	movw_pc	lr				@ CP#5
-	movw_pc	lr				@ CP#6
+	ret.w	lr				@ CP#4
+	ret.w	lr				@ CP#5
+	ret.w	lr				@ CP#6
 #endif
-	movw_pc	lr				@ CP#7
-	movw_pc	lr				@ CP#8
-	movw_pc	lr				@ CP#9
+	ret.w	lr				@ CP#7
+	ret.w	lr				@ CP#8
+	ret.w	lr				@ CP#9
 #ifdef CONFIG_VFP
 	W(b)	do_vfp				@ CP#10 (VFP)
 	W(b)	do_vfp				@ CP#11 (VFP)
 #else
-	movw_pc	lr				@ CP#10 (VFP)
-	movw_pc	lr				@ CP#11 (VFP)
+	ret.w	lr				@ CP#10 (VFP)
+	ret.w	lr				@ CP#11 (VFP)
 #endif
-	movw_pc	lr				@ CP#12
-	movw_pc	lr				@ CP#13
-	movw_pc	lr				@ CP#14 (Debug)
-	movw_pc	lr				@ CP#15 (Control)
+	ret.w	lr				@ CP#12
+	ret.w	lr				@ CP#13
+	ret.w	lr				@ CP#14 (Debug)
+	ret.w	lr				@ CP#15 (Control)
 
 #ifdef NEED_CPU_ARCHITECTURE
 	.align	2
@@ -649,7 +649,7 @@ ENTRY(fp_enter)
 	.popsection
 
 ENTRY(no_fp)
-	mov	pc, lr
+	ret	lr
 ENDPROC(no_fp)
 
 __und_usr_fault_32:
@@ -745,7 +745,7 @@ ENDPROC(__switch_to)
 #ifdef CONFIG_ARM_THUMB
 	bx	\reg
 #else
-	mov	pc, \reg
+	ret	\reg
 #endif
 	.endm
 
@@ -837,7 +837,7 @@ kuser_cmpxchg64_fixup:
 #if __LINUX_ARM_ARCH__ < 6
 	bcc	kuser_cmpxchg32_fixup
 #endif
-	mov	pc, lr
+	ret	lr
 	.previous
 
 #else
@@ -905,7 +905,7 @@ kuser_cmpxchg32_fixup:
 	subs	r8, r4, r7
 	rsbcss	r8, r8, #(2b - 1b)
 	strcs	r7, [sp, #S_PC]
-	mov	pc, lr
+	ret	lr
 	.previous
 
 #else
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 7139d4a7dea7..e52fe5a2d843 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -8,6 +8,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <asm/assembler.h>
 #include <asm/unistd.h>
 #include <asm/ftrace.h>
 #include <asm/unwind.h>
@@ -88,7 +89,7 @@ ENTRY(ret_from_fork)
 	cmp	r5, #0
 	movne	r0, r4
 	adrne	lr, BSYM(1f)
-	movne	pc, r5
+	retne	r5
 1:	get_thread_info tsk
 	b	ret_slow_syscall
 ENDPROC(ret_from_fork)
@@ -290,7 +291,7 @@ ENDPROC(ftrace_graph_caller_old)
 
 .macro mcount_exit
 	ldmia	sp!, {r0-r3, ip, lr}
-	mov	pc, ip
+	ret	ip
 .endm
 
 ENTRY(__gnu_mcount_nc)
@@ -298,7 +299,7 @@ UNWIND(.fnstart)
 #ifdef CONFIG_DYNAMIC_FTRACE
 	mov	ip, lr
 	ldmia	sp!, {lr}
-	mov	pc, ip
+	ret	ip
 #else
 	__mcount
 #endif
@@ -333,12 +334,12 @@ return_to_handler:
 	bl	ftrace_return_to_handler
 	mov	lr, r0			@ r0 has real ret addr
 	ldmia	sp!, {r0-r3}
-	mov	pc, lr
+	ret	lr
 #endif
 
 ENTRY(ftrace_stub)
 .Lftrace_stub:
-	mov	pc, lr
+	ret	lr
 ENDPROC(ftrace_stub)
 
 #endif /* CONFIG_FUNCTION_TRACER */
@@ -561,7 +562,7 @@ sys_mmap2:
 		streq	r5, [sp, #4]
 		beq	sys_mmap_pgoff
 		mov	r0, #-EINVAL
-		mov	pc, lr
+		ret	lr
 #else
 		str	r5, [sp, #4]
 		b	sys_mmap_pgoff
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 5d702f8900b1..8db307d0954b 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -240,12 +240,6 @@
 	movs	pc, lr				@ return & move spsr_svc into cpsr
 	.endm
 
-	@
-	@ 32-bit wide "mov pc, reg"
-	@
-	.macro	movw_pc, reg
-	mov	pc, \reg
-	.endm
 #else	/* CONFIG_THUMB2_KERNEL */
 	.macro	svc_exit, rpsr, irq = 0
 	.if	\irq != 0
@@ -304,14 +298,6 @@
 	movs	pc, lr				@ return & move spsr_svc into cpsr
 	.endm
 #endif	/* ifdef CONFIG_CPU_V7M / else */
-
-	@
-	@ 32-bit wide "mov pc, reg"
-	@
-	.macro	movw_pc, reg
-	mov	pc, \reg
-	nop
-	.endm
 #endif	/* !CONFIG_THUMB2_KERNEL */
 
 /*
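
The movw_pc macros deleted here had one job: keeping every slot of the call_fpe
coprocessor dispatch table in entry-armv.S exactly one 32-bit word wide, since
a bare "mov pc, reg" assembles to a 16-bit encoding on Thumb-2 and so needed a
trailing nop. The ret.w macro now living in <asm/assembler.h> is assumed to
preserve that padding while switching the plain-lr case to bx; roughly
(illustration only):

	@ ARM kernel:      ret.w lr  =>  bx lr         (one 4-byte instruction)
	@ Thumb-2 kernel:  ret.w lr  =>  bx lr ; nop   (two 2-byte instructions)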
diff --git a/arch/arm/kernel/fiqasm.S b/arch/arm/kernel/fiqasm.S
index 207f9d652010..8dd26e1a9bd6 100644
--- a/arch/arm/kernel/fiqasm.S
+++ b/arch/arm/kernel/fiqasm.S
@@ -32,7 +32,7 @@ ENTRY(__set_fiq_regs)
 	ldr	lr, [r0]
 	msr	cpsr_c, r1	@ return to SVC mode
 	mov	r0, r0		@ avoid hazard prior to ARMv4
-	mov	pc, lr
+	ret	lr
 ENDPROC(__set_fiq_regs)
 
 ENTRY(__get_fiq_regs)
@@ -45,5 +45,5 @@ ENTRY(__get_fiq_regs)
 	str	lr, [r0]
 	msr	cpsr_c, r1	@ return to SVC mode
 	mov	r0, r0		@ avoid hazard prior to ARMv4
-	mov	pc, lr
+	ret	lr
 ENDPROC(__get_fiq_regs)
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 572a38335c96..8733012d231f 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -10,6 +10,7 @@
  * published by the Free Software Foundation.
  *
  */
+#include <asm/assembler.h>
 
 #define ATAG_CORE 0x54410001
 #define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2)
@@ -61,10 +62,10 @@ __vet_atags:
 	cmp	r5, r6
 	bne	1f
 
-2:	mov	pc, lr				@ atag/dtb pointer is ok
+2:	ret	lr				@ atag/dtb pointer is ok
 
 1:	mov	r2, #0
-	mov	pc, lr
+	ret	lr
 ENDPROC(__vet_atags)
 
 /*
@@ -162,7 +163,7 @@ __lookup_processor_type:
 	cmp	r5, r6
 	blo	1b
 	mov	r5, #0				@ unknown processor
-2:	mov	pc, lr
+2:	ret	lr
 ENDPROC(__lookup_processor_type)
 
 /*
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 716249cc2ee1..cc176b67c134 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -82,7 +82,7 @@ ENTRY(stext)
 	adr	lr, BSYM(1f)			@ return (PIC) address
 ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
 THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
-THUMB(	mov	pc, r12				)
+THUMB(	ret	r12				)
 1:	b	__after_proc_init
 ENDPROC(stext)
 
@@ -119,7 +119,7 @@ ENTRY(secondary_startup)
 	mov	r13, r12			@ __secondary_switched address
 ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
 THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
-THUMB(	mov	pc, r12				)
+THUMB(	ret	r12				)
 ENDPROC(secondary_startup)
 
 ENTRY(__secondary_switched)
@@ -164,7 +164,7 @@ __after_proc_init:
 #endif
 	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
 #endif /* CONFIG_CPU_CP15 */
-	mov	pc, r13
+	ret	r13
 ENDPROC(__after_proc_init)
 	.ltorg
 
@@ -254,7 +254,7 @@ ENTRY(__setup_mpu)
 	orr	r0, r0, #CR_M			@ Set SCTRL.M (MPU on)
 	mcr	p15, 0, r0, c1, c0, 0		@ Enable MPU
 	isb
-	mov	pc,lr
+	ret	lr
 ENDPROC(__setup_mpu)
 #endif
 #include "head-common.S"
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 2c35f0ff2fdc..664eee8c4a26 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -140,7 +140,7 @@ ENTRY(stext)
 	mov	r8, r4				@ set TTBR1 to swapper_pg_dir
 ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
 THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
-THUMB(	mov	pc, r12				)
+THUMB(	ret	r12				)
 1:	b	__enable_mmu
 ENDPROC(stext)
 	.ltorg
@@ -335,7 +335,7 @@ __create_page_tables:
 	sub	r4, r4, #0x1000		@ point to the PGD table
 	mov	r4, r4, lsr #ARCH_PGD_SHIFT
 #endif
-	mov	pc, lr
+	ret	lr
 ENDPROC(__create_page_tables)
 	.ltorg
 	.align
@@ -383,7 +383,7 @@ ENTRY(secondary_startup)
 ARM(	add	pc, r10, #PROCINFO_INITFUNC	) @ initialise processor
 						  @ (return control reg)
 THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
-THUMB(	mov	pc, r12				)
+THUMB(	ret	r12				)
 ENDPROC(secondary_startup)
 
 	/*
@@ -468,7 +468,7 @@ ENTRY(__turn_mmu_on)
 	instr_sync
 	mov	r3, r3
 	mov	r3, r13
-	mov	pc, r3
+	ret	r3
 __turn_mmu_on_end:
 ENDPROC(__turn_mmu_on)
 	.popsection
@@ -487,7 +487,7 @@ __fixup_smp:
 	orr	r4, r4, #0x0000b000
 	orr	r4, r4, #0x00000020	@ val 0x4100b020
 	teq	r3, r4			@ ARM 11MPCore?
-	moveq	pc, lr			@ yes, assume SMP
+	reteq	lr			@ yes, assume SMP
 
 	mrc	p15, 0, r0, c0, c0, 5	@ read MPIDR
 	and	r0, r0, #0xc0000000	@ multiprocessing extensions and
@@ -500,7 +500,7 @@ __fixup_smp:
 	orr	r4, r4, #0x0000c000
 	orr	r4, r4, #0x00000090
 	teq	r3, r4			@ Check for ARM Cortex-A9
-	movne	pc, lr			@ Not ARM Cortex-A9,
+	retne	lr			@ Not ARM Cortex-A9,
 
 	@ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
 	@ below address check will need to be #ifdef'd or equivalent
@@ -512,7 +512,7 @@ __fixup_smp:
 ARM_BE8(rev	r0, r0)			@ byteswap if big endian
 	and	r0, r0, #0x3		@ number of CPUs
 	teq	r0, #0x0		@ is 1?
-	movne	pc, lr
+	retne	lr
 
 __fixup_smp_on_up:
 	adr	r0, 1f
@@ -539,7 +539,7 @@ smp_on_up:
 	.text
 __do_fixup_smp_on_up:
 	cmp	r4, r5
-	movhs	pc, lr
+	reths	lr
 	ldmia	r4!, {r0, r6}
 ARM(	str	r6, [r0, r3]	)
 THUMB(	add	r0, r0, r3	)
@@ -672,7 +672,7 @@ ARM_BE8(rev16 ip, ip)
 2:	cmp	r4, r5
 	ldrcc	r7, [r4], #4	@ use branch for delay slot
 	bcc	1b
-	mov	pc, lr
+	ret	lr
 #endif
 ENDPROC(__fixup_a_pv_table)
 
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 797b1a6a4906..56ce6290c831 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -99,7 +99,7 @@ ENTRY(__hyp_stub_install_secondary)
 	 * immediately.
 	 */
 	compare_cpu_mode_with_primary	r4, r5, r6, r7
-	movne	pc, lr
+	retne	lr
 
 	/*
 	 * Once we have given up on one CPU, we do not try to install the
@@ -111,7 +111,7 @@ ENTRY(__hyp_stub_install_secondary)
 	 */
 
 	cmp	r4, #HYP_MODE
-	movne	pc, lr			@ give up if the CPU is not in HYP mode
+	retne	lr			@ give up if the CPU is not in HYP mode
 
 /*
  * Configure HSCTLR to set correct exception endianness/instruction set
@@ -201,7 +201,7 @@ ENDPROC(__hyp_get_vectors)
 	@ fall through
 ENTRY(__hyp_set_vectors)
 	__HVC(0)
-	mov	pc, lr
+	ret	lr
 ENDPROC(__hyp_set_vectors)
 
 #ifndef ZIMAGE
diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
index a5599cfc43cb..0960be7953f0 100644
--- a/arch/arm/kernel/iwmmxt.S
+++ b/arch/arm/kernel/iwmmxt.S
@@ -179,7 +179,7 @@ concan_load:
 	get_thread_info r10
 #endif
 4:	dec_preempt_count r10, r3
-	mov	pc, lr
+	ret	lr
 
 /*
  * Back up Concan regs to save area and disable access to them
@@ -265,7 +265,7 @@ ENTRY(iwmmxt_task_copy)
 	mov	r3, lr			@ preserve return address
 	bl	concan_dump
 	msr	cpsr_c, ip		@ restore interrupt mode
-	mov	pc, r3
+	ret	r3
 
 /*
  * Restore Concan state from given memory address
@@ -301,7 +301,7 @@ ENTRY(iwmmxt_task_restore)
 	mov	r3, lr			@ preserve return address
 	bl	concan_load
 	msr	cpsr_c, ip		@ restore interrupt mode
-	mov	pc, r3
+	ret	r3
 
 /*
  * Concan handling on task switch
@@ -323,7 +323,7 @@ ENTRY(iwmmxt_task_switch)
 	add	r3, r0, #TI_IWMMXT_STATE	@ get next task Concan save area
 	ldr	r2, [r2]			@ get current Concan owner
 	teq	r2, r3				@ next task owns it?
-	movne	pc, lr				@ no: leave Concan disabled
+	retne	lr				@ no: leave Concan disabled
 
 1:	@ flip Concan access
 	XSC(eor	r1, r1, #0x3)
@@ -350,7 +350,7 @@ ENTRY(iwmmxt_task_release)
 	eors	r0, r0, r1		@ if equal...
 	streq	r0, [r3]		@ then clear ownership
 	msr	cpsr_c, r2		@ restore interrupts
-	mov	pc, lr
+	ret	lr
 
 	.data
 concan_owner:
diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
index 95858966d84e..35e72585ec1d 100644
--- a/arch/arm/kernel/relocate_kernel.S
+++ b/arch/arm/kernel/relocate_kernel.S
@@ -3,6 +3,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/kexec.h>
 
 	.align	3	/* not needed for this code, but keeps fncpy() happy */
@@ -59,7 +60,7 @@ ENTRY(relocate_new_kernel)
 	mov	r0,#0
 	ldr	r1,kexec_mach_type
 	ldr	r2,kexec_boot_atags
-ARM(	mov	pc, lr	)
+ARM(	ret	lr	)
 THUMB(	bx	lr	)
 
 	.align
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 1b880db2a033..e1e60e5a7a27 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -107,7 +107,7 @@ ENTRY(cpu_resume_mmu)
 	instr_sync
 	mov	r0, r0
 	mov	r0, r0
-	mov	pc, r3			@ jump to virtual address
+	ret	r3			@ jump to virtual address
 ENDPROC(cpu_resume_mmu)
 	.popsection
 cpu_resume_after_mmu:
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index 1b9844d369cc..b2d229f09c07 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -17,6 +17,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/unified.h>
 #include <asm/asm-offsets.h>
 #include <asm/kvm_asm.h>
@@ -134,7 +135,7 @@ phase2:
 	ldr	r0, =TRAMPOLINE_VA
 	adr	r1, target
 	bfi	r0, r1, #0, #PAGE_SHIFT
-	mov	pc, r0
+	ret	r0
 
 target:	@ We're now in the trampoline code, switch page tables
 	mcrr	p15, 4, r2, r3, c2
diff --git a/arch/arm/lib/ashldi3.S b/arch/arm/lib/ashldi3.S
index 638deb13da1c..b05e95840651 100644
--- a/arch/arm/lib/ashldi3.S
+++ b/arch/arm/lib/ashldi3.S
@@ -27,6 +27,7 @@ Boston, MA 02110-1301, USA. */
 
 
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 
 #ifdef __ARMEB__
 #define al r1
@@ -47,7 +48,7 @@ ENTRY(__aeabi_llsl)
 THUMB(	lsrmi	r3, al, ip		)
 THUMB(	orrmi	ah, ah, r3		)
 	mov	al, al, lsl r2
-	mov	pc, lr
+	ret	lr
 
 ENDPROC(__ashldi3)
 ENDPROC(__aeabi_llsl)
diff --git a/arch/arm/lib/ashrdi3.S b/arch/arm/lib/ashrdi3.S
index 015e8aa5a1d1..275d7d2341a4 100644
--- a/arch/arm/lib/ashrdi3.S
+++ b/arch/arm/lib/ashrdi3.S
@@ -27,6 +27,7 @@ Boston, MA 02110-1301, USA. */
 
 
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 
 #ifdef __ARMEB__
 #define al r1
@@ -47,7 +48,7 @@ ENTRY(__aeabi_lasr)
 THUMB(	lslmi	r3, ah, ip		)
 THUMB(	orrmi	al, al, r3		)
 	mov	ah, ah, asr r2
-	mov	pc, lr
+	ret	lr
 
 ENDPROC(__ashrdi3)
 ENDPROC(__aeabi_lasr)
diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S
index 4102be617fce..fab5a50503ae 100644
--- a/arch/arm/lib/backtrace.S
+++ b/arch/arm/lib/backtrace.S
@@ -25,7 +25,7 @@
 ENTRY(c_backtrace)
 
 #if !defined(CONFIG_FRAME_POINTER) || !defined(CONFIG_PRINTK)
-	mov	pc, lr
+	ret	lr
 ENDPROC(c_backtrace)
 #else
 	stmfd	sp!, {r4 - r8, lr}	@ Save an extra register so we have a location...
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index 9f12ed1eea86..7d807cfd8ef5 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -1,3 +1,4 @@
+#include <asm/assembler.h>
 #include <asm/unwind.h>
 
 #if __LINUX_ARM_ARCH__ >= 6
@@ -70,7 +71,7 @@ UNWIND( .fnstart )
 	\instr	r2, r2, r3
 	str	r2, [r1, r0, lsl #2]
 	restore_irqs ip
-	mov	pc, lr
+	ret	lr
 UNWIND(	.fnend	)
 ENDPROC(\name		)
 	.endm
@@ -98,7 +99,7 @@ UNWIND( .fnstart )
 	\store	r2, [r1]
 	moveq	r0, #0
 	restore_irqs ip
-	mov	pc, lr
+	ret	lr
 UNWIND(	.fnend	)
 ENDPROC(\name		)
 	.endm
diff --git a/arch/arm/lib/bswapsdi2.S b/arch/arm/lib/bswapsdi2.S
index 9fcdd154eff9..07cda737bb11 100644
--- a/arch/arm/lib/bswapsdi2.S
+++ b/arch/arm/lib/bswapsdi2.S
@@ -1,4 +1,5 @@
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 
 #if __LINUX_ARM_ARCH__ >= 6
 ENTRY(__bswapsi2)
@@ -18,7 +19,7 @@ ENTRY(__bswapsi2)
 	mov	r3, r3, lsr #8
 	bic	r3, r3, #0xff00
 	eor	r0, r3, r0, ror #8
-	mov	pc, lr
+	ret	lr
 ENDPROC(__bswapsi2)
 
 ENTRY(__bswapdi2)
@@ -31,6 +32,6 @@ ENTRY(__bswapdi2)
 	bic	r1, r1, #0xff00
 	eor	r1, r1, r0, ror #8
 	eor	r0, r3, ip, ror #8
-	mov	pc, lr
+	ret	lr
 ENDPROC(__bswapdi2)
 #endif
diff --git a/arch/arm/lib/call_with_stack.S b/arch/arm/lib/call_with_stack.S
index 916c80f13ae7..ed1a421813cb 100644
--- a/arch/arm/lib/call_with_stack.S
+++ b/arch/arm/lib/call_with_stack.S
@@ -36,9 +36,9 @@ ENTRY(call_with_stack)
 	mov	r0, r1
 
 	adr	lr, BSYM(1f)
-	mov	pc, r2
+	ret	r2
 
 1:	ldr	lr, [sp]
 	ldr	sp, [sp, #4]
-	mov	pc, lr
+	ret	lr
 ENDPROC(call_with_stack)
diff --git a/arch/arm/lib/csumpartial.S b/arch/arm/lib/csumpartial.S
index 31d3cb34740d..984e0f29d548 100644
--- a/arch/arm/lib/csumpartial.S
+++ b/arch/arm/lib/csumpartial.S
@@ -97,7 +97,7 @@ td3 .req lr
 #endif
 #endif
 	adcnes	sum, sum, td0		@ update checksum
-	mov	pc, lr
+	ret	lr
 
 ENTRY(csum_partial)
 	stmfd	sp!, {buf, lr}
diff --git a/arch/arm/lib/csumpartialcopygeneric.S b/arch/arm/lib/csumpartialcopygeneric.S
index d6e742d24007..10b45909610c 100644
--- a/arch/arm/lib/csumpartialcopygeneric.S
+++ b/arch/arm/lib/csumpartialcopygeneric.S
@@ -7,6 +7,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <asm/assembler.h>
 
 /*
  * unsigned int
@@ -40,7 +41,7 @@ sum .req r3
 	adcs	sum, sum, ip, put_byte_1	@ update checksum
 	strb	ip, [dst], #1
 	tst	dst, #2
-	moveq	pc, lr			@ dst is now 32bit aligned
+	reteq	lr			@ dst is now 32bit aligned
 
 .Ldst_16bit:	load2b	r8, ip
 	sub	len, len, #2
@@ -48,7 +49,7 @@ sum .req r3
 	strb	r8, [dst], #1
 	adcs	sum, sum, ip, put_byte_1
 	strb	ip, [dst], #1
-	mov	pc, lr			@ dst is now 32bit aligned
+	ret	lr			@ dst is now 32bit aligned
 
 	/*
 	 * Handle 0 to 7 bytes, with any alignment of source and
diff --git a/arch/arm/lib/delay-loop.S b/arch/arm/lib/delay-loop.S
index bc1033b897b4..518bf6e93f78 100644
--- a/arch/arm/lib/delay-loop.S
+++ b/arch/arm/lib/delay-loop.S
@@ -35,7 +35,7 @@ ENTRY(__loop_const_udelay) @ 0 <= r0 <= 0x7fffff06
 	mul	r0, r2, r0		@ max = 2^32-1
 	add	r0, r0, r1, lsr #32-6
 	movs	r0, r0, lsr #6
-	moveq	pc, lr
+	reteq	lr
 
 /*
  * loops = r0 * HZ * loops_per_jiffy / 1000000
@@ -46,23 +46,23 @@ ENTRY(__loop_const_udelay) @ 0 <= r0 <= 0x7fffff06
 ENTRY(__loop_delay)
 	subs	r0, r0, #1
 #if 0
-	movls	pc, lr
+	retls	lr
 	subs	r0, r0, #1
-	movls	pc, lr
+	retls	lr
 	subs	r0, r0, #1
-	movls	pc, lr
+	retls	lr
 	subs	r0, r0, #1
-	movls	pc, lr
+	retls	lr
 	subs	r0, r0, #1
-	movls	pc, lr
+	retls	lr
 	subs	r0, r0, #1
-	movls	pc, lr
+	retls	lr
 	subs	r0, r0, #1
-	movls	pc, lr
+	retls	lr
 	subs	r0, r0, #1
 #endif
 	bhi	__loop_delay
-	mov	pc, lr
+	ret	lr
 ENDPROC(__loop_udelay)
 ENDPROC(__loop_const_udelay)
 ENDPROC(__loop_delay)
diff --git a/arch/arm/lib/div64.S b/arch/arm/lib/div64.S
index e55c4842c290..a9eafe4981eb 100644
--- a/arch/arm/lib/div64.S
+++ b/arch/arm/lib/div64.S
@@ -13,6 +13,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/unwind.h>
 
 #ifdef __ARMEB__
@@ -97,7 +98,7 @@ UNWIND(.fnstart)
 	mov	yl, #0
 	cmpeq	xl, r4
 	movlo	xh, xl
-	movlo	pc, lr
+	retlo	lr
 
 	@ The division loop for lower bit positions.
 	@ Here we shift remainer bits leftwards rather than moving the
@@ -111,14 +112,14 @@ UNWIND(.fnstart)
 	subcs	xh, xh, r4
 	movs	ip, ip, lsr #1
 	bne	4b
-	mov	pc, lr
+	ret	lr
 
 	@ The top part of remainder became zero.  If carry is set
 	@ (the 33th bit) this is a false positive so resume the loop.
 	@ Otherwise, if lower part is also null then we are done.
 6:	bcs	5b
 	cmp	xl, #0
-	moveq	pc, lr
+	reteq	lr
 
 	@ We still have remainer bits in the low part.  Bring them up.
 
@@ -144,7 +145,7 @@ UNWIND(.fnstart)
 	movs	ip, ip, lsr #1
 	mov	xh, #1
 	bne	4b
-	mov	pc, lr
+	ret	lr
 
 8:	@ Division by a power of 2: determine what that divisor order is
 	@ then simply shift values around
@@ -184,13 +185,13 @@ UNWIND(.fnstart)
 THUMB(	orr	yl, yl, xh		)
 	mov	xh, xl, lsl ip
 	mov	xh, xh, lsr ip
-	mov	pc, lr
+	ret	lr
 
 	@ eq -> division by 1: obvious enough...
 9:	moveq	yl, xl
 	moveq	yh, xh
 	moveq	xh, #0
-	moveq	pc, lr
+	reteq	lr
 UNWIND(.fnend)
 
 UNWIND(.fnstart)
diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S
index 64f6bc1a9132..7848780e8834 100644
--- a/arch/arm/lib/findbit.S
+++ b/arch/arm/lib/findbit.S
@@ -35,7 +35,7 @@ ENTRY(_find_first_zero_bit_le)
 2:	cmp	r2, r1			@ any more?
 	blo	1b
 3:	mov	r0, r1			@ no free bits
-	mov	pc, lr
+	ret	lr
 ENDPROC(_find_first_zero_bit_le)
 
 /*
@@ -76,7 +76,7 @@ ENTRY(_find_first_bit_le)
 2:	cmp	r2, r1			@ any more?
 	blo	1b
 3:	mov	r0, r1			@ no free bits
-	mov	pc, lr
+	ret	lr
 ENDPROC(_find_first_bit_le)
 
 /*
@@ -114,7 +114,7 @@ ENTRY(_find_first_zero_bit_be)
 2:	cmp	r2, r1			@ any more?
 	blo	1b
 3:	mov	r0, r1			@ no free bits
-	mov	pc, lr
+	ret	lr
 ENDPROC(_find_first_zero_bit_be)
 
 ENTRY(_find_next_zero_bit_be)
@@ -148,7 +148,7 @@ ENTRY(_find_first_bit_be)
 2:	cmp	r2, r1			@ any more?
 	blo	1b
 3:	mov	r0, r1			@ no free bits
-	mov	pc, lr
+	ret	lr
 ENDPROC(_find_first_bit_be)
 
 ENTRY(_find_next_bit_be)
@@ -192,5 +192,5 @@ ENDPROC(_find_next_bit_be)
 #endif
 	cmp	r1, r0			@ Clamp to maxbit
 	movlo	r0, r1
-	mov	pc, lr
+	ret	lr
 
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 9b06bb41fca6..0f958e3d8180 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -36,7 +36,7 @@ ENTRY(__get_user_1)
 	check_uaccess r0, 1, r1, r2, __get_user_bad
 1:	TUSER(ldrb)	r2, [r0]
 	mov	r0, #0
-	mov	pc, lr
+	ret	lr
 ENDPROC(__get_user_1)
 
 ENTRY(__get_user_2)
@@ -56,20 +56,20 @@ rb .req r0
 	orr	r2, rb, r2, lsl #8
 #endif
 	mov	r0, #0
-	mov	pc, lr
+	ret	lr
 ENDPROC(__get_user_2)
 
 ENTRY(__get_user_4)
 	check_uaccess r0, 4, r1, r2, __get_user_bad
 4:	TUSER(ldr)	r2, [r0]
 	mov	r0, #0
-	mov	pc, lr
+	ret	lr
 ENDPROC(__get_user_4)
 
 __get_user_bad:
 	mov	r2, #0
 	mov	r0, #-EFAULT
-	mov	pc, lr
+	ret	lr
 ENDPROC(__get_user_bad)
 
 .pushsection __ex_table, "a"
diff --git a/arch/arm/lib/io-readsb.S b/arch/arm/lib/io-readsb.S
index 9f4238987fe9..c31b2f3153f1 100644
--- a/arch/arm/lib/io-readsb.S
+++ b/arch/arm/lib/io-readsb.S
@@ -25,7 +25,7 @@
 
 ENTRY(__raw_readsb)
 	teq	r2, #0		@ do we have to check for the zero len?
-	moveq	pc, lr
+	reteq	lr
 	ands	ip, r1, #3
 	bne	.Linsb_align
 
diff --git a/arch/arm/lib/io-readsl.S b/arch/arm/lib/io-readsl.S
index 7a7430950c79..2ed86fa5465f 100644
--- a/arch/arm/lib/io-readsl.S
+++ b/arch/arm/lib/io-readsl.S
@@ -12,7 +12,7 @@
 
 ENTRY(__raw_readsl)
 	teq	r2, #0		@ do we have to check for the zero len?
-	moveq	pc, lr
+	reteq	lr
 	ands	ip, r1, #3
 	bne	3f
 
@@ -33,7 +33,7 @@ ENTRY(__raw_readsl)
 	stmcsia	r1!, {r3, ip}
 	ldrne	r3, [r0, #0]
 	strne	r3, [r1, #0]
-	mov	pc, lr
+	ret	lr
 
 3:	ldr	r3, [r0]
 	cmp	ip, #2
@@ -75,5 +75,5 @@ ENTRY(__raw_readsl)
 	strb	r3, [r1, #1]
 8:	mov	r3, ip, get_byte_0
 	strb	r3, [r1, #0]
-	mov	pc, lr
+	ret	lr
 ENDPROC(__raw_readsl)
diff --git a/arch/arm/lib/io-readsw-armv3.S b/arch/arm/lib/io-readsw-armv3.S
index 88487c8c4f23..413da9914529 100644
--- a/arch/arm/lib/io-readsw-armv3.S
+++ b/arch/arm/lib/io-readsw-armv3.S
@@ -27,11 +27,11 @@
 	strb	r3, [r1], #1
 
 	subs	r2, r2, #1
-	moveq	pc, lr
+	reteq	lr
 
 ENTRY(__raw_readsw)
 	teq	r2, #0		@ do we have to check for the zero len?
-	moveq	pc, lr
+	reteq	lr
 	tst	r1, #3
 	bne	.Linsw_align
 
diff --git a/arch/arm/lib/io-readsw-armv4.S b/arch/arm/lib/io-readsw-armv4.S
index 1f393d42593d..d9a45e9692ae 100644
--- a/arch/arm/lib/io-readsw-armv4.S
+++ b/arch/arm/lib/io-readsw-armv4.S
@@ -26,7 +26,7 @@
 
 ENTRY(__raw_readsw)
 	teq	r2, #0
-	moveq	pc, lr
+	reteq	lr
 	tst	r1, #3
 	bne	.Linsw_align
 
diff --git a/arch/arm/lib/io-writesb.S b/arch/arm/lib/io-writesb.S
index 68b92f4acaeb..a46bbc9b168b 100644
--- a/arch/arm/lib/io-writesb.S
+++ b/arch/arm/lib/io-writesb.S
@@ -45,7 +45,7 @@
 
 ENTRY(__raw_writesb)
 	teq	r2, #0		@ do we have to check for the zero len?
-	moveq	pc, lr
+	reteq	lr
 	ands	ip, r1, #3
 	bne	.Loutsb_align
 
diff --git a/arch/arm/lib/io-writesl.S b/arch/arm/lib/io-writesl.S
index d0d104a0dd11..4ea2435988c1 100644
--- a/arch/arm/lib/io-writesl.S
+++ b/arch/arm/lib/io-writesl.S
@@ -12,7 +12,7 @@
 
 ENTRY(__raw_writesl)
 	teq	r2, #0		@ do we have to check for the zero len?
-	moveq	pc, lr
+	reteq	lr
 	ands	ip, r1, #3
 	bne	3f
 
@@ -33,7 +33,7 @@ ENTRY(__raw_writesl)
 	ldrne	r3, [r1, #0]
 	strcs	ip, [r0, #0]
 	strne	r3, [r0, #0]
-	mov	pc, lr
+	ret	lr
 
 3:	bic	r1, r1, #3
 	ldr	r3, [r1], #4
@@ -47,7 +47,7 @@ ENTRY(__raw_writesl)
 	orr	ip, ip, r3, lspush #16
 	str	ip, [r0]
 	bne	4b
-	mov	pc, lr
+	ret	lr
 
 5:	mov	ip, r3, lspull #8
 	ldr	r3, [r1], #4
@@ -55,7 +55,7 @@ ENTRY(__raw_writesl)
 	orr	ip, ip, r3, lspush #24
 	str	ip, [r0]
 	bne	5b
-	mov	pc, lr
+	ret	lr
 
 6:	mov	ip, r3, lspull #24
 	ldr	r3, [r1], #4
@@ -63,5 +63,5 @@ ENTRY(__raw_writesl)
 	orr	ip, ip, r3, lspush #8
 	str	ip, [r0]
 	bne	6b
-	mov	pc, lr
+	ret	lr
 ENDPROC(__raw_writesl)
diff --git a/arch/arm/lib/io-writesw-armv3.S b/arch/arm/lib/io-writesw-armv3.S
index 49b800419e32..121789eb6802 100644
--- a/arch/arm/lib/io-writesw-armv3.S
+++ b/arch/arm/lib/io-writesw-armv3.S
@@ -28,11 +28,11 @@
 	orr	r3, r3, r3, lsl #16
 	str	r3, [r0]
 	subs	r2, r2, #1
-	moveq	pc, lr
+	reteq	lr
 
 ENTRY(__raw_writesw)
 	teq	r2, #0		@ do we have to check for the zero len?
-	moveq	pc, lr
+	reteq	lr
 	tst	r1, #3
 	bne	.Loutsw_align
 
diff --git a/arch/arm/lib/io-writesw-armv4.S b/arch/arm/lib/io-writesw-armv4.S
index ff4f71b579ee..269f90c51ad2 100644
--- a/arch/arm/lib/io-writesw-armv4.S
+++ b/arch/arm/lib/io-writesw-armv4.S
@@ -31,7 +31,7 @@
 
 ENTRY(__raw_writesw)
 	teq	r2, #0
-	moveq	pc, lr
+	reteq	lr
 	ands	r3, r1, #3
 	bne	.Loutsw_align
 
@@ -96,5 +96,5 @@ ENTRY(__raw_writesw)
 	tst	r2, #1
 3:	movne	ip, r3, lsr #8
 	strneh	ip, [r0]
-	mov	pc, lr
+	ret	lr
 ENDPROC(__raw_writesw)
diff --git a/arch/arm/lib/lib1funcs.S b/arch/arm/lib/lib1funcs.S
index c562f649734c..947567ff67f9 100644
--- a/arch/arm/lib/lib1funcs.S
+++ b/arch/arm/lib/lib1funcs.S
@@ -210,7 +210,7 @@ ENTRY(__aeabi_uidiv)
 UNWIND(.fnstart)
 
 	subs	r2, r1, #1
-	moveq	pc, lr
+	reteq	lr
 	bcc	Ldiv0
 	cmp	r0, r1
 	bls	11f
@@ -220,16 +220,16 @@ UNWIND(.fnstart)
 	ARM_DIV_BODY r0, r1, r2, r3
 
 	mov	r0, r2
-	mov	pc, lr
+	ret	lr
 
 11:	moveq	r0, #1
 	movne	r0, #0
-	mov	pc, lr
+	ret	lr
 
 12:	ARM_DIV2_ORDER r1, r2
 
 	mov	r0, r0, lsr r2
-	mov	pc, lr
+	ret	lr
 
 UNWIND(.fnend)
 ENDPROC(__udivsi3)
@@ -244,11 +244,11 @@ UNWIND(.fnstart)
 	moveq	r0, #0
 	tsthi	r1, r2				@ see if divisor is power of 2
 	andeq	r0, r0, r2
-	movls	pc, lr
+	retls	lr
 
 	ARM_MOD_BODY r0, r1, r2, r3
 
-	mov	pc, lr
+	ret	lr
 
 UNWIND(.fnend)
 ENDPROC(__umodsi3)
@@ -274,23 +274,23 @@ UNWIND(.fnstart)
 
 	cmp	ip, #0
 	rsbmi	r0, r0, #0
-	mov	pc, lr
+	ret	lr
 
 10:	teq	ip, r0				@ same sign ?
 	rsbmi	r0, r0, #0
-	mov	pc, lr
+	ret	lr
 
 11:	movlo	r0, #0
 	moveq	r0, ip, asr #31
 	orreq	r0, r0, #1
-	mov	pc, lr
+	ret	lr
 
 12:	ARM_DIV2_ORDER r1, r2
 
 	cmp	ip, #0
 	mov	r0, r3, lsr r2
 	rsbmi	r0, r0, #0
-	mov	pc, lr
+	ret	lr
 
 UNWIND(.fnend)
 ENDPROC(__divsi3)
@@ -315,7 +315,7 @@ UNWIND(.fnstart)
 
 10:	cmp	ip, #0
 	rsbmi	r0, r0, #0
-	mov	pc, lr
+	ret	lr
 
 UNWIND(.fnend)
 ENDPROC(__modsi3)
@@ -331,7 +331,7 @@ UNWIND(.save {r0, r1, ip, lr} )
 	ldmfd	sp!, {r1, r2, ip, lr}
 	mul	r3, r0, r2
 	sub	r1, r1, r3
-	mov	pc, lr
+	ret	lr
 
 UNWIND(.fnend)
 ENDPROC(__aeabi_uidivmod)
@@ -344,7 +344,7 @@ UNWIND(.save {r0, r1, ip, lr} )
 	ldmfd	sp!, {r1, r2, ip, lr}
 	mul	r3, r0, r2
 	sub	r1, r1, r3
-	mov	pc, lr
+	ret	lr
 
 UNWIND(.fnend)
 ENDPROC(__aeabi_idivmod)
diff --git a/arch/arm/lib/lshrdi3.S b/arch/arm/lib/lshrdi3.S
index f83d449141f7..922dcd88b02b 100644
--- a/arch/arm/lib/lshrdi3.S
+++ b/arch/arm/lib/lshrdi3.S
@@ -27,6 +27,7 @@ Boston, MA 02110-1301, USA. */
 
 
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 
 #ifdef __ARMEB__
 #define al r1
@@ -47,7 +48,7 @@ ENTRY(__aeabi_llsr)
 THUMB(	lslmi	r3, ah, ip		)
 THUMB(	orrmi	al, al, r3		)
 	mov	ah, ah, lsr r2
-	mov	pc, lr
+	ret	lr
 
 ENDPROC(__lshrdi3)
 ENDPROC(__aeabi_llsr)
diff --git a/arch/arm/lib/memchr.S b/arch/arm/lib/memchr.S
index 1da86991d700..74a5bed6d999 100644
--- a/arch/arm/lib/memchr.S
+++ b/arch/arm/lib/memchr.S
@@ -22,5 +22,5 @@ ENTRY(memchr)
 	bne	1b
 	sub	r0, r0, #1
 2:	movne	r0, #0
-	mov	pc, lr
+	ret	lr
 ENDPROC(memchr)
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index 94b0650ea98f..671455c854fa 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -110,7 +110,7 @@ ENTRY(memset)
 	strneb	r1, [ip], #1
 	tst	r2, #1
 	strneb	r1, [ip], #1
-	mov	pc, lr
+	ret	lr
 
 6:	subs	r2, r2, #4		@ 1 do we have enough
 	blt	5b			@ 1 bytes to align with?
diff --git a/arch/arm/lib/memzero.S b/arch/arm/lib/memzero.S
index 3fbdef5f802a..385ccb306fa2 100644
--- a/arch/arm/lib/memzero.S
+++ b/arch/arm/lib/memzero.S
@@ -121,5 +121,5 @@ ENTRY(__memzero)
 	strneb	r2, [r0], #1		@ 1
 	tst	r1, #1			@ 1 a byte left over
 	strneb	r2, [r0], #1		@ 1
-	mov	pc, lr			@ 1
+	ret	lr			@ 1
 ENDPROC(__memzero)
diff --git a/arch/arm/lib/muldi3.S b/arch/arm/lib/muldi3.S
index 36c91b4957e2..204305956925 100644
--- a/arch/arm/lib/muldi3.S
+++ b/arch/arm/lib/muldi3.S
@@ -11,6 +11,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 
 #ifdef __ARMEB__
 #define xh r0
@@ -41,7 +42,7 @@ ENTRY(__aeabi_lmul)
 	adc	xh, xh, yh, lsr #16
 	adds	xl, xl, ip, lsl #16
 	adc	xh, xh, ip, lsr #16
-	mov	pc, lr
+	ret	lr
 
 ENDPROC(__muldi3)
 ENDPROC(__aeabi_lmul)
diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S
index 3d73dcb959b0..38d660d3705f 100644
--- a/arch/arm/lib/putuser.S
+++ b/arch/arm/lib/putuser.S
@@ -36,7 +36,7 @@ ENTRY(__put_user_1)
 	check_uaccess r0, 1, r1, ip, __put_user_bad
 1:	TUSER(strb)	r2, [r0]
 	mov	r0, #0
-	mov	pc, lr
+	ret	lr
 ENDPROC(__put_user_1)
 
 ENTRY(__put_user_2)
@@ -60,14 +60,14 @@ ENTRY(__put_user_2)
 #endif
 #endif /* CONFIG_THUMB2_KERNEL */
 	mov	r0, #0
-	mov	pc, lr
+	ret	lr
 ENDPROC(__put_user_2)
 
 ENTRY(__put_user_4)
 	check_uaccess r0, 4, r1, ip, __put_user_bad
 4:	TUSER(str)	r2, [r0]
 	mov	r0, #0
-	mov	pc, lr
+	ret	lr
 ENDPROC(__put_user_4)
 
 ENTRY(__put_user_8)
@@ -80,12 +80,12 @@ ENTRY(__put_user_8)
 6:	TUSER(str)	r3, [r0]
 #endif
 	mov	r0, #0
-	mov	pc, lr
+	ret	lr
 ENDPROC(__put_user_8)
 
 __put_user_bad:
 	mov	r0, #-EFAULT
-	mov	pc, lr
+	ret	lr
 ENDPROC(__put_user_bad)
 
 .pushsection __ex_table, "a"
diff --git a/arch/arm/lib/strchr.S b/arch/arm/lib/strchr.S
index d8f2a1c1aea4..013d64c71e8d 100644
--- a/arch/arm/lib/strchr.S
+++ b/arch/arm/lib/strchr.S
@@ -23,5 +23,5 @@ ENTRY(strchr)
 	teq	r2, r1
 	movne	r0, #0
 	subeq	r0, r0, #1
-	mov	pc, lr
+	ret	lr
 ENDPROC(strchr)
diff --git a/arch/arm/lib/strrchr.S b/arch/arm/lib/strrchr.S
index 302f20cd2423..3cec1c7482c4 100644
--- a/arch/arm/lib/strrchr.S
+++ b/arch/arm/lib/strrchr.S
@@ -22,5 +22,5 @@ ENTRY(strrchr)
 	teq	r2, #0
 	bne	1b
 	mov	r0, r3
-	mov	pc, lr
+	ret	lr
 ENDPROC(strrchr)
diff --git a/arch/arm/lib/ucmpdi2.S b/arch/arm/lib/ucmpdi2.S
index f0df6a91db04..ad4a6309141a 100644
--- a/arch/arm/lib/ucmpdi2.S
+++ b/arch/arm/lib/ucmpdi2.S
@@ -11,6 +11,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 
 #ifdef __ARMEB__
 #define xh r0
@@ -31,7 +32,7 @@ ENTRY(__ucmpdi2)
 	movlo	r0, #0
 	moveq	r0, #1
 	movhi	r0, #2
-	mov	pc, lr
+	ret	lr
 
 ENDPROC(__ucmpdi2)
 
@@ -44,7 +45,7 @@ ENTRY(__aeabi_ulcmp)
 	movlo	r0, #-1
 	moveq	r0, #0
 	movhi	r0, #1
-	mov	pc, lr
+	ret	lr
 
 ENDPROC(__aeabi_ulcmp)
 
diff --git a/arch/arm/mach-davinci/sleep.S b/arch/arm/mach-davinci/sleep.S
index d4e9316ecacb..a5336a5e2739 100644
--- a/arch/arm/mach-davinci/sleep.S
+++ b/arch/arm/mach-davinci/sleep.S
@@ -213,7 +213,7 @@ ddr2clk_stop_done:
 	cmp	ip, r0
 	bne	ddr2clk_stop_done
 
-	mov	pc, lr
+	ret	lr
 ENDPROC(davinci_ddr_psc_config)
 
 CACHE_FLUSH:
diff --git a/arch/arm/mach-ep93xx/crunch-bits.S b/arch/arm/mach-ep93xx/crunch-bits.S
index e96923a3017b..ee0be2af5c61 100644
--- a/arch/arm/mach-ep93xx/crunch-bits.S
+++ b/arch/arm/mach-ep93xx/crunch-bits.S
@@ -198,7 +198,7 @@ crunch_load:
 	get_thread_info r10
 #endif
 2:	dec_preempt_count r10, r3
-	mov	pc, lr
+	ret	lr
 
 /*
  * Back up crunch regs to save area and disable access to them
@@ -277,7 +277,7 @@ ENTRY(crunch_task_copy)
 	mov	r3, lr			@ preserve return address
 	bl	crunch_save
 	msr	cpsr_c, ip		@ restore interrupt mode
-	mov	pc, r3
+	ret	r3
 
 /*
  * Restore crunch state from given memory address
@@ -310,4 +310,4 @@ ENTRY(crunch_task_restore)
 	mov	r3, lr			@ preserve return address
 	bl	crunch_load
 	msr	cpsr_c, ip		@ restore interrupt mode
-	mov	pc, r3
+	ret	r3
diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S
index fe123b079c05..74b50f1982db 100644
--- a/arch/arm/mach-imx/suspend-imx6.S
+++ b/arch/arm/mach-imx/suspend-imx6.S
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/linkage.h> 12#include <linux/linkage.h>
13#include <asm/assembler.h>
13#include <asm/asm-offsets.h> 14#include <asm/asm-offsets.h>
14#include <asm/hardware/cache-l2x0.h> 15#include <asm/hardware/cache-l2x0.h>
15#include "hardware.h" 16#include "hardware.h"
@@ -301,7 +302,7 @@ rbc_loop:
301 resume_mmdc 302 resume_mmdc
302 303
303 /* return to suspend finish */ 304 /* return to suspend finish */
304 mov pc, lr 305 ret lr
305 306
306resume: 307resume:
307 /* invalidate L1 I-cache first */ 308 /* invalidate L1 I-cache first */
@@ -325,7 +326,7 @@ resume:
325 mov r5, #0x1 326 mov r5, #0x1
326 resume_mmdc 327 resume_mmdc
327 328
328 mov pc, lr 329 ret lr
329ENDPROC(imx6_suspend) 330ENDPROC(imx6_suspend)
330 331
331/* 332/*
diff --git a/arch/arm/mach-mvebu/coherency_ll.S b/arch/arm/mach-mvebu/coherency_ll.S
index 510c29e079ca..f5d881b5d0f7 100644
--- a/arch/arm/mach-mvebu/coherency_ll.S
+++ b/arch/arm/mach-mvebu/coherency_ll.S
@@ -46,7 +46,7 @@ ENTRY(ll_get_coherency_base)
46 ldr r1, =coherency_base 46 ldr r1, =coherency_base
47 ldr r1, [r1] 47 ldr r1, [r1]
482: 482:
49 mov pc, lr 49 ret lr
50ENDPROC(ll_get_coherency_base) 50ENDPROC(ll_get_coherency_base)
51 51
52/* 52/*
@@ -63,7 +63,7 @@ ENTRY(ll_get_coherency_cpumask)
63 mov r2, #(1 << 24) 63 mov r2, #(1 << 24)
64 lsl r3, r2, r3 64 lsl r3, r2, r3
65ARM_BE8(rev r3, r3) 65ARM_BE8(rev r3, r3)
66 mov pc, lr 66 ret lr
67ENDPROC(ll_get_coherency_cpumask) 67ENDPROC(ll_get_coherency_cpumask)
68 68
69/* 69/*
@@ -94,7 +94,7 @@ ENTRY(ll_add_cpu_to_smp_group)
94 strex r1, r2, [r0] 94 strex r1, r2, [r0]
95 cmp r1, #0 95 cmp r1, #0
96 bne 1b 96 bne 1b
97 mov pc, lr 97 ret lr
98ENDPROC(ll_add_cpu_to_smp_group) 98ENDPROC(ll_add_cpu_to_smp_group)
99 99
100ENTRY(ll_enable_coherency) 100ENTRY(ll_enable_coherency)
@@ -118,7 +118,7 @@ ENTRY(ll_enable_coherency)
118 bne 1b 118 bne 1b
119 dsb 119 dsb
120 mov r0, #0 120 mov r0, #0
121 mov pc, lr 121 ret lr
122ENDPROC(ll_enable_coherency) 122ENDPROC(ll_enable_coherency)
123 123
124ENTRY(ll_disable_coherency) 124ENTRY(ll_disable_coherency)
@@ -141,7 +141,7 @@ ENTRY(ll_disable_coherency)
141 cmp r1, #0 141 cmp r1, #0
142 bne 1b 142 bne 1b
143 dsb 143 dsb
144 mov pc, lr 144 ret lr
145ENDPROC(ll_disable_coherency) 145ENDPROC(ll_disable_coherency)
146 146
147 .align 2 147 .align 2
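The coherency_ll hunks above sit inside ldrex/strex retry loops: strex performs the store only while this CPU still holds the exclusive reservation on the address, and writes 0 to its status register on success, so spinning on that status makes the read-modify-write atomic. The generic shape of the idiom (register assignments are illustrative, not the exact code above):

1:	ldrex	r2, [r0]	@ load-exclusive the current word
	orr	r2, r2, r3	@ set this CPU's bit
	strex	r1, r2, [r0]	@ store only if still exclusive; r1 = 0 on success
	cmp	r1, #0
	bne	1b		@ reservation lost, retry
	ret	lr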
diff --git a/arch/arm/mach-mvebu/headsmp-a9.S b/arch/arm/mach-mvebu/headsmp-a9.S
index 5925366bc03c..7c91ddb6f1f7 100644
--- a/arch/arm/mach-mvebu/headsmp-a9.S
+++ b/arch/arm/mach-mvebu/headsmp-a9.S
@@ -14,6 +14,7 @@
14 14
15#include <linux/linkage.h> 15#include <linux/linkage.h>
16#include <linux/init.h> 16#include <linux/init.h>
17#include <asm/assembler.h>
17 18
18 __CPUINIT 19 __CPUINIT
19#define CPU_RESUME_ADDR_REG 0xf10182d4 20#define CPU_RESUME_ADDR_REG 0xf10182d4
@@ -24,7 +25,7 @@
24armada_375_smp_cpu1_enable_code_start: 25armada_375_smp_cpu1_enable_code_start:
25 ldr r0, [pc, #4] 26 ldr r0, [pc, #4]
26 ldr r1, [r0] 27 ldr r1, [r0]
27 mov pc, r1 28 ret r1
28 .word CPU_RESUME_ADDR_REG 29 .word CPU_RESUME_ADDR_REG
29armada_375_smp_cpu1_enable_code_end: 30armada_375_smp_cpu1_enable_code_end:
30 31
diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
index 9086ce03ae12..b84a0122d823 100644
--- a/arch/arm/mach-omap2/sleep44xx.S
+++ b/arch/arm/mach-omap2/sleep44xx.S
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/linkage.h> 12#include <linux/linkage.h>
13#include <asm/assembler.h>
13#include <asm/smp_scu.h> 14#include <asm/smp_scu.h>
14#include <asm/memory.h> 15#include <asm/memory.h>
15#include <asm/hardware/cache-l2x0.h> 16#include <asm/hardware/cache-l2x0.h>
@@ -334,7 +335,7 @@ ENDPROC(omap4_cpu_resume)
334 335
335#ifndef CONFIG_OMAP4_ERRATA_I688 336#ifndef CONFIG_OMAP4_ERRATA_I688
336ENTRY(omap_bus_sync) 337ENTRY(omap_bus_sync)
337 mov pc, lr 338 ret lr
338ENDPROC(omap_bus_sync) 339ENDPROC(omap_bus_sync)
339#endif 340#endif
340 341
diff --git a/arch/arm/mach-omap2/sram242x.S b/arch/arm/mach-omap2/sram242x.S
index 680a7c56cc3e..2c88ff2d0236 100644
--- a/arch/arm/mach-omap2/sram242x.S
+++ b/arch/arm/mach-omap2/sram242x.S
@@ -101,7 +101,7 @@ i_dll_wait:
101i_dll_delay: 101i_dll_delay:
102 subs r4, r4, #0x1 102 subs r4, r4, #0x1
103 bne i_dll_delay 103 bne i_dll_delay
104 mov pc, lr 104 ret lr
105 105
106 /* 106 /*
107 * shift up or down voltage, use R9 as input to tell level. 107 * shift up or down voltage, use R9 as input to tell level.
@@ -125,7 +125,7 @@ volt_delay:
125 ldr r7, [r3] @ get timer value 125 ldr r7, [r3] @ get timer value
126 cmp r5, r7 @ time up? 126 cmp r5, r7 @ time up?
127 bhi volt_delay @ not yet->branch 127 bhi volt_delay @ not yet->branch
128 mov pc, lr @ back to caller. 128 ret lr @ back to caller.
129 129
130omap242x_sdi_cm_clksel2_pll: 130omap242x_sdi_cm_clksel2_pll:
131 .word OMAP2420_CM_REGADDR(PLL_MOD, CM_CLKSEL2) 131 .word OMAP2420_CM_REGADDR(PLL_MOD, CM_CLKSEL2)
@@ -220,7 +220,7 @@ volt_delay_c:
220 ldr r7, [r10] @ get timer value 220 ldr r7, [r10] @ get timer value
221 cmp r8, r7 @ time up? 221 cmp r8, r7 @ time up?
222 bhi volt_delay_c @ not yet->branch 222 bhi volt_delay_c @ not yet->branch
223 mov pc, lr @ back to caller 223 ret lr @ back to caller
224 224
225omap242x_srs_cm_clksel2_pll: 225omap242x_srs_cm_clksel2_pll:
226 .word OMAP2420_CM_REGADDR(PLL_MOD, CM_CLKSEL2) 226 .word OMAP2420_CM_REGADDR(PLL_MOD, CM_CLKSEL2)
diff --git a/arch/arm/mach-omap2/sram243x.S b/arch/arm/mach-omap2/sram243x.S
index a1e9edd673f4..d5deb9761fc7 100644
--- a/arch/arm/mach-omap2/sram243x.S
+++ b/arch/arm/mach-omap2/sram243x.S
@@ -101,7 +101,7 @@ i_dll_wait:
101i_dll_delay: 101i_dll_delay:
102 subs r4, r4, #0x1 102 subs r4, r4, #0x1
103 bne i_dll_delay 103 bne i_dll_delay
104 mov pc, lr 104 ret lr
105 105
106 /* 106 /*
107 * shift up or down voltage, use R9 as input to tell level. 107 * shift up or down voltage, use R9 as input to tell level.
@@ -125,7 +125,7 @@ volt_delay:
125 ldr r7, [r3] @ get timer value 125 ldr r7, [r3] @ get timer value
126 cmp r5, r7 @ time up? 126 cmp r5, r7 @ time up?
127 bhi volt_delay @ not yet->branch 127 bhi volt_delay @ not yet->branch
128 mov pc, lr @ back to caller. 128 ret lr @ back to caller.
129 129
130omap243x_sdi_cm_clksel2_pll: 130omap243x_sdi_cm_clksel2_pll:
131 .word OMAP2430_CM_REGADDR(PLL_MOD, CM_CLKSEL2) 131 .word OMAP2430_CM_REGADDR(PLL_MOD, CM_CLKSEL2)
@@ -220,7 +220,7 @@ volt_delay_c:
220 ldr r7, [r10] @ get timer value 220 ldr r7, [r10] @ get timer value
221 cmp r8, r7 @ time up? 221 cmp r8, r7 @ time up?
222 bhi volt_delay_c @ not yet->branch 222 bhi volt_delay_c @ not yet->branch
223 mov pc, lr @ back to caller 223 ret lr @ back to caller
224 224
225omap243x_srs_cm_clksel2_pll: 225omap243x_srs_cm_clksel2_pll:
226 .word OMAP2430_CM_REGADDR(PLL_MOD, CM_CLKSEL2) 226 .word OMAP2430_CM_REGADDR(PLL_MOD, CM_CLKSEL2)
diff --git a/arch/arm/mach-pxa/mioa701_bootresume.S b/arch/arm/mach-pxa/mioa701_bootresume.S
index 324d25a48c85..81591491ab94 100644
--- a/arch/arm/mach-pxa/mioa701_bootresume.S
+++ b/arch/arm/mach-pxa/mioa701_bootresume.S
@@ -29,7 +29,7 @@ ENTRY(mioa701_jumpaddr)
29 str r1, [r0] @ Early disable resume for next boot 29 str r1, [r0] @ Early disable resume for next boot
30 ldr r0, mioa701_jumpaddr @ (Murphy's Law) 30 ldr r0, mioa701_jumpaddr @ (Murphy's Law)
31 ldr r0, [r0] 31 ldr r0, [r0]
32 mov pc, r0 32 ret r0
332: 332:
34 34
35ENTRY(mioa701_bootstrap_lg) 35ENTRY(mioa701_bootstrap_lg)
diff --git a/arch/arm/mach-pxa/standby.S b/arch/arm/mach-pxa/standby.S
index 29f5f5c180b7..eab1645bb4ad 100644
--- a/arch/arm/mach-pxa/standby.S
+++ b/arch/arm/mach-pxa/standby.S
@@ -29,7 +29,7 @@ ENTRY(pxa_cpu_standby)
29 .align 5 29 .align 5
301: mcr p14, 0, r2, c7, c0, 0 @ put the system into Standby 301: mcr p14, 0, r2, c7, c0, 0 @ put the system into Standby
31 str r1, [r0] @ make sure PSSR_PH/STS are clear 31 str r1, [r0] @ make sure PSSR_PH/STS are clear
32 mov pc, lr 32 ret lr
33 33
34#endif 34#endif
35 35
@@ -108,7 +108,7 @@ ENTRY(pm_enter_standby_start)
108 bic r0, r0, #0x20000000 108 bic r0, r0, #0x20000000
109 str r0, [r1, #PXA3_DMCIER] 109 str r0, [r1, #PXA3_DMCIER]
110 110
111 mov pc, lr 111 ret lr
112ENTRY(pm_enter_standby_end) 112ENTRY(pm_enter_standby_end)
113 113
114#endif 114#endif
diff --git a/arch/arm/mach-s3c24xx/sleep-s3c2410.S b/arch/arm/mach-s3c24xx/sleep-s3c2410.S
index c9b91223697c..875ba8911127 100644
--- a/arch/arm/mach-s3c24xx/sleep-s3c2410.S
+++ b/arch/arm/mach-s3c24xx/sleep-s3c2410.S
@@ -66,4 +66,4 @@ s3c2410_do_sleep:
66 streq r8, [r5] @ SDRAM power-down config 66 streq r8, [r5] @ SDRAM power-down config
67 streq r9, [r6] @ CPU sleep 67 streq r9, [r6] @ CPU sleep
681: beq 1b 681: beq 1b
69 mov pc, r14 69 ret lr
diff --git a/arch/arm/mach-s3c24xx/sleep-s3c2412.S b/arch/arm/mach-s3c24xx/sleep-s3c2412.S
index 5adaceb7da13..6bf5b4d8743c 100644
--- a/arch/arm/mach-s3c24xx/sleep-s3c2412.S
+++ b/arch/arm/mach-s3c24xx/sleep-s3c2412.S
@@ -65,4 +65,4 @@ s3c2412_sleep_enter1:
65 strne r9, [r3] 65 strne r9, [r3]
66 bne s3c2412_sleep_enter1 66 bne s3c2412_sleep_enter1
67 67
68 mov pc, r14 68 ret lr
diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S
index e5be5c88644b..293007579b8e 100644
--- a/arch/arm/mach-shmobile/headsmp.S
+++ b/arch/arm/mach-shmobile/headsmp.S
@@ -12,6 +12,7 @@
12 */ 12 */
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <asm/assembler.h>
15#include <asm/memory.h> 16#include <asm/memory.h>
16 17
17ENTRY(shmobile_invalidate_start) 18ENTRY(shmobile_invalidate_start)
@@ -75,7 +76,7 @@ shmobile_smp_boot_next:
75 76
76shmobile_smp_boot_found: 77shmobile_smp_boot_found:
77 ldr r0, [r7, r1, lsl #2] 78 ldr r0, [r7, r1, lsl #2]
78 mov pc, r9 79 ret r9
79ENDPROC(shmobile_smp_boot) 80ENDPROC(shmobile_smp_boot)
80 81
81ENTRY(shmobile_smp_sleep) 82ENTRY(shmobile_smp_sleep)
diff --git a/arch/arm/mach-tegra/sleep-tegra20.S b/arch/arm/mach-tegra/sleep-tegra20.S
index aaaf3abd2688..be4bc5f853f5 100644
--- a/arch/arm/mach-tegra/sleep-tegra20.S
+++ b/arch/arm/mach-tegra/sleep-tegra20.S
@@ -78,7 +78,7 @@ ENTRY(tegra20_hotplug_shutdown)
78 /* Put this CPU down */ 78 /* Put this CPU down */
79 cpu_id r0 79 cpu_id r0
80 bl tegra20_cpu_shutdown 80 bl tegra20_cpu_shutdown
81 mov pc, lr @ should never get here 81 ret lr @ should never get here
82ENDPROC(tegra20_hotplug_shutdown) 82ENDPROC(tegra20_hotplug_shutdown)
83 83
84/* 84/*
@@ -96,7 +96,7 @@ ENDPROC(tegra20_hotplug_shutdown)
96 */ 96 */
97ENTRY(tegra20_cpu_shutdown) 97ENTRY(tegra20_cpu_shutdown)
98 cmp r0, #0 98 cmp r0, #0
99 moveq pc, lr @ must not be called for CPU 0 99 reteq lr @ must not be called for CPU 0
100 mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41 100 mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
101 mov r12, #CPU_RESETTABLE 101 mov r12, #CPU_RESETTABLE
102 str r12, [r1] 102 str r12, [r1]
@@ -117,7 +117,7 @@ ENTRY(tegra20_cpu_shutdown)
117 cpu_id r3 117 cpu_id r3
118 cmp r3, r0 118 cmp r3, r0
119 beq . 119 beq .
120 mov pc, lr 120 ret lr
121ENDPROC(tegra20_cpu_shutdown) 121ENDPROC(tegra20_cpu_shutdown)
122#endif 122#endif
123 123
@@ -164,7 +164,7 @@ ENTRY(tegra_pen_lock)
164 cmpeq r12, r0 @ !turn == cpu? 164 cmpeq r12, r0 @ !turn == cpu?
165 beq 1b @ while !turn == cpu && flag[!cpu] == 1 165 beq 1b @ while !turn == cpu && flag[!cpu] == 1
166 166
167 mov pc, lr @ locked 167 ret lr @ locked
168ENDPROC(tegra_pen_lock) 168ENDPROC(tegra_pen_lock)
169 169
170ENTRY(tegra_pen_unlock) 170ENTRY(tegra_pen_unlock)
@@ -176,7 +176,7 @@ ENTRY(tegra_pen_unlock)
176 addne r2, r3, #PMC_SCRATCH39 176 addne r2, r3, #PMC_SCRATCH39
177 mov r12, #0 177 mov r12, #0
178 str r12, [r2] 178 str r12, [r2]
179 mov pc, lr 179 ret lr
180ENDPROC(tegra_pen_unlock) 180ENDPROC(tegra_pen_unlock)
181 181
182/* 182/*
@@ -189,7 +189,7 @@ ENTRY(tegra20_cpu_clear_resettable)
189 mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41 189 mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
190 mov r12, #CPU_NOT_RESETTABLE 190 mov r12, #CPU_NOT_RESETTABLE
191 str r12, [r1] 191 str r12, [r1]
192 mov pc, lr 192 ret lr
193ENDPROC(tegra20_cpu_clear_resettable) 193ENDPROC(tegra20_cpu_clear_resettable)
194 194
195/* 195/*
@@ -202,7 +202,7 @@ ENTRY(tegra20_cpu_set_resettable_soon)
202 mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41 202 mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
203 mov r12, #CPU_RESETTABLE_SOON 203 mov r12, #CPU_RESETTABLE_SOON
204 str r12, [r1] 204 str r12, [r1]
205 mov pc, lr 205 ret lr
206ENDPROC(tegra20_cpu_set_resettable_soon) 206ENDPROC(tegra20_cpu_set_resettable_soon)
207 207
208/* 208/*
@@ -217,7 +217,7 @@ ENTRY(tegra20_cpu_is_resettable_soon)
217 cmp r12, #CPU_RESETTABLE_SOON 217 cmp r12, #CPU_RESETTABLE_SOON
218 moveq r0, #1 218 moveq r0, #1
219 movne r0, #0 219 movne r0, #0
220 mov pc, lr 220 ret lr
221ENDPROC(tegra20_cpu_is_resettable_soon) 221ENDPROC(tegra20_cpu_is_resettable_soon)
222 222
223/* 223/*
@@ -239,7 +239,7 @@ ENTRY(tegra20_sleep_core_finish)
239 mov32 r1, TEGRA_IRAM_LPx_RESUME_AREA 239 mov32 r1, TEGRA_IRAM_LPx_RESUME_AREA
240 add r0, r0, r1 240 add r0, r0, r1
241 241
242 mov pc, r3 242 ret r3
243ENDPROC(tegra20_sleep_core_finish) 243ENDPROC(tegra20_sleep_core_finish)
244 244
245/* 245/*
@@ -402,7 +402,7 @@ exit_selfrefresh_loop:
402 402
403 mov32 r0, TEGRA_PMC_BASE 403 mov32 r0, TEGRA_PMC_BASE
404 ldr r0, [r0, #PMC_SCRATCH41] 404 ldr r0, [r0, #PMC_SCRATCH41]
405 mov pc, r0 @ jump to tegra_resume 405 ret r0 @ jump to tegra_resume
406ENDPROC(tegra20_lp1_reset) 406ENDPROC(tegra20_lp1_reset)
407 407
408/* 408/*
@@ -455,7 +455,7 @@ tegra20_switch_cpu_to_clk32k:
455 mov r0, #0 /* burst policy = 32KHz */ 455 mov r0, #0 /* burst policy = 32KHz */
456 str r0, [r5, #CLK_RESET_SCLK_BURST] 456 str r0, [r5, #CLK_RESET_SCLK_BURST]
457 457
458 mov pc, lr 458 ret lr
459 459
460/* 460/*
461 * tegra20_enter_sleep 461 * tegra20_enter_sleep
@@ -535,7 +535,7 @@ padsave_done:
535 adr r2, tegra20_sclk_save 535 adr r2, tegra20_sclk_save
536 str r0, [r2] 536 str r0, [r2]
537 dsb 537 dsb
538 mov pc, lr 538 ret lr
539 539
540tegra20_sdram_pad_address: 540tegra20_sdram_pad_address:
541 .word TEGRA_APB_MISC_BASE + APB_MISC_XM2CFGCPADCTRL 541 .word TEGRA_APB_MISC_BASE + APB_MISC_XM2CFGCPADCTRL
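Conditional returns convert the same way: "moveq pc, lr" becomes "reteq lr" (see tegra20_cpu_shutdown above), "movne pc, lr" becomes "retne lr", and likewise for every condition code the macro's .irp loop generates. A usage sketch with an invented guard routine:

ENTRY(check_not_cpu0)		@ hypothetical: return early when r0 == 0
	cmp	r0, #0
	reteq	lr		@ expands to bxeq lr on ARMv6+
	mov	r0, #1
	ret	lr
ENDPROC(check_not_cpu0)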
diff --git a/arch/arm/mach-tegra/sleep-tegra30.S b/arch/arm/mach-tegra/sleep-tegra30.S
index b16d4a57fa59..09cad9b071de 100644
--- a/arch/arm/mach-tegra/sleep-tegra30.S
+++ b/arch/arm/mach-tegra/sleep-tegra30.S
@@ -142,7 +142,7 @@ ENTRY(tegra30_hotplug_shutdown)
142 /* Powergate this CPU */ 142 /* Powergate this CPU */
143 mov r0, #TEGRA30_POWER_HOTPLUG_SHUTDOWN 143 mov r0, #TEGRA30_POWER_HOTPLUG_SHUTDOWN
144 bl tegra30_cpu_shutdown 144 bl tegra30_cpu_shutdown
145 mov pc, lr @ should never get here 145 ret lr @ should never get here
146ENDPROC(tegra30_hotplug_shutdown) 146ENDPROC(tegra30_hotplug_shutdown)
147 147
148/* 148/*
@@ -161,7 +161,7 @@ ENTRY(tegra30_cpu_shutdown)
161 bne _no_cpu0_chk @ It's not Tegra30 161 bne _no_cpu0_chk @ It's not Tegra30
162 162
163 cmp r3, #0 163 cmp r3, #0
164 moveq pc, lr @ Must never be called for CPU 0 164 reteq lr @ Must never be called for CPU 0
165_no_cpu0_chk: 165_no_cpu0_chk:
166 166
167 ldr r12, =TEGRA_FLOW_CTRL_VIRT 167 ldr r12, =TEGRA_FLOW_CTRL_VIRT
@@ -266,7 +266,7 @@ ENTRY(tegra30_sleep_core_finish)
266 mov32 r1, TEGRA_IRAM_LPx_RESUME_AREA 266 mov32 r1, TEGRA_IRAM_LPx_RESUME_AREA
267 add r0, r0, r1 267 add r0, r0, r1
268 268
269 mov pc, r3 269 ret r3
270ENDPROC(tegra30_sleep_core_finish) 270ENDPROC(tegra30_sleep_core_finish)
271 271
272/* 272/*
@@ -285,7 +285,7 @@ ENTRY(tegra30_sleep_cpu_secondary_finish)
285 mov r0, #0 @ power mode flags (!hotplug) 285 mov r0, #0 @ power mode flags (!hotplug)
286 bl tegra30_cpu_shutdown 286 bl tegra30_cpu_shutdown
287 mov r0, #1 @ never return here 287 mov r0, #1 @ never return here
288 mov pc, r7 288 ret r7
289ENDPROC(tegra30_sleep_cpu_secondary_finish) 289ENDPROC(tegra30_sleep_cpu_secondary_finish)
290 290
291/* 291/*
@@ -529,7 +529,7 @@ __no_dual_emc_chanl:
529 529
530 mov32 r0, TEGRA_PMC_BASE 530 mov32 r0, TEGRA_PMC_BASE
531 ldr r0, [r0, #PMC_SCRATCH41] 531 ldr r0, [r0, #PMC_SCRATCH41]
532 mov pc, r0 @ jump to tegra_resume 532 ret r0 @ jump to tegra_resume
533ENDPROC(tegra30_lp1_reset) 533ENDPROC(tegra30_lp1_reset)
534 534
535 .align L1_CACHE_SHIFT 535 .align L1_CACHE_SHIFT
@@ -659,7 +659,7 @@ _no_pll_in_iddq:
659 mov r0, #0 /* burst policy = 32KHz */ 659 mov r0, #0 /* burst policy = 32KHz */
660 str r0, [r5, #CLK_RESET_SCLK_BURST] 660 str r0, [r5, #CLK_RESET_SCLK_BURST]
661 661
662 mov pc, lr 662 ret lr
663 663
664/* 664/*
665 * tegra30_enter_sleep 665 * tegra30_enter_sleep
@@ -819,7 +819,7 @@ pmc_io_dpd_skip:
819 819
820 dsb 820 dsb
821 821
822 mov pc, lr 822 ret lr
823 823
824 .ltorg 824 .ltorg
825/* dummy symbol for end of IRAM */ 825/* dummy symbol for end of IRAM */
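Several tegra conversions are computed jumps rather than subroutine returns, for example "ret r0" after loading the resume address from PMC_SCRATCH41. Where the macro does emit bx, bit 0 of the target address selects the instruction set (1 = Thumb, 0 = ARM), which is what makes the conversion interworking-safe. A standalone illustration of the bx semantics (not kernel code):

	.syntax	unified
	.arm
caller:
	bl	callee			@ bl leaves an ARM-state return address in lr
	b	caller

callee:
	bx	lr			@ branch to lr; target state taken from lr bit 0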
diff --git a/arch/arm/mach-tegra/sleep.S b/arch/arm/mach-tegra/sleep.S
index 8d06213fbc47..f024a5109e8e 100644
--- a/arch/arm/mach-tegra/sleep.S
+++ b/arch/arm/mach-tegra/sleep.S
@@ -87,7 +87,7 @@ ENTRY(tegra_init_l2_for_a15)
87 mcrne p15, 0x1, r0, c9, c0, 2 87 mcrne p15, 0x1, r0, c9, c0, 2
88_exit_init_l2_a15: 88_exit_init_l2_a15:
89 89
90 mov pc, lr 90 ret lr
91ENDPROC(tegra_init_l2_for_a15) 91ENDPROC(tegra_init_l2_for_a15)
92 92
93/* 93/*
@@ -111,7 +111,7 @@ ENTRY(tegra_sleep_cpu_finish)
111 add r3, r3, r0 111 add r3, r3, r0
112 mov r0, r1 112 mov r0, r1
113 113
114 mov pc, r3 114 ret r3
115ENDPROC(tegra_sleep_cpu_finish) 115ENDPROC(tegra_sleep_cpu_finish)
116 116
117/* 117/*
@@ -139,7 +139,7 @@ ENTRY(tegra_shut_off_mmu)
139 moveq r3, #0 139 moveq r3, #0
140 streq r3, [r2, #L2X0_CTRL] 140 streq r3, [r2, #L2X0_CTRL]
141#endif 141#endif
142 mov pc, r0 142 ret r0
143ENDPROC(tegra_shut_off_mmu) 143ENDPROC(tegra_shut_off_mmu)
144 .popsection 144 .popsection
145 145
@@ -156,6 +156,6 @@ ENTRY(tegra_switch_cpu_to_pllp)
156 str r0, [r5, #CLK_RESET_CCLK_BURST] 156 str r0, [r5, #CLK_RESET_CCLK_BURST]
157 mov r0, #0 157 mov r0, #0
158 str r0, [r5, #CLK_RESET_CCLK_DIVIDER] 158 str r0, [r5, #CLK_RESET_CCLK_DIVIDER]
159 mov pc, lr 159 ret lr
160ENDPROC(tegra_switch_cpu_to_pllp) 160ENDPROC(tegra_switch_cpu_to_pllp)
161#endif 161#endif
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index e505befe51b5..2f0c58836ae7 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -15,6 +15,7 @@
15 */ 15 */
16#include <linux/linkage.h> 16#include <linux/linkage.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <asm/assembler.h>
18#include <asm/memory.h> 19#include <asm/memory.h>
19#include <asm/page.h> 20#include <asm/page.h>
20 21
@@ -45,7 +46,7 @@
45ENTRY(fa_flush_icache_all) 46ENTRY(fa_flush_icache_all)
46 mov r0, #0 47 mov r0, #0
47 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 48 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
48 mov pc, lr 49 ret lr
49ENDPROC(fa_flush_icache_all) 50ENDPROC(fa_flush_icache_all)
50 51
51/* 52/*
@@ -71,7 +72,7 @@ __flush_whole_cache:
71 mcrne p15, 0, ip, c7, c5, 6 @ invalidate BTB 72 mcrne p15, 0, ip, c7, c5, 6 @ invalidate BTB
72 mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer 73 mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer
73 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush 74 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush
74 mov pc, lr 75 ret lr
75 76
76/* 77/*
77 * flush_user_cache_range(start, end, flags) 78 * flush_user_cache_range(start, end, flags)
@@ -99,7 +100,7 @@ ENTRY(fa_flush_user_cache_range)
99 mcrne p15, 0, ip, c7, c5, 6 @ invalidate BTB 100 mcrne p15, 0, ip, c7, c5, 6 @ invalidate BTB
100 mcrne p15, 0, ip, c7, c10, 4 @ data write barrier 101 mcrne p15, 0, ip, c7, c10, 4 @ data write barrier
101 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush 102 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush
102 mov pc, lr 103 ret lr
103 104
104/* 105/*
105 * coherent_kern_range(start, end) 106 * coherent_kern_range(start, end)
@@ -135,7 +136,7 @@ ENTRY(fa_coherent_user_range)
135 mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB 136 mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB
136 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 137 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
137 mcr p15, 0, r0, c7, c5, 4 @ prefetch flush 138 mcr p15, 0, r0, c7, c5, 4 @ prefetch flush
138 mov pc, lr 139 ret lr
139 140
140/* 141/*
141 * flush_kern_dcache_area(void *addr, size_t size) 142 * flush_kern_dcache_area(void *addr, size_t size)
@@ -155,7 +156,7 @@ ENTRY(fa_flush_kern_dcache_area)
155 mov r0, #0 156 mov r0, #0
156 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 157 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
157 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 158 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
158 mov pc, lr 159 ret lr
159 160
160/* 161/*
161 * dma_inv_range(start, end) 162 * dma_inv_range(start, end)
@@ -181,7 +182,7 @@ fa_dma_inv_range:
181 blo 1b 182 blo 1b
182 mov r0, #0 183 mov r0, #0
183 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 184 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
184 mov pc, lr 185 ret lr
185 186
186/* 187/*
187 * dma_clean_range(start, end) 188 * dma_clean_range(start, end)
@@ -199,7 +200,7 @@ fa_dma_clean_range:
199 blo 1b 200 blo 1b
200 mov r0, #0 201 mov r0, #0
201 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 202 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
202 mov pc, lr 203 ret lr
203 204
204/* 205/*
205 * dma_flush_range(start,end) 206 * dma_flush_range(start,end)
@@ -214,7 +215,7 @@ ENTRY(fa_dma_flush_range)
214 blo 1b 215 blo 1b
215 mov r0, #0 216 mov r0, #0
216 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 217 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
217 mov pc, lr 218 ret lr
218 219
219/* 220/*
220 * dma_map_area(start, size, dir) 221 * dma_map_area(start, size, dir)
@@ -237,7 +238,7 @@ ENDPROC(fa_dma_map_area)
237 * - dir - DMA direction 238 * - dir - DMA direction
238 */ 239 */
239ENTRY(fa_dma_unmap_area) 240ENTRY(fa_dma_unmap_area)
240 mov pc, lr 241 ret lr
241ENDPROC(fa_dma_unmap_area) 242ENDPROC(fa_dma_unmap_area)
242 243
243 .globl fa_flush_kern_cache_louis 244 .globl fa_flush_kern_cache_louis
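All the cache-fa range operations above share one loop shape: operate on a line, step by the D-cache line size, loop while below the end address, then drain the write buffer before returning. A sketch of the clean-range variant (CACHE_DLINESIZE is the CPU-specific line size each of these files defines):

	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a line boundary
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D-cache line by MVA
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr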
diff --git a/arch/arm/mm/cache-nop.S b/arch/arm/mm/cache-nop.S
index 8e12ddca0031..f1cc9861031f 100644
--- a/arch/arm/mm/cache-nop.S
+++ b/arch/arm/mm/cache-nop.S
@@ -5,11 +5,12 @@
5 */ 5 */
6#include <linux/linkage.h> 6#include <linux/linkage.h>
7#include <linux/init.h> 7#include <linux/init.h>
8#include <asm/assembler.h>
8 9
9#include "proc-macros.S" 10#include "proc-macros.S"
10 11
11ENTRY(nop_flush_icache_all) 12ENTRY(nop_flush_icache_all)
12 mov pc, lr 13 ret lr
13ENDPROC(nop_flush_icache_all) 14ENDPROC(nop_flush_icache_all)
14 15
15 .globl nop_flush_kern_cache_all 16 .globl nop_flush_kern_cache_all
@@ -29,7 +30,7 @@ ENDPROC(nop_flush_icache_all)
29 30
30ENTRY(nop_coherent_user_range) 31ENTRY(nop_coherent_user_range)
31 mov r0, #0 32 mov r0, #0
32 mov pc, lr 33 ret lr
33ENDPROC(nop_coherent_user_range) 34ENDPROC(nop_coherent_user_range)
34 35
35 .globl nop_flush_kern_dcache_area 36 .globl nop_flush_kern_dcache_area
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index a7ba68f59f0c..91e3adf155cb 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -9,6 +9,7 @@
9 */ 9 */
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <asm/assembler.h>
12#include <asm/page.h> 13#include <asm/page.h>
13#include "proc-macros.S" 14#include "proc-macros.S"
14 15
@@ -18,7 +19,7 @@
18 * Unconditionally clean and invalidate the entire icache. 19 * Unconditionally clean and invalidate the entire icache.
19 */ 20 */
20ENTRY(v4_flush_icache_all) 21ENTRY(v4_flush_icache_all)
21 mov pc, lr 22 ret lr
22ENDPROC(v4_flush_icache_all) 23ENDPROC(v4_flush_icache_all)
23 24
24/* 25/*
@@ -40,7 +41,7 @@ ENTRY(v4_flush_kern_cache_all)
40#ifdef CONFIG_CPU_CP15 41#ifdef CONFIG_CPU_CP15
41 mov r0, #0 42 mov r0, #0
42 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache 43 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
43 mov pc, lr 44 ret lr
44#else 45#else
45 /* FALLTHROUGH */ 46 /* FALLTHROUGH */
46#endif 47#endif
@@ -59,7 +60,7 @@ ENTRY(v4_flush_user_cache_range)
59#ifdef CONFIG_CPU_CP15 60#ifdef CONFIG_CPU_CP15
60 mov ip, #0 61 mov ip, #0
61 mcr p15, 0, ip, c7, c7, 0 @ flush ID cache 62 mcr p15, 0, ip, c7, c7, 0 @ flush ID cache
62 mov pc, lr 63 ret lr
63#else 64#else
64 /* FALLTHROUGH */ 65 /* FALLTHROUGH */
65#endif 66#endif
@@ -89,7 +90,7 @@ ENTRY(v4_coherent_kern_range)
89 */ 90 */
90ENTRY(v4_coherent_user_range) 91ENTRY(v4_coherent_user_range)
91 mov r0, #0 92 mov r0, #0
92 mov pc, lr 93 ret lr
93 94
94/* 95/*
95 * flush_kern_dcache_area(void *addr, size_t size) 96 * flush_kern_dcache_area(void *addr, size_t size)
@@ -116,7 +117,7 @@ ENTRY(v4_dma_flush_range)
116 mov r0, #0 117 mov r0, #0
117 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache 118 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
118#endif 119#endif
119 mov pc, lr 120 ret lr
120 121
121/* 122/*
122 * dma_unmap_area(start, size, dir) 123 * dma_unmap_area(start, size, dir)
@@ -136,7 +137,7 @@ ENTRY(v4_dma_unmap_area)
136 * - dir - DMA direction 137 * - dir - DMA direction
137 */ 138 */
138ENTRY(v4_dma_map_area) 139ENTRY(v4_dma_map_area)
139 mov pc, lr 140 ret lr
140ENDPROC(v4_dma_unmap_area) 141ENDPROC(v4_dma_unmap_area)
141ENDPROC(v4_dma_map_area) 142ENDPROC(v4_dma_map_area)
142 143
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index cd4945321407..2522f8c8fbb1 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -9,6 +9,7 @@
9 */ 9 */
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <asm/assembler.h>
12#include <asm/memory.h> 13#include <asm/memory.h>
13#include <asm/page.h> 14#include <asm/page.h>
14#include "proc-macros.S" 15#include "proc-macros.S"
@@ -58,7 +59,7 @@ flush_base:
58ENTRY(v4wb_flush_icache_all) 59ENTRY(v4wb_flush_icache_all)
59 mov r0, #0 60 mov r0, #0
60 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 61 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
61 mov pc, lr 62 ret lr
62ENDPROC(v4wb_flush_icache_all) 63ENDPROC(v4wb_flush_icache_all)
63 64
64/* 65/*
@@ -94,7 +95,7 @@ __flush_whole_cache:
94 blo 1b 95 blo 1b
95#endif 96#endif
96 mcr p15, 0, ip, c7, c10, 4 @ drain write buffer 97 mcr p15, 0, ip, c7, c10, 4 @ drain write buffer
97 mov pc, lr 98 ret lr
98 99
99/* 100/*
100 * flush_user_cache_range(start, end, flags) 101 * flush_user_cache_range(start, end, flags)
@@ -122,7 +123,7 @@ ENTRY(v4wb_flush_user_cache_range)
122 blo 1b 123 blo 1b
123 tst r2, #VM_EXEC 124 tst r2, #VM_EXEC
124 mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer 125 mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer
125 mov pc, lr 126 ret lr
126 127
127/* 128/*
128 * flush_kern_dcache_area(void *addr, size_t size) 129 * flush_kern_dcache_area(void *addr, size_t size)
@@ -170,7 +171,7 @@ ENTRY(v4wb_coherent_user_range)
170 mov r0, #0 171 mov r0, #0
171 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 172 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
172 mcr p15, 0, r0, c7, c10, 4 @ drain WB 173 mcr p15, 0, r0, c7, c10, 4 @ drain WB
173 mov pc, lr 174 ret lr
174 175
175 176
176/* 177/*
@@ -195,7 +196,7 @@ v4wb_dma_inv_range:
195 cmp r0, r1 196 cmp r0, r1
196 blo 1b 197 blo 1b
197 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 198 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
198 mov pc, lr 199 ret lr
199 200
200/* 201/*
201 * dma_clean_range(start, end) 202 * dma_clean_range(start, end)
@@ -212,7 +213,7 @@ v4wb_dma_clean_range:
212 cmp r0, r1 213 cmp r0, r1
213 blo 1b 214 blo 1b
214 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 215 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
215 mov pc, lr 216 ret lr
216 217
217/* 218/*
218 * dma_flush_range(start, end) 219 * dma_flush_range(start, end)
@@ -248,7 +249,7 @@ ENDPROC(v4wb_dma_map_area)
248 * - dir - DMA direction 249 * - dir - DMA direction
249 */ 250 */
250ENTRY(v4wb_dma_unmap_area) 251ENTRY(v4wb_dma_unmap_area)
251 mov pc, lr 252 ret lr
252ENDPROC(v4wb_dma_unmap_area) 253ENDPROC(v4wb_dma_unmap_area)
253 254
254 .globl v4wb_flush_kern_cache_louis 255 .globl v4wb_flush_kern_cache_louis
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index 11e5e5838bc5..a0982ce49007 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -13,6 +13,7 @@
13 */ 13 */
14#include <linux/linkage.h> 14#include <linux/linkage.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <asm/assembler.h>
16#include <asm/page.h> 17#include <asm/page.h>
17#include "proc-macros.S" 18#include "proc-macros.S"
18 19
@@ -48,7 +49,7 @@
48ENTRY(v4wt_flush_icache_all) 49ENTRY(v4wt_flush_icache_all)
49 mov r0, #0 50 mov r0, #0
50 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 51 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
51 mov pc, lr 52 ret lr
52ENDPROC(v4wt_flush_icache_all) 53ENDPROC(v4wt_flush_icache_all)
53 54
54/* 55/*
@@ -71,7 +72,7 @@ __flush_whole_cache:
71 tst r2, #VM_EXEC 72 tst r2, #VM_EXEC
72 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 73 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
73 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache 74 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
74 mov pc, lr 75 ret lr
75 76
76/* 77/*
77 * flush_user_cache_range(start, end, flags) 78 * flush_user_cache_range(start, end, flags)
@@ -94,7 +95,7 @@ ENTRY(v4wt_flush_user_cache_range)
94 add r0, r0, #CACHE_DLINESIZE 95 add r0, r0, #CACHE_DLINESIZE
95 cmp r0, r1 96 cmp r0, r1
96 blo 1b 97 blo 1b
97 mov pc, lr 98 ret lr
98 99
99/* 100/*
100 * coherent_kern_range(start, end) 101 * coherent_kern_range(start, end)
@@ -126,7 +127,7 @@ ENTRY(v4wt_coherent_user_range)
126 cmp r0, r1 127 cmp r0, r1
127 blo 1b 128 blo 1b
128 mov r0, #0 129 mov r0, #0
129 mov pc, lr 130 ret lr
130 131
131/* 132/*
132 * flush_kern_dcache_area(void *addr, size_t size) 133 * flush_kern_dcache_area(void *addr, size_t size)
@@ -160,7 +161,7 @@ v4wt_dma_inv_range:
160 add r0, r0, #CACHE_DLINESIZE 161 add r0, r0, #CACHE_DLINESIZE
161 cmp r0, r1 162 cmp r0, r1
162 blo 1b 163 blo 1b
163 mov pc, lr 164 ret lr
164 165
165/* 166/*
166 * dma_flush_range(start, end) 167 * dma_flush_range(start, end)
@@ -192,7 +193,7 @@ ENTRY(v4wt_dma_unmap_area)
192 * - dir - DMA direction 193 * - dir - DMA direction
193 */ 194 */
194ENTRY(v4wt_dma_map_area) 195ENTRY(v4wt_dma_map_area)
195 mov pc, lr 196 ret lr
196ENDPROC(v4wt_dma_unmap_area) 197ENDPROC(v4wt_dma_unmap_area)
197ENDPROC(v4wt_dma_map_area) 198ENDPROC(v4wt_dma_map_area)
198 199
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index d8fd4d4bd3d4..24659952c278 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -51,7 +51,7 @@ ENTRY(v6_flush_icache_all)
51#else 51#else
52 mcr p15, 0, r0, c7, c5, 0 @ invalidate I-cache 52 mcr p15, 0, r0, c7, c5, 0 @ invalidate I-cache
53#endif 53#endif
54 mov pc, lr 54 ret lr
55ENDPROC(v6_flush_icache_all) 55ENDPROC(v6_flush_icache_all)
56 56
57/* 57/*
@@ -73,7 +73,7 @@ ENTRY(v6_flush_kern_cache_all)
73#else 73#else
74 mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate 74 mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate
75#endif 75#endif
76 mov pc, lr 76 ret lr
77 77
78/* 78/*
79 * v6_flush_cache_all() 79 * v6_flush_cache_all()
@@ -98,7 +98,7 @@ ENTRY(v6_flush_user_cache_all)
98 * - we have a VIPT cache. 98 * - we have a VIPT cache.
99 */ 99 */
100ENTRY(v6_flush_user_cache_range) 100ENTRY(v6_flush_user_cache_range)
101 mov pc, lr 101 ret lr
102 102
103/* 103/*
104 * v6_coherent_kern_range(start,end) 104 * v6_coherent_kern_range(start,end)
@@ -150,7 +150,7 @@ ENTRY(v6_coherent_user_range)
150#else 150#else
151 mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB 151 mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB
152#endif 152#endif
153 mov pc, lr 153 ret lr
154 154
155/* 155/*
156 * Fault handling for the cache operation above. If the virtual address in r0 156 * Fault handling for the cache operation above. If the virtual address in r0
@@ -158,7 +158,7 @@ ENTRY(v6_coherent_user_range)
158 */ 158 */
1599001: 1599001:
160 mov r0, #-EFAULT 160 mov r0, #-EFAULT
161 mov pc, lr 161 ret lr
162 UNWIND(.fnend ) 162 UNWIND(.fnend )
163ENDPROC(v6_coherent_user_range) 163ENDPROC(v6_coherent_user_range)
164ENDPROC(v6_coherent_kern_range) 164ENDPROC(v6_coherent_kern_range)
@@ -188,7 +188,7 @@ ENTRY(v6_flush_kern_dcache_area)
188 mov r0, #0 188 mov r0, #0
189 mcr p15, 0, r0, c7, c10, 4 189 mcr p15, 0, r0, c7, c10, 4
190#endif 190#endif
191 mov pc, lr 191 ret lr
192 192
193 193
194/* 194/*
@@ -239,7 +239,7 @@ v6_dma_inv_range:
239 blo 1b 239 blo 1b
240 mov r0, #0 240 mov r0, #0
241 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 241 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
242 mov pc, lr 242 ret lr
243 243
244/* 244/*
245 * v6_dma_clean_range(start,end) 245 * v6_dma_clean_range(start,end)
@@ -262,7 +262,7 @@ v6_dma_clean_range:
262 blo 1b 262 blo 1b
263 mov r0, #0 263 mov r0, #0
264 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 264 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
265 mov pc, lr 265 ret lr
266 266
267/* 267/*
268 * v6_dma_flush_range(start,end) 268 * v6_dma_flush_range(start,end)
@@ -290,7 +290,7 @@ ENTRY(v6_dma_flush_range)
290 blo 1b 290 blo 1b
291 mov r0, #0 291 mov r0, #0
292 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 292 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
293 mov pc, lr 293 ret lr
294 294
295/* 295/*
296 * dma_map_area(start, size, dir) 296 * dma_map_area(start, size, dir)
@@ -323,7 +323,7 @@ ENTRY(v6_dma_unmap_area)
323 teq r2, #DMA_TO_DEVICE 323 teq r2, #DMA_TO_DEVICE
324 bne v6_dma_inv_range 324 bne v6_dma_inv_range
325#endif 325#endif
326 mov pc, lr 326 ret lr
327ENDPROC(v6_dma_unmap_area) 327ENDPROC(v6_dma_unmap_area)
328 328
329 .globl v6_flush_kern_cache_louis 329 .globl v6_flush_kern_cache_louis
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 615c99e38ba1..b966656d2c2d 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -61,7 +61,7 @@ ENTRY(v7_invalidate_l1)
61 bgt 1b 61 bgt 1b
62 dsb st 62 dsb st
63 isb 63 isb
64 mov pc, lr 64 ret lr
65ENDPROC(v7_invalidate_l1) 65ENDPROC(v7_invalidate_l1)
66 66
67/* 67/*
@@ -76,7 +76,7 @@ ENTRY(v7_flush_icache_all)
76 mov r0, #0 76 mov r0, #0
77 ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable 77 ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable
78 ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate 78 ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
79 mov pc, lr 79 ret lr
80ENDPROC(v7_flush_icache_all) 80ENDPROC(v7_flush_icache_all)
81 81
82 /* 82 /*
@@ -94,7 +94,7 @@ ENTRY(v7_flush_dcache_louis)
94 ALT_UP(ands r3, r0, #(7 << 27)) @ extract LoUU from clidr 94 ALT_UP(ands r3, r0, #(7 << 27)) @ extract LoUU from clidr
95#ifdef CONFIG_ARM_ERRATA_643719 95#ifdef CONFIG_ARM_ERRATA_643719
96 ALT_SMP(mrceq p15, 0, r2, c0, c0, 0) @ read main ID register 96 ALT_SMP(mrceq p15, 0, r2, c0, c0, 0) @ read main ID register
97 ALT_UP(moveq pc, lr) @ LoUU is zero, so nothing to do 97 ALT_UP(reteq lr) @ LoUU is zero, so nothing to do
98 ldreq r1, =0x410fc090 @ ID of ARM Cortex A9 r0p? 98 ldreq r1, =0x410fc090 @ ID of ARM Cortex A9 r0p?
99 biceq r2, r2, #0x0000000f @ clear minor revision number 99 biceq r2, r2, #0x0000000f @ clear minor revision number
100 teqeq r2, r1 @ test for errata affected core and if so... 100 teqeq r2, r1 @ test for errata affected core and if so...
@@ -102,7 +102,7 @@ ENTRY(v7_flush_dcache_louis)
102#endif 102#endif
103 ALT_SMP(mov r3, r3, lsr #20) @ r3 = LoUIS * 2 103 ALT_SMP(mov r3, r3, lsr #20) @ r3 = LoUIS * 2
104 ALT_UP(mov r3, r3, lsr #26) @ r3 = LoUU * 2 104 ALT_UP(mov r3, r3, lsr #26) @ r3 = LoUU * 2
105 moveq pc, lr @ return if level == 0 105 reteq lr @ return if level == 0
106 mov r10, #0 @ r10 (starting level) = 0 106 mov r10, #0 @ r10 (starting level) = 0
107 b flush_levels @ start flushing cache levels 107 b flush_levels @ start flushing cache levels
108ENDPROC(v7_flush_dcache_louis) 108ENDPROC(v7_flush_dcache_louis)
@@ -168,7 +168,7 @@ finished:
168 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr 168 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
169 dsb st 169 dsb st
170 isb 170 isb
171 mov pc, lr 171 ret lr
172ENDPROC(v7_flush_dcache_all) 172ENDPROC(v7_flush_dcache_all)
173 173
174/* 174/*
@@ -191,7 +191,7 @@ ENTRY(v7_flush_kern_cache_all)
191 ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate 191 ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
192 ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} ) 192 ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
193 THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} ) 193 THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
194 mov pc, lr 194 ret lr
195ENDPROC(v7_flush_kern_cache_all) 195ENDPROC(v7_flush_kern_cache_all)
196 196
197 /* 197 /*
@@ -209,7 +209,7 @@ ENTRY(v7_flush_kern_cache_louis)
209 ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate 209 ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
210 ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} ) 210 ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
211 THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} ) 211 THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
212 mov pc, lr 212 ret lr
213ENDPROC(v7_flush_kern_cache_louis) 213ENDPROC(v7_flush_kern_cache_louis)
214 214
215/* 215/*
@@ -235,7 +235,7 @@ ENTRY(v7_flush_user_cache_all)
235 * - we have a VIPT cache. 235 * - we have a VIPT cache.
236 */ 236 */
237ENTRY(v7_flush_user_cache_range) 237ENTRY(v7_flush_user_cache_range)
238 mov pc, lr 238 ret lr
239ENDPROC(v7_flush_user_cache_all) 239ENDPROC(v7_flush_user_cache_all)
240ENDPROC(v7_flush_user_cache_range) 240ENDPROC(v7_flush_user_cache_range)
241 241
@@ -296,7 +296,7 @@ ENTRY(v7_coherent_user_range)
296 ALT_UP(mcr p15, 0, r0, c7, c5, 6) @ invalidate BTB 296 ALT_UP(mcr p15, 0, r0, c7, c5, 6) @ invalidate BTB
297 dsb ishst 297 dsb ishst
298 isb 298 isb
299 mov pc, lr 299 ret lr
300 300
301/* 301/*
302 * Fault handling for the cache operation above. If the virtual address in r0 302 * Fault handling for the cache operation above. If the virtual address in r0
@@ -307,7 +307,7 @@ ENTRY(v7_coherent_user_range)
307 dsb 307 dsb
308#endif 308#endif
309 mov r0, #-EFAULT 309 mov r0, #-EFAULT
310 mov pc, lr 310 ret lr
311 UNWIND(.fnend ) 311 UNWIND(.fnend )
312ENDPROC(v7_coherent_kern_range) 312ENDPROC(v7_coherent_kern_range)
313ENDPROC(v7_coherent_user_range) 313ENDPROC(v7_coherent_user_range)
@@ -336,7 +336,7 @@ ENTRY(v7_flush_kern_dcache_area)
336 cmp r0, r1 336 cmp r0, r1
337 blo 1b 337 blo 1b
338 dsb st 338 dsb st
339 mov pc, lr 339 ret lr
340ENDPROC(v7_flush_kern_dcache_area) 340ENDPROC(v7_flush_kern_dcache_area)
341 341
342/* 342/*
@@ -369,7 +369,7 @@ v7_dma_inv_range:
369 cmp r0, r1 369 cmp r0, r1
370 blo 1b 370 blo 1b
371 dsb st 371 dsb st
372 mov pc, lr 372 ret lr
373ENDPROC(v7_dma_inv_range) 373ENDPROC(v7_dma_inv_range)
374 374
375/* 375/*
@@ -391,7 +391,7 @@ v7_dma_clean_range:
391 cmp r0, r1 391 cmp r0, r1
392 blo 1b 392 blo 1b
393 dsb st 393 dsb st
394 mov pc, lr 394 ret lr
395ENDPROC(v7_dma_clean_range) 395ENDPROC(v7_dma_clean_range)
396 396
397/* 397/*
@@ -413,7 +413,7 @@ ENTRY(v7_dma_flush_range)
413 cmp r0, r1 413 cmp r0, r1
414 blo 1b 414 blo 1b
415 dsb st 415 dsb st
416 mov pc, lr 416 ret lr
417ENDPROC(v7_dma_flush_range) 417ENDPROC(v7_dma_flush_range)
418 418
419/* 419/*
@@ -439,7 +439,7 @@ ENTRY(v7_dma_unmap_area)
439 add r1, r1, r0 439 add r1, r1, r0
440 teq r2, #DMA_TO_DEVICE 440 teq r2, #DMA_TO_DEVICE
441 bne v7_dma_inv_range 441 bne v7_dma_inv_range
442 mov pc, lr 442 ret lr
443ENDPROC(v7_dma_unmap_area) 443ENDPROC(v7_dma_unmap_area)
444 444
445 __INITDATA 445 __INITDATA
diff --git a/arch/arm/mm/l2c-l2x0-resume.S b/arch/arm/mm/l2c-l2x0-resume.S
index 99b05f21a59a..fda415e4ca8f 100644
--- a/arch/arm/mm/l2c-l2x0-resume.S
+++ b/arch/arm/mm/l2c-l2x0-resume.S
@@ -6,6 +6,7 @@
6 * This code can only be used if you are running in the secure world. 6 * This code can only be used if you are running in the secure world.
7 */ 7 */
8#include <linux/linkage.h> 8#include <linux/linkage.h>
9#include <asm/assembler.h>
9#include <asm/hardware/cache-l2x0.h> 10#include <asm/hardware/cache-l2x0.h>
10 11
11 .text 12 .text
@@ -27,7 +28,7 @@ ENTRY(l2c310_early_resume)
27 28
28 @ Check that the address has been initialised 29 @ Check that the address has been initialised
29 teq r1, #0 30 teq r1, #0
30 moveq pc, lr 31 reteq lr
31 32
32 @ The prefetch and power control registers are revision dependent 33 @ The prefetch and power control registers are revision dependent
33 @ and can be written whether or not the L2 cache is enabled 34 @ and can be written whether or not the L2 cache is enabled
@@ -41,7 +42,7 @@ ENTRY(l2c310_early_resume)
41 @ Don't setup the L2 cache if it is already enabled 42 @ Don't setup the L2 cache if it is already enabled
42 ldr r0, [r1, #L2X0_CTRL] 43 ldr r0, [r1, #L2X0_CTRL]
43 tst r0, #L2X0_CTRL_EN 44 tst r0, #L2X0_CTRL_EN
44 movne pc, lr 45 retne lr
45 46
46 str r3, [r1, #L310_TAG_LATENCY_CTRL] 47 str r3, [r1, #L310_TAG_LATENCY_CTRL]
47 str r4, [r1, #L310_DATA_LATENCY_CTRL] 48 str r4, [r1, #L310_DATA_LATENCY_CTRL]
@@ -51,7 +52,7 @@ ENTRY(l2c310_early_resume)
51 str r2, [r1, #L2X0_AUX_CTRL] 52 str r2, [r1, #L2X0_AUX_CTRL]
52 mov r9, #L2X0_CTRL_EN 53 mov r9, #L2X0_CTRL_EN
53 str r9, [r1, #L2X0_CTRL] 54 str r9, [r1, #L2X0_CTRL]
54 mov pc, lr 55 ret lr
55ENDPROC(l2c310_early_resume) 56ENDPROC(l2c310_early_resume)
56 57
57 .align 58 .align
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index d1a2d05971e0..86ee5d47ce3c 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -73,7 +73,7 @@
73 * cpu_arm1020_proc_init() 73 * cpu_arm1020_proc_init()
74 */ 74 */
75ENTRY(cpu_arm1020_proc_init) 75ENTRY(cpu_arm1020_proc_init)
76 mov pc, lr 76 ret lr
77 77
78/* 78/*
79 * cpu_arm1020_proc_fin() 79 * cpu_arm1020_proc_fin()
@@ -83,7 +83,7 @@ ENTRY(cpu_arm1020_proc_fin)
83 bic r0, r0, #0x1000 @ ...i............ 83 bic r0, r0, #0x1000 @ ...i............
84 bic r0, r0, #0x000e @ ............wca. 84 bic r0, r0, #0x000e @ ............wca.
85 mcr p15, 0, r0, c1, c0, 0 @ disable caches 85 mcr p15, 0, r0, c1, c0, 0 @ disable caches
86 mov pc, lr 86 ret lr
87 87
88/* 88/*
89 * cpu_arm1020_reset(loc) 89 * cpu_arm1020_reset(loc)
@@ -107,7 +107,7 @@ ENTRY(cpu_arm1020_reset)
107 bic ip, ip, #0x000f @ ............wcam 107 bic ip, ip, #0x000f @ ............wcam
108 bic ip, ip, #0x1100 @ ...i...s........ 108 bic ip, ip, #0x1100 @ ...i...s........
109 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 109 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
110 mov pc, r0 110 ret r0
111ENDPROC(cpu_arm1020_reset) 111ENDPROC(cpu_arm1020_reset)
112 .popsection 112 .popsection
113 113
@@ -117,7 +117,7 @@ ENDPROC(cpu_arm1020_reset)
117 .align 5 117 .align 5
118ENTRY(cpu_arm1020_do_idle) 118ENTRY(cpu_arm1020_do_idle)
119 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 119 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
120 mov pc, lr 120 ret lr
121 121
122/* ================================= CACHE ================================ */ 122/* ================================= CACHE ================================ */
123 123
@@ -133,7 +133,7 @@ ENTRY(arm1020_flush_icache_all)
133 mov r0, #0 133 mov r0, #0
134 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 134 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
135#endif 135#endif
136 mov pc, lr 136 ret lr
137ENDPROC(arm1020_flush_icache_all) 137ENDPROC(arm1020_flush_icache_all)
138 138
139/* 139/*
@@ -169,7 +169,7 @@ __flush_whole_cache:
169 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 169 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
170#endif 170#endif
171 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 171 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
172 mov pc, lr 172 ret lr
173 173
174/* 174/*
175 * flush_user_cache_range(start, end, flags) 175 * flush_user_cache_range(start, end, flags)
@@ -200,7 +200,7 @@ ENTRY(arm1020_flush_user_cache_range)
200 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 200 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
201#endif 201#endif
202 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 202 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
203 mov pc, lr 203 ret lr
204 204
205/* 205/*
206 * coherent_kern_range(start, end) 206 * coherent_kern_range(start, end)
@@ -242,7 +242,7 @@ ENTRY(arm1020_coherent_user_range)
242 blo 1b 242 blo 1b
243 mcr p15, 0, ip, c7, c10, 4 @ drain WB 243 mcr p15, 0, ip, c7, c10, 4 @ drain WB
244 mov r0, #0 244 mov r0, #0
245 mov pc, lr 245 ret lr
246 246
247/* 247/*
248 * flush_kern_dcache_area(void *addr, size_t size) 248 * flush_kern_dcache_area(void *addr, size_t size)
@@ -264,7 +264,7 @@ ENTRY(arm1020_flush_kern_dcache_area)
264 blo 1b 264 blo 1b
265#endif 265#endif
266 mcr p15, 0, ip, c7, c10, 4 @ drain WB 266 mcr p15, 0, ip, c7, c10, 4 @ drain WB
267 mov pc, lr 267 ret lr
268 268
269/* 269/*
270 * dma_inv_range(start, end) 270 * dma_inv_range(start, end)
@@ -297,7 +297,7 @@ arm1020_dma_inv_range:
297 blo 1b 297 blo 1b
298#endif 298#endif
299 mcr p15, 0, ip, c7, c10, 4 @ drain WB 299 mcr p15, 0, ip, c7, c10, 4 @ drain WB
300 mov pc, lr 300 ret lr
301 301
302/* 302/*
303 * dma_clean_range(start, end) 303 * dma_clean_range(start, end)
@@ -320,7 +320,7 @@ arm1020_dma_clean_range:
320 blo 1b 320 blo 1b
321#endif 321#endif
322 mcr p15, 0, ip, c7, c10, 4 @ drain WB 322 mcr p15, 0, ip, c7, c10, 4 @ drain WB
323 mov pc, lr 323 ret lr
324 324
325/* 325/*
326 * dma_flush_range(start, end) 326 * dma_flush_range(start, end)
@@ -342,7 +342,7 @@ ENTRY(arm1020_dma_flush_range)
342 blo 1b 342 blo 1b
343#endif 343#endif
344 mcr p15, 0, ip, c7, c10, 4 @ drain WB 344 mcr p15, 0, ip, c7, c10, 4 @ drain WB
345 mov pc, lr 345 ret lr
346 346
347/* 347/*
348 * dma_map_area(start, size, dir) 348 * dma_map_area(start, size, dir)
@@ -365,7 +365,7 @@ ENDPROC(arm1020_dma_map_area)
365 * - dir - DMA direction 365 * - dir - DMA direction
366 */ 366 */
367ENTRY(arm1020_dma_unmap_area) 367ENTRY(arm1020_dma_unmap_area)
368 mov pc, lr 368 ret lr
369ENDPROC(arm1020_dma_unmap_area) 369ENDPROC(arm1020_dma_unmap_area)
370 370
371 .globl arm1020_flush_kern_cache_louis 371 .globl arm1020_flush_kern_cache_louis
@@ -384,7 +384,7 @@ ENTRY(cpu_arm1020_dcache_clean_area)
384 subs r1, r1, #CACHE_DLINESIZE 384 subs r1, r1, #CACHE_DLINESIZE
385 bhi 1b 385 bhi 1b
386#endif 386#endif
387 mov pc, lr 387 ret lr
388 388
389/* =============================== PageTable ============================== */ 389/* =============================== PageTable ============================== */
390 390
@@ -423,7 +423,7 @@ ENTRY(cpu_arm1020_switch_mm)
423 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 423 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
424 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs 424 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
425#endif /* CONFIG_MMU */ 425#endif /* CONFIG_MMU */
426 mov pc, lr 426 ret lr
427 427
428/* 428/*
429 * cpu_arm1020_set_pte(ptep, pte) 429 * cpu_arm1020_set_pte(ptep, pte)
@@ -441,7 +441,7 @@ ENTRY(cpu_arm1020_set_pte_ext)
441#endif 441#endif
442 mcr p15, 0, r0, c7, c10, 4 @ drain WB 442 mcr p15, 0, r0, c7, c10, 4 @ drain WB
443#endif /* CONFIG_MMU */ 443#endif /* CONFIG_MMU */
444 mov pc, lr 444 ret lr
445 445
446 .type __arm1020_setup, #function 446 .type __arm1020_setup, #function
447__arm1020_setup: 447__arm1020_setup:
@@ -460,7 +460,7 @@ __arm1020_setup:
460#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN 460#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
461 orr r0, r0, #0x4000 @ .R.. .... .... .... 461 orr r0, r0, #0x4000 @ .R.. .... .... ....
462#endif 462#endif
463 mov pc, lr 463 ret lr
464 .size __arm1020_setup, . - __arm1020_setup 464 .size __arm1020_setup, . - __arm1020_setup
465 465
466 /* 466 /*
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 9d89405c3d03..a6331d78601f 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -73,7 +73,7 @@
73 * cpu_arm1020e_proc_init() 73 * cpu_arm1020e_proc_init()
74 */ 74 */
75ENTRY(cpu_arm1020e_proc_init) 75ENTRY(cpu_arm1020e_proc_init)
76 mov pc, lr 76 ret lr
77 77
78/* 78/*
79 * cpu_arm1020e_proc_fin() 79 * cpu_arm1020e_proc_fin()
@@ -83,7 +83,7 @@ ENTRY(cpu_arm1020e_proc_fin)
83 bic r0, r0, #0x1000 @ ...i............ 83 bic r0, r0, #0x1000 @ ...i............
84 bic r0, r0, #0x000e @ ............wca. 84 bic r0, r0, #0x000e @ ............wca.
85 mcr p15, 0, r0, c1, c0, 0 @ disable caches 85 mcr p15, 0, r0, c1, c0, 0 @ disable caches
86 mov pc, lr 86 ret lr
87 87
88/* 88/*
89 * cpu_arm1020e_reset(loc) 89 * cpu_arm1020e_reset(loc)
@@ -107,7 +107,7 @@ ENTRY(cpu_arm1020e_reset)
107 bic ip, ip, #0x000f @ ............wcam 107 bic ip, ip, #0x000f @ ............wcam
108 bic ip, ip, #0x1100 @ ...i...s........ 108 bic ip, ip, #0x1100 @ ...i...s........
109 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 109 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
110 mov pc, r0 110 ret r0
111ENDPROC(cpu_arm1020e_reset) 111ENDPROC(cpu_arm1020e_reset)
112 .popsection 112 .popsection
113 113
@@ -117,7 +117,7 @@ ENDPROC(cpu_arm1020e_reset)
117 .align 5 117 .align 5
118ENTRY(cpu_arm1020e_do_idle) 118ENTRY(cpu_arm1020e_do_idle)
119 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 119 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
120 mov pc, lr 120 ret lr
121 121
122/* ================================= CACHE ================================ */ 122/* ================================= CACHE ================================ */
123 123
@@ -133,7 +133,7 @@ ENTRY(arm1020e_flush_icache_all)
133 mov r0, #0 133 mov r0, #0
134 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 134 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
135#endif 135#endif
136 mov pc, lr 136 ret lr
137ENDPROC(arm1020e_flush_icache_all) 137ENDPROC(arm1020e_flush_icache_all)
138 138
139/* 139/*
@@ -168,7 +168,7 @@ __flush_whole_cache:
168 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 168 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
169#endif 169#endif
170 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 170 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
171 mov pc, lr 171 ret lr
172 172
173/* 173/*
174 * flush_user_cache_range(start, end, flags) 174 * flush_user_cache_range(start, end, flags)
@@ -197,7 +197,7 @@ ENTRY(arm1020e_flush_user_cache_range)
197 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 197 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
198#endif 198#endif
199 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 199 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
200 mov pc, lr 200 ret lr
201 201
202/* 202/*
203 * coherent_kern_range(start, end) 203 * coherent_kern_range(start, end)
@@ -236,7 +236,7 @@ ENTRY(arm1020e_coherent_user_range)
236 blo 1b 236 blo 1b
237 mcr p15, 0, ip, c7, c10, 4 @ drain WB 237 mcr p15, 0, ip, c7, c10, 4 @ drain WB
238 mov r0, #0 238 mov r0, #0
239 mov pc, lr 239 ret lr
240 240
241/* 241/*
242 * flush_kern_dcache_area(void *addr, size_t size) 242 * flush_kern_dcache_area(void *addr, size_t size)
@@ -257,7 +257,7 @@ ENTRY(arm1020e_flush_kern_dcache_area)
257 blo 1b 257 blo 1b
258#endif 258#endif
259 mcr p15, 0, ip, c7, c10, 4 @ drain WB 259 mcr p15, 0, ip, c7, c10, 4 @ drain WB
260 mov pc, lr 260 ret lr
261 261
262/* 262/*
263 * dma_inv_range(start, end) 263 * dma_inv_range(start, end)
@@ -286,7 +286,7 @@ arm1020e_dma_inv_range:
286 blo 1b 286 blo 1b
287#endif 287#endif
288 mcr p15, 0, ip, c7, c10, 4 @ drain WB 288 mcr p15, 0, ip, c7, c10, 4 @ drain WB
289 mov pc, lr 289 ret lr
290 290
291/* 291/*
292 * dma_clean_range(start, end) 292 * dma_clean_range(start, end)
@@ -308,7 +308,7 @@ arm1020e_dma_clean_range:
308 blo 1b 308 blo 1b
309#endif 309#endif
310 mcr p15, 0, ip, c7, c10, 4 @ drain WB 310 mcr p15, 0, ip, c7, c10, 4 @ drain WB
311 mov pc, lr 311 ret lr
312 312
313/* 313/*
314 * dma_flush_range(start, end) 314 * dma_flush_range(start, end)
@@ -328,7 +328,7 @@ ENTRY(arm1020e_dma_flush_range)
328 blo 1b 328 blo 1b
329#endif 329#endif
330 mcr p15, 0, ip, c7, c10, 4 @ drain WB 330 mcr p15, 0, ip, c7, c10, 4 @ drain WB
331 mov pc, lr 331 ret lr
332 332
333/* 333/*
334 * dma_map_area(start, size, dir) 334 * dma_map_area(start, size, dir)
@@ -351,7 +351,7 @@ ENDPROC(arm1020e_dma_map_area)
351 * - dir - DMA direction 351 * - dir - DMA direction
352 */ 352 */
353ENTRY(arm1020e_dma_unmap_area) 353ENTRY(arm1020e_dma_unmap_area)
354 mov pc, lr 354 ret lr
355ENDPROC(arm1020e_dma_unmap_area) 355ENDPROC(arm1020e_dma_unmap_area)
356 356
357 .globl arm1020e_flush_kern_cache_louis 357 .globl arm1020e_flush_kern_cache_louis
@@ -369,7 +369,7 @@ ENTRY(cpu_arm1020e_dcache_clean_area)
369 subs r1, r1, #CACHE_DLINESIZE 369 subs r1, r1, #CACHE_DLINESIZE
370 bhi 1b 370 bhi 1b
371#endif 371#endif
372 mov pc, lr 372 ret lr
373 373
374/* =============================== PageTable ============================== */ 374/* =============================== PageTable ============================== */
375 375
@@ -407,7 +407,7 @@ ENTRY(cpu_arm1020e_switch_mm)
407 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 407 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
408 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs 408 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
409#endif 409#endif
410 mov pc, lr 410 ret lr
411 411
412/* 412/*
413 * cpu_arm1020e_set_pte(ptep, pte) 413 * cpu_arm1020e_set_pte(ptep, pte)
@@ -423,7 +423,7 @@ ENTRY(cpu_arm1020e_set_pte_ext)
423 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 423 mcr p15, 0, r0, c7, c10, 1 @ clean D entry
424#endif 424#endif
425#endif /* CONFIG_MMU */ 425#endif /* CONFIG_MMU */
426 mov pc, lr 426 ret lr
427 427
428 .type __arm1020e_setup, #function 428 .type __arm1020e_setup, #function
429__arm1020e_setup: 429__arm1020e_setup:
@@ -441,7 +441,7 @@ __arm1020e_setup:
441#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN 441#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
442 orr r0, r0, #0x4000 @ .R.. .... .... .... 442 orr r0, r0, #0x4000 @ .R.. .... .... ....
443#endif 443#endif
444 mov pc, lr 444 ret lr
445 .size __arm1020e_setup, . - __arm1020e_setup 445 .size __arm1020e_setup, . - __arm1020e_setup
446 446
447 /* 447 /*
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 6f01a0ae3b30..a126b7a59928 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -62,7 +62,7 @@
  * cpu_arm1022_proc_init()
  */
 ENTRY(cpu_arm1022_proc_init)
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm1022_proc_fin()
@@ -72,7 +72,7 @@ ENTRY(cpu_arm1022_proc_fin)
 	bic r0, r0, #0x1000 @ ...i............
 	bic r0, r0, #0x000e @ ............wca.
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm1022_reset(loc)
@@ -96,7 +96,7 @@ ENTRY(cpu_arm1022_reset)
 	bic ip, ip, #0x000f @ ............wcam
 	bic ip, ip, #0x1100 @ ...i...s........
 	mcr p15, 0, ip, c1, c0, 0 @ ctrl register
-	mov pc, r0
+	ret r0
 ENDPROC(cpu_arm1022_reset)
 	.popsection

@@ -106,7 +106,7 @@ ENDPROC(cpu_arm1022_reset)
 	.align 5
 ENTRY(cpu_arm1022_do_idle)
 	mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
-	mov pc, lr
+	ret lr

 /* ================================= CACHE ================================ */

@@ -122,7 +122,7 @@ ENTRY(arm1022_flush_icache_all)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
 #endif
-	mov pc, lr
+	ret lr
 ENDPROC(arm1022_flush_icache_all)

 /*
@@ -156,7 +156,7 @@ __flush_whole_cache:
 	mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
 #endif
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * flush_user_cache_range(start, end, flags)
@@ -185,7 +185,7 @@ ENTRY(arm1022_flush_user_cache_range)
 	mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
 #endif
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * coherent_kern_range(start, end)
@@ -225,7 +225,7 @@ ENTRY(arm1022_coherent_user_range)
 	blo 1b
 	mcr p15, 0, ip, c7, c10, 4 @ drain WB
 	mov r0, #0
-	mov pc, lr
+	ret lr

 /*
  * flush_kern_dcache_area(void *addr, size_t size)
@@ -246,7 +246,7 @@ ENTRY(arm1022_flush_kern_dcache_area)
 	blo 1b
 #endif
 	mcr p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_inv_range(start, end)
@@ -275,7 +275,7 @@ arm1022_dma_inv_range:
 	blo 1b
 #endif
 	mcr p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_clean_range(start, end)
@@ -297,7 +297,7 @@ arm1022_dma_clean_range:
 	blo 1b
 #endif
 	mcr p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_flush_range(start, end)
@@ -317,7 +317,7 @@ ENTRY(arm1022_dma_flush_range)
 	blo 1b
 #endif
 	mcr p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_map_area(start, size, dir)
@@ -340,7 +340,7 @@ ENDPROC(arm1022_dma_map_area)
  * - dir - DMA direction
  */
 ENTRY(arm1022_dma_unmap_area)
-	mov pc, lr
+	ret lr
 ENDPROC(arm1022_dma_unmap_area)

 	.globl arm1022_flush_kern_cache_louis
@@ -358,7 +358,7 @@ ENTRY(cpu_arm1022_dcache_clean_area)
 	subs r1, r1, #CACHE_DLINESIZE
 	bhi 1b
 #endif
-	mov pc, lr
+	ret lr

 /* =============================== PageTable ============================== */

@@ -389,7 +389,7 @@ ENTRY(cpu_arm1022_switch_mm)
 	mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
 	mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
 #endif
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm1022_set_pte_ext(ptep, pte, ext)
@@ -405,7 +405,7 @@ ENTRY(cpu_arm1022_set_pte_ext)
 	mcr p15, 0, r0, c7, c10, 1 @ clean D entry
 #endif
 #endif /* CONFIG_MMU */
-	mov pc, lr
+	ret lr

 	.type __arm1022_setup, #function
 __arm1022_setup:
@@ -423,7 +423,7 @@ __arm1022_setup:
 #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
 	orr r0, r0, #0x4000 @ .R..............
 #endif
-	mov pc, lr
+	ret lr
 	.size __arm1022_setup, . - __arm1022_setup

 	/*
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 4799a24b43e6..fc294067e977 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -62,7 +62,7 @@
  * cpu_arm1026_proc_init()
  */
 ENTRY(cpu_arm1026_proc_init)
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm1026_proc_fin()
@@ -72,7 +72,7 @@ ENTRY(cpu_arm1026_proc_fin)
 	bic r0, r0, #0x1000 @ ...i............
 	bic r0, r0, #0x000e @ ............wca.
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm1026_reset(loc)
@@ -96,7 +96,7 @@ ENTRY(cpu_arm1026_reset)
 	bic ip, ip, #0x000f @ ............wcam
 	bic ip, ip, #0x1100 @ ...i...s........
 	mcr p15, 0, ip, c1, c0, 0 @ ctrl register
-	mov pc, r0
+	ret r0
 ENDPROC(cpu_arm1026_reset)
 	.popsection

@@ -106,7 +106,7 @@ ENDPROC(cpu_arm1026_reset)
 	.align 5
 ENTRY(cpu_arm1026_do_idle)
 	mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
-	mov pc, lr
+	ret lr

 /* ================================= CACHE ================================ */

@@ -122,7 +122,7 @@ ENTRY(arm1026_flush_icache_all)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
 #endif
-	mov pc, lr
+	ret lr
 ENDPROC(arm1026_flush_icache_all)

 /*
@@ -151,7 +151,7 @@ __flush_whole_cache:
 	mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
 #endif
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * flush_user_cache_range(start, end, flags)
@@ -180,7 +180,7 @@ ENTRY(arm1026_flush_user_cache_range)
 	mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
 #endif
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * coherent_kern_range(start, end)
@@ -219,7 +219,7 @@ ENTRY(arm1026_coherent_user_range)
 	blo 1b
 	mcr p15, 0, ip, c7, c10, 4 @ drain WB
 	mov r0, #0
-	mov pc, lr
+	ret lr

 /*
  * flush_kern_dcache_area(void *addr, size_t size)
@@ -240,7 +240,7 @@ ENTRY(arm1026_flush_kern_dcache_area)
 	blo 1b
 #endif
 	mcr p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_inv_range(start, end)
@@ -269,7 +269,7 @@ arm1026_dma_inv_range:
 	blo 1b
 #endif
 	mcr p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_clean_range(start, end)
@@ -291,7 +291,7 @@ arm1026_dma_clean_range:
 	blo 1b
 #endif
 	mcr p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_flush_range(start, end)
@@ -311,7 +311,7 @@ ENTRY(arm1026_dma_flush_range)
 	blo 1b
 #endif
 	mcr p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_map_area(start, size, dir)
@@ -334,7 +334,7 @@ ENDPROC(arm1026_dma_map_area)
  * - dir - DMA direction
  */
 ENTRY(arm1026_dma_unmap_area)
-	mov pc, lr
+	ret lr
 ENDPROC(arm1026_dma_unmap_area)

 	.globl arm1026_flush_kern_cache_louis
@@ -352,7 +352,7 @@ ENTRY(cpu_arm1026_dcache_clean_area)
 	subs r1, r1, #CACHE_DLINESIZE
 	bhi 1b
 #endif
-	mov pc, lr
+	ret lr

 /* =============================== PageTable ============================== */

@@ -378,7 +378,7 @@ ENTRY(cpu_arm1026_switch_mm)
 	mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
 	mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
 #endif
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm1026_set_pte_ext(ptep, pte, ext)
@@ -394,7 +394,7 @@ ENTRY(cpu_arm1026_set_pte_ext)
 	mcr p15, 0, r0, c7, c10, 1 @ clean D entry
 #endif
 #endif /* CONFIG_MMU */
-	mov pc, lr
+	ret lr

 	.type __arm1026_setup, #function
 __arm1026_setup:
@@ -417,7 +417,7 @@ __arm1026_setup:
 #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
 	orr r0, r0, #0x4000 @ .R.. .... .... ....
 #endif
-	mov pc, lr
+	ret lr
 	.size __arm1026_setup, . - __arm1026_setup

 	/*
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index d42c37f9f5bc..2baa66b3ac9b 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -51,14 +51,14 @@
  */
 ENTRY(cpu_arm720_dcache_clean_area)
 ENTRY(cpu_arm720_proc_init)
-	mov pc, lr
+	ret lr

 ENTRY(cpu_arm720_proc_fin)
 	mrc p15, 0, r0, c1, c0, 0
 	bic r0, r0, #0x1000 @ ...i............
 	bic r0, r0, #0x000e @ ............wca.
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
-	mov pc, lr
+	ret lr

 /*
  * Function: arm720_proc_do_idle(void)
@@ -66,7 +66,7 @@ ENTRY(cpu_arm720_proc_fin)
  * Purpose : put the processor in proper idle mode
  */
 ENTRY(cpu_arm720_do_idle)
-	mov pc, lr
+	ret lr

 /*
  * Function: arm720_switch_mm(unsigned long pgd_phys)
@@ -81,7 +81,7 @@ ENTRY(cpu_arm720_switch_mm)
 	mcr p15, 0, r0, c2, c0, 0 @ update page table ptr
 	mcr p15, 0, r1, c8, c7, 0 @ flush TLB (v4)
 #endif
-	mov pc, lr
+	ret lr

 /*
  * Function: arm720_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext)
@@ -94,7 +94,7 @@ ENTRY(cpu_arm720_set_pte_ext)
 #ifdef CONFIG_MMU
 	armv3_set_pte_ext wc_disable=0
 #endif
-	mov pc, lr
+	ret lr

 /*
  * Function: arm720_reset
@@ -112,7 +112,7 @@ ENTRY(cpu_arm720_reset)
 	bic ip, ip, #0x000f @ ............wcam
 	bic ip, ip, #0x2100 @ ..v....s........
 	mcr p15, 0, ip, c1, c0, 0 @ ctrl register
-	mov pc, r0
+	ret r0
 ENDPROC(cpu_arm720_reset)
 	.popsection

@@ -128,7 +128,7 @@ __arm710_setup:
 	bic r0, r0, r5
 	ldr r5, arm710_cr1_set
 	orr r0, r0, r5
-	mov pc, lr @ __ret (head.S)
+	ret lr @ __ret (head.S)
 	.size __arm710_setup, . - __arm710_setup

 	/*
@@ -156,7 +156,7 @@ __arm720_setup:
 	mrc p15, 0, r0, c1, c0 @ get control register
 	bic r0, r0, r5
 	orr r0, r0, r6
-	mov pc, lr @ __ret (head.S)
+	ret lr @ __ret (head.S)
 	.size __arm720_setup, . - __arm720_setup

 	/*
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index 9b0ae90cbf17..ac1ea6b3bce4 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -32,7 +32,7 @@ ENTRY(cpu_arm740_proc_init)
 ENTRY(cpu_arm740_do_idle)
 ENTRY(cpu_arm740_dcache_clean_area)
 ENTRY(cpu_arm740_switch_mm)
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm740_proc_fin()
@@ -42,7 +42,7 @@ ENTRY(cpu_arm740_proc_fin)
 	bic r0, r0, #0x3f000000 @ bank/f/lock/s
 	bic r0, r0, #0x0000000c @ w-buffer/cache
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm740_reset(loc)
@@ -56,7 +56,7 @@ ENTRY(cpu_arm740_reset)
 	mrc p15, 0, ip, c1, c0, 0 @ get ctrl register
 	bic ip, ip, #0x0000000c @ ............wc..
 	mcr p15, 0, ip, c1, c0, 0 @ ctrl register
-	mov pc, r0
+	ret r0
 ENDPROC(cpu_arm740_reset)
 	.popsection

@@ -115,7 +115,7 @@ __arm740_setup:
 	@ need some benchmark
 	orr r0, r0, #0x0000000d @ MPU/Cache/WB

-	mov pc, lr
+	ret lr

 	.size __arm740_setup, . - __arm740_setup

diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index f6cc3f63ce39..bf6ba4bc30ff 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -32,13 +32,13 @@ ENTRY(cpu_arm7tdmi_proc_init)
 ENTRY(cpu_arm7tdmi_do_idle)
 ENTRY(cpu_arm7tdmi_dcache_clean_area)
 ENTRY(cpu_arm7tdmi_switch_mm)
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm7tdmi_proc_fin()
  */
 ENTRY(cpu_arm7tdmi_proc_fin)
-	mov pc, lr
+	ret lr

 /*
  * Function: cpu_arm7tdmi_reset(loc)
@@ -47,13 +47,13 @@ ENTRY(cpu_arm7tdmi_proc_fin)
  */
 	.pushsection .idmap.text, "ax"
 ENTRY(cpu_arm7tdmi_reset)
-	mov pc, r0
+	ret r0
 ENDPROC(cpu_arm7tdmi_reset)
 	.popsection

 	.type __arm7tdmi_setup, #function
 __arm7tdmi_setup:
-	mov pc, lr
+	ret lr
 	.size __arm7tdmi_setup, . - __arm7tdmi_setup

 	__INITDATA
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 549557df6d57..22bf8dde4f84 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -63,7 +63,7 @@
  * cpu_arm920_proc_init()
  */
 ENTRY(cpu_arm920_proc_init)
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm920_proc_fin()
@@ -73,7 +73,7 @@ ENTRY(cpu_arm920_proc_fin)
 	bic r0, r0, #0x1000 @ ...i............
 	bic r0, r0, #0x000e @ ............wca.
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm920_reset(loc)
@@ -97,7 +97,7 @@ ENTRY(cpu_arm920_reset)
 	bic ip, ip, #0x000f @ ............wcam
 	bic ip, ip, #0x1100 @ ...i...s........
 	mcr p15, 0, ip, c1, c0, 0 @ ctrl register
-	mov pc, r0
+	ret r0
 ENDPROC(cpu_arm920_reset)
 	.popsection

@@ -107,7 +107,7 @@ ENDPROC(cpu_arm920_reset)
 	.align 5
 ENTRY(cpu_arm920_do_idle)
 	mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
-	mov pc, lr
+	ret lr


 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -120,7 +120,7 @@ ENTRY(arm920_flush_icache_all)
 ENTRY(arm920_flush_icache_all)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
-	mov pc, lr
+	ret lr
 ENDPROC(arm920_flush_icache_all)

 /*
@@ -151,7 +151,7 @@ __flush_whole_cache:
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * flush_user_cache_range(start, end, flags)
@@ -177,7 +177,7 @@ ENTRY(arm920_flush_user_cache_range)
 	blo 1b
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * coherent_kern_range(start, end)
@@ -211,7 +211,7 @@ ENTRY(arm920_coherent_user_range)
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 	mov r0, #0
-	mov pc, lr
+	ret lr

 /*
  * flush_kern_dcache_area(void *addr, size_t size)
@@ -231,7 +231,7 @@ ENTRY(arm920_flush_kern_dcache_area)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_inv_range(start, end)
@@ -257,7 +257,7 @@ arm920_dma_inv_range:
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_clean_range(start, end)
@@ -276,7 +276,7 @@ arm920_dma_clean_range:
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_flush_range(start, end)
@@ -293,7 +293,7 @@ ENTRY(arm920_dma_flush_range)
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_map_area(start, size, dir)
@@ -316,7 +316,7 @@ ENDPROC(arm920_dma_map_area)
  * - dir - DMA direction
  */
 ENTRY(arm920_dma_unmap_area)
-	mov pc, lr
+	ret lr
 ENDPROC(arm920_dma_unmap_area)

 	.globl arm920_flush_kern_cache_louis
@@ -332,7 +332,7 @@ ENTRY(cpu_arm920_dcache_clean_area)
 	add r0, r0, #CACHE_DLINESIZE
 	subs r1, r1, #CACHE_DLINESIZE
 	bhi 1b
-	mov pc, lr
+	ret lr

 /* =============================== PageTable ============================== */

@@ -367,7 +367,7 @@ ENTRY(cpu_arm920_switch_mm)
 	mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
 	mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
 #endif
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm920_set_pte(ptep, pte, ext)
@@ -382,7 +382,7 @@ ENTRY(cpu_arm920_set_pte_ext)
 	mcr p15, 0, r0, c7, c10, 1 @ clean D entry
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 #endif
-	mov pc, lr
+	ret lr

 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl cpu_arm920_suspend_size
@@ -423,7 +423,7 @@ __arm920_setup:
 	mrc p15, 0, r0, c1, c0 @ get control register v4
 	bic r0, r0, r5
 	orr r0, r0, r6
-	mov pc, lr
+	ret lr
 	.size __arm920_setup, . - __arm920_setup

 	/*
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 2a758b06c6f6..0c6d5ac5a6d4 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -65,7 +65,7 @@
  * cpu_arm922_proc_init()
  */
 ENTRY(cpu_arm922_proc_init)
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm922_proc_fin()
@@ -75,7 +75,7 @@ ENTRY(cpu_arm922_proc_fin)
 	bic r0, r0, #0x1000 @ ...i............
 	bic r0, r0, #0x000e @ ............wca.
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm922_reset(loc)
@@ -99,7 +99,7 @@ ENTRY(cpu_arm922_reset)
 	bic ip, ip, #0x000f @ ............wcam
 	bic ip, ip, #0x1100 @ ...i...s........
 	mcr p15, 0, ip, c1, c0, 0 @ ctrl register
-	mov pc, r0
+	ret r0
 ENDPROC(cpu_arm922_reset)
 	.popsection

@@ -109,7 +109,7 @@ ENDPROC(cpu_arm922_reset)
 	.align 5
 ENTRY(cpu_arm922_do_idle)
 	mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
-	mov pc, lr
+	ret lr


 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -122,7 +122,7 @@ ENTRY(arm922_flush_icache_all)
 ENTRY(arm922_flush_icache_all)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
-	mov pc, lr
+	ret lr
 ENDPROC(arm922_flush_icache_all)

 /*
@@ -153,7 +153,7 @@ __flush_whole_cache:
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * flush_user_cache_range(start, end, flags)
@@ -179,7 +179,7 @@ ENTRY(arm922_flush_user_cache_range)
 	blo 1b
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * coherent_kern_range(start, end)
@@ -213,7 +213,7 @@ ENTRY(arm922_coherent_user_range)
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 	mov r0, #0
-	mov pc, lr
+	ret lr

 /*
  * flush_kern_dcache_area(void *addr, size_t size)
@@ -233,7 +233,7 @@ ENTRY(arm922_flush_kern_dcache_area)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_inv_range(start, end)
@@ -259,7 +259,7 @@ arm922_dma_inv_range:
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_clean_range(start, end)
@@ -278,7 +278,7 @@ arm922_dma_clean_range:
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_flush_range(start, end)
@@ -295,7 +295,7 @@ ENTRY(arm922_dma_flush_range)
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_map_area(start, size, dir)
@@ -318,7 +318,7 @@ ENDPROC(arm922_dma_map_area)
  * - dir - DMA direction
  */
 ENTRY(arm922_dma_unmap_area)
-	mov pc, lr
+	ret lr
 ENDPROC(arm922_dma_unmap_area)

 	.globl arm922_flush_kern_cache_louis
@@ -336,7 +336,7 @@ ENTRY(cpu_arm922_dcache_clean_area)
 	subs r1, r1, #CACHE_DLINESIZE
 	bhi 1b
 #endif
-	mov pc, lr
+	ret lr

 /* =============================== PageTable ============================== */

@@ -371,7 +371,7 @@ ENTRY(cpu_arm922_switch_mm)
 	mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
 	mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
 #endif
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm922_set_pte_ext(ptep, pte, ext)
@@ -386,7 +386,7 @@ ENTRY(cpu_arm922_set_pte_ext)
 	mcr p15, 0, r0, c7, c10, 1 @ clean D entry
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 #endif /* CONFIG_MMU */
-	mov pc, lr
+	ret lr

 	.type __arm922_setup, #function
 __arm922_setup:
@@ -401,7 +401,7 @@ __arm922_setup:
 	mrc p15, 0, r0, c1, c0 @ get control register v4
 	bic r0, r0, r5
 	orr r0, r0, r6
-	mov pc, lr
+	ret lr
 	.size __arm922_setup, . - __arm922_setup

 	/*
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index ba0d58e1a2a2..c32d073282ea 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -86,7 +86,7 @@
  * cpu_arm925_proc_init()
  */
 ENTRY(cpu_arm925_proc_init)
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm925_proc_fin()
@@ -96,7 +96,7 @@ ENTRY(cpu_arm925_proc_fin)
 	bic r0, r0, #0x1000 @ ...i............
 	bic r0, r0, #0x000e @ ............wca.
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm925_reset(loc)
@@ -129,7 +129,7 @@ ENDPROC(cpu_arm925_reset)
 	bic ip, ip, #0x000f @ ............wcam
 	bic ip, ip, #0x1100 @ ...i...s........
 	mcr p15, 0, ip, c1, c0, 0 @ ctrl register
-	mov pc, r0
+	ret r0

 /*
  * cpu_arm925_do_idle()
@@ -145,7 +145,7 @@ ENTRY(cpu_arm925_do_idle)
 	mcr p15, 0, r2, c1, c0, 0 @ Disable I cache
 	mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
 	mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable
-	mov pc, lr
+	ret lr

 /*
  * flush_icache_all()
@@ -155,7 +155,7 @@ ENTRY(cpu_arm925_do_idle)
 ENTRY(arm925_flush_icache_all)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
-	mov pc, lr
+	ret lr
 ENDPROC(arm925_flush_icache_all)

 /*
@@ -188,7 +188,7 @@ __flush_whole_cache:
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * flush_user_cache_range(start, end, flags)
@@ -225,7 +225,7 @@ ENTRY(arm925_flush_user_cache_range)
 	blo 1b
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * coherent_kern_range(start, end)
@@ -259,7 +259,7 @@ ENTRY(arm925_coherent_user_range)
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 	mov r0, #0
-	mov pc, lr
+	ret lr

 /*
  * flush_kern_dcache_area(void *addr, size_t size)
@@ -279,7 +279,7 @@ ENTRY(arm925_flush_kern_dcache_area)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_inv_range(start, end)
@@ -307,7 +307,7 @@ arm925_dma_inv_range:
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_clean_range(start, end)
@@ -328,7 +328,7 @@ arm925_dma_clean_range:
 	blo 1b
 #endif
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_flush_range(start, end)
@@ -350,7 +350,7 @@ ENTRY(arm925_dma_flush_range)
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_map_area(start, size, dir)
@@ -373,7 +373,7 @@ ENDPROC(arm925_dma_map_area)
  * - dir - DMA direction
  */
 ENTRY(arm925_dma_unmap_area)
-	mov pc, lr
+	ret lr
 ENDPROC(arm925_dma_unmap_area)

 	.globl arm925_flush_kern_cache_louis
@@ -390,7 +390,7 @@ ENTRY(cpu_arm925_dcache_clean_area)
 	bhi 1b
 #endif
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /* =============================== PageTable ============================== */

@@ -419,7 +419,7 @@ ENTRY(cpu_arm925_switch_mm)
 	mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
 	mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
 #endif
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm925_set_pte_ext(ptep, pte, ext)
@@ -436,7 +436,7 @@ ENTRY(cpu_arm925_set_pte_ext)
 #endif
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 #endif /* CONFIG_MMU */
-	mov pc, lr
+	ret lr

 	.type __arm925_setup, #function
 __arm925_setup:
@@ -469,7 +469,7 @@ __arm925_setup:
 #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
 	orr r0, r0, #0x4000 @ .1.. .... .... ....
 #endif
-	mov pc, lr
+	ret lr
 	.size __arm925_setup, . - __arm925_setup

 	/*
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 0f098f407c9f..252b2503038d 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -55,7 +55,7 @@
  * cpu_arm926_proc_init()
  */
 ENTRY(cpu_arm926_proc_init)
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm926_proc_fin()
@@ -65,7 +65,7 @@ ENTRY(cpu_arm926_proc_fin)
 	bic r0, r0, #0x1000 @ ...i............
 	bic r0, r0, #0x000e @ ............wca.
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm926_reset(loc)
@@ -89,7 +89,7 @@ ENTRY(cpu_arm926_reset)
 	bic ip, ip, #0x000f @ ............wcam
 	bic ip, ip, #0x1100 @ ...i...s........
 	mcr p15, 0, ip, c1, c0, 0 @ ctrl register
-	mov pc, r0
+	ret r0
 ENDPROC(cpu_arm926_reset)
 	.popsection

@@ -111,7 +111,7 @@ ENTRY(cpu_arm926_do_idle)
 	mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
 	mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable
 	msr cpsr_c, r3 @ Restore FIQ state
-	mov pc, lr
+	ret lr

 /*
  * flush_icache_all()
@@ -121,7 +121,7 @@ ENTRY(cpu_arm926_do_idle)
 ENTRY(arm926_flush_icache_all)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
-	mov pc, lr
+	ret lr
 ENDPROC(arm926_flush_icache_all)

 /*
@@ -151,7 +151,7 @@ __flush_whole_cache:
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * flush_user_cache_range(start, end, flags)
@@ -188,7 +188,7 @@ ENTRY(arm926_flush_user_cache_range)
 	blo 1b
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * coherent_kern_range(start, end)
@@ -222,7 +222,7 @@ ENTRY(arm926_coherent_user_range)
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 	mov r0, #0
-	mov pc, lr
+	ret lr

 /*
  * flush_kern_dcache_area(void *addr, size_t size)
@@ -242,7 +242,7 @@ ENTRY(arm926_flush_kern_dcache_area)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_inv_range(start, end)
@@ -270,7 +270,7 @@ arm926_dma_inv_range:
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_clean_range(start, end)
@@ -291,7 +291,7 @@ arm926_dma_clean_range:
 	blo 1b
 #endif
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_flush_range(start, end)
@@ -313,7 +313,7 @@ ENTRY(arm926_dma_flush_range)
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_map_area(start, size, dir)
@@ -336,7 +336,7 @@ ENDPROC(arm926_dma_map_area)
  * - dir - DMA direction
  */
 ENTRY(arm926_dma_unmap_area)
-	mov pc, lr
+	ret lr
 ENDPROC(arm926_dma_unmap_area)

 	.globl arm926_flush_kern_cache_louis
@@ -353,7 +353,7 @@ ENTRY(cpu_arm926_dcache_clean_area)
 	bhi 1b
 #endif
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /* =============================== PageTable ============================== */

@@ -380,7 +380,7 @@ ENTRY(cpu_arm926_switch_mm)
 	mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
 	mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
 #endif
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm926_set_pte_ext(ptep, pte, ext)
@@ -397,7 +397,7 @@ ENTRY(cpu_arm926_set_pte_ext)
 #endif
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 #endif
-	mov pc, lr
+	ret lr

 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl cpu_arm926_suspend_size
@@ -448,7 +448,7 @@ __arm926_setup:
 #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
 	orr r0, r0, #0x4000 @ .1.. .... .... ....
 #endif
-	mov pc, lr
+	ret lr
 	.size __arm926_setup, . - __arm926_setup

 	/*
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 1c39a704ff6e..e5212d489377 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -31,7 +31,7 @@
  */
 ENTRY(cpu_arm940_proc_init)
 ENTRY(cpu_arm940_switch_mm)
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm940_proc_fin()
@@ -41,7 +41,7 @@ ENTRY(cpu_arm940_proc_fin)
 	bic r0, r0, #0x00001000 @ i-cache
 	bic r0, r0, #0x00000004 @ d-cache
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm940_reset(loc)
@@ -58,7 +58,7 @@ ENTRY(cpu_arm940_reset)
 	bic ip, ip, #0x00000005 @ .............c.p
 	bic ip, ip, #0x00001000 @ i-cache
 	mcr p15, 0, ip, c1, c0, 0 @ ctrl register
-	mov pc, r0
+	ret r0
 ENDPROC(cpu_arm940_reset)
 	.popsection

@@ -68,7 +68,7 @@ ENDPROC(cpu_arm940_reset)
 	.align 5
 ENTRY(cpu_arm940_do_idle)
 	mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
-	mov pc, lr
+	ret lr

 /*
  * flush_icache_all()
@@ -78,7 +78,7 @@ ENTRY(cpu_arm940_do_idle)
 ENTRY(arm940_flush_icache_all)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
-	mov pc, lr
+	ret lr
 ENDPROC(arm940_flush_icache_all)

 /*
@@ -122,7 +122,7 @@ ENTRY(arm940_flush_user_cache_range)
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * coherent_kern_range(start, end)
@@ -170,7 +170,7 @@ ENTRY(arm940_flush_kern_dcache_area)
 	bcs 1b @ segments 7 to 0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_inv_range(start, end)
@@ -191,7 +191,7 @@ arm940_dma_inv_range:
 	subs r1, r1, #1 << 4
 	bcs 1b @ segments 7 to 0
 	mcr p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_clean_range(start, end)
@@ -215,7 +215,7 @@ ENTRY(cpu_arm940_dcache_clean_area)
 	bcs 1b @ segments 7 to 0
 #endif
 	mcr p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_flush_range(start, end)
@@ -241,7 +241,7 @@ ENTRY(arm940_dma_flush_range)
 	subs r1, r1, #1 << 4
 	bcs 1b @ segments 7 to 0
 	mcr p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_map_area(start, size, dir)
@@ -264,7 +264,7 @@ ENDPROC(arm940_dma_map_area)
  * - dir - DMA direction
  */
 ENTRY(arm940_dma_unmap_area)
-	mov pc, lr
+	ret lr
 ENDPROC(arm940_dma_unmap_area)

 	.globl arm940_flush_kern_cache_louis
@@ -337,7 +337,7 @@ __arm940_setup:
 	orr r0, r0, #0x00001000 @ I-cache
 	orr r0, r0, #0x00000005 @ MPU/D-cache

-	mov pc, lr
+	ret lr

 	.size __arm940_setup, . - __arm940_setup

diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 0289cd905e73..b3dd9b2d0b8e 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -38,7 +38,7 @@
  */
 ENTRY(cpu_arm946_proc_init)
 ENTRY(cpu_arm946_switch_mm)
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm946_proc_fin()
@@ -48,7 +48,7 @@ ENTRY(cpu_arm946_proc_fin)
 	bic r0, r0, #0x00001000 @ i-cache
 	bic r0, r0, #0x00000004 @ d-cache
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm946_reset(loc)
@@ -65,7 +65,7 @@ ENTRY(cpu_arm946_reset)
 	bic ip, ip, #0x00000005 @ .............c.p
 	bic ip, ip, #0x00001000 @ i-cache
 	mcr p15, 0, ip, c1, c0, 0 @ ctrl register
-	mov pc, r0
+	ret r0
ENDPROC(cpu_arm946_reset)
 	.popsection

@@ -75,7 +75,7 @@ ENDPROC(cpu_arm946_reset)
 	.align 5
 ENTRY(cpu_arm946_do_idle)
 	mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
-	mov pc, lr
+	ret lr

 /*
  * flush_icache_all()
@@ -85,7 +85,7 @@ ENTRY(cpu_arm946_do_idle)
 ENTRY(arm946_flush_icache_all)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
-	mov pc, lr
+	ret lr
 ENDPROC(arm946_flush_icache_all)

 /*
@@ -117,7 +117,7 @@ __flush_whole_cache:
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c5, 0 @ flush I cache
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * flush_user_cache_range(start, end, flags)
@@ -156,7 +156,7 @@ ENTRY(arm946_flush_user_cache_range)
 	blo 1b
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * coherent_kern_range(start, end)
@@ -191,7 +191,7 @@ ENTRY(arm946_coherent_user_range)
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 	mov r0, #0
-	mov pc, lr
+	ret lr

 /*
  * flush_kern_dcache_area(void *addr, size_t size)
@@ -212,7 +212,7 @@ ENTRY(arm946_flush_kern_dcache_area)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_inv_range(start, end)
@@ -239,7 +239,7 @@ arm946_dma_inv_range:
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_clean_range(start, end)
@@ -260,7 +260,7 @@ arm946_dma_clean_range:
 	blo 1b
 #endif
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_flush_range(start, end)
@@ -284,7 +284,7 @@ ENTRY(arm946_dma_flush_range)
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /*
  * dma_map_area(start, size, dir)
@@ -307,7 +307,7 @@ ENDPROC(arm946_dma_map_area)
  * - dir - DMA direction
  */
 ENTRY(arm946_dma_unmap_area)
-	mov pc, lr
+	ret lr
 ENDPROC(arm946_dma_unmap_area)

 	.globl arm946_flush_kern_cache_louis
@@ -324,7 +324,7 @@ ENTRY(cpu_arm946_dcache_clean_area)
 	bhi 1b
 #endif
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 	.type __arm946_setup, #function
 __arm946_setup:
@@ -392,7 +392,7 @@ __arm946_setup:
 #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
 	orr r0, r0, #0x00004000 @ .1.. .... .... ....
 #endif
-	mov pc, lr
+	ret lr

 	.size __arm946_setup, . - __arm946_setup

diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index f51197ba754a..8227322bbb8f 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -32,13 +32,13 @@ ENTRY(cpu_arm9tdmi_proc_init)
 ENTRY(cpu_arm9tdmi_do_idle)
 ENTRY(cpu_arm9tdmi_dcache_clean_area)
 ENTRY(cpu_arm9tdmi_switch_mm)
-	mov pc, lr
+	ret lr

 /*
  * cpu_arm9tdmi_proc_fin()
  */
 ENTRY(cpu_arm9tdmi_proc_fin)
-	mov pc, lr
+	ret lr

 /*
  * Function: cpu_arm9tdmi_reset(loc)
@@ -47,13 +47,13 @@ ENTRY(cpu_arm9tdmi_proc_fin)
  */
 	.pushsection .idmap.text, "ax"
 ENTRY(cpu_arm9tdmi_reset)
-	mov pc, r0
+	ret r0
 ENDPROC(cpu_arm9tdmi_reset)
 	.popsection

 	.type __arm9tdmi_setup, #function
 __arm9tdmi_setup:
-	mov pc, lr
+	ret lr
 	.size __arm9tdmi_setup, . - __arm9tdmi_setup

 	__INITDATA
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index 2dfc0f1d3bfd..c494886892ba 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -32,7 +32,7 @@
  * cpu_fa526_proc_init()
  */
 ENTRY(cpu_fa526_proc_init)
-	mov pc, lr
+	ret lr

 /*
  * cpu_fa526_proc_fin()
@@ -44,7 +44,7 @@ ENTRY(cpu_fa526_proc_fin)
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
 	nop
 	nop
-	mov pc, lr
+	ret lr

 /*
  * cpu_fa526_reset(loc)
@@ -72,7 +72,7 @@ ENTRY(cpu_fa526_reset)
 	mcr p15, 0, ip, c1, c0, 0 @ ctrl register
 	nop
 	nop
-	mov pc, r0
+	ret r0
 ENDPROC(cpu_fa526_reset)
 	.popsection

@@ -81,7 +81,7 @@ ENDPROC(cpu_fa526_reset)
  */
 	.align 4
 ENTRY(cpu_fa526_do_idle)
-	mov pc, lr
+	ret lr


 ENTRY(cpu_fa526_dcache_clean_area)
@@ -90,7 +90,7 @@ ENTRY(cpu_fa526_dcache_clean_area)
 	subs r1, r1, #CACHE_DLINESIZE
 	bhi 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr

 /* =============================== PageTable ============================== */

@@ -117,7 +117,7 @@ ENTRY(cpu_fa526_switch_mm)
 	mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
 	mcr p15, 0, ip, c8, c7, 0 @ invalidate UTLB
 #endif
-	mov pc, lr
+	ret lr

 /*
  * cpu_fa526_set_pte_ext(ptep, pte, ext)
@@ -133,7 +133,7 @@ ENTRY(cpu_fa526_set_pte_ext)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 #endif
-	mov pc, lr
+	ret lr

 	.type __fa526_setup, #function
 __fa526_setup:
@@ -162,7 +162,7 @@ __fa526_setup:
 	bic r0, r0, r5
 	ldr r5, fa526_cr1_set
 	orr r0, r0, r5
-	mov pc, lr
+	ret lr
 	.size __fa526_setup, . - __fa526_setup

 	/*
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index db79b62c92fb..03a1b75f2e16 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -69,7 +69,7 @@ ENTRY(cpu_feroceon_proc_init)
 	movne r2, r2, lsr #2 @ turned into # of sets
 	sub r2, r2, #(1 << 5)
 	stmia r1, {r2, r3}
-	mov pc, lr
+	ret lr
 
 /*
  * cpu_feroceon_proc_fin()
@@ -86,7 +86,7 @@ ENTRY(cpu_feroceon_proc_fin)
 	bic r0, r0, #0x1000 @ ...i............
 	bic r0, r0, #0x000e @ ............wca.
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
-	mov pc, lr
+	ret lr
 
 /*
  * cpu_feroceon_reset(loc)
@@ -110,7 +110,7 @@ ENTRY(cpu_feroceon_reset)
 	bic ip, ip, #0x000f @ ............wcam
 	bic ip, ip, #0x1100 @ ...i...s........
 	mcr p15, 0, ip, c1, c0, 0 @ ctrl register
-	mov pc, r0
+	ret r0
 ENDPROC(cpu_feroceon_reset)
 	.popsection
 
@@ -124,7 +124,7 @@ ENTRY(cpu_feroceon_do_idle)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
 	mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
-	mov pc, lr
+	ret lr
 
 /*
  * flush_icache_all()
@@ -134,7 +134,7 @@ ENTRY(cpu_feroceon_do_idle)
 ENTRY(feroceon_flush_icache_all)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
-	mov pc, lr
+	ret lr
 ENDPROC(feroceon_flush_icache_all)
 
 /*
@@ -169,7 +169,7 @@ __flush_whole_cache:
 	mov ip, #0
 	mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr
 
 /*
  * flush_user_cache_range(start, end, flags)
@@ -198,7 +198,7 @@ ENTRY(feroceon_flush_user_cache_range)
 	tst r2, #VM_EXEC
 	mov ip, #0
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr
 
 /*
  * coherent_kern_range(start, end)
@@ -233,7 +233,7 @@ ENTRY(feroceon_coherent_user_range)
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 	mov r0, #0
-	mov pc, lr
+	ret lr
 
 /*
  * flush_kern_dcache_area(void *addr, size_t size)
@@ -254,7 +254,7 @@ ENTRY(feroceon_flush_kern_dcache_area)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr
 
 	.align 5
 ENTRY(feroceon_range_flush_kern_dcache_area)
@@ -268,7 +268,7 @@ ENTRY(feroceon_range_flush_kern_dcache_area)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr
 
 /*
  * dma_inv_range(start, end)
@@ -295,7 +295,7 @@ feroceon_dma_inv_range:
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr
 
 	.align 5
 feroceon_range_dma_inv_range:
@@ -311,7 +311,7 @@ feroceon_range_dma_inv_range:
 	mcr p15, 5, r0, c15, c14, 0 @ D inv range start
 	mcr p15, 5, r1, c15, c14, 1 @ D inv range top
 	msr cpsr_c, r2 @ restore interrupts
-	mov pc, lr
+	ret lr
 
 /*
  * dma_clean_range(start, end)
@@ -331,7 +331,7 @@ feroceon_dma_clean_range:
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr
 
 	.align 5
 feroceon_range_dma_clean_range:
@@ -344,7 +344,7 @@ feroceon_range_dma_clean_range:
 	mcr p15, 5, r1, c15, c13, 1 @ D clean range top
 	msr cpsr_c, r2 @ restore interrupts
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr
 
 /*
  * dma_flush_range(start, end)
@@ -362,7 +362,7 @@ ENTRY(feroceon_dma_flush_range)
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr
 
 	.align 5
 ENTRY(feroceon_range_dma_flush_range)
@@ -375,7 +375,7 @@ ENTRY(feroceon_range_dma_flush_range)
 	mcr p15, 5, r1, c15, c15, 1 @ D clean/inv range top
 	msr cpsr_c, r2 @ restore interrupts
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr
 
 /*
  * dma_map_area(start, size, dir)
@@ -412,7 +412,7 @@ ENDPROC(feroceon_range_dma_map_area)
  * - dir - DMA direction
  */
 ENTRY(feroceon_dma_unmap_area)
-	mov pc, lr
+	ret lr
 ENDPROC(feroceon_dma_unmap_area)
 
 	.globl feroceon_flush_kern_cache_louis
@@ -461,7 +461,7 @@ ENTRY(cpu_feroceon_dcache_clean_area)
 	bhi 1b
 #endif
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr
 
 /* =============================== PageTable ============================== */
 
@@ -490,9 +490,9 @@ ENTRY(cpu_feroceon_switch_mm)
 
 	mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
 	mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
-	mov pc, r2
+	ret r2
 #else
-	mov pc, lr
+	ret lr
 #endif
 
 /*
@@ -512,7 +512,7 @@ ENTRY(cpu_feroceon_set_pte_ext)
 #endif
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 #endif
-	mov pc, lr
+	ret lr
 
 /* Suspend/resume support: taken from arch/arm/mm/proc-arm926.S */
 .globl cpu_feroceon_suspend_size
@@ -554,7 +554,7 @@ __feroceon_setup:
 	mrc p15, 0, r0, c1, c0 @ get control register v4
 	bic r0, r0, r5
 	orr r0, r0, r6
-	mov pc, lr
+	ret lr
 	.size __feroceon_setup, . - __feroceon_setup
 
 	/*
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 40acba595731..53d393455f13 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -45,7 +45,7 @@
  * cpu_mohawk_proc_init()
  */
 ENTRY(cpu_mohawk_proc_init)
-	mov pc, lr
+	ret lr
 
 /*
  * cpu_mohawk_proc_fin()
@@ -55,7 +55,7 @@ ENTRY(cpu_mohawk_proc_fin)
 	bic r0, r0, #0x1800 @ ...iz...........
 	bic r0, r0, #0x0006 @ .............ca.
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
-	mov pc, lr
+	ret lr
 
 /*
  * cpu_mohawk_reset(loc)
@@ -79,7 +79,7 @@ ENTRY(cpu_mohawk_reset)
 	bic ip, ip, #0x0007 @ .............cam
 	bic ip, ip, #0x1100 @ ...i...s........
 	mcr p15, 0, ip, c1, c0, 0 @ ctrl register
-	mov pc, r0
+	ret r0
 ENDPROC(cpu_mohawk_reset)
 	.popsection
 
@@ -93,7 +93,7 @@ ENTRY(cpu_mohawk_do_idle)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
 	mcr p15, 0, r0, c7, c0, 4 @ wait for interrupt
-	mov pc, lr
+	ret lr
 
 /*
  * flush_icache_all()
@@ -103,7 +103,7 @@ ENTRY(cpu_mohawk_do_idle)
 ENTRY(mohawk_flush_icache_all)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
-	mov pc, lr
+	ret lr
 ENDPROC(mohawk_flush_icache_all)
 
 /*
@@ -128,7 +128,7 @@ __flush_whole_cache:
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
 	mcrne p15, 0, ip, c7, c10, 0 @ drain write buffer
-	mov pc, lr
+	ret lr
 
 /*
  * flush_user_cache_range(start, end, flags)
@@ -158,7 +158,7 @@ ENTRY(mohawk_flush_user_cache_range)
 	blo 1b
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr
 
 /*
  * coherent_kern_range(start, end)
@@ -194,7 +194,7 @@ ENTRY(mohawk_coherent_user_range)
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 	mov r0, #0
-	mov pc, lr
+	ret lr
 
 /*
  * flush_kern_dcache_area(void *addr, size_t size)
@@ -214,7 +214,7 @@ ENTRY(mohawk_flush_kern_dcache_area)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr
 
 /*
  * dma_inv_range(start, end)
@@ -240,7 +240,7 @@ mohawk_dma_inv_range:
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr
 
 /*
  * dma_clean_range(start, end)
@@ -259,7 +259,7 @@ mohawk_dma_clean_range:
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr
 
 /*
  * dma_flush_range(start, end)
@@ -277,7 +277,7 @@ ENTRY(mohawk_dma_flush_range)
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr
 
 /*
  * dma_map_area(start, size, dir)
@@ -300,7 +300,7 @@ ENDPROC(mohawk_dma_map_area)
  * - dir - DMA direction
  */
 ENTRY(mohawk_dma_unmap_area)
-	mov pc, lr
+	ret lr
 ENDPROC(mohawk_dma_unmap_area)
 
 	.globl mohawk_flush_kern_cache_louis
@@ -315,7 +315,7 @@ ENTRY(cpu_mohawk_dcache_clean_area)
 	subs r1, r1, #CACHE_DLINESIZE
 	bhi 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr
 
 /*
  * cpu_mohawk_switch_mm(pgd)
@@ -333,7 +333,7 @@ ENTRY(cpu_mohawk_switch_mm)
 	orr r0, r0, #0x18 @ cache the page table in L2
 	mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
 	mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
-	mov pc, lr
+	ret lr
 
 /*
  * cpu_mohawk_set_pte_ext(ptep, pte, ext)
@@ -346,7 +346,7 @@ ENTRY(cpu_mohawk_set_pte_ext)
 	mov r0, r0
 	mcr p15, 0, r0, c7, c10, 1 @ clean D entry
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr
 
 .globl cpu_mohawk_suspend_size
 .equ cpu_mohawk_suspend_size, 4 * 6
@@ -400,7 +400,7 @@ __mohawk_setup:
 	mrc p15, 0, r0, c1, c0 @ get control register
 	bic r0, r0, r5
 	orr r0, r0, r6
-	mov pc, lr
+	ret lr
 
 	.size __mohawk_setup, . - __mohawk_setup
 
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index c45319c8f1d9..8008a0461cf5 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -38,7 +38,7 @@
 ENTRY(cpu_sa110_proc_init)
 	mov r0, #0
 	mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching
-	mov pc, lr
+	ret lr
 
 /*
  * cpu_sa110_proc_fin()
@@ -50,7 +50,7 @@ ENTRY(cpu_sa110_proc_fin)
 	bic r0, r0, #0x1000 @ ...i............
 	bic r0, r0, #0x000e @ ............wca.
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
-	mov pc, lr
+	ret lr
 
 /*
  * cpu_sa110_reset(loc)
@@ -74,7 +74,7 @@ ENTRY(cpu_sa110_reset)
 	bic ip, ip, #0x000f @ ............wcam
 	bic ip, ip, #0x1100 @ ...i...s........
 	mcr p15, 0, ip, c1, c0, 0 @ ctrl register
-	mov pc, r0
+	ret r0
ENDPROC(cpu_sa110_reset)
 	.popsection
 
@@ -103,7 +103,7 @@ ENTRY(cpu_sa110_do_idle)
 	mov r0, r0 @ safety
 	mov r0, r0 @ safety
 	mcr p15, 0, r0, c15, c1, 2 @ enable clock switching
-	mov pc, lr
+	ret lr
 
 /* ================================= CACHE ================================ */
 
@@ -121,7 +121,7 @@ ENTRY(cpu_sa110_dcache_clean_area)
 	add r0, r0, #DCACHELINESIZE
 	subs r1, r1, #DCACHELINESIZE
 	bhi 1b
-	mov pc, lr
+	ret lr
 
 /* =============================== PageTable ============================== */
 
@@ -141,7 +141,7 @@ ENTRY(cpu_sa110_switch_mm)
 	mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
 	ldr pc, [sp], #4
 #else
-	mov pc, lr
+	ret lr
 #endif
 
 /*
@@ -157,7 +157,7 @@ ENTRY(cpu_sa110_set_pte_ext)
 	mcr p15, 0, r0, c7, c10, 1 @ clean D entry
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 #endif
-	mov pc, lr
+	ret lr
 
 	.type __sa110_setup, #function
 __sa110_setup:
@@ -173,7 +173,7 @@ __sa110_setup:
 	mrc p15, 0, r0, c1, c0 @ get control register v4
 	bic r0, r0, r5
 	orr r0, r0, r6
-	mov pc, lr
+	ret lr
 	.size __sa110_setup, . - __sa110_setup
 
 	/*
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 09d241ae2dbe..89f97ac648a9 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -43,7 +43,7 @@ ENTRY(cpu_sa1100_proc_init)
 	mov r0, #0
 	mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching
 	mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland
-	mov pc, lr
+	ret lr
 
 /*
  * cpu_sa1100_proc_fin()
@@ -58,7 +58,7 @@ ENTRY(cpu_sa1100_proc_fin)
 	bic r0, r0, #0x1000 @ ...i............
 	bic r0, r0, #0x000e @ ............wca.
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
-	mov pc, lr
+	ret lr
 
 /*
  * cpu_sa1100_reset(loc)
@@ -82,7 +82,7 @@ ENTRY(cpu_sa1100_reset)
 	bic ip, ip, #0x000f @ ............wcam
 	bic ip, ip, #0x1100 @ ...i...s........
 	mcr p15, 0, ip, c1, c0, 0 @ ctrl register
-	mov pc, r0
+	ret r0
ENDPROC(cpu_sa1100_reset)
 	.popsection
 
@@ -113,7 +113,7 @@ ENTRY(cpu_sa1100_do_idle)
 	mcr p15, 0, r0, c15, c8, 2 @ wait for interrupt
 	mov r0, r0 @ safety
 	mcr p15, 0, r0, c15, c1, 2 @ enable clock switching
-	mov pc, lr
+	ret lr
 
 /* ================================= CACHE ================================ */
 
@@ -131,7 +131,7 @@ ENTRY(cpu_sa1100_dcache_clean_area)
 	add r0, r0, #DCACHELINESIZE
 	subs r1, r1, #DCACHELINESIZE
 	bhi 1b
-	mov pc, lr
+	ret lr
 
 /* =============================== PageTable ============================== */
 
@@ -152,7 +152,7 @@ ENTRY(cpu_sa1100_switch_mm)
 	mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
 	ldr pc, [sp], #4
 #else
-	mov pc, lr
+	ret lr
 #endif
 
 /*
@@ -168,7 +168,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
 	mcr p15, 0, r0, c7, c10, 1 @ clean D entry
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
 #endif
-	mov pc, lr
+	ret lr
 
 .globl cpu_sa1100_suspend_size
 .equ cpu_sa1100_suspend_size, 4 * 3
@@ -211,7 +211,7 @@ __sa1100_setup:
 	mrc p15, 0, r0, c1, c0 @ get control register v4
 	bic r0, r0, r5
 	orr r0, r0, r6
-	mov pc, lr
+	ret lr
 	.size __sa1100_setup, . - __sa1100_setup
 
 	/*
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 32b3558321c4..d0390f4b3f18 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -36,14 +36,14 @@
 #define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S
 
 ENTRY(cpu_v6_proc_init)
-	mov pc, lr
+	ret lr
 
 ENTRY(cpu_v6_proc_fin)
 	mrc p15, 0, r0, c1, c0, 0 @ ctrl register
 	bic r0, r0, #0x1000 @ ...i............
 	bic r0, r0, #0x0006 @ .............ca.
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
-	mov pc, lr
+	ret lr
 
 /*
  * cpu_v6_reset(loc)
@@ -62,7 +62,7 @@ ENTRY(cpu_v6_reset)
 	mcr p15, 0, r1, c1, c0, 0 @ disable MMU
 	mov r1, #0
 	mcr p15, 0, r1, c7, c5, 4 @ ISB
-	mov pc, r0
+	ret r0
ENDPROC(cpu_v6_reset)
 	.popsection
 
@@ -77,14 +77,14 @@ ENTRY(cpu_v6_do_idle)
 	mov r1, #0
 	mcr p15, 0, r1, c7, c10, 4 @ DWB - WFI may enter a low-power mode
 	mcr p15, 0, r1, c7, c0, 4 @ wait for interrupt
-	mov pc, lr
+	ret lr
 
 ENTRY(cpu_v6_dcache_clean_area)
1:	mcr p15, 0, r0, c7, c10, 1 @ clean D entry
 	add r0, r0, #D_CACHE_LINE_SIZE
 	subs r1, r1, #D_CACHE_LINE_SIZE
 	bhi 1b
-	mov pc, lr
+	ret lr
 
 /*
  * cpu_v6_switch_mm(pgd_phys, tsk)
@@ -113,7 +113,7 @@ ENTRY(cpu_v6_switch_mm)
 #endif
 	mcr p15, 0, r1, c13, c0, 1 @ set context ID
 #endif
-	mov pc, lr
+	ret lr
 
 /*
  * cpu_v6_set_pte_ext(ptep, pte, ext)
@@ -131,7 +131,7 @@ ENTRY(cpu_v6_set_pte_ext)
 #ifdef CONFIG_MMU
 	armv6_set_pte_ext cpu_v6
 #endif
-	mov pc, lr
+	ret lr
 
 /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
 .globl cpu_v6_suspend_size
@@ -241,7 +241,7 @@ __v6_setup:
 	mcreq p15, 0, r5, c1, c0, 1 @ write aux control reg
 	orreq r0, r0, #(1 << 21) @ low interrupt latency configuration
 #endif
-	mov pc, lr @ return to head.S:__ret
+	ret lr @ return to head.S:__ret
 
 	/*
 	 * V X F I D LR
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 1f52915f2b28..ed448d8a596b 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -59,7 +59,7 @@ ENTRY(cpu_v7_switch_mm)
 	mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
 	isb
 #endif
-	mov pc, lr
+	bx lr
ENDPROC(cpu_v7_switch_mm)
 
 /*
@@ -106,7 +106,7 @@ ENTRY(cpu_v7_set_pte_ext)
 	ALT_SMP(W(nop))
 	ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
 #endif
-	mov pc, lr
+	bx lr
ENDPROC(cpu_v7_set_pte_ext)
 
 	/*
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 22e3ad63500c..564f4b934ceb 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -19,6 +19,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
+#include <asm/assembler.h>
 
 #define TTB_IRGN_NC (0 << 8)
 #define TTB_IRGN_WBWA (1 << 8)
@@ -61,7 +62,7 @@ ENTRY(cpu_v7_switch_mm)
 	mcrr p15, 0, rpgdl, rpgdh, c2 @ set TTB 0
 	isb
 #endif
-	mov pc, lr
+	ret lr
ENDPROC(cpu_v7_switch_mm)
 
 #ifdef __ARMEB__
@@ -92,7 +93,7 @@ ENTRY(cpu_v7_set_pte_ext)
 	ALT_SMP(W(nop))
 	ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
 #endif
-	mov pc, lr
+	ret lr
ENDPROC(cpu_v7_set_pte_ext)
 
 	/*
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 3db2c2f04a30..71abb60c4222 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -26,7 +26,7 @@
 #endif
 
 ENTRY(cpu_v7_proc_init)
-	mov pc, lr
+	ret lr
ENDPROC(cpu_v7_proc_init)
 
 ENTRY(cpu_v7_proc_fin)
@@ -34,7 +34,7 @@ ENTRY(cpu_v7_proc_fin)
 	bic r0, r0, #0x1000 @ ...i............
 	bic r0, r0, #0x0006 @ .............ca.
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
-	mov pc, lr
+	ret lr
ENDPROC(cpu_v7_proc_fin)
 
 /*
@@ -71,20 +71,20 @@ ENDPROC(cpu_v7_reset)
 ENTRY(cpu_v7_do_idle)
 	dsb @ WFI may enter a low-power mode
 	wfi
-	mov pc, lr
+	ret lr
ENDPROC(cpu_v7_do_idle)
 
 ENTRY(cpu_v7_dcache_clean_area)
 	ALT_SMP(W(nop)) @ MP extensions imply L1 PTW
 	ALT_UP_B(1f)
-	mov pc, lr
+	ret lr
1:	dcache_line_size r2, r3
2:	mcr p15, 0, r0, c7, c10, 1 @ clean D entry
 	add r0, r0, r2
 	subs r1, r1, r2
 	bhi 2b
 	dsb ishst
-	mov pc, lr
+	ret lr
ENDPROC(cpu_v7_dcache_clean_area)
 
 	string cpu_v7_name, "ARMv7 Processor"
@@ -163,7 +163,7 @@ ENTRY(cpu_pj4b_do_idle)
 	dsb @ WFI may enter a low-power mode
 	wfi
 	dsb @barrier
-	mov pc, lr
+	ret lr
ENDPROC(cpu_pj4b_do_idle)
#else
 	globl_equ cpu_pj4b_do_idle, cpu_v7_do_idle
@@ -407,7 +407,7 @@ __v7_setup:
 	bic r0, r0, r5 @ clear bits them
 	orr r0, r0, r6 @ set them
 	THUMB( orr r0, r0, #1 << 30 ) @ Thumb exceptions
-	mov pc, lr @ return to head.S:__ret
+	ret lr @ return to head.S:__ret
ENDPROC(__v7_setup)
 
 	.align 2
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index 1ca37c72f12f..d1e68b553d3b 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -16,11 +16,11 @@
 #include "proc-macros.S"
 
 ENTRY(cpu_v7m_proc_init)
-	mov pc, lr
+	ret lr
ENDPROC(cpu_v7m_proc_init)
 
 ENTRY(cpu_v7m_proc_fin)
-	mov pc, lr
+	ret lr
ENDPROC(cpu_v7m_proc_fin)
 
 /*
@@ -34,7 +34,7 @@ ENDPROC(cpu_v7m_proc_fin)
  */
 	.align 5
 ENTRY(cpu_v7m_reset)
-	mov pc, r0
+	ret r0
ENDPROC(cpu_v7m_reset)
 
 /*
@@ -46,18 +46,18 @@ ENDPROC(cpu_v7m_reset)
  */
 ENTRY(cpu_v7m_do_idle)
 	wfi
-	mov pc, lr
+	ret lr
ENDPROC(cpu_v7m_do_idle)
 
 ENTRY(cpu_v7m_dcache_clean_area)
-	mov pc, lr
+	ret lr
ENDPROC(cpu_v7m_dcache_clean_area)
 
 /*
  * There is no MMU, so here is nothing to do.
  */
 ENTRY(cpu_v7m_switch_mm)
-	mov pc, lr
+	ret lr
ENDPROC(cpu_v7m_switch_mm)
 
 .globl cpu_v7m_suspend_size
@@ -65,11 +65,11 @@ ENDPROC(cpu_v7m_switch_mm)
 
 #ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_v7m_do_suspend)
-	mov pc, lr
+	ret lr
ENDPROC(cpu_v7m_do_suspend)
 
 ENTRY(cpu_v7m_do_resume)
-	mov pc, lr
+	ret lr
ENDPROC(cpu_v7m_do_resume)
#endif
 
@@ -120,7 +120,7 @@ __v7m_setup:
 	ldr r12, [r0, V7M_SCB_CCR] @ system control register
 	orr r12, #V7M_SCB_CCR_STKALIGN
 	str r12, [r0, V7M_SCB_CCR]
-	mov pc, lr
+	ret lr
ENDPROC(__v7m_setup)
 
 	.align 2
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index dc1645890042..f8acdfece036 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -83,7 +83,7 @@
  * Nothing too exciting at the moment
  */
 ENTRY(cpu_xsc3_proc_init)
-	mov pc, lr
+	ret lr
 
 /*
  * cpu_xsc3_proc_fin()
@@ -93,7 +93,7 @@ ENTRY(cpu_xsc3_proc_fin)
 	bic r0, r0, #0x1800 @ ...IZ...........
 	bic r0, r0, #0x0006 @ .............CA.
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
-	mov pc, lr
+	ret lr
 
 /*
  * cpu_xsc3_reset(loc)
@@ -119,7 +119,7 @@ ENTRY(cpu_xsc3_reset)
 	@ CAUTION: MMU turned off from this point. We count on the pipeline
 	@ already containing those two last instructions to survive.
 	mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs
-	mov pc, r0
+	ret r0
ENDPROC(cpu_xsc3_reset)
 	.popsection
 
@@ -138,7 +138,7 @@ ENDPROC(cpu_xsc3_reset)
 ENTRY(cpu_xsc3_do_idle)
 	mov r0, #1
 	mcr p14, 0, r0, c7, c0, 0 @ go to idle
-	mov pc, lr
+	ret lr
 
 /* ================================= CACHE ================================ */
 
@@ -150,7 +150,7 @@ ENTRY(cpu_xsc3_do_idle)
 ENTRY(xsc3_flush_icache_all)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
-	mov pc, lr
+	ret lr
ENDPROC(xsc3_flush_icache_all)
 
 /*
@@ -176,7 +176,7 @@ __flush_whole_cache:
 	mcrne p15, 0, ip, c7, c5, 0 @ invalidate L1 I cache and BTB
 	mcrne p15, 0, ip, c7, c10, 4 @ data write barrier
 	mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush
-	mov pc, lr
+	ret lr
 
 /*
  * flush_user_cache_range(start, end, vm_flags)
@@ -205,7 +205,7 @@ ENTRY(xsc3_flush_user_cache_range)
 	mcrne p15, 0, ip, c7, c5, 6 @ invalidate BTB
 	mcrne p15, 0, ip, c7, c10, 4 @ data write barrier
 	mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush
-	mov pc, lr
+	ret lr
 
 /*
  * coherent_kern_range(start, end)
@@ -232,7 +232,7 @@ ENTRY(xsc3_coherent_user_range)
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate L1 I cache and BTB
 	mcr p15, 0, r0, c7, c10, 4 @ data write barrier
 	mcr p15, 0, r0, c7, c5, 4 @ prefetch flush
-	mov pc, lr
+	ret lr
 
 /*
  * flush_kern_dcache_area(void *addr, size_t size)
@@ -253,7 +253,7 @@ ENTRY(xsc3_flush_kern_dcache_area)
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate L1 I cache and BTB
 	mcr p15, 0, r0, c7, c10, 4 @ data write barrier
 	mcr p15, 0, r0, c7, c5, 4 @ prefetch flush
-	mov pc, lr
+	ret lr
 
 /*
  * dma_inv_range(start, end)
@@ -277,7 +277,7 @@ xsc3_dma_inv_range:
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ data write barrier
-	mov pc, lr
+	ret lr
 
 /*
  * dma_clean_range(start, end)
@@ -294,7 +294,7 @@ xsc3_dma_clean_range:
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ data write barrier
-	mov pc, lr
+	ret lr
 
 /*
  * dma_flush_range(start, end)
@@ -311,7 +311,7 @@ ENTRY(xsc3_dma_flush_range)
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ data write barrier
-	mov pc, lr
+	ret lr
 
 /*
  * dma_map_area(start, size, dir)
@@ -334,7 +334,7 @@ ENDPROC(xsc3_dma_map_area)
  * - dir - DMA direction
  */
 ENTRY(xsc3_dma_unmap_area)
-	mov pc, lr
+	ret lr
ENDPROC(xsc3_dma_unmap_area)
 
 	.globl xsc3_flush_kern_cache_louis
@@ -348,7 +348,7 @@ ENTRY(cpu_xsc3_dcache_clean_area)
 	add r0, r0, #CACHELINESIZE
 	subs r1, r1, #CACHELINESIZE
 	bhi 1b
-	mov pc, lr
+	ret lr
 
 /* =============================== PageTable ============================== */
 
@@ -406,7 +406,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
 	orr r2, r2, ip
 
 	xscale_set_pte_ext_epilogue
-	mov pc, lr
+	ret lr
 
 	.ltorg
 	.align
@@ -478,7 +478,7 @@ __xsc3_setup:
 	bic r0, r0, r5 @ ..V. ..R. .... ..A.
 	orr r0, r0, r6 @ ..VI Z..S .... .C.M (mmu)
 	@ ...I Z..S .... .... (uc)
-	mov pc, lr
+	ret lr
 
 	.size __xsc3_setup, . - __xsc3_setup
 
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index d19b1cfcad91..23259f104c66 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -118,7 +118,7 @@ ENTRY(cpu_xscale_proc_init)
 	mrc p15, 0, r1, c1, c0, 1
 	bic r1, r1, #1
 	mcr p15, 0, r1, c1, c0, 1
-	mov pc, lr
+	ret lr
 
 /*
  * cpu_xscale_proc_fin()
@@ -128,7 +128,7 @@ ENTRY(cpu_xscale_proc_fin)
 	bic r0, r0, #0x1800 @ ...IZ...........
 	bic r0, r0, #0x0006 @ .............CA.
 	mcr p15, 0, r0, c1, c0, 0 @ disable caches
-	mov pc, lr
+	ret lr
 
 /*
  * cpu_xscale_reset(loc)
@@ -160,7 +160,7 @@ ENTRY(cpu_xscale_reset)
 	@ CAUTION: MMU turned off from this point. We count on the pipeline
 	@ already containing those two last instructions to survive.
 	mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
-	mov pc, r0
+	ret r0
ENDPROC(cpu_xscale_reset)
 	.popsection
 
@@ -179,7 +179,7 @@ ENDPROC(cpu_xscale_reset)
 ENTRY(cpu_xscale_do_idle)
 	mov r0, #1
 	mcr p14, 0, r0, c7, c0, 0 @ Go to IDLE
-	mov pc, lr
+	ret lr
 
 /* ================================= CACHE ================================ */
 
@@ -191,7 +191,7 @@ ENTRY(cpu_xscale_do_idle)
 ENTRY(xscale_flush_icache_all)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
-	mov pc, lr
+	ret lr
ENDPROC(xscale_flush_icache_all)
 
 /*
@@ -216,7 +216,7 @@ __flush_whole_cache:
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB
 	mcrne p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
-	mov pc, lr
+	ret lr
 
 /*
  * flush_user_cache_range(start, end, vm_flags)
@@ -245,7 +245,7 @@ ENTRY(xscale_flush_user_cache_range)
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c5, 6 @ Invalidate BTB
 	mcrne p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
-	mov pc, lr
+	ret lr
 
 /*
  * coherent_kern_range(start, end)
@@ -269,7 +269,7 @@ ENTRY(xscale_coherent_kern_range)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB
 	mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
-	mov pc, lr
+	ret lr
 
 /*
  * coherent_user_range(start, end)
@@ -291,7 +291,7 @@ ENTRY(xscale_coherent_user_range)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 6 @ Invalidate BTB
 	mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
-	mov pc, lr
+	ret lr
 
 /*
  * flush_kern_dcache_area(void *addr, size_t size)
@@ -312,7 +312,7 @@ ENTRY(xscale_flush_kern_dcache_area)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB
 	mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
-	mov pc, lr
+	ret lr
 
 /*
  * dma_inv_range(start, end)
@@ -336,7 +336,7 @@ xscale_dma_inv_range:
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
-	mov pc, lr
+	ret lr
 
 /*
  * dma_clean_range(start, end)
@@ -353,7 +353,7 @@ xscale_dma_clean_range:
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
-	mov pc, lr
+	ret lr
 
 /*
  * dma_flush_range(start, end)
@@ -371,7 +371,7 @@ ENTRY(xscale_dma_flush_range)
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
-	mov pc, lr
+	ret lr
 
 /*
  * dma_map_area(start, size, dir)
@@ -407,7 +407,7 @@ ENDPROC(xscale_80200_A0_A1_dma_map_area)
  * - dir - DMA direction
  */
 ENTRY(xscale_dma_unmap_area)
-	mov pc, lr
+	ret lr
ENDPROC(xscale_dma_unmap_area)
 
 	.globl xscale_flush_kern_cache_louis
@@ -458,7 +458,7 @@ ENTRY(cpu_xscale_dcache_clean_area)
 	add r0, r0, #CACHELINESIZE
 	subs r1, r1, #CACHELINESIZE
 	bhi 1b
-	mov pc, lr
+	ret lr
 
 /* =============================== PageTable ============================== */
 
@@ -521,7 +521,7 @@ ENTRY(cpu_xscale_set_pte_ext)
 	orr r2, r2, ip
 
 	xscale_set_pte_ext_epilogue
-	mov pc, lr
+	ret lr
 
 	.ltorg
 	.align
@@ -572,7 +572,7 @@ __xscale_setup:
 	mrc p15, 0, r0, c1, c0, 0 @ get control register
 	bic r0, r0, r5
 	orr r0, r0, r6
-	mov pc, lr
+	ret lr
 	.size __xscale_setup, . - __xscale_setup
 
 	/*
diff --git a/arch/arm/mm/tlb-fa.S b/arch/arm/mm/tlb-fa.S
index d3ddcf9a76ca..d2d9ecbe0aac 100644
--- a/arch/arm/mm/tlb-fa.S
+++ b/arch/arm/mm/tlb-fa.S
@@ -18,6 +18,7 @@
  */
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <asm/assembler.h>
 #include <asm/asm-offsets.h>
 #include <asm/tlbflush.h>
 #include "proc-macros.S"
@@ -37,7 +38,7 @@ ENTRY(fa_flush_user_tlb_range)
 	vma_vm_mm ip, r2
 	act_mm r3 @ get current->active_mm
 	eors r3, ip, r3 @ == mm ?
-	movne pc, lr @ no, we dont do anything
+	retne lr @ no, we dont do anything
 	mov r3, #0
 	mcr p15, 0, r3, c7, c10, 4 @ drain WB
 	bic r0, r0, #0x0ff
@@ -47,7 +48,7 @@ ENTRY(fa_flush_user_tlb_range)
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r3, c7, c10, 4 @ data write barrier
-	mov pc, lr
+	ret lr
 
 
 ENTRY(fa_flush_kern_tlb_range)
@@ -61,7 +62,7 @@ ENTRY(fa_flush_kern_tlb_range)
 	blo 1b
 	mcr p15, 0, r3, c7, c10, 4 @ data write barrier
 	mcr p15, 0, r3, c7, c5, 4 @ prefetch flush (isb)
-	mov pc, lr
+	ret lr
 
 	__INITDATA
 
diff --git a/arch/arm/mm/tlb-v4.S b/arch/arm/mm/tlb-v4.S
index 17a025ade573..a2b5dca42048 100644
--- a/arch/arm/mm/tlb-v4.S
+++ b/arch/arm/mm/tlb-v4.S
@@ -14,6 +14,7 @@
  */
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <asm/assembler.h>
 #include <asm/asm-offsets.h>
 #include <asm/tlbflush.h>
 #include "proc-macros.S"
@@ -33,7 +34,7 @@ ENTRY(v4_flush_user_tlb_range)
 	vma_vm_mm ip, r2
 	act_mm r3 @ get current->active_mm
 	eors r3, ip, r3 @ == mm ?
-	movne pc, lr @ no, we dont do anything
+	retne lr @ no, we dont do anything
.v4_flush_kern_tlb_range:
 	bic r0, r0, #0x0ff
 	bic r0, r0, #0xf00
@@ -41,7 +42,7 @@ ENTRY(v4_flush_user_tlb_range)
 	add r0, r0, #PAGE_SZ
 	cmp r0, r1
 	blo 1b
-	mov pc, lr
+	ret lr
 
 /*
  * v4_flush_kern_tlb_range(start, end)
diff --git a/arch/arm/mm/tlb-v4wb.S b/arch/arm/mm/tlb-v4wb.S
index c04598fa4d4a..5a093b458dbc 100644
--- a/arch/arm/mm/tlb-v4wb.S
+++ b/arch/arm/mm/tlb-v4wb.S
@@ -14,6 +14,7 @@
  */
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <asm/assembler.h>
 #include <asm/asm-offsets.h>
 #include <asm/tlbflush.h>
 #include "proc-macros.S"
@@ -33,7 +34,7 @@ ENTRY(v4wb_flush_user_tlb_range)
 	vma_vm_mm ip, r2
 	act_mm r3 @ get current->active_mm
 	eors r3, ip, r3 @ == mm ?
-	movne pc, lr @ no, we dont do anything
+	retne lr @ no, we dont do anything
 	vma_vm_flags r2, r2
 	mcr p15, 0, r3, c7, c10, 4 @ drain WB
 	tst r2, #VM_EXEC
@@ -44,7 +45,7 @@ ENTRY(v4wb_flush_user_tlb_range)
 	add r0, r0, #PAGE_SZ
 	cmp r0, r1
 	blo 1b
-	mov pc, lr
+	ret lr
 
 /*
  * v4_flush_kern_tlb_range(start, end)
@@ -65,7 +66,7 @@ ENTRY(v4wb_flush_kern_tlb_range)
 	add r0, r0, #PAGE_SZ
 	cmp r0, r1
 	blo 1b
-	mov pc, lr
+	ret lr
 
 	__INITDATA
 
diff --git a/arch/arm/mm/tlb-v4wbi.S b/arch/arm/mm/tlb-v4wbi.S
index 1f6062b6c1c1..058861548f68 100644
--- a/arch/arm/mm/tlb-v4wbi.S
+++ b/arch/arm/mm/tlb-v4wbi.S
@@ -14,6 +14,7 @@
  */
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <asm/assembler.h>
 #include <asm/asm-offsets.h>
 #include <asm/tlbflush.h>
 #include "proc-macros.S"
@@ -32,7 +33,7 @@ ENTRY(v4wbi_flush_user_tlb_range)
 	vma_vm_mm ip, r2
 	act_mm r3 @ get current->active_mm
 	eors r3, ip, r3 @ == mm ?
-	movne pc, lr @ no, we dont do anything
+	retne lr @ no, we dont do anything
 	mov r3, #0
 	mcr p15, 0, r3, c7, c10, 4 @ drain WB
 	vma_vm_flags r2, r2
@@ -44,7 +45,7 @@ ENTRY(v4wbi_flush_user_tlb_range)
 	add r0, r0, #PAGE_SZ
 	cmp r0, r1
 	blo 1b
-	mov pc, lr
+	ret lr
 
 ENTRY(v4wbi_flush_kern_tlb_range)
 	mov r3, #0
@@ -56,7 +57,7 @@ ENTRY(v4wbi_flush_kern_tlb_range)
 	add r0, r0, #PAGE_SZ
 	cmp r0, r1
 	blo 1b
-	mov pc, lr
+	ret lr
 
 	__INITDATA
 
diff --git a/arch/arm/mm/tlb-v6.S b/arch/arm/mm/tlb-v6.S
index eca07f550a0b..6f689be638bd 100644
--- a/arch/arm/mm/tlb-v6.S
+++ b/arch/arm/mm/tlb-v6.S
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
+#include <asm/assembler.h>
 #include <asm/page.h>
 #include <asm/tlbflush.h>
 #include "proc-macros.S"
@@ -55,7 +56,7 @@ ENTRY(v6wbi_flush_user_tlb_range)
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, ip, c7, c10, 4 @ data synchronization barrier
-	mov pc, lr
+	ret lr
 
 /*
  * v6wbi_flush_kern_tlb_range(start,end)
@@ -84,7 +85,7 @@ ENTRY(v6wbi_flush_kern_tlb_range)
 	blo 1b
 	mcr p15, 0, r2, c7, c10, 4 @ data synchronization barrier
 	mcr p15, 0, r2, c7, c5, 4 @ prefetch flush (isb)
-	mov pc, lr
+	ret lr
 
 	__INIT
 
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index 355308767bae..e5101a3bc57c 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -57,7 +57,7 @@ ENTRY(v7wbi_flush_user_tlb_range)
 	cmp r0, r1
 	blo 1b
 	dsb ish
-	mov pc, lr
+	ret lr
ENDPROC(v7wbi_flush_user_tlb_range)
 
 /*
@@ -86,7 +86,7 @@ ENTRY(v7wbi_flush_kern_tlb_range)
 	blo 1b
 	dsb ish
 	isb
-	mov pc, lr
+	ret lr
ENDPROC(v7wbi_flush_kern_tlb_range)
 
 	__INIT
diff --git a/arch/arm/nwfpe/entry.S b/arch/arm/nwfpe/entry.S
index d18dde95b8aa..5d65be1f1e8a 100644
--- a/arch/arm/nwfpe/entry.S
+++ b/arch/arm/nwfpe/entry.S
@@ -19,7 +19,7 @@
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
-
+#include <asm/assembler.h>
 #include <asm/opcodes.h>
 
 /* This is the kernel's entry point into the floating point emulator.
@@ -92,7 +92,7 @@ emulate:
 	mov r0, r6 @ prepare for EmulateAll()
 	bl EmulateAll @ emulate the instruction
 	cmp r0, #0 @ was emulation successful
-	moveq pc, r4 @ no, return failure
+	reteq r4 @ no, return failure
 
next:
.Lx1:	ldrt r6, [r5], #4 @ get the next instruction and
@@ -102,7 +102,7 @@ next:
 	teq r2, #0x0C000000
 	teqne r2, #0x0D000000
 	teqne r2, #0x0E000000
-	movne pc, r9 @ return ok if not a fp insn
+	retne r9 @ return ok if not a fp insn
 
 	str r5, [sp, #S_PC] @ update PC copy in regs
 
@@ -115,7 +115,7 @@ next:
 	@ plain LDR instruction. Weird, but it seems harmless.
 	.pushsection .fixup,"ax"
 	.align 2
-.Lfix:	mov pc, r9 @ let the user eat segfaults
+.Lfix:	ret r9 @ let the user eat segfaults
 	.popsection
 
 	.pushsection __ex_table,"a"
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index fe6ca574d093..2e78760f3495 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -34,7 +34,7 @@ ENDPROC(do_vfp)
 
 ENTRY(vfp_null_entry)
 	dec_preempt_count_ti r10, r4
-	mov pc, lr
+	ret lr
ENDPROC(vfp_null_entry)
 
 	.align 2
@@ -49,7 +49,7 @@ ENTRY(vfp_testing_entry)
 	dec_preempt_count_ti r10, r4
 	ldr r0, VFP_arch_address
 	str r0, [r0] @ set to non-zero value
-	mov pc, r9 @ we have handled the fault
+	ret r9 @ we have handled the fault
ENDPROC(vfp_testing_entry)
 
 	.align 2
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index be807625ed8c..cda654cbf2c2 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -183,7 +183,7 @@ vfp_hw_state_valid:
 	@ always subtract 4 from the following
 	@ instruction address.
 	dec_preempt_count_ti r10, r4
-	mov pc, r9 @ we think we have handled things
+	ret r9 @ we think we have handled things
 
 
look_for_VFP_exceptions:
@@ -202,7 +202,7 @@ look_for_VFP_exceptions:
 
 	DBGSTR "not VFP"
 	dec_preempt_count_ti r10, r4
-	mov pc, lr
+	ret lr
 
process_exception:
 	DBGSTR "bounce"
@@ -234,7 +234,7 @@ ENTRY(vfp_save_state)
 	VFPFMRX r12, FPINST2 @ FPINST2 if needed (and present)
1:
 	stmia r0, {r1, r2, r3, r12} @ save FPEXC, FPSCR, FPINST, FPINST2
-	mov pc, lr
+	ret lr
ENDPROC(vfp_save_state)
 
 	.align
@@ -245,7 +245,7 @@ vfp_current_hw_state_address:
 #ifdef CONFIG_THUMB2_KERNEL
 	adr \tmp, 1f
 	add \tmp, \tmp, \base, lsl \shift
-	mov pc, \tmp
+	ret \tmp
 #else
 	add pc, pc, \base, lsl \shift
 	mov r0, r0
@@ -257,10 +257,10 @@ ENTRY(vfp_get_float)
 	tbl_branch r0, r3, #3
 	.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1:	mrc p10, 0, r0, c\dr, c0, 0 @ fmrs r0, s0
-	mov pc, lr
+	ret lr
 	.org 1b + 8
1:	mrc p10, 0, r0, c\dr, c0, 4 @ fmrs r0, s1
-	mov pc, lr
+	ret lr
 	.org 1b + 8
 	.endr
ENDPROC(vfp_get_float)
@@ -269,10 +269,10 @@ ENTRY(vfp_put_float)
 	tbl_branch r1, r3, #3
 	.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1:	mcr p10, 0, r0, c\dr, c0, 0 @ fmsr r0, s0
-	mov pc, lr
+	ret lr
 	.org 1b + 8
1:	mcr p10, 0, r0, c\dr, c0, 4 @ fmsr r0, s1
-	mov pc, lr
+	ret lr
 	.org 1b + 8
 	.endr
ENDPROC(vfp_put_float)
@@ -281,14 +281,14 @@ ENTRY(vfp_get_double)
 	tbl_branch r0, r3, #3
 	.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1:	fmrrd r0, r1, d\dr
-	mov pc, lr
+	ret lr
 	.org 1b + 8
 	.endr
 #ifdef CONFIG_VFPv3
 	@ d16 - d31 registers
 	.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1:	mrrc p11, 3, r0, r1, c\dr @ fmrrd r0, r1, d\dr
-	mov pc, lr
+	ret lr
 	.org 1b + 8
 	.endr
 #endif
@@ -296,21 +296,21 @@ ENTRY(vfp_get_double)
 	@ virtual register 16 (or 32 if VFPv3) for compare with zero
 	mov r0, #0
 	mov r1, #0
-	mov pc, lr
+	ret lr
ENDPROC(vfp_get_double)
 
ENTRY(vfp_put_double)
 	tbl_branch r2, r3, #3
 	.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1:	fmdrr d\dr, r0, r1
-	mov pc, lr
+	ret lr
 	.org 1b + 8
 	.endr
 #ifdef CONFIG_VFPv3
 	@ d16 - d31 registers
 	.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1:	mcrr p11, 3, r0, r1, c\dr @ fmdrr r0, r1, d\dr
-	mov pc, lr
+	ret lr
 	.org 1b + 8
 	.endr
 #endif
diff --git a/arch/arm/xen/hypercall.S b/arch/arm/xen/hypercall.S
index 44e3a5f10c4c..f00e08075938 100644
--- a/arch/arm/xen/hypercall.S
+++ b/arch/arm/xen/hypercall.S
@@ -58,7 +58,7 @@
ENTRY(HYPERVISOR_##hypercall) \
 	mov r12, #__HYPERVISOR_##hypercall; \
 	__HVC(XEN_IMM); \
-	mov pc, lr; \
+	ret lr; \
ENDPROC(HYPERVISOR_##hypercall)
 
 #define HYPERCALL0 HYPERCALL_SIMPLE
@@ -74,7 +74,7 @@ ENTRY(HYPERVISOR_##hypercall) \
 	mov r12, #__HYPERVISOR_##hypercall; \
 	__HVC(XEN_IMM); \
 	ldm sp!, {r4} \
-	mov pc, lr \
+	ret lr \
ENDPROC(HYPERVISOR_##hypercall)
 
 	.text
@@ -101,5 +101,5 @@ ENTRY(privcmd_call)
 	ldr r4, [sp, #4]
 	__HVC(XEN_IMM)
 	ldm sp!, {r4}
-	mov pc, lr
+	ret lr
ENDPROC(privcmd_call);
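
The change above is mechanical: every "mov pc, <reg>" return becomes the "ret <reg>" pseudo-instruction (and "mov<cond> pc, <reg>" becomes "ret<cond> <reg>"), so that one central definition can emit "bx" where interworking matters. That definition is the 21-line arch/arm/include/asm/assembler.h hunk listed in the diffstat but not reproduced in this part of the diff; the sketch below is an approximation of its shape, not the verbatim hunk:

@ Sketch only: one ret\c macro per condition-code suffix, so ret, reteq,
@ retne, ... all assemble. Details are an approximation of assembler.h.
	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
	mov\c	pc, \reg		@ old cores: keep the plain mov encoding
#else
	.ifeqs	"\reg", "lr"
	bx\c	\reg			@ return via lr uses bx for interworking
	.else
	mov\c	pc, \reg		@ other registers keep mov pc, reg
	.endif
#endif
	.endm
	.endr

With such a macro in place, a site like cpu_v7_do_idle simply ends in "ret lr" and assembles to "bx lr" on ARMv6+ while still building as "mov pc, lr" for pre-v6 targets.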