author		Catalin Marinas <catalin.marinas@arm.com>	2008-08-28 06:22:32 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2008-09-01 07:06:34 -0400
commit		93ed3970114983543bbebd195bef65db84444ea2 (patch)
tree		9df88b61a2a7b3cc493c6cfc5f4848448250f6b5 /arch/arm/kernel
parent		8d5796d2ec6b5a4e7a52861144e63af438d6f8f7 (diff)
[ARM] 5227/1: Add the ENDPROC declarations to the .S files
This declaration specifies the "function" type and size for various assembly
functions, mainly needed for generating the correct branch instructions in
Thumb-2.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
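For context, ENDPROC(name) pairs with ENTRY(name) and, roughly speaking, marks the
symbol as a function and records its size. The effect is along these lines (a sketch
of what the macro does, not the exact definition from include/linux/linkage.h):

	ENTRY(printhex8)		@ declares and aligns the global label
		mov	r1, #8
		b	printhex
	ENDPROC(printhex8)		@ roughly: .type printhex8, %function
					@          .size printhex8, . - printhex8

Giving the symbol the "function" type is what lets the assembler and linker emit the
correct (interworking-aware) branch instructions for Thumb-2.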
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--	arch/arm/kernel/debug.S        |  5
-rw-r--r--	arch/arm/kernel/entry-armv.S   | 16
-rw-r--r--	arch/arm/kernel/entry-common.S | 25
-rw-r--r--	arch/arm/kernel/head-common.S  | 19
-rw-r--r--	arch/arm/kernel/head-nommu.S   |  4
-rw-r--r--	arch/arm/kernel/head.S         | 12
6 files changed, 62 insertions(+), 19 deletions(-)
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index 9550ff0ddde4..f53c58290543 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -89,10 +89,12 @@
 ENTRY(printhex8)
 		mov	r1, #8
 		b	printhex
+ENDPROC(printhex8)
 
 ENTRY(printhex4)
 		mov	r1, #4
 		b	printhex
+ENDPROC(printhex4)
 
 ENTRY(printhex2)
 		mov	r1, #2
@@ -110,6 +112,7 @@ printhex: adr r2, hexbuf
 		bne	1b
 		mov	r0, r2
 		b	printascii
+ENDPROC(printhex2)
 
 		.ltorg
 
@@ -127,11 +130,13 @@ ENTRY(printascii)
 		teqne	r1, #0
 		bne	1b
 		mov	pc, lr
+ENDPROC(printascii)
 
 ENTRY(printch)
 		addruart r3
 		mov	r1, r0
 		mov	r0, #0
 		b	1b
+ENDPROC(printch)
 
 hexbuf:		.space 16
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 617e509d60df..77b047475539 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -76,14 +76,17 @@
 __pabt_invalid:
 	inv_entry BAD_PREFETCH
 	b	common_invalid
+ENDPROC(__pabt_invalid)
 
 __dabt_invalid:
 	inv_entry BAD_DATA
 	b	common_invalid
+ENDPROC(__dabt_invalid)
 
 __irq_invalid:
 	inv_entry BAD_IRQ
 	b	common_invalid
+ENDPROC(__irq_invalid)
 
 __und_invalid:
 	inv_entry BAD_UNDEFINSTR
@@ -107,6 +110,7 @@ common_invalid:
 
 	mov	r0, sp
 	b	bad_mode
+ENDPROC(__und_invalid)
 
 /*
  * SVC mode handlers
@@ -192,6 +196,7 @@ __dabt_svc:
 	ldr	r0, [sp, #S_PSR]
 	msr	spsr_cxsf, r0
 	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+ENDPROC(__dabt_svc)
 
 	.align	5
 __irq_svc:
@@ -223,6 +228,7 @@ __irq_svc:
 	bleq	trace_hardirqs_on
 #endif
 	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+ENDPROC(__irq_svc)
 
 	.ltorg
 
@@ -272,6 +278,7 @@ __und_svc:
 	ldr	lr, [sp, #S_PSR]		@ Get SVC cpsr
 	msr	spsr_cxsf, lr
 	ldmia	sp, {r0 - pc}^			@ Restore SVC registers
+ENDPROC(__und_svc)
 
 	.align	5
 __pabt_svc:
@@ -313,6 +320,7 @@ __pabt_svc:
 	ldr	r0, [sp, #S_PSR]
 	msr	spsr_cxsf, r0
 	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+ENDPROC(__pabt_svc)
 
 	.align	5
 .LCcralign:
@@ -412,6 +420,7 @@ __dabt_usr:
 	mov	r2, sp
 	adr	lr, ret_from_exception
 	b	do_DataAbort
+ENDPROC(__dabt_usr)
 
 	.align	5
 __irq_usr:
@@ -441,6 +450,7 @@ __irq_usr:
 
 	mov	why, #0
 	b	ret_to_user
+ENDPROC(__irq_usr)
 
 	.ltorg
 
@@ -474,6 +484,7 @@ __und_usr:
 #else
 	b	__und_usr_unknown
 #endif
+ENDPROC(__und_usr)
 
 	@
 	@ fallthrough to call_fpe
@@ -642,6 +653,7 @@ __und_usr_unknown:
 	mov	r0, sp
 	adr	lr, ret_from_exception
 	b	do_undefinstr
+ENDPROC(__und_usr_unknown)
 
 	.align	5
 __pabt_usr:
@@ -666,6 +678,8 @@ ENTRY(ret_from_exception)
 	get_thread_info tsk
 	mov	why, #0
 	b	ret_to_user
+ENDPROC(__pabt_usr)
+ENDPROC(ret_from_exception)
 
 /*
  * Register switch for ARMv3 and ARMv4 processors
@@ -702,6 +716,7 @@ ENTRY(__switch_to)
 	bl	atomic_notifier_call_chain
 	mov	r0, r5
 	ldmia	r4, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
+ENDPROC(__switch_to)
 
 	__INIT
 
@@ -1029,6 +1044,7 @@ vector_\name:
 	mov	r0, sp
 	ldr	lr, [pc, lr, lsl #2]
 	movs	pc, lr				@ branch to handler in SVC mode
+ENDPROC(vector_\name)
 	.endm
 
 	.globl	__stubs_start
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 060d7e2e9f64..3aa14dcc5bab 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -77,6 +77,7 @@ no_work_pending:
 	mov	r0, r0
 	add	sp, sp, #S_FRAME_SIZE - S_PC
 	movs	pc, lr				@ return & move spsr_svc into cpsr
+ENDPROC(ret_to_user)
 
 /*
  * This is how we return from a fork.
@@ -92,7 +93,7 @@ ENTRY(ret_from_fork)
 	mov	r0, #1				@ trace exit [IP = 1]
 	bl	syscall_trace
 	b	ret_slow_syscall
-
+ENDPROC(ret_from_fork)
 
 	.equ NR_syscalls,0
 #define CALL(x) .equ NR_syscalls,NR_syscalls+1
@@ -269,6 +270,7 @@ ENTRY(vector_swi)
 	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
 	bcs	arm_syscall
 	b	sys_ni_syscall			@ not private func
+ENDPROC(vector_swi)
 
 	/*
 	 * This is the really slow path.  We're going to be doing
@@ -326,7 +328,6 @@ ENTRY(sys_call_table)
  */
 @ r0 = syscall number
 @ r8 = syscall table
-	.type	sys_syscall, #function
 sys_syscall:
 	bic	scno, r0, #__NR_OABI_SYSCALL_BASE
 	cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
@@ -338,53 +339,65 @@ sys_syscall:
 	movlo	r3, r4
 	ldrlo	pc, [tbl, scno, lsl #2]
 	b	sys_ni_syscall
+ENDPROC(sys_syscall)
 
 sys_fork_wrapper:
 	add	r0, sp, #S_OFF
 	b	sys_fork
+ENDPROC(sys_fork_wrapper)
 
 sys_vfork_wrapper:
 	add	r0, sp, #S_OFF
 	b	sys_vfork
+ENDPROC(sys_vfork_wrapper)
 
 sys_execve_wrapper:
 	add	r3, sp, #S_OFF
 	b	sys_execve
+ENDPROC(sys_execve_wrapper)
 
 sys_clone_wrapper:
 	add	ip, sp, #S_OFF
 	str	ip, [sp, #4]
 	b	sys_clone
+ENDPROC(sys_clone_wrapper)
 
 sys_sigsuspend_wrapper:
 	add	r3, sp, #S_OFF
 	b	sys_sigsuspend
+ENDPROC(sys_sigsuspend_wrapper)
 
 sys_rt_sigsuspend_wrapper:
 	add	r2, sp, #S_OFF
 	b	sys_rt_sigsuspend
+ENDPROC(sys_rt_sigsuspend_wrapper)
 
 sys_sigreturn_wrapper:
 	add	r0, sp, #S_OFF
 	b	sys_sigreturn
+ENDPROC(sys_sigreturn_wrapper)
 
 sys_rt_sigreturn_wrapper:
 	add	r0, sp, #S_OFF
 	b	sys_rt_sigreturn
+ENDPROC(sys_rt_sigreturn_wrapper)
 
 sys_sigaltstack_wrapper:
 	ldr	r2, [sp, #S_OFF + S_SP]
 	b	do_sigaltstack
+ENDPROC(sys_sigaltstack_wrapper)
 
 sys_statfs64_wrapper:
 	teq	r1, #88
 	moveq	r1, #84
 	b	sys_statfs64
+ENDPROC(sys_statfs64_wrapper)
 
 sys_fstatfs64_wrapper:
 	teq	r1, #88
 	moveq	r1, #84
 	b	sys_fstatfs64
+ENDPROC(sys_fstatfs64_wrapper)
 
 /*
  * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
@@ -402,11 +415,14 @@ sys_mmap2:
 	str	r5, [sp, #4]
 	b	do_mmap2
 #endif
+ENDPROC(sys_mmap2)
 
 ENTRY(pabort_ifar)
 	mrc	p15, 0, r0, cr6, cr0, 2
 ENTRY(pabort_noifar)
 	mov	pc, lr
+ENDPROC(pabort_ifar)
+ENDPROC(pabort_noifar)
 
 #ifdef CONFIG_OABI_COMPAT
 
@@ -417,26 +433,31 @@ ENTRY(pabort_noifar)
 sys_oabi_pread64:
 	stmia	sp, {r3, r4}
 	b	sys_pread64
+ENDPROC(sys_oabi_pread64)
 
 sys_oabi_pwrite64:
 	stmia	sp, {r3, r4}
 	b	sys_pwrite64
+ENDPROC(sys_oabi_pwrite64)
 
 sys_oabi_truncate64:
 	mov	r3, r2
 	mov	r2, r1
 	b	sys_truncate64
+ENDPROC(sys_oabi_truncate64)
 
 sys_oabi_ftruncate64:
 	mov	r3, r2
 	mov	r2, r1
 	b	sys_ftruncate64
+ENDPROC(sys_oabi_ftruncate64)
 
 sys_oabi_readahead:
 	str	r3, [sp]
 	mov	r3, r2
 	mov	r2, r1
 	b	sys_readahead
+ENDPROC(sys_oabi_readahead)
 
 /*
  * Let's declare a second syscall table for old ABI binaries
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 1c3c6ea5f9e7..bde52df1c668 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -36,7 +36,6 @@ __switch_data:
  * r2  = atags pointer
  * r9  = processor ID
  */
-	.type	__mmap_switched, %function
 __mmap_switched:
 	adr	r3, __switch_data + 4
 
@@ -59,6 +58,7 @@ __mmap_switched:
 	bic	r4, r0, #CR_A			@ Clear 'A' bit
 	stmia	r7, {r0, r4}			@ Save control register values
 	b	start_kernel
+ENDPROC(__mmap_switched)
 
 /*
  * Exception handling.  Something went wrong and we can't proceed.  We
@@ -69,8 +69,6 @@ __mmap_switched:
  * and hope for the best (useful if bootloader fails to pass a proper
  * machine ID for example).
  */
-
-	.type	__error_p, %function
 __error_p:
 #ifdef CONFIG_DEBUG_LL
 	adr	r0, str_p1
@@ -84,8 +82,8 @@ str_p1: .asciz "\nError: unrecognized/unsupported processor variant (0x"
 str_p2:	.asciz	").\n"
 	.align
 #endif
+ENDPROC(__error_p)
 
-	.type	__error_a, %function
 __error_a:
 #ifdef CONFIG_DEBUG_LL
 	mov	r4, r1				@ preserve machine ID
@@ -115,13 +113,14 @@ __error_a:
 	adr	r0, str_a3
 	bl	printascii
 	b	__error
+ENDPROC(__error_a)
+
 str_a1:	.asciz	"\nError: unrecognized/unsupported machine ID (r1 = 0x"
 str_a2:	.asciz	").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
 str_a3:	.asciz	"\nPlease check your kernel config and/or bootloader.\n"
 	.align
 #endif
 
-	.type	__error, %function
 __error:
 #ifdef CONFIG_ARCH_RPC
 /*
@@ -138,6 +137,7 @@ __error:
 #endif
 1:	mov	r0, r0
 	b	1b
+ENDPROC(__error)
 
 
 /*
@@ -153,7 +153,6 @@ __error:
  * r5 = proc_info pointer in physical address space
  * r9 = cpuid (preserved)
  */
-	.type	__lookup_processor_type, %function
 __lookup_processor_type:
 	adr	r3, 3f
 	ldmda	r3, {r5 - r7}
@@ -169,6 +168,7 @@ __lookup_processor_type:
 	blo	1b
 	mov	r5, #0				@ unknown processor
 2:	mov	pc, lr
+ENDPROC(__lookup_processor_type)
 
 /*
  * This provides a C-API version of the above function.
@@ -179,6 +179,7 @@ ENTRY(lookup_processor_type)
 	bl	__lookup_processor_type
 	mov	r0, r5
 	ldmfd	sp!, {r4 - r7, r9, pc}
+ENDPROC(lookup_processor_type)
 
 /*
  * Look in <asm/procinfo.h> and arch/arm/kernel/arch.[ch] for
@@ -201,7 +202,6 @@ ENTRY(lookup_processor_type)
  * r3, r4, r6 corrupted
  * r5 = mach_info pointer in physical address space
  */
-	.type	__lookup_machine_type, %function
 __lookup_machine_type:
 	adr	r3, 3b
 	ldmia	r3, {r4, r5, r6}
@@ -216,6 +216,7 @@ __lookup_machine_type:
 	blo	1b
 	mov	r5, #0				@ unknown machine
 2:	mov	pc, lr
+ENDPROC(__lookup_machine_type)
 
 /*
  * This provides a C-API version of the above function.
@@ -226,6 +227,7 @@ ENTRY(lookup_machine_type)
 	bl	__lookup_machine_type
 	mov	r0, r5
 	ldmfd	sp!, {r4 - r6, pc}
+ENDPROC(lookup_machine_type)
 
 /* Determine validity of the r2 atags pointer.  The heuristic requires
  * that the pointer be aligned, in the first 16k of physical RAM and
@@ -239,8 +241,6 @@ ENTRY(lookup_machine_type)
  * r2 either valid atags pointer, or zero
  * r5, r6 corrupted
  */
-
-	.type	__vet_atags, %function
 __vet_atags:
 	tst	r2, #0x3			@ aligned?
 	bne	1f
@@ -257,3 +257,4 @@ __vet_atags:
 
 1:	mov	r2, #0
 	mov	pc, lr
+ENDPROC(__vet_atags)
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 27329bd32037..cc87e1765ed2 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -33,7 +33,6 @@
  *
  */
 	.section ".text.head", "ax"
-	.type	stext, %function
 ENTRY(stext)
 	msr	cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
 						@ and irqs disabled
@@ -53,11 +52,11 @@ ENTRY(stext)
 						@ the initialization is done
 	adr	lr, __after_proc_init		@ return (PIC) address
 	add	pc, r10, #PROCINFO_INITFUNC
+ENDPROC(stext)
 
 /*
  * Set the Control Register and Read the process ID.
  */
-	.type	__after_proc_init, %function
 __after_proc_init:
 #ifdef CONFIG_CPU_CP15
 	mrc	p15, 0, r0, c1, c0, 0		@ read control reg
@@ -85,6 +84,7 @@ __after_proc_init:
 
 	mov	pc, r13				@ clear the BSS and jump
 						@ to start_kernel
+ENDPROC(__after_proc_init)
 	.ltorg
 
 #include "head-common.S"
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index bff4c6e90dd5..21e17dc94cb5 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -75,7 +75,6 @@
  * circumstances, zImage) is for.
  */
 	.section ".text.head", "ax"
-	.type	stext, %function
 ENTRY(stext)
 	msr	cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
 						@ and irqs disabled
@@ -100,9 +99,9 @@ ENTRY(stext)
 						@ mmu has been enabled
 	adr	lr, __enable_mmu		@ return (PIC) address
 	add	pc, r10, #PROCINFO_INITFUNC
+ENDPROC(stext)
 
 #if defined(CONFIG_SMP)
-	.type	secondary_startup, #function
 ENTRY(secondary_startup)
 	/*
 	 * Common entry point for secondary CPUs.
@@ -128,6 +127,7 @@ ENTRY(secondary_startup)
 	adr	lr, __enable_mmu		@ return address
 	add	pc, r10, #PROCINFO_INITFUNC	@ initialise processor
 						@ (return control reg)
+ENDPROC(secondary_startup)
 
 	/*
 	 * r6  = &secondary_data
@@ -136,6 +136,7 @@ ENTRY(__secondary_switched)
 	ldr	sp, [r7, #4]			@ get secondary_data.stack
 	mov	fp, #0
 	b	secondary_start_kernel
+ENDPROC(__secondary_switched)
 
 	.type	__secondary_data, %object
 __secondary_data:
@@ -151,7 +152,6 @@ __secondary_data:
  * this is just loading the page table pointer and domain access
  * registers.
  */
-	.type	__enable_mmu, %function
 __enable_mmu:
 #ifdef CONFIG_ALIGNMENT_TRAP
 	orr	r0, r0, #CR_A
@@ -174,6 +174,7 @@ __enable_mmu:
 	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
 	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
 	b	__turn_mmu_on
+ENDPROC(__enable_mmu)
 
 /*
  * Enable the MMU.  This completely changes the structure of the visible
@@ -187,7 +188,6 @@ __enable_mmu:
  * other registers depend on the function called upon completion
  */
 	.align	5
-	.type	__turn_mmu_on, %function
 __turn_mmu_on:
 	mov	r0, r0
 	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
@@ -195,7 +195,7 @@ __turn_mmu_on:
 	mov	r3, r3
 	mov	r3, r3
 	mov	pc, r13
-
+ENDPROC(__turn_mmu_on)
 
 
 /*
@@ -211,7 +211,6 @@ __turn_mmu_on:
  * r0, r3, r6, r7 corrupted
  * r4 = physical page table address
  */
-	.type	__create_page_tables, %function
 __create_page_tables:
 	pgtbl	r4				@ page table address
 
@@ -325,6 +324,7 @@ __create_page_tables:
 #endif
 #endif
 	mov	pc, lr
+ENDPROC(__create_page_tables)
 	.ltorg
 
 #include "head-common.S"