path: root/arch/xtensa
author     Chris Zankel <chris@zankel.net>  2012-11-16 19:16:20 -0500
committer  Chris Zankel <chris@zankel.net>  2012-12-19 00:10:20 -0500
commit     d1538c4675f37d0eeb34bd38bec798b3b29a5a7e (patch)
tree       4fa2d2c539825ad99f2f677cef686ea0051fbe9f /arch/xtensa
parent     c0226e34a4293dee0e7c5787e1ebfc5ee8b44b7c (diff)
xtensa: provide proper assembler function boundaries with ENDPROC()

Use ENDPROC() to mark the end of assembler functions.

Signed-off-by: Chris Zankel <chris@zankel.net>
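For context: ENTRY() and ENDPROC() are the <linux/linkage.h> macros. ENTRY() emits the global label; ENDPROC() marks the symbol as a function (.type) and records its size (.size), so tools such as objdump and kallsyms see proper function boundaries. A minimal, illustrative sketch of the pattern this patch applies (example_function is a made-up name, and the expansion shown in the comment is simplified):

/* Illustrative only -- not part of the patch.
 * ENDPROC(name) expands, roughly, to:
 *     .type name, @function; .size name, .-name
 */
ENTRY(example_function)

	entry	a1, 16		/* windowed-ABI prologue, as in the routines below */
	movi	a2, 0		/* return value in a2 */
	retw

ENDPROC(example_function)

For the window overflow/underflow vectors, which additionally need 64-byte alignment, the patch adds a local ENTRY_ALIGN64() helper in vectors.S and still closes each handler with ENDPROC().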
Diffstat (limited to 'arch/xtensa')
-rw-r--r--  arch/xtensa/kernel/align.S        |  1
-rw-r--r--  arch/xtensa/kernel/coprocessor.S  | 22
-rw-r--r--  arch/xtensa/kernel/entry.S        | 30
-rw-r--r--  arch/xtensa/kernel/head.S         | 14
-rw-r--r--  arch/xtensa/kernel/vectors.S      | 51
-rw-r--r--  arch/xtensa/lib/checksum.S        |  5
-rw-r--r--  arch/xtensa/mm/misc.S             | 49
7 files changed, 147 insertions(+), 25 deletions(-)
diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S
index 934ae58e2c79..39d2f597382d 100644
--- a/arch/xtensa/kernel/align.S
+++ b/arch/xtensa/kernel/align.S
@@ -450,6 +450,7 @@ ENTRY(fast_unaligned)
 1:	movi	a0, _user_exception
 	jx	a0
 
+ENDPROC(fast_unaligned)
 
 #endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */
 
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index 54c3be313bfa..60bb13589249 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -43,10 +43,13 @@
 /* IO protection is currently unsupported. */
 
 ENTRY(fast_io_protect)
+
 	wsr	a0, excsave1
 	movi	a0, unrecoverable_exception
 	callx0	a0
 
+ENDPROC(fast_io_protect)
+
 #if XTENSA_HAVE_COPROCESSORS
 
 /*
@@ -139,6 +142,7 @@ ENTRY(fast_io_protect)
  */
 
 ENTRY(coprocessor_save)
+
 	entry	a1, 32
 	s32i	a0, a1, 0
 	movi	a0, .Lsave_cp_regs_jump_table
@@ -150,7 +154,10 @@ ENTRY(coprocessor_save)
 1:	l32i	a0, a1, 0
 	retw
 
+ENDPROC(coprocessor_save)
+
 ENTRY(coprocessor_load)
+
 	entry	a1, 32
 	s32i	a0, a1, 0
 	movi	a0, .Lload_cp_regs_jump_table
@@ -162,6 +169,8 @@ ENTRY(coprocessor_load)
 1:	l32i	a0, a1, 0
 	retw
 
+ENDPROC(coprocessor_load)
+
 /*
  * coprocessor_flush(struct task_info*, index)
  *                             a2        a3
@@ -178,6 +187,7 @@ ENTRY(coprocessor_load)
 
 
 ENTRY(coprocessor_flush)
+
 	entry	a1, 32
 	s32i	a0, a1, 0
 	movi	a0, .Lsave_cp_regs_jump_table
@@ -191,6 +201,8 @@ ENTRY(coprocessor_flush)
 1:	l32i	a0, a1, 0
 	retw
 
+ENDPROC(coprocessor_flush)
+
 ENTRY(coprocessor_restore)
 	entry	a1, 32
 	s32i	a0, a1, 0
@@ -205,6 +217,8 @@ ENTRY(coprocessor_restore)
 1:	l32i	a0, a1, 0
 	retw
 
+ENDPROC(coprocessor_restore)
+
 /*
  * Entry condition:
  *
@@ -220,10 +234,12 @@ ENTRY(coprocessor_restore)
  */
 
 ENTRY(fast_coprocessor_double)
+
 	wsr	a0, excsave1
 	movi	a0, unrecoverable_exception
 	callx0	a0
 
+ENDPROC(fast_coprocessor_double)
 
 ENTRY(fast_coprocessor)
 
@@ -327,9 +343,15 @@ ENTRY(fast_coprocessor)
 
 	rfe
 
+ENDPROC(fast_coprocessor)
+
 	.data
+
 ENTRY(coprocessor_owner)
+
 	.fill	XCHAL_CP_MAX, 4, 0
 
+END(coprocessor_owner)
+
 #endif /* XTENSA_HAVE_COPROCESSORS */
 
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 90bfc1dbc13d..41ad9cfe9a2a 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -219,6 +219,7 @@ _user_exception:
 
 	j	common_exception
 
+ENDPROC(user_exception)
 
 /*
  * First-level exit handler for kernel exceptions
@@ -641,6 +642,8 @@ common_exception_exit:
 	l32i	a1, a1, PT_AREG1
 	rfde
 
+ENDPROC(kernel_exception)
+
 /*
  * Debug exception handler.
  *
@@ -701,6 +704,7 @@ ENTRY(debug_exception)
 	/* Debug exception while in exception mode. */
 1:	j	1b	// FIXME!!
 
+ENDPROC(debug_exception)
 
 /*
  * We get here in case of an unrecoverable exception.
@@ -751,6 +755,7 @@ ENTRY(unrecoverable_exception)
 
 1:	j	1b
 
+ENDPROC(unrecoverable_exception)
 
 /* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */
 
@@ -929,6 +934,7 @@ ENTRY(fast_alloca)
 	l32i	a2, a2, PT_AREG2
 	rfe
 
+ENDPROC(fast_alloca)
 
 /*
  * fast system calls.
@@ -966,6 +972,8 @@ ENTRY(fast_syscall_kernel)
 
 	j	kernel_exception
 
+ENDPROC(fast_syscall_kernel)
+
 ENTRY(fast_syscall_user)
 
 	/* Skip syscall. */
@@ -983,6 +991,8 @@ ENTRY(fast_syscall_user)
 
 	j	user_exception
 
+ENDPROC(fast_syscall_user)
+
 ENTRY(fast_syscall_unrecoverable)
 
 	/* Restore all states. */
@@ -995,7 +1005,7 @@ ENTRY(fast_syscall_unrecoverable)
 	movi	a0, unrecoverable_exception
 	callx0	a0
 
-
+ENDPROC(fast_syscall_unrecoverable)
 
 /*
  * sysxtensa syscall handler
@@ -1101,7 +1111,7 @@ CATCH
 	movi	a2, -EINVAL
 	rfe
 
-
+ENDPROC(fast_syscall_xtensa)
 
 
 /* fast_syscall_spill_registers.
@@ -1160,6 +1170,8 @@ ENTRY(fast_syscall_spill_registers)
 	movi	a2, 0
 	rfe
 
+ENDPROC(fast_syscall_spill_registers)
+
 /* Fixup handler.
  *
  * We get here if the spill routine causes an exception, e.g. tlb miss.
@@ -1464,6 +1476,8 @@ ENTRY(_spill_registers)
 	callx0	a0		# should not return
 1:	j	1b
 
+ENDPROC(_spill_registers)
+
 #ifdef CONFIG_MMU
 /*
  * We should never get here. Bail out!
@@ -1475,6 +1489,8 @@ ENTRY(fast_second_level_miss_double_kernel)
 	callx0	a0		# should not return
 1:	j	1b
 
+ENDPROC(fast_second_level_miss_double_kernel)
+
 /* First-level entry handler for user, kernel, and double 2nd-level
  * TLB miss exceptions.  Note that for now, user and kernel miss
  * exceptions share the same entry point and are handled identically.
@@ -1682,6 +1698,7 @@ ENTRY(fast_second_level_miss)
 	j	_kernel_exception
 1:	j	_user_exception
 
+ENDPROC(fast_second_level_miss)
 
 /*
  * StoreProhibitedException
@@ -1777,6 +1794,9 @@ ENTRY(fast_store_prohibited)
 	bbsi.l	a2, PS_UM_BIT, 1f
 	j	_kernel_exception
 1:	j	_user_exception
+
+ENDPROC(fast_store_prohibited)
+
 #endif /* CONFIG_MMU */
 
 /*
@@ -1787,6 +1807,7 @@ ENTRY(fast_store_prohibited)
  */
 
 ENTRY(system_call)
+
 	entry	a1, 32
 
 	/* regs->syscall = regs->areg[2] */
@@ -1831,6 +1852,8 @@ ENTRY(system_call)
 	callx4	a4
 	retw
 
+ENDPROC(system_call)
+
 
 /*
  * Task switch.
@@ -1899,6 +1922,7 @@ ENTRY(_switch_to)
 
 	retw
 
+ENDPROC(_switch_to)
 
 ENTRY(ret_from_fork)
 
@@ -1914,6 +1938,8 @@ ENTRY(ret_from_fork)
 
 	j	common_exception_return
 
+ENDPROC(ret_from_fork)
+
 /*
  * Kernel thread creation helper
  * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index bdc50788f35e..417998c02108 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -47,16 +47,19 @@
  */
 
 	__HEAD
-	.globl _start
-_start:	_j	2f
+ENTRY(_start)
+
+	_j	2f
 	.align	4
 1:	.word	_startup
 2:	l32r	a0, 1b
 	jx	a0
 
+ENDPROC(_start)
+
 	.section .init.text, "ax"
-	.align 4
-_startup:
+
+ENTRY(_startup)
 
 	/* Disable interrupts and exceptions. */
 
@@ -230,6 +233,7 @@ _startup:
 should_never_return:
 	j	should_never_return
 
+ENDPROC(_startup)
 
 /*
  * BSS section
@@ -239,6 +243,8 @@ __PAGE_ALIGNED_BSS
 #ifdef CONFIG_MMU
 ENTRY(swapper_pg_dir)
 	.fill	PAGE_SIZE, 1, 0
+END(swapper_pg_dir)
 #endif
 ENTRY(empty_zero_page)
 	.fill	PAGE_SIZE, 1, 0
+END(empty_zero_page)
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index 3a57c15f7942..9365ee5064d3 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -79,6 +79,8 @@ ENTRY(_UserExceptionVector)
 	l32i	a0, a0, EXC_TABLE_FAST_USER	# load handler
 	jx	a0
 
+ENDPROC(_UserExceptionVector)
+
 /*
  * Kernel exception vector. (Exceptions with PS.UM == 0, PS.EXCM == 0)
  *
@@ -103,6 +105,7 @@ ENTRY(_KernelExceptionVector)
 	l32i	a0, a0, EXC_TABLE_FAST_KERNEL	# load handler address
 	jx	a0
 
+ENDPROC(_KernelExceptionVector)
 
 /*
  * Double exception vector (Exceptions with PS.EXCM == 1)
@@ -344,6 +347,7 @@ ENTRY(_DoubleExceptionVector)
 
 	.end literal_prefix
 
+ENDPROC(_DoubleExceptionVector)
 
 /*
  * Debug interrupt vector
@@ -355,9 +359,11 @@ ENTRY(_DoubleExceptionVector)
 	.section .DebugInterruptVector.text, "ax"
 
 ENTRY(_DebugInterruptVector)
+
 	xsr	a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
 	jx	a0
 
+ENDPROC(_DebugInterruptVector)
 
 
 /* Window overflow and underflow handlers.
@@ -369,38 +375,43 @@ ENTRY(_DebugInterruptVector)
  * we try to access any page that would cause a page fault early.
  */
 
+#define ENTRY_ALIGN64(name)	\
+	.globl name;		\
+	.align 64;		\
+	name:
+
 	.section .WindowVectors.text, "ax"
 
 
 /* 4-Register Window Overflow Vector (Handler) */
 
-	.align 64
-.global _WindowOverflow4
-_WindowOverflow4:
+ENTRY_ALIGN64(_WindowOverflow4)
+
 	s32e	a0, a5, -16
 	s32e	a1, a5, -12
 	s32e	a2, a5, -8
 	s32e	a3, a5, -4
 	rfwo
 
+ENDPROC(_WindowOverflow4)
+
 
 /* 4-Register Window Underflow Vector (Handler) */
 
-	.align 64
-.global _WindowUnderflow4
-_WindowUnderflow4:
+ENTRY_ALIGN64(_WindowUnderflow4)
+
 	l32e	a0, a5, -16
 	l32e	a1, a5, -12
 	l32e	a2, a5, -8
 	l32e	a3, a5, -4
 	rfwu
 
+ENDPROC(_WindowUnderflow4)
 
 /* 8-Register Window Overflow Vector (Handler) */
 
-	.align 64
-.global _WindowOverflow8
-_WindowOverflow8:
+ENTRY_ALIGN64(_WindowOverflow8)
+
 	s32e	a0, a9, -16
 	l32e	a0, a1, -12
 	s32e	a2, a9, -8
@@ -412,11 +423,12 @@ _WindowOverflow8:
 	s32e	a7, a0, -20
 	rfwo
 
+ENDPROC(_WindowOverflow8)
+
 /* 8-Register Window Underflow Vector (Handler) */
 
-	.align 64
-.global _WindowUnderflow8
-_WindowUnderflow8:
+ENTRY_ALIGN64(_WindowUnderflow8)
+
 	l32e	a1, a9, -12
 	l32e	a0, a9, -16
 	l32e	a7, a1, -12
@@ -428,12 +440,12 @@ _WindowUnderflow8:
 	l32e	a7, a7, -20
 	rfwu
 
+ENDPROC(_WindowUnderflow8)
 
 /* 12-Register Window Overflow Vector (Handler) */
 
-	.align 64
-.global _WindowOverflow12
-_WindowOverflow12:
+ENTRY_ALIGN64(_WindowOverflow12)
+
 	s32e	a0, a13, -16
 	l32e	a0, a1, -12
 	s32e	a1, a13, -12
@@ -449,11 +461,12 @@ _WindowOverflow12:
 	s32e	a11, a0, -20
 	rfwo
 
+ENDPROC(_WindowOverflow12)
+
 /* 12-Register Window Underflow Vector (Handler) */
 
-	.align 64
-.global _WindowUnderflow12
-_WindowUnderflow12:
+ENTRY_ALIGN64(_WindowUnderflow12)
+
 	l32e	a1, a13, -12
 	l32e	a0, a13, -16
 	l32e	a11, a1, -12
@@ -469,6 +482,8 @@ _WindowUnderflow12:
 	l32e	a11, a11, -20
 	rfwu
 
+ENDPROC(_WindowUnderflow12)
+
 	.text
 
 
diff --git a/arch/xtensa/lib/checksum.S b/arch/xtensa/lib/checksum.S
index df397f932d0e..0470ca21a359 100644
--- a/arch/xtensa/lib/checksum.S
+++ b/arch/xtensa/lib/checksum.S
@@ -170,7 +170,7 @@ ENTRY(csum_partial)
 3:
 	j	5b		/* branch to handle the remaining byte */
 
-
+ENDPROC(csum_partial)
 
 /*
  * Copy from ds while checksumming, otherwise like csum_partial
@@ -211,6 +211,7 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
  */
 
 ENTRY(csum_partial_copy_generic)
+
 	entry	sp, 32
 	mov	a12, a3
 	mov	a11, a4
@@ -367,6 +368,8 @@ DST( s8i a8, a3, 1 )
 6:
 	j	4b		/* process the possible trailing odd byte */
 
+ENDPROC(csum_partial_copy_generic)
+
 
 # Exception handler:
 .section .fixup, "ax"
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
index b048406d8756..7f7078f57c41 100644
--- a/arch/xtensa/mm/misc.S
+++ b/arch/xtensa/mm/misc.S
@@ -29,6 +29,7 @@
  */
 
 ENTRY(clear_page)
+
 	entry	a1, 16
 
 	movi	a3, 0
@@ -45,6 +46,8 @@ ENTRY(clear_page)
 
 	retw
 
+ENDPROC(clear_page)
+
 /*
  * copy_page and copy_user_page are the same for non-cache-aliased configs.
  *
@@ -53,6 +56,7 @@ ENTRY(clear_page)
  */
 
 ENTRY(copy_page)
+
 	entry	a1, 16
 
 	__loopi	a2, a4, PAGE_SIZE, 32
@@ -84,6 +88,8 @@ ENTRY(copy_page)
 
 	retw
 
+ENDPROC(copy_page)
+
 #ifdef CONFIG_MMU
 /*
  * If we have to deal with cache aliasing, we use temporary memory mappings
@@ -109,6 +115,7 @@ ENTRY(__tlbtemp_mapping_start)
  */
 
 ENTRY(clear_user_page)
+
 	entry	a1, 32
 
 	/* Mark page dirty and determine alias. */
@@ -164,6 +171,8 @@ ENTRY(clear_user_page)
 
 	retw
 
+ENDPROC(clear_user_page)
+
 /*
  * copy_page_user (void *to, void *from, unsigned long vaddr, struct page *page)
  *                    a2          a3            a4                   a5
@@ -262,6 +271,8 @@ ENTRY(copy_user_page)
 
 	retw
 
+ENDPROC(copy_user_page)
+
 #endif
 
 #if (DCACHE_WAY_SIZE > PAGE_SIZE)
@@ -272,6 +283,7 @@ ENTRY(copy_user_page)
  */
 
 ENTRY(__flush_invalidate_dcache_page_alias)
+
 	entry	sp, 16
 
 	movi	a7, 0			# required for exception handler
@@ -287,6 +299,7 @@ ENTRY(__flush_invalidate_dcache_page_alias)
 
 	retw
 
+ENDPROC(__flush_invalidate_dcache_page_alias)
 #endif
 
 ENTRY(__tlbtemp_mapping_itlb)
@@ -294,6 +307,7 @@ ENTRY(__tlbtemp_mapping_itlb)
 #if (ICACHE_WAY_SIZE > PAGE_SIZE)
 
 ENTRY(__invalidate_icache_page_alias)
+
 	entry	sp, 16
 
 	addi	a6, a3, (PAGE_KERNEL_EXEC | _PAGE_HW_WRITE)
@@ -307,11 +321,14 @@ ENTRY(__invalidate_icache_page_alias)
 	isync
 	retw
 
+ENDPROC(__invalidate_icache_page_alias)
+
 #endif
 
 /* End of special treatment in tlb miss exception */
 
 ENTRY(__tlbtemp_mapping_end)
+
 #endif /* CONFIG_MMU
 
 /*
@@ -319,6 +336,7 @@ ENTRY(__tlbtemp_mapping_end)
  */
 
 ENTRY(__invalidate_icache_page)
+
 	entry	sp, 16
 
 	___invalidate_icache_page a2 a3
@@ -326,11 +344,14 @@ ENTRY(__invalidate_icache_page)
 
 	retw
 
+ENDPROC(__invalidate_icache_page)
+
 /*
  * void __invalidate_dcache_page(ulong start)
  */
 
 ENTRY(__invalidate_dcache_page)
+
 	entry	sp, 16
 
 	___invalidate_dcache_page a2 a3
@@ -338,11 +359,14 @@ ENTRY(__invalidate_dcache_page)
 
 	retw
 
+ENDPROC(__invalidate_dcache_page)
+
 /*
  * void __flush_invalidate_dcache_page(ulong start)
  */
 
 ENTRY(__flush_invalidate_dcache_page)
+
 	entry	sp, 16
 
 	___flush_invalidate_dcache_page a2 a3
@@ -350,11 +374,14 @@ ENTRY(__flush_invalidate_dcache_page)
 	dsync
 	retw
 
+ENDPROC(__flush_invalidate_dcache_page)
+
 /*
  * void __flush_dcache_page(ulong start)
  */
 
 ENTRY(__flush_dcache_page)
+
 	entry	sp, 16
 
 	___flush_dcache_page a2 a3
@@ -362,11 +389,14 @@ ENTRY(__flush_dcache_page)
 	dsync
 	retw
 
+ENDPROC(__flush_dcache_page)
+
 /*
  * void __invalidate_icache_range(ulong start, ulong size)
  */
 
 ENTRY(__invalidate_icache_range)
+
 	entry	sp, 16
 
 	___invalidate_icache_range a2 a3 a4
@@ -374,11 +404,14 @@ ENTRY(__invalidate_icache_range)
 
 	retw
 
+ENDPROC(__invalidate_icache_range)
+
 /*
  * void __flush_invalidate_dcache_range(ulong start, ulong size)
  */
 
 ENTRY(__flush_invalidate_dcache_range)
+
 	entry	sp, 16
 
 	___flush_invalidate_dcache_range a2 a3 a4
@@ -386,11 +419,14 @@ ENTRY(__flush_invalidate_dcache_range)
 
 	retw
 
+ENDPROC(__flush_invalidate_dcache_range)
+
 /*
  * void _flush_dcache_range(ulong start, ulong size)
  */
 
 ENTRY(__flush_dcache_range)
+
 	entry	sp, 16
 
 	___flush_dcache_range a2 a3 a4
@@ -398,22 +434,28 @@ ENTRY(__flush_dcache_range)
 
 	retw
 
+ENDPROC(__flush_dcache_range)
+
 /*
  * void _invalidate_dcache_range(ulong start, ulong size)
  */
 
 ENTRY(__invalidate_dcache_range)
+
 	entry	sp, 16
 
 	___invalidate_dcache_range a2 a3 a4
 
 	retw
 
+ENDPROC(__invalidate_dcache_range)
+
 /*
  * void _invalidate_icache_all(void)
  */
 
 ENTRY(__invalidate_icache_all)
+
 	entry	sp, 16
 
 	___invalidate_icache_all a2 a3
@@ -421,11 +463,14 @@ ENTRY(__invalidate_icache_all)
 
 	retw
 
+ENDPROC(__invalidate_icache_all)
+
 /*
  * void _flush_invalidate_dcache_all(void)
  */
 
 ENTRY(__flush_invalidate_dcache_all)
+
 	entry	sp, 16
 
 	___flush_invalidate_dcache_all a2 a3
@@ -433,11 +478,14 @@ ENTRY(__flush_invalidate_dcache_all)
 
 	retw
 
+ENDPROC(__flush_invalidate_dcache_all)
+
 /*
  * void _invalidate_dcache_all(void)
  */
 
 ENTRY(__invalidate_dcache_all)
+
 	entry	sp, 16
 
 	___invalidate_dcache_all a2 a3
@@ -445,3 +493,4 @@ ENTRY(__invalidate_dcache_all)
 
 	retw
 
+ENDPROC(__invalidate_dcache_all)