author    | Helge Deller <deller@gmx.de> | 2018-08-04 18:03:29 -0400
committer | Helge Deller <deller@gmx.de> | 2018-08-13 03:54:17 -0400
commit    | c8921d72e390cb6fca3fb2b0c2badfda851647eb (patch)
tree      | 8c6aec1a9405da9caad70088a7dfd910b71b6675
parent    | 3b885ac1dc35b87a39ee176a6c7e2af9c789d8b8 (diff)
parisc: Fix and improve kernel stack unwinding
This patchset fixes and improves kernel stack unwinding substantially:
1. Show backtraces with up to 30 callsites
2. Add callinfo to ENTRY_CFI() so that every assembler function gets an
entry in the unwind table
3. Use symbolic constants instead of hard-coded numbers in call_on_stack()
4. Do not depend on CONFIG_KALLSYMS to generate backtraces
5. Speed up backtrace generation
Make sure you have this patch for GNU as installed:
https://sourceware.org/ml/binutils/2018-07/msg00474.html
Without this patch, unwind info in the kernel is often wrong for various
functions.
Signed-off-by: Helge Deller <deller@gmx.de>
-rw-r--r-- | arch/parisc/include/asm/assembly.h |   2
-rw-r--r-- | arch/parisc/include/asm/linkage.h  |  17
-rw-r--r-- | arch/parisc/include/asm/unwind.h   |   3
-rw-r--r-- | arch/parisc/kernel/entry.S         |  53
-rw-r--r-- | arch/parisc/kernel/pacache.S       | 125
-rw-r--r-- | arch/parisc/kernel/process.c       |   2
-rw-r--r-- | arch/parisc/kernel/real2.S         |   6
-rw-r--r-- | arch/parisc/kernel/traps.c         |   2
-rw-r--r-- | arch/parisc/kernel/unwind.c        |  89
-rw-r--r-- | arch/parisc/lib/lusercopy.S        |  21
10 files changed, 88 insertions(+), 232 deletions(-)
diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
index 60e6f07b7e32..e9c6385ef0d1 100644
--- a/arch/parisc/include/asm/assembly.h
+++ b/arch/parisc/include/asm/assembly.h
@@ -36,6 +36,7 @@ | |||
36 | #define RP_OFFSET 16 | 36 | #define RP_OFFSET 16 |
37 | #define FRAME_SIZE 128 | 37 | #define FRAME_SIZE 128 |
38 | #define CALLEE_REG_FRAME_SIZE 144 | 38 | #define CALLEE_REG_FRAME_SIZE 144 |
39 | #define REG_SZ 8 | ||
39 | #define ASM_ULONG_INSN .dword | 40 | #define ASM_ULONG_INSN .dword |
40 | #else /* CONFIG_64BIT */ | 41 | #else /* CONFIG_64BIT */ |
41 | #define LDREG ldw | 42 | #define LDREG ldw |
@@ -50,6 +51,7 @@ | |||
50 | #define RP_OFFSET 20 | 51 | #define RP_OFFSET 20 |
51 | #define FRAME_SIZE 64 | 52 | #define FRAME_SIZE 64 |
52 | #define CALLEE_REG_FRAME_SIZE 128 | 53 | #define CALLEE_REG_FRAME_SIZE 128 |
54 | #define REG_SZ 4 | ||
53 | #define ASM_ULONG_INSN .word | 55 | #define ASM_ULONG_INSN .word |
54 | #endif | 56 | #endif |
55 | 57 | ||
diff --git a/arch/parisc/include/asm/linkage.h b/arch/parisc/include/asm/linkage.h
index 9a69bf6fc4b6..49f6f3d772cc 100644
--- a/arch/parisc/include/asm/linkage.h
+++ b/arch/parisc/include/asm/linkage.h
@@ -18,9 +18,9 @@ | |||
18 | #ifdef __ASSEMBLY__ | 18 | #ifdef __ASSEMBLY__ |
19 | 19 | ||
20 | #define ENTRY(name) \ | 20 | #define ENTRY(name) \ |
21 | .export name !\ | 21 | ALIGN !\ |
22 | ALIGN !\ | 22 | name: ASM_NL\ |
23 | name: | 23 | .export name |
24 | 24 | ||
25 | #ifdef CONFIG_64BIT | 25 | #ifdef CONFIG_64BIT |
26 | #define ENDPROC(name) \ | 26 | #define ENDPROC(name) \ |
@@ -31,13 +31,18 @@ name: | |||
31 | END(name) | 31 | END(name) |
32 | #endif | 32 | #endif |
33 | 33 | ||
34 | #define ENTRY_CFI(name) \ | 34 | #define ENTRY_CFI(name, ...) \ |
35 | ENTRY(name) ASM_NL\ | 35 | ENTRY(name) ASM_NL\ |
36 | .proc ASM_NL\ | ||
37 | .callinfo __VA_ARGS__ ASM_NL\ | ||
38 | .entry ASM_NL\ | ||
36 | CFI_STARTPROC | 39 | CFI_STARTPROC |
37 | 40 | ||
38 | #define ENDPROC_CFI(name) \ | 41 | #define ENDPROC_CFI(name) \ |
39 | ENDPROC(name) ASM_NL\ | 42 | CFI_ENDPROC ASM_NL\ |
40 | CFI_ENDPROC | 43 | .exit ASM_NL\ |
44 | .procend ASM_NL\ | ||
45 | ENDPROC(name) | ||
41 | 46 | ||
42 | #endif /* __ASSEMBLY__ */ | 47 | #endif /* __ASSEMBLY__ */ |
43 | 48 | ||
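
Side note on the ENTRY_CFI() change above: the extra macro arguments are forwarded verbatim into the .callinfo directive via __VA_ARGS__, so call sites can pass argument lists such as caller,frame=FRAME_SIZE unchanged. A tiny host-side C sketch of that forwarding (purely illustrative, not kernel code; FRAME_SIZE is left as an opaque token here):

#include <stdio.h>

/* Stringize whatever argument list the caller passes, commas included. */
#define STRINGIZE(...)   #__VA_ARGS__
/* Mimics how ENTRY_CFI() pastes its extra arguments after .callinfo. */
#define CALLINFO(...)    ".callinfo " STRINGIZE(__VA_ARGS__)

int main(void)
{
	puts(CALLINFO(caller));                    /* like ENTRY_CFI(mcount, caller) */
	puts(CALLINFO(caller, frame=FRAME_SIZE));  /* like ENTRY_CFI(return_to_handler, ...) */
	puts(CALLINFO());                          /* plain ENTRY_CFI(name) */
	return 0;
}

In the real macro the same token pasting happens when the assembler input is preprocessed, which is why every ENTRY_CFI user now gets a proper unwind table entry without repeating .proc/.callinfo/.entry by hand.
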
diff --git a/arch/parisc/include/asm/unwind.h b/arch/parisc/include/asm/unwind.h
index c73a3ee20226..f133b7efbebb 100644
--- a/arch/parisc/include/asm/unwind.h
+++ b/arch/parisc/include/asm/unwind.h
@@ -4,6 +4,9 @@ | |||
4 | 4 | ||
5 | #include <linux/list.h> | 5 | #include <linux/list.h> |
6 | 6 | ||
7 | /* Max number of levels to backtrace */ | ||
8 | #define MAX_UNWIND_ENTRIES 30 | ||
9 | |||
7 | /* From ABI specifications */ | 10 | /* From ABI specifications */ |
8 | struct unwind_table_entry { | 11 | struct unwind_table_entry { |
9 | unsigned int region_start; | 12 | unsigned int region_start; |
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 1b4732e20137..c7508f5717fb 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -766,7 +766,6 @@ END(fault_vector_11) | |||
766 | #endif | 766 | #endif |
767 | /* Fault vector is separately protected and *must* be on its own page */ | 767 | /* Fault vector is separately protected and *must* be on its own page */ |
768 | .align PAGE_SIZE | 768 | .align PAGE_SIZE |
769 | ENTRY(end_fault_vector) | ||
770 | 769 | ||
771 | .import handle_interruption,code | 770 | .import handle_interruption,code |
772 | .import do_cpu_irq_mask,code | 771 | .import do_cpu_irq_mask,code |
@@ -778,7 +777,6 @@ ENTRY(end_fault_vector) | |||
778 | */ | 777 | */ |
779 | 778 | ||
780 | ENTRY_CFI(ret_from_kernel_thread) | 779 | ENTRY_CFI(ret_from_kernel_thread) |
781 | |||
782 | /* Call schedule_tail first though */ | 780 | /* Call schedule_tail first though */ |
783 | BL schedule_tail, %r2 | 781 | BL schedule_tail, %r2 |
784 | nop | 782 | nop |
@@ -817,8 +815,9 @@ ENTRY_CFI(_switch_to) | |||
817 | LDREG TASK_THREAD_INFO(%r25), %r25 | 815 | LDREG TASK_THREAD_INFO(%r25), %r25 |
818 | bv %r0(%r2) | 816 | bv %r0(%r2) |
819 | mtctl %r25,%cr30 | 817 | mtctl %r25,%cr30 |
818 | ENDPROC_CFI(_switch_to) | ||
820 | 819 | ||
821 | _switch_to_ret: | 820 | ENTRY_CFI(_switch_to_ret) |
822 | mtctl %r0, %cr0 /* Needed for single stepping */ | 821 | mtctl %r0, %cr0 /* Needed for single stepping */ |
823 | callee_rest | 822 | callee_rest |
824 | callee_rest_float | 823 | callee_rest_float |
@@ -826,7 +825,7 @@ _switch_to_ret: | |||
826 | LDREG -RP_OFFSET(%r30), %r2 | 825 | LDREG -RP_OFFSET(%r30), %r2 |
827 | bv %r0(%r2) | 826 | bv %r0(%r2) |
828 | copy %r26, %r28 | 827 | copy %r26, %r28 |
829 | ENDPROC_CFI(_switch_to) | 828 | ENDPROC_CFI(_switch_to_ret) |
830 | 829 | ||
831 | /* | 830 | /* |
832 | * Common rfi return path for interruptions, kernel execve, and | 831 | * Common rfi return path for interruptions, kernel execve, and |
@@ -887,12 +886,14 @@ ENTRY_CFI(syscall_exit_rfi) | |||
887 | STREG %r19,PT_SR5(%r16) | 886 | STREG %r19,PT_SR5(%r16) |
888 | STREG %r19,PT_SR6(%r16) | 887 | STREG %r19,PT_SR6(%r16) |
889 | STREG %r19,PT_SR7(%r16) | 888 | STREG %r19,PT_SR7(%r16) |
889 | ENDPROC_CFI(syscall_exit_rfi) | ||
890 | 890 | ||
891 | intr_return: | 891 | ENTRY_CFI(intr_return) |
892 | /* check for reschedule */ | 892 | /* check for reschedule */ |
893 | mfctl %cr30,%r1 | 893 | mfctl %cr30,%r1 |
894 | LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */ | 894 | LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */ |
895 | bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */ | 895 | bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */ |
896 | ENDPROC_CFI(intr_return) | ||
896 | 897 | ||
897 | .import do_notify_resume,code | 898 | .import do_notify_resume,code |
898 | intr_check_sig: | 899 | intr_check_sig: |
@@ -1048,7 +1049,6 @@ intr_extint: | |||
1048 | 1049 | ||
1049 | b do_cpu_irq_mask | 1050 | b do_cpu_irq_mask |
1050 | ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */ | 1051 | ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */ |
1051 | ENDPROC_CFI(syscall_exit_rfi) | ||
1052 | 1052 | ||
1053 | 1053 | ||
1054 | /* Generic interruptions (illegal insn, unaligned, page fault, etc) */ | 1054 | /* Generic interruptions (illegal insn, unaligned, page fault, etc) */ |
@@ -1999,12 +1999,9 @@ ENDPROC_CFI(syscall_exit) | |||
1999 | .align L1_CACHE_BYTES | 1999 | .align L1_CACHE_BYTES |
2000 | .globl mcount | 2000 | .globl mcount |
2001 | .type mcount, @function | 2001 | .type mcount, @function |
2002 | ENTRY(mcount) | 2002 | ENTRY_CFI(mcount, caller) |
2003 | _mcount: | 2003 | _mcount: |
2004 | .export _mcount,data | 2004 | .export _mcount,data |
2005 | .proc | ||
2006 | .callinfo caller,frame=0 | ||
2007 | .entry | ||
2008 | /* | 2005 | /* |
2009 | * The 64bit mcount() function pointer needs 4 dwords, of which the | 2006 | * The 64bit mcount() function pointer needs 4 dwords, of which the |
2010 | * first two are free. We optimize it here and put 2 instructions for | 2007 | * first two are free. We optimize it here and put 2 instructions for |
@@ -2026,18 +2023,13 @@ ftrace_stub: | |||
2026 | .dword mcount | 2023 | .dword mcount |
2027 | .dword 0 /* code in head.S puts value of global gp here */ | 2024 | .dword 0 /* code in head.S puts value of global gp here */ |
2028 | #endif | 2025 | #endif |
2029 | .exit | 2026 | ENDPROC_CFI(mcount) |
2030 | .procend | ||
2031 | ENDPROC(mcount) | ||
2032 | 2027 | ||
2033 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 2028 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
2034 | .align 8 | 2029 | .align 8 |
2035 | .globl return_to_handler | 2030 | .globl return_to_handler |
2036 | .type return_to_handler, @function | 2031 | .type return_to_handler, @function |
2037 | ENTRY_CFI(return_to_handler) | 2032 | ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE) |
2038 | .proc | ||
2039 | .callinfo caller,frame=FRAME_SIZE | ||
2040 | .entry | ||
2041 | .export parisc_return_to_handler,data | 2033 | .export parisc_return_to_handler,data |
2042 | parisc_return_to_handler: | 2034 | parisc_return_to_handler: |
2043 | copy %r3,%r1 | 2035 | copy %r3,%r1 |
@@ -2076,8 +2068,6 @@ parisc_return_to_handler: | |||
2076 | bv %r0(%rp) | 2068 | bv %r0(%rp) |
2077 | #endif | 2069 | #endif |
2078 | LDREGM -FRAME_SIZE(%sp),%r3 | 2070 | LDREGM -FRAME_SIZE(%sp),%r3 |
2079 | .exit | ||
2080 | .procend | ||
2081 | ENDPROC_CFI(return_to_handler) | 2071 | ENDPROC_CFI(return_to_handler) |
2082 | 2072 | ||
2083 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 2073 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
@@ -2087,31 +2077,30 @@ ENDPROC_CFI(return_to_handler) | |||
2087 | #ifdef CONFIG_IRQSTACKS | 2077 | #ifdef CONFIG_IRQSTACKS |
2088 | /* void call_on_stack(unsigned long param1, void *func, | 2078 | /* void call_on_stack(unsigned long param1, void *func, |
2089 | unsigned long new_stack) */ | 2079 | unsigned long new_stack) */ |
2090 | ENTRY_CFI(call_on_stack) | 2080 | ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP) |
2091 | copy %sp, %r1 | 2081 | copy %sp, %r1 |
2092 | 2082 | ||
2093 | /* Regarding the HPPA calling conventions for function pointers, | 2083 | /* Regarding the HPPA calling conventions for function pointers, |
2094 | we assume the PIC register is not changed across call. For | 2084 | we assume the PIC register is not changed across call. For |
2095 | CONFIG_64BIT, the argument pointer is left to point at the | 2085 | CONFIG_64BIT, the argument pointer is left to point at the |
2096 | argument region allocated for the call to call_on_stack. */ | 2086 | argument region allocated for the call to call_on_stack. */ |
2087 | |||
2088 | /* Switch to new stack. We allocate two frames. */ | ||
2089 | ldo 2*FRAME_SIZE(%arg2), %sp | ||
2097 | # ifdef CONFIG_64BIT | 2090 | # ifdef CONFIG_64BIT |
2098 | /* Switch to new stack. We allocate two 128 byte frames. */ | ||
2099 | ldo 256(%arg2), %sp | ||
2100 | /* Save previous stack pointer and return pointer in frame marker */ | 2091 | /* Save previous stack pointer and return pointer in frame marker */ |
2101 | STREG %rp, -144(%sp) | 2092 | STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp) |
2102 | /* Calls always use function descriptor */ | 2093 | /* Calls always use function descriptor */ |
2103 | LDREG 16(%arg1), %arg1 | 2094 | LDREG 16(%arg1), %arg1 |
2104 | bve,l (%arg1), %rp | 2095 | bve,l (%arg1), %rp |
2105 | STREG %r1, -136(%sp) | 2096 | STREG %r1, -FRAME_SIZE-REG_SZ(%sp) |
2106 | LDREG -144(%sp), %rp | 2097 | LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp |
2107 | bve (%rp) | 2098 | bve (%rp) |
2108 | LDREG -136(%sp), %sp | 2099 | LDREG -FRAME_SIZE-REG_SZ(%sp), %sp |
2109 | # else | 2100 | # else |
2110 | /* Switch to new stack. We allocate two 64 byte frames. */ | ||
2111 | ldo 128(%arg2), %sp | ||
2112 | /* Save previous stack pointer and return pointer in frame marker */ | 2101 | /* Save previous stack pointer and return pointer in frame marker */ |
2113 | STREG %r1, -68(%sp) | 2102 | STREG %r1, -FRAME_SIZE-REG_SZ(%sp) |
2114 | STREG %rp, -84(%sp) | 2103 | STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp) |
2115 | /* Calls use function descriptor if PLABEL bit is set */ | 2104 | /* Calls use function descriptor if PLABEL bit is set */ |
2116 | bb,>=,n %arg1, 30, 1f | 2105 | bb,>=,n %arg1, 30, 1f |
2117 | depwi 0,31,2, %arg1 | 2106 | depwi 0,31,2, %arg1 |
@@ -2119,9 +2108,9 @@ ENTRY_CFI(call_on_stack) | |||
2119 | 1: | 2108 | 1: |
2120 | be,l 0(%sr4,%arg1), %sr0, %r31 | 2109 | be,l 0(%sr4,%arg1), %sr0, %r31 |
2121 | copy %r31, %rp | 2110 | copy %r31, %rp |
2122 | LDREG -84(%sp), %rp | 2111 | LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp |
2123 | bv (%rp) | 2112 | bv (%rp) |
2124 | LDREG -68(%sp), %sp | 2113 | LDREG -FRAME_SIZE-REG_SZ(%sp), %sp |
2125 | # endif /* CONFIG_64BIT */ | 2114 | # endif /* CONFIG_64BIT */ |
2126 | ENDPROC_CFI(call_on_stack) | 2115 | ENDPROC_CFI(call_on_stack) |
2127 | #endif /* CONFIG_IRQSTACKS */ | 2116 | #endif /* CONFIG_IRQSTACKS */ |
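
A quick way to convince yourself that the symbolic offsets in call_on_stack() above are equivalent to the magic numbers they replace is to plug in the constants from assembly.h. A hypothetical compile-time check, not part of the patch (the _64/_32 suffixed names exist only in this sketch):

/* 64-bit values from assembly.h */
#define FRAME_SIZE_64	128
#define RP_OFFSET_64	16
#define REG_SZ_64	8
_Static_assert(2 * FRAME_SIZE_64 == 256, "old 'ldo 256(%arg2), %sp'");
_Static_assert(-FRAME_SIZE_64 - RP_OFFSET_64 == -144, "old 'STREG %rp, -144(%sp)'");
_Static_assert(-FRAME_SIZE_64 - REG_SZ_64 == -136, "old 'STREG %r1, -136(%sp)'");

/* 32-bit values from assembly.h */
#define FRAME_SIZE_32	64
#define RP_OFFSET_32	20
#define REG_SZ_32	4
_Static_assert(2 * FRAME_SIZE_32 == 128, "old 'ldo 128(%arg2), %sp'");
_Static_assert(-FRAME_SIZE_32 - RP_OFFSET_32 == -84, "old 'STREG %rp, -84(%sp)'");
_Static_assert(-FRAME_SIZE_32 - REG_SZ_32 == -68, "old 'STREG %r1, -68(%sp)'");

So the generated code is unchanged; only the readability of the offsets improves.
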
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 97451e67d35b..f33bf2d306d6 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -44,10 +44,6 @@ | |||
44 | .align 16 | 44 | .align 16 |
45 | 45 | ||
46 | ENTRY_CFI(flush_tlb_all_local) | 46 | ENTRY_CFI(flush_tlb_all_local) |
47 | .proc | ||
48 | .callinfo NO_CALLS | ||
49 | .entry | ||
50 | |||
51 | /* | 47 | /* |
52 | * The pitlbe and pdtlbe instructions should only be used to | 48 | * The pitlbe and pdtlbe instructions should only be used to |
53 | * flush the entire tlb. Also, there needs to be no intervening | 49 | * flush the entire tlb. Also, there needs to be no intervening |
@@ -189,18 +185,11 @@ fdtdone: | |||
189 | 185 | ||
190 | 2: bv %r0(%r2) | 186 | 2: bv %r0(%r2) |
191 | nop | 187 | nop |
192 | |||
193 | .exit | ||
194 | .procend | ||
195 | ENDPROC_CFI(flush_tlb_all_local) | 188 | ENDPROC_CFI(flush_tlb_all_local) |
196 | 189 | ||
197 | .import cache_info,data | 190 | .import cache_info,data |
198 | 191 | ||
199 | ENTRY_CFI(flush_instruction_cache_local) | 192 | ENTRY_CFI(flush_instruction_cache_local) |
200 | .proc | ||
201 | .callinfo NO_CALLS | ||
202 | .entry | ||
203 | |||
204 | load32 cache_info, %r1 | 193 | load32 cache_info, %r1 |
205 | 194 | ||
206 | /* Flush Instruction Cache */ | 195 | /* Flush Instruction Cache */ |
@@ -256,18 +245,11 @@ fisync: | |||
256 | mtsm %r22 /* restore I-bit */ | 245 | mtsm %r22 /* restore I-bit */ |
257 | bv %r0(%r2) | 246 | bv %r0(%r2) |
258 | nop | 247 | nop |
259 | .exit | ||
260 | |||
261 | .procend | ||
262 | ENDPROC_CFI(flush_instruction_cache_local) | 248 | ENDPROC_CFI(flush_instruction_cache_local) |
263 | 249 | ||
264 | 250 | ||
265 | .import cache_info, data | 251 | .import cache_info, data |
266 | ENTRY_CFI(flush_data_cache_local) | 252 | ENTRY_CFI(flush_data_cache_local) |
267 | .proc | ||
268 | .callinfo NO_CALLS | ||
269 | .entry | ||
270 | |||
271 | load32 cache_info, %r1 | 253 | load32 cache_info, %r1 |
272 | 254 | ||
273 | /* Flush Data Cache */ | 255 | /* Flush Data Cache */ |
@@ -324,9 +306,6 @@ fdsync: | |||
324 | mtsm %r22 /* restore I-bit */ | 306 | mtsm %r22 /* restore I-bit */ |
325 | bv %r0(%r2) | 307 | bv %r0(%r2) |
326 | nop | 308 | nop |
327 | .exit | ||
328 | |||
329 | .procend | ||
330 | ENDPROC_CFI(flush_data_cache_local) | 309 | ENDPROC_CFI(flush_data_cache_local) |
331 | 310 | ||
332 | /* Macros to serialize TLB purge operations on SMP. */ | 311 | /* Macros to serialize TLB purge operations on SMP. */ |
@@ -362,10 +341,6 @@ ENDPROC_CFI(flush_data_cache_local) | |||
362 | /* Clear page using kernel mapping. */ | 341 | /* Clear page using kernel mapping. */ |
363 | 342 | ||
364 | ENTRY_CFI(clear_page_asm) | 343 | ENTRY_CFI(clear_page_asm) |
365 | .proc | ||
366 | .callinfo NO_CALLS | ||
367 | .entry | ||
368 | |||
369 | #ifdef CONFIG_64BIT | 344 | #ifdef CONFIG_64BIT |
370 | 345 | ||
371 | /* Unroll the loop. */ | 346 | /* Unroll the loop. */ |
@@ -424,18 +399,11 @@ ENTRY_CFI(clear_page_asm) | |||
424 | #endif | 399 | #endif |
425 | bv %r0(%r2) | 400 | bv %r0(%r2) |
426 | nop | 401 | nop |
427 | .exit | ||
428 | |||
429 | .procend | ||
430 | ENDPROC_CFI(clear_page_asm) | 402 | ENDPROC_CFI(clear_page_asm) |
431 | 403 | ||
432 | /* Copy page using kernel mapping. */ | 404 | /* Copy page using kernel mapping. */ |
433 | 405 | ||
434 | ENTRY_CFI(copy_page_asm) | 406 | ENTRY_CFI(copy_page_asm) |
435 | .proc | ||
436 | .callinfo NO_CALLS | ||
437 | .entry | ||
438 | |||
439 | #ifdef CONFIG_64BIT | 407 | #ifdef CONFIG_64BIT |
440 | /* PA8x00 CPUs can consume 2 loads or 1 store per cycle. | 408 | /* PA8x00 CPUs can consume 2 loads or 1 store per cycle. |
441 | * Unroll the loop by hand and arrange insn appropriately. | 409 | * Unroll the loop by hand and arrange insn appropriately. |
@@ -542,9 +510,6 @@ ENTRY_CFI(copy_page_asm) | |||
542 | #endif | 510 | #endif |
543 | bv %r0(%r2) | 511 | bv %r0(%r2) |
544 | nop | 512 | nop |
545 | .exit | ||
546 | |||
547 | .procend | ||
548 | ENDPROC_CFI(copy_page_asm) | 513 | ENDPROC_CFI(copy_page_asm) |
549 | 514 | ||
550 | /* | 515 | /* |
@@ -598,10 +563,6 @@ ENDPROC_CFI(copy_page_asm) | |||
598 | */ | 563 | */ |
599 | 564 | ||
600 | ENTRY_CFI(copy_user_page_asm) | 565 | ENTRY_CFI(copy_user_page_asm) |
601 | .proc | ||
602 | .callinfo NO_CALLS | ||
603 | .entry | ||
604 | |||
605 | /* Convert virtual `to' and `from' addresses to physical addresses. | 566 | /* Convert virtual `to' and `from' addresses to physical addresses. |
606 | Move `from' physical address to non shadowed register. */ | 567 | Move `from' physical address to non shadowed register. */ |
607 | ldil L%(__PAGE_OFFSET), %r1 | 568 | ldil L%(__PAGE_OFFSET), %r1 |
@@ -750,16 +711,9 @@ ENTRY_CFI(copy_user_page_asm) | |||
750 | 711 | ||
751 | bv %r0(%r2) | 712 | bv %r0(%r2) |
752 | nop | 713 | nop |
753 | .exit | ||
754 | |||
755 | .procend | ||
756 | ENDPROC_CFI(copy_user_page_asm) | 714 | ENDPROC_CFI(copy_user_page_asm) |
757 | 715 | ||
758 | ENTRY_CFI(clear_user_page_asm) | 716 | ENTRY_CFI(clear_user_page_asm) |
759 | .proc | ||
760 | .callinfo NO_CALLS | ||
761 | .entry | ||
762 | |||
763 | tophys_r1 %r26 | 717 | tophys_r1 %r26 |
764 | 718 | ||
765 | ldil L%(TMPALIAS_MAP_START), %r28 | 719 | ldil L%(TMPALIAS_MAP_START), %r28 |
@@ -836,16 +790,9 @@ ENTRY_CFI(clear_user_page_asm) | |||
836 | 790 | ||
837 | bv %r0(%r2) | 791 | bv %r0(%r2) |
838 | nop | 792 | nop |
839 | .exit | ||
840 | |||
841 | .procend | ||
842 | ENDPROC_CFI(clear_user_page_asm) | 793 | ENDPROC_CFI(clear_user_page_asm) |
843 | 794 | ||
844 | ENTRY_CFI(flush_dcache_page_asm) | 795 | ENTRY_CFI(flush_dcache_page_asm) |
845 | .proc | ||
846 | .callinfo NO_CALLS | ||
847 | .entry | ||
848 | |||
849 | ldil L%(TMPALIAS_MAP_START), %r28 | 796 | ldil L%(TMPALIAS_MAP_START), %r28 |
850 | #ifdef CONFIG_64BIT | 797 | #ifdef CONFIG_64BIT |
851 | #if (TMPALIAS_MAP_START >= 0x80000000) | 798 | #if (TMPALIAS_MAP_START >= 0x80000000) |
@@ -903,16 +850,9 @@ ENTRY_CFI(flush_dcache_page_asm) | |||
903 | sync | 850 | sync |
904 | bv %r0(%r2) | 851 | bv %r0(%r2) |
905 | nop | 852 | nop |
906 | .exit | ||
907 | |||
908 | .procend | ||
909 | ENDPROC_CFI(flush_dcache_page_asm) | 853 | ENDPROC_CFI(flush_dcache_page_asm) |
910 | 854 | ||
911 | ENTRY_CFI(flush_icache_page_asm) | 855 | ENTRY_CFI(flush_icache_page_asm) |
912 | .proc | ||
913 | .callinfo NO_CALLS | ||
914 | .entry | ||
915 | |||
916 | ldil L%(TMPALIAS_MAP_START), %r28 | 856 | ldil L%(TMPALIAS_MAP_START), %r28 |
917 | #ifdef CONFIG_64BIT | 857 | #ifdef CONFIG_64BIT |
918 | #if (TMPALIAS_MAP_START >= 0x80000000) | 858 | #if (TMPALIAS_MAP_START >= 0x80000000) |
@@ -977,16 +917,9 @@ ENTRY_CFI(flush_icache_page_asm) | |||
977 | sync | 917 | sync |
978 | bv %r0(%r2) | 918 | bv %r0(%r2) |
979 | nop | 919 | nop |
980 | .exit | ||
981 | |||
982 | .procend | ||
983 | ENDPROC_CFI(flush_icache_page_asm) | 920 | ENDPROC_CFI(flush_icache_page_asm) |
984 | 921 | ||
985 | ENTRY_CFI(flush_kernel_dcache_page_asm) | 922 | ENTRY_CFI(flush_kernel_dcache_page_asm) |
986 | .proc | ||
987 | .callinfo NO_CALLS | ||
988 | .entry | ||
989 | |||
990 | ldil L%dcache_stride, %r1 | 923 | ldil L%dcache_stride, %r1 |
991 | ldw R%dcache_stride(%r1), %r23 | 924 | ldw R%dcache_stride(%r1), %r23 |
992 | 925 | ||
@@ -1020,16 +953,9 @@ ENTRY_CFI(flush_kernel_dcache_page_asm) | |||
1020 | sync | 953 | sync |
1021 | bv %r0(%r2) | 954 | bv %r0(%r2) |
1022 | nop | 955 | nop |
1023 | .exit | ||
1024 | |||
1025 | .procend | ||
1026 | ENDPROC_CFI(flush_kernel_dcache_page_asm) | 956 | ENDPROC_CFI(flush_kernel_dcache_page_asm) |
1027 | 957 | ||
1028 | ENTRY_CFI(purge_kernel_dcache_page_asm) | 958 | ENTRY_CFI(purge_kernel_dcache_page_asm) |
1029 | .proc | ||
1030 | .callinfo NO_CALLS | ||
1031 | .entry | ||
1032 | |||
1033 | ldil L%dcache_stride, %r1 | 959 | ldil L%dcache_stride, %r1 |
1034 | ldw R%dcache_stride(%r1), %r23 | 960 | ldw R%dcache_stride(%r1), %r23 |
1035 | 961 | ||
@@ -1062,16 +988,9 @@ ENTRY_CFI(purge_kernel_dcache_page_asm) | |||
1062 | sync | 988 | sync |
1063 | bv %r0(%r2) | 989 | bv %r0(%r2) |
1064 | nop | 990 | nop |
1065 | .exit | ||
1066 | |||
1067 | .procend | ||
1068 | ENDPROC_CFI(purge_kernel_dcache_page_asm) | 991 | ENDPROC_CFI(purge_kernel_dcache_page_asm) |
1069 | 992 | ||
1070 | ENTRY_CFI(flush_user_dcache_range_asm) | 993 | ENTRY_CFI(flush_user_dcache_range_asm) |
1071 | .proc | ||
1072 | .callinfo NO_CALLS | ||
1073 | .entry | ||
1074 | |||
1075 | ldil L%dcache_stride, %r1 | 994 | ldil L%dcache_stride, %r1 |
1076 | ldw R%dcache_stride(%r1), %r23 | 995 | ldw R%dcache_stride(%r1), %r23 |
1077 | ldo -1(%r23), %r21 | 996 | ldo -1(%r23), %r21 |
@@ -1083,16 +1002,9 @@ ENTRY_CFI(flush_user_dcache_range_asm) | |||
1083 | sync | 1002 | sync |
1084 | bv %r0(%r2) | 1003 | bv %r0(%r2) |
1085 | nop | 1004 | nop |
1086 | .exit | ||
1087 | |||
1088 | .procend | ||
1089 | ENDPROC_CFI(flush_user_dcache_range_asm) | 1005 | ENDPROC_CFI(flush_user_dcache_range_asm) |
1090 | 1006 | ||
1091 | ENTRY_CFI(flush_kernel_dcache_range_asm) | 1007 | ENTRY_CFI(flush_kernel_dcache_range_asm) |
1092 | .proc | ||
1093 | .callinfo NO_CALLS | ||
1094 | .entry | ||
1095 | |||
1096 | ldil L%dcache_stride, %r1 | 1008 | ldil L%dcache_stride, %r1 |
1097 | ldw R%dcache_stride(%r1), %r23 | 1009 | ldw R%dcache_stride(%r1), %r23 |
1098 | ldo -1(%r23), %r21 | 1010 | ldo -1(%r23), %r21 |
@@ -1105,16 +1017,9 @@ ENTRY_CFI(flush_kernel_dcache_range_asm) | |||
1105 | syncdma | 1017 | syncdma |
1106 | bv %r0(%r2) | 1018 | bv %r0(%r2) |
1107 | nop | 1019 | nop |
1108 | .exit | ||
1109 | |||
1110 | .procend | ||
1111 | ENDPROC_CFI(flush_kernel_dcache_range_asm) | 1020 | ENDPROC_CFI(flush_kernel_dcache_range_asm) |
1112 | 1021 | ||
1113 | ENTRY_CFI(purge_kernel_dcache_range_asm) | 1022 | ENTRY_CFI(purge_kernel_dcache_range_asm) |
1114 | .proc | ||
1115 | .callinfo NO_CALLS | ||
1116 | .entry | ||
1117 | |||
1118 | ldil L%dcache_stride, %r1 | 1023 | ldil L%dcache_stride, %r1 |
1119 | ldw R%dcache_stride(%r1), %r23 | 1024 | ldw R%dcache_stride(%r1), %r23 |
1120 | ldo -1(%r23), %r21 | 1025 | ldo -1(%r23), %r21 |
@@ -1127,16 +1032,9 @@ ENTRY_CFI(purge_kernel_dcache_range_asm) | |||
1127 | syncdma | 1032 | syncdma |
1128 | bv %r0(%r2) | 1033 | bv %r0(%r2) |
1129 | nop | 1034 | nop |
1130 | .exit | ||
1131 | |||
1132 | .procend | ||
1133 | ENDPROC_CFI(purge_kernel_dcache_range_asm) | 1035 | ENDPROC_CFI(purge_kernel_dcache_range_asm) |
1134 | 1036 | ||
1135 | ENTRY_CFI(flush_user_icache_range_asm) | 1037 | ENTRY_CFI(flush_user_icache_range_asm) |
1136 | .proc | ||
1137 | .callinfo NO_CALLS | ||
1138 | .entry | ||
1139 | |||
1140 | ldil L%icache_stride, %r1 | 1038 | ldil L%icache_stride, %r1 |
1141 | ldw R%icache_stride(%r1), %r23 | 1039 | ldw R%icache_stride(%r1), %r23 |
1142 | ldo -1(%r23), %r21 | 1040 | ldo -1(%r23), %r21 |
@@ -1148,16 +1046,9 @@ ENTRY_CFI(flush_user_icache_range_asm) | |||
1148 | sync | 1046 | sync |
1149 | bv %r0(%r2) | 1047 | bv %r0(%r2) |
1150 | nop | 1048 | nop |
1151 | .exit | ||
1152 | |||
1153 | .procend | ||
1154 | ENDPROC_CFI(flush_user_icache_range_asm) | 1049 | ENDPROC_CFI(flush_user_icache_range_asm) |
1155 | 1050 | ||
1156 | ENTRY_CFI(flush_kernel_icache_page) | 1051 | ENTRY_CFI(flush_kernel_icache_page) |
1157 | .proc | ||
1158 | .callinfo NO_CALLS | ||
1159 | .entry | ||
1160 | |||
1161 | ldil L%icache_stride, %r1 | 1052 | ldil L%icache_stride, %r1 |
1162 | ldw R%icache_stride(%r1), %r23 | 1053 | ldw R%icache_stride(%r1), %r23 |
1163 | 1054 | ||
@@ -1191,16 +1082,9 @@ ENTRY_CFI(flush_kernel_icache_page) | |||
1191 | sync | 1082 | sync |
1192 | bv %r0(%r2) | 1083 | bv %r0(%r2) |
1193 | nop | 1084 | nop |
1194 | .exit | ||
1195 | |||
1196 | .procend | ||
1197 | ENDPROC_CFI(flush_kernel_icache_page) | 1085 | ENDPROC_CFI(flush_kernel_icache_page) |
1198 | 1086 | ||
1199 | ENTRY_CFI(flush_kernel_icache_range_asm) | 1087 | ENTRY_CFI(flush_kernel_icache_range_asm) |
1200 | .proc | ||
1201 | .callinfo NO_CALLS | ||
1202 | .entry | ||
1203 | |||
1204 | ldil L%icache_stride, %r1 | 1088 | ldil L%icache_stride, %r1 |
1205 | ldw R%icache_stride(%r1), %r23 | 1089 | ldw R%icache_stride(%r1), %r23 |
1206 | ldo -1(%r23), %r21 | 1090 | ldo -1(%r23), %r21 |
@@ -1212,8 +1096,6 @@ ENTRY_CFI(flush_kernel_icache_range_asm) | |||
1212 | sync | 1096 | sync |
1213 | bv %r0(%r2) | 1097 | bv %r0(%r2) |
1214 | nop | 1098 | nop |
1215 | .exit | ||
1216 | .procend | ||
1217 | ENDPROC_CFI(flush_kernel_icache_range_asm) | 1099 | ENDPROC_CFI(flush_kernel_icache_range_asm) |
1218 | 1100 | ||
1219 | __INIT | 1101 | __INIT |
@@ -1223,10 +1105,6 @@ ENDPROC_CFI(flush_kernel_icache_range_asm) | |||
1223 | */ | 1105 | */ |
1224 | .align 256 | 1106 | .align 256 |
1225 | ENTRY_CFI(disable_sr_hashing_asm) | 1107 | ENTRY_CFI(disable_sr_hashing_asm) |
1226 | .proc | ||
1227 | .callinfo NO_CALLS | ||
1228 | .entry | ||
1229 | |||
1230 | /* | 1108 | /* |
1231 | * Switch to real mode | 1109 | * Switch to real mode |
1232 | */ | 1110 | */ |
@@ -1308,9 +1186,6 @@ srdis_done: | |||
1308 | 1186 | ||
1309 | 2: bv %r0(%r2) | 1187 | 2: bv %r0(%r2) |
1310 | nop | 1188 | nop |
1311 | .exit | ||
1312 | |||
1313 | .procend | ||
1314 | ENDPROC_CFI(disable_sr_hashing_asm) | 1189 | ENDPROC_CFI(disable_sr_hashing_asm) |
1315 | 1190 | ||
1316 | .end | 1191 | .end |
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index b931745815e0..eb39e7e380d7 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -302,7 +302,7 @@ get_wchan(struct task_struct *p) | |||
302 | ip = info.ip; | 302 | ip = info.ip; |
303 | if (!in_sched_functions(ip)) | 303 | if (!in_sched_functions(ip)) |
304 | return ip; | 304 | return ip; |
305 | } while (count++ < 16); | 305 | } while (count++ < MAX_UNWIND_ENTRIES); |
306 | return 0; | 306 | return 0; |
307 | } | 307 | } |
308 | 308 | ||
diff --git a/arch/parisc/kernel/real2.S b/arch/parisc/kernel/real2.S
index cc9963421a19..2b16d8d6598f 100644
--- a/arch/parisc/kernel/real2.S
+++ b/arch/parisc/kernel/real2.S
@@ -35,12 +35,6 @@ real32_stack: | |||
35 | real64_stack: | 35 | real64_stack: |
36 | .block 8192 | 36 | .block 8192 |
37 | 37 | ||
38 | #ifdef CONFIG_64BIT | ||
39 | # define REG_SZ 8 | ||
40 | #else | ||
41 | # define REG_SZ 4 | ||
42 | #endif | ||
43 | |||
44 | #define N_SAVED_REGS 9 | 38 | #define N_SAVED_REGS 9 |
45 | 39 | ||
46 | save_cr_space: | 40 | save_cr_space: |
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 4309ad31a874..318815212518 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -172,7 +172,7 @@ static void do_show_stack(struct unwind_frame_info *info) | |||
172 | int i = 1; | 172 | int i = 1; |
173 | 173 | ||
174 | printk(KERN_CRIT "Backtrace:\n"); | 174 | printk(KERN_CRIT "Backtrace:\n"); |
175 | while (i <= 16) { | 175 | while (i <= MAX_UNWIND_ENTRIES) { |
176 | if (unwind_once(info) < 0 || info->ip == 0) | 176 | if (unwind_once(info) < 0 || info->ip == 0) |
177 | break; | 177 | break; |
178 | 178 | ||
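
The same MAX_UNWIND_ENTRIES bound is what any caller of the unwinder is expected to use. A hedged sketch of the resulting loop shape (show_trace_sketch is a made-up name, and the frame-info initialisation helpers are elided; "info" is assumed to be already set up):

#include <linux/printk.h>
#include <asm/unwind.h>

static void show_trace_sketch(struct unwind_frame_info *info)
{
	int depth = 1;

	while (depth++ <= MAX_UNWIND_ENTRIES) {
		if (unwind_once(info) < 0 || info->ip == 0)
			break;
		printk(KERN_CRIT " [<%px>] %pS\n",
		       (void *)info->ip, (void *)info->ip);
	}
}

Bounding the walk at 30 entries keeps a corrupted stack from spinning the unwinder forever while still giving a usefully deep trace.
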
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index a4b430f440a9..5cdf13069dd9 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -13,7 +13,6 @@ | |||
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/kallsyms.h> | ||
17 | #include <linux/sort.h> | 16 | #include <linux/sort.h> |
18 | 17 | ||
19 | #include <linux/uaccess.h> | 18 | #include <linux/uaccess.h> |
@@ -117,7 +116,8 @@ unwind_table_init(struct unwind_table *table, const char *name, | |||
117 | for (; start <= end; start++) { | 116 | for (; start <= end; start++) { |
118 | if (start < end && | 117 | if (start < end && |
119 | start->region_end > (start+1)->region_start) { | 118 | start->region_end > (start+1)->region_start) { |
120 | printk("WARNING: Out of order unwind entry! %p and %p\n", start, start+1); | 119 | pr_warn("Out of order unwind entry! %px and %px\n", |
120 | start, start+1); | ||
121 | } | 121 | } |
122 | 122 | ||
123 | start->region_start += base_addr; | 123 | start->region_start += base_addr; |
@@ -203,25 +203,60 @@ int __init unwind_init(void) | |||
203 | return 0; | 203 | return 0; |
204 | } | 204 | } |
205 | 205 | ||
206 | #ifdef CONFIG_64BIT | ||
207 | #define get_func_addr(fptr) fptr[2] | ||
208 | #else | ||
209 | #define get_func_addr(fptr) fptr[0] | ||
210 | #endif | ||
211 | |||
212 | static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size) | 206 | static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size) |
213 | { | 207 | { |
214 | extern void handle_interruption(int, struct pt_regs *); | 208 | /* |
215 | static unsigned long *hi = (unsigned long *)&handle_interruption; | 209 | * We have to use void * instead of a function pointer, because |
216 | 210 | * function pointers aren't a pointer to the function on 64-bit. | |
217 | if (pc == get_func_addr(hi)) { | 211 | * Make them const so the compiler knows they live in .text |
212 | */ | ||
213 | extern void * const handle_interruption; | ||
214 | extern void * const ret_from_kernel_thread; | ||
215 | extern void * const syscall_exit; | ||
216 | extern void * const intr_return; | ||
217 | extern void * const _switch_to_ret; | ||
218 | #ifdef CONFIG_IRQSTACKS | ||
219 | extern void * const call_on_stack; | ||
220 | #endif /* CONFIG_IRQSTACKS */ | ||
221 | |||
222 | if (pc == (unsigned long) &handle_interruption) { | ||
218 | struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN); | 223 | struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN); |
219 | dbg("Unwinding through handle_interruption()\n"); | 224 | dbg("Unwinding through handle_interruption()\n"); |
220 | info->prev_sp = regs->gr[30]; | 225 | info->prev_sp = regs->gr[30]; |
221 | info->prev_ip = regs->iaoq[0]; | 226 | info->prev_ip = regs->iaoq[0]; |
227 | return 1; | ||
228 | } | ||
229 | |||
230 | if (pc == (unsigned long) &ret_from_kernel_thread || | ||
231 | pc == (unsigned long) &syscall_exit) { | ||
232 | info->prev_sp = info->prev_ip = 0; | ||
233 | return 1; | ||
234 | } | ||
235 | |||
236 | if (pc == (unsigned long) &intr_return) { | ||
237 | struct pt_regs *regs; | ||
238 | |||
239 | dbg("Found intr_return()\n"); | ||
240 | regs = (struct pt_regs *)(info->sp - PT_SZ_ALGN); | ||
241 | info->prev_sp = regs->gr[30]; | ||
242 | info->prev_ip = regs->iaoq[0]; | ||
243 | info->rp = regs->gr[2]; | ||
244 | return 1; | ||
245 | } | ||
246 | |||
247 | if (pc == (unsigned long) &_switch_to_ret) { | ||
248 | info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE; | ||
249 | info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET); | ||
250 | return 1; | ||
251 | } | ||
222 | 252 | ||
253 | #ifdef CONFIG_IRQSTACKS | ||
254 | if (pc == (unsigned long) &call_on_stack) { | ||
255 | info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ); | ||
256 | info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET); | ||
223 | return 1; | 257 | return 1; |
224 | } | 258 | } |
259 | #endif | ||
225 | 260 | ||
226 | return 0; | 261 | return 0; |
227 | } | 262 | } |
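
The comment in the new unwind_special() is worth unpacking: on 64-bit parisc a C function pointer refers to a function descriptor (plabel), not to the first instruction, which is why the old code had to peek at fptr[2]. Declaring the assembly entry points as data objects sidesteps the descriptor entirely. A hedged illustration (entry_point_asm and entry_point_func are made-up names):

/* Made-up names, for illustration only. */
extern void * const entry_point_asm;	/* &entry_point_asm is the .text address itself */
extern void entry_point_func(void);	/* &entry_point_func would be a descriptor on 64-bit */

static int pc_hit_entry_point(unsigned long pc)
{
	/* Compare the unwound pc directly against the code address. */
	return pc == (unsigned long)&entry_point_asm;
}

As the patch comment notes, the const qualifier is mainly a hint to the compiler that the symbol lives in .text.
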
@@ -238,34 +273,8 @@ static void unwind_frame_regs(struct unwind_frame_info *info) | |||
238 | if (e == NULL) { | 273 | if (e == NULL) { |
239 | unsigned long sp; | 274 | unsigned long sp; |
240 | 275 | ||
241 | dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip); | 276 | dbg("Cannot find unwind entry for %pS; forced unwinding\n", |
242 | 277 | (void *) info->ip); | |
243 | #ifdef CONFIG_KALLSYMS | ||
244 | /* Handle some frequent special cases.... */ | ||
245 | { | ||
246 | char symname[KSYM_NAME_LEN]; | ||
247 | char *modname; | ||
248 | |||
249 | kallsyms_lookup(info->ip, NULL, NULL, &modname, | ||
250 | symname); | ||
251 | |||
252 | dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname); | ||
253 | |||
254 | if (strcmp(symname, "_switch_to_ret") == 0) { | ||
255 | info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE; | ||
256 | info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET); | ||
257 | dbg("_switch_to_ret @ %lx - setting " | ||
258 | "prev_sp=%lx prev_ip=%lx\n", | ||
259 | info->ip, info->prev_sp, | ||
260 | info->prev_ip); | ||
261 | return; | ||
262 | } else if (strcmp(symname, "ret_from_kernel_thread") == 0 || | ||
263 | strcmp(symname, "syscall_exit") == 0) { | ||
264 | info->prev_ip = info->prev_sp = 0; | ||
265 | return; | ||
266 | } | ||
267 | } | ||
268 | #endif | ||
269 | 278 | ||
270 | /* Since we are doing the unwinding blind, we don't know if | 279 | /* Since we are doing the unwinding blind, we don't know if |
271 | we are adjusting the stack correctly or extracting the rp | 280 | we are adjusting the stack correctly or extracting the rp |
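
Dropping the CONFIG_KALLSYMS block works because the printk pointer extensions already cover both configurations: %px prints the raw, unhashed address (used for the out-of-order warning above), while %pS prints symbol+offset when kallsyms is available and degrades to a plain address otherwise, so unwind.c no longer has to call kallsyms_lookup() itself. A minimal hedged sketch (report_unwind is a made-up helper):

#include <linux/printk.h>

static void report_unwind(const void *entry, unsigned long ip)
{
	/* raw address, deliberately not hashed */
	pr_warn("Out of order unwind entry! %px\n", entry);
	/* symbol+offset with KALLSYMS, plain address without it */
	pr_info("forced unwinding at %pS\n", (void *)ip);
}
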
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
index d4fe19806d57..b53fb6fedf06 100644
--- a/arch/parisc/lib/lusercopy.S
+++ b/arch/parisc/lib/lusercopy.S
@@ -64,9 +64,6 @@ | |||
64 | */ | 64 | */ |
65 | 65 | ||
66 | ENTRY_CFI(lclear_user) | 66 | ENTRY_CFI(lclear_user) |
67 | .proc | ||
68 | .callinfo NO_CALLS | ||
69 | .entry | ||
70 | comib,=,n 0,%r25,$lclu_done | 67 | comib,=,n 0,%r25,$lclu_done |
71 | get_sr | 68 | get_sr |
72 | $lclu_loop: | 69 | $lclu_loop: |
@@ -81,13 +78,9 @@ $lclu_done: | |||
81 | ldo 1(%r25),%r25 | 78 | ldo 1(%r25),%r25 |
82 | 79 | ||
83 | ASM_EXCEPTIONTABLE_ENTRY(1b,2b) | 80 | ASM_EXCEPTIONTABLE_ENTRY(1b,2b) |
84 | |||
85 | .exit | ||
86 | ENDPROC_CFI(lclear_user) | 81 | ENDPROC_CFI(lclear_user) |
87 | 82 | ||
88 | 83 | ||
89 | .procend | ||
90 | |||
91 | /* | 84 | /* |
92 | * long lstrnlen_user(char *s, long n) | 85 | * long lstrnlen_user(char *s, long n) |
93 | * | 86 | * |
@@ -97,9 +90,6 @@ ENDPROC_CFI(lclear_user) | |||
97 | */ | 90 | */ |
98 | 91 | ||
99 | ENTRY_CFI(lstrnlen_user) | 92 | ENTRY_CFI(lstrnlen_user) |
100 | .proc | ||
101 | .callinfo NO_CALLS | ||
102 | .entry | ||
103 | comib,= 0,%r25,$lslen_nzero | 93 | comib,= 0,%r25,$lslen_nzero |
104 | copy %r26,%r24 | 94 | copy %r26,%r24 |
105 | get_sr | 95 | get_sr |
@@ -111,7 +101,6 @@ $lslen_loop: | |||
111 | $lslen_done: | 101 | $lslen_done: |
112 | bv %r0(%r2) | 102 | bv %r0(%r2) |
113 | sub %r26,%r24,%r28 | 103 | sub %r26,%r24,%r28 |
114 | .exit | ||
115 | 104 | ||
116 | $lslen_nzero: | 105 | $lslen_nzero: |
117 | b $lslen_done | 106 | b $lslen_done |
@@ -125,9 +114,6 @@ $lslen_nzero: | |||
125 | 114 | ||
126 | ENDPROC_CFI(lstrnlen_user) | 115 | ENDPROC_CFI(lstrnlen_user) |
127 | 116 | ||
128 | .procend | ||
129 | |||
130 | |||
131 | 117 | ||
132 | /* | 118 | /* |
133 | * unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len) | 119 | * unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len) |
@@ -186,10 +172,6 @@ ENDPROC_CFI(lstrnlen_user) | |||
186 | save_len = r31 | 172 | save_len = r31 |
187 | 173 | ||
188 | ENTRY_CFI(pa_memcpy) | 174 | ENTRY_CFI(pa_memcpy) |
189 | .proc | ||
190 | .callinfo NO_CALLS | ||
191 | .entry | ||
192 | |||
193 | /* Last destination address */ | 175 | /* Last destination address */ |
194 | add dst,len,end | 176 | add dst,len,end |
195 | 177 | ||
@@ -439,9 +421,6 @@ ENTRY_CFI(pa_memcpy) | |||
439 | b .Lcopy_done | 421 | b .Lcopy_done |
440 | 10: stw,ma t1,4(dstspc,dst) | 422 | 10: stw,ma t1,4(dstspc,dst) |
441 | ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done) | 423 | ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done) |
442 | |||
443 | .exit | ||
444 | ENDPROC_CFI(pa_memcpy) | 424 | ENDPROC_CFI(pa_memcpy) |
445 | .procend | ||
446 | 425 | ||
447 | .end | 426 | .end |