diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-01-29 19:40:28 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-01-29 19:40:28 -0500 |
commit | d0bd31dc5c0b46b9c778112900cf8f910ac26e1b (patch) | |
tree | 84998565ff4a1aee4540a986923ba75b63674ea8 /arch/xtensa | |
parent | aca21de2e8355769513c27d1c218e3e8947fe84b (diff) | |
parent | ca47480921587ae30417dd234a9f79af188e3666 (diff) |
Merge tag 'xtensa-20180129' of git://github.com/jcmvbkbc/linux-xtensa
Pull Xtensa updates from Max Filippov:
- add SSP support
- add KASAN support
- improvements to xtensa-specific assembly:
- use ENTRY and ENDPROC consistently
- clean up and unify word alignment macros
- clean up and unify fixup marking
- use 'call' instead of 'callx' where possible
- various cleanups:
- consolidate kernel stack size related definitions
- replace #ifdef'fed/commented out debug printk statements with
pr_debug
- use struct exc_table instead of flat array for exception handling
data
- build kernel with -mtext-section-literals; simplify xtensa linker
script
- fix futex_atomic_cmpxchg_inatomic()
* tag 'xtensa-20180129' of git://github.com/jcmvbkbc/linux-xtensa: (21 commits)
xtensa: fix futex_atomic_cmpxchg_inatomic
xtensa: shut up gcc-8 warnings
xtensa: print kernel sections info in mem_init
xtensa: use generic strncpy_from_user with KASAN
xtensa: use __memset in __xtensa_clear_user
xtensa: add support for KASAN
xtensa: move fixmap and kmap just above the KSEG
xtensa: don't clear swapper_pg_dir in paging_init
xtensa: extract init_kio
xtensa: implement early_trap_init
xtensa: clean up exception handling structure
xtensa: clean up custom-controlled debug output
xtensa: enable stack protector
xtensa: print hardware config ID on startup
xtensa: consolidate kernel stack size related definitions
xtensa: clean up functions in assembly code
xtensa: clean up word alignment macros in assembly code
xtensa: clean up fixups in assembly code
xtensa: use call instead of callx in assembly code
xtensa: build kernel with text-section-literals
...
Diffstat (limited to 'arch/xtensa')
54 files changed, 785 insertions, 625 deletions
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 8bc52f749f20..c921e8bccdc8 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig | |||
@@ -15,6 +15,9 @@ config XTENSA | |||
15 | select GENERIC_IRQ_SHOW | 15 | select GENERIC_IRQ_SHOW |
16 | select GENERIC_PCI_IOMAP | 16 | select GENERIC_PCI_IOMAP |
17 | select GENERIC_SCHED_CLOCK | 17 | select GENERIC_SCHED_CLOCK |
18 | select GENERIC_STRNCPY_FROM_USER if KASAN | ||
19 | select HAVE_ARCH_KASAN if MMU | ||
20 | select HAVE_CC_STACKPROTECTOR | ||
18 | select HAVE_DEBUG_KMEMLEAK | 21 | select HAVE_DEBUG_KMEMLEAK |
19 | select HAVE_DMA_API_DEBUG | 22 | select HAVE_DMA_API_DEBUG |
20 | select HAVE_DMA_CONTIGUOUS | 23 | select HAVE_DMA_CONTIGUOUS |
@@ -79,6 +82,10 @@ config VARIANT_IRQ_SWITCH | |||
79 | config HAVE_XTENSA_GPIO32 | 82 | config HAVE_XTENSA_GPIO32 |
80 | def_bool n | 83 | def_bool n |
81 | 84 | ||
85 | config KASAN_SHADOW_OFFSET | ||
86 | hex | ||
87 | default 0x6e400000 | ||
88 | |||
82 | menu "Processor type and features" | 89 | menu "Processor type and features" |
83 | 90 | ||
84 | choice | 91 | choice |
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile index 7ee02fe4a63d..3a934b72a272 100644 --- a/arch/xtensa/Makefile +++ b/arch/xtensa/Makefile | |||
@@ -42,10 +42,11 @@ export PLATFORM | |||
42 | 42 | ||
43 | # temporarily until string.h is fixed | 43 | # temporarily until string.h is fixed |
44 | KBUILD_CFLAGS += -ffreestanding -D__linux__ | 44 | KBUILD_CFLAGS += -ffreestanding -D__linux__ |
45 | 45 | KBUILD_CFLAGS += -pipe -mlongcalls -mtext-section-literals | |
46 | KBUILD_CFLAGS += -pipe -mlongcalls | ||
47 | |||
48 | KBUILD_CFLAGS += $(call cc-option,-mforce-no-pic,) | 46 | KBUILD_CFLAGS += $(call cc-option,-mforce-no-pic,) |
47 | KBUILD_CFLAGS += $(call cc-option,-mno-serialize-volatile,) | ||
48 | |||
49 | KBUILD_AFLAGS += -mlongcalls -mtext-section-literals | ||
49 | 50 | ||
50 | ifneq ($(CONFIG_LD_NO_RELAX),) | 51 | ifneq ($(CONFIG_LD_NO_RELAX),) |
51 | LDFLAGS := --no-relax | 52 | LDFLAGS := --no-relax |
diff --git a/arch/xtensa/boot/boot-redboot/bootstrap.S b/arch/xtensa/boot/boot-redboot/bootstrap.S index bf7fabe6310d..bbf3b4b080cd 100644 --- a/arch/xtensa/boot/boot-redboot/bootstrap.S +++ b/arch/xtensa/boot/boot-redboot/bootstrap.S | |||
@@ -42,6 +42,7 @@ __start_a0: | |||
42 | .align 4 | 42 | .align 4 |
43 | 43 | ||
44 | .section .text, "ax" | 44 | .section .text, "ax" |
45 | .literal_position | ||
45 | .begin literal_prefix .text | 46 | .begin literal_prefix .text |
46 | 47 | ||
47 | /* put literals in here! */ | 48 | /* put literals in here! */ |
diff --git a/arch/xtensa/boot/lib/Makefile b/arch/xtensa/boot/lib/Makefile index d2a7f48564a4..355127faade1 100644 --- a/arch/xtensa/boot/lib/Makefile +++ b/arch/xtensa/boot/lib/Makefile | |||
@@ -15,6 +15,12 @@ CFLAGS_REMOVE_inftrees.o = -pg | |||
15 | CFLAGS_REMOVE_inffast.o = -pg | 15 | CFLAGS_REMOVE_inffast.o = -pg |
16 | endif | 16 | endif |
17 | 17 | ||
18 | KASAN_SANITIZE := n | ||
19 | |||
20 | CFLAGS_REMOVE_inflate.o += -fstack-protector -fstack-protector-strong | ||
21 | CFLAGS_REMOVE_zmem.o += -fstack-protector -fstack-protector-strong | ||
22 | CFLAGS_REMOVE_inftrees.o += -fstack-protector -fstack-protector-strong | ||
23 | CFLAGS_REMOVE_inffast.o += -fstack-protector -fstack-protector-strong | ||
18 | 24 | ||
19 | quiet_cmd_copy_zlib = COPY $@ | 25 | quiet_cmd_copy_zlib = COPY $@ |
20 | cmd_copy_zlib = cat $< > $@ | 26 | cmd_copy_zlib = cat $< > $@ |
diff --git a/arch/xtensa/include/asm/asmmacro.h b/arch/xtensa/include/asm/asmmacro.h index 746dcc8b5abc..7f2ae5872151 100644 --- a/arch/xtensa/include/asm/asmmacro.h +++ b/arch/xtensa/include/asm/asmmacro.h | |||
@@ -150,5 +150,45 @@ | |||
150 | __endl \ar \as | 150 | __endl \ar \as |
151 | .endm | 151 | .endm |
152 | 152 | ||
153 | /* Load or store instructions that may cause exceptions use the EX macro. */ | ||
154 | |||
155 | #define EX(handler) \ | ||
156 | .section __ex_table, "a"; \ | ||
157 | .word 97f, handler; \ | ||
158 | .previous \ | ||
159 | 97: | ||
160 | |||
161 | |||
162 | /* | ||
163 | * Extract unaligned word that is split between two registers w0 and w1 | ||
164 | * into r regardless of machine endianness. SAR must be loaded with the | ||
165 | * starting bit of the word (see __ssa8). | ||
166 | */ | ||
167 | |||
168 | .macro __src_b r, w0, w1 | ||
169 | #ifdef __XTENSA_EB__ | ||
170 | src \r, \w0, \w1 | ||
171 | #else | ||
172 | src \r, \w1, \w0 | ||
173 | #endif | ||
174 | .endm | ||
175 | |||
176 | /* | ||
177 | * Load 2 lowest address bits of r into SAR for __src_b to extract unaligned | ||
178 | * word starting at r from two registers loaded from consecutive aligned | ||
179 | * addresses covering r regardless of machine endianness. | ||
180 | * | ||
181 | * r 0 1 2 3 | ||
182 | * LE SAR 0 8 16 24 | ||
183 | * BE SAR 32 24 16 8 | ||
184 | */ | ||
185 | |||
186 | .macro __ssa8 r | ||
187 | #ifdef __XTENSA_EB__ | ||
188 | ssa8b \r | ||
189 | #else | ||
190 | ssa8l \r | ||
191 | #endif | ||
192 | .endm | ||
153 | 193 | ||
154 | #endif /* _XTENSA_ASMMACRO_H */ | 194 | #endif /* _XTENSA_ASMMACRO_H */ |
diff --git a/arch/xtensa/include/asm/current.h b/arch/xtensa/include/asm/current.h index 47e46dcf5d49..5d98a7ad4251 100644 --- a/arch/xtensa/include/asm/current.h +++ b/arch/xtensa/include/asm/current.h | |||
@@ -11,6 +11,8 @@ | |||
11 | #ifndef _XTENSA_CURRENT_H | 11 | #ifndef _XTENSA_CURRENT_H |
12 | #define _XTENSA_CURRENT_H | 12 | #define _XTENSA_CURRENT_H |
13 | 13 | ||
14 | #include <asm/thread_info.h> | ||
15 | |||
14 | #ifndef __ASSEMBLY__ | 16 | #ifndef __ASSEMBLY__ |
15 | 17 | ||
16 | #include <linux/thread_info.h> | 18 | #include <linux/thread_info.h> |
@@ -26,8 +28,6 @@ static inline struct task_struct *get_current(void) | |||
26 | 28 | ||
27 | #else | 29 | #else |
28 | 30 | ||
29 | #define CURRENT_SHIFT 13 | ||
30 | |||
31 | #define GET_CURRENT(reg,sp) \ | 31 | #define GET_CURRENT(reg,sp) \ |
32 | GET_THREAD_INFO(reg,sp); \ | 32 | GET_THREAD_INFO(reg,sp); \ |
33 | l32i reg, reg, TI_TASK \ | 33 | l32i reg, reg, TI_TASK \ |
diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h index 0d30403b6c95..7e25c1b50ac0 100644 --- a/arch/xtensa/include/asm/fixmap.h +++ b/arch/xtensa/include/asm/fixmap.h | |||
@@ -44,7 +44,7 @@ enum fixed_addresses { | |||
44 | __end_of_fixed_addresses | 44 | __end_of_fixed_addresses |
45 | }; | 45 | }; |
46 | 46 | ||
47 | #define FIXADDR_TOP (VMALLOC_START - PAGE_SIZE) | 47 | #define FIXADDR_TOP (XCHAL_KSEG_CACHED_VADDR - PAGE_SIZE) |
48 | #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) | 48 | #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) |
49 | #define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK) | 49 | #define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK) |
50 | 50 | ||
@@ -63,7 +63,7 @@ static __always_inline unsigned long fix_to_virt(const unsigned int idx) | |||
63 | * table. | 63 | * table. |
64 | */ | 64 | */ |
65 | BUILD_BUG_ON(FIXADDR_START < | 65 | BUILD_BUG_ON(FIXADDR_START < |
66 | XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE); | 66 | TLBTEMP_BASE_1 + TLBTEMP_SIZE); |
67 | BUILD_BUG_ON(idx >= __end_of_fixed_addresses); | 67 | BUILD_BUG_ON(idx >= __end_of_fixed_addresses); |
68 | return __fix_to_virt(idx); | 68 | return __fix_to_virt(idx); |
69 | } | 69 | } |
diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h index eaaf1ebcc7a4..5bfbc1c401d4 100644 --- a/arch/xtensa/include/asm/futex.h +++ b/arch/xtensa/include/asm/futex.h | |||
@@ -92,7 +92,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |||
92 | u32 oldval, u32 newval) | 92 | u32 oldval, u32 newval) |
93 | { | 93 | { |
94 | int ret = 0; | 94 | int ret = 0; |
95 | u32 prev; | ||
96 | 95 | ||
97 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | 96 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
98 | return -EFAULT; | 97 | return -EFAULT; |
@@ -103,26 +102,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |||
103 | 102 | ||
104 | __asm__ __volatile__ ( | 103 | __asm__ __volatile__ ( |
105 | " # futex_atomic_cmpxchg_inatomic\n" | 104 | " # futex_atomic_cmpxchg_inatomic\n" |
106 | "1: l32i %1, %3, 0\n" | 105 | " wsr %5, scompare1\n" |
107 | " mov %0, %5\n" | 106 | "1: s32c1i %1, %4, 0\n" |
108 | " wsr %1, scompare1\n" | 107 | " s32i %1, %6, 0\n" |
109 | "2: s32c1i %0, %3, 0\n" | 108 | "2:\n" |
110 | "3:\n" | ||
111 | " .section .fixup,\"ax\"\n" | 109 | " .section .fixup,\"ax\"\n" |
112 | " .align 4\n" | 110 | " .align 4\n" |
113 | "4: .long 3b\n" | 111 | "3: .long 2b\n" |
114 | "5: l32r %1, 4b\n" | 112 | "4: l32r %1, 3b\n" |
115 | " movi %0, %6\n" | 113 | " movi %0, %7\n" |
116 | " jx %1\n" | 114 | " jx %1\n" |
117 | " .previous\n" | 115 | " .previous\n" |
118 | " .section __ex_table,\"a\"\n" | 116 | " .section __ex_table,\"a\"\n" |
119 | " .long 1b,5b,2b,5b\n" | 117 | " .long 1b,4b\n" |
120 | " .previous\n" | 118 | " .previous\n" |
121 | : "+r" (ret), "=&r" (prev), "+m" (*uaddr) | 119 | : "+r" (ret), "+r" (newval), "+m" (*uaddr), "+m" (*uval) |
122 | : "r" (uaddr), "r" (oldval), "r" (newval), "I" (-EFAULT) | 120 | : "r" (uaddr), "r" (oldval), "r" (uval), "I" (-EFAULT) |
123 | : "memory"); | 121 | : "memory"); |
124 | 122 | ||
125 | *uval = prev; | ||
126 | return ret; | 123 | return ret; |
127 | } | 124 | } |
128 | 125 | ||
diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h index 6e070db1022e..04e9340eac4b 100644 --- a/arch/xtensa/include/asm/highmem.h +++ b/arch/xtensa/include/asm/highmem.h | |||
@@ -72,7 +72,7 @@ static inline void *kmap(struct page *page) | |||
72 | * page table. | 72 | * page table. |
73 | */ | 73 | */ |
74 | BUILD_BUG_ON(PKMAP_BASE < | 74 | BUILD_BUG_ON(PKMAP_BASE < |
75 | XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE); | 75 | TLBTEMP_BASE_1 + TLBTEMP_SIZE); |
76 | BUG_ON(in_interrupt()); | 76 | BUG_ON(in_interrupt()); |
77 | if (!PageHighMem(page)) | 77 | if (!PageHighMem(page)) |
78 | return page_address(page); | 78 | return page_address(page); |
diff --git a/arch/xtensa/include/asm/kasan.h b/arch/xtensa/include/asm/kasan.h new file mode 100644 index 000000000000..54be80876e57 --- /dev/null +++ b/arch/xtensa/include/asm/kasan.h | |||
@@ -0,0 +1,37 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | #ifndef __ASM_KASAN_H | ||
3 | #define __ASM_KASAN_H | ||
4 | |||
5 | #ifndef __ASSEMBLY__ | ||
6 | |||
7 | #ifdef CONFIG_KASAN | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/sizes.h> | ||
11 | #include <asm/kmem_layout.h> | ||
12 | |||
13 | /* Start of area covered by KASAN */ | ||
14 | #define KASAN_START_VADDR __XTENSA_UL_CONST(0x90000000) | ||
15 | /* Start of the shadow map */ | ||
16 | #define KASAN_SHADOW_START (XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE) | ||
17 | /* Size of the shadow map */ | ||
18 | #define KASAN_SHADOW_SIZE (-KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT) | ||
19 | /* Offset for mem to shadow address transformation */ | ||
20 | #define KASAN_SHADOW_OFFSET __XTENSA_UL_CONST(CONFIG_KASAN_SHADOW_OFFSET) | ||
21 | |||
22 | void __init kasan_early_init(void); | ||
23 | void __init kasan_init(void); | ||
24 | |||
25 | #else | ||
26 | |||
27 | static inline void kasan_early_init(void) | ||
28 | { | ||
29 | } | ||
30 | |||
31 | static inline void kasan_init(void) | ||
32 | { | ||
33 | } | ||
34 | |||
35 | #endif | ||
36 | #endif | ||
37 | #endif | ||
diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h index 561f8729bcde..2317c835a4db 100644 --- a/arch/xtensa/include/asm/kmem_layout.h +++ b/arch/xtensa/include/asm/kmem_layout.h | |||
@@ -71,4 +71,11 @@ | |||
71 | 71 | ||
72 | #endif | 72 | #endif |
73 | 73 | ||
74 | #ifndef CONFIG_KASAN | ||
75 | #define KERNEL_STACK_SHIFT 13 | ||
76 | #else | ||
77 | #define KERNEL_STACK_SHIFT 15 | ||
78 | #endif | ||
79 | #define KERNEL_STACK_SIZE (1 << KERNEL_STACK_SHIFT) | ||
80 | |||
74 | #endif | 81 | #endif |
diff --git a/arch/xtensa/include/asm/linkage.h b/arch/xtensa/include/asm/linkage.h new file mode 100644 index 000000000000..0ba9973235d9 --- /dev/null +++ b/arch/xtensa/include/asm/linkage.h | |||
@@ -0,0 +1,9 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | |||
3 | #ifndef __ASM_LINKAGE_H | ||
4 | #define __ASM_LINKAGE_H | ||
5 | |||
6 | #define __ALIGN .align 4 | ||
7 | #define __ALIGN_STR ".align 4" | ||
8 | |||
9 | #endif | ||
diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h index f7e186dfc4e4..de5e6cbbafe4 100644 --- a/arch/xtensa/include/asm/mmu_context.h +++ b/arch/xtensa/include/asm/mmu_context.h | |||
@@ -52,6 +52,7 @@ DECLARE_PER_CPU(unsigned long, asid_cache); | |||
52 | #define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8)) | 52 | #define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8)) |
53 | 53 | ||
54 | void init_mmu(void); | 54 | void init_mmu(void); |
55 | void init_kio(void); | ||
55 | 56 | ||
56 | static inline void set_rasid_register (unsigned long val) | 57 | static inline void set_rasid_register (unsigned long val) |
57 | { | 58 | { |
diff --git a/arch/xtensa/include/asm/nommu_context.h b/arch/xtensa/include/asm/nommu_context.h index 2cebdbbdb633..37251b2ef871 100644 --- a/arch/xtensa/include/asm/nommu_context.h +++ b/arch/xtensa/include/asm/nommu_context.h | |||
@@ -3,6 +3,10 @@ static inline void init_mmu(void) | |||
3 | { | 3 | { |
4 | } | 4 | } |
5 | 5 | ||
6 | static inline void init_kio(void) | ||
7 | { | ||
8 | } | ||
9 | |||
6 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | 10 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) |
7 | { | 11 | { |
8 | } | 12 | } |
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h index 4ddbfd57a7c8..5d69c11c01b8 100644 --- a/arch/xtensa/include/asm/page.h +++ b/arch/xtensa/include/asm/page.h | |||
@@ -36,8 +36,6 @@ | |||
36 | #define MAX_LOW_PFN PHYS_PFN(0xfffffffful) | 36 | #define MAX_LOW_PFN PHYS_PFN(0xfffffffful) |
37 | #endif | 37 | #endif |
38 | 38 | ||
39 | #define PGTABLE_START 0x80000000 | ||
40 | |||
41 | /* | 39 | /* |
42 | * Cache aliasing: | 40 | * Cache aliasing: |
43 | * | 41 | * |
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index 30dd5b2e4ad5..38802259978f 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h | |||
@@ -12,9 +12,9 @@ | |||
12 | #define _XTENSA_PGTABLE_H | 12 | #define _XTENSA_PGTABLE_H |
13 | 13 | ||
14 | #define __ARCH_USE_5LEVEL_HACK | 14 | #define __ARCH_USE_5LEVEL_HACK |
15 | #include <asm-generic/pgtable-nopmd.h> | ||
16 | #include <asm/page.h> | 15 | #include <asm/page.h> |
17 | #include <asm/kmem_layout.h> | 16 | #include <asm/kmem_layout.h> |
17 | #include <asm-generic/pgtable-nopmd.h> | ||
18 | 18 | ||
19 | /* | 19 | /* |
20 | * We only use two ring levels, user and kernel space. | 20 | * We only use two ring levels, user and kernel space. |
@@ -170,6 +170,7 @@ | |||
170 | #define PAGE_SHARED_EXEC \ | 170 | #define PAGE_SHARED_EXEC \ |
171 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC) | 171 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC) |
172 | #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE) | 172 | #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE) |
173 | #define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT) | ||
173 | #define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC) | 174 | #define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC) |
174 | 175 | ||
175 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) | 176 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) |
diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h index e2d9c5eb10bd..3a5c5918aea3 100644 --- a/arch/xtensa/include/asm/ptrace.h +++ b/arch/xtensa/include/asm/ptrace.h | |||
@@ -10,6 +10,7 @@ | |||
10 | #ifndef _XTENSA_PTRACE_H | 10 | #ifndef _XTENSA_PTRACE_H |
11 | #define _XTENSA_PTRACE_H | 11 | #define _XTENSA_PTRACE_H |
12 | 12 | ||
13 | #include <asm/kmem_layout.h> | ||
13 | #include <uapi/asm/ptrace.h> | 14 | #include <uapi/asm/ptrace.h> |
14 | 15 | ||
15 | /* | 16 | /* |
@@ -38,20 +39,6 @@ | |||
38 | * +-----------------------+ -------- | 39 | * +-----------------------+ -------- |
39 | */ | 40 | */ |
40 | 41 | ||
41 | #define KERNEL_STACK_SIZE (2 * PAGE_SIZE) | ||
42 | |||
43 | /* Offsets for exception_handlers[] (3 x 64-entries x 4-byte tables). */ | ||
44 | |||
45 | #define EXC_TABLE_KSTK 0x004 /* Kernel Stack */ | ||
46 | #define EXC_TABLE_DOUBLE_SAVE 0x008 /* Double exception save area for a0 */ | ||
47 | #define EXC_TABLE_FIXUP 0x00c /* Fixup handler */ | ||
48 | #define EXC_TABLE_PARAM 0x010 /* For passing a parameter to fixup */ | ||
49 | #define EXC_TABLE_SYSCALL_SAVE 0x014 /* For fast syscall handler */ | ||
50 | #define EXC_TABLE_FAST_USER 0x100 /* Fast user exception handler */ | ||
51 | #define EXC_TABLE_FAST_KERNEL 0x200 /* Fast kernel exception handler */ | ||
52 | #define EXC_TABLE_DEFAULT 0x300 /* Default C-Handler */ | ||
53 | #define EXC_TABLE_SIZE 0x400 | ||
54 | |||
55 | #ifndef __ASSEMBLY__ | 42 | #ifndef __ASSEMBLY__ |
56 | 43 | ||
57 | #include <asm/coprocessor.h> | 44 | #include <asm/coprocessor.h> |
diff --git a/arch/xtensa/include/asm/regs.h b/arch/xtensa/include/asm/regs.h index 881a1134a4b4..477594e5817f 100644 --- a/arch/xtensa/include/asm/regs.h +++ b/arch/xtensa/include/asm/regs.h | |||
@@ -76,6 +76,7 @@ | |||
76 | #define EXCCAUSE_COPROCESSOR5_DISABLED 37 | 76 | #define EXCCAUSE_COPROCESSOR5_DISABLED 37 |
77 | #define EXCCAUSE_COPROCESSOR6_DISABLED 38 | 77 | #define EXCCAUSE_COPROCESSOR6_DISABLED 38 |
78 | #define EXCCAUSE_COPROCESSOR7_DISABLED 39 | 78 | #define EXCCAUSE_COPROCESSOR7_DISABLED 39 |
79 | #define EXCCAUSE_N 64 | ||
79 | 80 | ||
80 | /* PS register fields. */ | 81 | /* PS register fields. */ |
81 | 82 | ||
diff --git a/arch/xtensa/include/asm/stackprotector.h b/arch/xtensa/include/asm/stackprotector.h new file mode 100644 index 000000000000..e368f94fd2af --- /dev/null +++ b/arch/xtensa/include/asm/stackprotector.h | |||
@@ -0,0 +1,40 @@ | |||
1 | /* | ||
2 | * GCC stack protector support. | ||
3 | * | ||
4 | * (This is directly adopted from the ARM implementation) | ||
5 | * | ||
6 | * Stack protector works by putting predefined pattern at the start of | ||
7 | * the stack frame and verifying that it hasn't been overwritten when | ||
8 | * returning from the function. The pattern is called stack canary | ||
9 | * and gcc expects it to be defined by a global variable called | ||
10 | * "__stack_chk_guard" on Xtensa. This unfortunately means that on SMP | ||
11 | * we cannot have a different canary value per task. | ||
12 | */ | ||
13 | |||
14 | #ifndef _ASM_STACKPROTECTOR_H | ||
15 | #define _ASM_STACKPROTECTOR_H 1 | ||
16 | |||
17 | #include <linux/random.h> | ||
18 | #include <linux/version.h> | ||
19 | |||
20 | extern unsigned long __stack_chk_guard; | ||
21 | |||
22 | /* | ||
23 | * Initialize the stackprotector canary value. | ||
24 | * | ||
25 | * NOTE: this must only be called from functions that never return, | ||
26 | * and it must always be inlined. | ||
27 | */ | ||
28 | static __always_inline void boot_init_stack_canary(void) | ||
29 | { | ||
30 | unsigned long canary; | ||
31 | |||
32 | /* Try to get a semi random initial value. */ | ||
33 | get_random_bytes(&canary, sizeof(canary)); | ||
34 | canary ^= LINUX_VERSION_CODE; | ||
35 | |||
36 | current->stack_canary = canary; | ||
37 | __stack_chk_guard = current->stack_canary; | ||
38 | } | ||
39 | |||
40 | #endif /* _ASM_STACKPROTECTOR_H */ | ||
diff --git a/arch/xtensa/include/asm/string.h b/arch/xtensa/include/asm/string.h index 8d5d9dfadb09..89b51a0c752f 100644 --- a/arch/xtensa/include/asm/string.h +++ b/arch/xtensa/include/asm/string.h | |||
@@ -53,7 +53,7 @@ static inline char *strncpy(char *__dest, const char *__src, size_t __n) | |||
53 | "bne %1, %5, 1b\n" | 53 | "bne %1, %5, 1b\n" |
54 | "2:" | 54 | "2:" |
55 | : "=r" (__dest), "=r" (__src), "=&r" (__dummy) | 55 | : "=r" (__dest), "=r" (__src), "=&r" (__dummy) |
56 | : "0" (__dest), "1" (__src), "r" (__src+__n) | 56 | : "0" (__dest), "1" (__src), "r" ((uintptr_t)__src+__n) |
57 | : "memory"); | 57 | : "memory"); |
58 | 58 | ||
59 | return __xdest; | 59 | return __xdest; |
@@ -101,21 +101,40 @@ static inline int strncmp(const char *__cs, const char *__ct, size_t __n) | |||
101 | "2:\n\t" | 101 | "2:\n\t" |
102 | "sub %2, %2, %3" | 102 | "sub %2, %2, %3" |
103 | : "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy) | 103 | : "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy) |
104 | : "0" (__cs), "1" (__ct), "r" (__cs+__n)); | 104 | : "0" (__cs), "1" (__ct), "r" ((uintptr_t)__cs+__n)); |
105 | 105 | ||
106 | return __res; | 106 | return __res; |
107 | } | 107 | } |
108 | 108 | ||
109 | #define __HAVE_ARCH_MEMSET | 109 | #define __HAVE_ARCH_MEMSET |
110 | extern void *memset(void *__s, int __c, size_t __count); | 110 | extern void *memset(void *__s, int __c, size_t __count); |
111 | extern void *__memset(void *__s, int __c, size_t __count); | ||
111 | 112 | ||
112 | #define __HAVE_ARCH_MEMCPY | 113 | #define __HAVE_ARCH_MEMCPY |
113 | extern void *memcpy(void *__to, __const__ void *__from, size_t __n); | 114 | extern void *memcpy(void *__to, __const__ void *__from, size_t __n); |
115 | extern void *__memcpy(void *__to, __const__ void *__from, size_t __n); | ||
114 | 116 | ||
115 | #define __HAVE_ARCH_MEMMOVE | 117 | #define __HAVE_ARCH_MEMMOVE |
116 | extern void *memmove(void *__dest, __const__ void *__src, size_t __n); | 118 | extern void *memmove(void *__dest, __const__ void *__src, size_t __n); |
119 | extern void *__memmove(void *__dest, __const__ void *__src, size_t __n); | ||
117 | 120 | ||
118 | /* Don't build bcopy at all ... */ | 121 | /* Don't build bcopy at all ... */ |
119 | #define __HAVE_ARCH_BCOPY | 122 | #define __HAVE_ARCH_BCOPY |
120 | 123 | ||
124 | #if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) | ||
125 | |||
126 | /* | ||
127 | * For files that are not instrumented (e.g. mm/slub.c) we | ||
128 | * should use not instrumented version of mem* functions. | ||
129 | */ | ||
130 | |||
131 | #define memcpy(dst, src, len) __memcpy(dst, src, len) | ||
132 | #define memmove(dst, src, len) __memmove(dst, src, len) | ||
133 | #define memset(s, c, n) __memset(s, c, n) | ||
134 | |||
135 | #ifndef __NO_FORTIFY | ||
136 | #define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */ | ||
137 | #endif | ||
138 | #endif | ||
139 | |||
121 | #endif /* _XTENSA_STRING_H */ | 140 | #endif /* _XTENSA_STRING_H */ |
diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h index 2ccd37510aaa..2bd19ae61e47 100644 --- a/arch/xtensa/include/asm/thread_info.h +++ b/arch/xtensa/include/asm/thread_info.h | |||
@@ -11,7 +11,9 @@ | |||
11 | #ifndef _XTENSA_THREAD_INFO_H | 11 | #ifndef _XTENSA_THREAD_INFO_H |
12 | #define _XTENSA_THREAD_INFO_H | 12 | #define _XTENSA_THREAD_INFO_H |
13 | 13 | ||
14 | #ifdef __KERNEL__ | 14 | #include <asm/kmem_layout.h> |
15 | |||
16 | #define CURRENT_SHIFT KERNEL_STACK_SHIFT | ||
15 | 17 | ||
16 | #ifndef __ASSEMBLY__ | 18 | #ifndef __ASSEMBLY__ |
17 | # include <asm/processor.h> | 19 | # include <asm/processor.h> |
@@ -81,7 +83,7 @@ struct thread_info { | |||
81 | static inline struct thread_info *current_thread_info(void) | 83 | static inline struct thread_info *current_thread_info(void) |
82 | { | 84 | { |
83 | struct thread_info *ti; | 85 | struct thread_info *ti; |
84 | __asm__("extui %0,a1,0,13\n\t" | 86 | __asm__("extui %0, a1, 0, "__stringify(CURRENT_SHIFT)"\n\t" |
85 | "xor %0, a1, %0" : "=&r" (ti) : ); | 87 | "xor %0, a1, %0" : "=&r" (ti) : ); |
86 | return ti; | 88 | return ti; |
87 | } | 89 | } |
@@ -90,7 +92,7 @@ static inline struct thread_info *current_thread_info(void) | |||
90 | 92 | ||
91 | /* how to get the thread information struct from ASM */ | 93 | /* how to get the thread information struct from ASM */ |
92 | #define GET_THREAD_INFO(reg,sp) \ | 94 | #define GET_THREAD_INFO(reg,sp) \ |
93 | extui reg, sp, 0, 13; \ | 95 | extui reg, sp, 0, CURRENT_SHIFT; \ |
94 | xor reg, sp, reg | 96 | xor reg, sp, reg |
95 | #endif | 97 | #endif |
96 | 98 | ||
@@ -127,8 +129,7 @@ static inline struct thread_info *current_thread_info(void) | |||
127 | */ | 129 | */ |
128 | #define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */ | 130 | #define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */ |
129 | 131 | ||
130 | #define THREAD_SIZE 8192 //(2*PAGE_SIZE) | 132 | #define THREAD_SIZE KERNEL_STACK_SIZE |
131 | #define THREAD_SIZE_ORDER 1 | 133 | #define THREAD_SIZE_ORDER (KERNEL_STACK_SHIFT - PAGE_SHIFT) |
132 | 134 | ||
133 | #endif /* __KERNEL__ */ | ||
134 | #endif /* _XTENSA_THREAD_INFO */ | 135 | #endif /* _XTENSA_THREAD_INFO */ |
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h index 2e69aa4b843f..f5cd7a7e65e0 100644 --- a/arch/xtensa/include/asm/traps.h +++ b/arch/xtensa/include/asm/traps.h | |||
@@ -13,12 +13,47 @@ | |||
13 | #include <asm/ptrace.h> | 13 | #include <asm/ptrace.h> |
14 | 14 | ||
15 | /* | 15 | /* |
16 | * Per-CPU exception handling data structure. | ||
17 | * EXCSAVE1 points to it. | ||
18 | */ | ||
19 | struct exc_table { | ||
20 | /* Kernel Stack */ | ||
21 | void *kstk; | ||
22 | /* Double exception save area for a0 */ | ||
23 | unsigned long double_save; | ||
24 | /* Fixup handler */ | ||
25 | void *fixup; | ||
26 | /* For passing a parameter to fixup */ | ||
27 | void *fixup_param; | ||
28 | /* For fast syscall handler */ | ||
29 | unsigned long syscall_save; | ||
30 | /* Fast user exception handlers */ | ||
31 | void *fast_user_handler[EXCCAUSE_N]; | ||
32 | /* Fast kernel exception handlers */ | ||
33 | void *fast_kernel_handler[EXCCAUSE_N]; | ||
34 | /* Default C-Handlers */ | ||
35 | void *default_handler[EXCCAUSE_N]; | ||
36 | }; | ||
37 | |||
38 | /* | ||
16 | * handler must be either of the following: | 39 | * handler must be either of the following: |
17 | * void (*)(struct pt_regs *regs); | 40 | * void (*)(struct pt_regs *regs); |
18 | * void (*)(struct pt_regs *regs, unsigned long exccause); | 41 | * void (*)(struct pt_regs *regs, unsigned long exccause); |
19 | */ | 42 | */ |
20 | extern void * __init trap_set_handler(int cause, void *handler); | 43 | extern void * __init trap_set_handler(int cause, void *handler); |
21 | extern void do_unhandled(struct pt_regs *regs, unsigned long exccause); | 44 | extern void do_unhandled(struct pt_regs *regs, unsigned long exccause); |
45 | void fast_second_level_miss(void); | ||
46 | |||
47 | /* Initialize minimal exc_table structure sufficient for basic paging */ | ||
48 | static inline void __init early_trap_init(void) | ||
49 | { | ||
50 | static struct exc_table exc_table __initdata = { | ||
51 | .fast_kernel_handler[EXCCAUSE_DTLB_MISS] = | ||
52 | fast_second_level_miss, | ||
53 | }; | ||
54 | __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (&exc_table)); | ||
55 | } | ||
56 | |||
22 | void secondary_trap_init(void); | 57 | void secondary_trap_init(void); |
23 | 58 | ||
24 | static inline void spill_registers(void) | 59 | static inline void spill_registers(void) |
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index b8f152b6aaa5..f1158b4c629c 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h | |||
@@ -44,6 +44,8 @@ | |||
44 | #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) | 44 | #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) |
45 | #define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size)) | 45 | #define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size)) |
46 | 46 | ||
47 | #define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE) | ||
48 | |||
47 | /* | 49 | /* |
48 | * These are the main single-value transfer routines. They | 50 | * These are the main single-value transfer routines. They |
49 | * automatically use the right size if we just have the right pointer | 51 | * automatically use the right size if we just have the right pointer |
@@ -261,7 +263,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n) | |||
261 | static inline unsigned long | 263 | static inline unsigned long |
262 | __xtensa_clear_user(void *addr, unsigned long size) | 264 | __xtensa_clear_user(void *addr, unsigned long size) |
263 | { | 265 | { |
264 | if ( ! memset(addr, 0, size) ) | 266 | if (!__memset(addr, 0, size)) |
265 | return size; | 267 | return size; |
266 | return 0; | 268 | return 0; |
267 | } | 269 | } |
@@ -277,6 +279,8 @@ clear_user(void *addr, unsigned long size) | |||
277 | #define __clear_user __xtensa_clear_user | 279 | #define __clear_user __xtensa_clear_user |
278 | 280 | ||
279 | 281 | ||
282 | #ifndef CONFIG_GENERIC_STRNCPY_FROM_USER | ||
283 | |||
280 | extern long __strncpy_user(char *, const char *, long); | 284 | extern long __strncpy_user(char *, const char *, long); |
281 | 285 | ||
282 | static inline long | 286 | static inline long |
@@ -286,6 +290,9 @@ strncpy_from_user(char *dst, const char *src, long count) | |||
286 | return __strncpy_user(dst, src, count); | 290 | return __strncpy_user(dst, src, count); |
287 | return -EFAULT; | 291 | return -EFAULT; |
288 | } | 292 | } |
293 | #else | ||
294 | long strncpy_from_user(char *dst, const char *src, long count); | ||
295 | #endif | ||
289 | 296 | ||
290 | /* | 297 | /* |
291 | * Return the size of a string (including the ending 0!) | 298 | * Return the size of a string (including the ending 0!) |
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile index bb8d55775a97..91907590d183 100644 --- a/arch/xtensa/kernel/Makefile +++ b/arch/xtensa/kernel/Makefile | |||
@@ -17,9 +17,6 @@ obj-$(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) += perf_event.o | |||
17 | obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o | 17 | obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o |
18 | obj-$(CONFIG_S32C1I_SELFTEST) += s32c1i_selftest.o | 18 | obj-$(CONFIG_S32C1I_SELFTEST) += s32c1i_selftest.o |
19 | 19 | ||
20 | AFLAGS_head.o += -mtext-section-literals | ||
21 | AFLAGS_mxhead.o += -mtext-section-literals | ||
22 | |||
23 | # In the Xtensa architecture, assembly generates literals which must always | 20 | # In the Xtensa architecture, assembly generates literals which must always |
24 | # precede the L32R instruction with a relative offset less than 256 kB. | 21 | # precede the L32R instruction with a relative offset less than 256 kB. |
25 | # Therefore, the .text and .literal section must be combined in parenthesis | 22 | # Therefore, the .text and .literal section must be combined in parenthesis |
diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S index 890004af03a9..9301452e521e 100644 --- a/arch/xtensa/kernel/align.S +++ b/arch/xtensa/kernel/align.S | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/linkage.h> | 19 | #include <linux/linkage.h> |
20 | #include <asm/current.h> | 20 | #include <asm/current.h> |
21 | #include <asm/asm-offsets.h> | 21 | #include <asm/asm-offsets.h> |
22 | #include <asm/asmmacro.h> | ||
22 | #include <asm/processor.h> | 23 | #include <asm/processor.h> |
23 | 24 | ||
24 | #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION | 25 | #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION |
@@ -66,8 +67,6 @@ | |||
66 | #define INSN_T 24 | 67 | #define INSN_T 24 |
67 | #define INSN_OP1 16 | 68 | #define INSN_OP1 16 |
68 | 69 | ||
69 | .macro __src_b r, w0, w1; src \r, \w0, \w1; .endm | ||
70 | .macro __ssa8 r; ssa8b \r; .endm | ||
71 | .macro __ssa8r r; ssa8l \r; .endm | 70 | .macro __ssa8r r; ssa8l \r; .endm |
72 | .macro __sh r, s; srl \r, \s; .endm | 71 | .macro __sh r, s; srl \r, \s; .endm |
73 | .macro __sl r, s; sll \r, \s; .endm | 72 | .macro __sl r, s; sll \r, \s; .endm |
@@ -81,8 +80,6 @@ | |||
81 | #define INSN_T 4 | 80 | #define INSN_T 4 |
82 | #define INSN_OP1 12 | 81 | #define INSN_OP1 12 |
83 | 82 | ||
84 | .macro __src_b r, w0, w1; src \r, \w1, \w0; .endm | ||
85 | .macro __ssa8 r; ssa8l \r; .endm | ||
86 | .macro __ssa8r r; ssa8b \r; .endm | 83 | .macro __ssa8r r; ssa8b \r; .endm |
87 | .macro __sh r, s; sll \r, \s; .endm | 84 | .macro __sh r, s; sll \r, \s; .endm |
88 | .macro __sl r, s; srl \r, \s; .endm | 85 | .macro __sl r, s; srl \r, \s; .endm |
@@ -155,7 +152,7 @@ | |||
155 | * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception | 152 | * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception |
156 | */ | 153 | */ |
157 | 154 | ||
158 | 155 | .literal_position | |
159 | ENTRY(fast_unaligned) | 156 | ENTRY(fast_unaligned) |
160 | 157 | ||
161 | /* Note: We don't expect the address to be aligned on a word | 158 | /* Note: We don't expect the address to be aligned on a word |
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c index bcb5beb81177..022cf918ec20 100644 --- a/arch/xtensa/kernel/asm-offsets.c +++ b/arch/xtensa/kernel/asm-offsets.c | |||
@@ -76,6 +76,9 @@ int main(void) | |||
76 | DEFINE(TASK_PID, offsetof (struct task_struct, pid)); | 76 | DEFINE(TASK_PID, offsetof (struct task_struct, pid)); |
77 | DEFINE(TASK_THREAD, offsetof (struct task_struct, thread)); | 77 | DEFINE(TASK_THREAD, offsetof (struct task_struct, thread)); |
78 | DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, stack)); | 78 | DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, stack)); |
79 | #ifdef CONFIG_CC_STACKPROTECTOR | ||
80 | DEFINE(TASK_STACK_CANARY, offsetof(struct task_struct, stack_canary)); | ||
81 | #endif | ||
79 | DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct)); | 82 | DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct)); |
80 | 83 | ||
81 | /* offsets in thread_info struct */ | 84 | /* offsets in thread_info struct */ |
@@ -129,5 +132,18 @@ int main(void) | |||
129 | offsetof(struct debug_table, icount_level_save)); | 132 | offsetof(struct debug_table, icount_level_save)); |
130 | #endif | 133 | #endif |
131 | 134 | ||
135 | /* struct exc_table */ | ||
136 | DEFINE(EXC_TABLE_KSTK, offsetof(struct exc_table, kstk)); | ||
137 | DEFINE(EXC_TABLE_DOUBLE_SAVE, offsetof(struct exc_table, double_save)); | ||
138 | DEFINE(EXC_TABLE_FIXUP, offsetof(struct exc_table, fixup)); | ||
139 | DEFINE(EXC_TABLE_PARAM, offsetof(struct exc_table, fixup_param)); | ||
140 | DEFINE(EXC_TABLE_SYSCALL_SAVE, | ||
141 | offsetof(struct exc_table, syscall_save)); | ||
142 | DEFINE(EXC_TABLE_FAST_USER, | ||
143 | offsetof(struct exc_table, fast_user_handler)); | ||
144 | DEFINE(EXC_TABLE_FAST_KERNEL, | ||
145 | offsetof(struct exc_table, fast_kernel_handler)); | ||
146 | DEFINE(EXC_TABLE_DEFAULT, offsetof(struct exc_table, default_handler)); | ||
147 | |||
132 | return 0; | 148 | return 0; |
133 | } | 149 | } |
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S index 3a98503ad11a..4f8b52d575a2 100644 --- a/arch/xtensa/kernel/coprocessor.S +++ b/arch/xtensa/kernel/coprocessor.S | |||
@@ -212,8 +212,7 @@ ENDPROC(coprocessor_restore) | |||
212 | ENTRY(fast_coprocessor_double) | 212 | ENTRY(fast_coprocessor_double) |
213 | 213 | ||
214 | wsr a0, excsave1 | 214 | wsr a0, excsave1 |
215 | movi a0, unrecoverable_exception | 215 | call0 unrecoverable_exception |
216 | callx0 a0 | ||
217 | 216 | ||
218 | ENDPROC(fast_coprocessor_double) | 217 | ENDPROC(fast_coprocessor_double) |
219 | 218 | ||
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index 37a239556889..5caff0744f3c 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S | |||
@@ -14,6 +14,7 @@ | |||
14 | 14 | ||
15 | #include <linux/linkage.h> | 15 | #include <linux/linkage.h> |
16 | #include <asm/asm-offsets.h> | 16 | #include <asm/asm-offsets.h> |
17 | #include <asm/asmmacro.h> | ||
17 | #include <asm/processor.h> | 18 | #include <asm/processor.h> |
18 | #include <asm/coprocessor.h> | 19 | #include <asm/coprocessor.h> |
19 | #include <asm/thread_info.h> | 20 | #include <asm/thread_info.h> |
@@ -125,6 +126,7 @@ | |||
125 | * | 126 | * |
126 | * Note: _user_exception might be at an odd address. Don't use call0..call12 | 127 | * Note: _user_exception might be at an odd address. Don't use call0..call12 |
127 | */ | 128 | */ |
129 | .literal_position | ||
128 | 130 | ||
129 | ENTRY(user_exception) | 131 | ENTRY(user_exception) |
130 | 132 | ||
@@ -475,8 +477,7 @@ common_exception_return: | |||
475 | 1: | 477 | 1: |
476 | irq_save a2, a3 | 478 | irq_save a2, a3 |
477 | #ifdef CONFIG_TRACE_IRQFLAGS | 479 | #ifdef CONFIG_TRACE_IRQFLAGS |
478 | movi a4, trace_hardirqs_off | 480 | call4 trace_hardirqs_off |
479 | callx4 a4 | ||
480 | #endif | 481 | #endif |
481 | 482 | ||
482 | /* Jump if we are returning from kernel exceptions. */ | 483 | /* Jump if we are returning from kernel exceptions. */ |
@@ -503,24 +504,20 @@ common_exception_return: | |||
503 | /* Call do_signal() */ | 504 | /* Call do_signal() */ |
504 | 505 | ||
505 | #ifdef CONFIG_TRACE_IRQFLAGS | 506 | #ifdef CONFIG_TRACE_IRQFLAGS |
506 | movi a4, trace_hardirqs_on | 507 | call4 trace_hardirqs_on |
507 | callx4 a4 | ||
508 | #endif | 508 | #endif |
509 | rsil a2, 0 | 509 | rsil a2, 0 |
510 | movi a4, do_notify_resume # int do_notify_resume(struct pt_regs*) | ||
511 | mov a6, a1 | 510 | mov a6, a1 |
512 | callx4 a4 | 511 | call4 do_notify_resume # int do_notify_resume(struct pt_regs*) |
513 | j 1b | 512 | j 1b |
514 | 513 | ||
515 | 3: /* Reschedule */ | 514 | 3: /* Reschedule */ |
516 | 515 | ||
517 | #ifdef CONFIG_TRACE_IRQFLAGS | 516 | #ifdef CONFIG_TRACE_IRQFLAGS |
518 | movi a4, trace_hardirqs_on | 517 | call4 trace_hardirqs_on |
519 | callx4 a4 | ||
520 | #endif | 518 | #endif |
521 | rsil a2, 0 | 519 | rsil a2, 0 |
522 | movi a4, schedule # void schedule (void) | 520 | call4 schedule # void schedule (void) |
523 | callx4 a4 | ||
524 | j 1b | 521 | j 1b |
525 | 522 | ||
526 | #ifdef CONFIG_PREEMPT | 523 | #ifdef CONFIG_PREEMPT |
@@ -531,8 +528,7 @@ common_exception_return: | |||
531 | 528 | ||
532 | l32i a4, a2, TI_PRE_COUNT | 529 | l32i a4, a2, TI_PRE_COUNT |
533 | bnez a4, 4f | 530 | bnez a4, 4f |
534 | movi a4, preempt_schedule_irq | 531 | call4 preempt_schedule_irq |
535 | callx4 a4 | ||
536 | j 1b | 532 | j 1b |
537 | #endif | 533 | #endif |
538 | 534 | ||
@@ -545,23 +541,20 @@ common_exception_return: | |||
545 | 5: | 541 | 5: |
546 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 542 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
547 | _bbci.l a4, TIF_DB_DISABLED, 7f | 543 | _bbci.l a4, TIF_DB_DISABLED, 7f |
548 | movi a4, restore_dbreak | 544 | call4 restore_dbreak |
549 | callx4 a4 | ||
550 | 7: | 545 | 7: |
551 | #endif | 546 | #endif |
552 | #ifdef CONFIG_DEBUG_TLB_SANITY | 547 | #ifdef CONFIG_DEBUG_TLB_SANITY |
553 | l32i a4, a1, PT_DEPC | 548 | l32i a4, a1, PT_DEPC |
554 | bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f | 549 | bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f |
555 | movi a4, check_tlb_sanity | 550 | call4 check_tlb_sanity |
556 | callx4 a4 | ||
557 | #endif | 551 | #endif |
558 | 6: | 552 | 6: |
559 | 4: | 553 | 4: |
560 | #ifdef CONFIG_TRACE_IRQFLAGS | 554 | #ifdef CONFIG_TRACE_IRQFLAGS |
561 | extui a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH | 555 | extui a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH |
562 | bgei a4, LOCKLEVEL, 1f | 556 | bgei a4, LOCKLEVEL, 1f |
563 | movi a4, trace_hardirqs_on | 557 | call4 trace_hardirqs_on |
564 | callx4 a4 | ||
565 | 1: | 558 | 1: |
566 | #endif | 559 | #endif |
567 | /* Restore optional registers. */ | 560 | /* Restore optional registers. */ |
@@ -777,6 +770,8 @@ ENDPROC(kernel_exception) | |||
777 | * When we get here, a0 is trashed and saved to excsave[debuglevel] | 770 | * When we get here, a0 is trashed and saved to excsave[debuglevel] |
778 | */ | 771 | */ |
779 | 772 | ||
773 | .literal_position | ||
774 | |||
780 | ENTRY(debug_exception) | 775 | ENTRY(debug_exception) |
781 | 776 | ||
782 | rsr a0, SREG_EPS + XCHAL_DEBUGLEVEL | 777 | rsr a0, SREG_EPS + XCHAL_DEBUGLEVEL |
@@ -916,6 +911,8 @@ ENDPROC(debug_exception) | |||
916 | unrecoverable_text: | 911 | unrecoverable_text: |
917 | .ascii "Unrecoverable error in exception handler\0" | 912 | .ascii "Unrecoverable error in exception handler\0" |
918 | 913 | ||
914 | .literal_position | ||
915 | |||
919 | ENTRY(unrecoverable_exception) | 916 | ENTRY(unrecoverable_exception) |
920 | 917 | ||
921 | movi a0, 1 | 918 | movi a0, 1 |
@@ -933,10 +930,8 @@ ENTRY(unrecoverable_exception) | |||
933 | movi a0, 0 | 930 | movi a0, 0 |
934 | addi a1, a1, PT_REGS_OFFSET | 931 | addi a1, a1, PT_REGS_OFFSET |
935 | 932 | ||
936 | movi a4, panic | ||
937 | movi a6, unrecoverable_text | 933 | movi a6, unrecoverable_text |
938 | 934 | call4 panic | |
939 | callx4 a4 | ||
940 | 935 | ||
941 | 1: j 1b | 936 | 1: j 1b |
942 | 937 | ||
@@ -1073,8 +1068,7 @@ ENTRY(fast_syscall_unrecoverable) | |||
1073 | xsr a2, depc # restore a2, depc | 1068 | xsr a2, depc # restore a2, depc |
1074 | 1069 | ||
1075 | wsr a0, excsave1 | 1070 | wsr a0, excsave1 |
1076 | movi a0, unrecoverable_exception | 1071 | call0 unrecoverable_exception |
1077 | callx0 a0 | ||
1078 | 1072 | ||
1079 | ENDPROC(fast_syscall_unrecoverable) | 1073 | ENDPROC(fast_syscall_unrecoverable) |
1080 | 1074 | ||
@@ -1101,32 +1095,11 @@ ENDPROC(fast_syscall_unrecoverable) | |||
1101 | * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception | 1095 | * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception |
1102 | * | 1096 | * |
1103 | * Note: we don't have to save a2; a2 holds the return value | 1097 | * Note: we don't have to save a2; a2 holds the return value |
1104 | * | ||
1105 | * We use the two macros TRY and CATCH: | ||
1106 | * | ||
1107 | * TRY adds an entry to the __ex_table fixup table for the immediately | ||
1108 | * following instruction. | ||
1109 | * | ||
1110 | * CATCH catches any exception that occurred at one of the preceding TRY | ||
1111 | * statements and continues from there | ||
1112 | * | ||
1113 | * Usage TRY l32i a0, a1, 0 | ||
1114 | * <other code> | ||
1115 | * done: rfe | ||
1116 | * CATCH <set return code> | ||
1117 | * j done | ||
1118 | */ | 1098 | */ |
1119 | 1099 | ||
1120 | #ifdef CONFIG_FAST_SYSCALL_XTENSA | 1100 | .literal_position |
1121 | |||
1122 | #define TRY \ | ||
1123 | .section __ex_table, "a"; \ | ||
1124 | .word 66f, 67f; \ | ||
1125 | .text; \ | ||
1126 | 66: | ||
1127 | 1101 | ||
1128 | #define CATCH \ | 1102 | #ifdef CONFIG_FAST_SYSCALL_XTENSA |
1129 | 67: | ||
1130 | 1103 | ||
1131 | ENTRY(fast_syscall_xtensa) | 1104 | ENTRY(fast_syscall_xtensa) |
1132 | 1105 | ||
@@ -1141,9 +1114,9 @@ ENTRY(fast_syscall_xtensa) | |||
1141 | 1114 | ||
1142 | .Lswp: /* Atomic compare and swap */ | 1115 | .Lswp: /* Atomic compare and swap */ |
1143 | 1116 | ||
1144 | TRY l32i a0, a3, 0 # read old value | 1117 | EX(.Leac) l32i a0, a3, 0 # read old value |
1145 | bne a0, a4, 1f # same as old value? jump | 1118 | bne a0, a4, 1f # same as old value? jump |
1146 | TRY s32i a5, a3, 0 # different, modify value | 1119 | EX(.Leac) s32i a5, a3, 0 # different, modify value |
1147 | l32i a7, a2, PT_AREG7 # restore a7 | 1120 | l32i a7, a2, PT_AREG7 # restore a7 |
1148 | l32i a0, a2, PT_AREG0 # restore a0 | 1121 | l32i a0, a2, PT_AREG0 # restore a0 |
1149 | movi a2, 1 # and return 1 | 1122 | movi a2, 1 # and return 1 |
@@ -1156,12 +1129,12 @@ TRY s32i a5, a3, 0 # different, modify value | |||
1156 | 1129 | ||
1157 | .Lnswp: /* Atomic set, add, and exg_add. */ | 1130 | .Lnswp: /* Atomic set, add, and exg_add. */ |
1158 | 1131 | ||
1159 | TRY l32i a7, a3, 0 # orig | 1132 | EX(.Leac) l32i a7, a3, 0 # orig |
1160 | addi a6, a6, -SYS_XTENSA_ATOMIC_SET | 1133 | addi a6, a6, -SYS_XTENSA_ATOMIC_SET |
1161 | add a0, a4, a7 # + arg | 1134 | add a0, a4, a7 # + arg |
1162 | moveqz a0, a4, a6 # set | 1135 | moveqz a0, a4, a6 # set |
1163 | addi a6, a6, SYS_XTENSA_ATOMIC_SET | 1136 | addi a6, a6, SYS_XTENSA_ATOMIC_SET |
1164 | TRY s32i a0, a3, 0 # write new value | 1137 | EX(.Leac) s32i a0, a3, 0 # write new value |
1165 | 1138 | ||
1166 | mov a0, a2 | 1139 | mov a0, a2 |
1167 | mov a2, a7 | 1140 | mov a2, a7 |
@@ -1169,7 +1142,6 @@ TRY s32i a0, a3, 0 # write new value | |||
1169 | l32i a0, a0, PT_AREG0 # restore a0 | 1142 | l32i a0, a0, PT_AREG0 # restore a0 |
1170 | rfe | 1143 | rfe |
1171 | 1144 | ||
1172 | CATCH | ||
1173 | .Leac: l32i a7, a2, PT_AREG7 # restore a7 | 1145 | .Leac: l32i a7, a2, PT_AREG7 # restore a7 |
1174 | l32i a0, a2, PT_AREG0 # restore a0 | 1146 | l32i a0, a2, PT_AREG0 # restore a0 |
1175 | movi a2, -EFAULT | 1147 | movi a2, -EFAULT |
@@ -1411,14 +1383,12 @@ ENTRY(fast_syscall_spill_registers) | |||
1411 | rsync | 1383 | rsync |
1412 | 1384 | ||
1413 | movi a6, SIGSEGV | 1385 | movi a6, SIGSEGV |
1414 | movi a4, do_exit | 1386 | call4 do_exit |
1415 | callx4 a4 | ||
1416 | 1387 | ||
1417 | /* shouldn't return, so panic */ | 1388 | /* shouldn't return, so panic */ |
1418 | 1389 | ||
1419 | wsr a0, excsave1 | 1390 | wsr a0, excsave1 |
1420 | movi a0, unrecoverable_exception | 1391 | call0 unrecoverable_exception # should not return |
1421 | callx0 a0 # should not return | ||
1422 | 1: j 1b | 1392 | 1: j 1b |
1423 | 1393 | ||
1424 | 1394 | ||
@@ -1564,8 +1534,8 @@ ENDPROC(fast_syscall_spill_registers) | |||
1564 | 1534 | ||
1565 | ENTRY(fast_second_level_miss_double_kernel) | 1535 | ENTRY(fast_second_level_miss_double_kernel) |
1566 | 1536 | ||
1567 | 1: movi a0, unrecoverable_exception | 1537 | 1: |
1568 | callx0 a0 # should not return | 1538 | call0 unrecoverable_exception # should not return |
1569 | 1: j 1b | 1539 | 1: j 1b |
1570 | 1540 | ||
1571 | ENDPROC(fast_second_level_miss_double_kernel) | 1541 | ENDPROC(fast_second_level_miss_double_kernel) |
@@ -1887,6 +1857,7 @@ ENDPROC(fast_store_prohibited) | |||
1887 | * void system_call (struct pt_regs* regs, int exccause) | 1857 | * void system_call (struct pt_regs* regs, int exccause) |
1888 | * a2 a3 | 1858 | * a2 a3 |
1889 | */ | 1859 | */ |
1860 | .literal_position | ||
1890 | 1861 | ||
1891 | ENTRY(system_call) | 1862 | ENTRY(system_call) |
1892 | 1863 | ||
@@ -1896,9 +1867,8 @@ ENTRY(system_call) | |||
1896 | 1867 | ||
1897 | l32i a3, a2, PT_AREG2 | 1868 | l32i a3, a2, PT_AREG2 |
1898 | mov a6, a2 | 1869 | mov a6, a2 |
1899 | movi a4, do_syscall_trace_enter | ||
1900 | s32i a3, a2, PT_SYSCALL | 1870 | s32i a3, a2, PT_SYSCALL |
1901 | callx4 a4 | 1871 | call4 do_syscall_trace_enter |
1902 | mov a3, a6 | 1872 | mov a3, a6 |
1903 | 1873 | ||
1904 | /* syscall = sys_call_table[syscall_nr] */ | 1874 | /* syscall = sys_call_table[syscall_nr] */ |
@@ -1930,9 +1900,8 @@ ENTRY(system_call) | |||
1930 | 1: /* regs->areg[2] = return_value */ | 1900 | 1: /* regs->areg[2] = return_value */ |
1931 | 1901 | ||
1932 | s32i a6, a2, PT_AREG2 | 1902 | s32i a6, a2, PT_AREG2 |
1933 | movi a4, do_syscall_trace_leave | ||
1934 | mov a6, a2 | 1903 | mov a6, a2 |
1935 | callx4 a4 | 1904 | call4 do_syscall_trace_leave |
1936 | retw | 1905 | retw |
1937 | 1906 | ||
1938 | ENDPROC(system_call) | 1907 | ENDPROC(system_call) |
@@ -2002,6 +1971,12 @@ ENTRY(_switch_to) | |||
2002 | s32i a1, a2, THREAD_SP # save stack pointer | 1971 | s32i a1, a2, THREAD_SP # save stack pointer |
2003 | #endif | 1972 | #endif |
2004 | 1973 | ||
1974 | #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) | ||
1975 | movi a6, __stack_chk_guard | ||
1976 | l32i a8, a3, TASK_STACK_CANARY | ||
1977 | s32i a8, a6, 0 | ||
1978 | #endif | ||
1979 | |||
2005 | /* Disable ints while we manipulate the stack pointer. */ | 1980 | /* Disable ints while we manipulate the stack pointer. */ |
2006 | 1981 | ||
2007 | irq_save a14, a3 | 1982 | irq_save a14, a3 |
@@ -2048,12 +2023,10 @@ ENTRY(ret_from_fork) | |||
2048 | /* void schedule_tail (struct task_struct *prev) | 2023 | /* void schedule_tail (struct task_struct *prev) |
2049 | * Note: prev is still in a6 (return value from fake call4 frame) | 2024 | * Note: prev is still in a6 (return value from fake call4 frame) |
2050 | */ | 2025 | */ |
2051 | movi a4, schedule_tail | 2026 | call4 schedule_tail |
2052 | callx4 a4 | ||
2053 | 2027 | ||
2054 | movi a4, do_syscall_trace_leave | ||
2055 | mov a6, a1 | 2028 | mov a6, a1 |
2056 | callx4 a4 | 2029 | call4 do_syscall_trace_leave |
2057 | 2030 | ||
2058 | j common_exception_return | 2031 | j common_exception_return |
2059 | 2032 | ||
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S index 23ce62e60435..9c4e9433e536 100644 --- a/arch/xtensa/kernel/head.S +++ b/arch/xtensa/kernel/head.S | |||
@@ -264,11 +264,8 @@ ENTRY(_startup) | |||
264 | 264 | ||
265 | /* init_arch kick-starts the linux kernel */ | 265 | /* init_arch kick-starts the linux kernel */ |
266 | 266 | ||
267 | movi a4, init_arch | 267 | call4 init_arch |
268 | callx4 a4 | 268 | call4 start_kernel |
269 | |||
270 | movi a4, start_kernel | ||
271 | callx4 a4 | ||
272 | 269 | ||
273 | should_never_return: | 270 | should_never_return: |
274 | j should_never_return | 271 | j should_never_return |
@@ -294,8 +291,7 @@ should_never_return: | |||
294 | movi a6, 0 | 291 | movi a6, 0 |
295 | wsr a6, excsave1 | 292 | wsr a6, excsave1 |
296 | 293 | ||
297 | movi a4, secondary_start_kernel | 294 | call4 secondary_start_kernel |
298 | callx4 a4 | ||
299 | j should_never_return | 295 | j should_never_return |
300 | 296 | ||
301 | #endif /* CONFIG_SMP */ | 297 | #endif /* CONFIG_SMP */ |
diff --git a/arch/xtensa/kernel/module.c b/arch/xtensa/kernel/module.c index b715237bae61..902845ddacb7 100644 --- a/arch/xtensa/kernel/module.c +++ b/arch/xtensa/kernel/module.c | |||
@@ -22,8 +22,6 @@ | |||
22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
23 | #include <linux/cache.h> | 23 | #include <linux/cache.h> |
24 | 24 | ||
25 | #undef DEBUG_RELOCATE | ||
26 | |||
27 | static int | 25 | static int |
28 | decode_calln_opcode (unsigned char *location) | 26 | decode_calln_opcode (unsigned char *location) |
29 | { | 27 | { |
@@ -58,10 +56,9 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, | |||
58 | unsigned char *location; | 56 | unsigned char *location; |
59 | uint32_t value; | 57 | uint32_t value; |
60 | 58 | ||
61 | #ifdef DEBUG_RELOCATE | 59 | pr_debug("Applying relocate section %u to %u\n", relsec, |
62 | printk("Applying relocate section %u to %u\n", relsec, | 60 | sechdrs[relsec].sh_info); |
63 | sechdrs[relsec].sh_info); | 61 | |
64 | #endif | ||
65 | for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) { | 62 | for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) { |
66 | location = (char *)sechdrs[sechdrs[relsec].sh_info].sh_addr | 63 | location = (char *)sechdrs[sechdrs[relsec].sh_info].sh_addr |
67 | + rela[i].r_offset; | 64 | + rela[i].r_offset; |
@@ -87,7 +84,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, | |||
87 | value -= ((unsigned long)location & -4) + 4; | 84 | value -= ((unsigned long)location & -4) + 4; |
88 | if ((value & 3) != 0 || | 85 | if ((value & 3) != 0 || |
89 | ((value + (1 << 19)) >> 20) != 0) { | 86 | ((value + (1 << 19)) >> 20) != 0) { |
90 | printk("%s: relocation out of range, " | 87 | pr_err("%s: relocation out of range, " |
91 | "section %d reloc %d " | 88 | "section %d reloc %d " |
92 | "sym '%s'\n", | 89 | "sym '%s'\n", |
93 | mod->name, relsec, i, | 90 | mod->name, relsec, i, |
@@ -111,7 +108,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, | |||
111 | value -= (((unsigned long)location + 3) & -4); | 108 | value -= (((unsigned long)location + 3) & -4); |
112 | if ((value & 3) != 0 || | 109 | if ((value & 3) != 0 || |
113 | (signed int)value >> 18 != -1) { | 110 | (signed int)value >> 18 != -1) { |
114 | printk("%s: relocation out of range, " | 111 | pr_err("%s: relocation out of range, " |
115 | "section %d reloc %d " | 112 | "section %d reloc %d " |
116 | "sym '%s'\n", | 113 | "sym '%s'\n", |
117 | mod->name, relsec, i, | 114 | mod->name, relsec, i, |
@@ -156,7 +153,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, | |||
156 | case R_XTENSA_SLOT12_OP: | 153 | case R_XTENSA_SLOT12_OP: |
157 | case R_XTENSA_SLOT13_OP: | 154 | case R_XTENSA_SLOT13_OP: |
158 | case R_XTENSA_SLOT14_OP: | 155 | case R_XTENSA_SLOT14_OP: |
159 | printk("%s: unexpected FLIX relocation: %u\n", | 156 | pr_err("%s: unexpected FLIX relocation: %u\n", |
160 | mod->name, | 157 | mod->name, |
161 | ELF32_R_TYPE(rela[i].r_info)); | 158 | ELF32_R_TYPE(rela[i].r_info)); |
162 | return -ENOEXEC; | 159 | return -ENOEXEC; |
@@ -176,13 +173,13 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, | |||
176 | case R_XTENSA_SLOT12_ALT: | 173 | case R_XTENSA_SLOT12_ALT: |
177 | case R_XTENSA_SLOT13_ALT: | 174 | case R_XTENSA_SLOT13_ALT: |
178 | case R_XTENSA_SLOT14_ALT: | 175 | case R_XTENSA_SLOT14_ALT: |
179 | printk("%s: unexpected ALT relocation: %u\n", | 176 | pr_err("%s: unexpected ALT relocation: %u\n", |
180 | mod->name, | 177 | mod->name, |
181 | ELF32_R_TYPE(rela[i].r_info)); | 178 | ELF32_R_TYPE(rela[i].r_info)); |
182 | return -ENOEXEC; | 179 | return -ENOEXEC; |
183 | 180 | ||
184 | default: | 181 | default: |
185 | printk("%s: unexpected relocation: %u\n", | 182 | pr_err("%s: unexpected relocation: %u\n", |
186 | mod->name, | 183 | mod->name, |
187 | ELF32_R_TYPE(rela[i].r_info)); | 184 | ELF32_R_TYPE(rela[i].r_info)); |
188 | return -ENOEXEC; | 185 | return -ENOEXEC; |
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c index 903963ee495d..d981f01c8d89 100644 --- a/arch/xtensa/kernel/pci.c +++ b/arch/xtensa/kernel/pci.c | |||
@@ -29,14 +29,6 @@ | |||
29 | #include <asm/pci-bridge.h> | 29 | #include <asm/pci-bridge.h> |
30 | #include <asm/platform.h> | 30 | #include <asm/platform.h> |
31 | 31 | ||
32 | #undef DEBUG | ||
33 | |||
34 | #ifdef DEBUG | ||
35 | #define DBG(x...) printk(x) | ||
36 | #else | ||
37 | #define DBG(x...) | ||
38 | #endif | ||
39 | |||
40 | /* PCI Controller */ | 32 | /* PCI Controller */ |
41 | 33 | ||
42 | 34 | ||
@@ -101,8 +93,8 @@ pcibios_enable_resources(struct pci_dev *dev, int mask) | |||
101 | for(idx=0; idx<6; idx++) { | 93 | for(idx=0; idx<6; idx++) { |
102 | r = &dev->resource[idx]; | 94 | r = &dev->resource[idx]; |
103 | if (!r->start && r->end) { | 95 | if (!r->start && r->end) { |
104 | printk (KERN_ERR "PCI: Device %s not available because " | 96 | pr_err("PCI: Device %s not available because " |
105 | "of resource collisions\n", pci_name(dev)); | 97 | "of resource collisions\n", pci_name(dev)); |
106 | return -EINVAL; | 98 | return -EINVAL; |
107 | } | 99 | } |
108 | if (r->flags & IORESOURCE_IO) | 100 | if (r->flags & IORESOURCE_IO) |
@@ -113,7 +105,7 @@ pcibios_enable_resources(struct pci_dev *dev, int mask) | |||
113 | if (dev->resource[PCI_ROM_RESOURCE].start) | 105 | if (dev->resource[PCI_ROM_RESOURCE].start) |
114 | cmd |= PCI_COMMAND_MEMORY; | 106 | cmd |= PCI_COMMAND_MEMORY; |
115 | if (cmd != old_cmd) { | 107 | if (cmd != old_cmd) { |
116 | printk("PCI: Enabling device %s (%04x -> %04x)\n", | 108 | pr_info("PCI: Enabling device %s (%04x -> %04x)\n", |
117 | pci_name(dev), old_cmd, cmd); | 109 | pci_name(dev), old_cmd, cmd); |
118 | pci_write_config_word(dev, PCI_COMMAND, cmd); | 110 | pci_write_config_word(dev, PCI_COMMAND, cmd); |
119 | } | 111 | } |
@@ -144,8 +136,8 @@ static void __init pci_controller_apertures(struct pci_controller *pci_ctrl, | |||
144 | res = &pci_ctrl->io_resource; | 136 | res = &pci_ctrl->io_resource; |
145 | if (!res->flags) { | 137 | if (!res->flags) { |
146 | if (io_offset) | 138 | if (io_offset) |
147 | printk (KERN_ERR "I/O resource not set for host" | 139 | pr_err("I/O resource not set for host bridge %d\n", |
148 | " bridge %d\n", pci_ctrl->index); | 140 | pci_ctrl->index); |
149 | res->start = 0; | 141 | res->start = 0; |
150 | res->end = IO_SPACE_LIMIT; | 142 | res->end = IO_SPACE_LIMIT; |
151 | res->flags = IORESOURCE_IO; | 143 | res->flags = IORESOURCE_IO; |
@@ -159,8 +151,8 @@ static void __init pci_controller_apertures(struct pci_controller *pci_ctrl, | |||
159 | if (!res->flags) { | 151 | if (!res->flags) { |
160 | if (i > 0) | 152 | if (i > 0) |
161 | continue; | 153 | continue; |
162 | printk(KERN_ERR "Memory resource not set for " | 154 | pr_err("Memory resource not set for host bridge %d\n", |
163 | "host bridge %d\n", pci_ctrl->index); | 155 | pci_ctrl->index); |
164 | res->start = 0; | 156 | res->start = 0; |
165 | res->end = ~0U; | 157 | res->end = ~0U; |
166 | res->flags = IORESOURCE_MEM; | 158 | res->flags = IORESOURCE_MEM; |
@@ -176,7 +168,7 @@ static int __init pcibios_init(void) | |||
176 | struct pci_bus *bus; | 168 | struct pci_bus *bus; |
177 | int next_busno = 0, ret; | 169 | int next_busno = 0, ret; |
178 | 170 | ||
179 | printk("PCI: Probing PCI hardware\n"); | 171 | pr_info("PCI: Probing PCI hardware\n"); |
180 | 172 | ||
181 | /* Scan all of the recorded PCI controllers. */ | 173 | /* Scan all of the recorded PCI controllers. */ |
182 | for (pci_ctrl = pci_ctrl_head; pci_ctrl; pci_ctrl = pci_ctrl->next) { | 174 | for (pci_ctrl = pci_ctrl_head; pci_ctrl; pci_ctrl = pci_ctrl->next) { |
@@ -232,7 +224,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) | |||
232 | for (idx=0; idx<6; idx++) { | 224 | for (idx=0; idx<6; idx++) { |
233 | r = &dev->resource[idx]; | 225 | r = &dev->resource[idx]; |
234 | if (!r->start && r->end) { | 226 | if (!r->start && r->end) { |
235 | printk(KERN_ERR "PCI: Device %s not available because " | 227 | pr_err("PCI: Device %s not available because " |
236 | "of resource collisions\n", pci_name(dev)); | 228 | "of resource collisions\n", pci_name(dev)); |
237 | return -EINVAL; | 229 | return -EINVAL; |
238 | } | 230 | } |
@@ -242,8 +234,8 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) | |||
242 | cmd |= PCI_COMMAND_MEMORY; | 234 | cmd |= PCI_COMMAND_MEMORY; |
243 | } | 235 | } |
244 | if (cmd != old_cmd) { | 236 | if (cmd != old_cmd) { |
245 | printk("PCI: Enabling device %s (%04x -> %04x)\n", | 237 | pr_info("PCI: Enabling device %s (%04x -> %04x)\n", |
246 | pci_name(dev), old_cmd, cmd); | 238 | pci_name(dev), old_cmd, cmd); |
247 | pci_write_config_word(dev, PCI_COMMAND, cmd); | 239 | pci_write_config_word(dev, PCI_COMMAND, cmd); |
248 | } | 240 | } |
249 | 241 | ||
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index ff4f0ecb03dd..8dd0593fb2c4 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c | |||
@@ -58,6 +58,12 @@ void (*pm_power_off)(void) = NULL; | |||
58 | EXPORT_SYMBOL(pm_power_off); | 58 | EXPORT_SYMBOL(pm_power_off); |
59 | 59 | ||
60 | 60 | ||
61 | #ifdef CONFIG_CC_STACKPROTECTOR | ||
62 | #include <linux/stackprotector.h> | ||
63 | unsigned long __stack_chk_guard __read_mostly; | ||
64 | EXPORT_SYMBOL(__stack_chk_guard); | ||
65 | #endif | ||
66 | |||
61 | #if XTENSA_HAVE_COPROCESSORS | 67 | #if XTENSA_HAVE_COPROCESSORS |
62 | 68 | ||
63 | void coprocessor_release_all(struct thread_info *ti) | 69 | void coprocessor_release_all(struct thread_info *ti) |
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 08175df7a69e..a931af9075f2 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #endif | 36 | #endif |
37 | 37 | ||
38 | #include <asm/bootparam.h> | 38 | #include <asm/bootparam.h> |
39 | #include <asm/kasan.h> | ||
39 | #include <asm/mmu_context.h> | 40 | #include <asm/mmu_context.h> |
40 | #include <asm/pgtable.h> | 41 | #include <asm/pgtable.h> |
41 | #include <asm/processor.h> | 42 | #include <asm/processor.h> |
@@ -156,7 +157,7 @@ static int __init parse_bootparam(const bp_tag_t* tag) | |||
156 | /* Boot parameters must start with a BP_TAG_FIRST tag. */ | 157 | /* Boot parameters must start with a BP_TAG_FIRST tag. */ |
157 | 158 | ||
158 | if (tag->id != BP_TAG_FIRST) { | 159 | if (tag->id != BP_TAG_FIRST) { |
159 | printk(KERN_WARNING "Invalid boot parameters!\n"); | 160 | pr_warn("Invalid boot parameters!\n"); |
160 | return 0; | 161 | return 0; |
161 | } | 162 | } |
162 | 163 | ||
@@ -165,15 +166,14 @@ static int __init parse_bootparam(const bp_tag_t* tag) | |||
165 | /* Parse all tags. */ | 166 | /* Parse all tags. */ |
166 | 167 | ||
167 | while (tag != NULL && tag->id != BP_TAG_LAST) { | 168 | while (tag != NULL && tag->id != BP_TAG_LAST) { |
168 | for (t = &__tagtable_begin; t < &__tagtable_end; t++) { | 169 | for (t = &__tagtable_begin; t < &__tagtable_end; t++) { |
169 | if (tag->id == t->tag) { | 170 | if (tag->id == t->tag) { |
170 | t->parse(tag); | 171 | t->parse(tag); |
171 | break; | 172 | break; |
172 | } | 173 | } |
173 | } | 174 | } |
174 | if (t == &__tagtable_end) | 175 | if (t == &__tagtable_end) |
175 | printk(KERN_WARNING "Ignoring tag " | 176 | pr_warn("Ignoring tag 0x%08x\n", tag->id); |
176 | "0x%08x\n", tag->id); | ||
177 | tag = (bp_tag_t*)((unsigned long)(tag + 1) + tag->size); | 177 | tag = (bp_tag_t*)((unsigned long)(tag + 1) + tag->size); |
178 | } | 178 | } |
179 | 179 | ||
@@ -208,6 +208,8 @@ static int __init xtensa_dt_io_area(unsigned long node, const char *uname, | |||
208 | /* round down to nearest 256MB boundary */ | 208 | /* round down to nearest 256MB boundary */ |
209 | xtensa_kio_paddr &= 0xf0000000; | 209 | xtensa_kio_paddr &= 0xf0000000; |
210 | 210 | ||
211 | init_kio(); | ||
212 | |||
211 | return 1; | 213 | return 1; |
212 | } | 214 | } |
213 | #else | 215 | #else |
@@ -246,6 +248,14 @@ void __init early_init_devtree(void *params) | |||
246 | 248 | ||
247 | void __init init_arch(bp_tag_t *bp_start) | 249 | void __init init_arch(bp_tag_t *bp_start) |
248 | { | 250 | { |
251 | /* Initialize MMU. */ | ||
252 | |||
253 | init_mmu(); | ||
254 | |||
255 | /* Initialize initial KASAN shadow map */ | ||
256 | |||
257 | kasan_early_init(); | ||
258 | |||
249 | /* Parse boot parameters */ | 259 | /* Parse boot parameters */ |
250 | 260 | ||
251 | if (bp_start) | 261 | if (bp_start) |
@@ -263,10 +273,6 @@ void __init init_arch(bp_tag_t *bp_start) | |||
263 | /* Early hook for platforms */ | 273 | /* Early hook for platforms */ |
264 | 274 | ||
265 | platform_init(bp_start); | 275 | platform_init(bp_start); |
266 | |||
267 | /* Initialize MMU. */ | ||
268 | |||
269 | init_mmu(); | ||
270 | } | 276 | } |
271 | 277 | ||
272 | /* | 278 | /* |
@@ -277,13 +283,13 @@ extern char _end[]; | |||
277 | extern char _stext[]; | 283 | extern char _stext[]; |
278 | extern char _WindowVectors_text_start; | 284 | extern char _WindowVectors_text_start; |
279 | extern char _WindowVectors_text_end; | 285 | extern char _WindowVectors_text_end; |
280 | extern char _DebugInterruptVector_literal_start; | 286 | extern char _DebugInterruptVector_text_start; |
281 | extern char _DebugInterruptVector_text_end; | 287 | extern char _DebugInterruptVector_text_end; |
282 | extern char _KernelExceptionVector_literal_start; | 288 | extern char _KernelExceptionVector_text_start; |
283 | extern char _KernelExceptionVector_text_end; | 289 | extern char _KernelExceptionVector_text_end; |
284 | extern char _UserExceptionVector_literal_start; | 290 | extern char _UserExceptionVector_text_start; |
285 | extern char _UserExceptionVector_text_end; | 291 | extern char _UserExceptionVector_text_end; |
286 | extern char _DoubleExceptionVector_literal_start; | 292 | extern char _DoubleExceptionVector_text_start; |
287 | extern char _DoubleExceptionVector_text_end; | 293 | extern char _DoubleExceptionVector_text_end; |
288 | #if XCHAL_EXCM_LEVEL >= 2 | 294 | #if XCHAL_EXCM_LEVEL >= 2 |
289 | extern char _Level2InterruptVector_text_start; | 295 | extern char _Level2InterruptVector_text_start; |
@@ -317,6 +323,13 @@ static inline int mem_reserve(unsigned long start, unsigned long end) | |||
317 | 323 | ||
318 | void __init setup_arch(char **cmdline_p) | 324 | void __init setup_arch(char **cmdline_p) |
319 | { | 325 | { |
326 | pr_info("config ID: %08x:%08x\n", | ||
327 | get_sr(SREG_EPC), get_sr(SREG_EXCSAVE)); | ||
328 | if (get_sr(SREG_EPC) != XCHAL_HW_CONFIGID0 || | ||
329 | get_sr(SREG_EXCSAVE) != XCHAL_HW_CONFIGID1) | ||
330 | pr_info("built for config ID: %08x:%08x\n", | ||
331 | XCHAL_HW_CONFIGID0, XCHAL_HW_CONFIGID1); | ||
332 | |||
320 | *cmdline_p = command_line; | 333 | *cmdline_p = command_line; |
321 | platform_setup(cmdline_p); | 334 | platform_setup(cmdline_p); |
322 | strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); | 335 | strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); |
@@ -339,16 +352,16 @@ void __init setup_arch(char **cmdline_p) | |||
339 | mem_reserve(__pa(&_WindowVectors_text_start), | 352 | mem_reserve(__pa(&_WindowVectors_text_start), |
340 | __pa(&_WindowVectors_text_end)); | 353 | __pa(&_WindowVectors_text_end)); |
341 | 354 | ||
342 | mem_reserve(__pa(&_DebugInterruptVector_literal_start), | 355 | mem_reserve(__pa(&_DebugInterruptVector_text_start), |
343 | __pa(&_DebugInterruptVector_text_end)); | 356 | __pa(&_DebugInterruptVector_text_end)); |
344 | 357 | ||
345 | mem_reserve(__pa(&_KernelExceptionVector_literal_start), | 358 | mem_reserve(__pa(&_KernelExceptionVector_text_start), |
346 | __pa(&_KernelExceptionVector_text_end)); | 359 | __pa(&_KernelExceptionVector_text_end)); |
347 | 360 | ||
348 | mem_reserve(__pa(&_UserExceptionVector_literal_start), | 361 | mem_reserve(__pa(&_UserExceptionVector_text_start), |
349 | __pa(&_UserExceptionVector_text_end)); | 362 | __pa(&_UserExceptionVector_text_end)); |
350 | 363 | ||
351 | mem_reserve(__pa(&_DoubleExceptionVector_literal_start), | 364 | mem_reserve(__pa(&_DoubleExceptionVector_text_start), |
352 | __pa(&_DoubleExceptionVector_text_end)); | 365 | __pa(&_DoubleExceptionVector_text_end)); |
353 | 366 | ||
354 | #if XCHAL_EXCM_LEVEL >= 2 | 367 | #if XCHAL_EXCM_LEVEL >= 2 |
@@ -380,7 +393,7 @@ void __init setup_arch(char **cmdline_p) | |||
380 | #endif | 393 | #endif |
381 | parse_early_param(); | 394 | parse_early_param(); |
382 | bootmem_init(); | 395 | bootmem_init(); |
383 | 396 | kasan_init(); | |
384 | unflatten_and_copy_device_tree(); | 397 | unflatten_and_copy_device_tree(); |
385 | 398 | ||
386 | #ifdef CONFIG_SMP | 399 | #ifdef CONFIG_SMP |
@@ -582,12 +595,14 @@ c_show(struct seq_file *f, void *slot) | |||
582 | "model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n" | 595 | "model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n" |
583 | "core ID\t\t: " XCHAL_CORE_ID "\n" | 596 | "core ID\t\t: " XCHAL_CORE_ID "\n" |
584 | "build ID\t: 0x%x\n" | 597 | "build ID\t: 0x%x\n" |
598 | "config ID\t: %08x:%08x\n" | ||
585 | "byte order\t: %s\n" | 599 | "byte order\t: %s\n" |
586 | "cpu MHz\t\t: %lu.%02lu\n" | 600 | "cpu MHz\t\t: %lu.%02lu\n" |
587 | "bogomips\t: %lu.%02lu\n", | 601 | "bogomips\t: %lu.%02lu\n", |
588 | num_online_cpus(), | 602 | num_online_cpus(), |
589 | cpumask_pr_args(cpu_online_mask), | 603 | cpumask_pr_args(cpu_online_mask), |
590 | XCHAL_BUILD_UNIQUE_ID, | 604 | XCHAL_BUILD_UNIQUE_ID, |
605 | get_sr(SREG_EPC), get_sr(SREG_EXCSAVE), | ||
591 | XCHAL_HAVE_BE ? "big" : "little", | 606 | XCHAL_HAVE_BE ? "big" : "little", |
592 | ccount_freq/1000000, | 607 | ccount_freq/1000000, |
593 | (ccount_freq/10000) % 100, | 608 | (ccount_freq/10000) % 100, |
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c index d427e784ab44..f88e7a0b232c 100644 --- a/arch/xtensa/kernel/signal.c +++ b/arch/xtensa/kernel/signal.c | |||
@@ -28,8 +28,6 @@ | |||
28 | #include <asm/coprocessor.h> | 28 | #include <asm/coprocessor.h> |
29 | #include <asm/unistd.h> | 29 | #include <asm/unistd.h> |
30 | 30 | ||
31 | #define DEBUG_SIG 0 | ||
32 | |||
33 | extern struct task_struct *coproc_owners[]; | 31 | extern struct task_struct *coproc_owners[]; |
34 | 32 | ||
35 | struct rt_sigframe | 33 | struct rt_sigframe |
@@ -399,10 +397,8 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, | |||
399 | regs->areg[8] = (unsigned long) &frame->uc; | 397 | regs->areg[8] = (unsigned long) &frame->uc; |
400 | regs->threadptr = tp; | 398 | regs->threadptr = tp; |
401 | 399 | ||
402 | #if DEBUG_SIG | 400 | pr_debug("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08lx\n", |
403 | printk("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08x\n", | 401 | current->comm, current->pid, sig, frame, regs->pc); |
404 | current->comm, current->pid, sig, frame, regs->pc); | ||
405 | #endif | ||
406 | 402 | ||
407 | return 0; | 403 | return 0; |
408 | } | 404 | } |
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index bae697a06a98..32c5207f1226 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/kallsyms.h> | 33 | #include <linux/kallsyms.h> |
34 | #include <linux/delay.h> | 34 | #include <linux/delay.h> |
35 | #include <linux/hardirq.h> | 35 | #include <linux/hardirq.h> |
36 | #include <linux/ratelimit.h> | ||
36 | 37 | ||
37 | #include <asm/stacktrace.h> | 38 | #include <asm/stacktrace.h> |
38 | #include <asm/ptrace.h> | 39 | #include <asm/ptrace.h> |
@@ -158,8 +159,7 @@ COPROCESSOR(7), | |||
158 | * 2. it is a temporary memory buffer for the exception handlers. | 159 | * 2. it is a temporary memory buffer for the exception handlers. |
159 | */ | 160 | */ |
160 | 161 | ||
161 | DEFINE_PER_CPU(unsigned long, exc_table[EXC_TABLE_SIZE/4]); | 162 | DEFINE_PER_CPU(struct exc_table, exc_table); |
162 | |||
163 | DEFINE_PER_CPU(struct debug_table, debug_table); | 163 | DEFINE_PER_CPU(struct debug_table, debug_table); |
164 | 164 | ||
165 | void die(const char*, struct pt_regs*, long); | 165 | void die(const char*, struct pt_regs*, long); |
@@ -178,13 +178,14 @@ __die_if_kernel(const char *str, struct pt_regs *regs, long err) | |||
178 | void do_unhandled(struct pt_regs *regs, unsigned long exccause) | 178 | void do_unhandled(struct pt_regs *regs, unsigned long exccause) |
179 | { | 179 | { |
180 | __die_if_kernel("Caught unhandled exception - should not happen", | 180 | __die_if_kernel("Caught unhandled exception - should not happen", |
181 | regs, SIGKILL); | 181 | regs, SIGKILL); |
182 | 182 | ||
183 | /* If in user mode, send SIGILL signal to current process */ | 183 | /* If in user mode, send SIGILL signal to current process */ |
184 | printk("Caught unhandled exception in '%s' " | 184 | pr_info_ratelimited("Caught unhandled exception in '%s' " |
185 | "(pid = %d, pc = %#010lx) - should not happen\n" | 185 | "(pid = %d, pc = %#010lx) - should not happen\n" |
186 | "\tEXCCAUSE is %ld\n", | 186 | "\tEXCCAUSE is %ld\n", |
187 | current->comm, task_pid_nr(current), regs->pc, exccause); | 187 | current->comm, task_pid_nr(current), regs->pc, |
188 | exccause); | ||
188 | force_sig(SIGILL, current); | 189 | force_sig(SIGILL, current); |
189 | } | 190 | } |
190 | 191 | ||
@@ -305,8 +306,8 @@ do_illegal_instruction(struct pt_regs *regs) | |||
305 | 306 | ||
306 | /* If in user mode, send SIGILL signal to current process. */ | 307 | /* If in user mode, send SIGILL signal to current process. */ |
307 | 308 | ||
308 | printk("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n", | 309 | pr_info_ratelimited("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n", |
309 | current->comm, task_pid_nr(current), regs->pc); | 310 | current->comm, task_pid_nr(current), regs->pc); |
310 | force_sig(SIGILL, current); | 311 | force_sig(SIGILL, current); |
311 | } | 312 | } |
312 | 313 | ||
@@ -325,13 +326,14 @@ do_unaligned_user (struct pt_regs *regs) | |||
325 | siginfo_t info; | 326 | siginfo_t info; |
326 | 327 | ||
327 | __die_if_kernel("Unhandled unaligned exception in kernel", | 328 | __die_if_kernel("Unhandled unaligned exception in kernel", |
328 | regs, SIGKILL); | 329 | regs, SIGKILL); |
329 | 330 | ||
330 | current->thread.bad_vaddr = regs->excvaddr; | 331 | current->thread.bad_vaddr = regs->excvaddr; |
331 | current->thread.error_code = -3; | 332 | current->thread.error_code = -3; |
332 | printk("Unaligned memory access to %08lx in '%s' " | 333 | pr_info_ratelimited("Unaligned memory access to %08lx in '%s' " |
333 | "(pid = %d, pc = %#010lx)\n", | 334 | "(pid = %d, pc = %#010lx)\n", |
334 | regs->excvaddr, current->comm, task_pid_nr(current), regs->pc); | 335 | regs->excvaddr, current->comm, |
336 | task_pid_nr(current), regs->pc); | ||
335 | info.si_signo = SIGBUS; | 337 | info.si_signo = SIGBUS; |
336 | info.si_errno = 0; | 338 | info.si_errno = 0; |
337 | info.si_code = BUS_ADRALN; | 339 | info.si_code = BUS_ADRALN; |
@@ -365,28 +367,28 @@ do_debug(struct pt_regs *regs) | |||
365 | } | 367 | } |
366 | 368 | ||
367 | 369 | ||
368 | static void set_handler(int idx, void *handler) | 370 | #define set_handler(type, cause, handler) \ |
369 | { | 371 | do { \ |
370 | unsigned int cpu; | 372 | unsigned int cpu; \ |
371 | 373 | \ | |
372 | for_each_possible_cpu(cpu) | 374 | for_each_possible_cpu(cpu) \ |
373 | per_cpu(exc_table, cpu)[idx] = (unsigned long)handler; | 375 | per_cpu(exc_table, cpu).type[cause] = (handler);\ |
374 | } | 376 | } while (0) |
375 | 377 | ||
376 | /* Set exception C handler - for temporary use when probing exceptions */ | 378 | /* Set exception C handler - for temporary use when probing exceptions */ |
377 | 379 | ||
378 | void * __init trap_set_handler(int cause, void *handler) | 380 | void * __init trap_set_handler(int cause, void *handler) |
379 | { | 381 | { |
380 | void *previous = (void *)per_cpu(exc_table, 0)[ | 382 | void *previous = per_cpu(exc_table, 0).default_handler[cause]; |
381 | EXC_TABLE_DEFAULT / 4 + cause]; | 383 | |
382 | set_handler(EXC_TABLE_DEFAULT / 4 + cause, handler); | 384 | set_handler(default_handler, cause, handler); |
383 | return previous; | 385 | return previous; |
384 | } | 386 | } |
385 | 387 | ||
386 | 388 | ||
387 | static void trap_init_excsave(void) | 389 | static void trap_init_excsave(void) |
388 | { | 390 | { |
389 | unsigned long excsave1 = (unsigned long)this_cpu_ptr(exc_table); | 391 | unsigned long excsave1 = (unsigned long)this_cpu_ptr(&exc_table); |
390 | __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (excsave1)); | 392 | __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (excsave1)); |
391 | } | 393 | } |
392 | 394 | ||
@@ -418,10 +420,10 @@ void __init trap_init(void) | |||
418 | 420 | ||
419 | /* Setup default vectors. */ | 421 | /* Setup default vectors. */ |
420 | 422 | ||
421 | for(i = 0; i < 64; i++) { | 423 | for (i = 0; i < EXCCAUSE_N; i++) { |
422 | set_handler(EXC_TABLE_FAST_USER/4 + i, user_exception); | 424 | set_handler(fast_user_handler, i, user_exception); |
423 | set_handler(EXC_TABLE_FAST_KERNEL/4 + i, kernel_exception); | 425 | set_handler(fast_kernel_handler, i, kernel_exception); |
424 | set_handler(EXC_TABLE_DEFAULT/4 + i, do_unhandled); | 426 | set_handler(default_handler, i, do_unhandled); |
425 | } | 427 | } |
426 | 428 | ||
427 | /* Setup specific handlers. */ | 429 | /* Setup specific handlers. */ |
@@ -433,11 +435,11 @@ void __init trap_init(void) | |||
433 | void *handler = dispatch_init_table[i].handler; | 435 | void *handler = dispatch_init_table[i].handler; |
434 | 436 | ||
435 | if (fast == 0) | 437 | if (fast == 0) |
436 | set_handler (EXC_TABLE_DEFAULT/4 + cause, handler); | 438 | set_handler(default_handler, cause, handler); |
437 | if (fast && fast & USER) | 439 | if (fast && fast & USER) |
438 | set_handler (EXC_TABLE_FAST_USER/4 + cause, handler); | 440 | set_handler(fast_user_handler, cause, handler); |
439 | if (fast && fast & KRNL) | 441 | if (fast && fast & KRNL) |
440 | set_handler (EXC_TABLE_FAST_KERNEL/4 + cause, handler); | 442 | set_handler(fast_kernel_handler, cause, handler); |
441 | } | 443 | } |
442 | 444 | ||
443 | /* Initialize EXCSAVE_1 to hold the address of the exception table. */ | 445 | /* Initialize EXCSAVE_1 to hold the address of the exception table. */ |
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S index 332e9d635fb6..841503d3307c 100644 --- a/arch/xtensa/kernel/vectors.S +++ b/arch/xtensa/kernel/vectors.S | |||
@@ -205,9 +205,6 @@ ENDPROC(_KernelExceptionVector) | |||
205 | */ | 205 | */ |
206 | 206 | ||
207 | .section .DoubleExceptionVector.text, "ax" | 207 | .section .DoubleExceptionVector.text, "ax" |
208 | .begin literal_prefix .DoubleExceptionVector | ||
209 | .globl _DoubleExceptionVector_WindowUnderflow | ||
210 | .globl _DoubleExceptionVector_WindowOverflow | ||
211 | 208 | ||
212 | ENTRY(_DoubleExceptionVector) | 209 | ENTRY(_DoubleExceptionVector) |
213 | 210 | ||
@@ -217,8 +214,12 @@ ENTRY(_DoubleExceptionVector) | |||
217 | /* Check for kernel double exception (usually fatal). */ | 214 | /* Check for kernel double exception (usually fatal). */ |
218 | 215 | ||
219 | rsr a2, ps | 216 | rsr a2, ps |
220 | _bbci.l a2, PS_UM_BIT, .Lksp | 217 | _bbsi.l a2, PS_UM_BIT, 1f |
218 | j .Lksp | ||
221 | 219 | ||
220 | .align 4 | ||
221 | .literal_position | ||
222 | 1: | ||
222 | /* Check if we are currently handling a window exception. */ | 223 | /* Check if we are currently handling a window exception. */ |
223 | /* Note: We don't need to indicate that we enter a critical section. */ | 224 | /* Note: We don't need to indicate that we enter a critical section. */ |
224 | 225 | ||
@@ -304,8 +305,7 @@ _DoubleExceptionVector_WindowUnderflow: | |||
304 | .Lunrecoverable: | 305 | .Lunrecoverable: |
305 | rsr a3, excsave1 | 306 | rsr a3, excsave1 |
306 | wsr a0, excsave1 | 307 | wsr a0, excsave1 |
307 | movi a0, unrecoverable_exception | 308 | call0 unrecoverable_exception |
308 | callx0 a0 | ||
309 | 309 | ||
310 | .Lfixup:/* Check for a fixup handler or if we were in a critical section. */ | 310 | .Lfixup:/* Check for a fixup handler or if we were in a critical section. */ |
311 | 311 | ||
@@ -475,11 +475,8 @@ _DoubleExceptionVector_handle_exception: | |||
475 | rotw -3 | 475 | rotw -3 |
476 | j 1b | 476 | j 1b |
477 | 477 | ||
478 | |||
479 | ENDPROC(_DoubleExceptionVector) | 478 | ENDPROC(_DoubleExceptionVector) |
480 | 479 | ||
481 | .end literal_prefix | ||
482 | |||
483 | .text | 480 | .text |
484 | /* | 481 | /* |
485 | * Fixup handler for TLB miss in double exception handler for window owerflow. | 482 | * Fixup handler for TLB miss in double exception handler for window owerflow. |
@@ -508,6 +505,8 @@ ENDPROC(_DoubleExceptionVector) | |||
508 | * a3: exctable, original value in excsave1 | 505 | * a3: exctable, original value in excsave1 |
509 | */ | 506 | */ |
510 | 507 | ||
508 | .literal_position | ||
509 | |||
511 | ENTRY(window_overflow_restore_a0_fixup) | 510 | ENTRY(window_overflow_restore_a0_fixup) |
512 | 511 | ||
513 | rsr a0, ps | 512 | rsr a0, ps |
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index 162c77e53ca8..70b731edc7b8 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S | |||
@@ -45,24 +45,16 @@ jiffies = jiffies_64; | |||
45 | LONG(sym ## _end); \ | 45 | LONG(sym ## _end); \ |
46 | LONG(LOADADDR(section)) | 46 | LONG(LOADADDR(section)) |
47 | 47 | ||
48 | /* Macro to define a section for a vector. | 48 | /* |
49 | * | 49 | * Macro to define a section for a vector. When CONFIG_VECTORS_OFFSET is |
50 | * Use of the MIN function catches the types of errors illustrated in | 50 | * defined code for every vector is located with other init data. At startup |
51 | * the following example: | 51 | * time head.S copies code for every vector to its final position according |
52 | * | 52 | * to description recorded in the corresponding RELOCATE_ENTRY. |
53 | * Assume the section .DoubleExceptionVector.literal is completely | ||
54 | * full. Then a programmer adds code to .DoubleExceptionVector.text | ||
55 | * that produces another literal. The final literal position will | ||
56 | * overlay onto the first word of the adjacent code section | ||
57 | * .DoubleExceptionVector.text. (In practice, the literals will | ||
58 | * overwrite the code, and the first few instructions will be | ||
59 | * garbage.) | ||
60 | */ | 53 | */ |
61 | 54 | ||
62 | #ifdef CONFIG_VECTORS_OFFSET | 55 | #ifdef CONFIG_VECTORS_OFFSET |
63 | #define SECTION_VECTOR(sym, section, addr, max_prevsec_size, prevsec) \ | 56 | #define SECTION_VECTOR(sym, section, addr, prevsec) \ |
64 | section addr : AT((MIN(LOADADDR(prevsec) + max_prevsec_size, \ | 57 | section addr : AT(((LOADADDR(prevsec) + SIZEOF(prevsec)) + 3) & ~ 3) \ |
65 | LOADADDR(prevsec) + SIZEOF(prevsec)) + 3) & ~ 3) \ | ||
66 | { \ | 58 | { \ |
67 | . = ALIGN(4); \ | 59 | . = ALIGN(4); \ |
68 | sym ## _start = ABSOLUTE(.); \ | 60 | sym ## _start = ABSOLUTE(.); \ |
@@ -112,26 +104,19 @@ SECTIONS | |||
112 | #if XCHAL_EXCM_LEVEL >= 6 | 104 | #if XCHAL_EXCM_LEVEL >= 6 |
113 | SECTION_VECTOR (.Level6InterruptVector.text, INTLEVEL6_VECTOR_VADDR) | 105 | SECTION_VECTOR (.Level6InterruptVector.text, INTLEVEL6_VECTOR_VADDR) |
114 | #endif | 106 | #endif |
115 | SECTION_VECTOR (.DebugInterruptVector.literal, DEBUG_VECTOR_VADDR - 4) | ||
116 | SECTION_VECTOR (.DebugInterruptVector.text, DEBUG_VECTOR_VADDR) | 107 | SECTION_VECTOR (.DebugInterruptVector.text, DEBUG_VECTOR_VADDR) |
117 | SECTION_VECTOR (.KernelExceptionVector.literal, KERNEL_VECTOR_VADDR - 4) | ||
118 | SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR) | 108 | SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR) |
119 | SECTION_VECTOR (.UserExceptionVector.literal, USER_VECTOR_VADDR - 4) | ||
120 | SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR) | 109 | SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR) |
121 | SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 20) | ||
122 | SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR) | 110 | SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR) |
123 | #endif | 111 | #endif |
124 | 112 | ||
113 | IRQENTRY_TEXT | ||
114 | SOFTIRQENTRY_TEXT | ||
115 | ENTRY_TEXT | ||
125 | TEXT_TEXT | 116 | TEXT_TEXT |
126 | VMLINUX_SYMBOL(__sched_text_start) = .; | 117 | SCHED_TEXT |
127 | *(.sched.literal .sched.text) | 118 | CPUIDLE_TEXT |
128 | VMLINUX_SYMBOL(__sched_text_end) = .; | 119 | LOCK_TEXT |
129 | VMLINUX_SYMBOL(__cpuidle_text_start) = .; | ||
130 | *(.cpuidle.literal .cpuidle.text) | ||
131 | VMLINUX_SYMBOL(__cpuidle_text_end) = .; | ||
132 | VMLINUX_SYMBOL(__lock_text_start) = .; | ||
133 | *(.spinlock.literal .spinlock.text) | ||
134 | VMLINUX_SYMBOL(__lock_text_end) = .; | ||
135 | 120 | ||
136 | } | 121 | } |
137 | _etext = .; | 122 | _etext = .; |
@@ -196,8 +181,6 @@ SECTIONS | |||
196 | .KernelExceptionVector.text); | 181 | .KernelExceptionVector.text); |
197 | RELOCATE_ENTRY(_UserExceptionVector_text, | 182 | RELOCATE_ENTRY(_UserExceptionVector_text, |
198 | .UserExceptionVector.text); | 183 | .UserExceptionVector.text); |
199 | RELOCATE_ENTRY(_DoubleExceptionVector_literal, | ||
200 | .DoubleExceptionVector.literal); | ||
201 | RELOCATE_ENTRY(_DoubleExceptionVector_text, | 184 | RELOCATE_ENTRY(_DoubleExceptionVector_text, |
202 | .DoubleExceptionVector.text); | 185 | .DoubleExceptionVector.text); |
203 | RELOCATE_ENTRY(_DebugInterruptVector_text, | 186 | RELOCATE_ENTRY(_DebugInterruptVector_text, |
@@ -230,25 +213,19 @@ SECTIONS | |||
230 | 213 | ||
231 | SECTION_VECTOR (_WindowVectors_text, | 214 | SECTION_VECTOR (_WindowVectors_text, |
232 | .WindowVectors.text, | 215 | .WindowVectors.text, |
233 | WINDOW_VECTORS_VADDR, 4, | 216 | WINDOW_VECTORS_VADDR, |
234 | .dummy) | 217 | .dummy) |
235 | SECTION_VECTOR (_DebugInterruptVector_literal, | ||
236 | .DebugInterruptVector.literal, | ||
237 | DEBUG_VECTOR_VADDR - 4, | ||
238 | SIZEOF(.WindowVectors.text), | ||
239 | .WindowVectors.text) | ||
240 | SECTION_VECTOR (_DebugInterruptVector_text, | 218 | SECTION_VECTOR (_DebugInterruptVector_text, |
241 | .DebugInterruptVector.text, | 219 | .DebugInterruptVector.text, |
242 | DEBUG_VECTOR_VADDR, | 220 | DEBUG_VECTOR_VADDR, |
243 | 4, | 221 | .WindowVectors.text) |
244 | .DebugInterruptVector.literal) | ||
245 | #undef LAST | 222 | #undef LAST |
246 | #define LAST .DebugInterruptVector.text | 223 | #define LAST .DebugInterruptVector.text |
247 | #if XCHAL_EXCM_LEVEL >= 2 | 224 | #if XCHAL_EXCM_LEVEL >= 2 |
248 | SECTION_VECTOR (_Level2InterruptVector_text, | 225 | SECTION_VECTOR (_Level2InterruptVector_text, |
249 | .Level2InterruptVector.text, | 226 | .Level2InterruptVector.text, |
250 | INTLEVEL2_VECTOR_VADDR, | 227 | INTLEVEL2_VECTOR_VADDR, |
251 | SIZEOF(LAST), LAST) | 228 | LAST) |
252 | # undef LAST | 229 | # undef LAST |
253 | # define LAST .Level2InterruptVector.text | 230 | # define LAST .Level2InterruptVector.text |
254 | #endif | 231 | #endif |
@@ -256,7 +233,7 @@ SECTIONS | |||
256 | SECTION_VECTOR (_Level3InterruptVector_text, | 233 | SECTION_VECTOR (_Level3InterruptVector_text, |
257 | .Level3InterruptVector.text, | 234 | .Level3InterruptVector.text, |
258 | INTLEVEL3_VECTOR_VADDR, | 235 | INTLEVEL3_VECTOR_VADDR, |
259 | SIZEOF(LAST), LAST) | 236 | LAST) |
260 | # undef LAST | 237 | # undef LAST |
261 | # define LAST .Level3InterruptVector.text | 238 | # define LAST .Level3InterruptVector.text |
262 | #endif | 239 | #endif |
@@ -264,7 +241,7 @@ SECTIONS | |||
264 | SECTION_VECTOR (_Level4InterruptVector_text, | 241 | SECTION_VECTOR (_Level4InterruptVector_text, |
265 | .Level4InterruptVector.text, | 242 | .Level4InterruptVector.text, |
266 | INTLEVEL4_VECTOR_VADDR, | 243 | INTLEVEL4_VECTOR_VADDR, |
267 | SIZEOF(LAST), LAST) | 244 | LAST) |
268 | # undef LAST | 245 | # undef LAST |
269 | # define LAST .Level4InterruptVector.text | 246 | # define LAST .Level4InterruptVector.text |
270 | #endif | 247 | #endif |
@@ -272,7 +249,7 @@ SECTIONS | |||
272 | SECTION_VECTOR (_Level5InterruptVector_text, | 249 | SECTION_VECTOR (_Level5InterruptVector_text, |
273 | .Level5InterruptVector.text, | 250 | .Level5InterruptVector.text, |
274 | INTLEVEL5_VECTOR_VADDR, | 251 | INTLEVEL5_VECTOR_VADDR, |
275 | SIZEOF(LAST), LAST) | 252 | LAST) |
276 | # undef LAST | 253 | # undef LAST |
277 | # define LAST .Level5InterruptVector.text | 254 | # define LAST .Level5InterruptVector.text |
278 | #endif | 255 | #endif |
@@ -280,40 +257,23 @@ SECTIONS | |||
280 | SECTION_VECTOR (_Level6InterruptVector_text, | 257 | SECTION_VECTOR (_Level6InterruptVector_text, |
281 | .Level6InterruptVector.text, | 258 | .Level6InterruptVector.text, |
282 | INTLEVEL6_VECTOR_VADDR, | 259 | INTLEVEL6_VECTOR_VADDR, |
283 | SIZEOF(LAST), LAST) | 260 | LAST) |
284 | # undef LAST | 261 | # undef LAST |
285 | # define LAST .Level6InterruptVector.text | 262 | # define LAST .Level6InterruptVector.text |
286 | #endif | 263 | #endif |
287 | SECTION_VECTOR (_KernelExceptionVector_literal, | ||
288 | .KernelExceptionVector.literal, | ||
289 | KERNEL_VECTOR_VADDR - 4, | ||
290 | SIZEOF(LAST), LAST) | ||
291 | #undef LAST | ||
292 | SECTION_VECTOR (_KernelExceptionVector_text, | 264 | SECTION_VECTOR (_KernelExceptionVector_text, |
293 | .KernelExceptionVector.text, | 265 | .KernelExceptionVector.text, |
294 | KERNEL_VECTOR_VADDR, | 266 | KERNEL_VECTOR_VADDR, |
295 | 4, | 267 | LAST) |
296 | .KernelExceptionVector.literal) | 268 | #undef LAST |
297 | SECTION_VECTOR (_UserExceptionVector_literal, | ||
298 | .UserExceptionVector.literal, | ||
299 | USER_VECTOR_VADDR - 4, | ||
300 | SIZEOF(.KernelExceptionVector.text), | ||
301 | .KernelExceptionVector.text) | ||
302 | SECTION_VECTOR (_UserExceptionVector_text, | 269 | SECTION_VECTOR (_UserExceptionVector_text, |
303 | .UserExceptionVector.text, | 270 | .UserExceptionVector.text, |
304 | USER_VECTOR_VADDR, | 271 | USER_VECTOR_VADDR, |
305 | 4, | 272 | .KernelExceptionVector.text) |
306 | .UserExceptionVector.literal) | ||
307 | SECTION_VECTOR (_DoubleExceptionVector_literal, | ||
308 | .DoubleExceptionVector.literal, | ||
309 | DOUBLEEXC_VECTOR_VADDR - 20, | ||
310 | SIZEOF(.UserExceptionVector.text), | ||
311 | .UserExceptionVector.text) | ||
312 | SECTION_VECTOR (_DoubleExceptionVector_text, | 273 | SECTION_VECTOR (_DoubleExceptionVector_text, |
313 | .DoubleExceptionVector.text, | 274 | .DoubleExceptionVector.text, |
314 | DOUBLEEXC_VECTOR_VADDR, | 275 | DOUBLEEXC_VECTOR_VADDR, |
315 | 20, | 276 | .UserExceptionVector.text) |
316 | .DoubleExceptionVector.literal) | ||
317 | 277 | ||
318 | . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; | 278 | . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; |
319 | 279 | ||
@@ -323,7 +283,6 @@ SECTIONS | |||
323 | SECTION_VECTOR (_SecondaryResetVector_text, | 283 | SECTION_VECTOR (_SecondaryResetVector_text, |
324 | .SecondaryResetVector.text, | 284 | .SecondaryResetVector.text, |
325 | RESET_VECTOR1_VADDR, | 285 | RESET_VECTOR1_VADDR, |
326 | SIZEOF(.DoubleExceptionVector.text), | ||
327 | .DoubleExceptionVector.text) | 286 | .DoubleExceptionVector.text) |
328 | 287 | ||
329 | . = LOADADDR(.SecondaryResetVector.text)+SIZEOF(.SecondaryResetVector.text); | 288 | . = LOADADDR(.SecondaryResetVector.text)+SIZEOF(.SecondaryResetVector.text); |
@@ -373,5 +332,4 @@ SECTIONS | |||
373 | 332 | ||
374 | /* Sections to be discarded */ | 333 | /* Sections to be discarded */ |
375 | DISCARDS | 334 | DISCARDS |
376 | /DISCARD/ : { *(.exit.literal) } | ||
377 | } | 335 | } |
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c index 672391003e40..04f19de46700 100644 --- a/arch/xtensa/kernel/xtensa_ksyms.c +++ b/arch/xtensa/kernel/xtensa_ksyms.c | |||
@@ -41,7 +41,12 @@ | |||
41 | EXPORT_SYMBOL(memset); | 41 | EXPORT_SYMBOL(memset); |
42 | EXPORT_SYMBOL(memcpy); | 42 | EXPORT_SYMBOL(memcpy); |
43 | EXPORT_SYMBOL(memmove); | 43 | EXPORT_SYMBOL(memmove); |
44 | EXPORT_SYMBOL(__memset); | ||
45 | EXPORT_SYMBOL(__memcpy); | ||
46 | EXPORT_SYMBOL(__memmove); | ||
47 | #ifndef CONFIG_GENERIC_STRNCPY_FROM_USER | ||
44 | EXPORT_SYMBOL(__strncpy_user); | 48 | EXPORT_SYMBOL(__strncpy_user); |
49 | #endif | ||
45 | EXPORT_SYMBOL(clear_page); | 50 | EXPORT_SYMBOL(clear_page); |
46 | EXPORT_SYMBOL(copy_page); | 51 | EXPORT_SYMBOL(copy_page); |
47 | 52 | ||
diff --git a/arch/xtensa/lib/checksum.S b/arch/xtensa/lib/checksum.S index 4eb573d2720e..528fe0dd9339 100644 --- a/arch/xtensa/lib/checksum.S +++ b/arch/xtensa/lib/checksum.S | |||
@@ -14,9 +14,10 @@ | |||
14 | * 2 of the License, or (at your option) any later version. | 14 | * 2 of the License, or (at your option) any later version. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <asm/errno.h> | 17 | #include <linux/errno.h> |
18 | #include <linux/linkage.h> | 18 | #include <linux/linkage.h> |
19 | #include <variant/core.h> | 19 | #include <variant/core.h> |
20 | #include <asm/asmmacro.h> | ||
20 | 21 | ||
21 | /* | 22 | /* |
22 | * computes a partial checksum, e.g. for TCP/UDP fragments | 23 | * computes a partial checksum, e.g. for TCP/UDP fragments |
@@ -175,23 +176,8 @@ ENDPROC(csum_partial) | |||
175 | 176 | ||
176 | /* | 177 | /* |
177 | * Copy from ds while checksumming, otherwise like csum_partial | 178 | * Copy from ds while checksumming, otherwise like csum_partial |
178 | * | ||
179 | * The macros SRC and DST specify the type of access for the instruction. | ||
180 | * thus we can call a custom exception handler for each access type. | ||
181 | */ | 179 | */ |
182 | 180 | ||
183 | #define SRC(y...) \ | ||
184 | 9999: y; \ | ||
185 | .section __ex_table, "a"; \ | ||
186 | .long 9999b, 6001f ; \ | ||
187 | .previous | ||
188 | |||
189 | #define DST(y...) \ | ||
190 | 9999: y; \ | ||
191 | .section __ex_table, "a"; \ | ||
192 | .long 9999b, 6002f ; \ | ||
193 | .previous | ||
194 | |||
195 | /* | 181 | /* |
196 | unsigned int csum_partial_copy_generic (const char *src, char *dst, int len, | 182 | unsigned int csum_partial_copy_generic (const char *src, char *dst, int len, |
197 | int sum, int *src_err_ptr, int *dst_err_ptr) | 183 | int sum, int *src_err_ptr, int *dst_err_ptr) |
@@ -244,28 +230,28 @@ ENTRY(csum_partial_copy_generic) | |||
244 | add a10, a10, a2 /* a10 = end of last 32-byte src chunk */ | 230 | add a10, a10, a2 /* a10 = end of last 32-byte src chunk */ |
245 | .Loop5: | 231 | .Loop5: |
246 | #endif | 232 | #endif |
247 | SRC( l32i a9, a2, 0 ) | 233 | EX(10f) l32i a9, a2, 0 |
248 | SRC( l32i a8, a2, 4 ) | 234 | EX(10f) l32i a8, a2, 4 |
249 | DST( s32i a9, a3, 0 ) | 235 | EX(11f) s32i a9, a3, 0 |
250 | DST( s32i a8, a3, 4 ) | 236 | EX(11f) s32i a8, a3, 4 |
251 | ONES_ADD(a5, a9) | 237 | ONES_ADD(a5, a9) |
252 | ONES_ADD(a5, a8) | 238 | ONES_ADD(a5, a8) |
253 | SRC( l32i a9, a2, 8 ) | 239 | EX(10f) l32i a9, a2, 8 |
254 | SRC( l32i a8, a2, 12 ) | 240 | EX(10f) l32i a8, a2, 12 |
255 | DST( s32i a9, a3, 8 ) | 241 | EX(11f) s32i a9, a3, 8 |
256 | DST( s32i a8, a3, 12 ) | 242 | EX(11f) s32i a8, a3, 12 |
257 | ONES_ADD(a5, a9) | 243 | ONES_ADD(a5, a9) |
258 | ONES_ADD(a5, a8) | 244 | ONES_ADD(a5, a8) |
259 | SRC( l32i a9, a2, 16 ) | 245 | EX(10f) l32i a9, a2, 16 |
260 | SRC( l32i a8, a2, 20 ) | 246 | EX(10f) l32i a8, a2, 20 |
261 | DST( s32i a9, a3, 16 ) | 247 | EX(11f) s32i a9, a3, 16 |
262 | DST( s32i a8, a3, 20 ) | 248 | EX(11f) s32i a8, a3, 20 |
263 | ONES_ADD(a5, a9) | 249 | ONES_ADD(a5, a9) |
264 | ONES_ADD(a5, a8) | 250 | ONES_ADD(a5, a8) |
265 | SRC( l32i a9, a2, 24 ) | 251 | EX(10f) l32i a9, a2, 24 |
266 | SRC( l32i a8, a2, 28 ) | 252 | EX(10f) l32i a8, a2, 28 |
267 | DST( s32i a9, a3, 24 ) | 253 | EX(11f) s32i a9, a3, 24 |
268 | DST( s32i a8, a3, 28 ) | 254 | EX(11f) s32i a8, a3, 28 |
269 | ONES_ADD(a5, a9) | 255 | ONES_ADD(a5, a9) |
270 | ONES_ADD(a5, a8) | 256 | ONES_ADD(a5, a8) |
271 | addi a2, a2, 32 | 257 | addi a2, a2, 32 |
@@ -284,8 +270,8 @@ DST( s32i a8, a3, 28 ) | |||
284 | add a10, a10, a2 /* a10 = end of last 4-byte src chunk */ | 270 | add a10, a10, a2 /* a10 = end of last 4-byte src chunk */ |
285 | .Loop6: | 271 | .Loop6: |
286 | #endif | 272 | #endif |
287 | SRC( l32i a9, a2, 0 ) | 273 | EX(10f) l32i a9, a2, 0 |
288 | DST( s32i a9, a3, 0 ) | 274 | EX(11f) s32i a9, a3, 0 |
289 | ONES_ADD(a5, a9) | 275 | ONES_ADD(a5, a9) |
290 | addi a2, a2, 4 | 276 | addi a2, a2, 4 |
291 | addi a3, a3, 4 | 277 | addi a3, a3, 4 |
@@ -315,8 +301,8 @@ DST( s32i a9, a3, 0 ) | |||
315 | add a10, a10, a2 /* a10 = end of last 2-byte src chunk */ | 301 | add a10, a10, a2 /* a10 = end of last 2-byte src chunk */ |
316 | .Loop7: | 302 | .Loop7: |
317 | #endif | 303 | #endif |
318 | SRC( l16ui a9, a2, 0 ) | 304 | EX(10f) l16ui a9, a2, 0 |
319 | DST( s16i a9, a3, 0 ) | 305 | EX(11f) s16i a9, a3, 0 |
320 | ONES_ADD(a5, a9) | 306 | ONES_ADD(a5, a9) |
321 | addi a2, a2, 2 | 307 | addi a2, a2, 2 |
322 | addi a3, a3, 2 | 308 | addi a3, a3, 2 |
@@ -326,8 +312,8 @@ DST( s16i a9, a3, 0 ) | |||
326 | 4: | 312 | 4: |
327 | /* This section processes a possible trailing odd byte. */ | 313 | /* This section processes a possible trailing odd byte. */ |
328 | _bbci.l a4, 0, 8f /* 1-byte chunk */ | 314 | _bbci.l a4, 0, 8f /* 1-byte chunk */ |
329 | SRC( l8ui a9, a2, 0 ) | 315 | EX(10f) l8ui a9, a2, 0 |
330 | DST( s8i a9, a3, 0 ) | 316 | EX(11f) s8i a9, a3, 0 |
331 | #ifdef __XTENSA_EB__ | 317 | #ifdef __XTENSA_EB__ |
332 | slli a9, a9, 8 /* shift byte to bits 8..15 */ | 318 | slli a9, a9, 8 /* shift byte to bits 8..15 */ |
333 | #endif | 319 | #endif |
@@ -350,10 +336,10 @@ DST( s8i a9, a3, 0 ) | |||
350 | add a10, a10, a2 /* a10 = end of last odd-aligned, 2-byte src chunk */ | 336 | add a10, a10, a2 /* a10 = end of last odd-aligned, 2-byte src chunk */ |
351 | .Loop8: | 337 | .Loop8: |
352 | #endif | 338 | #endif |
353 | SRC( l8ui a9, a2, 0 ) | 339 | EX(10f) l8ui a9, a2, 0 |
354 | SRC( l8ui a8, a2, 1 ) | 340 | EX(10f) l8ui a8, a2, 1 |
355 | DST( s8i a9, a3, 0 ) | 341 | EX(11f) s8i a9, a3, 0 |
356 | DST( s8i a8, a3, 1 ) | 342 | EX(11f) s8i a8, a3, 1 |
357 | #ifdef __XTENSA_EB__ | 343 | #ifdef __XTENSA_EB__ |
358 | slli a9, a9, 8 /* combine into a single 16-bit value */ | 344 | slli a9, a9, 8 /* combine into a single 16-bit value */ |
359 | #else /* for checksum computation */ | 345 | #else /* for checksum computation */ |
@@ -381,7 +367,7 @@ ENDPROC(csum_partial_copy_generic) | |||
381 | a12 = original dst for exception handling | 367 | a12 = original dst for exception handling |
382 | */ | 368 | */ |
383 | 369 | ||
384 | 6001: | 370 | 10: |
385 | _movi a2, -EFAULT | 371 | _movi a2, -EFAULT |
386 | s32i a2, a6, 0 /* src_err_ptr */ | 372 | s32i a2, a6, 0 /* src_err_ptr */ |
387 | 373 | ||
@@ -403,7 +389,7 @@ ENDPROC(csum_partial_copy_generic) | |||
403 | 2: | 389 | 2: |
404 | retw | 390 | retw |
405 | 391 | ||
406 | 6002: | 392 | 11: |
407 | movi a2, -EFAULT | 393 | movi a2, -EFAULT |
408 | s32i a2, a7, 0 /* dst_err_ptr */ | 394 | s32i a2, a7, 0 /* dst_err_ptr */ |
409 | movi a2, 0 | 395 | movi a2, 0 |
diff --git a/arch/xtensa/lib/memcopy.S b/arch/xtensa/lib/memcopy.S index b1c219acabe7..c0f6981719d6 100644 --- a/arch/xtensa/lib/memcopy.S +++ b/arch/xtensa/lib/memcopy.S | |||
@@ -9,23 +9,9 @@ | |||
9 | * Copyright (C) 2002 - 2012 Tensilica Inc. | 9 | * Copyright (C) 2002 - 2012 Tensilica Inc. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/linkage.h> | ||
12 | #include <variant/core.h> | 13 | #include <variant/core.h> |
13 | 14 | #include <asm/asmmacro.h> | |
14 | .macro src_b r, w0, w1 | ||
15 | #ifdef __XTENSA_EB__ | ||
16 | src \r, \w0, \w1 | ||
17 | #else | ||
18 | src \r, \w1, \w0 | ||
19 | #endif | ||
20 | .endm | ||
21 | |||
22 | .macro ssa8 r | ||
23 | #ifdef __XTENSA_EB__ | ||
24 | ssa8b \r | ||
25 | #else | ||
26 | ssa8l \r | ||
27 | #endif | ||
28 | .endm | ||
29 | 15 | ||
30 | /* | 16 | /* |
31 | * void *memcpy(void *dst, const void *src, size_t len); | 17 | * void *memcpy(void *dst, const void *src, size_t len); |
@@ -123,10 +109,8 @@ | |||
123 | addi a5, a5, 2 | 109 | addi a5, a5, 2 |
124 | j .Ldstaligned # dst is now aligned, return to main algorithm | 110 | j .Ldstaligned # dst is now aligned, return to main algorithm |
125 | 111 | ||
126 | .align 4 | 112 | ENTRY(__memcpy) |
127 | .global memcpy | 113 | WEAK(memcpy) |
128 | .type memcpy,@function | ||
129 | memcpy: | ||
130 | 114 | ||
131 | entry sp, 16 # minimal stack frame | 115 | entry sp, 16 # minimal stack frame |
132 | # a2/ dst, a3/ src, a4/ len | 116 | # a2/ dst, a3/ src, a4/ len |
@@ -209,7 +193,7 @@ memcpy: | |||
209 | .Lsrcunaligned: | 193 | .Lsrcunaligned: |
210 | _beqz a4, .Ldone # avoid loading anything for zero-length copies | 194 | _beqz a4, .Ldone # avoid loading anything for zero-length copies |
211 | # copy 16 bytes per iteration for word-aligned dst and unaligned src | 195 | # copy 16 bytes per iteration for word-aligned dst and unaligned src |
212 | ssa8 a3 # set shift amount from byte offset | 196 | __ssa8 a3 # set shift amount from byte offset |
213 | 197 | ||
214 | /* set to 1 when running on ISS (simulator) with the | 198 | /* set to 1 when running on ISS (simulator) with the |
215 | lint or ferret client, or 0 to save a few cycles */ | 199 | lint or ferret client, or 0 to save a few cycles */ |
@@ -229,16 +213,16 @@ memcpy: | |||
229 | .Loop2: | 213 | .Loop2: |
230 | l32i a7, a3, 4 | 214 | l32i a7, a3, 4 |
231 | l32i a8, a3, 8 | 215 | l32i a8, a3, 8 |
232 | src_b a6, a6, a7 | 216 | __src_b a6, a6, a7 |
233 | s32i a6, a5, 0 | 217 | s32i a6, a5, 0 |
234 | l32i a9, a3, 12 | 218 | l32i a9, a3, 12 |
235 | src_b a7, a7, a8 | 219 | __src_b a7, a7, a8 |
236 | s32i a7, a5, 4 | 220 | s32i a7, a5, 4 |
237 | l32i a6, a3, 16 | 221 | l32i a6, a3, 16 |
238 | src_b a8, a8, a9 | 222 | __src_b a8, a8, a9 |
239 | s32i a8, a5, 8 | 223 | s32i a8, a5, 8 |
240 | addi a3, a3, 16 | 224 | addi a3, a3, 16 |
241 | src_b a9, a9, a6 | 225 | __src_b a9, a9, a6 |
242 | s32i a9, a5, 12 | 226 | s32i a9, a5, 12 |
243 | addi a5, a5, 16 | 227 | addi a5, a5, 16 |
244 | #if !XCHAL_HAVE_LOOPS | 228 | #if !XCHAL_HAVE_LOOPS |
@@ -249,10 +233,10 @@ memcpy: | |||
249 | # copy 8 bytes | 233 | # copy 8 bytes |
250 | l32i a7, a3, 4 | 234 | l32i a7, a3, 4 |
251 | l32i a8, a3, 8 | 235 | l32i a8, a3, 8 |
252 | src_b a6, a6, a7 | 236 | __src_b a6, a6, a7 |
253 | s32i a6, a5, 0 | 237 | s32i a6, a5, 0 |
254 | addi a3, a3, 8 | 238 | addi a3, a3, 8 |
255 | src_b a7, a7, a8 | 239 | __src_b a7, a7, a8 |
256 | s32i a7, a5, 4 | 240 | s32i a7, a5, 4 |
257 | addi a5, a5, 8 | 241 | addi a5, a5, 8 |
258 | mov a6, a8 | 242 | mov a6, a8 |
@@ -261,7 +245,7 @@ memcpy: | |||
261 | # copy 4 bytes | 245 | # copy 4 bytes |
262 | l32i a7, a3, 4 | 246 | l32i a7, a3, 4 |
263 | addi a3, a3, 4 | 247 | addi a3, a3, 4 |
264 | src_b a6, a6, a7 | 248 | __src_b a6, a6, a7 |
265 | s32i a6, a5, 0 | 249 | s32i a6, a5, 0 |
266 | addi a5, a5, 4 | 250 | addi a5, a5, 4 |
267 | mov a6, a7 | 251 | mov a6, a7 |
@@ -288,14 +272,14 @@ memcpy: | |||
288 | s8i a6, a5, 0 | 272 | s8i a6, a5, 0 |
289 | retw | 273 | retw |
290 | 274 | ||
275 | ENDPROC(__memcpy) | ||
291 | 276 | ||
292 | /* | 277 | /* |
293 | * void bcopy(const void *src, void *dest, size_t n); | 278 | * void bcopy(const void *src, void *dest, size_t n); |
294 | */ | 279 | */ |
295 | .align 4 | 280 | |
296 | .global bcopy | 281 | ENTRY(bcopy) |
297 | .type bcopy,@function | 282 | |
298 | bcopy: | ||
299 | entry sp, 16 # minimal stack frame | 283 | entry sp, 16 # minimal stack frame |
300 | # a2=src, a3=dst, a4=len | 284 | # a2=src, a3=dst, a4=len |
301 | mov a5, a3 | 285 | mov a5, a3 |
@@ -303,6 +287,8 @@ bcopy: | |||
303 | mov a2, a5 | 287 | mov a2, a5 |
304 | j .Lmovecommon # go to common code for memmove+bcopy | 288 | j .Lmovecommon # go to common code for memmove+bcopy |
305 | 289 | ||
290 | ENDPROC(bcopy) | ||
291 | |||
306 | /* | 292 | /* |
307 | * void *memmove(void *dst, const void *src, size_t len); | 293 | * void *memmove(void *dst, const void *src, size_t len); |
308 | * | 294 | * |
@@ -391,10 +377,8 @@ bcopy: | |||
391 | j .Lbackdstaligned # dst is now aligned, | 377 | j .Lbackdstaligned # dst is now aligned, |
392 | # return to main algorithm | 378 | # return to main algorithm |
393 | 379 | ||
394 | .align 4 | 380 | ENTRY(__memmove) |
395 | .global memmove | 381 | WEAK(memmove) |
396 | .type memmove,@function | ||
397 | memmove: | ||
398 | 382 | ||
399 | entry sp, 16 # minimal stack frame | 383 | entry sp, 16 # minimal stack frame |
400 | # a2/ dst, a3/ src, a4/ len | 384 | # a2/ dst, a3/ src, a4/ len |
@@ -485,7 +469,7 @@ memmove: | |||
485 | .Lbacksrcunaligned: | 469 | .Lbacksrcunaligned: |
486 | _beqz a4, .Lbackdone # avoid loading anything for zero-length copies | 470 | _beqz a4, .Lbackdone # avoid loading anything for zero-length copies |
487 | # copy 16 bytes per iteration for word-aligned dst and unaligned src | 471 | # copy 16 bytes per iteration for word-aligned dst and unaligned src |
488 | ssa8 a3 # set shift amount from byte offset | 472 | __ssa8 a3 # set shift amount from byte offset |
489 | #define SIM_CHECKS_ALIGNMENT 1 /* set to 1 when running on ISS with | 473 | #define SIM_CHECKS_ALIGNMENT 1 /* set to 1 when running on ISS with |
490 | * the lint or ferret client, or 0 | 474 | * the lint or ferret client, or 0 |
491 | * to save a few cycles */ | 475 | * to save a few cycles */ |
@@ -506,15 +490,15 @@ memmove: | |||
506 | l32i a7, a3, 12 | 490 | l32i a7, a3, 12 |
507 | l32i a8, a3, 8 | 491 | l32i a8, a3, 8 |
508 | addi a5, a5, -16 | 492 | addi a5, a5, -16 |
509 | src_b a6, a7, a6 | 493 | __src_b a6, a7, a6 |
510 | s32i a6, a5, 12 | 494 | s32i a6, a5, 12 |
511 | l32i a9, a3, 4 | 495 | l32i a9, a3, 4 |
512 | src_b a7, a8, a7 | 496 | __src_b a7, a8, a7 |
513 | s32i a7, a5, 8 | 497 | s32i a7, a5, 8 |
514 | l32i a6, a3, 0 | 498 | l32i a6, a3, 0 |
515 | src_b a8, a9, a8 | 499 | __src_b a8, a9, a8 |
516 | s32i a8, a5, 4 | 500 | s32i a8, a5, 4 |
517 | src_b a9, a6, a9 | 501 | __src_b a9, a6, a9 |
518 | s32i a9, a5, 0 | 502 | s32i a9, a5, 0 |
519 | #if !XCHAL_HAVE_LOOPS | 503 | #if !XCHAL_HAVE_LOOPS |
520 | bne a3, a10, .backLoop2 # continue loop if a3:src != a10:src_start | 504 | bne a3, a10, .backLoop2 # continue loop if a3:src != a10:src_start |
@@ -526,9 +510,9 @@ memmove: | |||
526 | l32i a7, a3, 4 | 510 | l32i a7, a3, 4 |
527 | l32i a8, a3, 0 | 511 | l32i a8, a3, 0 |
528 | addi a5, a5, -8 | 512 | addi a5, a5, -8 |
529 | src_b a6, a7, a6 | 513 | __src_b a6, a7, a6 |
530 | s32i a6, a5, 4 | 514 | s32i a6, a5, 4 |
531 | src_b a7, a8, a7 | 515 | __src_b a7, a8, a7 |
532 | s32i a7, a5, 0 | 516 | s32i a7, a5, 0 |
533 | mov a6, a8 | 517 | mov a6, a8 |
534 | .Lback12: | 518 | .Lback12: |
@@ -537,7 +521,7 @@ memmove: | |||
537 | addi a3, a3, -4 | 521 | addi a3, a3, -4 |
538 | l32i a7, a3, 0 | 522 | l32i a7, a3, 0 |
539 | addi a5, a5, -4 | 523 | addi a5, a5, -4 |
540 | src_b a6, a7, a6 | 524 | __src_b a6, a7, a6 |
541 | s32i a6, a5, 0 | 525 | s32i a6, a5, 0 |
542 | mov a6, a7 | 526 | mov a6, a7 |
543 | .Lback13: | 527 | .Lback13: |
@@ -566,11 +550,4 @@ memmove: | |||
566 | s8i a6, a5, 0 | 550 | s8i a6, a5, 0 |
567 | retw | 551 | retw |
568 | 552 | ||
569 | 553 | ENDPROC(__memmove) | |
570 | /* | ||
571 | * Local Variables: | ||
572 | * mode:fundamental | ||
573 | * comment-start: "# " | ||
574 | * comment-start-skip: "# *" | ||
575 | * End: | ||
576 | */ | ||
diff --git a/arch/xtensa/lib/memset.S b/arch/xtensa/lib/memset.S index 10b8c400f175..276747dec300 100644 --- a/arch/xtensa/lib/memset.S +++ b/arch/xtensa/lib/memset.S | |||
@@ -11,7 +11,9 @@ | |||
11 | * Copyright (C) 2002 Tensilica Inc. | 11 | * Copyright (C) 2002 Tensilica Inc. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/linkage.h> | ||
14 | #include <variant/core.h> | 15 | #include <variant/core.h> |
16 | #include <asm/asmmacro.h> | ||
15 | 17 | ||
16 | /* | 18 | /* |
17 | * void *memset(void *dst, int c, size_t length) | 19 | * void *memset(void *dst, int c, size_t length) |
@@ -28,20 +30,10 @@ | |||
28 | * the alignment labels). | 30 | * the alignment labels). |
29 | */ | 31 | */ |
30 | 32 | ||
31 | /* Load or store instructions that may cause exceptions use the EX macro. */ | ||
32 | |||
33 | #define EX(insn,reg1,reg2,offset,handler) \ | ||
34 | 9: insn reg1, reg2, offset; \ | ||
35 | .section __ex_table, "a"; \ | ||
36 | .word 9b, handler; \ | ||
37 | .previous | ||
38 | |||
39 | |||
40 | .text | 33 | .text |
41 | .align 4 | 34 | ENTRY(__memset) |
42 | .global memset | 35 | WEAK(memset) |
43 | .type memset,@function | 36 | |
44 | memset: | ||
45 | entry sp, 16 # minimal stack frame | 37 | entry sp, 16 # minimal stack frame |
46 | # a2/ dst, a3/ c, a4/ length | 38 | # a2/ dst, a3/ c, a4/ length |
47 | extui a3, a3, 0, 8 # mask to just 8 bits | 39 | extui a3, a3, 0, 8 # mask to just 8 bits |
@@ -73,10 +65,10 @@ memset: | |||
73 | add a6, a6, a5 # a6 = end of last 16B chunk | 65 | add a6, a6, a5 # a6 = end of last 16B chunk |
74 | #endif /* !XCHAL_HAVE_LOOPS */ | 66 | #endif /* !XCHAL_HAVE_LOOPS */ |
75 | .Loop1: | 67 | .Loop1: |
76 | EX(s32i, a3, a5, 0, memset_fixup) | 68 | EX(10f) s32i a3, a5, 0 |
77 | EX(s32i, a3, a5, 4, memset_fixup) | 69 | EX(10f) s32i a3, a5, 4 |
78 | EX(s32i, a3, a5, 8, memset_fixup) | 70 | EX(10f) s32i a3, a5, 8 |
79 | EX(s32i, a3, a5, 12, memset_fixup) | 71 | EX(10f) s32i a3, a5, 12 |
80 | addi a5, a5, 16 | 72 | addi a5, a5, 16 |
81 | #if !XCHAL_HAVE_LOOPS | 73 | #if !XCHAL_HAVE_LOOPS |
82 | blt a5, a6, .Loop1 | 74 | blt a5, a6, .Loop1 |
@@ -84,23 +76,23 @@ memset: | |||
84 | .Loop1done: | 76 | .Loop1done: |
85 | bbci.l a4, 3, .L2 | 77 | bbci.l a4, 3, .L2 |
86 | # set 8 bytes | 78 | # set 8 bytes |
87 | EX(s32i, a3, a5, 0, memset_fixup) | 79 | EX(10f) s32i a3, a5, 0 |
88 | EX(s32i, a3, a5, 4, memset_fixup) | 80 | EX(10f) s32i a3, a5, 4 |
89 | addi a5, a5, 8 | 81 | addi a5, a5, 8 |
90 | .L2: | 82 | .L2: |
91 | bbci.l a4, 2, .L3 | 83 | bbci.l a4, 2, .L3 |
92 | # set 4 bytes | 84 | # set 4 bytes |
93 | EX(s32i, a3, a5, 0, memset_fixup) | 85 | EX(10f) s32i a3, a5, 0 |
94 | addi a5, a5, 4 | 86 | addi a5, a5, 4 |
95 | .L3: | 87 | .L3: |
96 | bbci.l a4, 1, .L4 | 88 | bbci.l a4, 1, .L4 |
97 | # set 2 bytes | 89 | # set 2 bytes |
98 | EX(s16i, a3, a5, 0, memset_fixup) | 90 | EX(10f) s16i a3, a5, 0 |
99 | addi a5, a5, 2 | 91 | addi a5, a5, 2 |
100 | .L4: | 92 | .L4: |
101 | bbci.l a4, 0, .L5 | 93 | bbci.l a4, 0, .L5 |
102 | # set 1 byte | 94 | # set 1 byte |
103 | EX(s8i, a3, a5, 0, memset_fixup) | 95 | EX(10f) s8i a3, a5, 0 |
104 | .L5: | 96 | .L5: |
105 | .Lret1: | 97 | .Lret1: |
106 | retw | 98 | retw |
@@ -114,7 +106,7 @@ memset: | |||
114 | bbci.l a5, 0, .L20 # branch if dst alignment half-aligned | 106 | bbci.l a5, 0, .L20 # branch if dst alignment half-aligned |
115 | # dst is only byte aligned | 107 | # dst is only byte aligned |
116 | # set 1 byte | 108 | # set 1 byte |
117 | EX(s8i, a3, a5, 0, memset_fixup) | 109 | EX(10f) s8i a3, a5, 0 |
118 | addi a5, a5, 1 | 110 | addi a5, a5, 1 |
119 | addi a4, a4, -1 | 111 | addi a4, a4, -1 |
120 | # now retest if dst aligned | 112 | # now retest if dst aligned |
@@ -122,7 +114,7 @@ memset: | |||
122 | .L20: | 114 | .L20: |
123 | # dst half-aligned | 115 | # dst half-aligned |
124 | # set 2 bytes | 116 | # set 2 bytes |
125 | EX(s16i, a3, a5, 0, memset_fixup) | 117 | EX(10f) s16i a3, a5, 0 |
126 | addi a5, a5, 2 | 118 | addi a5, a5, 2 |
127 | addi a4, a4, -2 | 119 | addi a4, a4, -2 |
128 | j .L0 # dst is now aligned, return to main algorithm | 120 | j .L0 # dst is now aligned, return to main algorithm |
@@ -141,7 +133,7 @@ memset: | |||
141 | add a6, a5, a4 # a6 = ending address | 133 | add a6, a5, a4 # a6 = ending address |
142 | #endif /* !XCHAL_HAVE_LOOPS */ | 134 | #endif /* !XCHAL_HAVE_LOOPS */ |
143 | .Lbyteloop: | 135 | .Lbyteloop: |
144 | EX(s8i, a3, a5, 0, memset_fixup) | 136 | EX(10f) s8i a3, a5, 0 |
145 | addi a5, a5, 1 | 137 | addi a5, a5, 1 |
146 | #if !XCHAL_HAVE_LOOPS | 138 | #if !XCHAL_HAVE_LOOPS |
147 | blt a5, a6, .Lbyteloop | 139 | blt a5, a6, .Lbyteloop |
@@ -149,12 +141,13 @@ memset: | |||
149 | .Lbytesetdone: | 141 | .Lbytesetdone: |
150 | retw | 142 | retw |
151 | 143 | ||
144 | ENDPROC(__memset) | ||
152 | 145 | ||
153 | .section .fixup, "ax" | 146 | .section .fixup, "ax" |
154 | .align 4 | 147 | .align 4 |
155 | 148 | ||
156 | /* We return zero if a failure occurred. */ | 149 | /* We return zero if a failure occurred. */ |
157 | 150 | ||
158 | memset_fixup: | 151 | 10: |
159 | movi a2, 0 | 152 | movi a2, 0 |
160 | retw | 153 | retw |
diff --git a/arch/xtensa/lib/pci-auto.c b/arch/xtensa/lib/pci-auto.c index 34d05abbd921..a2b558161d6d 100644 --- a/arch/xtensa/lib/pci-auto.c +++ b/arch/xtensa/lib/pci-auto.c | |||
@@ -49,17 +49,6 @@ | |||
49 | * | 49 | * |
50 | */ | 50 | */ |
51 | 51 | ||
52 | |||
53 | /* define DEBUG to print some debugging messages. */ | ||
54 | |||
55 | #undef DEBUG | ||
56 | |||
57 | #ifdef DEBUG | ||
58 | # define DBG(x...) printk(x) | ||
59 | #else | ||
60 | # define DBG(x...) | ||
61 | #endif | ||
62 | |||
63 | static int pciauto_upper_iospc; | 52 | static int pciauto_upper_iospc; |
64 | static int pciauto_upper_memspc; | 53 | static int pciauto_upper_memspc; |
65 | 54 | ||
@@ -97,7 +86,7 @@ pciauto_setup_bars(struct pci_dev *dev, int bar_limit) | |||
97 | { | 86 | { |
98 | bar_size &= PCI_BASE_ADDRESS_IO_MASK; | 87 | bar_size &= PCI_BASE_ADDRESS_IO_MASK; |
99 | upper_limit = &pciauto_upper_iospc; | 88 | upper_limit = &pciauto_upper_iospc; |
100 | DBG("PCI Autoconfig: BAR %d, I/O, ", bar_nr); | 89 | pr_debug("PCI Autoconfig: BAR %d, I/O, ", bar_nr); |
101 | } | 90 | } |
102 | else | 91 | else |
103 | { | 92 | { |
@@ -107,7 +96,7 @@ pciauto_setup_bars(struct pci_dev *dev, int bar_limit) | |||
107 | 96 | ||
108 | bar_size &= PCI_BASE_ADDRESS_MEM_MASK; | 97 | bar_size &= PCI_BASE_ADDRESS_MEM_MASK; |
109 | upper_limit = &pciauto_upper_memspc; | 98 | upper_limit = &pciauto_upper_memspc; |
110 | DBG("PCI Autoconfig: BAR %d, Mem, ", bar_nr); | 99 | pr_debug("PCI Autoconfig: BAR %d, Mem, ", bar_nr); |
111 | } | 100 | } |
112 | 101 | ||
113 | /* Allocate a base address (bar_size is negative!) */ | 102 | /* Allocate a base address (bar_size is negative!) */ |
@@ -125,7 +114,8 @@ pciauto_setup_bars(struct pci_dev *dev, int bar_limit) | |||
125 | if (found_mem64) | 114 | if (found_mem64) |
126 | pci_write_config_dword(dev, (bar+=4), 0x00000000); | 115 | pci_write_config_dword(dev, (bar+=4), 0x00000000); |
127 | 116 | ||
128 | DBG("size=0x%x, address=0x%x\n", ~bar_size + 1, *upper_limit); | 117 | pr_debug("size=0x%x, address=0x%x\n", |
118 | ~bar_size + 1, *upper_limit); | ||
129 | } | 119 | } |
130 | } | 120 | } |
131 | 121 | ||
@@ -150,7 +140,7 @@ pciauto_setup_irq(struct pci_controller* pci_ctrl,struct pci_dev *dev,int devfn) | |||
150 | if (irq == -1) | 140 | if (irq == -1) |
151 | irq = 0; | 141 | irq = 0; |
152 | 142 | ||
153 | DBG("PCI Autoconfig: Interrupt %d, pin %d\n", irq, pin); | 143 | pr_debug("PCI Autoconfig: Interrupt %d, pin %d\n", irq, pin); |
154 | 144 | ||
155 | pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); | 145 | pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); |
156 | } | 146 | } |
@@ -289,8 +279,8 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus) | |||
289 | 279 | ||
290 | int iosave, memsave; | 280 | int iosave, memsave; |
291 | 281 | ||
292 | DBG("PCI Autoconfig: Found P2P bridge, device %d\n", | 282 | pr_debug("PCI Autoconfig: Found P2P bridge, device %d\n", |
293 | PCI_SLOT(pci_devfn)); | 283 | PCI_SLOT(pci_devfn)); |
294 | 284 | ||
295 | /* Allocate PCI I/O and/or memory space */ | 285 | /* Allocate PCI I/O and/or memory space */ |
296 | pciauto_setup_bars(dev, PCI_BASE_ADDRESS_1); | 286 | pciauto_setup_bars(dev, PCI_BASE_ADDRESS_1); |
@@ -306,23 +296,6 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus) | |||
306 | 296 | ||
307 | } | 297 | } |
308 | 298 | ||
309 | |||
310 | #if 0 | ||
311 | /* Skip legacy mode IDE controller */ | ||
312 | |||
313 | if ((pci_class >> 16) == PCI_CLASS_STORAGE_IDE) { | ||
314 | |||
315 | unsigned char prg_iface; | ||
316 | pci_read_config_byte(dev, PCI_CLASS_PROG, &prg_iface); | ||
317 | |||
318 | if (!(prg_iface & PCIAUTO_IDE_MODE_MASK)) { | ||
319 | DBG("PCI Autoconfig: Skipping legacy mode " | ||
320 | "IDE controller\n"); | ||
321 | continue; | ||
322 | } | ||
323 | } | ||
324 | #endif | ||
325 | |||
326 | /* | 299 | /* |
327 | * Found a peripheral, enable some standard | 300 | * Found a peripheral, enable some standard |
328 | * settings | 301 | * settings |
@@ -337,8 +310,8 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus) | |||
337 | pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x80); | 310 | pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x80); |
338 | 311 | ||
339 | /* Allocate PCI I/O and/or memory space */ | 312 | /* Allocate PCI I/O and/or memory space */ |
340 | DBG("PCI Autoconfig: Found Bus %d, Device %d, Function %d\n", | 313 | pr_debug("PCI Autoconfig: Found Bus %d, Device %d, Function %d\n", |
341 | current_bus, PCI_SLOT(pci_devfn), PCI_FUNC(pci_devfn) ); | 314 | current_bus, PCI_SLOT(pci_devfn), PCI_FUNC(pci_devfn)); |
342 | 315 | ||
343 | pciauto_setup_bars(dev, PCI_BASE_ADDRESS_5); | 316 | pciauto_setup_bars(dev, PCI_BASE_ADDRESS_5); |
344 | pciauto_setup_irq(pci_ctrl, dev, pci_devfn); | 317 | pciauto_setup_irq(pci_ctrl, dev, pci_devfn); |
diff --git a/arch/xtensa/lib/strncpy_user.S b/arch/xtensa/lib/strncpy_user.S index 1ad0ecf45368..5fce16b67dca 100644 --- a/arch/xtensa/lib/strncpy_user.S +++ b/arch/xtensa/lib/strncpy_user.S | |||
@@ -11,16 +11,10 @@ | |||
11 | * Copyright (C) 2002 Tensilica Inc. | 11 | * Copyright (C) 2002 Tensilica Inc. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <variant/core.h> | ||
15 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
16 | 15 | #include <linux/linkage.h> | |
17 | /* Load or store instructions that may cause exceptions use the EX macro. */ | 16 | #include <variant/core.h> |
18 | 17 | #include <asm/asmmacro.h> | |
19 | #define EX(insn,reg1,reg2,offset,handler) \ | ||
20 | 9: insn reg1, reg2, offset; \ | ||
21 | .section __ex_table, "a"; \ | ||
22 | .word 9b, handler; \ | ||
23 | .previous | ||
24 | 18 | ||
25 | /* | 19 | /* |
26 | * char *__strncpy_user(char *dst, const char *src, size_t len) | 20 | * char *__strncpy_user(char *dst, const char *src, size_t len) |
@@ -54,10 +48,8 @@ | |||
54 | # a12/ tmp | 48 | # a12/ tmp |
55 | 49 | ||
56 | .text | 50 | .text |
57 | .align 4 | 51 | ENTRY(__strncpy_user) |
58 | .global __strncpy_user | 52 | |
59 | .type __strncpy_user,@function | ||
60 | __strncpy_user: | ||
61 | entry sp, 16 # minimal stack frame | 53 | entry sp, 16 # minimal stack frame |
62 | # a2/ dst, a3/ src, a4/ len | 54 | # a2/ dst, a3/ src, a4/ len |
63 | mov a11, a2 # leave dst in return value register | 55 | mov a11, a2 # leave dst in return value register |
@@ -75,9 +67,9 @@ __strncpy_user: | |||
75 | j .Ldstunaligned | 67 | j .Ldstunaligned |
76 | 68 | ||
77 | .Lsrc1mod2: # src address is odd | 69 | .Lsrc1mod2: # src address is odd |
78 | EX(l8ui, a9, a3, 0, fixup_l) # get byte 0 | 70 | EX(11f) l8ui a9, a3, 0 # get byte 0 |
79 | addi a3, a3, 1 # advance src pointer | 71 | addi a3, a3, 1 # advance src pointer |
80 | EX(s8i, a9, a11, 0, fixup_s) # store byte 0 | 72 | EX(10f) s8i a9, a11, 0 # store byte 0 |
81 | beqz a9, .Lret # if byte 0 is zero | 73 | beqz a9, .Lret # if byte 0 is zero |
82 | addi a11, a11, 1 # advance dst pointer | 74 | addi a11, a11, 1 # advance dst pointer |
83 | addi a4, a4, -1 # decrement len | 75 | addi a4, a4, -1 # decrement len |
@@ -85,16 +77,16 @@ __strncpy_user: | |||
85 | bbci.l a3, 1, .Lsrcaligned # if src is now word-aligned | 77 | bbci.l a3, 1, .Lsrcaligned # if src is now word-aligned |
86 | 78 | ||
87 | .Lsrc2mod4: # src address is 2 mod 4 | 79 | .Lsrc2mod4: # src address is 2 mod 4 |
88 | EX(l8ui, a9, a3, 0, fixup_l) # get byte 0 | 80 | EX(11f) l8ui a9, a3, 0 # get byte 0 |
89 | /* 1-cycle interlock */ | 81 | /* 1-cycle interlock */ |
90 | EX(s8i, a9, a11, 0, fixup_s) # store byte 0 | 82 | EX(10f) s8i a9, a11, 0 # store byte 0 |
91 | beqz a9, .Lret # if byte 0 is zero | 83 | beqz a9, .Lret # if byte 0 is zero |
92 | addi a11, a11, 1 # advance dst pointer | 84 | addi a11, a11, 1 # advance dst pointer |
93 | addi a4, a4, -1 # decrement len | 85 | addi a4, a4, -1 # decrement len |
94 | beqz a4, .Lret # if len is zero | 86 | beqz a4, .Lret # if len is zero |
95 | EX(l8ui, a9, a3, 1, fixup_l) # get byte 0 | 87 | EX(11f) l8ui a9, a3, 1 # get byte 0 |
96 | addi a3, a3, 2 # advance src pointer | 88 | addi a3, a3, 2 # advance src pointer |
97 | EX(s8i, a9, a11, 0, fixup_s) # store byte 0 | 89 | EX(10f) s8i a9, a11, 0 # store byte 0 |
98 | beqz a9, .Lret # if byte 0 is zero | 90 | beqz a9, .Lret # if byte 0 is zero |
99 | addi a11, a11, 1 # advance dst pointer | 91 | addi a11, a11, 1 # advance dst pointer |
100 | addi a4, a4, -1 # decrement len | 92 | addi a4, a4, -1 # decrement len |
@@ -117,12 +109,12 @@ __strncpy_user: | |||
117 | add a12, a12, a11 # a12 = end of last 4B chunck | 109 | add a12, a12, a11 # a12 = end of last 4B chunck |
118 | #endif | 110 | #endif |
119 | .Loop1: | 111 | .Loop1: |
120 | EX(l32i, a9, a3, 0, fixup_l) # get word from src | 112 | EX(11f) l32i a9, a3, 0 # get word from src |
121 | addi a3, a3, 4 # advance src pointer | 113 | addi a3, a3, 4 # advance src pointer |
122 | bnone a9, a5, .Lz0 # if byte 0 is zero | 114 | bnone a9, a5, .Lz0 # if byte 0 is zero |
123 | bnone a9, a6, .Lz1 # if byte 1 is zero | 115 | bnone a9, a6, .Lz1 # if byte 1 is zero |
124 | bnone a9, a7, .Lz2 # if byte 2 is zero | 116 | bnone a9, a7, .Lz2 # if byte 2 is zero |
125 | EX(s32i, a9, a11, 0, fixup_s) # store word to dst | 117 | EX(10f) s32i a9, a11, 0 # store word to dst |
126 | bnone a9, a8, .Lz3 # if byte 3 is zero | 118 | bnone a9, a8, .Lz3 # if byte 3 is zero |
127 | addi a11, a11, 4 # advance dst pointer | 119 | addi a11, a11, 4 # advance dst pointer |
128 | #if !XCHAL_HAVE_LOOPS | 120 | #if !XCHAL_HAVE_LOOPS |
@@ -132,7 +124,7 @@ __strncpy_user: | |||
132 | .Loop1done: | 124 | .Loop1done: |
133 | bbci.l a4, 1, .L100 | 125 | bbci.l a4, 1, .L100 |
134 | # copy 2 bytes | 126 | # copy 2 bytes |
135 | EX(l16ui, a9, a3, 0, fixup_l) | 127 | EX(11f) l16ui a9, a3, 0 |
136 | addi a3, a3, 2 # advance src pointer | 128 | addi a3, a3, 2 # advance src pointer |
137 | #ifdef __XTENSA_EB__ | 129 | #ifdef __XTENSA_EB__ |
138 | bnone a9, a7, .Lz0 # if byte 2 is zero | 130 | bnone a9, a7, .Lz0 # if byte 2 is zero |
@@ -141,13 +133,13 @@ __strncpy_user: | |||
141 | bnone a9, a5, .Lz0 # if byte 0 is zero | 133 | bnone a9, a5, .Lz0 # if byte 0 is zero |
142 | bnone a9, a6, .Lz1 # if byte 1 is zero | 134 | bnone a9, a6, .Lz1 # if byte 1 is zero |
143 | #endif | 135 | #endif |
144 | EX(s16i, a9, a11, 0, fixup_s) | 136 | EX(10f) s16i a9, a11, 0 |
145 | addi a11, a11, 2 # advance dst pointer | 137 | addi a11, a11, 2 # advance dst pointer |
146 | .L100: | 138 | .L100: |
147 | bbci.l a4, 0, .Lret | 139 | bbci.l a4, 0, .Lret |
148 | EX(l8ui, a9, a3, 0, fixup_l) | 140 | EX(11f) l8ui a9, a3, 0 |
149 | /* slot */ | 141 | /* slot */ |
150 | EX(s8i, a9, a11, 0, fixup_s) | 142 | EX(10f) s8i a9, a11, 0 |
151 | beqz a9, .Lret # if byte is zero | 143 | beqz a9, .Lret # if byte is zero |
152 | addi a11, a11, 1-3 # advance dst ptr 1, but also cancel | 144 | addi a11, a11, 1-3 # advance dst ptr 1, but also cancel |
153 | # the effect of adding 3 in .Lz3 code | 145 | # the effect of adding 3 in .Lz3 code |
@@ -161,14 +153,14 @@ __strncpy_user: | |||
161 | #ifdef __XTENSA_EB__ | 153 | #ifdef __XTENSA_EB__ |
162 | movi a9, 0 | 154 | movi a9, 0 |
163 | #endif /* __XTENSA_EB__ */ | 155 | #endif /* __XTENSA_EB__ */ |
164 | EX(s8i, a9, a11, 0, fixup_s) | 156 | EX(10f) s8i a9, a11, 0 |
165 | sub a2, a11, a2 # compute strlen | 157 | sub a2, a11, a2 # compute strlen |
166 | retw | 158 | retw |
167 | .Lz1: # byte 1 is zero | 159 | .Lz1: # byte 1 is zero |
168 | #ifdef __XTENSA_EB__ | 160 | #ifdef __XTENSA_EB__ |
169 | extui a9, a9, 16, 16 | 161 | extui a9, a9, 16, 16 |
170 | #endif /* __XTENSA_EB__ */ | 162 | #endif /* __XTENSA_EB__ */ |
171 | EX(s16i, a9, a11, 0, fixup_s) | 163 | EX(10f) s16i a9, a11, 0 |
172 | addi a11, a11, 1 # advance dst pointer | 164 | addi a11, a11, 1 # advance dst pointer |
173 | sub a2, a11, a2 # compute strlen | 165 | sub a2, a11, a2 # compute strlen |
174 | retw | 166 | retw |
@@ -176,9 +168,9 @@ __strncpy_user: | |||
176 | #ifdef __XTENSA_EB__ | 168 | #ifdef __XTENSA_EB__ |
177 | extui a9, a9, 16, 16 | 169 | extui a9, a9, 16, 16 |
178 | #endif /* __XTENSA_EB__ */ | 170 | #endif /* __XTENSA_EB__ */ |
179 | EX(s16i, a9, a11, 0, fixup_s) | 171 | EX(10f) s16i a9, a11, 0 |
180 | movi a9, 0 | 172 | movi a9, 0 |
181 | EX(s8i, a9, a11, 2, fixup_s) | 173 | EX(10f) s8i a9, a11, 2 |
182 | addi a11, a11, 2 # advance dst pointer | 174 | addi a11, a11, 2 # advance dst pointer |
183 | sub a2, a11, a2 # compute strlen | 175 | sub a2, a11, a2 # compute strlen |
184 | retw | 176 | retw |
@@ -196,9 +188,9 @@ __strncpy_user: | |||
196 | add a12, a11, a4 # a12 = ending address | 188 | add a12, a11, a4 # a12 = ending address |
197 | #endif /* XCHAL_HAVE_LOOPS */ | 189 | #endif /* XCHAL_HAVE_LOOPS */ |
198 | .Lnextbyte: | 190 | .Lnextbyte: |
199 | EX(l8ui, a9, a3, 0, fixup_l) | 191 | EX(11f) l8ui a9, a3, 0 |
200 | addi a3, a3, 1 | 192 | addi a3, a3, 1 |
201 | EX(s8i, a9, a11, 0, fixup_s) | 193 | EX(10f) s8i a9, a11, 0 |
202 | beqz a9, .Lunalignedend | 194 | beqz a9, .Lunalignedend |
203 | addi a11, a11, 1 | 195 | addi a11, a11, 1 |
204 | #if !XCHAL_HAVE_LOOPS | 196 | #if !XCHAL_HAVE_LOOPS |
@@ -209,6 +201,7 @@ __strncpy_user: | |||
209 | sub a2, a11, a2 # compute strlen | 201 | sub a2, a11, a2 # compute strlen |
210 | retw | 202 | retw |
211 | 203 | ||
204 | ENDPROC(__strncpy_user) | ||
212 | 205 | ||
213 | .section .fixup, "ax" | 206 | .section .fixup, "ax" |
214 | .align 4 | 207 | .align 4 |
@@ -218,8 +211,7 @@ __strncpy_user: | |||
218 | * implementation in memset(). Thus, we differentiate between | 211 | * implementation in memset(). Thus, we differentiate between |
219 | * load/store fixups. */ | 212 | * load/store fixups. */ |
220 | 213 | ||
221 | fixup_s: | 214 | 10: |
222 | fixup_l: | 215 | 11: |
223 | movi a2, -EFAULT | 216 | movi a2, -EFAULT |
224 | retw | 217 | retw |
225 | |||
diff --git a/arch/xtensa/lib/strnlen_user.S b/arch/xtensa/lib/strnlen_user.S index 4c03b1e581e9..0b956ce7f386 100644 --- a/arch/xtensa/lib/strnlen_user.S +++ b/arch/xtensa/lib/strnlen_user.S | |||
@@ -11,15 +11,9 @@ | |||
11 | * Copyright (C) 2002 Tensilica Inc. | 11 | * Copyright (C) 2002 Tensilica Inc. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/linkage.h> | ||
14 | #include <variant/core.h> | 15 | #include <variant/core.h> |
15 | 16 | #include <asm/asmmacro.h> | |
16 | /* Load or store instructions that may cause exceptions use the EX macro. */ | ||
17 | |||
18 | #define EX(insn,reg1,reg2,offset,handler) \ | ||
19 | 9: insn reg1, reg2, offset; \ | ||
20 | .section __ex_table, "a"; \ | ||
21 | .word 9b, handler; \ | ||
22 | .previous | ||
23 | 17 | ||
24 | /* | 18 | /* |
25 | * size_t __strnlen_user(const char *s, size_t len) | 19 | * size_t __strnlen_user(const char *s, size_t len) |
@@ -49,10 +43,8 @@ | |||
49 | # a10/ tmp | 43 | # a10/ tmp |
50 | 44 | ||
51 | .text | 45 | .text |
52 | .align 4 | 46 | ENTRY(__strnlen_user) |
53 | .global __strnlen_user | 47 | |
54 | .type __strnlen_user,@function | ||
55 | __strnlen_user: | ||
56 | entry sp, 16 # minimal stack frame | 48 | entry sp, 16 # minimal stack frame |
57 | # a2/ s, a3/ len | 49 | # a2/ s, a3/ len |
58 | addi a4, a2, -4 # because we overincrement at the end; | 50 | addi a4, a2, -4 # because we overincrement at the end; |
@@ -77,7 +69,7 @@ __strnlen_user: | |||
77 | add a10, a10, a4 # a10 = end of last 4B chunk | 69 | add a10, a10, a4 # a10 = end of last 4B chunk |
78 | #endif /* XCHAL_HAVE_LOOPS */ | 70 | #endif /* XCHAL_HAVE_LOOPS */ |
79 | .Loop: | 71 | .Loop: |
80 | EX(l32i, a9, a4, 4, lenfixup) # get next word of string | 72 | EX(10f) l32i a9, a4, 4 # get next word of string |
81 | addi a4, a4, 4 # advance string pointer | 73 | addi a4, a4, 4 # advance string pointer |
82 | bnone a9, a5, .Lz0 # if byte 0 is zero | 74 | bnone a9, a5, .Lz0 # if byte 0 is zero |
83 | bnone a9, a6, .Lz1 # if byte 1 is zero | 75 | bnone a9, a6, .Lz1 # if byte 1 is zero |
@@ -88,7 +80,7 @@ __strnlen_user: | |||
88 | #endif | 80 | #endif |
89 | 81 | ||
90 | .Ldone: | 82 | .Ldone: |
91 | EX(l32i, a9, a4, 4, lenfixup) # load 4 bytes for remaining checks | 83 | EX(10f) l32i a9, a4, 4 # load 4 bytes for remaining checks |
92 | 84 | ||
93 | bbci.l a3, 1, .L100 | 85 | bbci.l a3, 1, .L100 |
94 | # check two more bytes (bytes 0, 1 of word) | 86 | # check two more bytes (bytes 0, 1 of word) |
@@ -125,14 +117,14 @@ __strnlen_user: | |||
125 | retw | 117 | retw |
126 | 118 | ||
127 | .L1mod2: # address is odd | 119 | .L1mod2: # address is odd |
128 | EX(l8ui, a9, a4, 4, lenfixup) # get byte 0 | 120 | EX(10f) l8ui a9, a4, 4 # get byte 0 |
129 | addi a4, a4, 1 # advance string pointer | 121 | addi a4, a4, 1 # advance string pointer |
130 | beqz a9, .Lz3 # if byte 0 is zero | 122 | beqz a9, .Lz3 # if byte 0 is zero |
131 | bbci.l a4, 1, .Laligned # if string pointer is now word-aligned | 123 | bbci.l a4, 1, .Laligned # if string pointer is now word-aligned |
132 | 124 | ||
133 | .L2mod4: # address is 2 mod 4 | 125 | .L2mod4: # address is 2 mod 4 |
134 | addi a4, a4, 2 # advance ptr for aligned access | 126 | addi a4, a4, 2 # advance ptr for aligned access |
135 | EX(l32i, a9, a4, 0, lenfixup) # get word with first two bytes of string | 127 | EX(10f) l32i a9, a4, 0 # get word with first two bytes of string |
136 | bnone a9, a7, .Lz2 # if byte 2 (of word, not string) is zero | 128 | bnone a9, a7, .Lz2 # if byte 2 (of word, not string) is zero |
137 | bany a9, a8, .Laligned # if byte 3 (of word, not string) is nonzero | 129 | bany a9, a8, .Laligned # if byte 3 (of word, not string) is nonzero |
138 | # byte 3 is zero | 130 | # byte 3 is zero |
@@ -140,8 +132,10 @@ __strnlen_user: | |||
140 | sub a2, a4, a2 # subtract to get length | 132 | sub a2, a4, a2 # subtract to get length |
141 | retw | 133 | retw |
142 | 134 | ||
135 | ENDPROC(__strnlen_user) | ||
136 | |||
143 | .section .fixup, "ax" | 137 | .section .fixup, "ax" |
144 | .align 4 | 138 | .align 4 |
145 | lenfixup: | 139 | 10: |
146 | movi a2, 0 | 140 | movi a2, 0 |
147 | retw | 141 | retw |
diff --git a/arch/xtensa/lib/usercopy.S b/arch/xtensa/lib/usercopy.S index d9cd766bde3e..64ab1971324f 100644 --- a/arch/xtensa/lib/usercopy.S +++ b/arch/xtensa/lib/usercopy.S | |||
@@ -53,30 +53,13 @@ | |||
53 | * a11/ original length | 53 | * a11/ original length |
54 | */ | 54 | */ |
55 | 55 | ||
56 | #include <linux/linkage.h> | ||
56 | #include <variant/core.h> | 57 | #include <variant/core.h> |
57 | 58 | #include <asm/asmmacro.h> | |
58 | #ifdef __XTENSA_EB__ | ||
59 | #define ALIGN(R, W0, W1) src R, W0, W1 | ||
60 | #define SSA8(R) ssa8b R | ||
61 | #else | ||
62 | #define ALIGN(R, W0, W1) src R, W1, W0 | ||
63 | #define SSA8(R) ssa8l R | ||
64 | #endif | ||
65 | |||
66 | /* Load or store instructions that may cause exceptions use the EX macro. */ | ||
67 | |||
68 | #define EX(insn,reg1,reg2,offset,handler) \ | ||
69 | 9: insn reg1, reg2, offset; \ | ||
70 | .section __ex_table, "a"; \ | ||
71 | .word 9b, handler; \ | ||
72 | .previous | ||
73 | |||
74 | 59 | ||
75 | .text | 60 | .text |
76 | .align 4 | 61 | ENTRY(__xtensa_copy_user) |
77 | .global __xtensa_copy_user | 62 | |
78 | .type __xtensa_copy_user,@function | ||
79 | __xtensa_copy_user: | ||
80 | entry sp, 16 # minimal stack frame | 63 | entry sp, 16 # minimal stack frame |
81 | # a2/ dst, a3/ src, a4/ len | 64 | # a2/ dst, a3/ src, a4/ len |
82 | mov a5, a2 # copy dst so that a2 is return value | 65 | mov a5, a2 # copy dst so that a2 is return value |
@@ -89,7 +72,7 @@ __xtensa_copy_user: | |||
89 | # per iteration | 72 | # per iteration |
90 | movi a8, 3 # if source is also aligned, | 73 | movi a8, 3 # if source is also aligned, |
91 | bnone a3, a8, .Laligned # then use word copy | 74 | bnone a3, a8, .Laligned # then use word copy |
92 | SSA8( a3) # set shift amount from byte offset | 75 | __ssa8 a3 # set shift amount from byte offset |
93 | bnez a4, .Lsrcunaligned | 76 | bnez a4, .Lsrcunaligned |
94 | movi a2, 0 # return success for len==0 | 77 | movi a2, 0 # return success for len==0 |
95 | retw | 78 | retw |
@@ -102,9 +85,9 @@ __xtensa_copy_user: | |||
102 | bltui a4, 7, .Lbytecopy # do short copies byte by byte | 85 | bltui a4, 7, .Lbytecopy # do short copies byte by byte |
103 | 86 | ||
104 | # copy 1 byte | 87 | # copy 1 byte |
105 | EX(l8ui, a6, a3, 0, fixup) | 88 | EX(10f) l8ui a6, a3, 0 |
106 | addi a3, a3, 1 | 89 | addi a3, a3, 1 |
107 | EX(s8i, a6, a5, 0, fixup) | 90 | EX(10f) s8i a6, a5, 0 |
108 | addi a5, a5, 1 | 91 | addi a5, a5, 1 |
109 | addi a4, a4, -1 | 92 | addi a4, a4, -1 |
110 | bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then | 93 | bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then |
@@ -112,11 +95,11 @@ __xtensa_copy_user: | |||
112 | .Ldst2mod4: # dst 16-bit aligned | 95 | .Ldst2mod4: # dst 16-bit aligned |
113 | # copy 2 bytes | 96 | # copy 2 bytes |
114 | bltui a4, 6, .Lbytecopy # do short copies byte by byte | 97 | bltui a4, 6, .Lbytecopy # do short copies byte by byte |
115 | EX(l8ui, a6, a3, 0, fixup) | 98 | EX(10f) l8ui a6, a3, 0 |
116 | EX(l8ui, a7, a3, 1, fixup) | 99 | EX(10f) l8ui a7, a3, 1 |
117 | addi a3, a3, 2 | 100 | addi a3, a3, 2 |
118 | EX(s8i, a6, a5, 0, fixup) | 101 | EX(10f) s8i a6, a5, 0 |
119 | EX(s8i, a7, a5, 1, fixup) | 102 | EX(10f) s8i a7, a5, 1 |
120 | addi a5, a5, 2 | 103 | addi a5, a5, 2 |
121 | addi a4, a4, -2 | 104 | addi a4, a4, -2 |
122 | j .Ldstaligned # dst is now aligned, return to main algorithm | 105 | j .Ldstaligned # dst is now aligned, return to main algorithm |
@@ -135,9 +118,9 @@ __xtensa_copy_user: | |||
135 | add a7, a3, a4 # a7 = end address for source | 118 | add a7, a3, a4 # a7 = end address for source |
136 | #endif /* !XCHAL_HAVE_LOOPS */ | 119 | #endif /* !XCHAL_HAVE_LOOPS */ |
137 | .Lnextbyte: | 120 | .Lnextbyte: |
138 | EX(l8ui, a6, a3, 0, fixup) | 121 | EX(10f) l8ui a6, a3, 0 |
139 | addi a3, a3, 1 | 122 | addi a3, a3, 1 |
140 | EX(s8i, a6, a5, 0, fixup) | 123 | EX(10f) s8i a6, a5, 0 |
141 | addi a5, a5, 1 | 124 | addi a5, a5, 1 |
142 | #if !XCHAL_HAVE_LOOPS | 125 | #if !XCHAL_HAVE_LOOPS |
143 | blt a3, a7, .Lnextbyte | 126 | blt a3, a7, .Lnextbyte |
@@ -161,15 +144,15 @@ __xtensa_copy_user: | |||
161 | add a8, a8, a3 # a8 = end of last 16B source chunk | 144 | add a8, a8, a3 # a8 = end of last 16B source chunk |
162 | #endif /* !XCHAL_HAVE_LOOPS */ | 145 | #endif /* !XCHAL_HAVE_LOOPS */ |
163 | .Loop1: | 146 | .Loop1: |
164 | EX(l32i, a6, a3, 0, fixup) | 147 | EX(10f) l32i a6, a3, 0 |
165 | EX(l32i, a7, a3, 4, fixup) | 148 | EX(10f) l32i a7, a3, 4 |
166 | EX(s32i, a6, a5, 0, fixup) | 149 | EX(10f) s32i a6, a5, 0 |
167 | EX(l32i, a6, a3, 8, fixup) | 150 | EX(10f) l32i a6, a3, 8 |
168 | EX(s32i, a7, a5, 4, fixup) | 151 | EX(10f) s32i a7, a5, 4 |
169 | EX(l32i, a7, a3, 12, fixup) | 152 | EX(10f) l32i a7, a3, 12 |
170 | EX(s32i, a6, a5, 8, fixup) | 153 | EX(10f) s32i a6, a5, 8 |
171 | addi a3, a3, 16 | 154 | addi a3, a3, 16 |
172 | EX(s32i, a7, a5, 12, fixup) | 155 | EX(10f) s32i a7, a5, 12 |
173 | addi a5, a5, 16 | 156 | addi a5, a5, 16 |
174 | #if !XCHAL_HAVE_LOOPS | 157 | #if !XCHAL_HAVE_LOOPS |
175 | blt a3, a8, .Loop1 | 158 | blt a3, a8, .Loop1 |
@@ -177,31 +160,31 @@ __xtensa_copy_user: | |||
177 | .Loop1done: | 160 | .Loop1done: |
178 | bbci.l a4, 3, .L2 | 161 | bbci.l a4, 3, .L2 |
179 | # copy 8 bytes | 162 | # copy 8 bytes |
180 | EX(l32i, a6, a3, 0, fixup) | 163 | EX(10f) l32i a6, a3, 0 |
181 | EX(l32i, a7, a3, 4, fixup) | 164 | EX(10f) l32i a7, a3, 4 |
182 | addi a3, a3, 8 | 165 | addi a3, a3, 8 |
183 | EX(s32i, a6, a5, 0, fixup) | 166 | EX(10f) s32i a6, a5, 0 |
184 | EX(s32i, a7, a5, 4, fixup) | 167 | EX(10f) s32i a7, a5, 4 |
185 | addi a5, a5, 8 | 168 | addi a5, a5, 8 |
186 | .L2: | 169 | .L2: |
187 | bbci.l a4, 2, .L3 | 170 | bbci.l a4, 2, .L3 |
188 | # copy 4 bytes | 171 | # copy 4 bytes |
189 | EX(l32i, a6, a3, 0, fixup) | 172 | EX(10f) l32i a6, a3, 0 |
190 | addi a3, a3, 4 | 173 | addi a3, a3, 4 |
191 | EX(s32i, a6, a5, 0, fixup) | 174 | EX(10f) s32i a6, a5, 0 |
192 | addi a5, a5, 4 | 175 | addi a5, a5, 4 |
193 | .L3: | 176 | .L3: |
194 | bbci.l a4, 1, .L4 | 177 | bbci.l a4, 1, .L4 |
195 | # copy 2 bytes | 178 | # copy 2 bytes |
196 | EX(l16ui, a6, a3, 0, fixup) | 179 | EX(10f) l16ui a6, a3, 0 |
197 | addi a3, a3, 2 | 180 | addi a3, a3, 2 |
198 | EX(s16i, a6, a5, 0, fixup) | 181 | EX(10f) s16i a6, a5, 0 |
199 | addi a5, a5, 2 | 182 | addi a5, a5, 2 |
200 | .L4: | 183 | .L4: |
201 | bbci.l a4, 0, .L5 | 184 | bbci.l a4, 0, .L5 |
202 | # copy 1 byte | 185 | # copy 1 byte |
203 | EX(l8ui, a6, a3, 0, fixup) | 186 | EX(10f) l8ui a6, a3, 0 |
204 | EX(s8i, a6, a5, 0, fixup) | 187 | EX(10f) s8i a6, a5, 0 |
205 | .L5: | 188 | .L5: |
206 | movi a2, 0 # return success for len bytes copied | 189 | movi a2, 0 # return success for len bytes copied |
207 | retw | 190 | retw |
@@ -217,7 +200,7 @@ __xtensa_copy_user: | |||
217 | # copy 16 bytes per iteration for word-aligned dst and unaligned src | 200 | # copy 16 bytes per iteration for word-aligned dst and unaligned src |
218 | and a10, a3, a8 # save unalignment offset for below | 201 | and a10, a3, a8 # save unalignment offset for below |
219 | sub a3, a3, a10 # align a3 (to avoid sim warnings only; not needed for hardware) | 202 | sub a3, a3, a10 # align a3 (to avoid sim warnings only; not needed for hardware) |
220 | EX(l32i, a6, a3, 0, fixup) # load first word | 203 | EX(10f) l32i a6, a3, 0 # load first word |
221 | #if XCHAL_HAVE_LOOPS | 204 | #if XCHAL_HAVE_LOOPS |
222 | loopnez a7, .Loop2done | 205 | loopnez a7, .Loop2done |
223 | #else /* !XCHAL_HAVE_LOOPS */ | 206 | #else /* !XCHAL_HAVE_LOOPS */ |
@@ -226,19 +209,19 @@ __xtensa_copy_user: | |||
226 | add a12, a12, a3 # a12 = end of last 16B source chunk | 209 | add a12, a12, a3 # a12 = end of last 16B source chunk |
227 | #endif /* !XCHAL_HAVE_LOOPS */ | 210 | #endif /* !XCHAL_HAVE_LOOPS */ |
228 | .Loop2: | 211 | .Loop2: |
229 | EX(l32i, a7, a3, 4, fixup) | 212 | EX(10f) l32i a7, a3, 4 |
230 | EX(l32i, a8, a3, 8, fixup) | 213 | EX(10f) l32i a8, a3, 8 |
231 | ALIGN( a6, a6, a7) | 214 | __src_b a6, a6, a7 |
232 | EX(s32i, a6, a5, 0, fixup) | 215 | EX(10f) s32i a6, a5, 0 |
233 | EX(l32i, a9, a3, 12, fixup) | 216 | EX(10f) l32i a9, a3, 12 |
234 | ALIGN( a7, a7, a8) | 217 | __src_b a7, a7, a8 |
235 | EX(s32i, a7, a5, 4, fixup) | 218 | EX(10f) s32i a7, a5, 4 |
236 | EX(l32i, a6, a3, 16, fixup) | 219 | EX(10f) l32i a6, a3, 16 |
237 | ALIGN( a8, a8, a9) | 220 | __src_b a8, a8, a9 |
238 | EX(s32i, a8, a5, 8, fixup) | 221 | EX(10f) s32i a8, a5, 8 |
239 | addi a3, a3, 16 | 222 | addi a3, a3, 16 |
240 | ALIGN( a9, a9, a6) | 223 | __src_b a9, a9, a6 |
241 | EX(s32i, a9, a5, 12, fixup) | 224 | EX(10f) s32i a9, a5, 12 |
242 | addi a5, a5, 16 | 225 | addi a5, a5, 16 |
243 | #if !XCHAL_HAVE_LOOPS | 226 | #if !XCHAL_HAVE_LOOPS |
244 | blt a3, a12, .Loop2 | 227 | blt a3, a12, .Loop2 |
@@ -246,43 +229,44 @@ __xtensa_copy_user: | |||
246 | .Loop2done: | 229 | .Loop2done: |
247 | bbci.l a4, 3, .L12 | 230 | bbci.l a4, 3, .L12 |
248 | # copy 8 bytes | 231 | # copy 8 bytes |
249 | EX(l32i, a7, a3, 4, fixup) | 232 | EX(10f) l32i a7, a3, 4 |
250 | EX(l32i, a8, a3, 8, fixup) | 233 | EX(10f) l32i a8, a3, 8 |
251 | ALIGN( a6, a6, a7) | 234 | __src_b a6, a6, a7 |
252 | EX(s32i, a6, a5, 0, fixup) | 235 | EX(10f) s32i a6, a5, 0 |
253 | addi a3, a3, 8 | 236 | addi a3, a3, 8 |
254 | ALIGN( a7, a7, a8) | 237 | __src_b a7, a7, a8 |
255 | EX(s32i, a7, a5, 4, fixup) | 238 | EX(10f) s32i a7, a5, 4 |
256 | addi a5, a5, 8 | 239 | addi a5, a5, 8 |
257 | mov a6, a8 | 240 | mov a6, a8 |
258 | .L12: | 241 | .L12: |
259 | bbci.l a4, 2, .L13 | 242 | bbci.l a4, 2, .L13 |
260 | # copy 4 bytes | 243 | # copy 4 bytes |
261 | EX(l32i, a7, a3, 4, fixup) | 244 | EX(10f) l32i a7, a3, 4 |
262 | addi a3, a3, 4 | 245 | addi a3, a3, 4 |
263 | ALIGN( a6, a6, a7) | 246 | __src_b a6, a6, a7 |
264 | EX(s32i, a6, a5, 0, fixup) | 247 | EX(10f) s32i a6, a5, 0 |
265 | addi a5, a5, 4 | 248 | addi a5, a5, 4 |
266 | mov a6, a7 | 249 | mov a6, a7 |
267 | .L13: | 250 | .L13: |
268 | add a3, a3, a10 # readjust a3 with correct misalignment | 251 | add a3, a3, a10 # readjust a3 with correct misalignment |
269 | bbci.l a4, 1, .L14 | 252 | bbci.l a4, 1, .L14 |
270 | # copy 2 bytes | 253 | # copy 2 bytes |
271 | EX(l8ui, a6, a3, 0, fixup) | 254 | EX(10f) l8ui a6, a3, 0 |
272 | EX(l8ui, a7, a3, 1, fixup) | 255 | EX(10f) l8ui a7, a3, 1 |
273 | addi a3, a3, 2 | 256 | addi a3, a3, 2 |
274 | EX(s8i, a6, a5, 0, fixup) | 257 | EX(10f) s8i a6, a5, 0 |
275 | EX(s8i, a7, a5, 1, fixup) | 258 | EX(10f) s8i a7, a5, 1 |
276 | addi a5, a5, 2 | 259 | addi a5, a5, 2 |
277 | .L14: | 260 | .L14: |
278 | bbci.l a4, 0, .L15 | 261 | bbci.l a4, 0, .L15 |
279 | # copy 1 byte | 262 | # copy 1 byte |
280 | EX(l8ui, a6, a3, 0, fixup) | 263 | EX(10f) l8ui a6, a3, 0 |
281 | EX(s8i, a6, a5, 0, fixup) | 264 | EX(10f) s8i a6, a5, 0 |
282 | .L15: | 265 | .L15: |
283 | movi a2, 0 # return success for len bytes copied | 266 | movi a2, 0 # return success for len bytes copied |
284 | retw | 267 | retw |
285 | 268 | ||
269 | ENDPROC(__xtensa_copy_user) | ||
286 | 270 | ||
287 | .section .fixup, "ax" | 271 | .section .fixup, "ax" |
288 | .align 4 | 272 | .align 4 |
@@ -294,7 +278,7 @@ __xtensa_copy_user: | |||
294 | */ | 278 | */ |
295 | 279 | ||
296 | 280 | ||
297 | fixup: | 281 | 10: |
298 | sub a2, a5, a2 /* a2 <-- bytes copied */ | 282 | sub a2, a5, a2 /* a2 <-- bytes copied */ |
299 | sub a2, a11, a2 /* a2 <-- bytes not copied */ | 283 | sub a2, a11, a2 /* a2 <-- bytes not copied */ |
300 | retw | 284 | retw |
diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile index 0b3d296a016a..734888a00dc8 100644 --- a/arch/xtensa/mm/Makefile +++ b/arch/xtensa/mm/Makefile | |||
@@ -5,3 +5,8 @@ | |||
5 | obj-y := init.o misc.o | 5 | obj-y := init.o misc.o |
6 | obj-$(CONFIG_MMU) += cache.o fault.o ioremap.o mmu.o tlb.o | 6 | obj-$(CONFIG_MMU) += cache.o fault.o ioremap.o mmu.o tlb.o |
7 | obj-$(CONFIG_HIGHMEM) += highmem.o | 7 | obj-$(CONFIG_HIGHMEM) += highmem.o |
8 | obj-$(CONFIG_KASAN) += kasan_init.o | ||
9 | |||
10 | KASAN_SANITIZE_fault.o := n | ||
11 | KASAN_SANITIZE_kasan_init.o := n | ||
12 | KASAN_SANITIZE_mmu.o := n | ||
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c index 3c75c4e597da..57dc231a0709 100644 --- a/arch/xtensa/mm/cache.c +++ b/arch/xtensa/mm/cache.c | |||
@@ -33,9 +33,6 @@ | |||
33 | #include <asm/pgalloc.h> | 33 | #include <asm/pgalloc.h> |
34 | #include <asm/pgtable.h> | 34 | #include <asm/pgtable.h> |
35 | 35 | ||
36 | //#define printd(x...) printk(x) | ||
37 | #define printd(x...) do { } while(0) | ||
38 | |||
39 | /* | 36 | /* |
40 | * Note: | 37 | * Note: |
41 | * The kernel provides one architecture bit PG_arch_1 in the page flags that | 38 | * The kernel provides one architecture bit PG_arch_1 in the page flags that |
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c index a14df5aa98c8..8b9b6f44bb06 100644 --- a/arch/xtensa/mm/fault.c +++ b/arch/xtensa/mm/fault.c | |||
@@ -25,8 +25,6 @@ | |||
25 | DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST; | 25 | DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST; |
26 | void bad_page_fault(struct pt_regs*, unsigned long, int); | 26 | void bad_page_fault(struct pt_regs*, unsigned long, int); |
27 | 27 | ||
28 | #undef DEBUG_PAGE_FAULT | ||
29 | |||
30 | /* | 28 | /* |
31 | * This routine handles page faults. It determines the address, | 29 | * This routine handles page faults. It determines the address, |
32 | * and the problem, and then passes it off to one of the appropriate | 30 | * and the problem, and then passes it off to one of the appropriate |
@@ -68,10 +66,10 @@ void do_page_fault(struct pt_regs *regs) | |||
68 | exccause == EXCCAUSE_ITLB_MISS || | 66 | exccause == EXCCAUSE_ITLB_MISS || |
69 | exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0; | 67 | exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0; |
70 | 68 | ||
71 | #ifdef DEBUG_PAGE_FAULT | 69 | pr_debug("[%s:%d:%08x:%d:%08lx:%s%s]\n", |
72 | printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid, | 70 | current->comm, current->pid, |
73 | address, exccause, regs->pc, is_write? "w":"", is_exec? "x":""); | 71 | address, exccause, regs->pc, |
74 | #endif | 72 | is_write ? "w" : "", is_exec ? "x" : ""); |
75 | 73 | ||
76 | if (user_mode(regs)) | 74 | if (user_mode(regs)) |
77 | flags |= FAULT_FLAG_USER; | 75 | flags |= FAULT_FLAG_USER; |
@@ -247,10 +245,8 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) | |||
247 | 245 | ||
248 | /* Are we prepared to handle this kernel fault? */ | 246 | /* Are we prepared to handle this kernel fault? */ |
249 | if ((entry = search_exception_tables(regs->pc)) != NULL) { | 247 | if ((entry = search_exception_tables(regs->pc)) != NULL) { |
250 | #ifdef DEBUG_PAGE_FAULT | 248 | pr_debug("%s: Exception at pc=%#010lx (%lx)\n", |
251 | printk(KERN_DEBUG "%s: Exception at pc=%#010lx (%lx)\n", | 249 | current->comm, regs->pc, entry->fixup); |
252 | current->comm, regs->pc, entry->fixup); | ||
253 | #endif | ||
254 | current->thread.bad_uaddr = address; | 250 | current->thread.bad_uaddr = address; |
255 | regs->pc = entry->fixup; | 251 | regs->pc = entry->fixup; |
256 | return; | 252 | return; |
@@ -259,9 +255,9 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) | |||
259 | /* Oops. The kernel tried to access some bad page. We'll have to | 255 | /* Oops. The kernel tried to access some bad page. We'll have to |
260 | * terminate things with extreme prejudice. | 256 | * terminate things with extreme prejudice. |
261 | */ | 257 | */ |
262 | printk(KERN_ALERT "Unable to handle kernel paging request at virtual " | 258 | pr_alert("Unable to handle kernel paging request at virtual " |
263 | "address %08lx\n pc = %08lx, ra = %08lx\n", | 259 | "address %08lx\n pc = %08lx, ra = %08lx\n", |
264 | address, regs->pc, regs->areg[0]); | 260 | address, regs->pc, regs->areg[0]); |
265 | die("Oops", regs, sig); | 261 | die("Oops", regs, sig); |
266 | do_exit(sig); | 262 | do_exit(sig); |
267 | } | 263 | } |
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 720fe4e8b497..d776ec0d7b22 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c | |||
@@ -100,29 +100,51 @@ void __init mem_init(void) | |||
100 | 100 | ||
101 | mem_init_print_info(NULL); | 101 | mem_init_print_info(NULL); |
102 | pr_info("virtual kernel memory layout:\n" | 102 | pr_info("virtual kernel memory layout:\n" |
103 | #ifdef CONFIG_KASAN | ||
104 | " kasan : 0x%08lx - 0x%08lx (%5lu MB)\n" | ||
105 | #endif | ||
106 | #ifdef CONFIG_MMU | ||
107 | " vmalloc : 0x%08lx - 0x%08lx (%5lu MB)\n" | ||
108 | #endif | ||
103 | #ifdef CONFIG_HIGHMEM | 109 | #ifdef CONFIG_HIGHMEM |
104 | " pkmap : 0x%08lx - 0x%08lx (%5lu kB)\n" | 110 | " pkmap : 0x%08lx - 0x%08lx (%5lu kB)\n" |
105 | " fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n" | 111 | " fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n" |
106 | #endif | 112 | #endif |
107 | #ifdef CONFIG_MMU | 113 | " lowmem : 0x%08lx - 0x%08lx (%5lu MB)\n" |
108 | " vmalloc : 0x%08lx - 0x%08lx (%5lu MB)\n" | 114 | " .text : 0x%08lx - 0x%08lx (%5lu kB)\n" |
115 | " .rodata : 0x%08lx - 0x%08lx (%5lu kB)\n" | ||
116 | " .data : 0x%08lx - 0x%08lx (%5lu kB)\n" | ||
117 | " .init : 0x%08lx - 0x%08lx (%5lu kB)\n" | ||
118 | " .bss : 0x%08lx - 0x%08lx (%5lu kB)\n", | ||
119 | #ifdef CONFIG_KASAN | ||
120 | KASAN_SHADOW_START, KASAN_SHADOW_START + KASAN_SHADOW_SIZE, | ||
121 | KASAN_SHADOW_SIZE >> 20, | ||
109 | #endif | 122 | #endif |
110 | " lowmem : 0x%08lx - 0x%08lx (%5lu MB)\n", | 123 | #ifdef CONFIG_MMU |
124 | VMALLOC_START, VMALLOC_END, | ||
125 | (VMALLOC_END - VMALLOC_START) >> 20, | ||
111 | #ifdef CONFIG_HIGHMEM | 126 | #ifdef CONFIG_HIGHMEM |
112 | PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE, | 127 | PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE, |
113 | (LAST_PKMAP*PAGE_SIZE) >> 10, | 128 | (LAST_PKMAP*PAGE_SIZE) >> 10, |
114 | FIXADDR_START, FIXADDR_TOP, | 129 | FIXADDR_START, FIXADDR_TOP, |
115 | (FIXADDR_TOP - FIXADDR_START) >> 10, | 130 | (FIXADDR_TOP - FIXADDR_START) >> 10, |
116 | #endif | 131 | #endif |
117 | #ifdef CONFIG_MMU | ||
118 | VMALLOC_START, VMALLOC_END, | ||
119 | (VMALLOC_END - VMALLOC_START) >> 20, | ||
120 | PAGE_OFFSET, PAGE_OFFSET + | 132 | PAGE_OFFSET, PAGE_OFFSET + |
121 | (max_low_pfn - min_low_pfn) * PAGE_SIZE, | 133 | (max_low_pfn - min_low_pfn) * PAGE_SIZE, |
122 | #else | 134 | #else |
123 | min_low_pfn * PAGE_SIZE, max_low_pfn * PAGE_SIZE, | 135 | min_low_pfn * PAGE_SIZE, max_low_pfn * PAGE_SIZE, |
124 | #endif | 136 | #endif |
125 | ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20); | 137 | ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20, |
138 | (unsigned long)_text, (unsigned long)_etext, | ||
139 | (unsigned long)(_etext - _text) >> 10, | ||
140 | (unsigned long)__start_rodata, (unsigned long)_sdata, | ||
141 | (unsigned long)(_sdata - __start_rodata) >> 10, | ||
142 | (unsigned long)_sdata, (unsigned long)_edata, | ||
143 | (unsigned long)(_edata - _sdata) >> 10, | ||
144 | (unsigned long)__init_begin, (unsigned long)__init_end, | ||
145 | (unsigned long)(__init_end - __init_begin) >> 10, | ||
146 | (unsigned long)__bss_start, (unsigned long)__bss_stop, | ||
147 | (unsigned long)(__bss_stop - __bss_start) >> 10); | ||
126 | } | 148 | } |
127 | 149 | ||
128 | #ifdef CONFIG_BLK_DEV_INITRD | 150 | #ifdef CONFIG_BLK_DEV_INITRD |
diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c new file mode 100644 index 000000000000..6b532b6bd785 --- /dev/null +++ b/arch/xtensa/mm/kasan_init.c | |||
@@ -0,0 +1,95 @@ | |||
1 | /* | ||
2 | * Xtensa KASAN shadow map initialization | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (C) 2017 Cadence Design Systems Inc. | ||
9 | */ | ||
10 | |||
11 | #include <linux/bootmem.h> | ||
12 | #include <linux/init_task.h> | ||
13 | #include <linux/kasan.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/memblock.h> | ||
16 | #include <asm/initialize_mmu.h> | ||
17 | #include <asm/tlbflush.h> | ||
18 | #include <asm/traps.h> | ||
19 | |||
20 | void __init kasan_early_init(void) | ||
21 | { | ||
22 | unsigned long vaddr = KASAN_SHADOW_START; | ||
23 | pgd_t *pgd = pgd_offset_k(vaddr); | ||
24 | pmd_t *pmd = pmd_offset(pgd, vaddr); | ||
25 | int i; | ||
26 | |||
27 | for (i = 0; i < PTRS_PER_PTE; ++i) | ||
28 | set_pte(kasan_zero_pte + i, | ||
29 | mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL)); | ||
30 | |||
31 | for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) { | ||
32 | BUG_ON(!pmd_none(*pmd)); | ||
33 | set_pmd(pmd, __pmd((unsigned long)kasan_zero_pte)); | ||
34 | } | ||
35 | early_trap_init(); | ||
36 | } | ||
37 | |||
38 | static void __init populate(void *start, void *end) | ||
39 | { | ||
40 | unsigned long n_pages = (end - start) / PAGE_SIZE; | ||
41 | unsigned long n_pmds = n_pages / PTRS_PER_PTE; | ||
42 | unsigned long i, j; | ||
43 | unsigned long vaddr = (unsigned long)start; | ||
44 | pgd_t *pgd = pgd_offset_k(vaddr); | ||
45 | pmd_t *pmd = pmd_offset(pgd, vaddr); | ||
46 | pte_t *pte = memblock_virt_alloc(n_pages * sizeof(pte_t), PAGE_SIZE); | ||
47 | |||
48 | pr_debug("%s: %p - %p\n", __func__, start, end); | ||
49 | |||
50 | for (i = j = 0; i < n_pmds; ++i) { | ||
51 | int k; | ||
52 | |||
53 | for (k = 0; k < PTRS_PER_PTE; ++k, ++j) { | ||
54 | phys_addr_t phys = | ||
55 | memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, | ||
56 | MEMBLOCK_ALLOC_ANYWHERE); | ||
57 | |||
58 | set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL)); | ||
59 | } | ||
60 | } | ||
61 | |||
62 | for (i = 0; i < n_pmds ; ++i, pte += PTRS_PER_PTE) | ||
63 | set_pmd(pmd + i, __pmd((unsigned long)pte)); | ||
64 | |||
65 | local_flush_tlb_all(); | ||
66 | memset(start, 0, end - start); | ||
67 | } | ||
68 | |||
69 | void __init kasan_init(void) | ||
70 | { | ||
71 | int i; | ||
72 | |||
73 | BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_START - | ||
74 | (KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT)); | ||
75 | BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR); | ||
76 | |||
77 | /* | ||
78 | * Replace shadow map pages that cover addresses from VMALLOC area | ||
79 | * start to the end of KSEG with clean writable pages. | ||
80 | */ | ||
81 | populate(kasan_mem_to_shadow((void *)VMALLOC_START), | ||
82 | kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR)); | ||
83 | |||
84 | /* Write protect kasan_zero_page and zero-initialize it again. */ | ||
85 | for (i = 0; i < PTRS_PER_PTE; ++i) | ||
86 | set_pte(kasan_zero_pte + i, | ||
87 | mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL_RO)); | ||
88 | |||
89 | local_flush_tlb_all(); | ||
90 | memset(kasan_zero_page, 0, PAGE_SIZE); | ||
91 | |||
92 | /* At this point kasan is fully initialized. Enable error messages. */ | ||
93 | current->kasan_depth = 0; | ||
94 | pr_info("KernelAddressSanitizer initialized\n"); | ||
95 | } | ||
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c index 358d748d9083..9d1ecfc53670 100644 --- a/arch/xtensa/mm/mmu.c +++ b/arch/xtensa/mm/mmu.c | |||
@@ -56,7 +56,6 @@ static void __init fixedrange_init(void) | |||
56 | 56 | ||
57 | void __init paging_init(void) | 57 | void __init paging_init(void) |
58 | { | 58 | { |
59 | memset(swapper_pg_dir, 0, PAGE_SIZE); | ||
60 | #ifdef CONFIG_HIGHMEM | 59 | #ifdef CONFIG_HIGHMEM |
61 | fixedrange_init(); | 60 | fixedrange_init(); |
62 | pkmap_page_table = init_pmd(PKMAP_BASE, LAST_PKMAP); | 61 | pkmap_page_table = init_pmd(PKMAP_BASE, LAST_PKMAP); |
@@ -82,6 +81,23 @@ void init_mmu(void) | |||
82 | set_itlbcfg_register(0); | 81 | set_itlbcfg_register(0); |
83 | set_dtlbcfg_register(0); | 82 | set_dtlbcfg_register(0); |
84 | #endif | 83 | #endif |
84 | init_kio(); | ||
85 | local_flush_tlb_all(); | ||
86 | |||
87 | /* Set rasid register to a known value. */ | ||
88 | |||
89 | set_rasid_register(ASID_INSERT(ASID_USER_FIRST)); | ||
90 | |||
91 | /* Set PTEVADDR special register to the start of the page | ||
92 | * table, which is in kernel mappable space (ie. not | ||
93 | * statically mapped). This register's value is undefined on | ||
94 | * reset. | ||
95 | */ | ||
96 | set_ptevaddr_register(XCHAL_PAGE_TABLE_VADDR); | ||
97 | } | ||
98 | |||
99 | void init_kio(void) | ||
100 | { | ||
85 | #if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF) | 101 | #if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF) |
86 | /* | 102 | /* |
87 | * Update the IO area mapping in case xtensa_kio_paddr has changed | 103 | * Update the IO area mapping in case xtensa_kio_paddr has changed |
@@ -95,17 +111,4 @@ void init_mmu(void) | |||
95 | write_itlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS), | 111 | write_itlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS), |
96 | XCHAL_KIO_BYPASS_VADDR + 6); | 112 | XCHAL_KIO_BYPASS_VADDR + 6); |
97 | #endif | 113 | #endif |
98 | |||
99 | local_flush_tlb_all(); | ||
100 | |||
101 | /* Set rasid register to a known value. */ | ||
102 | |||
103 | set_rasid_register(ASID_INSERT(ASID_USER_FIRST)); | ||
104 | |||
105 | /* Set PTEVADDR special register to the start of the page | ||
106 | * table, which is in kernel mappable space (ie. not | ||
107 | * statically mapped). This register's value is undefined on | ||
108 | * reset. | ||
109 | */ | ||
110 | set_ptevaddr_register(PGTABLE_START); | ||
111 | } | 114 | } |
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c index 35c822286bbe..59153d0aa890 100644 --- a/arch/xtensa/mm/tlb.c +++ b/arch/xtensa/mm/tlb.c | |||
@@ -95,10 +95,8 @@ void local_flush_tlb_range(struct vm_area_struct *vma, | |||
95 | if (mm->context.asid[cpu] == NO_CONTEXT) | 95 | if (mm->context.asid[cpu] == NO_CONTEXT) |
96 | return; | 96 | return; |
97 | 97 | ||
98 | #if 0 | 98 | pr_debug("[tlbrange<%02lx,%08lx,%08lx>]\n", |
99 | printk("[tlbrange<%02lx,%08lx,%08lx>]\n", | 99 | (unsigned long)mm->context.asid[cpu], start, end); |
100 | (unsigned long)mm->context.asid[cpu], start, end); | ||
101 | #endif | ||
102 | local_irq_save(flags); | 100 | local_irq_save(flags); |
103 | 101 | ||
104 | if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) { | 102 | if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) { |
diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c index 464c2684c4f1..92f567f9a21e 100644 --- a/arch/xtensa/platforms/iss/console.c +++ b/arch/xtensa/platforms/iss/console.c | |||
@@ -185,7 +185,7 @@ int __init rs_init(void) | |||
185 | 185 | ||
186 | serial_driver = alloc_tty_driver(SERIAL_MAX_NUM_LINES); | 186 | serial_driver = alloc_tty_driver(SERIAL_MAX_NUM_LINES); |
187 | 187 | ||
188 | printk ("%s %s\n", serial_name, serial_version); | 188 | pr_info("%s %s\n", serial_name, serial_version); |
189 | 189 | ||
190 | /* Initialize the tty_driver structure */ | 190 | /* Initialize the tty_driver structure */ |
191 | 191 | ||
@@ -214,7 +214,7 @@ static __exit void rs_exit(void) | |||
214 | int error; | 214 | int error; |
215 | 215 | ||
216 | if ((error = tty_unregister_driver(serial_driver))) | 216 | if ((error = tty_unregister_driver(serial_driver))) |
217 | printk("ISS_SERIAL: failed to unregister serial driver (%d)\n", | 217 | pr_err("ISS_SERIAL: failed to unregister serial driver (%d)\n", |
218 | error); | 218 | error); |
219 | put_tty_driver(serial_driver); | 219 | put_tty_driver(serial_driver); |
220 | tty_port_destroy(&serial_port); | 220 | tty_port_destroy(&serial_port); |
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c index 6363b18e5b8c..d027dddc41ca 100644 --- a/arch/xtensa/platforms/iss/network.c +++ b/arch/xtensa/platforms/iss/network.c | |||
@@ -16,6 +16,8 @@ | |||
16 | * | 16 | * |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #define pr_fmt(fmt) "%s: " fmt, __func__ | ||
20 | |||
19 | #include <linux/list.h> | 21 | #include <linux/list.h> |
20 | #include <linux/irq.h> | 22 | #include <linux/irq.h> |
21 | #include <linux/spinlock.h> | 23 | #include <linux/spinlock.h> |
@@ -606,8 +608,6 @@ struct iss_net_init { | |||
606 | * those fields. They will be later initialized in iss_net_init. | 608 | * those fields. They will be later initialized in iss_net_init. |
607 | */ | 609 | */ |
608 | 610 | ||
609 | #define ERR KERN_ERR "iss_net_setup: " | ||
610 | |||
611 | static int __init iss_net_setup(char *str) | 611 | static int __init iss_net_setup(char *str) |
612 | { | 612 | { |
613 | struct iss_net_private *device = NULL; | 613 | struct iss_net_private *device = NULL; |
@@ -619,14 +619,14 @@ static int __init iss_net_setup(char *str) | |||
619 | 619 | ||
620 | end = strchr(str, '='); | 620 | end = strchr(str, '='); |
621 | if (!end) { | 621 | if (!end) { |
622 | printk(ERR "Expected '=' after device number\n"); | 622 | pr_err("Expected '=' after device number\n"); |
623 | return 1; | 623 | return 1; |
624 | } | 624 | } |
625 | *end = 0; | 625 | *end = 0; |
626 | rc = kstrtouint(str, 0, &n); | 626 | rc = kstrtouint(str, 0, &n); |
627 | *end = '='; | 627 | *end = '='; |
628 | if (rc < 0) { | 628 | if (rc < 0) { |
629 | printk(ERR "Failed to parse '%s'\n", str); | 629 | pr_err("Failed to parse '%s'\n", str); |
630 | return 1; | 630 | return 1; |
631 | } | 631 | } |
632 | str = end; | 632 | str = end; |
@@ -642,13 +642,13 @@ static int __init iss_net_setup(char *str) | |||
642 | spin_unlock(&devices_lock); | 642 | spin_unlock(&devices_lock); |
643 | 643 | ||
644 | if (device && device->index == n) { | 644 | if (device && device->index == n) { |
645 | printk(ERR "Device %u already configured\n", n); | 645 | pr_err("Device %u already configured\n", n); |
646 | return 1; | 646 | return 1; |
647 | } | 647 | } |
648 | 648 | ||
649 | new = alloc_bootmem(sizeof(*new)); | 649 | new = alloc_bootmem(sizeof(*new)); |
650 | if (new == NULL) { | 650 | if (new == NULL) { |
651 | printk(ERR "Alloc_bootmem failed\n"); | 651 | pr_err("Alloc_bootmem failed\n"); |
652 | return 1; | 652 | return 1; |
653 | } | 653 | } |
654 | 654 | ||
@@ -660,8 +660,6 @@ static int __init iss_net_setup(char *str) | |||
660 | return 1; | 660 | return 1; |
661 | } | 661 | } |
662 | 662 | ||
663 | #undef ERR | ||
664 | |||
665 | __setup("eth", iss_net_setup); | 663 | __setup("eth", iss_net_setup); |
666 | 664 | ||
667 | /* | 665 | /* |