Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/Kconfig                      |   1
-rw-r--r--  arch/sparc/Kconfig.debug                |  14
-rw-r--r--  arch/sparc/include/asm/spinlock_32.h    |  62
-rw-r--r--  arch/sparc/include/asm/spinlock_64.h    |  54
-rw-r--r--  arch/sparc/include/asm/spinlock_types.h |   8
-rw-r--r--  arch/sparc/include/asm/string_32.h      |  78
-rw-r--r--  arch/sparc/include/asm/string_64.h      |  25
-rw-r--r--  arch/sparc/include/asm/thread_info_64.h |   2
-rw-r--r--  arch/sparc/include/asm/uaccess_32.h     |  15
-rw-r--r--  arch/sparc/include/asm/uaccess_64.h     |  23
-rw-r--r--  arch/sparc/include/asm/unistd.h         |   2
-rw-r--r--  arch/sparc/kernel/entry.S               |   2
-rw-r--r--  arch/sparc/kernel/ftrace.c              |  11
-rw-r--r--  arch/sparc/kernel/irq_64.c              |   8
-rw-r--r--  arch/sparc/kernel/kprobes.c             |   3
-rw-r--r--  arch/sparc/kernel/ldc.c                 |   4
-rw-r--r--  arch/sparc/kernel/mdesc.c               |  21
-rw-r--r--  arch/sparc/kernel/nmi.c                 |   8
-rw-r--r--  arch/sparc/kernel/of_device_64.c        |  14
-rw-r--r--  arch/sparc/kernel/ptrace_64.c           |  10
-rw-r--r--  arch/sparc/kernel/syscalls.S            |  14
-rw-r--r--  arch/sparc/kernel/time_64.c             |  26
-rw-r--r--  arch/sparc/kernel/unaligned_32.c        |  15
-rw-r--r--  arch/sparc/kernel/unaligned_64.c        |  23
-rw-r--r--  arch/sparc/kernel/visemul.c             |   3
-rw-r--r--  arch/sparc/lib/Makefile                 |   1
-rw-r--r--  arch/sparc/lib/bzero.S                  |   5
-rw-r--r--  arch/sparc/lib/checksum_32.S            |   2
-rw-r--r--  arch/sparc/lib/ksyms.c                  |   2
-rw-r--r--  arch/sparc/lib/mcount.S                 |   5
-rw-r--r--  arch/sparc/lib/memcpy.S                 |   3
-rw-r--r--  arch/sparc/lib/memset.S                 |   3
-rw-r--r--  arch/sparc/lib/usercopy.c               |   8
-rw-r--r--  arch/sparc/math-emu/math_32.c           |   3
-rw-r--r--  arch/sparc/math-emu/math_64.c           |   2
-rw-r--r--  arch/sparc/mm/fault_64.c                |  24
36 files changed, 224 insertions(+), 280 deletions(-)
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 33ac1a9ac881..108197ac0d56 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -43,6 +43,7 @@ config SPARC64
 	select HAVE_SYSCALL_WRAPPERS
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
+	select HAVE_SYSCALL_TRACEPOINTS
 	select USE_GENERIC_SMP_HELPERS if SMP
 	select RTC_DRV_CMOS
 	select RTC_DRV_BQ4802
diff --git a/arch/sparc/Kconfig.debug b/arch/sparc/Kconfig.debug
index 90d5fe223a74..9d3c889718ac 100644
--- a/arch/sparc/Kconfig.debug
+++ b/arch/sparc/Kconfig.debug
@@ -33,4 +33,18 @@ config FRAME_POINTER
 	depends on MCOUNT
 	default y
 
+config DEBUG_STRICT_USER_COPY_CHECKS
+	bool "Strict copy size checks"
+	depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
+	---help---
+	  Enabling this option turns a certain set of sanity checks for user
+	  copy operations into compile time failures.
+
+	  The copy_from_user() etc checks are there to help test if there
+	  are sufficient security checks on the length argument of
+	  the copy operation, by having gcc prove that the argument is
+	  within bounds.
+
+	  If unsure, or if you run an older (pre 4.4) gcc, say N.
+
 endmenu
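
The new DEBUG_STRICT_USER_COPY_CHECKS option relies on gcc proving, at compile time, that the copy length fits the destination object. A minimal userspace sketch of the same pattern (illustrative names, not the kernel's code; needs gcc 4.3+ and -O2 so the impossible branch folds away):

	#include <string.h>

	/* The call survives only if gcc cannot eliminate the branch,
	 * which turns a provably oversized copy into a build error. */
	extern void copy_overflow_detected(void)
		__attribute__((error("copy size is not provably correct")));

	static inline void checked_copy(void *dst, const void *src, unsigned long n)
	{
		/* (unsigned long)-1 means "size unknown": skip the check */
		unsigned long sz = __builtin_object_size(dst, 0);

		if (sz != (unsigned long)-1 && __builtin_constant_p(n) && sz < n)
			copy_overflow_detected();
		memcpy(dst, src, n);
	}

	int main(void)
	{
		char buf[8];
		checked_copy(buf, "hi", 3);	/* fine */
		/* checked_copy(buf, "0123456789", 11);  would fail the build */
		return 0;
	}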
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index 857630cff636..7f9b9dba38a6 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -10,12 +10,12 @@
 
 #include <asm/psr.h>
 
-#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
+#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
 
-#define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_unlock_wait(lock) \
+	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	__asm__ __volatile__(
 	"\n1:\n\t"
@@ -35,7 +35,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	: "g2", "memory", "cc");
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned int result;
 	__asm__ __volatile__("ldstub [%1], %0"
@@ -45,7 +45,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return (result == 0);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
 }
@@ -65,7 +65,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  * Sort of like atomic_t's on Sparc, but even more clever.
  *
  *	------------------------------------
- *	| 24-bit counter           | wlock |  raw_rwlock_t
+ *	| 24-bit counter           | wlock |  arch_rwlock_t
  *	------------------------------------
 	 31                       8 7      0
 *
@@ -76,9 +76,9 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  *
  * Unfortunately this scheme limits us to ~16,000,000 cpus.
  */
-static inline void arch_read_lock(raw_rwlock_t *rw)
+static inline void __arch_read_lock(arch_rwlock_t *rw)
 {
-	register raw_rwlock_t *lp asm("g1");
+	register arch_rwlock_t *lp asm("g1");
 	lp = rw;
 	__asm__ __volatile__(
 	"mov	%%o7, %%g4\n\t"
@@ -89,16 +89,16 @@ static inline void arch_read_lock(raw_rwlock_t *rw)
 	: "g2", "g4", "memory", "cc");
 }
 
-#define __raw_read_lock(lock) \
+#define arch_read_lock(lock) \
 do {	unsigned long flags; \
 	local_irq_save(flags); \
-	arch_read_lock(lock); \
+	__arch_read_lock(lock); \
 	local_irq_restore(flags); \
 } while(0)
 
-static inline void arch_read_unlock(raw_rwlock_t *rw)
+static inline void __arch_read_unlock(arch_rwlock_t *rw)
 {
-	register raw_rwlock_t *lp asm("g1");
+	register arch_rwlock_t *lp asm("g1");
 	lp = rw;
 	__asm__ __volatile__(
 	"mov	%%o7, %%g4\n\t"
@@ -109,16 +109,16 @@ static inline void arch_read_unlock(raw_rwlock_t *rw)
 	: "g2", "g4", "memory", "cc");
 }
 
-#define __raw_read_unlock(lock) \
+#define arch_read_unlock(lock) \
 do {	unsigned long flags; \
 	local_irq_save(flags); \
-	arch_read_unlock(lock); \
+	__arch_read_unlock(lock); \
 	local_irq_restore(flags); \
 } while(0)
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	register raw_rwlock_t *lp asm("g1");
+	register arch_rwlock_t *lp asm("g1");
 	lp = rw;
 	__asm__ __volatile__(
 	"mov	%%o7, %%g4\n\t"
@@ -130,7 +130,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 	*(volatile __u32 *)&lp->lock = ~0U;
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned int val;
 
@@ -150,9 +150,9 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	return (val == 0);
 }
 
-static inline int arch_read_trylock(raw_rwlock_t *rw)
+static inline int __arch_read_trylock(arch_rwlock_t *rw)
 {
-	register raw_rwlock_t *lp asm("g1");
+	register arch_rwlock_t *lp asm("g1");
 	register int res asm("o0");
 	lp = rw;
 	__asm__ __volatile__(
@@ -165,27 +165,27 @@ static inline int arch_read_trylock(raw_rwlock_t *rw)
 	return res;
 }
 
-#define __raw_read_trylock(lock) \
+#define arch_read_trylock(lock) \
({	unsigned long flags; \
 	int res; \
 	local_irq_save(flags); \
-	res = arch_read_trylock(lock); \
+	res = __arch_read_trylock(lock); \
 	local_irq_restore(flags); \
 	res; \
})
 
-#define __raw_write_unlock(rw)	do { (rw)->lock = 0; } while(0)
+#define arch_write_unlock(rw)	do { (rw)->lock = 0; } while(0)
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_read_lock_flags(rw, flags)   __raw_read_lock(rw)
-#define __raw_write_lock_flags(rw, flags)  __raw_write_lock(rw)
+#define arch_spin_lock_flags(lock, flags)  arch_spin_lock(lock)
+#define arch_read_lock_flags(rw, flags)    arch_read_lock(rw)
+#define arch_write_lock_flags(rw, flags)   arch_write_lock(rw)
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
-#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff))
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
+#define arch_write_can_lock(rw) (!(rw)->lock)
 
 #endif /* !(__ASSEMBLY__) */
 
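
For reference, the lock word documented in the comment above packs a 24-bit reader count above a one-byte writer flag, which is exactly what the arch_read_can_lock()/arch_write_can_lock() tests at the end of the hunk decode. A runnable illustration of that encoding (plain C sketch; the values are arbitrary):

	#include <stdio.h>

	int main(void)
	{
		unsigned int lock = (3u << 8) | 0x00;	/* 3 readers, no writer */
		unsigned int readers = lock >> 8;	/* bits 31..8 */

		/* arch_read_can_lock(): readers may enter while the writer
		 * byte (bits 7..0) is clear; arch_write_can_lock(): a writer
		 * needs the whole word to be zero (no readers, no writer). */
		printf("readers=%u can_read=%d can_write=%d\n",
		       readers, !(lock & 0xff), !lock);
		return 0;
	}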
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 43e514783582..073936a8b275 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -21,13 +21,13 @@
  * the spinner sections must be pre-V9 branches.
  */
 
-#define __raw_spin_is_locked(lp)	((lp)->lock != 0)
+#define arch_spin_is_locked(lp)	((lp)->lock != 0)
 
-#define __raw_spin_unlock_wait(lp)	\
+#define arch_spin_unlock_wait(lp)	\
 	do {	rmb();			\
 	} while((lp)->lock)
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 
@@ -46,7 +46,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	: "memory");
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long result;
 
@@ -59,7 +59,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return (result == 0UL);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__asm__ __volatile__(
 "	stb		%%g0, [%0]"
@@ -68,7 +68,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 	: "memory");
 }
 
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
 	unsigned long tmp1, tmp2;
 
@@ -92,7 +92,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
 
 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
 
-static void inline arch_read_lock(raw_rwlock_t *lock)
+static void inline arch_read_lock(arch_rwlock_t *lock)
 {
 	unsigned long tmp1, tmp2;
 
@@ -115,7 +115,7 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
 	: "memory");
 }
 
-static int inline arch_read_trylock(raw_rwlock_t *lock)
+static int inline arch_read_trylock(arch_rwlock_t *lock)
 {
 	int tmp1, tmp2;
 
@@ -136,7 +136,7 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
 	return tmp1;
 }
 
-static void inline arch_read_unlock(raw_rwlock_t *lock)
+static void inline arch_read_unlock(arch_rwlock_t *lock)
 {
 	unsigned long tmp1, tmp2;
 
@@ -152,7 +152,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
 	: "memory");
 }
 
-static void inline arch_write_lock(raw_rwlock_t *lock)
+static void inline arch_write_lock(arch_rwlock_t *lock)
 {
 	unsigned long mask, tmp1, tmp2;
 
@@ -177,7 +177,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
 	: "memory");
 }
 
-static void inline arch_write_unlock(raw_rwlock_t *lock)
+static void inline arch_write_unlock(arch_rwlock_t *lock)
 {
 	__asm__ __volatile__(
 "	stw		%%g0, [%0]"
@@ -186,7 +186,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
 	: "memory");
 }
 
-static int inline arch_write_trylock(raw_rwlock_t *lock)
+static int inline arch_write_trylock(arch_rwlock_t *lock)
 {
 	unsigned long mask, tmp1, tmp2, result;
 
@@ -210,21 +210,21 @@ static int inline arch_write_trylock(raw_rwlock_t *lock)
 	return result;
 }
 
-#define __raw_read_lock(p)	arch_read_lock(p)
-#define __raw_read_lock_flags(p, f) arch_read_lock(p)
-#define __raw_read_trylock(p)	arch_read_trylock(p)
-#define __raw_read_unlock(p)	arch_read_unlock(p)
-#define __raw_write_lock(p)	arch_write_lock(p)
-#define __raw_write_lock_flags(p, f) arch_write_lock(p)
-#define __raw_write_unlock(p)	arch_write_unlock(p)
-#define __raw_write_trylock(p)	arch_write_trylock(p)
+#define arch_read_lock(p)	arch_read_lock(p)
+#define arch_read_lock_flags(p, f) arch_read_lock(p)
+#define arch_read_trylock(p)	arch_read_trylock(p)
+#define arch_read_unlock(p)	arch_read_unlock(p)
+#define arch_write_lock(p)	arch_write_lock(p)
+#define arch_write_lock_flags(p, f) arch_write_lock(p)
+#define arch_write_unlock(p)	arch_write_unlock(p)
+#define arch_write_trylock(p)	arch_write_trylock(p)
 
-#define __raw_read_can_lock(rw)	(!((rw)->lock & 0x80000000UL))
-#define __raw_write_can_lock(rw)	(!(rw)->lock)
+#define arch_read_can_lock(rw)	(!((rw)->lock & 0x80000000UL))
+#define arch_write_can_lock(rw)	(!(rw)->lock)
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 #endif /* !(__ASSEMBLY__) */
 
diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h
index 37cbe01c585b..9c454fdeaad8 100644
--- a/arch/sparc/include/asm/spinlock_types.h
+++ b/arch/sparc/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
 
 typedef struct {
 	volatile unsigned char lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED		{ 0 }
+#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
 
 #endif
diff --git a/arch/sparc/include/asm/string_32.h b/arch/sparc/include/asm/string_32.h
index 6c5fddb7e6b5..edf196ee4ef8 100644
--- a/arch/sparc/include/asm/string_32.h
+++ b/arch/sparc/include/asm/string_32.h
@@ -16,8 +16,6 @@
 #ifdef __KERNEL__
 
 extern void __memmove(void *,const void *,__kernel_size_t);
-extern __kernel_size_t __memcpy(void *,const void *,__kernel_size_t);
-extern __kernel_size_t __memset(void *,int,__kernel_size_t);
 
 #ifndef EXPORT_SYMTAB_STROPS
 
@@ -32,82 +30,10 @@ extern __kernel_size_t __memset(void *,int,__kernel_size_t);
})
 
 #define __HAVE_ARCH_MEMCPY
-
-static inline void *__constant_memcpy(void *to, const void *from, __kernel_size_t n)
-{
-	extern void __copy_1page(void *, const void *);
-
-	if(n <= 32) {
-		__builtin_memcpy(to, from, n);
-	} else if (((unsigned int) to & 7) != 0) {
-		/* Destination is not aligned on the double-word boundary */
-		__memcpy(to, from, n);
-	} else {
-		switch(n) {
-		case PAGE_SIZE:
-			__copy_1page(to, from);
-			break;
-		default:
-			__memcpy(to, from, n);
-			break;
-		}
-	}
-	return to;
-}
-
-static inline void *__nonconstant_memcpy(void *to, const void *from, __kernel_size_t n)
-{
-	__memcpy(to, from, n);
-	return to;
-}
-
-#undef memcpy
-#define memcpy(t, f, n) \
-(__builtin_constant_p(n) ? \
- __constant_memcpy((t),(f),(n)) : \
- __nonconstant_memcpy((t),(f),(n)))
+#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
 
 #define __HAVE_ARCH_MEMSET
-
-static inline void *__constant_c_and_count_memset(void *s, char c, __kernel_size_t count)
-{
-	extern void bzero_1page(void *);
-	extern __kernel_size_t __bzero(void *, __kernel_size_t);
-
-	if(!c) {
-		if(count == PAGE_SIZE)
-			bzero_1page(s);
-		else
-			__bzero(s, count);
-	} else {
-		__memset(s, c, count);
-	}
-	return s;
-}
-
-static inline void *__constant_c_memset(void *s, char c, __kernel_size_t count)
-{
-	extern __kernel_size_t __bzero(void *, __kernel_size_t);
-
-	if(!c)
-		__bzero(s, count);
-	else
-		__memset(s, c, count);
-	return s;
-}
-
-static inline void *__nonconstant_memset(void *s, char c, __kernel_size_t count)
-{
-	__memset(s, c, count);
-	return s;
-}
-
-#undef memset
-#define memset(s, c, count) \
-(__builtin_constant_p(c) ? (__builtin_constant_p(count) ? \
-                            __constant_c_and_count_memset((s), (c), (count)) : \
-                            __constant_c_memset((s), (c), (count))) \
-                          : __nonconstant_memset((s), (c), (count)))
+#define memset(s, c, count) __builtin_memset(s, c, count)
 
 #define __HAVE_ARCH_MEMSCAN
 
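
Dropping the hand-rolled __constant_memcpy()/__constant_c_memset() dispatch in favor of __builtin_memcpy()/__builtin_memset() hands the constant-size special-casing to gcc, which open-codes small fixed-size copies and otherwise emits a normal library call. A userspace sketch of the two cases (illustrative only, not kernel code):

	#include <string.h>

	struct pair { int a, b; };

	void copy_small(struct pair *d, const struct pair *s)
	{
		/* constant 8-byte size: typically lowered to a couple of
		 * loads and stores, with no function call at all */
		__builtin_memcpy(d, s, sizeof(*d));
	}

	void copy_large(void *d, const void *s, unsigned long n)
	{
		/* runtime size: gcc emits an ordinary call to memcpy() */
		__builtin_memcpy(d, s, n);
	}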
diff --git a/arch/sparc/include/asm/string_64.h b/arch/sparc/include/asm/string_64.h
index 43161f2d17eb..9623bc213158 100644
--- a/arch/sparc/include/asm/string_64.h
+++ b/arch/sparc/include/asm/string_64.h
@@ -15,8 +15,6 @@
 
 #include <asm/asi.h>
 
-extern void *__memset(void *,int,__kernel_size_t);
-
 #ifndef EXPORT_SYMTAB_STROPS
 
 /* First the mem*() things. */
@@ -24,29 +22,10 @@ extern void *__memset(void *,int,__kernel_size_t);
 extern void *memmove(void *, const void *, __kernel_size_t);
 
 #define __HAVE_ARCH_MEMCPY
-extern void *memcpy(void *, const void *, __kernel_size_t);
+#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
 
 #define __HAVE_ARCH_MEMSET
-extern void *__builtin_memset(void *,int,__kernel_size_t);
-
-static inline void *__constant_memset(void *s, int c, __kernel_size_t count)
-{
-	extern __kernel_size_t __bzero(void *, __kernel_size_t);
-
-	if (!c) {
-		__bzero(s, count);
-		return s;
-	} else
-		return __memset(s, c, count);
-}
-
-#undef memset
-#define memset(s, c, count) \
-((__builtin_constant_p(count) && (count) <= 32) ? \
- __builtin_memset((s), (c), (count)) : \
- (__builtin_constant_p(c) ? \
-  __constant_memset((s), (c), (count)) : \
-  __memset((s), (c), (count))))
+#define memset(s, c, count) __builtin_memset(s, c, count)
 
 #define __HAVE_ARCH_MEMSCAN
 
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 1b45a7bbe407..7257ebb8f394 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -227,6 +227,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
 /* flag bit 8 is available */
 #define TIF_SECCOMP		9	/* secure computing */
 #define TIF_SYSCALL_AUDIT	10	/* syscall auditing active */
+#define TIF_SYSCALL_TRACEPOINT	11	/* syscall tracepoint instrumentation */
 /* flag bit 11 is available */
 /* NOTE: Thread flags >= 12 should be ones we have no interest
  * in using in assembly, else we can't use the mask as
@@ -246,6 +247,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define _TIF_32BIT		(1<<TIF_32BIT)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
+#define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_ABI_PENDING	(1<<TIF_ABI_PENDING)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_FREEZE		(1<<TIF_FREEZE)
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 8303ac481034..489d2ba92bcb 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -260,8 +260,23 @@ static inline unsigned long __copy_to_user(void __user *to, const void *from, un
 	return __copy_user(to, (__force void __user *) from, n);
 }
 
+extern void copy_from_user_overflow(void)
+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+	__compiletime_error("copy_from_user() buffer size is not provably correct")
+#else
+	__compiletime_warning("copy_from_user() buffer size is not provably correct")
+#endif
+;
+
 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+	int sz = __compiletime_object_size(to);
+
+	if (unlikely(sz != -1 && sz < n)) {
+		copy_from_user_overflow();
+		return -EFAULT;
+	}
+
 	if (n && __access_ok((unsigned long) from, n))
 		return __copy_user((__force void __user *) to, from, n);
 	else
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index 9ea271e19c70..dbc141660994 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -6,6 +6,7 @@
  */
 
 #ifdef __KERNEL__
+#include <linux/errno.h>
 #include <linux/compiler.h>
 #include <linux/string.h>
 #include <linux/thread_info.h>
@@ -204,6 +205,14 @@ __asm__ __volatile__( \
 
 extern int __get_user_bad(void);
 
+extern void copy_from_user_overflow(void)
+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+	__compiletime_error("copy_from_user() buffer size is not provably correct")
+#else
+	__compiletime_warning("copy_from_user() buffer size is not provably correct")
+#endif
+;
+
 extern unsigned long __must_check ___copy_from_user(void *to,
 						    const void __user *from,
 						    unsigned long size);
@@ -212,10 +221,16 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long size)
 {
-	unsigned long ret = ___copy_from_user(to, from, size);
-
-	if (unlikely(ret))
-		ret = copy_from_user_fixup(to, from, size);
+	unsigned long ret = (unsigned long) -EFAULT;
+	int sz = __compiletime_object_size(to);
+
+	if (likely(sz == -1 || sz >= size)) {
+		ret = ___copy_from_user(to, from, size);
+		if (unlikely(ret))
+			ret = copy_from_user_fixup(to, from, size);
+	} else {
+		copy_from_user_overflow();
+	}
 	return ret;
 }
 #define __copy_from_user copy_from_user
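
Both uaccess headers now gate the copy behind __compiletime_object_size(), which evaluates to -1 whenever gcc cannot determine the destination's size (heap pointers, casts), so only provably oversized copies are flagged. A sketch of the resulting behavior; hypothetical_ioctl() is made up purely for illustration:

	/* sz = 16 here while size = 32, so copy_from_user_overflow() fires:
	 * a build error with DEBUG_STRICT_USER_COPY_CHECKS enabled, or a
	 * build warning plus a runtime WARN and -EFAULT otherwise.
	 */
	long hypothetical_ioctl(void __user *arg)
	{
		char buf[16];

		return copy_from_user(buf, arg, 32) ? -EFAULT : 0;
	}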
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index d8d25bd97121..cb4b9bfd0d87 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -398,7 +398,7 @@
 #define __NR_perf_event_open	327
 #define __NR_recvmmsg		328
 
-#define NR_SYSCALLS		329
+#define NR_syscalls		329
 
 #ifdef __32bit_syscall_numbers__
 /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index ec9c7bc67d21..1504df8ddf70 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -1294,7 +1294,7 @@ linux_sparc_syscall:
 	sethi	%hi(PSR_SYSCALL), %l4
 	or	%l0, %l4, %l0
 	/* Direct access to user regs, must faster. */
-	cmp	%g1, NR_SYSCALLS
+	cmp	%g1, NR_syscalls
 	bgeu	linux_sparc_ni_syscall
 	 sll	%g1, 2, %l4
 	ld	[%l7 + %l4], %l7
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index d3b1a3076569..29973daa9930 100644
--- a/arch/sparc/kernel/ftrace.c
+++ b/arch/sparc/kernel/ftrace.c
@@ -4,6 +4,7 @@
 #include <linux/percpu.h>
 #include <linux/init.h>
 #include <linux/list.h>
+#include <trace/syscall.h>
 
 #include <asm/ftrace.h>
 
@@ -91,3 +92,13 @@ int __init ftrace_dyn_arch_init(void *data)
 }
 #endif
 
+#ifdef CONFIG_FTRACE_SYSCALLS
+
+extern unsigned int sys_call_table[];
+
+unsigned long __init arch_syscall_addr(int nr)
+{
+	return (unsigned long)sys_call_table[nr];
+}
+
+#endif
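
arch_syscall_addr() gives the generic syscall-tracing core a way to translate a syscall number into the handler's address; the extern is declared as an array of unsigned int because sparc64's table entries are 32-bit words, hence the widening cast. A simplified sketch (not verbatim; find_syscall_meta() and syscalls_metadata are assumed helpers of the generic code of that era) of how the hook is consumed at boot:

	static int __init init_ftrace_syscalls_sketch(void)
	{
		int i;

		for (i = 0; i < NR_syscalls; i++) {
			unsigned long addr = arch_syscall_addr(i);

			/* match the entry address against compile-time
			 * syscall metadata so trace_sys_enter()/exit()
			 * can resolve numbers to names later */
			syscalls_metadata[i] = find_syscall_meta(addr);
		}
		return 0;
	}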
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index ce996f97855f..8d6882bb480a 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -176,7 +176,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 
 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto skip;
@@ -195,7 +195,7 @@
 
 		seq_putc(p, '\n');
 skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 		seq_printf(p, "NMI: ");
 		for_each_online_cpu(j)
@@ -785,14 +785,14 @@ void fixup_irqs(void)
 	for (irq = 0; irq < NR_IRQS; irq++) {
 		unsigned long flags;
 
-		spin_lock_irqsave(&irq_desc[irq].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
 		if (irq_desc[irq].action &&
 		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
 			if (irq_desc[irq].chip->set_affinity)
 				irq_desc[irq].chip->set_affinity(irq,
 					irq_desc[irq].affinity);
 		}
-		spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
 	}
 
 	tick_ops->disable_irq();
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
index 3bc6527c95af..6716584e48ab 100644
--- a/arch/sparc/kernel/kprobes.c
+++ b/arch/sparc/kernel/kprobes.c
@@ -46,6 +46,9 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
 
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
+	if ((unsigned long) p->addr & 0x3UL)
+		return -EILSEQ;
+
 	p->ainsn.insn[0] = *p->addr;
 	flushi(&p->ainsn.insn[0]);
 
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index cb3c72c45aab..e0ba898e30cf 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -1242,13 +1242,13 @@ int ldc_bind(struct ldc_channel *lp, const char *name)
 	snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
 
 	err = request_irq(lp->cfg.rx_irq, ldc_rx,
-			  IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED,
+			  IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
 			  lp->rx_irq_name, lp);
 	if (err)
 		return err;
 
 	err = request_irq(lp->cfg.tx_irq, ldc_tx,
-			  IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED,
+			  IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
 			  lp->tx_irq_name, lp);
 	if (err) {
 		free_irq(lp->cfg.rx_irq, lp);
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 938da19dc065..cdc91d919e93 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -10,6 +10,7 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/miscdevice.h>
+#include <linux/bootmem.h>
 
 #include <asm/cpudata.h>
 #include <asm/hypervisor.h>
@@ -108,25 +109,15 @@ static struct mdesc_handle * __init mdesc_lmb_alloc(unsigned int mdesc_size)
 
 static void mdesc_lmb_free(struct mdesc_handle *hp)
 {
-	unsigned int alloc_size, handle_size = hp->handle_size;
-	unsigned long start, end;
+	unsigned int alloc_size;
+	unsigned long start;
 
 	BUG_ON(atomic_read(&hp->refcnt) != 0);
 	BUG_ON(!list_empty(&hp->list));
 
-	alloc_size = PAGE_ALIGN(handle_size);
-
-	start = (unsigned long) hp;
-	end = start + alloc_size;
-
-	while (start < end) {
-		struct page *p;
-
-		p = virt_to_page(start);
-		ClearPageReserved(p);
-		__free_page(p);
-		start += PAGE_SIZE;
-	}
+	alloc_size = PAGE_ALIGN(hp->handle_size);
+	start = __pa(hp);
+	free_bootmem_late(start, alloc_size);
 }
 
 static struct mdesc_mem_ops lmb_mdesc_ops = {
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index b129611590a4..f30f4a1ead23 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -47,7 +47,7 @@ static DEFINE_PER_CPU(short, wd_enabled);
 static int endflag __initdata;
 
 static DEFINE_PER_CPU(unsigned int, last_irq_sum);
-static DEFINE_PER_CPU(local_t, alert_counter);
+static DEFINE_PER_CPU(long, alert_counter);
 static DEFINE_PER_CPU(int, nmi_touch);
 
 void touch_nmi_watchdog(void)
@@ -112,13 +112,13 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 		touched = 1;
 	}
 	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
-		local_inc(&__get_cpu_var(alert_counter));
-		if (local_read(&__get_cpu_var(alert_counter)) == 30 * nmi_hz)
+		__this_cpu_inc(per_cpu_var(alert_counter));
+		if (__this_cpu_read(per_cpu_var(alert_counter)) == 30 * nmi_hz)
 			die_nmi("BUG: NMI Watchdog detected LOCKUP",
 				regs, panic_on_timeout);
 	} else {
 		__get_cpu_var(last_irq_sum) = sum;
-		local_set(&__get_cpu_var(alert_counter), 0);
+		__this_cpu_write(per_cpu_var(alert_counter), 0);
 	}
 	if (__get_cpu_var(wd_enabled)) {
 		write_pic(picl_value(nmi_hz));
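
The watchdog counter moves from local_t to a plain per-cpu long manipulated with the __this_cpu_*() operations; no atomicity is needed because each CPU's copy is only ever touched by that CPU's own NMI handler. A schematic of the idiom (kernel-style sketch; note that later kernels drop the per_cpu_var() wrapper seen in the hunk and take the variable directly):

	static DEFINE_PER_CPU(long, alert_counter);

	static void watchdog_tick(int made_progress, int nmi_hz)
	{
		if (!made_progress) {
			__this_cpu_inc(alert_counter);	/* this CPU's copy */
			if (__this_cpu_read(alert_counter) == 30 * nmi_hz)
				;	/* ~30s without progress: report a
					 * lockup, as die_nmi() does above */
		} else {
			__this_cpu_write(alert_counter, 0);	/* reset */
		}
	}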
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
index 881947e59e95..0a6f2d1798d1 100644
--- a/arch/sparc/kernel/of_device_64.c
+++ b/arch/sparc/kernel/of_device_64.c
@@ -104,9 +104,19 @@ static int of_bus_pci_map(u32 *addr, const u32 *range,
 	int i;
 
 	/* Check address type match */
-	if ((addr[0] ^ range[0]) & 0x03000000)
-		return -EINVAL;
+	if (!((addr[0] ^ range[0]) & 0x03000000))
+		goto type_match;
+
+	/* Special exception, we can map a 64-bit address into
+	 * a 32-bit range.
+	 */
+	if ((addr[0] & 0x03000000) == 0x03000000 &&
+	    (range[0] & 0x03000000) == 0x02000000)
+		goto type_match;
+
+	return -EINVAL;
 
+type_match:
 	if (of_out_of_range(addr + 1, range + 1, range + na + pna,
 			    na - 1, ns))
 		return -EINVAL;
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index 4ae91dc2feb9..2f6524d1a817 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -23,6 +23,7 @@
 #include <linux/signal.h>
 #include <linux/regset.h>
 #include <linux/tracehook.h>
+#include <trace/syscall.h>
 #include <linux/compat.h>
 #include <linux/elf.h>
 
@@ -37,6 +38,9 @@
 #include <asm/cpudata.h>
 #include <asm/cacheflush.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
 #include "entry.h"
 
 /* #define ALLOW_INIT_TRACING */
@@ -1059,6 +1063,9 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
 	if (test_thread_flag(TIF_SYSCALL_TRACE))
 		ret = tracehook_report_syscall_entry(regs);
 
+	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+		trace_sys_enter(regs, regs->u_regs[UREG_G1]);
+
 	if (unlikely(current->audit_context) && !ret)
 		audit_syscall_entry((test_thread_flag(TIF_32BIT) ?
 				     AUDIT_ARCH_SPARC :
@@ -1084,6 +1091,9 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
 		audit_syscall_exit(result, regs->u_regs[UREG_I0]);
 	}
 
+	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+		trace_sys_exit(regs, regs->u_regs[UREG_G1]);
+
 	if (test_thread_flag(TIF_SYSCALL_TRACE))
 		tracehook_report_syscall_exit(regs, 0);
 }
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
index d150c2aa98d2..dc4a458f74dc 100644
--- a/arch/sparc/kernel/syscalls.S
+++ b/arch/sparc/kernel/syscalls.S
@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
 #endif
 	.align	32
 1:	ldx	[%g6 + TI_FLAGS], %l5
-	andcc	%l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
+	andcc	%l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
 	be,pt	%icc, rtrap
 	 nop
 	call	syscall_trace_leave
@@ -187,7 +187,7 @@ linux_syscall_trace:
 	.globl	linux_sparc_syscall32
 linux_sparc_syscall32:
 	/* Direct access to user regs, much faster. */
-	cmp	%g1, NR_SYSCALLS			! IEU1	Group
+	cmp	%g1, NR_syscalls			! IEU1	Group
 	bgeu,pn	%xcc, linux_sparc_ni_syscall		! CTI
 	 srl	%i0, 0, %o0				! IEU0
 	sll	%g1, 2, %l4				! IEU0	Group
@@ -198,7 +198,7 @@ linux_sparc_syscall32:
 
 	srl	%i5, 0, %o5				! IEU1
 	srl	%i2, 0, %o2				! IEU0	Group
-	andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
+	andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
 	bne,pn	%icc, linux_syscall_trace32		! CTI
 	 mov	%i0, %l5				! IEU1
 	call	%l7					! CTI	Group brk forced
@@ -210,7 +210,7 @@ linux_sparc_syscall32:
 	.globl	linux_sparc_syscall
 linux_sparc_syscall:
 	/* Direct access to user regs, much faster. */
-	cmp	%g1, NR_SYSCALLS			! IEU1	Group
+	cmp	%g1, NR_syscalls			! IEU1	Group
 	bgeu,pn	%xcc, linux_sparc_ni_syscall		! CTI
 	 mov	%i0, %o0				! IEU0
 	sll	%g1, 2, %l4				! IEU0	Group
@@ -221,7 +221,7 @@ linux_sparc_syscall:
 
 	mov	%i3, %o3				! IEU1
 	mov	%i4, %o4				! IEU0	Group
-	andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
+	andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
 	bne,pn	%icc, linux_syscall_trace		! CTI	Group
 	 mov	%i0, %l5				! IEU0
 2:	call	%l7					! CTI	Group brk forced
@@ -245,7 +245,7 @@ ret_sys_call:
 
 	cmp	%o0, -ERESTART_RESTARTBLOCK
 	bgeu,pn	%xcc, 1f
-	 andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6
+	 andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
 80:
 	/* System call success, clear Carry condition code. */
 	andn	%g3, %g2, %g3
@@ -260,7 +260,7 @@ ret_sys_call:
 	/* System call failure, set Carry condition code.
 	 * Also, get abs(errno) to return to the process.
 	 */
-	 andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6
+	 andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
 	sub	%g0, %o0, %o0
 	or	%g3, %g2, %g3
 	stx	%o0, [%sp + PTREGS_OFF + PT_V9_I0]
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 63f73ae8a892..67e165102885 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -774,26 +774,9 @@ void __devinit setup_sparc64_timer(void)
 static struct clocksource clocksource_tick = {
 	.rating		= 100,
 	.mask		= CLOCKSOURCE_MASK(64),
-	.shift		= 16,
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-static void __init setup_clockevent_multiplier(unsigned long hz)
-{
-	unsigned long mult, shift = 32;
-
-	while (1) {
-		mult = div_sc(hz, NSEC_PER_SEC, shift);
-		if (mult && (mult >> 32UL) == 0UL)
-			break;
-
-		shift--;
-	}
-
-	sparc64_clockevent.shift = shift;
-	sparc64_clockevent.mult = mult;
-}
-
 static unsigned long tb_ticks_per_usec __read_mostly;
 
 void __delay(unsigned long loops)
@@ -828,9 +811,7 @@ void __init time_init(void)
 		clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT);
 
 	clocksource_tick.name = tick_ops->name;
-	clocksource_tick.mult =
-		clocksource_hz2mult(freq,
-				    clocksource_tick.shift);
+	clocksource_calc_mult_shift(&clocksource_tick, freq, 4);
 	clocksource_tick.read = clocksource_tick_read;
 
 	printk("clocksource: mult[%x] shift[%d]\n",
@@ -839,15 +820,14 @@
 	clocksource_register(&clocksource_tick);
 
 	sparc64_clockevent.name = tick_ops->name;
-
-	setup_clockevent_multiplier(freq);
+	clockevents_calc_mult_shift(&sparc64_clockevent, freq, 4);
 
 	sparc64_clockevent.max_delta_ns =
 		clockevent_delta2ns(0x7fffffffffffffffUL, &sparc64_clockevent);
 	sparc64_clockevent.min_delta_ns =
 		clockevent_delta2ns(0xF, &sparc64_clockevent);
 
-	printk("clockevent: mult[%ux] shift[%d]\n",
+	printk("clockevent: mult[%x] shift[%d]\n",
 	       sparc64_clockevent.mult, sparc64_clockevent.shift);
 
 	setup_sparc64_timer();
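
Both helpers replace the open-coded loop by computing a fixed-point (mult, shift) pair such that ns = (ticks * mult) >> shift; the trailing 4 bounds the conversion range in seconds over which the 32-bit mult must not overflow. A runnable illustration of the arithmetic (the 400 MHz frequency is chosen arbitrarily):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t shift = 24;
		uint64_t freq = 400000000ULL;	/* 400 MHz tick source */
		/* mult is the scaled nanoseconds-per-tick: (1e9 << shift) / freq */
		uint32_t mult = (uint32_t)(((uint64_t)1000000000 << shift) / freq);

		uint64_t ticks = 4000;	/* 10 microseconds at 400 MHz */
		uint64_t ns = ((uint64_t)ticks * mult) >> shift;

		printf("mult=%u shift=%u -> %llu ns\n",
		       mult, shift, (unsigned long long)ns);	/* 10000 ns */
		return 0;
	}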
diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c
index 6b1e6cde6fff..f8514e291e15 100644
--- a/arch/sparc/kernel/unaligned_32.c
+++ b/arch/sparc/kernel/unaligned_32.c
@@ -17,8 +17,7 @@
 #include <asm/uaccess.h>
 #include <linux/smp.h>
 #include <linux/smp_lock.h>
-
-/* #define DEBUG_MNA */
+#include <linux/perf_event.h>
 
 enum direction {
 	load,    /* ld, ldd, ldh, ldsh */
@@ -29,12 +28,6 @@ enum direction {
 	invalid,
 };
 
-#ifdef DEBUG_MNA
-static char *dirstrings[] = {
-	"load", "store", "both", "fpload", "fpstore", "invalid"
-};
-#endif
-
 static inline enum direction decode_direction(unsigned int insn)
 {
 	unsigned long tmp = (insn >> 21) & 1;
@@ -255,10 +248,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 	unsigned long addr = compute_effective_address(regs, insn);
 	int err;
 
-#ifdef DEBUG_MNA
-	printk("KMNA: pc=%08lx [dir=%s addr=%08lx size=%d] retpc[%08lx]\n",
-	       regs->pc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]);
-#endif
+	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
 	switch (dir) {
 	case load:
 		err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
@@ -350,6 +340,7 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 	}
 
 	addr = compute_effective_address(regs, insn);
+	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
 	switch(dir) {
 	case load:
 		err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index 379209982a07..378ca82b9ccc 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -20,10 +20,9 @@
 #include <asm/uaccess.h>
 #include <linux/smp.h>
 #include <linux/bitops.h>
+#include <linux/perf_event.h>
 #include <asm/fpumacro.h>
 
-/* #define DEBUG_MNA */
-
 enum direction {
 	load,    /* ld, ldd, ldh, ldsh */
 	store,   /* st, std, sth, stsh */
@@ -33,12 +32,6 @@ enum direction {
 	invalid,
 };
 
-#ifdef DEBUG_MNA
-static char *dirstrings[] = {
-	"load", "store", "both", "fpload", "fpstore", "invalid"
-};
-#endif
-
 static inline enum direction decode_direction(unsigned int insn)
 {
 	unsigned long tmp = (insn >> 21) & 1;
@@ -327,12 +320,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 
 		addr = compute_effective_address(regs, insn,
 						 ((insn >> 25) & 0x1f));
-#ifdef DEBUG_MNA
-		printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] "
-		       "retpc[%016lx]\n",
-		       regs->tpc, dirstrings[dir], addr, size,
-		       regs->u_regs[UREG_RETPC]);
-#endif
+		perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
 		switch (asi) {
 		case ASI_NL:
 		case ASI_AIUPL:
@@ -399,6 +387,7 @@ int handle_popc(u32 insn, struct pt_regs *regs)
 	int ret, i, rd = ((insn >> 25) & 0x1f);
 	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
 
+	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
 	if (insn & 0x2000) {
 		maybe_flush_windows(0, 0, rd, from_kernel);
 		value = sign_extend_imm13(insn);
@@ -445,6 +434,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
 	int asi = decode_asi(insn, regs);
 	int flag = (freg < 32) ? FPRS_DL : FPRS_DU;
 
+	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+
 	save_and_clear_fpu();
 	current_thread_info()->xfsr[0] &= ~0x1c000;
 	if (freg & 3) {
@@ -566,6 +557,8 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs)
 	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
 	unsigned long *reg;
 
+	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+
 	maybe_flush_windows(0, 0, rd, from_kernel);
 	reg = fetch_reg_addr(rd, regs);
 	if (from_kernel || rd < 16) {
@@ -596,6 +589,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
 
 	if (tstate & TSTATE_PRIV)
 		die_if_kernel("lddfmna from kernel", regs);
+	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar);
 	if (test_thread_flag(TIF_32BIT))
 		pc = (u32)pc;
 	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
@@ -657,6 +651,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
 
 	if (tstate & TSTATE_PRIV)
 		die_if_kernel("stdfmna from kernel", regs);
+	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar);
 	if (test_thread_flag(TIF_32BIT))
 		pc = (u32)pc;
 	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c
index d231cbd5c526..9dfd2ebcb157 100644
--- a/arch/sparc/kernel/visemul.c
+++ b/arch/sparc/kernel/visemul.c
@@ -5,6 +5,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/thread_info.h>
+#include <linux/perf_event.h>
 
 #include <asm/ptrace.h>
 #include <asm/pstate.h>
@@ -801,6 +802,8 @@ int vis_emul(struct pt_regs *regs, unsigned int insn)
 
 	BUG_ON(regs->tstate & TSTATE_PRIV);
 
+	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+
 	if (test_thread_flag(TIF_32BIT))
 		pc = (u32)pc;
 
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index e75faf0e59ae..c4b5e03af115 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -44,3 +44,4 @@ obj-y += iomap.o
 obj-$(CONFIG_SPARC32) += atomic32.o
 obj-y                 += ksyms.o
 obj-$(CONFIG_SPARC64) += PeeCeeI.o
+obj-y                 += usercopy.o
diff --git a/arch/sparc/lib/bzero.S b/arch/sparc/lib/bzero.S
index b6557297440f..615f401edf69 100644
--- a/arch/sparc/lib/bzero.S
+++ b/arch/sparc/lib/bzero.S
@@ -6,10 +6,6 @@
 
 	.text
 
-	.globl	__memset
-	.type	__memset, #function
-__memset:		/* %o0=buf, %o1=pat, %o2=len */
-
 	.globl	memset
 	.type	memset, #function
 memset:			/* %o0=buf, %o1=pat, %o2=len */
@@ -83,7 +79,6 @@ __bzero_done:
 	retl
 	 mov	%o3, %o0
 	.size		__bzero, .-__bzero
-	.size		__memset, .-__memset
 	.size		memset, .-memset
 
 #define EX_ST(x,y)		\
diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S
index 77f228533d47..3632cb34e914 100644
--- a/arch/sparc/lib/checksum_32.S
+++ b/arch/sparc/lib/checksum_32.S
@@ -560,7 +560,7 @@ __csum_partial_copy_end:
 	mov	%i0, %o1
 	mov	%i1, %o0
 5:
-	call	__memcpy
+	call	memcpy
 	 mov	%i2, %o2
 	tst	%o0
 	bne,a	2f
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 704b12668388..1b30bb3bfdb1 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -30,7 +30,6 @@ EXPORT_SYMBOL(__memscan_generic);
 EXPORT_SYMBOL(memcmp);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(__memset);
 EXPORT_SYMBOL(memmove);
 EXPORT_SYMBOL(__bzero);
 
@@ -81,7 +80,6 @@ EXPORT_SYMBOL(__csum_partial_copy_sparc_generic);
 
 /* Special internal versions of library functions. */
 EXPORT_SYMBOL(__copy_1page);
-EXPORT_SYMBOL(__memcpy);
 EXPORT_SYMBOL(__memmove);
 EXPORT_SYMBOL(bzero_1page);
 
diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S
index 7ce9c65f3592..24b8b12deed2 100644
--- a/arch/sparc/lib/mcount.S
+++ b/arch/sparc/lib/mcount.S
@@ -64,8 +64,9 @@ mcount:
 2:	sethi		%hi(softirq_stack), %g3
 	or		%g3, %lo(softirq_stack), %g3
 	ldx		[%g3 + %g1], %g7
+	sub		%g7, STACK_BIAS, %g7
 	cmp		%sp, %g7
-	bleu,pt		%xcc, 2f
+	bleu,pt		%xcc, 3f
 	 sethi		%hi(THREAD_SIZE), %g3
 	add		%g7, %g3, %g7
 	cmp		%sp, %g7
@@ -75,7 +76,7 @@ mcount:
 	 * again, we are already trying to output the stack overflow
 	 * message.
 	 */
-	sethi		%hi(ovstack), %g7		! cant move to panic stack fast enough
+3:	sethi		%hi(ovstack), %g7		! cant move to panic stack fast enough
 	 or		%g7, %lo(ovstack), %g7
 	add		%g7, OVSTACKSIZE, %g3
 	sub		%g3, STACK_BIAS + 192, %g3
diff --git a/arch/sparc/lib/memcpy.S b/arch/sparc/lib/memcpy.S
index ce10bc869af9..34fe65751737 100644
--- a/arch/sparc/lib/memcpy.S
+++ b/arch/sparc/lib/memcpy.S
@@ -543,9 +543,6 @@ FUNC(memmove)
 	b	3f
 	 add	%o0, 2, %o0
 
-#ifdef __KERNEL__
-FUNC(__memcpy)
-#endif
 FUNC(memcpy)	/* %o0=dst %o1=src %o2=len */
 
 	sub	%o0, %o1, %o4
diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
index 1c37ea892deb..99c017be8719 100644
--- a/arch/sparc/lib/memset.S
+++ b/arch/sparc/lib/memset.S
@@ -60,11 +60,10 @@
 	.globl __bzero_begin
 __bzero_begin:
 
-	.globl __bzero, __memset,
+	.globl __bzero
 	.globl memset
 	.globl __memset_start, __memset_end
 __memset_start:
-__memset:
 memset:
 	and	%o1, 0xff, %g3
 	sll	%g3, 8, %g2
diff --git a/arch/sparc/lib/usercopy.c b/arch/sparc/lib/usercopy.c
new file mode 100644
index 000000000000..14b363fec8a2
--- /dev/null
+++ b/arch/sparc/lib/usercopy.c
@@ -0,0 +1,8 @@
+#include <linux/module.h>
+#include <linux/bug.h>
+
+void copy_from_user_overflow(void)
+{
+	WARN(1, "Buffer overflow detected!\n");
+}
+EXPORT_SYMBOL(copy_from_user_overflow);
diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c
index e13f65da17df..a3fccde894ec 100644
--- a/arch/sparc/math-emu/math_32.c
+++ b/arch/sparc/math-emu/math_32.c
@@ -67,6 +67,7 @@
 #include <linux/types.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/perf_event.h>
 #include <asm/uaccess.h>
 
 #include "sfp-util_32.h"
@@ -163,6 +164,8 @@ int do_mathemu(struct pt_regs *regs, struct task_struct *fpt)
 	int retcode = 0;                               /* assume all succeed */
 	unsigned long insn;
 
+	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+
 #ifdef DEBUG_MATHEMU
 	printk("In do_mathemu()... pc is %08lx\n", regs->pc);
 	printk("fpqdepth is %ld\n", fpt->thread.fpqdepth);
diff --git a/arch/sparc/math-emu/math_64.c b/arch/sparc/math-emu/math_64.c
index 6863c9bde25c..56d2c44747b8 100644
--- a/arch/sparc/math-emu/math_64.c
+++ b/arch/sparc/math-emu/math_64.c
@@ -11,6 +11,7 @@
 #include <linux/types.h>
 #include <linux/sched.h>
 #include <linux/errno.h>
+#include <linux/perf_event.h>
 
 #include <asm/fpumacro.h>
 #include <asm/ptrace.h>
@@ -183,6 +184,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
 
 	if (tstate & TSTATE_PRIV)
 		die_if_kernel("unfinished/unimplemented FPop from kernel", regs);
+	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
 	if (test_thread_flag(TIF_32BIT))
 		pc = (u32)pc;
 	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 43b0da96a4fb..6081936bf03b 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -31,13 +31,12 @@
 #include <asm/sections.h>
 #include <asm/mmu_context.h>
 
-#ifdef CONFIG_KPROBES
-static inline int notify_page_fault(struct pt_regs *regs)
+static inline __kprobes int notify_page_fault(struct pt_regs *regs)
 {
 	int ret = 0;
 
 	/* kprobe_running() needs smp_processor_id() */
-	if (!user_mode(regs)) {
+	if (kprobes_built_in() && !user_mode(regs)) {
 		preempt_disable();
 		if (kprobe_running() && kprobe_fault_handler(regs, 0))
 			ret = 1;
@@ -45,12 +44,6 @@ static inline int notify_page_fault(struct pt_regs *regs)
 	}
 	return ret;
 }
-#else
-static inline int notify_page_fault(struct pt_regs *regs)
-{
-	return 0;
-}
-#endif
 
 static void __kprobes unhandled_fault(unsigned long address,
 				      struct task_struct *tsk,
@@ -73,7 +66,7 @@ static void __kprobes unhandled_fault(unsigned long address,
 	die_if_kernel("Oops", regs);
 }
 
-static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
+static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
 {
 	printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
 	       regs->tpc);
@@ -170,8 +163,9 @@ static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
 	return insn;
 }
 
-static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
-			    unsigned int insn, unsigned long address)
+static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
+				      int fault_code, unsigned int insn,
+				      unsigned long address)
 {
 	unsigned char asi = ASI_P;
 
@@ -225,7 +219,7 @@ cannot_handle:
 	unhandled_fault (address, current, regs);
 }
 
-static void noinline bogus_32bit_fault_tpc(struct pt_regs *regs)
+static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
 {
 	static int times;
 
@@ -237,8 +231,8 @@ static void noinline bogus_32bit_fault_tpc(struct pt_regs *regs)
 	show_regs(regs);
 }
 
-static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
-					       unsigned long addr)
+static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
+							 unsigned long addr)
 {
 	static int times;
 
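
The first hunk above collapses the #ifdef CONFIG_KPROBES pair with kprobes_built_in(), a helper that is constant 1 or 0, so the compiler discards the body when kprobes are disabled while the code inside still gets type-checked. The same pattern, sketched with a made-up config symbol:

	/* Compile-out pattern: the branch folds away when the feature is
	 * off, with no duplicate stub function needed. */
	#ifdef CONFIG_MY_FEATURE
	static inline int my_feature_built_in(void) { return 1; }
	#else
	static inline int my_feature_built_in(void) { return 0; }
	#endif

	static int handle_event(int event)
	{
		if (my_feature_built_in() && event > 0)
			return 1;	/* dead-code-eliminated when off */
		return 0;
	}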