Diffstat (limited to 'arch/sparc/include')
-rw-r--r--  arch/sparc/include/asm/elf_32.h          |  2
-rw-r--r--  arch/sparc/include/asm/elf_64.h          |  1
-rw-r--r--  arch/sparc/include/asm/spinlock_32.h     | 62
-rw-r--r--  arch/sparc/include/asm/spinlock_64.h     | 54
-rw-r--r--  arch/sparc/include/asm/spinlock_types.h  |  8
-rw-r--r--  arch/sparc/include/asm/string_32.h       | 78
-rw-r--r--  arch/sparc/include/asm/string_64.h       | 25
-rw-r--r--  arch/sparc/include/asm/thread_info_64.h  |  2
-rw-r--r--  arch/sparc/include/asm/uaccess_32.h      | 15
-rw-r--r--  arch/sparc/include/asm/uaccess_64.h      | 23
-rw-r--r--  arch/sparc/include/asm/unistd.h          |  2
11 files changed, 103 insertions, 169 deletions
diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
index 381a1b5256d6..4269ca6ad18a 100644
--- a/arch/sparc/include/asm/elf_32.h
+++ b/arch/sparc/include/asm/elf_32.h
@@ -104,8 +104,6 @@ typedef struct {
 #define ELF_CLASS	ELFCLASS32
 #define ELF_DATA	ELFDATA2MSB
 
-#define USE_ELF_CORE_DUMP
-
 #define ELF_EXEC_PAGESIZE	4096
 
 
diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
index d42e393078c4..ff66bb88537b 100644
--- a/arch/sparc/include/asm/elf_64.h
+++ b/arch/sparc/include/asm/elf_64.h
@@ -152,7 +152,6 @@ typedef struct {
152 (x)->e_machine == EM_SPARC32PLUS) 152 (x)->e_machine == EM_SPARC32PLUS)
153#define compat_start_thread start_thread32 153#define compat_start_thread start_thread32
154 154
155#define USE_ELF_CORE_DUMP
156#define ELF_EXEC_PAGESIZE PAGE_SIZE 155#define ELF_EXEC_PAGESIZE PAGE_SIZE
157 156
158/* This is the location that an ET_DYN program is loaded if exec'ed. Typical 157/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index 857630cff636..7f9b9dba38a6 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -10,12 +10,12 @@
 
 #include <asm/psr.h>
 
-#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
+#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
 
-#define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_unlock_wait(lock) \
+	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	__asm__ __volatile__(
 	"\n1:\n\t"
@@ -35,7 +35,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
35 : "g2", "memory", "cc"); 35 : "g2", "memory", "cc");
36} 36}
37 37
38static inline int __raw_spin_trylock(raw_spinlock_t *lock) 38static inline int arch_spin_trylock(arch_spinlock_t *lock)
39{ 39{
40 unsigned int result; 40 unsigned int result;
41 __asm__ __volatile__("ldstub [%1], %0" 41 __asm__ __volatile__("ldstub [%1], %0"
@@ -45,7 +45,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return (result == 0);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
 }
@@ -65,7 +65,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  * Sort of like atomic_t's on Sparc, but even more clever.
  *
  *	------------------------------------
- *	| 24-bit counter           | wlock |  raw_rwlock_t
+ *	| 24-bit counter           | wlock |  arch_rwlock_t
  *	------------------------------------
  *	 31                       8 7      0
  *
@@ -76,9 +76,9 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  *
  * Unfortunately this scheme limits us to ~16,000,000 cpus.
  */
-static inline void arch_read_lock(raw_rwlock_t *rw)
+static inline void __arch_read_lock(arch_rwlock_t *rw)
 {
-	register raw_rwlock_t *lp asm("g1");
+	register arch_rwlock_t *lp asm("g1");
 	lp = rw;
 	__asm__ __volatile__(
 	"mov %%o7, %%g4\n\t"
@@ -89,16 +89,16 @@ static inline void arch_read_lock(raw_rwlock_t *rw)
89 : "g2", "g4", "memory", "cc"); 89 : "g2", "g4", "memory", "cc");
90} 90}
91 91
92#define __raw_read_lock(lock) \ 92#define arch_read_lock(lock) \
93do { unsigned long flags; \ 93do { unsigned long flags; \
94 local_irq_save(flags); \ 94 local_irq_save(flags); \
95 arch_read_lock(lock); \ 95 __arch_read_lock(lock); \
96 local_irq_restore(flags); \ 96 local_irq_restore(flags); \
97} while(0) 97} while(0)
98 98
99static inline void arch_read_unlock(raw_rwlock_t *rw) 99static inline void __arch_read_unlock(arch_rwlock_t *rw)
100{ 100{
101 register raw_rwlock_t *lp asm("g1"); 101 register arch_rwlock_t *lp asm("g1");
102 lp = rw; 102 lp = rw;
103 __asm__ __volatile__( 103 __asm__ __volatile__(
104 "mov %%o7, %%g4\n\t" 104 "mov %%o7, %%g4\n\t"
@@ -109,16 +109,16 @@ static inline void arch_read_unlock(raw_rwlock_t *rw)
109 : "g2", "g4", "memory", "cc"); 109 : "g2", "g4", "memory", "cc");
110} 110}
111 111
112#define __raw_read_unlock(lock) \ 112#define arch_read_unlock(lock) \
113do { unsigned long flags; \ 113do { unsigned long flags; \
114 local_irq_save(flags); \ 114 local_irq_save(flags); \
115 arch_read_unlock(lock); \ 115 __arch_read_unlock(lock); \
116 local_irq_restore(flags); \ 116 local_irq_restore(flags); \
117} while(0) 117} while(0)
118 118
119static inline void __raw_write_lock(raw_rwlock_t *rw) 119static inline void arch_write_lock(arch_rwlock_t *rw)
120{ 120{
121 register raw_rwlock_t *lp asm("g1"); 121 register arch_rwlock_t *lp asm("g1");
122 lp = rw; 122 lp = rw;
123 __asm__ __volatile__( 123 __asm__ __volatile__(
124 "mov %%o7, %%g4\n\t" 124 "mov %%o7, %%g4\n\t"
@@ -130,7 +130,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 	*(volatile __u32 *)&lp->lock = ~0U;
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned int val;
 
@@ -150,9 +150,9 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	return (val == 0);
 }
 
-static inline int arch_read_trylock(raw_rwlock_t *rw)
+static inline int __arch_read_trylock(arch_rwlock_t *rw)
 {
-	register raw_rwlock_t *lp asm("g1");
+	register arch_rwlock_t *lp asm("g1");
 	register int res asm("o0");
 	lp = rw;
 	__asm__ __volatile__(
@@ -165,27 +165,27 @@ static inline int arch_read_trylock(raw_rwlock_t *rw)
 	return res;
 }
 
-#define __raw_read_trylock(lock) \
+#define arch_read_trylock(lock) \
 ({	unsigned long flags; \
 	int res; \
 	local_irq_save(flags); \
-	res = arch_read_trylock(lock); \
+	res = __arch_read_trylock(lock); \
 	local_irq_restore(flags); \
 	res; \
 })
 
-#define __raw_write_unlock(rw)	do { (rw)->lock = 0; } while(0)
+#define arch_write_unlock(rw)	do { (rw)->lock = 0; } while(0)
 
-#define __raw_spin_lock_flags(lock, flags)	__raw_spin_lock(lock)
-#define __raw_read_lock_flags(rw, flags)	__raw_read_lock(rw)
-#define __raw_write_lock_flags(rw, flags)	__raw_write_lock(rw)
+#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
+#define arch_read_lock_flags(rw, flags)		arch_read_lock(rw)
+#define arch_write_lock_flags(rw, flags)	arch_write_lock(rw)
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
-#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff))
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
+#define arch_write_can_lock(rw) (!(rw)->lock)
 
 #endif /* !(__ASSEMBLY__) */
 
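Note: the comment block above spells out the sparc32 packing that the renamed macros test: bits 31..8 count readers, the low byte is the writer lock, so arch_read_can_lock() checks only the writer byte while arch_write_can_lock() requires the whole word to be zero. A minimal user-space sketch of that layout, for illustration only (the sketch_* names and the test harness are assumed, not kernel code):

#include <assert.h>
#include <stdint.h>

/* Illustrative only: mirrors the word layout documented in the diff.
 * Readers occupy bits 31..8, the writer byte occupies bits 7..0.
 */
static int sketch_read_can_lock(uint32_t lock)
{
	return !(lock & 0xff);		/* no writer holds the low byte */
}

static int sketch_write_can_lock(uint32_t lock)
{
	return !lock;			/* no readers and no writer at all */
}

int main(void)
{
	uint32_t one_reader = 1u << 8;	/* reader count sits above the wlock byte */

	assert(sketch_read_can_lock(one_reader));	/* readers admit more readers */
	assert(!sketch_write_can_lock(one_reader));	/* but they block writers */
	return 0;
}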
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 43e514783582..073936a8b275 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -21,13 +21,13 @@
  * the spinner sections must be pre-V9 branches.
  */
 
-#define __raw_spin_is_locked(lp)	((lp)->lock != 0)
+#define arch_spin_is_locked(lp)	((lp)->lock != 0)
 
-#define __raw_spin_unlock_wait(lp)	\
+#define arch_spin_unlock_wait(lp)	\
 	do {	rmb();			\
 	} while((lp)->lock)
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 
@@ -46,7 +46,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
46 : "memory"); 46 : "memory");
47} 47}
48 48
49static inline int __raw_spin_trylock(raw_spinlock_t *lock) 49static inline int arch_spin_trylock(arch_spinlock_t *lock)
50{ 50{
51 unsigned long result; 51 unsigned long result;
52 52
@@ -59,7 +59,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return (result == 0UL);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__asm__ __volatile__(
 "	stb	%%g0, [%0]"
@@ -68,7 +68,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
68 : "memory"); 68 : "memory");
69} 69}
70 70
71static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) 71static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
72{ 72{
73 unsigned long tmp1, tmp2; 73 unsigned long tmp1, tmp2;
74 74
@@ -92,7 +92,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
 
 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
 
-static void inline arch_read_lock(raw_rwlock_t *lock)
+static void inline arch_read_lock(arch_rwlock_t *lock)
 {
 	unsigned long tmp1, tmp2;
 
@@ -115,7 +115,7 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
115 : "memory"); 115 : "memory");
116} 116}
117 117
118static int inline arch_read_trylock(raw_rwlock_t *lock) 118static int inline arch_read_trylock(arch_rwlock_t *lock)
119{ 119{
120 int tmp1, tmp2; 120 int tmp1, tmp2;
121 121
@@ -136,7 +136,7 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
 	return tmp1;
 }
 
-static void inline arch_read_unlock(raw_rwlock_t *lock)
+static void inline arch_read_unlock(arch_rwlock_t *lock)
 {
 	unsigned long tmp1, tmp2;
 
@@ -152,7 +152,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
152 : "memory"); 152 : "memory");
153} 153}
154 154
155static void inline arch_write_lock(raw_rwlock_t *lock) 155static void inline arch_write_lock(arch_rwlock_t *lock)
156{ 156{
157 unsigned long mask, tmp1, tmp2; 157 unsigned long mask, tmp1, tmp2;
158 158
@@ -177,7 +177,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
177 : "memory"); 177 : "memory");
178} 178}
179 179
180static void inline arch_write_unlock(raw_rwlock_t *lock) 180static void inline arch_write_unlock(arch_rwlock_t *lock)
181{ 181{
182 __asm__ __volatile__( 182 __asm__ __volatile__(
183" stw %%g0, [%0]" 183" stw %%g0, [%0]"
@@ -186,7 +186,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
186 : "memory"); 186 : "memory");
187} 187}
188 188
189static int inline arch_write_trylock(raw_rwlock_t *lock) 189static int inline arch_write_trylock(arch_rwlock_t *lock)
190{ 190{
191 unsigned long mask, tmp1, tmp2, result; 191 unsigned long mask, tmp1, tmp2, result;
192 192
@@ -210,21 +210,21 @@ static int inline arch_write_trylock(raw_rwlock_t *lock)
 	return result;
 }
 
-#define __raw_read_lock(p)	arch_read_lock(p)
-#define __raw_read_lock_flags(p, f) arch_read_lock(p)
-#define __raw_read_trylock(p)	arch_read_trylock(p)
-#define __raw_read_unlock(p)	arch_read_unlock(p)
-#define __raw_write_lock(p)	arch_write_lock(p)
-#define __raw_write_lock_flags(p, f) arch_write_lock(p)
-#define __raw_write_unlock(p)	arch_write_unlock(p)
-#define __raw_write_trylock(p)	arch_write_trylock(p)
+#define arch_read_lock(p)	arch_read_lock(p)
+#define arch_read_lock_flags(p, f) arch_read_lock(p)
+#define arch_read_trylock(p)	arch_read_trylock(p)
+#define arch_read_unlock(p)	arch_read_unlock(p)
+#define arch_write_lock(p)	arch_write_lock(p)
+#define arch_write_lock_flags(p, f) arch_write_lock(p)
+#define arch_write_unlock(p)	arch_write_unlock(p)
+#define arch_write_trylock(p)	arch_write_trylock(p)
 
-#define __raw_read_can_lock(rw)	(!((rw)->lock & 0x80000000UL))
-#define __raw_write_can_lock(rw)	(!(rw)->lock)
+#define arch_read_can_lock(rw)	(!((rw)->lock & 0x80000000UL))
+#define arch_write_can_lock(rw)	(!(rw)->lock)
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 #endif /* !(__ASSEMBLY__) */
 
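Note: the header's own comment ("the spinner sections must be pre-V9 branches") points at the locking shape here: the asm attempts the atomic ldstub once, and if the lock is busy it spins on plain loads until the byte reads zero before retrying, keeping waiters off the store path. A hedged C11 rendering of that test-and-test-and-set shape (sketch_* names assumed; the kernel uses hand-written V9 assembly, not C11 atomics):

#include <stdatomic.h>

typedef struct {
	atomic_uchar lock;	/* 0 == unlocked, as in the asm above */
} sketch_spinlock_t;

static void sketch_spin_lock(sketch_spinlock_t *lp)
{
	for (;;) {
		/* the atomic exchange stands in for ldstub */
		if (!atomic_exchange_explicit(&lp->lock, 1,
					      memory_order_acquire))
			return;
		/* lock busy: spin on plain loads before retrying */
		while (atomic_load_explicit(&lp->lock,
					    memory_order_relaxed))
			;
	}
}

static void sketch_spin_unlock(sketch_spinlock_t *lp)
{
	/* matches the plain byte store ("stb %g0") plus release ordering */
	atomic_store_explicit(&lp->lock, 0, memory_order_release);
}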
diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h
index 37cbe01c585b..9c454fdeaad8 100644
--- a/arch/sparc/include/asm/spinlock_types.h
+++ b/arch/sparc/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
 
 typedef struct {
 	volatile unsigned char lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_RW_LOCK_UNLOCKED	{ 0 }
 
 #endif
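Note: the rename is purely mechanical; the one-byte spinlock and one-word rwlock keep their layout and zero-initializers, but move to arch_* names so the generic locking layer can take over the raw_* namespace. Declaration plus static initializer works as before; a trivial hedged sketch (demo_lock is assumed, not from the tree):

typedef struct {
	volatile unsigned char lock;
} arch_spinlock_t;

#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }

/* a lock is a single byte; zero means unlocked */
static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;

int main(void)
{
	return demo_lock.lock;	/* exits 0: starts out unlocked */
}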
diff --git a/arch/sparc/include/asm/string_32.h b/arch/sparc/include/asm/string_32.h
index 6c5fddb7e6b5..edf196ee4ef8 100644
--- a/arch/sparc/include/asm/string_32.h
+++ b/arch/sparc/include/asm/string_32.h
@@ -16,8 +16,6 @@
 #ifdef __KERNEL__
 
 extern void __memmove(void *,const void *,__kernel_size_t);
-extern __kernel_size_t __memcpy(void *,const void *,__kernel_size_t);
-extern __kernel_size_t __memset(void *,int,__kernel_size_t);
 
 #ifndef EXPORT_SYMTAB_STROPS
 
@@ -32,82 +30,10 @@ extern __kernel_size_t __memset(void *,int,__kernel_size_t);
 })
 
 #define __HAVE_ARCH_MEMCPY
-
-static inline void *__constant_memcpy(void *to, const void *from, __kernel_size_t n)
-{
-	extern void __copy_1page(void *, const void *);
-
-	if(n <= 32) {
-		__builtin_memcpy(to, from, n);
-	} else if (((unsigned int) to & 7) != 0) {
-		/* Destination is not aligned on the double-word boundary */
-		__memcpy(to, from, n);
-	} else {
-		switch(n) {
-		case PAGE_SIZE:
-			__copy_1page(to, from);
-			break;
-		default:
-			__memcpy(to, from, n);
-			break;
-		}
-	}
-	return to;
-}
-
-static inline void *__nonconstant_memcpy(void *to, const void *from, __kernel_size_t n)
-{
-	__memcpy(to, from, n);
-	return to;
-}
-
-#undef memcpy
-#define memcpy(t, f, n) \
-(__builtin_constant_p(n) ? \
- __constant_memcpy((t),(f),(n)) : \
- __nonconstant_memcpy((t),(f),(n)))
+#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
 
 #define __HAVE_ARCH_MEMSET
-
-static inline void *__constant_c_and_count_memset(void *s, char c, __kernel_size_t count)
-{
-	extern void bzero_1page(void *);
-	extern __kernel_size_t __bzero(void *, __kernel_size_t);
-
-	if(!c) {
-		if(count == PAGE_SIZE)
-			bzero_1page(s);
-		else
-			__bzero(s, count);
-	} else {
-		__memset(s, c, count);
-	}
-	return s;
-}
-
-static inline void *__constant_c_memset(void *s, char c, __kernel_size_t count)
-{
-	extern __kernel_size_t __bzero(void *, __kernel_size_t);
-
-	if(!c)
-		__bzero(s, count);
-	else
-		__memset(s, c, count);
-	return s;
-}
-
-static inline void *__nonconstant_memset(void *s, char c, __kernel_size_t count)
-{
-	__memset(s, c, count);
-	return s;
-}
-
-#undef memset
-#define memset(s, c, count) \
-(__builtin_constant_p(c) ? (__builtin_constant_p(count) ? \
-			    __constant_c_and_count_memset((s), (c), (count)) : \
-			    __constant_c_memset((s), (c), (count))) \
-			  : __nonconstant_memset((s), (c), (count)))
+#define memset(s, c, count) __builtin_memset(s, c, count)
 
 #define __HAVE_ARCH_MEMSCAN
 
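Note: the hand-rolled constant-size dispatch above is replaced by deferring entirely to the compiler: __builtin_memcpy()/__builtin_memset() expand inline for small constant sizes and otherwise emit ordinary memcpy/memset calls that the architecture's optimized routines satisfy at link time. A user-space sketch of the same delegation (illustrative harness, not kernel code):

#include <stdio.h>

#define sketch_memcpy(t, f, n)	__builtin_memcpy(t, f, n)
#define sketch_memset(s, c, n)	__builtin_memset(s, c, n)

int main(void)
{
	char dst[16];

	/* constant sizes: the compiler expands these inline */
	sketch_memset(dst, 0, sizeof(dst));
	sketch_memcpy(dst, "sparc", 6);
	puts(dst);
	return 0;
}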
diff --git a/arch/sparc/include/asm/string_64.h b/arch/sparc/include/asm/string_64.h
index 43161f2d17eb..9623bc213158 100644
--- a/arch/sparc/include/asm/string_64.h
+++ b/arch/sparc/include/asm/string_64.h
@@ -15,8 +15,6 @@
 
 #include <asm/asi.h>
 
-extern void *__memset(void *,int,__kernel_size_t);
-
 #ifndef EXPORT_SYMTAB_STROPS
 
 /* First the mem*() things. */
@@ -24,29 +22,10 @@ extern void *__memset(void *,int,__kernel_size_t);
 extern void *memmove(void *, const void *, __kernel_size_t);
 
 #define __HAVE_ARCH_MEMCPY
-extern void *memcpy(void *, const void *, __kernel_size_t);
+#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
 
 #define __HAVE_ARCH_MEMSET
-extern void *__builtin_memset(void *,int,__kernel_size_t);
-
-static inline void *__constant_memset(void *s, int c, __kernel_size_t count)
-{
-	extern __kernel_size_t __bzero(void *, __kernel_size_t);
-
-	if (!c) {
-		__bzero(s, count);
-		return s;
-	} else
-		return __memset(s, c, count);
-}
-
-#undef memset
-#define memset(s, c, count) \
-((__builtin_constant_p(count) && (count) <= 32) ? \
- __builtin_memset((s), (c), (count)) : \
- (__builtin_constant_p(c) ? \
-  __constant_memset((s), (c), (count)) : \
-  __memset((s), (c), (count))))
+#define memset(s, c, count) __builtin_memset(s, c, count)
 
 #define __HAVE_ARCH_MEMSCAN
 
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 1b45a7bbe407..7257ebb8f394 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -227,6 +227,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
 /* flag bit 8 is available */
 #define TIF_SECCOMP		9	/* secure computing */
 #define TIF_SYSCALL_AUDIT	10	/* syscall auditing active */
+#define TIF_SYSCALL_TRACEPOINT	11	/* syscall tracepoint instrumentation */
 /* flag bit 11 is available */
 /* NOTE: Thread flags >= 12 should be ones we have no interest
  * in using in assembly, else we can't use the mask as
@@ -246,6 +247,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define _TIF_32BIT		(1<<TIF_32BIT)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
+#define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_ABI_PENDING	(1<<TIF_ABI_PENDING)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_FREEZE		(1<<TIF_FREEZE)
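Note: each TIF_* bit number pairs with a _TIF_* mask, and the new bit stays below 12 so that, per the NOTE in the first hunk, the mask still fits an immediate operand in the assembly paths. A hedged stand-alone check of that pairing (the harness is assumed, only the two defines come from the diff):

#include <assert.h>

#define TIF_SYSCALL_TRACEPOINT	11	/* bit number, from the diff */
#define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)

int main(void)
{
	unsigned long flags = 0;

	flags |= _TIF_SYSCALL_TRACEPOINT;		/* set on the task */
	assert(flags & _TIF_SYSCALL_TRACEPOINT);	/* tested at syscall entry/exit */
	assert(_TIF_SYSCALL_TRACEPOINT < (1 << 12));	/* still immediate-friendly */
	return 0;
}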
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 8303ac481034..489d2ba92bcb 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -260,8 +260,23 @@ static inline unsigned long __copy_to_user(void __user *to, const void *from, un
 		return __copy_user(to, (__force void __user *) from, n);
 }
 
+extern void copy_from_user_overflow(void)
+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+__compiletime_error("copy_from_user() buffer size is not provably correct")
+#else
+__compiletime_warning("copy_from_user() buffer size is not provably correct")
+#endif
+;
+
 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+	int sz = __compiletime_object_size(to);
+
+	if (unlikely(sz != -1 && sz < n)) {
+		copy_from_user_overflow();
+		return -EFAULT;
+	}
+
 	if (n && __access_ok((unsigned long) from, n))
 		return __copy_user((__force void __user *) to, from, n);
 	else
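Note: the added guard leans on __compiletime_object_size(), the kernel's wrapper around GCC's __builtin_object_size(): it folds to the destination's size when the compiler can prove it and to -1 when it cannot, so only provably undersized buffers take the overflow path (as an error or warning depending on CONFIG_DEBUG_STRICT_USER_COPY_CHECKS). A user-space sketch of the predicate (GNU C, illustrative names):

#include <stdio.h>

/* Expanded at the call site so the compiler can see the object;
 * __builtin_object_size() yields (size_t)-1 when the size is unknown.
 */
#define sketch_check(to, n)						\
do {									\
	long sz = __builtin_object_size(to, 0);				\
	if (sz != -1 && sz < (long)(n))					\
		puts("overflow: would call copy_from_user_overflow()");	\
	else								\
		puts("size unknown or sufficient: do the copy");	\
} while (0)

int main(void)
{
	char buf[8];

	sketch_check(buf, 4);	/* sufficient */
	sketch_check(buf, 16);	/* provably too small */
	return 0;
}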
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index 9ea271e19c70..dbc141660994 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -6,6 +6,7 @@
  */
 
 #ifdef __KERNEL__
+#include <linux/errno.h>
 #include <linux/compiler.h>
 #include <linux/string.h>
 #include <linux/thread_info.h>
@@ -204,6 +205,14 @@ __asm__ __volatile__( \
 
 extern int __get_user_bad(void);
 
+extern void copy_from_user_overflow(void)
+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+__compiletime_error("copy_from_user() buffer size is not provably correct")
+#else
+__compiletime_warning("copy_from_user() buffer size is not provably correct")
+#endif
+;
+
 extern unsigned long __must_check ___copy_from_user(void *to,
 						    const void __user *from,
 						    unsigned long size);
@@ -212,10 +221,16 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long size)
 {
-	unsigned long ret = ___copy_from_user(to, from, size);
-
-	if (unlikely(ret))
-		ret = copy_from_user_fixup(to, from, size);
+	unsigned long ret = (unsigned long) -EFAULT;
+	int sz = __compiletime_object_size(to);
+
+	if (likely(sz == -1 || sz >= size)) {
+		ret = ___copy_from_user(to, from, size);
+		if (unlikely(ret))
+			ret = copy_from_user_fixup(to, from, size);
+	} else {
+		copy_from_user_overflow();
+	}
 	return ret;
 }
 #define __copy_from_user copy_from_user
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index d8d25bd97121..cb4b9bfd0d87 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -398,7 +398,7 @@
 #define __NR_perf_event_open	327
 #define __NR_recvmmsg		328
 
-#define NR_SYSCALLS		329
+#define NR_syscalls		329
 
 #ifdef __32bit_syscall_numbers__
 /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,