Diffstat (limited to 'arch/sparc')
52 files changed, 633 insertions, 486 deletions
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index b6b442b0d793..0c7d365fa402 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -20,6 +20,7 @@ config SPARC | |||
20 | select HAVE_ARCH_TRACEHOOK | 20 | select HAVE_ARCH_TRACEHOOK |
21 | select SYSCTL_EXCEPTION_TRACE | 21 | select SYSCTL_EXCEPTION_TRACE |
22 | select ARCH_WANT_OPTIONAL_GPIOLIB | 22 | select ARCH_WANT_OPTIONAL_GPIOLIB |
23 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE | ||
23 | select RTC_CLASS | 24 | select RTC_CLASS |
24 | select RTC_DRV_M48T59 | 25 | select RTC_DRV_M48T59 |
25 | select HAVE_IRQ_WORK | 26 | select HAVE_IRQ_WORK |
@@ -40,6 +41,8 @@ config SPARC | |||
40 | select GENERIC_STRNCPY_FROM_USER | 41 | select GENERIC_STRNCPY_FROM_USER |
41 | select GENERIC_STRNLEN_USER | 42 | select GENERIC_STRNLEN_USER |
42 | select MODULES_USE_ELF_RELA | 43 | select MODULES_USE_ELF_RELA |
44 | select GENERIC_KERNEL_THREAD | ||
45 | select GENERIC_KERNEL_EXECVE | ||
43 | 46 | ||
44 | config SPARC32 | 47 | config SPARC32 |
45 | def_bool !64BIT | 48 | def_bool !64BIT |
diff --git a/arch/sparc/crypto/Makefile b/arch/sparc/crypto/Makefile
index 6ae1ad5e502b..5d469d81761f 100644
--- a/arch/sparc/crypto/Makefile
+++ b/arch/sparc/crypto/Makefile
@@ -13,13 +13,13 @@ obj-$(CONFIG_CRYPTO_DES_SPARC64) += camellia-sparc64.o | |||
13 | 13 | ||
14 | obj-$(CONFIG_CRYPTO_CRC32C_SPARC64) += crc32c-sparc64.o | 14 | obj-$(CONFIG_CRYPTO_CRC32C_SPARC64) += crc32c-sparc64.o |
15 | 15 | ||
16 | sha1-sparc64-y := sha1_asm.o sha1_glue.o crop_devid.o | 16 | sha1-sparc64-y := sha1_asm.o sha1_glue.o |
17 | sha256-sparc64-y := sha256_asm.o sha256_glue.o crop_devid.o | 17 | sha256-sparc64-y := sha256_asm.o sha256_glue.o |
18 | sha512-sparc64-y := sha512_asm.o sha512_glue.o crop_devid.o | 18 | sha512-sparc64-y := sha512_asm.o sha512_glue.o |
19 | md5-sparc64-y := md5_asm.o md5_glue.o crop_devid.o | 19 | md5-sparc64-y := md5_asm.o md5_glue.o |
20 | 20 | ||
21 | aes-sparc64-y := aes_asm.o aes_glue.o crop_devid.o | 21 | aes-sparc64-y := aes_asm.o aes_glue.o |
22 | des-sparc64-y := des_asm.o des_glue.o crop_devid.o | 22 | des-sparc64-y := des_asm.o des_glue.o |
23 | camellia-sparc64-y := camellia_asm.o camellia_glue.o crop_devid.o | 23 | camellia-sparc64-y := camellia_asm.o camellia_glue.o |
24 | 24 | ||
25 | crc32c-sparc64-y := crc32c_asm.o crc32c_glue.o crop_devid.o | 25 | crc32c-sparc64-y := crc32c_asm.o crc32c_glue.o |
diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
index 8f1c9980f637..3965d1d36dfa 100644
--- a/arch/sparc/crypto/aes_glue.c
+++ b/arch/sparc/crypto/aes_glue.c
@@ -475,3 +475,5 @@ MODULE_LICENSE("GPL"); | |||
475 | MODULE_DESCRIPTION("AES Secure Hash Algorithm, sparc64 aes opcode accelerated"); | 475 | MODULE_DESCRIPTION("AES Secure Hash Algorithm, sparc64 aes opcode accelerated"); |
476 | 476 | ||
477 | MODULE_ALIAS("aes"); | 477 | MODULE_ALIAS("aes"); |
478 | |||
479 | #include "crop_devid.c" | ||
diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
index 42905c084299..62c89af3fd3f 100644
--- a/arch/sparc/crypto/camellia_glue.c
+++ b/arch/sparc/crypto/camellia_glue.c
@@ -320,3 +320,5 @@ MODULE_LICENSE("GPL"); | |||
320 | MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated"); | 320 | MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated"); |
321 | 321 | ||
322 | MODULE_ALIAS("aes"); | 322 | MODULE_ALIAS("aes"); |
323 | |||
324 | #include "crop_devid.c" | ||
diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c
index 0bd89cea8d8e..5162fad912ce 100644
--- a/arch/sparc/crypto/crc32c_glue.c
+++ b/arch/sparc/crypto/crc32c_glue.c
@@ -177,3 +177,5 @@ MODULE_LICENSE("GPL"); | |||
177 | MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated"); | 177 | MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated"); |
178 | 178 | ||
179 | MODULE_ALIAS("crc32c"); | 179 | MODULE_ALIAS("crc32c"); |
180 | |||
181 | #include "crop_devid.c" | ||
diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
index c4940c2d3073..41524cebcc49 100644
--- a/arch/sparc/crypto/des_glue.c
+++ b/arch/sparc/crypto/des_glue.c
@@ -527,3 +527,5 @@ MODULE_LICENSE("GPL"); | |||
527 | MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated"); | 527 | MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated"); |
528 | 528 | ||
529 | MODULE_ALIAS("des"); | 529 | MODULE_ALIAS("des"); |
530 | |||
531 | #include "crop_devid.c" | ||
diff --git a/arch/sparc/crypto/md5_glue.c b/arch/sparc/crypto/md5_glue.c
index 603d723038ce..09a9ea1dfb69 100644
--- a/arch/sparc/crypto/md5_glue.c
+++ b/arch/sparc/crypto/md5_glue.c
@@ -186,3 +186,5 @@ MODULE_LICENSE("GPL"); | |||
186 | MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated"); | 186 | MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated"); |
187 | 187 | ||
188 | MODULE_ALIAS("md5"); | 188 | MODULE_ALIAS("md5"); |
189 | |||
190 | #include "crop_devid.c" | ||
diff --git a/arch/sparc/crypto/sha1_glue.c b/arch/sparc/crypto/sha1_glue.c
index 2bbb20bee9f1..6cd5f29e1e0d 100644
--- a/arch/sparc/crypto/sha1_glue.c
+++ b/arch/sparc/crypto/sha1_glue.c
@@ -181,3 +181,5 @@ MODULE_LICENSE("GPL"); | |||
181 | MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, sparc64 sha1 opcode accelerated"); | 181 | MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, sparc64 sha1 opcode accelerated"); |
182 | 182 | ||
183 | MODULE_ALIAS("sha1"); | 183 | MODULE_ALIAS("sha1"); |
184 | |||
185 | #include "crop_devid.c" | ||
diff --git a/arch/sparc/crypto/sha256_glue.c b/arch/sparc/crypto/sha256_glue.c
index 591e656bd891..04f555ab2680 100644
--- a/arch/sparc/crypto/sha256_glue.c
+++ b/arch/sparc/crypto/sha256_glue.c
@@ -239,3 +239,5 @@ MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, sparc64 sha256 op | |||
239 | 239 | ||
240 | MODULE_ALIAS("sha224"); | 240 | MODULE_ALIAS("sha224"); |
241 | MODULE_ALIAS("sha256"); | 241 | MODULE_ALIAS("sha256"); |
242 | |||
243 | #include "crop_devid.c" | ||
diff --git a/arch/sparc/crypto/sha512_glue.c b/arch/sparc/crypto/sha512_glue.c
index 486f0a2b7001..f04d1994d19a 100644
--- a/arch/sparc/crypto/sha512_glue.c
+++ b/arch/sparc/crypto/sha512_glue.c
@@ -224,3 +224,5 @@ MODULE_DESCRIPTION("SHA-384 and SHA-512 Secure Hash Algorithm, sparc64 sha512 op | |||
224 | 224 | ||
225 | MODULE_ALIAS("sha384"); | 225 | MODULE_ALIAS("sha384"); |
226 | MODULE_ALIAS("sha512"); | 226 | MODULE_ALIAS("sha512"); |
227 | |||
228 | #include "crop_devid.c" | ||
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index ce35a1cf1a20..be56a244c9cf 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -1,7 +1,7 @@ | |||
1 | /* atomic.h: Thankfully the V9 is at least reasonable for this | 1 | /* atomic.h: Thankfully the V9 is at least reasonable for this |
2 | * stuff. | 2 | * stuff. |
3 | * | 3 | * |
4 | * Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com) | 4 | * Copyright (C) 1996, 1997, 2000, 2012 David S. Miller (davem@redhat.com) |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #ifndef __ARCH_SPARC64_ATOMIC__ | 7 | #ifndef __ARCH_SPARC64_ATOMIC__ |
@@ -106,6 +106,8 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u) | |||
106 | 106 | ||
107 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) | 107 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) |
108 | 108 | ||
109 | extern long atomic64_dec_if_positive(atomic64_t *v); | ||
110 | |||
109 | /* Atomic operations are already serializing */ | 111 | /* Atomic operations are already serializing */ |
110 | #define smp_mb__before_atomic_dec() barrier() | 112 | #define smp_mb__before_atomic_dec() barrier() |
111 | #define smp_mb__after_atomic_dec() barrier() | 113 | #define smp_mb__after_atomic_dec() barrier() |
diff --git a/arch/sparc/include/asm/backoff.h b/arch/sparc/include/asm/backoff.h
index db3af0d30fb1..4e02086b839c 100644
--- a/arch/sparc/include/asm/backoff.h
+++ b/arch/sparc/include/asm/backoff.h
@@ -1,6 +1,46 @@ | |||
1 | #ifndef _SPARC64_BACKOFF_H | 1 | #ifndef _SPARC64_BACKOFF_H |
2 | #define _SPARC64_BACKOFF_H | 2 | #define _SPARC64_BACKOFF_H |
3 | 3 | ||
4 | /* The macros in this file implement an exponential backoff facility | ||
5 | * for atomic operations. | ||
6 | * | ||
7 | * When multiple threads compete on an atomic operation, it is | ||
8 | * possible for one thread to be continually denied a successful | ||
9 | * completion of the compare-and-swap instruction. Heavily | ||
10 | * threaded cpu implementations like Niagara can compound this | ||
11 | * problem even further. | ||
12 | * | ||
13 | * When an atomic operation fails and needs to be retried, we spin a | ||
14 | * certain number of times. At each subsequent failure of the same | ||
15 | * operation we double the spin count, realizing an exponential | ||
16 | * backoff. | ||
17 | * | ||
18 | * When we spin, we try to use an operation that will cause the | ||
19 | * current cpu strand to block, and therefore make the core fully | ||
20 | * available to any other runnable strands. There are two | ||
21 | * options, based upon cpu capabilities. | ||
22 | * | ||
23 | * On all cpus prior to SPARC-T4 we do three dummy reads of the | ||
24 | * condition code register. Each read blocks the strand for something | ||
25 | * between 40 and 50 cpu cycles. | ||
26 | * | ||
27 | * For SPARC-T4 and later we have a special "pause" instruction | ||
28 | * available. This is implemented using writes to register %asr27. | ||
29 | * The cpu will block the number of cycles written into the register, | ||
30 | * unless a disrupting trap happens first. SPARC-T4 specifically | ||
31 | * implements pause with a granularity of 8 cycles. Each strand has | ||
32 | * an internal pause counter which decrements every 8 cycles. So the | ||
33 | * chip shifts the %asr27 value down by 3 bits, and writes the result | ||
34 | * into the pause counter. If a value smaller than 8 is written, the | ||
35 | * chip blocks for 1 cycle. | ||
36 | * | ||
37 | * To achieve the same amount of backoff as the three %ccr reads give | ||
38 | * on earlier chips, we shift the backoff value up by 7 bits. (Three | ||
39 | * %ccr reads block for about 128 cycles, 1 << 7 == 128) We write the | ||
40 | * whole amount we want to block into the pause register, rather than | ||
41 | * loop writing 128 each time. | ||
42 | */ | ||
43 | |||
4 | #define BACKOFF_LIMIT (4 * 1024) | 44 | #define BACKOFF_LIMIT (4 * 1024) |
5 | 45 | ||
6 | #ifdef CONFIG_SMP | 46 | #ifdef CONFIG_SMP |
@@ -11,16 +51,25 @@ | |||
11 | #define BACKOFF_LABEL(spin_label, continue_label) \ | 51 | #define BACKOFF_LABEL(spin_label, continue_label) \ |
12 | spin_label | 52 | spin_label |
13 | 53 | ||
14 | #define BACKOFF_SPIN(reg, tmp, label) \ | 54 | #define BACKOFF_SPIN(reg, tmp, label) \ |
15 | mov reg, tmp; \ | 55 | mov reg, tmp; \ |
16 | 88: brnz,pt tmp, 88b; \ | 56 | 88: rd %ccr, %g0; \ |
17 | sub tmp, 1, tmp; \ | 57 | rd %ccr, %g0; \ |
18 | set BACKOFF_LIMIT, tmp; \ | 58 | rd %ccr, %g0; \ |
19 | cmp reg, tmp; \ | 59 | .section .pause_3insn_patch,"ax";\ |
20 | bg,pn %xcc, label; \ | 60 | .word 88b; \ |
21 | nop; \ | 61 | sllx tmp, 7, tmp; \ |
22 | ba,pt %xcc, label; \ | 62 | wr tmp, 0, %asr27; \ |
23 | sllx reg, 1, reg; | 63 | clr tmp; \ |
64 | .previous; \ | ||
65 | brnz,pt tmp, 88b; \ | ||
66 | sub tmp, 1, tmp; \ | ||
67 | set BACKOFF_LIMIT, tmp; \ | ||
68 | cmp reg, tmp; \ | ||
69 | bg,pn %xcc, label; \ | ||
70 | nop; \ | ||
71 | ba,pt %xcc, label; \ | ||
72 | sllx reg, 1, reg; | ||
24 | 73 | ||
25 | #else | 74 | #else |
26 | 75 | ||
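
The comment block above fully specifies the policy; purely as an illustration (not code from this patch), the same exponential backoff can be sketched in C11, with cpu_yield_hint() as a hypothetical stand-in for the three %ccr reads or the %asr27 pause write that the .pause_3insn_patch mechanism selects at boot:

#include <stdatomic.h>

#define BACKOFF_LIMIT   (4 * 1024)

/* Hypothetical stand-in for the strand-yield sequence: three %ccr
 * reads on pre-T4 chips, or a single write to %asr27 on SPARC-T4 and
 * later.  Modeled here as a plain delay loop so the sketch compiles
 * anywhere. */
static void cpu_yield_hint(unsigned long cycles)
{
        while (cycles--)
                __asm__ __volatile__("" ::: "memory");
}

/* The policy BACKOFF_SPIN implements: after every failed
 * compare-and-swap, yield for "backoff" cycles, then double the
 * count until it reaches BACKOFF_LIMIT. */
static void atomic_add_with_backoff(atomic_long *v, long inc)
{
        unsigned long backoff = 1;
        long old = atomic_load(v);

        while (!atomic_compare_exchange_weak(v, &old, old + inc)) {
                cpu_yield_hint(backoff);
                if (backoff < BACKOFF_LIMIT)
                        backoff <<= 1;
        }
}

The doubling stops once the delay reaches BACKOFF_LIMIT, matching the cmp/bg sequence in BACKOFF_SPIN above.
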
diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h
index cef99fbc0a21..830502fe62b4 100644
--- a/arch/sparc/include/asm/compat.h
+++ b/arch/sparc/include/asm/compat.h
@@ -232,9 +232,10 @@ static inline void __user *arch_compat_alloc_user_space(long len) | |||
232 | struct pt_regs *regs = current_thread_info()->kregs; | 232 | struct pt_regs *regs = current_thread_info()->kregs; |
233 | unsigned long usp = regs->u_regs[UREG_I6]; | 233 | unsigned long usp = regs->u_regs[UREG_I6]; |
234 | 234 | ||
235 | if (!(test_thread_flag(TIF_32BIT))) | 235 | if (test_thread_64bit_stack(usp)) |
236 | usp += STACK_BIAS; | 236 | usp += STACK_BIAS; |
237 | else | 237 | |
238 | if (test_thread_flag(TIF_32BIT)) | ||
238 | usp &= 0xffffffffUL; | 239 | usp &= 0xffffffffUL; |
239 | 240 | ||
240 | usp -= len; | 241 | usp -= len; |
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index f74ac9ee33a8..c1e01914fd98 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -106,7 +106,6 @@ static inline void start_thread(struct pt_regs * regs, unsigned long pc, | |||
106 | 106 | ||
107 | /* Free all resources held by a thread. */ | 107 | /* Free all resources held by a thread. */ |
108 | #define release_thread(tsk) do { } while(0) | 108 | #define release_thread(tsk) do { } while(0) |
109 | extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); | ||
110 | 109 | ||
111 | extern unsigned long get_wchan(struct task_struct *); | 110 | extern unsigned long get_wchan(struct task_struct *); |
112 | 111 | ||
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h
index 4e5a483122a0..cce72ce4c334 100644
--- a/arch/sparc/include/asm/processor_64.h
+++ b/arch/sparc/include/asm/processor_64.h
@@ -94,6 +94,7 @@ struct thread_struct { | |||
94 | #ifndef __ASSEMBLY__ | 94 | #ifndef __ASSEMBLY__ |
95 | 95 | ||
96 | #include <linux/types.h> | 96 | #include <linux/types.h> |
97 | #include <asm/fpumacro.h> | ||
97 | 98 | ||
98 | /* Return saved PC of a blocked thread. */ | 99 | /* Return saved PC of a blocked thread. */ |
99 | struct task_struct; | 100 | struct task_struct; |
@@ -143,6 +144,10 @@ do { \ | |||
143 | : \ | 144 | : \ |
144 | : "r" (regs), "r" (sp - sizeof(struct reg_window) - STACK_BIAS), \ | 145 | : "r" (regs), "r" (sp - sizeof(struct reg_window) - STACK_BIAS), \ |
145 | "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \ | 146 | "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \ |
147 | fprs_write(0); \ | ||
148 | current_thread_info()->xfsr[0] = 0; \ | ||
149 | current_thread_info()->fpsaved[0] = 0; \ | ||
150 | regs->tstate &= ~TSTATE_PEF; \ | ||
146 | } while (0) | 151 | } while (0) |
147 | 152 | ||
148 | #define start_thread32(regs, pc, sp) \ | 153 | #define start_thread32(regs, pc, sp) \ |
@@ -183,20 +188,37 @@ do { \ | |||
183 | : \ | 188 | : \ |
184 | : "r" (regs), "r" (sp - sizeof(struct reg_window32)), \ | 189 | : "r" (regs), "r" (sp - sizeof(struct reg_window32)), \ |
185 | "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \ | 190 | "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \ |
191 | fprs_write(0); \ | ||
192 | current_thread_info()->xfsr[0] = 0; \ | ||
193 | current_thread_info()->fpsaved[0] = 0; \ | ||
194 | regs->tstate &= ~TSTATE_PEF; \ | ||
186 | } while (0) | 195 | } while (0) |
187 | 196 | ||
188 | /* Free all resources held by a thread. */ | 197 | /* Free all resources held by a thread. */ |
189 | #define release_thread(tsk) do { } while (0) | 198 | #define release_thread(tsk) do { } while (0) |
190 | 199 | ||
191 | extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); | ||
192 | |||
193 | extern unsigned long get_wchan(struct task_struct *task); | 200 | extern unsigned long get_wchan(struct task_struct *task); |
194 | 201 | ||
195 | #define task_pt_regs(tsk) (task_thread_info(tsk)->kregs) | 202 | #define task_pt_regs(tsk) (task_thread_info(tsk)->kregs) |
196 | #define KSTK_EIP(tsk) (task_pt_regs(tsk)->tpc) | 203 | #define KSTK_EIP(tsk) (task_pt_regs(tsk)->tpc) |
197 | #define KSTK_ESP(tsk) (task_pt_regs(tsk)->u_regs[UREG_FP]) | 204 | #define KSTK_ESP(tsk) (task_pt_regs(tsk)->u_regs[UREG_FP]) |
198 | 205 | ||
199 | #define cpu_relax() barrier() | 206 | /* Please see the commentary in asm/backoff.h for a description of |
207 | * what these instructions are doing and how they have been chosen. | ||
208 | * To make a long story short, we are trying to yield the current cpu | ||
209 | * strand during busy loops. | ||
210 | */ | ||
211 | #define cpu_relax() asm volatile("\n99:\n\t" \ | ||
212 | "rd %%ccr, %%g0\n\t" \ | ||
213 | "rd %%ccr, %%g0\n\t" \ | ||
214 | "rd %%ccr, %%g0\n\t" \ | ||
215 | ".section .pause_3insn_patch,\"ax\"\n\t"\ | ||
216 | ".word 99b\n\t" \ | ||
217 | "wr %%g0, 128, %%asr27\n\t" \ | ||
218 | "nop\n\t" \ | ||
219 | "nop\n\t" \ | ||
220 | ".previous" \ | ||
221 | ::: "memory") | ||
200 | 222 | ||
201 | /* Prefetch support. This is tuned for UltraSPARC-III and later. | 223 | /* Prefetch support. This is tuned for UltraSPARC-III and later. |
202 | * UltraSPARC-I will treat these as nops, and UltraSPARC-II has | 224 | * UltraSPARC-I will treat these as nops, and UltraSPARC-II has |
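
For context, cpu_relax() is the hook that generic busy-wait loops throughout the kernel already call, so no callers need to change; with the definition above each poll iteration now parks the hardware strand (via the %ccr reads, or the pause write once the boot-time patch is applied). A minimal illustrative caller, not taken from the patch:

/* Illustrative only: the shape of a typical kernel polling loop.
 * Every cpu_relax() now briefly blocks the current strand instead of
 * spinning the shared core at full rate. */
static void wait_for_flag(volatile int *flag)
{
        while (!*flag)
                cpu_relax();
}
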
diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h
index c28765110706..f93003123bce 100644
--- a/arch/sparc/include/asm/prom.h
+++ b/arch/sparc/include/asm/prom.h
@@ -63,5 +63,10 @@ extern char *of_console_options; | |||
63 | extern void irq_trans_init(struct device_node *dp); | 63 | extern void irq_trans_init(struct device_node *dp); |
64 | extern char *build_path_component(struct device_node *dp); | 64 | extern char *build_path_component(struct device_node *dp); |
65 | 65 | ||
66 | /* SPARC has a local implementation */ | ||
67 | extern int of_address_to_resource(struct device_node *dev, int index, | ||
68 | struct resource *r); | ||
69 | #define of_address_to_resource of_address_to_resource | ||
70 | |||
66 | #endif /* __KERNEL__ */ | 71 | #endif /* __KERNEL__ */ |
67 | #endif /* _SPARC_PROM_H */ | 72 | #endif /* _SPARC_PROM_H */ |
diff --git a/arch/sparc/include/asm/ptrace.h b/arch/sparc/include/asm/ptrace.h
index 0c6f6b068289..bdfafd7af46f 100644
--- a/arch/sparc/include/asm/ptrace.h
+++ b/arch/sparc/include/asm/ptrace.h
@@ -32,6 +32,9 @@ static inline bool pt_regs_clear_syscall(struct pt_regs *regs) | |||
32 | #define arch_ptrace_stop(exit_code, info) \ | 32 | #define arch_ptrace_stop(exit_code, info) \ |
33 | synchronize_user_stack() | 33 | synchronize_user_stack() |
34 | 34 | ||
35 | #define current_pt_regs() \ | ||
36 | ((struct pt_regs *)((unsigned long)current_thread_info() + THREAD_SIZE) - 1) | ||
37 | |||
35 | struct global_reg_snapshot { | 38 | struct global_reg_snapshot { |
36 | unsigned long tstate; | 39 | unsigned long tstate; |
37 | unsigned long tpc; | 40 | unsigned long tpc; |
@@ -42,11 +45,20 @@ struct global_reg_snapshot { | |||
42 | struct thread_info *thread; | 45 | struct thread_info *thread; |
43 | unsigned long pad1; | 46 | unsigned long pad1; |
44 | }; | 47 | }; |
45 | extern struct global_reg_snapshot global_reg_snapshot[NR_CPUS]; | ||
46 | 48 | ||
47 | #define force_successful_syscall_return() \ | 49 | struct global_pmu_snapshot { |
48 | do { current_thread_info()->syscall_noerror = 1; \ | 50 | unsigned long pcr[4]; |
49 | } while (0) | 51 | unsigned long pic[4]; |
52 | }; | ||
53 | |||
54 | union global_cpu_snapshot { | ||
55 | struct global_reg_snapshot reg; | ||
56 | struct global_pmu_snapshot pmu; | ||
57 | }; | ||
58 | |||
59 | extern union global_cpu_snapshot global_cpu_snapshot[NR_CPUS]; | ||
60 | |||
61 | #define force_successful_syscall_return() set_thread_noerror(1) | ||
50 | #define user_mode(regs) (!((regs)->tstate & TSTATE_PRIV)) | 62 | #define user_mode(regs) (!((regs)->tstate & TSTATE_PRIV)) |
51 | #define instruction_pointer(regs) ((regs)->tpc) | 63 | #define instruction_pointer(regs) ((regs)->tpc) |
52 | #define instruction_pointer_set(regs, val) ((regs)->tpc = (val)) | 64 | #define instruction_pointer_set(regs, val) ((regs)->tpc = (val)) |
@@ -89,6 +101,9 @@ static inline bool pt_regs_clear_syscall(struct pt_regs *regs) | |||
89 | #define arch_ptrace_stop(exit_code, info) \ | 101 | #define arch_ptrace_stop(exit_code, info) \ |
90 | synchronize_user_stack() | 102 | synchronize_user_stack() |
91 | 103 | ||
104 | #define current_pt_regs() \ | ||
105 | ((struct pt_regs *)((unsigned long)current_thread_info() + THREAD_SIZE) - 1) | ||
106 | |||
92 | #define user_mode(regs) (!((regs)->psr & PSR_PS)) | 107 | #define user_mode(regs) (!((regs)->psr & PSR_PS)) |
93 | #define instruction_pointer(regs) ((regs)->pc) | 108 | #define instruction_pointer(regs) ((regs)->pc) |
94 | #define user_stack_pointer(regs) ((regs)->u_regs[UREG_FP]) | 109 | #define user_stack_pointer(regs) ((regs)->u_regs[UREG_FP]) |
diff --git a/arch/sparc/include/asm/smp_64.h b/arch/sparc/include/asm/smp_64.h
index 29862a9e9065..dd3bef4b9896 100644
--- a/arch/sparc/include/asm/smp_64.h
+++ b/arch/sparc/include/asm/smp_64.h
@@ -48,6 +48,7 @@ extern void smp_fill_in_sib_core_maps(void); | |||
48 | extern void cpu_play_dead(void); | 48 | extern void cpu_play_dead(void); |
49 | 49 | ||
50 | extern void smp_fetch_global_regs(void); | 50 | extern void smp_fetch_global_regs(void); |
51 | extern void smp_fetch_global_pmu(void); | ||
51 | 52 | ||
52 | struct seq_file; | 53 | struct seq_file; |
53 | void smp_bogo(struct seq_file *); | 54 | void smp_bogo(struct seq_file *); |
@@ -65,6 +66,7 @@ extern void __cpu_die(unsigned int cpu); | |||
65 | #define hard_smp_processor_id() 0 | 66 | #define hard_smp_processor_id() 0 |
66 | #define smp_fill_in_sib_core_maps() do { } while (0) | 67 | #define smp_fill_in_sib_core_maps() do { } while (0) |
67 | #define smp_fetch_global_regs() do { } while (0) | 68 | #define smp_fetch_global_regs() do { } while (0) |
69 | #define smp_fetch_global_pmu() do { } while (0) | ||
68 | 70 | ||
69 | #endif /* !(CONFIG_SMP) */ | 71 | #endif /* !(CONFIG_SMP) */ |
70 | 72 | ||
diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h
index 7923c4a2be38..cad36f56fa03 100644
--- a/arch/sparc/include/asm/switch_to_64.h
+++ b/arch/sparc/include/asm/switch_to_64.h
@@ -23,7 +23,7 @@ do { flush_tlb_pending(); \ | |||
23 | /* If you are tempted to conditionalize the following */ \ | 23 | /* If you are tempted to conditionalize the following */ \ |
24 | /* so that ASI is only written if it changes, think again. */ \ | 24 | /* so that ASI is only written if it changes, think again. */ \ |
25 | __asm__ __volatile__("wr %%g0, %0, %%asi" \ | 25 | __asm__ __volatile__("wr %%g0, %0, %%asi" \ |
26 | : : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\ | 26 | : : "r" (task_thread_info(next)->current_ds));\ |
27 | trap_block[current_thread_info()->cpu].thread = \ | 27 | trap_block[current_thread_info()->cpu].thread = \ |
28 | task_thread_info(next); \ | 28 | task_thread_info(next); \ |
29 | __asm__ __volatile__( \ | 29 | __asm__ __volatile__( \ |
diff --git a/arch/sparc/include/asm/syscalls.h b/arch/sparc/include/asm/syscalls.h
index 45a43f637a14..bf8972adea17 100644
--- a/arch/sparc/include/asm/syscalls.h
+++ b/arch/sparc/include/asm/syscalls.h
@@ -8,6 +8,4 @@ extern asmlinkage long sparc_do_fork(unsigned long clone_flags, | |||
8 | struct pt_regs *regs, | 8 | struct pt_regs *regs, |
9 | unsigned long stack_size); | 9 | unsigned long stack_size); |
10 | 10 | ||
11 | extern asmlinkage int sparc_execve(struct pt_regs *regs); | ||
12 | |||
13 | #endif /* _SPARC64_SYSCALLS_H */ | 11 | #endif /* _SPARC64_SYSCALLS_H */ |
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 4e2276631081..269bd92313df 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -14,12 +14,12 @@ | |||
14 | #define TI_FLAG_FAULT_CODE_SHIFT 56 | 14 | #define TI_FLAG_FAULT_CODE_SHIFT 56 |
15 | #define TI_FLAG_BYTE_WSTATE 1 | 15 | #define TI_FLAG_BYTE_WSTATE 1 |
16 | #define TI_FLAG_WSTATE_SHIFT 48 | 16 | #define TI_FLAG_WSTATE_SHIFT 48 |
17 | #define TI_FLAG_BYTE_CWP 2 | 17 | #define TI_FLAG_BYTE_NOERROR 2 |
18 | #define TI_FLAG_CWP_SHIFT 40 | 18 | #define TI_FLAG_BYTE_NOERROR_SHIFT 40 |
19 | #define TI_FLAG_BYTE_CURRENT_DS 3 | 19 | #define TI_FLAG_BYTE_FPDEPTH 3 |
20 | #define TI_FLAG_CURRENT_DS_SHIFT 32 | 20 | #define TI_FLAG_FPDEPTH_SHIFT 32 |
21 | #define TI_FLAG_BYTE_FPDEPTH 4 | 21 | #define TI_FLAG_BYTE_CWP 4 |
22 | #define TI_FLAG_FPDEPTH_SHIFT 24 | 22 | #define TI_FLAG_CWP_SHIFT 24 |
23 | #define TI_FLAG_BYTE_WSAVED 5 | 23 | #define TI_FLAG_BYTE_WSAVED 5 |
24 | #define TI_FLAG_WSAVED_SHIFT 16 | 24 | #define TI_FLAG_WSAVED_SHIFT 16 |
25 | 25 | ||
@@ -47,7 +47,7 @@ struct thread_info { | |||
47 | struct exec_domain *exec_domain; | 47 | struct exec_domain *exec_domain; |
48 | int preempt_count; /* 0 => preemptable, <0 => BUG */ | 48 | int preempt_count; /* 0 => preemptable, <0 => BUG */ |
49 | __u8 new_child; | 49 | __u8 new_child; |
50 | __u8 syscall_noerror; | 50 | __u8 current_ds; |
51 | __u16 cpu; | 51 | __u16 cpu; |
52 | 52 | ||
53 | unsigned long *utraps; | 53 | unsigned long *utraps; |
@@ -74,9 +74,9 @@ struct thread_info { | |||
74 | #define TI_FAULT_CODE (TI_FLAGS + TI_FLAG_BYTE_FAULT_CODE) | 74 | #define TI_FAULT_CODE (TI_FLAGS + TI_FLAG_BYTE_FAULT_CODE) |
75 | #define TI_WSTATE (TI_FLAGS + TI_FLAG_BYTE_WSTATE) | 75 | #define TI_WSTATE (TI_FLAGS + TI_FLAG_BYTE_WSTATE) |
76 | #define TI_CWP (TI_FLAGS + TI_FLAG_BYTE_CWP) | 76 | #define TI_CWP (TI_FLAGS + TI_FLAG_BYTE_CWP) |
77 | #define TI_CURRENT_DS (TI_FLAGS + TI_FLAG_BYTE_CURRENT_DS) | ||
78 | #define TI_FPDEPTH (TI_FLAGS + TI_FLAG_BYTE_FPDEPTH) | 77 | #define TI_FPDEPTH (TI_FLAGS + TI_FLAG_BYTE_FPDEPTH) |
79 | #define TI_WSAVED (TI_FLAGS + TI_FLAG_BYTE_WSAVED) | 78 | #define TI_WSAVED (TI_FLAGS + TI_FLAG_BYTE_WSAVED) |
79 | #define TI_SYS_NOERROR (TI_FLAGS + TI_FLAG_BYTE_NOERROR) | ||
80 | #define TI_FPSAVED 0x00000010 | 80 | #define TI_FPSAVED 0x00000010 |
81 | #define TI_KSP 0x00000018 | 81 | #define TI_KSP 0x00000018 |
82 | #define TI_FAULT_ADDR 0x00000020 | 82 | #define TI_FAULT_ADDR 0x00000020 |
@@ -84,7 +84,7 @@ struct thread_info { | |||
84 | #define TI_EXEC_DOMAIN 0x00000030 | 84 | #define TI_EXEC_DOMAIN 0x00000030 |
85 | #define TI_PRE_COUNT 0x00000038 | 85 | #define TI_PRE_COUNT 0x00000038 |
86 | #define TI_NEW_CHILD 0x0000003c | 86 | #define TI_NEW_CHILD 0x0000003c |
87 | #define TI_SYS_NOERROR 0x0000003d | 87 | #define TI_CURRENT_DS 0x0000003d |
88 | #define TI_CPU 0x0000003e | 88 | #define TI_CPU 0x0000003e |
89 | #define TI_UTRAPS 0x00000040 | 89 | #define TI_UTRAPS 0x00000040 |
90 | #define TI_REG_WINDOW 0x00000048 | 90 | #define TI_REG_WINDOW 0x00000048 |
@@ -121,7 +121,7 @@ struct thread_info { | |||
121 | #define INIT_THREAD_INFO(tsk) \ | 121 | #define INIT_THREAD_INFO(tsk) \ |
122 | { \ | 122 | { \ |
123 | .task = &tsk, \ | 123 | .task = &tsk, \ |
124 | .flags = ((unsigned long)ASI_P) << TI_FLAG_CURRENT_DS_SHIFT, \ | 124 | .current_ds = ASI_P, \ |
125 | .exec_domain = &default_exec_domain, \ | 125 | .exec_domain = &default_exec_domain, \ |
126 | .preempt_count = INIT_PREEMPT_COUNT, \ | 126 | .preempt_count = INIT_PREEMPT_COUNT, \ |
127 | .restart_block = { \ | 127 | .restart_block = { \ |
@@ -153,13 +153,12 @@ register struct thread_info *current_thread_info_reg asm("g6"); | |||
153 | #define set_thread_wstate(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSTATE] = (val)) | 153 | #define set_thread_wstate(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSTATE] = (val)) |
154 | #define get_thread_cwp() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CWP]) | 154 | #define get_thread_cwp() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CWP]) |
155 | #define set_thread_cwp(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CWP] = (val)) | 155 | #define set_thread_cwp(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CWP] = (val)) |
156 | #define get_thread_current_ds() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CURRENT_DS]) | 156 | #define get_thread_noerror() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_NOERROR]) |
157 | #define set_thread_current_ds(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CURRENT_DS] = (val)) | 157 | #define set_thread_noerror(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_NOERROR] = (val)) |
158 | #define get_thread_fpdepth() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_FPDEPTH]) | 158 | #define get_thread_fpdepth() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_FPDEPTH]) |
159 | #define set_thread_fpdepth(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_FPDEPTH] = (val)) | 159 | #define set_thread_fpdepth(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_FPDEPTH] = (val)) |
160 | #define get_thread_wsaved() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSAVED]) | 160 | #define get_thread_wsaved() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSAVED]) |
161 | #define set_thread_wsaved(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSAVED] = (val)) | 161 | #define set_thread_wsaved(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSAVED] = (val)) |
162 | |||
163 | #endif /* !(__ASSEMBLY__) */ | 162 | #endif /* !(__ASSEMBLY__) */ |
164 | 163 | ||
165 | /* | 164 | /* |
@@ -259,6 +258,11 @@ static inline bool test_and_clear_restore_sigmask(void) | |||
259 | 258 | ||
260 | #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) | 259 | #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) |
261 | 260 | ||
261 | #define thread32_stack_is_64bit(__SP) (((__SP) & 0x1) != 0) | ||
262 | #define test_thread_64bit_stack(__SP) \ | ||
263 | ((test_thread_flag(TIF_32BIT) && !thread32_stack_is_64bit(__SP)) ? \ | ||
264 | false : true) | ||
265 | |||
262 | #endif /* !__ASSEMBLY__ */ | 266 | #endif /* !__ASSEMBLY__ */ |
263 | 267 | ||
264 | #endif /* __KERNEL__ */ | 268 | #endif /* __KERNEL__ */ |
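
The two helpers added above encode the mixed-mode stack convention: 64-bit frames use a biased stack pointer (STACK_BIAS is 2047, an odd value), so bit 0 of %sp distinguishes a 64-bit register-window frame from a 32-bit one even inside a compat task. A plain-C restatement of that decode, for illustration only:

/* Sketch of what test_thread_64bit_stack() decides: a 64-bit task
 * always has 64-bit frames; a 32-bit (compat) task has a 64-bit frame
 * only when the stack-bias bit (bit 0 of %sp) is set. */
static int frame_is_64bit(unsigned long sp, int task_is_32bit)
{
        if (!task_is_32bit)
                return 1;
        return (sp & 1) != 0;
}

The compat.h, ttable.h, and perf callchain changes elsewhere in this diff all perform the same test before deciding how to interpret a saved frame.
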
diff --git a/arch/sparc/include/asm/ttable.h b/arch/sparc/include/asm/ttable.h
index 48f2807d3265..71b5a67522ab 100644
--- a/arch/sparc/include/asm/ttable.h
+++ b/arch/sparc/include/asm/ttable.h
@@ -372,7 +372,9 @@ etrap_spill_fixup_64bit: \ | |||
372 | 372 | ||
373 | /* Normal 32bit spill */ | 373 | /* Normal 32bit spill */ |
374 | #define SPILL_2_GENERIC(ASI) \ | 374 | #define SPILL_2_GENERIC(ASI) \ |
375 | srl %sp, 0, %sp; \ | 375 | and %sp, 1, %g3; \ |
376 | brnz,pn %g3, (. - (128 + 4)); \ | ||
377 | srl %sp, 0, %sp; \ | ||
376 | stwa %l0, [%sp + %g0] ASI; \ | 378 | stwa %l0, [%sp + %g0] ASI; \ |
377 | mov 0x04, %g3; \ | 379 | mov 0x04, %g3; \ |
378 | stwa %l1, [%sp + %g3] ASI; \ | 380 | stwa %l1, [%sp + %g3] ASI; \ |
@@ -398,14 +400,16 @@ etrap_spill_fixup_64bit: \ | |||
398 | stwa %i6, [%g1 + %g0] ASI; \ | 400 | stwa %i6, [%g1 + %g0] ASI; \ |
399 | stwa %i7, [%g1 + %g3] ASI; \ | 401 | stwa %i7, [%g1 + %g3] ASI; \ |
400 | saved; \ | 402 | saved; \ |
401 | retry; nop; nop; \ | 403 | retry; \ |
402 | b,a,pt %xcc, spill_fixup_dax; \ | 404 | b,a,pt %xcc, spill_fixup_dax; \ |
403 | b,a,pt %xcc, spill_fixup_mna; \ | 405 | b,a,pt %xcc, spill_fixup_mna; \ |
404 | b,a,pt %xcc, spill_fixup; | 406 | b,a,pt %xcc, spill_fixup; |
405 | 407 | ||
406 | #define SPILL_2_GENERIC_ETRAP \ | 408 | #define SPILL_2_GENERIC_ETRAP \ |
407 | etrap_user_spill_32bit: \ | 409 | etrap_user_spill_32bit: \ |
408 | srl %sp, 0, %sp; \ | 410 | and %sp, 1, %g3; \ |
411 | brnz,pn %g3, etrap_user_spill_64bit; \ | ||
412 | srl %sp, 0, %sp; \ | ||
409 | stwa %l0, [%sp + 0x00] %asi; \ | 413 | stwa %l0, [%sp + 0x00] %asi; \ |
410 | stwa %l1, [%sp + 0x04] %asi; \ | 414 | stwa %l1, [%sp + 0x04] %asi; \ |
411 | stwa %l2, [%sp + 0x08] %asi; \ | 415 | stwa %l2, [%sp + 0x08] %asi; \ |
@@ -427,7 +431,7 @@ etrap_user_spill_32bit: \ | |||
427 | ba,pt %xcc, etrap_save; \ | 431 | ba,pt %xcc, etrap_save; \ |
428 | wrpr %g1, %cwp; \ | 432 | wrpr %g1, %cwp; \ |
429 | nop; nop; nop; nop; \ | 433 | nop; nop; nop; nop; \ |
430 | nop; nop; nop; nop; \ | 434 | nop; nop; \ |
431 | ba,a,pt %xcc, etrap_spill_fixup_32bit; \ | 435 | ba,a,pt %xcc, etrap_spill_fixup_32bit; \ |
432 | ba,a,pt %xcc, etrap_spill_fixup_32bit; \ | 436 | ba,a,pt %xcc, etrap_spill_fixup_32bit; \ |
433 | ba,a,pt %xcc, etrap_spill_fixup_32bit; | 437 | ba,a,pt %xcc, etrap_spill_fixup_32bit; |
@@ -592,7 +596,9 @@ user_rtt_fill_64bit: \ | |||
592 | 596 | ||
593 | /* Normal 32bit fill */ | 597 | /* Normal 32bit fill */ |
594 | #define FILL_2_GENERIC(ASI) \ | 598 | #define FILL_2_GENERIC(ASI) \ |
595 | srl %sp, 0, %sp; \ | 599 | and %sp, 1, %g3; \ |
600 | brnz,pn %g3, (. - (128 + 4)); \ | ||
601 | srl %sp, 0, %sp; \ | ||
596 | lduwa [%sp + %g0] ASI, %l0; \ | 602 | lduwa [%sp + %g0] ASI, %l0; \ |
597 | mov 0x04, %g2; \ | 603 | mov 0x04, %g2; \ |
598 | mov 0x08, %g3; \ | 604 | mov 0x08, %g3; \ |
@@ -616,14 +622,16 @@ user_rtt_fill_64bit: \ | |||
616 | lduwa [%g1 + %g3] ASI, %i6; \ | 622 | lduwa [%g1 + %g3] ASI, %i6; \ |
617 | lduwa [%g1 + %g5] ASI, %i7; \ | 623 | lduwa [%g1 + %g5] ASI, %i7; \ |
618 | restored; \ | 624 | restored; \ |
619 | retry; nop; nop; nop; nop; \ | 625 | retry; nop; nop; \ |
620 | b,a,pt %xcc, fill_fixup_dax; \ | 626 | b,a,pt %xcc, fill_fixup_dax; \ |
621 | b,a,pt %xcc, fill_fixup_mna; \ | 627 | b,a,pt %xcc, fill_fixup_mna; \ |
622 | b,a,pt %xcc, fill_fixup; | 628 | b,a,pt %xcc, fill_fixup; |
623 | 629 | ||
624 | #define FILL_2_GENERIC_RTRAP \ | 630 | #define FILL_2_GENERIC_RTRAP \ |
625 | user_rtt_fill_32bit: \ | 631 | user_rtt_fill_32bit: \ |
626 | srl %sp, 0, %sp; \ | 632 | and %sp, 1, %g3; \ |
633 | brnz,pn %g3, user_rtt_fill_64bit; \ | ||
634 | srl %sp, 0, %sp; \ | ||
627 | lduwa [%sp + 0x00] %asi, %l0; \ | 635 | lduwa [%sp + 0x00] %asi, %l0; \ |
628 | lduwa [%sp + 0x04] %asi, %l1; \ | 636 | lduwa [%sp + 0x04] %asi, %l1; \ |
629 | lduwa [%sp + 0x08] %asi, %l2; \ | 637 | lduwa [%sp + 0x08] %asi, %l2; \ |
@@ -643,7 +651,7 @@ user_rtt_fill_32bit: \ | |||
643 | ba,pt %xcc, user_rtt_pre_restore; \ | 651 | ba,pt %xcc, user_rtt_pre_restore; \ |
644 | restored; \ | 652 | restored; \ |
645 | nop; nop; nop; nop; nop; \ | 653 | nop; nop; nop; nop; nop; \ |
646 | nop; nop; nop; nop; nop; \ | 654 | nop; nop; nop; \ |
647 | ba,a,pt %xcc, user_rtt_fill_fixup; \ | 655 | ba,a,pt %xcc, user_rtt_fill_fixup; \ |
648 | ba,a,pt %xcc, user_rtt_fill_fixup; \ | 656 | ba,a,pt %xcc, user_rtt_fill_fixup; \ |
649 | ba,a,pt %xcc, user_rtt_fill_fixup; | 657 | ba,a,pt %xcc, user_rtt_fill_fixup; |
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index 73083e1d38d9..e562d3caee57 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -38,14 +38,14 @@ | |||
38 | #define VERIFY_READ 0 | 38 | #define VERIFY_READ 0 |
39 | #define VERIFY_WRITE 1 | 39 | #define VERIFY_WRITE 1 |
40 | 40 | ||
41 | #define get_fs() ((mm_segment_t) { get_thread_current_ds() }) | 41 | #define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)}) |
42 | #define get_ds() (KERNEL_DS) | 42 | #define get_ds() (KERNEL_DS) |
43 | 43 | ||
44 | #define segment_eq(a,b) ((a).seg == (b).seg) | 44 | #define segment_eq(a,b) ((a).seg == (b).seg) |
45 | 45 | ||
46 | #define set_fs(val) \ | 46 | #define set_fs(val) \ |
47 | do { \ | 47 | do { \ |
48 | set_thread_current_ds((val).seg); \ | 48 | current_thread_info()->current_ds =(val).seg; \ |
49 | __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \ | 49 | __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \ |
50 | } while(0) | 50 | } while(0) |
51 | 51 | ||
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index 0ecea6ed943e..c3e5d8b64171 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -46,6 +46,7 @@ | |||
46 | #define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND | 46 | #define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND |
47 | #define __ARCH_WANT_COMPAT_SYS_SENDFILE | 47 | #define __ARCH_WANT_COMPAT_SYS_SENDFILE |
48 | #endif | 48 | #endif |
49 | #define __ARCH_WANT_SYS_EXECVE | ||
49 | 50 | ||
50 | /* | 51 | /* |
51 | * "Conditional" syscalls | 52 | * "Conditional" syscalls |
diff --git a/arch/sparc/include/uapi/asm/sigcontext.h b/arch/sparc/include/uapi/asm/sigcontext.h
index e69de29bb2d1..ae5704fa77ad 100644
--- a/arch/sparc/include/uapi/asm/sigcontext.h
+++ b/arch/sparc/include/uapi/asm/sigcontext.h
@@ -0,0 +1,4 @@ | |||
1 | /* | ||
2 | * There isn't anything here anymore, but the file must not be empty or patch | ||
3 | * will delete it. | ||
4 | */ | ||
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h
index 8974ef7ae920..cac719d1bc5c 100644
--- a/arch/sparc/include/uapi/asm/unistd.h
+++ b/arch/sparc/include/uapi/asm/unistd.h
@@ -405,8 +405,13 @@ | |||
405 | #define __NR_setns 337 | 405 | #define __NR_setns 337 |
406 | #define __NR_process_vm_readv 338 | 406 | #define __NR_process_vm_readv 338 |
407 | #define __NR_process_vm_writev 339 | 407 | #define __NR_process_vm_writev 339 |
408 | #define __NR_kern_features 340 | ||
409 | #define __NR_kcmp 341 | ||
408 | 410 | ||
409 | #define NR_syscalls 340 | 411 | #define NR_syscalls 342 |
412 | |||
413 | /* Bitmask values returned from kern_features system call. */ | ||
414 | #define KERN_FEATURE_MIXED_MODE_STACK 0x00000001 | ||
410 | 415 | ||
411 | #ifdef __32bit_syscall_numbers__ | 416 | #ifdef __32bit_syscall_numbers__ |
412 | /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants, | 417 | /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants, |
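
Until a libc wrapper exists, user space would invoke the new call by number; the probe below is only a usage sketch built from the constants added above (error handling kept minimal, and the kernel is assumed to return the feature bitmask directly):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define __NR_kern_features              340
#define KERN_FEATURE_MIXED_MODE_STACK   0x00000001

int main(void)
{
        long features = syscall(__NR_kern_features);

        if (features < 0)       /* older kernel: ENOSYS */
                features = 0;
        printf("mixed 32/64-bit stack frames supported: %s\n",
               (features & KERN_FEATURE_MIXED_MODE_STACK) ? "yes" : "no");
        return 0;
}
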
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index dcaa1cf0de40..21fd1a8f47d2 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -806,23 +806,10 @@ sys_nis_syscall: | |||
806 | call c_sys_nis_syscall | 806 | call c_sys_nis_syscall |
807 | mov %l5, %o7 | 807 | mov %l5, %o7 |
808 | 808 | ||
809 | .align 4 | ||
810 | .globl sys_execve | ||
811 | sys_execve: | ||
812 | mov %o7, %l5 | ||
813 | add %sp, STACKFRAME_SZ, %o0 ! pt_regs *regs arg | ||
814 | call sparc_execve | ||
815 | mov %l5, %o7 | ||
816 | |||
817 | .globl sunos_execv | ||
818 | sunos_execv: | 809 | sunos_execv: |
819 | st %g0, [%sp + STACKFRAME_SZ + PT_I2] | 810 | .globl sunos_execv |
820 | 811 | b sys_execve | |
821 | call sparc_execve | 812 | clr %i2 |
822 | add %sp, STACKFRAME_SZ, %o0 | ||
823 | |||
824 | b ret_sys_call | ||
825 | ld [%sp + STACKFRAME_SZ + PT_I0], %o0 | ||
826 | 813 | ||
827 | .align 4 | 814 | .align 4 |
828 | .globl sys_sparc_pipe | 815 | .globl sys_sparc_pipe |
@@ -959,17 +946,9 @@ flush_patch_four: | |||
959 | .align 4 | 946 | .align 4 |
960 | linux_sparc_ni_syscall: | 947 | linux_sparc_ni_syscall: |
961 | sethi %hi(sys_ni_syscall), %l7 | 948 | sethi %hi(sys_ni_syscall), %l7 |
962 | b syscall_is_too_hard | 949 | b do_syscall |
963 | or %l7, %lo(sys_ni_syscall), %l7 | 950 | or %l7, %lo(sys_ni_syscall), %l7 |
964 | 951 | ||
965 | linux_fast_syscall: | ||
966 | andn %l7, 3, %l7 | ||
967 | mov %i0, %o0 | ||
968 | mov %i1, %o1 | ||
969 | mov %i2, %o2 | ||
970 | jmpl %l7 + %g0, %g0 | ||
971 | mov %i3, %o3 | ||
972 | |||
973 | linux_syscall_trace: | 952 | linux_syscall_trace: |
974 | add %sp, STACKFRAME_SZ, %o0 | 953 | add %sp, STACKFRAME_SZ, %o0 |
975 | call syscall_trace | 954 | call syscall_trace |
@@ -991,6 +970,23 @@ ret_from_fork: | |||
991 | b ret_sys_call | 970 | b ret_sys_call |
992 | ld [%sp + STACKFRAME_SZ + PT_I0], %o0 | 971 | ld [%sp + STACKFRAME_SZ + PT_I0], %o0 |
993 | 972 | ||
973 | .globl ret_from_kernel_thread | ||
974 | ret_from_kernel_thread: | ||
975 | call schedule_tail | ||
976 | ld [%g3 + TI_TASK], %o0 | ||
977 | ld [%sp + STACKFRAME_SZ + PT_G1], %l0 | ||
978 | call %l0 | ||
979 | ld [%sp + STACKFRAME_SZ + PT_G2], %o0 | ||
980 | rd %psr, %l1 | ||
981 | ld [%sp + STACKFRAME_SZ + PT_PSR], %l0 | ||
982 | andn %l0, PSR_CWP, %l0 | ||
983 | nop | ||
984 | and %l1, PSR_CWP, %l1 | ||
985 | or %l0, %l1, %l0 | ||
986 | st %l0, [%sp + STACKFRAME_SZ + PT_PSR] | ||
987 | b ret_sys_call | ||
988 | mov 0, %o0 | ||
989 | |||
994 | /* Linux native system calls enter here... */ | 990 | /* Linux native system calls enter here... */ |
995 | .align 4 | 991 | .align 4 |
996 | .globl linux_sparc_syscall | 992 | .globl linux_sparc_syscall |
@@ -1002,11 +998,8 @@ linux_sparc_syscall: | |||
1002 | bgeu linux_sparc_ni_syscall | 998 | bgeu linux_sparc_ni_syscall |
1003 | sll %g1, 2, %l4 | 999 | sll %g1, 2, %l4 |
1004 | ld [%l7 + %l4], %l7 | 1000 | ld [%l7 + %l4], %l7 |
1005 | andcc %l7, 1, %g0 | ||
1006 | bne linux_fast_syscall | ||
1007 | /* Just do first insn from SAVE_ALL in the delay slot */ | ||
1008 | 1001 | ||
1009 | syscall_is_too_hard: | 1002 | do_syscall: |
1010 | SAVE_ALL_HEAD | 1003 | SAVE_ALL_HEAD |
1011 | rd %wim, %l3 | 1004 | rd %wim, %l3 |
1012 | 1005 | ||
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
index 0c218e4c0881..cc3c5cb47cda 100644
--- a/arch/sparc/kernel/entry.h
+++ b/arch/sparc/kernel/entry.h
@@ -59,6 +59,13 @@ struct popc_6insn_patch_entry { | |||
59 | extern struct popc_6insn_patch_entry __popc_6insn_patch, | 59 | extern struct popc_6insn_patch_entry __popc_6insn_patch, |
60 | __popc_6insn_patch_end; | 60 | __popc_6insn_patch_end; |
61 | 61 | ||
62 | struct pause_patch_entry { | ||
63 | unsigned int addr; | ||
64 | unsigned int insns[3]; | ||
65 | }; | ||
66 | extern struct pause_patch_entry __pause_3insn_patch, | ||
67 | __pause_3insn_patch_end; | ||
68 | |||
62 | extern void __init per_cpu_patch(void); | 69 | extern void __init per_cpu_patch(void); |
63 | extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *, | 70 | extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *, |
64 | struct sun4v_1insn_patch_entry *); | 71 | struct sun4v_1insn_patch_entry *); |
diff --git a/arch/sparc/kernel/etrap_64.S b/arch/sparc/kernel/etrap_64.S
index 786b185e6e3f..1276ca2567ba 100644
--- a/arch/sparc/kernel/etrap_64.S
+++ b/arch/sparc/kernel/etrap_64.S
@@ -92,8 +92,10 @@ etrap_save: save %g2, -STACK_BIAS, %sp | |||
92 | rdpr %wstate, %g2 | 92 | rdpr %wstate, %g2 |
93 | wrpr %g0, 0, %canrestore | 93 | wrpr %g0, 0, %canrestore |
94 | sll %g2, 3, %g2 | 94 | sll %g2, 3, %g2 |
95 | |||
96 | /* Set TI_SYS_FPDEPTH to 1 and clear TI_SYS_NOERROR. */ | ||
95 | mov 1, %l5 | 97 | mov 1, %l5 |
96 | stb %l5, [%l6 + TI_FPDEPTH] | 98 | sth %l5, [%l6 + TI_SYS_NOERROR] |
97 | 99 | ||
98 | wrpr %g3, 0, %otherwin | 100 | wrpr %g3, 0, %otherwin |
99 | wrpr %g2, 0, %wstate | 101 | wrpr %g2, 0, %wstate |
@@ -152,7 +154,9 @@ etrap_save: save %g2, -STACK_BIAS, %sp | |||
152 | add %l6, TI_FPSAVED + 1, %l4 | 154 | add %l6, TI_FPSAVED + 1, %l4 |
153 | srl %l5, 1, %l3 | 155 | srl %l5, 1, %l3 |
154 | add %l5, 2, %l5 | 156 | add %l5, 2, %l5 |
155 | stb %l5, [%l6 + TI_FPDEPTH] | 157 | |
158 | /* Set TI_SYS_FPDEPTH to %l5 and clear TI_SYS_NOERROR. */ | ||
159 | sth %l5, [%l6 + TI_SYS_NOERROR] | ||
156 | ba,pt %xcc, 2b | 160 | ba,pt %xcc, 2b |
157 | stb %g0, [%l4 + %l3] | 161 | stb %g0, [%l4 + %l3] |
158 | nop | 162 | nop |
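
The reason a single sth suffices here is the flag-byte layout introduced in thread_info_64.h above: the syscall_noerror byte sits at TI_FLAGS + 2 and the fpdepth byte at TI_FLAGS + 3, so on big-endian sparc64 a halfword store of a small value writes 0 into the noerror byte and the value into the fpdepth byte in one instruction. A small host-side illustration (the big-endian halfword is built by hand so the check runs anywhere):

#include <assert.h>
#include <string.h>

int main(void)
{
        unsigned char ti_flags[8] = { 0xaa, 0xaa, 0xaa, 0xaa,
                                      0xaa, 0xaa, 0xaa, 0xaa };
        unsigned short val = 1;                         /* fpdepth = 1 */
        unsigned char halfword[2] = { val >> 8, val & 0xff };  /* big-endian */

        /* Emulate: sth of "val" at byte offset 2 of the flags word. */
        memcpy(&ti_flags[2], halfword, sizeof(halfword));

        assert(ti_flags[2] == 0);       /* syscall_noerror cleared */
        assert(ti_flags[3] == 1);       /* fpdepth set to 1 */
        return 0;
}
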
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index f8b6eee40bde..87f60ee65433 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -56,11 +56,13 @@ static inline unsigned int leon_eirq_get(int cpu) | |||
56 | static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc) | 56 | static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc) |
57 | { | 57 | { |
58 | unsigned int eirq; | 58 | unsigned int eirq; |
59 | struct irq_bucket *p; | ||
59 | int cpu = sparc_leon3_cpuid(); | 60 | int cpu = sparc_leon3_cpuid(); |
60 | 61 | ||
61 | eirq = leon_eirq_get(cpu); | 62 | eirq = leon_eirq_get(cpu); |
62 | if ((eirq & 0x10) && irq_map[eirq]->irq) /* bit4 tells if IRQ happened */ | 63 | p = irq_map[eirq]; |
63 | generic_handle_irq(irq_map[eirq]->irq); | 64 | if ((eirq & 0x10) && p && p->irq) /* bit4 tells if IRQ happened */ |
65 | generic_handle_irq(p->irq); | ||
64 | } | 66 | } |
65 | 67 | ||
66 | /* The extended IRQ controller has been found, this function registers it */ | 68 | /* The extended IRQ controller has been found, this function registers it */ |
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index e48651dace1b..b5c38faa4ead 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -817,15 +817,17 @@ static u64 nop_for_index(int idx) | |||
817 | 817 | ||
818 | static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx) | 818 | static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx) |
819 | { | 819 | { |
820 | u64 val, mask = mask_for_index(idx); | 820 | u64 enc, val, mask = mask_for_index(idx); |
821 | int pcr_index = 0; | 821 | int pcr_index = 0; |
822 | 822 | ||
823 | if (sparc_pmu->num_pcrs > 1) | 823 | if (sparc_pmu->num_pcrs > 1) |
824 | pcr_index = idx; | 824 | pcr_index = idx; |
825 | 825 | ||
826 | enc = perf_event_get_enc(cpuc->events[idx]); | ||
827 | |||
826 | val = cpuc->pcr[pcr_index]; | 828 | val = cpuc->pcr[pcr_index]; |
827 | val &= ~mask; | 829 | val &= ~mask; |
828 | val |= hwc->config; | 830 | val |= event_encoding(enc, idx); |
829 | cpuc->pcr[pcr_index] = val; | 831 | cpuc->pcr[pcr_index] = val; |
830 | 832 | ||
831 | pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]); | 833 | pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]); |
@@ -1738,8 +1740,6 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry, | |||
1738 | { | 1740 | { |
1739 | unsigned long ufp; | 1741 | unsigned long ufp; |
1740 | 1742 | ||
1741 | perf_callchain_store(entry, regs->tpc); | ||
1742 | |||
1743 | ufp = regs->u_regs[UREG_I6] + STACK_BIAS; | 1743 | ufp = regs->u_regs[UREG_I6] + STACK_BIAS; |
1744 | do { | 1744 | do { |
1745 | struct sparc_stackf *usf, sf; | 1745 | struct sparc_stackf *usf, sf; |
@@ -1760,19 +1760,27 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry, | |||
1760 | { | 1760 | { |
1761 | unsigned long ufp; | 1761 | unsigned long ufp; |
1762 | 1762 | ||
1763 | perf_callchain_store(entry, regs->tpc); | ||
1764 | |||
1765 | ufp = regs->u_regs[UREG_I6] & 0xffffffffUL; | 1763 | ufp = regs->u_regs[UREG_I6] & 0xffffffffUL; |
1766 | do { | 1764 | do { |
1767 | struct sparc_stackf32 *usf, sf; | ||
1768 | unsigned long pc; | 1765 | unsigned long pc; |
1769 | 1766 | ||
1770 | usf = (struct sparc_stackf32 *) ufp; | 1767 | if (thread32_stack_is_64bit(ufp)) { |
1771 | if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) | 1768 | struct sparc_stackf *usf, sf; |
1772 | break; | ||
1773 | 1769 | ||
1774 | pc = sf.callers_pc; | 1770 | ufp += STACK_BIAS; |
1775 | ufp = (unsigned long)sf.fp; | 1771 | usf = (struct sparc_stackf *) ufp; |
1772 | if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) | ||
1773 | break; | ||
1774 | pc = sf.callers_pc & 0xffffffff; | ||
1775 | ufp = ((unsigned long) sf.fp) & 0xffffffff; | ||
1776 | } else { | ||
1777 | struct sparc_stackf32 *usf, sf; | ||
1778 | usf = (struct sparc_stackf32 *) ufp; | ||
1779 | if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) | ||
1780 | break; | ||
1781 | pc = sf.callers_pc; | ||
1782 | ufp = (unsigned long)sf.fp; | ||
1783 | } | ||
1776 | perf_callchain_store(entry, pc); | 1784 | perf_callchain_store(entry, pc); |
1777 | } while (entry->nr < PERF_MAX_STACK_DEPTH); | 1785 | } while (entry->nr < PERF_MAX_STACK_DEPTH); |
1778 | } | 1786 | } |
@@ -1780,6 +1788,11 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry, | |||
1780 | void | 1788 | void |
1781 | perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) | 1789 | perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) |
1782 | { | 1790 | { |
1791 | perf_callchain_store(entry, regs->tpc); | ||
1792 | |||
1793 | if (!current->mm) | ||
1794 | return; | ||
1795 | |||
1783 | flushw_user(); | 1796 | flushw_user(); |
1784 | if (test_thread_flag(TIF_32BIT)) | 1797 | if (test_thread_flag(TIF_32BIT)) |
1785 | perf_callchain_user_32(entry, regs); | 1798 | perf_callchain_user_32(entry, regs); |
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 487bffb36f5e..bf4c6addce7b 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -316,9 +316,10 @@ asmlinkage int sparc_do_fork(unsigned long clone_flags, | |||
316 | * XXX See comment above sys_vfork in sparc64. todo. | 316 | * XXX See comment above sys_vfork in sparc64. todo. |
317 | */ | 317 | */ |
318 | extern void ret_from_fork(void); | 318 | extern void ret_from_fork(void); |
319 | extern void ret_from_kernel_thread(void); | ||
319 | 320 | ||
320 | int copy_thread(unsigned long clone_flags, unsigned long sp, | 321 | int copy_thread(unsigned long clone_flags, unsigned long sp, |
321 | unsigned long unused, | 322 | unsigned long arg, |
322 | struct task_struct *p, struct pt_regs *regs) | 323 | struct task_struct *p, struct pt_regs *regs) |
323 | { | 324 | { |
324 | struct thread_info *ti = task_thread_info(p); | 325 | struct thread_info *ti = task_thread_info(p); |
@@ -336,16 +337,13 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, | |||
336 | } | 337 | } |
337 | 338 | ||
338 | /* | 339 | /* |
339 | * p->thread_info new_stack childregs | 340 | * p->thread_info new_stack childregs stack bottom |
340 | * ! ! ! {if(PSR_PS) } | 341 | * ! ! ! ! |
341 | * V V (stk.fr.) V (pt_regs) { (stk.fr.) } | 342 | * V V (stk.fr.) V (pt_regs) V |
342 | * +----- - - - - - ------+===========+============={+==========}+ | 343 | * +----- - - - - - ------+===========+=============+ |
343 | */ | 344 | */ |
344 | new_stack = task_stack_page(p) + THREAD_SIZE; | 345 | new_stack = task_stack_page(p) + THREAD_SIZE; |
345 | if (regs->psr & PSR_PS) | ||
346 | new_stack -= STACKFRAME_SZ; | ||
347 | new_stack -= STACKFRAME_SZ + TRACEREG_SZ; | 346 | new_stack -= STACKFRAME_SZ + TRACEREG_SZ; |
348 | memcpy(new_stack, (char *)regs - STACKFRAME_SZ, STACKFRAME_SZ + TRACEREG_SZ); | ||
349 | childregs = (struct pt_regs *) (new_stack + STACKFRAME_SZ); | 347 | childregs = (struct pt_regs *) (new_stack + STACKFRAME_SZ); |
350 | 348 | ||
351 | /* | 349 | /* |
@@ -356,55 +354,58 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, | |||
356 | * Thus, kpsr|=PSR_PIL. | 354 | * Thus, kpsr|=PSR_PIL. |
357 | */ | 355 | */ |
358 | ti->ksp = (unsigned long) new_stack; | 356 | ti->ksp = (unsigned long) new_stack; |
357 | p->thread.kregs = childregs; | ||
358 | |||
359 | if (unlikely(p->flags & PF_KTHREAD)) { | ||
360 | extern int nwindows; | ||
361 | unsigned long psr; | ||
362 | memset(new_stack, 0, STACKFRAME_SZ + TRACEREG_SZ); | ||
363 | p->thread.flags |= SPARC_FLAG_KTHREAD; | ||
364 | p->thread.current_ds = KERNEL_DS; | ||
365 | ti->kpc = (((unsigned long) ret_from_kernel_thread) - 0x8); | ||
366 | childregs->u_regs[UREG_G1] = sp; /* function */ | ||
367 | childregs->u_regs[UREG_G2] = arg; | ||
368 | psr = childregs->psr = get_psr(); | ||
369 | ti->kpsr = psr | PSR_PIL; | ||
370 | ti->kwim = 1 << (((psr & PSR_CWP) + 1) % nwindows); | ||
371 | return 0; | ||
372 | } | ||
373 | memcpy(new_stack, (char *)regs - STACKFRAME_SZ, STACKFRAME_SZ + TRACEREG_SZ); | ||
374 | childregs->u_regs[UREG_FP] = sp; | ||
375 | p->thread.flags &= ~SPARC_FLAG_KTHREAD; | ||
376 | p->thread.current_ds = USER_DS; | ||
359 | ti->kpc = (((unsigned long) ret_from_fork) - 0x8); | 377 | ti->kpc = (((unsigned long) ret_from_fork) - 0x8); |
360 | ti->kpsr = current->thread.fork_kpsr | PSR_PIL; | 378 | ti->kpsr = current->thread.fork_kpsr | PSR_PIL; |
361 | ti->kwim = current->thread.fork_kwim; | 379 | ti->kwim = current->thread.fork_kwim; |
362 | 380 | ||
363 | if(regs->psr & PSR_PS) { | 381 | if (sp != regs->u_regs[UREG_FP]) { |
364 | extern struct pt_regs fake_swapper_regs; | 382 | struct sparc_stackf __user *childstack; |
383 | struct sparc_stackf __user *parentstack; | ||
365 | 384 | ||
366 | p->thread.kregs = &fake_swapper_regs; | 385 | /* |
367 | new_stack += STACKFRAME_SZ + TRACEREG_SZ; | 386 | * This is a clone() call with supplied user stack. |
368 | childregs->u_regs[UREG_FP] = (unsigned long) new_stack; | 387 | * Set some valid stack frames to give to the child. |
369 | p->thread.flags |= SPARC_FLAG_KTHREAD; | 388 | */ |
370 | p->thread.current_ds = KERNEL_DS; | 389 | childstack = (struct sparc_stackf __user *) |
371 | memcpy(new_stack, (void *)regs->u_regs[UREG_FP], STACKFRAME_SZ); | 390 | (sp & ~0xfUL); |
372 | childregs->u_regs[UREG_G6] = (unsigned long) ti; | 391 | parentstack = (struct sparc_stackf __user *) |
373 | } else { | 392 | regs->u_regs[UREG_FP]; |
374 | p->thread.kregs = childregs; | ||
375 | childregs->u_regs[UREG_FP] = sp; | ||
376 | p->thread.flags &= ~SPARC_FLAG_KTHREAD; | ||
377 | p->thread.current_ds = USER_DS; | ||
378 | |||
379 | if (sp != regs->u_regs[UREG_FP]) { | ||
380 | struct sparc_stackf __user *childstack; | ||
381 | struct sparc_stackf __user *parentstack; | ||
382 | |||
383 | /* | ||
384 | * This is a clone() call with supplied user stack. | ||
385 | * Set some valid stack frames to give to the child. | ||
386 | */ | ||
387 | childstack = (struct sparc_stackf __user *) | ||
388 | (sp & ~0xfUL); | ||
389 | parentstack = (struct sparc_stackf __user *) | ||
390 | regs->u_regs[UREG_FP]; | ||
391 | 393 | ||
392 | #if 0 | 394 | #if 0 |
393 | printk("clone: parent stack:\n"); | 395 | printk("clone: parent stack:\n"); |
394 | show_stackframe(parentstack); | 396 | show_stackframe(parentstack); |
395 | #endif | 397 | #endif |
396 | 398 | ||
397 | childstack = clone_stackframe(childstack, parentstack); | 399 | childstack = clone_stackframe(childstack, parentstack); |
398 | if (!childstack) | 400 | if (!childstack) |
399 | return -EFAULT; | 401 | return -EFAULT; |
400 | 402 | ||
401 | #if 0 | 403 | #if 0 |
402 | printk("clone: child stack:\n"); | 404 | printk("clone: child stack:\n"); |
403 | show_stackframe(childstack); | 405 | show_stackframe(childstack); |
404 | #endif | 406 | #endif |
405 | 407 | ||
406 | childregs->u_regs[UREG_FP] = (unsigned long)childstack; | 408 | childregs->u_regs[UREG_FP] = (unsigned long)childstack; |
407 | } | ||
408 | } | 409 | } |
409 | 410 | ||
410 | #ifdef CONFIG_SMP | 411 | #ifdef CONFIG_SMP |
@@ -475,69 +476,6 @@ int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs) | |||
475 | return 1; | 476 | return 1; |
476 | } | 477 | } |
477 | 478 | ||
478 | /* | ||
479 | * sparc_execve() executes a new program after the asm stub has set | ||
480 | * things up for us. This should basically do what I want it to. | ||
481 | */ | ||
482 | asmlinkage int sparc_execve(struct pt_regs *regs) | ||
483 | { | ||
484 | int error, base = 0; | ||
485 | struct filename *filename; | ||
486 | |||
487 | /* Check for indirect call. */ | ||
488 | if(regs->u_regs[UREG_G1] == 0) | ||
489 | base = 1; | ||
490 | |||
491 | filename = getname((char __user *)regs->u_regs[base + UREG_I0]); | ||
492 | error = PTR_ERR(filename); | ||
493 | if(IS_ERR(filename)) | ||
494 | goto out; | ||
495 | error = do_execve(filename->name, | ||
496 | (const char __user *const __user *) | ||
497 | regs->u_regs[base + UREG_I1], | ||
498 | (const char __user *const __user *) | ||
499 | regs->u_regs[base + UREG_I2], | ||
500 | regs); | ||
501 | putname(filename); | ||
502 | out: | ||
503 | return error; | ||
504 | } | ||
505 | |||
506 | /* | ||
507 | * This is the mechanism for creating a new kernel thread. | ||
508 | * | ||
509 | * NOTE! Only a kernel-only process(ie the swapper or direct descendants | ||
510 | * who haven't done an "execve()") should use this: it will work within | ||
511 | * a system call from a "real" process, but the process memory space will | ||
512 | * not be freed until both the parent and the child have exited. | ||
513 | */ | ||
514 | pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) | ||
515 | { | ||
516 | long retval; | ||
517 | |||
518 | __asm__ __volatile__("mov %4, %%g2\n\t" /* Set aside fn ptr... */ | ||
519 | "mov %5, %%g3\n\t" /* and arg. */ | ||
520 | "mov %1, %%g1\n\t" | ||
521 | "mov %2, %%o0\n\t" /* Clone flags. */ | ||
522 | "mov 0, %%o1\n\t" /* usp arg == 0 */ | ||
523 | "t 0x10\n\t" /* Linux/Sparc clone(). */ | ||
524 | "cmp %%o1, 0\n\t" | ||
525 | "be 1f\n\t" /* The parent, just return. */ | ||
526 | " nop\n\t" /* Delay slot. */ | ||
527 | "jmpl %%g2, %%o7\n\t" /* Call the function. */ | ||
528 | " mov %%g3, %%o0\n\t" /* Get back the arg in delay. */ | ||
529 | "mov %3, %%g1\n\t" | ||
530 | "t 0x10\n\t" /* Linux/Sparc exit(). */ | ||
531 | /* Notreached by child. */ | ||
532 | "1: mov %%o0, %0\n\t" : | ||
533 | "=r" (retval) : | ||
534 | "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED), | ||
535 | "i" (__NR_exit), "r" (fn), "r" (arg) : | ||
536 | "g1", "g2", "g3", "o0", "o1", "memory", "cc"); | ||
537 | return retval; | ||
538 | } | ||
539 | EXPORT_SYMBOL(kernel_thread); | ||
540 | |||
541 | unsigned long get_wchan(struct task_struct *task) | 479 | unsigned long get_wchan(struct task_struct *task) |
542 | { | 480 | { |
543 | unsigned long pc, fp, bias = 0; | 481 | unsigned long pc, fp, bias = 0; |
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index fcaa59421126..dff54f46728d 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/tick.h> | 27 | #include <linux/tick.h> |
28 | #include <linux/init.h> | 28 | #include <linux/init.h> |
29 | #include <linux/cpu.h> | 29 | #include <linux/cpu.h> |
30 | #include <linux/perf_event.h> | ||
30 | #include <linux/elfcore.h> | 31 | #include <linux/elfcore.h> |
31 | #include <linux/sysrq.h> | 32 | #include <linux/sysrq.h> |
32 | #include <linux/nmi.h> | 33 | #include <linux/nmi.h> |
@@ -47,6 +48,7 @@ | |||
47 | #include <asm/syscalls.h> | 48 | #include <asm/syscalls.h> |
48 | #include <asm/irq_regs.h> | 49 | #include <asm/irq_regs.h> |
49 | #include <asm/smp.h> | 50 | #include <asm/smp.h> |
51 | #include <asm/pcr.h> | ||
50 | 52 | ||
51 | #include "kstack.h" | 53 | #include "kstack.h" |
52 | 54 | ||
@@ -204,18 +206,22 @@ void show_regs(struct pt_regs *regs) | |||
204 | show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]); | 206 | show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]); |
205 | } | 207 | } |
206 | 208 | ||
207 | struct global_reg_snapshot global_reg_snapshot[NR_CPUS]; | 209 | union global_cpu_snapshot global_cpu_snapshot[NR_CPUS]; |
208 | static DEFINE_SPINLOCK(global_reg_snapshot_lock); | 210 | static DEFINE_SPINLOCK(global_cpu_snapshot_lock); |
209 | 211 | ||
210 | static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs, | 212 | static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs, |
211 | int this_cpu) | 213 | int this_cpu) |
212 | { | 214 | { |
215 | struct global_reg_snapshot *rp; | ||
216 | |||
213 | flushw_all(); | 217 | flushw_all(); |
214 | 218 | ||
215 | global_reg_snapshot[this_cpu].tstate = regs->tstate; | 219 | rp = &global_cpu_snapshot[this_cpu].reg; |
216 | global_reg_snapshot[this_cpu].tpc = regs->tpc; | 220 | |
217 | global_reg_snapshot[this_cpu].tnpc = regs->tnpc; | 221 | rp->tstate = regs->tstate; |
218 | global_reg_snapshot[this_cpu].o7 = regs->u_regs[UREG_I7]; | 222 | rp->tpc = regs->tpc; |
223 | rp->tnpc = regs->tnpc; | ||
224 | rp->o7 = regs->u_regs[UREG_I7]; | ||
219 | 225 | ||
220 | if (regs->tstate & TSTATE_PRIV) { | 226 | if (regs->tstate & TSTATE_PRIV) { |
221 | struct reg_window *rw; | 227 | struct reg_window *rw; |
@@ -223,17 +229,17 @@ static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs, | |||
223 | rw = (struct reg_window *) | 229 | rw = (struct reg_window *) |
224 | (regs->u_regs[UREG_FP] + STACK_BIAS); | 230 | (regs->u_regs[UREG_FP] + STACK_BIAS); |
225 | if (kstack_valid(tp, (unsigned long) rw)) { | 231 | if (kstack_valid(tp, (unsigned long) rw)) { |
226 | global_reg_snapshot[this_cpu].i7 = rw->ins[7]; | 232 | rp->i7 = rw->ins[7]; |
227 | rw = (struct reg_window *) | 233 | rw = (struct reg_window *) |
228 | (rw->ins[6] + STACK_BIAS); | 234 | (rw->ins[6] + STACK_BIAS); |
229 | if (kstack_valid(tp, (unsigned long) rw)) | 235 | if (kstack_valid(tp, (unsigned long) rw)) |
230 | global_reg_snapshot[this_cpu].rpc = rw->ins[7]; | 236 | rp->rpc = rw->ins[7]; |
231 | } | 237 | } |
232 | } else { | 238 | } else { |
233 | global_reg_snapshot[this_cpu].i7 = 0; | 239 | rp->i7 = 0; |
234 | global_reg_snapshot[this_cpu].rpc = 0; | 240 | rp->rpc = 0; |
235 | } | 241 | } |
236 | global_reg_snapshot[this_cpu].thread = tp; | 242 | rp->thread = tp; |
237 | } | 243 | } |
238 | 244 | ||
239 | /* In order to avoid hangs we do not try to synchronize with the | 245 | /* In order to avoid hangs we do not try to synchronize with the |
@@ -261,9 +267,9 @@ void arch_trigger_all_cpu_backtrace(void) | |||
261 | if (!regs) | 267 | if (!regs) |
262 | regs = tp->kregs; | 268 | regs = tp->kregs; |
263 | 269 | ||
264 | spin_lock_irqsave(&global_reg_snapshot_lock, flags); | 270 | spin_lock_irqsave(&global_cpu_snapshot_lock, flags); |
265 | 271 | ||
266 | memset(global_reg_snapshot, 0, sizeof(global_reg_snapshot)); | 272 | memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); |
267 | 273 | ||
268 | this_cpu = raw_smp_processor_id(); | 274 | this_cpu = raw_smp_processor_id(); |
269 | 275 | ||
@@ -272,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(void) | |||
272 | smp_fetch_global_regs(); | 278 | smp_fetch_global_regs(); |
273 | 279 | ||
274 | for_each_online_cpu(cpu) { | 280 | for_each_online_cpu(cpu) { |
275 | struct global_reg_snapshot *gp = &global_reg_snapshot[cpu]; | 281 | struct global_reg_snapshot *gp = &global_cpu_snapshot[cpu].reg; |
276 | 282 | ||
277 | __global_reg_poll(gp); | 283 | __global_reg_poll(gp); |
278 | 284 | ||
@@ -295,9 +301,9 @@ void arch_trigger_all_cpu_backtrace(void) | |||
295 | } | 301 | } |
296 | } | 302 | } |
297 | 303 | ||
298 | memset(global_reg_snapshot, 0, sizeof(global_reg_snapshot)); | 304 | memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); |
299 | 305 | ||
300 | spin_unlock_irqrestore(&global_reg_snapshot_lock, flags); | 306 | spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags); |
301 | } | 307 | } |
302 | 308 | ||
303 | #ifdef CONFIG_MAGIC_SYSRQ | 309 | #ifdef CONFIG_MAGIC_SYSRQ |
@@ -309,16 +315,90 @@ static void sysrq_handle_globreg(int key) | |||
309 | 315 | ||
310 | static struct sysrq_key_op sparc_globalreg_op = { | 316 | static struct sysrq_key_op sparc_globalreg_op = { |
311 | .handler = sysrq_handle_globreg, | 317 | .handler = sysrq_handle_globreg, |
312 | .help_msg = "Globalregs", | 318 | .help_msg = "global-regs(Y)", |
313 | .action_msg = "Show Global CPU Regs", | 319 | .action_msg = "Show Global CPU Regs", |
314 | }; | 320 | }; |
315 | 321 | ||
316 | static int __init sparc_globreg_init(void) | 322 | static void __global_pmu_self(int this_cpu) |
323 | { | ||
324 | struct global_pmu_snapshot *pp; | ||
325 | int i, num; | ||
326 | |||
327 | pp = &global_cpu_snapshot[this_cpu].pmu; | ||
328 | |||
329 | num = 1; | ||
330 | if (tlb_type == hypervisor && | ||
331 | sun4v_chip_type >= SUN4V_CHIP_NIAGARA4) | ||
332 | num = 4; | ||
333 | |||
334 | for (i = 0; i < num; i++) { | ||
335 | pp->pcr[i] = pcr_ops->read_pcr(i); | ||
336 | pp->pic[i] = pcr_ops->read_pic(i); | ||
337 | } | ||
338 | } | ||
339 | |||
340 | static void __global_pmu_poll(struct global_pmu_snapshot *pp) | ||
341 | { | ||
342 | int limit = 0; | ||
343 | |||
344 | while (!pp->pcr[0] && ++limit < 100) { | ||
345 | barrier(); | ||
346 | udelay(1); | ||
347 | } | ||
348 | } | ||
349 | |||
350 | static void pmu_snapshot_all_cpus(void) | ||
351 | { | ||
352 | unsigned long flags; | ||
353 | int this_cpu, cpu; | ||
354 | |||
355 | spin_lock_irqsave(&global_cpu_snapshot_lock, flags); | ||
356 | |||
357 | memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); | ||
358 | |||
359 | this_cpu = raw_smp_processor_id(); | ||
360 | |||
361 | __global_pmu_self(this_cpu); | ||
362 | |||
363 | smp_fetch_global_pmu(); | ||
364 | |||
365 | for_each_online_cpu(cpu) { | ||
366 | struct global_pmu_snapshot *pp = &global_cpu_snapshot[cpu].pmu; | ||
367 | |||
368 | __global_pmu_poll(pp); | ||
369 | |||
370 | printk("%c CPU[%3d]: PCR[%08lx:%08lx:%08lx:%08lx] PIC[%08lx:%08lx:%08lx:%08lx]\n", | ||
371 | (cpu == this_cpu ? '*' : ' '), cpu, | ||
372 | pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3], | ||
373 | pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]); | ||
374 | } | ||
375 | |||
376 | memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); | ||
377 | |||
378 | spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags); | ||
379 | } | ||
380 | |||
381 | static void sysrq_handle_globpmu(int key) | ||
317 | { | 382 | { |
318 | return register_sysrq_key('y', &sparc_globalreg_op); | 383 | pmu_snapshot_all_cpus(); |
319 | } | 384 | } |
320 | 385 | ||
321 | core_initcall(sparc_globreg_init); | 386 | static struct sysrq_key_op sparc_globalpmu_op = { |
387 | .handler = sysrq_handle_globpmu, | ||
388 | .help_msg = "global-pmu(X)", | ||
389 | .action_msg = "Show Global PMU Regs", | ||
390 | }; | ||
391 | |||
392 | static int __init sparc_sysrq_init(void) | ||
393 | { | ||
394 | int ret = register_sysrq_key('y', &sparc_globalreg_op); | ||
395 | |||
396 | if (!ret) | ||
397 | ret = register_sysrq_key('x', &sparc_globalpmu_op); | ||
398 | return ret; | ||
399 | } | ||
400 | |||
401 | core_initcall(sparc_sysrq_init); | ||
322 | 402 | ||
323 | #endif | 403 | #endif |
324 | 404 | ||
@@ -372,13 +452,16 @@ void flush_thread(void) | |||
372 | /* It's a bit more tricky when 64-bit tasks are involved... */ | 452 | /* It's a bit more tricky when 64-bit tasks are involved... */ |
373 | static unsigned long clone_stackframe(unsigned long csp, unsigned long psp) | 453 | static unsigned long clone_stackframe(unsigned long csp, unsigned long psp) |
374 | { | 454 | { |
455 | bool stack_64bit = test_thread_64bit_stack(psp); | ||
375 | unsigned long fp, distance, rval; | 456 | unsigned long fp, distance, rval; |
376 | 457 | ||
377 | if (!(test_thread_flag(TIF_32BIT))) { | 458 | if (stack_64bit) { |
378 | csp += STACK_BIAS; | 459 | csp += STACK_BIAS; |
379 | psp += STACK_BIAS; | 460 | psp += STACK_BIAS; |
380 | __get_user(fp, &(((struct reg_window __user *)psp)->ins[6])); | 461 | __get_user(fp, &(((struct reg_window __user *)psp)->ins[6])); |
381 | fp += STACK_BIAS; | 462 | fp += STACK_BIAS; |
463 | if (test_thread_flag(TIF_32BIT)) | ||
464 | fp &= 0xffffffff; | ||
382 | } else | 465 | } else |
383 | __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6])); | 466 | __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6])); |
384 | 467 | ||
@@ -392,7 +475,7 @@ static unsigned long clone_stackframe(unsigned long csp, unsigned long psp) | |||
392 | rval = (csp - distance); | 475 | rval = (csp - distance); |
393 | if (copy_in_user((void __user *) rval, (void __user *) psp, distance)) | 476 | if (copy_in_user((void __user *) rval, (void __user *) psp, distance)) |
394 | rval = 0; | 477 | rval = 0; |
395 | else if (test_thread_flag(TIF_32BIT)) { | 478 | else if (!stack_64bit) { |
396 | if (put_user(((u32)csp), | 479 | if (put_user(((u32)csp), |
397 | &(((struct reg_window32 __user *)rval)->ins[6]))) | 480 | &(((struct reg_window32 __user *)rval)->ins[6]))) |
398 | rval = 0; | 481 | rval = 0; |
@@ -427,18 +510,18 @@ void synchronize_user_stack(void) | |||
427 | 510 | ||
428 | flush_user_windows(); | 511 | flush_user_windows(); |
429 | if ((window = get_thread_wsaved()) != 0) { | 512 | if ((window = get_thread_wsaved()) != 0) { |
430 | int winsize = sizeof(struct reg_window); | ||
431 | int bias = 0; | ||
432 | |||
433 | if (test_thread_flag(TIF_32BIT)) | ||
434 | winsize = sizeof(struct reg_window32); | ||
435 | else | ||
436 | bias = STACK_BIAS; | ||
437 | |||
438 | window -= 1; | 513 | window -= 1; |
439 | do { | 514 | do { |
440 | unsigned long sp = (t->rwbuf_stkptrs[window] + bias); | ||
441 | struct reg_window *rwin = &t->reg_window[window]; | 515 | struct reg_window *rwin = &t->reg_window[window]; |
516 | int winsize = sizeof(struct reg_window); | ||
517 | unsigned long sp; | ||
518 | |||
519 | sp = t->rwbuf_stkptrs[window]; | ||
520 | |||
521 | if (test_thread_64bit_stack(sp)) | ||
522 | sp += STACK_BIAS; | ||
523 | else | ||
524 | winsize = sizeof(struct reg_window32); | ||
442 | 525 | ||
443 | if (!copy_to_user((char __user *)sp, rwin, winsize)) { | 526 | if (!copy_to_user((char __user *)sp, rwin, winsize)) { |
444 | shift_window_buffer(window, get_thread_wsaved() - 1, t); | 527 | shift_window_buffer(window, get_thread_wsaved() - 1, t); |
@@ -464,13 +547,6 @@ void fault_in_user_windows(void) | |||
464 | { | 547 | { |
465 | struct thread_info *t = current_thread_info(); | 548 | struct thread_info *t = current_thread_info(); |
466 | unsigned long window; | 549 | unsigned long window; |
467 | int winsize = sizeof(struct reg_window); | ||
468 | int bias = 0; | ||
469 | |||
470 | if (test_thread_flag(TIF_32BIT)) | ||
471 | winsize = sizeof(struct reg_window32); | ||
472 | else | ||
473 | bias = STACK_BIAS; | ||
474 | 550 | ||
475 | flush_user_windows(); | 551 | flush_user_windows(); |
476 | window = get_thread_wsaved(); | 552 | window = get_thread_wsaved(); |
@@ -478,8 +554,16 @@ void fault_in_user_windows(void) | |||
478 | if (likely(window != 0)) { | 554 | if (likely(window != 0)) { |
479 | window -= 1; | 555 | window -= 1; |
480 | do { | 556 | do { |
481 | unsigned long sp = (t->rwbuf_stkptrs[window] + bias); | ||
482 | struct reg_window *rwin = &t->reg_window[window]; | 557 | struct reg_window *rwin = &t->reg_window[window]; |
558 | int winsize = sizeof(struct reg_window); | ||
559 | unsigned long sp; | ||
560 | |||
561 | sp = t->rwbuf_stkptrs[window]; | ||
562 | |||
563 | if (test_thread_64bit_stack(sp)) | ||
564 | sp += STACK_BIAS; | ||
565 | else | ||
566 | winsize = sizeof(struct reg_window32); | ||
483 | 567 | ||
484 | if (unlikely(sp & 0x7UL)) | 568 | if (unlikely(sp & 0x7UL)) |
485 | stack_unaligned(sp); | 569 | stack_unaligned(sp); |
@@ -538,64 +622,55 @@ asmlinkage long sparc_do_fork(unsigned long clone_flags, | |||
538 | * Child --> %o0 == parents pid, %o1 == 1 | 622 | * Child --> %o0 == parents pid, %o1 == 1 |
539 | */ | 623 | */ |
540 | int copy_thread(unsigned long clone_flags, unsigned long sp, | 624 | int copy_thread(unsigned long clone_flags, unsigned long sp, |
541 | unsigned long unused, | 625 | unsigned long arg, |
542 | struct task_struct *p, struct pt_regs *regs) | 626 | struct task_struct *p, struct pt_regs *regs) |
543 | { | 627 | { |
544 | struct thread_info *t = task_thread_info(p); | 628 | struct thread_info *t = task_thread_info(p); |
545 | struct sparc_stackf *parent_sf; | 629 | struct sparc_stackf *parent_sf; |
546 | unsigned long child_stack_sz; | 630 | unsigned long child_stack_sz; |
547 | char *child_trap_frame; | 631 | char *child_trap_frame; |
548 | int kernel_thread; | ||
549 | |||
550 | kernel_thread = (regs->tstate & TSTATE_PRIV) ? 1 : 0; | ||
551 | parent_sf = ((struct sparc_stackf *) regs) - 1; | ||
552 | 632 | ||
553 | /* Calculate offset to stack_frame & pt_regs */ | 633 | /* Calculate offset to stack_frame & pt_regs */ |
554 | child_stack_sz = ((STACKFRAME_SZ + TRACEREG_SZ) + | 634 | child_stack_sz = (STACKFRAME_SZ + TRACEREG_SZ); |
555 | (kernel_thread ? STACKFRAME_SZ : 0)); | ||
556 | child_trap_frame = (task_stack_page(p) + | 635 | child_trap_frame = (task_stack_page(p) + |
557 | (THREAD_SIZE - child_stack_sz)); | 636 | (THREAD_SIZE - child_stack_sz)); |
558 | memcpy(child_trap_frame, parent_sf, child_stack_sz); | ||
559 | 637 | ||
560 | t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) | | ||
561 | (0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) | | ||
562 | (((regs->tstate + 1) & TSTATE_CWP) << TI_FLAG_CWP_SHIFT); | ||
563 | t->new_child = 1; | 638 | t->new_child = 1; |
564 | t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS; | 639 | t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS; |
565 | t->kregs = (struct pt_regs *) (child_trap_frame + | 640 | t->kregs = (struct pt_regs *) (child_trap_frame + |
566 | sizeof(struct sparc_stackf)); | 641 | sizeof(struct sparc_stackf)); |
567 | t->fpsaved[0] = 0; | 642 | t->fpsaved[0] = 0; |
568 | 643 | ||
569 | if (kernel_thread) { | 644 | if (unlikely(p->flags & PF_KTHREAD)) { |
570 | struct sparc_stackf *child_sf = (struct sparc_stackf *) | 645 | memset(child_trap_frame, 0, child_stack_sz); |
571 | (child_trap_frame + (STACKFRAME_SZ + TRACEREG_SZ)); | 646 | __thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] = |
572 | 647 | (current_pt_regs()->tstate + 1) & TSTATE_CWP; | |
573 | /* Zero terminate the stack backtrace. */ | 648 | t->current_ds = ASI_P; |
574 | child_sf->fp = NULL; | 649 | t->kregs->u_regs[UREG_G1] = sp; /* function */ |
575 | t->kregs->u_regs[UREG_FP] = | 650 | t->kregs->u_regs[UREG_G2] = arg; |
576 | ((unsigned long) child_sf) - STACK_BIAS; | 651 | return 0; |
652 | } | ||
577 | 653 | ||
578 | t->flags |= ((long)ASI_P << TI_FLAG_CURRENT_DS_SHIFT); | 654 | parent_sf = ((struct sparc_stackf *) regs) - 1; |
579 | t->kregs->u_regs[UREG_G6] = (unsigned long) t; | 655 | memcpy(child_trap_frame, parent_sf, child_stack_sz); |
580 | t->kregs->u_regs[UREG_G4] = (unsigned long) t->task; | 656 | if (t->flags & _TIF_32BIT) { |
581 | } else { | 657 | sp &= 0x00000000ffffffffUL; |
582 | if (t->flags & _TIF_32BIT) { | 658 | regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL; |
583 | sp &= 0x00000000ffffffffUL; | ||
584 | regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL; | ||
585 | } | ||
586 | t->kregs->u_regs[UREG_FP] = sp; | ||
587 | t->flags |= ((long)ASI_AIUS << TI_FLAG_CURRENT_DS_SHIFT); | ||
588 | if (sp != regs->u_regs[UREG_FP]) { | ||
589 | unsigned long csp; | ||
590 | |||
591 | csp = clone_stackframe(sp, regs->u_regs[UREG_FP]); | ||
592 | if (!csp) | ||
593 | return -EFAULT; | ||
594 | t->kregs->u_regs[UREG_FP] = csp; | ||
595 | } | ||
596 | if (t->utraps) | ||
597 | t->utraps[0]++; | ||
598 | } | 659 | } |
660 | t->kregs->u_regs[UREG_FP] = sp; | ||
661 | __thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] = | ||
662 | (regs->tstate + 1) & TSTATE_CWP; | ||
663 | t->current_ds = ASI_AIUS; | ||
664 | if (sp != regs->u_regs[UREG_FP]) { | ||
665 | unsigned long csp; | ||
666 | |||
667 | csp = clone_stackframe(sp, regs->u_regs[UREG_FP]); | ||
668 | if (!csp) | ||
669 | return -EFAULT; | ||
670 | t->kregs->u_regs[UREG_FP] = csp; | ||
671 | } | ||
672 | if (t->utraps) | ||
673 | t->utraps[0]++; | ||
599 | 674 | ||
600 | /* Set the return value for the child. */ | 675 | /* Set the return value for the child. */ |
601 | t->kregs->u_regs[UREG_I0] = current->pid; | 676 | t->kregs->u_regs[UREG_I0] = current->pid; |
@@ -610,45 +685,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, | |||
610 | return 0; | 685 | return 0; |
611 | } | 686 | } |
612 | 687 | ||
613 | /* | ||
614 | * This is the mechanism for creating a new kernel thread. | ||
615 | * | ||
616 | * NOTE! Only a kernel-only process(ie the swapper or direct descendants | ||
617 | * who haven't done an "execve()") should use this: it will work within | ||
618 | * a system call from a "real" process, but the process memory space will | ||
619 | * not be freed until both the parent and the child have exited. | ||
620 | */ | ||
621 | pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) | ||
622 | { | ||
623 | long retval; | ||
624 | |||
625 | /* If the parent runs before fn(arg) is called by the child, | ||
626 | * the input registers of this function can be clobbered. | ||
627 | * So we stash 'fn' and 'arg' into global registers which | ||
628 | * will not be modified by the parent. | ||
629 | */ | ||
630 | __asm__ __volatile__("mov %4, %%g2\n\t" /* Save FN into global */ | ||
631 | "mov %5, %%g3\n\t" /* Save ARG into global */ | ||
632 | "mov %1, %%g1\n\t" /* Clone syscall nr. */ | ||
633 | "mov %2, %%o0\n\t" /* Clone flags. */ | ||
634 | "mov 0, %%o1\n\t" /* usp arg == 0 */ | ||
635 | "t 0x6d\n\t" /* Linux/Sparc clone(). */ | ||
636 | "brz,a,pn %%o1, 1f\n\t" /* Parent, just return. */ | ||
637 | " mov %%o0, %0\n\t" | ||
638 | "jmpl %%g2, %%o7\n\t" /* Call the function. */ | ||
639 | " mov %%g3, %%o0\n\t" /* Set arg in delay. */ | ||
640 | "mov %3, %%g1\n\t" | ||
641 | "t 0x6d\n\t" /* Linux/Sparc exit(). */ | ||
642 | /* Notreached by child. */ | ||
643 | "1:" : | ||
644 | "=r" (retval) : | ||
645 | "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED), | ||
646 | "i" (__NR_exit), "r" (fn), "r" (arg) : | ||
647 | "g1", "g2", "g3", "o0", "o1", "memory", "cc"); | ||
648 | return retval; | ||
649 | } | ||
650 | EXPORT_SYMBOL(kernel_thread); | ||
651 | |||
652 | typedef struct { | 688 | typedef struct { |
653 | union { | 689 | union { |
654 | unsigned int pr_regs[32]; | 690 | unsigned int pr_regs[32]; |
@@ -715,41 +751,6 @@ int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs) | |||
715 | } | 751 | } |
716 | EXPORT_SYMBOL(dump_fpu); | 752 | EXPORT_SYMBOL(dump_fpu); |
717 | 753 | ||
718 | /* | ||
719 | * sparc_execve() executes a new program after the asm stub has set | ||
720 | * things up for us. This should basically do what I want it to. | ||
721 | */ | ||
722 | asmlinkage int sparc_execve(struct pt_regs *regs) | ||
723 | { | ||
724 | int error, base = 0; | ||
725 | struct filename *filename; | ||
726 | |||
727 | /* User register window flush is done by entry.S */ | ||
728 | |||
729 | /* Check for indirect call. */ | ||
730 | if (regs->u_regs[UREG_G1] == 0) | ||
731 | base = 1; | ||
732 | |||
733 | filename = getname((char __user *)regs->u_regs[base + UREG_I0]); | ||
734 | error = PTR_ERR(filename); | ||
735 | if (IS_ERR(filename)) | ||
736 | goto out; | ||
737 | error = do_execve(filename->name, | ||
738 | (const char __user *const __user *) | ||
739 | regs->u_regs[base + UREG_I1], | ||
740 | (const char __user *const __user *) | ||
741 | regs->u_regs[base + UREG_I2], regs); | ||
742 | putname(filename); | ||
743 | if (!error) { | ||
744 | fprs_write(0); | ||
745 | current_thread_info()->xfsr[0] = 0; | ||
746 | current_thread_info()->fpsaved[0] = 0; | ||
747 | regs->tstate &= ~TSTATE_PEF; | ||
748 | } | ||
749 | out: | ||
750 | return error; | ||
751 | } | ||
752 | |||
753 | unsigned long get_wchan(struct task_struct *task) | 754 | unsigned long get_wchan(struct task_struct *task) |
754 | { | 755 | { |
755 | unsigned long pc, fp, bias = 0; | 756 | unsigned long pc, fp, bias = 0; |
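
With the sparc-specific kernel_thread() and execve stubs removed above, kernel threads now come from the generic GENERIC_KERNEL_THREAD helper in kernel/fork.c. A rough sketch of that helper follows; the do_fork() argument order (fn riding in the stack_start slot, arg in stack_size) is an assumption based on the 3.7-era signature and is not part of this diff, but it is what explains copy_thread()'s third parameter being renamed from 'unused' to 'arg' above.

/* Sketch only: generic kernel_thread() as assumed for this kernel era.
 * copy_thread() above receives fn/arg as 'sp' and 'arg', parks them in
 * the child's %g1/%g2, and ret_from_syscall (see syscalls.S later in
 * this patch) calls fn(arg) when the child's %o0 trap-frame slot is 0.
 */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
		       (unsigned long)fn, NULL, (unsigned long)arg,
		       NULL, NULL);
}
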
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c index 484dabac7045..7ff45e4ba681 100644 --- a/arch/sparc/kernel/ptrace_64.c +++ b/arch/sparc/kernel/ptrace_64.c | |||
@@ -151,7 +151,7 @@ static int regwindow64_get(struct task_struct *target, | |||
151 | { | 151 | { |
152 | unsigned long rw_addr = regs->u_regs[UREG_I6]; | 152 | unsigned long rw_addr = regs->u_regs[UREG_I6]; |
153 | 153 | ||
154 | if (test_tsk_thread_flag(current, TIF_32BIT)) { | 154 | if (!test_thread_64bit_stack(rw_addr)) { |
155 | struct reg_window32 win32; | 155 | struct reg_window32 win32; |
156 | int i; | 156 | int i; |
157 | 157 | ||
@@ -176,7 +176,7 @@ static int regwindow64_set(struct task_struct *target, | |||
176 | { | 176 | { |
177 | unsigned long rw_addr = regs->u_regs[UREG_I6]; | 177 | unsigned long rw_addr = regs->u_regs[UREG_I6]; |
178 | 178 | ||
179 | if (test_tsk_thread_flag(current, TIF_32BIT)) { | 179 | if (!test_thread_64bit_stack(rw_addr)) { |
180 | struct reg_window32 win32; | 180 | struct reg_window32 win32; |
181 | int i; | 181 | int i; |
182 | 182 | ||
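
The regwindow64_{get,set} changes above key off the new test_thread_64bit_stack() helper instead of TIF_32BIT. Its definition is not part of the hunks shown here; the sketch below is a plausible form consistent with the 'andcc %sp, 0x1' test added to winfixup.S later in this patch, where a set low bit on the saved stack pointer marks a full 64-bit register window even for a 32-bit task.

/* Sketch only -- the real helper lives in thread_info_64.h, outside the
 * hunks shown in this diff.  A 64-bit task always has 64-bit frames; a
 * 32-bit task may still push a 64-bit frame, flagged by bit 0 of the
 * saved stack pointer.
 */
static inline bool test_thread_64bit_stack(unsigned long sp)
{
	return !test_thread_flag(TIF_32BIT) || (sp & 0x1);
}
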
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index 0800e71d8a88..0eaf0059aaef 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c | |||
@@ -316,6 +316,25 @@ static void __init popc_patch(void) | |||
316 | } | 316 | } |
317 | } | 317 | } |
318 | 318 | ||
319 | static void __init pause_patch(void) | ||
320 | { | ||
321 | struct pause_patch_entry *p; | ||
322 | |||
323 | p = &__pause_3insn_patch; | ||
324 | while (p < &__pause_3insn_patch_end) { | ||
325 | unsigned long i, addr = p->addr; | ||
326 | |||
327 | for (i = 0; i < 3; i++) { | ||
328 | *(unsigned int *) (addr + (i * 4)) = p->insns[i]; | ||
329 | wmb(); | ||
330 | __asm__ __volatile__("flush %0" | ||
331 | : : "r" (addr + (i * 4))); | ||
332 | } | ||
333 | |||
334 | p++; | ||
335 | } | ||
336 | } | ||
337 | |||
319 | #ifdef CONFIG_SMP | 338 | #ifdef CONFIG_SMP |
320 | void __init boot_cpu_id_too_large(int cpu) | 339 | void __init boot_cpu_id_too_large(int cpu) |
321 | { | 340 | { |
@@ -528,6 +547,8 @@ static void __init init_sparc64_elf_hwcap(void) | |||
528 | 547 | ||
529 | if (sparc64_elf_hwcap & AV_SPARC_POPC) | 548 | if (sparc64_elf_hwcap & AV_SPARC_POPC) |
530 | popc_patch(); | 549 | popc_patch(); |
550 | if (sparc64_elf_hwcap & AV_SPARC_PAUSE) | ||
551 | pause_patch(); | ||
531 | } | 552 | } |
532 | 553 | ||
533 | void __init setup_arch(char **cmdline_p) | 554 | void __init setup_arch(char **cmdline_p) |
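
pause_patch() walks a table emitted into the new .pause_3insn_patch section (added to vmlinux.lds.S later in this diff) and overwrites each recorded three-instruction site. The entry layout below is inferred from the loop above; the struct name matches the code, but treat the exact field types as an assumption.

/* Inferred from pause_patch(): each entry records a patch-site address
 * plus the three replacement instruction words.  Entries are collected
 * between __pause_3insn_patch and __pause_3insn_patch_end by the
 * .pause_3insn_patch output section.
 */
struct pause_patch_entry {
	unsigned int	addr;		/* patch-site address */
	unsigned int	insns[3];	/* replacement instructions */
};

extern struct pause_patch_entry __pause_3insn_patch,
				__pause_3insn_patch_end;
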
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 781bcb10b8bd..d94b878577b7 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c | |||
@@ -852,6 +852,8 @@ extern unsigned long xcall_flush_tlb_mm; | |||
852 | extern unsigned long xcall_flush_tlb_pending; | 852 | extern unsigned long xcall_flush_tlb_pending; |
853 | extern unsigned long xcall_flush_tlb_kernel_range; | 853 | extern unsigned long xcall_flush_tlb_kernel_range; |
854 | extern unsigned long xcall_fetch_glob_regs; | 854 | extern unsigned long xcall_fetch_glob_regs; |
855 | extern unsigned long xcall_fetch_glob_pmu; | ||
856 | extern unsigned long xcall_fetch_glob_pmu_n4; | ||
855 | extern unsigned long xcall_receive_signal; | 857 | extern unsigned long xcall_receive_signal; |
856 | extern unsigned long xcall_new_mmu_context_version; | 858 | extern unsigned long xcall_new_mmu_context_version; |
857 | #ifdef CONFIG_KGDB | 859 | #ifdef CONFIG_KGDB |
@@ -1000,6 +1002,15 @@ void smp_fetch_global_regs(void) | |||
1000 | smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0); | 1002 | smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0); |
1001 | } | 1003 | } |
1002 | 1004 | ||
1005 | void smp_fetch_global_pmu(void) | ||
1006 | { | ||
1007 | if (tlb_type == hypervisor && | ||
1008 | sun4v_chip_type >= SUN4V_CHIP_NIAGARA4) | ||
1009 | smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0); | ||
1010 | else | ||
1011 | smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0); | ||
1012 | } | ||
1013 | |||
1003 | /* We know that the window frames of the user have been flushed | 1014 | /* We know that the window frames of the user have been flushed |
1004 | * to the stack before we get here because all callers of us | 1015 | * to the stack before we get here because all callers of us |
1005 | * are flush_tlb_*() routines, and these run after flush_cache_*() | 1016 | * are flush_tlb_*() routines, and these run after flush_cache_*() |
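
smp_fetch_global_pmu() cross-calls into the xcall_fetch_glob_pmu handlers added to ultra.S later in this patch, which fill the per-CPU slots that __global_pmu_poll() then reads in process_64.c. The layout sketch below is implied by those two sides of the patch (PCRs stored at offsets 0..3*8, PICs at 4..7*8, one 64-byte slot per CPU); field order is an inference, not quoted from a header in this diff.

/* Layout implied by __global_pmu_self() and the xcall stores. */
struct global_pmu_snapshot {
	unsigned long	pcr[4];
	unsigned long	pic[4];
};

union global_cpu_snapshot {
	struct global_reg_snapshot	reg;
	struct global_pmu_snapshot	pmu;
};
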
diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c index c3239811a1b5..03c7e929ec34 100644 --- a/arch/sparc/kernel/sys_sparc32.c +++ b/arch/sparc/kernel/sys_sparc32.c | |||
@@ -396,42 +396,6 @@ asmlinkage long compat_sys_rt_sigaction(int sig, | |||
396 | return ret; | 396 | return ret; |
397 | } | 397 | } |
398 | 398 | ||
399 | /* | ||
400 | * sparc32_execve() executes a new program after the asm stub has set | ||
401 | * things up for us. This should basically do what I want it to. | ||
402 | */ | ||
403 | asmlinkage long sparc32_execve(struct pt_regs *regs) | ||
404 | { | ||
405 | int error, base = 0; | ||
406 | struct filename *filename; | ||
407 | |||
408 | /* User register window flush is done by entry.S */ | ||
409 | |||
410 | /* Check for indirect call. */ | ||
411 | if ((u32)regs->u_regs[UREG_G1] == 0) | ||
412 | base = 1; | ||
413 | |||
414 | filename = getname(compat_ptr(regs->u_regs[base + UREG_I0])); | ||
415 | error = PTR_ERR(filename); | ||
416 | if (IS_ERR(filename)) | ||
417 | goto out; | ||
418 | |||
419 | error = compat_do_execve(filename->name, | ||
420 | compat_ptr(regs->u_regs[base + UREG_I1]), | ||
421 | compat_ptr(regs->u_regs[base + UREG_I2]), regs); | ||
422 | |||
423 | putname(filename); | ||
424 | |||
425 | if (!error) { | ||
426 | fprs_write(0); | ||
427 | current_thread_info()->xfsr[0] = 0; | ||
428 | current_thread_info()->fpsaved[0] = 0; | ||
429 | regs->tstate &= ~TSTATE_PEF; | ||
430 | } | ||
431 | out: | ||
432 | return error; | ||
433 | } | ||
434 | |||
435 | #ifdef CONFIG_MODULES | 399 | #ifdef CONFIG_MODULES |
436 | 400 | ||
437 | asmlinkage long sys32_init_module(void __user *umod, u32 len, | 401 | asmlinkage long sys32_init_module(void __user *umod, u32 len, |
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c index 0c9b31b22e07..a8e6eb0a11d5 100644 --- a/arch/sparc/kernel/sys_sparc_32.c +++ b/arch/sparc/kernel/sys_sparc_32.c | |||
@@ -258,27 +258,3 @@ out: | |||
258 | up_read(&uts_sem); | 258 | up_read(&uts_sem); |
259 | return err; | 259 | return err; |
260 | } | 260 | } |
261 | |||
262 | /* | ||
263 | * Do a system call from kernel instead of calling sys_execve so we | ||
264 | * end up with proper pt_regs. | ||
265 | */ | ||
266 | int kernel_execve(const char *filename, | ||
267 | const char *const argv[], | ||
268 | const char *const envp[]) | ||
269 | { | ||
270 | long __res; | ||
271 | register long __g1 __asm__ ("g1") = __NR_execve; | ||
272 | register long __o0 __asm__ ("o0") = (long)(filename); | ||
273 | register long __o1 __asm__ ("o1") = (long)(argv); | ||
274 | register long __o2 __asm__ ("o2") = (long)(envp); | ||
275 | asm volatile ("t 0x10\n\t" | ||
276 | "bcc 1f\n\t" | ||
277 | "mov %%o0, %0\n\t" | ||
278 | "sub %%g0, %%o0, %0\n\t" | ||
279 | "1:\n\t" | ||
280 | : "=r" (__res), "=&r" (__o0) | ||
281 | : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1) | ||
282 | : "cc"); | ||
283 | return __res; | ||
284 | } | ||
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index 11c6c9603e71..51b85feb8b97 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c | |||
@@ -730,24 +730,7 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act, | |||
730 | return ret; | 730 | return ret; |
731 | } | 731 | } |
732 | 732 | ||
733 | /* | 733 | asmlinkage long sys_kern_features(void) |
734 | * Do a system call from kernel instead of calling sys_execve so we | ||
735 | * end up with proper pt_regs. | ||
736 | */ | ||
737 | int kernel_execve(const char *filename, | ||
738 | const char *const argv[], | ||
739 | const char *const envp[]) | ||
740 | { | 734 | { |
741 | long __res; | 735 | return KERN_FEATURE_MIXED_MODE_STACK; |
742 | register long __g1 __asm__ ("g1") = __NR_execve; | ||
743 | register long __o0 __asm__ ("o0") = (long)(filename); | ||
744 | register long __o1 __asm__ ("o1") = (long)(argv); | ||
745 | register long __o2 __asm__ ("o2") = (long)(envp); | ||
746 | asm volatile ("t 0x6d\n\t" | ||
747 | "sub %%g0, %%o0, %0\n\t" | ||
748 | "movcc %%xcc, %%o0, %0\n\t" | ||
749 | : "=r" (__res), "=&r" (__o0) | ||
750 | : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1) | ||
751 | : "cc"); | ||
752 | return __res; | ||
753 | } | 736 | } |
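
sys_kern_features() lets user space discover that this kernel tolerates mixed 32/64-bit stack frames. A minimal userspace probe is sketched below; the syscall number 340 comes from the syscall-table additions later in this diff, while the KERN_FEATURE_MIXED_MODE_STACK bit value is an assumption mirroring the uapi header, which is not shown here.

/* Userspace sketch: probe the new kern_features syscall. */
#include <stdio.h>
#include <errno.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_kern_features
#define __NR_kern_features		340	/* new table slot below */
#endif
#define KERN_FEATURE_MIXED_MODE_STACK	0x00000001	/* assumed value */

int main(void)
{
	long features = syscall(__NR_kern_features);

	if (features < 0 && errno == ENOSYS) {
		puts("kern_features not available (older kernel)");
		return 1;
	}
	printf("mixed-mode stacks %ssupported\n",
	       (features & KERN_FEATURE_MIXED_MODE_STACK) ? "" : "not ");
	return 0;
}
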
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S index 7f5f65d0b3fd..2ef41e67f0be 100644 --- a/arch/sparc/kernel/syscalls.S +++ b/arch/sparc/kernel/syscalls.S | |||
@@ -1,23 +1,19 @@ | |||
1 | /* SunOS's execv() call only specifies the argv argument, the | 1 | /* SunOS's execv() call only specifies the argv argument, the |
2 | * environment settings are the same as the calling processes. | 2 | * environment settings are the same as the calling processes. |
3 | */ | 3 | */ |
4 | sys_execve: | 4 | sys64_execve: |
5 | sethi %hi(sparc_execve), %g1 | 5 | set sys_execve, %g1 |
6 | ba,pt %xcc, execve_merge | 6 | jmpl %g1, %g0 |
7 | or %g1, %lo(sparc_execve), %g1 | 7 | flushw |
8 | 8 | ||
9 | #ifdef CONFIG_COMPAT | 9 | #ifdef CONFIG_COMPAT |
10 | sunos_execv: | 10 | sunos_execv: |
11 | stx %g0, [%sp + PTREGS_OFF + PT_V9_I2] | 11 | mov %g0, %o2 |
12 | sys32_execve: | 12 | sys32_execve: |
13 | sethi %hi(sparc32_execve), %g1 | 13 | set compat_sys_execve, %g1 |
14 | or %g1, %lo(sparc32_execve), %g1 | ||
15 | #endif | ||
16 | |||
17 | execve_merge: | ||
18 | flushw | ||
19 | jmpl %g1, %g0 | 14 | jmpl %g1, %g0 |
20 | add %sp, PTREGS_OFF, %o0 | 15 | flushw |
16 | #endif | ||
21 | 17 | ||
22 | .align 32 | 18 | .align 32 |
23 | sys_sparc_pipe: | 19 | sys_sparc_pipe: |
@@ -112,11 +108,16 @@ sys_clone: | |||
112 | ret_from_syscall: | 108 | ret_from_syscall: |
113 | /* Clear current_thread_info()->new_child. */ | 109 | /* Clear current_thread_info()->new_child. */ |
114 | stb %g0, [%g6 + TI_NEW_CHILD] | 110 | stb %g0, [%g6 + TI_NEW_CHILD] |
115 | ldx [%g6 + TI_FLAGS], %l0 | ||
116 | call schedule_tail | 111 | call schedule_tail |
117 | mov %g7, %o0 | 112 | mov %g7, %o0 |
113 | ldx [%sp + PTREGS_OFF + PT_V9_I0], %o0 | ||
114 | brnz,pt %o0, ret_sys_call | ||
115 | ldx [%g6 + TI_FLAGS], %l0 | ||
116 | ldx [%sp + PTREGS_OFF + PT_V9_G1], %l1 | ||
117 | call %l1 | ||
118 | ldx [%sp + PTREGS_OFF + PT_V9_G2], %o0 | ||
118 | ba,pt %xcc, ret_sys_call | 119 | ba,pt %xcc, ret_sys_call |
119 | ldx [%sp + PTREGS_OFF + PT_V9_I0], %o0 | 120 | mov 0, %o0 |
120 | 121 | ||
121 | .globl sparc_exit | 122 | .globl sparc_exit |
122 | .type sparc_exit,#function | 123 | .type sparc_exit,#function |
@@ -222,7 +223,6 @@ ret_sys_call: | |||
222 | ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc | 223 | ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc |
223 | 224 | ||
224 | 2: | 225 | 2: |
225 | stb %g0, [%g6 + TI_SYS_NOERROR] | ||
226 | /* System call success, clear Carry condition code. */ | 226 | /* System call success, clear Carry condition code. */ |
227 | andn %g3, %g2, %g3 | 227 | andn %g3, %g2, %g3 |
228 | 3: | 228 | 3: |
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S index 63402f9e9f51..5147f574f125 100644 --- a/arch/sparc/kernel/systbls_32.S +++ b/arch/sparc/kernel/systbls_32.S | |||
@@ -85,3 +85,4 @@ sys_call_table: | |||
85 | /*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init | 85 | /*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init |
86 | /*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime | 86 | /*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime |
87 | /*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev | 87 | /*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev |
88 | /*340*/ .long sys_ni_syscall, sys_kcmp | ||
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index 3a58e0d66f51..ebb7f5fc58fb 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S | |||
@@ -86,6 +86,7 @@ sys_call_table32: | |||
86 | .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init | 86 | .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init |
87 | /*330*/ .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime | 87 | /*330*/ .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime |
88 | .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev | 88 | .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev |
89 | /*340*/ .word sys_kern_features, sys_kcmp | ||
89 | 90 | ||
90 | #endif /* CONFIG_COMPAT */ | 91 | #endif /* CONFIG_COMPAT */ |
91 | 92 | ||
@@ -106,7 +107,7 @@ sys_call_table: | |||
106 | /*40*/ .word sys_newlstat, sys_dup, sys_sparc_pipe, sys_times, sys_nis_syscall | 107 | /*40*/ .word sys_newlstat, sys_dup, sys_sparc_pipe, sys_times, sys_nis_syscall |
107 | .word sys_umount, sys_setgid, sys_getgid, sys_signal, sys_geteuid | 108 | .word sys_umount, sys_setgid, sys_getgid, sys_signal, sys_geteuid |
108 | /*50*/ .word sys_getegid, sys_acct, sys_memory_ordering, sys_nis_syscall, sys_ioctl | 109 | /*50*/ .word sys_getegid, sys_acct, sys_memory_ordering, sys_nis_syscall, sys_ioctl |
109 | .word sys_reboot, sys_nis_syscall, sys_symlink, sys_readlink, sys_execve | 110 | .word sys_reboot, sys_nis_syscall, sys_symlink, sys_readlink, sys64_execve |
110 | /*60*/ .word sys_umask, sys_chroot, sys_newfstat, sys_fstat64, sys_getpagesize | 111 | /*60*/ .word sys_umask, sys_chroot, sys_newfstat, sys_fstat64, sys_getpagesize |
111 | .word sys_msync, sys_vfork, sys_pread64, sys_pwrite64, sys_nis_syscall | 112 | .word sys_msync, sys_vfork, sys_pread64, sys_pwrite64, sys_nis_syscall |
112 | /*70*/ .word sys_nis_syscall, sys_mmap, sys_nis_syscall, sys_64_munmap, sys_mprotect | 113 | /*70*/ .word sys_nis_syscall, sys_mmap, sys_nis_syscall, sys_64_munmap, sys_mprotect |
@@ -163,3 +164,4 @@ sys_call_table: | |||
163 | .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init | 164 | .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init |
164 | /*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime | 165 | /*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime |
165 | .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev | 166 | .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev |
167 | /*340*/ .word sys_kern_features, sys_kcmp | ||
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index b66a77968f35..e7ecf1507d90 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c | |||
@@ -2688,8 +2688,8 @@ void __init trap_init(void) | |||
2688 | TI_PRE_COUNT != offsetof(struct thread_info, | 2688 | TI_PRE_COUNT != offsetof(struct thread_info, |
2689 | preempt_count) || | 2689 | preempt_count) || |
2690 | TI_NEW_CHILD != offsetof(struct thread_info, new_child) || | 2690 | TI_NEW_CHILD != offsetof(struct thread_info, new_child) || |
2691 | TI_SYS_NOERROR != offsetof(struct thread_info, | 2691 | TI_CURRENT_DS != offsetof(struct thread_info, |
2692 | syscall_noerror) || | 2692 | current_ds) || |
2693 | TI_RESTART_BLOCK != offsetof(struct thread_info, | 2693 | TI_RESTART_BLOCK != offsetof(struct thread_info, |
2694 | restart_block) || | 2694 | restart_block) || |
2695 | TI_KUNA_REGS != offsetof(struct thread_info, | 2695 | TI_KUNA_REGS != offsetof(struct thread_info, |
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c index f81d038f7340..8201c25e7669 100644 --- a/arch/sparc/kernel/unaligned_64.c +++ b/arch/sparc/kernel/unaligned_64.c | |||
@@ -113,21 +113,24 @@ static inline long sign_extend_imm13(long imm) | |||
113 | 113 | ||
114 | static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) | 114 | static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) |
115 | { | 115 | { |
116 | unsigned long value; | 116 | unsigned long value, fp; |
117 | 117 | ||
118 | if (reg < 16) | 118 | if (reg < 16) |
119 | return (!reg ? 0 : regs->u_regs[reg]); | 119 | return (!reg ? 0 : regs->u_regs[reg]); |
120 | |||
121 | fp = regs->u_regs[UREG_FP]; | ||
122 | |||
120 | if (regs->tstate & TSTATE_PRIV) { | 123 | if (regs->tstate & TSTATE_PRIV) { |
121 | struct reg_window *win; | 124 | struct reg_window *win; |
122 | win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS); | 125 | win = (struct reg_window *)(fp + STACK_BIAS); |
123 | value = win->locals[reg - 16]; | 126 | value = win->locals[reg - 16]; |
124 | } else if (test_thread_flag(TIF_32BIT)) { | 127 | } else if (!test_thread_64bit_stack(fp)) { |
125 | struct reg_window32 __user *win32; | 128 | struct reg_window32 __user *win32; |
126 | win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); | 129 | win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp)); |
127 | get_user(value, &win32->locals[reg - 16]); | 130 | get_user(value, &win32->locals[reg - 16]); |
128 | } else { | 131 | } else { |
129 | struct reg_window __user *win; | 132 | struct reg_window __user *win; |
130 | win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); | 133 | win = (struct reg_window __user *)(fp + STACK_BIAS); |
131 | get_user(value, &win->locals[reg - 16]); | 134 | get_user(value, &win->locals[reg - 16]); |
132 | } | 135 | } |
133 | return value; | 136 | return value; |
@@ -135,19 +138,24 @@ static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) | |||
135 | 138 | ||
136 | static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs) | 139 | static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs) |
137 | { | 140 | { |
141 | unsigned long fp; | ||
142 | |||
138 | if (reg < 16) | 143 | if (reg < 16) |
139 | return ®s->u_regs[reg]; | 144 | return ®s->u_regs[reg]; |
145 | |||
146 | fp = regs->u_regs[UREG_FP]; | ||
147 | |||
140 | if (regs->tstate & TSTATE_PRIV) { | 148 | if (regs->tstate & TSTATE_PRIV) { |
141 | struct reg_window *win; | 149 | struct reg_window *win; |
142 | win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS); | 150 | win = (struct reg_window *)(fp + STACK_BIAS); |
143 | return &win->locals[reg - 16]; | 151 | return &win->locals[reg - 16]; |
144 | } else if (test_thread_flag(TIF_32BIT)) { | 152 | } else if (!test_thread_64bit_stack(fp)) { |
145 | struct reg_window32 *win32; | 153 | struct reg_window32 *win32; |
146 | win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP])); | 154 | win32 = (struct reg_window32 *)((unsigned long)((u32)fp)); |
147 | return (unsigned long *)&win32->locals[reg - 16]; | 155 | return (unsigned long *)&win32->locals[reg - 16]; |
148 | } else { | 156 | } else { |
149 | struct reg_window *win; | 157 | struct reg_window *win; |
150 | win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS); | 158 | win = (struct reg_window *)(fp + STACK_BIAS); |
151 | return &win->locals[reg - 16]; | 159 | return &win->locals[reg - 16]; |
152 | } | 160 | } |
153 | } | 161 | } |
@@ -392,13 +400,15 @@ int handle_popc(u32 insn, struct pt_regs *regs) | |||
392 | if (rd) | 400 | if (rd) |
393 | regs->u_regs[rd] = ret; | 401 | regs->u_regs[rd] = ret; |
394 | } else { | 402 | } else { |
395 | if (test_thread_flag(TIF_32BIT)) { | 403 | unsigned long fp = regs->u_regs[UREG_FP]; |
404 | |||
405 | if (!test_thread_64bit_stack(fp)) { | ||
396 | struct reg_window32 __user *win32; | 406 | struct reg_window32 __user *win32; |
397 | win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); | 407 | win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp)); |
398 | put_user(ret, &win32->locals[rd - 16]); | 408 | put_user(ret, &win32->locals[rd - 16]); |
399 | } else { | 409 | } else { |
400 | struct reg_window __user *win; | 410 | struct reg_window __user *win; |
401 | win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); | 411 | win = (struct reg_window __user *)(fp + STACK_BIAS); |
402 | put_user(ret, &win->locals[rd - 16]); | 412 | put_user(ret, &win->locals[rd - 16]); |
403 | } | 413 | } |
404 | } | 414 | } |
@@ -554,7 +564,7 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs) | |||
554 | reg[0] = 0; | 564 | reg[0] = 0; |
555 | if ((insn & 0x780000) == 0x180000) | 565 | if ((insn & 0x780000) == 0x180000) |
556 | reg[1] = 0; | 566 | reg[1] = 0; |
557 | } else if (test_thread_flag(TIF_32BIT)) { | 567 | } else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) { |
558 | put_user(0, (int __user *) reg); | 568 | put_user(0, (int __user *) reg); |
559 | if ((insn & 0x780000) == 0x180000) | 569 | if ((insn & 0x780000) == 0x180000) |
560 | put_user(0, ((int __user *) reg) + 1); | 570 | put_user(0, ((int __user *) reg) + 1); |
diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c index 08e074b7eb6a..c096c624ac4d 100644 --- a/arch/sparc/kernel/visemul.c +++ b/arch/sparc/kernel/visemul.c | |||
@@ -149,21 +149,24 @@ static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2, | |||
149 | 149 | ||
150 | static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) | 150 | static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) |
151 | { | 151 | { |
152 | unsigned long value; | 152 | unsigned long value, fp; |
153 | 153 | ||
154 | if (reg < 16) | 154 | if (reg < 16) |
155 | return (!reg ? 0 : regs->u_regs[reg]); | 155 | return (!reg ? 0 : regs->u_regs[reg]); |
156 | |||
157 | fp = regs->u_regs[UREG_FP]; | ||
158 | |||
156 | if (regs->tstate & TSTATE_PRIV) { | 159 | if (regs->tstate & TSTATE_PRIV) { |
157 | struct reg_window *win; | 160 | struct reg_window *win; |
158 | win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS); | 161 | win = (struct reg_window *)(fp + STACK_BIAS); |
159 | value = win->locals[reg - 16]; | 162 | value = win->locals[reg - 16]; |
160 | } else if (test_thread_flag(TIF_32BIT)) { | 163 | } else if (!test_thread_64bit_stack(fp)) { |
161 | struct reg_window32 __user *win32; | 164 | struct reg_window32 __user *win32; |
162 | win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); | 165 | win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp)); |
163 | get_user(value, &win32->locals[reg - 16]); | 166 | get_user(value, &win32->locals[reg - 16]); |
164 | } else { | 167 | } else { |
165 | struct reg_window __user *win; | 168 | struct reg_window __user *win; |
166 | win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); | 169 | win = (struct reg_window __user *)(fp + STACK_BIAS); |
167 | get_user(value, &win->locals[reg - 16]); | 170 | get_user(value, &win->locals[reg - 16]); |
168 | } | 171 | } |
169 | return value; | 172 | return value; |
@@ -172,16 +175,18 @@ static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) | |||
172 | static inline unsigned long __user *__fetch_reg_addr_user(unsigned int reg, | 175 | static inline unsigned long __user *__fetch_reg_addr_user(unsigned int reg, |
173 | struct pt_regs *regs) | 176 | struct pt_regs *regs) |
174 | { | 177 | { |
178 | unsigned long fp = regs->u_regs[UREG_FP]; | ||
179 | |||
175 | BUG_ON(reg < 16); | 180 | BUG_ON(reg < 16); |
176 | BUG_ON(regs->tstate & TSTATE_PRIV); | 181 | BUG_ON(regs->tstate & TSTATE_PRIV); |
177 | 182 | ||
178 | if (test_thread_flag(TIF_32BIT)) { | 183 | if (!test_thread_64bit_stack(fp)) { |
179 | struct reg_window32 __user *win32; | 184 | struct reg_window32 __user *win32; |
180 | win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); | 185 | win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp)); |
181 | return (unsigned long __user *)&win32->locals[reg - 16]; | 186 | return (unsigned long __user *)&win32->locals[reg - 16]; |
182 | } else { | 187 | } else { |
183 | struct reg_window __user *win; | 188 | struct reg_window __user *win; |
184 | win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); | 189 | win = (struct reg_window __user *)(fp + STACK_BIAS); |
185 | return &win->locals[reg - 16]; | 190 | return &win->locals[reg - 16]; |
186 | } | 191 | } |
187 | } | 192 | } |
@@ -204,7 +209,7 @@ static void store_reg(struct pt_regs *regs, unsigned long val, unsigned long rd) | |||
204 | } else { | 209 | } else { |
205 | unsigned long __user *rd_user = __fetch_reg_addr_user(rd, regs); | 210 | unsigned long __user *rd_user = __fetch_reg_addr_user(rd, regs); |
206 | 211 | ||
207 | if (test_thread_flag(TIF_32BIT)) | 212 | if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) |
208 | __put_user((u32)val, (u32 __user *)rd_user); | 213 | __put_user((u32)val, (u32 __user *)rd_user); |
209 | else | 214 | else |
210 | __put_user(val, rd_user); | 215 | __put_user(val, rd_user); |
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index 89c2c29f154b..0bacceb19150 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S | |||
@@ -132,6 +132,11 @@ SECTIONS | |||
132 | *(.popc_6insn_patch) | 132 | *(.popc_6insn_patch) |
133 | __popc_6insn_patch_end = .; | 133 | __popc_6insn_patch_end = .; |
134 | } | 134 | } |
135 | .pause_3insn_patch : { | ||
136 | __pause_3insn_patch = .; | ||
137 | *(.pause_3insn_patch) | ||
138 | __pause_3insn_patch_end = .; | ||
139 | } | ||
135 | PERCPU_SECTION(SMP_CACHE_BYTES) | 140 | PERCPU_SECTION(SMP_CACHE_BYTES) |
136 | 141 | ||
137 | . = ALIGN(PAGE_SIZE); | 142 | . = ALIGN(PAGE_SIZE); |
diff --git a/arch/sparc/kernel/winfixup.S b/arch/sparc/kernel/winfixup.S index a6b0863c27df..1e67ce958369 100644 --- a/arch/sparc/kernel/winfixup.S +++ b/arch/sparc/kernel/winfixup.S | |||
@@ -43,6 +43,8 @@ spill_fixup_mna: | |||
43 | spill_fixup_dax: | 43 | spill_fixup_dax: |
44 | TRAP_LOAD_THREAD_REG(%g6, %g1) | 44 | TRAP_LOAD_THREAD_REG(%g6, %g1) |
45 | ldx [%g6 + TI_FLAGS], %g1 | 45 | ldx [%g6 + TI_FLAGS], %g1 |
46 | andcc %sp, 0x1, %g0 | ||
47 | movne %icc, 0, %g1 | ||
46 | andcc %g1, _TIF_32BIT, %g0 | 48 | andcc %g1, _TIF_32BIT, %g0 |
47 | ldub [%g6 + TI_WSAVED], %g1 | 49 | ldub [%g6 + TI_WSAVED], %g1 |
48 | sll %g1, 3, %g3 | 50 | sll %g1, 3, %g3 |
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S index 4d502da3de78..85c233d0a340 100644 --- a/arch/sparc/lib/atomic_64.S +++ b/arch/sparc/lib/atomic_64.S | |||
@@ -1,6 +1,6 @@ | |||
1 | /* atomic.S: These things are too big to do inline. | 1 | /* atomic.S: These things are too big to do inline. |
2 | * | 2 | * |
3 | * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net) | 3 | * Copyright (C) 1999, 2007, 2012 David S. Miller (davem@davemloft.net) |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <linux/linkage.h> | 6 | #include <linux/linkage.h> |
@@ -117,3 +117,17 @@ ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */ | |||
117 | sub %g1, %o0, %o0 | 117 | sub %g1, %o0, %o0 |
118 | 2: BACKOFF_SPIN(%o2, %o3, 1b) | 118 | 2: BACKOFF_SPIN(%o2, %o3, 1b) |
119 | ENDPROC(atomic64_sub_ret) | 119 | ENDPROC(atomic64_sub_ret) |
120 | |||
121 | ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */ | ||
122 | BACKOFF_SETUP(%o2) | ||
123 | 1: ldx [%o0], %g1 | ||
124 | brlez,pn %g1, 3f | ||
125 | sub %g1, 1, %g7 | ||
126 | casx [%o0], %g1, %g7 | ||
127 | cmp %g1, %g7 | ||
128 | bne,pn %xcc, BACKOFF_LABEL(2f, 1b) | ||
129 | nop | ||
130 | 3: retl | ||
131 | sub %g1, 1, %o0 | ||
132 | 2: BACKOFF_SPIN(%o2, %o3, 1b) | ||
133 | ENDPROC(atomic64_dec_if_positive) | ||
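
The new atomic64_dec_if_positive routine follows the same backoff/compare-and-swap pattern as the other atomic64_* helpers in this file. Below is a C-level sketch of the semantics the assembly implements, written with the kernel's own atomic64 primitives; it is an illustration of the logic, not the generated code.

/* Semantics of the assembly above: decrement *v only while it is still
 * positive, and return old - 1 either way so the caller can detect
 * (result < 0) that no decrement happened.  brlez is the early exit,
 * casx is the cmpxchg, and the BACKOFF_* macros form the retry loop.
 */
static inline long atomic64_dec_if_positive_sketch(atomic64_t *v)
{
	long old, dec;

	do {
		old = atomic64_read(v);
		dec = old - 1;
		if (old <= 0)		/* brlez,pn %g1, 3f */
			return dec;	/* nothing stored; result is negative */
	} while (atomic64_cmpxchg(v, old, dec) != old);

	return dec;
}
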
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c index ee31b884c61b..0c4e35e522fa 100644 --- a/arch/sparc/lib/ksyms.c +++ b/arch/sparc/lib/ksyms.c | |||
@@ -116,6 +116,7 @@ EXPORT_SYMBOL(atomic64_add); | |||
116 | EXPORT_SYMBOL(atomic64_add_ret); | 116 | EXPORT_SYMBOL(atomic64_add_ret); |
117 | EXPORT_SYMBOL(atomic64_sub); | 117 | EXPORT_SYMBOL(atomic64_sub); |
118 | EXPORT_SYMBOL(atomic64_sub_ret); | 118 | EXPORT_SYMBOL(atomic64_sub_ret); |
119 | EXPORT_SYMBOL(atomic64_dec_if_positive); | ||
119 | 120 | ||
120 | /* Atomic bit operations. */ | 121 | /* Atomic bit operations. */ |
121 | EXPORT_SYMBOL(test_and_set_bit); | 122 | EXPORT_SYMBOL(test_and_set_bit); |
diff --git a/arch/sparc/math-emu/math_64.c b/arch/sparc/math-emu/math_64.c index 1704068da928..034aadbff036 100644 --- a/arch/sparc/math-emu/math_64.c +++ b/arch/sparc/math-emu/math_64.c | |||
@@ -320,7 +320,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f, bool illegal_insn_trap) | |||
320 | XR = 0; | 320 | XR = 0; |
321 | else if (freg < 16) | 321 | else if (freg < 16) |
322 | XR = regs->u_regs[freg]; | 322 | XR = regs->u_regs[freg]; |
323 | else if (test_thread_flag(TIF_32BIT)) { | 323 | else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) { |
324 | struct reg_window32 __user *win32; | 324 | struct reg_window32 __user *win32; |
325 | flushw_user (); | 325 | flushw_user (); |
326 | win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); | 326 | win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); |
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 9e28a118e6a4..85be1ca539b2 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c | |||
@@ -624,7 +624,7 @@ static void __init inherit_prom_mappings(void) | |||
624 | void prom_world(int enter) | 624 | void prom_world(int enter) |
625 | { | 625 | { |
626 | if (!enter) | 626 | if (!enter) |
627 | set_fs((mm_segment_t) { get_thread_current_ds() }); | 627 | set_fs(get_fs()); |
628 | 628 | ||
629 | __asm__ __volatile__("flushw"); | 629 | __asm__ __volatile__("flushw"); |
630 | } | 630 | } |
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S index 874162a11ceb..f8e13d421fcb 100644 --- a/arch/sparc/mm/ultra.S +++ b/arch/sparc/mm/ultra.S | |||
@@ -481,8 +481,8 @@ xcall_sync_tick: | |||
481 | 481 | ||
482 | .globl xcall_fetch_glob_regs | 482 | .globl xcall_fetch_glob_regs |
483 | xcall_fetch_glob_regs: | 483 | xcall_fetch_glob_regs: |
484 | sethi %hi(global_reg_snapshot), %g1 | 484 | sethi %hi(global_cpu_snapshot), %g1 |
485 | or %g1, %lo(global_reg_snapshot), %g1 | 485 | or %g1, %lo(global_cpu_snapshot), %g1 |
486 | __GET_CPUID(%g2) | 486 | __GET_CPUID(%g2) |
487 | sllx %g2, 6, %g3 | 487 | sllx %g2, 6, %g3 |
488 | add %g1, %g3, %g1 | 488 | add %g1, %g3, %g1 |
@@ -509,6 +509,66 @@ xcall_fetch_glob_regs: | |||
509 | stx %g3, [%g1 + GR_SNAP_THREAD] | 509 | stx %g3, [%g1 + GR_SNAP_THREAD] |
510 | retry | 510 | retry |
511 | 511 | ||
512 | .globl xcall_fetch_glob_pmu | ||
513 | xcall_fetch_glob_pmu: | ||
514 | sethi %hi(global_cpu_snapshot), %g1 | ||
515 | or %g1, %lo(global_cpu_snapshot), %g1 | ||
516 | __GET_CPUID(%g2) | ||
517 | sllx %g2, 6, %g3 | ||
518 | add %g1, %g3, %g1 | ||
519 | rd %pic, %g7 | ||
520 | stx %g7, [%g1 + (4 * 8)] | ||
521 | rd %pcr, %g7 | ||
522 | stx %g7, [%g1 + (0 * 8)] | ||
523 | retry | ||
524 | |||
525 | .globl xcall_fetch_glob_pmu_n4 | ||
526 | xcall_fetch_glob_pmu_n4: | ||
527 | sethi %hi(global_cpu_snapshot), %g1 | ||
528 | or %g1, %lo(global_cpu_snapshot), %g1 | ||
529 | __GET_CPUID(%g2) | ||
530 | sllx %g2, 6, %g3 | ||
531 | add %g1, %g3, %g1 | ||
532 | |||
533 | ldxa [%g0] ASI_PIC, %g7 | ||
534 | stx %g7, [%g1 + (4 * 8)] | ||
535 | mov 0x08, %g3 | ||
536 | ldxa [%g3] ASI_PIC, %g7 | ||
537 | stx %g7, [%g1 + (5 * 8)] | ||
538 | mov 0x10, %g3 | ||
539 | ldxa [%g3] ASI_PIC, %g7 | ||
540 | stx %g7, [%g1 + (6 * 8)] | ||
541 | mov 0x18, %g3 | ||
542 | ldxa [%g3] ASI_PIC, %g7 | ||
543 | stx %g7, [%g1 + (7 * 8)] | ||
544 | |||
545 | mov %o0, %g2 | ||
546 | mov %o1, %g3 | ||
547 | mov %o5, %g7 | ||
548 | |||
549 | mov HV_FAST_VT_GET_PERFREG, %o5 | ||
550 | mov 3, %o0 | ||
551 | ta HV_FAST_TRAP | ||
552 | stx %o1, [%g1 + (3 * 8)] | ||
553 | mov HV_FAST_VT_GET_PERFREG, %o5 | ||
554 | mov 2, %o0 | ||
555 | ta HV_FAST_TRAP | ||
556 | stx %o1, [%g1 + (2 * 8)] | ||
557 | mov HV_FAST_VT_GET_PERFREG, %o5 | ||
558 | mov 1, %o0 | ||
559 | ta HV_FAST_TRAP | ||
560 | stx %o1, [%g1 + (1 * 8)] | ||
561 | mov HV_FAST_VT_GET_PERFREG, %o5 | ||
562 | mov 0, %o0 | ||
563 | ta HV_FAST_TRAP | ||
564 | stx %o1, [%g1 + (0 * 8)] | ||
565 | |||
566 | mov %g2, %o0 | ||
567 | mov %g3, %o1 | ||
568 | mov %g7, %o5 | ||
569 | |||
570 | retry | ||
571 | |||
512 | #ifdef DCACHE_ALIASING_POSSIBLE | 572 | #ifdef DCACHE_ALIASING_POSSIBLE |
513 | .align 32 | 573 | .align 32 |
514 | .globl xcall_flush_dcache_page_cheetah | 574 | .globl xcall_flush_dcache_page_cheetah |