Diffstat (limited to 'arch/sparc/include')

-rw-r--r--  arch/sparc/include/asm/atomic_64.h        |  4
-rw-r--r--  arch/sparc/include/asm/backoff.h          | 69
-rw-r--r--  arch/sparc/include/asm/compat.h           |  5
-rw-r--r--  arch/sparc/include/asm/processor_64.h     | 17
-rw-r--r--  arch/sparc/include/asm/prom.h             |  8
-rw-r--r--  arch/sparc/include/asm/thread_info_64.h   |  5
-rw-r--r--  arch/sparc/include/asm/ttable.h           | 24
-rw-r--r--  arch/sparc/include/uapi/asm/unistd.h      |  7
8 files changed, 116 insertions(+), 23 deletions(-)
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index ce35a1cf1a20..be56a244c9cf 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -1,7 +1,7 @@
 /* atomic.h: Thankfully the V9 is at least reasonable for this
  * stuff.
  *
- * Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1996, 1997, 2000, 2012 David S. Miller (davem@redhat.com)
  */
 
 #ifndef __ARCH_SPARC64_ATOMIC__
@@ -106,6 +106,8 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
 
 #define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
 
+extern long atomic64_dec_if_positive(atomic64_t *v);
+
 /* Atomic operations are already serializing */
 #define smp_mb__before_atomic_dec()	barrier()
 #define smp_mb__after_atomic_dec()	barrier()
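The newly exported atomic64_dec_if_positive() decrements a counter only when the result stays non-negative, and returns the decremented value (a negative result means no decrement happened). A minimal usage sketch, assuming a hypothetical credit counter and helper take_credit():

    #include <linux/atomic.h>
    #include <linux/types.h>

    static atomic64_t credits = ATOMIC64_INIT(16);

    /* Take one credit, but never let the counter drop below zero. */
    static bool take_credit(void)
    {
            /* Returns old - 1; a negative result means the counter was
             * already zero and was left untouched. */
            return atomic64_dec_if_positive(&credits) >= 0;
    }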
diff --git a/arch/sparc/include/asm/backoff.h b/arch/sparc/include/asm/backoff.h
index db3af0d30fb1..4e02086b839c 100644
--- a/arch/sparc/include/asm/backoff.h
+++ b/arch/sparc/include/asm/backoff.h
@@ -1,6 +1,46 @@
 #ifndef _SPARC64_BACKOFF_H
 #define _SPARC64_BACKOFF_H
 
+/* The macros in this file implement an exponential backoff facility
+ * for atomic operations.
+ *
+ * When multiple threads compete on an atomic operation, it is
+ * possible for one thread to be continually denied a successful
+ * completion of the compare-and-swap instruction.  Heavily
+ * threaded cpu implementations like Niagara can compound this
+ * problem even further.
+ *
+ * When an atomic operation fails and needs to be retried, we spin a
+ * certain number of times.  At each subsequent failure of the same
+ * operation we double the spin count, realizing an exponential
+ * backoff.
+ *
+ * When we spin, we try to use an operation that will cause the
+ * current cpu strand to block, and therefore make the core fully
+ * available to any other runnable strands.  There are two
+ * options, based upon cpu capabilities.
+ *
+ * On all cpus prior to SPARC-T4 we do three dummy reads of the
+ * condition code register.  Each read blocks the strand for something
+ * between 40 and 50 cpu cycles.
+ *
+ * For SPARC-T4 and later we have a special "pause" instruction
+ * available.  This is implemented using writes to register %asr27.
+ * The cpu will block the number of cycles written into the register,
+ * unless a disrupting trap happens first.  SPARC-T4 specifically
+ * implements pause with a granularity of 8 cycles.  Each strand has
+ * an internal pause counter which decrements every 8 cycles.  So the
+ * chip shifts the %asr27 value down by 3 bits, and writes the result
+ * into the pause counter.  If a value smaller than 8 is written, the
+ * chip blocks for 1 cycle.
+ *
+ * To achieve the same amount of backoff as the three %ccr reads give
+ * on earlier chips, we shift the backoff value up by 7 bits.  (Three
+ * %ccr reads block for about 128 cycles, and 1 << 7 == 128.)  We write
+ * the whole amount we want to block into the pause register, rather
+ * than looping and writing 128 each time.
+ */
+
 #define BACKOFF_LIMIT	(4 * 1024)
 
 #ifdef CONFIG_SMP
@@ -11,16 +51,25 @@
 #define BACKOFF_LABEL(spin_label, continue_label) \
 	spin_label
 
 #define BACKOFF_SPIN(reg, tmp, label)	\
 	mov	reg, tmp; \
-88:	brnz,pt	tmp, 88b; \
-	sub	tmp, 1, tmp; \
-	set	BACKOFF_LIMIT, tmp; \
-	cmp	reg, tmp; \
-	bg,pn	%xcc, label; \
-	 nop; \
-	ba,pt	%xcc, label; \
-	 sllx	reg, 1, reg;
+88:	rd	%ccr, %g0; \
+	rd	%ccr, %g0; \
+	rd	%ccr, %g0; \
+	.section	.pause_3insn_patch,"ax";\
+	.word	88b; \
+	sllx	tmp, 7, tmp; \
+	wr	tmp, 0, %asr27; \
+	clr	tmp; \
+	.previous; \
+	brnz,pt	tmp, 88b; \
+	 sub	tmp, 1, tmp; \
+	set	BACKOFF_LIMIT, tmp; \
+	cmp	reg, tmp; \
+	bg,pn	%xcc, label; \
+	 nop; \
+	ba,pt	%xcc, label; \
+	 sllx	reg, 1, reg;
 
 #else
 
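The BACKOFF_SPIN assembly is easier to follow in C. Below is a rough, illustrative sketch of the same retry-and-double loop, assuming a hypothetical cpu_pause() stand-in for the %ccr reads / %asr27 write and a hypothetical atomic_add_backoff() caller; it is not the kernel implementation, only its shape:

    #include <stdatomic.h>

    #define BACKOFF_LIMIT   (4 * 1024)

    /* Stand-in for the three %ccr reads (pre-T4) or the %asr27 write (T4+). */
    static void cpu_pause(unsigned long spins)
    {
            for (volatile unsigned long i = 0; i < spins; i++)
                    ;
    }

    /* Add 'delta' to *v with compare-and-swap, doubling the spin count on
     * every failed attempt, capped at BACKOFF_LIMIT. */
    static long atomic_add_backoff(atomic_long *v, long delta)
    {
            unsigned long backoff = 1;
            long old = atomic_load_explicit(v, memory_order_relaxed);

            while (!atomic_compare_exchange_weak(v, &old, old + delta)) {
                    cpu_pause(backoff);             /* yield the strand */
                    if (backoff < BACKOFF_LIMIT)
                            backoff <<= 1;          /* exponential backoff */
            }
            return old + delta;
    }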
diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h
index cef99fbc0a21..830502fe62b4 100644
--- a/arch/sparc/include/asm/compat.h
+++ b/arch/sparc/include/asm/compat.h
@@ -232,9 +232,10 @@ static inline void __user *arch_compat_alloc_user_space(long len)
 	struct pt_regs *regs = current_thread_info()->kregs;
 	unsigned long usp = regs->u_regs[UREG_I6];
 
-	if (!(test_thread_flag(TIF_32BIT)))
+	if (test_thread_64bit_stack(usp))
 		usp += STACK_BIAS;
-	else
+
+	if (test_thread_flag(TIF_32BIT))
 		usp &= 0xffffffffUL;
 
 	usp -= len;
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h
index 4e5a483122a0..721e25f0e2ea 100644
--- a/arch/sparc/include/asm/processor_64.h
+++ b/arch/sparc/include/asm/processor_64.h
@@ -196,7 +196,22 @@ extern unsigned long get_wchan(struct task_struct *task);
 #define KSTK_EIP(tsk)  (task_pt_regs(tsk)->tpc)
 #define KSTK_ESP(tsk)  (task_pt_regs(tsk)->u_regs[UREG_FP])
 
-#define cpu_relax()	barrier()
+/* Please see the commentary in asm/backoff.h for a description of
+ * what these instructions are doing and how they have been chosen.
+ * To make a long story short, we are trying to yield the current cpu
+ * strand during busy loops.
+ */
+#define cpu_relax()	asm volatile("\n99:\n\t"			\
+				     "rd	%%ccr, %%g0\n\t"	\
+				     "rd	%%ccr, %%g0\n\t"	\
+				     "rd	%%ccr, %%g0\n\t"	\
+				     ".section	.pause_3insn_patch,\"ax\"\n\t"\
+				     ".word	99b\n\t"		\
+				     "wr	%%g0, 128, %%asr27\n\t"	\
+				     "nop\n\t"				\
+				     "nop\n\t"				\
+				     ".previous"			\
+				     ::: "memory")
 
 /* Prefetch support.  This is tuned for UltraSPARC-III and later.
  * UltraSPARC-I will treat these as nops, and UltraSPARC-II has
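For reference, cpu_relax() is meant to be dropped into busy-wait loops such as the one sketched below (a hypothetical poll loop, not code from this patch):

    /* Spin until *flag becomes non-zero, yielding the cpu strand on
     * every iteration so sibling strands can make progress. */
    static void wait_for_flag(volatile int *flag)
    {
            while (!*flag)
                    cpu_relax();
    }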
diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h
index c28765110706..67c62578d170 100644
--- a/arch/sparc/include/asm/prom.h
+++ b/arch/sparc/include/asm/prom.h
@@ -63,5 +63,13 @@ extern char *of_console_options;
 extern void irq_trans_init(struct device_node *dp);
 extern char *build_path_component(struct device_node *dp);
 
+/* SPARC has local implementations */
+extern int of_address_to_resource(struct device_node *dev, int index,
+				  struct resource *r);
+#define of_address_to_resource of_address_to_resource
+
+void __iomem *of_iomap(struct device_node *node, int index);
+#define of_iomap of_iomap
+
 #endif /* __KERNEL__ */
 #endif /* _SPARC_PROM_H */
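The two prototypes above are the standard OF address helpers that sparc now provides locally. A hedged sketch of how a driver typically consumes them (the probe function and error paths are illustrative only, not part of this patch):

    #include <linux/of_address.h>
    #include <linux/io.h>

    static int example_probe(struct device_node *dp)
    {
            struct resource res;
            void __iomem *regs;

            /* Translate "reg" entry 0 of the node into a CPU resource. */
            if (of_address_to_resource(dp, 0, &res))
                    return -ENODEV;

            /* Or simply map that range directly. */
            regs = of_iomap(dp, 0);
            if (!regs)
                    return -ENOMEM;

            /* ... readl()/writel() against regs ... */

            iounmap(regs);
            return 0;
    }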
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 4e2276631081..a3fe4dcc0aa6 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -259,6 +259,11 @@ static inline bool test_and_clear_restore_sigmask(void)
 
 #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
 
+#define thread32_stack_is_64bit(__SP) (((__SP) & 0x1) != 0)
+#define test_thread_64bit_stack(__SP) \
+	((test_thread_flag(TIF_32BIT) && !thread32_stack_is_64bit(__SP)) ? \
+	 false : true)
+
 #endif	/* !__ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
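These macros encode the mixed-mode stack convention: a 32-bit task signals a 64-bit stack frame by setting bit 0 of its stack pointer. The same decision expressed as a plain C sketch (frame_is_64bit() is a hypothetical helper, not part of the patch):

    #include <stdbool.h>

    static bool frame_is_64bit(unsigned long sp, bool task_is_32bit)
    {
            if (!task_is_32bit)
                    return true;            /* 64-bit tasks always use 64-bit frames */
            return (sp & 0x1) != 0;         /* bit 0 set: 64-bit frame under a 32-bit task */
    }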
diff --git a/arch/sparc/include/asm/ttable.h b/arch/sparc/include/asm/ttable.h
index 48f2807d3265..71b5a67522ab 100644
--- a/arch/sparc/include/asm/ttable.h
+++ b/arch/sparc/include/asm/ttable.h
@@ -372,7 +372,9 @@ etrap_spill_fixup_64bit: \
 
 /* Normal 32bit spill */
 #define SPILL_2_GENERIC(ASI)				\
-	srl	%sp, 0, %sp;			\
+	and	%sp, 1, %g3;			\
+	brnz,pn	%g3, (. - (128 + 4));		\
+	 srl	%sp, 0, %sp;			\
 	stwa	%l0, [%sp + %g0] ASI;		\
 	mov	0x04, %g3;			\
 	stwa	%l1, [%sp + %g3] ASI;		\
@@ -398,14 +400,16 @@ etrap_spill_fixup_64bit: \
 	stwa	%i6, [%g1 + %g0] ASI;		\
 	stwa	%i7, [%g1 + %g3] ASI;		\
 	saved;					\
-	retry; nop; nop;			\
+	retry;					\
 	b,a,pt	%xcc, spill_fixup_dax;		\
 	b,a,pt	%xcc, spill_fixup_mna;		\
 	b,a,pt	%xcc, spill_fixup;
 
 #define SPILL_2_GENERIC_ETRAP		\
 etrap_user_spill_32bit:			\
-	srl	%sp, 0, %sp;			\
+	and	%sp, 1, %g3;			\
+	brnz,pn	%g3, etrap_user_spill_64bit;	\
+	 srl	%sp, 0, %sp;			\
 	stwa	%l0, [%sp + 0x00] %asi;		\
 	stwa	%l1, [%sp + 0x04] %asi;		\
 	stwa	%l2, [%sp + 0x08] %asi;		\
@@ -427,7 +431,7 @@ etrap_user_spill_32bit: \
 	ba,pt	%xcc, etrap_save;		\
 	 wrpr	%g1, %cwp;			\
 	nop; nop; nop; nop;			\
-	nop; nop; nop; nop;			\
+	nop; nop;				\
 	ba,a,pt	%xcc, etrap_spill_fixup_32bit; \
 	ba,a,pt	%xcc, etrap_spill_fixup_32bit; \
 	ba,a,pt	%xcc, etrap_spill_fixup_32bit;
@@ -592,7 +596,9 @@ user_rtt_fill_64bit: \
 
 /* Normal 32bit fill */
 #define FILL_2_GENERIC(ASI)				\
-	srl	%sp, 0, %sp;			\
+	and	%sp, 1, %g3;			\
+	brnz,pn	%g3, (. - (128 + 4));		\
+	 srl	%sp, 0, %sp;			\
 	lduwa	[%sp + %g0] ASI, %l0;		\
 	mov	0x04, %g2;			\
 	mov	0x08, %g3;			\
@@ -616,14 +622,16 @@ user_rtt_fill_64bit: \
 	lduwa	[%g1 + %g3] ASI, %i6;		\
 	lduwa	[%g1 + %g5] ASI, %i7;		\
 	restored;				\
-	retry; nop; nop; nop; nop;		\
+	retry; nop; nop;			\
 	b,a,pt	%xcc, fill_fixup_dax;		\
 	b,a,pt	%xcc, fill_fixup_mna;		\
 	b,a,pt	%xcc, fill_fixup;
 
 #define FILL_2_GENERIC_RTRAP		\
 user_rtt_fill_32bit:			\
-	srl	%sp, 0, %sp;			\
+	and	%sp, 1, %g3;			\
+	brnz,pn	%g3, user_rtt_fill_64bit;	\
+	 srl	%sp, 0, %sp;			\
 	lduwa	[%sp + 0x00] %asi, %l0;		\
 	lduwa	[%sp + 0x04] %asi, %l1;		\
 	lduwa	[%sp + 0x08] %asi, %l2;		\
@@ -643,7 +651,7 @@ user_rtt_fill_32bit: \
 	ba,pt	%xcc, user_rtt_pre_restore;	\
 	 restored;				\
 	nop; nop; nop; nop; nop;		\
-	nop; nop; nop; nop; nop;		\
+	nop; nop; nop;				\
 	ba,a,pt	%xcc, user_rtt_fill_fixup;	\
 	ba,a,pt	%xcc, user_rtt_fill_fixup;	\
 	ba,a,pt	%xcc, user_rtt_fill_fixup;
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h
index 8974ef7ae920..cac719d1bc5c 100644
--- a/arch/sparc/include/uapi/asm/unistd.h
+++ b/arch/sparc/include/uapi/asm/unistd.h
@@ -405,8 +405,13 @@
 #define __NR_setns		337
 #define __NR_process_vm_readv	338
 #define __NR_process_vm_writev	339
+#define __NR_kern_features	340
+#define __NR_kcmp		341
 
-#define NR_syscalls		340
+#define NR_syscalls		342
+
+/* Bitmask values returned from kern_features system call.  */
+#define KERN_FEATURE_MIXED_MODE_STACK	0x00000001
 
 #ifdef __32bit_syscall_numbers__
 /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
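The new kern_features syscall returns a feature bitmask, so userspace can probe for the mixed-mode stack handling added by this series. A hedged, sparc-specific userspace sketch (the program itself is illustrative; an older kernel simply fails the syscall):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #define __NR_kern_features              340
    #define KERN_FEATURE_MIXED_MODE_STACK   0x00000001

    int main(void)
    {
            long features = syscall(__NR_kern_features);

            if (features < 0) {
                    puts("kern_features not supported by this kernel");
                    return 1;
            }
            if (features & KERN_FEATURE_MIXED_MODE_STACK)
                    puts("64-bit stack frames are handled for 32-bit tasks");
            return 0;
    }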