author      David Howells <dhowells@redhat.com>    2012-03-28 13:30:03 -0400
committer   David Howells <dhowells@redhat.com>    2012-03-28 13:30:03 -0400
commit      d550bbd40c0e10aefa05103dadbe0ae42e683707 (patch)
tree        c1f32662f605f2b87c73901fbf66d12b2ca69b04 /arch/sparc/include
parent      e839ca528718e68cad32a307dc9aabf01ef3eb05 (diff)
Disintegrate asm/system.h for Sparc
Disintegrate asm/system.h for Sparc.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: sparclinux@vger.kernel.org
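The change is mechanical but wide: every file that formerly pulled in the catch-all asm/system.h now includes only the specific headers it needs (asm/barrier.h, asm/cmpxchg.h, asm/switch_to.h, asm/exec.h, asm/cpu_type.h), and asm/system.h itself is reduced to a transitional wrapper marked for deletion. A minimal sketch of the migration pattern for a file outside this patch (illustrative only; the right headers depend on what the file actually uses):

    /* Before this patch: one catch-all include provided barriers,
     * xchg()/cmpxchg(), switch_to(), etc.
     */
    #include <asm/system.h>

    /* After this patch: include only the pieces that are used.
     * (Hypothetical example of a file needing barriers and cmpxchg.)
     */
    #include <asm/barrier.h>     /* mb(), rmb(), wmb(), smp_*()        */
    #include <asm/cmpxchg.h>     /* xchg(), cmpxchg(), cmpxchg_local() */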
Diffstat (limited to 'arch/sparc/include')
-rw-r--r--  arch/sparc/include/asm/atomic_32.h        2
-rw-r--r--  arch/sparc/include/asm/atomic_64.h        3
-rw-r--r--  arch/sparc/include/asm/auxio_32.h         1
-rw-r--r--  arch/sparc/include/asm/barrier.h          8
-rw-r--r--  arch/sparc/include/asm/barrier_32.h      15
-rw-r--r--  arch/sparc/include/asm/barrier_64.h      56
-rw-r--r--  arch/sparc/include/asm/bug.h              3
-rw-r--r--  arch/sparc/include/asm/cacheflush_32.h    9
-rw-r--r--  arch/sparc/include/asm/cacheflush_64.h   10
-rw-r--r--  arch/sparc/include/asm/cmpxchg.h          8
-rw-r--r--  arch/sparc/include/asm/cmpxchg_32.h     112
-rw-r--r--  arch/sparc/include/asm/cmpxchg_64.h     145
-rw-r--r--  arch/sparc/include/asm/cpu_type.h        34
-rw-r--r--  arch/sparc/include/asm/exec.h             6
-rw-r--r--  arch/sparc/include/asm/floppy_32.h        1
-rw-r--r--  arch/sparc/include/asm/futex_64.h         1
-rw-r--r--  arch/sparc/include/asm/io_32.h            1
-rw-r--r--  arch/sparc/include/asm/io_64.h            1
-rw-r--r--  arch/sparc/include/asm/irqflags_32.h      1
-rw-r--r--  arch/sparc/include/asm/mmu_context_64.h   1
-rw-r--r--  arch/sparc/include/asm/ns87303.h          1
-rw-r--r--  arch/sparc/include/asm/perfctr.h         23
-rw-r--r--  arch/sparc/include/asm/pgtable_32.h       2
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h       1
-rw-r--r--  arch/sparc/include/asm/processor.h        3
-rw-r--r--  arch/sparc/include/asm/processor_64.h     3
-rw-r--r--  arch/sparc/include/asm/ptrace.h           5
-rw-r--r--  arch/sparc/include/asm/setup.h           16
-rw-r--r--  arch/sparc/include/asm/switch_to.h        8
-rw-r--r--  arch/sparc/include/asm/switch_to_32.h   106
-rw-r--r--  arch/sparc/include/asm/switch_to_64.h    72
-rw-r--r--  arch/sparc/include/asm/system.h          14
-rw-r--r--  arch/sparc/include/asm/system_32.h      284
-rw-r--r--  arch/sparc/include/asm/system_64.h      331
-rw-r--r--  arch/sparc/include/asm/timer_32.h         3
-rw-r--r--  arch/sparc/include/asm/uaccess_64.h       1
36 files changed, 651 insertions(+), 640 deletions(-)
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index 9dd0a769fa18..905832aa9e9e 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -13,9 +13,9 @@
13 13
14#include <linux/types.h> 14#include <linux/types.h>
15 15
16#include <asm/cmpxchg.h>
16#include <asm-generic/atomic64.h> 17#include <asm-generic/atomic64.h>
17 18
18#include <asm/system.h>
19 19
20#define ATOMIC_INIT(i) { (i) } 20#define ATOMIC_INIT(i) { (i) }
21 21
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 9f421df46aec..ce35a1cf1a20 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -8,7 +8,7 @@
8#define __ARCH_SPARC64_ATOMIC__ 8#define __ARCH_SPARC64_ATOMIC__
9 9
10#include <linux/types.h> 10#include <linux/types.h>
11#include <asm/system.h> 11#include <asm/cmpxchg.h>
12 12
13#define ATOMIC_INIT(i) { (i) } 13#define ATOMIC_INIT(i) { (i) }
14#define ATOMIC64_INIT(i) { (i) } 14#define ATOMIC64_INIT(i) { (i) }
@@ -85,7 +85,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
85 return c; 85 return c;
86} 86}
87 87
88
89#define atomic64_cmpxchg(v, o, n) \ 88#define atomic64_cmpxchg(v, o, n) \
90 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) 89 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
91#define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) 90#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
diff --git a/arch/sparc/include/asm/auxio_32.h b/arch/sparc/include/asm/auxio_32.h
index e03e088be95f..3a319775ae37 100644
--- a/arch/sparc/include/asm/auxio_32.h
+++ b/arch/sparc/include/asm/auxio_32.h
@@ -6,7 +6,6 @@
6#ifndef _SPARC_AUXIO_H 6#ifndef _SPARC_AUXIO_H
7#define _SPARC_AUXIO_H 7#define _SPARC_AUXIO_H
8 8
9#include <asm/system.h>
10#include <asm/vaddrs.h> 9#include <asm/vaddrs.h>
11 10
12/* This register is an unsigned char in IO space. It does two things. 11/* This register is an unsigned char in IO space. It does two things.
diff --git a/arch/sparc/include/asm/barrier.h b/arch/sparc/include/asm/barrier.h
new file mode 100644
index 000000000000..b25f02a029e0
--- /dev/null
+++ b/arch/sparc/include/asm/barrier.h
@@ -0,0 +1,8 @@
1#ifndef ___ASM_SPARC_BARRIER_H
2#define ___ASM_SPARC_BARRIER_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm/barrier_64.h>
5#else
6#include <asm/barrier_32.h>
7#endif
8#endif
diff --git a/arch/sparc/include/asm/barrier_32.h b/arch/sparc/include/asm/barrier_32.h
new file mode 100644
index 000000000000..c1b76654ee76
--- /dev/null
+++ b/arch/sparc/include/asm/barrier_32.h
@@ -0,0 +1,15 @@
1#ifndef __SPARC_BARRIER_H
2#define __SPARC_BARRIER_H
3
4/* XXX Change this if we ever use a PSO mode kernel. */
5#define mb() __asm__ __volatile__ ("" : : : "memory")
6#define rmb() mb()
7#define wmb() mb()
8#define read_barrier_depends() do { } while(0)
9#define set_mb(__var, __value) do { __var = __value; mb(); } while(0)
10#define smp_mb() __asm__ __volatile__("":::"memory")
11#define smp_rmb() __asm__ __volatile__("":::"memory")
12#define smp_wmb() __asm__ __volatile__("":::"memory")
13#define smp_read_barrier_depends() do { } while(0)
14
15#endif /* !(__SPARC_BARRIER_H) */
diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
new file mode 100644
index 000000000000..95d45986f908
--- /dev/null
+++ b/arch/sparc/include/asm/barrier_64.h
@@ -0,0 +1,56 @@
1#ifndef __SPARC64_BARRIER_H
2#define __SPARC64_BARRIER_H
3
4/* These are here in an effort to more fully work around Spitfire Errata
5 * #51. Essentially, if a memory barrier occurs soon after a mispredicted
6 * branch, the chip can stop executing instructions until a trap occurs.
7 * Therefore, if interrupts are disabled, the chip can hang forever.
8 *
9 * It used to be believed that the memory barrier had to be right in the
10 * delay slot, but a case has been traced recently wherein the memory barrier
11 * was one instruction after the branch delay slot and the chip still hung.
12 * The offending sequence was the following in sym_wakeup_done() of the
13 * sym53c8xx_2 driver:
14 *
15 * call sym_ccb_from_dsa, 0
16 * movge %icc, 0, %l0
17 * brz,pn %o0, .LL1303
18 * mov %o0, %l2
19 * membar #LoadLoad
20 *
21 * The branch has to be mispredicted for the bug to occur. Therefore, we put
22 * the memory barrier explicitly into a "branch always, predicted taken"
23 * delay slot to avoid the problem case.
24 */
25#define membar_safe(type) \
26do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
27 " membar " type "\n" \
28 "1:\n" \
29 : : : "memory"); \
30} while (0)
31
32/* The kernel always executes in TSO memory model these days,
33 * and furthermore most sparc64 chips implement more stringent
34 * memory ordering than required by the specifications.
35 */
36#define mb() membar_safe("#StoreLoad")
37#define rmb() __asm__ __volatile__("":::"memory")
38#define wmb() __asm__ __volatile__("":::"memory")
39
40#define read_barrier_depends() do { } while(0)
41#define set_mb(__var, __value) \
42 do { __var = __value; membar_safe("#StoreLoad"); } while(0)
43
44#ifdef CONFIG_SMP
45#define smp_mb() mb()
46#define smp_rmb() rmb()
47#define smp_wmb() wmb()
48#else
49#define smp_mb() __asm__ __volatile__("":::"memory")
50#define smp_rmb() __asm__ __volatile__("":::"memory")
51#define smp_wmb() __asm__ __volatile__("":::"memory")
52#endif
53
54#define smp_read_barrier_depends() do { } while(0)
55
56#endif /* !(__SPARC64_BARRIER_H) */
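The errata comment in the new barrier_64.h explains why mb() on sparc64 ends up as membar_safe("#StoreLoad") placed in a "branch always, predicted taken" delay slot. As a usage sketch only (generic barrier pairing, not code from this patch; buf, value, ready and consume() are made-up names), this is the kind of producer/consumer ordering these macros are meant to enforce:

    /* Producer (one CPU): publish the data, then the flag. */
    buf->data = value;
    smp_wmb();                 /* data store ordered before flag store */
    buf->ready = 1;

    /* Consumer (another CPU): observe the flag, then the data. */
    while (!buf->ready)
            cpu_relax();
    smp_rmb();                 /* flag load ordered before data load   */
    consume(buf->data);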
diff --git a/arch/sparc/include/asm/bug.h b/arch/sparc/include/asm/bug.h
index 8a59e5a8c217..6bd9f43cb5a5 100644
--- a/arch/sparc/include/asm/bug.h
+++ b/arch/sparc/include/asm/bug.h
@@ -19,4 +19,7 @@ extern void do_BUG(const char *file, int line);
19 19
20#include <asm-generic/bug.h> 20#include <asm-generic/bug.h>
21 21
22struct pt_regs;
23extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));
24
22#endif 25#endif
diff --git a/arch/sparc/include/asm/cacheflush_32.h b/arch/sparc/include/asm/cacheflush_32.h
index 2e468773f250..68431b47a22a 100644
--- a/arch/sparc/include/asm/cacheflush_32.h
+++ b/arch/sparc/include/asm/cacheflush_32.h
@@ -83,4 +83,13 @@ extern void sparc_flush_page_to_ram(struct page *page);
83#define flush_cache_vmap(start, end) flush_cache_all() 83#define flush_cache_vmap(start, end) flush_cache_all()
84#define flush_cache_vunmap(start, end) flush_cache_all() 84#define flush_cache_vunmap(start, end) flush_cache_all()
85 85
86/* When a context switch happens we must flush all user windows so that
87 * the windows of the current process are flushed onto its stack. This
88 * way the windows are all clean for the next process and the stack
89 * frames are up to date.
90 */
91extern void flush_user_windows(void);
92extern void kill_user_windows(void);
93extern void flushw_all(void);
94
86#endif /* _SPARC_CACHEFLUSH_H */ 95#endif /* _SPARC_CACHEFLUSH_H */
diff --git a/arch/sparc/include/asm/cacheflush_64.h b/arch/sparc/include/asm/cacheflush_64.h
index b95384033e89..2efea2ff88b7 100644
--- a/arch/sparc/include/asm/cacheflush_64.h
+++ b/arch/sparc/include/asm/cacheflush_64.h
@@ -9,6 +9,16 @@
9 9
10/* Cache flush operations. */ 10/* Cache flush operations. */
11 11
12
13#define flushi(addr) __asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")
14#define flushw_all() __asm__ __volatile__("flushw")
15
16extern void __flushw_user(void);
17#define flushw_user() __flushw_user()
18
19#define flush_user_windows flushw_user
20#define flush_register_windows flushw_all
21
12/* These are the same regardless of whether this is an SMP kernel or not. */ 22/* These are the same regardless of whether this is an SMP kernel or not. */
13#define flush_cache_mm(__mm) \ 23#define flush_cache_mm(__mm) \
14 do { if ((__mm) == current->mm) flushw_user(); } while(0) 24 do { if ((__mm) == current->mm) flushw_user(); } while(0)
diff --git a/arch/sparc/include/asm/cmpxchg.h b/arch/sparc/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..9355893efa52
--- /dev/null
+++ b/arch/sparc/include/asm/cmpxchg.h
@@ -0,0 +1,8 @@
1#ifndef ___ASM_SPARC_CMPXCHG_H
2#define ___ASM_SPARC_CMPXCHG_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm/cmpxchg_64.h>
5#else
6#include <asm/cmpxchg_32.h>
7#endif
8#endif
diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h
new file mode 100644
index 000000000000..c786b0a92b51
--- /dev/null
+++ b/arch/sparc/include/asm/cmpxchg_32.h
@@ -0,0 +1,112 @@
1/* 32-bit atomic xchg() and cmpxchg() definitions.
2 *
3 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
5 * Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org)
6 *
7 * Additions by Keith M Wesolowski (wesolows@foobazco.org) based
8 * on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
9 */
10
11#ifndef __ARCH_SPARC_CMPXCHG__
12#define __ARCH_SPARC_CMPXCHG__
13
14#include <asm/btfixup.h>
15
16/* This has special calling conventions */
17#ifndef CONFIG_SMP
18BTFIXUPDEF_CALL(void, ___xchg32, void)
19#endif
20
21static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
22{
23#ifdef CONFIG_SMP
24 __asm__ __volatile__("swap [%2], %0"
25 : "=&r" (val)
26 : "0" (val), "r" (m)
27 : "memory");
28 return val;
29#else
30 register unsigned long *ptr asm("g1");
31 register unsigned long ret asm("g2");
32
33 ptr = (unsigned long *) m;
34 ret = val;
35
36 /* Note: this is magic and the nop there is
37 really needed. */
38 __asm__ __volatile__(
39 "mov %%o7, %%g4\n\t"
40 "call ___f____xchg32\n\t"
41 " nop\n\t"
42 : "=&r" (ret)
43 : "0" (ret), "r" (ptr)
44 : "g3", "g4", "g7", "memory", "cc");
45
46 return ret;
47#endif
48}
49
50extern void __xchg_called_with_bad_pointer(void);
51
52static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
53{
54 switch (size) {
55 case 4:
56 return xchg_u32(ptr, x);
57 }
58 __xchg_called_with_bad_pointer();
59 return x;
60}
61
62#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
63
64/* Emulate cmpxchg() the same way we emulate atomics,
65 * by hashing the object address and indexing into an array
66 * of spinlocks to get a bit of performance...
67 *
68 * See arch/sparc/lib/atomic32.c for implementation.
69 *
70 * Cribbed from <asm-parisc/atomic.h>
71 */
72#define __HAVE_ARCH_CMPXCHG 1
73
74/* bug catcher for when unsupported size is used - won't link */
75extern void __cmpxchg_called_with_bad_pointer(void);
76/* we only need to support cmpxchg of a u32 on sparc */
77extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
78
79/* don't worry...optimizer will get rid of most of this */
80static inline unsigned long
81__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
82{
83 switch (size) {
84 case 4:
85 return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
86 default:
87 __cmpxchg_called_with_bad_pointer();
88 break;
89 }
90 return old;
91}
92
93#define cmpxchg(ptr, o, n) \
94({ \
95 __typeof__(*(ptr)) _o_ = (o); \
96 __typeof__(*(ptr)) _n_ = (n); \
97 (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
98 (unsigned long)_n_, sizeof(*(ptr))); \
99})
100
101#include <asm-generic/cmpxchg-local.h>
102
103/*
104 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
105 * them available.
106 */
107#define cmpxchg_local(ptr, o, n) \
108 ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
109 (unsigned long)(n), sizeof(*(ptr))))
110#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
111
112#endif /* __ARCH_SPARC_CMPXCHG__ */
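The 32-bit header only declares __cmpxchg_u32() and points at arch/sparc/lib/atomic32.c for the emulation. As a rough sketch of the hashed-spinlock technique that comment describes (illustrative code under stated assumptions, not the real atomic32.c; the ATOMIC_EMU_* names are made up):

    #include <linux/spinlock.h>

    #define ATOMIC_EMU_HASH_SIZE 4
    static spinlock_t atomic_emu_locks[ATOMIC_EMU_HASH_SIZE];

    /* Hash the object address so distinct objects usually take
     * different locks, giving "a bit of performance" as noted above. */
    #define ATOMIC_EMU_LOCK(addr) \
            (&atomic_emu_locks[((unsigned long)(addr) >> 4) & \
                               (ATOMIC_EMU_HASH_SIZE - 1)])

    static unsigned long emu_cmpxchg_u32(volatile u32 *m, u32 old, u32 new_)
    {
            unsigned long flags;
            u32 prev;

            /* Take the per-hash-bucket lock with interrupts off so the
             * compare and the store appear atomic to everyone else. */
            spin_lock_irqsave(ATOMIC_EMU_LOCK(m), flags);
            prev = *m;
            if (prev == old)
                    *m = new_;
            spin_unlock_irqrestore(ATOMIC_EMU_LOCK(m), flags);
            return prev;
    }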
diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h
new file mode 100644
index 000000000000..b30eb37294c5
--- /dev/null
+++ b/arch/sparc/include/asm/cmpxchg_64.h
@@ -0,0 +1,145 @@
1/* 64-bit atomic xchg() and cmpxchg() definitions.
2 *
3 * Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com)
4 */
5
6#ifndef __ARCH_SPARC64_CMPXCHG__
7#define __ARCH_SPARC64_CMPXCHG__
8
9static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
10{
11 unsigned long tmp1, tmp2;
12
13 __asm__ __volatile__(
14" mov %0, %1\n"
15"1: lduw [%4], %2\n"
16" cas [%4], %2, %0\n"
17" cmp %2, %0\n"
18" bne,a,pn %%icc, 1b\n"
19" mov %1, %0\n"
20 : "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
21 : "0" (val), "r" (m)
22 : "cc", "memory");
23 return val;
24}
25
26static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
27{
28 unsigned long tmp1, tmp2;
29
30 __asm__ __volatile__(
31" mov %0, %1\n"
32"1: ldx [%4], %2\n"
33" casx [%4], %2, %0\n"
34" cmp %2, %0\n"
35" bne,a,pn %%xcc, 1b\n"
36" mov %1, %0\n"
37 : "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
38 : "0" (val), "r" (m)
39 : "cc", "memory");
40 return val;
41}
42
43#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
44
45extern void __xchg_called_with_bad_pointer(void);
46
47static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
48 int size)
49{
50 switch (size) {
51 case 4:
52 return xchg32(ptr, x);
53 case 8:
54 return xchg64(ptr, x);
55 }
56 __xchg_called_with_bad_pointer();
57 return x;
58}
59
60/*
61 * Atomic compare and exchange. Compare OLD with MEM, if identical,
62 * store NEW in MEM. Return the initial value in MEM. Success is
63 * indicated by comparing RETURN with OLD.
64 */
65
66#include <asm-generic/cmpxchg-local.h>
67
68#define __HAVE_ARCH_CMPXCHG 1
69
70static inline unsigned long
71__cmpxchg_u32(volatile int *m, int old, int new)
72{
73 __asm__ __volatile__("cas [%2], %3, %0"
74 : "=&r" (new)
75 : "0" (new), "r" (m), "r" (old)
76 : "memory");
77
78 return new;
79}
80
81static inline unsigned long
82__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
83{
84 __asm__ __volatile__("casx [%2], %3, %0"
85 : "=&r" (new)
86 : "0" (new), "r" (m), "r" (old)
87 : "memory");
88
89 return new;
90}
91
92/* This function doesn't exist, so you'll get a linker error
93 if something tries to do an invalid cmpxchg(). */
94extern void __cmpxchg_called_with_bad_pointer(void);
95
96static inline unsigned long
97__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
98{
99 switch (size) {
100 case 4:
101 return __cmpxchg_u32(ptr, old, new);
102 case 8:
103 return __cmpxchg_u64(ptr, old, new);
104 }
105 __cmpxchg_called_with_bad_pointer();
106 return old;
107}
108
109#define cmpxchg(ptr,o,n) \
110 ({ \
111 __typeof__(*(ptr)) _o_ = (o); \
112 __typeof__(*(ptr)) _n_ = (n); \
113 (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
114 (unsigned long)_n_, sizeof(*(ptr))); \
115 })
116
117/*
118 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
119 * them available.
120 */
121
122static inline unsigned long __cmpxchg_local(volatile void *ptr,
123 unsigned long old,
124 unsigned long new, int size)
125{
126 switch (size) {
127 case 4:
128 case 8: return __cmpxchg(ptr, old, new, size);
129 default:
130 return __cmpxchg_local_generic(ptr, old, new, size);
131 }
132
133 return old;
134}
135
136#define cmpxchg_local(ptr, o, n) \
137 ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
138 (unsigned long)(n), sizeof(*(ptr))))
139#define cmpxchg64_local(ptr, o, n) \
140 ({ \
141 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
142 cmpxchg_local((ptr), (o), (n)); \
143 })
144
145#endif /* __ARCH_SPARC64_CMPXCHG__ */
diff --git a/arch/sparc/include/asm/cpu_type.h b/arch/sparc/include/asm/cpu_type.h
new file mode 100644
index 000000000000..4ca184d95d82
--- /dev/null
+++ b/arch/sparc/include/asm/cpu_type.h
@@ -0,0 +1,34 @@
1#ifndef __ASM_CPU_TYPE_H
2#define __ASM_CPU_TYPE_H
3
4/*
5 * Sparc (general) CPU types
6 */
7enum sparc_cpu {
8 sun4 = 0x00,
9 sun4c = 0x01,
10 sun4m = 0x02,
11 sun4d = 0x03,
12 sun4e = 0x04,
13 sun4u = 0x05, /* V8 ploos ploos */
14 sun_unknown = 0x06,
15 ap1000 = 0x07, /* almost a sun4m */
16 sparc_leon = 0x08, /* Leon SoC */
17};
18
19#ifdef CONFIG_SPARC32
20extern enum sparc_cpu sparc_cpu_model;
21
22#define ARCH_SUN4C (sparc_cpu_model==sun4c)
23
24#define SUN4M_NCPUS 4 /* Architectural limit of sun4m. */
25
26#else
27
28#define sparc_cpu_model sun4u
29
30/* This cannot ever be a sun4c :) That's just history. */
31#define ARCH_SUN4C 0
32#endif
33
34#endif /* __ASM_CPU_TYPE_H */
diff --git a/arch/sparc/include/asm/exec.h b/arch/sparc/include/asm/exec.h
new file mode 100644
index 000000000000..2e085881e0d1
--- /dev/null
+++ b/arch/sparc/include/asm/exec.h
@@ -0,0 +1,6 @@
1#ifndef __SPARC_EXEC_H
2#define __SPARC_EXEC_H
3
4#define arch_align_stack(x) (x)
5
6#endif /* __SPARC_EXEC_H */
diff --git a/arch/sparc/include/asm/floppy_32.h b/arch/sparc/include/asm/floppy_32.h
index 7440915e86d8..698d9559fead 100644
--- a/arch/sparc/include/asm/floppy_32.h
+++ b/arch/sparc/include/asm/floppy_32.h
@@ -11,7 +11,6 @@
11 11
12#include <asm/page.h> 12#include <asm/page.h>
13#include <asm/pgtable.h> 13#include <asm/pgtable.h>
14#include <asm/system.h>
15#include <asm/idprom.h> 14#include <asm/idprom.h>
16#include <asm/machines.h> 15#include <asm/machines.h>
17#include <asm/oplib.h> 16#include <asm/oplib.h>
diff --git a/arch/sparc/include/asm/futex_64.h b/arch/sparc/include/asm/futex_64.h
index 444e7bea23bc..4e899b0dabf7 100644
--- a/arch/sparc/include/asm/futex_64.h
+++ b/arch/sparc/include/asm/futex_64.h
@@ -4,7 +4,6 @@
4#include <linux/futex.h> 4#include <linux/futex.h>
5#include <linux/uaccess.h> 5#include <linux/uaccess.h>
6#include <asm/errno.h> 6#include <asm/errno.h>
7#include <asm/system.h>
8 7
9#define __futex_cas_op(insn, ret, oldval, uaddr, oparg) \ 8#define __futex_cas_op(insn, ret, oldval, uaddr, oparg) \
10 __asm__ __volatile__( \ 9 __asm__ __volatile__( \
diff --git a/arch/sparc/include/asm/io_32.h b/arch/sparc/include/asm/io_32.h
index 2006e5d359df..c1acbd891cbc 100644
--- a/arch/sparc/include/asm/io_32.h
+++ b/arch/sparc/include/asm/io_32.h
@@ -6,7 +6,6 @@
6#include <linux/ioport.h> /* struct resource */ 6#include <linux/ioport.h> /* struct resource */
7 7
8#include <asm/page.h> /* IO address mapping routines need this */ 8#include <asm/page.h> /* IO address mapping routines need this */
9#include <asm/system.h>
10#include <asm-generic/pci_iomap.h> 9#include <asm-generic/pci_iomap.h>
11 10
12#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) 11#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h
index 9481e5a6fa90..09b0b88aeb2a 100644
--- a/arch/sparc/include/asm/io_64.h
+++ b/arch/sparc/include/asm/io_64.h
@@ -6,7 +6,6 @@
6#include <linux/types.h> 6#include <linux/types.h>
7 7
8#include <asm/page.h> /* IO address mapping routines need this */ 8#include <asm/page.h> /* IO address mapping routines need this */
9#include <asm/system.h>
10#include <asm/asi.h> 9#include <asm/asi.h>
11#include <asm-generic/pci_iomap.h> 10#include <asm-generic/pci_iomap.h>
12 11
diff --git a/arch/sparc/include/asm/irqflags_32.h b/arch/sparc/include/asm/irqflags_32.h
index 14848909e0de..e414c06615c1 100644
--- a/arch/sparc/include/asm/irqflags_32.h
+++ b/arch/sparc/include/asm/irqflags_32.h
@@ -13,6 +13,7 @@
13#ifndef __ASSEMBLY__ 13#ifndef __ASSEMBLY__
14 14
15#include <linux/types.h> 15#include <linux/types.h>
16#include <asm/psr.h>
16 17
17extern void arch_local_irq_restore(unsigned long); 18extern void arch_local_irq_restore(unsigned long);
18extern unsigned long arch_local_irq_save(void); 19extern unsigned long arch_local_irq_save(void);
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 666a73fef28d..a97fd085cebe 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -6,7 +6,6 @@
6#ifndef __ASSEMBLY__ 6#ifndef __ASSEMBLY__
7 7
8#include <linux/spinlock.h> 8#include <linux/spinlock.h>
9#include <asm/system.h>
10#include <asm/spitfire.h> 9#include <asm/spitfire.h>
11#include <asm-generic/mm_hooks.h> 10#include <asm-generic/mm_hooks.h>
12 11
diff --git a/arch/sparc/include/asm/ns87303.h b/arch/sparc/include/asm/ns87303.h
index af755483e17d..6b947ee0f6aa 100644
--- a/arch/sparc/include/asm/ns87303.h
+++ b/arch/sparc/include/asm/ns87303.h
@@ -79,7 +79,6 @@
79 79
80#include <linux/spinlock.h> 80#include <linux/spinlock.h>
81 81
82#include <asm/system.h>
83#include <asm/io.h> 82#include <asm/io.h>
84 83
85extern spinlock_t ns87303_lock; 84extern spinlock_t ns87303_lock;
diff --git a/arch/sparc/include/asm/perfctr.h b/arch/sparc/include/asm/perfctr.h
index 8d8720a8770d..3332d2cba6c1 100644
--- a/arch/sparc/include/asm/perfctr.h
+++ b/arch/sparc/include/asm/perfctr.h
@@ -168,6 +168,29 @@ struct vcounter_struct {
168 unsigned long long vcnt1; 168 unsigned long long vcnt1;
169}; 169};
170 170
171#else /* !(__KERNEL__) */
172
173#ifndef CONFIG_SPARC32
174
175/* Performance counter register access. */
176#define read_pcr(__p) __asm__ __volatile__("rd %%pcr, %0" : "=r" (__p))
177#define write_pcr(__p) __asm__ __volatile__("wr %0, 0x0, %%pcr" : : "r" (__p))
178#define read_pic(__p) __asm__ __volatile__("rd %%pic, %0" : "=r" (__p))
179
180/* Blackbird errata workaround. See commentary in
181 * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt()
182 * for more information.
183 */
184#define write_pic(__p) \
185 __asm__ __volatile__("ba,pt %%xcc, 99f\n\t" \
186 " nop\n\t" \
187 ".align 64\n" \
188 "99:wr %0, 0x0, %%pic\n\t" \
189 "rd %%pic, %%g0" : : "r" (__p))
190#define reset_pic() write_pic(0)
191
192#endif /* !CONFIG_SPARC32 */
193
171#endif /* !(__KERNEL__) */ 194#endif /* !(__KERNEL__) */
172 195
173#endif /* !(PERF_COUNTER_API) */ 196#endif /* !(PERF_COUNTER_API) */
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index a790cc657476..3d7101860e68 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -21,7 +21,7 @@
21#include <asm/vac-ops.h> 21#include <asm/vac-ops.h>
22#include <asm/oplib.h> 22#include <asm/oplib.h>
23#include <asm/btfixup.h> 23#include <asm/btfixup.h>
24#include <asm/system.h> 24#include <asm/cpu_type.h>
25 25
26 26
27struct vm_area_struct; 27struct vm_area_struct;
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 38ebb2c60137..6fa2f7980e6b 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -19,7 +19,6 @@
19#include <asm/types.h> 19#include <asm/types.h>
20#include <asm/spitfire.h> 20#include <asm/spitfire.h>
21#include <asm/asi.h> 21#include <asm/asi.h>
22#include <asm/system.h>
23#include <asm/page.h> 22#include <asm/page.h>
24#include <asm/processor.h> 23#include <asm/processor.h>
25 24
diff --git a/arch/sparc/include/asm/processor.h b/arch/sparc/include/asm/processor.h
index 9da9646bf6c6..2fe99e66e760 100644
--- a/arch/sparc/include/asm/processor.h
+++ b/arch/sparc/include/asm/processor.h
@@ -5,4 +5,7 @@
5#else 5#else
6#include <asm/processor_32.h> 6#include <asm/processor_32.h>
7#endif 7#endif
8
9#define nop() __asm__ __volatile__ ("nop")
10
8#endif 11#endif
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h
index 59fcebb8f440..e713db249931 100644
--- a/arch/sparc/include/asm/processor_64.h
+++ b/arch/sparc/include/asm/processor_64.h
@@ -18,6 +18,9 @@
18#include <asm/ptrace.h> 18#include <asm/ptrace.h>
19#include <asm/page.h> 19#include <asm/page.h>
20 20
21/* Don't hold the runqueue lock over context switch */
22#define __ARCH_WANT_UNLOCKED_CTXSW
23
21/* The sparc has no problems with write protection */ 24/* The sparc has no problems with write protection */
22#define wp_works_ok 1 25#define wp_works_ok 1
23#define wp_works_ok__is_a_macro /* for versions in ksyms.c */ 26#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
diff --git a/arch/sparc/include/asm/ptrace.h b/arch/sparc/include/asm/ptrace.h
index c00c3b5c2806..ef8c7c068f53 100644
--- a/arch/sparc/include/asm/ptrace.h
+++ b/arch/sparc/include/asm/ptrace.h
@@ -98,6 +98,8 @@ struct sparc_trapf {
98 */ 98 */
99#ifndef __ASSEMBLY__ 99#ifndef __ASSEMBLY__
100 100
101#include <linux/types.h>
102
101struct pt_regs { 103struct pt_regs {
102 unsigned long psr; 104 unsigned long psr;
103 unsigned long pc; 105 unsigned long pc;
@@ -163,7 +165,6 @@ struct sparc_stackf {
163#ifdef __KERNEL__ 165#ifdef __KERNEL__
164 166
165#include <linux/threads.h> 167#include <linux/threads.h>
166#include <asm/system.h>
167 168
168static inline int pt_regs_trap_type(struct pt_regs *regs) 169static inline int pt_regs_trap_type(struct pt_regs *regs)
169{ 170{
@@ -240,8 +241,6 @@ extern unsigned long profile_pc(struct pt_regs *);
240 241
241#ifdef __KERNEL__ 242#ifdef __KERNEL__
242 243
243#include <asm/system.h>
244
245static inline bool pt_regs_is_syscall(struct pt_regs *regs) 244static inline bool pt_regs_is_syscall(struct pt_regs *regs)
246{ 245{
247 return (regs->psr & PSR_SYSCALL); 246 return (regs->psr & PSR_SYSCALL);
diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
index 64718ba26434..00497abec996 100644
--- a/arch/sparc/include/asm/setup.h
+++ b/arch/sparc/include/asm/setup.h
@@ -13,14 +13,30 @@
13 13
14#ifdef __KERNEL__ 14#ifdef __KERNEL__
15 15
16extern char reboot_command[];
17
16#ifdef CONFIG_SPARC32 18#ifdef CONFIG_SPARC32
17/* The CPU that was used for booting 19/* The CPU that was used for booting
18 * Only sun4d + leon may have boot_cpu_id != 0 20 * Only sun4d + leon may have boot_cpu_id != 0
19 */ 21 */
20extern unsigned char boot_cpu_id; 22extern unsigned char boot_cpu_id;
21extern unsigned char boot_cpu_id4; 23extern unsigned char boot_cpu_id4;
24
25extern unsigned long empty_bad_page;
26extern unsigned long empty_bad_page_table;
27extern unsigned long empty_zero_page;
28
29extern int serial_console;
30static inline int con_is_present(void)
31{
32 return serial_console ? 0 : 1;
33}
22#endif 34#endif
23 35
36extern void sun_do_break(void);
37extern int stop_a_enabled;
38extern int scons_pwroff;
39
24#endif /* __KERNEL__ */ 40#endif /* __KERNEL__ */
25 41
26#endif /* _SPARC_SETUP_H */ 42#endif /* _SPARC_SETUP_H */
diff --git a/arch/sparc/include/asm/switch_to.h b/arch/sparc/include/asm/switch_to.h
new file mode 100644
index 000000000000..2dc4fa5c6f8c
--- /dev/null
+++ b/arch/sparc/include/asm/switch_to.h
@@ -0,0 +1,8 @@
1#ifndef ___ASM_SPARC_SWITCH_TO_H
2#define ___ASM_SPARC_SWITCH_TO_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm/switch_to_64.h>
5#else
6#include <asm/switch_to_32.h>
7#endif
8#endif
diff --git a/arch/sparc/include/asm/switch_to_32.h b/arch/sparc/include/asm/switch_to_32.h
new file mode 100644
index 000000000000..e32e82b76eed
--- /dev/null
+++ b/arch/sparc/include/asm/switch_to_32.h
@@ -0,0 +1,106 @@
1#ifndef __SPARC_SWITCH_TO_H
2#define __SPARC_SWITCH_TO_H
3
4#include <asm/smp.h>
5
6extern struct thread_info *current_set[NR_CPUS];
7
8/*
9 * Flush windows so that the VM switch which follows
10 * would not pull the stack from under us.
11 *
12 * SWITCH_ENTER and SWITH_DO_LAZY_FPU do not work yet (e.g. SMP does not work)
13 * XXX WTF is the above comment? Found in late teen 2.4.x.
14 */
15#ifdef CONFIG_SMP
16#define SWITCH_ENTER(prv) \
17 do { \
18 if (test_tsk_thread_flag(prv, TIF_USEDFPU)) { \
19 put_psr(get_psr() | PSR_EF); \
20 fpsave(&(prv)->thread.float_regs[0], &(prv)->thread.fsr, \
21 &(prv)->thread.fpqueue[0], &(prv)->thread.fpqdepth); \
22 clear_tsk_thread_flag(prv, TIF_USEDFPU); \
23 (prv)->thread.kregs->psr &= ~PSR_EF; \
24 } \
25 } while(0)
26
27#define SWITCH_DO_LAZY_FPU(next) /* */
28#else
29#define SWITCH_ENTER(prv) /* */
30#define SWITCH_DO_LAZY_FPU(nxt) \
31 do { \
32 if (last_task_used_math != (nxt)) \
33 (nxt)->thread.kregs->psr&=~PSR_EF; \
34 } while(0)
35#endif
36
37#define prepare_arch_switch(next) do { \
38 __asm__ __volatile__( \
39 ".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \
40 "save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
41 "save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
42 "save %sp, -0x40, %sp\n\t" \
43 "restore; restore; restore; restore; restore; restore; restore"); \
44} while(0)
45
46 /* Much care has gone into this code, do not touch it.
47 *
48 * We need to loadup regs l0/l1 for the newly forked child
49 * case because the trap return path relies on those registers
50 * holding certain values, gcc is told that they are clobbered.
51 * Gcc needs registers for 3 values in and 1 value out, so we
52 * clobber every non-fixed-usage register besides l2/l3/o4/o5. -DaveM
53 *
54 * Hey Dave, that do not touch sign is too much of an incentive
55 * - Anton & Pete
56 */
57#define switch_to(prev, next, last) do { \
58 SWITCH_ENTER(prev); \
59 SWITCH_DO_LAZY_FPU(next); \
60 cpumask_set_cpu(smp_processor_id(), mm_cpumask(next->active_mm)); \
61 __asm__ __volatile__( \
62 "sethi %%hi(here - 0x8), %%o7\n\t" \
63 "mov %%g6, %%g3\n\t" \
64 "or %%o7, %%lo(here - 0x8), %%o7\n\t" \
65 "rd %%psr, %%g4\n\t" \
66 "std %%sp, [%%g6 + %4]\n\t" \
67 "rd %%wim, %%g5\n\t" \
68 "wr %%g4, 0x20, %%psr\n\t" \
69 "nop\n\t" \
70 "std %%g4, [%%g6 + %3]\n\t" \
71 "ldd [%2 + %3], %%g4\n\t" \
72 "mov %2, %%g6\n\t" \
73 ".globl patchme_store_new_current\n" \
74"patchme_store_new_current:\n\t" \
75 "st %2, [%1]\n\t" \
76 "wr %%g4, 0x20, %%psr\n\t" \
77 "nop\n\t" \
78 "nop\n\t" \
79 "nop\n\t" /* LEON needs all 3 nops: load to %sp depends on CWP. */ \
80 "ldd [%%g6 + %4], %%sp\n\t" \
81 "wr %%g5, 0x0, %%wim\n\t" \
82 "ldd [%%sp + 0x00], %%l0\n\t" \
83 "ldd [%%sp + 0x38], %%i6\n\t" \
84 "wr %%g4, 0x0, %%psr\n\t" \
85 "nop\n\t" \
86 "nop\n\t" \
87 "jmpl %%o7 + 0x8, %%g0\n\t" \
88 " ld [%%g3 + %5], %0\n\t" \
89 "here:\n" \
90 : "=&r" (last) \
91 : "r" (&(current_set[hard_smp_processor_id()])), \
92 "r" (task_thread_info(next)), \
93 "i" (TI_KPSR), \
94 "i" (TI_KSP), \
95 "i" (TI_TASK) \
96 : "g1", "g2", "g3", "g4", "g5", "g7", \
97 "l0", "l1", "l3", "l4", "l5", "l6", "l7", \
98 "i0", "i1", "i2", "i3", "i4", "i5", \
99 "o0", "o1", "o2", "o3", "o7"); \
100 } while(0)
101
102extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
103 void *fpqueue, unsigned long *fpqdepth);
104extern void synchronize_user_stack(void);
105
106#endif /* __SPARC_SWITCH_TO_H */
diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h
new file mode 100644
index 000000000000..7923c4a2be38
--- /dev/null
+++ b/arch/sparc/include/asm/switch_to_64.h
@@ -0,0 +1,72 @@
1#ifndef __SPARC64_SWITCH_TO_64_H
2#define __SPARC64_SWITCH_TO_64_H
3
4#include <asm/visasm.h>
5
6#define prepare_arch_switch(next) \
7do { \
8 flushw_all(); \
9} while (0)
10
11 /* See what happens when you design the chip correctly?
12 *
13 * We tell gcc we clobber all non-fixed-usage registers except
14 * for l0/l1. It will use one for 'next' and the other to hold
15 * the output value of 'last'. 'next' is not referenced again
16 * past the invocation of switch_to in the scheduler, so we need
17 * not preserve it's value. Hairy, but it lets us remove 2 loads
18 * and 2 stores in this critical code path. -DaveM
19 */
20#define switch_to(prev, next, last) \
21do { flush_tlb_pending(); \
22 save_and_clear_fpu(); \
23 /* If you are tempted to conditionalize the following */ \
24 /* so that ASI is only written if it changes, think again. */ \
25 __asm__ __volatile__("wr %%g0, %0, %%asi" \
26 : : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\
27 trap_block[current_thread_info()->cpu].thread = \
28 task_thread_info(next); \
29 __asm__ __volatile__( \
30 "mov %%g4, %%g7\n\t" \
31 "stx %%i6, [%%sp + 2047 + 0x70]\n\t" \
32 "stx %%i7, [%%sp + 2047 + 0x78]\n\t" \
33 "rdpr %%wstate, %%o5\n\t" \
34 "stx %%o6, [%%g6 + %6]\n\t" \
35 "stb %%o5, [%%g6 + %5]\n\t" \
36 "rdpr %%cwp, %%o5\n\t" \
37 "stb %%o5, [%%g6 + %8]\n\t" \
38 "wrpr %%g0, 15, %%pil\n\t" \
39 "mov %4, %%g6\n\t" \
40 "ldub [%4 + %8], %%g1\n\t" \
41 "wrpr %%g1, %%cwp\n\t" \
42 "ldx [%%g6 + %6], %%o6\n\t" \
43 "ldub [%%g6 + %5], %%o5\n\t" \
44 "ldub [%%g6 + %7], %%o7\n\t" \
45 "wrpr %%o5, 0x0, %%wstate\n\t" \
46 "ldx [%%sp + 2047 + 0x70], %%i6\n\t" \
47 "ldx [%%sp + 2047 + 0x78], %%i7\n\t" \
48 "ldx [%%g6 + %9], %%g4\n\t" \
49 "wrpr %%g0, 14, %%pil\n\t" \
50 "brz,pt %%o7, switch_to_pc\n\t" \
51 " mov %%g7, %0\n\t" \
52 "sethi %%hi(ret_from_syscall), %%g1\n\t" \
53 "jmpl %%g1 + %%lo(ret_from_syscall), %%g0\n\t" \
54 " nop\n\t" \
55 ".globl switch_to_pc\n\t" \
56 "switch_to_pc:\n\t" \
57 : "=&r" (last), "=r" (current), "=r" (current_thread_info_reg), \
58 "=r" (__local_per_cpu_offset) \
59 : "0" (task_thread_info(next)), \
60 "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD), \
61 "i" (TI_CWP), "i" (TI_TASK) \
62 : "cc", \
63 "g1", "g2", "g3", "g7", \
64 "l1", "l2", "l3", "l4", "l5", "l6", "l7", \
65 "i0", "i1", "i2", "i3", "i4", "i5", \
66 "o0", "o1", "o2", "o3", "o4", "o5", "o7"); \
67} while(0)
68
69extern void synchronize_user_stack(void);
70extern void fault_in_user_windows(void);
71
72#endif /* __SPARC64_SWITCH_TO_64_H */
diff --git a/arch/sparc/include/asm/system.h b/arch/sparc/include/asm/system.h
index 7944a7cfc996..ed532ba000b1 100644
--- a/arch/sparc/include/asm/system.h
+++ b/arch/sparc/include/asm/system.h
@@ -1,8 +1,6 @@
1#ifndef ___ASM_SPARC_SYSTEM_H 1/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */
2#define ___ASM_SPARC_SYSTEM_H 2#include <asm/barrier.h>
3#if defined(__sparc__) && defined(__arch64__) 3#include <asm/cpu_type.h>
4#include <asm/system_64.h> 4#include <asm/cmpxchg.h>
5#else 5#include <asm/exec.h>
6#include <asm/system_32.h> 6#include <asm/switch_to.h>
7#endif
8#endif
diff --git a/arch/sparc/include/asm/system_32.h b/arch/sparc/include/asm/system_32.h
deleted file mode 100644
index aba16092a81b..000000000000
--- a/arch/sparc/include/asm/system_32.h
+++ /dev/null
@@ -1,284 +0,0 @@
1#ifndef __SPARC_SYSTEM_H
2#define __SPARC_SYSTEM_H
3
4#include <linux/kernel.h>
5#include <linux/threads.h> /* NR_CPUS */
6#include <linux/thread_info.h>
7
8#include <asm/page.h>
9#include <asm/psr.h>
10#include <asm/ptrace.h>
11#include <asm/btfixup.h>
12#include <asm/smp.h>
13
14#ifndef __ASSEMBLY__
15
16#include <linux/irqflags.h>
17
18/*
19 * Sparc (general) CPU types
20 */
21enum sparc_cpu {
22 sun4 = 0x00,
23 sun4c = 0x01,
24 sun4m = 0x02,
25 sun4d = 0x03,
26 sun4e = 0x04,
27 sun4u = 0x05, /* V8 ploos ploos */
28 sun_unknown = 0x06,
29 ap1000 = 0x07, /* almost a sun4m */
30 sparc_leon = 0x08, /* Leon SoC */
31};
32
33/* Really, userland should not be looking at any of this... */
34#ifdef __KERNEL__
35
36extern enum sparc_cpu sparc_cpu_model;
37
38#define ARCH_SUN4C (sparc_cpu_model==sun4c)
39
40#define SUN4M_NCPUS 4 /* Architectural limit of sun4m. */
41
42extern char reboot_command[];
43
44extern struct thread_info *current_set[NR_CPUS];
45
46extern unsigned long empty_bad_page;
47extern unsigned long empty_bad_page_table;
48extern unsigned long empty_zero_page;
49
50extern void sun_do_break(void);
51extern int serial_console;
52extern int stop_a_enabled;
53extern int scons_pwroff;
54
55static inline int con_is_present(void)
56{
57 return serial_console ? 0 : 1;
58}
59
60/* When a context switch happens we must flush all user windows so that
61 * the windows of the current process are flushed onto its stack. This
62 * way the windows are all clean for the next process and the stack
63 * frames are up to date.
64 */
65extern void flush_user_windows(void);
66extern void kill_user_windows(void);
67extern void synchronize_user_stack(void);
68extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
69 void *fpqueue, unsigned long *fpqdepth);
70
71#ifdef CONFIG_SMP
72#define SWITCH_ENTER(prv) \
73 do { \
74 if (test_tsk_thread_flag(prv, TIF_USEDFPU)) { \
75 put_psr(get_psr() | PSR_EF); \
76 fpsave(&(prv)->thread.float_regs[0], &(prv)->thread.fsr, \
77 &(prv)->thread.fpqueue[0], &(prv)->thread.fpqdepth); \
78 clear_tsk_thread_flag(prv, TIF_USEDFPU); \
79 (prv)->thread.kregs->psr &= ~PSR_EF; \
80 } \
81 } while(0)
82
83#define SWITCH_DO_LAZY_FPU(next) /* */
84#else
85#define SWITCH_ENTER(prv) /* */
86#define SWITCH_DO_LAZY_FPU(nxt) \
87 do { \
88 if (last_task_used_math != (nxt)) \
89 (nxt)->thread.kregs->psr&=~PSR_EF; \
90 } while(0)
91#endif
92
93extern void flushw_all(void);
94
95/*
96 * Flush windows so that the VM switch which follows
97 * would not pull the stack from under us.
98 *
99 * SWITCH_ENTER and SWITH_DO_LAZY_FPU do not work yet (e.g. SMP does not work)
100 * XXX WTF is the above comment? Found in late teen 2.4.x.
101 */
102#define prepare_arch_switch(next) do { \
103 __asm__ __volatile__( \
104 ".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \
105 "save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
106 "save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
107 "save %sp, -0x40, %sp\n\t" \
108 "restore; restore; restore; restore; restore; restore; restore"); \
109} while(0)
110
111 /* Much care has gone into this code, do not touch it.
112 *
113 * We need to loadup regs l0/l1 for the newly forked child
114 * case because the trap return path relies on those registers
115 * holding certain values, gcc is told that they are clobbered.
116 * Gcc needs registers for 3 values in and 1 value out, so we
117 * clobber every non-fixed-usage register besides l2/l3/o4/o5. -DaveM
118 *
119 * Hey Dave, that do not touch sign is too much of an incentive
120 * - Anton & Pete
121 */
122#define switch_to(prev, next, last) do { \
123 SWITCH_ENTER(prev); \
124 SWITCH_DO_LAZY_FPU(next); \
125 cpumask_set_cpu(smp_processor_id(), mm_cpumask(next->active_mm)); \
126 __asm__ __volatile__( \
127 "sethi %%hi(here - 0x8), %%o7\n\t" \
128 "mov %%g6, %%g3\n\t" \
129 "or %%o7, %%lo(here - 0x8), %%o7\n\t" \
130 "rd %%psr, %%g4\n\t" \
131 "std %%sp, [%%g6 + %4]\n\t" \
132 "rd %%wim, %%g5\n\t" \
133 "wr %%g4, 0x20, %%psr\n\t" \
134 "nop\n\t" \
135 "std %%g4, [%%g6 + %3]\n\t" \
136 "ldd [%2 + %3], %%g4\n\t" \
137 "mov %2, %%g6\n\t" \
138 ".globl patchme_store_new_current\n" \
139"patchme_store_new_current:\n\t" \
140 "st %2, [%1]\n\t" \
141 "wr %%g4, 0x20, %%psr\n\t" \
142 "nop\n\t" \
143 "nop\n\t" \
144 "nop\n\t" /* LEON needs all 3 nops: load to %sp depends on CWP. */ \
145 "ldd [%%g6 + %4], %%sp\n\t" \
146 "wr %%g5, 0x0, %%wim\n\t" \
147 "ldd [%%sp + 0x00], %%l0\n\t" \
148 "ldd [%%sp + 0x38], %%i6\n\t" \
149 "wr %%g4, 0x0, %%psr\n\t" \
150 "nop\n\t" \
151 "nop\n\t" \
152 "jmpl %%o7 + 0x8, %%g0\n\t" \
153 " ld [%%g3 + %5], %0\n\t" \
154 "here:\n" \
155 : "=&r" (last) \
156 : "r" (&(current_set[hard_smp_processor_id()])), \
157 "r" (task_thread_info(next)), \
158 "i" (TI_KPSR), \
159 "i" (TI_KSP), \
160 "i" (TI_TASK) \
161 : "g1", "g2", "g3", "g4", "g5", "g7", \
162 "l0", "l1", "l3", "l4", "l5", "l6", "l7", \
163 "i0", "i1", "i2", "i3", "i4", "i5", \
164 "o0", "o1", "o2", "o3", "o7"); \
165 } while(0)
166
167/* XXX Change this if we ever use a PSO mode kernel. */
168#define mb() __asm__ __volatile__ ("" : : : "memory")
169#define rmb() mb()
170#define wmb() mb()
171#define read_barrier_depends() do { } while(0)
172#define set_mb(__var, __value) do { __var = __value; mb(); } while(0)
173#define smp_mb() __asm__ __volatile__("":::"memory")
174#define smp_rmb() __asm__ __volatile__("":::"memory")
175#define smp_wmb() __asm__ __volatile__("":::"memory")
176#define smp_read_barrier_depends() do { } while(0)
177
178#define nop() __asm__ __volatile__ ("nop")
179
180/* This has special calling conventions */
181#ifndef CONFIG_SMP
182BTFIXUPDEF_CALL(void, ___xchg32, void)
183#endif
184
185static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
186{
187#ifdef CONFIG_SMP
188 __asm__ __volatile__("swap [%2], %0"
189 : "=&r" (val)
190 : "0" (val), "r" (m)
191 : "memory");
192 return val;
193#else
194 register unsigned long *ptr asm("g1");
195 register unsigned long ret asm("g2");
196
197 ptr = (unsigned long *) m;
198 ret = val;
199
200 /* Note: this is magic and the nop there is
201 really needed. */
202 __asm__ __volatile__(
203 "mov %%o7, %%g4\n\t"
204 "call ___f____xchg32\n\t"
205 " nop\n\t"
206 : "=&r" (ret)
207 : "0" (ret), "r" (ptr)
208 : "g3", "g4", "g7", "memory", "cc");
209
210 return ret;
211#endif
212}
213
214#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
215
216extern void __xchg_called_with_bad_pointer(void);
217
218static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
219{
220 switch (size) {
221 case 4:
222 return xchg_u32(ptr, x);
223 }
224 __xchg_called_with_bad_pointer();
225 return x;
226}
227
228/* Emulate cmpxchg() the same way we emulate atomics,
229 * by hashing the object address and indexing into an array
230 * of spinlocks to get a bit of performance...
231 *
232 * See arch/sparc/lib/atomic32.c for implementation.
233 *
234 * Cribbed from <asm-parisc/atomic.h>
235 */
236#define __HAVE_ARCH_CMPXCHG 1
237
238/* bug catcher for when unsupported size is used - won't link */
239extern void __cmpxchg_called_with_bad_pointer(void);
240/* we only need to support cmpxchg of a u32 on sparc */
241extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
242
243/* don't worry...optimizer will get rid of most of this */
244static inline unsigned long
245__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
246{
247 switch (size) {
248 case 4:
249 return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
250 default:
251 __cmpxchg_called_with_bad_pointer();
252 break;
253 }
254 return old;
255}
256
257#define cmpxchg(ptr, o, n) \
258({ \
259 __typeof__(*(ptr)) _o_ = (o); \
260 __typeof__(*(ptr)) _n_ = (n); \
261 (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
262 (unsigned long)_n_, sizeof(*(ptr))); \
263})
264
265#include <asm-generic/cmpxchg-local.h>
266
267/*
268 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
269 * them available.
270 */
271#define cmpxchg_local(ptr, o, n) \
272 ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
273 (unsigned long)(n), sizeof(*(ptr))))
274#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
275
276extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));
277
278#endif /* __KERNEL__ */
279
280#endif /* __ASSEMBLY__ */
281
282#define arch_align_stack(x) (x)
283
284#endif /* !(__SPARC_SYSTEM_H) */
diff --git a/arch/sparc/include/asm/system_64.h b/arch/sparc/include/asm/system_64.h
deleted file mode 100644
index 10bcabce97b2..000000000000
--- a/arch/sparc/include/asm/system_64.h
+++ /dev/null
@@ -1,331 +0,0 @@
1#ifndef __SPARC64_SYSTEM_H
2#define __SPARC64_SYSTEM_H
3
4#include <asm/ptrace.h>
5#include <asm/processor.h>
6#include <asm/visasm.h>
7
8#ifndef __ASSEMBLY__
9
10#include <linux/irqflags.h>
11#include <asm-generic/cmpxchg-local.h>
12
13/*
14 * Sparc (general) CPU types
15 */
16enum sparc_cpu {
17 sun4 = 0x00,
18 sun4c = 0x01,
19 sun4m = 0x02,
20 sun4d = 0x03,
21 sun4e = 0x04,
22 sun4u = 0x05, /* V8 ploos ploos */
23 sun_unknown = 0x06,
24 ap1000 = 0x07, /* almost a sun4m */
25};
26
27#define sparc_cpu_model sun4u
28
29/* This cannot ever be a sun4c :) That's just history. */
30#define ARCH_SUN4C 0
31
32extern char reboot_command[];
33
34/* These are here in an effort to more fully work around Spitfire Errata
35 * #51. Essentially, if a memory barrier occurs soon after a mispredicted
36 * branch, the chip can stop executing instructions until a trap occurs.
37 * Therefore, if interrupts are disabled, the chip can hang forever.
38 *
39 * It used to be believed that the memory barrier had to be right in the
40 * delay slot, but a case has been traced recently wherein the memory barrier
41 * was one instruction after the branch delay slot and the chip still hung.
42 * The offending sequence was the following in sym_wakeup_done() of the
43 * sym53c8xx_2 driver:
44 *
45 * call sym_ccb_from_dsa, 0
46 * movge %icc, 0, %l0
47 * brz,pn %o0, .LL1303
48 * mov %o0, %l2
49 * membar #LoadLoad
50 *
51 * The branch has to be mispredicted for the bug to occur. Therefore, we put
52 * the memory barrier explicitly into a "branch always, predicted taken"
53 * delay slot to avoid the problem case.
54 */
55#define membar_safe(type) \
56do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
57 " membar " type "\n" \
58 "1:\n" \
59 : : : "memory"); \
60} while (0)
61
62/* The kernel always executes in TSO memory model these days,
63 * and furthermore most sparc64 chips implement more stringent
64 * memory ordering than required by the specifications.
65 */
66#define mb() membar_safe("#StoreLoad")
67#define rmb() __asm__ __volatile__("":::"memory")
68#define wmb() __asm__ __volatile__("":::"memory")
69
70#endif
71
72#define nop() __asm__ __volatile__ ("nop")
73
74#define read_barrier_depends() do { } while(0)
75#define set_mb(__var, __value) \
76 do { __var = __value; membar_safe("#StoreLoad"); } while(0)
77
78#ifdef CONFIG_SMP
79#define smp_mb() mb()
80#define smp_rmb() rmb()
81#define smp_wmb() wmb()
82#else
83#define smp_mb() __asm__ __volatile__("":::"memory")
84#define smp_rmb() __asm__ __volatile__("":::"memory")
85#define smp_wmb() __asm__ __volatile__("":::"memory")
86#endif
87
88#define smp_read_barrier_depends() do { } while(0)
89
90#define flushi(addr) __asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")
91
92#define flushw_all() __asm__ __volatile__("flushw")
93
94/* Performance counter register access. */
95#define read_pcr(__p) __asm__ __volatile__("rd %%pcr, %0" : "=r" (__p))
96#define write_pcr(__p) __asm__ __volatile__("wr %0, 0x0, %%pcr" : : "r" (__p))
97#define read_pic(__p) __asm__ __volatile__("rd %%pic, %0" : "=r" (__p))
98
99/* Blackbird errata workaround. See commentary in
100 * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt()
101 * for more information.
102 */
103#define write_pic(__p) \
104 __asm__ __volatile__("ba,pt %%xcc, 99f\n\t" \
105 " nop\n\t" \
106 ".align 64\n" \
107 "99:wr %0, 0x0, %%pic\n\t" \
108 "rd %%pic, %%g0" : : "r" (__p))
109#define reset_pic() write_pic(0)
110
111#ifndef __ASSEMBLY__
112
113extern void sun_do_break(void);
114extern int stop_a_enabled;
115extern int scons_pwroff;
116
117extern void fault_in_user_windows(void);
118extern void synchronize_user_stack(void);
119
120extern void __flushw_user(void);
121#define flushw_user() __flushw_user()
122
123#define flush_user_windows flushw_user
124#define flush_register_windows flushw_all
125
126/* Don't hold the runqueue lock over context switch */
127#define __ARCH_WANT_UNLOCKED_CTXSW
128#define prepare_arch_switch(next) \
129do { \
130 flushw_all(); \
131} while (0)
132
133 /* See what happens when you design the chip correctly?
134 *
135 * We tell gcc we clobber all non-fixed-usage registers except
136 * for l0/l1. It will use one for 'next' and the other to hold
137 * the output value of 'last'. 'next' is not referenced again
138 * past the invocation of switch_to in the scheduler, so we need
139 * not preserve it's value. Hairy, but it lets us remove 2 loads
140 * and 2 stores in this critical code path. -DaveM
141 */
142#define switch_to(prev, next, last) \
143do { flush_tlb_pending(); \
144 save_and_clear_fpu(); \
145 /* If you are tempted to conditionalize the following */ \
146 /* so that ASI is only written if it changes, think again. */ \
147 __asm__ __volatile__("wr %%g0, %0, %%asi" \
148 : : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\
149 trap_block[current_thread_info()->cpu].thread = \
150 task_thread_info(next); \
151 __asm__ __volatile__( \
152 "mov %%g4, %%g7\n\t" \
153 "stx %%i6, [%%sp + 2047 + 0x70]\n\t" \
154 "stx %%i7, [%%sp + 2047 + 0x78]\n\t" \
155 "rdpr %%wstate, %%o5\n\t" \
156 "stx %%o6, [%%g6 + %6]\n\t" \
157 "stb %%o5, [%%g6 + %5]\n\t" \
158 "rdpr %%cwp, %%o5\n\t" \
159 "stb %%o5, [%%g6 + %8]\n\t" \
160 "wrpr %%g0, 15, %%pil\n\t" \
161 "mov %4, %%g6\n\t" \
162 "ldub [%4 + %8], %%g1\n\t" \
163 "wrpr %%g1, %%cwp\n\t" \
164 "ldx [%%g6 + %6], %%o6\n\t" \
165 "ldub [%%g6 + %5], %%o5\n\t" \
166 "ldub [%%g6 + %7], %%o7\n\t" \
167 "wrpr %%o5, 0x0, %%wstate\n\t" \
168 "ldx [%%sp + 2047 + 0x70], %%i6\n\t" \
169 "ldx [%%sp + 2047 + 0x78], %%i7\n\t" \
170 "ldx [%%g6 + %9], %%g4\n\t" \
171 "wrpr %%g0, 14, %%pil\n\t" \
172 "brz,pt %%o7, switch_to_pc\n\t" \
173 " mov %%g7, %0\n\t" \
174 "sethi %%hi(ret_from_syscall), %%g1\n\t" \
175 "jmpl %%g1 + %%lo(ret_from_syscall), %%g0\n\t" \
176 " nop\n\t" \
177 ".globl switch_to_pc\n\t" \
178 "switch_to_pc:\n\t" \
179 : "=&r" (last), "=r" (current), "=r" (current_thread_info_reg), \
180 "=r" (__local_per_cpu_offset) \
181 : "0" (task_thread_info(next)), \
182 "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD), \
183 "i" (TI_CWP), "i" (TI_TASK) \
184 : "cc", \
185 "g1", "g2", "g3", "g7", \
186 "l1", "l2", "l3", "l4", "l5", "l6", "l7", \
187 "i0", "i1", "i2", "i3", "i4", "i5", \
188 "o0", "o1", "o2", "o3", "o4", "o5", "o7"); \
189} while(0)
190
191static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
192{
193 unsigned long tmp1, tmp2;
194
195 __asm__ __volatile__(
196" mov %0, %1\n"
197"1: lduw [%4], %2\n"
198" cas [%4], %2, %0\n"
199" cmp %2, %0\n"
200" bne,a,pn %%icc, 1b\n"
201" mov %1, %0\n"
202 : "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
203 : "0" (val), "r" (m)
204 : "cc", "memory");
205 return val;
206}
207
208static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
209{
210 unsigned long tmp1, tmp2;
211
212 __asm__ __volatile__(
213" mov %0, %1\n"
214"1: ldx [%4], %2\n"
215" casx [%4], %2, %0\n"
216" cmp %2, %0\n"
217" bne,a,pn %%xcc, 1b\n"
218" mov %1, %0\n"
219 : "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
220 : "0" (val), "r" (m)
221 : "cc", "memory");
222 return val;
223}
224
225#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
226
227extern void __xchg_called_with_bad_pointer(void);
228
229static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
230 int size)
231{
232 switch (size) {
233 case 4:
234 return xchg32(ptr, x);
235 case 8:
236 return xchg64(ptr, x);
237 }
238 __xchg_called_with_bad_pointer();
239 return x;
240}
241
242extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));
243
244/*
245 * Atomic compare and exchange. Compare OLD with MEM, if identical,
246 * store NEW in MEM. Return the initial value in MEM. Success is
247 * indicated by comparing RETURN with OLD.
248 */
249
250#define __HAVE_ARCH_CMPXCHG 1
251
252static inline unsigned long
253__cmpxchg_u32(volatile int *m, int old, int new)
254{
255 __asm__ __volatile__("cas [%2], %3, %0"
256 : "=&r" (new)
257 : "0" (new), "r" (m), "r" (old)
258 : "memory");
259
260 return new;
261}
262
263static inline unsigned long
264__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
265{
266 __asm__ __volatile__("casx [%2], %3, %0"
267 : "=&r" (new)
268 : "0" (new), "r" (m), "r" (old)
269 : "memory");
270
271 return new;
272}
273
274/* This function doesn't exist, so you'll get a linker error
275 if something tries to do an invalid cmpxchg(). */
276extern void __cmpxchg_called_with_bad_pointer(void);
277
278static inline unsigned long
279__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
280{
281 switch (size) {
282 case 4:
283 return __cmpxchg_u32(ptr, old, new);
284 case 8:
285 return __cmpxchg_u64(ptr, old, new);
286 }
287 __cmpxchg_called_with_bad_pointer();
288 return old;
289}
290
291#define cmpxchg(ptr,o,n) \
292 ({ \
293 __typeof__(*(ptr)) _o_ = (o); \
294 __typeof__(*(ptr)) _n_ = (n); \
295 (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
296 (unsigned long)_n_, sizeof(*(ptr))); \
297 })
298
299/*
300 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
301 * them available.
302 */
303
304static inline unsigned long __cmpxchg_local(volatile void *ptr,
305 unsigned long old,
306 unsigned long new, int size)
307{
308 switch (size) {
309 case 4:
310 case 8: return __cmpxchg(ptr, old, new, size);
311 default:
312 return __cmpxchg_local_generic(ptr, old, new, size);
313 }
314
315 return old;
316}
317
318#define cmpxchg_local(ptr, o, n) \
319 ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
320 (unsigned long)(n), sizeof(*(ptr))))
321#define cmpxchg64_local(ptr, o, n) \
322 ({ \
323 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
324 cmpxchg_local((ptr), (o), (n)); \
325 })
326
327#endif /* !(__ASSEMBLY__) */
328
329#define arch_align_stack(x) (x)
330
331#endif /* !(__SPARC64_SYSTEM_H) */
diff --git a/arch/sparc/include/asm/timer_32.h b/arch/sparc/include/asm/timer_32.h
index 2ec030ef3810..1a91e11dd104 100644
--- a/arch/sparc/include/asm/timer_32.h
+++ b/arch/sparc/include/asm/timer_32.h
@@ -8,12 +8,13 @@
8#ifndef _SPARC_TIMER_H 8#ifndef _SPARC_TIMER_H
9#define _SPARC_TIMER_H 9#define _SPARC_TIMER_H
10 10
11#include <asm/system.h> /* For SUN4M_NCPUS */ 11#include <asm/cpu_type.h> /* For SUN4M_NCPUS */
12#include <asm/btfixup.h> 12#include <asm/btfixup.h>
13 13
14extern __volatile__ unsigned int *master_l10_counter; 14extern __volatile__ unsigned int *master_l10_counter;
15 15
16/* FIXME: Make do_[gs]ettimeofday btfixup calls */ 16/* FIXME: Make do_[gs]ettimeofday btfixup calls */
17struct timespec;
17BTFIXUPDEF_CALL(int, bus_do_settimeofday, struct timespec *tv) 18BTFIXUPDEF_CALL(int, bus_do_settimeofday, struct timespec *tv)
18#define bus_do_settimeofday(tv) BTFIXUP_CALL(bus_do_settimeofday)(tv) 19#define bus_do_settimeofday(tv) BTFIXUP_CALL(bus_do_settimeofday)(tv)
19 20
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index 3e1449f07798..a1091afb8831 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -11,7 +11,6 @@
11#include <linux/string.h> 11#include <linux/string.h>
12#include <linux/thread_info.h> 12#include <linux/thread_info.h>
13#include <asm/asi.h> 13#include <asm/asi.h>
14#include <asm/system.h>
15#include <asm/spitfire.h> 14#include <asm/spitfire.h>
16#include <asm-generic/uaccess-unaligned.h> 15#include <asm-generic/uaccess-unaligned.h>
17#endif 16#endif
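For completeness, a typical caller of the relocated cmpxchg() loops until the compare-and-swap succeeds, because cmpxchg(ptr, old, new) returns the value that was actually found at ptr. A usage sketch (hypothetical helper, not part of this patch):

    /* Raise *p to at least val, even under concurrent updates. */
    static inline void u32_store_max(volatile u32 *p, u32 val)
    {
            u32 old;

            do {
                    old = *p;
                    if (old >= val)
                            return;
                    /* Retry if another CPU modified *p between the load
                     * above and the compare-and-swap. */
            } while (cmpxchg(p, old, val) != old);
    }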