author    Linus Torvalds <torvalds@linux-foundation.org>  2012-03-28 18:58:21 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-03-28 18:58:21 -0400
commit    0195c00244dc2e9f522475868fa278c473ba7339 (patch)
tree      f97ca98ae64ede2c33ad3de05ed7bbfa4f4495ed /arch/powerpc/include/asm
parent    f21ce8f8447c8be8847dadcfdbcc76b0d7365fa5 (diff)
parent    141124c02059eee9dbc5c86ea797b1ca888e77f7 (diff)
Merge tag 'split-asm_system_h-for-linus-20120328' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-asm_system
Pull "Disintegrate and delete asm/system.h" from David Howells: "Here are a bunch of patches to disintegrate asm/system.h into a set of separate bits to relieve the problem of circular inclusion dependencies. I've built all the working defconfigs from all the arches that I can and made sure that they don't break. The reason for these patches is that I recently encountered a circular dependency problem that came about when I produced some patches to optimise get_order() by rewriting it to use ilog2(). This uses bitops - and on the SH arch asm/bitops.h drags in asm-generic/get_order.h by a circuituous route involving asm/system.h. The main difficulty seems to be asm/system.h. It holds a number of low level bits with no/few dependencies that are commonly used (eg. memory barriers) and a number of bits with more dependencies that aren't used in many places (eg. switch_to()). These patches break asm/system.h up into the following core pieces: (1) asm/barrier.h Move memory barriers here. This already done for MIPS and Alpha. (2) asm/switch_to.h Move switch_to() and related stuff here. (3) asm/exec.h Move arch_align_stack() here. Other process execution related bits could perhaps go here from asm/processor.h. (4) asm/cmpxchg.h Move xchg() and cmpxchg() here as they're full word atomic ops and frequently used by atomic_xchg() and atomic_cmpxchg(). (5) asm/bug.h Move die() and related bits. (6) asm/auxvec.h Move AT_VECTOR_SIZE_ARCH here. Other arch headers are created as needed on a per-arch basis." Fixed up some conflicts from other header file cleanups and moving code around that has happened in the meantime, so David's testing is somewhat weakened by that. We'll find out anything that got broken and fix it.. * tag 'split-asm_system_h-for-linus-20120328' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-asm_system: (38 commits) Delete all instances of asm/system.h Remove all #inclusions of asm/system.h Add #includes needed to permit the removal of asm/system.h Move all declarations of free_initmem() to linux/mm.h Disintegrate asm/system.h for OpenRISC Split arch_align_stack() out from asm-generic/system.h Split the switch_to() wrapper out of asm-generic/system.h Move the asm-generic/system.h xchg() implementation to asm-generic/cmpxchg.h Create asm-generic/barrier.h Make asm-generic/cmpxchg.h #include asm-generic/cmpxchg-local.h Disintegrate asm/system.h for Xtensa Disintegrate asm/system.h for Unicore32 [based on ver #3, changed by gxt] Disintegrate asm/system.h for Tile Disintegrate asm/system.h for Sparc Disintegrate asm/system.h for SH Disintegrate asm/system.h for Score Disintegrate asm/system.h for S390 Disintegrate asm/system.h for PowerPC Disintegrate asm/system.h for PA-RISC Disintegrate asm/system.h for MN10300 ...
Diffstat (limited to 'arch/powerpc/include/asm')
-rw-r--r--  arch/powerpc/include/asm/atomic.h        |   8
-rw-r--r--  arch/powerpc/include/asm/auxvec.h        |   2
-rw-r--r--  arch/powerpc/include/asm/barrier.h       |  68
-rw-r--r--  arch/powerpc/include/asm/bug.h           |  11
-rw-r--r--  arch/powerpc/include/asm/cache.h         |  16
-rw-r--r--  arch/powerpc/include/asm/cmpxchg.h       | 309
-rw-r--r--  arch/powerpc/include/asm/debug.h         |  56
-rw-r--r--  arch/powerpc/include/asm/dma.h           |   1
-rw-r--r--  arch/powerpc/include/asm/exec.h          |   9
-rw-r--r--  arch/powerpc/include/asm/hw_breakpoint.h |   2
-rw-r--r--  arch/powerpc/include/asm/processor.h     |  30
-rw-r--r--  arch/powerpc/include/asm/reg_booke.h     |   5
-rw-r--r--  arch/powerpc/include/asm/rtas.h          |   2
-rw-r--r--  arch/powerpc/include/asm/runlatch.h      |  45
-rw-r--r--  arch/powerpc/include/asm/setup.h         |  24
-rw-r--r--  arch/powerpc/include/asm/switch_to.h     |  65
-rw-r--r--  arch/powerpc/include/asm/system.h        | 592
17 files changed, 644 insertions, 601 deletions
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 14174e838ad9..da29032ae38f 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -5,13 +5,9 @@
  * PowerPC atomic operations
  */
 
-#include <linux/types.h>
-
 #ifdef __KERNEL__
-#include <linux/compiler.h>
-#include <asm/synch.h>
-#include <asm/asm-compat.h>
-#include <asm/system.h>
+#include <linux/types.h>
+#include <asm/cmpxchg.h>
 
 #define ATOMIC_INIT(i)		{ (i) }
 
diff --git a/arch/powerpc/include/asm/auxvec.h b/arch/powerpc/include/asm/auxvec.h
index 19a099b62cd6..ce17d2c9eb4e 100644
--- a/arch/powerpc/include/asm/auxvec.h
+++ b/arch/powerpc/include/asm/auxvec.h
@@ -16,4 +16,6 @@
  */
 #define AT_SYSINFO_EHDR		33
 
+#define AT_VECTOR_SIZE_ARCH	6 /* entries in ARCH_DLINFO */
+
 #endif
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
new file mode 100644
index 000000000000..ae782254e731
--- /dev/null
+++ b/arch/powerpc/include/asm/barrier.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_BARRIER_H
+#define _ASM_POWERPC_BARRIER_H
+
+/*
+ * Memory barrier.
+ * The sync instruction guarantees that all memory accesses initiated
+ * by this processor have been performed (with respect to all other
+ * mechanisms that access memory).  The eieio instruction is a barrier
+ * providing an ordering (separately) for (a) cacheable stores and (b)
+ * loads and stores to non-cacheable memory (e.g. I/O devices).
+ *
+ * mb() prevents loads and stores being reordered across this point.
+ * rmb() prevents loads being reordered across this point.
+ * wmb() prevents stores being reordered across this point.
+ * read_barrier_depends() prevents data-dependent loads being reordered
+ * across this point (nop on PPC).
+ *
+ * *mb() variants without smp_ prefix must order all types of memory
+ * operations with one another. sync is the only instruction sufficient
+ * to do this.
+ *
+ * For the smp_ barriers, ordering is for cacheable memory operations
+ * only. We have to use the sync instruction for smp_mb(), since lwsync
+ * doesn't order loads with respect to previous stores.  Lwsync can be
+ * used for smp_rmb() and smp_wmb().
+ *
+ * However, on CPUs that don't support lwsync, lwsync actually maps to a
+ * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
+ */
+#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
+#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
+#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
+#define read_barrier_depends()  do { } while(0)
+
+#define set_mb(var, value)	do { var = value; mb(); } while (0)
+
+#ifdef CONFIG_SMP
+
+#ifdef __SUBARCH_HAS_LWSYNC
+#    define SMPWMB      LWSYNC
+#else
+#    define SMPWMB      eieio
+#endif
+
+#define smp_mb()	mb()
+#define smp_rmb()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
+#define smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
+#define smp_read_barrier_depends()	read_barrier_depends()
+#else
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	do { } while(0)
+#endif /* CONFIG_SMP */
+
+/*
+ * This is a barrier which prevents following instructions from being
+ * started until the value of the argument x is known.  For example, if
+ * x is a variable loaded from memory, this prevents following
+ * instructions from being executed until the load has been performed.
+ */
+#define data_barrier(x)	\
+	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
+
+#endif /* _ASM_POWERPC_BARRIER_H */
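A hedged usage sketch of the smp_wmb()/smp_rmb() pairing described in the header comment above; the two variables are illustrative, not from the kernel:

```c
#include <asm/barrier.h>

static int shared_data;
static int data_ready;

void producer(int v)
{
	shared_data = v;
	smp_wmb();	/* lwsync/eieio: data store ordered before flag store */
	data_ready = 1;
}

int consumer(void)
{
	if (!data_ready)
		return -1;	/* nothing published yet */
	smp_rmb();	/* lwsync: flag load ordered before data load */
	return shared_data;
}
```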
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index 065c590c991d..3eb53d741070 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -126,5 +126,16 @@
 
 #include <asm-generic/bug.h>
 
+#ifndef __ASSEMBLY__
+
+struct pt_regs;
+extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
+extern void bad_page_fault(struct pt_regs *, unsigned long, int);
+extern void _exception(int, struct pt_regs *, int, unsigned long);
+extern void die(const char *, struct pt_regs *, long);
+extern void print_backtrace(unsigned long *);
+
+#endif /* !__ASSEMBLY__ */
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_BUG_H */
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 4b509411ad8a..9e495c9a6a88 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -42,8 +42,24 @@ extern struct ppc64_caches ppc64_caches;
 #endif /* __powerpc64__ && ! __ASSEMBLY__ */
 
 #if !defined(__ASSEMBLY__)
+
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+
+#ifdef CONFIG_6xx
+extern long _get_L2CR(void);
+extern long _get_L3CR(void);
+extern void _set_L2CR(unsigned long);
+extern void _set_L3CR(unsigned long);
+#else
+#define _get_L2CR()	0L
+#define _get_L3CR()	0L
+#define _set_L2CR(val)	do { } while(0)
+#define _set_L3CR(val)	do { } while(0)
 #endif
 
+extern void cacheable_memzero(void *p, unsigned int nb);
+extern void *cacheable_memcpy(void *, const void *, unsigned int);
+
+#endif /* !__ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_CACHE_H */
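The #else stubs above exist so callers need not be conditional on CONFIG_6xx themselves. A hedged sketch of that design choice; the L2 enable bit value is an assumption taken from the 750-class L2CR layout:

```c
#include <asm/cache.h>

#define L2CR_L2E_SKETCH	0x80000000	/* assumed L2 enable bit (750-class) */

static void l2_enable_sketch(void)
{
	unsigned long l2cr = _get_L2CR();	/* just 0L on non-6xx builds */

	if (!(l2cr & L2CR_L2E_SKETCH))
		_set_L2CR(l2cr | L2CR_L2E_SKETCH);	/* no-op on non-6xx */
}
```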
diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..e245aab7f191
--- /dev/null
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -0,0 +1,309 @@
+#ifndef _ASM_POWERPC_CMPXCHG_H_
+#define _ASM_POWERPC_CMPXCHG_H_
+
+#ifdef __KERNEL__
+#include <linux/compiler.h>
+#include <asm/synch.h>
+#include <asm/asm-compat.h>
+
+/*
+ * Atomic exchange
+ *
+ * Changes the memory location '*ptr' to be val and returns
+ * the previous value stored there.
+ */
+static __always_inline unsigned long
+__xchg_u32(volatile void *p, unsigned long val)
+{
+	unsigned long prev;
+
+	__asm__ __volatile__(
+	PPC_RELEASE_BARRIER
+"1:	lwarx	%0,0,%2 \n"
+	PPC405_ERR77(0,%2)
+"	stwcx.	%3,0,%2 \n\
+	bne-	1b"
+	PPC_ACQUIRE_BARRIER
+	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
+	: "r" (p), "r" (val)
+	: "cc", "memory");
+
+	return prev;
+}
+
+/*
+ * Atomic exchange
+ *
+ * Changes the memory location '*ptr' to be val and returns
+ * the previous value stored there.
+ */
+static __always_inline unsigned long
+__xchg_u32_local(volatile void *p, unsigned long val)
+{
+	unsigned long prev;
+
+	__asm__ __volatile__(
+"1:	lwarx	%0,0,%2 \n"
+	PPC405_ERR77(0,%2)
+"	stwcx.	%3,0,%2 \n\
+	bne-	1b"
+	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
+	: "r" (p), "r" (val)
+	: "cc", "memory");
+
+	return prev;
+}
+
+#ifdef CONFIG_PPC64
+static __always_inline unsigned long
+__xchg_u64(volatile void *p, unsigned long val)
+{
+	unsigned long prev;
+
+	__asm__ __volatile__(
+	PPC_RELEASE_BARRIER
+"1:	ldarx	%0,0,%2 \n"
+	PPC405_ERR77(0,%2)
+"	stdcx.	%3,0,%2 \n\
+	bne-	1b"
+	PPC_ACQUIRE_BARRIER
+	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
+	: "r" (p), "r" (val)
+	: "cc", "memory");
+
+	return prev;
+}
+
+static __always_inline unsigned long
+__xchg_u64_local(volatile void *p, unsigned long val)
+{
+	unsigned long prev;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%2 \n"
+	PPC405_ERR77(0,%2)
+"	stdcx.	%3,0,%2 \n\
+	bne-	1b"
+	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
+	: "r" (p), "r" (val)
+	: "cc", "memory");
+
+	return prev;
+}
+#endif
+
+/*
+ * This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid xchg().
+ */
+extern void __xchg_called_with_bad_pointer(void);
+
+static __always_inline unsigned long
+__xchg(volatile void *ptr, unsigned long x, unsigned int size)
+{
+	switch (size) {
+	case 4:
+		return __xchg_u32(ptr, x);
+#ifdef CONFIG_PPC64
+	case 8:
+		return __xchg_u64(ptr, x);
+#endif
+	}
+	__xchg_called_with_bad_pointer();
+	return x;
+}
+
+static __always_inline unsigned long
+__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
+{
+	switch (size) {
+	case 4:
+		return __xchg_u32_local(ptr, x);
+#ifdef CONFIG_PPC64
+	case 8:
+		return __xchg_u64_local(ptr, x);
+#endif
+	}
+	__xchg_called_with_bad_pointer();
+	return x;
+}
+#define xchg(ptr,x)							     \
+  ({									     \
+     __typeof__(*(ptr)) _x_ = (x);					     \
+     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
+  })
+
+#define xchg_local(ptr,x)						     \
+  ({									     \
+     __typeof__(*(ptr)) _x_ = (x);					     \
+     (__typeof__(*(ptr))) __xchg_local((ptr),				     \
+		(unsigned long)_x_, sizeof(*(ptr)));			     \
+  })
+
+/*
+ * Compare and exchange - if *p == old, set it to new,
+ * and return the old value of *p.
+ */
+#define __HAVE_ARCH_CMPXCHG	1
+
+static __always_inline unsigned long
+__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
+{
+	unsigned int prev;
+
+	__asm__ __volatile__ (
+	PPC_RELEASE_BARRIER
+"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
+	cmpw	0,%0,%3\n\
+	bne-	2f\n"
+	PPC405_ERR77(0,%2)
+"	stwcx.	%4,0,%2\n\
+	bne-	1b"
+	PPC_ACQUIRE_BARRIER
+	"\n\
+2:"
+	: "=&r" (prev), "+m" (*p)
+	: "r" (p), "r" (old), "r" (new)
+	: "cc", "memory");
+
+	return prev;
+}
+
+static __always_inline unsigned long
+__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
+			unsigned long new)
+{
+	unsigned int prev;
+
+	__asm__ __volatile__ (
+"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
+	cmpw	0,%0,%3\n\
+	bne-	2f\n"
+	PPC405_ERR77(0,%2)
+"	stwcx.	%4,0,%2\n\
+	bne-	1b"
+	"\n\
+2:"
+	: "=&r" (prev), "+m" (*p)
+	: "r" (p), "r" (old), "r" (new)
+	: "cc", "memory");
+
+	return prev;
+}
+
+#ifdef CONFIG_PPC64
+static __always_inline unsigned long
+__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
+{
+	unsigned long prev;
+
+	__asm__ __volatile__ (
+	PPC_RELEASE_BARRIER
+"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
+	cmpd	0,%0,%3\n\
+	bne-	2f\n\
+	stdcx.	%4,0,%2\n\
+	bne-	1b"
+	PPC_ACQUIRE_BARRIER
+	"\n\
+2:"
+	: "=&r" (prev), "+m" (*p)
+	: "r" (p), "r" (old), "r" (new)
+	: "cc", "memory");
+
+	return prev;
+}
+
+static __always_inline unsigned long
+__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
+			unsigned long new)
+{
+	unsigned long prev;
+
+	__asm__ __volatile__ (
+"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
+	cmpd	0,%0,%3\n\
+	bne-	2f\n\
+	stdcx.	%4,0,%2\n\
+	bne-	1b"
+	"\n\
+2:"
+	: "=&r" (prev), "+m" (*p)
+	: "r" (p), "r" (old), "r" (new)
+	: "cc", "memory");
+
+	return prev;
+}
+#endif
+
+/* This function doesn't exist, so you'll get a linker error
+   if something tries to do an invalid cmpxchg().  */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static __always_inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
+	  unsigned int size)
+{
+	switch (size) {
+	case 4:
+		return __cmpxchg_u32(ptr, old, new);
+#ifdef CONFIG_PPC64
+	case 8:
+		return __cmpxchg_u64(ptr, old, new);
+#endif
+	}
+	__cmpxchg_called_with_bad_pointer();
+	return old;
+}
+
+static __always_inline unsigned long
+__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
+	  unsigned int size)
+{
+	switch (size) {
+	case 4:
+		return __cmpxchg_u32_local(ptr, old, new);
+#ifdef CONFIG_PPC64
+	case 8:
+		return __cmpxchg_u64_local(ptr, old, new);
+#endif
+	}
+	__cmpxchg_called_with_bad_pointer();
+	return old;
+}
+
+#define cmpxchg(ptr, o, n)						 \
+  ({									 \
+     __typeof__(*(ptr)) _o_ = (o);					 \
+     __typeof__(*(ptr)) _n_ = (n);					 \
+     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
+				    (unsigned long)_n_, sizeof(*(ptr))); \
+  })
+
+
+#define cmpxchg_local(ptr, o, n)					 \
+  ({									 \
+     __typeof__(*(ptr)) _o_ = (o);					 \
+     __typeof__(*(ptr)) _n_ = (n);					 \
+     (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,	 \
+				    (unsigned long)_n_, sizeof(*(ptr))); \
+  })
+
+#ifdef CONFIG_PPC64
+#define cmpxchg64(ptr, o, n)						\
+  ({									\
+	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
+	cmpxchg((ptr), (o), (n));					\
+  })
+#define cmpxchg64_local(ptr, o, n)					\
+  ({									\
+	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
+	cmpxchg_local((ptr), (o), (n));					\
+  })
+#else
+#include <asm-generic/cmpxchg-local.h>
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_CMPXCHG_H_ */
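A hedged usage sketch of the cmpxchg() defined above: the standard compare-and-swap retry loop. The clamped counter is illustrative:

```c
#include <asm/cmpxchg.h>

static unsigned int counter;

static void add_clamped_sketch(unsigned int n, unsigned int limit)
{
	unsigned int old, new;

	do {
		old = counter;
		new = old + n;
		if (new > limit)
			new = limit;
		/* retry if another CPU changed counter in the meantime */
	} while (cmpxchg(&counter, old, new) != old);
}
```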
diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h
new file mode 100644
index 000000000000..716d2f089eb6
--- /dev/null
+++ b/arch/powerpc/include/asm/debug.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_DEBUG_H
+#define _ASM_POWERPC_DEBUG_H
+
+struct pt_regs;
+
+extern struct dentry *powerpc_debugfs_root;
+
+#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
+
+extern int (*__debugger)(struct pt_regs *regs);
+extern int (*__debugger_ipi)(struct pt_regs *regs);
+extern int (*__debugger_bpt)(struct pt_regs *regs);
+extern int (*__debugger_sstep)(struct pt_regs *regs);
+extern int (*__debugger_iabr_match)(struct pt_regs *regs);
+extern int (*__debugger_dabr_match)(struct pt_regs *regs);
+extern int (*__debugger_fault_handler)(struct pt_regs *regs);
+
+#define DEBUGGER_BOILERPLATE(__NAME) \
+static inline int __NAME(struct pt_regs *regs) \
+{ \
+	if (unlikely(__ ## __NAME)) \
+		return __ ## __NAME(regs); \
+	return 0; \
+}
+
+DEBUGGER_BOILERPLATE(debugger)
+DEBUGGER_BOILERPLATE(debugger_ipi)
+DEBUGGER_BOILERPLATE(debugger_bpt)
+DEBUGGER_BOILERPLATE(debugger_sstep)
+DEBUGGER_BOILERPLATE(debugger_iabr_match)
+DEBUGGER_BOILERPLATE(debugger_dabr_match)
+DEBUGGER_BOILERPLATE(debugger_fault_handler)
+
+#else
+static inline int debugger(struct pt_regs *regs) { return 0; }
+static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
+static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
+static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
+static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
+static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
+static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
+#endif
+
+extern int set_dabr(unsigned long dabr);
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+extern void do_send_trap(struct pt_regs *regs, unsigned long address,
+			 unsigned long error_code, int signal_code, int brkpt);
+#else
+extern void do_dabr(struct pt_regs *regs, unsigned long address,
+		    unsigned long error_code);
+#endif
+
+#endif /* _ASM_POWERPC_DEBUG_H */
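A hedged sketch of how the hook pointers above are consumed: an in-kernel debugger installs a handler, and the DEBUGGER_BOILERPLATE() wrappers call it only when the pointer is non-NULL. The handler name and body are illustrative, and this only builds with CONFIG_DEBUGGER or CONFIG_KEXEC set:

```c
#include <asm/debug.h>

static int my_bpt_sketch(struct pt_regs *regs)
{
	/* non-zero return means "breakpoint handled here" */
	return 1;
}

static void install_debugger_sketch(void)
{
	__debugger_bpt = my_bpt_sketch;	/* debugger_bpt() now calls us */
}
```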
diff --git a/arch/powerpc/include/asm/dma.h b/arch/powerpc/include/asm/dma.h
index adadb9943610..f6813e919bb2 100644
--- a/arch/powerpc/include/asm/dma.h
+++ b/arch/powerpc/include/asm/dma.h
@@ -24,7 +24,6 @@
 
 #include <asm/io.h>
 #include <linux/spinlock.h>
-#include <asm/system.h>
 
 #ifndef MAX_DMA_CHANNELS
 #define MAX_DMA_CHANNELS	8
diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
new file mode 100644
index 000000000000..8196e9c7d7e8
--- /dev/null
+++ b/arch/powerpc/include/asm/exec.h
@@ -0,0 +1,9 @@
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_EXEC_H
+#define _ASM_POWERPC_EXEC_H
+
+extern unsigned long arch_align_stack(unsigned long sp);
+
+#endif /* _ASM_POWERPC_EXEC_H */
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index 80fd4d2b4a62..be04330af751 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -35,7 +35,7 @@ struct arch_hw_breakpoint {
 
 #include <linux/kdebug.h>
 #include <asm/reg.h>
-#include <asm/system.h>
+#include <asm/debug.h>
 
 struct perf_event;
 struct pmu;
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index b585bff1a022..8e2d0371fe1e 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -385,6 +385,36 @@ static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
 extern unsigned long cpuidle_disable;
 enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
 
+extern int powersave_nap;	/* set if nap mode can be used in idle loop */
+void cpu_idle_wait(void);
+
+#ifdef CONFIG_PSERIES_IDLE
+extern void update_smt_snooze_delay(int snooze);
+extern int pseries_notify_cpuidle_add_cpu(int cpu);
+#else
+static inline void update_smt_snooze_delay(int snooze) {}
+static inline int pseries_notify_cpuidle_add_cpu(int cpu) { return 0; }
+#endif
+
+extern void flush_instruction_cache(void);
+extern void hard_reset_now(void);
+extern void poweroff_now(void);
+extern int fix_alignment(struct pt_regs *);
+extern void cvt_fd(float *from, double *to);
+extern void cvt_df(double *from, float *to);
+extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
+
+#ifdef CONFIG_PPC64
+/*
+ * We handle most unaligned accesses in hardware. On the other hand
+ * unaligned DMA can be very expensive on some ppc64 IO chips (it does
+ * powers of 2 writes until it reaches sufficient alignment).
+ *
+ * Based on this we disable the IP header alignment in network drivers.
+ */
+#define NET_IP_ALIGN	0
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_POWERPC_PROCESSOR_H */
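A hedged sketch of what the NET_IP_ALIGN override above affects: receive paths reserve NET_IP_ALIGN bytes so the IP header lands on a 4-byte boundary, and with the ppc64 value of 0 the reserve becomes a no-op, keeping the DMA buffer naturally aligned. dev_alloc_skb()/skb_reserve() are the usual APIs; the function itself is illustrative:

```c
#include <linux/skbuff.h>

static struct sk_buff *rx_alloc_sketch(unsigned int len)
{
	struct sk_buff *skb = dev_alloc_skb(len + NET_IP_ALIGN);

	if (skb)
		skb_reserve(skb, NET_IP_ALIGN);	/* reserves 0 bytes on ppc64 */
	return skb;
}
```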
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 8a97aa7289d3..b86faa9107da 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -15,6 +15,11 @@
 #ifndef __ASM_POWERPC_REG_BOOKE_H__
 #define __ASM_POWERPC_REG_BOOKE_H__
 
+#ifdef CONFIG_BOOKE_WDT
+extern u32 booke_wdt_enabled;
+extern u32 booke_wdt_period;
+#endif /* CONFIG_BOOKE_WDT */
+
 /* Machine State Register (MSR) Fields */
 #define MSR_GS		(1<<28) /* Guest state */
 #define MSR_UCLE	(1<<26)	/* User-mode cache lock enable */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index f0a4db31ecb6..557cff845dee 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -357,5 +357,7 @@ static inline int page_is_rtas_user_buf(unsigned long pfn)
 static inline int page_is_rtas_user_buf(unsigned long pfn) { return 0;}
 #endif
 
+extern int call_rtas(const char *, int, int, unsigned long *, ...);
+
 #endif /* __KERNEL__ */
 #endif /* _POWERPC_RTAS_H */
diff --git a/arch/powerpc/include/asm/runlatch.h b/arch/powerpc/include/asm/runlatch.h
new file mode 100644
index 000000000000..54e9b963876e
--- /dev/null
+++ b/arch/powerpc/include/asm/runlatch.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_RUNLATCH_H
+#define _ASM_POWERPC_RUNLATCH_H
+
+#ifdef CONFIG_PPC64
+
+extern void __ppc64_runlatch_on(void);
+extern void __ppc64_runlatch_off(void);
+
+/*
+ * We manually hard enable-disable, this is called
+ * in the idle loop and we don't want to mess up
+ * with soft-disable/enable & interrupt replay.
+ */
+#define ppc64_runlatch_off()					\
+	do {							\
+		if (cpu_has_feature(CPU_FTR_CTRL) &&		\
+		    test_thread_local_flags(_TLF_RUNLATCH)) {	\
+			unsigned long msr = mfmsr();		\
+			__hard_irq_disable();			\
+			__ppc64_runlatch_off();			\
+			if (msr & MSR_EE)			\
+				__hard_irq_enable();		\
+		}						\
+	} while (0)
+
+#define ppc64_runlatch_on()					\
+	do {							\
+		if (cpu_has_feature(CPU_FTR_CTRL) &&		\
+		    !test_thread_local_flags(_TLF_RUNLATCH)) {	\
+			unsigned long msr = mfmsr();		\
+			__hard_irq_disable();			\
+			__ppc64_runlatch_on();			\
+			if (msr & MSR_EE)			\
+				__hard_irq_enable();		\
+		}						\
+	} while (0)
+#else
+#define ppc64_runlatch_on()
+#define ppc64_runlatch_off()
+#endif /* CONFIG_PPC64 */
+
+#endif /* _ASM_POWERPC_RUNLATCH_H */
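A hedged sketch of the intended caller, per the comment above about the idle loop; everything except the ppc64_runlatch_*() macros is illustrative:

```c
#include <linux/sched.h>
#include <asm/runlatch.h>

static void idle_loop_sketch(void)
{
	while (!need_resched()) {
		ppc64_runlatch_off();	/* tell the core we are idle */
		/* ... nap or wait for an interrupt ... */
	}
	ppc64_runlatch_on();		/* back to useful work */
}
```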
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 186e0fb835bd..d084ce195fc3 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -5,6 +5,28 @@
 
 #ifndef __ASSEMBLY__
 extern void ppc_printk_progress(char *s, unsigned short hex);
-#endif
+
+extern unsigned int rtas_data;
+extern int mem_init_done;	/* set on boot once kmalloc can be called */
+extern int init_bootmem_done;	/* set once bootmem is available */
+extern phys_addr_t memory_limit;
+extern unsigned long klimit;
+extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
+
+extern void via_cuda_init(void);
+extern void read_rtc_time(void);
+extern void pmac_find_display(void);
+
+struct device_node;
+extern void note_scsi_host(struct device_node *, void *);
+
+/* Used in very early kernel initialization. */
+extern unsigned long reloc_offset(void);
+extern unsigned long add_reloc_offset(unsigned long);
+extern void reloc_got2(unsigned long);
+
+#define PTRRELOC(x)	((typeof(x)) add_reloc_offset((unsigned long)(x)))
+
+#endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_SETUP_H */
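A hedged sketch of the PTRRELOC() idiom declared above: before the kernel runs at its linked address, a symbol's address must be corrected by the relocation offset before it is dereferenced. The variable and function are illustrative:

```c
#include <asm/setup.h>

static int boot_flag_sketch;

static void very_early_init_sketch(void)
{
	/* correct the address while we still run below the link address */
	int *p = PTRRELOC(&boot_flag_sketch);

	*p = 1;
}
```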
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
new file mode 100644
index 000000000000..caf82d0a00de
--- /dev/null
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_SWITCH_TO_H
+#define _ASM_POWERPC_SWITCH_TO_H
+
+struct thread_struct;
+struct task_struct;
+struct pt_regs;
+
+extern struct task_struct *__switch_to(struct task_struct *,
+	struct task_struct *);
+#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))
+
+struct thread_struct;
+extern struct task_struct *_switch(struct thread_struct *prev,
+				   struct thread_struct *next);
+
+extern void giveup_fpu(struct task_struct *);
+extern void disable_kernel_fp(void);
+extern void enable_kernel_fp(void);
+extern void flush_fp_to_thread(struct task_struct *);
+extern void enable_kernel_altivec(void);
+extern void giveup_altivec(struct task_struct *);
+extern void load_up_altivec(struct task_struct *);
+extern int emulate_altivec(struct pt_regs *);
+extern void __giveup_vsx(struct task_struct *);
+extern void giveup_vsx(struct task_struct *);
+extern void enable_kernel_spe(void);
+extern void giveup_spe(struct task_struct *);
+extern void load_up_spe(struct task_struct *);
+
+#ifndef CONFIG_SMP
+extern void discard_lazy_cpu_state(void);
+#else
+static inline void discard_lazy_cpu_state(void)
+{
+}
+#endif
+
+#ifdef CONFIG_ALTIVEC
+extern void flush_altivec_to_thread(struct task_struct *);
+#else
+static inline void flush_altivec_to_thread(struct task_struct *t)
+{
+}
+#endif
+
+#ifdef CONFIG_VSX
+extern void flush_vsx_to_thread(struct task_struct *);
+#else
+static inline void flush_vsx_to_thread(struct task_struct *t)
+{
+}
+#endif
+
+#ifdef CONFIG_SPE
+extern void flush_spe_to_thread(struct task_struct *);
+#else
+static inline void flush_spe_to_thread(struct task_struct *t)
+{
+}
+#endif
+
+#endif /* _ASM_POWERPC_SWITCH_TO_H */
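A hedged sketch of how the scheduler core is expected to use the macro above: the third argument receives the task that was actually running before the switch, which matters because the stack changes underneath the caller inside __switch_to():

```c
#include <asm/switch_to.h>

static void context_switch_sketch(struct task_struct *prev,
				  struct task_struct *next)
{
	struct task_struct *last;

	switch_to(prev, next, last);
	/* from here on we run on next's stack; 'last' is the old prev */
}
```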
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
deleted file mode 100644
index a02883d5af43..000000000000
--- a/arch/powerpc/include/asm/system.h
+++ /dev/null
@@ -1,592 +0,0 @@
-/*
- * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
- */
-#ifndef _ASM_POWERPC_SYSTEM_H
-#define _ASM_POWERPC_SYSTEM_H
-
-#include <linux/kernel.h>
-#include <linux/irqflags.h>
-
-#include <asm/hw_irq.h>
-
-/*
- * Memory barrier.
- * The sync instruction guarantees that all memory accesses initiated
- * by this processor have been performed (with respect to all other
- * mechanisms that access memory).  The eieio instruction is a barrier
- * providing an ordering (separately) for (a) cacheable stores and (b)
- * loads and stores to non-cacheable memory (e.g. I/O devices).
- *
- * mb() prevents loads and stores being reordered across this point.
- * rmb() prevents loads being reordered across this point.
- * wmb() prevents stores being reordered across this point.
- * read_barrier_depends() prevents data-dependent loads being reordered
- * across this point (nop on PPC).
- *
- * *mb() variants without smp_ prefix must order all types of memory
- * operations with one another. sync is the only instruction sufficient
- * to do this.
- *
- * For the smp_ barriers, ordering is for cacheable memory operations
- * only. We have to use the sync instruction for smp_mb(), since lwsync
- * doesn't order loads with respect to previous stores.  Lwsync can be
- * used for smp_rmb() and smp_wmb().
- *
- * However, on CPUs that don't support lwsync, lwsync actually maps to a
- * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
- */
-#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
-#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
-#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
-#define read_barrier_depends()  do { } while(0)
-
-#define set_mb(var, value)	do { var = value; mb(); } while (0)
-
-#ifdef __KERNEL__
-#define AT_VECTOR_SIZE_ARCH 6 /* entries in ARCH_DLINFO */
-#ifdef CONFIG_SMP
-
-#ifdef __SUBARCH_HAS_LWSYNC
-#    define SMPWMB      LWSYNC
-#else
-#    define SMPWMB      eieio
-#endif
-
-#define smp_mb()	mb()
-#define smp_rmb()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
-#define smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
-#define smp_read_barrier_depends()	read_barrier_depends()
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while(0)
-#endif /* CONFIG_SMP */
-
-/*
- * This is a barrier which prevents following instructions from being
- * started until the value of the argument x is known.  For example, if
- * x is a variable loaded from memory, this prevents following
- * instructions from being executed until the load has been performed.
- */
-#define data_barrier(x)	\
-	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
-
-struct task_struct;
-struct pt_regs;
-
-#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
-
-extern int (*__debugger)(struct pt_regs *regs);
-extern int (*__debugger_ipi)(struct pt_regs *regs);
-extern int (*__debugger_bpt)(struct pt_regs *regs);
-extern int (*__debugger_sstep)(struct pt_regs *regs);
-extern int (*__debugger_iabr_match)(struct pt_regs *regs);
-extern int (*__debugger_dabr_match)(struct pt_regs *regs);
-extern int (*__debugger_fault_handler)(struct pt_regs *regs);
-
-#define DEBUGGER_BOILERPLATE(__NAME) \
-static inline int __NAME(struct pt_regs *regs) \
-{ \
-	if (unlikely(__ ## __NAME)) \
-		return __ ## __NAME(regs); \
-	return 0; \
-}
-
-DEBUGGER_BOILERPLATE(debugger)
-DEBUGGER_BOILERPLATE(debugger_ipi)
-DEBUGGER_BOILERPLATE(debugger_bpt)
-DEBUGGER_BOILERPLATE(debugger_sstep)
-DEBUGGER_BOILERPLATE(debugger_iabr_match)
-DEBUGGER_BOILERPLATE(debugger_dabr_match)
-DEBUGGER_BOILERPLATE(debugger_fault_handler)
-
-#else
-static inline int debugger(struct pt_regs *regs) { return 0; }
-static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
-static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
-static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
-static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
-static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
-static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
-#endif
-
-extern int set_dabr(unsigned long dabr);
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
-extern void do_send_trap(struct pt_regs *regs, unsigned long address,
-			 unsigned long error_code, int signal_code, int brkpt);
-#else
-extern void do_dabr(struct pt_regs *regs, unsigned long address,
-		    unsigned long error_code);
-#endif
-extern void print_backtrace(unsigned long *);
-extern void flush_instruction_cache(void);
-extern void hard_reset_now(void);
-extern void poweroff_now(void);
-
-#ifdef CONFIG_6xx
-extern long _get_L2CR(void);
-extern long _get_L3CR(void);
-extern void _set_L2CR(unsigned long);
-extern void _set_L3CR(unsigned long);
-#else
-#define _get_L2CR()	0L
-#define _get_L3CR()	0L
-#define _set_L2CR(val)	do { } while(0)
-#define _set_L3CR(val)	do { } while(0)
-#endif
-
-extern void via_cuda_init(void);
-extern void read_rtc_time(void);
-extern void pmac_find_display(void);
-extern void giveup_fpu(struct task_struct *);
-extern void disable_kernel_fp(void);
-extern void enable_kernel_fp(void);
-extern void flush_fp_to_thread(struct task_struct *);
-extern void enable_kernel_altivec(void);
-extern void giveup_altivec(struct task_struct *);
-extern void load_up_altivec(struct task_struct *);
-extern int emulate_altivec(struct pt_regs *);
-extern void __giveup_vsx(struct task_struct *);
-extern void giveup_vsx(struct task_struct *);
-extern void enable_kernel_spe(void);
-extern void giveup_spe(struct task_struct *);
-extern void load_up_spe(struct task_struct *);
-extern int fix_alignment(struct pt_regs *);
-extern void cvt_fd(float *from, double *to);
-extern void cvt_df(double *from, float *to);
-
-#ifndef CONFIG_SMP
-extern void discard_lazy_cpu_state(void);
-#else
-static inline void discard_lazy_cpu_state(void)
-{
-}
-#endif
-
-#ifdef CONFIG_ALTIVEC
-extern void flush_altivec_to_thread(struct task_struct *);
-#else
-static inline void flush_altivec_to_thread(struct task_struct *t)
-{
-}
-#endif
-
-#ifdef CONFIG_VSX
-extern void flush_vsx_to_thread(struct task_struct *);
-#else
-static inline void flush_vsx_to_thread(struct task_struct *t)
-{
-}
-#endif
-
-#ifdef CONFIG_SPE
-extern void flush_spe_to_thread(struct task_struct *);
-#else
-static inline void flush_spe_to_thread(struct task_struct *t)
-{
-}
-#endif
-
-extern int call_rtas(const char *, int, int, unsigned long *, ...);
-extern void cacheable_memzero(void *p, unsigned int nb);
-extern void *cacheable_memcpy(void *, const void *, unsigned int);
-extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
-extern void bad_page_fault(struct pt_regs *, unsigned long, int);
-extern void _exception(int, struct pt_regs *, int, unsigned long);
-extern void die(const char *, struct pt_regs *, long);
-extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
-
-#ifdef CONFIG_BOOKE_WDT
-extern u32 booke_wdt_enabled;
-extern u32 booke_wdt_period;
-#endif /* CONFIG_BOOKE_WDT */
-
-struct device_node;
-extern void note_scsi_host(struct device_node *, void *);
-
-extern struct task_struct *__switch_to(struct task_struct *,
-	struct task_struct *);
-#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))
-
-struct thread_struct;
-extern struct task_struct *_switch(struct thread_struct *prev,
-				   struct thread_struct *next);
-
-extern unsigned int rtas_data;
-extern int mem_init_done;	/* set on boot once kmalloc can be called */
-extern int init_bootmem_done;	/* set once bootmem is available */
-extern phys_addr_t memory_limit;
-extern unsigned long klimit;
-extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
-
-extern int powersave_nap;	/* set if nap mode can be used in idle loop */
-void cpu_idle_wait(void);
-
-#ifdef CONFIG_PSERIES_IDLE
-extern void update_smt_snooze_delay(int snooze);
-extern int pseries_notify_cpuidle_add_cpu(int cpu);
-#else
-static inline void update_smt_snooze_delay(int snooze) {}
-static inline int pseries_notify_cpuidle_add_cpu(int cpu) { return 0; }
-#endif
-
-/*
- * Atomic exchange
- *
- * Changes the memory location '*ptr' to be val and returns
- * the previous value stored there.
- */
-static __always_inline unsigned long
-__xchg_u32(volatile void *p, unsigned long val)
-{
-	unsigned long prev;
-
-	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
-"1:	lwarx	%0,0,%2 \n"
-	PPC405_ERR77(0,%2)
-"	stwcx.	%3,0,%2 \n\
-	bne-	1b"
-	PPC_ACQUIRE_BARRIER
-	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
-	: "r" (p), "r" (val)
-	: "cc", "memory");
-
-	return prev;
-}
-
-/*
- * Atomic exchange
- *
- * Changes the memory location '*ptr' to be val and returns
- * the previous value stored there.
- */
-static __always_inline unsigned long
-__xchg_u32_local(volatile void *p, unsigned long val)
-{
-	unsigned long prev;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%2 \n"
-	PPC405_ERR77(0,%2)
-"	stwcx.	%3,0,%2 \n\
-	bne-	1b"
-	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
-	: "r" (p), "r" (val)
-	: "cc", "memory");
-
-	return prev;
-}
-
-#ifdef CONFIG_PPC64
-static __always_inline unsigned long
-__xchg_u64(volatile void *p, unsigned long val)
-{
-	unsigned long prev;
-
-	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
-"1:	ldarx	%0,0,%2 \n"
-	PPC405_ERR77(0,%2)
-"	stdcx.	%3,0,%2 \n\
-	bne-	1b"
-	PPC_ACQUIRE_BARRIER
-	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
-	: "r" (p), "r" (val)
-	: "cc", "memory");
-
-	return prev;
-}
-
-static __always_inline unsigned long
-__xchg_u64_local(volatile void *p, unsigned long val)
-{
-	unsigned long prev;
-
-	__asm__ __volatile__(
-"1:	ldarx	%0,0,%2 \n"
-	PPC405_ERR77(0,%2)
-"	stdcx.	%3,0,%2 \n\
-	bne-	1b"
-	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
-	: "r" (p), "r" (val)
-	: "cc", "memory");
-
-	return prev;
-}
-#endif
-
-/*
- * This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid xchg().
- */
-extern void __xchg_called_with_bad_pointer(void);
-
-static __always_inline unsigned long
-__xchg(volatile void *ptr, unsigned long x, unsigned int size)
-{
-	switch (size) {
-	case 4:
-		return __xchg_u32(ptr, x);
-#ifdef CONFIG_PPC64
-	case 8:
-		return __xchg_u64(ptr, x);
-#endif
-	}
-	__xchg_called_with_bad_pointer();
-	return x;
-}
-
-static __always_inline unsigned long
-__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
-{
-	switch (size) {
-	case 4:
-		return __xchg_u32_local(ptr, x);
-#ifdef CONFIG_PPC64
-	case 8:
-		return __xchg_u64_local(ptr, x);
-#endif
-	}
-	__xchg_called_with_bad_pointer();
-	return x;
-}
-#define xchg(ptr,x)							     \
-  ({									     \
-     __typeof__(*(ptr)) _x_ = (x);					     \
-     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
-  })
-
-#define xchg_local(ptr,x)						     \
-  ({									     \
-     __typeof__(*(ptr)) _x_ = (x);					     \
-     (__typeof__(*(ptr))) __xchg_local((ptr),				     \
-		(unsigned long)_x_, sizeof(*(ptr)));			     \
-  })
-
-/*
- * Compare and exchange - if *p == old, set it to new,
- * and return the old value of *p.
- */
-#define __HAVE_ARCH_CMPXCHG	1
-
-static __always_inline unsigned long
-__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
-{
-	unsigned int prev;
-
-	__asm__ __volatile__ (
-	PPC_RELEASE_BARRIER
-"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
-	cmpw	0,%0,%3\n\
-	bne-	2f\n"
-	PPC405_ERR77(0,%2)
-"	stwcx.	%4,0,%2\n\
-	bne-	1b"
-	PPC_ACQUIRE_BARRIER
-	"\n\
-2:"
-	: "=&r" (prev), "+m" (*p)
-	: "r" (p), "r" (old), "r" (new)
-	: "cc", "memory");
-
-	return prev;
-}
-
-static __always_inline unsigned long
-__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
-			unsigned long new)
-{
-	unsigned int prev;
-
-	__asm__ __volatile__ (
-"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
-	cmpw	0,%0,%3\n\
-	bne-	2f\n"
-	PPC405_ERR77(0,%2)
-"	stwcx.	%4,0,%2\n\
-	bne-	1b"
-	"\n\
-2:"
-	: "=&r" (prev), "+m" (*p)
-	: "r" (p), "r" (old), "r" (new)
-	: "cc", "memory");
-
-	return prev;
-}
-
-#ifdef CONFIG_PPC64
-static __always_inline unsigned long
-__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
-{
-	unsigned long prev;
-
-	__asm__ __volatile__ (
-	PPC_RELEASE_BARRIER
-"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
-	cmpd	0,%0,%3\n\
-	bne-	2f\n\
-	stdcx.	%4,0,%2\n\
-	bne-	1b"
-	PPC_ACQUIRE_BARRIER
-	"\n\
-2:"
-	: "=&r" (prev), "+m" (*p)
-	: "r" (p), "r" (old), "r" (new)
-	: "cc", "memory");
-
-	return prev;
-}
-
-static __always_inline unsigned long
-__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
-			unsigned long new)
-{
-	unsigned long prev;
-
-	__asm__ __volatile__ (
-"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
-	cmpd	0,%0,%3\n\
-	bne-	2f\n\
-	stdcx.	%4,0,%2\n\
-	bne-	1b"
-	"\n\
-2:"
-	: "=&r" (prev), "+m" (*p)
-	: "r" (p), "r" (old), "r" (new)
-	: "cc", "memory");
-
-	return prev;
-}
-#endif
-
-/* This function doesn't exist, so you'll get a linker error
-   if something tries to do an invalid cmpxchg().  */
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-static __always_inline unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
-	  unsigned int size)
-{
-	switch (size) {
-	case 4:
-		return __cmpxchg_u32(ptr, old, new);
-#ifdef CONFIG_PPC64
-	case 8:
-		return __cmpxchg_u64(ptr, old, new);
-#endif
-	}
-	__cmpxchg_called_with_bad_pointer();
-	return old;
-}
-
-static __always_inline unsigned long
-__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
-	  unsigned int size)
-{
-	switch (size) {
-	case 4:
-		return __cmpxchg_u32_local(ptr, old, new);
-#ifdef CONFIG_PPC64
-	case 8:
-		return __cmpxchg_u64_local(ptr, old, new);
-#endif
-	}
-	__cmpxchg_called_with_bad_pointer();
-	return old;
-}
-
-#define cmpxchg(ptr, o, n)						 \
-  ({									 \
-     __typeof__(*(ptr)) _o_ = (o);					 \
-     __typeof__(*(ptr)) _n_ = (n);					 \
-     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
-				    (unsigned long)_n_, sizeof(*(ptr))); \
-  })
-
-
-#define cmpxchg_local(ptr, o, n)					 \
-  ({									 \
-     __typeof__(*(ptr)) _o_ = (o);					 \
-     __typeof__(*(ptr)) _n_ = (n);					 \
-     (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,	 \
-				    (unsigned long)_n_, sizeof(*(ptr))); \
-  })
-
-#ifdef CONFIG_PPC64
-/*
- * We handle most unaligned accesses in hardware. On the other hand
- * unaligned DMA can be very expensive on some ppc64 IO chips (it does
- * powers of 2 writes until it reaches sufficient alignment).
- *
- * Based on this we disable the IP header alignment in network drivers.
- */
-#define NET_IP_ALIGN	0
-
-#define cmpxchg64(ptr, o, n)						\
-  ({									\
-	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
-	cmpxchg((ptr), (o), (n));					\
-  })
-#define cmpxchg64_local(ptr, o, n)					\
-  ({									\
-	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
-	cmpxchg_local((ptr), (o), (n));					\
-  })
-#else
-#include <asm-generic/cmpxchg-local.h>
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-#endif
-
-extern unsigned long arch_align_stack(unsigned long sp);
-
-/* Used in very early kernel initialization. */
-extern unsigned long reloc_offset(void);
-extern unsigned long add_reloc_offset(unsigned long);
-extern void reloc_got2(unsigned long);
-
-#define PTRRELOC(x)	((typeof(x)) add_reloc_offset((unsigned long)(x)))
-
-extern struct dentry *powerpc_debugfs_root;
-
-#ifdef CONFIG_PPC64
-
-extern void __ppc64_runlatch_on(void);
-extern void __ppc64_runlatch_off(void);
-
-/*
- * We manually hard enable-disable, this is called
- * in the idle loop and we don't want to mess up
- * with soft-disable/enable & interrupt replay.
- */
-#define ppc64_runlatch_off()					\
-	do {							\
-		if (cpu_has_feature(CPU_FTR_CTRL) &&		\
-		    test_thread_local_flags(_TLF_RUNLATCH)) {	\
-			unsigned long msr = mfmsr();		\
-			__hard_irq_disable();			\
-			__ppc64_runlatch_off();			\
-			if (msr & MSR_EE)			\
-				__hard_irq_enable();		\
-		}						\
-	} while (0)
-
-#define ppc64_runlatch_on()					\
-	do {							\
-		if (cpu_has_feature(CPU_FTR_CTRL) &&		\
-		    !test_thread_local_flags(_TLF_RUNLATCH)) {	\
-			unsigned long msr = mfmsr();		\
-			__hard_irq_disable();			\
-			__ppc64_runlatch_on();			\
-			if (msr & MSR_EE)			\
-				__hard_irq_enable();		\
-		}						\
-	} while (0)
-#else
-#define ppc64_runlatch_on()
-#define ppc64_runlatch_off()
-#endif /* CONFIG_PPC64 */
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_SYSTEM_H */