commit c55377ee73f6efeb373ae06f6e918d87660b4852
tree   8085472005f758e73d996d2b3e0e91064524d533
parent 821077b2617ef70662a861393c929d7e47609512
author Paul Mackerras <paulus@samba.org> 2005-11-14 01:22:01 -0500
committer Paul Mackerras <paulus@samba.org> 2005-11-14 01:22:01 -0500

    powerpc: Move a bunch of ppc64 headers to include/asm-powerpc

    ... and also delete some that are no longer used because we already
    had an include/asm-powerpc version of the header.

    Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'include/asm-ppc64/system.h')
-rw-r--r--	include/asm-ppc64/system.h	310
1 file changed, 0 insertions(+), 310 deletions(-)
diff --git a/include/asm-ppc64/system.h b/include/asm-ppc64/system.h
deleted file mode 100644
index bf9a6aba19c9..000000000000
--- a/include/asm-ppc64/system.h
+++ /dev/null
@@ -1,310 +0,0 @@
#ifndef __PPC64_SYSTEM_H
#define __PPC64_SYSTEM_H

/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/hw_irq.h>
#include <asm/synch.h>

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory). The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 * across this point (nop on PPC).
 *
 * We have to use the sync instructions for mb(), since lwsync doesn't
 * order loads with respect to previous stores. Lwsync is fine for
 * rmb(), though.
 * For wmb(), we use sync since wmb is used in drivers to order
 * stores to system memory with respect to writes to the device.
 * However, smp_wmb() can be a lighter-weight eieio barrier on
 * SMP since it is only used to order updates to system memory.
 */
#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
#define rmb()	__asm__ __volatile__ ("lwsync" : : : "memory")
#define wmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends()	do { } while(0)

#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
#define set_wmb(var, value)	do { var = value; smp_wmb(); } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	eieio()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	__asm__ __volatile__("": : :"memory")
#define smp_rmb()	__asm__ __volatile__("": : :"memory")
#define smp_wmb()	__asm__ __volatile__("": : :"memory")
#define smp_read_barrier_depends()	do { } while(0)
#endif /* CONFIG_SMP */
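
/*
 * Illustrative sketch (added for clarity; not part of the original
 * header): how smp_wmb()/smp_rmb() are typically paired so that a
 * reader never observes the "ready" flag before the data it guards.
 * The variables and functions below are hypothetical names invented
 * for this example only.
 */
#if 0	/* example only, never compiled */
static int example_data;
static int example_ready;

static void example_producer(int v)
{
	example_data = v;
	smp_wmb();		/* order the store to example_data ... */
	example_ready = 1;	/* ... before the store to example_ready */
}

static int example_consumer(void)
{
	while (!example_ready)
		;		/* spin until the flag is visible */
	smp_rmb();		/* order the load of example_ready ... */
	return example_data;	/* ... before the load of example_data */
}
#endif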

#ifdef __KERNEL__
struct task_struct;
struct pt_regs;

#ifdef CONFIG_DEBUGGER

extern int (*__debugger)(struct pt_regs *regs);
extern int (*__debugger_ipi)(struct pt_regs *regs);
extern int (*__debugger_bpt)(struct pt_regs *regs);
extern int (*__debugger_sstep)(struct pt_regs *regs);
extern int (*__debugger_iabr_match)(struct pt_regs *regs);
extern int (*__debugger_dabr_match)(struct pt_regs *regs);
extern int (*__debugger_fault_handler)(struct pt_regs *regs);

#define DEBUGGER_BOILERPLATE(__NAME) \
static inline int __NAME(struct pt_regs *regs) \
{ \
	if (unlikely(__ ## __NAME)) \
		return __ ## __NAME(regs); \
	return 0; \
}

DEBUGGER_BOILERPLATE(debugger)
DEBUGGER_BOILERPLATE(debugger_ipi)
DEBUGGER_BOILERPLATE(debugger_bpt)
DEBUGGER_BOILERPLATE(debugger_sstep)
DEBUGGER_BOILERPLATE(debugger_iabr_match)
DEBUGGER_BOILERPLATE(debugger_dabr_match)
DEBUGGER_BOILERPLATE(debugger_fault_handler)
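
/*
 * For reference (expansion added for clarity; not part of the original
 * header): DEBUGGER_BOILERPLATE(debugger_bpt) expands to a stub that
 * calls through the hook only when one has been registered:
 *
 *	static inline int debugger_bpt(struct pt_regs *regs)
 *	{
 *		if (unlikely(__debugger_bpt))
 *			return __debugger_bpt(regs);
 *		return 0;
 *	}
 */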

#ifdef CONFIG_XMON
extern void xmon_init(int enable);
#endif

#else
static inline int debugger(struct pt_regs *regs) { return 0; }
static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
#endif

extern int set_dabr(unsigned long dabr);
extern void _exception(int signr, struct pt_regs *regs, int code,
		       unsigned long addr);
extern int fix_alignment(struct pt_regs *regs);
extern void bad_page_fault(struct pt_regs *regs, unsigned long address,
			   int sig);
extern void show_regs(struct pt_regs * regs);
extern void low_hash_fault(struct pt_regs *regs, unsigned long address);
extern int die(const char *str, struct pt_regs *regs, long err);

extern int _get_PVR(void);
extern void giveup_fpu(struct task_struct *);
extern void disable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_fp(void);
extern void giveup_altivec(struct task_struct *);
extern void disable_kernel_altivec(void);
extern void enable_kernel_altivec(void);
extern int emulate_altivec(struct pt_regs *);
extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
extern void cvt_df(double *from, float *to, struct thread_struct *thread);

#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
#else
static inline void flush_altivec_to_thread(struct task_struct *t)
{
}
#endif

static inline void flush_spe_to_thread(struct task_struct *t)
{
}

extern int mem_init_done;	/* set on boot once kmalloc can be called */
extern unsigned long memory_limit;

/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
extern unsigned char e2a(unsigned char);

extern struct task_struct *__switch_to(struct task_struct *,
				       struct task_struct *);
#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))

struct thread_struct;
extern struct task_struct * _switch(struct thread_struct *prev,
				    struct thread_struct *next);

extern unsigned long klimit;

extern int powersave_nap;	/* set if nap mode can be used in idle loop */

/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 *
 * Inline asm pulled from arch/ppc/kernel/misc.S so ppc64
 * is more like most of the other architectures.
 */
static __inline__ unsigned long
__xchg_u32(volatile unsigned int *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	lwarx %0,0,%3		# __xchg_u32\n\
	stwcx. %2,0,%3\n\
2:	bne- 1b"
	ISYNC_ON_SMP
	: "=&r" (dummy), "=m" (*m)
	: "r" (val), "r" (m)
	: "cc", "memory");

	return (dummy);
}

static __inline__ unsigned long
__xchg_u64(volatile long *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	ldarx %0,0,%3		# __xchg_u64\n\
	stdcx. %2,0,%3\n\
2:	bne- 1b"
	ISYNC_ON_SMP
	: "=&r" (dummy), "=m" (*m)
	: "r" (val), "r" (m)
	: "cc", "memory");

	return (dummy);
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__xchg(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x) \
	({ \
	__typeof__(*(ptr)) _x_ = (x); \
	(__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
	})

#define tas(ptr) (xchg((ptr),1))
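
/*
 * Illustrative sketch (added for clarity; not part of the original
 * header): tas() is a test-and-set built on xchg(), so a minimal spin
 * lock could be written as below.  example_lock, example_acquire and
 * example_release are hypothetical names invented for this example.
 */
#if 0	/* example only, never compiled */
static volatile unsigned int example_lock;

static void example_acquire(void)
{
	while (tas(&example_lock))
		;	/* old value was 1: lock already held, spin */
	/* the ISYNC_ON_SMP inside xchg() keeps the critical
	   section from leaking above the acquire */
}

static void example_release(void)
{
	smp_mb();	/* order the critical section before the release */
	example_lock = 0;
}
#endif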

#define __HAVE_ARCH_CMPXCHG	1

static __inline__ unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	EIEIO_ON_SMP
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n\
	stwcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}

static __inline__ unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	EIEIO_ON_SMP
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
	(unsigned long)(n),sizeof(*(ptr))))
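
/*
 * Illustrative sketch (added for clarity; not part of the original
 * header): the usual cmpxchg() retry loop, here adding to a counter
 * atomically.  example_counter and example_add are hypothetical names
 * invented for this example.
 */
#if 0	/* example only, never compiled */
static volatile unsigned int example_counter;

static unsigned int example_add(unsigned int delta)
{
	unsigned int old, new;

	do {
		old = example_counter;
		new = old + delta;
		/* retry if another CPU updated the counter meanwhile */
	} while (cmpxchg(&example_counter, old, new) != old);

	return new;
}
#endif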

/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers of 2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

#define arch_align_stack(x) (x)

extern unsigned long reloc_offset(void);

#endif /* __KERNEL__ */
#endif