Diffstat (limited to 'include/asm-powerpc/system.h')
 -rw-r--r--  include/asm-powerpc/system.h | 363
 1 file changed, 363 insertions(+), 0 deletions(-)
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
new file mode 100644
index 000000000000..5b2ecbc47907
--- /dev/null
+++ b/include/asm-powerpc/system.h
@@ -0,0 +1,363 @@
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_SYSTEM_H
#define _ASM_POWERPC_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>

#include <asm/hw_irq.h>
#include <asm/ppc_asm.h>
#include <asm/atomic.h>

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory). The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 * across this point (nop on PPC).
 *
 * We have to use the sync instruction for mb(), since lwsync doesn't
 * order loads with respect to previous stores. Lwsync is fine for
 * rmb(), though. Note that lwsync is interpreted as sync by
 * 32-bit and older 64-bit CPUs.
 *
 * For wmb(), we use sync since wmb is used in drivers to order
 * stores to system memory with respect to writes to the device.
 * However, smp_wmb() can be a lighter-weight eieio barrier on
 * SMP since it is only used to order updates to system memory.
 */
#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
#define rmb()	__asm__ __volatile__ ("lwsync" : : : "memory")
#define wmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends()	do { } while(0)

#define set_mb(var, value)	do { var = value; mb(); } while (0)
#define set_wmb(var, value)	do { var = value; wmb(); } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif /* CONFIG_SMP */
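
/*
 * Illustrative sketch, not part of the original header (guarded with
 * #if 0 since it is illustration only): the typical producer/consumer
 * pairing of smp_wmb()/smp_rmb().  The names example_data,
 * example_ready, example_publish and example_consume are hypothetical.
 */
#if 0
static int example_data;
static int example_ready;

static inline void example_publish(int v)
{
	example_data = v;
	smp_wmb();		/* order the data store before the flag store */
	example_ready = 1;
}

static inline int example_consume(void)
{
	if (!example_ready)
		return -1;
	smp_rmb();		/* order the flag load before the data load */
	return example_data;
}
#endif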

#ifdef __KERNEL__
struct task_struct;
struct pt_regs;

#ifdef CONFIG_DEBUGGER

extern int (*__debugger)(struct pt_regs *regs);
extern int (*__debugger_ipi)(struct pt_regs *regs);
extern int (*__debugger_bpt)(struct pt_regs *regs);
extern int (*__debugger_sstep)(struct pt_regs *regs);
extern int (*__debugger_iabr_match)(struct pt_regs *regs);
extern int (*__debugger_dabr_match)(struct pt_regs *regs);
extern int (*__debugger_fault_handler)(struct pt_regs *regs);

#define DEBUGGER_BOILERPLATE(__NAME) \
static inline int __NAME(struct pt_regs *regs) \
{ \
	if (unlikely(__ ## __NAME)) \
		return __ ## __NAME(regs); \
	return 0; \
}

DEBUGGER_BOILERPLATE(debugger)
DEBUGGER_BOILERPLATE(debugger_ipi)
DEBUGGER_BOILERPLATE(debugger_bpt)
DEBUGGER_BOILERPLATE(debugger_sstep)
DEBUGGER_BOILERPLATE(debugger_iabr_match)
DEBUGGER_BOILERPLATE(debugger_dabr_match)
DEBUGGER_BOILERPLATE(debugger_fault_handler)
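
/*
 * For reference, DEBUGGER_BOILERPLATE(debugger) above expands to the
 * equivalent of:
 *
 *	static inline int debugger(struct pt_regs *regs)
 *	{
 *		if (unlikely(__debugger))
 *			return __debugger(regs);
 *		return 0;
 *	}
 *
 * so each hook is a cheap no-op call unless a debugger has installed
 * a handler in the corresponding function pointer.
 */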

#ifdef CONFIG_XMON
extern void xmon_init(int enable);
#endif

#else
static inline int debugger(struct pt_regs *regs) { return 0; }
static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
#endif

extern int set_dabr(unsigned long dabr);
extern void print_backtrace(unsigned long *);
extern void show_regs(struct pt_regs * regs);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);

#ifdef CONFIG_6xx
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR()	0L
#define _get_L3CR()	0L
#define _set_L2CR(val)	do { } while(0)
#define _set_L3CR(val)	do { } while(0)
#endif

extern void via_cuda_init(void);
extern void read_rtc_time(void);
extern void pmac_find_display(void);
extern void giveup_fpu(struct task_struct *);
extern void disable_kernel_fp(void);
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_altivec(void);
extern void giveup_altivec(struct task_struct *);
extern void load_up_altivec(struct task_struct *);
extern int emulate_altivec(struct pt_regs *);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
extern void cvt_df(double *from, float *to, struct thread_struct *thread);

#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
#else
static inline void flush_altivec_to_thread(struct task_struct *t)
{
}
#endif

#ifdef CONFIG_SPE
extern void flush_spe_to_thread(struct task_struct *);
#else
static inline void flush_spe_to_thread(struct task_struct *t)
{
}
#endif

extern int call_rtas(const char *, int, int, unsigned long *, ...);
extern void cacheable_memzero(void *p, unsigned int nb);
extern void *cacheable_memcpy(void *, const void *, unsigned int);
extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
extern void bad_page_fault(struct pt_regs *, unsigned long, int);
extern int die(const char *, struct pt_regs *, long);
extern void _exception(int, struct pt_regs *, int, unsigned long);
#ifdef CONFIG_BOOKE_WDT
extern u32 booke_wdt_enabled;
extern u32 booke_wdt_period;
#endif /* CONFIG_BOOKE_WDT */

/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
extern unsigned char e2a(unsigned char);

struct device_node;
extern void note_scsi_host(struct device_node *, void *);

extern struct task_struct *__switch_to(struct task_struct *,
	struct task_struct *);
#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))

struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
				   struct thread_struct *next);

extern unsigned int rtas_data;
extern int mem_init_done;	/* set on boot once kmalloc can be called */
extern unsigned long memory_limit;

extern int powersave_nap;	/* set if nap mode can be used in idle loop */

/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 */
static __inline__ unsigned long
__xchg_u32(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (prev), "=m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val), "m" (*(volatile unsigned int *)p)
	: "cc", "memory");

	return prev;
}

#ifdef CONFIG_PPC64
static __inline__ unsigned long
__xchg_u64(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	ldarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stdcx.	%3,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (prev), "=m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
	: "cc", "memory");

	return prev;
}
#endif

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__xchg(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x)							     \
  ({									     \
     __typeof__(*(ptr)) _x_ = (x);					     \
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
  })

#define tas(ptr) (xchg((ptr),1))
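
/*
 * Illustrative sketch, not part of the original header (guarded with
 * #if 0 since it is illustration only): xchg() used as a simple
 * test-and-set lock, which is the pattern tas() above supports.  The
 * names example_lock, example_trylock and example_unlock are
 * hypothetical; real code should use the kernel spinlock API instead.
 */
#if 0
static unsigned int example_lock;

static inline int example_trylock(void)
{
	/* xchg() returns the previous value; 0 means we took the lock */
	return xchg(&example_lock, 1) == 0;
}

static inline void example_unlock(void)
{
	smp_mb();	/* order the critical section before the release */
	example_lock = 0;
}
#endif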

/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */
#define __HAVE_ARCH_CMPXCHG	1

static __inline__ unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	EIEIO_ON_SMP
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}

#ifdef CONFIG_PPC64
static __inline__ unsigned long
__cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	EIEIO_ON_SMP
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
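
/*
 * Illustrative sketch, not part of the original header (guarded with
 * #if 0 since it is illustration only): the canonical compare-and-swap
 * retry loop built on cmpxchg().  The names example_counter and
 * example_add are hypothetical.
 */
#if 0
static unsigned int example_counter;

static inline void example_add(unsigned int n)
{
	unsigned int old;

	do {
		old = example_counter;
		/* cmpxchg() returns the value it found; retry if we raced */
	} while (cmpxchg(&example_counter, old, old + n) != old);
}
#endif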

#ifdef CONFIG_PPC64
/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers of 2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0
#endif

#define arch_align_stack(x) (x)

/* Used in very early kernel initialization. */
extern unsigned long reloc_offset(void);
extern unsigned long add_reloc_offset(unsigned long);
extern void reloc_got2(unsigned long);

#define PTRRELOC(x)	((typeof(x)) add_reloc_offset((unsigned long)(x)))
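
/*
 * Illustrative note, not part of the original header: before the
 * kernel runs at its linked address, globals must be reached through
 * PTRRELOC so the pointer is adjusted by the current relocation
 * offset, e.g. (hypothetical early-boot code):
 *
 *	if (*PTRRELOC(&mem_init_done))
 *		...
 */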

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SYSTEM_H */