Diffstat (limited to 'include/asm-ppc/system.h')
-rw-r--r--  include/asm-ppc/system.h | 289 --
1 file changed, 0 insertions(+), 289 deletions(-)
diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h
deleted file mode 100644
index 70ebd333c55b..000000000000
--- a/include/asm-ppc/system.h
+++ /dev/null
@@ -1,289 +0,0 @@
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef __PPC_SYSTEM_H
#define __PPC_SYSTEM_H

#include <linux/kernel.h>

#include <asm/hw_irq.h>

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 * across this point (nop on PPC).
 *
 * We can use the eieio instruction for wmb, but since it doesn't
 * give any ordering guarantees about loads, we have to use the
 * stronger but slower sync instruction for mb and rmb.
 */
#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
#define rmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
#define read_barrier_depends()	do { } while (0)

#define set_mb(var, value)	do { var = value; mb(); } while (0)

#define AT_VECTOR_SIZE_ARCH 6 /* entries in ARCH_DLINFO */
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#endif /* CONFIG_SMP */
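
/*
 * A minimal sketch of how wmb()/rmb() pair up in the classic
 * producer/consumer pattern; all names below are hypothetical and the
 * block is compiled out.  The writer orders the payload store before
 * the flag store; the reader orders the flag load before the payload
 * load.
 */
#if 0
static int example_data;
static int example_flag;

static void example_publish(void)
{
	example_data = 42;	/* store the payload first ... */
	wmb();			/* ... and order it before the flag store */
	example_flag = 1;
}

static int example_consume(void)
{
	if (example_flag) {
		rmb();	/* order the flag load before the data load */
		return example_data;
	}
	return -1;
}
#endif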

#ifdef __KERNEL__
struct task_struct;
struct pt_regs;

extern void print_backtrace(unsigned long *);
extern void show_regs(struct pt_regs * regs);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);
extern int set_dabr(unsigned long dabr);
#ifdef CONFIG_6xx
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR()	0L
#define _get_L3CR()	0L
#define _set_L2CR(val)	do { } while (0)
#define _set_L3CR(val)	do { } while (0)
#endif
extern void via_cuda_init(void);
extern void pmac_nvram_init(void);
extern void chrp_nvram_init(void);
extern void read_rtc_time(void);
extern void pmac_find_display(void);
extern void giveup_fpu(struct task_struct *);
extern void disable_kernel_fp(void);
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_altivec(void);
extern void giveup_altivec(struct task_struct *);
extern void load_up_altivec(struct task_struct *);
extern int emulate_altivec(struct pt_regs *);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
extern void cvt_df(double *from, float *to, struct thread_struct *thread);

#ifndef CONFIG_SMP
extern void discard_lazy_cpu_state(void);
#else
static inline void discard_lazy_cpu_state(void)
{
}
#endif

#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
#else
static inline void flush_altivec_to_thread(struct task_struct *t)
{
}
#endif

#ifdef CONFIG_SPE
extern void flush_spe_to_thread(struct task_struct *);
#else
static inline void flush_spe_to_thread(struct task_struct *t)
{
}
#endif

extern int call_rtas(const char *, int, int, unsigned long *, ...);
extern void cacheable_memzero(void *p, unsigned int nb);
extern void *cacheable_memcpy(void *, const void *, unsigned int);
extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
extern void bad_page_fault(struct pt_regs *, unsigned long, int);
extern int die(const char *, struct pt_regs *, long);
extern void _exception(int, struct pt_regs *, int, unsigned long);
void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);

#ifdef CONFIG_BOOKE_WDT
extern u32 booke_wdt_enabled;
extern u32 booke_wdt_period;
#endif /* CONFIG_BOOKE_WDT */

struct device_node;
extern void note_scsi_host(struct device_node *, void *);

extern struct task_struct *__switch_to(struct task_struct *,
	struct task_struct *);
#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))

struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
	struct thread_struct *next);

extern unsigned int rtas_data;

static __inline__ unsigned long
xchg_u32(volatile void *p, unsigned long val)
{
	unsigned long prev;

	/*
	 * lwarx/stwcx. retry loop: reserve the word, try to store the
	 * new value, and retry if the reservation was lost in between.
	 * PPC405_ERR77 expands to a workaround for a PPC405 erratum.
	 */
	__asm__ __volatile__ ("\n\
1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "=m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
	: "cc", "memory");

	return prev;
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 4:
		return (unsigned long) xchg_u32(ptr, x);
#if 0 /* xchg_u64 doesn't exist on 32-bit PPC */
	case 8:
		return (unsigned long) xchg_u64(ptr, x);
#endif /* 0 */
	}
	__xchg_called_with_bad_pointer();
	return x;
}

static inline void *xchg_ptr(void *m, void *val)
{
	return (void *) xchg_u32(m, (unsigned long) val);
}

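/*
 * A minimal sketch of xchg() usage (hypothetical names, compiled out):
 * a crude test-and-set lock.  xchg() atomically stores the new value
 * and returns the previous one, so a return of 0 means we took the
 * lock.
 */
#if 0
static unsigned long example_lock;

static void example_acquire(void)
{
	while (xchg(&example_lock, 1) != 0)
		;	/* spin until the previous value was 0 (unlocked) */
}

static void example_release(void)
{
	mb();		/* order critical-section accesses before the release */
	example_lock = 0;
}
#endif
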
#define __HAVE_ARCH_CMPXCHG	1

static __inline__ unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
{
	unsigned int prev;

	/*
	 * Compare-and-swap via lwarx/stwcx.; the sync executed on SMP
	 * after a successful store makes the operation a full barrier
	 * (the bne 2f skips it when the comparison fails).
	 */
	__asm__ __volatile__ ("\n\
1:	lwarx	%0,0,%2 \n\
	cmpw	0,%0,%3 \n\
	bne	2f \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2 \n\
	bne-	1b\n"
#ifdef CONFIG_SMP
"	sync\n"
#endif /* CONFIG_SMP */
"2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}

static inline unsigned long
__cmpxchg_u32_local(volatile unsigned int *p, unsigned int old,
		    unsigned int new)
{
	unsigned int prev;

	/*
	 * Same as __cmpxchg_u32() but without the SMP sync, so it is
	 * only atomic with respect to the local CPU.
	 */
	__asm__ __volatile__ ("\n\
1:	lwarx	%0,0,%2 \n\
	cmpw	0,%0,%3 \n\
	bne	2f \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2 \n\
	bne-	1b\n"
"2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#if 0 /* we don't have __cmpxchg_u64 on 32-bit PPC */
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif /* 0 */
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr, o, n)						\
	({								\
		__typeof__(*(ptr)) _o_ = (o);				\
		__typeof__(*(ptr)) _n_ = (n);				\
		(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
				(unsigned long)_n_, sizeof(*(ptr)));	\
	})

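/*
 * A minimal sketch of the canonical cmpxchg() retry loop (hypothetical
 * names, compiled out): building an atomic add out of compare-and-swap.
 * The loop re-reads the value until no other CPU modified it between
 * the read and the swap.
 */
#if 0
static unsigned int example_counter;

static void example_atomic_add(unsigned int n)
{
	unsigned int old;

	do {
		old = example_counter;
	} while (cmpxchg(&example_counter, old, old + n) != old);
}
#endif
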
#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32_local(ptr, old, new);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU.  Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
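
/*
 * A minimal sketch of cmpxchg_local() usage (hypothetical names,
 * compiled out): it skips the SMP sync, so it is cheaper than cmpxchg()
 * but only atomic against code on the same CPU, e.g. an interrupt
 * handler touching per-CPU data.
 */
#if 0
static unsigned int example_percpu_events;	/* one instance per CPU */

static void example_count_event(void)
{
	unsigned int old;

	do {
		old = example_percpu_events;
	} while (cmpxchg_local(&example_percpu_events, old, old + 1) != old);
}
#endif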

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */
#endif /* __PPC_SYSTEM_H */