author    David Howells <dhowells@redhat.com>    2012-03-28 13:30:02 -0400
committer David Howells <dhowells@redhat.com>    2012-03-28 13:30:02 -0400
commit    ae3a197e3d0bfe3f4bf1693723e82dc018c096f3 (patch)
tree      12a222c01afd73dbc3ebb6859952083e2eb96441    /arch/powerpc/include/asm/system.h
parent    527dcdccd60759ee38e6224c93f87a6194d970ad (diff)
Disintegrate asm/system.h for PowerPC
Disintegrate asm/system.h for PowerPC.

Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
cc: linuxppc-dev@lists.ozlabs.org
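[Editorial note: asm/system.h becomes a stub that only pulls in the split headers shown in the diff below; code that wants a specific facility should now include the matching header directly. A minimal sketch of the new include style with a hypothetical caller - only the header names come from this commit, every other identifier is illustrative:

    /* hypothetical caller that only needs barriers and cmpxchg */
    #include <asm/barrier.h>        /* mb(), rmb(), wmb(), smp_*() */
    #include <asm/cmpxchg.h>        /* xchg(), cmpxchg() and the _local variants */

    static unsigned int example_claimed;    /* hypothetical flag word */

    static inline int example_try_claim(void)
    {
            /* cmpxchg() returns the old value; seeing 0 means our 0 -> 1 store won */
            return cmpxchg(&example_claimed, 0, 1) == 0;
    }
]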
Diffstat (limited to 'arch/powerpc/include/asm/system.h')
-rw-r--r--    arch/powerpc/include/asm/system.h    598
1 file changed, 6 insertions(+), 592 deletions(-)
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index a02883d5af43..502c1e0275af 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -1,592 +1,6 @@
-/*
- * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
- */
-#ifndef _ASM_POWERPC_SYSTEM_H
-#define _ASM_POWERPC_SYSTEM_H
-
-#include <linux/kernel.h>
-#include <linux/irqflags.h>
-
-#include <asm/hw_irq.h>
-
-/*
- * Memory barrier.
- * The sync instruction guarantees that all memory accesses initiated
- * by this processor have been performed (with respect to all other
- * mechanisms that access memory).  The eieio instruction is a barrier
- * providing an ordering (separately) for (a) cacheable stores and (b)
- * loads and stores to non-cacheable memory (e.g. I/O devices).
- *
- * mb() prevents loads and stores being reordered across this point.
- * rmb() prevents loads being reordered across this point.
- * wmb() prevents stores being reordered across this point.
- * read_barrier_depends() prevents data-dependent loads being reordered
- *      across this point (nop on PPC).
- *
- * *mb() variants without smp_ prefix must order all types of memory
- * operations with one another. sync is the only instruction sufficient
- * to do this.
- *
- * For the smp_ barriers, ordering is for cacheable memory operations
- * only. We have to use the sync instruction for smp_mb(), since lwsync
- * doesn't order loads with respect to previous stores.  Lwsync can be
- * used for smp_rmb() and smp_wmb().
- *
- * However, on CPUs that don't support lwsync, lwsync actually maps to a
- * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
- */
-#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
-#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
-#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
-#define read_barrier_depends()  do { } while(0)
-
-#define set_mb(var, value)      do { var = value; mb(); } while (0)
-
-#ifdef __KERNEL__
-#define AT_VECTOR_SIZE_ARCH 6 /* entries in ARCH_DLINFO */
-#ifdef CONFIG_SMP
-
-#ifdef __SUBARCH_HAS_LWSYNC
-#    define SMPWMB      LWSYNC
-#else
-#    define SMPWMB      eieio
-#endif
-
-#define smp_mb()        mb()
-#define smp_rmb()       __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
-#define smp_wmb()       __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
-#define smp_read_barrier_depends()      read_barrier_depends()
-#else
-#define smp_mb()        barrier()
-#define smp_rmb()       barrier()
-#define smp_wmb()       barrier()
-#define smp_read_barrier_depends()      do { } while(0)
-#endif /* CONFIG_SMP */
-
-/*
- * This is a barrier which prevents following instructions from being
- * started until the value of the argument x is known.  For example, if
- * x is a variable loaded from memory, this prevents following
- * instructions from being executed until the load has been performed.
- */
-#define data_barrier(x) \
-        asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
-
-struct task_struct;
-struct pt_regs;
-
-#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
-
-extern int (*__debugger)(struct pt_regs *regs);
-extern int (*__debugger_ipi)(struct pt_regs *regs);
-extern int (*__debugger_bpt)(struct pt_regs *regs);
-extern int (*__debugger_sstep)(struct pt_regs *regs);
-extern int (*__debugger_iabr_match)(struct pt_regs *regs);
-extern int (*__debugger_dabr_match)(struct pt_regs *regs);
-extern int (*__debugger_fault_handler)(struct pt_regs *regs);
-
-#define DEBUGGER_BOILERPLATE(__NAME) \
-static inline int __NAME(struct pt_regs *regs) \
-{ \
-        if (unlikely(__ ## __NAME)) \
-                return __ ## __NAME(regs); \
-        return 0; \
-}
-
-DEBUGGER_BOILERPLATE(debugger)
-DEBUGGER_BOILERPLATE(debugger_ipi)
-DEBUGGER_BOILERPLATE(debugger_bpt)
-DEBUGGER_BOILERPLATE(debugger_sstep)
-DEBUGGER_BOILERPLATE(debugger_iabr_match)
-DEBUGGER_BOILERPLATE(debugger_dabr_match)
-DEBUGGER_BOILERPLATE(debugger_fault_handler)
-
-#else
-static inline int debugger(struct pt_regs *regs) { return 0; }
-static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
-static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
-static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
-static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
-static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
-static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
-#endif
-
-extern int set_dabr(unsigned long dabr);
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
-extern void do_send_trap(struct pt_regs *regs, unsigned long address,
-                         unsigned long error_code, int signal_code, int brkpt);
-#else
-extern void do_dabr(struct pt_regs *regs, unsigned long address,
-                    unsigned long error_code);
-#endif
-extern void print_backtrace(unsigned long *);
-extern void flush_instruction_cache(void);
-extern void hard_reset_now(void);
-extern void poweroff_now(void);
-
-#ifdef CONFIG_6xx
-extern long _get_L2CR(void);
-extern long _get_L3CR(void);
-extern void _set_L2CR(unsigned long);
-extern void _set_L3CR(unsigned long);
-#else
-#define _get_L2CR()     0L
-#define _get_L3CR()     0L
-#define _set_L2CR(val)  do { } while(0)
-#define _set_L3CR(val)  do { } while(0)
-#endif
-
-extern void via_cuda_init(void);
-extern void read_rtc_time(void);
-extern void pmac_find_display(void);
-extern void giveup_fpu(struct task_struct *);
-extern void disable_kernel_fp(void);
-extern void enable_kernel_fp(void);
-extern void flush_fp_to_thread(struct task_struct *);
-extern void enable_kernel_altivec(void);
-extern void giveup_altivec(struct task_struct *);
-extern void load_up_altivec(struct task_struct *);
-extern int emulate_altivec(struct pt_regs *);
-extern void __giveup_vsx(struct task_struct *);
-extern void giveup_vsx(struct task_struct *);
-extern void enable_kernel_spe(void);
-extern void giveup_spe(struct task_struct *);
-extern void load_up_spe(struct task_struct *);
-extern int fix_alignment(struct pt_regs *);
-extern void cvt_fd(float *from, double *to);
-extern void cvt_df(double *from, float *to);
-
-#ifndef CONFIG_SMP
-extern void discard_lazy_cpu_state(void);
-#else
-static inline void discard_lazy_cpu_state(void)
-{
-}
-#endif
-
-#ifdef CONFIG_ALTIVEC
-extern void flush_altivec_to_thread(struct task_struct *);
-#else
-static inline void flush_altivec_to_thread(struct task_struct *t)
-{
-}
-#endif
-
-#ifdef CONFIG_VSX
-extern void flush_vsx_to_thread(struct task_struct *);
-#else
-static inline void flush_vsx_to_thread(struct task_struct *t)
-{
-}
-#endif
-
-#ifdef CONFIG_SPE
-extern void flush_spe_to_thread(struct task_struct *);
-#else
-static inline void flush_spe_to_thread(struct task_struct *t)
-{
-}
-#endif
-
-extern int call_rtas(const char *, int, int, unsigned long *, ...);
-extern void cacheable_memzero(void *p, unsigned int nb);
-extern void *cacheable_memcpy(void *, const void *, unsigned int);
-extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
-extern void bad_page_fault(struct pt_regs *, unsigned long, int);
-extern void _exception(int, struct pt_regs *, int, unsigned long);
-extern void die(const char *, struct pt_regs *, long);
-extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
-
-#ifdef CONFIG_BOOKE_WDT
-extern u32 booke_wdt_enabled;
-extern u32 booke_wdt_period;
-#endif /* CONFIG_BOOKE_WDT */
-
-struct device_node;
-extern void note_scsi_host(struct device_node *, void *);
-
-extern struct task_struct *__switch_to(struct task_struct *,
-        struct task_struct *);
-#define switch_to(prev, next, last)     ((last) = __switch_to((prev), (next)))
-
-struct thread_struct;
-extern struct task_struct *_switch(struct thread_struct *prev,
-                                   struct thread_struct *next);
-
-extern unsigned int rtas_data;
-extern int mem_init_done;       /* set on boot once kmalloc can be called */
-extern int init_bootmem_done;   /* set once bootmem is available */
-extern phys_addr_t memory_limit;
-extern unsigned long klimit;
-extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
-
-extern int powersave_nap;       /* set if nap mode can be used in idle loop */
-void cpu_idle_wait(void);
-
-#ifdef CONFIG_PSERIES_IDLE
-extern void update_smt_snooze_delay(int snooze);
-extern int pseries_notify_cpuidle_add_cpu(int cpu);
-#else
-static inline void update_smt_snooze_delay(int snooze) {}
-static inline int pseries_notify_cpuidle_add_cpu(int cpu) { return 0; }
-#endif
-
-/*
- * Atomic exchange
- *
- * Changes the memory location '*ptr' to be val and returns
- * the previous value stored there.
- */
-static __always_inline unsigned long
-__xchg_u32(volatile void *p, unsigned long val)
-{
-        unsigned long prev;
-
-        __asm__ __volatile__(
-        PPC_RELEASE_BARRIER
-"1:     lwarx   %0,0,%2 \n"
-        PPC405_ERR77(0,%2)
-"       stwcx.  %3,0,%2 \n\
-        bne-    1b"
-        PPC_ACQUIRE_BARRIER
-        : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
-        : "r" (p), "r" (val)
-        : "cc", "memory");
-
-        return prev;
-}
-
-/*
- * Atomic exchange
- *
- * Changes the memory location '*ptr' to be val and returns
- * the previous value stored there.
- */
-static __always_inline unsigned long
-__xchg_u32_local(volatile void *p, unsigned long val)
-{
-        unsigned long prev;
-
-        __asm__ __volatile__(
-"1:     lwarx   %0,0,%2 \n"
-        PPC405_ERR77(0,%2)
-"       stwcx.  %3,0,%2 \n\
-        bne-    1b"
-        : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
-        : "r" (p), "r" (val)
-        : "cc", "memory");
-
-        return prev;
-}
-
-#ifdef CONFIG_PPC64
-static __always_inline unsigned long
-__xchg_u64(volatile void *p, unsigned long val)
-{
-        unsigned long prev;
-
-        __asm__ __volatile__(
-        PPC_RELEASE_BARRIER
-"1:     ldarx   %0,0,%2 \n"
-        PPC405_ERR77(0,%2)
-"       stdcx.  %3,0,%2 \n\
-        bne-    1b"
-        PPC_ACQUIRE_BARRIER
-        : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
-        : "r" (p), "r" (val)
-        : "cc", "memory");
-
-        return prev;
-}
-
-static __always_inline unsigned long
-__xchg_u64_local(volatile void *p, unsigned long val)
-{
-        unsigned long prev;
-
-        __asm__ __volatile__(
-"1:     ldarx   %0,0,%2 \n"
-        PPC405_ERR77(0,%2)
-"       stdcx.  %3,0,%2 \n\
-        bne-    1b"
-        : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
-        : "r" (p), "r" (val)
-        : "cc", "memory");
-
-        return prev;
-}
-#endif
-
-/*
- * This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid xchg().
- */
-extern void __xchg_called_with_bad_pointer(void);
-
-static __always_inline unsigned long
-__xchg(volatile void *ptr, unsigned long x, unsigned int size)
-{
-        switch (size) {
-        case 4:
-                return __xchg_u32(ptr, x);
-#ifdef CONFIG_PPC64
-        case 8:
-                return __xchg_u64(ptr, x);
-#endif
-        }
-        __xchg_called_with_bad_pointer();
-        return x;
-}
-
-static __always_inline unsigned long
-__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
-{
-        switch (size) {
-        case 4:
-                return __xchg_u32_local(ptr, x);
-#ifdef CONFIG_PPC64
-        case 8:
-                return __xchg_u64_local(ptr, x);
-#endif
-        }
-        __xchg_called_with_bad_pointer();
-        return x;
-}
-#define xchg(ptr,x) \
-  ({ \
-     __typeof__(*(ptr)) _x_ = (x); \
-     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
-  })
-
-#define xchg_local(ptr,x) \
-  ({ \
-     __typeof__(*(ptr)) _x_ = (x); \
-     (__typeof__(*(ptr))) __xchg_local((ptr), \
-                (unsigned long)_x_, sizeof(*(ptr))); \
-  })
-
-/*
- * Compare and exchange - if *p == old, set it to new,
- * and return the old value of *p.
- */
-#define __HAVE_ARCH_CMPXCHG     1
-
-static __always_inline unsigned long
-__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
-{
-        unsigned int prev;
-
-        __asm__ __volatile__ (
-        PPC_RELEASE_BARRIER
-"1:     lwarx   %0,0,%2         # __cmpxchg_u32\n\
-        cmpw    0,%0,%3\n\
-        bne-    2f\n"
-        PPC405_ERR77(0,%2)
-"       stwcx.  %4,0,%2\n\
-        bne-    1b"
-        PPC_ACQUIRE_BARRIER
-        "\n\
-2:"
-        : "=&r" (prev), "+m" (*p)
-        : "r" (p), "r" (old), "r" (new)
-        : "cc", "memory");
-
-        return prev;
-}
-
-static __always_inline unsigned long
-__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
-                        unsigned long new)
-{
-        unsigned int prev;
-
-        __asm__ __volatile__ (
-"1:     lwarx   %0,0,%2         # __cmpxchg_u32\n\
-        cmpw    0,%0,%3\n\
-        bne-    2f\n"
-        PPC405_ERR77(0,%2)
-"       stwcx.  %4,0,%2\n\
-        bne-    1b"
-        "\n\
-2:"
-        : "=&r" (prev), "+m" (*p)
-        : "r" (p), "r" (old), "r" (new)
-        : "cc", "memory");
-
-        return prev;
-}
-
-#ifdef CONFIG_PPC64
-static __always_inline unsigned long
-__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
-{
-        unsigned long prev;
-
-        __asm__ __volatile__ (
-        PPC_RELEASE_BARRIER
-"1:     ldarx   %0,0,%2         # __cmpxchg_u64\n\
-        cmpd    0,%0,%3\n\
-        bne-    2f\n\
-        stdcx.  %4,0,%2\n\
-        bne-    1b"
-        PPC_ACQUIRE_BARRIER
-        "\n\
-2:"
-        : "=&r" (prev), "+m" (*p)
-        : "r" (p), "r" (old), "r" (new)
-        : "cc", "memory");
-
-        return prev;
-}
-
-static __always_inline unsigned long
-__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
-                        unsigned long new)
-{
-        unsigned long prev;
-
-        __asm__ __volatile__ (
-"1:     ldarx   %0,0,%2         # __cmpxchg_u64\n\
-        cmpd    0,%0,%3\n\
-        bne-    2f\n\
-        stdcx.  %4,0,%2\n\
-        bne-    1b"
-        "\n\
-2:"
-        : "=&r" (prev), "+m" (*p)
-        : "r" (p), "r" (old), "r" (new)
-        : "cc", "memory");
-
-        return prev;
-}
-#endif
-
-/* This function doesn't exist, so you'll get a linker error
-   if something tries to do an invalid cmpxchg().  */
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-static __always_inline unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
-          unsigned int size)
-{
-        switch (size) {
-        case 4:
-                return __cmpxchg_u32(ptr, old, new);
-#ifdef CONFIG_PPC64
-        case 8:
-                return __cmpxchg_u64(ptr, old, new);
-#endif
-        }
-        __cmpxchg_called_with_bad_pointer();
-        return old;
-}
-
-static __always_inline unsigned long
-__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
-          unsigned int size)
-{
-        switch (size) {
-        case 4:
-                return __cmpxchg_u32_local(ptr, old, new);
-#ifdef CONFIG_PPC64
-        case 8:
-                return __cmpxchg_u64_local(ptr, old, new);
-#endif
-        }
-        __cmpxchg_called_with_bad_pointer();
-        return old;
-}
-
-#define cmpxchg(ptr, o, n) \
-  ({ \
-     __typeof__(*(ptr)) _o_ = (o); \
-     __typeof__(*(ptr)) _n_ = (n); \
-     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
-                                    (unsigned long)_n_, sizeof(*(ptr))); \
-  })
-
-
-#define cmpxchg_local(ptr, o, n) \
-  ({ \
-     __typeof__(*(ptr)) _o_ = (o); \
-     __typeof__(*(ptr)) _n_ = (n); \
-     (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
-                                    (unsigned long)_n_, sizeof(*(ptr))); \
-  })
-
-#ifdef CONFIG_PPC64
-/*
- * We handle most unaligned accesses in hardware. On the other hand
- * unaligned DMA can be very expensive on some ppc64 IO chips (it does
- * powers of 2 writes until it reaches sufficient alignment).
- *
- * Based on this we disable the IP header alignment in network drivers.
- */
-#define NET_IP_ALIGN    0
-
-#define cmpxchg64(ptr, o, n) \
-  ({ \
-        BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
-        cmpxchg((ptr), (o), (n)); \
-  })
-#define cmpxchg64_local(ptr, o, n) \
-  ({ \
-        BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
-        cmpxchg_local((ptr), (o), (n)); \
-  })
-#else
-#include <asm-generic/cmpxchg-local.h>
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-#endif
-
-extern unsigned long arch_align_stack(unsigned long sp);
-
-/* Used in very early kernel initialization. */
-extern unsigned long reloc_offset(void);
-extern unsigned long add_reloc_offset(unsigned long);
-extern void reloc_got2(unsigned long);
-
-#define PTRRELOC(x)     ((typeof(x)) add_reloc_offset((unsigned long)(x)))
-
-extern struct dentry *powerpc_debugfs_root;
-
-#ifdef CONFIG_PPC64
-
-extern void __ppc64_runlatch_on(void);
-extern void __ppc64_runlatch_off(void);
-
-/*
- * We manually hard enable-disable, this is called
- * in the idle loop and we don't want to mess up
- * with soft-disable/enable & interrupt replay.
- */
-#define ppc64_runlatch_off() \
-        do { \
-                if (cpu_has_feature(CPU_FTR_CTRL) && \
-                    test_thread_local_flags(_TLF_RUNLATCH)) { \
-                        unsigned long msr = mfmsr(); \
-                        __hard_irq_disable(); \
-                        __ppc64_runlatch_off(); \
-                        if (msr & MSR_EE) \
-                                __hard_irq_enable(); \
-                } \
-        } while (0)
-
-#define ppc64_runlatch_on() \
-        do { \
-                if (cpu_has_feature(CPU_FTR_CTRL) && \
-                    !test_thread_local_flags(_TLF_RUNLATCH)) { \
-                        unsigned long msr = mfmsr(); \
-                        __hard_irq_disable(); \
-                        __ppc64_runlatch_on(); \
-                        if (msr & MSR_EE) \
-                                __hard_irq_enable(); \
-                } \
-        } while (0)
-#else
-#define ppc64_runlatch_on()
-#define ppc64_runlatch_off()
-#endif /* CONFIG_PPC64 */
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_SYSTEM_H */
+/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */
+#include <asm/barrier.h>
+#include <asm/cmpxchg.h>
+#include <asm/debug.h>
+#include <asm/exec.h>
+#include <asm/switch_to.h>
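[Editorial note: the xchg()/cmpxchg() helpers removed above keep the semantics spelled out in their comments - xchg() stores a new value and returns the previous one, cmpxchg() only stores if the current value matches the expected one, and both wrap the lwarx/stwcx. (or ldarx/stdcx.) loop in barriers that the _local variants omit. A short usage sketch of that API, with hypothetical names, shown only to illustrate the semantics now provided by asm/cmpxchg.h:

    static unsigned long example_pending;   /* hypothetical flag word */

    static unsigned long example_take_pending(void)
    {
            /* xchg() writes 0 and hands back whatever value was there before */
            return xchg(&example_pending, 0UL);
    }

    static int example_set_once(unsigned long val)
    {
            /* cmpxchg() returns the previous value; 0 means our store went in */
            return cmpxchg(&example_pending, 0UL, val) == 0;
    }
]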