author		Stephen Rothwell <sfr@canb.auug.org.au>	2008-08-01 01:20:30 -0400
committer	Paul Mackerras <paulus@samba.org>	2008-08-03 22:02:00 -0400
commit		b8b572e1015f81b4e748417be2629dfe51ab99f9 (patch)
tree		7df58667d5ed71d6c8f8f4ce40ca16b6fb776d0b /arch/powerpc/include/asm/system.h
parent		2b12a4c524812fb3f6ee590a02e65b95c8c32229 (diff)
powerpc: Move include files to arch/powerpc/include/asm
from include/asm-powerpc. This is the result of a
mkdir arch/powerpc/include/asm
git mv include/asm-powerpc/* arch/powerpc/include/asm
Followed by a few documentation/comment fixups and a couple of places
where <asm-powerpc/...> was being used explicitly. Of the latter only
one was outside the arch code and it is a driver only built for powerpc.
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/include/asm/system.h')
-rw-r--r--	arch/powerpc/include/asm/system.h	548
1 file changed, 548 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
new file mode 100644
index 000000000000..d6648c143322
--- /dev/null
+++ b/arch/powerpc/include/asm/system.h
@@ -0,0 +1,548 @@
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_SYSTEM_H
#define _ASM_POWERPC_SYSTEM_H

#include <linux/kernel.h>
#include <linux/irqflags.h>

#include <asm/hw_irq.h>

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 *	across this point (nop on PPC).
 *
 * We have to use the sync instructions for mb(), since lwsync doesn't
 * order loads with respect to previous stores.  Lwsync is fine for
 * rmb(), though. Note that rmb() actually uses a sync on 32-bit
 * architectures.
 *
 * For wmb(), we use sync since wmb is used in drivers to order
 * stores to system memory with respect to writes to the device.
 * However, smp_wmb() can be a lighter-weight lwsync or eieio barrier
 * on SMP since it is only used to order updates to system memory.
 */
#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
#define rmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define wmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends()	do { } while(0)

#define set_mb(var, value)	do { var = value; mb(); } while (0)

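As the comment above explains, wmb() exists largely so drivers can order stores to system memory against a subsequent write to the device. A minimal hypothetical sketch of that pattern (my_desc, the doorbell register, and the use of out_be32() are illustrative assumptions, not part of this patch):

struct my_desc {			/* hypothetical DMA descriptor layout */
	u64 addr;
	u32 len;
};

static void post_descriptor(struct my_desc *desc, u32 __iomem *doorbell,
			    dma_addr_t buf, u32 len)
{
	desc->addr = buf;		/* fill in the descriptor in memory... */
	desc->len = len;
	wmb();				/* ...and order those stores before the MMIO kick */
	out_be32(doorbell, 1);		/* tell the device to go fetch it */
}
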
#ifdef __KERNEL__
#define AT_VECTOR_SIZE_ARCH 6 /* entries in ARCH_DLINFO */
#ifdef CONFIG_SMP

#ifdef __SUBARCH_HAS_LWSYNC
#	define SMPWMB	lwsync
#else
#	define SMPWMB	eieio
#endif

#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	__asm__ __volatile__ (__stringify(SMPWMB) : : :"memory")
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif /* CONFIG_SMP */

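On SMP, smp_wmb() and smp_rmb() pair up in the usual publish/consume idiom: the writer orders its payload store before the flag store, and the reader orders the flag load before the payload load. A hypothetical sketch (the data/ready variables are made up for illustration):

static int data;
static int ready;

static void publish(void)
{
	data = 42;		/* payload first */
	smp_wmb();		/* order payload store before flag store */
	ready = 1;		/* then publish */
}

static int try_consume(void)
{
	if (!ready)
		return -1;	/* nothing published yet */
	smp_rmb();		/* order flag load before payload load */
	return data;		/* guaranteed to observe 42 */
}
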
/*
 * This is a barrier which prevents following instructions from being
 * started until the value of the argument x is known.  For example, if
 * x is a variable loaded from memory, this prevents following
 * instructions from being executed until the load has been performed.
 */
#define data_barrier(x)	\
	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");

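A hypothetical use of data_barrier(): stall further execution until a value just loaded (here an MMIO status read via in_be32(), chosen only for illustration) is actually available before anything depends on it:

/* Hypothetical sketch: the isync cannot complete until the twi has
 * seen the loaded value, so later instructions don't start early. */
static inline u32 read_status_synced(volatile u32 __iomem *status_reg)
{
	u32 status = in_be32(status_reg);
	data_barrier(status);
	return status;
}
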
struct task_struct;
struct pt_regs;

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)

extern int (*__debugger)(struct pt_regs *regs);
extern int (*__debugger_ipi)(struct pt_regs *regs);
extern int (*__debugger_bpt)(struct pt_regs *regs);
extern int (*__debugger_sstep)(struct pt_regs *regs);
extern int (*__debugger_iabr_match)(struct pt_regs *regs);
extern int (*__debugger_dabr_match)(struct pt_regs *regs);
extern int (*__debugger_fault_handler)(struct pt_regs *regs);

#define DEBUGGER_BOILERPLATE(__NAME) \
static inline int __NAME(struct pt_regs *regs) \
{ \
	if (unlikely(__ ## __NAME)) \
		return __ ## __NAME(regs); \
	return 0; \
}

DEBUGGER_BOILERPLATE(debugger)
DEBUGGER_BOILERPLATE(debugger_ipi)
DEBUGGER_BOILERPLATE(debugger_bpt)
DEBUGGER_BOILERPLATE(debugger_sstep)
DEBUGGER_BOILERPLATE(debugger_iabr_match)
DEBUGGER_BOILERPLATE(debugger_dabr_match)
DEBUGGER_BOILERPLATE(debugger_fault_handler)

#else
static inline int debugger(struct pt_regs *regs) { return 0; }
static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
#endif

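For reference, DEBUGGER_BOILERPLATE(debugger) in the CONFIG_DEBUGGER/CONFIG_KEXEC branch above expands to the following wrapper, which calls through the hook pointer only when a debugger has installed one:

static inline int debugger(struct pt_regs *regs)
{
	if (unlikely(__debugger))
		return __debugger(regs);
	return 0;
}
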
extern int set_dabr(unsigned long dabr);
extern void do_dabr(struct pt_regs *regs, unsigned long address,
		    unsigned long error_code);
extern void print_backtrace(unsigned long *);
extern void show_regs(struct pt_regs * regs);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);

#ifdef CONFIG_6xx
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR()	0L
#define _get_L3CR()	0L
#define _set_L2CR(val)	do { } while(0)
#define _set_L3CR(val)	do { } while(0)
#endif

extern void via_cuda_init(void);
extern void read_rtc_time(void);
extern void pmac_find_display(void);
extern void giveup_fpu(struct task_struct *);
extern void disable_kernel_fp(void);
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_altivec(void);
extern void giveup_altivec(struct task_struct *);
extern void load_up_altivec(struct task_struct *);
extern int emulate_altivec(struct pt_regs *);
extern void __giveup_vsx(struct task_struct *);
extern void giveup_vsx(struct task_struct *);
extern void enable_kernel_spe(void);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
extern void cvt_df(double *from, float *to, struct thread_struct *thread);

#ifndef CONFIG_SMP
extern void discard_lazy_cpu_state(void);
#else
static inline void discard_lazy_cpu_state(void)
{
}
#endif

#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
#else
static inline void flush_altivec_to_thread(struct task_struct *t)
{
}
#endif

#ifdef CONFIG_VSX
extern void flush_vsx_to_thread(struct task_struct *);
#else
static inline void flush_vsx_to_thread(struct task_struct *t)
{
}
#endif

#ifdef CONFIG_SPE
extern void flush_spe_to_thread(struct task_struct *);
#else
static inline void flush_spe_to_thread(struct task_struct *t)
{
}
#endif

extern int call_rtas(const char *, int, int, unsigned long *, ...);
extern void cacheable_memzero(void *p, unsigned int nb);
extern void *cacheable_memcpy(void *, const void *, unsigned int);
extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
extern void bad_page_fault(struct pt_regs *, unsigned long, int);
extern int die(const char *, struct pt_regs *, long);
extern void _exception(int, struct pt_regs *, int, unsigned long);
extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);

#ifdef CONFIG_BOOKE_WDT
extern u32 booke_wdt_enabled;
extern u32 booke_wdt_period;
#endif /* CONFIG_BOOKE_WDT */

struct device_node;
extern void note_scsi_host(struct device_node *, void *);

extern struct task_struct *__switch_to(struct task_struct *,
		struct task_struct *);
#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))

struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
				   struct thread_struct *next);

extern unsigned int rtas_data;
extern int mem_init_done;	/* set on boot once kmalloc can be called */
extern int init_bootmem_done;	/* set on !NUMA once bootmem is available */
extern unsigned long memory_limit;
extern unsigned long klimit;

extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);

extern int powersave_nap;	/* set if nap mode can be used in idle loop */

/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 */
static __always_inline unsigned long
__xchg_u32(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 */
static __always_inline unsigned long
__xchg_u32_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__xchg_u64(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stdcx.	%3,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__xchg_u64_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stdcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}
#endif

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static __always_inline unsigned long
__xchg(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}

static __always_inline unsigned long
__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32_local(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64_local(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}
#define xchg(ptr,x)							     \
  ({									     \
     __typeof__(*(ptr)) _x_ = (x);					     \
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
  })

#define xchg_local(ptr,x)						     \
  ({									     \
     __typeof__(*(ptr)) _x_ = (x);					     \
     (__typeof__(*(ptr))) __xchg_local((ptr),				     \
		(unsigned long)_x_, sizeof(*(ptr)));			     \
  })

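As a usage illustration (hypothetical code, not from this patch), the fact that xchg() returns the previous value makes it enough to build a crude test-and-set lock:

static unsigned int my_lock_flag;	/* hypothetical lock word */

static void my_lock(void)
{
	/* seeing 0 back means we were the one to flip the flag 0 -> 1 */
	while (xchg(&my_lock_flag, 1) != 0)
		cpu_relax();		/* spin until the holder stores 0 */
}

static void my_unlock(void)
{
	xchg(&my_lock_flag, 0);		/* release the lock */
}
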
/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */
#define __HAVE_ARCH_CMPXCHG	1

static __always_inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
			unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
			unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

static __always_inline unsigned long
__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32_local(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_local(ptr, old, new);
#endif
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr, o, n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })


#define cmpxchg_local(ptr, o, n)					 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,	 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })

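As an illustration (hypothetical, not part of this header), cmpxchg() is the building block of lock-free read-modify-write loops: reread and retry whenever another CPU changed the word between our load and our compare-and-swap:

static unsigned int my_counter;		/* hypothetical shared counter */

static int inc_unless_full(unsigned int limit)
{
	unsigned int old, cur = my_counter;

	for (;;) {
		if (cur >= limit)
			return 0;	/* saturated, give up */
		old = cmpxchg(&my_counter, cur, cur + 1);
		if (old == cur)
			return 1;	/* swap happened, we won the race */
		cur = old;		/* lost a race, retry with fresh value */
	}
}
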
#ifdef CONFIG_PPC64
/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers of 2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
 * cacheline alignment of buffers.
 */
#define NET_IP_ALIGN	0
#define NET_SKB_PAD	L1_CACHE_BYTES

#define cmpxchg64(ptr, o, n)						\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg((ptr), (o), (n));					\
  })
#define cmpxchg64_local(ptr, o, n)					\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
  })
#else
#include <asm-generic/cmpxchg-local.h>
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif

#define arch_align_stack(x) (x)

/* Used in very early kernel initialization. */
extern unsigned long reloc_offset(void);
extern unsigned long add_reloc_offset(unsigned long);
extern void reloc_got2(unsigned long);

#define PTRRELOC(x)	((typeof(x)) add_reloc_offset((unsigned long)(x)))

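PTRRELOC() serves code that runs before the kernel has been relocated to its link-time address; a hypothetical sketch of fixing up a pointer to a global before dereferencing it:

static int early_debug;			/* hypothetical early-boot flag */

static void early_set_debug(void)
{
	/* adjust the link-time address by the boot-time offset first */
	*PTRRELOC(&early_debug) = 1;
}
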
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_system_vtime(struct task_struct *);
#endif

extern struct dentry *powerpc_debugfs_root;

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SYSTEM_H */