Diffstat (limited to 'include/asm-powerpc/system.h')

 include/asm-powerpc/system.h | 29 +++++++++++++++++++++++------
 1 file changed, 23 insertions(+), 6 deletions(-)
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 5341b75c75cb..d9bf53653b10 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -4,7 +4,6 @@
 #ifndef _ASM_POWERPC_SYSTEM_H
 #define _ASM_POWERPC_SYSTEM_H
 
-#include <linux/config.h>
 #include <linux/kernel.h>
 
 #include <asm/hw_irq.h>
@@ -42,6 +41,7 @@
 #define set_mb(var, value)	do { var = value; mb(); } while (0)
 #define set_wmb(var, value)	do { var = value; wmb(); } while (0)
 
+#ifdef __KERNEL__
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
 #define smp_rmb()	rmb()
@@ -54,7 +54,6 @@
 #define smp_read_barrier_depends()	do { } while(0)
 #endif /* CONFIG_SMP */
 
-#ifdef __KERNEL__
 struct task_struct;
 struct pt_regs;
 
@@ -134,6 +133,14 @@ extern int fix_alignment(struct pt_regs *);
 extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
 extern void cvt_df(double *from, float *to, struct thread_struct *thread);
 
+#ifndef CONFIG_SMP
+extern void discard_lazy_cpu_state(void);
+#else
+static inline void discard_lazy_cpu_state(void)
+{
+}
+#endif
+
 #ifdef CONFIG_ALTIVEC
 extern void flush_altivec_to_thread(struct task_struct *);
 #else
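
The SMP/non-SMP split above mirrors lazy FPU/Altivec handling: a non-SMP kernel may leave a task's floating-point or vector state live in the CPU registers between context switches, so paths that destroy a task's state need a real discard_lazy_cpu_state(); an SMP kernel saves that state at every switch, so the hook collapses to a no-op. A minimal caller sketch, assuming a hypothetical example_flush_thread() and the 2.6-era powerpc thread_struct fpr[] layout:

	/* Hypothetical sketch, not the kernel's actual flush_thread(). */
	static void example_flush_thread(struct task_struct *tsk)
	{
		/* Forget any FP/vector state still live in this CPU's
		 * registers; on SMP this compiles to nothing, because
		 * the state was saved at the last context switch. */
		discard_lazy_cpu_state();

		/* The saved state can now be reinitialized safely. */
		memset(tsk->thread.fpr, 0, sizeof(tsk->thread.fpr));
	}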
@@ -176,6 +183,16 @@ struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 extern unsigned int rtas_data;
 extern int mem_init_done;	/* set on boot once kmalloc can be called */
 extern unsigned long memory_limit;
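
sched_cacheflush() is deliberately left empty here (hence the TODO); its purpose is to let the scheduler's migration-cost autodetection start each timed memory walk from a cold cache. A simplified sketch of that measurement pattern, assuming get_cycles() from <asm/timex.h> as the clock and a hypothetical measure_cold_walk() helper:

	#include <asm/timex.h>

	/* Hypothetical probe: time a cold-cache walk over buf. */
	static cycles_t measure_cold_walk(const char *buf, size_t size)
	{
		volatile const char *p = buf;
		cycles_t t0, t1;
		size_t i;

		sched_cacheflush();	/* start cold (a no-op on powerpc
					 * until the TODO is filled in) */
		t0 = get_cycles();
		for (i = 0; i < size; i += 64)	/* assume 64-byte lines */
			(void)p[i];
		t1 = get_cycles();
		return t1 - t0;
	}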
@@ -195,7 +212,7 @@ __xchg_u32(volatile void *p, unsigned long val)
 	unsigned long prev;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	lwarx	%0,0,%2 \n"
 	PPC405_ERR77(0,%2)
 "	stwcx.	%3,0,%2 \n\
@@ -215,7 +232,7 @@ __xchg_u64(volatile void *p, unsigned long val)
 	unsigned long prev;
 
 	__asm__ __volatile__(
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	ldarx	%0,0,%2 \n"
 	PPC405_ERR77(0,%2)
 "	stdcx.	%3,0,%2 \n\
@@ -270,7 +287,7 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
 	unsigned int prev;
 
 	__asm__ __volatile__ (
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
 	cmpw	0,%0,%3\n\
 	bne-	2f\n"
@@ -294,7 +311,7 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
 	unsigned long prev;
 
 	__asm__ __volatile__ (
-	EIEIO_ON_SMP
+	LWSYNC_ON_SMP
 "1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
 	cmpd	0,%0,%3\n\
 	bne-	2f\n\
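
The last four hunks swap the barrier in front of each lwarx/ldarx reservation from EIEIO_ON_SMP to LWSYNC_ON_SMP. eieio orders only stores to cacheable memory, so a load issued before xchg() or cmpxchg() could still be reordered past the atomic's store; lwsync orders prior loads as well as prior stores, giving the atomic release semantics on its front side (the acquire side is the isync issued after the successful store-conditional in the full function bodies, which these hunks do not show). A hedged illustration, assuming the xchg()/cmpxchg() wrapper macros this header builds around __xchg_u32()/__cmpxchg_u32(), with hypothetical payload/owner variables:

	/* Sketch only: why ordering prior *loads* matters too. */
	static int payload;
	static unsigned int owner;

	static void publish(int value)
	{
		payload = value;	/* plain store */
		/* xchg() starts with lwsync on SMP, so the store to
		 * payload (and any earlier load) is ordered before
		 * the atomic update: release semantics.  eieio would
		 * have ordered the store but not a prior load. */
		(void)xchg(&owner, 1);
	}

	static int consume(void)
	{
		/* On success, the acquire-side isync keeps the read
		 * of payload from being satisfied early. */
		if (cmpxchg(&owner, 1, 0) == 1)
			return payload;	/* sees the published value */
		return -1;
	}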