path: root/include
author		Ingo Molnar <mingo@elte.hu>	2006-01-12 04:05:27 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-12 12:08:49 -0500
commit		4dc7a0bbeb6882ad665e588e82fabe5bb4645f2f (patch)
tree		8c034f802157d7f449e76f45086c0e13e0ea4711 /include
parent		c6b44d10f25e5a93eca5135b686a35775c63546e (diff)
[PATCH] sched: add cacheflush() asm
Add per-arch sched_cacheflush() which is a write-back cacheflush used by
the migration-cost calibration code at bootup time.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
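For context: the calibration this helper serves times how long it takes to re-populate a CPU's cache after a flush, so it can compare cache-hot and cache-cold migration costs. A simplified sketch of that usage follows; the names touch_cache() and measure_one() are illustrative only, not the actual kernel/sched.c code:

/*
 * Illustrative sketch (hypothetical names) of how the bootup
 * calibration might use sched_cacheflush(): flush, then time how
 * long re-touching a working set takes.
 */
static void touch_cache(void *cache, unsigned long size)
{
	unsigned long i, n = size / sizeof(long);
	volatile long *p = cache;

	for (i = 0; i < n; i++)		/* pull the buffer into this CPU's cache */
		p[i]++;
}

static unsigned long long measure_one(void *cache, unsigned long size)
{
	unsigned long long t0, t1;

	sched_cacheflush();	/* start cache-cold so timings are comparable */

	t0 = sched_clock();
	touch_cache(cache, size);
	t1 = sched_clock();

	return t1 - t0;
}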
Diffstat (limited to 'include')
-rw-r--r--	include/asm-alpha/system.h	10
-rw-r--r--	include/asm-arm/system.h	10
-rw-r--r--	include/asm-arm26/system.h	10
-rw-r--r--	include/asm-i386/system.h	9
-rw-r--r--	include/asm-ia64/system.h	1
-rw-r--r--	include/asm-m32r/system.h	10
-rw-r--r--	include/asm-mips/system.h	10
-rw-r--r--	include/asm-parisc/system.h	9
-rw-r--r--	include/asm-powerpc/system.h	10
-rw-r--r--	include/asm-ppc/system.h	10
-rw-r--r--	include/asm-s390/system.h	10
-rw-r--r--	include/asm-sh/system.h	10
-rw-r--r--	include/asm-sparc/system.h	10
-rw-r--r--	include/asm-sparc64/system.h	10
-rw-r--r--	include/asm-x86_64/system.h	9
15 files changed, 138 insertions(+), 0 deletions(-)
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
index 050e86d12891..766ab868e8ab 100644
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -140,6 +140,16 @@ extern void halt(void) __attribute__((noreturn));
 struct task_struct;
 extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 #define imb() \
 __asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
 
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index 5621d61ebc07..0497171df8c9 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -172,6 +172,16 @@ do { \
 } while (0)
 
 /*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
+/*
  * CPU interrupt mask handling.
  */
 #if __LINUX_ARM_ARCH__ >= 6
diff --git a/include/asm-arm26/system.h b/include/asm-arm26/system.h
index f23fac1938f3..1bce6b3590ff 100644
--- a/include/asm-arm26/system.h
+++ b/include/asm-arm26/system.h
@@ -115,6 +115,16 @@ do { \
 } while (0)
 
 /*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
+/*
  * Save the current interrupt enable state & disable IRQs
  */
 #define local_irq_save(x) \
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 9c0593b7a94e..36a92ed6a9d0 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -548,6 +548,15 @@ void enable_hlt(void);
 extern int es7000_plat;
 void cpu_idle_wait(void);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible:
+ */
+static inline void sched_cacheflush(void)
+{
+	wbinvd();
+}
+
 extern unsigned long arch_align_stack(unsigned long sp);
 
 #endif
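(On i386, wbinvd() is defined elsewhere in the same header rather than in this hunk; as the x86_64 hunk below shows for its side, it expands to roughly the following. WBINVD is a privileged instruction that writes back and invalidates all cache lines, which is why it fits the "flush as much as possible" requirement:)

#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory");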
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 635235fa1e32..510c31c50723 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -279,6 +279,7 @@ extern void ia64_load_extra (struct task_struct *task);
 #define ia64_platform_is(x)	(strcmp(x, platform_name) == 0)
 
 void cpu_idle_wait(void);
+void sched_cacheflush(void);
 
 #define arch_align_stack(x) (x)
 
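(ia64 is the only architecture here that merely declares sched_cacheflush(); the out-of-line body lives under arch/ia64 and so falls outside this include/-limited diffstat. Presumably it delegates to the SAL cache-flush firmware service, something like the sketch below, assuming ia64_sal_cache_flush() with cache type 3:)

void sched_cacheflush(void)
{
	/* SAL_CACHE_FLUSH, cache_type 3 = flush both i-cache and d-cache */
	ia64_sal_cache_flush(3);
}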
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index dcf619a0a0b0..06c12a037cba 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -68,6 +68,16 @@
 	last = __last; \
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 /* Interrupt Control */
 #if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
 #define local_irq_enable() \
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 330c4e497af3..5f761ad5a8d9 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -164,6 +164,16 @@ do { \
 	__restore_dsp(current); \
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 {
 	__u32 retval;
diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h
index f3928d3a80cb..a5a973c0c07f 100644
--- a/include/asm-parisc/system.h
+++ b/include/asm-parisc/system.h
@@ -49,6 +49,15 @@ extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *
 	(last) = _switch_to(prev, next); \
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
 
 
 /* interrupt control */
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 0c58e32a9570..1be629b4fb97 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -175,6 +175,16 @@ struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 extern unsigned int rtas_data;
 extern int mem_init_done;	/* set on boot once kmalloc can be called */
 extern unsigned long memory_limit;
diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h
index bd99cb53a19f..212dca66fcac 100644
--- a/include/asm-ppc/system.h
+++ b/include/asm-ppc/system.h
@@ -123,6 +123,16 @@ extern struct task_struct *__switch_to(struct task_struct *,
 			       struct task_struct *);
 #define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index 864cae7e1fd6..c7c3a9ad593f 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -104,6 +104,16 @@ static inline void restore_access_regs(unsigned int *acrs)
 	prev = __switch_to(prev,next); \
 } while (0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 extern void account_user_vtime(struct task_struct *);
 extern void account_system_vtime(struct task_struct *);
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 28a3c2d8bcd7..bb0330499bdf 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -57,6 +57,16 @@
 	last = __last; \
 } while (0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 #define nop() __asm__ __volatile__ ("nop")
 
 
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index 1f6b71f9e1b6..52fe2e464e15 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -166,6 +166,16 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
 	} while(0)
 
 /*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
+/*
  * Changing the IRQ level on the Sparc.
  */
 extern void local_irq_restore(unsigned long);
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index 309f1466b6fa..07d72367f82c 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -253,6 +253,16 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
 	} \
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
 {
 	unsigned long tmp1, tmp2;
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index 38c1e8a69c9c..0eacbefb7dd0 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -193,6 +193,15 @@ static inline void write_cr4(unsigned long val)
 #define wbinvd() \
 	__asm__ __volatile__ ("wbinvd": : :"memory");
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ */
+static inline void sched_cacheflush(void)
+{
+	wbinvd();
+}
+
 #endif /* __KERNEL__ */
 
 #define nop() __asm__ __volatile__ ("nop")