author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-19 17:11:14 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-19 17:11:14 -0400
commit		ff86303e3021587c49a14df1bc54fe2d393e2223 (patch)
tree		7f1b26407aef36ba486428285604b8b7a7cbf99e
parent		626ac545c12e5f9bffe93086d1d03d26c99987ea (diff)
parent		e436d80085133858bf2613a630365e8a0459fd58 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched
* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  [PATCH] sched: implement cpu_clock(cpu) high-speed time source
  [PATCH] sched: fix the all pinned logic in load_balance_newidle()
  [PATCH] sched: fix newly idle load balance in case of SMT
  [PATCH] sched: sched_cacheflush is now unused
-rw-r--r--  arch/ia64/kernel/setup.c        9
-rw-r--r--  include/asm-alpha/system.h     10
-rw-r--r--  include/asm-arm/system.h       10
-rw-r--r--  include/asm-arm26/system.h     10
-rw-r--r--  include/asm-i386/system.h       9
-rw-r--r--  include/asm-ia64/system.h       1
-rw-r--r--  include/asm-m32r/system.h      10
-rw-r--r--  include/asm-mips/system.h      10
-rw-r--r--  include/asm-parisc/system.h    11
-rw-r--r--  include/asm-powerpc/system.h   10
-rw-r--r--  include/asm-ppc/system.h       10
-rw-r--r--  include/asm-s390/system.h      10
-rw-r--r--  include/asm-sh/system.h        10
-rw-r--r--  include/asm-sparc/system.h     10
-rw-r--r--  include/asm-sparc64/system.h   10
-rw-r--r--  include/asm-x86_64/system.h     9
-rw-r--r--  include/linux/sched.h           7
-rw-r--r--  kernel/sched.c                 31
18 files changed, 33 insertions(+), 154 deletions(-)
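
The main functional addition in this merge is the cpu_clock(cpu) interface declared in include/linux/sched.h and implemented in kernel/sched.c below. As a rough illustration of how a caller might use it (the helper example_time_section() and its use of printk here are invented for illustration and are not part of this commit), a kernel-side timing snippet could look like this:

/*
 * Illustrative only -- example_time_section() is a made-up helper, not
 * part of this merge.  It samples the new per-cpu cpu_clock() before and
 * after a callback while preemption is disabled, so both readings come
 * from the same CPU's clock.
 */
#include <linux/kernel.h>	/* printk() */
#include <linux/sched.h>	/* cpu_clock() */
#include <linux/smp.h>		/* get_cpu(), put_cpu() */

static void example_time_section(void (*fn)(void))
{
	int cpu = get_cpu();		/* disable preemption, get this CPU */
	unsigned long long t0, t1;

	t0 = cpu_clock(cpu);
	fn();
	t1 = cpu_clock(cpu);
	put_cpu();

	printk(KERN_DEBUG "section took %llu ns on cpu %d\n", t1 - t0, cpu);
}

Because the clock is per-cpu and "slightly incorrect", deltas are only meaningful when both samples are taken on the same CPU, which is why the sketch keeps preemption disabled between the two reads.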
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 4d9864cc92c9..cf06fe799041 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -980,15 +980,6 @@ cpu_init (void)
 	pm_idle = default_idle;
 }
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- */
-void sched_cacheflush(void)
-{
-	ia64_sal_cache_flush(3);
-}
-
 void __init
 check_bugs (void)
 {
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
index cf1021a97b2e..620c4d86cbf4 100644
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -139,16 +139,6 @@ extern void halt(void) __attribute__((noreturn));
 struct task_struct;
 extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 #define imb() \
 __asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
 
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index 6f8e6a69dc5f..94ea8c6dc1a4 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -254,16 +254,6 @@ do { \
 	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
 } while (0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 #if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
 /*
  * On the StrongARM, "swp" is terminally broken since it bypasses the
diff --git a/include/asm-arm26/system.h b/include/asm-arm26/system.h
index 4703593b3bb5..e09da5ff1f54 100644
--- a/include/asm-arm26/system.h
+++ b/include/asm-arm26/system.h
@@ -110,16 +110,6 @@ do { \
 } while (0)
 
 /*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
-/*
  * Save the current interrupt enable state & disable IRQs
  */
 #define local_irq_save(x)				\
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 94ed3686a5f3..609756c61676 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -310,15 +310,6 @@ void enable_hlt(void);
 extern int es7000_plat;
 void cpu_idle_wait(void);
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible:
- */
-static inline void sched_cacheflush(void)
-{
-	wbinvd();
-}
-
 extern unsigned long arch_align_stack(unsigned long sp);
 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 384fbf7f2a0f..91bb8e00066c 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -259,7 +259,6 @@ extern void ia64_load_extra (struct task_struct *task);
 #define ia64_platform_is(x)	(strcmp(x, platform_name) == 0)
 
 void cpu_idle_wait(void);
-void sched_cacheflush(void);
 
 #define arch_align_stack(x) (x)
 
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index 8ee73d3f316d..2365de5c2955 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -54,16 +54,6 @@
 	); \
 } while(0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 /* Interrupt Control */
 #if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
 #define local_irq_enable() \
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 46bdb3f566f9..76339165bc20 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -71,16 +71,6 @@ do { \
 		write_c0_userlocal(task_thread_info(current)->tp_value);\
 } while(0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 {
 	__u32 retval;
diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h
index 21fbfc5afd02..ee80c920b464 100644
--- a/include/asm-parisc/system.h
+++ b/include/asm-parisc/system.h
@@ -48,17 +48,6 @@ extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *
 	(last) = _switch_to(prev, next);			\
 } while(0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
-
 /* interrupt control */
 #define local_save_flags(x)	__asm__ __volatile__("ssm 0, %0" : "=r" (x) : : "memory")
 #define local_irq_disable()	__asm__ __volatile__("rsm %0,%%r0\n" : : "i" (PSW_I) : "memory" )
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 32aa42b748be..41520b7a7b76 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -184,16 +184,6 @@ struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 extern unsigned int rtas_data;
 extern int mem_init_done;	/* set on boot once kmalloc can be called */
 extern unsigned long memory_limit;
diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h
index d84a3cf4d033..f1311a8f310f 100644
--- a/include/asm-ppc/system.h
+++ b/include/asm-ppc/system.h
@@ -129,16 +129,6 @@ extern struct task_struct *__switch_to(struct task_struct *,
 	struct task_struct *);
 #define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index bbe137c3ed69..64a3cd05cae1 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -97,16 +97,6 @@ static inline void restore_access_regs(unsigned int *acrs)
 	prev = __switch_to(prev,next);					\
 } while (0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 extern void account_vtime(struct task_struct *);
 extern void account_tick_vtime(struct task_struct *);
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 7c75045ae22b..245042537205 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -64,16 +64,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	last = __last;							\
 } while (0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 #ifdef CONFIG_CPU_SH4A
 #define __icbi()			\
 {					\
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index 8b4e23b3bb38..d1a2572e3f55 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -165,16 +165,6 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
 	} while(0)
 
 /*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
-/*
  * Changing the IRQ level on the Sparc.
  */
 extern void local_irq_restore(unsigned long);
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index 8ba380ec6daa..409067408eec 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -204,16 +204,6 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
 	}								\
 } while(0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
 {
 	unsigned long tmp1, tmp2;
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index ead9f9a56234..e4f246d62c46 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -111,15 +111,6 @@ static inline void write_cr4(unsigned long val)
 #define wbinvd() \
 	__asm__ __volatile__ ("wbinvd": : :"memory");
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- */
-static inline void sched_cacheflush(void)
-{
-	wbinvd();
-}
-
 #endif	/* __KERNEL__ */
 
 #define nop() __asm__ __volatile__ ("nop")
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 94f624aef017..33b9b4841ee7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1348,6 +1348,13 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 #endif
 
 extern unsigned long long sched_clock(void);
+
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+extern unsigned long long cpu_clock(int cpu);
+
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 645256b228c3..93cf241cfbe9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -379,6 +379,23 @@ static inline unsigned long long rq_clock(struct rq *rq)
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+unsigned long long cpu_clock(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long long now;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rq->lock, flags);
+	now = rq_clock(rq);
+	spin_unlock_irqrestore(&rq->lock, flags);
+
+	return now;
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /* Change a task's ->cfs_rq if it moves across CPUs */
 static inline void set_task_cfs_rq(struct task_struct *p)
@@ -2235,7 +2252,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 
 			rq = cpu_rq(i);
 
-			if (*sd_idle && !idle_cpu(i))
+			if (*sd_idle && rq->nr_running)
 				*sd_idle = 0;
 
 			/* Bias balancing toward cpus of our domain */
@@ -2257,9 +2274,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		/*
 		 * First idle cpu or the first cpu(busiest) in this sched group
 		 * is eligible for doing load balancing at this and above
-		 * domains.
+		 * domains. In the newly idle case, we will allow all the cpu's
+		 * to do the newly idle load balance.
 		 */
-		if (local_group && balance_cpu != this_cpu && balance) {
+		if (idle != CPU_NEWLY_IDLE && local_group &&
+		    balance_cpu != this_cpu && balance) {
 			*balance = 0;
 			goto ret;
 		}
@@ -2677,6 +2696,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 	unsigned long imbalance;
 	int nr_moved = 0;
 	int sd_idle = 0;
+	int all_pinned = 0;
 	cpumask_t cpus = CPU_MASK_ALL;
 
 	/*
@@ -2715,10 +2735,11 @@ redo:
 		double_lock_balance(this_rq, busiest);
 		nr_moved = move_tasks(this_rq, this_cpu, busiest,
 					minus_1_or_zero(busiest->nr_running),
-					imbalance, sd, CPU_NEWLY_IDLE, NULL);
+					imbalance, sd, CPU_NEWLY_IDLE,
+					&all_pinned);
 		spin_unlock(&busiest->lock);
 
-		if (!nr_moved) {
+		if (unlikely(all_pinned)) {
 			cpu_clear(cpu_of(busiest), cpus);
 			if (!cpus_empty(cpus))
 				goto redo;
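
For the all-pinned fix above, the intent is that the busiest CPU is dropped from the candidate mask and the newly-idle balance retried only when every one of its tasks is pinned there, not merely because no task happened to move. A stripped-down userspace model of that retry policy (the types and names below are invented for illustration; this is not the kernel's code path) might look like:

/*
 * Standalone model (not kernel code) of the corrected retry policy:
 * drop a source CPU from the candidate set and retry only when all of
 * its tasks were pinned, instead of on any failed move attempt.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct cpu_model {
	int nr_tasks;
	int nr_pinned;		/* tasks that may not migrate away */
};

/* Pretend to pull one task; report whether the source was fully pinned. */
static int try_move_task(const struct cpu_model *busiest, bool *all_pinned)
{
	int movable = busiest->nr_tasks - busiest->nr_pinned;

	*all_pinned = (busiest->nr_tasks > 0 && movable == 0);
	return movable > 0;		/* 1 if a task was "moved" */
}

int main(void)
{
	struct cpu_model cpus[NR_CPUS] = {
		{ .nr_tasks = 3, .nr_pinned = 3 },	/* fully pinned */
		{ .nr_tasks = 0, .nr_pinned = 0 },	/* idle */
		{ .nr_tasks = 2, .nr_pinned = 1 },	/* has a movable task */
		{ .nr_tasks = 1, .nr_pinned = 1 },	/* fully pinned */
	};
	bool candidate[NR_CPUS] = { true, true, true, true };
	int cpu, nr_moved = 0;

	/* Like the redo: loop -- rule out only CPUs that were all-pinned. */
	for (cpu = 0; cpu < NR_CPUS && !nr_moved; cpu++) {
		bool all_pinned = false;

		if (!candidate[cpu] || cpus[cpu].nr_tasks == 0)
			continue;

		nr_moved = try_move_task(&cpus[cpu], &all_pinned);
		if (all_pinned)
			candidate[cpu] = false;	/* cpu_clear() + goto redo */
	}

	printf("moved %d task(s)\n", nr_moved);
	return 0;
}

In the kernel itself this decision is driven by the all_pinned flag that move_tasks() now fills in for load_balance_newidle(), as shown in the final hunk above.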