author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-17 12:11:18 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-17 12:11:18 -0400
commit     e6d5a11dad44b8ae18ca8fc4ecb72ccccfa0a2d2 (patch)
tree       7e3837c8f28e2e969a7b7d040b00676c90bf72c7 /kernel
parent     b6257a9036f06878a0f02354d5a07f155e1cfee0 (diff)
parent     b9dca1e0fcb696716840a3bc8f20a6941b484dbf (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched

* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  sched: fix new task startup crash
  sched: fix !SYSFS build breakage
  sched: fix improper load balance across sched domain
  sched: more robust sd-sysctl entry freeing
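For orientation, here is a minimal standalone sketch (not kernel code) of the group-imbalance heuristic that the "improper load balance" fix adds to find_busiest_group() in the sched.c hunks below: a scheduling group whose per-CPU loads differ by more than one full load unit is flagged imbalanced and may be picked as busiest even when it is not over capacity. SCHED_LOAD_SCALE stands in for the kernel's fixed-point load unit; the load[] array and the helper name group_is_imbalanced() are illustrative only.

    #include <stddef.h>

    #define SCHED_LOAD_SCALE 1024UL   /* stand-in for the kernel's fixed-point load unit */

    /* Flag a group as imbalanced when its per-CPU load spread exceeds one load unit. */
    static int group_is_imbalanced(const unsigned long *load, size_t nr_cpus)
    {
            unsigned long max_cpu_load = 0, min_cpu_load = ~0UL;
            size_t i;

            for (i = 0; i < nr_cpus; i++) {
                    if (load[i] > max_cpu_load)
                            max_cpu_load = load[i];
                    if (load[i] < min_cpu_load)
                            min_cpu_load = load[i];
            }

            return nr_cpus && (max_cpu_load - min_cpu_load) > SCHED_LOAD_SCALE;
    }

In the patch itself the min/max tracking happens inline while the per-CPU source_load() values are tallied, and a flagged group additionally has busiest_load_per_task clamped via min(busiest_load_per_task, avg_load), as the later hunk shows.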
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c       38
-rw-r--r--  kernel/sched_fair.c   6
-rw-r--r--  kernel/user.c        23
3 files changed, 47 insertions, 20 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index c4889abc00b6..92721d1534b8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1712,7 +1712,7 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 
 	p->prio = effective_prio(p);
 
-	if (!p->sched_class->task_new || !current->se.on_rq || !rq->cfs.curr) {
+	if (!p->sched_class->task_new || !current->se.on_rq) {
 		activate_task(rq, p, 0);
 	} else {
 		/*
@@ -2336,7 +2336,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	unsigned long max_pull;
 	unsigned long busiest_load_per_task, busiest_nr_running;
 	unsigned long this_load_per_task, this_nr_running;
-	int load_idx;
+	int load_idx, group_imb = 0;
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
 	int power_savings_balance = 1;
 	unsigned long leader_nr_running = 0, min_load_per_task = 0;
@@ -2355,9 +2355,10 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		load_idx = sd->idle_idx;
 
 	do {
-		unsigned long load, group_capacity;
+		unsigned long load, group_capacity, max_cpu_load, min_cpu_load;
 		int local_group;
 		int i;
+		int __group_imb = 0;
 		unsigned int balance_cpu = -1, first_idle_cpu = 0;
 		unsigned long sum_nr_running, sum_weighted_load;
 
@@ -2368,6 +2369,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 
 		/* Tally up the load of all CPUs in the group */
 		sum_weighted_load = sum_nr_running = avg_load = 0;
+		max_cpu_load = 0;
+		min_cpu_load = ~0UL;
 
 		for_each_cpu_mask(i, group->cpumask) {
 			struct rq *rq;
@@ -2388,8 +2391,13 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 			}
 
 			load = target_load(i, load_idx);
-		} else
+		} else {
 			load = source_load(i, load_idx);
+			if (load > max_cpu_load)
+				max_cpu_load = load;
+			if (min_cpu_load > load)
+				min_cpu_load = load;
+		}
 
 		avg_load += load;
 		sum_nr_running += rq->nr_running;
@@ -2415,6 +2423,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		avg_load = sg_div_cpu_power(group,
 				avg_load * SCHED_LOAD_SCALE);
 
+		if ((max_cpu_load - min_cpu_load) > SCHED_LOAD_SCALE)
+			__group_imb = 1;
+
 		group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
 
 		if (local_group) {
@@ -2423,11 +2434,12 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 			this_nr_running = sum_nr_running;
 			this_load_per_task = sum_weighted_load;
 		} else if (avg_load > max_load &&
-			   sum_nr_running > group_capacity) {
+			   (sum_nr_running > group_capacity || __group_imb)) {
 			max_load = avg_load;
 			busiest = group;
 			busiest_nr_running = sum_nr_running;
 			busiest_load_per_task = sum_weighted_load;
+			group_imb = __group_imb;
 		}
 
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -2499,6 +2511,9 @@ group_next:
 		goto out_balanced;
 
 	busiest_load_per_task /= busiest_nr_running;
+	if (group_imb)
+		busiest_load_per_task = min(busiest_load_per_task, avg_load);
+
 	/*
 	 * We're trying to get all the cpus to the average_load, so we don't
 	 * want to push ourselves above the average load, nor do we wish to
@@ -5282,11 +5297,20 @@ static struct ctl_table *sd_alloc_ctl_entry(int n)
 
 static void sd_free_ctl_entry(struct ctl_table **tablep)
 {
-	struct ctl_table *entry = *tablep;
+	struct ctl_table *entry;
 
-	for (entry = *tablep; entry->procname; entry++)
+	/*
+	 * In the intermediate directories, both the child directory and
+	 * procname are dynamically allocated and could fail but the mode
+	 * will always be set. In the lowest directory the names are
+	 * static strings and all have proc handlers.
+	 */
+	for (entry = *tablep; entry->mode; entry++) {
 		if (entry->child)
 			sd_free_ctl_entry(&entry->child);
+		if (entry->proc_handler == NULL)
+			kfree(entry->procname);
+	}
 
 	kfree(*tablep);
 	*tablep = NULL;
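The sd_free_ctl_entry() hunk above is the "more robust sd-sysctl entry freeing" change: the walk now terminates on ->mode (always set once an entry is initialised) instead of ->procname (whose allocation can fail part-way through building the table), and the dynamically allocated directory names, i.e. entries without a proc handler, are kfree()'d as well. Below is a minimal user-space sketch of the same freeing pattern, using a cut-down stand-in for struct ctl_table; the type and function names are illustrative only.

    #include <stdlib.h>

    /* Cut-down stand-in for the kernel's struct ctl_table. */
    struct tbl_entry {
            char *procname;           /* dynamically allocated for directories, static for leaves */
            unsigned short mode;      /* set for every initialised entry; 0 terminates the table */
            struct tbl_entry *child;  /* sub-directory table, or NULL */
            void *proc_handler;       /* non-NULL only for leaf entries */
    };

    static void free_tbl(struct tbl_entry **tablep)
    {
            struct tbl_entry *entry;

            /*
             * Walk until the zeroed terminator; ->mode is set even when a
             * procname allocation failed while the table was being built.
             */
            for (entry = *tablep; entry->mode; entry++) {
                    if (entry->child)
                            free_tbl(&entry->child);
                    /* Only directory entries (no handler) own their name. */
                    if (entry->proc_handler == NULL)
                            free(entry->procname);
            }

            free(*tablep);
            *tablep = NULL;
    }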
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a17b785d7000..166ed6db600b 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1031,12 +1031,8 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 		swap(curr->vruntime, se->vruntime);
 	}
 
-	update_stats_enqueue(cfs_rq, se);
-	check_spread(cfs_rq, se);
-	check_spread(cfs_rq, curr);
-	__enqueue_entity(cfs_rq, se);
-	account_entity_enqueue(cfs_rq, se);
 	se->peer_preempt = 0;
+	enqueue_task_fair(rq, p, 0);
 	resched_task(rq->curr);
 }
 
diff --git a/kernel/user.c b/kernel/user.c
index 9cb6f6403561..e91331c457e2 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -84,9 +84,6 @@ static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
 
 #ifdef CONFIG_FAIR_USER_SCHED
 
-static struct kobject uids_kobject; /* represents /sys/kernel/uids directory */
-static DEFINE_MUTEX(uids_mutex);
-
 static void sched_destroy_user(struct user_struct *up)
 {
 	sched_destroy_group(up->tg);
@@ -108,6 +105,19 @@ static void sched_switch_user(struct task_struct *p)
 	sched_move_task(p);
 }
 
+#else	/* CONFIG_FAIR_USER_SCHED */
+
+static void sched_destroy_user(struct user_struct *up) { }
+static int sched_create_user(struct user_struct *up) { return 0; }
+static void sched_switch_user(struct task_struct *p) { }
+
+#endif	/* CONFIG_FAIR_USER_SCHED */
+
+#if defined(CONFIG_FAIR_USER_SCHED) && defined(CONFIG_SYSFS)
+
+static struct kobject uids_kobject; /* represents /sys/kernel/uids directory */
+static DEFINE_MUTEX(uids_mutex);
+
 static inline void uids_mutex_lock(void)
 {
 	mutex_lock(&uids_mutex);
@@ -254,11 +264,8 @@ static inline void free_user(struct user_struct *up, unsigned long flags)
 	schedule_work(&up->work);
 }
 
-#else	/* CONFIG_FAIR_USER_SCHED */
+#else	/* CONFIG_FAIR_USER_SCHED && CONFIG_SYSFS */
 
-static void sched_destroy_user(struct user_struct *up) { }
-static int sched_create_user(struct user_struct *up) { return 0; }
-static void sched_switch_user(struct task_struct *p) { }
 static inline int user_kobject_create(struct user_struct *up) { return 0; }
 static inline void uids_mutex_lock(void) { }
 static inline void uids_mutex_unlock(void) { }
@@ -277,7 +284,7 @@ static inline void free_user(struct user_struct *up, unsigned long flags)
 	kmem_cache_free(uid_cachep, up);
 }
 
-#endif	/* CONFIG_FAIR_USER_SCHED */
+#endif
 
 /*
  * Locate the user_struct for the passed UID. If found, take a ref on it. The