 include/linux/sched.h |  2
 kernel/sched.c        | 38
 kernel/sched_fair.c   |  6
 kernel/user.c         | 23
 4 files changed, 49 insertions(+), 20 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3de5aa210feb..c204ab0d4df1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -535,10 +535,12 @@ struct user_struct {
 
 #ifdef CONFIG_FAIR_USER_SCHED
 	struct task_group *tg;
+#ifdef CONFIG_SYSFS
 	struct kset kset;
 	struct subsys_attribute user_attr;
 	struct work_struct work;
 #endif
+#endif
 };
 
 #ifdef CONFIG_FAIR_USER_SCHED
diff --git a/kernel/sched.c b/kernel/sched.c
index c4889abc00b6..92721d1534b8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1712,7 +1712,7 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 
 	p->prio = effective_prio(p);
 
-	if (!p->sched_class->task_new || !current->se.on_rq || !rq->cfs.curr) {
+	if (!p->sched_class->task_new || !current->se.on_rq) {
 		activate_task(rq, p, 0);
 	} else {
 		/*
@@ -2336,7 +2336,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	unsigned long max_pull;
 	unsigned long busiest_load_per_task, busiest_nr_running;
 	unsigned long this_load_per_task, this_nr_running;
-	int load_idx;
+	int load_idx, group_imb = 0;
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
 	int power_savings_balance = 1;
 	unsigned long leader_nr_running = 0, min_load_per_task = 0;
@@ -2355,9 +2355,10 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		load_idx = sd->idle_idx;
 
 	do {
-		unsigned long load, group_capacity;
+		unsigned long load, group_capacity, max_cpu_load, min_cpu_load;
 		int local_group;
 		int i;
+		int __group_imb = 0;
 		unsigned int balance_cpu = -1, first_idle_cpu = 0;
 		unsigned long sum_nr_running, sum_weighted_load;
 
@@ -2368,6 +2369,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 
 		/* Tally up the load of all CPUs in the group */
 		sum_weighted_load = sum_nr_running = avg_load = 0;
+		max_cpu_load = 0;
+		min_cpu_load = ~0UL;
 
 		for_each_cpu_mask(i, group->cpumask) {
 			struct rq *rq;
@@ -2388,8 +2391,13 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 				}
 
 				load = target_load(i, load_idx);
-			} else
+			} else {
 				load = source_load(i, load_idx);
+				if (load > max_cpu_load)
+					max_cpu_load = load;
+				if (min_cpu_load > load)
+					min_cpu_load = load;
+			}
 
 			avg_load += load;
 			sum_nr_running += rq->nr_running;
@@ -2415,6 +2423,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		avg_load = sg_div_cpu_power(group,
 				avg_load * SCHED_LOAD_SCALE);
 
+		if ((max_cpu_load - min_cpu_load) > SCHED_LOAD_SCALE)
+			__group_imb = 1;
+
 		group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
 
 		if (local_group) {
@@ -2423,11 +2434,12 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 			this_nr_running = sum_nr_running;
 			this_load_per_task = sum_weighted_load;
 		} else if (avg_load > max_load &&
-			   sum_nr_running > group_capacity) {
+			   (sum_nr_running > group_capacity || __group_imb)) {
 			max_load = avg_load;
 			busiest = group;
 			busiest_nr_running = sum_nr_running;
 			busiest_load_per_task = sum_weighted_load;
+			group_imb = __group_imb;
 		}
 
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -2499,6 +2511,9 @@ group_next:
 		goto out_balanced;
 
 	busiest_load_per_task /= busiest_nr_running;
+	if (group_imb)
+		busiest_load_per_task = min(busiest_load_per_task, avg_load);
+
 	/*
 	 * We're trying to get all the cpus to the average_load, so we don't
 	 * want to push ourselves above the average load, nor do we wish to
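
Taken together, the find_busiest_group() hunks above track the highest and lowest per-CPU source load within each group, flag the group imbalanced when the spread exceeds SCHED_LOAD_SCALE (the weight of one nice-0 task), allow such a group to be picked as busiest even when it is not over capacity, and clamp busiest_load_per_task to avg_load so move_tasks() pulls a sensible amount. Below is a minimal userspace sketch of just that heuristic; check_group_imb() and the load array are illustrative stand-ins, not kernel code, and SCHED_LOAD_SCALE is assumed to be 1024 as in kernels of this era.

/*
 * Userspace sketch of the intra-group imbalance test; illustrative
 * only.  SCHED_LOAD_SCALE is assumed to be 1024 (one nice-0 task).
 */
#include <stdio.h>

#define SCHED_LOAD_SCALE 1024UL

static int check_group_imb(const unsigned long *cpu_load, int nr_cpus)
{
	unsigned long max_cpu_load = 0, min_cpu_load = ~0UL;
	int i;

	for (i = 0; i < nr_cpus; i++) {
		if (cpu_load[i] > max_cpu_load)
			max_cpu_load = cpu_load[i];
		if (min_cpu_load > cpu_load[i])
			min_cpu_load = cpu_load[i];
	}

	/* Imbalanced when the spread exceeds one task's worth of load. */
	return (max_cpu_load - min_cpu_load) > SCHED_LOAD_SCALE;
}

int main(void)
{
	/* Two nice-0 tasks stacked on cpu0 while cpu1 sits idle. */
	unsigned long loads[] = { 2 * SCHED_LOAD_SCALE, 0 };

	printf("group_imb = %d\n", check_group_imb(loads, 2));	/* prints 1 */
	return 0;
}

Without the __group_imb escape hatch, a group whose sum_nr_running does not exceed group_capacity (two tasks on a two-CPU group) could never be selected as busiest even with both tasks stacked on one CPU, leaving the other CPU idle; that is the case this change appears to target.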
@@ -5282,11 +5297,20 @@ static struct ctl_table *sd_alloc_ctl_entry(int n)
 
 static void sd_free_ctl_entry(struct ctl_table **tablep)
 {
-	struct ctl_table *entry = *tablep;
+	struct ctl_table *entry;
 
-	for (entry = *tablep; entry->procname; entry++)
+	/*
+	 * In the intermediate directories, both the child directory and
+	 * procname are dynamically allocated and could fail but the mode
+	 * will always be set. In the lowest directory the names are
+	 * static strings and all have proc handlers.
+	 */
+	for (entry = *tablep; entry->mode; entry++) {
 		if (entry->child)
 			sd_free_ctl_entry(&entry->child);
+		if (entry->proc_handler == NULL)
+			kfree(entry->procname);
+	}
 
 	kfree(*tablep);
 	*tablep = NULL;
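
The termination condition moves from entry->procname to entry->mode so that a partially constructed table, where a procname allocation failed midway, can still be walked to the end, and the directory-level names that were kmalloc'd (exactly the entries without a proc_handler, per the new comment) are now freed instead of leaked. A userspace sketch of the same walk, using a pared-down stand-in for struct ctl_table:

/*
 * Userspace sketch of the freeing walk; the struct mirrors only the
 * fields the loop relies on and is not the real struct ctl_table.
 */
#include <stdlib.h>
#include <string.h>

struct ctl_entry {
	char *procname;		/* heap-allocated at directory levels */
	unsigned int mode;	/* non-zero on every populated slot */
	struct ctl_entry *child;
	void *proc_handler;	/* set only on leaf entries */
};

static void free_ctl_entry(struct ctl_entry **tablep)
{
	struct ctl_entry *entry;

	/* Key on mode, not procname: procname may be NULL if its
	 * allocation failed while the table was being built. */
	for (entry = *tablep; entry->mode; entry++) {
		if (entry->child)
			free_ctl_entry(&entry->child);
		if (entry->proc_handler == NULL)
			free(entry->procname);
	}

	free(*tablep);		/* the array itself, then clear the link */
	*tablep = NULL;
}

int main(void)
{
	/* One directory slot plus a zeroed terminator slot. */
	struct ctl_entry *dir = calloc(2, sizeof(*dir));

	dir[0].procname = strdup("cpu0");
	dir[0].mode = 0555;
	free_ctl_entry(&dir);
	return 0;
}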
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a17b785d7000..166ed6db600b 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1031,12 +1031,8 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 		swap(curr->vruntime, se->vruntime);
 	}
 
-	update_stats_enqueue(cfs_rq, se);
-	check_spread(cfs_rq, se);
-	check_spread(cfs_rq, curr);
-	__enqueue_entity(cfs_rq, se);
-	account_entity_enqueue(cfs_rq, se);
 	se->peer_preempt = 0;
+	enqueue_task_fair(rq, p, 0);
 	resched_task(rq->curr);
 }
 
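
The task_new_fair() hunk replaces five hand-rolled enqueue steps with one call to enqueue_task_fair(), so a newly forked task goes through the same bookkeeping (stats update, spread checks, rbtree insert, load accounting) as any other enqueue. A toy sketch of the pattern, with hypothetical names standing in for the CFS helpers:

/* Hypothetical stand-ins for the CFS enqueue path; illustrative only. */
#include <stdio.h>

struct entity { int on_rq; long load; };
struct runq { long total_load; int nr_running; };

/* The one place enqueue bookkeeping lives. */
static void enqueue_task(struct runq *rq, struct entity *se)
{
	rq->total_load += se->load;	/* load accounting */
	rq->nr_running++;
	se->on_rq = 1;			/* insert, stats, etc. */
}

/* Before: the fork path duplicated each step inline and could drift
 * out of sync with the wakeup path.  After: it simply delegates. */
static void task_new(struct runq *rq, struct entity *se)
{
	enqueue_task(rq, se);
}

int main(void)
{
	struct runq rq = { 0, 0 };
	struct entity child = { 0, 1024 };

	task_new(&rq, &child);
	printf("nr_running=%d load=%ld\n", rq.nr_running, rq.total_load);
	return 0;
}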
diff --git a/kernel/user.c b/kernel/user.c
index 9cb6f6403561..e91331c457e2 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -84,9 +84,6 @@ static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
 
 #ifdef CONFIG_FAIR_USER_SCHED
 
-static struct kobject uids_kobject; /* represents /sys/kernel/uids directory */
-static DEFINE_MUTEX(uids_mutex);
-
 static void sched_destroy_user(struct user_struct *up)
 {
 	sched_destroy_group(up->tg);
@@ -108,6 +105,19 @@ static void sched_switch_user(struct task_struct *p)
 	sched_move_task(p);
 }
 
+#else /* CONFIG_FAIR_USER_SCHED */
+
+static void sched_destroy_user(struct user_struct *up) { }
+static int sched_create_user(struct user_struct *up) { return 0; }
+static void sched_switch_user(struct task_struct *p) { }
+
+#endif /* CONFIG_FAIR_USER_SCHED */
+
+#if defined(CONFIG_FAIR_USER_SCHED) && defined(CONFIG_SYSFS)
+
+static struct kobject uids_kobject; /* represents /sys/kernel/uids directory */
+static DEFINE_MUTEX(uids_mutex);
+
 static inline void uids_mutex_lock(void)
 {
 	mutex_lock(&uids_mutex);
@@ -254,11 +264,8 @@ static inline void free_user(struct user_struct *up, unsigned long flags)
 	schedule_work(&up->work);
 }
 
-#else /* CONFIG_FAIR_USER_SCHED */
+#else /* CONFIG_FAIR_USER_SCHED && CONFIG_SYSFS */
 
-static void sched_destroy_user(struct user_struct *up) { }
-static int sched_create_user(struct user_struct *up) { return 0; }
-static void sched_switch_user(struct task_struct *p) { }
 static inline int user_kobject_create(struct user_struct *up) { return 0; }
 static inline void uids_mutex_lock(void) { }
 static inline void uids_mutex_unlock(void) { }
@@ -277,7 +284,7 @@ static inline void free_user(struct user_struct *up, unsigned long flags)
 	kmem_cache_free(uid_cachep, up);
 }
 
-#endif /* CONFIG_FAIR_USER_SCHED */
+#endif
 
 /*
  * Locate the user_struct for the passed UID. If found, take a ref on it. The
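
The kernel/user.c reshuffle makes the scheduler stubs depend only on CONFIG_FAIR_USER_SCHED, while the kobject, mutex, and deferred-work machinery (which exists solely to publish per-UID knobs in sysfs) now sits under CONFIG_FAIR_USER_SCHED && CONFIG_SYSFS, matching the new #ifdef nesting in sched.h; previously a FAIR_USER_SCHED=y, SYSFS=n configuration would not build. A compilable sketch of the two-level stub pattern, with FEATURE_A and FEATURE_B as stand-ins for the two config options:

/*
 * Stand-ins for CONFIG_FAIR_USER_SCHED (FEATURE_A) and CONFIG_SYSFS
 * (FEATURE_B).  Try: cc test.c; cc -DFEATURE_A test.c;
 * cc -DFEATURE_A -DFEATURE_B test.c -- every combination links.
 */
#include <stdio.h>

#ifdef FEATURE_A
static void feature_create(void) { printf("A: real create\n"); }
#else /* FEATURE_A */
/* Empty stub: callers never need an #ifdef of their own. */
static void feature_create(void) { }
#endif /* FEATURE_A */

#if defined(FEATURE_A) && defined(FEATURE_B)
static void feature_export(void) { printf("A+B: export via B\n"); }
#else /* FEATURE_A && FEATURE_B */
/* The B-only machinery compiles away when either option is off. */
static void feature_export(void) { }
#endif

int main(void)
{
	feature_create();
	feature_export();
	return 0;
}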