Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c           |  7 -------
-rw-r--r--  kernel/sched_fair.c      | 23 +++++++++--------------
-rw-r--r--  kernel/sched_features.h  |  2 +-
3 files changed, 10 insertions(+), 22 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index af04ede6dd2f..5049d959bb26 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -376,13 +376,6 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 
 #else
 
-#ifdef CONFIG_SMP
-static int root_task_group_empty(void)
-{
-	return 1;
-}
-#endif
-
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
 static inline struct task_group *task_group(struct task_struct *p)
 {
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index acf16a8d934b..722d392b0dac 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1348,7 +1348,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
  */
 static int select_task_rq_fair(struct task_struct *p, int sd_flag, int flags)
 {
-	struct sched_domain *tmp, *sd = NULL;
+	struct sched_domain *tmp, *shares = NULL, *sd = NULL;
 	int cpu = smp_processor_id();
 	int prev_cpu = task_cpu(p);
 	int new_cpu = cpu;
@@ -1387,22 +1387,14 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int flags)
 			break;
 		}
 
-	switch (sd_flag) {
-	case SD_BALANCE_WAKE:
-		if (!sched_feat(LB_WAKEUP_UPDATE))
-			break;
-	case SD_BALANCE_FORK:
-	case SD_BALANCE_EXEC:
-		if (root_task_group_empty())
-			break;
-		update_shares(tmp);
-	default:
-		break;
-	}
-
 	if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
 	    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
 
+		if (sched_feat(LB_SHARES_UPDATE)) {
+			update_shares(tmp);
+			shares = tmp;
+		}
+
 		if (wake_affine(tmp, p, sync)) {
 			new_cpu = cpu;
 			goto out;
@@ -1417,6 +1409,9 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int flags)
 			sd = tmp;
 	}
 
+	if (sd && sd != shares && sched_feat(LB_SHARES_UPDATE))
+		update_shares(sd);
+
 	while (sd) {
 		struct sched_group *group;
 		int weight;
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index fd375675f834..d5059fd761d9 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -107,7 +107,7 @@ SCHED_FEAT(ARCH_POWER, 0)
 SCHED_FEAT(HRTICK, 0)
 SCHED_FEAT(DOUBLE_TICK, 0)
 SCHED_FEAT(LB_BIAS, 1)
-SCHED_FEAT(LB_WAKEUP_UPDATE, 1)
+SCHED_FEAT(LB_SHARES_UPDATE, 1)
 SCHED_FEAT(ASYM_EFF_LOAD, 1)
 
 /*
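
The net effect of the sched_fair.c hunks is that update_shares() is now gated on the LB_SHARES_UPDATE feature and called at most once per considered domain: the wake-affine path records the domain it refreshed in shares, and the pre-balance call is skipped when sd is that same domain. The stand-alone C sketch below models only that deduplication pattern; struct mock_domain, pick_domain(), lb_shares_update and the rest are invented stand-ins, not kernel code or kernel APIs, and only the sd/shares guard and the update_shares()/sched_feat(LB_SHARES_UPDATE) names come from the diff above.

/*
 * Stand-alone model (not kernel code) of the pattern this patch introduces:
 * refresh a domain's group shares at most once -- on the wake-affine path
 * if it applies there, otherwise just before the load-balance walk.
 */
#include <stdbool.h>
#include <stdio.h>

struct mock_domain {
	const char *name;
	bool wake_affine;	/* stands in for SD_WAKE_AFFINE */
	bool updated;		/* has update_shares() run on it? */
};

static bool lb_shares_update = true;	/* stands in for sched_feat(LB_SHARES_UPDATE) */

static void update_shares(struct mock_domain *sd)
{
	sd->updated = true;
	printf("update_shares(%s)\n", sd->name);
}

/* Walk the domain list once and return the domain chosen for balancing. */
static struct mock_domain *pick_domain(struct mock_domain *doms, int n)
{
	struct mock_domain *shares = NULL, *sd = NULL;

	for (int i = 0; i < n; i++) {
		struct mock_domain *tmp = &doms[i];

		if (tmp->wake_affine && lb_shares_update) {
			/* Affine path: update once and remember which domain. */
			update_shares(tmp);
			shares = tmp;
		}
		sd = tmp;	/* pretend every level matches sd_flag */
	}

	/* Pre-balance update, skipped if the affine path already did it. */
	if (sd && sd != shares && lb_shares_update)
		update_shares(sd);

	return sd;
}

int main(void)
{
	struct mock_domain doms[] = {
		{ "MC",   false, false },
		{ "NODE", true,  false },
	};
	struct mock_domain *sd = pick_domain(doms, 2);

	printf("balance on %s (updated=%d)\n", sd->name, sd->updated);
	return 0;
}

Compiled on its own (e.g. gcc -std=c99), it prints a single update_shares(NODE) line before the chosen domain, mirroring the sd != shares check that avoids refreshing the same domain twice.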