Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c       | 48
-rw-r--r--  kernel/sched_fair.c  | 12
2 files changed, 30 insertions, 30 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 6247e4a8350f..45e17b83b7f1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3106,7 +3106,7 @@ static void run_rebalance_domains(struct softirq_action *h)
                         if (need_resched())
                                 break;
 
-                        rebalance_domains(balance_cpu, SCHED_IDLE);
+                        rebalance_domains(balance_cpu, CPU_IDLE);
 
                         rq = cpu_rq(balance_cpu);
                         if (time_after(this_rq->next_balance, rq->next_balance))
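Note on the hunk above: the idle classification handed to rebalance_domains() from the softirq balance path changes from the old SCHED_IDLE name to CPU_IDLE. A minimal, self-contained sketch of the call shape this implies; the enum members other than CPU_IDLE and the stub bodies are assumptions for illustration, not taken from this diff:

/* sketch.c -- illustration only, not kernel source */
#include <stdio.h>

/* Assumed layout of the per-CPU idle classification after the rename. */
enum cpu_idle_type {
        CPU_IDLE,
        CPU_NOT_IDLE,
        CPU_NEWLY_IDLE,
};

/* Stub with the presumed parameter type of the balancer entry point. */
static void rebalance_domains(int balance_cpu, enum cpu_idle_type idle)
{
        printf("rebalance cpu %d, idle type %d\n", balance_cpu, idle);
}

int main(void)
{
        /* The softirq path balances on behalf of an idle CPU. */
        rebalance_domains(1, CPU_IDLE);
        return 0;
}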
@@ -6328,7 +6328,7 @@ int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
 }
 
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-int arch_reinit_sched_domains(void)
+static int arch_reinit_sched_domains(void)
 {
         int err;
 
@@ -6357,24 +6357,6 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
         return ret ? ret : count;
 }
 
-int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
-{
-        int err = 0;
-
-#ifdef CONFIG_SCHED_SMT
-        if (smt_capable())
-                err = sysfs_create_file(&cls->kset.kobj,
-                                        &attr_sched_smt_power_savings.attr);
-#endif
-#ifdef CONFIG_SCHED_MC
-        if (!err && mc_capable())
-                err = sysfs_create_file(&cls->kset.kobj,
-                                        &attr_sched_mc_power_savings.attr);
-#endif
-        return err;
-}
-#endif
-
 #ifdef CONFIG_SCHED_MC
 static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page)
 {
@@ -6385,8 +6367,8 @@ static ssize_t sched_mc_power_savings_store(struct sys_device *dev,
 {
         return sched_power_savings_store(buf, count, 0);
 }
-SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
-            sched_mc_power_savings_store);
+static SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
+                   sched_mc_power_savings_store);
 #endif
 
 #ifdef CONFIG_SCHED_SMT
@@ -6399,8 +6381,26 @@ static ssize_t sched_smt_power_savings_store(struct sys_device *dev,
 {
         return sched_power_savings_store(buf, count, 1);
 }
-SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
-            sched_smt_power_savings_store);
+static SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
+                   sched_smt_power_savings_store);
+#endif
+
+int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
+{
+        int err = 0;
+
+#ifdef CONFIG_SCHED_SMT
+        if (smt_capable())
+                err = sysfs_create_file(&cls->kset.kobj,
+                                        &attr_sched_smt_power_savings.attr);
+#endif
+#ifdef CONFIG_SCHED_MC
+        if (!err && mc_capable())
+                err = sysfs_create_file(&cls->kset.kobj,
+                                        &attr_sched_mc_power_savings.attr);
+#endif
+        return err;
+}
 #endif
 
 /*
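The kernel/sched.c portion of the patch is largely a linkage cleanup: arch_reinit_sched_domains() and both power-savings attributes become static, and sched_create_sysfs_power_savings_entries() moves below the SYSDEV_ATTR() definitions whose addresses it takes, since a file-local object has to be declared before the code that references it. A stand-alone sketch of that ordering rule, with stand-in names rather than kernel code:

/* ordering.c -- illustration of why the function moved below the attributes */
#include <stdio.h>

struct attribute {
        const char *name;
};

/* File-local attribute object, comparable in role to a static SYSDEV_ATTR().
 * Without a prior declaration, any function taking &attr must come later. */
static struct attribute attr = { .name = "sched_mc_power_savings" };

/* Comparable role to sched_create_sysfs_power_savings_entries(). */
static int create_entries(void)
{
        const struct attribute *a = &attr;      /* valid: attr already defined */
        printf("would register sysfs file: %s\n", a->name);
        return 0;
}

int main(void)
{
        return create_entries();
}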
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c5af38948a1e..fedbb51bba96 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -75,7 +75,7 @@ enum {
 
 unsigned int sysctl_sched_features __read_mostly =
                 SCHED_FEAT_FAIR_SLEEPERS        *1 |
-                SCHED_FEAT_SLEEPER_AVG          *1 |
+                SCHED_FEAT_SLEEPER_AVG          *0 |
                 SCHED_FEAT_SLEEPER_LOAD_AVG     *1 |
                 SCHED_FEAT_PRECISE_CPU_LOAD     *1 |
                 SCHED_FEAT_START_DEBIT          *1 |
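For reference, the default feature word in sched_fair.c is assembled by OR-ing each feature bit multiplied by 1 or 0, so the hunk above turns SCHED_FEAT_SLEEPER_AVG off by default without touching the feature definitions themselves. A small sketch of that pattern, using made-up feature names:

/* features.c -- illustration of the *1 / *0 default-mask idiom */
#include <stdio.h>

/* Each feature is a distinct bit, as in the sched_fair.c enum. */
enum {
        FEAT_FAIR_SLEEPERS      = 1,
        FEAT_SLEEPER_AVG        = 2,
        FEAT_START_DEBIT        = 4,
};

/* Multiplying a bit by 0 drops it from the default mask; by 1 keeps it. */
static unsigned int features =
                FEAT_FAIR_SLEEPERS      *1 |
                FEAT_SLEEPER_AVG        *0 |    /* disabled by default */
                FEAT_START_DEBIT        *1;

int main(void)
{
        printf("SLEEPER_AVG enabled: %d\n", !!(features & FEAT_SLEEPER_AVG));
        return 0;
}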
@@ -304,11 +304,9 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
         delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);
 
         if (cfs_rq->sleeper_bonus > sysctl_sched_granularity) {
-                delta = calc_delta_mine(cfs_rq->sleeper_bonus,
-                                        curr->load.weight, lw);
-                if (unlikely(delta > cfs_rq->sleeper_bonus))
-                        delta = cfs_rq->sleeper_bonus;
-
+                delta = min(cfs_rq->sleeper_bonus, (u64)delta_exec);
+                delta = calc_delta_mine(delta, curr->load.weight, lw);
+                delta = min((u64)delta, cfs_rq->sleeper_bonus);
                 cfs_rq->sleeper_bonus -= delta;
                 delta_mine -= delta;
         }
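The rewritten branch bounds how much sleeper bonus can be charged per update: the bonus considered is first capped to the runtime the task actually consumed (delta_exec), then weight-scaled, then capped again so the subtraction never exceeds the remaining pool. A plain C sketch of that double clamp, with a stand-in for calc_delta_mine() and made-up numbers:

/* clamp.c -- illustration of the bounded sleeper-bonus charge */
#include <stdio.h>
#include <stdint.h>

/* Stand-in for calc_delta_mine(): scale 'delta' by weight/total (the real
 * kernel helper does this with fixed-point load weights). */
static uint64_t scale_delta(uint64_t delta, uint64_t weight, uint64_t total)
{
        return delta * weight / total;
}

static uint64_t min_u64(uint64_t a, uint64_t b)
{
        return a < b ? a : b;
}

int main(void)
{
        uint64_t sleeper_bonus = 4000000;       /* ns of accumulated bonus */
        uint64_t delta_exec    = 1000000;       /* ns this task just ran   */
        uint64_t delta;

        /* 1. never charge more bonus than the task actually executed */
        delta = min_u64(sleeper_bonus, delta_exec);
        /* 2. scale by the task's share of the queue load (here 1/2) */
        delta = scale_delta(delta, 1024, 2048);
        /* 3. never subtract more than is left in the bonus pool */
        delta = min_u64(delta, sleeper_bonus);

        sleeper_bonus -= delta;
        printf("charged %llu ns, bonus left %llu ns\n",
               (unsigned long long)delta, (unsigned long long)sleeper_bonus);
        return 0;
}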
@@ -521,6 +519,8 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
          * Track the amount of bonus we've given to sleepers:
          */
         cfs_rq->sleeper_bonus += delta_fair;
+        if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit))
+                cfs_rq->sleeper_bonus = sysctl_sched_runtime_limit;
 
         schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
 }
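Together with the __update_curr() hunk, this makes the sleeper bonus a bounded pool: it grows as sleepers are enqueued but saturates at sysctl_sched_runtime_limit, so a long run of wakeups cannot accumulate unbounded credit. A minimal sketch of that saturating accumulation, with a hypothetical limit value:

/* saturate.c -- illustration of the capped bonus accumulator */
#include <stdio.h>
#include <stdint.h>

/* Stand-in for sysctl_sched_runtime_limit (ns); the real value is a tunable. */
static const uint64_t runtime_limit = 40000000;

/* Accumulate bonus, but never let the pool exceed the limit. */
static uint64_t add_bonus(uint64_t bonus, uint64_t delta_fair)
{
        bonus += delta_fair;
        if (bonus > runtime_limit)
                bonus = runtime_limit;
        return bonus;
}

int main(void)
{
        uint64_t bonus = 0;

        /* Repeated wakeups: the pool saturates instead of growing forever. */
        for (int i = 0; i < 10; i++)
                bonus = add_bonus(bonus, 10000000);
        printf("bonus = %llu (capped at %llu)\n",
               (unsigned long long)bonus, (unsigned long long)runtime_limit);
        return 0;
}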