path: root/kernel/sched.c
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  62
1 file changed, 31 insertions(+), 31 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index d906f72b42d2..e8819bc6f462 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -55,6 +55,7 @@
 #include <linux/cpuset.h>
 #include <linux/percpu.h>
 #include <linux/kthread.h>
+#include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/sysctl.h>
 #include <linux/syscalls.h>
@@ -227,9 +228,8 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 
 		now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
 		hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
-		hrtimer_start(&rt_b->rt_period_timer,
-			      rt_b->rt_period_timer.expires,
-			      HRTIMER_MODE_ABS);
+		hrtimer_start_expires(&rt_b->rt_period_timer,
+				HRTIMER_MODE_ABS);
 	}
 	spin_unlock(&rt_b->rt_runtime_lock);
 }
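
The hunk above is part of the hrtimer API cleanup: instead of reading rt_period_timer.expires by hand and passing it back to hrtimer_start(), the timer is started at the expiry it already carries. A minimal sketch of the resulting re-arm pattern, assuming a periodic hrtimer; this is an illustration, not code taken from this diff:

#include <linux/hrtimer.h>

/*
 * Illustrative only: re-arm a periodic hrtimer through the expires
 * accessors rather than touching timer->expires directly.
 */
static void rearm_periodic(struct hrtimer *timer, ktime_t period)
{
	ktime_t now = hrtimer_cb_get_time(timer);

	/* advance the stored expiry past 'now' in whole periods */
	hrtimer_forward(timer, now, period);
	/* start the timer at the expiry it now carries */
	hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
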
@@ -386,7 +386,6 @@ struct cfs_rq {
 
 	u64 exec_clock;
 	u64 min_vruntime;
-	u64 pair_start;
 
 	struct rb_root tasks_timeline;
 	struct rb_node *rb_leftmost;
@@ -819,6 +818,13 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
 unsigned int sysctl_sched_shares_ratelimit = 250000;
 
 /*
+ * Inject some fuzzyness into changing the per-cpu group shares
+ * this avoids remote rq-locks at the expense of fairness.
+ * default: 4
+ */
+unsigned int sysctl_sched_shares_thresh = 4;
+
+/*
  * period over which we measure -rt task cpu usage in us.
  * default: 1s
  */
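
The new sysctl_sched_shares_thresh knob is only declared in kernel/sched.c; the /proc/sys plumbing would live in kernel/sysctl.c, which is outside this diff (it is limited to kernel/sched.c). For orientation only, a 2.6.x-era ctl_table entry for such a knob would look roughly as follows; the procname and handler choice are assumptions, not taken from this patch:

#include <linux/sysctl.h>

static int zero;	/* illustrative lower bound */

/* Hypothetical entry; the real wiring is done in kernel/sysctl.c. */
static struct ctl_table sched_shares_thresh_entry = {
	.procname	= "sched_shares_thresh",
	.data		= &sysctl_sched_shares_thresh,
	.maxlen		= sizeof(unsigned int),
	.mode		= 0644,
	.proc_handler	= &proc_dointvec_minmax,
	.extra1		= &zero,
};
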
@@ -1064,7 +1070,7 @@ static void hrtick_start(struct rq *rq, u64 delay)
 	struct hrtimer *timer = &rq->hrtick_timer;
 	ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
 
-	timer->expires = time;
+	hrtimer_set_expires(timer, time);
 
 	if (rq == this_rq()) {
 		hrtimer_restart(timer);
@@ -1454,8 +1460,8 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares);
  * Calculate and set the cpu's group shares.
  */
 static void
-__update_group_shares_cpu(struct task_group *tg, int cpu,
-			  unsigned long sd_shares, unsigned long sd_rq_weight)
+update_group_shares_cpu(struct task_group *tg, int cpu,
+			unsigned long sd_shares, unsigned long sd_rq_weight)
 {
 	int boost = 0;
 	unsigned long shares;
@@ -1486,19 +1492,23 @@ __update_group_shares_cpu(struct task_group *tg, int cpu,
 	 *
 	 */
 	shares = (sd_shares * rq_weight) / (sd_rq_weight + 1);
+	shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES);
 
-	/*
-	 * record the actual number of shares, not the boosted amount.
-	 */
-	tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
-	tg->cfs_rq[cpu]->rq_weight = rq_weight;
+	if (abs(shares - tg->se[cpu]->load.weight) >
+			sysctl_sched_shares_thresh) {
+		struct rq *rq = cpu_rq(cpu);
+		unsigned long flags;
 
-	if (shares < MIN_SHARES)
-		shares = MIN_SHARES;
-	else if (shares > MAX_SHARES)
-		shares = MAX_SHARES;
+		spin_lock_irqsave(&rq->lock, flags);
+		/*
+		 * record the actual number of shares, not the boosted amount.
+		 */
+		tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
+		tg->cfs_rq[cpu]->rq_weight = rq_weight;
 
-	__set_se_shares(tg->se[cpu], shares);
+		__set_se_shares(tg->se[cpu], shares);
+		spin_unlock_irqrestore(&rq->lock, flags);
+	}
 }
 
 /*
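
The hunk above is the core of the change: the newly computed share is clamped once, and the remote runqueue lock is only taken when the value differs from what is currently applied by more than sysctl_sched_shares_thresh. The same idea in isolation, as a minimal sketch; maybe_set_shares and its arguments are illustrative names, not kernel symbols:

#include <linux/spinlock.h>

/*
 * Illustrative only: skip the expensive cross-CPU lock when the new
 * value is within 'thresh' of the value already applied.
 */
static void maybe_set_shares(unsigned long *applied, unsigned long shares,
			     unsigned long thresh, spinlock_t *lock)
{
	long diff = (long)shares - (long)*applied;
	unsigned long flags;

	if (diff < 0)
		diff = -diff;
	if ((unsigned long)diff <= thresh)
		return;			/* small change: tolerate staleness */

	spin_lock_irqsave(lock, flags);
	*applied = shares;
	spin_unlock_irqrestore(lock, flags);
}
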
@@ -1527,14 +1537,8 @@ static int tg_shares_up(struct task_group *tg, void *data)
 	if (!rq_weight)
 		rq_weight = cpus_weight(sd->span) * NICE_0_LOAD;
 
-	for_each_cpu_mask(i, sd->span) {
-		struct rq *rq = cpu_rq(i);
-		unsigned long flags;
-
-		spin_lock_irqsave(&rq->lock, flags);
-		__update_group_shares_cpu(tg, i, shares, rq_weight);
-		spin_unlock_irqrestore(&rq->lock, flags);
-	}
+	for_each_cpu_mask(i, sd->span)
+		update_group_shares_cpu(tg, i, shares, rq_weight);
 
 	return 0;
 }
@@ -3339,7 +3343,7 @@ small_imbalance:
 	} else
 		this_load_per_task = cpu_avg_load_per_task(this_cpu);
 
-	if (max_load - this_load + 2*busiest_load_per_task >=
+	if (max_load - this_load + busiest_load_per_task >=
 				busiest_load_per_task * imbn) {
 		*imbalance = busiest_load_per_task;
 		return busiest;
@@ -4443,12 +4447,8 @@ need_resched_nonpreemptible:
 	if (sched_feat(HRTICK))
 		hrtick_clear(rq);
 
-	/*
-	 * Do the rq-clock update outside the rq lock:
-	 */
-	local_irq_disable();
+	spin_lock_irq(&rq->lock);
 	update_rq_clock(rq);
-	spin_lock(&rq->lock);
 	clear_tsk_need_resched(prev);
 
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
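
The final hunk folds local_irq_disable() + spin_lock() into the equivalent spin_lock_irq() and moves the clock update under rq->lock, so the runqueue clock is only touched while the runqueue is locked. A minimal sketch of the resulting sequence, assuming the struct rq and update_rq_clock() already defined in this file; illustration only, not code from the patch:

/*
 * Illustrative only: take the runqueue lock with interrupts disabled,
 * then update its clock while the lock is held.
 */
static void lock_rq_and_update_clock(struct rq *rq)
{
	spin_lock_irq(&rq->lock);	/* local_irq_disable() + spin_lock() */
	update_rq_clock(rq);		/* clock updated under rq->lock */
}
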