Diffstat (limited to 'kernel/sched.c')
-rw-r--r--   kernel/sched.c | 58
1 file changed, 29 insertions(+), 29 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 5a70189d5051..6625c3c4b10d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -228,9 +228,8 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 
 		now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
 		hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
-		hrtimer_start(&rt_b->rt_period_timer,
-			      rt_b->rt_period_timer.expires,
-			      HRTIMER_MODE_ABS);
+		hrtimer_start_expires(&rt_b->rt_period_timer,
+				HRTIMER_MODE_ABS);
 	}
 	spin_unlock(&rt_b->rt_runtime_lock);
 }
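
The hunk above (together with the hrtick_start() hunk further down) converts direct accesses to timer->expires into accessor helpers: hrtimer_set_expires() stores the expiry and hrtimer_start_expires() arms the timer at whatever expiry was stored earlier, so call sites stop touching the field themselves. A minimal userspace sketch of that pattern; the toy_* names are hypothetical stand-ins for the real hrtimer API:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for struct hrtimer; only the expiry matters here. */
struct toy_timer {
	int64_t expires;		/* absolute expiry in ns */
};

/* All writes to ->expires go through one helper (cf. hrtimer_set_expires). */
static void toy_set_expires(struct toy_timer *t, int64_t ns)
{
	t->expires = ns;
}

/* Arm the timer at the previously stored expiry (cf. hrtimer_start_expires),
 * so the caller never dereferences t->expires directly. */
static void toy_start_expires(const struct toy_timer *t)
{
	printf("timer armed for %lld ns\n", (long long)t->expires);
}

int main(void)
{
	struct toy_timer t;

	toy_set_expires(&t, 1000000);
	toy_start_expires(&t);		/* was: toy_start(&t, t.expires) */
	return 0;
}

Funnelling every access through helpers means the stored representation of the expiry can change later without another sweep over all call sites.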
@@ -820,6 +819,13 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
 unsigned int sysctl_sched_shares_ratelimit = 250000;
 
 /*
+ * Inject some fuzzyness into changing the per-cpu group shares
+ * this avoids remote rq-locks at the expense of fairness.
+ * default: 4
+ */
+unsigned int sysctl_sched_shares_thresh = 4;
+
+/*
  * period over which we measure -rt task cpu usage in us.
  * default: 1s
  */
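
The new knob trades strict fairness for fewer remote runqueue locks: a per-cpu group-shares update is skipped whenever the newly computed value is within sysctl_sched_shares_thresh of the current weight. A toy model of just the gating test; should_update() is a hypothetical name, and the real check appears in the update_group_shares_cpu() hunk below:

#include <stdio.h>
#include <stdlib.h>

/* Skip the (expensive, lock-taking) update unless the change in shares
 * exceeds the threshold; mirrors the abs(...) > thresh test below. */
static int should_update(long new_shares, long cur_weight, long thresh)
{
	return labs(new_shares - cur_weight) > thresh;
}

int main(void)
{
	long thresh = 4;	/* the default of sysctl_sched_shares_thresh */

	printf("%d\n", should_update(1024, 1021, thresh));	/* 0: skipped */
	printf("%d\n", should_update(1024, 1016, thresh));	/* 1: updated */
	return 0;
}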
@@ -1065,7 +1071,7 @@ static void hrtick_start(struct rq *rq, u64 delay)
 	struct hrtimer *timer = &rq->hrtick_timer;
 	ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
 
-	timer->expires = time;
+	hrtimer_set_expires(timer, time);
 
 	if (rq == this_rq()) {
 		hrtimer_restart(timer);
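
This is the same accessor conversion as in the first hunk: the direct store timer->expires = time becomes hrtimer_set_expires(timer, time), so hrtick_start() no longer depends on how the expiry is laid out inside struct hrtimer (see the sketch after the first hunk).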
@@ -1455,8 +1461,8 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares);
  * Calculate and set the cpu's group shares.
  */
 static void
-__update_group_shares_cpu(struct task_group *tg, int cpu,
-			  unsigned long sd_shares, unsigned long sd_rq_weight)
+update_group_shares_cpu(struct task_group *tg, int cpu,
+			unsigned long sd_shares, unsigned long sd_rq_weight)
 {
 	int boost = 0;
 	unsigned long shares;
@@ -1487,19 +1493,23 @@ __update_group_shares_cpu(struct task_group *tg, int cpu,
 	 *
 	 */
 	shares = (sd_shares * rq_weight) / (sd_rq_weight + 1);
+	shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES);
 
-	/*
-	 * record the actual number of shares, not the boosted amount.
-	 */
-	tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
-	tg->cfs_rq[cpu]->rq_weight = rq_weight;
+	if (abs(shares - tg->se[cpu]->load.weight) >
+			sysctl_sched_shares_thresh) {
+		struct rq *rq = cpu_rq(cpu);
+		unsigned long flags;
 
-	if (shares < MIN_SHARES)
-		shares = MIN_SHARES;
-	else if (shares > MAX_SHARES)
-		shares = MAX_SHARES;
+		spin_lock_irqsave(&rq->lock, flags);
+		/*
+		 * record the actual number of shares, not the boosted amount.
+		 */
+		tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
+		tg->cfs_rq[cpu]->rq_weight = rq_weight;
 
-	__set_se_shares(tg->se[cpu], shares);
+		__set_se_shares(tg->se[cpu], shares);
+		spin_unlock_irqrestore(&rq->lock, flags);
+	}
 }
 
 /*
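
Restructured, update_group_shares_cpu() now clamps the computed shares up front with clamp_t() and takes the per-cpu runqueue lock only when the change exceeds the threshold; previously tg_shares_up() locked every runqueue unconditionally. A self-contained sketch of that lock-only-when-needed shape, using a pthread mutex as a stand-in for rq->lock and hypothetical names throughout:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MIN_SHARES	2
#define MAX_SHARES	(1UL << 18)

static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
static long cur_weight = 1024;
static long shares_thresh = 4;	/* models sysctl_sched_shares_thresh */

/* Open-coded equivalent of clamp_t(unsigned long, s, MIN, MAX). */
static long clamp_shares(long s)
{
	if (s < MIN_SHARES)
		return MIN_SHARES;
	if (s > MAX_SHARES)
		return MAX_SHARES;
	return s;
}

static void update_shares(long new_shares)
{
	new_shares = clamp_shares(new_shares);

	/* Only pay for the lock when the change exceeds the threshold. */
	if (labs(new_shares - cur_weight) > shares_thresh) {
		pthread_mutex_lock(&rq_lock);
		cur_weight = new_shares;	/* cf. __set_se_shares() */
		pthread_mutex_unlock(&rq_lock);
		printf("applied %ld\n", new_shares);
	} else {
		printf("skipped %ld (delta <= %ld)\n", new_shares, shares_thresh);
	}
}

int main(void)
{
	update_shares(1026);	/* within threshold: no lock taken */
	update_shares(1100);	/* exceeds threshold: locked and applied */
	return 0;
}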
@@ -1528,14 +1538,8 @@ static int tg_shares_up(struct task_group *tg, void *data)
 	if (!rq_weight)
 		rq_weight = cpus_weight(sd->span) * NICE_0_LOAD;
 
-	for_each_cpu_mask(i, sd->span) {
-		struct rq *rq = cpu_rq(i);
-		unsigned long flags;
-
-		spin_lock_irqsave(&rq->lock, flags);
-		__update_group_shares_cpu(tg, i, shares, rq_weight);
-		spin_unlock_irqrestore(&rq->lock, flags);
-	}
+	for_each_cpu_mask(i, sd->span)
+		update_group_shares_cpu(tg, i, shares, rq_weight);
 
 	return 0;
 }
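
With the locking folded into update_group_shares_cpu() (modelled in the sketch above), the per-domain walk in tg_shares_up() reduces to a bare loop, and in the common case where the shares barely move it no longer takes any remote runqueue lock.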
@@ -4444,12 +4448,8 @@ need_resched_nonpreemptible:
 	if (sched_feat(HRTICK))
 		hrtick_clear(rq);
 
-	/*
-	 * Do the rq-clock update outside the rq lock:
-	 */
-	local_irq_disable();
+	spin_lock_irq(&rq->lock);
 	update_rq_clock(rq);
-	spin_lock(&rq->lock);
 	clear_tsk_need_resched(prev);
 
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
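
spin_lock_irq() is equivalent to local_irq_disable() followed by spin_lock(), so the replacement preserves behaviour except for one deliberate change: update_rq_clock() now runs with the runqueue lock held, which is why the stale "Do the rq-clock update outside the rq lock" comment is dropped along with it.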