Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  |  35
1 file changed, 19 insertions(+), 16 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 76c0e9691fc0..a455dca884a6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -676,6 +676,7 @@ inline void update_rq_clock(struct rq *rq)
 
 /**
  * runqueue_is_locked
+ * @cpu: the processor in question.
  *
  * Returns true if the current cpu runqueue is locked.
  * This interface allows printk to be called with the runqueue lock
@@ -1563,11 +1564,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-struct update_shares_data {
-	unsigned long rq_weight[NR_CPUS];
-};
-
-static DEFINE_PER_CPU(struct update_shares_data, update_shares_data);
+static __read_mostly unsigned long *update_shares_data;
 
 static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 
@@ -1577,12 +1574,12 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 static void update_group_shares_cpu(struct task_group *tg, int cpu,
 				    unsigned long sd_shares,
 				    unsigned long sd_rq_weight,
-				    struct update_shares_data *usd)
+				    unsigned long *usd_rq_weight)
 {
 	unsigned long shares, rq_weight;
 	int boost = 0;
 
-	rq_weight = usd->rq_weight[cpu];
+	rq_weight = usd_rq_weight[cpu];
 	if (!rq_weight) {
 		boost = 1;
 		rq_weight = NICE_0_LOAD;
@@ -1617,7 +1614,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
 static int tg_shares_up(struct task_group *tg, void *data)
 {
 	unsigned long weight, rq_weight = 0, shares = 0;
-	struct update_shares_data *usd;
+	unsigned long *usd_rq_weight;
 	struct sched_domain *sd = data;
 	unsigned long flags;
 	int i;
@@ -1626,11 +1623,11 @@ static int tg_shares_up(struct task_group *tg, void *data)
 		return 0;
 
 	local_irq_save(flags);
-	usd = &__get_cpu_var(update_shares_data);
+	usd_rq_weight = per_cpu_ptr(update_shares_data, smp_processor_id());
 
 	for_each_cpu(i, sched_domain_span(sd)) {
 		weight = tg->cfs_rq[i]->load.weight;
-		usd->rq_weight[i] = weight;
+		usd_rq_weight[i] = weight;
 
 		/*
 		 * If there are currently no tasks on the cpu pretend there
@@ -1651,7 +1648,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
 	shares = tg->shares;
 
 	for_each_cpu(i, sched_domain_span(sd))
-		update_group_shares_cpu(tg, i, shares, rq_weight, usd);
+		update_group_shares_cpu(tg, i, shares, rq_weight, usd_rq_weight);
 
 	local_irq_restore(flags);
 
@@ -2311,7 +2308,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 {
 	int cpu, orig_cpu, this_cpu, success = 0;
 	unsigned long flags;
-	struct rq *rq;
+	struct rq *rq, *orig_rq;
 
 	if (!sched_feat(SYNC_WAKEUPS))
 		wake_flags &= ~WF_SYNC;
@@ -2319,7 +2316,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	this_cpu = get_cpu();
 
 	smp_wmb();
-	rq = task_rq_lock(p, &flags);
+	rq = orig_rq = task_rq_lock(p, &flags);
 	update_rq_clock(rq);
 	if (!(p->state & state))
 		goto out;
@@ -2350,6 +2347,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	set_task_cpu(p, cpu);
 
 	rq = task_rq_lock(p, &flags);
+
+	if (rq != orig_rq)
+		update_rq_clock(rq);
+
 	WARN_ON(p->state != TASK_WAKING);
 	cpu = task_cpu(p);
 
@@ -3656,6 +3657,7 @@ static void update_group_power(struct sched_domain *sd, int cpu)
 
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
+ * @sd: The sched_domain whose statistics are to be updated.
  * @group: sched_group whose statistics are to be updated.
  * @this_cpu: Cpu for which load balance is currently performed.
  * @idle: Idle status of this_cpu
@@ -6718,9 +6720,6 @@ EXPORT_SYMBOL(yield);
 /*
  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
- *
- * But don't do that if it is a deliberate, throttling IO wait (this task
- * has set its backing_dev_info: the queue against which it should throttle)
  */
 void __sched io_schedule(void)
 {
@@ -9404,6 +9403,10 @@ void __init sched_init(void)
 #endif /* CONFIG_USER_SCHED */
 #endif /* CONFIG_GROUP_SCHED */
 
+#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
+	update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
+					    __alignof__(unsigned long));
+#endif
 	for_each_possible_cpu(i) {
 		struct rq *rq;
 
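
For context, a minimal sketch of the two per-cpu idioms this patch converts between: a static DEFINE_PER_CPU() object whose embedded array is sized by the compile-time NR_CPUS limit, versus a boot-time __alloc_percpu() buffer sized by nr_cpu_ids and reached through per_cpu_ptr(), as sched_init() now does above. The identifiers demo_weight, demo_weights, demo_init and demo_store are illustrative only and do not exist in the kernel.

/* Illustrative sketch only -- not part of the patch; names are made up. */
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/smp.h>

/* Old idiom (removed by the patch): every CPU's static per-cpu area
 * carries an NR_CPUS-sized array, so memory scales with the build-time
 * CPU limit rather than the CPUs actually possible at boot. */
struct demo_weight {
	unsigned long w[NR_CPUS];
};
static DEFINE_PER_CPU(struct demo_weight, demo_weight);

/* New idiom (added by the patch): one dynamically allocated per-cpu
 * buffer whose per-CPU slice holds nr_cpu_ids entries. */
static unsigned long *demo_weights;

static int __init demo_init(void)
{
	demo_weights = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
				      __alignof__(unsigned long));
	return demo_weights ? 0 : -ENOMEM;
}

static void demo_store(int cpu, unsigned long val)
{
	/* Caller must keep preemption (or interrupts) disabled, as
	 * tg_shares_up() does via local_irq_save(), so that
	 * smp_processor_id() stays stable across the access. */
	unsigned long *slice = per_cpu_ptr(demo_weights, smp_processor_id());

	slice[cpu] = val;	/* record a weight on behalf of 'cpu' */
}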