diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-10-29 12:19:29 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-10-29 12:19:29 -0400 |
commit | 8633322c5fd5b2a986b279f88a7559d8409f7da3 (patch) | |
tree | 2db612751e9fa5c3624f008c7e4d520e77944852 /kernel/sched.c | |
parent | 9532faeb293f5a5f0ff06f567de14e557698dbde (diff) | |
parent | 4a6cc4bd32e580722882115d4c8b964d732c11e4 (diff) |
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu:
sched: move rq_weight data array out of .percpu
percpu: allow pcpu_alloc() to be called with IRQs off
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 22 |
1 file changed, 11 insertions, 11 deletions
diff --git a/kernel/sched.c b/kernel/sched.c index e88689522e66..a455dca884a6 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -1564,11 +1564,7 @@ static unsigned long cpu_avg_load_per_task(int cpu) | |||
1564 | 1564 | ||
1565 | #ifdef CONFIG_FAIR_GROUP_SCHED | 1565 | #ifdef CONFIG_FAIR_GROUP_SCHED |
1566 | 1566 | ||
1567 | struct update_shares_data { | 1567 | static __read_mostly unsigned long *update_shares_data; |
1568 | unsigned long rq_weight[NR_CPUS]; | ||
1569 | }; | ||
1570 | |||
1571 | static DEFINE_PER_CPU(struct update_shares_data, update_shares_data); | ||
1572 | 1568 | ||
1573 | static void __set_se_shares(struct sched_entity *se, unsigned long shares); | 1569 | static void __set_se_shares(struct sched_entity *se, unsigned long shares); |
1574 | 1570 | ||
@@ -1578,12 +1574,12 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares); | |||
1578 | static void update_group_shares_cpu(struct task_group *tg, int cpu, | 1574 | static void update_group_shares_cpu(struct task_group *tg, int cpu, |
1579 | unsigned long sd_shares, | 1575 | unsigned long sd_shares, |
1580 | unsigned long sd_rq_weight, | 1576 | unsigned long sd_rq_weight, |
1581 | struct update_shares_data *usd) | 1577 | unsigned long *usd_rq_weight) |
1582 | { | 1578 | { |
1583 | unsigned long shares, rq_weight; | 1579 | unsigned long shares, rq_weight; |
1584 | int boost = 0; | 1580 | int boost = 0; |
1585 | 1581 | ||
1586 | rq_weight = usd->rq_weight[cpu]; | 1582 | rq_weight = usd_rq_weight[cpu]; |
1587 | if (!rq_weight) { | 1583 | if (!rq_weight) { |
1588 | boost = 1; | 1584 | boost = 1; |
1589 | rq_weight = NICE_0_LOAD; | 1585 | rq_weight = NICE_0_LOAD; |
@@ -1618,7 +1614,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu, | |||
1618 | static int tg_shares_up(struct task_group *tg, void *data) | 1614 | static int tg_shares_up(struct task_group *tg, void *data) |
1619 | { | 1615 | { |
1620 | unsigned long weight, rq_weight = 0, shares = 0; | 1616 | unsigned long weight, rq_weight = 0, shares = 0; |
1621 | struct update_shares_data *usd; | 1617 | unsigned long *usd_rq_weight; |
1622 | struct sched_domain *sd = data; | 1618 | struct sched_domain *sd = data; |
1623 | unsigned long flags; | 1619 | unsigned long flags; |
1624 | int i; | 1620 | int i; |
@@ -1627,11 +1623,11 @@ static int tg_shares_up(struct task_group *tg, void *data) | |||
1627 | return 0; | 1623 | return 0; |
1628 | 1624 | ||
1629 | local_irq_save(flags); | 1625 | local_irq_save(flags); |
1630 | usd = &__get_cpu_var(update_shares_data); | 1626 | usd_rq_weight = per_cpu_ptr(update_shares_data, smp_processor_id()); |
1631 | 1627 | ||
1632 | for_each_cpu(i, sched_domain_span(sd)) { | 1628 | for_each_cpu(i, sched_domain_span(sd)) { |
1633 | weight = tg->cfs_rq[i]->load.weight; | 1629 | weight = tg->cfs_rq[i]->load.weight; |
1634 | usd->rq_weight[i] = weight; | 1630 | usd_rq_weight[i] = weight; |
1635 | 1631 | ||
1636 | /* | 1632 | /* |
1637 | * If there are currently no tasks on the cpu pretend there | 1633 | * If there are currently no tasks on the cpu pretend there |
@@ -1652,7 +1648,7 @@ static int tg_shares_up(struct task_group *tg, void *data) | |||
1652 | shares = tg->shares; | 1648 | shares = tg->shares; |
1653 | 1649 | ||
1654 | for_each_cpu(i, sched_domain_span(sd)) | 1650 | for_each_cpu(i, sched_domain_span(sd)) |
1655 | update_group_shares_cpu(tg, i, shares, rq_weight, usd); | 1651 | update_group_shares_cpu(tg, i, shares, rq_weight, usd_rq_weight); |
1656 | 1652 | ||
1657 | local_irq_restore(flags); | 1653 | local_irq_restore(flags); |
1658 | 1654 | ||
@@ -9407,6 +9403,10 @@ void __init sched_init(void) | |||
9407 | #endif /* CONFIG_USER_SCHED */ | 9403 | #endif /* CONFIG_USER_SCHED */ |
9408 | #endif /* CONFIG_GROUP_SCHED */ | 9404 | #endif /* CONFIG_GROUP_SCHED */ |
9409 | 9405 | ||
9406 | #if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP | ||
9407 | update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long), | ||
9408 | __alignof__(unsigned long)); | ||
9409 | #endif | ||
9410 | for_each_possible_cpu(i) { | 9410 | for_each_possible_cpu(i) { |
9411 | struct rq *rq; | 9411 | struct rq *rq; |
9412 | 9412 | ||