author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2008-08-19 06:33:05 -0400
committer  Ingo Molnar <mingo@elte.hu>                2008-08-19 07:10:11 -0400
commit     eb755805f21bd5ded84026e167b7a90887ac42e5
tree       fab10d5e9189f93a5ee3f3f614ed1fbdbb34de6e /kernel
parent     0b148fa04852859972abbf848177b92daeef138a
sched: extract walk_tg_tree()
Extract walk_tg_tree() and make it a little more generic so we can use it
in the schedulability test.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
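For readers skimming the patch below, a minimal userspace sketch of the new contract may help: visitors now take an opaque void *data and return an int, so the walker can abort the traversal early, which is what the planned schedulability test needs. The struct tg model, walk_tg() and tg_check_limit() below are illustrative stand-ins (a recursive model of the kernel's iterative walk), not code from this patch:

/*
 * Userspace model of the new walk_tg_tree() contract: visitors take an
 * opaque data pointer and return int, and a non-zero return aborts the
 * walk.  All names here are illustrative, not kernel code.
 */
#include <stdio.h>

struct tg {
	const char *name;
	unsigned long shares;
	struct tg *children[4];		/* NULL-terminated for simplicity */
};

typedef int (*tg_visitor)(struct tg *tg, void *data);

/*
 * Recursive stand-in for the kernel's iterative walk: @down runs when a
 * node is first entered, @up when it is left for the final time.
 */
static int walk_tg(struct tg *tg, tg_visitor down, tg_visitor up, void *data)
{
	int ret = down(tg, data);

	if (ret)
		return ret;
	for (int i = 0; tg->children[i]; i++) {
		ret = walk_tg(tg->children[i], down, up, data);
		if (ret)
			return ret;
	}
	return up(tg, data);
}

static int tg_nop(struct tg *tg, void *data)
{
	(void)tg;
	(void)data;
	return 0;
}

/*
 * Example "schedulability"-style check: abort the walk as soon as a
 * group exceeds the limit passed through @data.
 */
static int tg_check_limit(struct tg *tg, void *data)
{
	unsigned long limit = *(unsigned long *)data;

	if (tg->shares > limit) {
		printf("%s exceeds limit\n", tg->name);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct tg leaf = { "leaf", 3072, { NULL } };
	struct tg root = { "root", 1024, { &leaf, NULL } };
	unsigned long limit = 2048;

	return walk_tg(&root, tg_check_limit, tg_nop, &limit) ? 1 : 0;
}

The kernel version avoids recursion by iterating with the goto down/up labels shown in the diff, but the down-then-children-then-up ordering and the early-return semantics are the same.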
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c  79
1 file changed, 46 insertions, 33 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index c1bee5fb8154..8c019a19d052 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1387,38 +1387,24 @@ static inline void dec_cpu_load(struct rq *rq, unsigned long load)
 	update_load_sub(&rq->load, load);
 }
 
-#ifdef CONFIG_SMP
-static unsigned long source_load(int cpu, int type);
-static unsigned long target_load(int cpu, int type);
-static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
-
-static unsigned long cpu_avg_load_per_task(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-
-	if (rq->nr_running)
-		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
-
-	return rq->avg_load_per_task;
-}
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
-
-typedef void (*tg_visitor)(struct task_group *, int, struct sched_domain *);
+#if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED))
+typedef int (*tg_visitor)(struct task_group *, void *);
 
 /*
  * Iterate the full tree, calling @down when first entering a node and @up when
  * leaving it for the final time.
  */
-static void
-walk_tg_tree(tg_visitor down, tg_visitor up, int cpu, struct sched_domain *sd)
+static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
 {
 	struct task_group *parent, *child;
+	int ret;
 
 	rcu_read_lock();
 	parent = &root_task_group;
 down:
-	(*down)(parent, cpu, sd);
+	ret = (*down)(parent, data);
+	if (ret)
+		goto out_unlock;
 	list_for_each_entry_rcu(child, &parent->children, siblings) {
 		parent = child;
 		goto down;
@@ -1426,14 +1412,42 @@ down:
 up:
 		continue;
 	}
-	(*up)(parent, cpu, sd);
+	ret = (*up)(parent, data);
+	if (ret)
+		goto out_unlock;
 
 	child = parent;
 	parent = parent->parent;
 	if (parent)
 		goto up;
+out_unlock:
 	rcu_read_unlock();
+
+	return ret;
+}
+
+static int tg_nop(struct task_group *tg, void *data)
+{
+	return 0;
 }
+#endif
+
+#ifdef CONFIG_SMP
+static unsigned long source_load(int cpu, int type);
+static unsigned long target_load(int cpu, int type);
+static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
+
+static unsigned long cpu_avg_load_per_task(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	if (rq->nr_running)
+		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
+
+	return rq->avg_load_per_task;
+}
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
 
 static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 
@@ -1493,11 +1507,11 @@ __update_group_shares_cpu(struct task_group *tg, int cpu,
  * This needs to be done in a bottom-up fashion because the rq weight of a
  * parent group depends on the shares of its child groups.
  */
-static void
-tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd)
+static int tg_shares_up(struct task_group *tg, void *data)
 {
 	unsigned long rq_weight = 0;
 	unsigned long shares = 0;
+	struct sched_domain *sd = data;
 	int i;
 
 	for_each_cpu_mask(i, sd->span) {
@@ -1522,6 +1536,8 @@ tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd)
 		__update_group_shares_cpu(tg, i, shares, rq_weight);
 		spin_unlock_irqrestore(&rq->lock, flags);
 	}
+
+	return 0;
 }
 
 /*
@@ -1529,10 +1545,10 @@ tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd)
  * This needs to be done in a top-down fashion because the load of a child
  * group is a fraction of its parents load.
  */
-static void
-tg_load_down(struct task_group *tg, int cpu, struct sched_domain *sd)
+static int tg_load_down(struct task_group *tg, void *data)
 {
 	unsigned long load;
+	long cpu = (long)data;
 
 	if (!tg->parent) {
 		load = cpu_rq(cpu)->load.weight;
@@ -1543,11 +1559,8 @@ tg_load_down(struct task_group *tg, int cpu, struct sched_domain *sd)
 	}
 
 	tg->cfs_rq[cpu]->h_load = load;
-}
 
-static void
-tg_nop(struct task_group *tg, int cpu, struct sched_domain *sd)
-{
+	return 0;
 }
 
 static void update_shares(struct sched_domain *sd)
@@ -1557,7 +1570,7 @@ static void update_shares(struct sched_domain *sd)
 
 	if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
 		sd->last_update = now;
-		walk_tg_tree(tg_nop, tg_shares_up, 0, sd);
+		walk_tg_tree(tg_nop, tg_shares_up, sd);
 	}
 }
 
@@ -1568,9 +1581,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 	spin_lock(&rq->lock);
 }
 
-static void update_h_load(int cpu)
+static void update_h_load(long cpu)
 {
-	walk_tg_tree(tg_load_down, tg_nop, cpu, NULL);
+	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
 }
 
 #else
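A side note on the last hunk: update_h_load() now passes the CPU number through the generic void * argument by casting the long to a pointer, and tg_load_down() recovers it with long cpu = (long)data. A minimal standalone illustration of that round-trip follows; print_cpu() is a made-up stand-in, and the cast assumes long and void * have the same width, which holds for the ILP32 and LP64 targets Linux supports:

#include <stdio.h>

/*
 * Round-trip a small integer through a void * callback argument, the way
 * update_h_load() hands the CPU number to tg_load_down() above.
 */
static int print_cpu(void *data)
{
	long cpu = (long)data;		/* unpack the integer */

	printf("visiting cpu %ld\n", cpu);
	return 0;
}

int main(void)
{
	long cpu = 3;

	return print_cpu((void *)cpu);	/* pack the integer into the pointer */
}

In portable userspace code, uintptr_t (or passing a pointer to a real variable) would be the usual way to do this; the kernel can rely on its known ABI instead.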