author     Paul Turner <pjt@google.com>              2011-07-21 12:43:35 -0400
committer  Ingo Molnar <mingo@elte.hu>               2011-08-14 06:03:38 -0400
commit     8277434ef1202ce30315f8edb3fc760aa6e74493 (patch)
tree       455436e29a5a2e16b33f03aa4b4a9a2911bb7746 /kernel/sched.c
parent     671fd9dabe5239ad218c7eb48b2b9edee50250e6 (diff)
sched: Allow for positional tg_tree walks
Extend walk_tg_tree to accept a positional argument

    static int walk_tg_tree_from(struct task_group *from,
                                 tg_visitor down, tg_visitor up, void *data)

Existing semantics are preserved, caller must hold rcu_lock() or
sufficient analogue.

Signed-off-by: Paul Turner <pjt@google.com>
Reviewed-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110721184757.677889157@google.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
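For illustration only, here is a minimal sketch of how a caller might use the new entry point; the names my_visit_down, my_visit_up and my_walk_subtree are hypothetical (not part of this patch), and the sketch assumes it lives in kernel/sched.c where struct task_group, tg_visitor and walk_tg_tree_from() are visible.

    /*
     * Hypothetical caller of walk_tg_tree_from(); illustration only,
     * assuming kernel/sched.c context.
     */
    #include <linux/rcupdate.h>

    /* Called the first time each group in the subtree is entered. */
    static int my_visit_down(struct task_group *tg, void *data)
    {
            return 0;               /* a non-zero return aborts the walk */
    }

    /* Called when a group is left for the final time. */
    static int my_visit_up(struct task_group *tg, void *data)
    {
            return 0;
    }

    /* Walk only the subtree rooted at @tg instead of the whole hierarchy. */
    static int my_walk_subtree(struct task_group *tg, void *data)
    {
            int ret;

            /* RCU protection now belongs to the caller, not to the walk. */
            rcu_read_lock();
            ret = walk_tg_tree_from(tg, my_visit_down, my_visit_up, data);
            rcu_read_unlock();

            return ret;
    }

Moving rcu_read_lock() out of the walk lets callers that already hold it (or an equivalent guarantee) avoid re-taking it; the existing full-tree users __rt_schedulable() and __cfs_schedulable() simply wrap the call themselves, as the diff below shows.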
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--   kernel/sched.c   | 50
1 file changed, 37 insertions(+), 13 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 4bbabc2c4a77..8ec1e7ac2894 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1591,20 +1591,23 @@ static inline void dec_cpu_load(struct rq *rq, unsigned long load)
 typedef int (*tg_visitor)(struct task_group *, void *);
 
 /*
- * Iterate the full tree, calling @down when first entering a node and @up when
- * leaving it for the final time.
+ * Iterate task_group tree rooted at *from, calling @down when first entering a
+ * node and @up when leaving it for the final time.
+ *
+ * Caller must hold rcu_lock or sufficient equivalent.
  */
-static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
+static int walk_tg_tree_from(struct task_group *from,
+                             tg_visitor down, tg_visitor up, void *data)
 {
         struct task_group *parent, *child;
         int ret;
 
-        rcu_read_lock();
-        parent = &root_task_group;
+        parent = from;
+
 down:
         ret = (*down)(parent, data);
         if (ret)
-                goto out_unlock;
+                goto out;
         list_for_each_entry_rcu(child, &parent->children, siblings) {
                 parent = child;
                 goto down;
@@ -1613,19 +1616,29 @@ up:
                 continue;
         }
         ret = (*up)(parent, data);
-        if (ret)
-                goto out_unlock;
+        if (ret || parent == from)
+                goto out;
 
         child = parent;
         parent = parent->parent;
         if (parent)
                 goto up;
-out_unlock:
-        rcu_read_unlock();
-
+out:
         return ret;
 }
 
+/*
+ * Iterate the full tree, calling @down when first entering a node and @up when
+ * leaving it for the final time.
+ *
+ * Caller must hold rcu_lock or sufficient equivalent.
+ */
+
+static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
+{
+        return walk_tg_tree_from(&root_task_group, down, up, data);
+}
+
 static int tg_nop(struct task_group *tg, void *data)
 {
         return 0;
@@ -8870,13 +8883,19 @@ static int tg_rt_schedulable(struct task_group *tg, void *data)
 
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
 {
+        int ret;
+
         struct rt_schedulable_data data = {
                 .tg = tg,
                 .rt_period = period,
                 .rt_runtime = runtime,
         };
 
-        return walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
+        rcu_read_lock();
+        ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
+        rcu_read_unlock();
+
+        return ret;
 }
 
 static int tg_set_rt_bandwidth(struct task_group *tg,
@@ -9333,6 +9352,7 @@ static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
 
 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
 {
+        int ret;
         struct cfs_schedulable_data data = {
                 .tg = tg,
                 .period = period,
@@ -9344,7 +9364,11 @@ static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
                 do_div(data.quota, NSEC_PER_USEC);
         }
 
-        return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
+        rcu_read_lock();
+        ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
+        rcu_read_unlock();
+
+        return ret;
 }
 #endif /* CONFIG_CFS_BANDWIDTH */
 #endif /* CONFIG_FAIR_GROUP_SCHED */