author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-01-25 15:08:30 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-25 15:08:30 -0500
commit		6f505b16425a51270058e4a93441fe64de3dd435 (patch)
tree		be21e711d93bc4d088b97c4a4f585a5044dbaa7d /include/linux
parent		fa85ae2418e6843953107cd6a06f645752829bc0 (diff)
sched: rt group scheduling
Extend group scheduling to also cover the realtime classes. It uses the time
limiting introduced by the previous patch to allow multiple realtime groups.

The hard time limit is required to keep behaviour deterministic.

The algorithms used make the realtime scheduler O(tg), i.e. linear scaling with
the number of task groups. This is the worst-case behaviour I can't seem to get
out of; the average case of the algorithms can be improved, but I focused on
correctness and the worst case.

[ akpm@linux-foundation.org: move side-effects out of BUG_ON(). ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
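The kernel/sched_rt.c side of this patch is not part of the include/linux view
shown here. As a rough illustration only (types defined locally, helper names
hypothetical, not the patch's actual code), the sketch below shows why a
parent-linked group hierarchy like the one added to struct sched_rt_entity
further down gives the O(tg) behaviour described above: queueing a realtime
task touches one entity per enclosing task group on the path to the root.

/* Simplified sketch: only the hierarchy fields this patch adds to sched.h. */
struct rt_rq;

struct sched_rt_entity {
	struct sched_rt_entity	*parent;	/* NULL at the root group */
	struct rt_rq		*rt_rq;		/* rq this entity is (to be) queued on */
	struct rt_rq		*my_q;		/* rq "owned" by this entity/group */
};

/* Hypothetical helper: put one entity on its runqueue (details omitted). */
static void enqueue_entity_on_its_rq(struct sched_rt_entity *rt_se)
{
	/* ... insert rt_se into the priority array of rt_se->rt_rq ... */
}

/*
 * Enqueueing a realtime task walks from the task's own entity up through
 * the entity of every enclosing task group, hence the O(tg) worst case.
 */
static void enqueue_rt_hierarchy(struct sched_rt_entity *rt_se)
{
	for (; rt_se; rt_se = rt_se->parent)
		enqueue_entity_on_its_rq(rt_se);
}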
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/init_task.h	5
-rw-r--r--	include/linux/sched.h	10
2 files changed, 12 insertions(+), 3 deletions(-)
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index ee65d87bedb7..796019b22b6f 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -130,12 +130,13 @@ extern struct group_info init_groups;
 	.normal_prio	= MAX_PRIO-20,				\
 	.policy		= SCHED_NORMAL,				\
 	.cpus_allowed	= CPU_MASK_ALL,				\
-	.nr_cpus_allowed = NR_CPUS,				\
 	.mm		= NULL,					\
 	.active_mm	= &init_mm,				\
 	.rt		= {					\
 		.run_list	= LIST_HEAD_INIT(tsk.rt.run_list),	\
-		.time_slice	= HZ, },			\
+		.time_slice	= HZ,				\
+		.nr_cpus_allowed = NR_CPUS,			\
+	},							\
 	.ioprio		= 0,					\
 	.tasks		= LIST_HEAD_INIT(tsk.tasks),		\
 	.ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children),	\
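One knock-on effect of the hunk above, shown here only as a hedged
illustration (the helper below is hypothetical and not part of this diff):
with nr_cpus_allowed relocated from task_struct into the embedded rt entity,
scheduler code reads it as p->rt.nr_cpus_allowed rather than
p->nr_cpus_allowed.

#include <linux/sched.h>

/*
 * Hypothetical helper, not from this patch: after the move, the affinity
 * count is reached through the embedded sched_rt_entity.
 */
static inline int task_cannot_migrate(struct task_struct *p)
{
	return p->rt.nr_cpus_allowed == 1;
}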
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d5ea144df836..04eecbf0241e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -934,6 +934,15 @@ struct sched_rt_entity {
 	struct list_head run_list;
 	unsigned int time_slice;
 	unsigned long timeout;
+	int nr_cpus_allowed;
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	struct sched_rt_entity	*parent;
+	/* rq on which this entity is (to be) queued: */
+	struct rt_rq		*rt_rq;
+	/* rq "owned" by this entity/group: */
+	struct rt_rq		*my_q;
+#endif
 };
 
 struct task_struct {
@@ -978,7 +987,6 @@ struct task_struct {
 
 	unsigned int policy;
 	cpumask_t cpus_allowed;
-	int nr_cpus_allowed;
 
 #ifdef CONFIG_PREEMPT_RCU
 	int rcu_read_lock_nesting;
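To make the two runqueue pointers added to struct sched_rt_entity easier to
read, here is a hedged sketch (helper names are illustrative, not kernel API):
rt_rq is the queue the entity itself is queued on, i.e. its parent group's
queue, while my_q is the queue the entity owns and from which its own children
are picked; for a plain task entity my_q stays NULL.

#include <linux/sched.h>

#ifdef CONFIG_FAIR_GROUP_SCHED
/* Queue this entity is (to be) queued on -- its parent group's runqueue. */
static inline struct rt_rq *entity_parent_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

/* Queue this entity owns; NULL when the entity represents a single task. */
static inline struct rt_rq *entity_owned_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

/* A task's entity owns no queue of its own. */
static inline int rt_entity_is_task(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q == NULL;
}
#endif /* CONFIG_FAIR_GROUP_SCHED */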