author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-01-25 15:08:27 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-25 15:08:27 -0500
commit	fa717060f1ab7eb6570f2fb49136f838fc9195a9 (patch)
tree	0338460dae3116563645e3bfa1ff5100d39826f8
parent	8eb703e4f33488bf75829564d51d427e17f7cd4c (diff)
sched: sched_rt_entity
Move the task_struct members specific to rt scheduling together.
A future optimization could be to put sched_entity and sched_rt_entity
into a union.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
CC: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	include/linux/init_task.h	5
-rw-r--r--	include/linux/sched.h	8
-rw-r--r--	kernel/sched.c	2
-rw-r--r--	kernel/sched_rt.c	20
-rw-r--r--	mm/oom_kill.c	2
5 files changed, 21 insertions(+), 16 deletions(-)
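The changelog's closing remark points at a possible follow-up: a task is managed by at most one scheduling class at a time, so sched_entity and sched_rt_entity could in principle share storage. The following standalone sketch is hypothetical and not part of this patch; the *_sketch types are trimmed stand-ins for the real kernel structures, and the anonymous union requires C11 or the long-standing GNU extension:

#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

/* trimmed stand-in for the fair-scheduling state (sched_entity) */
struct sched_entity_sketch {
	struct list_head	group_node;
	unsigned long		vruntime;
};

/* mirrors the struct sched_rt_entity added by this patch */
struct sched_rt_entity_sketch {
	struct list_head	run_list;
	unsigned int		time_slice;
};

struct task_struct_sketch {
	int prio;
	const struct sched_class *sched_class;	/* selects which member is live */
	union {
		struct sched_entity_sketch	se;	/* used by the fair class */
		struct sched_rt_entity_sketch	rt;	/* used by the rt class */
	};
};

The saving would be modest, and it comes with a hazard: a cross-class access such as the unconditional p->rt.time_slice write in __oom_kill_task below would scribble on live CFS state while the task runs under the fair class. That is presumably why the changelog leaves the union as a future optimization rather than doing it here.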
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 572c65bcc80f..ee65d87bedb7 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -133,9 +133,10 @@ extern struct group_info init_groups;
 	.nr_cpus_allowed = NR_CPUS,					\
 	.mm		= NULL,						\
 	.active_mm	= &init_mm,					\
-	.run_list	= LIST_HEAD_INIT(tsk.run_list),			\
+	.rt		= {						\
+		.run_list	= LIST_HEAD_INIT(tsk.rt.run_list),	\
+		.time_slice	= HZ, },				\
 	.ioprio		= 0,						\
-	.time_slice	= HZ,						\
 	.tasks		= LIST_HEAD_INIT(tsk.tasks),			\
 	.ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children),		\
 	.ptrace_list	= LIST_HEAD_INIT(tsk.ptrace_list),		\
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 72e1b8ecfbe1..a06d09ebd5c6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -929,6 +929,11 @@ struct sched_entity {
 #endif
 };
 
+struct sched_rt_entity {
+	struct list_head run_list;
+	unsigned int time_slice;
+};
+
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
 	void *stack;
@@ -945,9 +950,9 @@ struct task_struct {
 #endif
 
 	int prio, static_prio, normal_prio;
-	struct list_head run_list;
 	const struct sched_class *sched_class;
 	struct sched_entity se;
+	struct sched_rt_entity rt;
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 	/* list of struct preempt_notifier: */
@@ -972,7 +977,6 @@ struct task_struct {
 	unsigned int policy;
 	cpumask_t cpus_allowed;
 	int nr_cpus_allowed;
-	unsigned int time_slice;
 
 #ifdef CONFIG_PREEMPT_RCU
 	int rcu_read_lock_nesting;
diff --git a/kernel/sched.c b/kernel/sched.c
index 02d468844a91..c2cedd09d895 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1685,7 +1685,7 @@ static void __sched_fork(struct task_struct *p)
 	p->se.wait_max			= 0;
 #endif
 
-	INIT_LIST_HEAD(&p->run_list);
+	INIT_LIST_HEAD(&p->rt.run_list);
 	p->se.on_rq = 0;
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 9affb3c9d3db..29963af782ae 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -111,7 +111,7 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 {
 	struct rt_prio_array *array = &rq->rt.active;
 
-	list_add_tail(&p->run_list, array->queue + p->prio);
+	list_add_tail(&p->rt.run_list, array->queue + p->prio);
 	__set_bit(p->prio, array->bitmap);
 	inc_cpu_load(rq, p->se.load.weight);
 
@@ -127,7 +127,7 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 
 	update_curr_rt(rq);
 
-	list_del(&p->run_list);
+	list_del(&p->rt.run_list);
 	if (list_empty(array->queue + p->prio))
 		__clear_bit(p->prio, array->bitmap);
 	dec_cpu_load(rq, p->se.load.weight);
@@ -143,7 +143,7 @@ static void requeue_task_rt(struct rq *rq, struct task_struct *p)
 {
 	struct rt_prio_array *array = &rq->rt.active;
 
-	list_move_tail(&p->run_list, array->queue + p->prio);
+	list_move_tail(&p->rt.run_list, array->queue + p->prio);
 }
 
 static void
@@ -212,7 +212,7 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
 		return NULL;
 
 	queue = array->queue + idx;
-	next = list_entry(queue->next, struct task_struct, run_list);
+	next = list_entry(queue->next, struct task_struct, rt.run_list);
 
 	next->se.exec_start = rq->clock;
 
@@ -261,14 +261,14 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 	queue = array->queue + idx;
 	BUG_ON(list_empty(queue));
 
-	next = list_entry(queue->next, struct task_struct, run_list);
+	next = list_entry(queue->next, struct task_struct, rt.run_list);
 	if (unlikely(pick_rt_task(rq, next, cpu)))
 		goto out;
 
 	if (queue->next->next != queue) {
 		/* same prio task */
 		next = list_entry(queue->next->next, struct task_struct,
-				run_list);
+				rt.run_list);
 		if (pick_rt_task(rq, next, cpu))
 			goto out;
 	}
@@ -282,7 +282,7 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 		queue = array->queue + idx;
 		BUG_ON(list_empty(queue));
 
-		list_for_each_entry(next, queue, run_list) {
+		list_for_each_entry(next, queue, rt.run_list) {
 			if (pick_rt_task(rq, next, cpu))
 				goto out;
 		}
@@ -846,16 +846,16 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p)
 	if (p->policy != SCHED_RR)
 		return;
 
-	if (--p->time_slice)
+	if (--p->rt.time_slice)
 		return;
 
-	p->time_slice = DEF_TIMESLICE;
+	p->rt.time_slice = DEF_TIMESLICE;
 
 	/*
 	 * Requeue to the end of queue if we are not the only element
 	 * on the queue:
 	 */
-	if (p->run_list.prev != p->run_list.next) {
+	if (p->rt.run_list.prev != p->rt.run_list.next) {
 		requeue_task_rt(rq, p);
 		set_tsk_need_resched(p);
 	}
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 91a081a82f55..96473b482099 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -286,7 +286,7 @@ static void __oom_kill_task(struct task_struct *p, int verbose)
 	 * all the memory it needs. That way it should be able to
 	 * exit() and clear out its resources quickly...
 	 */
-	p->time_slice = HZ;
+	p->rt.time_slice = HZ;
 	set_tsk_thread_flag(p, TIF_MEMDIE);
 
 	force_sig(SIGKILL, p);