aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBjoern B. Brandenburg <bbb@cs.unc.edu>2007-04-17 20:26:40 -0400
committerBjoern B. Brandenburg <bbb@cs.unc.edu>2007-04-17 20:26:40 -0400
commit2155b28a6f4c74485fd6a99c4a7b4c75333c6883 (patch)
treec43b77e06144b039e25e1f0b5ff4d477de9d9123
parent2649f2baf0f70f460c7f73a6076c7b4edccc5486 (diff)
Call curr_plugin->finish_switch() after every task switch.
GSN-EDF's finish_switch() callback needs to run after every task switch, not only when the previous task was a running real-time task. This patch removes the is_realtime()/TASK_RUNNING guard around the callback in finish_task_switch() and instead updates each existing plugin to bail out early on non-real-time or non-running previous tasks, preserving their old behavior under the new calling convention.
-rw-r--r--include/linux/sched_plugin.h5
-rw-r--r--kernel/sched.c3
-rw-r--r--kernel/sched_edf_hsb.c3
-rw-r--r--kernel/sched_global_edf.c3
-rw-r--r--kernel/sched_part_edf.c3
-rw-r--r--kernel/sched_pfair.c3
6 files changed, 15 insertions, 5 deletions
diff --git a/include/linux/sched_plugin.h b/include/linux/sched_plugin.h
index 59e9e2f353..5c223695ab 100644
--- a/include/linux/sched_plugin.h
+++ b/include/linux/sched_plugin.h
@@ -38,9 +38,8 @@ typedef reschedule_check_t (*scheduler_tick_t) (void);
38typedef int (*schedule_t) (struct task_struct * prev, 38typedef int (*schedule_t) (struct task_struct * prev,
39 struct task_struct ** next, 39 struct task_struct ** next,
40 runqueue_t * rq); 40 runqueue_t * rq);
41/* clean up after the task switch has occured 41/* Clean up after the task switch has occured.
42 * it is guaranteed that the function is only called if prev is a real-time 42 * This function is called after every (even non-rt) task switch.
43 * task
44 */ 43 */
45typedef void (*finish_switch_t)(struct task_struct *prev); 44typedef void (*finish_switch_t)(struct task_struct *prev);
46 45
diff --git a/kernel/sched.c b/kernel/sched.c
index aa4a9e9426..1e1a9cfd4d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1846,8 +1846,7 @@ static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
1846 /* Requeue previous real-time task before we drop the rq lock, cause 1846 /* Requeue previous real-time task before we drop the rq lock, cause
1847 * that may lead to a preemption. 1847 * that may lead to a preemption.
1848 */ 1848 */
1849 if (is_realtime(prev) && prev_state == TASK_RUNNING) 1849 curr_sched_plugin->finish_switch(prev);
1850 curr_sched_plugin->finish_switch(prev);
1851 sched_trace_task_scheduled(current); 1850 sched_trace_task_scheduled(current);
1852 finish_lock_switch(rq, prev); 1851 finish_lock_switch(rq, prev);
1853 if (mm) 1852 if (mm)
diff --git a/kernel/sched_edf_hsb.c b/kernel/sched_edf_hsb.c
index 5ba9be8acf..d190426ff8 100644
--- a/kernel/sched_edf_hsb.c
+++ b/kernel/sched_edf_hsb.c
@@ -1438,6 +1438,9 @@ static inline void hsb_add_ready(struct task_struct *t)
1438 */ 1438 */
1439static void hsb_finish_switch(struct task_struct *prev) 1439static void hsb_finish_switch(struct task_struct *prev)
1440{ 1440{
1441 if (!is_realtime(prev) || !is_running(prev))
1442 return;
1443
1441 TRACE("finish switch for %d\n", prev->pid); 1444 TRACE("finish switch for %d\n", prev->pid);
1442 1445
1443 if (is_be(prev)) { 1446 if (is_be(prev)) {
diff --git a/kernel/sched_global_edf.c b/kernel/sched_global_edf.c
index ac4201a872..0781de139e 100644
--- a/kernel/sched_global_edf.c
+++ b/kernel/sched_global_edf.c
@@ -255,6 +255,9 @@ static int gedf_schedule(struct task_struct * prev,
255 */ 255 */
256static void gedf_finish_switch(struct task_struct *prev) 256static void gedf_finish_switch(struct task_struct *prev)
257{ 257{
258 if (!is_realtime(prev) || !is_running(prev))
259 return;
260
258 /*printk(KERN_INFO "gedf finish switch for %d\n", prev->pid);*/ 261 /*printk(KERN_INFO "gedf finish switch for %d\n", prev->pid);*/
259 if (get_rt_flags(prev) == RT_F_SLEEP || 262 if (get_rt_flags(prev) == RT_F_SLEEP ||
260 get_rt_mode() != MODE_RT_RUN) { 263 get_rt_mode() != MODE_RT_RUN) {
diff --git a/kernel/sched_part_edf.c b/kernel/sched_part_edf.c
index 3a93e5aa34..cdc75165b2 100644
--- a/kernel/sched_part_edf.c
+++ b/kernel/sched_part_edf.c
@@ -167,6 +167,9 @@ static void part_edf_finish_switch(struct task_struct *prev)
167{ 167{
168 edf_domain_t* edf = &__get_cpu_var(part_edf_domains).domain; 168 edf_domain_t* edf = &__get_cpu_var(part_edf_domains).domain;
169 169
170 if (!is_realtime(prev) || !is_running(prev))
171 return;
172
170 if (get_rt_flags(prev) == RT_F_SLEEP || 173 if (get_rt_flags(prev) == RT_F_SLEEP ||
171 get_rt_mode() != MODE_RT_RUN) { 174 get_rt_mode() != MODE_RT_RUN) {
172 /* this task has expired 175 /* this task has expired
diff --git a/kernel/sched_pfair.c b/kernel/sched_pfair.c
index 1c36ba5892..5f6cf1f496 100644
--- a/kernel/sched_pfair.c
+++ b/kernel/sched_pfair.c
@@ -288,6 +288,9 @@ out:
288 288
289static void pfair_finish_task_switch(struct task_struct *t) 289static void pfair_finish_task_switch(struct task_struct *t)
290{ 290{
291 if (!is_realtime(t) || !is_running(prev))
292 return;
293
291 queue_lock(&pfair.pfair_lock); 294 queue_lock(&pfair.pfair_lock);
292 /* Release in real-time mode only, 295 /* Release in real-time mode only,
293 * if the mode is non real-time, then 296 * if the mode is non real-time, then