diff options
| author | Glenn Elliott <gelliott@cs.unc.edu> | 2011-01-27 15:57:55 -0500 |
|---|---|---|
| committer | Glenn Elliott <gelliott@cs.unc.edu> | 2011-01-27 15:57:55 -0500 |
| commit | 088ee7427d154649325badcdaa9e87fe57df13de (patch) | |
| tree | a5b754f5cbd1a7055fc40c01fcf878c3075f602b | |
| parent | 178914fda62f345d45c0873f000f4760293b24ab (diff) | |
Use hr_timer's active checks instead of having own flag. (branch: wip-edzl-critique)
| -rw-r--r-- | include/litmus/rt_param.h | 17 | ||||
| -rw-r--r-- | litmus/sched_edzl.c | 10 |
2 files changed, 10 insertions, 17 deletions
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h index 53741727d5d0..63dc87b65551 100644 --- a/include/litmus/rt_param.h +++ b/include/litmus/rt_param.h | |||
| @@ -82,14 +82,6 @@ struct rt_job { | |||
| 82 | /* How much service has this job received so far? */ | 82 | /* How much service has this job received so far? */ |
| 83 | lt_t exec_time; | 83 | lt_t exec_time; |
| 84 | 84 | ||
| 85 | #ifdef CONFIG_PLUGIN_EDZL | ||
| 86 | /* boolean indicating zero-laxity state. We will | ||
| 87 | set this flag explicitly at zero-laxity detection. | ||
| 88 | This makes priority comparison operations more | ||
| 89 | predictable since laxity varies with time */ | ||
| 90 | int zero_laxity; | ||
| 91 | #endif | ||
| 92 | |||
| 93 | /* Which job is this. This is used to let user space | 85 | /* Which job is this. This is used to let user space |
| 94 | * specify which job to wait for, which is important if jobs | 86 | * specify which job to wait for, which is important if jobs |
| 95 | * overrun. If we just call sys_sleep_next_period() then we | 87 | * overrun. If we just call sys_sleep_next_period() then we |
| @@ -98,6 +90,14 @@ struct rt_job { | |||
| 98 | * Increase this sequence number when a job is released. | 90 | * Increase this sequence number when a job is released. |
| 99 | */ | 91 | */ |
| 100 | unsigned int job_no; | 92 | unsigned int job_no; |
| 93 | |||
| 94 | #ifdef CONFIG_PLUGIN_EDZL | ||
| 95 | /* boolean indicating zero-laxity state. We will | ||
| 96 | set this flag explicitly at zero-laxity detection. | ||
| 97 | This makes priority comparison operations more | ||
| 98 | predictable since laxity varies with time */ | ||
| 99 | unsigned int zero_laxity:1; | ||
| 100 | #endif | ||
| 101 | }; | 101 | }; |
| 102 | 102 | ||
| 103 | struct pfair_param; | 103 | struct pfair_param; |
| @@ -123,7 +123,6 @@ struct rt_param { | |||
| 123 | struct rt_job job_params; | 123 | struct rt_job job_params; |
| 124 | 124 | ||
| 125 | #ifdef CONFIG_PLUGIN_EDZL | 125 | #ifdef CONFIG_PLUGIN_EDZL |
| 126 | unsigned int zl_timer_armed:1; | ||
| 127 | /* used to trigger zero-laxity detection */ | 126 | /* used to trigger zero-laxity detection */ |
| 128 | struct hrtimer zl_timer; | 127 | struct hrtimer zl_timer; |
| 129 | #endif | 128 | #endif |
diff --git a/litmus/sched_edzl.c b/litmus/sched_edzl.c index c0c8f848f00b..e9357c34561b 100644 --- a/litmus/sched_edzl.c +++ b/litmus/sched_edzl.c | |||
| @@ -247,7 +247,6 @@ static enum hrtimer_restart on_zero_laxity(struct hrtimer *timer) | |||
| 247 | get_deadline(t) - budget_remaining(t), | 247 | get_deadline(t) - budget_remaining(t), |
| 248 | get_deadline(t) - now); | 248 | get_deadline(t) - now); |
| 249 | 249 | ||
| 250 | tsk_rt(t)->zl_timer_armed = 0; | ||
| 251 | set_zerolaxity(t); | 250 | set_zerolaxity(t); |
| 252 | update_queue_position(t); | 251 | update_queue_position(t); |
| 253 | 252 | ||
| @@ -265,14 +264,13 @@ static inline struct task_struct* __edzl_take_ready(rt_domain_t* rt) | |||
| 265 | { | 264 | { |
| 266 | if(get_zerolaxity(t) == 0) | 265 | if(get_zerolaxity(t) == 0) |
| 267 | { | 266 | { |
| 268 | if(tsk_rt(t)->zl_timer_armed) | 267 | if(hrtimer_active(&tsk_rt(t)->zl_timer)) |
| 269 | { | 268 | { |
| 270 | int cancel_ret; | 269 | int cancel_ret; |
| 271 | 270 | ||
| 272 | TRACE_TASK(t, "Canceling zero-laxity timer.\n"); | 271 | TRACE_TASK(t, "Canceling zero-laxity timer.\n"); |
| 273 | cancel_ret = hrtimer_try_to_cancel(&tsk_rt(t)->zl_timer); | 272 | cancel_ret = hrtimer_try_to_cancel(&tsk_rt(t)->zl_timer); |
| 274 | WARN_ON(cancel_ret == 0); /* should never be inactive. */ | 273 | WARN_ON(cancel_ret == 0); /* should never be inactive. */ |
| 275 | tsk_rt(t)->zl_timer_armed = 0; | ||
| 276 | } | 274 | } |
| 277 | } | 275 | } |
| 278 | else | 276 | else |
| @@ -305,8 +303,6 @@ static inline void __edzl_add_ready(rt_domain_t* rt, struct task_struct *new) | |||
| 305 | 0, | 303 | 0, |
| 306 | HRTIMER_MODE_ABS_PINNED, | 304 | HRTIMER_MODE_ABS_PINNED, |
| 307 | 0); | 305 | 0); |
| 308 | |||
| 309 | tsk_rt(new)->zl_timer_armed = 1; | ||
| 310 | } | 306 | } |
| 311 | else | 307 | else |
| 312 | { | 308 | { |
| @@ -662,7 +658,6 @@ static void edzl_task_new(struct task_struct * t, int on_rq, int running) | |||
| 662 | 658 | ||
| 663 | raw_spin_lock_irqsave(&edzl_lock, flags); | 659 | raw_spin_lock_irqsave(&edzl_lock, flags); |
| 664 | 660 | ||
| 665 | t->rt_param.zl_timer_armed = 0; | ||
| 666 | hrtimer_init(&t->rt_param.zl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | 661 | hrtimer_init(&t->rt_param.zl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); |
| 667 | t->rt_param.zl_timer.function = on_zero_laxity; | 662 | t->rt_param.zl_timer.function = on_zero_laxity; |
| 668 | 663 | ||
| @@ -754,12 +749,11 @@ static void edzl_task_exit(struct task_struct * t) | |||
| 754 | tsk_rt(t)->scheduled_on = NO_CPU; | 749 | tsk_rt(t)->scheduled_on = NO_CPU; |
| 755 | } | 750 | } |
| 756 | 751 | ||
| 757 | if(tsk_rt(t)->zl_timer_armed) | 752 | if(hrtimer_active(&tsk_rt(t)->zl_timer)) |
| 758 | { | 753 | { |
| 759 | /* BUG if reached? */ | 754 | /* BUG if reached? */ |
| 760 | TRACE_TASK(t, "Canceled armed timer while exiting.\n"); | 755 | TRACE_TASK(t, "Canceled armed timer while exiting.\n"); |
| 761 | hrtimer_cancel(&tsk_rt(t)->zl_timer); | 756 | hrtimer_cancel(&tsk_rt(t)->zl_timer); |
| 762 | tsk_rt(t)->zl_timer_armed = 0; | ||
| 763 | } | 757 | } |
| 764 | 758 | ||
| 765 | raw_spin_unlock_irqrestore(&edzl_lock, flags); | 759 | raw_spin_unlock_irqrestore(&edzl_lock, flags); |
