From d5f64980b4e9970bf9bdcb0acf35cfc6e3dfa701 Mon Sep 17 00:00:00 2001
From: "John M. Calandrino"
Date: Wed, 30 Apr 2008 17:23:35 -0400
Subject: LITMUS CORE: Release jobs with hrtimers

John's proposal for how to release jobs with hrtimers.
---
 include/litmus/edf_common.h |    3 ++-
 include/litmus/litmus.h     |   12 +++++++++
 include/litmus/rt_domain.h  |    7 +++--
 include/litmus/rt_param.h   |    4 +++
 litmus/edf_common.c         |    5 ++--
 litmus/rt_domain.c          |   46 ++++++++++++++++++++++++++++-----
 litmus/sched_gsn_edf.c      |   63 +++++++++++++++++++++++++++------------------
 litmus/sched_litmus.c       |    6 ++---
 litmus/sched_psn_edf.c      |   53 ++++++++++++++++++++++++++++----------
 9 files changed, 147 insertions(+), 52 deletions(-)

diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h
index f3c930b137..669900bc4c 100644
--- a/include/litmus/edf_common.h
+++ b/include/litmus/edf_common.h
@@ -12,7 +12,8 @@
 
 #include <litmus/rt_domain.h>
 
-void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched);
+void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
+		     release_at_t release);
 
 int edf_higher_prio(struct task_struct* first, struct task_struct* second);
 
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 6e99e651d7..7a27c987b6 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -163,6 +163,18 @@ inline static int budget_exhausted(struct task_struct* t)
 
 #define get_release(t) ((t)->rt_param.job_params.release)
 
+/* Our notion of time within LITMUS: kernel monotonic time. */
+static inline lt_t litmus_clock(void)
+{
+	return ktime_to_ns(ktime_get());
+}
+
+/* A macro to convert from nanoseconds to ktime_t. */
+#define ns_to_ktime(t) ktime_add_ns(ktime_set(0, 0), t)
+
+/* The high-resolution release timer for a task. */
+#define release_timer(t) ((t)->rt_param.job_params.release_timer)
+
 /* Honor the flag in the preempt_count variable that is set
  * when scheduling is in progress.
  */
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
index 79b6034f22..fd3c205bcc 100644
--- a/include/litmus/rt_domain.h
+++ b/include/litmus/rt_domain.h
@@ -8,7 +8,7 @@
 struct _rt_domain;
 
 typedef int (*check_resched_needed_t)(struct _rt_domain *rt);
-typedef void (*release_at_t)(struct task_struct *t, lt_t start);
+typedef void (*release_at_t)(struct task_struct *t);
 
 typedef struct _rt_domain {
 	/* runnable rt tasks are in here */
@@ -22,6 +22,9 @@ typedef struct _rt_domain {
 	/* how do we check if we need to kick another CPU? */
 	check_resched_needed_t	check_resched;
 
+	/* how do we setup a job release? */
+	release_at_t		setup_release;
+
 	/* how are tasks ordered in the ready queue? */
 	list_cmp_t		order;
 } rt_domain_t;
@@ -33,7 +36,7 @@ typedef struct _rt_domain {
 	(!list_empty(&(rt)->ready_queue))
 
 void rt_domain_init(rt_domain_t *rt, check_resched_needed_t f,
-		    list_cmp_t order);
+		    release_at_t g, list_cmp_t order);
 
 void __add_ready(rt_domain_t* rt, struct task_struct *new);
 void __add_release(rt_domain_t* rt, struct task_struct *task);
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 9fb5b19b78..3704924729 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -42,6 +42,10 @@ struct rt_job {
 	lt_t	release;
 	/* What is the current deadline? */
 	lt_t	deadline;
+
+	/* The high-resolution timer used to control its release. */
+	struct hrtimer	release_timer;
+
 	/* How much service has this job received so far?
 	 */
 	lt_t	exec_time;
diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index 3d9dca852d..2e055222ea 100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
@@ -74,9 +74,10 @@ void edf_release_at(struct task_struct *t, lt_t start)
 	set_rt_flags(t, RT_F_RUNNING);
 }
 
-void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched)
+void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
+		     release_at_t release)
 {
-	rt_domain_init(rt, resched, edf_ready_order);
+	rt_domain_init(rt, resched, release, edf_ready_order);
 }
 
 void edf_prepare_for_next_period(struct task_struct *t)
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index fe7bd29b19..d29325f232 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -22,26 +22,48 @@ static int dummy_resched(rt_domain_t *rt)
 	return 0;
 }
 
+static void dummy_setup_release(struct task_struct *t)
+{
+}
+
 static int dummy_order(struct list_head* a, struct list_head* b)
 {
 	return 0;
 }
 
+/* We now set or clear a per_cpu flag indicating if a plugin-specific call
+ * to setup a timer (that handles a job release) needs to be made. There is
+ * no need to setup multiple timers for jobs that are released at the same
+ * time. The actual clearing of this flag is a side effect of the
+ * release_order comparison function that is used when inserting a task
+ * into the release queue.
+ */
+DEFINE_PER_CPU(int, call_setup_release) = 1;
+
 int release_order(struct list_head* a, struct list_head* b)
 {
-	return earlier_release(
-		list_entry(a, struct task_struct, rt_list),
-		list_entry(b, struct task_struct, rt_list));
+	struct task_struct *task_a = list_entry(a, struct task_struct, rt_list);
+	struct task_struct *task_b = list_entry(b, struct task_struct, rt_list);
+
+	/* If the release times are equal, clear the flag. */
+	if (get_release(task_a) == get_release(task_b)) {
+		__get_cpu_var(call_setup_release) = 0;
+		return 0;
+	}
+
+	return earlier_release(task_a, task_b);
 }
 
 void rt_domain_init(rt_domain_t *rt,
 		    check_resched_needed_t f,
+		    release_at_t g,
 		    list_cmp_t order)
 {
 	BUG_ON(!rt);
 	if (!f)
 		f = dummy_resched;
+	if (!g)
+		g = dummy_setup_release;
 	if (!order)
 		order = dummy_order;
 	INIT_LIST_HEAD(&rt->ready_queue);
@@ -49,6 +71,7 @@ void rt_domain_init(rt_domain_t *rt,
 	rt->ready_lock = RW_LOCK_UNLOCKED;
 	rt->release_lock = SPIN_LOCK_UNLOCKED;
 	rt->check_resched = f;
+	rt->setup_release = g;
 	rt->order = order;
 }
 
@@ -57,9 +80,9 @@ void rt_domain_init(rt_domain_t *rt,
  */
 void __add_ready(rt_domain_t* rt, struct task_struct *new)
 {
-	TRACE("rt: adding %s/%d (%llu, %llu) to ready queue at %llu\n",
+	TRACE("rt: adding %s/%d (%llu, %llu) rel=%llu to ready queue at %llu\n",
 	      new->comm, new->pid, get_exec_cost(new), get_rt_period(new),
-	      sched_clock());
+	      get_release(new), litmus_clock());
 
 	if (!list_insert(&new->rt_list, &rt->ready_queue, rt->order))
 		rt->check_resched(rt);
@@ -92,14 +115,25 @@ void __add_release(rt_domain_t* rt, struct task_struct *task)
 	      task->comm, task->pid, get_exec_cost(task), get_rt_period(task),
 	      get_release(task));
 
+	/* Set flag assuming that we will need to setup another timer for
+	 * the release of this job. If it turns out that this is unnecessary
+	 * (because another job is already being released at that time,
+	 * and setting up two timers is redundant and inefficient), then
+	 * we will clear that flag so another release timer isn't setup.
+	 */
+	__get_cpu_var(call_setup_release) = 1;
 	list_insert(&task->rt_list, &rt->release_queue, release_order);
+
+	/* Setup a job release -- this typically involves a timer.
+	 */
+	if (__get_cpu_var(call_setup_release))
+		rt->setup_release(task);
 }
 
 void __release_pending(rt_domain_t* rt)
 {
 	struct list_head *pos, *save;
 	struct task_struct *queued;
-	lt_t now = sched_clock();
+	lt_t now = litmus_clock();
 	list_for_each_safe(pos, save, &rt->release_queue) {
 		queued = list_entry(pos, struct task_struct, rt_list);
 		if (likely(is_released(queued, now))) {
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index e879b02888..0381ddf98b 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -264,7 +264,7 @@ static noinline void requeue(struct task_struct* task)
 		 * the release and
 		 * deadline. We just must check if it has been released.
 		 */
-		if (is_released(task, sched_clock()))
+		if (is_released(task, litmus_clock()))
 			__add_ready(&gsnedf, task);
 		else {
 			/* it has got to wait */
@@ -306,17 +306,16 @@ static noinline void gsnedf_job_arrival(struct task_struct* task)
 }
 
 /* check for current job releases */
-static noinline void gsnedf_release_jobs(void) 
+static noinline void gsnedf_release_jobs(void)
 {
 	struct list_head *pos, *save;
 	struct task_struct *queued;
-	lt_t now = sched_clock();
-
+	lt_t now = litmus_clock();
 	list_for_each_safe(pos, save, &gsnedf.release_queue) {
 		queued = list_entry(pos, struct task_struct, rt_list);
 		if (likely(is_released(queued, now))) {
-			/* this one is ready to go*/
+			/* this one is ready to go */
 			list_del(pos);
 			set_rt_flags(queued, RT_F_RUNNING);
@@ -329,6 +328,37 @@ static noinline void gsnedf_release_jobs(void)
 	}
 }
 
+/* handles job releases when a timer expires */
+static enum hrtimer_restart gsnedf_release_job_timer(struct hrtimer *timer)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&gsnedf_lock, flags);
+
+	/* Release all pending ready jobs. */
+	gsnedf_release_jobs();
+
+	spin_unlock_irqrestore(&gsnedf_lock, flags);
+
+	return HRTIMER_NORESTART;
+}
+
+/* setup a new job release timer */
+static void gsnedf_setup_release_job_timer(struct task_struct *task)
+{
+	hrtimer_init(&release_timer(task), CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	release_timer(task).function = gsnedf_release_job_timer;
+#ifdef CONFIG_HIGH_RES_TIMERS
+	release_timer(task).cb_mode = HRTIMER_CB_IRQSAFE_NO_RESTART;
+#endif
+
+	/* Expiration time of timer is release time of task. */
+	release_timer(task).expires = ns_to_ktime(get_release(task));
+
+	hrtimer_start(&release_timer(task), release_timer(task).expires,
+		      HRTIMER_MODE_ABS);
+}
+
 /* gsnedf_tick - this function is called for every local timer
  * interrupt.
  *
@@ -337,8 +367,6 @@ static noinline void gsnedf_release_jobs(void)
  */
 static void gsnedf_tick(struct task_struct* t)
 {
-	unsigned long flags;
-
 	if (is_realtime(t) && budget_exhausted(t)) {
 		if (!is_np(t)) {
 			/* np tasks will be preempted when they become
@@ -356,21 +384,6 @@ static void gsnedf_tick(struct task_struct* t)
 			request_exit_np(t);
 		}
 	}
-
-	/* only the first CPU needs to release jobs */
-	/* FIXME: drive this from a hrtimer */
-	if (smp_processor_id() == 0) {
-		spin_lock_irqsave(&gsnedf_lock, flags);
-
-		/* Try to release pending jobs */
-		gsnedf_release_jobs();
-
-		/* We don't need to check linked != scheduled since
-		 * set_tsk_need_resched has been set by preempt() if necessary.
-		 */
-
-		spin_unlock_irqrestore(&gsnedf_lock, flags);
-	}
 }
 
 /* caller holds gsnedf_lock */
@@ -524,7 +537,7 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running)
 	t->rt_param.linked_on = NO_CPU;
 
 	/* setup job params */
-	edf_release_at(t, sched_clock());
+	edf_release_at(t, litmus_clock());
 	gsnedf_job_arrival(t);
 
 	spin_unlock_irqrestore(&gsnedf_lock, flags);
@@ -543,7 +556,7 @@ static void gsnedf_task_wake_up(struct task_struct *task)
 	if (get_rt_flags(task) == RT_F_EXIT_SEM) {
 		set_rt_flags(task, RT_F_RUNNING);
 	} else {
-		now = sched_clock();
+		now = litmus_clock();
 		if (is_tardy(task, now)) {
 			/* new sporadic release */
 			edf_release_at(task, now);
@@ -711,7 +724,7 @@ static int __init init_gsn_edf(void)
 		INIT_LIST_HEAD(&entry->list);
 	}
 
-	edf_domain_init(&gsnedf, NULL);
+	edf_domain_init(&gsnedf, NULL, gsnedf_setup_release_job_timer);
 	return register_sched_plugin(&gsn_edf_plugin);
 }
 
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c
index 89ae3941db..16cdf2db59 100644
--- a/litmus/sched_litmus.c
+++ b/litmus/sched_litmus.c
@@ -5,7 +5,7 @@
 
 static void update_time_litmus(struct rq *rq, struct task_struct *p)
 {
-	lt_t now = sched_clock();
+	lt_t now = litmus_clock();
 	p->rt_param.job_params.exec_time += now - p->rt_param.job_params.exec_start;
 	p->rt_param.job_params.exec_start = now;
 
@@ -88,7 +88,7 @@ static struct task_struct *pick_next_task_litmus(struct rq *rq)
 	struct task_struct* picked = rq->litmus_next;
 	rq->litmus_next = NULL;
 	if (picked)
-		picked->rt_param.job_params.exec_start = sched_clock();
+		picked->rt_param.job_params.exec_start = litmus_clock();
 	return picked;
 }
 
@@ -103,7 +103,7 @@ static void task_tick_litmus(struct rq *rq, struct task_struct *p)
  */
 static void set_curr_task_litmus(struct rq *rq)
 {
-	rq->curr->rt_param.job_params.exec_start = sched_clock();
+	rq->curr->rt_param.job_params.exec_start = litmus_clock();
 }
 
 
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 961680d0a6..b60c2ce0b7 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -42,9 +42,10 @@ DEFINE_PER_CPU(psnedf_domain_t, psnedf_domains);
 
 static void psnedf_domain_init(psnedf_domain_t* pedf,
 			       check_resched_needed_t check,
+			       release_at_t release,
 			       int cpu)
 {
-	edf_domain_init(&pedf->domain, check);
+	edf_domain_init(&pedf->domain, check, release);
 	pedf->cpu = cpu;
 	pedf->lock = SPIN_LOCK_UNLOCKED;
 	pedf->scheduled = NULL;
@@ -59,7 +60,7 @@ static void requeue(struct task_struct* t, rt_domain_t *edf)
 		TRACE_TASK(t, "requeue: !TASK_RUNNING");
 
 	set_rt_flags(t, RT_F_RUNNING);
-	if (is_released(t, sched_clock()))
+	if (is_released(t, litmus_clock()))
 		__add_ready(edf, t);
 	else
 		__add_release(edf, t); /* it has got to wait */
@@ -98,11 +99,41 @@ static int psnedf_check_resched(rt_domain_t *edf)
 	return ret;
 }
 
+/* handles job releases when a timer expires */
+static enum hrtimer_restart psnedf_release_job_timer(struct hrtimer *timer)
+{
+	unsigned long flags;
+	rt_domain_t *edf = local_edf;
+	psnedf_domain_t *pedf = local_pedf;
+
+	spin_lock_irqsave(&pedf->lock, flags);
+
+	/* Release all pending ready jobs.
+	 */
+	__release_pending(edf);
+
+	spin_unlock_irqrestore(&pedf->lock, flags);
+
+	return HRTIMER_NORESTART;
+}
+
+/* setup a new job release timer */
+static void psnedf_setup_release_job_timer(struct task_struct *task)
+{
+	hrtimer_init(&release_timer(task), CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	release_timer(task).function = psnedf_release_job_timer;
+#ifdef CONFIG_HIGH_RES_TIMERS
+	release_timer(task).cb_mode = HRTIMER_CB_IRQSAFE_NO_RESTART;
+#endif
+
+	/* Expiration time of timer is release time of task. */
+	release_timer(task).expires = ns_to_ktime(get_release(task));
+
+	hrtimer_start(&release_timer(task), release_timer(task).expires,
+		      HRTIMER_MODE_ABS);
+}
+
 static void psnedf_tick(struct task_struct *t)
 {
-	unsigned long flags;
-	rt_domain_t *edf = local_edf;
 	psnedf_domain_t *pedf = local_pedf;
 
 	/* Check for inconsistency. We don't need the lock for this since
@@ -121,11 +152,6 @@ static void psnedf_tick(struct task_struct *t)
 			request_exit_np(t);
 		}
 	}
-
-	spin_lock_irqsave(&pedf->lock, flags);
-	/* FIXME: release via hrtimer */
-	__release_pending(edf);
-	spin_unlock_irqrestore(&pedf->lock, flags);
 }
 
 static void job_completion(struct task_struct* t)
@@ -225,7 +251,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running)
 		   smp_processor_id(), t->pid, get_partition(t));
 
 	/* setup job parameters */
-	edf_release_at(t, sched_clock());
+	edf_release_at(t, litmus_clock());
 
 	/* The task should be running in the queue, otherwise signal
 	 * code will try to wake it up with fatal consequences.
@@ -258,7 +284,7 @@ static void psnedf_task_wake_up(struct task_struct *task)
 	 *
 	 * FIXME: This should be done in some more predictable and userspace-controlled way.
 	 */
-	now = sched_clock();
+	now = litmus_clock();
 	if (is_tardy(task, now) &&
 	    get_rt_flags(task) != RT_F_EXIT_SEM) {
 		/* new sporadic release */
@@ -319,7 +345,7 @@ static long psnedf_pi_block(struct pi_semaphore *sem,
 			/* queued in domain*/
 			list_del(&t->rt_list);
 			/* readd to make priority change take place */
-			if (is_released(t, sched_clock()))
+			if (is_released(t, litmus_clock()))
 				__add_ready(edf, t);
 			else
 				__add_release(edf, t);
@@ -430,7 +456,8 @@ static int __init init_psn_edf(void)
 
 	for (i = 0; i < NR_CPUS; i++) {
 		psnedf_domain_init(remote_pedf(i),
-				   psnedf_check_resched, i);
+				   psnedf_check_resched,
+				   psnedf_setup_release_job_timer, i);
 	}
 	return register_sched_plugin(&psn_edf_plugin);
 }
-- 
cgit v1.2.2
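
For reference, the following is a minimal sketch (not part of the commit) of how a scheduler plugin wires up the release path introduced by this patch: it passes a setup_release hook through the extended edf_domain_init(), and the hook arms a per-job hrtimer whose callback drains the release queue via __release_pending(). The "demo_" identifiers and the demo_lock spinlock are hypothetical placeholders; the LITMUS interfaces (release_at_t, release_timer(), ns_to_ktime(), get_release(), litmus_clock(), __release_pending()) are the ones added or used in the diff above.

#include <linux/hrtimer.h>
#include <linux/spinlock.h>

#include <litmus/litmus.h>
#include <litmus/rt_domain.h>
#include <litmus/edf_common.h>

/* Hypothetical plugin state: one EDF domain protected by one lock. */
static rt_domain_t demo_domain;
static DEFINE_SPINLOCK(demo_lock);

/* Timer callback: when a job's release time is reached, move every job
 * that is now released from the release queue to the ready queue, under
 * the plugin lock (the GSN-/PSN-EDF callbacks in the patch do the same).
 */
static enum hrtimer_restart demo_release_job_timer(struct hrtimer *timer)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	__release_pending(&demo_domain);
	spin_unlock_irqrestore(&demo_lock, flags);

	return HRTIMER_NORESTART;
}

/* setup_release hook: invoked from __add_release() unless another job is
 * already being released at the same instant (call_setup_release was
 * cleared by release_order()).  Arms the per-task hrtimer for the job's
 * absolute release time.
 */
static void demo_setup_release_job_timer(struct task_struct *task)
{
	hrtimer_init(&release_timer(task), CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	release_timer(task).function = demo_release_job_timer;

	/* Expiration time of timer is the release time of the task. */
	release_timer(task).expires = ns_to_ktime(get_release(task));
	hrtimer_start(&release_timer(task), release_timer(task).expires,
		      HRTIMER_MODE_ABS);
}

static int __init init_demo_plugin(void)
{
	/* The release hook is the new third argument of edf_domain_init(). */
	edf_domain_init(&demo_domain, NULL, demo_setup_release_job_timer);
	return 0;
}

Because coinciding releases share one timer (decided per release instant by the call_setup_release per-CPU flag in rt_domain.c), a plugin only pays for one hrtimer per distinct release time rather than one per job.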