author	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2008-05-02 20:51:29 -0400
committer	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2008-05-02 20:51:29 -0400
commit	a1f204ec33f806c6db8a4bfe6cc1a1e6109e8ef8 (patch)
tree	0c501aea2199e0c3af478150cb33dcd1f779206d
parent	2f030cd48bbdfc6f4155c38684d0e8b98195f4f5 (diff)
LITMUS: rework rt_domain to not cause circular locking dependencies
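In short: job releases are no longer set up from within __add_release() while the scheduler and release locks are already held. __add_release() now only enqueues the task on the release queue and schedules a tasklet; the tasklet (arm_release_timers()) arms one hrtimer per pending job, and the timer expiry path is meant to hand the released job to the domain's new release_job_t callback (still stubbed out in release_job_timer() in this patch). The ready queue's rwlock becomes a spinlock, and the plugins reuse that spinlock as their scheduler lock (gsnedf_lock, slock), so the old lock-within-lock timer-setup path goes away.

For illustration only, a plugin built against the reworked API would be wired up roughly as in the sketch below. The names my_domain, my_lock, my_check_resched, my_job_release, and my_plugin_setup are hypothetical; rt_domain_init(), edf_ready_order(), __add_ready(), and the release_job_t signature are taken from the hunks that follow, and the includes assume the usual litmus headers (edf_ready_order() is assumed to be declared by litmus/edf_common.h).

#include <litmus/rt_domain.h>
#include <litmus/edf_common.h>	/* assumed to declare edf_ready_order() */

/* Hypothetical plugin state: one domain whose ready_lock doubles as the
 * plugin's scheduler lock, mirroring gsnedf_lock/slock in this patch. */
static rt_domain_t my_domain;
#define my_lock (my_domain.ready_lock)

/* Tell the domain whether some CPU must reschedule; a no-op in this sketch. */
static int my_check_resched(rt_domain_t *rt)
{
	return 0;
}

/* Intended to be called when a job's release timer fires.  The ready_lock is
 * not expected to be held on entry, so taking it here no longer nests inside
 * the timer-setup path. */
static void my_job_release(struct task_struct *t, rt_domain_t *rt)
{
	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);
	__add_ready(rt, t);
	spin_unlock_irqrestore(&my_lock, flags);
}

static void my_plugin_setup(void)
{
	/* New argument order: queue ordering, then resched check, then release. */
	rt_domain_init(&my_domain, edf_ready_order,
		       my_check_resched, my_job_release);
}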
-rw-r--r--	include/litmus/edf_common.h	4
-rw-r--r--	include/litmus/rt_domain.h	33
-rw-r--r--	litmus/edf_common.c	4
-rw-r--r--	litmus/rt_domain.c	132
-rw-r--r--	litmus/sched_gsn_edf.c	97
-rw-r--r--	litmus/sched_psn_edf.c	78
6 files changed, 123 insertions, 225 deletions
diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h
index f79bd76e17..37630e5c26 100644
--- a/include/litmus/edf_common.h
+++ b/include/litmus/edf_common.h
@@ -12,8 +12,8 @@
 #include <litmus/rt_domain.h>


 void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
-		     release_at_t release);
+		     release_job_t release);

 int edf_higher_prio(struct task_struct* first,
		     struct task_struct* second);
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
index fd3c205bcc..7b2a11c0f2 100644
--- a/include/litmus/rt_domain.h
+++ b/include/litmus/rt_domain.h
@@ -5,14 +5,18 @@
 #ifndef __UNC_RT_DOMAIN_H__
 #define __UNC_RT_DOMAIN_H__

+#include <linux/interrupt.h>
+
 struct _rt_domain;

 typedef int (*check_resched_needed_t)(struct _rt_domain *rt);
-typedef void (*release_at_t)(struct task_struct *t);
+typedef void (*release_job_t)(struct task_struct *t, struct _rt_domain *rt);

 typedef struct _rt_domain {
+	struct tasklet_struct	release_tasklet;
+
 	/* runnable rt tasks are in here */
-	rwlock_t		ready_lock;
+	spinlock_t		ready_lock;
 	struct list_head	ready_queue;

 	/* real-time tasks waiting for release are in here */
@@ -22,8 +26,8 @@ typedef struct _rt_domain {
 	/* how do we check if we need to kick another CPU? */
 	check_resched_needed_t	check_resched;

-	/* how do we setup a job release? */
-	release_at_t		setup_release;
+	/* how do we release a job? */
+	release_job_t		release_job;

 	/* how are tasks ordered in the ready queue? */
 	list_cmp_t		order;
@@ -35,8 +39,9 @@ typedef struct _rt_domain {
 #define ready_jobs_pending(rt) \
 	(!list_empty(&(rt)->ready_queue))

-void rt_domain_init(rt_domain_t *rt, check_resched_needed_t f,
-		    release_at_t g, list_cmp_t order);
+void rt_domain_init(rt_domain_t *rt, list_cmp_t order,
+		    check_resched_needed_t check,
+		    release_job_t release);

 void __add_ready(rt_domain_t* rt, struct task_struct *new);
 void __add_release(rt_domain_t* rt, struct task_struct *task);
@@ -44,16 +49,13 @@ void __add_release(rt_domain_t* rt, struct task_struct *task);
 struct task_struct* __take_ready(rt_domain_t* rt);
 struct task_struct* __peek_ready(rt_domain_t* rt);

-void try_release_pending(rt_domain_t* rt);
-void __release_pending(rt_domain_t* rt);
-
 static inline void add_ready(rt_domain_t* rt, struct task_struct *new)
 {
 	unsigned long flags;
 	/* first we need the write lock for rt_ready_queue */
-	write_lock_irqsave(&rt->ready_lock, flags);
+	spin_lock_irqsave(&rt->ready_lock, flags);
 	__add_ready(rt, new);
-	write_unlock_irqrestore(&rt->ready_lock, flags);
+	spin_unlock_irqrestore(&rt->ready_lock, flags);
 }

 static inline struct task_struct* take_ready(rt_domain_t* rt)
@@ -61,9 +63,9 @@ static inline struct task_struct* take_ready(rt_domain_t* rt)
 	unsigned long flags;
 	struct task_struct* ret;
 	/* first we need the write lock for rt_ready_queue */
-	write_lock_irqsave(&rt->ready_lock, flags);
+	spin_lock_irqsave(&rt->ready_lock, flags);
 	ret = __take_ready(rt);
-	write_unlock_irqrestore(&rt->ready_lock, flags);
+	spin_unlock_irqrestore(&rt->ready_lock, flags);
 	return ret;
 }

@@ -87,11 +89,10 @@ static inline int jobs_pending(rt_domain_t* rt)
 	unsigned long flags;
 	int ret;
 	/* first we need the write lock for rt_ready_queue */
-	read_lock_irqsave(&rt->ready_lock, flags);
+	spin_lock_irqsave(&rt->ready_lock, flags);
 	ret = __jobs_pending(rt);
-	read_unlock_irqrestore(&rt->ready_lock, flags);
+	spin_unlock_irqrestore(&rt->ready_lock, flags);
 	return ret;
 }

-
 #endif
diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index 0b05194a04..68c6a401af 100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
@@ -68,9 +68,9 @@ int edf_ready_order(struct list_head* a, struct list_head* b)
 }

 void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
-		     release_at_t release)
+		     release_job_t release)
 {
-	rt_domain_init(rt, resched, release, edf_ready_order);
+	rt_domain_init(rt, edf_ready_order, resched, release);
 }

 /* need_to_preempt - check whether the task t needs to be preempted
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index d29325f232..cbedbac1dc 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -22,57 +22,80 @@ static int dummy_resched(rt_domain_t *rt)
 	return 0;
 }

-static void dummy_setup_release(struct task_struct *t)
+static int dummy_order(struct list_head* a, struct list_head* b)
 {
+	return 0;
 }

-static int dummy_order(struct list_head* a, struct list_head* b)
+/* default implementation: use default lock */
+static void default_release_job(struct task_struct* t, rt_domain_t* rt)
 {
-	return 0;
+	add_ready(rt, t);
 }

-/* We now set or clear a per_cpu flag indicating if a plugin-specific call
- * to setup a timer (that handles a job release) needs to be made. There is
- * no need to setup multiple timers for jobs that are released at the same
- * time. The actual clearing of this flag is a side effect of the release_order
- * comparison function that is used when inserting a task into the
- * release queue.
- */
-DEFINE_PER_CPU(int, call_setup_release) = 1;
-int release_order(struct list_head* a, struct list_head* b)
+static enum hrtimer_restart release_job_timer(struct hrtimer *timer)
 {
-	struct task_struct *task_a = list_entry(a, struct task_struct, rt_list);
-	struct task_struct *task_b = list_entry(b, struct task_struct, rt_list);
+	/* call the current plugin */
+	return HRTIMER_NORESTART;
+}

-	/* If the release times are equal, clear the flag. */
-	if (get_release(task_a) == get_release(task_b)) {
-		__get_cpu_var(call_setup_release) = 0;
-		return 0;
+static void setup_job_release_timer(struct task_struct *task)
+{
+	hrtimer_init(&release_timer(task), CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	release_timer(task).function = release_job_timer;
+#ifdef CONFIG_HIGH_RES_TIMERS
+	release_timer(task).cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
+#endif
+	/* Expiration time of timer is release time of task. */
+	release_timer(task).expires = ns_to_ktime(get_release(task));
+
+	TRACE_TASK(task, "arming release timer rel=%llu at %llu\n",
+		   get_release(task), litmus_clock());
+
+	hrtimer_start(&release_timer(task), release_timer(task).expires,
+		      HRTIMER_MODE_ABS);
+}
+
+static void arm_release_timers(unsigned long _rt)
+{
+	rt_domain_t *rt = (rt_domain_t*) _rt;
+	unsigned long flags;
+	struct list_head *pos, *safe;
+	struct task_struct* t;
+
+	spin_lock_irqsave(&rt->release_lock, flags);
+
+	list_for_each_safe(pos, safe, &rt->release_queue) {
+		t = list_entry(pos, struct task_struct, rt_list);
+		list_del(pos);
+		setup_job_release_timer(t);
 	}

-	return earlier_release(task_a, task_b);
+	spin_unlock_irqrestore(&rt->release_lock, flags);
 }


 void rt_domain_init(rt_domain_t *rt,
-		    check_resched_needed_t f,
-		    release_at_t g,
-		    list_cmp_t order)
+		    list_cmp_t order,
+		    check_resched_needed_t check,
+		    release_job_t release
+		    )
 {
 	BUG_ON(!rt);
-	if (!f)
-		f = dummy_resched;
-	if (!g)
-		g = dummy_setup_release;
+	if (!check)
+		check = dummy_resched;
+	if (!release)
+		release = default_release_job;
 	if (!order)
 		order = dummy_order;
 	INIT_LIST_HEAD(&rt->ready_queue);
 	INIT_LIST_HEAD(&rt->release_queue);
-	rt->ready_lock = RW_LOCK_UNLOCKED;
+	rt->ready_lock = SPIN_LOCK_UNLOCKED;
 	rt->release_lock = SPIN_LOCK_UNLOCKED;
-	rt->check_resched = f;
-	rt->setup_release = g;
+	rt->check_resched = check;
+	rt->release_job = release;
 	rt->order = order;
+	tasklet_init(&rt->release_tasklet, arm_release_timers, (unsigned long) rt);
 }

 /* add_ready - add a real-time task to the rt ready queue. It must be runnable.
@@ -111,54 +134,7 @@ struct task_struct* __peek_ready(rt_domain_t* rt)
  */
 void __add_release(rt_domain_t* rt, struct task_struct *task)
 {
-	TRACE("rt: adding %s/%d (%llu, %llu) rel=%llu to release queue\n",
-	      task->comm, task->pid, get_exec_cost(task), get_rt_period(task),
-	      get_release(task));
-
-	/* Set flag assuming that we will need to setup another timer for
-	 * the release of this job. If it turns out that this is unnecessary
-	 * (because another job is already being released at that time,
-	 * and setting up two timers is redundant and inefficient), then
-	 * we will clear that flag so another release timer isn't setup.
-	 */
-	__get_cpu_var(call_setup_release) = 1;
-	list_insert(&task->rt_list, &rt->release_queue, release_order);
-
-	/* Setup a job release -- this typically involves a timer. */
-	if (__get_cpu_var(call_setup_release))
-		rt->setup_release(task);
-}
-
-void __release_pending(rt_domain_t* rt)
-{
-	struct list_head *pos, *save;
-	struct task_struct *queued;
-	lt_t now = litmus_clock();
-	list_for_each_safe(pos, save, &rt->release_queue) {
-		queued = list_entry(pos, struct task_struct, rt_list);
-		if (likely(is_released(queued, now))) {
-			/* this one is ready to go*/
-			list_del(pos);
-			set_rt_flags(queued, RT_F_RUNNING);
-
-			sched_trace_job_release(queued);
-
-			/* now it can be picked up */
-			barrier();
-			add_ready(rt, queued);
-		}
-		else
-			/* the release queue is ordered */
-			break;
-	}
+	list_add(&task->rt_list, &rt->release_queue);
+	tasklet_hi_schedule(&rt->release_tasklet);
 }

-void try_release_pending(rt_domain_t* rt)
-{
-	unsigned long flags;
-
-	if (spin_trylock_irqsave(&rt->release_lock, flags)) {
-		__release_pending(rt);
-		spin_unlock_irqrestore(&rt->release_lock, flags);
-	}
-}
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index c60b6ddd49..c988e91e6e 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -109,15 +109,11 @@ DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries);

 #define NO_CPU 0xffffffff

-/* The gsnedf_lock is used to serialize all scheduling events.
- * It protects
- */
-static DEFINE_SPINLOCK(gsnedf_lock);
 /* the cpus queue themselves according to priority in here */
 static LIST_HEAD(gsnedf_cpu_queue);

 static rt_domain_t gsnedf;
-
+#define gsnedf_lock (gsnedf.ready_lock)

 /* update_cpu_position - Move the cpu entry to the correct place to maintain
  * order in the cpu queue. Caller must hold gsnedf lock.
@@ -269,7 +265,7 @@ static noinline void requeue(struct task_struct* task)
 		__add_ready(&gsnedf, task);
 	else {
 		/* it has got to wait */
-		__add_release(&gsnedf, task);
+		add_release(&gsnedf, task);
 	}

 	} else
@@ -307,60 +303,37 @@ static noinline void gsnedf_job_arrival(struct task_struct* task)
 }

 /* check for current job releases */
-static noinline void gsnedf_release_jobs(void)
-{
-	struct list_head *pos, *save;
-	struct task_struct *queued;
-	lt_t now = litmus_clock();
-
-	list_for_each_safe(pos, save, &gsnedf.release_queue) {
-		queued = list_entry(pos, struct task_struct, rt_list);
-		if (likely(is_released(queued, now))) {
-			/* this one is ready to go */
-			list_del(pos);
-			set_rt_flags(queued, RT_F_RUNNING);
-
-			sched_trace_job_release(queued);
-			gsnedf_job_arrival(queued);
-		}
-		else
-			/* the release queue is ordered */
-			break;
-	}
-}
-
-/* handles job releases when a timer expires */
-static enum hrtimer_restart gsnedf_release_job_timer(struct hrtimer *timer)
+static void gsnedf_job_release(struct task_struct* t, rt_domain_t* _)
 {
 	unsigned long flags;

 	spin_lock_irqsave(&gsnedf_lock, flags);

-	/* Release all pending ready jobs. */
-	gsnedf_release_jobs();
+	sched_trace_job_release(t);
+	gsnedf_job_arrival(t);

 	spin_unlock_irqrestore(&gsnedf_lock, flags);
-
-	return HRTIMER_NORESTART;
 }

-/* setup a new job release timer */
-static void gsnedf_setup_release_job_timer(struct task_struct *task)
+/* caller holds gsnedf_lock */
+static noinline void job_completion(struct task_struct *t)
 {
-	hrtimer_init(&release_timer(task), CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-	release_timer(task).function = gsnedf_release_job_timer;
-#ifdef CONFIG_HIGH_RES_TIMERS
-	release_timer(task).cb_mode = HRTIMER_CB_IRQSAFE_NO_RESTART;
-#endif
+	BUG_ON(!t);
+
+	sched_trace_job_completion(t);
+
+	TRACE_TASK(t, "job_completion().\n");

-	/* Expiration time of timer is release time of task. */
-	TRACE_TASK(task, "prog timer, rel=%llu, at %llu\n",
-		   get_release(task),
-		   litmus_clock());
-	release_timer(task).expires = ns_to_ktime(get_release(task));
-
-	hrtimer_start(&release_timer(task), release_timer(task).expires,
-		      HRTIMER_MODE_ABS);
+	/* set flags */
+	set_rt_flags(t, RT_F_SLEEP);
+	/* prepare for next period */
+	prepare_for_next_period(t);
+	/* unlink */
+	unlink(t);
+	/* requeue
+	 * But don't requeue a blocking task. */
+	if (is_running(t))
+		gsnedf_job_arrival(t);
 }

 /* gsnedf_tick - this function is called for every local timer
@@ -390,28 +363,6 @@ static void gsnedf_tick(struct task_struct* t)
 	}
 }

-/* caller holds gsnedf_lock */
-static noinline void job_completion(struct task_struct *t)
-{
-	BUG_ON(!t);
-
-	sched_trace_job_completion(t);
-
-	TRACE_TASK(t, "job_completion().\n");
-
-	/* set flags */
-	set_rt_flags(t, RT_F_SLEEP);
-	/* prepare for next period */
-	prepare_for_next_period(t);
-	/* unlink */
-	unlink(t);
-	/* requeue
-	 * But don't requeue a blocking task. */
-	if (is_running(t))
-		gsnedf_job_arrival(t);
-}
-
-
 /* Getting schedule() right is a bit tricky. schedule() may not make any
  * assumptions on the state of the current task since it may be called for a
  * number of reasons. The reasons include a scheduler_tick() determined that it
@@ -748,7 +699,7 @@ static int __init init_gsn_edf(void)
 		INIT_LIST_HEAD(&entry->list);
 	}

-	edf_domain_init(&gsnedf, NULL, gsnedf_setup_release_job_timer);
+	edf_domain_init(&gsnedf, NULL, gsnedf_job_release);
 	return register_sched_plugin(&gsn_edf_plugin);
 }

diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index d4d01789b0..cc7b09108f 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -26,9 +26,13 @@ typedef struct {
 	rt_domain_t		domain;
 	int			cpu;
 	struct task_struct*	scheduled; /* only RT tasks */
-	spinlock_t		lock;	/* protects the domain and
-					 * serializes scheduling decisions
-					 */
+
+/* scheduling lock
+ */
+#define slock domain.ready_lock
+/* protects the domain and
+ * serializes scheduling decisions
+ */
 } psnedf_domain_t;

 DEFINE_PER_CPU(psnedf_domain_t, psnedf_domains);
@@ -42,13 +46,12 @@ DEFINE_PER_CPU(psnedf_domain_t, psnedf_domains);


 static void psnedf_domain_init(psnedf_domain_t* pedf,
			       check_resched_needed_t check,
-			       release_at_t release,
+			       release_job_t release,
			       int cpu)
 {
 	edf_domain_init(&pedf->domain, check, release);
 	pedf->cpu       = cpu;
-	pedf->lock      = SPIN_LOCK_UNLOCKED;
 	pedf->scheduled = NULL;
 }

@@ -64,7 +67,7 @@ static void requeue(struct task_struct* t, rt_domain_t *edf)
 	if (is_released(t, litmus_clock()))
 		__add_ready(edf, t);
 	else
-		__add_release(edf, t); /* it has got to wait */
+		add_release(edf, t); /* it has got to wait */
 }

 /* we assume the lock is being held */
@@ -100,39 +103,6 @@ static int psnedf_check_resched(rt_domain_t *edf)
 	return ret;
 }

-/* handles job releases when a timer expires */
-static enum hrtimer_restart psnedf_release_job_timer(struct hrtimer *timer)
-{
-	unsigned long flags;
-	rt_domain_t *edf = local_edf;
-	psnedf_domain_t *pedf = local_pedf;
-
-	spin_lock_irqsave(&pedf->lock, flags);
-
-	/* Release all pending ready jobs. */
-	__release_pending(edf);
-
-	spin_unlock_irqrestore(&pedf->lock, flags);
-
-	return HRTIMER_NORESTART;
-}
-
-/* setup a new job release timer */
-static void psnedf_setup_release_job_timer(struct task_struct *task)
-{
-	hrtimer_init(&release_timer(task), CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-	release_timer(task).function = psnedf_release_job_timer;
-#ifdef CONFIG_HIGH_RES_TIMERS
-	release_timer(task).cb_mode = HRTIMER_CB_IRQSAFE_NO_RESTART;
-#endif
-
-	/* Expiration time of timer is release time of task. */
-	release_timer(task).expires = ns_to_ktime(get_release(task));
-
-	hrtimer_start(&release_timer(task), release_timer(task).expires,
-		      HRTIMER_MODE_ABS);
-}
-
 static void psnedf_tick(struct task_struct *t)
 {
 	psnedf_domain_t *pedf = local_pedf;
@@ -171,7 +141,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
 	int	out_of_time, sleep, preempt,
		np, exists, blocks, resched;

-	spin_lock(&pedf->lock);
+	spin_lock(&pedf->slock);

 	/* sanity checking */
 	BUG_ON(pedf->scheduled && pedf->scheduled != prev);
@@ -234,7 +204,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
 		set_rt_flags(next, RT_F_RUNNING);

 	pedf->scheduled = next;
-	spin_unlock(&pedf->lock);
+	spin_unlock(&pedf->slock);
 	return next;
 }

@@ -257,7 +227,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running)
 	/* The task should be running in the queue, otherwise signal
 	 * code will try to wake it up with fatal consequences.
 	 */
-	spin_lock_irqsave(&pedf->lock, flags);
+	spin_lock_irqsave(&pedf->slock, flags);
 	if (running) {
 		/* there shouldn't be anything else running at the time */
 		BUG_ON(pedf->scheduled);
@@ -267,7 +237,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running)
 		/* maybe we have to reschedule */
 		preempt(pedf);
 	}
-	spin_unlock_irqrestore(&pedf->lock, flags);
+	spin_unlock_irqrestore(&pedf->slock, flags);
 }

 static void psnedf_task_wake_up(struct task_struct *task)
@@ -277,7 +247,7 @@ static void psnedf_task_wake_up(struct task_struct *task)
 	rt_domain_t*	edf = task_edf(task);
 	lt_t		now;

-	spin_lock_irqsave(&pedf->lock, flags);
+	spin_lock_irqsave(&pedf->slock, flags);
 	BUG_ON(in_list(&task->rt_list));
 	/* We need to take suspensions because of semaphores into
 	 * account! If a job resumes after being suspended due to acquiring
@@ -293,7 +263,7 @@ static void psnedf_task_wake_up(struct task_struct *task)
 		sched_trace_job_release(task);
 	}
 	requeue(task, edf);
-	spin_unlock_irqrestore(&pedf->lock, flags);
+	spin_unlock_irqrestore(&pedf->slock, flags);
 }

 static void psnedf_task_block(struct task_struct *t)
@@ -308,13 +278,13 @@ static void psnedf_task_exit(struct task_struct * t)
 	unsigned long flags;
 	psnedf_domain_t*	pedf = task_pedf(t);

-	spin_lock_irqsave(&pedf->lock, flags);
+	spin_lock_irqsave(&pedf->slock, flags);

 	if (in_list(&t->rt_list))
 		/* dequeue */
 		list_del(&t->rt_list);
 	preempt(pedf);
-	spin_unlock_irqrestore(&pedf->lock, flags);
+	spin_unlock_irqrestore(&pedf->slock, flags);
 }

 static long psnedf_pi_block(struct pi_semaphore *sem,
@@ -333,7 +303,7 @@ static long psnedf_pi_block(struct pi_semaphore *sem,
 		edf = task_edf(new_waiter);

 		/* interrupts already disabled */
-		spin_lock(&pedf->lock);
+		spin_lock(&pedf->slock);

 		/* store new highest-priority task */
 		sem->hp.cpu_task[cpu] = new_waiter;
@@ -357,7 +327,7 @@ static long psnedf_pi_block(struct pi_semaphore *sem,
 		if (edf_preemption_needed(edf, current))
 			preempt(pedf);

-		spin_unlock(&pedf->lock);
+		spin_unlock(&pedf->slock);
 	}

 	return 0;
@@ -411,7 +381,7 @@ static long psnedf_return_priority(struct pi_semaphore *sem)
 		TRACE_CUR("return priority of %s/%d\n",
			  current->rt_param.inh_task->comm,
			  current->rt_param.inh_task->pid);
-		spin_lock(&pedf->lock);
+		spin_lock(&pedf->slock);

 		/* Reset inh_task to NULL. */
 		current->rt_param.inh_task = NULL;
@@ -420,7 +390,7 @@ static long psnedf_return_priority(struct pi_semaphore *sem)
 		if (edf_preemption_needed(edf, current))
 			preempt(pedf);

-		spin_unlock(&pedf->lock);
+		spin_unlock(&pedf->slock);
 	} else
 		TRACE_CUR(" no priority to return %p\n", sem);

@@ -460,7 +430,7 @@ static int __init init_psn_edf(void)
 	{
 		psnedf_domain_init(remote_pedf(i),
				   psnedf_check_resched,
-				   psnedf_setup_release_job_timer, i);
+				   NULL, i);
 	}
 	return register_sched_plugin(&psn_edf_plugin);
 }