author | Bjoern B. Brandenburg <bbb@cs.unc.edu> | 2008-09-11 14:45:33 -0400 |
---|---|---|
committer | Bjoern B. Brandenburg <bbb@cs.unc.edu> | 2008-09-11 14:45:33 -0400 |
commit | ddb418f16e5a6e75e3d32cac31251b51e8bb82e8 (patch) | |
tree | 0f5327bafd553df21a30b6715d60c7c8a48280a3 | |
parent | 5809f5408f551971a5950e8f046eb6b73db4842f (diff) | |
parent | aad151f9aba8c351f999002ca27fc0c8fb6bd3a3 (diff) |
Merge branch 'master' into pfair_fix
-rw-r--r-- | include/asm-sparc64/spinlock.h | 113 | ||||
-rw-r--r-- | include/asm-sparc64/spinlock_types.h | 5 | ||||
-rw-r--r-- | include/litmus/edf_common.h | 4 | ||||
-rw-r--r-- | include/litmus/heap.h | 34 | ||||
-rw-r--r-- | include/litmus/litmus.h | 26 | ||||
-rw-r--r-- | include/litmus/rt_domain.h | 60 | ||||
-rw-r--r-- | include/litmus/rt_param.h | 3 | ||||
-rw-r--r-- | litmus/edf_common.c | 9 | ||||
-rw-r--r-- | litmus/rt_domain.c | 230 | ||||
-rwxr-xr-x | litmus/sched_cedf.c | 57 | ||||
-rw-r--r-- | litmus/sched_gsn_edf.c | 96 | ||||
-rwxr-xr-x | litmus/sched_pfair.c | 24 | ||||
-rw-r--r-- | litmus/sched_psn_edf.c | 2 |
13 files changed, 443 insertions, 220 deletions
diff --git a/include/asm-sparc64/spinlock.h b/include/asm-sparc64/spinlock.h index 0006fe9f8c..16931d4cd9 100644 --- a/include/asm-sparc64/spinlock.h +++ b/include/asm-sparc64/spinlock.h | |||
@@ -15,93 +15,80 @@ | |||
15 | * and rebuild your kernel. | 15 | * and rebuild your kernel. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | /* All of these locking primitives are expected to work properly | 18 | #define __raw_spin_is_locked(lp) ((lp)->tail != (lp)->head) |
19 | * even in an RMO memory model, which currently is what the kernel | ||
20 | * runs in. | ||
21 | * | ||
22 | * There is another issue. Because we play games to save cycles | ||
23 | * in the non-contention case, we need to be extra careful about | ||
24 | * branch targets into the "spinning" code. They live in their | ||
25 | * own section, but the newer V9 branches have a shorter range | ||
26 | * than the traditional 32-bit sparc branch variants. The rule | ||
27 | * is that the branches that go into and out of the spinner sections | ||
28 | * must be pre-V9 branches. | ||
29 | */ | ||
30 | |||
31 | #define __raw_spin_is_locked(lp) ((lp)->lock != 0) | ||
32 | 19 | ||
33 | #define __raw_spin_unlock_wait(lp) \ | 20 | #define __raw_spin_unlock_wait(lp) \ |
34 | do { rmb(); \ | 21 | do { rmb(); \ |
35 | } while((lp)->lock) | 22 | } while((lp)->tail != (lp)->head) |
23 | |||
24 | |||
36 | 25 | ||
37 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | 26 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
38 | { | 27 | { |
39 | unsigned long tmp; | 28 | int ticket, tmp; |
40 | |||
41 | __asm__ __volatile__( | 29 | __asm__ __volatile__( |
42 | "1: ldstub [%1], %0\n" | 30 | "1: lduw [%2], %0 \n" /* read ticket */ |
43 | " membar #StoreLoad | #StoreStore\n" | 31 | " add %0, 1, %1 \n" |
44 | " brnz,pn %0, 2f\n" | 32 | " cas [%2], %0, %1 \n" |
45 | " nop\n" | 33 | " cmp %0, %1 \n" |
46 | " .subsection 2\n" | 34 | " be,a,pt %%icc, 2f \n" |
47 | "2: ldub [%1], %0\n" | 35 | " nop \n" |
48 | " membar #LoadLoad\n" | 36 | " membar #LoadLoad | #StoreLoad | #LoadStore\n" |
49 | " brnz,pt %0, 2b\n" | 37 | " ba 1b\n" |
50 | " nop\n" | 38 | " nop \n" |
51 | " ba,a,pt %%xcc, 1b\n" | 39 | "2: lduw [%3], %1 \n" |
52 | " .previous" | 40 | " cmp %0, %1 \n" |
53 | : "=&r" (tmp) | 41 | " be,a,pt %%icc, 3f \n" |
54 | : "r" (lock) | 42 | " nop \n" |
43 | " membar #LoadLoad | #StoreLoad | #LoadStore\n" | ||
44 | " ba 2b\n" | ||
45 | "3: membar #StoreStore | #StoreLoad" | ||
46 | : "=&r" (ticket), "=&r" (tmp) | ||
47 | : "r" (&lock->tail), "r" (&lock->head) | ||
55 | : "memory"); | 48 | : "memory"); |
56 | } | 49 | } |
57 | 50 | ||
58 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | 51 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
59 | { | 52 | { |
60 | unsigned long result; | 53 | int tail, head; |
61 | |||
62 | __asm__ __volatile__( | 54 | __asm__ __volatile__( |
63 | " ldstub [%1], %0\n" | 55 | " lduw [%2], %0 \n" /* read tail */ |
64 | " membar #StoreLoad | #StoreStore" | 56 | " lduw [%3], %1 \n" /* read head */ |
65 | : "=r" (result) | 57 | " cmp %0, %1 \n" |
66 | : "r" (lock) | 58 | " bne,a,pn %%icc, 1f \n" |
59 | " nop \n" | ||
60 | " inc %1 \n" | ||
61 | " cas [%2], %0, %1 \n" /* try to inc ticket */ | ||
62 | " membar #StoreStore | #StoreLoad \n" | ||
63 | "1: " | ||
64 | : "=&r" (tail), "=&r" (head) | ||
65 | : "r" (&lock->tail), "r" (&lock->head) | ||
67 | : "memory"); | 66 | : "memory"); |
68 | 67 | ||
69 | return (result == 0UL); | 68 | return tail == head; |
70 | } | 69 | } |
71 | 70 | ||
72 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | 71 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
73 | { | 72 | { |
73 | int tmp; | ||
74 | __asm__ __volatile__( | 74 | __asm__ __volatile__( |
75 | " membar #StoreStore | #LoadStore\n" | 75 | " membar #StoreStore | #LoadStore \n" |
76 | " stb %%g0, [%0]" | 76 | " lduw [%1], %0 \n" |
77 | : /* No outputs */ | 77 | " inc %0 \n" |
78 | : "r" (lock) | 78 | " st %0, [%1] \n" |
79 | " membar #StoreStore | #StoreLoad" | ||
80 | : "=&r" (tmp) | ||
81 | : "r" (&lock->head) | ||
79 | : "memory"); | 82 | : "memory"); |
80 | } | 83 | } |
81 | 84 | ||
82 | static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) | 85 | /* We don't handle this yet, but it looks like not re-enabling the interrupts |
83 | { | 86 | * works fine, too. For example, lockdep also does it like this. |
84 | unsigned long tmp1, tmp2; | 87 | */ |
88 | #define __raw_spin_lock_flags(l, f) __raw_spin_lock(l) | ||
89 | |||
90 | |||
85 | 91 | ||
86 | __asm__ __volatile__( | ||
87 | "1: ldstub [%2], %0\n" | ||
88 | " membar #StoreLoad | #StoreStore\n" | ||
89 | " brnz,pn %0, 2f\n" | ||
90 | " nop\n" | ||
91 | " .subsection 2\n" | ||
92 | "2: rdpr %%pil, %1\n" | ||
93 | " wrpr %3, %%pil\n" | ||
94 | "3: ldub [%2], %0\n" | ||
95 | " membar #LoadLoad\n" | ||
96 | " brnz,pt %0, 3b\n" | ||
97 | " nop\n" | ||
98 | " ba,pt %%xcc, 1b\n" | ||
99 | " wrpr %1, %%pil\n" | ||
100 | " .previous" | ||
101 | : "=&r" (tmp1), "=&r" (tmp2) | ||
102 | : "r"(lock), "r"(flags) | ||
103 | : "memory"); | ||
104 | } | ||
105 | 92 | ||
106 | /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ | 93 | /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ |
107 | 94 | ||
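The change above replaces the sparc64 test-and-set byte lock with a ticket lock: `tail` counts tickets handed out, `head` counts tickets served, and the lock is free exactly when the two are equal (hence the `{ 0, 0 }` initializer in spinlock_types.h below). The following user-space sketch models that discipline with C11 atomics; it is an illustration only, with the explicit `membar` ordering of the assembly simplified to sequentially consistent operations.

```c
#include <stdatomic.h>

/* Illustrative model of the tail/head ticket discipline, not the kernel
 * code: the SPARC cas/membar sequences become C11 atomics. */
typedef struct {
	atomic_int tail;   /* next ticket to hand out */
	atomic_int head;   /* ticket currently being served */
} ticket_lock_t;

#define TICKET_LOCK_UNLOCKED { 0, 0 }

static inline int ticket_is_locked(ticket_lock_t *lp)
{
	return atomic_load(&lp->tail) != atomic_load(&lp->head);
}

static inline void ticket_lock(ticket_lock_t *lp)
{
	/* atomically grab the next ticket (the cas retry loop at label 1:) */
	int ticket = atomic_fetch_add(&lp->tail, 1);
	/* spin until our ticket is served (the loop at label 2:) */
	while (atomic_load(&lp->head) != ticket)
		; /* the kernel would use cpu_relax() here */
}

static inline int ticket_trylock(ticket_lock_t *lp)
{
	int t = atomic_load(&lp->tail);
	/* fail immediately if the lock is held or contended */
	if (t != atomic_load(&lp->head))
		return 0;
	/* otherwise try to take the next ticket in a single step */
	return atomic_compare_exchange_strong(&lp->tail, &t, t + 1);
}

static inline void ticket_unlock(ticket_lock_t *lp)
{
	/* serve the next ticket; waiters acquire the lock in FIFO order */
	atomic_fetch_add(&lp->head, 1);
}
```

The point of the conversion is fairness: with the old `ldstub` lock, whichever CPU won the store race got the lock, while the ticket scheme grants it strictly in arrival order.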
diff --git a/include/asm-sparc64/spinlock_types.h b/include/asm-sparc64/spinlock_types.h index e128112a0d..1a2e24b29c 100644 --- a/include/asm-sparc64/spinlock_types.h +++ b/include/asm-sparc64/spinlock_types.h | |||
@@ -6,10 +6,11 @@ | |||
6 | #endif | 6 | #endif |
7 | 7 | ||
8 | typedef struct { | 8 | typedef struct { |
9 | volatile unsigned char lock; | 9 | int tail; |
10 | int head; | ||
10 | } raw_spinlock_t; | 11 | } raw_spinlock_t; |
11 | 12 | ||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | 13 | #define __RAW_SPIN_LOCK_UNLOCKED { 0, 0 } |
13 | 14 | ||
14 | typedef struct { | 15 | typedef struct { |
15 | volatile unsigned int lock; | 16 | volatile unsigned int lock; |
diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h index 4dff77ae48..32dcf9bd6b 100644 --- a/include/litmus/edf_common.h +++ b/include/litmus/edf_common.h | |||
@@ -12,8 +12,8 @@ | |||
12 | #include <litmus/rt_domain.h> | 12 | #include <litmus/rt_domain.h> |
13 | 13 | ||
14 | 14 | ||
15 | void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched, | 15 | void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched, |
16 | release_job_t release); | 16 | release_jobs_t release); |
17 | 17 | ||
18 | int edf_higher_prio(struct task_struct* first, | 18 | int edf_higher_prio(struct task_struct* first, |
19 | struct task_struct* second); | 19 | struct task_struct* second); |
diff --git a/include/litmus/heap.h b/include/litmus/heap.h index 0b0ede4542..508e0b9f16 100644 --- a/include/litmus/heap.h +++ b/include/litmus/heap.h | |||
@@ -298,4 +298,38 @@ static inline void heap_delete(heap_prio_t higher_prio, struct heap* heap, | |||
298 | node->degree = NOT_IN_HEAP; | 298 | node->degree = NOT_IN_HEAP; |
299 | } | 299 | } |
300 | 300 | ||
301 | /* TODO: un-inline this and allocate nodes from a kmem_cache_t */ | ||
302 | static inline struct heap_node* alloc_heap_node(int gfp_flags) | ||
303 | { | ||
304 | return kmalloc(sizeof(struct heap_node), gfp_flags); | ||
305 | } | ||
306 | |||
307 | static inline void free_heap_node(struct heap_node* hn) | ||
308 | { | ||
309 | kfree(hn); | ||
310 | } | ||
311 | |||
312 | /* allocate a heap node for value and insert into the heap */ | ||
313 | static inline int heap_add(heap_prio_t higher_prio, struct heap* heap, | ||
314 | void* value, int gfp_flags) | ||
315 | { | ||
316 | struct heap_node* hn = alloc_heap_node(gfp_flags); | ||
317 | if (likely(hn)) { | ||
318 | heap_node_init(&hn, value); | ||
319 | heap_insert(higher_prio, heap, hn); | ||
320 | } | ||
321 | return hn != NULL; | ||
322 | } | ||
323 | |||
324 | static inline void* heap_take_del(heap_prio_t higher_prio, | ||
325 | struct heap* heap) | ||
326 | { | ||
327 | struct heap_node* hn = heap_take(higher_prio, heap); | ||
328 | void* ret = NULL; | ||
329 | if (hn) { | ||
330 | ret = hn->value; | ||
331 | free_heap_node(hn); | ||
332 | } | ||
333 | return ret; | ||
334 | } | ||
301 | #endif | 335 | #endif |
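The new heap_add()/heap_take_del() helpers hide the heap_node bookkeeping: values are wrapped in kmalloc()'d nodes on insertion and unwrapped (and freed) on removal. A hypothetical caller might look like the sketch below; `my_order`, `pending`, and the surrounding module are assumptions for illustration, not code from the patch, and the heap is presumed to have been set up with heap_init() beforehand.

```c
#include <litmus/heap.h>
#include <linux/slab.h>
#include <linux/kernel.h>

/* Hypothetical module-local heap; heap_init(&pending) is assumed to have
 * run during initialization. */
static struct heap pending;

static void queue_value(heap_prio_t my_order, void *value)
{
	/* heap_add() kmalloc()s the wrapper node; it returns 0 if the
	 * allocation failed and the value was not queued. */
	if (!heap_add(my_order, &pending, value, GFP_ATOMIC))
		printk(KERN_WARNING "queue_value: out of memory\n");
}

static void *dequeue_value(heap_prio_t my_order)
{
	/* heap_take_del() unwraps the highest-priority value and kfree()s
	 * its node; it returns NULL if the heap is empty. */
	return heap_take_del(my_order, &pending);
}
```

The TODO comment above marks the obvious follow-up: a dedicated slab cache would make these atomic allocations cheaper and less likely to fail.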
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h index de2a3c2547..d3cd6e4437 100644 --- a/include/litmus/litmus.h +++ b/include/litmus/litmus.h | |||
@@ -141,10 +141,6 @@ static inline lt_t litmus_clock(void) | |||
141 | /* A macro to convert from nanoseconds to ktime_t. */ | 141 | /* A macro to convert from nanoseconds to ktime_t. */ |
142 | #define ns_to_ktime(t) ktime_add_ns(ktime_set(0, 0), t) | 142 | #define ns_to_ktime(t) ktime_add_ns(ktime_set(0, 0), t) |
143 | 143 | ||
144 | /* The high-resolution release timer for a task. */ | ||
145 | #define release_timer(t) (tsk_rt(t)->release_timer) | ||
146 | |||
147 | /* The high-resolution release timer for a task. */ | ||
148 | #define get_domain(t) (tsk_rt(t)->domain) | 144 | #define get_domain(t) (tsk_rt(t)->domain) |
149 | 145 | ||
150 | /* Honor the flag in the preempt_count variable that is set | 146 | /* Honor the flag in the preempt_count variable that is set |
@@ -201,5 +197,27 @@ static inline int is_np(struct task_struct *t) | |||
201 | 197 | ||
202 | #endif | 198 | #endif |
203 | 199 | ||
200 | /* make the unit explicit */ | ||
201 | typedef unsigned long quanta_t; | ||
202 | |||
203 | enum round { | ||
204 | FLOOR, | ||
205 | CEIL | ||
206 | }; | ||
207 | |||
208 | |||
209 | /* Tick period is used to convert ns-specified execution | ||
210 | * costs and periods into tick-based equivalents. | ||
211 | */ | ||
212 | extern ktime_t tick_period; | ||
213 | |||
214 | static inline quanta_t time2quanta(lt_t time, enum round round) | ||
215 | { | ||
216 | s64 quantum_length = ktime_to_ns(tick_period); | ||
217 | |||
218 | if (do_div(time, quantum_length) && round == CEIL) | ||
219 | time++; | ||
220 | return (quanta_t) time; | ||
221 | } | ||
204 | 222 | ||
205 | #endif | 223 | #endif |
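time2quanta() turns a length of time in nanoseconds into a number of scheduling quanta, rounding down for FLOOR and up for CEIL. A small user-space model with a worked example follows; it assumes a 1 ms quantum, whereas the real quantum length is ktime_to_ns(tick_period) and depends on the kernel's HZ configuration (the kernel version also uses do_div(), which divides in place and returns the remainder).

```c
#include <stdio.h>

typedef unsigned long long lt_t;   /* time in ns, as in litmus.h */
typedef unsigned long quanta_t;
enum round { FLOOR, CEIL };

/* User-space model of time2quanta(); 1 ms quantum assumed. */
static quanta_t time2quanta(lt_t time, enum round round)
{
	const lt_t quantum_length = 1000000ULL;  /* 1 ms in ns */
	quanta_t q = (quanta_t)(time / quantum_length);

	if ((time % quantum_length) && round == CEIL)
		q++;
	return q;
}

int main(void)
{
	/* 2.5 ms: FLOOR gives 2 quanta, CEIL gives 3 */
	printf("%lu %lu\n", time2quanta(2500000ULL, FLOOR),
	       time2quanta(2500000ULL, CEIL));
	return 0;
}
```

Moving this helper out of sched_pfair.c and into litmus.h lets rt_domain.c reuse it for the release-queue slot hashing further down.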
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h index 47cd123b76..84d71d40d9 100644 --- a/include/litmus/rt_domain.h +++ b/include/litmus/rt_domain.h | |||
@@ -8,13 +8,36 @@ | |||
8 | #include <litmus/norqlock.h> | 8 | #include <litmus/norqlock.h> |
9 | #include <litmus/heap.h> | 9 | #include <litmus/heap.h> |
10 | 10 | ||
11 | #define RELEASE_QUEUE_SLOTS 127 /* prime */ | ||
12 | |||
11 | struct _rt_domain; | 13 | struct _rt_domain; |
12 | 14 | ||
13 | typedef int (*check_resched_needed_t)(struct _rt_domain *rt); | 15 | typedef int (*check_resched_needed_t)(struct _rt_domain *rt); |
14 | typedef void (*release_job_t)(struct task_struct *t, struct _rt_domain *rt); | 16 | typedef void (*release_jobs_t)(struct _rt_domain *rt, struct heap* tasks); |
17 | |||
18 | int heap_earlier_release(struct heap_node *_a, struct heap_node *_b); | ||
19 | |||
20 | struct release_heap { | ||
21 | struct list_head list; | ||
22 | lt_t release_time; | ||
23 | struct heap heap; | ||
24 | }; | ||
25 | |||
26 | struct release_queue { | ||
27 | /* each slot maintains a list of release heaps sorted by release time */ | ||
28 | struct list_head slot[RELEASE_QUEUE_SLOTS]; | ||
29 | /* the heap of heaps ordered by release time */ | ||
30 | struct heap rel_heap; | ||
31 | /* the actual timer used to trigger releases */ | ||
32 | struct hrtimer timer; | ||
33 | /* used to determine when to start the timer */ | ||
34 | int timer_armed; | ||
35 | /* when will it go off? */ | ||
36 | lt_t timer_time; | ||
37 | }; | ||
15 | 38 | ||
16 | typedef struct _rt_domain { | 39 | typedef struct _rt_domain { |
17 | struct no_rqlock_work arm_timers; | 40 | struct no_rqlock_work arm_timer; |
18 | 41 | ||
19 | /* runnable rt tasks are in here */ | 42 | /* runnable rt tasks are in here */ |
20 | spinlock_t ready_lock; | 43 | spinlock_t ready_lock; |
@@ -22,18 +45,32 @@ typedef struct _rt_domain { | |||
22 | 45 | ||
23 | /* real-time tasks waiting for release are in here */ | 46 | /* real-time tasks waiting for release are in here */ |
24 | spinlock_t release_lock; | 47 | spinlock_t release_lock; |
25 | struct list_head release_queue; | 48 | struct release_queue release_queue; |
49 | |||
50 | /* for moving tasks to the release queue */ | ||
51 | spinlock_t tobe_lock; | ||
52 | struct list_head tobe_released; | ||
26 | 53 | ||
27 | /* how do we check if we need to kick another CPU? */ | 54 | /* how do we check if we need to kick another CPU? */ |
28 | check_resched_needed_t check_resched; | 55 | check_resched_needed_t check_resched; |
29 | 56 | ||
30 | /* how do we release a job? */ | 57 | /* how do we release a job? */ |
31 | release_job_t release_job; | 58 | release_jobs_t release_jobs; |
32 | 59 | ||
33 | /* how are tasks ordered in the ready queue? */ | 60 | /* how are tasks ordered in the ready queue? */ |
34 | heap_prio_t order; | 61 | heap_prio_t order; |
35 | } rt_domain_t; | 62 | } rt_domain_t; |
36 | 63 | ||
64 | /* caller must hold release_lock */ | ||
65 | static inline int next_release(rt_domain_t *rt, lt_t *time) | ||
66 | { | ||
67 | struct heap_node* top = heap_peek(heap_earlier_release, | ||
68 | &rt->release_queue.rel_heap); | ||
69 | if (top) | ||
70 | *time = ((struct release_heap*) top->value)->release_time; | ||
71 | return top != NULL; | ||
72 | } | ||
73 | |||
37 | static inline struct task_struct* __next_ready(rt_domain_t* rt) | 74 | static inline struct task_struct* __next_ready(rt_domain_t* rt) |
38 | { | 75 | { |
39 | struct heap_node *hn = heap_peek(rt->order, &rt->ready_queue); | 76 | struct heap_node *hn = heap_peek(rt->order, &rt->ready_queue); |
@@ -45,9 +82,10 @@ static inline struct task_struct* __next_ready(rt_domain_t* rt) | |||
45 | 82 | ||
46 | void rt_domain_init(rt_domain_t *rt, heap_prio_t order, | 83 | void rt_domain_init(rt_domain_t *rt, heap_prio_t order, |
47 | check_resched_needed_t check, | 84 | check_resched_needed_t check, |
48 | release_job_t relase); | 85 | release_jobs_t release); |
49 | 86 | ||
50 | void __add_ready(rt_domain_t* rt, struct task_struct *new); | 87 | void __add_ready(rt_domain_t* rt, struct task_struct *new); |
88 | void __merge_ready(rt_domain_t* rt, struct heap *tasks); | ||
51 | void __add_release(rt_domain_t* rt, struct task_struct *task); | 89 | void __add_release(rt_domain_t* rt, struct task_struct *task); |
52 | 90 | ||
53 | static inline struct task_struct* __take_ready(rt_domain_t* rt) | 91 | static inline struct task_struct* __take_ready(rt_domain_t* rt) |
@@ -87,6 +125,14 @@ static inline void add_ready(rt_domain_t* rt, struct task_struct *new) | |||
87 | spin_unlock_irqrestore(&rt->ready_lock, flags); | 125 | spin_unlock_irqrestore(&rt->ready_lock, flags); |
88 | } | 126 | } |
89 | 127 | ||
128 | static inline void merge_ready(rt_domain_t* rt, struct heap* tasks) | ||
129 | { | ||
130 | unsigned long flags; | ||
131 | spin_lock_irqsave(&rt->ready_lock, flags); | ||
132 | __merge_ready(rt, tasks); | ||
133 | spin_unlock_irqrestore(&rt->ready_lock, flags); | ||
134 | } | ||
135 | |||
90 | static inline struct task_struct* take_ready(rt_domain_t* rt) | 136 | static inline struct task_struct* take_ready(rt_domain_t* rt) |
91 | { | 137 | { |
92 | unsigned long flags; | 138 | unsigned long flags; |
@@ -103,9 +149,9 @@ static inline void add_release(rt_domain_t* rt, struct task_struct *task) | |||
103 | { | 149 | { |
104 | unsigned long flags; | 150 | unsigned long flags; |
105 | /* first we need the write lock for rt_ready_queue */ | 151 | /* first we need the write lock for rt_ready_queue */ |
106 | spin_lock_irqsave(&rt->release_lock, flags); | 152 | spin_lock_irqsave(&rt->tobe_lock, flags); |
107 | __add_release(rt, task); | 153 | __add_release(rt, task); |
108 | spin_unlock_irqrestore(&rt->release_lock, flags); | 154 | spin_unlock_irqrestore(&rt->tobe_lock, flags); |
109 | } | 155 | } |
110 | 156 | ||
111 | static inline int __jobs_pending(rt_domain_t* rt) | 157 | static inline int __jobs_pending(rt_domain_t* rt) |
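The callback type changes from per-task (release_job_t) to batched (release_jobs_t): the domain now gathers every job whose release time has passed into one heap and hands the whole heap to the plugin, which merges it into the ready queue with a single heap union. A hypothetical plugin callback is sketched below; `my_plugin_lock` and `my_check_for_preemptions()` are assumed plugin-local names, and `my_plugin_lock` stands for whatever lock protects the plugin's ready queue (compare gsnedf_lock and cedf->slock in the plugin diffs further down).

```c
#include <litmus/rt_domain.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_plugin_lock);

static void my_check_for_preemptions(void)
{
	/* plugin-specific: compare the new ready-queue head against the
	 * currently linked jobs and preempt where necessary */
}

/* release_jobs_t callback: invoked from the release timer with a heap of
 * all jobs that just became eligible. */
static void my_release_jobs(rt_domain_t *rt, struct heap *tasks)
{
	unsigned long flags;

	spin_lock_irqsave(&my_plugin_lock, flags);
	/* one heap union instead of one add_ready() call per task */
	__merge_ready(rt, tasks);
	my_check_for_preemptions();
	spin_unlock_irqrestore(&my_plugin_lock, flags);
}
```

The callback would be passed as the `release` argument to rt_domain_init() or edf_domain_init(), exactly as cedf_release_jobs() and gsnedf_release_jobs() are below.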
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h index 7bb568432e..403ebc8cc9 100644 --- a/include/litmus/rt_param.h +++ b/include/litmus/rt_param.h | |||
@@ -137,9 +137,6 @@ struct rt_param { | |||
137 | int old_policy; | 137 | int old_policy; |
138 | int old_prio; | 138 | int old_prio; |
139 | 139 | ||
140 | /* The high-resolution timer used to control its release. */ | ||
141 | struct hrtimer release_timer; | ||
142 | |||
143 | /* ready queue for this task */ | 140 | /* ready queue for this task */ |
144 | struct _rt_domain* domain; | 141 | struct _rt_domain* domain; |
145 | 142 | ||
diff --git a/litmus/edf_common.c b/litmus/edf_common.c index d7567ac98e..84ece3e95e 100644 --- a/litmus/edf_common.c +++ b/litmus/edf_common.c | |||
@@ -18,8 +18,7 @@ | |||
18 | /* edf_higher_prio - returns true if first has a higher EDF priority | 18 | /* edf_higher_prio - returns true if first has a higher EDF priority |
19 | * than second. Deadline ties are broken by PID. | 19 | * than second. Deadline ties are broken by PID. |
20 | * | 20 | * |
21 | * first first must not be NULL and a real-time task. | 21 | * both first and second may be NULL |
22 | * second may be NULL or a non-rt task. | ||
23 | */ | 22 | */ |
24 | int edf_higher_prio(struct task_struct* first, | 23 | int edf_higher_prio(struct task_struct* first, |
25 | struct task_struct* second) | 24 | struct task_struct* second) |
@@ -36,6 +35,8 @@ int edf_higher_prio(struct task_struct* first, | |||
36 | second_task = second->rt_param.inh_task; | 35 | second_task = second->rt_param.inh_task; |
37 | 36 | ||
38 | return | 37 | return |
38 | /* it has to exist in order to have higher priority */ | ||
39 | first_task && ( | ||
39 | /* does the second task exist and is it a real-time task? If | 40 | /* does the second task exist and is it a real-time task? If |
40 | * not, the first task (which is a RT task) has higher | 41 | * not, the first task (which is a RT task) has higher |
41 | * priority. | 42 | * priority. |
@@ -57,7 +58,7 @@ int edf_higher_prio(struct task_struct* first, | |||
57 | * priority wins. | 58 | * priority wins. |
58 | */ | 59 | */ |
59 | (first_task->pid == second_task->pid && | 60 | (first_task->pid == second_task->pid && |
60 | !second->rt_param.inh_task))); | 61 | !second->rt_param.inh_task)))); |
61 | } | 62 | } |
62 | 63 | ||
63 | int edf_ready_order(struct heap_node* a, struct heap_node* b) | 64 | int edf_ready_order(struct heap_node* a, struct heap_node* b) |
@@ -66,7 +67,7 @@ int edf_ready_order(struct heap_node* a, struct heap_node* b) | |||
66 | } | 67 | } |
67 | 68 | ||
68 | void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched, | 69 | void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched, |
69 | release_job_t release) | 70 | release_jobs_t release) |
70 | { | 71 | { |
71 | rt_domain_init(rt, edf_ready_order, resched, release); | 72 | rt_domain_init(rt, edf_ready_order, resched, release); |
72 | } | 73 | } |
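With the added `first_task && (...)` guard, edf_higher_prio() becomes total: either argument may be NULL (or a non-real-time task), and a task that does not exist can never have the higher priority. The reduced user-space model below shows the decision cascade; priority inheritance is omitted and the PID tie-break is collapsed onto a plain `pid` field, so it is an illustration of the structure rather than the full predicate.

```c
#include <stdbool.h>
#include <stddef.h>

/* Toy stand-in for struct task_struct; inheritance is left out. */
struct toy_task {
	unsigned long long deadline;   /* absolute deadline in ns */
	int pid;
};

static bool toy_edf_higher_prio(const struct toy_task *first,
                                const struct toy_task *second)
{
	return first &&                      /* must exist to win          */
	       (!second ||                   /* anything beats no task     */
	        first->deadline < second->deadline ||  /* earlier deadline */
	        (first->deadline == second->deadline &&
	         first->pid < second->pid)); /* PID breaks deadline ties   */
}

int main(void)
{
	struct toy_task a = { 100, 1 }, b = { 200, 2 };

	/* NULL is now legal on either side of the comparison */
	return (toy_edf_higher_prio(&a, &b) &&
	        toy_edf_higher_prio(&a, NULL) &&
	        !toy_edf_higher_prio(NULL, &b)) ? 0 : 1;
}
```

The relaxation matters for the new CPU heap in sched_gsn_edf.c, where cpu_lower_prio() compares the linked jobs of two CPUs and either one may be idle (NULL).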
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c index 28803087b3..27bc9143e7 100644 --- a/litmus/rt_domain.c +++ b/litmus/rt_domain.c | |||
@@ -18,6 +18,8 @@ | |||
18 | 18 | ||
19 | #include <litmus/trace.h> | 19 | #include <litmus/trace.h> |
20 | 20 | ||
21 | #include <litmus/heap.h> | ||
22 | |||
21 | static int dummy_resched(rt_domain_t *rt) | 23 | static int dummy_resched(rt_domain_t *rt) |
22 | { | 24 | { |
23 | return 0; | 25 | return 0; |
@@ -29,89 +31,228 @@ static int dummy_order(struct heap_node* a, struct heap_node* b) | |||
29 | } | 31 | } |
30 | 32 | ||
31 | /* default implementation: use default lock */ | 33 | /* default implementation: use default lock */ |
32 | static void default_release_job(struct task_struct* t, rt_domain_t* rt) | 34 | static void default_release_jobs(rt_domain_t* rt, struct heap* tasks) |
35 | { | ||
36 | merge_ready(rt, tasks); | ||
37 | } | ||
38 | |||
39 | static unsigned int time2slot(lt_t time) | ||
40 | { | ||
41 | return (unsigned int) time2quanta(time, FLOOR) % RELEASE_QUEUE_SLOTS; | ||
42 | } | ||
43 | |||
44 | int heap_earlier_release(struct heap_node *_a, struct heap_node *_b) | ||
33 | { | 45 | { |
34 | add_ready(rt, t); | 46 | struct release_heap *a = _a->value; |
47 | struct release_heap *b = _b->value; | ||
48 | return lt_before(a->release_time, b->release_time); | ||
35 | } | 49 | } |
36 | 50 | ||
37 | static enum hrtimer_restart release_job_timer(struct hrtimer *timer) | 51 | /* Caller must hold the release lock. |
52 | * Will return the heap for the given time. If no such heap exists prior to the invocation, | ||
53 | * it will be created. | ||
54 | */ | ||
55 | static struct release_heap* get_release_heap(rt_domain_t *rt, lt_t release_time) | ||
38 | { | 56 | { |
39 | struct task_struct *t; | 57 | struct list_head* pos; |
58 | struct release_heap* heap = NULL; | ||
59 | struct release_heap* rh; | ||
60 | unsigned int slot = time2slot(release_time); | ||
61 | int inserted; | ||
62 | |||
63 | /* initialize pos for the case that the list is empty */ | ||
64 | pos = rt->release_queue.slot[slot].next; | ||
65 | list_for_each(pos, &rt->release_queue.slot[slot]) { | ||
66 | rh = list_entry(pos, struct release_heap, list); | ||
67 | if (release_time == rh->release_time) { | ||
68 | /* perfect match -- this happens on hyperperiod | ||
69 | * boundaries | ||
70 | */ | ||
71 | heap = rh; | ||
72 | break; | ||
73 | } else if (lt_before(release_time, rh->release_time)) { | ||
74 | /* we need to insert a new node since rh is | ||
75 | * already in the future | ||
76 | */ | ||
77 | break; | ||
78 | } | ||
79 | } | ||
80 | if (!heap) { | ||
81 | /* must create new node */ | ||
82 | /* FIXME: use a kmem_cache_t */ | ||
83 | rh = kmalloc(sizeof(struct release_heap), GFP_ATOMIC); | ||
84 | if (unlikely(!rh)) | ||
85 | /* Should be handled somehow. | ||
86 | * For now, let's just hope there is | ||
87 | * sufficient memory. | ||
88 | */ | ||
89 | panic("rt_domain: no more memory?"); | ||
90 | rh->release_time = release_time; | ||
91 | heap_init(&rh->heap); | ||
92 | list_add(&rh->list, pos->prev); | ||
93 | inserted = heap_add(heap_earlier_release, | ||
94 | &rt->release_queue.rel_heap, rh, | ||
95 | GFP_ATOMIC); | ||
96 | if (unlikely(!inserted)) | ||
97 | panic("rt_domain: no more heap memory?"); | ||
98 | heap = rh; | ||
99 | } | ||
100 | return heap; | ||
101 | } | ||
102 | |||
103 | static enum hrtimer_restart on_release_timer(struct hrtimer *timer) | ||
104 | { | ||
105 | long flags; | ||
106 | rt_domain_t *rt; | ||
107 | struct release_heap* rh; | ||
108 | struct heap tasks; | ||
109 | struct list_head list, *pos, *safe; | ||
110 | lt_t release = 0; | ||
111 | int pending; | ||
112 | int repeat; | ||
113 | enum hrtimer_restart ret = HRTIMER_NORESTART; | ||
40 | 114 | ||
41 | TS_RELEASE_START; | 115 | TS_RELEASE_START; |
42 | 116 | ||
43 | t = container_of(timer, struct task_struct, | 117 | INIT_LIST_HEAD(&list); |
44 | rt_param.release_timer); | 118 | heap_init(&tasks); |
45 | 119 | ||
46 | get_domain(t)->release_job(t, get_domain(t)); | 120 | rt = container_of(timer, rt_domain_t, |
121 | release_queue.timer); | ||
47 | 122 | ||
48 | TS_RELEASE_END; | 123 | do { |
124 | list_for_each_safe(pos, safe, &list) { | ||
125 | rh = list_entry(pos, struct release_heap, list); | ||
126 | heap_union(rt->order, &tasks, &rh->heap); | ||
127 | list_del(pos); | ||
128 | kfree(rh); | ||
129 | } | ||
49 | 130 | ||
50 | return HRTIMER_NORESTART; | 131 | /* call release callback */ |
51 | } | 132 | rt->release_jobs(rt, &tasks); |
52 | 133 | ||
53 | static void setup_job_release_timer(struct task_struct *task) | ||
54 | { | ||
55 | hrtimer_init(&release_timer(task), CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
56 | release_timer(task).function = release_job_timer; | ||
57 | #ifdef CONFIG_HIGH_RES_TIMERS | ||
58 | release_timer(task).cb_mode = HRTIMER_CB_IRQSAFE_NO_RESTART; | ||
59 | #endif | ||
60 | /* Expiration time of timer is release time of task. */ | ||
61 | release_timer(task).expires = ns_to_ktime(get_release(task)); | ||
62 | 134 | ||
63 | TRACE_TASK(task, "arming release timer rel=%llu at %llu\n", | 135 | spin_lock_irqsave(&rt->release_lock, flags); |
64 | get_release(task), litmus_clock()); | 136 | while ((pending = next_release(rt, &release))) { |
137 | if (lt_before(release, litmus_clock())) { | ||
138 | /* pick for release */ | ||
139 | rh = heap_take_del(heap_earlier_release, | ||
140 | &rt->release_queue.rel_heap); | ||
141 | list_move(&rh->list, &list); | ||
142 | } else | ||
143 | break; | ||
144 | } | ||
145 | repeat = !list_empty(&list); | ||
146 | if (!repeat) { | ||
147 | /* last iteration, setup timers, etc. */ | ||
148 | if (!pending) { | ||
149 | rt->release_queue.timer_armed = 0; | ||
150 | ret = HRTIMER_NORESTART; | ||
151 | } else { | ||
152 | rt->release_queue.timer_time = release; | ||
153 | timer->expires = ns_to_ktime(release); | ||
154 | ret = HRTIMER_RESTART; | ||
155 | } | ||
156 | } | ||
157 | spin_unlock_irqrestore(&rt->release_lock, flags); | ||
158 | } while (repeat); | ||
65 | 159 | ||
66 | hrtimer_start(&release_timer(task), release_timer(task).expires, | 160 | TS_RELEASE_END; |
67 | HRTIMER_MODE_ABS); | 161 | |
162 | return ret; | ||
68 | } | 163 | } |
69 | 164 | ||
70 | static void arm_release_timers(unsigned long _rt) | 165 | static void arm_release_timer(unsigned long _rt) |
71 | { | 166 | { |
72 | rt_domain_t *rt = (rt_domain_t*) _rt; | 167 | rt_domain_t *rt = (rt_domain_t*) _rt; |
73 | unsigned long flags; | 168 | unsigned long flags; |
74 | struct list_head alt; | 169 | struct list_head list; |
75 | struct list_head *pos, *safe; | 170 | struct list_head *pos, *safe; |
76 | struct task_struct* t; | 171 | struct task_struct* t; |
172 | struct release_heap* rh; | ||
173 | int earlier, armed; | ||
174 | lt_t release = 0; | ||
175 | |||
176 | local_irq_save(flags); | ||
177 | spin_lock(&rt->tobe_lock); | ||
178 | list_replace_init(&rt->tobe_released, &list); | ||
179 | spin_unlock(&rt->tobe_lock); | ||
77 | 180 | ||
78 | spin_lock_irqsave(&rt->release_lock, flags); | 181 | /* We only have to defend against the ISR since norq callbacks |
79 | list_replace_init(&rt->release_queue, &alt); | 182 | * are serialized. |
80 | spin_unlock_irqrestore(&rt->release_lock, flags); | 183 | */ |
81 | 184 | spin_lock(&rt->release_lock); | |
82 | list_for_each_safe(pos, safe, &alt) { | 185 | |
186 | list_for_each_safe(pos, safe, &list) { | ||
83 | t = list_entry(pos, struct task_struct, rt_param.list); | 187 | t = list_entry(pos, struct task_struct, rt_param.list); |
84 | list_del(pos); | 188 | list_del(pos); |
85 | setup_job_release_timer(t); | 189 | rh = get_release_heap(rt, get_release(t)); |
190 | heap_add(rt->order, &rh->heap, t, GFP_ATOMIC); | ||
86 | } | 191 | } |
87 | } | ||
88 | 192 | ||
193 | next_release(rt, &release); | ||
194 | armed = rt->release_queue.timer_armed; | ||
195 | earlier = lt_before(release, rt->release_queue.timer_time); | ||
196 | /* We'll do the actual arming in a moment. The ISR doesn't care what these | ||
197 | * flags say, and they will be up to date before another instance of this | ||
198 | * function can observe them, due to the sequential nature of norq | ||
199 | * work. | ||
200 | */ | ||
201 | rt->release_queue.timer_armed = 1; | ||
202 | rt->release_queue.timer_time = release; | ||
203 | spin_unlock(&rt->release_lock); | ||
204 | if (!armed || earlier) { | ||
205 | if (armed) { | ||
206 | /* need to cancel first */ | ||
207 | hrtimer_cancel(&rt->release_queue.timer); | ||
208 | } | ||
209 | hrtimer_start(&rt->release_queue.timer, | ||
210 | ns_to_ktime(release), | ||
211 | HRTIMER_MODE_ABS); | ||
212 | } | ||
213 | local_irq_restore(flags); | ||
214 | } | ||
89 | 215 | ||
90 | void rt_domain_init(rt_domain_t *rt, | 216 | void rt_domain_init(rt_domain_t *rt, |
91 | heap_prio_t order, | 217 | heap_prio_t order, |
92 | check_resched_needed_t check, | 218 | check_resched_needed_t check, |
93 | release_job_t release | 219 | release_jobs_t release |
94 | ) | 220 | ) |
95 | { | 221 | { |
222 | int i; | ||
223 | |||
96 | BUG_ON(!rt); | 224 | BUG_ON(!rt); |
97 | if (!check) | 225 | if (!check) |
98 | check = dummy_resched; | 226 | check = dummy_resched; |
99 | if (!release) | 227 | if (!release) |
100 | release = default_release_job; | 228 | release = default_release_jobs; |
101 | if (!order) | 229 | if (!order) |
102 | order = dummy_order; | 230 | order = dummy_order; |
231 | |||
103 | heap_init(&rt->ready_queue); | 232 | heap_init(&rt->ready_queue); |
104 | INIT_LIST_HEAD(&rt->release_queue); | 233 | INIT_LIST_HEAD(&rt->tobe_released); |
234 | rt->release_queue.timer_armed = 0; | ||
235 | for (i = 0; i < RELEASE_QUEUE_SLOTS; i++) | ||
236 | INIT_LIST_HEAD(&rt->release_queue.slot[i]); | ||
237 | |||
238 | hrtimer_init(&rt->release_queue.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
239 | rt->release_queue.timer.function = on_release_timer; | ||
240 | #ifdef CONFIG_HIGH_RES_TIMERS | ||
241 | rt->release_queue.timer.cb_mode = HRTIMER_CB_IRQSAFE; | ||
242 | #endif | ||
243 | |||
105 | spin_lock_init(&rt->ready_lock); | 244 | spin_lock_init(&rt->ready_lock); |
106 | spin_lock_init(&rt->release_lock); | 245 | spin_lock_init(&rt->release_lock); |
246 | spin_lock_init(&rt->tobe_lock); | ||
247 | |||
107 | rt->check_resched = check; | 248 | rt->check_resched = check; |
108 | rt->release_job = release; | 249 | rt->release_jobs = release; |
109 | rt->order = order; | 250 | rt->order = order; |
110 | init_no_rqlock_work(&rt->arm_timers, arm_release_timers, (unsigned long) rt); | 251 | init_no_rqlock_work(&rt->arm_timer, arm_release_timer, (unsigned long) rt); |
111 | } | 252 | } |
112 | 253 | ||
113 | /* add_ready - add a real-time task to the rt ready queue. It must be runnable. | 254 | /* add_ready - add a real-time task to the rt ready queue. It must be runnable. |
114 | * @new: the newly released task | 255 | * @new: the newly released task |
115 | */ | 256 | */ |
116 | void __add_ready(rt_domain_t* rt, struct task_struct *new) | 257 | void __add_ready(rt_domain_t* rt, struct task_struct *new) |
117 | { | 258 | { |
@@ -125,14 +266,23 @@ void __add_ready(rt_domain_t* rt, struct task_struct *new) | |||
125 | rt->check_resched(rt); | 266 | rt->check_resched(rt); |
126 | } | 267 | } |
127 | 268 | ||
269 | /* merge_ready - Add a heap of tasks to the rt ready queue. They must be runnable. | ||
270 | * @tasks - the newly released tasks | ||
271 | */ | ||
272 | void __merge_ready(rt_domain_t* rt, struct heap* tasks) | ||
273 | { | ||
274 | heap_union(rt->order, &rt->ready_queue, tasks); | ||
275 | rt->check_resched(rt); | ||
276 | } | ||
277 | |||
128 | /* add_release - add a real-time task to the rt release queue. | 278 | /* add_release - add a real-time task to the rt release queue. |
129 | * @task: the sleeping task | 279 | * @task: the sleeping task |
130 | */ | 280 | */ |
131 | void __add_release(rt_domain_t* rt, struct task_struct *task) | 281 | void __add_release(rt_domain_t* rt, struct task_struct *task) |
132 | { | 282 | { |
133 | TRACE_TASK(task, "add_release(), rel=%llu\n", get_release(task)); | 283 | TRACE_TASK(task, "add_release(), rel=%llu\n", get_release(task)); |
134 | list_add(&tsk_rt(task)->list, &rt->release_queue); | 284 | list_add(&tsk_rt(task)->list, &rt->tobe_released); |
135 | task->rt_param.domain = rt; | 285 | task->rt_param.domain = rt; |
136 | do_without_rqlock(&rt->arm_timers); | 286 | do_without_rqlock(&rt->arm_timer); |
137 | } | 287 | } |
138 | 288 | ||
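get_release_heap() groups pending jobs by exact release time: the release time's quantum number picks one of the 127 hash slots, each slot keeps a short list of release_heaps sorted by time, and jobs with identical release times (for instance on hyperperiod boundaries) share one heap, so a single hrtimer expiry releases them all at once. The user-space model below shows only the slot hashing and again assumes a 1 ms quantum; the real code derives the quantum from tick_period via time2quanta().

```c
#include <stdio.h>

#define RELEASE_QUEUE_SLOTS 127   /* prime, to spread release times evenly */

typedef unsigned long long lt_t;

/* User-space model of time2slot(); 1 ms quantum assumed. */
static unsigned int time2slot(lt_t time_ns)
{
	const lt_t quantum_ns = 1000000ULL;
	return (unsigned int)((time_ns / quantum_ns) % RELEASE_QUEUE_SLOTS);
}

int main(void)
{
	/* the first two releases share a time, so they share one
	 * release_heap; the third maps to the same slot (10) but a
	 * different time, so it gets its own heap on that slot's list */
	lt_t releases[] = { 10000000ULL, 10000000ULL, 137000000ULL };
	int i;

	for (i = 0; i < 3; i++)
		printf("release %llu ns -> slot %u\n",
		       releases[i], time2slot(releases[i]));
	return 0;
}
```

Compared with the old per-task hrtimer, this design arms at most one timer per rt_domain and moves the task-to-heap grouping out of the timer interrupt into the norq callback (arm_release_timer()).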
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c index 2ac14cded9..bebb2c7ba5 100755 --- a/litmus/sched_cedf.c +++ b/litmus/sched_cedf.c | |||
@@ -325,56 +325,57 @@ static noinline void requeue(struct task_struct* task) | |||
325 | __add_ready(edf, task); | 325 | __add_ready(edf, task); |
326 | } | 326 | } |
327 | 327 | ||
328 | static void check_for_preemptions(cedf_domain_t* cedf) | ||
329 | { | ||
330 | cpu_entry_t *last; | ||
331 | struct task_struct *task; | ||
332 | struct list_head *cedf_cpu_queue; | ||
333 | cedf_cpu_queue = &cedf->cedf_cpu_queue; | ||
334 | |||
335 | for(last = list_entry(cedf_cpu_queue->prev, cpu_entry_t, list); | ||
336 | edf_preemption_needed(&cedf->domain, last->linked); | ||
337 | last = list_entry(cedf_cpu_queue->prev, cpu_entry_t, list)) { | ||
338 | /* preemption necessary */ | ||
339 | task = __take_ready(&cedf->domain); | ||
340 | TRACE("check_for_preemptions: task %d linked to %d, state:%d\n", | ||
341 | task->pid, last->cpu, task->state); | ||
342 | if (last->linked) | ||
343 | requeue(last->linked); | ||
344 | link_task_to_cpu(task, last); | ||
345 | preempt(last); | ||
346 | } | ||
347 | |||
348 | } | ||
349 | |||
328 | /* cedf_job_arrival: task is either resumed or released */ | 350 | /* cedf_job_arrival: task is either resumed or released */ |
329 | static noinline void cedf_job_arrival(struct task_struct* task) | 351 | static noinline void cedf_job_arrival(struct task_struct* task) |
330 | { | 352 | { |
331 | cpu_entry_t* last; | ||
332 | cedf_domain_t* cedf; | 353 | cedf_domain_t* cedf; |
333 | rt_domain_t* edf; | 354 | rt_domain_t* edf; |
334 | struct list_head *cedf_cpu_queue; | ||
335 | 355 | ||
336 | BUG_ON(!task); | 356 | BUG_ON(!task); |
337 | 357 | ||
338 | /* Get correct real-time domain. */ | 358 | /* Get correct real-time domain. */ |
339 | cedf = task_cedf(task); | 359 | cedf = task_cedf(task); |
340 | edf = &cedf->domain; | 360 | edf = &cedf->domain; |
341 | cedf_cpu_queue = &cedf->cedf_cpu_queue; | ||
342 | |||
343 | BUG_ON(!cedf); | ||
344 | BUG_ON(!edf); | ||
345 | BUG_ON(!cedf_cpu_queue); | ||
346 | BUG_ON(list_empty(cedf_cpu_queue)); | ||
347 | 361 | ||
348 | /* first queue arriving job */ | 362 | /* first queue arriving job */ |
349 | requeue(task); | 363 | requeue(task); |
350 | 364 | ||
351 | /* then check for any necessary preemptions */ | 365 | /* then check for any necessary preemptions */ |
352 | last = list_entry(cedf_cpu_queue->prev, cpu_entry_t, list); | 366 | check_for_preemptions(cedf); |
353 | if (edf_preemption_needed(edf, last->linked)) { | ||
354 | /* preemption necessary */ | ||
355 | task = __take_ready(edf); | ||
356 | TRACE("job_arrival: task %d linked to %d, state:%d\n", | ||
357 | task->pid, last->cpu, task->state); | ||
358 | if (last->linked) | ||
359 | requeue(last->linked); | ||
360 | |||
361 | link_task_to_cpu(task, last); | ||
362 | preempt(last); | ||
363 | } | ||
364 | } | 367 | } |
365 | 368 | ||
366 | /* check for current job releases */ | 369 | /* check for current job releases */ |
367 | static void cedf_job_release(struct task_struct* t, rt_domain_t* _) | 370 | static void cedf_release_jobs(rt_domain_t* rt, struct heap* tasks) |
368 | { | 371 | { |
369 | cedf_domain_t* cedf = task_cedf(t); | 372 | cedf_domain_t* cedf = container_of(rt, cedf_domain_t, domain); |
370 | unsigned long flags; | 373 | unsigned long flags; |
371 | 374 | ||
372 | BUG_ON(!t); | ||
373 | BUG_ON(!cedf); | ||
374 | |||
375 | spin_lock_irqsave(&cedf->slock, flags); | 375 | spin_lock_irqsave(&cedf->slock, flags); |
376 | sched_trace_job_release(queued); | 376 | |
377 | cedf_job_arrival(t); | 377 | __merge_ready(&cedf->domain, tasks); |
378 | check_for_preemptions(cedf); | ||
378 | spin_unlock_irqrestore(&cedf->slock, flags); | 379 | spin_unlock_irqrestore(&cedf->slock, flags); |
379 | } | 380 | } |
380 | 381 | ||
@@ -678,7 +679,7 @@ static void cedf_domain_init(int first_cpu, int last_cpu) | |||
678 | 679 | ||
679 | /* Initialize cluster domain. */ | 680 | /* Initialize cluster domain. */ |
680 | edf_domain_init(&new_cedf_domain->domain, NULL, | 681 | edf_domain_init(&new_cedf_domain->domain, NULL, |
681 | cedf_job_release); | 682 | cedf_release_jobs); |
682 | new_cedf_domain->first_cpu = first_cpu; | 683 | new_cedf_domain->first_cpu = first_cpu; |
683 | new_cedf_domain->last_cpu = last_cpu; | 684 | new_cedf_domain->last_cpu = last_cpu; |
684 | INIT_LIST_HEAD(&new_cedf_domain->cedf_cpu_queue); | 685 | INIT_LIST_HEAD(&new_cedf_domain->cedf_cpu_queue); |
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index e32a4e8458..171d66c18f 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c | |||
@@ -11,7 +11,6 @@ | |||
11 | #include <linux/spinlock.h> | 11 | #include <linux/spinlock.h> |
12 | #include <linux/percpu.h> | 12 | #include <linux/percpu.h> |
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/list.h> | ||
15 | 14 | ||
16 | #include <litmus/litmus.h> | 15 | #include <litmus/litmus.h> |
17 | #include <litmus/jobs.h> | 16 | #include <litmus/jobs.h> |
@@ -19,6 +18,8 @@ | |||
19 | #include <litmus/edf_common.h> | 18 | #include <litmus/edf_common.h> |
20 | #include <litmus/sched_trace.h> | 19 | #include <litmus/sched_trace.h> |
21 | 20 | ||
21 | #include <litmus/heap.h> | ||
22 | |||
22 | #include <linux/module.h> | 23 | #include <linux/module.h> |
23 | 24 | ||
24 | /* Overview of GSN-EDF operations. | 25 | /* Overview of GSN-EDF operations. |
@@ -94,8 +95,8 @@ typedef struct { | |||
94 | int cpu; | 95 | int cpu; |
95 | struct task_struct* linked; /* only RT tasks */ | 96 | struct task_struct* linked; /* only RT tasks */ |
96 | struct task_struct* scheduled; /* only RT tasks */ | 97 | struct task_struct* scheduled; /* only RT tasks */ |
97 | struct list_head list; | ||
98 | atomic_t will_schedule; /* prevent unneeded IPIs */ | 98 | atomic_t will_schedule; /* prevent unneeded IPIs */ |
99 | struct heap_node* hn; | ||
99 | } cpu_entry_t; | 100 | } cpu_entry_t; |
100 | DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries); | 101 | DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries); |
101 | 102 | ||
@@ -112,39 +113,43 @@ cpu_entry_t* gsnedf_cpus[NR_CPUS]; | |||
112 | #define NO_CPU 0xffffffff | 113 | #define NO_CPU 0xffffffff |
113 | 114 | ||
114 | /* the cpus queue themselves according to priority in here */ | 115 | /* the cpus queue themselves according to priority in here */ |
115 | static LIST_HEAD(gsnedf_cpu_queue); | 116 | static struct heap_node gsnedf_heap_node[NR_CPUS]; |
117 | static struct heap gsnedf_cpu_heap; | ||
116 | 118 | ||
117 | static rt_domain_t gsnedf; | 119 | static rt_domain_t gsnedf; |
118 | #define gsnedf_lock (gsnedf.ready_lock) | 120 | #define gsnedf_lock (gsnedf.ready_lock) |
119 | 121 | ||
122 | |||
123 | static int cpu_lower_prio(struct heap_node *_a, struct heap_node *_b) | ||
124 | { | ||
125 | cpu_entry_t *a, *b; | ||
126 | a = _a->value; | ||
127 | b = _b->value; | ||
128 | /* Note that a and b are inverted: we want the lowest-priority CPU at | ||
129 | * the top of the heap. | ||
130 | */ | ||
131 | return edf_higher_prio(b->linked, a->linked); | ||
132 | } | ||
133 | |||
120 | /* update_cpu_position - Move the cpu entry to the correct place to maintain | 134 | /* update_cpu_position - Move the cpu entry to the correct place to maintain |
121 | * order in the cpu queue. Caller must hold gsnedf lock. | 135 | * order in the cpu queue. Caller must hold gsnedf lock. |
122 | * | ||
123 | * This really should be a heap. | ||
124 | */ | 136 | */ |
125 | static void update_cpu_position(cpu_entry_t *entry) | 137 | static void update_cpu_position(cpu_entry_t *entry) |
126 | { | 138 | { |
127 | cpu_entry_t *other; | 139 | if (likely(heap_node_in_heap(entry->hn))) |
128 | struct list_head *pos; | 140 | heap_delete(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn); |
141 | heap_insert(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn); | ||
142 | } | ||
129 | 143 | ||
130 | if (likely(in_list(&entry->list))) | 144 | /* caller must hold gsnedf lock */ |
131 | list_del(&entry->list); | 145 | static cpu_entry_t* lowest_prio_cpu(void) |
132 | /* if we do not execute real-time jobs we just move | 146 | { |
133 | * to the end of the queue | 147 | struct heap_node* hn; |
134 | */ | 148 | hn = heap_peek(cpu_lower_prio, &gsnedf_cpu_heap); |
135 | if (entry->linked) { | 149 | return hn->value; |
136 | list_for_each(pos, &gsnedf_cpu_queue) { | ||
137 | other = list_entry(pos, cpu_entry_t, list); | ||
138 | if (edf_higher_prio(entry->linked, other->linked)) { | ||
139 | __list_add(&entry->list, pos->prev, pos); | ||
140 | return; | ||
141 | } | ||
142 | } | ||
143 | } | ||
144 | /* if we get this far we have the lowest priority job */ | ||
145 | list_add_tail(&entry->list, &gsnedf_cpu_queue); | ||
146 | } | 150 | } |
147 | 151 | ||
152 | |||
148 | /* link_task_to_cpu - Update the link of a CPU. | 153 | /* link_task_to_cpu - Update the link of a CPU. |
149 | * Handles the case where the to-be-linked task is already | 154 | * Handles the case where the to-be-linked task is already |
150 | * scheduled on a different CPU. | 155 | * scheduled on a different CPU. |
@@ -285,41 +290,44 @@ static noinline void requeue(struct task_struct* task) | |||
285 | __add_ready(&gsnedf, task); | 290 | __add_ready(&gsnedf, task); |
286 | } | 291 | } |
287 | 292 | ||
288 | /* gsnedf_job_arrival: task is either resumed or released */ | 293 | |
289 | static noinline void gsnedf_job_arrival(struct task_struct* task) | 294 | /* check for any necessary preemptions */ |
295 | static void check_for_preemptions(void) | ||
290 | { | 296 | { |
297 | struct task_struct *task; | ||
291 | cpu_entry_t* last; | 298 | cpu_entry_t* last; |
292 | 299 | ||
293 | BUG_ON(list_empty(&gsnedf_cpu_queue)); | 300 | for(last = lowest_prio_cpu(); |
294 | BUG_ON(!task); | 301 | edf_preemption_needed(&gsnedf, last->linked); |
295 | 302 | last = lowest_prio_cpu()) { | |
296 | /* first queue arriving job */ | ||
297 | requeue(task); | ||
298 | |||
299 | /* then check for any necessary preemptions */ | ||
300 | last = list_entry(gsnedf_cpu_queue.prev, cpu_entry_t, list); | ||
301 | if (edf_preemption_needed(&gsnedf, last->linked)) { | ||
302 | /* preemption necessary */ | 303 | /* preemption necessary */ |
303 | task = __take_ready(&gsnedf); | 304 | task = __take_ready(&gsnedf); |
304 | TRACE("job_arrival: attempting to link task %d to %d\n", | 305 | TRACE("check_for_preemptions: attempting to link task %d to %d\n", |
305 | task->pid, last->cpu); | 306 | task->pid, last->cpu); |
306 | if (last->linked) | 307 | if (last->linked) |
307 | requeue(last->linked); | 308 | requeue(last->linked); |
308 | |||
309 | link_task_to_cpu(task, last); | 309 | link_task_to_cpu(task, last); |
310 | preempt(last); | 310 | preempt(last); |
311 | } | 311 | } |
312 | } | 312 | } |
313 | 313 | ||
314 | /* check for current job releases */ | 314 | /* gsnedf_job_arrival: task is either resumed or released */ |
315 | static void gsnedf_job_release(struct task_struct* t, rt_domain_t* _) | 315 | static noinline void gsnedf_job_arrival(struct task_struct* task) |
316 | { | ||
317 | BUG_ON(!task); | ||
318 | |||
319 | requeue(task); | ||
320 | check_for_preemptions(); | ||
321 | } | ||
322 | |||
323 | static void gsnedf_release_jobs(rt_domain_t* rt, struct heap* tasks) | ||
316 | { | 324 | { |
317 | unsigned long flags; | 325 | unsigned long flags; |
318 | 326 | ||
319 | spin_lock_irqsave(&gsnedf_lock, flags); | 327 | spin_lock_irqsave(&gsnedf_lock, flags); |
320 | 328 | ||
321 | sched_trace_job_release(queued); | 329 | __merge_ready(rt, tasks); |
322 | gsnedf_job_arrival(t); | 330 | check_for_preemptions(); |
323 | 331 | ||
324 | spin_unlock_irqrestore(&gsnedf_lock, flags); | 332 | spin_unlock_irqrestore(&gsnedf_lock, flags); |
325 | } | 333 | } |
@@ -712,6 +720,7 @@ static int __init init_gsn_edf(void) | |||
712 | int cpu; | 720 | int cpu; |
713 | cpu_entry_t *entry; | 721 | cpu_entry_t *entry; |
714 | 722 | ||
723 | heap_init(&gsnedf_cpu_heap); | ||
715 | /* initialize CPU state */ | 724 | /* initialize CPU state */ |
716 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | 725 | for (cpu = 0; cpu < NR_CPUS; cpu++) { |
717 | entry = &per_cpu(gsnedf_cpu_entries, cpu); | 726 | entry = &per_cpu(gsnedf_cpu_entries, cpu); |
@@ -720,10 +729,11 @@ static int __init init_gsn_edf(void) | |||
720 | entry->linked = NULL; | 729 | entry->linked = NULL; |
721 | entry->scheduled = NULL; | 730 | entry->scheduled = NULL; |
722 | entry->cpu = cpu; | 731 | entry->cpu = cpu; |
723 | INIT_LIST_HEAD(&entry->list); | 732 | entry->hn = &gsnedf_heap_node[cpu]; |
733 | heap_node_init(&entry->hn, entry); | ||
734 | /*heap_insert(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn);*/ | ||
724 | } | 735 | } |
725 | 736 | edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs); | |
726 | edf_domain_init(&gsnedf, NULL, gsnedf_job_release); | ||
727 | return register_sched_plugin(&gsn_edf_plugin); | 737 | return register_sched_plugin(&gsn_edf_plugin); |
728 | } | 738 | } |
729 | 739 | ||
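GSN-EDF swaps its sorted CPU list for a heap ordered by the priority of each CPU's linked job. The comparator deliberately inverts its arguments so that the top of the heap is always the CPU running the lowest-priority job (or an idle CPU), i.e. the cheapest preemption target; lowest_prio_cpu() is then just a heap_peek() instead of the old list walk. The reduced user-space model below illustrates that inversion; the toy_* names are assumptions and edf_higher_prio() is collapsed to a deadline comparison.

```c
#include <stdbool.h>
#include <stddef.h>

struct toy_task { unsigned long long deadline; };

struct toy_cpu {
	int cpu;
	struct toy_task *linked;   /* NULL means the CPU is idle */
};

/* stand-in for edf_higher_prio(): a missing job never wins */
static bool toy_edf_higher_prio(const struct toy_task *a,
                                const struct toy_task *b)
{
	return a && (!b || a->deadline < b->deadline);
}

/* comparator given to the heap: true if 'a' belongs above 'b' */
static bool toy_cpu_lower_prio(const struct toy_cpu *a,
                               const struct toy_cpu *b)
{
	/* arguments swapped on purpose: the CPU whose linked job has the
	 * LOWER priority floats to the top of the heap */
	return toy_edf_higher_prio(b->linked, a->linked);
}

int main(void)
{
	struct toy_task early = { 10 }, late = { 99 };
	struct toy_cpu c0 = { 0, &early }, c1 = { 1, &late }, c2 = { 2, NULL };

	/* the idle c2 outranks c1, and c1 (late deadline) outranks c0, so a
	 * heap built with this comparator surfaces c2, then c1, then c0 */
	return (toy_cpu_lower_prio(&c2, &c1) &&
	        toy_cpu_lower_prio(&c1, &c0)) ? 0 : 1;
}
```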
diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c index ad082a3a6f..e9944121f2 100755 --- a/litmus/sched_pfair.c +++ b/litmus/sched_pfair.c | |||
@@ -21,14 +21,6 @@ | |||
21 | 21 | ||
22 | #include <litmus/heap.h> | 22 | #include <litmus/heap.h> |
23 | 23 | ||
24 | /* Tick period is used to convert ns-specified execution | ||
25 | * costs and periods into tick-based equivalents. | ||
26 | */ | ||
27 | extern ktime_t tick_period; | ||
28 | |||
29 | /* make the unit explicit */ | ||
30 | typedef unsigned long quanta_t; | ||
31 | |||
32 | struct subtask { | 24 | struct subtask { |
33 | /* measured in quanta relative to job release */ | 25 | /* measured in quanta relative to job release */ |
34 | quanta_t release; | 26 | quanta_t release; |
@@ -160,20 +152,6 @@ static quanta_t cur_group_deadline(struct task_struct* t) | |||
160 | return gdl; | 152 | return gdl; |
161 | } | 153 | } |
162 | 154 | ||
163 | enum round { | ||
164 | FLOOR, | ||
165 | CEIL | ||
166 | }; | ||
167 | |||
168 | static quanta_t time2quanta(lt_t time, enum round round) | ||
169 | { | ||
170 | s64 quantum_length = ktime_to_ns(tick_period); | ||
171 | |||
172 | if (do_div(time, quantum_length) && round == CEIL) | ||
173 | time++; | ||
174 | return (quanta_t) time; | ||
175 | } | ||
176 | |||
177 | static int pfair_higher_prio(struct task_struct* first, | 155 | static int pfair_higher_prio(struct task_struct* first, |
178 | struct task_struct* second) | 156 | struct task_struct* second) |
179 | { | 157 | { |
@@ -244,7 +222,7 @@ static void pfair_add_release(struct task_struct* t) | |||
244 | /* pull released tasks from the release queue */ | 222 | /* pull released tasks from the release queue */ |
245 | static void poll_releases(quanta_t time) | 223 | static void poll_releases(quanta_t time) |
246 | { | 224 | { |
247 | heap_union(pfair_ready_order, &pfair.ready_queue, relq(time)); | 225 | __merge_ready(&pfair, relq(time)); |
248 | merge_time = time; | 226 | merge_time = time; |
249 | } | 227 | } |
250 | 228 | ||
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c index 0e9c9dd80a..71dcfa1fcd 100644 --- a/litmus/sched_psn_edf.c +++ b/litmus/sched_psn_edf.c | |||
@@ -47,7 +47,7 @@ DEFINE_PER_CPU(psnedf_domain_t, psnedf_domains); | |||
47 | 47 | ||
48 | static void psnedf_domain_init(psnedf_domain_t* pedf, | 48 | static void psnedf_domain_init(psnedf_domain_t* pedf, |
49 | check_resched_needed_t check, | 49 | check_resched_needed_t check, |
50 | release_job_t release, | 50 | release_jobs_t release, |
51 | int cpu) | 51 | int cpu) |
52 | { | 52 | { |
53 | edf_domain_init(&pedf->domain, check, release); | 53 | edf_domain_init(&pedf->domain, check, release); |