author | Bjoern B. Brandenburg <bbb@cs.unc.edu> | 2007-08-23 17:55:08 -0400
---|---|---
committer | Bjoern B. Brandenburg <bbb@cs.unc.edu> | 2007-08-23 17:55:08 -0400
commit | 8b6d4e51e23c3c4137c2ab5c0352dc8233dfa3f9 (patch) |
tree | 28949609d34d45f91f0dc9923879c0105bcb705d |
parent | c6b0b69c55658bb0c88433444dc288f91b0cb357 (diff) |
Introduce rt_domain_t.
-rw-r--r-- | include/linux/edf_common.h | 57 |
-rw-r--r-- | include/linux/list.h | 4 |
-rw-r--r-- | include/linux/rt_domain.h | 69 |
-rw-r--r-- | kernel/Makefile | 2 |
-rw-r--r-- | kernel/edf_common.c | 196 |
-rw-r--r-- | kernel/rt_domain.c | 170 |
-rw-r--r-- | kernel/sched_edf_hsb.c | 69 |
-rw-r--r-- | kernel/sched_global_edf.c | 19 |
-rw-r--r-- | kernel/sched_part_edf.c | 32 |
9 files changed, 327 insertions, 291 deletions
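The patch generalizes the EDF-specific `edf_domain_t` into a policy-agnostic `rt_domain_t`: the ready queue is now kept sorted by a pluggable `list_cmp_t` callback, the release queue by release time, and only the EDF-specific pieces (`edf_ready_order`, `edf_release_at`, `edf_preemption_needed`, `edf_prepare_for_next_period`) stay in `edf_common`. The sketch below is a minimal userspace model of that abstraction — invented names (`job_t`, `domain_t`, `insert_sorted`), singly linked lists, no locking — meant only to illustrate the ready/release split and the `order`/`check_resched` callbacks, not to reproduce the kernel code.

```c
#include <stdio.h>

/* Toy model of rt_domain_t: a ready queue kept sorted by a pluggable
 * comparator plus a release queue sorted by release time. */
typedef struct job {
	int id;
	long release;                    /* when the job becomes ready     */
	long deadline;                   /* what the EDF ordering inspects */
	struct job *next;
} job_t;

struct domain;
typedef int (*order_fn)(job_t *a, job_t *b);      /* cf. list_cmp_t             */
typedef void (*resched_fn)(struct domain *d);     /* cf. check_resched_needed_t */

typedef struct domain {
	job_t *ready;                    /* sorted by ->order       */
	job_t *releaseq;                 /* sorted by release time  */
	order_fn order;
	resched_fn check_resched;
} domain_t;

/* Ordered insert: place j in front of the first entry it should precede. */
static void insert_sorted(job_t **head, job_t *j, order_fn before)
{
	job_t **pos = head;
	while (*pos && !before(j, *pos))
		pos = &(*pos)->next;
	j->next = *pos;
	*pos = j;
}

static int edf_order(job_t *a, job_t *b)     { return a->deadline < b->deadline; }
static int release_order(job_t *a, job_t *b) { return a->release  < b->release; }
static void kick(domain_t *d) { (void)d; printf("check_resched: ready queue has a new head\n"); }

/* cf. __add_ready(): trigger the resched check only when the new job
 * ends up at the head of the ready queue. */
static void add_ready(domain_t *d, job_t *j)
{
	insert_sorted(&d->ready, j, d->order);
	if (d->ready == j)
		d->check_resched(d);
}

/* cf. __add_release() and __release_pending() */
static void add_release(domain_t *d, job_t *j) { insert_sorted(&d->releaseq, j, release_order); }
static void release_pending(domain_t *d, long now)
{
	while (d->releaseq && d->releaseq->release <= now) {
		job_t *j = d->releaseq;
		d->releaseq = j->next;
		add_ready(d, j);
	}
}

int main(void)
{
	domain_t dom = { NULL, NULL, edf_order, kick };
	job_t a = { 1, 5, 20, NULL }, b = { 2, 3, 10, NULL };

	add_release(&dom, &a);
	add_release(&dom, &b);
	release_pending(&dom, 6);   /* both released; job 2 (deadline 10) heads the queue */
	for (job_t *j = dom.ready; j; j = j->next)
		printf("ready: job %d, deadline %ld\n", j->id, j->deadline);
	return 0;
}
```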
diff --git a/include/linux/edf_common.h b/include/linux/edf_common.h
index 6b0eb2ff17..f940308fdd 100644
--- a/include/linux/edf_common.h
+++ b/include/linux/edf_common.h
@@ -9,68 +9,27 @@ | |||
9 | #ifndef __UNC_EDF_COMMON_H__ | 9 | #ifndef __UNC_EDF_COMMON_H__ |
10 | #define __UNC_EDF_COMMON_H__ | 10 | #define __UNC_EDF_COMMON_H__ |
11 | 11 | ||
12 | struct _edf_domain; | 12 | #include <linux/rt_domain.h> |
13 | 13 | ||
14 | typedef int (*edf_check_resched_needed_t)(struct _edf_domain *edf); | ||
15 | typedef struct _edf_domain { | ||
16 | /* runnable rt tasks are in here */ | ||
17 | rwlock_t ready_lock; | ||
18 | struct list_head ready_queue; | ||
19 | 14 | ||
20 | /* real-time tasks waiting for release are in here */ | 15 | void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched); |
21 | spinlock_t release_lock; | ||
22 | struct list_head release_queue; | ||
23 | |||
24 | /* how do we check if we need to kick another CPU? */ | ||
25 | edf_check_resched_needed_t check_resched; | ||
26 | } edf_domain_t; | ||
27 | |||
28 | #define next_ready(edf) \ | ||
29 | (list_entry((edf)->ready_queue.next, struct task_struct, rt_list)) | ||
30 | |||
31 | void edf_domain_init(edf_domain_t *edf, edf_check_resched_needed_t f); | ||
32 | 16 | ||
33 | int edf_higher_prio(struct task_struct* first, | 17 | int edf_higher_prio(struct task_struct* first, |
34 | struct task_struct* second); | 18 | struct task_struct* second); |
35 | 19 | ||
36 | void __add_ready(edf_domain_t* edf, struct task_struct *new); | 20 | int edf_ready_order(struct list_head* a, struct list_head* b); |
37 | void __add_release(edf_domain_t* edf, struct task_struct *task); | ||
38 | |||
39 | struct task_struct* __take_ready(edf_domain_t* edf); | ||
40 | struct task_struct* __peek_ready(edf_domain_t* edf); | ||
41 | 21 | ||
22 | void edf_release_at(struct task_struct *t, jiffie_t start); | ||
23 | #define edf_release_now(t) edf_release_at(t, jiffies) | ||
42 | 24 | ||
43 | void try_release_pending(edf_domain_t* edf); | 25 | int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t); |
44 | void __release_pending(edf_domain_t* edf); | ||
45 | void __prepare_new_release(struct task_struct *t, jiffie_t start); | ||
46 | #define prepare_new_release(t) __prepare_new_release(t, jiffies) | ||
47 | void prepare_for_next_period(struct task_struct *t); | ||
48 | void prepare_new_releases(edf_domain_t *edf, jiffie_t start); | ||
49 | void __prepare_new_releases(edf_domain_t *edf, jiffie_t start); | ||
50 | int preemption_needed(edf_domain_t* edf, struct task_struct *t); | ||
51 | long edf_sleep_next_period(void); | 26 | long edf_sleep_next_period(void); |
52 | 27 | ||
28 | void edf_prepare_for_next_period(struct task_struct *t); | ||
29 | |||
53 | #define job_completed(t) (!is_be(t) && \ | 30 | #define job_completed(t) (!is_be(t) && \ |
54 | (t)->rt_param.times.exec_time == (t)->rt_param.basic_params.exec_cost) | 31 | (t)->rt_param.times.exec_time == (t)->rt_param.basic_params.exec_cost) |
55 | 32 | ||
56 | static inline void add_ready(edf_domain_t* edf, struct task_struct *new) | ||
57 | { | ||
58 | unsigned long flags; | ||
59 | /* first we need the write lock for edf_ready_queue */ | ||
60 | write_lock_irqsave(&edf->ready_lock, flags); | ||
61 | __add_ready(edf, new); | ||
62 | write_unlock_irqrestore(&edf->ready_lock, flags); | ||
63 | } | ||
64 | |||
65 | static inline void add_release(edf_domain_t* edf, struct task_struct *task) | ||
66 | { | ||
67 | unsigned long flags; | ||
68 | /* first we need the write lock for edf_ready_queue */ | ||
69 | spin_lock_irqsave(&edf->release_lock, flags); | ||
70 | __add_release(edf, task); | ||
71 | spin_unlock_irqrestore(&edf->release_lock, flags); | ||
72 | } | ||
73 | |||
74 | int edf_set_hp_task(struct pi_semaphore *sem); | 33 | int edf_set_hp_task(struct pi_semaphore *sem); |
75 | int edf_set_hp_cpu_task(struct pi_semaphore *sem, int cpu); | 34 | int edf_set_hp_cpu_task(struct pi_semaphore *sem, int cpu); |
76 | 35 | ||
diff --git a/include/linux/list.h b/include/linux/list.h
index e7d758c5fa..319c5eda13 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -903,7 +903,7 @@ typedef int (*list_cmp_t)(struct list_head*, struct list_head*); | |||
903 | 903 | ||
904 | static inline unsigned int list_insert(struct list_head* new, | 904 | static inline unsigned int list_insert(struct list_head* new, |
905 | struct list_head* head, | 905 | struct list_head* head, |
906 | list_cmp_t is_less) | 906 | list_cmp_t order_before) |
907 | { | 907 | { |
908 | struct list_head *pos; | 908 | struct list_head *pos; |
909 | unsigned int passed = 0; | 909 | unsigned int passed = 0; |
@@ -912,7 +912,7 @@ static inline unsigned int list_insert(struct list_head* new, | |||
912 | 912 | ||
913 | /* find a spot where the new entry is less than the next */ | 913 | /* find a spot where the new entry is less than the next */ |
914 | list_for_each(pos, head) { | 914 | list_for_each(pos, head) { |
915 | if (unlikely(is_less(new, pos))) { | 915 | if (unlikely(order_before(new, pos))) { |
916 | /* pos is not less than new, thus insert here */ | 916 | /* pos is not less than new, thus insert here */ |
917 | __list_add(new, pos->prev, pos); | 917 | __list_add(new, pos->prev, pos); |
918 | goto out; | 918 | goto out; |
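The rename from `is_less` to `order_before` reflects that the callback is a general "a should be queued before b" predicate rather than a numeric comparison. Because `list_insert()` places the new entry in front of the first element for which the predicate holds, entries that compare equal keep their insertion (FIFO) order. A minimal userspace illustration of that stability property, assuming a plain singly linked list (the kernel version works on `struct list_head` and also reports how many entries it passed):

```c
#include <assert.h>
#include <stdio.h>

/* Toy element: a key plus an arrival tag so we can observe FIFO stability. */
struct elem { int key; char tag; struct elem *next; };

/* Predicate in the spirit of order_before(): should a be queued before b? */
static int order_before(struct elem *a, struct elem *b)
{
	return a->key < b->key;   /* strictly before only on a smaller key */
}

/* Insert in front of the first element for which order_before() holds,
 * as list_insert() does; equal keys therefore stay in arrival order. */
static void ordered_insert(struct elem **head, struct elem *new)
{
	struct elem **pos = head;
	while (*pos && !order_before(new, *pos))
		pos = &(*pos)->next;
	new->next = *pos;
	*pos = new;
}

int main(void)
{
	struct elem a = {2, 'a'}, b = {2, 'b'}, c = {1, 'c'};
	struct elem *head = NULL;

	ordered_insert(&head, &a);
	ordered_insert(&head, &b);   /* equal key: lands after a  */
	ordered_insert(&head, &c);   /* smaller key: new head     */

	assert(head == &c && c.next == &a && a.next == &b);
	for (struct elem *e = head; e; e = e->next)
		printf("%d%c ", e->key, e->tag);
	printf("\n");                 /* prints: 1c 2a 2b */
	return 0;
}
```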
diff --git a/include/linux/rt_domain.h b/include/linux/rt_domain.h
new file mode 100644
index 0000000000..7157a04ac2
--- /dev/null
+++ b/include/linux/rt_domain.h
@@ -0,0 +1,69 @@ | |||
1 | /* CLEANUP: Add comments and make it less messy. | ||
2 | * | ||
3 | */ | ||
4 | |||
5 | #ifndef __UNC_RT_DOMAIN_H__ | ||
6 | #define __UNC_RT_DOMAIN_H__ | ||
7 | |||
8 | struct _rt_domain; | ||
9 | |||
10 | typedef int (*check_resched_needed_t)(struct _rt_domain *rt); | ||
11 | typedef void (*release_at_t)(struct task_struct *t, jiffie_t start); | ||
12 | |||
13 | typedef struct _rt_domain { | ||
14 | /* runnable rt tasks are in here */ | ||
15 | rwlock_t ready_lock; | ||
16 | struct list_head ready_queue; | ||
17 | |||
18 | /* real-time tasks waiting for release are in here */ | ||
19 | spinlock_t release_lock; | ||
20 | struct list_head release_queue; | ||
21 | |||
22 | /* how do we check if we need to kick another CPU? */ | ||
23 | check_resched_needed_t check_resched; | ||
24 | |||
25 | /* how are tasks ordered in the ready queue? */ | ||
26 | list_cmp_t order; | ||
27 | } rt_domain_t; | ||
28 | |||
29 | #define next_ready(rt) \ | ||
30 | (list_entry((rt)->ready_queue.next, struct task_struct, rt_list)) | ||
31 | |||
32 | #define ready_jobs_pending(rt) \ | ||
33 | (!list_empty(&(rt)->ready_queue)) | ||
34 | |||
35 | void rt_domain_init(rt_domain_t *rt, check_resched_needed_t f, | ||
36 | list_cmp_t order); | ||
37 | |||
38 | void __add_ready(rt_domain_t* rt, struct task_struct *new); | ||
39 | void __add_release(rt_domain_t* rt, struct task_struct *task); | ||
40 | |||
41 | struct task_struct* __take_ready(rt_domain_t* rt); | ||
42 | struct task_struct* __peek_ready(rt_domain_t* rt); | ||
43 | |||
44 | void try_release_pending(rt_domain_t* rt); | ||
45 | void __release_pending(rt_domain_t* rt); | ||
46 | |||
47 | void rerelease_all(rt_domain_t *rt, release_at_t release); | ||
48 | void __rerelease_all(rt_domain_t *rt, release_at_t release); | ||
49 | |||
50 | static inline void add_ready(rt_domain_t* rt, struct task_struct *new) | ||
51 | { | ||
52 | unsigned long flags; | ||
53 | /* first we need the write lock for rt_ready_queue */ | ||
54 | write_lock_irqsave(&rt->ready_lock, flags); | ||
55 | __add_ready(rt, new); | ||
56 | write_unlock_irqrestore(&rt->ready_lock, flags); | ||
57 | } | ||
58 | |||
59 | static inline void add_release(rt_domain_t* rt, struct task_struct *task) | ||
60 | { | ||
61 | unsigned long flags; | ||
62 | /* first we need the write lock for rt_ready_queue */ | ||
63 | spin_lock_irqsave(&rt->release_lock, flags); | ||
64 | __add_release(rt, task); | ||
65 | spin_unlock_irqrestore(&rt->release_lock, flags); | ||
66 | } | ||
67 | |||
68 | |||
69 | #endif | ||
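rt_domain.h keeps the locking convention of the old edf_common.h: functions with a double-underscore prefix (`__add_ready`, `__take_ready`, `__release_pending`, ...) assume the caller already holds the relevant lock, while the un-prefixed inline wrappers acquire it — a reader/writer lock for the ready queue, a spinlock for the release queue. Below is a minimal userspace sketch of that wrapper pattern using a POSIX rwlock; `queue_t` and the helpers are invented stand-ins, and the kernel code of course uses `rwlock_t`/`spinlock_t` with IRQ-safe variants.

```c
#include <pthread.h>
#include <stdio.h>

/* Hypothetical queue protected by a reader/writer lock. */
typedef struct {
	pthread_rwlock_t ready_lock;
	int ready[16];
	int n;
} queue_t;

/* __add_ready-style helper: caller must already hold ready_lock for writing. */
static void __q_add(queue_t *q, int job)
{
	q->ready[q->n++] = job;
}

/* add_ready-style wrapper: takes the lock, then calls the __ helper,
 * mirroring the inline wrappers in include/linux/rt_domain.h. */
static void q_add(queue_t *q, int job)
{
	pthread_rwlock_wrlock(&q->ready_lock);
	__q_add(q, job);
	pthread_rwlock_unlock(&q->ready_lock);
}

int main(void)
{
	queue_t q = { .ready_lock = PTHREAD_RWLOCK_INITIALIZER, .n = 0 };

	q_add(&q, 42);                        /* normal path: wrapper locks for us  */

	pthread_rwlock_wrlock(&q.ready_lock); /* batched path: lock once ...        */
	__q_add(&q, 43);                      /* ... and use the __ variants inside */
	__q_add(&q, 44);
	pthread_rwlock_unlock(&q.ready_lock);

	printf("%d jobs queued\n", q.n);
	return 0;
}
```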
diff --git a/kernel/Makefile b/kernel/Makefile
index ce9dfa08b9..1b6957b160 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -13,7 +13,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \ | |||
13 | edf_common.o fifo_common.o pfair_common.o\ | 13 | edf_common.o fifo_common.o pfair_common.o\ |
14 | sched_global_edf.o sched_part_edf.o sched_edf_hsb.o sched_pfair.o \ | 14 | sched_global_edf.o sched_part_edf.o sched_edf_hsb.o sched_pfair.o \ |
15 | sched_gsn_edf.o sched_psn_edf.o litmus_sem.o \ | 15 | sched_gsn_edf.o sched_psn_edf.o litmus_sem.o \ |
16 | trace.o ft_event.o | 16 | trace.o ft_event.o rt_domain.o |
17 | 17 | ||
18 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | 18 | obj-$(CONFIG_STACKTRACE) += stacktrace.o |
19 | obj-y += time/ | 19 | obj-y += time/ |
diff --git a/kernel/edf_common.c b/kernel/edf_common.c
index fa83450fe0..065e798a8a 100644
--- a/kernel/edf_common.c
+++ b/kernel/edf_common.c
@@ -12,26 +12,8 @@ | |||
12 | #include <linux/sched_plugin.h> | 12 | #include <linux/sched_plugin.h> |
13 | #include <linux/sched_trace.h> | 13 | #include <linux/sched_trace.h> |
14 | 14 | ||
15 | #include <linux/edf_common.h> | ||
16 | |||
17 | |||
18 | static int dummy_resched(edf_domain_t *edf) | ||
19 | { | ||
20 | return 0; | ||
21 | } | ||
22 | |||
23 | void edf_domain_init(edf_domain_t *edf, edf_check_resched_needed_t f) | ||
24 | { | ||
25 | BUG_ON(!edf); | ||
26 | if (!f) | ||
27 | f = dummy_resched; | ||
28 | INIT_LIST_HEAD(&edf->ready_queue); | ||
29 | INIT_LIST_HEAD(&edf->release_queue); | ||
30 | edf->ready_lock = RW_LOCK_UNLOCKED; | ||
31 | edf->release_lock = SPIN_LOCK_UNLOCKED; | ||
32 | edf->check_resched = f; | ||
33 | } | ||
34 | 15 | ||
16 | #include <linux/edf_common.h> | ||
35 | 17 | ||
36 | /* edf_higher_prio - returns true if first has a higher EDF priority | 18 | /* edf_higher_prio - returns true if first has a higher EDF priority |
37 | * than second. Deadline ties are broken by PID. | 19 | * than second. Deadline ties are broken by PID. |
@@ -78,131 +60,27 @@ int edf_higher_prio(struct task_struct* first, | |||
78 | !second->rt_param.inh_task))); | 60 | !second->rt_param.inh_task))); |
79 | } | 61 | } |
80 | 62 | ||
81 | 63 | int edf_ready_order(struct list_head* a, struct list_head* b) | |
82 | /* add_ready - add a real-time task to the edf ready queue. It must be runnable. | ||
83 | * @new: the newly released task | ||
84 | */ | ||
85 | void __add_ready(edf_domain_t* edf, struct task_struct *new) | ||
86 | { | ||
87 | struct list_head *pos; | ||
88 | struct task_struct *queued; | ||
89 | unsigned int passed = 0; | ||
90 | |||
91 | BUG_ON(!new); | ||
92 | TRACE("edf: adding %s/%d (%u, %u) to ready queue\n", | ||
93 | new->comm, new->pid, get_exec_cost(new), get_rt_period(new)); | ||
94 | |||
95 | /* find a spot where our deadline is earlier than the next */ | ||
96 | list_for_each(pos, &edf->ready_queue) { | ||
97 | queued = list_entry(pos, struct task_struct, rt_list); | ||
98 | if (unlikely(edf_higher_prio(new, queued))) { | ||
99 | /* the task at pos has a later deadline */ | ||
100 | /* insert the new task in front of it */ | ||
101 | __list_add(&new->rt_list, pos->prev, pos); | ||
102 | goto out; | ||
103 | } | ||
104 | passed++; | ||
105 | } | ||
106 | /* if we get to this point either the list is empty or new has the | ||
107 | * lowest priority. Let's add it to the end. */ | ||
108 | list_add_tail(&new->rt_list, &edf->ready_queue); | ||
109 | out: | ||
110 | if (!passed) | ||
111 | edf->check_resched(edf); | ||
112 | } | ||
113 | |||
114 | struct task_struct* __take_ready(edf_domain_t* edf) | ||
115 | { | ||
116 | struct task_struct *t = __peek_ready(edf); | ||
117 | |||
118 | /* kick it out of the ready list */ | ||
119 | if (t) | ||
120 | list_del(&t->rt_list); | ||
121 | return t; | ||
122 | } | ||
123 | |||
124 | |||
125 | struct task_struct* __peek_ready(edf_domain_t* edf) | ||
126 | { | ||
127 | struct task_struct *t = NULL; | ||
128 | /* either not yet released, preempted, or non-rt */ | ||
129 | if (!list_empty(&edf->ready_queue)) | ||
130 | /* take next rt task */ | ||
131 | t = list_entry(edf->ready_queue.next, struct task_struct, | ||
132 | rt_list); | ||
133 | return t; | ||
134 | } | ||
135 | |||
136 | |||
137 | /* add_release - add a real-time task to the edf release queue. | ||
138 | * @task: the sleeping task | ||
139 | */ | ||
140 | void __add_release(edf_domain_t* edf, struct task_struct *task) | ||
141 | { | ||
142 | struct list_head *pos; | ||
143 | struct task_struct *queued; | ||
144 | |||
145 | BUG_ON(!task); | ||
146 | /* first we need the lock for edf_release_queue */ | ||
147 | TRACE("edf: adding %s/%d (%u, %u) to release queue\n", | ||
148 | task->comm, task->pid, get_exec_cost(task), get_rt_period(task)); | ||
149 | |||
150 | /* find a spot where our deadline is earlier than the next */ | ||
151 | list_for_each_prev(pos, &edf->release_queue) { | ||
152 | queued = list_entry(pos, struct task_struct, rt_list); | ||
153 | if ((unlikely(earlier_release(queued, task)))) { | ||
154 | /* the task at pos has an earlier release */ | ||
155 | /* insert the new task in behind it */ | ||
156 | __list_add(&task->rt_list, pos, pos->next); | ||
157 | return; | ||
158 | } | ||
159 | } | ||
160 | /* if we get to this point either the list is empty or task has the | ||
161 | * earliest release. Let's add it to the front. */ | ||
162 | list_add(&task->rt_list, &edf->release_queue); | ||
163 | } | ||
164 | |||
165 | void __release_pending(edf_domain_t* edf) | ||
166 | { | 64 | { |
167 | struct list_head *pos, *save; | 65 | return edf_higher_prio( |
168 | struct task_struct *queued; | 66 | list_entry(a, struct task_struct, rt_list), |
169 | list_for_each_safe(pos, save, &edf->release_queue) { | 67 | list_entry(b, struct task_struct, rt_list)); |
170 | queued = list_entry(pos, struct task_struct, rt_list); | ||
171 | if (likely(is_released(queued))) { | ||
172 | /* this one is ready to go*/ | ||
173 | list_del(pos); | ||
174 | set_rt_flags(queued, RT_F_RUNNING); | ||
175 | |||
176 | sched_trace_job_release(queued); | ||
177 | |||
178 | /* now it can be picked up */ | ||
179 | barrier(); | ||
180 | add_ready(edf, queued); | ||
181 | } | ||
182 | else | ||
183 | /* the release queue is ordered */ | ||
184 | break; | ||
185 | } | ||
186 | } | 68 | } |
187 | 69 | ||
188 | void try_release_pending(edf_domain_t* edf) | 70 | void edf_release_at(struct task_struct *t, jiffie_t start) |
189 | { | 71 | { |
190 | unsigned long flags; | ||
191 | |||
192 | if (spin_trylock_irqsave(&edf->release_lock, flags)) { | ||
193 | __release_pending(edf); | ||
194 | spin_unlock_irqrestore(&edf->release_lock, flags); | ||
195 | } | ||
196 | } | ||
197 | |||
198 | void __prepare_new_release(struct task_struct *t, jiffie_t start) { | ||
199 | t->rt_param.times.deadline = start; | 72 | t->rt_param.times.deadline = start; |
200 | t->rt_param.stats.nontardy_jobs_ctr = 0xf0000000; | 73 | t->rt_param.stats.nontardy_jobs_ctr = 0xf0000000; |
201 | prepare_for_next_period(t); | 74 | edf_prepare_for_next_period(t); |
202 | set_rt_flags(t, RT_F_RUNNING); | 75 | set_rt_flags(t, RT_F_RUNNING); |
203 | } | 76 | } |
204 | 77 | ||
205 | void prepare_for_next_period(struct task_struct *t) | 78 | void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched) |
79 | { | ||
80 | rt_domain_init(rt, resched, edf_ready_order); | ||
81 | } | ||
82 | |||
83 | void edf_prepare_for_next_period(struct task_struct *t) | ||
206 | { | 84 | { |
207 | BUG_ON(!t); | 85 | BUG_ON(!t); |
208 | /* update tardy job ctr */ | 86 | /* update tardy job ctr */ |
@@ -220,56 +98,14 @@ void prepare_for_next_period(struct task_struct *t) | |||
220 | t->first_time_slice = 0; | 98 | t->first_time_slice = 0; |
221 | } | 99 | } |
222 | 100 | ||
223 | void prepare_new_releases(edf_domain_t *edf, jiffie_t start) | ||
224 | { | ||
225 | unsigned long flags; | ||
226 | |||
227 | spin_lock_irqsave(&edf->release_lock, flags); | ||
228 | write_lock(&edf->ready_lock); | ||
229 | |||
230 | __prepare_new_releases(edf, start); | ||
231 | |||
232 | write_unlock(&edf->ready_lock); | ||
233 | spin_unlock_irqrestore(&edf->release_lock, flags); | ||
234 | } | ||
235 | |||
236 | void __prepare_new_releases(edf_domain_t *edf, jiffie_t start) | ||
237 | { | ||
238 | |||
239 | struct list_head tmp_list; | ||
240 | struct list_head *pos, *n; | ||
241 | struct task_struct *t; | ||
242 | |||
243 | INIT_LIST_HEAD(&tmp_list); | ||
244 | |||
245 | while (!list_empty(&edf->release_queue)) { | ||
246 | pos = edf->release_queue.next; | ||
247 | list_del(pos); | ||
248 | list_add(pos, &tmp_list); | ||
249 | } | ||
250 | while (!list_empty(&edf->ready_queue)) { | ||
251 | pos = edf->ready_queue.next; | ||
252 | list_del(pos); | ||
253 | list_add(pos, &tmp_list); | ||
254 | } | ||
255 | |||
256 | list_for_each_safe(pos, n, &tmp_list) { | ||
257 | t = list_entry(pos, struct task_struct, rt_list); | ||
258 | list_del(pos); | ||
259 | __prepare_new_release(t, start); | ||
260 | __add_release(edf, t); | ||
261 | } | ||
262 | |||
263 | } | ||
264 | |||
265 | /* need_to_preempt - check whether the task t needs to be preempted | 101 | /* need_to_preempt - check whether the task t needs to be preempted |
266 | * call only with irqs disabled and with ready_lock acquired | 102 | * call only with irqs disabled and with ready_lock acquired |
267 | */ | 103 | */ |
268 | int preemption_needed(edf_domain_t* edf, struct task_struct *t) | 104 | int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t) |
269 | { | 105 | { |
270 | /* we need the read lock for edf_ready_queue */ | 106 | /* we need the read lock for edf_ready_queue */ |
271 | /* no need to preempt if there is nothing pending */ | 107 | /* no need to preempt if there is nothing pending */ |
272 | if (list_empty(&edf->ready_queue)) | 108 | if (!ready_jobs_pending(rt)) |
273 | return 0; | 109 | return 0; |
274 | /* we need to reschedule if t doesn't exist */ | 110 | /* we need to reschedule if t doesn't exist */ |
275 | if (!t) | 111 | if (!t) |
@@ -277,7 +113,7 @@ int preemption_needed(edf_domain_t* edf, struct task_struct *t) | |||
277 | /* don't preempt if t is non-preemptable */ | 113 | /* don't preempt if t is non-preemptable */ |
278 | if (!is_np(t)) | 114 | if (!is_np(t)) |
279 | /* make sure to get non-rt stuff out of the way */ | 115 | /* make sure to get non-rt stuff out of the way */ |
280 | return !is_realtime(t) || edf_higher_prio(next_ready(edf), t); | 116 | return !is_realtime(t) || edf_higher_prio(next_ready(rt), t); |
281 | return 0; | 117 | return 0; |
282 | } | 118 | } |
283 | 119 | ||
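The new `edf_ready_order()` is the glue between the task-level comparison and the generic domain: it converts the `struct list_head` pointers handed in by `list_insert()` back into `task_struct` pointers with `list_entry()` and defers to `edf_higher_prio()`. A small runnable userspace illustration of that adapter pattern follows; the task layout and the comparison are simplified (earlier deadline wins, lower PID breaks ties), whereas the real `edf_higher_prio()` also honours inherited priorities.

```c
#include <assert.h>
#include <stddef.h>

/* Minimal stand-ins for struct list_head and list_entry()/container_of(). */
struct list_head { struct list_head *next, *prev; };
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified task: the real task_struct embeds rt_list the same way. */
struct task {
	long deadline;
	int pid;
	struct list_head rt_list;
};

/* Task-level comparison: earlier deadline wins, PID breaks ties. */
static int higher_prio(struct task *a, struct task *b)
{
	return a->deadline < b->deadline ||
	       (a->deadline == b->deadline && a->pid < b->pid);
}

/* List-node adapter in the style of edf_ready_order(): this is what the
 * generic rt_domain code calls through its list_cmp_t pointer. */
static int ready_order(struct list_head *a, struct list_head *b)
{
	return higher_prio(list_entry(a, struct task, rt_list),
			   list_entry(b, struct task, rt_list));
}

int main(void)
{
	struct task t1 = { .deadline = 100, .pid = 7 };
	struct task t2 = { .deadline = 100, .pid = 3 };

	assert(ready_order(&t2.rt_list, &t1.rt_list));   /* same deadline, lower PID first */
	assert(!ready_order(&t1.rt_list, &t2.rt_list));
	return 0;
}
```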
diff --git a/kernel/rt_domain.c b/kernel/rt_domain.c
new file mode 100644
index 0000000000..52752d98c8
--- /dev/null
+++ b/kernel/rt_domain.c
@@ -0,0 +1,170 @@ | |||
1 | /* | ||
2 | * kernel/rt_domain.c | ||
3 | * | ||
4 | * LITMUS real-time infrastructure. This file contains the | ||
5 | * functions that manipulate RT domains. RT domains are an abstraction | ||
6 | * of a ready queue and a release queue. | ||
7 | */ | ||
8 | |||
9 | #include <linux/percpu.h> | ||
10 | #include <linux/sched.h> | ||
11 | #include <linux/list.h> | ||
12 | |||
13 | #include <linux/litmus.h> | ||
14 | #include <linux/sched_plugin.h> | ||
15 | #include <linux/sched_trace.h> | ||
16 | |||
17 | #include <linux/rt_domain.h> | ||
18 | |||
19 | |||
20 | static int dummy_resched(rt_domain_t *rt) | ||
21 | { | ||
22 | return 0; | ||
23 | } | ||
24 | |||
25 | static int dummy_order(struct list_head* a, struct list_head* b) | ||
26 | { | ||
27 | return 0; | ||
28 | } | ||
29 | |||
30 | int release_order(struct list_head* a, struct list_head* b) | ||
31 | { | ||
32 | return earlier_release( | ||
33 | list_entry(a, struct task_struct, rt_list), | ||
34 | list_entry(b, struct task_struct, rt_list)); | ||
35 | } | ||
36 | |||
37 | |||
38 | void rt_domain_init(rt_domain_t *rt, | ||
39 | check_resched_needed_t f, | ||
40 | list_cmp_t order) | ||
41 | { | ||
42 | BUG_ON(!rt); | ||
43 | if (!f) | ||
44 | f = dummy_resched; | ||
45 | if (!order) | ||
46 | order = dummy_order; | ||
47 | INIT_LIST_HEAD(&rt->ready_queue); | ||
48 | INIT_LIST_HEAD(&rt->release_queue); | ||
49 | rt->ready_lock = RW_LOCK_UNLOCKED; | ||
50 | rt->release_lock = SPIN_LOCK_UNLOCKED; | ||
51 | rt->check_resched = f; | ||
52 | rt->order = order; | ||
53 | } | ||
54 | |||
55 | /* add_ready - add a real-time task to the rt ready queue. It must be runnable. | ||
56 | * @new: the newly released task | ||
57 | */ | ||
58 | void __add_ready(rt_domain_t* rt, struct task_struct *new) | ||
59 | { | ||
60 | TRACE("rt: adding %s/%d (%u, %u) to ready queue\n", | ||
61 | new->comm, new->pid, get_exec_cost(new), get_rt_period(new)); | ||
62 | |||
63 | if (!list_insert(&new->rt_list, &rt->ready_queue, rt->order)) | ||
64 | rt->check_resched(rt); | ||
65 | } | ||
66 | |||
67 | struct task_struct* __take_ready(rt_domain_t* rt) | ||
68 | { | ||
69 | struct task_struct *t = __peek_ready(rt); | ||
70 | |||
71 | /* kick it out of the ready list */ | ||
72 | if (t) | ||
73 | list_del(&t->rt_list); | ||
74 | return t; | ||
75 | } | ||
76 | |||
77 | struct task_struct* __peek_ready(rt_domain_t* rt) | ||
78 | { | ||
79 | if (!list_empty(&rt->ready_queue)) | ||
80 | return next_ready(rt); | ||
81 | else | ||
82 | return NULL; | ||
83 | } | ||
84 | |||
85 | /* add_release - add a real-time task to the rt release queue. | ||
86 | * @task: the sleeping task | ||
87 | */ | ||
88 | void __add_release(rt_domain_t* rt, struct task_struct *task) | ||
89 | { | ||
90 | list_insert(&task->rt_list, &rt->release_queue, release_order); | ||
91 | } | ||
92 | |||
93 | void __release_pending(rt_domain_t* rt) | ||
94 | { | ||
95 | struct list_head *pos, *save; | ||
96 | struct task_struct *queued; | ||
97 | list_for_each_safe(pos, save, &rt->release_queue) { | ||
98 | queued = list_entry(pos, struct task_struct, rt_list); | ||
99 | if (likely(is_released(queued))) { | ||
100 | /* this one is ready to go*/ | ||
101 | list_del(pos); | ||
102 | set_rt_flags(queued, RT_F_RUNNING); | ||
103 | |||
104 | sched_trace_job_release(queued); | ||
105 | |||
106 | /* now it can be picked up */ | ||
107 | barrier(); | ||
108 | add_ready(rt, queued); | ||
109 | } | ||
110 | else | ||
111 | /* the release queue is ordered */ | ||
112 | break; | ||
113 | } | ||
114 | } | ||
115 | |||
116 | void try_release_pending(rt_domain_t* rt) | ||
117 | { | ||
118 | unsigned long flags; | ||
119 | |||
120 | if (spin_trylock_irqsave(&rt->release_lock, flags)) { | ||
121 | __release_pending(rt); | ||
122 | spin_unlock_irqrestore(&rt->release_lock, flags); | ||
123 | } | ||
124 | } | ||
125 | |||
126 | void rerelease_all(rt_domain_t *rt, | ||
127 | release_at_t release) | ||
128 | { | ||
129 | unsigned long flags; | ||
130 | |||
131 | spin_lock_irqsave(&rt->release_lock, flags); | ||
132 | write_lock(&rt->ready_lock); | ||
133 | |||
134 | __rerelease_all(rt, release); | ||
135 | |||
136 | write_unlock(&rt->ready_lock); | ||
137 | spin_unlock_irqrestore(&rt->release_lock, flags); | ||
138 | } | ||
139 | |||
140 | void __rerelease_all(rt_domain_t *rt, | ||
141 | release_at_t release) | ||
142 | { | ||
143 | jiffie_t start = jiffies + 10; | ||
144 | struct list_head tmp_list; | ||
145 | struct list_head *pos, *n; | ||
146 | struct task_struct *t; | ||
147 | |||
148 | INIT_LIST_HEAD(&tmp_list); | ||
149 | |||
150 | while (!list_empty(&rt->release_queue)) { | ||
151 | pos = rt->release_queue.next; | ||
152 | list_del(pos); | ||
153 | list_add(pos, &tmp_list); | ||
154 | } | ||
155 | while (!list_empty(&rt->ready_queue)) { | ||
156 | pos = rt->ready_queue.next; | ||
157 | list_del(pos); | ||
158 | list_add(pos, &tmp_list); | ||
159 | } | ||
160 | |||
161 | list_for_each_safe(pos, n, &tmp_list) { | ||
162 | t = list_entry(pos, struct task_struct, rt_list); | ||
163 | list_del(pos); | ||
164 | release(t, start); | ||
165 | __add_release(rt, t); | ||
166 | } | ||
167 | |||
168 | } | ||
169 | |||
170 | |||
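`__rerelease_all()` drains both queues into a temporary list, applies the caller-supplied `release_at_t` callback to every task (`edf_release_at()` in the EDF plugins), and requeues everything on the release queue; `rerelease_all()` is the wrapper that takes both locks around it. A compact userspace sketch of that drain-and-requeue pattern, with invented names and a simplified callback (the kernel version walks `struct list_head` queues and re-inserts in release order):

```c
#include <stdio.h>

typedef struct job { int id; long release; long deadline; struct job *next; } job_t;
typedef void (*release_at_fn)(job_t *j, long start);

/* Simplified stand-in for edf_release_at(): future periods start at `start`. */
static void edf_release_at(job_t *j, long start)
{
	j->release  = start;
	j->deadline = start;   /* the next deadline is computed from here */
}

/* Move every element of *q onto tmp and return the new tmp head. */
static job_t *drain(job_t **q, job_t *tmp)
{
	while (*q) {
		job_t *j = *q;
		*q = j->next;
		j->next = tmp;
		tmp = j;
	}
	return tmp;
}

static void rerelease_all(job_t **ready, job_t **releaseq,
			  release_at_fn release, long start)
{
	job_t *tmp = NULL, *j;

	tmp = drain(ready, tmp);
	tmp = drain(releaseq, tmp);
	while ((j = tmp)) {          /* re-release and requeue each job */
		tmp = j->next;
		release(j, start);
		j->next = *releaseq; /* kernel code re-inserts in release order */
		*releaseq = j;
	}
}

int main(void)
{
	job_t a = { 1, 0, 15, NULL }, b = { 2, 0, 9, &a };
	job_t *ready = &b, *releaseq = NULL;

	rerelease_all(&ready, &releaseq, edf_release_at, 100);
	for (job_t *j = releaseq; j; j = j->next)
		printf("job %d re-released at %ld\n", j->id, j->release);
	return 0;
}
```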
diff --git a/kernel/sched_edf_hsb.c b/kernel/sched_edf_hsb.c
index d190426ff8..b8ac1ab0f8 100644
--- a/kernel/sched_edf_hsb.c
+++ b/kernel/sched_edf_hsb.c
@@ -211,7 +211,7 @@ capacity_t* __take_capacity(capacity_queue_t* queue, jiffie_t deadline, int dead | |||
211 | * HRT servers are strictly periodic and retain their budget. | 211 | * HRT servers are strictly periodic and retain their budget. |
212 | */ | 212 | */ |
213 | typedef struct { | 213 | typedef struct { |
214 | edf_domain_t domain; | 214 | rt_domain_t domain; |
215 | 215 | ||
216 | unsigned int period; | 216 | unsigned int period; |
217 | unsigned int wcet; | 217 | unsigned int wcet; |
@@ -302,7 +302,7 @@ static inline int hrt_client_eligible(hrt_server_t *srv) | |||
302 | } | 302 | } |
303 | 303 | ||
304 | static void hsb_cpu_state_init(cpu_state_t* cpu_state, | 304 | static void hsb_cpu_state_init(cpu_state_t* cpu_state, |
305 | edf_check_resched_needed_t check, | 305 | check_resched_needed_t check, |
306 | int cpu) | 306 | int cpu) |
307 | { | 307 | { |
308 | edf_domain_init(&cpu_state->hrt.domain, check); | 308 | edf_domain_init(&cpu_state->hrt.domain, check); |
@@ -333,7 +333,7 @@ static void hsb_cpu_state_init(cpu_state_t* cpu_state, | |||
333 | (a)->release, (b)->release)) | 333 | (a)->release, (b)->release)) |
334 | 334 | ||
335 | 335 | ||
336 | static void be_add_ready(edf_domain_t* edf, be_server_t *new) | 336 | static void be_add_ready(rt_domain_t* edf, be_server_t *new) |
337 | { | 337 | { |
338 | unsigned long flags; | 338 | unsigned long flags; |
339 | struct list_head *pos; | 339 | struct list_head *pos; |
@@ -341,7 +341,7 @@ static void be_add_ready(edf_domain_t* edf, be_server_t *new) | |||
341 | unsigned int passed = 0; | 341 | unsigned int passed = 0; |
342 | 342 | ||
343 | BUG_ON(!new); | 343 | BUG_ON(!new); |
344 | /* first we need the write lock for edf_ready_queue */ | 344 | /* first we need the write lock for rt_ready_queue */ |
345 | write_lock_irqsave(&edf->ready_lock, flags); | 345 | write_lock_irqsave(&edf->ready_lock, flags); |
346 | /* find a spot where our deadline is earlier than the next */ | 346 | /* find a spot where our deadline is earlier than the next */ |
347 | list_for_each(pos, &edf->ready_queue) { | 347 | list_for_each(pos, &edf->ready_queue) { |
@@ -361,7 +361,7 @@ static void be_add_ready(edf_domain_t* edf, be_server_t *new) | |||
361 | write_unlock_irqrestore(&edf->ready_lock, flags); | 361 | write_unlock_irqrestore(&edf->ready_lock, flags); |
362 | } | 362 | } |
363 | 363 | ||
364 | static be_server_t* be_take_ready(edf_domain_t* edf) | 364 | static be_server_t* be_take_ready(rt_domain_t* edf) |
365 | { | 365 | { |
366 | be_server_t *t = NULL; | 366 | be_server_t *t = NULL; |
367 | 367 | ||
@@ -373,7 +373,7 @@ static be_server_t* be_take_ready(edf_domain_t* edf) | |||
373 | return t; | 373 | return t; |
374 | } | 374 | } |
375 | 375 | ||
376 | /*static be_server_t* get_be_server(edf_domain_t* edf) | 376 | /*static be_server_t* get_be_server(rt_domain_t* edf) |
377 | { | 377 | { |
378 | be_server_t *t = NULL; | 378 | be_server_t *t = NULL; |
379 | 379 | ||
@@ -392,7 +392,7 @@ static be_server_t* be_take_ready(edf_domain_t* edf) | |||
392 | return t; | 392 | return t; |
393 | }*/ | 393 | }*/ |
394 | 394 | ||
395 | static void be_add_release(edf_domain_t* edf, be_server_t *srv) | 395 | static void be_add_release(rt_domain_t* edf, be_server_t *srv) |
396 | { | 396 | { |
397 | unsigned long flags; | 397 | unsigned long flags; |
398 | struct list_head *pos; | 398 | struct list_head *pos; |
@@ -414,7 +414,7 @@ static void be_add_release(edf_domain_t* edf, be_server_t *srv) | |||
414 | spin_unlock_irqrestore(&edf->release_lock, flags); | 414 | spin_unlock_irqrestore(&edf->release_lock, flags); |
415 | } | 415 | } |
416 | 416 | ||
417 | static void be_try_release_pending(edf_domain_t* edf) | 417 | static void be_try_release_pending(rt_domain_t* edf) |
418 | { | 418 | { |
419 | unsigned long flags; | 419 | unsigned long flags; |
420 | struct list_head *pos, *save; | 420 | struct list_head *pos, *save; |
@@ -445,7 +445,7 @@ static void be_prepare_new_release(be_server_t *t, jiffie_t start) { | |||
445 | t->budget = t->wcet; | 445 | t->budget = t->wcet; |
446 | } | 446 | } |
447 | 447 | ||
448 | static void be_prepare_new_releases(edf_domain_t *edf, jiffie_t start) | 448 | static void be_prepare_new_releases(rt_domain_t *edf, jiffie_t start) |
449 | { | 449 | { |
450 | unsigned long flags; | 450 | unsigned long flags; |
451 | struct list_head tmp_list; | 451 | struct list_head tmp_list; |
@@ -499,9 +499,9 @@ static void be_prepare_for_next_period(be_server_t *t) | |||
499 | /* need_to_preempt - check whether the task t needs to be preempted by a | 499 | /* need_to_preempt - check whether the task t needs to be preempted by a |
500 | * best-effort server. | 500 | * best-effort server. |
501 | */ | 501 | */ |
502 | static inline int be_preemption_needed(edf_domain_t* edf, cpu_state_t* state) | 502 | static inline int be_preemption_needed(rt_domain_t* edf, cpu_state_t* state) |
503 | { | 503 | { |
504 | /* we need the read lock for edf_ready_queue */ | 504 | /* we need the read lock for rt_ready_queue */ |
505 | if (!list_empty(&edf->ready_queue)) | 505 | if (!list_empty(&edf->ready_queue)) |
506 | { | 506 | { |
507 | 507 | ||
@@ -520,7 +520,7 @@ static inline int be_preemption_needed(edf_domain_t* edf, cpu_state_t* state) | |||
520 | return 0; | 520 | return 0; |
521 | } | 521 | } |
522 | 522 | ||
523 | static void be_enqueue(edf_domain_t* edf, be_server_t* srv) | 523 | static void be_enqueue(rt_domain_t* edf, be_server_t* srv) |
524 | { | 524 | { |
525 | int new_release = 0; | 525 | int new_release = 0; |
526 | if (!srv->budget) { | 526 | if (!srv->budget) { |
@@ -539,7 +539,7 @@ static void be_enqueue(edf_domain_t* edf, be_server_t* srv) | |||
539 | be_add_release(edf, srv); | 539 | be_add_release(edf, srv); |
540 | } | 540 | } |
541 | 541 | ||
542 | static void be_preempt(edf_domain_t *be, cpu_state_t *state) | 542 | static void be_preempt(rt_domain_t *be, cpu_state_t *state) |
543 | { | 543 | { |
544 | be_server_t *srv; | 544 | be_server_t *srv; |
545 | 545 | ||
@@ -565,12 +565,12 @@ static LIST_HEAD(hsb_cpu_queue); | |||
565 | 565 | ||
566 | 566 | ||
567 | /* the global soft real-time domain */ | 567 | /* the global soft real-time domain */ |
568 | static edf_domain_t srt; | 568 | static rt_domain_t srt; |
569 | /* the global best-effort server domain | 569 | /* the global best-effort server domain |
570 | * belongs conceptually to the srt domain, but has | 570 | * belongs conceptually to the srt domain, but has |
571 | * be_server_t* queued instead of tast_t* | 571 | * be_server_t* queued instead of tast_t* |
572 | */ | 572 | */ |
573 | static edf_domain_t be; | 573 | static rt_domain_t be; |
574 | 574 | ||
575 | static fifo_domain_t hsb_fifo; | 575 | static fifo_domain_t hsb_fifo; |
576 | 576 | ||
@@ -653,7 +653,7 @@ static void adjust_cpu_queue(task_class_t class, jiffie_t deadline, | |||
653 | /* hrt_check_resched - check whether the HRT server on given CPU needs to | 653 | /* hrt_check_resched - check whether the HRT server on given CPU needs to |
654 | * preempt the running task. | 654 | * preempt the running task. |
655 | */ | 655 | */ |
656 | static int hrt_check_resched(edf_domain_t *edf) | 656 | static int hrt_check_resched(rt_domain_t *edf) |
657 | { | 657 | { |
658 | hrt_server_t *srv = container_of(edf, hrt_server_t, domain); | 658 | hrt_server_t *srv = container_of(edf, hrt_server_t, domain); |
659 | cpu_state_t *state = container_of(srv, cpu_state_t, hrt); | 659 | cpu_state_t *state = container_of(srv, cpu_state_t, hrt); |
@@ -687,7 +687,7 @@ static int hrt_check_resched(edf_domain_t *edf) | |||
687 | * | 687 | * |
688 | * Caller must hold edf->ready_lock! | 688 | * Caller must hold edf->ready_lock! |
689 | */ | 689 | */ |
690 | static int srt_check_resched(edf_domain_t *edf) | 690 | static int srt_check_resched(rt_domain_t *edf) |
691 | { | 691 | { |
692 | cpu_state_t *last; | 692 | cpu_state_t *last; |
693 | int ret = 0; | 693 | int ret = 0; |
@@ -722,7 +722,7 @@ static int srt_check_resched(edf_domain_t *edf) | |||
722 | * | 722 | * |
723 | * Caller must hold edf->ready_lock! | 723 | * Caller must hold edf->ready_lock! |
724 | */ | 724 | */ |
725 | static int be_check_resched(edf_domain_t *edf) | 725 | static int be_check_resched(rt_domain_t *edf) |
726 | { | 726 | { |
727 | cpu_state_t *last; | 727 | cpu_state_t *last; |
728 | int soft, bg; | 728 | int soft, bg; |
@@ -828,9 +828,9 @@ int fifo_check_resched(void) | |||
828 | 828 | ||
829 | 829 | ||
830 | 830 | ||
831 | static inline int hsb_preemption_needed(edf_domain_t* edf, cpu_state_t* state) | 831 | static inline int hsb_preemption_needed(rt_domain_t* edf, cpu_state_t* state) |
832 | { | 832 | { |
833 | /* we need the read lock for edf_ready_queue */ | 833 | /* we need the read lock for rt_ready_queue */ |
834 | if (!list_empty(&edf->ready_queue)) | 834 | if (!list_empty(&edf->ready_queue)) |
835 | { | 835 | { |
836 | if (state->exec_class == RT_CLASS_SOFT) { | 836 | if (state->exec_class == RT_CLASS_SOFT) { |
@@ -848,7 +848,7 @@ static inline int hsb_preemption_needed(edf_domain_t* edf, cpu_state_t* state) | |||
848 | 848 | ||
849 | static inline int cap_preemption_needed(capacity_queue_t* q, cpu_state_t* state) | 849 | static inline int cap_preemption_needed(capacity_queue_t* q, cpu_state_t* state) |
850 | { | 850 | { |
851 | /* we need the read lock for edf_ready_queue */ | 851 | /* we need the read lock for rt_ready_queue */ |
852 | if (!list_empty(&q->queue)) | 852 | if (!list_empty(&q->queue)) |
853 | { | 853 | { |
854 | if (state->exec_class == RT_CLASS_SOFT) { | 854 | if (state->exec_class == RT_CLASS_SOFT) { |
@@ -938,8 +938,9 @@ static reschedule_check_t hsb_scheduler_tick(void) | |||
938 | case RT_CLASS_HARD: | 938 | case RT_CLASS_HARD: |
939 | read_lock_irqsave(&state->hrt.domain.ready_lock, | 939 | read_lock_irqsave(&state->hrt.domain.ready_lock, |
940 | flags); | 940 | flags); |
941 | resched = preemption_needed(&state->hrt.domain, | 941 | resched = edf_preemption_needed( |
942 | t); | 942 | &state->hrt.domain, |
943 | t); | ||
943 | read_unlock_irqrestore( | 944 | read_unlock_irqrestore( |
944 | &state->hrt.domain.ready_lock, flags); | 945 | &state->hrt.domain.ready_lock, flags); |
945 | break; | 946 | break; |
@@ -1017,7 +1018,7 @@ static int schedule_hrt(struct task_struct * prev, | |||
1017 | } | 1018 | } |
1018 | 1019 | ||
1019 | if (is_hrt(prev) && is_released(prev) && is_running(prev) | 1020 | if (is_hrt(prev) && is_released(prev) && is_running(prev) |
1020 | && !preemption_needed(&state->hrt.domain, prev)) { | 1021 | && !edf_preemption_needed(&state->hrt.domain, prev)) { |
1021 | /* This really should only happen if the task has | 1022 | /* This really should only happen if the task has |
1022 | * 100% utilization or when we got a bogus/delayed | 1023 | * 100% utilization or when we got a bogus/delayed |
1023 | * resched IPI. | 1024 | * resched IPI. |
@@ -1046,7 +1047,7 @@ static int schedule_hrt(struct task_struct * prev, | |||
1046 | 1047 | ||
1047 | 1048 | ||
1048 | static struct task_struct* find_min_slack_task(struct task_struct *prev, | 1049 | static struct task_struct* find_min_slack_task(struct task_struct *prev, |
1049 | edf_domain_t* edf) | 1050 | rt_domain_t* edf) |
1050 | { | 1051 | { |
1051 | struct list_head *pos; | 1052 | struct list_head *pos; |
1052 | struct task_struct* tsk = NULL; | 1053 | struct task_struct* tsk = NULL; |
@@ -1064,7 +1065,7 @@ static struct task_struct* find_min_slack_task(struct task_struct *prev, | |||
1064 | } | 1065 | } |
1065 | 1066 | ||
1066 | static struct task_struct* null_heuristic(struct task_struct *prev, | 1067 | static struct task_struct* null_heuristic(struct task_struct *prev, |
1067 | edf_domain_t* edf, | 1068 | rt_domain_t* edf, |
1068 | fifo_domain_t* fifo) | 1069 | fifo_domain_t* fifo) |
1069 | { | 1070 | { |
1070 | if (fifo_jobs_pending( fifo)) | 1071 | if (fifo_jobs_pending( fifo)) |
@@ -1076,7 +1077,7 @@ static struct task_struct* null_heuristic(struct task_struct *prev, | |||
1076 | return NULL; | 1077 | return NULL; |
1077 | } | 1078 | } |
1078 | 1079 | ||
1079 | /*static struct task_struct* history_heuristic(struct task_struct *prev, edf_domain_t* edf) | 1080 | /*static struct task_struct* history_heuristic(struct task_struct *prev, rt_domain_t* edf) |
1080 | { | 1081 | { |
1081 | struct list_head *pos; | 1082 | struct list_head *pos; |
1082 | struct task_struct* tsk = NULL; | 1083 | struct task_struct* tsk = NULL; |
@@ -1099,7 +1100,7 @@ static struct task_struct* null_heuristic(struct task_struct *prev, | |||
1099 | } | 1100 | } |
1100 | */ | 1101 | */ |
1101 | /* TODO: write slack heuristic.*/ | 1102 | /* TODO: write slack heuristic.*/ |
1102 | /*static struct task_struct* slack_heuristic(struct task_struct *prev, edf_domain_t* edf) | 1103 | /*static struct task_struct* slack_heuristic(struct task_struct *prev, rt_domain_t* edf) |
1103 | { | 1104 | { |
1104 | struct list_head *pos; | 1105 | struct list_head *pos; |
1105 | struct task_struct* tsk = NULL; | 1106 | struct task_struct* tsk = NULL; |
@@ -1182,7 +1183,7 @@ static int schedule_capacity(struct task_struct *prev, | |||
1182 | #define BE 2 | 1183 | #define BE 2 |
1183 | #define CAP 3 | 1184 | #define CAP 3 |
1184 | 1185 | ||
1185 | static inline int what_first(edf_domain_t *be, edf_domain_t *srt, capacity_queue_t* q) | 1186 | static inline int what_first(rt_domain_t *be, rt_domain_t *srt, capacity_queue_t* q) |
1186 | { | 1187 | { |
1187 | jiffie_t sdl = 0, bdl= 0, cdl = 0, cur; | 1188 | jiffie_t sdl = 0, bdl= 0, cdl = 0, cur; |
1188 | int _srt = !list_empty(&srt->ready_queue); | 1189 | int _srt = !list_empty(&srt->ready_queue); |
@@ -1370,7 +1371,7 @@ static int hsb_schedule(struct task_struct * prev, struct task_struct ** next, | |||
1370 | TRACE("preparing %d for next period\n", prev->pid); | 1371 | TRACE("preparing %d for next period\n", prev->pid); |
1371 | release_capacity(&cap_queue, prev->time_slice, | 1372 | release_capacity(&cap_queue, prev->time_slice, |
1372 | prev->rt_param.times.deadline, prev); | 1373 | prev->rt_param.times.deadline, prev); |
1373 | prepare_for_next_period(prev); | 1374 | edf_prepare_for_next_period(prev); |
1374 | } | 1375 | } |
1375 | 1376 | ||
1376 | if (get_rt_mode() == MODE_RT_RUN) { | 1377 | if (get_rt_mode() == MODE_RT_RUN) { |
@@ -1490,7 +1491,7 @@ static long hsb_prepare_task(struct task_struct * t) | |||
1490 | /* The action is already on. | 1491 | /* The action is already on. |
1491 | * Prepare immediate release | 1492 | * Prepare immediate release |
1492 | */ | 1493 | */ |
1493 | prepare_new_release(t); | 1494 | edf_release_now(t); |
1494 | /* The task should be running in the queue, otherwise signal | 1495 | /* The task should be running in the queue, otherwise signal |
1495 | * code will try to wake it up with fatal consequences. | 1496 | * code will try to wake it up with fatal consequences. |
1496 | */ | 1497 | */ |
@@ -1523,7 +1524,7 @@ static void hsb_wake_up_task(struct task_struct *task) | |||
1523 | } | 1524 | } |
1524 | else if (is_tardy(task)) { | 1525 | else if (is_tardy(task)) { |
1525 | /* new sporadic release */ | 1526 | /* new sporadic release */ |
1526 | prepare_new_release(task); | 1527 | edf_release_now(task); |
1527 | sched_trace_job_release(task); | 1528 | sched_trace_job_release(task); |
1528 | hsb_add_ready(task); | 1529 | hsb_add_ready(task); |
1529 | } | 1530 | } |
@@ -1591,7 +1592,7 @@ static int hsb_mode_change(int new_mode) | |||
1591 | new_mode); | 1592 | new_mode); |
1592 | if (new_mode == MODE_RT_RUN) { | 1593 | if (new_mode == MODE_RT_RUN) { |
1593 | start = jiffies + 20; | 1594 | start = jiffies + 20; |
1594 | prepare_new_releases(&srt, start); | 1595 | rerelease_all(&srt, edf_release_at); |
1595 | be_prepare_new_releases(&be, start); | 1596 | be_prepare_new_releases(&be, start); |
1596 | 1597 | ||
1597 | /* initialize per CPU state | 1598 | /* initialize per CPU state |
@@ -1613,7 +1614,7 @@ static int hsb_mode_change(int new_mode) | |||
1613 | entry->cur_deadline = 0; | 1614 | entry->cur_deadline = 0; |
1614 | list_add(&entry->list, &hsb_cpu_queue); | 1615 | list_add(&entry->list, &hsb_cpu_queue); |
1615 | 1616 | ||
1616 | prepare_new_releases(&entry->hrt.domain, start); | 1617 | rerelease_all(&entry->hrt.domain, edf_release_at); |
1617 | prepare_hrt_release(&entry->hrt, start); | 1618 | prepare_hrt_release(&entry->hrt, start); |
1618 | } | 1619 | } |
1619 | spin_unlock(&hsb_cpu_lock); | 1620 | spin_unlock(&hsb_cpu_lock); |
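Because `rt_domain_t` carries no plugin-specific state, the plugins embed it in their own structures and let the `check_resched` callback recover the enclosing object with `container_of()`, as `hrt_check_resched()` does above and `part_edf_check_resched()` does further below. A small userspace illustration of that embedding pattern; `domain_t`, `cpu_state_t` and the callback are invented stand-ins.

```c
#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Generic domain knows nothing about the plugin that embeds it. */
struct domain;
typedef int (*check_resched_fn)(struct domain *dom);
typedef struct domain {
	check_resched_fn check_resched;
} domain_t;

/* Plugin-side per-CPU state embedding the domain, in the spirit of
 * cpu_state_t/hrt_server_t in sched_edf_hsb.c. */
typedef struct {
	int cpu;
	int need_resched;
	domain_t domain;
} cpu_state_t;

/* The callback only receives the embedded domain and climbs back out. */
static int my_check_resched(domain_t *dom)
{
	cpu_state_t *state = container_of(dom, cpu_state_t, domain);
	state->need_resched = 1;
	return 1;
}

int main(void)
{
	cpu_state_t state = { .cpu = 0, .need_resched = 0 };
	state.domain.check_resched = my_check_resched;

	/* Generic code invokes the callback with just the domain pointer. */
	state.domain.check_resched(&state.domain);
	assert(state.need_resched == 1);
	return 0;
}
```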
diff --git a/kernel/sched_global_edf.c b/kernel/sched_global_edf.c
index 0781de139e..3ca46c1fb9 100644
--- a/kernel/sched_global_edf.c
+++ b/kernel/sched_global_edf.c
@@ -13,6 +13,7 @@ | |||
13 | 13 | ||
14 | #include <linux/litmus.h> | 14 | #include <linux/litmus.h> |
15 | #include <linux/sched_plugin.h> | 15 | #include <linux/sched_plugin.h> |
16 | |||
16 | #include <linux/edf_common.h> | 17 | #include <linux/edf_common.h> |
17 | #include <linux/sched_trace.h> | 18 | #include <linux/sched_trace.h> |
18 | 19 | ||
@@ -43,7 +44,7 @@ static spinlock_t gedf_cpu_lock = SPIN_LOCK_UNLOCKED; | |||
43 | static LIST_HEAD(gedf_cpu_queue); | 44 | static LIST_HEAD(gedf_cpu_queue); |
44 | 45 | ||
45 | 46 | ||
46 | static edf_domain_t gedf; | 47 | static rt_domain_t gedf; |
47 | 48 | ||
48 | #define DUMP(args...) TRACE(args) | 49 | #define DUMP(args...) TRACE(args) |
49 | 50 | ||
@@ -98,7 +99,7 @@ static void adjust_cpu_queue(int exec_rt, jiffie_t deadline) | |||
98 | * hard to detect reliably. Too many schedules will hurt performance | 99 | * hard to detect reliably. Too many schedules will hurt performance |
99 | * but do not cause incorrect schedules. | 100 | * but do not cause incorrect schedules. |
100 | */ | 101 | */ |
101 | static int gedf_check_resched(edf_domain_t *edf) | 102 | static int gedf_check_resched(rt_domain_t *edf) |
102 | { | 103 | { |
103 | cpu_entry_t *last; | 104 | cpu_entry_t *last; |
104 | int ret = 0; | 105 | int ret = 0; |
@@ -161,7 +162,7 @@ static reschedule_check_t gedf_scheduler_tick(void) | |||
161 | if (want_resched != FORCE_RESCHED) | 162 | if (want_resched != FORCE_RESCHED) |
162 | { | 163 | { |
163 | read_lock_irqsave(&gedf.ready_lock, flags); | 164 | read_lock_irqsave(&gedf.ready_lock, flags); |
164 | if (preemption_needed(&gedf, t)) | 165 | if (edf_preemption_needed(&gedf, t)) |
165 | { | 166 | { |
166 | want_resched = FORCE_RESCHED; | 167 | want_resched = FORCE_RESCHED; |
167 | set_will_schedule(); | 168 | set_will_schedule(); |
@@ -190,7 +191,7 @@ static int gedf_schedule(struct task_struct * prev, | |||
190 | if (is_realtime(prev) && get_rt_flags(prev) == RT_F_SLEEP) | 191 | if (is_realtime(prev) && get_rt_flags(prev) == RT_F_SLEEP) |
191 | { | 192 | { |
192 | DUMP("preparing %d for next period\n", prev->pid); | 193 | DUMP("preparing %d for next period\n", prev->pid); |
193 | prepare_for_next_period(prev); | 194 | edf_prepare_for_next_period(prev); |
194 | } | 195 | } |
195 | 196 | ||
196 | if (get_rt_mode() == MODE_RT_RUN) { | 197 | if (get_rt_mode() == MODE_RT_RUN) { |
@@ -199,7 +200,7 @@ static int gedf_schedule(struct task_struct * prev, | |||
199 | clear_will_schedule(); | 200 | clear_will_schedule(); |
200 | 201 | ||
201 | if (is_realtime(prev) && is_released(prev) && is_running(prev) | 202 | if (is_realtime(prev) && is_released(prev) && is_running(prev) |
202 | && !preemption_needed(&gedf, prev)) { | 203 | && !edf_preemption_needed(&gedf, prev)) { |
203 | /* Our current task's next job has already been | 204 | /* Our current task's next job has already been |
204 | * released and has higher priority than the highest | 205 | * released and has higher priority than the highest |
205 | * prioriy waiting task; in other words: it is tardy. | 206 | * prioriy waiting task; in other words: it is tardy. |
@@ -302,7 +303,7 @@ static long gedf_prepare_task(struct task_struct * t) | |||
302 | /* The action is already on. | 303 | /* The action is already on. |
303 | * Prepare immediate release | 304 | * Prepare immediate release |
304 | */ | 305 | */ |
305 | prepare_new_release(t); | 306 | edf_release_now(t); |
306 | /* The task should be running in the queue, otherwise signal | 307 | /* The task should be running in the queue, otherwise signal |
307 | * code will try to wake it up with fatal consequences. | 308 | * code will try to wake it up with fatal consequences. |
308 | */ | 309 | */ |
@@ -329,7 +330,7 @@ static void gedf_wake_up_task(struct task_struct *task) | |||
329 | task->state = TASK_RUNNING; | 330 | task->state = TASK_RUNNING; |
330 | if (is_tardy(task)) { | 331 | if (is_tardy(task)) { |
331 | /* new sporadic release */ | 332 | /* new sporadic release */ |
332 | prepare_new_release(task); | 333 | edf_release_now(task); |
333 | sched_trace_job_release(task); | 334 | sched_trace_job_release(task); |
334 | add_ready(&gedf, task); | 335 | add_ready(&gedf, task); |
335 | } | 336 | } |
@@ -387,7 +388,7 @@ static int gedf_mode_change(int new_mode) | |||
387 | /* printk(KERN_INFO "[%d] global edf: mode changed to %d\n", smp_processor_id(), | 388 | /* printk(KERN_INFO "[%d] global edf: mode changed to %d\n", smp_processor_id(), |
388 | new_mode);*/ | 389 | new_mode);*/ |
389 | if (new_mode == MODE_RT_RUN) { | 390 | if (new_mode == MODE_RT_RUN) { |
390 | prepare_new_releases(&gedf, jiffies + 10); | 391 | rerelease_all(&gedf, edf_release_at); |
391 | 392 | ||
392 | /* initialize per CPU state | 393 | /* initialize per CPU state |
393 | * we can't do this at boot time because we don't know | 394 | * we can't do this at boot time because we don't know |
@@ -500,7 +501,7 @@ static reschedule_check_t gedf_np_scheduler_tick(void) | |||
500 | * sure that it is not the last entry or that a reschedule is not necessary. | 501 | * sure that it is not the last entry or that a reschedule is not necessary. |
501 | * | 502 | * |
502 | */ | 503 | */ |
503 | static int gedf_np_check_resched(edf_domain_t *edf) | 504 | static int gedf_np_check_resched(rt_domain_t *edf) |
504 | { | 505 | { |
505 | cpu_entry_t *last; | 506 | cpu_entry_t *last; |
506 | int ret = 0; | 507 | int ret = 0; |
diff --git a/kernel/sched_part_edf.c b/kernel/sched_part_edf.c
index c382722a24..dee761525c 100644
--- a/kernel/sched_part_edf.c
+++ b/kernel/sched_part_edf.c
@@ -15,7 +15,7 @@ | |||
15 | 15 | ||
16 | 16 | ||
17 | typedef struct { | 17 | typedef struct { |
18 | edf_domain_t domain; | 18 | rt_domain_t domain; |
19 | int cpu; | 19 | int cpu; |
20 | struct task_struct* scheduled; /* only RT tasks */ | 20 | struct task_struct* scheduled; /* only RT tasks */ |
21 | spinlock_t lock; | 21 | spinlock_t lock; |
@@ -29,7 +29,7 @@ typedef struct { | |||
29 | #define task_edf(task) remote_edf(get_partition(task)) | 29 | #define task_edf(task) remote_edf(get_partition(task)) |
30 | 30 | ||
31 | static void part_edf_domain_init(part_edf_domain_t* pedf, | 31 | static void part_edf_domain_init(part_edf_domain_t* pedf, |
32 | edf_check_resched_needed_t check, | 32 | check_resched_needed_t check, |
33 | int cpu) | 33 | int cpu) |
34 | { | 34 | { |
35 | edf_domain_init(&pedf->domain, check); | 35 | edf_domain_init(&pedf->domain, check); |
@@ -44,17 +44,17 @@ DEFINE_PER_CPU(part_edf_domain_t, part_edf_domains); | |||
44 | * the CPU of the partition. | 44 | * the CPU of the partition. |
45 | * | 45 | * |
46 | */ | 46 | */ |
47 | static int part_edf_check_resched(edf_domain_t *edf) | 47 | static int part_edf_check_resched(rt_domain_t *edf) |
48 | { | 48 | { |
49 | part_edf_domain_t *pedf = container_of(edf, part_edf_domain_t, domain); | 49 | part_edf_domain_t *pedf = container_of(edf, part_edf_domain_t, domain); |
50 | int ret = 0; | 50 | int ret = 0; |
51 | 51 | ||
52 | spin_lock(&pedf->lock); | 52 | spin_lock(&pedf->lock); |
53 | 53 | ||
54 | /* because this is a callback from edf_domain_t we already hold | 54 | /* because this is a callback from rt_domain_t we already hold |
55 | * the necessary lock for the ready queue | 55 | * the necessary lock for the ready queue |
56 | */ | 56 | */ |
57 | if (preemption_needed(edf, pedf->scheduled)) { | 57 | if (edf_preemption_needed(edf, pedf->scheduled)) { |
58 | if (pedf->cpu == smp_processor_id()) | 58 | if (pedf->cpu == smp_processor_id()) |
59 | set_tsk_need_resched(current); | 59 | set_tsk_need_resched(current); |
60 | else | 60 | else |
@@ -71,7 +71,7 @@ static reschedule_check_t part_edf_scheduler_tick(void) | |||
71 | unsigned long flags; | 71 | unsigned long flags; |
72 | struct task_struct *t = current; | 72 | struct task_struct *t = current; |
73 | reschedule_check_t want_resched = NO_RESCHED; | 73 | reschedule_check_t want_resched = NO_RESCHED; |
74 | edf_domain_t *edf = local_edf; | 74 | rt_domain_t *edf = local_edf; |
75 | part_edf_domain_t *pedf = local_pedf; | 75 | part_edf_domain_t *pedf = local_pedf; |
76 | 76 | ||
77 | /* Check for inconsistency. We don't need the lock for this since | 77 | /* Check for inconsistency. We don't need the lock for this since |
@@ -100,7 +100,7 @@ static reschedule_check_t part_edf_scheduler_tick(void) | |||
100 | if (want_resched != FORCE_RESCHED) | 100 | if (want_resched != FORCE_RESCHED) |
101 | { | 101 | { |
102 | read_lock_irqsave(&edf->ready_lock, flags); | 102 | read_lock_irqsave(&edf->ready_lock, flags); |
103 | if (preemption_needed(edf, t)) | 103 | if (edf_preemption_needed(edf, t)) |
104 | want_resched = FORCE_RESCHED; | 104 | want_resched = FORCE_RESCHED; |
105 | read_unlock_irqrestore(&edf->ready_lock, flags); | 105 | read_unlock_irqrestore(&edf->ready_lock, flags); |
106 | } | 106 | } |
@@ -114,16 +114,16 @@ static int part_edf_schedule(struct task_struct * prev, | |||
114 | { | 114 | { |
115 | int need_deactivate = 1; | 115 | int need_deactivate = 1; |
116 | part_edf_domain_t* pedf = local_pedf; | 116 | part_edf_domain_t* pedf = local_pedf; |
117 | edf_domain_t* edf = &pedf->domain; | 117 | rt_domain_t* edf = &pedf->domain; |
118 | 118 | ||
119 | 119 | ||
120 | if (is_realtime(prev) && get_rt_flags(prev) == RT_F_SLEEP) | 120 | if (is_realtime(prev) && get_rt_flags(prev) == RT_F_SLEEP) |
121 | prepare_for_next_period(prev); | 121 | edf_prepare_for_next_period(prev); |
122 | 122 | ||
123 | if (get_rt_mode() == MODE_RT_RUN) { | 123 | if (get_rt_mode() == MODE_RT_RUN) { |
124 | write_lock(&edf->ready_lock); | 124 | write_lock(&edf->ready_lock); |
125 | if (is_realtime(prev) && is_released(prev) && is_running(prev) | 125 | if (is_realtime(prev) && is_released(prev) && is_running(prev) |
126 | && !preemption_needed(edf, prev)) { | 126 | && !edf_preemption_needed(edf, prev)) { |
127 | /* this really should only happen if the task has | 127 | /* this really should only happen if the task has |
128 | * 100% utilization... | 128 | * 100% utilization... |
129 | */ | 129 | */ |
@@ -159,7 +159,7 @@ static int part_edf_schedule(struct task_struct * prev, | |||
159 | 159 | ||
160 | static void part_edf_finish_switch(struct task_struct *prev) | 160 | static void part_edf_finish_switch(struct task_struct *prev) |
161 | { | 161 | { |
162 | edf_domain_t* edf = local_edf; | 162 | rt_domain_t* edf = local_edf; |
163 | 163 | ||
164 | if (!is_realtime(prev) || !is_running(prev)) | 164 | if (!is_realtime(prev) || !is_running(prev)) |
165 | return; | 165 | return; |
@@ -195,7 +195,7 @@ static void part_edf_finish_switch(struct task_struct *prev) | |||
195 | */ | 195 | */ |
196 | static long part_edf_prepare_task(struct task_struct * t) | 196 | static long part_edf_prepare_task(struct task_struct * t) |
197 | { | 197 | { |
198 | edf_domain_t* edf = task_edf(t); | 198 | rt_domain_t* edf = task_edf(t); |
199 | 199 | ||
200 | 200 | ||
201 | TRACE("[%d] part edf: prepare task %d on CPU %d\n", | 201 | TRACE("[%d] part edf: prepare task %d on CPU %d\n", |
@@ -207,7 +207,7 @@ static long part_edf_prepare_task(struct task_struct * t) | |||
207 | /* The action is already on. | 207 | /* The action is already on. |
208 | * Prepare immediate release. | 208 | * Prepare immediate release. |
209 | */ | 209 | */ |
210 | prepare_new_release(t); | 210 | edf_release_now(t); |
211 | /* The task should be running in the queue, otherwise signal | 211 | /* The task should be running in the queue, otherwise signal |
212 | * code will try to wake it up with fatal consequences. | 212 | * code will try to wake it up with fatal consequences. |
213 | */ | 213 | */ |
@@ -220,7 +220,7 @@ static long part_edf_prepare_task(struct task_struct * t) | |||
220 | 220 | ||
221 | static void part_edf_wake_up_task(struct task_struct *task) | 221 | static void part_edf_wake_up_task(struct task_struct *task) |
222 | { | 222 | { |
223 | edf_domain_t* edf; | 223 | rt_domain_t* edf; |
224 | 224 | ||
225 | edf = task_edf(task); | 225 | edf = task_edf(task); |
226 | 226 | ||
@@ -237,7 +237,7 @@ static void part_edf_wake_up_task(struct task_struct *task) | |||
237 | task->state = TASK_RUNNING; | 237 | task->state = TASK_RUNNING; |
238 | if (is_tardy(task)) { | 238 | if (is_tardy(task)) { |
239 | /* new sporadic release */ | 239 | /* new sporadic release */ |
240 | prepare_new_release(task); | 240 | edf_release_now(task); |
241 | add_ready(edf, task); | 241 | add_ready(edf, task); |
242 | 242 | ||
243 | } else if (task->time_slice) { | 243 | } else if (task->time_slice) { |
@@ -290,7 +290,7 @@ static int part_edf_mode_change(int new_mode) | |||
290 | 290 | ||
291 | if (new_mode == MODE_RT_RUN) | 291 | if (new_mode == MODE_RT_RUN) |
292 | for_each_online_cpu(cpu) | 292 | for_each_online_cpu(cpu) |
293 | prepare_new_releases(remote_edf(cpu), jiffies); | 293 | rerelease_all(remote_edf(cpu), edf_release_at); |
294 | TRACE("[%d] part edf: mode changed to %d\n", | 294 | TRACE("[%d] part edf: mode changed to %d\n", |
295 | smp_processor_id(), new_mode); | 295 | smp_processor_id(), new_mode); |
296 | return 0; | 296 | return 0; |