author		Bjoern B. Brandenburg <bbb@cs.unc.edu>		2010-09-21 20:41:56 -0400
committer	Bjoern B. Brandenburg <bbb@cs.unc.edu>		2010-09-21 20:41:56 -0400
commit		cf64f69b82e9d641aebdbbd98f468218e41d6105
tree		a7f74afe8ebbc91df0012df1738012397435ec08
parent		f58e23935ece11239b095451ab15953a0dbcadf4
EDF-WM: cleanup names a bit
-rw-r--r--	litmus/sched_edf_wm.c	107
1 files changed, 52 insertions, 55 deletions
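
Not part of the commit itself, but as a reading aid: the rename collapses the six per-CPU accessor macros (local_edf/local_pedf, remote_edf/remote_pedf, task_edf/task_pedf) into three (local_domain, remote_domain, domain_of_task), and callers now fetch the wm_domain_t once and derive the embedded rt_domain_t via &dom->domain. Below is a minimal, stand-alone user-space C sketch of that pattern; every demo_* name and NR_DEMO_CPUS is a hypothetical stand-in, not a LITMUS^RT API.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-ins for rt_domain_t and wm_domain_t. */
typedef struct { int ready_jobs; } demo_rt_domain_t;

typedef struct {
	demo_rt_domain_t domain;  /* embedded domain, like wm_domain_t.domain */
	int cpu;                  /* owning CPU, like wm_domain_t.cpu */
} demo_domain_t;

#define NR_DEMO_CPUS 4
static demo_domain_t demo_domains[NR_DEMO_CPUS];

/* One accessor per concept, mirroring remote_domain(cpu) in the patch. */
#define demo_remote_domain(cpu) (&demo_domains[(cpu)])

/* Recover the wrapper from its embedded member, mirroring the
 * container_of() use in domain_from_timer(). */
#define demo_domain_of_edf(edf) \
	((demo_domain_t *)((char *)(edf) - offsetof(demo_domain_t, domain)))

int main(void)
{
	demo_domain_t *dom = demo_remote_domain(2);   /* was remote_pedf(cpu) */
	demo_rt_domain_t *edf = &dom->domain;         /* was remote_edf(cpu)  */

	dom->cpu = 2;
	printf("cpu %d, wrapper recovered: %d\n",
	       dom->cpu, demo_domain_of_edf(edf) == dom);
	return 0;
}
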
diff --git a/litmus/sched_edf_wm.c b/litmus/sched_edf_wm.c
index ac0d2177eb7e..ed8cc24e9fe6 100644
--- a/litmus/sched_edf_wm.c
+++ b/litmus/sched_edf_wm.c
@@ -34,13 +34,11 @@ DEFINE_PER_CPU(wm_domain_t, wm_domains);
 
 #define TRACE_DOM(dom, fmt, args...) \
 	TRACE("(wm_domains[%d]) " fmt, (dom)->cpu, ##args)
-#define local_edf		(&__get_cpu_var(wm_domains).domain)
-#define local_pedf		(&__get_cpu_var(wm_domains))
-#define remote_edf(cpu)		(&per_cpu(wm_domains, cpu).domain)
-#define remote_pedf(cpu)	(&per_cpu(wm_domains, cpu))
-#define task_edf(task)		remote_edf(get_partition(task))
-#define task_pedf(task)		remote_pedf(get_partition(task))
 
+
+#define local_domain		(&__get_cpu_var(wm_domains))
+#define remote_domain(cpu)	(&per_cpu(wm_domains, cpu))
+#define domain_of_task(task)	(remote_domain(get_partition(task)))
 #define domain_from_timer(t)	(container_of((t), wm_domain_t, enforcement_timer))
 
 /* we assume the lock is being held */
@@ -98,13 +96,13 @@ static void requeue(struct task_struct* t, rt_domain_t *edf)
  */
 static int wm_check_resched(rt_domain_t *edf)
 {
-	wm_domain_t *pedf = container_of(edf, wm_domain_t, domain);
+	wm_domain_t *dom = container_of(edf, wm_domain_t, domain);
 
 	/* because this is a callback from rt_domain_t we already hold
 	 * the necessary lock for the ready queue
 	 */
-	if (edf_preemption_needed(edf, pedf->scheduled)) {
-		preempt(pedf);
+	if (edf_preemption_needed(edf, dom->scheduled)) {
+		preempt(dom);
 		return 1;
 	} else
 		return 0;
@@ -121,13 +119,13 @@ static void job_completion(struct task_struct* t, int forced)
 
 static void wm_tick(struct task_struct *t)
 {
-	wm_domain_t *pedf = local_pedf;
+	wm_domain_t *dom = local_domain;
 
 	/* Check for inconsistency. We don't need the lock for this since
 	 * ->scheduled is only changed in schedule, which obviously is not
 	 * executing in parallel on this CPU
 	 */
-	BUG_ON(is_realtime(t) && t != pedf->scheduled);
+	BUG_ON(is_realtime(t) && t != dom->scheduled);
 
 	if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) {
 		if (!is_np(t)) {
@@ -146,30 +144,30 @@ static void wm_tick(struct task_struct *t)
 
 static struct task_struct* wm_schedule(struct task_struct * prev)
 {
-	wm_domain_t* pedf = local_pedf;
-	rt_domain_t* edf = &pedf->domain;
+	wm_domain_t* dom = local_domain;
+	rt_domain_t* edf = &dom->domain;
 	struct task_struct* next;
 
 	int out_of_time, sleep, preempt,
 	    np, exists, blocks, resched;
 
-	raw_spin_lock(&pedf->slock);
+	raw_spin_lock(&dom->slock);
 
 	/* sanity checking
 	 * differently from gedf, when a task exits (dead)
-	 * pedf->schedule may be null and prev _is_ realtime
+	 * dom->schedule may be null and prev _is_ realtime
 	 */
-	BUG_ON(pedf->scheduled && pedf->scheduled != prev);
-	BUG_ON(pedf->scheduled && !is_realtime(prev));
+	BUG_ON(dom->scheduled && dom->scheduled != prev);
+	BUG_ON(dom->scheduled && !is_realtime(prev));
 
 	/* (0) Determine state */
-	exists = pedf->scheduled != NULL;
-	blocks = exists && !is_running(pedf->scheduled);
+	exists = dom->scheduled != NULL;
+	blocks = exists && !is_running(dom->scheduled);
 	out_of_time = exists &&
-		budget_enforced(pedf->scheduled) &&
-		budget_exhausted(pedf->scheduled);
-	np = exists && is_np(pedf->scheduled);
-	sleep = exists && get_rt_flags(pedf->scheduled) == RT_F_SLEEP;
+		budget_enforced(dom->scheduled) &&
+		budget_exhausted(dom->scheduled);
+	np = exists && is_np(dom->scheduled);
+	sleep = exists && get_rt_flags(dom->scheduled) == RT_F_SLEEP;
 	preempt = edf_preemption_needed(edf, prev);
 
 	/* If we need to preempt do so.
@@ -187,14 +185,14 @@ static struct task_struct* wm_schedule(struct task_struct * prev)
 	 * Multiple calls to request_exit_np() don't hurt.
 	 */
 	if (np && (out_of_time || preempt || sleep))
-		request_exit_np(pedf->scheduled);
+		request_exit_np(dom->scheduled);
 
 	/* Any task that is preemptable and either exhausts its execution
 	 * budget or wants to sleep completes. We may have to reschedule after
 	 * this.
 	 */
 	if (!np && (out_of_time || sleep) && !blocks) {
-		job_completion(pedf->scheduled, !sleep);
+		job_completion(dom->scheduled, !sleep);
 		resched = 1;
 	}
 
@@ -207,8 +205,8 @@ static struct task_struct* wm_schedule(struct task_struct * prev)
 		/* Take care of a previously scheduled
 		 * job by taking it out of the Linux runqueue.
 		 */
-		if (pedf->scheduled && !blocks)
-			requeue(pedf->scheduled, edf);
+		if (dom->scheduled && !blocks)
+			requeue(dom->scheduled, edf);
 		next = __take_ready(edf);
 	} else
 		/* Only override Linux scheduler if we have a real-time task
@@ -224,8 +222,8 @@ static struct task_struct* wm_schedule(struct task_struct * prev)
 		TRACE("becoming idle at %llu\n", litmus_clock());
 	}
 
-	pedf->scheduled = next;
-	raw_spin_unlock(&pedf->slock);
+	dom->scheduled = next;
+	raw_spin_unlock(&dom->slock);
 
 	return next;
 }
@@ -235,9 +233,9 @@ static struct task_struct* wm_schedule(struct task_struct * prev)
  */
 static void wm_task_new(struct task_struct * t, int on_rq, int running)
 {
-	rt_domain_t* edf = task_edf(t);
-	wm_domain_t* pedf = task_pedf(t);
+	wm_domain_t* dom = domain_of_task(t);
+	rt_domain_t* edf = &dom->domain;
 	unsigned long flags;
 
 	TRACE_TASK(t, "psn edf: task new, cpu = %d\n",
 		   t->rt_param.task_params.cpu);
@@ -248,28 +246,28 @@ static void wm_task_new(struct task_struct * t, int on_rq, int running)
 	/* The task should be running in the queue, otherwise signal
 	 * code will try to wake it up with fatal consequences.
 	 */
-	raw_spin_lock_irqsave(&pedf->slock, flags);
+	raw_spin_lock_irqsave(&dom->slock, flags);
 	if (running) {
 		/* there shouldn't be anything else running at the time */
-		BUG_ON(pedf->scheduled);
-		pedf->scheduled = t;
+		BUG_ON(dom->scheduled);
+		dom->scheduled = t;
 	} else {
 		requeue(t, edf);
 		/* maybe we have to reschedule */
-		preempt(pedf);
+		preempt(dom);
 	}
-	raw_spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_spin_unlock_irqrestore(&dom->slock, flags);
 }
 
 static void wm_task_wake_up(struct task_struct *task)
 {
 	unsigned long flags;
-	wm_domain_t* pedf = task_pedf(task);
-	rt_domain_t* edf = task_edf(task);
+	wm_domain_t* dom = domain_of_task(task);
+	rt_domain_t* edf = &dom->domain;
 	lt_t now;
 
 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
-	raw_spin_lock_irqsave(&pedf->slock, flags);
+	raw_spin_lock_irqsave(&dom->slock, flags);
 	BUG_ON(is_queued(task));
 	/* We need to take suspensions because of semaphores into
 	 * account! If a job resumes after being suspended due to acquiring
@@ -291,10 +289,10 @@ static void wm_task_wake_up(struct task_struct *task)
 	 * de-scheduling the task, i.e., wake_up() raced with schedule()
 	 * and won.
 	 */
-	if (pedf->scheduled != task)
+	if (dom->scheduled != task)
 		requeue(task, edf);
 
-	raw_spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_spin_unlock_irqrestore(&dom->slock, flags);
 	TRACE_TASK(task, "wake up done\n");
 }
 
@@ -310,22 +308,21 @@ static void wm_task_block(struct task_struct *t)
 static void wm_task_exit(struct task_struct * t)
 {
 	unsigned long flags;
-	wm_domain_t* pedf = task_pedf(t);
-	rt_domain_t* edf;
+	wm_domain_t* dom = domain_of_task(t);
+	rt_domain_t* edf = &dom->domain;
 
-	raw_spin_lock_irqsave(&pedf->slock, flags);
+	raw_spin_lock_irqsave(&dom->slock, flags);
 	if (is_queued(t)) {
 		/* dequeue */
-		edf = task_edf(t);
 		remove(edf, t);
 	}
-	if (pedf->scheduled == t)
-		pedf->scheduled = NULL;
+	if (dom->scheduled == t)
+		dom->scheduled = NULL;
 
 	TRACE_TASK(t, "RIP, now reschedule\n");
 
-	preempt(pedf);
-	raw_spin_unlock_irqrestore(&pedf->slock, flags);
+	preempt(dom);
+	raw_spin_unlock_irqrestore(&dom->slock, flags);
 }
 
 static long wm_admit_task(struct task_struct* tsk)
@@ -359,9 +356,9 @@ static int __init init_edf_wm(void)
 	 * we cannot use num_online_cpu()
 	 */
 	for (i = 0; i < num_online_cpus(); i++) {
-		wm_domain_init(remote_pedf(i),
+		wm_domain_init(remote_domain(i),
 			       wm_check_resched,
 			       NULL, i);
 	}
 	return register_sched_plugin(&edf_wm_plugin);
 }