Diffstat (limited to 'litmus/sched_pfp.c')
-rw-r--r--  litmus/sched_pfp.c  1693
1 file changed, 1693 insertions, 0 deletions

diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c
new file mode 100644
index 00000000000..b1d5b4326a0
--- /dev/null
+++ b/litmus/sched_pfp.c
@@ -0,0 +1,1693 @@
/*
 * litmus/sched_pfp.c
 *
 * Implementation of partitioned fixed-priority scheduling.
 * Based on PSN-EDF.
 */

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#include <litmus/litmus.h>
#include <litmus/wait.h>
#include <litmus/jobs.h>
#include <litmus/preempt.h>
#include <litmus/fp_common.h>
#include <litmus/sched_plugin.h>
#include <litmus/sched_trace.h>
#include <litmus/trace.h>
#include <litmus/budget.h>

#include <linux/uaccess.h>


typedef struct {
	rt_domain_t		domain;
	struct fp_prio_queue	ready_queue;
	int			cpu;
	struct task_struct*	scheduled; /* only RT tasks */
/*
 * scheduling lock slock
 * protects the domain and serializes scheduling decisions
 */
#define slock domain.ready_lock

} pfp_domain_t;

DEFINE_PER_CPU(pfp_domain_t, pfp_domains);

pfp_domain_t* pfp_doms[NR_CPUS];

#define local_pfp		(&__get_cpu_var(pfp_domains))
#define remote_dom(cpu)		(&per_cpu(pfp_domains, cpu).domain)
#define remote_pfp(cpu)		(&per_cpu(pfp_domains, cpu))
#define task_dom(task)		remote_dom(get_partition(task))
#define task_pfp(task)		remote_pfp(get_partition(task))
/* we assume the lock is being held */
static void preempt(pfp_domain_t *pfp)
{
	preempt_if_preemptable(pfp->scheduled, pfp->cpu);
}

static unsigned int priority_index(struct task_struct* t)
{
#ifdef CONFIG_LITMUS_LOCKING
	if (unlikely(t->rt_param.inh_task))
		/* use effective priority */
		t = t->rt_param.inh_task;

	if (is_priority_boosted(t)) {
		/* zero is reserved for priority-boosted tasks */
		return 0;
	} else
#endif
		return get_priority(t);
}
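
/* The ready queue is indexed by this value: index 0 is the highest-priority
 * bucket and is reserved for priority-boosted lock holders, so any boosted
 * job beats every non-boosted job regardless of its base priority; all other
 * jobs are indexed by their (possibly inherited) fixed priority.
 */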

static void pfp_release_jobs(rt_domain_t* rt, struct bheap* tasks)
{
	pfp_domain_t *pfp = container_of(rt, pfp_domain_t, domain);
	unsigned long flags;
	struct task_struct* t;
	struct bheap_node* hn;

	raw_spin_lock_irqsave(&pfp->slock, flags);

	while (!bheap_empty(tasks)) {
		hn = bheap_take(fp_ready_order, tasks);
		t = bheap2task(hn);
		TRACE_TASK(t, "released (part:%d prio:%d)\n",
			   get_partition(t), get_priority(t));
		fp_prio_add(&pfp->ready_queue, t, priority_index(t));
	}

	/* do we need to preempt? */
	if (fp_higher_prio(fp_prio_peek(&pfp->ready_queue), pfp->scheduled)) {
		TRACE_CUR("preempted by new release\n");
		preempt(pfp);
	}

	raw_spin_unlock_irqrestore(&pfp->slock, flags);
}

static void pfp_preempt_check(pfp_domain_t *pfp)
{
	if (fp_higher_prio(fp_prio_peek(&pfp->ready_queue), pfp->scheduled))
		preempt(pfp);
}

static void pfp_domain_init(pfp_domain_t* pfp,
			    int cpu)
{
	fp_domain_init(&pfp->domain, NULL, pfp_release_jobs);
	pfp->cpu       = cpu;
	pfp->scheduled = NULL;
	fp_prio_queue_init(&pfp->ready_queue);
}

static void requeue(struct task_struct* t, pfp_domain_t *pfp)
{
	if (t->state != TASK_RUNNING)
		TRACE_TASK(t, "requeue: !TASK_RUNNING\n");

	set_rt_flags(t, RT_F_RUNNING);
	if (is_released(t, litmus_clock()))
		fp_prio_add(&pfp->ready_queue, t, priority_index(t));
	else
		add_release(&pfp->domain, t); /* it has got to wait */
}

static void job_completion(struct task_struct* t, int forced)
{
	sched_trace_task_completion(t, forced);
	TRACE_TASK(t, "job_completion().\n");

	set_rt_flags(t, RT_F_SLEEP);
	prepare_for_next_period(t);
}

static void pfp_tick(struct task_struct *t)
{
	pfp_domain_t *pfp = local_pfp;

	/* Check for inconsistency. We don't need the lock for this since
	 * ->scheduled is only changed in schedule, which obviously is not
	 * executing in parallel on this CPU
	 */
	BUG_ON(is_realtime(t) && t != pfp->scheduled);

	if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) {
		if (!is_np(t)) {
			litmus_reschedule_local();
			TRACE("pfp_scheduler_tick: "
			      "%d is preemptable "
			      " => FORCE_RESCHED\n", t->pid);
		} else if (is_user_np(t)) {
			TRACE("pfp_scheduler_tick: "
			      "%d is non-preemptable, "
			      "preemption delayed.\n", t->pid);
			request_exit_np(t);
		}
	}
}
158 | |||
159 | static struct task_struct* pfp_schedule(struct task_struct * prev) | ||
160 | { | ||
161 | pfp_domain_t* pfp = local_pfp; | ||
162 | struct task_struct* next; | ||
163 | |||
164 | int out_of_time, sleep, preempt, np, exists, blocks, resched, migrate; | ||
165 | |||
166 | raw_spin_lock(&pfp->slock); | ||
167 | |||
168 | /* sanity checking | ||
169 | * differently from gedf, when a task exits (dead) | ||
170 | * pfp->schedule may be null and prev _is_ realtime | ||
171 | */ | ||
172 | BUG_ON(pfp->scheduled && pfp->scheduled != prev); | ||
173 | BUG_ON(pfp->scheduled && !is_realtime(prev)); | ||
174 | |||
175 | /* (0) Determine state */ | ||
176 | exists = pfp->scheduled != NULL; | ||
177 | blocks = exists && !is_running(pfp->scheduled); | ||
178 | out_of_time = exists && | ||
179 | budget_enforced(pfp->scheduled) && | ||
180 | budget_exhausted(pfp->scheduled); | ||
181 | np = exists && is_np(pfp->scheduled); | ||
182 | sleep = exists && get_rt_flags(pfp->scheduled) == RT_F_SLEEP; | ||
183 | migrate = exists && get_partition(pfp->scheduled) != pfp->cpu; | ||
184 | preempt = migrate || fp_preemption_needed(&pfp->ready_queue, prev); | ||
185 | |||
186 | /* If we need to preempt do so. | ||
187 | * The following checks set resched to 1 in case of special | ||
188 | * circumstances. | ||
189 | */ | ||
190 | resched = preempt; | ||
191 | |||
192 | /* If a task blocks we have no choice but to reschedule. | ||
193 | */ | ||
194 | if (blocks) | ||
195 | resched = 1; | ||
196 | |||
197 | /* Request a sys_exit_np() call if we would like to preempt but cannot. | ||
198 | * Multiple calls to request_exit_np() don't hurt. | ||
199 | */ | ||
200 | if (np && (out_of_time || preempt || sleep)) | ||
201 | request_exit_np(pfp->scheduled); | ||
202 | |||
203 | /* Any task that is preemptable and either exhausts its execution | ||
204 | * budget or wants to sleep completes. We may have to reschedule after | ||
205 | * this. | ||
206 | */ | ||
207 | if (!np && (out_of_time || sleep) && !blocks && !migrate) { | ||
208 | job_completion(pfp->scheduled, !sleep); | ||
209 | resched = 1; | ||
210 | } | ||
211 | |||
212 | /* The final scheduling decision. Do we need to switch for some reason? | ||
213 | * Switch if we are in RT mode and have no task or if we need to | ||
214 | * resched. | ||
215 | */ | ||
216 | next = NULL; | ||
217 | if ((!np || blocks) && (resched || !exists)) { | ||
218 | /* When preempting a task that does not block, then | ||
219 | * re-insert it into either the ready queue or the | ||
220 | * release queue (if it completed). requeue() picks | ||
221 | * the appropriate queue. | ||
222 | */ | ||
223 | if (pfp->scheduled && !blocks && !migrate) | ||
224 | requeue(pfp->scheduled, pfp); | ||
225 | next = fp_prio_take(&pfp->ready_queue); | ||
226 | } else | ||
227 | /* Only override Linux scheduler if we have a real-time task | ||
228 | * scheduled that needs to continue. | ||
229 | */ | ||
230 | if (exists) | ||
231 | next = prev; | ||
232 | |||
233 | if (next) { | ||
234 | TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); | ||
235 | set_rt_flags(next, RT_F_RUNNING); | ||
236 | } else { | ||
237 | TRACE("becoming idle at %llu\n", litmus_clock()); | ||
238 | } | ||
239 | |||
240 | pfp->scheduled = next; | ||
241 | sched_state_task_picked(); | ||
242 | raw_spin_unlock(&pfp->slock); | ||
243 | |||
244 | return next; | ||
245 | } | ||
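
/* Summary of the decision logic above: prev keeps the processor only if a
 * real-time task is scheduled and either it is inside a non-preemptive
 * section (and did not block), or no resched condition (blocking,
 * completion, budget exhaustion, pending migration, or a higher-priority
 * ready job) was detected; otherwise the highest-priority ready job, if
 * any, is picked instead.
 */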
246 | |||
247 | #ifdef CONFIG_LITMUS_LOCKING | ||
248 | |||
249 | /* prev is no longer scheduled --- see if it needs to migrate */ | ||
250 | static void pfp_finish_switch(struct task_struct *prev) | ||
251 | { | ||
252 | pfp_domain_t *to; | ||
253 | |||
254 | if (is_realtime(prev) && | ||
255 | is_running(prev) && | ||
256 | get_partition(prev) != smp_processor_id()) { | ||
257 | TRACE_TASK(prev, "needs to migrate from P%d to P%d\n", | ||
258 | smp_processor_id(), get_partition(prev)); | ||
259 | |||
260 | to = task_pfp(prev); | ||
261 | |||
262 | raw_spin_lock(&to->slock); | ||
263 | |||
264 | TRACE_TASK(prev, "adding to queue on P%d\n", to->cpu); | ||
265 | requeue(prev, to); | ||
266 | if (fp_preemption_needed(&to->ready_queue, to->scheduled)) | ||
267 | preempt(to); | ||
268 | |||
269 | raw_spin_unlock(&to->slock); | ||
270 | |||
271 | } | ||
272 | } | ||
273 | |||
274 | #endif | ||
275 | |||
276 | /* Prepare a task for running in RT mode | ||
277 | */ | ||
278 | static void pfp_task_new(struct task_struct * t, int on_rq, int running) | ||
279 | { | ||
280 | pfp_domain_t* pfp = task_pfp(t); | ||
281 | unsigned long flags; | ||
282 | |||
283 | TRACE_TASK(t, "P-FP: task new, cpu = %d\n", | ||
284 | t->rt_param.task_params.cpu); | ||
285 | |||
286 | /* setup job parameters */ | ||
287 | release_at(t, litmus_clock()); | ||
288 | |||
289 | /* The task should be running in the queue, otherwise signal | ||
290 | * code will try to wake it up with fatal consequences. | ||
291 | */ | ||
292 | raw_spin_lock_irqsave(&pfp->slock, flags); | ||
293 | if (running) { | ||
294 | /* there shouldn't be anything else running at the time */ | ||
295 | BUG_ON(pfp->scheduled); | ||
296 | pfp->scheduled = t; | ||
297 | } else { | ||
298 | requeue(t, pfp); | ||
299 | /* maybe we have to reschedule */ | ||
300 | pfp_preempt_check(pfp); | ||
301 | } | ||
302 | raw_spin_unlock_irqrestore(&pfp->slock, flags); | ||
303 | } | ||
304 | |||
305 | static void pfp_task_wake_up(struct task_struct *task) | ||
306 | { | ||
307 | unsigned long flags; | ||
308 | pfp_domain_t* pfp = task_pfp(task); | ||
309 | lt_t now; | ||
310 | |||
311 | TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); | ||
312 | raw_spin_lock_irqsave(&pfp->slock, flags); | ||
313 | |||
314 | #ifdef CONFIG_LITMUS_LOCKING | ||
315 | /* Should only be queued when processing a fake-wake up due to a | ||
316 | * migration-related state change. */ | ||
317 | if (unlikely(is_queued(task))) { | ||
318 | TRACE_TASK(task, "WARNING: waking task still queued. Is this right?\n"); | ||
319 | goto out_unlock; | ||
320 | } | ||
321 | #else | ||
322 | BUG_ON(is_queued(task)); | ||
323 | #endif | ||
324 | now = litmus_clock(); | ||
325 | if (is_tardy(task, now) | ||
326 | #ifdef CONFIG_LITMUS_LOCKING | ||
327 | /* We need to take suspensions because of semaphores into | ||
328 | * account! If a job resumes after being suspended due to acquiring | ||
329 | * a semaphore, it should never be treated as a new job release. | ||
330 | */ | ||
331 | && !is_priority_boosted(task) | ||
332 | #endif | ||
333 | ) { | ||
334 | /* new sporadic release */ | ||
335 | release_at(task, now); | ||
336 | sched_trace_task_release(task); | ||
337 | } | ||
338 | |||
339 | /* Only add to ready queue if it is not the currently-scheduled | ||
340 | * task. This could be the case if a task was woken up concurrently | ||
341 | * on a remote CPU before the executing CPU got around to actually | ||
342 | * de-scheduling the task, i.e., wake_up() raced with schedule() | ||
343 | * and won. Also, don't requeue if it is still queued, which can | ||
344 | * happen under the DPCP due wake-ups racing with migrations. | ||
345 | */ | ||
346 | if (pfp->scheduled != task) { | ||
347 | requeue(task, pfp); | ||
348 | pfp_preempt_check(pfp); | ||
349 | } | ||
350 | |||
351 | out_unlock: | ||
352 | raw_spin_unlock_irqrestore(&pfp->slock, flags); | ||
353 | TRACE_TASK(task, "wake up done\n"); | ||
354 | } | ||
355 | |||
356 | static void pfp_task_block(struct task_struct *t) | ||
357 | { | ||
358 | /* only running tasks can block, thus t is in no queue */ | ||
359 | TRACE_TASK(t, "block at %llu, state=%d\n", litmus_clock(), t->state); | ||
360 | |||
361 | BUG_ON(!is_realtime(t)); | ||
362 | |||
363 | /* If this task blocked normally, it shouldn't be queued. The exception is | ||
364 | * if this is a simulated block()/wakeup() pair from the pull-migration code path. | ||
365 | * This should only happen if the DPCP is being used. | ||
366 | */ | ||
367 | #ifdef CONFIG_LITMUS_LOCKING | ||
368 | if (unlikely(is_queued(t))) | ||
369 | TRACE_TASK(t, "WARNING: blocking task still queued. Is this right?\n"); | ||
370 | #else | ||
371 | BUG_ON(is_queued(t)); | ||
372 | #endif | ||
373 | } | ||
374 | |||
375 | static void pfp_task_exit(struct task_struct * t) | ||
376 | { | ||
377 | unsigned long flags; | ||
378 | pfp_domain_t* pfp = task_pfp(t); | ||
379 | rt_domain_t* dom; | ||
380 | |||
381 | raw_spin_lock_irqsave(&pfp->slock, flags); | ||
382 | if (is_queued(t)) { | ||
383 | BUG(); /* This currently doesn't work. */ | ||
384 | /* dequeue */ | ||
385 | dom = task_dom(t); | ||
386 | remove(dom, t); | ||
387 | } | ||
388 | if (pfp->scheduled == t) { | ||
389 | pfp->scheduled = NULL; | ||
390 | preempt(pfp); | ||
391 | } | ||
392 | TRACE_TASK(t, "RIP, now reschedule\n"); | ||
393 | |||
394 | raw_spin_unlock_irqrestore(&pfp->slock, flags); | ||
395 | } | ||
396 | |||
#ifdef CONFIG_LITMUS_LOCKING

#include <litmus/fdso.h>
#include <litmus/srp.h>

static void fp_dequeue(pfp_domain_t* pfp, struct task_struct* t)
{
	BUG_ON(pfp->scheduled == t && is_queued(t));
	if (is_queued(t))
		fp_prio_remove(&pfp->ready_queue, t, priority_index(t));
}

static void fp_set_prio_inh(pfp_domain_t* pfp, struct task_struct* t,
			    struct task_struct* prio_inh)
{
	int requeue;

	if (!t || t->rt_param.inh_task == prio_inh) {
		/* no update required */
		if (t)
			TRACE_TASK(t, "no prio-inh update required\n");
		return;
	}

	requeue = is_queued(t);
	TRACE_TASK(t, "prio-inh: is_queued:%d\n", requeue);

	if (requeue)
		/* first remove */
		fp_dequeue(pfp, t);

	t->rt_param.inh_task = prio_inh;

	if (requeue)
		/* add again to the right queue */
		fp_prio_add(&pfp->ready_queue, t, priority_index(t));
}

static int effective_agent_priority(int prio)
{
	/* make sure agents have higher priority */
	return prio - LITMUS_MAX_PRIORITY;
}

static lt_t prio_point(int eprio)
{
	/* make sure we have non-negative prio points */
	return eprio + LITMUS_MAX_PRIORITY;
}

static int prio_from_point(lt_t prio_point)
{
	return ((int) prio_point) - LITMUS_MAX_PRIORITY;
}
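
/* Worked example of the offset arithmetic (assuming, hypothetically, that
 * LITMUS_MAX_PRIORITY is 512; the actual value is configuration-dependent):
 * a task with base priority 7 that acts as an agent gets effective priority
 * 7 - 512 = -505, so every agent beats every regular task in fixed-priority
 * comparisons. prio_point() shifts this back into the non-negative range
 * required by the priority-ordered wait queues (-505 + 512 = 7), and
 * prio_from_point() undoes the shift.
 */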
451 | |||
452 | static void boost_priority(struct task_struct* t, lt_t priority_point) | ||
453 | { | ||
454 | unsigned long flags; | ||
455 | pfp_domain_t* pfp = task_pfp(t); | ||
456 | |||
457 | raw_spin_lock_irqsave(&pfp->slock, flags); | ||
458 | |||
459 | |||
460 | TRACE_TASK(t, "priority boosted at %llu\n", litmus_clock()); | ||
461 | |||
462 | tsk_rt(t)->priority_boosted = 1; | ||
463 | /* tie-break by protocol-specific priority point */ | ||
464 | tsk_rt(t)->boost_start_time = priority_point; | ||
465 | |||
466 | if (pfp->scheduled != t) { | ||
467 | /* holder may be queued: first stop queue changes */ | ||
468 | raw_spin_lock(&pfp->domain.release_lock); | ||
469 | if (is_queued(t) && | ||
470 | /* If it is queued, then we need to re-order. */ | ||
471 | bheap_decrease(fp_ready_order, tsk_rt(t)->heap_node) && | ||
472 | /* If we bubbled to the top, then we need to check for preemptions. */ | ||
473 | fp_preemption_needed(&pfp->ready_queue, pfp->scheduled)) | ||
474 | preempt(pfp); | ||
475 | raw_spin_unlock(&pfp->domain.release_lock); | ||
476 | } /* else: nothing to do since the job is not queued while scheduled */ | ||
477 | |||
478 | raw_spin_unlock_irqrestore(&pfp->slock, flags); | ||
479 | } | ||
480 | |||
481 | static void unboost_priority(struct task_struct* t) | ||
482 | { | ||
483 | unsigned long flags; | ||
484 | pfp_domain_t* pfp = task_pfp(t); | ||
485 | lt_t now; | ||
486 | |||
487 | raw_spin_lock_irqsave(&pfp->slock, flags); | ||
488 | now = litmus_clock(); | ||
489 | |||
490 | /* assumption: this only happens when the job is scheduled */ | ||
491 | BUG_ON(pfp->scheduled != t); | ||
492 | |||
493 | TRACE_TASK(t, "priority restored at %llu\n", now); | ||
494 | |||
495 | /* priority boosted jobs must be scheduled */ | ||
496 | BUG_ON(pfp->scheduled != t); | ||
497 | |||
498 | tsk_rt(t)->priority_boosted = 0; | ||
499 | tsk_rt(t)->boost_start_time = 0; | ||
500 | |||
501 | /* check if this changes anything */ | ||
502 | if (fp_preemption_needed(&pfp->ready_queue, pfp->scheduled)) | ||
503 | preempt(pfp); | ||
504 | |||
505 | raw_spin_unlock_irqrestore(&pfp->slock, flags); | ||
506 | } | ||
507 | |||
508 | /* ******************** SRP support ************************ */ | ||
509 | |||
510 | static unsigned int pfp_get_srp_prio(struct task_struct* t) | ||
511 | { | ||
512 | return get_priority(t); | ||
513 | } | ||
514 | |||
515 | /* ******************** FMLP support ********************** */ | ||
516 | |||
517 | struct fmlp_semaphore { | ||
518 | struct litmus_lock litmus_lock; | ||
519 | |||
520 | /* current resource holder */ | ||
521 | struct task_struct *owner; | ||
522 | |||
523 | /* FIFO queue of waiting tasks */ | ||
524 | wait_queue_head_t wait; | ||
525 | }; | ||
526 | |||
527 | static inline struct fmlp_semaphore* fmlp_from_lock(struct litmus_lock* lock) | ||
528 | { | ||
529 | return container_of(lock, struct fmlp_semaphore, litmus_lock); | ||
530 | } | ||
531 | int pfp_fmlp_lock(struct litmus_lock* l) | ||
532 | { | ||
533 | struct task_struct* t = current; | ||
534 | struct fmlp_semaphore *sem = fmlp_from_lock(l); | ||
535 | wait_queue_t wait; | ||
536 | unsigned long flags; | ||
537 | lt_t time_of_request; | ||
538 | |||
539 | if (!is_realtime(t)) | ||
540 | return -EPERM; | ||
541 | |||
542 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
543 | |||
544 | /* tie-break by this point in time */ | ||
545 | time_of_request = litmus_clock(); | ||
546 | |||
547 | /* Priority-boost ourself *before* we suspend so that | ||
548 | * our priority is boosted when we resume. */ | ||
549 | boost_priority(t, time_of_request); | ||
550 | |||
551 | if (sem->owner) { | ||
552 | /* resource is not free => must suspend and wait */ | ||
553 | |||
554 | init_waitqueue_entry(&wait, t); | ||
555 | |||
556 | /* FIXME: interruptible would be nice some day */ | ||
557 | set_task_state(t, TASK_UNINTERRUPTIBLE); | ||
558 | |||
559 | __add_wait_queue_tail_exclusive(&sem->wait, &wait); | ||
560 | |||
561 | TS_LOCK_SUSPEND; | ||
562 | |||
563 | /* release lock before sleeping */ | ||
564 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
565 | |||
566 | /* We depend on the FIFO order. Thus, we don't need to recheck | ||
567 | * when we wake up; we are guaranteed to have the lock since | ||
568 | * there is only one wake up per release. | ||
569 | */ | ||
570 | |||
571 | schedule(); | ||
572 | |||
573 | TS_LOCK_RESUME; | ||
574 | |||
575 | /* Since we hold the lock, no other task will change | ||
576 | * ->owner. We can thus check it without acquiring the spin | ||
577 | * lock. */ | ||
578 | BUG_ON(sem->owner != t); | ||
579 | } else { | ||
580 | /* it's ours now */ | ||
581 | sem->owner = t; | ||
582 | |||
583 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
584 | } | ||
585 | |||
586 | return 0; | ||
587 | } | ||
588 | |||
589 | int pfp_fmlp_unlock(struct litmus_lock* l) | ||
590 | { | ||
591 | struct task_struct *t = current, *next; | ||
592 | struct fmlp_semaphore *sem = fmlp_from_lock(l); | ||
593 | unsigned long flags; | ||
594 | int err = 0; | ||
595 | |||
596 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
597 | |||
598 | if (sem->owner != t) { | ||
599 | err = -EINVAL; | ||
600 | goto out; | ||
601 | } | ||
602 | |||
603 | /* we lose the benefit of priority boosting */ | ||
604 | |||
605 | unboost_priority(t); | ||
606 | |||
607 | /* check if there are jobs waiting for this resource */ | ||
608 | next = __waitqueue_remove_first(&sem->wait); | ||
609 | if (next) { | ||
610 | /* next becomes the resouce holder */ | ||
611 | sem->owner = next; | ||
612 | |||
613 | /* Wake up next. The waiting job is already priority-boosted. */ | ||
614 | wake_up_process(next); | ||
615 | } else | ||
616 | /* resource becomes available */ | ||
617 | sem->owner = NULL; | ||
618 | |||
619 | out: | ||
620 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
621 | return err; | ||
622 | } | ||
623 | |||
624 | int pfp_fmlp_close(struct litmus_lock* l) | ||
625 | { | ||
626 | struct task_struct *t = current; | ||
627 | struct fmlp_semaphore *sem = fmlp_from_lock(l); | ||
628 | unsigned long flags; | ||
629 | |||
630 | int owner; | ||
631 | |||
632 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
633 | |||
634 | owner = sem->owner == t; | ||
635 | |||
636 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
637 | |||
638 | if (owner) | ||
639 | pfp_fmlp_unlock(l); | ||
640 | |||
641 | return 0; | ||
642 | } | ||
643 | |||
644 | void pfp_fmlp_free(struct litmus_lock* lock) | ||
645 | { | ||
646 | kfree(fmlp_from_lock(lock)); | ||
647 | } | ||
648 | |||
649 | static struct litmus_lock_ops pfp_fmlp_lock_ops = { | ||
650 | .close = pfp_fmlp_close, | ||
651 | .lock = pfp_fmlp_lock, | ||
652 | .unlock = pfp_fmlp_unlock, | ||
653 | .deallocate = pfp_fmlp_free, | ||
654 | }; | ||
655 | |||
656 | static struct litmus_lock* pfp_new_fmlp(void) | ||
657 | { | ||
658 | struct fmlp_semaphore* sem; | ||
659 | |||
660 | sem = kmalloc(sizeof(*sem), GFP_KERNEL); | ||
661 | if (!sem) | ||
662 | return NULL; | ||
663 | |||
664 | sem->owner = NULL; | ||
665 | init_waitqueue_head(&sem->wait); | ||
666 | sem->litmus_lock.ops = &pfp_fmlp_lock_ops; | ||
667 | |||
668 | return &sem->litmus_lock; | ||
669 | } | ||
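
/* In summary, the FMLP under P-FP combines priority boosting with FIFO
 * queuing: a job is boosted as soon as it issues its request (tie-broken by
 * request time), waits in FIFO order if the semaphore is held, and is handed
 * the semaphore directly by the previous holder on release, so exactly one
 * wake-up occurs per release.
 */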
670 | |||
671 | /* ******************** MPCP support ********************** */ | ||
672 | |||
673 | struct mpcp_semaphore { | ||
674 | struct litmus_lock litmus_lock; | ||
675 | |||
676 | /* current resource holder */ | ||
677 | struct task_struct *owner; | ||
678 | |||
679 | /* priority queue of waiting tasks */ | ||
680 | wait_queue_head_t wait; | ||
681 | |||
682 | /* priority ceiling per cpu */ | ||
683 | unsigned int prio_ceiling[NR_CPUS]; | ||
684 | |||
685 | /* should jobs spin "virtually" for this resource? */ | ||
686 | int vspin; | ||
687 | }; | ||
688 | |||
689 | #define OMEGA_CEILING UINT_MAX | ||
690 | |||
691 | /* Since jobs spin "virtually" while waiting to acquire a lock, | ||
692 | * they first must aquire a local per-cpu resource. | ||
693 | */ | ||
694 | static DEFINE_PER_CPU(wait_queue_head_t, mpcpvs_vspin_wait); | ||
695 | static DEFINE_PER_CPU(struct task_struct*, mpcpvs_vspin); | ||
696 | |||
697 | /* called with preemptions off <=> no local modifications */ | ||
698 | static void mpcp_vspin_enter(void) | ||
699 | { | ||
700 | struct task_struct* t = current; | ||
701 | |||
702 | while (1) { | ||
703 | if (__get_cpu_var(mpcpvs_vspin) == NULL) { | ||
704 | /* good, we get to issue our request */ | ||
705 | __get_cpu_var(mpcpvs_vspin) = t; | ||
706 | break; | ||
707 | } else { | ||
708 | /* some job is spinning => enqueue in request queue */ | ||
709 | prio_wait_queue_t wait; | ||
710 | wait_queue_head_t* vspin = &__get_cpu_var(mpcpvs_vspin_wait); | ||
711 | unsigned long flags; | ||
712 | |||
713 | /* ordered by regular priority */ | ||
714 | init_prio_waitqueue_entry(&wait, t, prio_point(get_priority(t))); | ||
715 | |||
716 | spin_lock_irqsave(&vspin->lock, flags); | ||
717 | |||
718 | set_task_state(t, TASK_UNINTERRUPTIBLE); | ||
719 | |||
720 | __add_wait_queue_prio_exclusive(vspin, &wait); | ||
721 | |||
722 | spin_unlock_irqrestore(&vspin->lock, flags); | ||
723 | |||
724 | TS_LOCK_SUSPEND; | ||
725 | |||
726 | preempt_enable_no_resched(); | ||
727 | |||
728 | schedule(); | ||
729 | |||
730 | preempt_disable(); | ||
731 | |||
732 | TS_LOCK_RESUME; | ||
733 | /* Recheck if we got it --- some higher-priority process might | ||
734 | * have swooped in. */ | ||
735 | } | ||
736 | } | ||
737 | /* ok, now it is ours */ | ||
738 | } | ||
739 | |||
740 | /* called with preemptions off */ | ||
741 | static void mpcp_vspin_exit(void) | ||
742 | { | ||
743 | struct task_struct* t = current, *next; | ||
744 | unsigned long flags; | ||
745 | wait_queue_head_t* vspin = &__get_cpu_var(mpcpvs_vspin_wait); | ||
746 | |||
747 | BUG_ON(__get_cpu_var(mpcpvs_vspin) != t); | ||
748 | |||
749 | /* no spinning job */ | ||
750 | __get_cpu_var(mpcpvs_vspin) = NULL; | ||
751 | |||
752 | /* see if anyone is waiting for us to stop "spinning" */ | ||
753 | spin_lock_irqsave(&vspin->lock, flags); | ||
754 | next = __waitqueue_remove_first(vspin); | ||
755 | |||
756 | if (next) | ||
757 | wake_up_process(next); | ||
758 | |||
759 | spin_unlock_irqrestore(&vspin->lock, flags); | ||
760 | } | ||
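
/* Virtual spinning in a nutshell: at most one job per processor (the one
 * recorded in mpcpvs_vspin) may have an outstanding global request at any
 * time; later local requesters suspend on mpcpvs_vspin_wait in priority
 * order. This mimics the busy-waiting assumed by the virtual-spinning MPCP
 * analysis without actually burning cycles, since the "spinning" job is
 * suspended while it waits for the global semaphore.
 */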
761 | |||
762 | static inline struct mpcp_semaphore* mpcp_from_lock(struct litmus_lock* lock) | ||
763 | { | ||
764 | return container_of(lock, struct mpcp_semaphore, litmus_lock); | ||
765 | } | ||
766 | |||
767 | int pfp_mpcp_lock(struct litmus_lock* l) | ||
768 | { | ||
769 | struct task_struct* t = current; | ||
770 | struct mpcp_semaphore *sem = mpcp_from_lock(l); | ||
771 | prio_wait_queue_t wait; | ||
772 | unsigned long flags; | ||
773 | |||
774 | if (!is_realtime(t)) | ||
775 | return -EPERM; | ||
776 | |||
777 | preempt_disable(); | ||
778 | |||
779 | if (sem->vspin) | ||
780 | mpcp_vspin_enter(); | ||
781 | |||
782 | /* Priority-boost ourself *before* we suspend so that | ||
783 | * our priority is boosted when we resume. Use the priority | ||
784 | * ceiling for the local partition. */ | ||
785 | boost_priority(t, sem->prio_ceiling[get_partition(t)]); | ||
786 | |||
787 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
788 | |||
789 | preempt_enable_no_resched(); | ||
790 | |||
791 | if (sem->owner) { | ||
792 | /* resource is not free => must suspend and wait */ | ||
793 | |||
794 | /* ordered by regular priority */ | ||
795 | init_prio_waitqueue_entry(&wait, t, prio_point(get_priority(t))); | ||
796 | |||
797 | /* FIXME: interruptible would be nice some day */ | ||
798 | set_task_state(t, TASK_UNINTERRUPTIBLE); | ||
799 | |||
800 | __add_wait_queue_prio_exclusive(&sem->wait, &wait); | ||
801 | |||
802 | TS_LOCK_SUSPEND; | ||
803 | |||
804 | /* release lock before sleeping */ | ||
805 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
806 | |||
807 | /* We depend on the FIFO order. Thus, we don't need to recheck | ||
808 | * when we wake up; we are guaranteed to have the lock since | ||
809 | * there is only one wake up per release. | ||
810 | */ | ||
811 | |||
812 | schedule(); | ||
813 | |||
814 | TS_LOCK_RESUME; | ||
815 | |||
816 | /* Since we hold the lock, no other task will change | ||
817 | * ->owner. We can thus check it without acquiring the spin | ||
818 | * lock. */ | ||
819 | BUG_ON(sem->owner != t); | ||
820 | } else { | ||
821 | /* it's ours now */ | ||
822 | sem->owner = t; | ||
823 | |||
824 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
825 | } | ||
826 | |||
827 | return 0; | ||
828 | } | ||
829 | |||
int pfp_mpcp_unlock(struct litmus_lock* l)
{
	struct task_struct *t = current, *next;
	struct mpcp_semaphore *sem = mpcp_from_lock(l);
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&sem->wait.lock, flags);

	if (sem->owner != t) {
		err = -EINVAL;
		goto out;
	}

	/* we lose the benefit of priority boosting */

	unboost_priority(t);

	/* check if there are jobs waiting for this resource */
	next = __waitqueue_remove_first(&sem->wait);
	if (next) {
		/* next becomes the resource holder */
		sem->owner = next;

		/* Wake up next. The waiting job is already priority-boosted. */
		wake_up_process(next);
	} else
		/* resource becomes available */
		sem->owner = NULL;

out:
	spin_unlock_irqrestore(&sem->wait.lock, flags);

	if (sem->vspin && err == 0) {
		preempt_disable();
		mpcp_vspin_exit();
		preempt_enable();
	}

	return err;
}
871 | |||
872 | int pfp_mpcp_open(struct litmus_lock* l, void* config) | ||
873 | { | ||
874 | struct task_struct *t = current; | ||
875 | struct mpcp_semaphore *sem = mpcp_from_lock(l); | ||
876 | int cpu, local_cpu; | ||
877 | unsigned long flags; | ||
878 | |||
879 | if (!is_realtime(t)) | ||
880 | /* we need to know the real-time priority */ | ||
881 | return -EPERM; | ||
882 | |||
883 | local_cpu = get_partition(t); | ||
884 | |||
885 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
886 | |||
887 | for (cpu = 0; cpu < NR_CPUS; cpu++) | ||
888 | if (cpu != local_cpu) | ||
889 | { | ||
890 | sem->prio_ceiling[cpu] = min(sem->prio_ceiling[cpu], | ||
891 | get_priority(t)); | ||
892 | TRACE_CUR("priority ceiling for sem %p is now %d on cpu %d\n", | ||
893 | sem, sem->prio_ceiling[cpu], cpu); | ||
894 | } | ||
895 | |||
896 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
897 | |||
898 | return 0; | ||
899 | } | ||
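
/* Note the cpu != local_cpu exclusion above: prio_ceiling[c] thus records
 * the highest priority of any *remote* task sharing the semaphore, which is
 * exactly the ceiling a holder on processor c must be boosted to; see the
 * boost_priority() call in pfp_mpcp_lock(), which uses the entry for the
 * requester's own partition.
 */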
900 | |||
901 | int pfp_mpcp_close(struct litmus_lock* l) | ||
902 | { | ||
903 | struct task_struct *t = current; | ||
904 | struct mpcp_semaphore *sem = mpcp_from_lock(l); | ||
905 | unsigned long flags; | ||
906 | |||
907 | int owner; | ||
908 | |||
909 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
910 | |||
911 | owner = sem->owner == t; | ||
912 | |||
913 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
914 | |||
915 | if (owner) | ||
916 | pfp_mpcp_unlock(l); | ||
917 | |||
918 | return 0; | ||
919 | } | ||
920 | |||
921 | void pfp_mpcp_free(struct litmus_lock* lock) | ||
922 | { | ||
923 | kfree(mpcp_from_lock(lock)); | ||
924 | } | ||
925 | |||
926 | static struct litmus_lock_ops pfp_mpcp_lock_ops = { | ||
927 | .close = pfp_mpcp_close, | ||
928 | .lock = pfp_mpcp_lock, | ||
929 | .open = pfp_mpcp_open, | ||
930 | .unlock = pfp_mpcp_unlock, | ||
931 | .deallocate = pfp_mpcp_free, | ||
932 | }; | ||
933 | |||
934 | static struct litmus_lock* pfp_new_mpcp(int vspin) | ||
935 | { | ||
936 | struct mpcp_semaphore* sem; | ||
937 | int cpu; | ||
938 | |||
939 | sem = kmalloc(sizeof(*sem), GFP_KERNEL); | ||
940 | if (!sem) | ||
941 | return NULL; | ||
942 | |||
943 | sem->owner = NULL; | ||
944 | init_waitqueue_head(&sem->wait); | ||
945 | sem->litmus_lock.ops = &pfp_mpcp_lock_ops; | ||
946 | |||
947 | for (cpu = 0; cpu < NR_CPUS; cpu++) | ||
948 | sem->prio_ceiling[cpu] = OMEGA_CEILING; | ||
949 | |||
950 | /* mark as virtual spinning */ | ||
951 | sem->vspin = vspin; | ||
952 | |||
953 | return &sem->litmus_lock; | ||
954 | } | ||
955 | |||
956 | |||
957 | /* ******************** PCP support ********************** */ | ||
958 | |||
959 | |||
960 | struct pcp_semaphore { | ||
961 | struct litmus_lock litmus_lock; | ||
962 | |||
963 | struct list_head ceiling; | ||
964 | |||
965 | /* current resource holder */ | ||
966 | struct task_struct *owner; | ||
967 | |||
968 | /* priority ceiling --- can be negative due to DPCP support */ | ||
969 | int prio_ceiling; | ||
970 | |||
971 | /* on which processor is this PCP semaphore allocated? */ | ||
972 | int on_cpu; | ||
973 | }; | ||
974 | |||
975 | static inline struct pcp_semaphore* pcp_from_lock(struct litmus_lock* lock) | ||
976 | { | ||
977 | return container_of(lock, struct pcp_semaphore, litmus_lock); | ||
978 | } | ||
979 | |||
980 | |||
981 | struct pcp_state { | ||
982 | struct list_head system_ceiling; | ||
983 | |||
984 | /* highest-priority waiting task */ | ||
985 | struct task_struct* hp_waiter; | ||
986 | |||
987 | /* list of jobs waiting to get past the system ceiling */ | ||
988 | wait_queue_head_t ceiling_blocked; | ||
989 | }; | ||
990 | |||
991 | static void pcp_init_state(struct pcp_state* s) | ||
992 | { | ||
993 | INIT_LIST_HEAD(&s->system_ceiling); | ||
994 | s->hp_waiter = NULL; | ||
995 | init_waitqueue_head(&s->ceiling_blocked); | ||
996 | } | ||
997 | |||
998 | static DEFINE_PER_CPU(struct pcp_state, pcp_state); | ||
999 | |||
/* assumes preemptions are off */
static struct pcp_semaphore* pcp_get_ceiling(void)
{
	struct list_head* in_use = &__get_cpu_var(pcp_state).system_ceiling;

	/* Note: for an empty list, ->next points back at the head itself and
	 * is never NULL, so we must test for emptiness explicitly. */
	if (!list_empty(in_use))
		return list_entry(in_use->next, struct pcp_semaphore, ceiling);
	else
		return NULL;
}
1010 | |||
1011 | /* assumes preempt off */ | ||
1012 | static void pcp_add_ceiling(struct pcp_semaphore* sem) | ||
1013 | { | ||
1014 | struct list_head *pos; | ||
1015 | struct list_head *in_use = &__get_cpu_var(pcp_state).system_ceiling; | ||
1016 | struct pcp_semaphore* held; | ||
1017 | |||
1018 | BUG_ON(sem->on_cpu != smp_processor_id()); | ||
1019 | BUG_ON(in_list(&sem->ceiling)); | ||
1020 | |||
1021 | list_for_each(pos, in_use) { | ||
1022 | held = list_entry(pos, struct pcp_semaphore, ceiling); | ||
1023 | if (held->prio_ceiling >= sem->prio_ceiling) { | ||
1024 | __list_add(&sem->ceiling, pos->prev, pos); | ||
1025 | return; | ||
1026 | } | ||
1027 | } | ||
1028 | |||
1029 | /* we hit the end of the list */ | ||
1030 | |||
1031 | list_add_tail(&sem->ceiling, in_use); | ||
1032 | } | ||
1033 | |||
1034 | /* assumes preempt off */ | ||
1035 | static int pcp_exceeds_ceiling(struct pcp_semaphore* ceiling, | ||
1036 | struct task_struct* task, | ||
1037 | int effective_prio) | ||
1038 | { | ||
1039 | return ceiling == NULL || | ||
1040 | ceiling->prio_ceiling > effective_prio || | ||
1041 | ceiling->owner == task; | ||
1042 | } | ||
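
/* This is the classic PCP admission rule: a request may proceed only if its
 * priority is strictly higher than the current system ceiling, or if the
 * requester itself holds the semaphore that defines the ceiling. For
 * example, with a held semaphore of ceiling 3 (numerically smaller value =
 * higher priority here), a job with effective priority 2 passes, while jobs
 * with priority 3 or 4 block.
 */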
1043 | |||
1044 | /* assumes preempt off */ | ||
1045 | static void pcp_priority_inheritance(void) | ||
1046 | { | ||
1047 | unsigned long flags; | ||
1048 | pfp_domain_t* pfp = local_pfp; | ||
1049 | |||
1050 | struct pcp_semaphore* ceiling = pcp_get_ceiling(); | ||
1051 | struct task_struct *blocker, *blocked; | ||
1052 | |||
1053 | blocker = ceiling ? ceiling->owner : NULL; | ||
1054 | blocked = __get_cpu_var(pcp_state).hp_waiter; | ||
1055 | |||
1056 | raw_spin_lock_irqsave(&pfp->slock, flags); | ||
1057 | |||
1058 | /* Current is no longer inheriting anything by default. This should be | ||
1059 | * the currently scheduled job, and hence not currently queued. */ | ||
1060 | BUG_ON(current != pfp->scheduled); | ||
1061 | |||
1062 | fp_set_prio_inh(pfp, current, NULL); | ||
1063 | fp_set_prio_inh(pfp, blocked, NULL); | ||
1064 | fp_set_prio_inh(pfp, blocker, NULL); | ||
1065 | |||
1066 | |||
1067 | /* Let blocking job inherit priority of blocked job, if required. */ | ||
1068 | if (blocker && blocked && | ||
1069 | fp_higher_prio(blocked, blocker)) { | ||
1070 | TRACE_TASK(blocker, "PCP inherits from %s/%d (prio %u -> %u) \n", | ||
1071 | blocked->comm, blocked->pid, | ||
1072 | get_priority(blocker), get_priority(blocked)); | ||
1073 | fp_set_prio_inh(pfp, blocker, blocked); | ||
1074 | } | ||
1075 | |||
1076 | /* check if anything changed */ | ||
1077 | if (fp_higher_prio(fp_prio_peek(&pfp->ready_queue), pfp->scheduled)) | ||
1078 | preempt(pfp); | ||
1079 | |||
1080 | raw_spin_unlock_irqrestore(&pfp->slock, flags); | ||
1081 | } | ||
1082 | |||
1083 | /* called with preemptions off */ | ||
1084 | static void pcp_raise_ceiling(struct pcp_semaphore* sem, | ||
1085 | int effective_prio) | ||
1086 | { | ||
1087 | struct task_struct* t = current; | ||
1088 | struct pcp_semaphore* ceiling; | ||
1089 | prio_wait_queue_t wait; | ||
1090 | unsigned int waiting_higher_prio; | ||
1091 | |||
1092 | do { | ||
1093 | ceiling = pcp_get_ceiling(); | ||
1094 | if (pcp_exceeds_ceiling(ceiling, t, effective_prio)) | ||
1095 | break; | ||
1096 | |||
1097 | TRACE_CUR("PCP ceiling-blocked, wanted sem %p, but %s/%d has the ceiling \n", | ||
1098 | sem, ceiling->owner->comm, ceiling->owner->pid); | ||
1099 | |||
1100 | /* we need to wait until the ceiling is lowered */ | ||
1101 | |||
1102 | /* enqueue in priority order */ | ||
1103 | init_prio_waitqueue_entry(&wait, t, prio_point(effective_prio)); | ||
1104 | set_task_state(t, TASK_UNINTERRUPTIBLE); | ||
1105 | waiting_higher_prio = add_wait_queue_prio_exclusive( | ||
1106 | &__get_cpu_var(pcp_state).ceiling_blocked, &wait); | ||
1107 | |||
1108 | if (waiting_higher_prio == 0) { | ||
1109 | TRACE_CUR("PCP new highest-prio waiter => prio inheritance\n"); | ||
1110 | |||
1111 | /* we are the new highest-priority waiting job | ||
1112 | * => update inheritance */ | ||
1113 | __get_cpu_var(pcp_state).hp_waiter = t; | ||
1114 | pcp_priority_inheritance(); | ||
1115 | } | ||
1116 | |||
1117 | TS_LOCK_SUSPEND; | ||
1118 | |||
1119 | preempt_enable_no_resched(); | ||
1120 | schedule(); | ||
1121 | preempt_disable(); | ||
1122 | |||
1123 | /* pcp_resume_unblocked() removed us from wait queue */ | ||
1124 | |||
1125 | TS_LOCK_RESUME; | ||
1126 | } while(1); | ||
1127 | |||
1128 | TRACE_CUR("PCP got the ceiling and sem %p\n", sem); | ||
1129 | |||
1130 | /* We are good to go. The semaphore should be available. */ | ||
1131 | BUG_ON(sem->owner != NULL); | ||
1132 | |||
1133 | sem->owner = t; | ||
1134 | |||
1135 | pcp_add_ceiling(sem); | ||
1136 | } | ||
1137 | |||
1138 | static void pcp_resume_unblocked(void) | ||
1139 | { | ||
1140 | wait_queue_head_t *blocked = &__get_cpu_var(pcp_state).ceiling_blocked; | ||
1141 | unsigned long flags; | ||
1142 | prio_wait_queue_t* q; | ||
1143 | struct task_struct* t = NULL; | ||
1144 | |||
1145 | struct pcp_semaphore* ceiling = pcp_get_ceiling(); | ||
1146 | |||
1147 | spin_lock_irqsave(&blocked->lock, flags); | ||
1148 | |||
1149 | while (waitqueue_active(blocked)) { | ||
1150 | /* check first == highest-priority waiting job */ | ||
1151 | q = list_entry(blocked->task_list.next, | ||
1152 | prio_wait_queue_t, wq.task_list); | ||
1153 | t = (struct task_struct*) q->wq.private; | ||
1154 | |||
1155 | /* can it proceed now? => let it go */ | ||
1156 | if (pcp_exceeds_ceiling(ceiling, t, | ||
1157 | prio_from_point(q->priority))) { | ||
1158 | __remove_wait_queue(blocked, &q->wq); | ||
1159 | wake_up_process(t); | ||
1160 | } else { | ||
1161 | /* We are done. Update highest-priority waiter. */ | ||
1162 | __get_cpu_var(pcp_state).hp_waiter = t; | ||
1163 | goto out; | ||
1164 | } | ||
1165 | } | ||
1166 | /* If we get here, then there are no more waiting | ||
1167 | * jobs. */ | ||
1168 | __get_cpu_var(pcp_state).hp_waiter = NULL; | ||
1169 | out: | ||
1170 | spin_unlock_irqrestore(&blocked->lock, flags); | ||
1171 | } | ||
1172 | |||
1173 | /* assumes preempt off */ | ||
1174 | static void pcp_lower_ceiling(struct pcp_semaphore* sem) | ||
1175 | { | ||
1176 | BUG_ON(!in_list(&sem->ceiling)); | ||
1177 | BUG_ON(sem->owner != current); | ||
1178 | BUG_ON(sem->on_cpu != smp_processor_id()); | ||
1179 | |||
1180 | /* remove from ceiling list */ | ||
1181 | list_del(&sem->ceiling); | ||
1182 | |||
1183 | /* release */ | ||
1184 | sem->owner = NULL; | ||
1185 | |||
1186 | TRACE_CUR("PCP released sem %p\n", sem); | ||
1187 | |||
1188 | /* Wake up all ceiling-blocked jobs that now pass the ceiling. */ | ||
1189 | pcp_resume_unblocked(); | ||
1190 | |||
1191 | pcp_priority_inheritance(); | ||
1192 | } | ||
1193 | |||
1194 | static void pcp_update_prio_ceiling(struct pcp_semaphore* sem, | ||
1195 | int effective_prio) | ||
1196 | { | ||
1197 | /* This needs to be synchronized on something. | ||
1198 | * Might as well use waitqueue lock for the processor. | ||
1199 | * We assume this happens only before the task set starts execution, | ||
1200 | * (i.e., during initialization), but it may happen on multiple processors | ||
1201 | * at the same time. | ||
1202 | */ | ||
1203 | unsigned long flags; | ||
1204 | |||
1205 | struct pcp_state* s = &per_cpu(pcp_state, sem->on_cpu); | ||
1206 | |||
1207 | spin_lock_irqsave(&s->ceiling_blocked.lock, flags); | ||
1208 | |||
1209 | sem->prio_ceiling = min(sem->prio_ceiling, effective_prio); | ||
1210 | |||
1211 | spin_unlock_irqrestore(&s->ceiling_blocked.lock, flags); | ||
1212 | } | ||
1213 | |||
1214 | static void pcp_init_semaphore(struct pcp_semaphore* sem, int cpu) | ||
1215 | { | ||
1216 | sem->owner = NULL; | ||
1217 | INIT_LIST_HEAD(&sem->ceiling); | ||
1218 | sem->prio_ceiling = INT_MAX; | ||
1219 | sem->on_cpu = cpu; | ||
1220 | } | ||
1221 | |||
1222 | int pfp_pcp_lock(struct litmus_lock* l) | ||
1223 | { | ||
1224 | struct task_struct* t = current; | ||
1225 | struct pcp_semaphore *sem = pcp_from_lock(l); | ||
1226 | |||
1227 | int eprio = effective_agent_priority(get_priority(t)); | ||
1228 | int from = get_partition(t); | ||
1229 | int to = sem->on_cpu; | ||
1230 | |||
1231 | if (!is_realtime(t) || from != to) | ||
1232 | return -EPERM; | ||
1233 | |||
1234 | preempt_disable(); | ||
1235 | |||
1236 | pcp_raise_ceiling(sem, eprio); | ||
1237 | |||
1238 | preempt_enable(); | ||
1239 | |||
1240 | return 0; | ||
1241 | } | ||
1242 | |||
1243 | int pfp_pcp_unlock(struct litmus_lock* l) | ||
1244 | { | ||
1245 | struct task_struct *t = current; | ||
1246 | struct pcp_semaphore *sem = pcp_from_lock(l); | ||
1247 | |||
1248 | int err = 0; | ||
1249 | |||
1250 | preempt_disable(); | ||
1251 | |||
1252 | if (sem->on_cpu != smp_processor_id() || sem->owner != t) { | ||
1253 | err = -EINVAL; | ||
1254 | goto out; | ||
1255 | } | ||
1256 | |||
1257 | /* give it back */ | ||
1258 | pcp_lower_ceiling(sem); | ||
1259 | |||
1260 | out: | ||
1261 | preempt_enable(); | ||
1262 | |||
1263 | return err; | ||
1264 | } | ||
1265 | |||
1266 | int pfp_pcp_open(struct litmus_lock* l, void* __user config) | ||
1267 | { | ||
1268 | struct task_struct *t = current; | ||
1269 | struct pcp_semaphore *sem = pcp_from_lock(l); | ||
1270 | |||
1271 | int cpu, eprio; | ||
1272 | |||
1273 | if (!is_realtime(t)) | ||
1274 | /* we need to know the real-time priority */ | ||
1275 | return -EPERM; | ||
1276 | |||
1277 | if (get_user(cpu, (int*) config)) | ||
1278 | return -EFAULT; | ||
1279 | |||
1280 | /* make sure the resource location matches */ | ||
1281 | if (cpu != sem->on_cpu) | ||
1282 | return -EINVAL; | ||
1283 | |||
1284 | eprio = effective_agent_priority(get_priority(t)); | ||
1285 | |||
1286 | pcp_update_prio_ceiling(sem, eprio); | ||
1287 | |||
1288 | return 0; | ||
1289 | } | ||
1290 | |||
1291 | int pfp_pcp_close(struct litmus_lock* l) | ||
1292 | { | ||
1293 | struct task_struct *t = current; | ||
1294 | struct pcp_semaphore *sem = pcp_from_lock(l); | ||
1295 | |||
1296 | int owner = 0; | ||
1297 | |||
1298 | preempt_disable(); | ||
1299 | |||
1300 | if (sem->on_cpu == smp_processor_id()) | ||
1301 | owner = sem->owner == t; | ||
1302 | |||
1303 | preempt_enable(); | ||
1304 | |||
1305 | if (owner) | ||
1306 | pfp_pcp_unlock(l); | ||
1307 | |||
1308 | return 0; | ||
1309 | } | ||
1310 | |||
1311 | void pfp_pcp_free(struct litmus_lock* lock) | ||
1312 | { | ||
1313 | kfree(pcp_from_lock(lock)); | ||
1314 | } | ||
1315 | |||
1316 | |||
1317 | static struct litmus_lock_ops pfp_pcp_lock_ops = { | ||
1318 | .close = pfp_pcp_close, | ||
1319 | .lock = pfp_pcp_lock, | ||
1320 | .open = pfp_pcp_open, | ||
1321 | .unlock = pfp_pcp_unlock, | ||
1322 | .deallocate = pfp_pcp_free, | ||
1323 | }; | ||
1324 | |||
1325 | |||
1326 | static struct litmus_lock* pfp_new_pcp(int on_cpu) | ||
1327 | { | ||
1328 | struct pcp_semaphore* sem; | ||
1329 | |||
1330 | sem = kmalloc(sizeof(*sem), GFP_KERNEL); | ||
1331 | if (!sem) | ||
1332 | return NULL; | ||
1333 | |||
1334 | sem->litmus_lock.ops = &pfp_pcp_lock_ops; | ||
1335 | pcp_init_semaphore(sem, on_cpu); | ||
1336 | |||
1337 | return &sem->litmus_lock; | ||
1338 | } | ||
1339 | |||
/* ******************** DPCP support ********************** */

struct dpcp_semaphore {
	struct litmus_lock litmus_lock;
	struct pcp_semaphore pcp;
	int owner_cpu;
};

static inline struct dpcp_semaphore* dpcp_from_lock(struct litmus_lock* lock)
{
	return container_of(lock, struct dpcp_semaphore, litmus_lock);
}

/* called with preemptions disabled */
static void pfp_migrate_to(int target_cpu)
{
	struct task_struct* t = current;
	pfp_domain_t *from;

	if (get_partition(t) == target_cpu)
		return;

	/* make sure target_cpu makes sense */
	BUG_ON(!cpu_online(target_cpu));

	local_irq_disable();

	/* scheduled task should not be in any ready or release queue */
	BUG_ON(is_queued(t));

	/* Only the source domain's lock is needed to update the task's
	 * partition assignment; the target domain picks the task up in
	 * pfp_finish_switch(). */
	from = task_pfp(t);

	raw_spin_lock(&from->slock);

	/* switch partitions */
	tsk_rt(t)->task_params.cpu = target_cpu;

	raw_spin_unlock(&from->slock);

	/* Don't trace scheduler costs as part of
	 * locking overhead. Scheduling costs are accounted for
	 * explicitly. */
	TS_LOCK_SUSPEND;

	local_irq_enable();
	preempt_enable_no_resched();

	/* deschedule to be migrated */
	schedule();

	/* we are now on the target processor */
	preempt_disable();

	/* start recording costs again */
	TS_LOCK_RESUME;

	BUG_ON(smp_processor_id() != target_cpu);
}
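
/* Migration handshake: pfp_migrate_to() merely rewrites task_params.cpu and
 * deschedules. Because the task remains runnable, pfp_finish_switch() on the
 * source CPU then notices that get_partition() no longer matches
 * smp_processor_id(), requeues the task in the target domain, and preempts
 * the target CPU if necessary; the task resumes on the target processor.
 */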
1399 | |||
1400 | int pfp_dpcp_lock(struct litmus_lock* l) | ||
1401 | { | ||
1402 | struct task_struct* t = current; | ||
1403 | struct dpcp_semaphore *sem = dpcp_from_lock(l); | ||
1404 | int eprio = effective_agent_priority(get_priority(t)); | ||
1405 | int from = get_partition(t); | ||
1406 | int to = sem->pcp.on_cpu; | ||
1407 | |||
1408 | if (!is_realtime(t)) | ||
1409 | return -EPERM; | ||
1410 | |||
1411 | preempt_disable(); | ||
1412 | |||
1413 | /* Priority-boost ourself *before* we suspend so that | ||
1414 | * our priority is boosted when we resume. */ | ||
1415 | |||
1416 | boost_priority(t, get_priority(t)); | ||
1417 | |||
1418 | pfp_migrate_to(to); | ||
1419 | |||
1420 | pcp_raise_ceiling(&sem->pcp, eprio); | ||
1421 | |||
1422 | /* yep, we got it => execute request */ | ||
1423 | sem->owner_cpu = from; | ||
1424 | |||
1425 | preempt_enable(); | ||
1426 | |||
1427 | return 0; | ||
1428 | } | ||
1429 | |||
1430 | int pfp_dpcp_unlock(struct litmus_lock* l) | ||
1431 | { | ||
1432 | struct task_struct *t = current; | ||
1433 | struct dpcp_semaphore *sem = dpcp_from_lock(l); | ||
1434 | int err = 0; | ||
1435 | int home; | ||
1436 | |||
1437 | preempt_disable(); | ||
1438 | |||
1439 | if (sem->pcp.on_cpu != smp_processor_id() || sem->pcp.owner != t) { | ||
1440 | err = -EINVAL; | ||
1441 | goto out; | ||
1442 | } | ||
1443 | |||
1444 | home = sem->owner_cpu; | ||
1445 | |||
1446 | /* give it back */ | ||
1447 | pcp_lower_ceiling(&sem->pcp); | ||
1448 | |||
1449 | /* we lose the benefit of priority boosting */ | ||
1450 | unboost_priority(t); | ||
1451 | |||
1452 | pfp_migrate_to(home); | ||
1453 | |||
1454 | out: | ||
1455 | preempt_enable(); | ||
1456 | |||
1457 | return err; | ||
1458 | } | ||
1459 | |||
1460 | int pfp_dpcp_open(struct litmus_lock* l, void* __user config) | ||
1461 | { | ||
1462 | struct task_struct *t = current; | ||
1463 | struct dpcp_semaphore *sem = dpcp_from_lock(l); | ||
1464 | int cpu, eprio; | ||
1465 | |||
1466 | if (!is_realtime(t)) | ||
1467 | /* we need to know the real-time priority */ | ||
1468 | return -EPERM; | ||
1469 | |||
1470 | if (get_user(cpu, (int*) config)) | ||
1471 | return -EFAULT; | ||
1472 | |||
1473 | /* make sure the resource location matches */ | ||
1474 | if (cpu != sem->pcp.on_cpu) | ||
1475 | return -EINVAL; | ||
1476 | |||
1477 | eprio = effective_agent_priority(get_priority(t)); | ||
1478 | |||
1479 | pcp_update_prio_ceiling(&sem->pcp, eprio); | ||
1480 | |||
1481 | return 0; | ||
1482 | } | ||
1483 | |||
1484 | int pfp_dpcp_close(struct litmus_lock* l) | ||
1485 | { | ||
1486 | struct task_struct *t = current; | ||
1487 | struct dpcp_semaphore *sem = dpcp_from_lock(l); | ||
1488 | int owner = 0; | ||
1489 | |||
1490 | preempt_disable(); | ||
1491 | |||
1492 | if (sem->pcp.on_cpu == smp_processor_id()) | ||
1493 | owner = sem->pcp.owner == t; | ||
1494 | |||
1495 | preempt_enable(); | ||
1496 | |||
1497 | if (owner) | ||
1498 | pfp_dpcp_unlock(l); | ||
1499 | |||
1500 | return 0; | ||
1501 | } | ||
1502 | |||
1503 | void pfp_dpcp_free(struct litmus_lock* lock) | ||
1504 | { | ||
1505 | kfree(dpcp_from_lock(lock)); | ||
1506 | } | ||
1507 | |||
1508 | static struct litmus_lock_ops pfp_dpcp_lock_ops = { | ||
1509 | .close = pfp_dpcp_close, | ||
1510 | .lock = pfp_dpcp_lock, | ||
1511 | .open = pfp_dpcp_open, | ||
1512 | .unlock = pfp_dpcp_unlock, | ||
1513 | .deallocate = pfp_dpcp_free, | ||
1514 | }; | ||
1515 | |||
1516 | static struct litmus_lock* pfp_new_dpcp(int on_cpu) | ||
1517 | { | ||
1518 | struct dpcp_semaphore* sem; | ||
1519 | |||
1520 | sem = kmalloc(sizeof(*sem), GFP_KERNEL); | ||
1521 | if (!sem) | ||
1522 | return NULL; | ||
1523 | |||
1524 | sem->litmus_lock.ops = &pfp_dpcp_lock_ops; | ||
1525 | sem->owner_cpu = NO_CPU; | ||
1526 | pcp_init_semaphore(&sem->pcp, on_cpu); | ||
1527 | |||
1528 | return &sem->litmus_lock; | ||
1529 | } | ||
1530 | |||
1531 | |||
1532 | /* **** lock constructor **** */ | ||
1533 | |||
1534 | |||
1535 | static long pfp_allocate_lock(struct litmus_lock **lock, int type, | ||
1536 | void* __user config) | ||
1537 | { | ||
1538 | int err = -ENXIO, cpu; | ||
1539 | struct srp_semaphore* srp; | ||
1540 | |||
1541 | /* P-FP currently supports the SRP for local resources and the FMLP | ||
1542 | * for global resources. */ | ||
1543 | switch (type) { | ||
1544 | case FMLP_SEM: | ||
1545 | /* FIFO Mutex Locking Protocol */ | ||
1546 | *lock = pfp_new_fmlp(); | ||
1547 | if (*lock) | ||
1548 | err = 0; | ||
1549 | else | ||
1550 | err = -ENOMEM; | ||
1551 | break; | ||
1552 | |||
1553 | case MPCP_SEM: | ||
1554 | /* Multiprocesor Priority Ceiling Protocol */ | ||
1555 | *lock = pfp_new_mpcp(0); | ||
1556 | if (*lock) | ||
1557 | err = 0; | ||
1558 | else | ||
1559 | err = -ENOMEM; | ||
1560 | break; | ||
1561 | |||
1562 | case MPCP_VS_SEM: | ||
1563 | /* Multiprocesor Priority Ceiling Protocol with virtual spinning */ | ||
1564 | *lock = pfp_new_mpcp(1); | ||
1565 | if (*lock) | ||
1566 | err = 0; | ||
1567 | else | ||
1568 | err = -ENOMEM; | ||
1569 | break; | ||
1570 | |||
1571 | case DPCP_SEM: | ||
1572 | /* Distributed Priority Ceiling Protocol */ | ||
1573 | if (get_user(cpu, (int*) config)) | ||
1574 | return -EFAULT; | ||
1575 | |||
1576 | if (!cpu_online(cpu)) | ||
1577 | return -EINVAL; | ||
1578 | |||
1579 | *lock = pfp_new_dpcp(cpu); | ||
1580 | if (*lock) | ||
1581 | err = 0; | ||
1582 | else | ||
1583 | err = -ENOMEM; | ||
1584 | break; | ||
1585 | |||
1586 | case SRP_SEM: | ||
1587 | /* Baker's Stack Resource Policy */ | ||
1588 | srp = allocate_srp_semaphore(); | ||
1589 | if (srp) { | ||
1590 | *lock = &srp->litmus_lock; | ||
1591 | err = 0; | ||
1592 | } else | ||
1593 | err = -ENOMEM; | ||
1594 | break; | ||
1595 | |||
1596 | case PCP_SEM: | ||
1597 | /* Priority Ceiling Protocol */ | ||
1598 | if (get_user(cpu, (int*) config)) | ||
1599 | return -EFAULT; | ||
1600 | |||
1601 | if (!cpu_online(cpu)) | ||
1602 | return -EINVAL; | ||
1603 | |||
1604 | *lock = pfp_new_pcp(cpu); | ||
1605 | if (*lock) | ||
1606 | err = 0; | ||
1607 | else | ||
1608 | err = -ENOMEM; | ||
1609 | break; | ||
1610 | }; | ||
1611 | |||
1612 | return err; | ||
1613 | } | ||
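
/* Interpretation of the config argument: for DPCP_SEM and PCP_SEM, config
 * points to a user-space int naming the CPU on which the semaphore (and
 * hence its ceiling) lives; FMLP_SEM, MPCP_SEM, MPCP_VS_SEM, and SRP_SEM
 * ignore config at allocation time.
 */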
1614 | |||
1615 | #endif | ||
1616 | |||
1617 | static long pfp_admit_task(struct task_struct* tsk) | ||
1618 | { | ||
1619 | if (task_cpu(tsk) == tsk->rt_param.task_params.cpu && | ||
1620 | #ifdef CONFIG_RELEASE_MASTER | ||
1621 | /* don't allow tasks on release master CPU */ | ||
1622 | task_cpu(tsk) != remote_dom(task_cpu(tsk))->release_master && | ||
1623 | #endif | ||
1624 | litmus_is_valid_fixed_prio(get_priority(tsk))) | ||
1625 | return 0; | ||
1626 | else | ||
1627 | return -EINVAL; | ||
1628 | } | ||
1629 | |||
1630 | static long pfp_activate_plugin(void) | ||
1631 | { | ||
1632 | #if defined(CONFIG_RELEASE_MASTER) || defined(CONFIG_LITMUS_LOCKING) | ||
1633 | int cpu; | ||
1634 | #endif | ||
1635 | |||
1636 | #ifdef CONFIG_RELEASE_MASTER | ||
1637 | for_each_online_cpu(cpu) { | ||
1638 | remote_dom(cpu)->release_master = atomic_read(&release_master_cpu); | ||
1639 | } | ||
1640 | #endif | ||
1641 | |||
1642 | #ifdef CONFIG_LITMUS_LOCKING | ||
1643 | get_srp_prio = pfp_get_srp_prio; | ||
1644 | |||
1645 | for_each_online_cpu(cpu) { | ||
1646 | init_waitqueue_head(&per_cpu(mpcpvs_vspin_wait, cpu)); | ||
1647 | per_cpu(mpcpvs_vspin, cpu) = NULL; | ||
1648 | |||
1649 | pcp_init_state(&per_cpu(pcp_state, cpu)); | ||
1650 | pfp_doms[cpu] = remote_pfp(cpu); | ||
1651 | } | ||
1652 | |||
1653 | #endif | ||
1654 | |||
1655 | return 0; | ||
1656 | } | ||
1657 | |||
1658 | |||
1659 | /* Plugin object */ | ||
1660 | static struct sched_plugin pfp_plugin __cacheline_aligned_in_smp = { | ||
1661 | .plugin_name = "P-FP", | ||
1662 | .tick = pfp_tick, | ||
1663 | .task_new = pfp_task_new, | ||
1664 | .complete_job = complete_job, | ||
1665 | .task_exit = pfp_task_exit, | ||
1666 | .schedule = pfp_schedule, | ||
1667 | .task_wake_up = pfp_task_wake_up, | ||
1668 | .task_block = pfp_task_block, | ||
1669 | .admit_task = pfp_admit_task, | ||
1670 | .activate_plugin = pfp_activate_plugin, | ||
1671 | #ifdef CONFIG_LITMUS_LOCKING | ||
1672 | .allocate_lock = pfp_allocate_lock, | ||
1673 | .finish_switch = pfp_finish_switch, | ||
1674 | #endif | ||
1675 | }; | ||
1676 | |||
1677 | |||
1678 | static int __init init_pfp(void) | ||
1679 | { | ||
1680 | int i; | ||
1681 | |||
1682 | /* We do not really want to support cpu hotplug, do we? ;) | ||
1683 | * However, if we are so crazy to do so, | ||
1684 | * we cannot use num_online_cpu() | ||
1685 | */ | ||
1686 | for (i = 0; i < num_online_cpus(); i++) { | ||
1687 | pfp_domain_init(remote_pfp(i), i); | ||
1688 | } | ||
1689 | return register_sched_plugin(&pfp_plugin); | ||
1690 | } | ||
1691 | |||
1692 | module_init(init_pfp); | ||
1693 | |||