Diffstat (limited to 'litmus/sched_pfp.c')
-rw-r--r--  litmus/sched_pfp.c  1542
1 file changed, 1542 insertions, 0 deletions
diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c
new file mode 100644
index 000000000000..74a77e7a4959
--- /dev/null
+++ b/litmus/sched_pfp.c
@@ -0,0 +1,1542 @@
1/*
2 * litmus/sched_pfp.c
3 *
4 * Implementation of partitioned fixed-priority scheduling.
5 * Based on PSN-EDF.
6 */
7
8#include <linux/percpu.h>
9#include <linux/sched.h>
10#include <linux/list.h>
11#include <linux/spinlock.h>
12#include <linux/module.h>
13
14#include <litmus/litmus.h>
15#include <litmus/wait.h>
16#include <litmus/jobs.h>
17#include <litmus/preempt.h>
18#include <litmus/fp_common.h>
19#include <litmus/sched_plugin.h>
20#include <litmus/sched_trace.h>
21#include <litmus/trace.h>
22
23#include <linux/uaccess.h>
24
25
26typedef struct {
27 rt_domain_t domain;
28 struct fp_prio_queue ready_queue;
29 int cpu;
30 struct task_struct* scheduled; /* only RT tasks */
31/*
32 * The scheduling lock 'slock' protects the domain and serializes
33 * scheduling decisions on this partition.
34 */
35#define slock domain.ready_lock
36
37} pfp_domain_t;
38
39DEFINE_PER_CPU(pfp_domain_t, pfp_domains);
40
41pfp_domain_t* pfp_doms[NR_CPUS];
42
43#define local_pfp (&__get_cpu_var(pfp_domains))
44#define remote_dom(cpu) (&per_cpu(pfp_domains, cpu).domain)
45#define remote_pfp(cpu) (&per_cpu(pfp_domains, cpu))
46#define task_dom(task) remote_dom(get_partition(task))
47#define task_pfp(task) remote_pfp(get_partition(task))
48
49/* we assume the lock is being held */
50static void preempt(pfp_domain_t *pfp)
51{
52 preempt_if_preemptable(pfp->scheduled, pfp->cpu);
53}
54
55static unsigned int priority_index(struct task_struct* t)
56{
57#ifdef CONFIG_LITMUS_LOCKING
58 if (unlikely(t->rt_param.inh_task))
59 /* use effective priority */
60 t = t->rt_param.inh_task;
61
62 if (is_priority_boosted(t)) {
63 /* zero is reserved for priority-boosted tasks */
64 return 0;
65 } else
66#endif
67 return get_priority(t);
68}
69
70
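/*
 * Release-jobs callback for this partition's rt_domain: moves all newly
 * released jobs from the passed-in heap into the fixed-priority ready
 * queue and preempts the locally scheduled task if one of the new
 * arrivals has higher priority.
 */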
71static void pfp_release_jobs(rt_domain_t* rt, struct bheap* tasks)
72{
73 pfp_domain_t *pfp = container_of(rt, pfp_domain_t, domain);
74 unsigned long flags;
75 struct task_struct* t;
76 struct bheap_node* hn;
77
78 raw_spin_lock_irqsave(&pfp->slock, flags);
79
80 while (!bheap_empty(tasks)) {
81 hn = bheap_take(fp_ready_order, tasks);
82 t = bheap2task(hn);
83 TRACE_TASK(t, "released (part:%d prio:%d)\n",
84 get_partition(t), get_priority(t));
85 fp_prio_add(&pfp->ready_queue, t, priority_index(t));
86 }
87
88 /* do we need to preempt? */
89 if (fp_higher_prio(fp_prio_peek(&pfp->ready_queue), pfp->scheduled)) {
90 TRACE_CUR("preempted by new release\n");
91 preempt(pfp);
92 }
93
94 raw_spin_unlock_irqrestore(&pfp->slock, flags);
95}
96
97static void pfp_domain_init(pfp_domain_t* pfp,
98 int cpu)
99{
100 fp_domain_init(&pfp->domain, NULL, pfp_release_jobs);
101 pfp->cpu = cpu;
102 pfp->scheduled = NULL;
103 fp_prio_queue_init(&pfp->ready_queue);
104}
105
106static void requeue(struct task_struct* t, pfp_domain_t *pfp)
107{
108 if (t->state != TASK_RUNNING)
109 TRACE_TASK(t, "requeue: !TASK_RUNNING\n");
110
111 set_rt_flags(t, RT_F_RUNNING);
112 if (is_released(t, litmus_clock()))
113 fp_prio_add(&pfp->ready_queue, t, priority_index(t));
114 else
115 add_release(&pfp->domain, t); /* it has got to wait */
116}
117
118static void job_completion(struct task_struct* t, int forced)
119{
120 sched_trace_task_completion(t,forced);
121 TRACE_TASK(t, "job_completion().\n");
122
123 set_rt_flags(t, RT_F_SLEEP);
124 prepare_for_next_period(t);
125}
126
127static void pfp_tick(struct task_struct *t)
128{
129 pfp_domain_t *pfp = local_pfp;
130
131 /* Check for inconsistency. We don't need the lock for this since
132 * ->scheduled is only changed in schedule, which obviously is not
133 * executing in parallel on this CPU
134 */
135 BUG_ON(is_realtime(t) && t != pfp->scheduled);
136
137 if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) {
138 if (!is_np(t)) {
139 litmus_reschedule_local();
140 TRACE("pfp_scheduler_tick: "
141 "%d is preemptable "
142 " => FORCE_RESCHED\n", t->pid);
143 } else if (is_user_np(t)) {
144 TRACE("pfp_scheduler_tick: "
145 "%d is non-preemptable, "
146 "preemption delayed.\n", t->pid);
147 request_exit_np(t);
148 }
149 }
150}
151
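/*
 * Core scheduling decision for this partition. Under pfp->slock, the
 * current state is summarized in a set of flags (exists, blocks,
 * out_of_time, np, sleep, migrate, preempt); based on these, the
 * previously scheduled job is either kept, completed and requeued, or
 * replaced by the highest-priority job in the ready queue.
 */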
152static struct task_struct* pfp_schedule(struct task_struct * prev)
153{
154 pfp_domain_t* pfp = local_pfp;
155 struct task_struct* next;
156
157 int out_of_time, sleep, preempt, np, exists, blocks, resched, migrate;
158
159 raw_spin_lock(&pfp->slock);
160
161	/* Sanity checking:
162	 * unlike in G-EDF, when a task exits (is dead),
163	 * pfp->scheduled may be NULL while prev _is_ a real-time task.
164	 */
165 BUG_ON(pfp->scheduled && pfp->scheduled != prev);
166 BUG_ON(pfp->scheduled && !is_realtime(prev));
167
168 /* (0) Determine state */
169 exists = pfp->scheduled != NULL;
170 blocks = exists && !is_running(pfp->scheduled);
171 out_of_time = exists &&
172 budget_enforced(pfp->scheduled) &&
173 budget_exhausted(pfp->scheduled);
174 np = exists && is_np(pfp->scheduled);
175 sleep = exists && get_rt_flags(pfp->scheduled) == RT_F_SLEEP;
176 migrate = exists && get_partition(pfp->scheduled) != pfp->cpu;
177 preempt = migrate || fp_preemption_needed(&pfp->ready_queue, prev);
178
179	/* If we need to preempt, do so.
180 * The following checks set resched to 1 in case of special
181 * circumstances.
182 */
183 resched = preempt;
184
185 /* If a task blocks we have no choice but to reschedule.
186 */
187 if (blocks)
188 resched = 1;
189
190 /* Request a sys_exit_np() call if we would like to preempt but cannot.
191 * Multiple calls to request_exit_np() don't hurt.
192 */
193 if (np && (out_of_time || preempt || sleep))
194 request_exit_np(pfp->scheduled);
195
196 /* Any task that is preemptable and either exhausts its execution
197 * budget or wants to sleep completes. We may have to reschedule after
198 * this.
199 */
200 if (!np && (out_of_time || sleep) && !blocks && !migrate) {
201 job_completion(pfp->scheduled, !sleep);
202 resched = 1;
203 }
204
205 /* The final scheduling decision. Do we need to switch for some reason?
206 * Switch if we are in RT mode and have no task or if we need to
207 * resched.
208 */
209 next = NULL;
210 if ((!np || blocks) && (resched || !exists)) {
211 /* When preempting a task that does not block, then
212 * re-insert it into either the ready queue or the
213 * release queue (if it completed). requeue() picks
214 * the appropriate queue.
215 */
216 if (pfp->scheduled && !blocks && !migrate)
217 requeue(pfp->scheduled, pfp);
218 next = fp_prio_take(&pfp->ready_queue);
219 } else
220 /* Only override Linux scheduler if we have a real-time task
221 * scheduled that needs to continue.
222 */
223 if (exists)
224 next = prev;
225
226 if (next) {
227 TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
228 set_rt_flags(next, RT_F_RUNNING);
229 } else {
230 TRACE("becoming idle at %llu\n", litmus_clock());
231 }
232
233 pfp->scheduled = next;
234 sched_state_task_picked();
235 raw_spin_unlock(&pfp->slock);
236
237 return next;
238}
239
240#ifdef CONFIG_LITMUS_LOCKING
241
242/* prev is no longer scheduled --- see if it needs to migrate */
243static void pfp_finish_switch(struct task_struct *prev)
244{
245 pfp_domain_t *to;
246
247 if (is_realtime(prev) &&
248 is_running(prev) &&
249 get_partition(prev) != smp_processor_id()) {
250 TRACE_TASK(prev, "needs to migrate from P%d to P%d\n",
251 smp_processor_id(), get_partition(prev));
252
253 to = task_pfp(prev);
254
255 raw_spin_lock(&to->slock);
256
257 TRACE_TASK(prev, "adding to queue on P%d\n", to->cpu);
258 requeue(prev, to);
259 if (fp_preemption_needed(&to->ready_queue, to->scheduled))
260 preempt(to);
261
262 raw_spin_unlock(&to->slock);
263
264 }
265}
266
267#endif
268
269/* Prepare a task for running in RT mode
270 */
271static void pfp_task_new(struct task_struct * t, int on_rq, int running)
272{
273 pfp_domain_t* pfp = task_pfp(t);
274 unsigned long flags;
275
276 TRACE_TASK(t, "P-FP: task new, cpu = %d\n",
277 t->rt_param.task_params.cpu);
278
279 /* setup job parameters */
280 release_at(t, litmus_clock());
281
282 /* The task should be running in the queue, otherwise signal
283 * code will try to wake it up with fatal consequences.
284 */
285 raw_spin_lock_irqsave(&pfp->slock, flags);
286 if (running) {
287 /* there shouldn't be anything else running at the time */
288 BUG_ON(pfp->scheduled);
289 pfp->scheduled = t;
290 } else {
291 requeue(t, pfp);
292 /* maybe we have to reschedule */
293 preempt(pfp);
294 }
295 raw_spin_unlock_irqrestore(&pfp->slock, flags);
296}
297
298static void pfp_task_wake_up(struct task_struct *task)
299{
300 unsigned long flags;
301 pfp_domain_t* pfp = task_pfp(task);
302 lt_t now;
303
304 TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
305 raw_spin_lock_irqsave(&pfp->slock, flags);
306
307#ifdef CONFIG_LITMUS_LOCKING
308 /* Should only be queued when processing a fake-wake up due to a
309 * migration-related state change. */
310 if (unlikely(is_queued(task))) {
311 TRACE_TASK(task, "WARNING: waking task still queued. Is this right?\n");
312 goto out_unlock;
313 }
314#else
315 BUG_ON(is_queued(task));
316#endif
317 now = litmus_clock();
318 if (is_tardy(task, now)
319#ifdef CONFIG_LITMUS_LOCKING
320 /* We need to take suspensions because of semaphores into
321 * account! If a job resumes after being suspended due to acquiring
322 * a semaphore, it should never be treated as a new job release.
323 */
324 && !is_priority_boosted(task)
325#endif
326 ) {
327 /* new sporadic release */
328 release_at(task, now);
329 sched_trace_task_release(task);
330 }
331
332 /* Only add to ready queue if it is not the currently-scheduled
333 * task. This could be the case if a task was woken up concurrently
334 * on a remote CPU before the executing CPU got around to actually
335 * de-scheduling the task, i.e., wake_up() raced with schedule()
336 * and won. Also, don't requeue if it is still queued, which can
337	 * happen under the DPCP due to wake-ups racing with migrations.
338 */
339 if (pfp->scheduled != task)
340 requeue(task, pfp);
341
342out_unlock:
343 raw_spin_unlock_irqrestore(&pfp->slock, flags);
344 TRACE_TASK(task, "wake up done\n");
345}
346
347static void pfp_task_block(struct task_struct *t)
348{
349 /* only running tasks can block, thus t is in no queue */
350 TRACE_TASK(t, "block at %llu, state=%d\n", litmus_clock(), t->state);
351
352 BUG_ON(!is_realtime(t));
353
354 /* If this task blocked normally, it shouldn't be queued. The exception is
355 * if this is a simulated block()/wakeup() pair from the pull-migration code path.
356 * This should only happen if the DPCP is being used.
357 */
358#ifdef CONFIG_LITMUS_LOCKING
359 if (unlikely(is_queued(t)))
360 TRACE_TASK(t, "WARNING: blocking task still queued. Is this right?\n");
361#else
362 BUG_ON(is_queued(t));
363#endif
364}
365
366static void pfp_task_exit(struct task_struct * t)
367{
368 unsigned long flags;
369 pfp_domain_t* pfp = task_pfp(t);
370 rt_domain_t* dom;
371
372 raw_spin_lock_irqsave(&pfp->slock, flags);
373 if (is_queued(t)) {
374 BUG(); /* This currently doesn't work. */
375 /* dequeue */
376 dom = task_dom(t);
377 remove(dom, t);
378 }
379 if (pfp->scheduled == t) {
380 pfp->scheduled = NULL;
381 preempt(pfp);
382 }
383 TRACE_TASK(t, "RIP, now reschedule\n");
384
385 raw_spin_unlock_irqrestore(&pfp->slock, flags);
386}
387
388#ifdef CONFIG_LITMUS_LOCKING
389
390#include <litmus/fdso.h>
391#include <litmus/srp.h>
392
393static void fp_dequeue(pfp_domain_t* pfp, struct task_struct* t)
394{
395 BUG_ON(pfp->scheduled == t && is_queued(t));
396 if (is_queued(t))
397 fp_prio_remove(&pfp->ready_queue, t, priority_index(t));
398}
399
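/*
 * Update the priority that 't' inherits (used by the PCP ceiling logic
 * below). If the task is currently queued, it is removed and re-added so
 * that its position in the ready queue reflects the new effective
 * priority as returned by priority_index().
 */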
400static void fp_set_prio_inh(pfp_domain_t* pfp, struct task_struct* t,
401 struct task_struct* prio_inh)
402{
403 int requeue;
404
405 if (!t || t->rt_param.inh_task == prio_inh) {
406 /* no update required */
407 if (t)
408 TRACE_TASK(t, "no prio-inh update required\n");
409 return;
410 }
411
412 requeue = is_queued(t);
413 TRACE_TASK(t, "prio-inh: is_queued:%d\n", requeue);
414
415 if (requeue)
416 /* first remove */
417 fp_dequeue(pfp, t);
418
419 t->rt_param.inh_task = prio_inh;
420
421 if (requeue)
422 /* add again to the right queue */
423 fp_prio_add(&pfp->ready_queue, t, priority_index(t));
424}
425
426static int effective_agent_priority(int prio)
427{
428 /* make sure agents have higher priority */
429 return prio - LITMUS_MAX_PRIORITY;
430}
431
432static lt_t prio_point(int eprio)
433{
434 /* make sure we have non-negative prio points */
435 return eprio + LITMUS_MAX_PRIORITY;
436}
437
438static int prio_from_point(lt_t prio_point)
439{
440 return ((int) prio_point) - LITMUS_MAX_PRIORITY;
441}
442
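/*
 * Priority boosting for the suspension-based protocols (FMLP, MPCP,
 * DPCP): a boosted job is placed in the reserved index-0 band of the
 * ready queue (see priority_index() above), tie-broken by the
 * protocol-specific priority point stored in boost_start_time, so it is
 * not delayed by ordinary jobs on its partition.
 */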
443static void boost_priority(struct task_struct* t, lt_t priority_point)
444{
445 unsigned long flags;
446 pfp_domain_t* pfp = task_pfp(t);
447
448 raw_spin_lock_irqsave(&pfp->slock, flags);
449
450
451 TRACE_TASK(t, "priority boosted at %llu\n", litmus_clock());
452
453 tsk_rt(t)->priority_boosted = 1;
454 /* tie-break by protocol-specific priority point */
455 tsk_rt(t)->boost_start_time = priority_point;
456
457 if (pfp->scheduled != t) {
458 /* holder may be queued: first stop queue changes */
459 raw_spin_lock(&pfp->domain.release_lock);
460 if (is_queued(t) &&
461 /* If it is queued, then we need to re-order. */
462 bheap_decrease(fp_ready_order, tsk_rt(t)->heap_node) &&
463 /* If we bubbled to the top, then we need to check for preemptions. */
464 fp_preemption_needed(&pfp->ready_queue, pfp->scheduled))
465 preempt(pfp);
466 raw_spin_unlock(&pfp->domain.release_lock);
467 } /* else: nothing to do since the job is not queued while scheduled */
468
469 raw_spin_unlock_irqrestore(&pfp->slock, flags);
470}
471
472static void unboost_priority(struct task_struct* t)
473{
474 unsigned long flags;
475 pfp_domain_t* pfp = task_pfp(t);
476 lt_t now;
477
478 raw_spin_lock_irqsave(&pfp->slock, flags);
479 now = litmus_clock();
480
481 /* assumption: this only happens when the job is scheduled */
482 BUG_ON(pfp->scheduled != t);
483
484 TRACE_TASK(t, "priority restored at %llu\n", now);
485
489 tsk_rt(t)->priority_boosted = 0;
490 tsk_rt(t)->boost_start_time = 0;
491
492 /* check if this changes anything */
493 if (fp_preemption_needed(&pfp->ready_queue, pfp->scheduled))
494 preempt(pfp);
495
496 raw_spin_unlock_irqrestore(&pfp->slock, flags);
497}
498
499/* ******************** SRP support ************************ */
500
501static unsigned int pfp_get_srp_prio(struct task_struct* t)
502{
503 return get_priority(t);
504}
505
506/* ******************** FMLP support ********************** */
507
508struct fmlp_semaphore {
509 struct litmus_lock litmus_lock;
510
511 /* current resource holder */
512 struct task_struct *owner;
513
514 /* FIFO queue of waiting tasks */
515 wait_queue_head_t wait;
516};
517
518static inline struct fmlp_semaphore* fmlp_from_lock(struct litmus_lock* lock)
519{
520 return container_of(lock, struct fmlp_semaphore, litmus_lock);
521}
522int pfp_fmlp_lock(struct litmus_lock* l)
523{
524 struct task_struct* t = current;
525 struct fmlp_semaphore *sem = fmlp_from_lock(l);
526 wait_queue_t wait;
527 unsigned long flags;
528 lt_t time_of_request;
529
530 if (!is_realtime(t))
531 return -EPERM;
532
533 spin_lock_irqsave(&sem->wait.lock, flags);
534
535 /* tie-break by this point in time */
536 time_of_request = litmus_clock();
537
538 /* Priority-boost ourself *before* we suspend so that
539 * our priority is boosted when we resume. */
540 boost_priority(t, time_of_request);
541
542 if (sem->owner) {
543 /* resource is not free => must suspend and wait */
544
545 init_waitqueue_entry(&wait, t);
546
547 /* FIXME: interruptible would be nice some day */
548 set_task_state(t, TASK_UNINTERRUPTIBLE);
549
550 __add_wait_queue_tail_exclusive(&sem->wait, &wait);
551
552 TS_LOCK_SUSPEND;
553
554 /* release lock before sleeping */
555 spin_unlock_irqrestore(&sem->wait.lock, flags);
556
557 /* We depend on the FIFO order. Thus, we don't need to recheck
558 * when we wake up; we are guaranteed to have the lock since
559 * there is only one wake up per release.
560 */
561
562 schedule();
563
564 TS_LOCK_RESUME;
565
566 /* Since we hold the lock, no other task will change
567 * ->owner. We can thus check it without acquiring the spin
568 * lock. */
569 BUG_ON(sem->owner != t);
570 } else {
571 /* it's ours now */
572 sem->owner = t;
573
574 spin_unlock_irqrestore(&sem->wait.lock, flags);
575 }
576
577 return 0;
578}
579
580int pfp_fmlp_unlock(struct litmus_lock* l)
581{
582 struct task_struct *t = current, *next;
583 struct fmlp_semaphore *sem = fmlp_from_lock(l);
584 unsigned long flags;
585 int err = 0;
586
587 spin_lock_irqsave(&sem->wait.lock, flags);
588
589 if (sem->owner != t) {
590 err = -EINVAL;
591 goto out;
592 }
593
594 /* we lose the benefit of priority boosting */
595
596 unboost_priority(t);
597
598 /* check if there are jobs waiting for this resource */
599 next = __waitqueue_remove_first(&sem->wait);
600 if (next) {
601		/* next becomes the resource holder */
602 sem->owner = next;
603
604 /* Wake up next. The waiting job is already priority-boosted. */
605 wake_up_process(next);
606 } else
607 /* resource becomes available */
608 sem->owner = NULL;
609
610out:
611 spin_unlock_irqrestore(&sem->wait.lock, flags);
612 return err;
613}
614
615int pfp_fmlp_close(struct litmus_lock* l)
616{
617 struct task_struct *t = current;
618 struct fmlp_semaphore *sem = fmlp_from_lock(l);
619 unsigned long flags;
620
621 int owner;
622
623 spin_lock_irqsave(&sem->wait.lock, flags);
624
625 owner = sem->owner == t;
626
627 spin_unlock_irqrestore(&sem->wait.lock, flags);
628
629 if (owner)
630 pfp_fmlp_unlock(l);
631
632 return 0;
633}
634
635void pfp_fmlp_free(struct litmus_lock* lock)
636{
637 kfree(fmlp_from_lock(lock));
638}
639
640static struct litmus_lock_ops pfp_fmlp_lock_ops = {
641 .close = pfp_fmlp_close,
642 .lock = pfp_fmlp_lock,
643 .unlock = pfp_fmlp_unlock,
644 .deallocate = pfp_fmlp_free,
645};
646
647static struct litmus_lock* pfp_new_fmlp(void)
648{
649 struct fmlp_semaphore* sem;
650
651 sem = kmalloc(sizeof(*sem), GFP_KERNEL);
652 if (!sem)
653 return NULL;
654
655 sem->owner = NULL;
656 init_waitqueue_head(&sem->wait);
657 sem->litmus_lock.ops = &pfp_fmlp_lock_ops;
658
659 return &sem->litmus_lock;
660}
661
662/* ******************** MPCP support ********************** */
663
664struct mpcp_semaphore {
665 struct litmus_lock litmus_lock;
666
667 /* current resource holder */
668 struct task_struct *owner;
669
670 /* priority queue of waiting tasks */
671 wait_queue_head_t wait;
672
673 /* priority ceiling per cpu */
674 unsigned int prio_ceiling[NR_CPUS];
675
676 /* should jobs spin "virtually" for this resource? */
677 int vspin;
678};
679
680#define OMEGA_CEILING UINT_MAX
681
682/* Since jobs spin "virtually" while waiting to acquire a lock,
683 * they first must acquire a local per-CPU resource.
684 */
685static DEFINE_PER_CPU(wait_queue_head_t, mpcpvs_vspin_wait);
686static DEFINE_PER_CPU(struct task_struct*, mpcpvs_vspin);
687
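/*
 * "Virtual spinning": instead of busy-waiting for a global MPCP
 * resource, at most one job per CPU may have an outstanding request at
 * a time. A job first claims the per-CPU mpcpvs_vspin slot (suspending
 * in priority order if another local job holds it) and only then issues
 * its actual lock request.
 */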
688/* called with preemptions off <=> no local modifications */
689static void mpcp_vspin_enter(void)
690{
691 struct task_struct* t = current;
692
693 while (1) {
694 if (__get_cpu_var(mpcpvs_vspin) == NULL) {
695 /* good, we get to issue our request */
696 __get_cpu_var(mpcpvs_vspin) = t;
697 break;
698 } else {
699 /* some job is spinning => enqueue in request queue */
700 prio_wait_queue_t wait;
701 wait_queue_head_t* vspin = &__get_cpu_var(mpcpvs_vspin_wait);
702 unsigned long flags;
703
704 /* ordered by regular priority */
705 init_prio_waitqueue_entry(&wait, t, prio_point(get_priority(t)));
706
707 spin_lock_irqsave(&vspin->lock, flags);
708
709 set_task_state(t, TASK_UNINTERRUPTIBLE);
710
711 __add_wait_queue_prio_exclusive(vspin, &wait);
712
713 spin_unlock_irqrestore(&vspin->lock, flags);
714
715 TS_LOCK_SUSPEND;
716
717 preempt_enable_no_resched();
718
719 schedule();
720
721 preempt_disable();
722
723 TS_LOCK_RESUME;
724 /* Recheck if we got it --- some higher-priority process might
725 * have swooped in. */
726 }
727 }
728 /* ok, now it is ours */
729}
730
731/* called with preemptions off */
732static void mpcp_vspin_exit(void)
733{
734 struct task_struct* t = current, *next;
735 unsigned long flags;
736 wait_queue_head_t* vspin = &__get_cpu_var(mpcpvs_vspin_wait);
737
738 BUG_ON(__get_cpu_var(mpcpvs_vspin) != t);
739
740 /* no spinning job */
741 __get_cpu_var(mpcpvs_vspin) = NULL;
742
743 /* see if anyone is waiting for us to stop "spinning" */
744 spin_lock_irqsave(&vspin->lock, flags);
745 next = __waitqueue_remove_first(vspin);
746
747 if (next)
748 wake_up_process(next);
749
750 spin_unlock_irqrestore(&vspin->lock, flags);
751}
752
753static inline struct mpcp_semaphore* mpcp_from_lock(struct litmus_lock* lock)
754{
755 return container_of(lock, struct mpcp_semaphore, litmus_lock);
756}
757
758int pfp_mpcp_lock(struct litmus_lock* l)
759{
760 struct task_struct* t = current;
761 struct mpcp_semaphore *sem = mpcp_from_lock(l);
762 prio_wait_queue_t wait;
763 unsigned long flags;
764
765 if (!is_realtime(t))
766 return -EPERM;
767
768 preempt_disable();
769
770 if (sem->vspin)
771 mpcp_vspin_enter();
772
773 /* Priority-boost ourself *before* we suspend so that
774 * our priority is boosted when we resume. Use the priority
775 * ceiling for the local partition. */
776 boost_priority(t, sem->prio_ceiling[get_partition(t)]);
777
778 spin_lock_irqsave(&sem->wait.lock, flags);
779
780 preempt_enable_no_resched();
781
782 if (sem->owner) {
783 /* resource is not free => must suspend and wait */
784
785 /* ordered by regular priority */
786 init_prio_waitqueue_entry(&wait, t, prio_point(get_priority(t)));
787
788 /* FIXME: interruptible would be nice some day */
789 set_task_state(t, TASK_UNINTERRUPTIBLE);
790
791 __add_wait_queue_prio_exclusive(&sem->wait, &wait);
792
793 TS_LOCK_SUSPEND;
794
795 /* release lock before sleeping */
796 spin_unlock_irqrestore(&sem->wait.lock, flags);
797
798		/* The wait queue is priority-ordered and ownership is assigned
799		 * before the wake-up. Since there is only one wake-up per
800		 * release, we don't need to recheck ->owner when we resume.
801		 */
802
803 schedule();
804
805 TS_LOCK_RESUME;
806
807 /* Since we hold the lock, no other task will change
808 * ->owner. We can thus check it without acquiring the spin
809 * lock. */
810 BUG_ON(sem->owner != t);
811 } else {
812 /* it's ours now */
813 sem->owner = t;
814
815 spin_unlock_irqrestore(&sem->wait.lock, flags);
816 }
817
818 return 0;
819}
820
821int pfp_mpcp_unlock(struct litmus_lock* l)
822{
823 struct task_struct *t = current, *next;
824 struct mpcp_semaphore *sem = mpcp_from_lock(l);
825 unsigned long flags;
826 int err = 0;
827
828 spin_lock_irqsave(&sem->wait.lock, flags);
829
830 if (sem->owner != t) {
831 err = -EINVAL;
832 goto out;
833 }
834
835 /* we lose the benefit of priority boosting */
836
837 unboost_priority(t);
838
839 /* check if there are jobs waiting for this resource */
840 next = __waitqueue_remove_first(&sem->wait);
841 if (next) {
842		/* next becomes the resource holder */
843 sem->owner = next;
844
845 /* Wake up next. The waiting job is already priority-boosted. */
846 wake_up_process(next);
847 } else
848 /* resource becomes available */
849 sem->owner = NULL;
850
851out:
852 spin_unlock_irqrestore(&sem->wait.lock, flags);
853
854 if (sem->vspin && err == 0) {
855 preempt_disable();
856 mpcp_vspin_exit();
857 preempt_enable();
858 }
859
860 return err;
861}
862
863int pfp_mpcp_open(struct litmus_lock* l, void* config)
864{
865 struct task_struct *t = current;
866 struct mpcp_semaphore *sem = mpcp_from_lock(l);
867 int cpu, local_cpu;
868 unsigned long flags;
869
870 if (!is_realtime(t))
871 /* we need to know the real-time priority */
872 return -EPERM;
873
874 local_cpu = get_partition(t);
875
876 spin_lock_irqsave(&sem->wait.lock, flags);
877
878 for (cpu = 0; cpu < NR_CPUS; cpu++)
879 if (cpu != local_cpu)
880 {
881 sem->prio_ceiling[cpu] = min(sem->prio_ceiling[cpu],
882 get_priority(t));
883 TRACE_CUR("priority ceiling for sem %p is now %d on cpu %d\n",
884 sem, sem->prio_ceiling[cpu], cpu);
885 }
886
887 spin_unlock_irqrestore(&sem->wait.lock, flags);
888
889 return 0;
890}
891
892int pfp_mpcp_close(struct litmus_lock* l)
893{
894 struct task_struct *t = current;
895 struct mpcp_semaphore *sem = mpcp_from_lock(l);
896 unsigned long flags;
897
898 int owner;
899
900 spin_lock_irqsave(&sem->wait.lock, flags);
901
902 owner = sem->owner == t;
903
904 spin_unlock_irqrestore(&sem->wait.lock, flags);
905
906 if (owner)
907 pfp_mpcp_unlock(l);
908
909 return 0;
910}
911
912void pfp_mpcp_free(struct litmus_lock* lock)
913{
914 kfree(mpcp_from_lock(lock));
915}
916
917static struct litmus_lock_ops pfp_mpcp_lock_ops = {
918 .close = pfp_mpcp_close,
919 .lock = pfp_mpcp_lock,
920 .open = pfp_mpcp_open,
921 .unlock = pfp_mpcp_unlock,
922 .deallocate = pfp_mpcp_free,
923};
924
925static struct litmus_lock* pfp_new_mpcp(int vspin)
926{
927 struct mpcp_semaphore* sem;
928 int cpu;
929
930 sem = kmalloc(sizeof(*sem), GFP_KERNEL);
931 if (!sem)
932 return NULL;
933
934 sem->owner = NULL;
935 init_waitqueue_head(&sem->wait);
936 sem->litmus_lock.ops = &pfp_mpcp_lock_ops;
937
938 for (cpu = 0; cpu < NR_CPUS; cpu++)
939 sem->prio_ceiling[cpu] = OMEGA_CEILING;
940
941 /* mark as virtual spinning */
942 sem->vspin = vspin;
943
944 return &sem->litmus_lock;
945}
946
947
948/* ******************** PCP support ********************** */
949
950
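/*
 * The PCP is implemented per processor: each CPU keeps a list of the
 * semaphores currently held on it, ordered by priority ceiling (the head
 * is the system ceiling), plus a priority-ordered wait queue of jobs
 * blocked on the ceiling and a pointer to the highest-priority waiter.
 */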
951struct pcp_semaphore {
952 struct list_head ceiling;
953
954 /* current resource holder */
955 struct task_struct *owner;
956
957 /* priority ceiling --- can be negative due to DPCP support */
958 int prio_ceiling;
959
960 /* on which processor is this PCP semaphore allocated? */
961 int on_cpu;
962};
963
964struct pcp_state {
965 struct list_head system_ceiling;
966
967 /* highest-priority waiting task */
968 struct task_struct* hp_waiter;
969
970 /* list of jobs waiting to get past the system ceiling */
971 wait_queue_head_t ceiling_blocked;
972};
973
974static void pcp_init_state(struct pcp_state* s)
975{
976 INIT_LIST_HEAD(&s->system_ceiling);
977 s->hp_waiter = NULL;
978 init_waitqueue_head(&s->ceiling_blocked);
979}
980
981static DEFINE_PER_CPU(struct pcp_state, pcp_state);
982
983/* assumes preemptions are off */
984static struct pcp_semaphore* pcp_get_ceiling(void)
985{
986 struct list_head* top = __get_cpu_var(pcp_state).system_ceiling.next;
987
988 if (top)
989 return list_entry(top, struct pcp_semaphore, ceiling);
990 else
991 return NULL;
992}
993
994/* assumes preempt off */
995static void pcp_add_ceiling(struct pcp_semaphore* sem)
996{
997 struct list_head *pos;
998 struct list_head *in_use = &__get_cpu_var(pcp_state).system_ceiling;
999 struct pcp_semaphore* held;
1000
1001 BUG_ON(sem->on_cpu != smp_processor_id());
1002 BUG_ON(in_list(&sem->ceiling));
1003
1004 list_for_each(pos, in_use) {
1005 held = list_entry(pos, struct pcp_semaphore, ceiling);
1006 if (held->prio_ceiling >= sem->prio_ceiling) {
1007 __list_add(&sem->ceiling, pos->prev, pos);
1008 return;
1009 }
1010 }
1011
1012 /* we hit the end of the list */
1013
1014 list_add_tail(&sem->ceiling, in_use);
1015}
1016
1017/* assumes preempt off */
1018static int pcp_exceeds_ceiling(struct pcp_semaphore* ceiling,
1019 struct task_struct* task,
1020 int effective_prio)
1021{
1022 return ceiling == NULL ||
1023 ceiling->prio_ceiling > effective_prio ||
1024 ceiling->owner == task;
1025}
1026
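/*
 * Re-evaluate priority inheritance on the local CPU: clear any stale
 * inheritance involving the current job, the ceiling owner, and the
 * highest-priority ceiling-blocked waiter, then let the ceiling owner
 * inherit the waiter's priority if the waiter has higher priority.
 * Finally, check whether the updated ready queue now requires a
 * preemption.
 */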
1027/* assumes preempt off */
1028static void pcp_priority_inheritance(void)
1029{
1030 unsigned long flags;
1031 pfp_domain_t* pfp = local_pfp;
1032
1033 struct pcp_semaphore* ceiling = pcp_get_ceiling();
1034 struct task_struct *blocker, *blocked;
1035
1036 blocker = ceiling ? ceiling->owner : NULL;
1037 blocked = __get_cpu_var(pcp_state).hp_waiter;
1038
1039 raw_spin_lock_irqsave(&pfp->slock, flags);
1040
1041 /* Current is no longer inheriting anything by default. This should be
1042 * the currently scheduled job, and hence not currently queued. */
1043 BUG_ON(current != pfp->scheduled);
1044
1045 fp_set_prio_inh(pfp, current, NULL);
1046 fp_set_prio_inh(pfp, blocked, NULL);
1047 fp_set_prio_inh(pfp, blocker, NULL);
1048
1049
1050 /* Let blocking job inherit priority of blocked job, if required. */
1051 if (blocker && blocked &&
1052 fp_higher_prio(blocked, blocker)) {
1053 TRACE_TASK(blocker, "PCP inherits from %s/%d (prio %u -> %u) \n",
1054 blocked->comm, blocked->pid,
1055 get_priority(blocker), get_priority(blocked));
1056 fp_set_prio_inh(pfp, blocker, blocked);
1057 }
1058
1059 /* check if anything changed */
1060 if (fp_higher_prio(fp_prio_peek(&pfp->ready_queue), pfp->scheduled))
1061 preempt(pfp);
1062
1063 raw_spin_unlock_irqrestore(&pfp->slock, flags);
1064}
1065
1066/* called with preemptions off */
1067static void pcp_raise_ceiling(struct pcp_semaphore* sem,
1068 int effective_prio)
1069{
1070 struct task_struct* t = current;
1071 struct pcp_semaphore* ceiling;
1072 prio_wait_queue_t wait;
1073 unsigned int waiting_higher_prio;
1074
1075 do {
1076 ceiling = pcp_get_ceiling();
1077 if (pcp_exceeds_ceiling(ceiling, t, effective_prio))
1078 break;
1079
1080 TRACE_CUR("PCP ceiling-blocked, wanted sem %p, but %s/%d has the ceiling \n",
1081 sem, ceiling->owner->comm, ceiling->owner->pid);
1082
1083 /* we need to wait until the ceiling is lowered */
1084
1085 /* enqueue in priority order */
1086 init_prio_waitqueue_entry(&wait, t, prio_point(effective_prio));
1087 set_task_state(t, TASK_UNINTERRUPTIBLE);
1088 waiting_higher_prio = add_wait_queue_prio_exclusive(
1089 &__get_cpu_var(pcp_state).ceiling_blocked, &wait);
1090
1091 if (waiting_higher_prio == 0) {
1092 TRACE_CUR("PCP new highest-prio waiter => prio inheritance\n");
1093
1094 /* we are the new highest-priority waiting job
1095 * => update inheritance */
1096 __get_cpu_var(pcp_state).hp_waiter = t;
1097 pcp_priority_inheritance();
1098 }
1099
1100 TS_LOCK_SUSPEND;
1101
1102 preempt_enable_no_resched();
1103 schedule();
1104 preempt_disable();
1105
1106 /* pcp_resume_unblocked() removed us from wait queue */
1107
1108 TS_LOCK_RESUME;
1109 } while(1);
1110
1111 TRACE_CUR("PCP got the ceiling and sem %p\n", sem);
1112
1113 /* We are good to go. The semaphore should be available. */
1114 BUG_ON(sem->owner != NULL);
1115
1116 sem->owner = t;
1117
1118 pcp_add_ceiling(sem);
1119}
1120
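/*
 * After the system ceiling has changed, wake up ceiling-blocked jobs in
 * priority order for as long as they pass the new ceiling; the first job
 * that is still blocked (if any) is recorded as the new highest-priority
 * waiter.
 */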
1121static void pcp_resume_unblocked(void)
1122{
1123 wait_queue_head_t *blocked = &__get_cpu_var(pcp_state).ceiling_blocked;
1124 unsigned long flags;
1125 prio_wait_queue_t* q;
1126 struct task_struct* t = NULL;
1127
1128 struct pcp_semaphore* ceiling = pcp_get_ceiling();
1129
1130 spin_lock_irqsave(&blocked->lock, flags);
1131
1132 while (waitqueue_active(blocked)) {
1133 /* check first == highest-priority waiting job */
1134 q = list_entry(blocked->task_list.next,
1135 prio_wait_queue_t, wq.task_list);
1136 t = (struct task_struct*) q->wq.private;
1137
1138 /* can it proceed now? => let it go */
1139 if (pcp_exceeds_ceiling(ceiling, t,
1140 prio_from_point(q->priority))) {
1141 __remove_wait_queue(blocked, &q->wq);
1142 wake_up_process(t);
1143 } else {
1144 /* We are done. Update highest-priority waiter. */
1145 __get_cpu_var(pcp_state).hp_waiter = t;
1146 goto out;
1147 }
1148 }
1149 /* If we get here, then there are no more waiting
1150 * jobs. */
1151 __get_cpu_var(pcp_state).hp_waiter = NULL;
1152out:
1153 spin_unlock_irqrestore(&blocked->lock, flags);
1154}
1155
1156/* assumes preempt off */
1157static void pcp_lower_ceiling(struct pcp_semaphore* sem)
1158{
1159 BUG_ON(!in_list(&sem->ceiling));
1160 BUG_ON(sem->owner != current);
1161 BUG_ON(sem->on_cpu != smp_processor_id());
1162
1163 /* remove from ceiling list */
1164 list_del(&sem->ceiling);
1165
1166 /* release */
1167 sem->owner = NULL;
1168
1169 TRACE_CUR("PCP released sem %p\n", sem);
1170
1171 /* Wake up all ceiling-blocked jobs that now pass the ceiling. */
1172 pcp_resume_unblocked();
1173
1174 pcp_priority_inheritance();
1175}
1176
1177static void pcp_update_prio_ceiling(struct pcp_semaphore* sem,
1178 int effective_prio)
1179{
1180 /* This needs to be synchronized on something.
1181 * Might as well use waitqueue lock for the processor.
1182 * We assume this happens only before the task set starts execution,
1183 * (i.e., during initialization), but it may happen on multiple processors
1184 * at the same time.
1185 */
1186 unsigned long flags;
1187
1188 struct pcp_state* s = &per_cpu(pcp_state, sem->on_cpu);
1189
1190 spin_lock_irqsave(&s->ceiling_blocked.lock, flags);
1191
1192 sem->prio_ceiling = min(sem->prio_ceiling, effective_prio);
1193
1194 spin_unlock_irqrestore(&s->ceiling_blocked.lock, flags);
1195}
1196
1197static void pcp_init_semaphore(struct pcp_semaphore* sem, int cpu)
1198{
1199 sem->owner = NULL;
1200 INIT_LIST_HEAD(&sem->ceiling);
1201 sem->prio_ceiling = INT_MAX;
1202 sem->on_cpu = cpu;
1203}
1204
1205
1206/* ******************** DPCP support ********************** */
1207
1208struct dpcp_semaphore {
1209 struct litmus_lock litmus_lock;
1210 struct pcp_semaphore pcp;
1211 int owner_cpu;
1212};
1213
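/*
 * Under the DPCP, each semaphore has a home CPU (pcp.on_cpu). A
 * requesting job boosts its priority, migrates to the home CPU, executes
 * the critical section there under the local PCP (using its agent
 * priority), and then migrates back to owner_cpu, its original
 * partition.
 */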
1214static inline struct dpcp_semaphore* dpcp_from_lock(struct litmus_lock* lock)
1215{
1216 return container_of(lock, struct dpcp_semaphore, litmus_lock);
1217}
1218
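/*
 * Migration helper used by the DPCP: the task switches its partition by
 * updating task_params.cpu under the source domain's lock and then
 * suspending via schedule(); pfp_finish_switch() on the source CPU
 * requeues it in the target partition once it has been descheduled.
 */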
1219/* called with preemptions disabled */
1220static void pfp_migrate_to(int target_cpu)
1221{
1222 struct task_struct* t = current;
1223 pfp_domain_t *from;
1224
1225 if (get_partition(t) == target_cpu)
1226 return;
1227
1228 /* make sure target_cpu makes sense */
1229 BUG_ON(!cpu_online(target_cpu));
1230
1231 local_irq_disable();
1232
1233 /* scheduled task should not be in any ready or release queue */
1234 BUG_ON(is_queued(t));
1235
1236	/* lock the source domain while we switch partitions */
1237 from = task_pfp(t);
1238
1239 raw_spin_lock(&from->slock);
1240
1241 /* switch partitions */
1242 tsk_rt(t)->task_params.cpu = target_cpu;
1243
1244 raw_spin_unlock(&from->slock);
1245
1246 /* Don't trace scheduler costs as part of
1247 * locking overhead. Scheduling costs are accounted for
1248 * explicitly. */
1249 TS_LOCK_SUSPEND;
1250
1251 local_irq_enable();
1252 preempt_enable_no_resched();
1253
1254 /* deschedule to be migrated */
1255 schedule();
1256
1257 /* we are now on the target processor */
1258 preempt_disable();
1259
1260 /* start recording costs again */
1261 TS_LOCK_RESUME;
1262
1263 BUG_ON(smp_processor_id() != target_cpu);
1264}
1265
1266int pfp_dpcp_lock(struct litmus_lock* l)
1267{
1268 struct task_struct* t = current;
1269 struct dpcp_semaphore *sem = dpcp_from_lock(l);
1270 int eprio = effective_agent_priority(get_priority(t));
1271 int from = get_partition(t);
1272 int to = sem->pcp.on_cpu;
1273
1274 if (!is_realtime(t))
1275 return -EPERM;
1276
1277 preempt_disable();
1278
1279 /* Priority-boost ourself *before* we suspend so that
1280 * our priority is boosted when we resume. */
1281
1282 boost_priority(t, get_priority(t));
1283
1284 pfp_migrate_to(to);
1285
1286 pcp_raise_ceiling(&sem->pcp, eprio);
1287
1288 /* yep, we got it => execute request */
1289 sem->owner_cpu = from;
1290
1291 preempt_enable();
1292
1293 return 0;
1294}
1295
1296int pfp_dpcp_unlock(struct litmus_lock* l)
1297{
1298 struct task_struct *t = current;
1299 struct dpcp_semaphore *sem = dpcp_from_lock(l);
1300 int err = 0;
1301 int home;
1302
1303 preempt_disable();
1304
1305 if (sem->pcp.on_cpu != smp_processor_id() || sem->pcp.owner != t) {
1306 err = -EINVAL;
1307 goto out;
1308 }
1309
1310 home = sem->owner_cpu;
1311
1312 /* give it back */
1313 pcp_lower_ceiling(&sem->pcp);
1314
1315 /* we lose the benefit of priority boosting */
1316 unboost_priority(t);
1317
1318 pfp_migrate_to(home);
1319
1320out:
1321 preempt_enable();
1322
1323 return err;
1324}
1325
1326int pfp_dpcp_open(struct litmus_lock* l, void* __user config)
1327{
1328 struct task_struct *t = current;
1329 struct dpcp_semaphore *sem = dpcp_from_lock(l);
1330 int cpu, eprio;
1331
1332 if (!is_realtime(t))
1333 /* we need to know the real-time priority */
1334 return -EPERM;
1335
1336 if (get_user(cpu, (int*) config))
1337 return -EFAULT;
1338
1339 /* make sure the resource location matches */
1340 if (cpu != sem->pcp.on_cpu)
1341 return -EINVAL;
1342
1343 eprio = effective_agent_priority(get_priority(t));
1344
1345 pcp_update_prio_ceiling(&sem->pcp, eprio);
1346
1347 return 0;
1348}
1349
1350int pfp_dpcp_close(struct litmus_lock* l)
1351{
1352 struct task_struct *t = current;
1353 struct dpcp_semaphore *sem = dpcp_from_lock(l);
1354 int owner = 0;
1355
1356 preempt_disable();
1357
1358 if (sem->pcp.on_cpu == smp_processor_id())
1359 owner = sem->pcp.owner == t;
1360
1361 preempt_enable();
1362
1363 if (owner)
1364 pfp_dpcp_unlock(l);
1365
1366 return 0;
1367}
1368
1369void pfp_dpcp_free(struct litmus_lock* lock)
1370{
1371 kfree(dpcp_from_lock(lock));
1372}
1373
1374static struct litmus_lock_ops pfp_dpcp_lock_ops = {
1375 .close = pfp_dpcp_close,
1376 .lock = pfp_dpcp_lock,
1377 .open = pfp_dpcp_open,
1378 .unlock = pfp_dpcp_unlock,
1379 .deallocate = pfp_dpcp_free,
1380};
1381
1382static struct litmus_lock* pfp_new_dpcp(int on_cpu)
1383{
1384 struct dpcp_semaphore* sem;
1385
1386 sem = kmalloc(sizeof(*sem), GFP_KERNEL);
1387 if (!sem)
1388 return NULL;
1389
1390 sem->litmus_lock.ops = &pfp_dpcp_lock_ops;
1391 sem->owner_cpu = NO_CPU;
1392 pcp_init_semaphore(&sem->pcp, on_cpu);
1393
1394 return &sem->litmus_lock;
1395}
1396
1397
1398/* **** lock constructor **** */
1399
1400
1401static long pfp_allocate_lock(struct litmus_lock **lock, int type,
1402 void* __user config)
1403{
1404 int err = -ENXIO, cpu;
1405 struct srp_semaphore* srp;
1406
1407	/* P-FP currently supports the SRP for local resources and the FMLP,
1408	 * the MPCP (with and without virtual spinning), and the DPCP for global resources. */
1409 switch (type) {
1410 case FMLP_SEM:
1411		/* Flexible Multiprocessor Locking Protocol */
1412 *lock = pfp_new_fmlp();
1413 if (*lock)
1414 err = 0;
1415 else
1416 err = -ENOMEM;
1417 break;
1418
1419 case MPCP_SEM:
1420		/* Multiprocessor Priority Ceiling Protocol */
1421 *lock = pfp_new_mpcp(0);
1422 if (*lock)
1423 err = 0;
1424 else
1425 err = -ENOMEM;
1426 break;
1427
1428 case MPCP_VS_SEM:
1429		/* Multiprocessor Priority Ceiling Protocol with virtual spinning */
1430 *lock = pfp_new_mpcp(1);
1431 if (*lock)
1432 err = 0;
1433 else
1434 err = -ENOMEM;
1435 break;
1436
1437 case DPCP_SEM:
1438 /* Distributed Priority Ceiling Protocol */
1439 if (get_user(cpu, (int*) config))
1440 return -EFAULT;
1441
1442 if (!cpu_online(cpu))
1443 return -EINVAL;
1444
1445 *lock = pfp_new_dpcp(cpu);
1446 if (*lock)
1447 err = 0;
1448 else
1449 err = -ENOMEM;
1450 break;
1451
1452 case SRP_SEM:
1453 /* Baker's Stack Resource Policy */
1454 srp = allocate_srp_semaphore();
1455 if (srp) {
1456 *lock = &srp->litmus_lock;
1457 err = 0;
1458 } else
1459 err = -ENOMEM;
1460 break;
1461	}
1462
1463 return err;
1464}
1465
1466#endif
1467
1468static long pfp_admit_task(struct task_struct* tsk)
1469{
1470 if (task_cpu(tsk) == tsk->rt_param.task_params.cpu &&
1471#ifdef CONFIG_RELEASE_MASTER
1472 /* don't allow tasks on release master CPU */
1473 task_cpu(tsk) != remote_dom(task_cpu(tsk))->release_master &&
1474#endif
1475 get_priority(tsk) > 0)
1476 return 0;
1477 else
1478 return -EINVAL;
1479}
1480
1481static long pfp_activate_plugin(void)
1482{
1483#if defined(CONFIG_RELEASE_MASTER) || defined(CONFIG_LITMUS_LOCKING)
1484	int cpu;
1485#endif
1486
1487#ifdef CONFIG_RELEASE_MASTER
1486 for_each_online_cpu(cpu) {
1487 remote_dom(cpu)->release_master = atomic_read(&release_master_cpu);
1488 }
1489#endif
1490
1491#ifdef CONFIG_LITMUS_LOCKING
1492 get_srp_prio = pfp_get_srp_prio;
1493
1494 for_each_online_cpu(cpu) {
1495 init_waitqueue_head(&per_cpu(mpcpvs_vspin_wait, cpu));
1496 per_cpu(mpcpvs_vspin, cpu) = NULL;
1497
1498 pcp_init_state(&per_cpu(pcp_state, cpu));
1499 pfp_doms[cpu] = remote_pfp(cpu);
1500 }
1501
1502#endif
1503
1504 return 0;
1505}
1506
1507
1508/* Plugin object */
1509static struct sched_plugin pfp_plugin __cacheline_aligned_in_smp = {
1510 .plugin_name = "P-FP",
1511 .tick = pfp_tick,
1512 .task_new = pfp_task_new,
1513 .complete_job = complete_job,
1514 .task_exit = pfp_task_exit,
1515 .schedule = pfp_schedule,
1516 .task_wake_up = pfp_task_wake_up,
1517 .task_block = pfp_task_block,
1518 .admit_task = pfp_admit_task,
1519 .activate_plugin = pfp_activate_plugin,
1520#ifdef CONFIG_LITMUS_LOCKING
1521 .allocate_lock = pfp_allocate_lock,
1522 .finish_switch = pfp_finish_switch,
1523#endif
1524};
1525
1526
1527static int __init init_pfp(void)
1528{
1529 int i;
1530
1531	/* We do not really want to support CPU hotplug, do we? ;)
1532	 * However, if we ever did, iterating over 0..num_online_cpus()-1
1533	 * would be wrong, since online CPU IDs need not be contiguous.
1534	 */
1535 for (i = 0; i < num_online_cpus(); i++) {
1536 pfp_domain_init(remote_pfp(i), i);
1537 }
1538 return register_sched_plugin(&pfp_plugin);
1539}
1540
1541module_init(init_pfp);
1542