diff options

author     Jonathan Herman <hermanjl@cs.unc.edu>	2013-04-19 17:31:52 -0400
committer  Jonathan Herman <hermanjl@cs.unc.edu>	2013-04-19 17:31:52 -0400
commit     f70a290e8a889caa905ab7650c696f2bb299be1a (patch)
tree       56f0886d839499e9f522f189999024b3e86f9be2 /litmus/sched_pfp.c
parent     fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (diff)
parent     7ef4a793a624c6e66c16ca1051847f75161f5bec (diff)
Merge branch 'wip-nested-locking' into tegra-nested-locking
Conflicts:
	Makefile
	include/linux/fs.h

Diffstat (limited to 'litmus/sched_pfp.c')

 -rw-r--r--  litmus/sched_pfp.c  1751
 1 file changed, 1751 insertions(+), 0 deletions(-)
diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c
new file mode 100644
index 00000000000..aade0904491
--- /dev/null
+++ b/litmus/sched_pfp.c
@@ -0,0 +1,1751 @@
/*
 * litmus/sched_pfp.c
 *
 * Implementation of partitioned fixed-priority scheduling.
 * Based on PSN-EDF.
 */

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#include <litmus/litmus.h>
#include <litmus/wait.h>
#include <litmus/jobs.h>
#include <litmus/preempt.h>
#include <litmus/fp_common.h>
#include <litmus/sched_plugin.h>
#include <litmus/sched_trace.h>
#include <litmus/trace.h>
#include <litmus/budget.h>

#include <linux/uaccess.h>

typedef struct {
	rt_domain_t		domain;
	struct fp_prio_queue	ready_queue;
	int			cpu;
	struct task_struct*	scheduled; /* only RT tasks */
/*
 * scheduling lock slock
 * protects the domain and serializes scheduling decisions
 */
#define slock domain.ready_lock

} pfp_domain_t;

DEFINE_PER_CPU(pfp_domain_t, pfp_domains);

pfp_domain_t* pfp_doms[NR_CPUS];

#define local_pfp	(&__get_cpu_var(pfp_domains))
#define remote_dom(cpu)	(&per_cpu(pfp_domains, cpu).domain)
#define remote_pfp(cpu)	(&per_cpu(pfp_domains, cpu))
#define task_dom(task)	remote_dom(get_partition(task))
#define task_pfp(task)	remote_pfp(get_partition(task))

/* we assume the lock is being held */
static void preempt(pfp_domain_t *pfp)
{
	preempt_if_preemptable(pfp->scheduled, pfp->cpu);
}

static unsigned int priority_index(struct task_struct* t)
{
#ifdef CONFIG_LITMUS_LOCKING
	if (unlikely(t->rt_param.inh_task))
		/* use effective priority */
		t = t->rt_param.inh_task;

	if (is_priority_boosted(t)) {
		/* zero is reserved for priority-boosted tasks */
		return 0;
	} else
#endif
		return get_priority(t);
}
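
/* Illustrative example (hypothetical priorities; lower index = higher
 * priority): a task with get_priority(t) == 7 is normally inserted at
 * index 7 of the ready queue. While it holds a boosting lock
 * (is_priority_boosted(t)), it is inserted at the reserved index 0 and
 * thus outranks every non-boosted task on its partition; if it instead
 * inherits from a priority-3 task (inh_task), index 3 is used.
 */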

static void pfp_release_jobs(rt_domain_t* rt, struct bheap* tasks)
{
	pfp_domain_t *pfp = container_of(rt, pfp_domain_t, domain);
	unsigned long flags;
	struct task_struct* t;
	struct bheap_node* hn;

	raw_spin_lock_irqsave(&pfp->slock, flags);

	while (!bheap_empty(tasks)) {
		hn = bheap_take(fp_ready_order, tasks);
		t = bheap2task(hn);
		TRACE_TASK(t, "released (part:%d prio:%d)\n",
			   get_partition(t), get_priority(t));
		fp_prio_add(&pfp->ready_queue, t, priority_index(t));
	}

	/* do we need to preempt? */
	if (fp_higher_prio(fp_prio_peek(&pfp->ready_queue), pfp->scheduled)) {
		TRACE_CUR("preempted by new release\n");
		preempt(pfp);
	}

	raw_spin_unlock_irqrestore(&pfp->slock, flags);
}

static void pfp_preempt_check(pfp_domain_t *pfp)
{
	if (fp_higher_prio(fp_prio_peek(&pfp->ready_queue), pfp->scheduled))
		preempt(pfp);
}

static void pfp_domain_init(pfp_domain_t* pfp, int cpu)
{
	fp_domain_init(&pfp->domain, NULL, pfp_release_jobs);
	pfp->cpu = cpu;
	pfp->scheduled = NULL;
	fp_prio_queue_init(&pfp->ready_queue);
}

static void requeue(struct task_struct* t, pfp_domain_t *pfp)
{
	BUG_ON(!is_running(t));

	tsk_rt(t)->completed = 0;
	if (is_released(t, litmus_clock()))
		fp_prio_add(&pfp->ready_queue, t, priority_index(t));
	else
		add_release(&pfp->domain, t); /* it has got to wait */
}

static void job_completion(struct task_struct* t, int forced)
{
	sched_trace_task_completion(t, forced);
	TRACE_TASK(t, "job_completion().\n");

	tsk_rt(t)->completed = 1;
	prepare_for_next_period(t);
	if (is_released(t, litmus_clock()))
		sched_trace_task_release(t);
}
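
/* Sketch of the implied job arithmetic (editorial illustration; the
 * exact bookkeeping lives in prepare_for_next_period() in
 * litmus/jobs.c): with a period of 10 time units, a job released at
 * t = 100 that completes at t = 104 has its successor released at
 * t = 110, i.e., releases are spaced by the period, not by completion
 * times. Forced completions (forced == 1) occur on budget exhaustion.
 */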

static void pfp_tick(struct task_struct *t)
{
	pfp_domain_t *pfp = local_pfp;

	/* Check for inconsistency. We don't need the lock for this since
	 * ->scheduled is only changed in schedule(), which obviously is not
	 * executing in parallel on this CPU.
	 */
	BUG_ON(is_realtime(t) && t != pfp->scheduled);

	if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) {
		if (!is_np(t)) {
			litmus_reschedule_local();
			TRACE("pfp_scheduler_tick: "
			      "%d is preemptable "
			      "=> FORCE_RESCHED\n", t->pid);
		} else if (is_user_np(t)) {
			TRACE("pfp_scheduler_tick: "
			      "%d is non-preemptable, "
			      "preemption delayed.\n", t->pid);
			request_exit_np(t);
		}
	}
}

static struct task_struct* pfp_schedule(struct task_struct * prev)
{
	pfp_domain_t* pfp = local_pfp;
	struct task_struct* next;

	int out_of_time, sleep, preempt, np, exists, blocks, resched, migrate;

	raw_spin_lock(&pfp->slock);

	/* Sanity checking: unlike under G-EDF, when a task exits (dies),
	 * pfp->scheduled may be NULL while prev _is_ a real-time task.
	 */
	BUG_ON(pfp->scheduled && pfp->scheduled != prev);
	BUG_ON(pfp->scheduled && !is_realtime(prev));

	/* (0) Determine state */
	exists      = pfp->scheduled != NULL;
	blocks      = exists && !is_running(pfp->scheduled);
	out_of_time = exists &&
		      budget_enforced(pfp->scheduled) &&
		      budget_exhausted(pfp->scheduled);
	np          = exists && is_np(pfp->scheduled);
	sleep       = exists && is_completed(pfp->scheduled);
	migrate     = exists && get_partition(pfp->scheduled) != pfp->cpu;
	preempt     = !blocks && (migrate || fp_preemption_needed(&pfp->ready_queue, prev));

	/* If we need to preempt, do so.
	 * The following checks set resched to 1 in case of special
	 * circumstances.
	 */
	resched = preempt;

	/* If a task blocks we have no choice but to reschedule.
	 */
	if (blocks)
		resched = 1;

	/* Request a sys_exit_np() call if we would like to preempt but cannot.
	 * Multiple calls to request_exit_np() don't hurt.
	 */
	if (np && (out_of_time || preempt || sleep))
		request_exit_np(pfp->scheduled);

	/* Any task that is preemptable and either exhausts its execution
	 * budget or wants to sleep completes. We may have to reschedule after
	 * this.
	 */
	if (!np && (out_of_time || sleep) && !blocks && !migrate) {
		job_completion(pfp->scheduled, !sleep);
		resched = 1;
	}

	/* The final scheduling decision. Do we need to switch for some reason?
	 * Switch if we are in RT mode and have no task or if we need to
	 * resched.
	 */
	next = NULL;
	if ((!np || blocks) && (resched || !exists)) {
		/* When preempting a task that does not block, then
		 * re-insert it into either the ready queue or the
		 * release queue (if it completed). requeue() picks
		 * the appropriate queue.
		 */
		if (pfp->scheduled && !blocks && !migrate)
			requeue(pfp->scheduled, pfp);
		next = fp_prio_take(&pfp->ready_queue);
		if (next == prev) {
			struct task_struct *t = fp_prio_peek(&pfp->ready_queue);
			TRACE_TASK(next, "next==prev sleep=%d oot=%d np=%d preempt=%d migrate=%d "
				   "boost=%d empty=%d prio-idx=%u prio=%u\n",
				   sleep, out_of_time, np, preempt, migrate,
				   is_priority_boosted(next),
				   t == NULL,
				   priority_index(next),
				   get_priority(next));
			if (t)
				TRACE_TASK(t, "waiter boost=%d prio-idx=%u prio=%u\n",
					   is_priority_boosted(t),
					   priority_index(t),
					   get_priority(t));
		}
		/* If preempt is set, we should not see the same task again. */
		BUG_ON(preempt && next == prev);
		/* Similarly, if preempt is set, then next may not be NULL,
		 * unless it's a migration. */
		BUG_ON(preempt && !migrate && next == NULL);
	} else
		/* Only override Linux scheduler if we have a real-time task
		 * scheduled that needs to continue.
		 */
		if (exists)
			next = prev;

	if (next) {
		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
		tsk_rt(next)->completed = 0;
	} else {
		TRACE("becoming idle at %llu\n", litmus_clock());
	}

	pfp->scheduled = next;
	sched_state_task_picked();
	raw_spin_unlock(&pfp->slock);

	return next;
}
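
/* Summary of the decision logic above (editorial restatement of the
 * flag computation in step (0); no new behavior):
 *
 *	blocks                          => resched; prev is not requeued
 *	!np && (out_of_time || sleep)   => job_completion(), resched
 *	np && (oot || preempt || sleep) => keep prev, request_exit_np()
 *	migrate                         => resched; the requeue happens
 *	                                   later, in pfp_finish_switch()
 *	resched                         => requeue prev (unless it blocks
 *	                                   or migrates), pick queue head
 */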

#ifdef CONFIG_LITMUS_LOCKING

/* prev is no longer scheduled --- see if it needs to migrate */
static void pfp_finish_switch(struct task_struct *prev)
{
	pfp_domain_t *to;

	if (is_realtime(prev) &&
	    is_running(prev) &&
	    get_partition(prev) != smp_processor_id()) {
		TRACE_TASK(prev, "needs to migrate from P%d to P%d\n",
			   smp_processor_id(), get_partition(prev));

		to = task_pfp(prev);

		raw_spin_lock(&to->slock);

		TRACE_TASK(prev, "adding to queue on P%d\n", to->cpu);
		requeue(prev, to);
		if (fp_preemption_needed(&to->ready_queue, to->scheduled))
			preempt(to);

		raw_spin_unlock(&to->slock);
	}
}

#endif

/* Prepare a task for running in RT mode
 */
static void pfp_task_new(struct task_struct * t, int on_rq, int running)
{
	pfp_domain_t* pfp = task_pfp(t);
	unsigned long flags;

	TRACE_TASK(t, "P-FP: task new, cpu = %d\n",
		   t->rt_param.task_params.cpu);

	/* setup job parameters */
	release_at(t, litmus_clock());

	/* The task should be running in the queue, otherwise signal
	 * code will try to wake it up with fatal consequences.
	 */
	raw_spin_lock_irqsave(&pfp->slock, flags);
	if (running) {
		/* there shouldn't be anything else running at the time */
		BUG_ON(pfp->scheduled);
		pfp->scheduled = t;
	} else {
		requeue(t, pfp);
		/* maybe we have to reschedule */
		pfp_preempt_check(pfp);
	}
	raw_spin_unlock_irqrestore(&pfp->slock, flags);
}

static void pfp_task_wake_up(struct task_struct *task)
{
	unsigned long flags;
	pfp_domain_t* pfp = task_pfp(task);
	lt_t now;

	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
	raw_spin_lock_irqsave(&pfp->slock, flags);

#ifdef CONFIG_LITMUS_LOCKING
	/* Should only be queued when processing a fake wake-up due to a
	 * migration-related state change. */
	if (unlikely(is_queued(task))) {
		TRACE_TASK(task, "WARNING: waking task still queued. Is this right?\n");
		goto out_unlock;
	}
#else
	BUG_ON(is_queued(task));
#endif
	now = litmus_clock();
	if (is_sporadic(task) && is_tardy(task, now)
#ifdef CONFIG_LITMUS_LOCKING
	    /* We need to take suspensions because of semaphores into
	     * account! If a job resumes after being suspended due to acquiring
	     * a semaphore, it should never be treated as a new job release.
	     */
	    && !is_priority_boosted(task)
#endif
	    ) {
		/* new sporadic release */
		release_at(task, now);
		sched_trace_task_release(task);
	}

	/* Only add to ready queue if it is not the currently-scheduled
	 * task. This could be the case if a task was woken up concurrently
	 * on a remote CPU before the executing CPU got around to actually
	 * de-scheduling the task, i.e., wake_up() raced with schedule()
	 * and won. Also, don't requeue if it is still queued, which can
	 * happen under the DPCP due to wake-ups racing with migrations.
	 */
	if (pfp->scheduled != task) {
		requeue(task, pfp);
		pfp_preempt_check(pfp);
	}

#ifdef CONFIG_LITMUS_LOCKING
out_unlock:
#endif
	raw_spin_unlock_irqrestore(&pfp->slock, flags);
	TRACE_TASK(task, "wake up done\n");
}

static void pfp_task_block(struct task_struct *t)
{
	/* only running tasks can block, thus t is in no queue */
	TRACE_TASK(t, "block at %llu, state=%d\n", litmus_clock(), t->state);

	BUG_ON(!is_realtime(t));

	/* If this task blocked normally, it shouldn't be queued. The exception
	 * is a simulated block()/wakeup() pair from the pull-migration code
	 * path. This should only happen if the DPCP is being used.
	 */
#ifdef CONFIG_LITMUS_LOCKING
	if (unlikely(is_queued(t)))
		TRACE_TASK(t, "WARNING: blocking task still queued. Is this right?\n");
#else
	BUG_ON(is_queued(t));
#endif
}

static void pfp_task_exit(struct task_struct * t)
{
	unsigned long flags;
	pfp_domain_t* pfp = task_pfp(t);
	rt_domain_t* dom;

	raw_spin_lock_irqsave(&pfp->slock, flags);
	if (is_queued(t)) {
		BUG(); /* This currently doesn't work. */
		/* dequeue */
		dom = task_dom(t);
		remove(dom, t);
	}
	if (pfp->scheduled == t) {
		pfp->scheduled = NULL;
		preempt(pfp);
	}
	TRACE_TASK(t, "RIP, now reschedule\n");

	raw_spin_unlock_irqrestore(&pfp->slock, flags);
}

#ifdef CONFIG_LITMUS_LOCKING

#include <litmus/fdso.h>
#include <litmus/srp.h>

static void fp_dequeue(pfp_domain_t* pfp, struct task_struct* t)
{
	BUG_ON(pfp->scheduled == t && is_queued(t));
	if (is_queued(t))
		fp_prio_remove(&pfp->ready_queue, t, priority_index(t));
}

static void fp_set_prio_inh(pfp_domain_t* pfp, struct task_struct* t,
			    struct task_struct* prio_inh)
{
	int requeue;

	if (!t || t->rt_param.inh_task == prio_inh) {
		/* no update required */
		if (t)
			TRACE_TASK(t, "no prio-inh update required\n");
		return;
	}

	requeue = is_queued(t);
	TRACE_TASK(t, "prio-inh: is_queued:%d\n", requeue);

	if (requeue)
		/* first remove */
		fp_dequeue(pfp, t);

	t->rt_param.inh_task = prio_inh;

	if (requeue)
		/* add again to the right queue */
		fp_prio_add(&pfp->ready_queue, t, priority_index(t));
}

static int effective_agent_priority(int prio)
{
	/* make sure agents have higher priority */
	return prio - LITMUS_MAX_PRIORITY;
}

static lt_t prio_point(int eprio)
{
	/* make sure we have non-negative prio points */
	return eprio + LITMUS_MAX_PRIORITY;
}

static int prio_from_point(lt_t prio_point)
{
	return ((int) prio_point) - LITMUS_MAX_PRIORITY;
}
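
/* Worked example (assuming LITMUS_MAX_PRIORITY == 512; the actual
 * constant is configuration-dependent): a ceiling agent acting for a
 * task with base priority 10 gets
 *
 *	eprio = effective_agent_priority(10) = 10 - 512 = -502
 *	prio_point(-502)                     = -502 + 512 = 10
 *	prio_from_point(10)                  = 10 - 512 = -502
 *
 * so agents always compare as higher-priority than normal tasks, while
 * the wait-queue keys (prio points) remain non-negative.
 */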

static void boost_priority(struct task_struct* t, lt_t priority_point)
{
	unsigned long flags;
	pfp_domain_t* pfp = task_pfp(t);

	raw_spin_lock_irqsave(&pfp->slock, flags);

	TRACE_TASK(t, "priority boosted at %llu\n", litmus_clock());

	tsk_rt(t)->priority_boosted = 1;
	/* tie-break by protocol-specific priority point */
	tsk_rt(t)->boost_start_time = priority_point;

	/* Priority boosting currently only takes effect for already-scheduled
	 * tasks. This is sufficient since priority boosting only kicks in as
	 * part of lock acquisitions. */
	BUG_ON(pfp->scheduled != t);

	raw_spin_unlock_irqrestore(&pfp->slock, flags);
}

static void unboost_priority(struct task_struct* t)
{
	unsigned long flags;
	pfp_domain_t* pfp = task_pfp(t);
	lt_t now;

	raw_spin_lock_irqsave(&pfp->slock, flags);
	now = litmus_clock();

	/* Assumption: this only happens while the job is scheduled;
	 * priority-boosted jobs must be scheduled. */
	BUG_ON(pfp->scheduled != t);

	TRACE_TASK(t, "priority restored at %llu\n", now);

	tsk_rt(t)->priority_boosted = 0;
	tsk_rt(t)->boost_start_time = 0;

	/* check if this changes anything */
	if (fp_preemption_needed(&pfp->ready_queue, pfp->scheduled))
		preempt(pfp);

	raw_spin_unlock_irqrestore(&pfp->slock, flags);
}

/* ******************** SRP support ************************ */

static unsigned int pfp_get_srp_prio(struct task_struct* t)
{
	return get_priority(t);
}

/* ******************** FMLP support ********************** */

struct fmlp_semaphore {
	struct litmus_lock litmus_lock;

	/* current resource holder */
	struct task_struct *owner;

	/* FIFO queue of waiting tasks */
	wait_queue_head_t wait;
};

static inline struct fmlp_semaphore* fmlp_from_lock(struct litmus_lock* lock)
{
	return container_of(lock, struct fmlp_semaphore, litmus_lock);
}

int pfp_fmlp_lock(struct litmus_lock* l)
{
	struct task_struct* t = current;
	struct fmlp_semaphore *sem = fmlp_from_lock(l);
	wait_queue_t wait;
	unsigned long flags;
	lt_t time_of_request;

	if (!is_realtime(t))
		return -EPERM;

	/* prevent nested lock acquisition --- not supported by FMLP */
	if (tsk_rt(t)->num_locks_held ||
	    tsk_rt(t)->num_local_locks_held)
		return -EBUSY;

	spin_lock_irqsave(&sem->wait.lock, flags);

	/* tie-break by this point in time */
	time_of_request = litmus_clock();

	/* Priority-boost ourself *before* we suspend so that
	 * our priority is boosted when we resume. */
	boost_priority(t, time_of_request);

	if (sem->owner) {
		/* resource is not free => must suspend and wait */

		init_waitqueue_entry(&wait, t);

		/* FIXME: interruptible would be nice some day */
		set_task_state(t, TASK_UNINTERRUPTIBLE);

		__add_wait_queue_tail_exclusive(&sem->wait, &wait);

		TS_LOCK_SUSPEND;

		/* release lock before sleeping */
		spin_unlock_irqrestore(&sem->wait.lock, flags);

		/* We depend on the FIFO order. Thus, we don't need to recheck
		 * when we wake up; we are guaranteed to have the lock since
		 * there is only one wake-up per release.
		 */

		schedule();

		TS_LOCK_RESUME;

		/* Since we hold the lock, no other task will change
		 * ->owner. We can thus check it without acquiring the spin
		 * lock. */
		BUG_ON(sem->owner != t);
	} else {
		/* it's ours now */
		sem->owner = t;

		spin_unlock_irqrestore(&sem->wait.lock, flags);
	}

	tsk_rt(t)->num_locks_held++;

	return 0;
}

int pfp_fmlp_unlock(struct litmus_lock* l)
{
	struct task_struct *t = current, *next;
	struct fmlp_semaphore *sem = fmlp_from_lock(l);
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&sem->wait.lock, flags);

	if (sem->owner != t) {
		err = -EINVAL;
		goto out;
	}

	tsk_rt(t)->num_locks_held--;

	/* we lose the benefit of priority boosting */
	unboost_priority(t);

	/* check if there are jobs waiting for this resource */
	next = __waitqueue_remove_first(&sem->wait);
	if (next) {
		/* next becomes the resource holder */
		sem->owner = next;

		/* Wake up next. The waiting job is already priority-boosted. */
		wake_up_process(next);
	} else
		/* resource becomes available */
		sem->owner = NULL;

out:
	spin_unlock_irqrestore(&sem->wait.lock, flags);
	return err;
}

int pfp_fmlp_close(struct litmus_lock* l)
{
	struct task_struct *t = current;
	struct fmlp_semaphore *sem = fmlp_from_lock(l);
	unsigned long flags;

	int owner;

	spin_lock_irqsave(&sem->wait.lock, flags);

	owner = sem->owner == t;

	spin_unlock_irqrestore(&sem->wait.lock, flags);

	if (owner)
		pfp_fmlp_unlock(l);

	return 0;
}

void pfp_fmlp_free(struct litmus_lock* lock)
{
	kfree(fmlp_from_lock(lock));
}

static struct litmus_lock_ops pfp_fmlp_lock_ops = {
	.close      = pfp_fmlp_close,
	.lock       = pfp_fmlp_lock,
	.unlock     = pfp_fmlp_unlock,
	.deallocate = pfp_fmlp_free,
};

static struct litmus_lock* pfp_new_fmlp(void)
{
	struct fmlp_semaphore* sem;

	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
	if (!sem)
		return NULL;

	sem->owner = NULL;
	init_waitqueue_head(&sem->wait);
	sem->litmus_lock.ops = &pfp_fmlp_lock_ops;

	return &sem->litmus_lock;
}
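
/* Minimal in-kernel usage sketch (hypothetical caller; real accesses go
 * through the fdso layer, not direct calls):
 *
 *	struct litmus_lock *l = pfp_new_fmlp();
 *	if (l) {
 *		l->ops->lock(l);       // pfp_fmlp_lock(): boost, maybe suspend
 *		...critical section, priority-boosted...
 *		l->ops->unlock(l);     // pfp_fmlp_unlock(): unboost, pass owner
 *		l->ops->deallocate(l);
 *	}
 */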

/* ******************** MPCP support ********************** */

struct mpcp_semaphore {
	struct litmus_lock litmus_lock;

	/* current resource holder */
	struct task_struct *owner;

	/* priority queue of waiting tasks */
	wait_queue_head_t wait;

	/* priority ceiling per cpu */
	unsigned int prio_ceiling[NR_CPUS];

	/* should jobs spin "virtually" for this resource? */
	int vspin;
};

#define OMEGA_CEILING UINT_MAX

/* Since jobs spin "virtually" while waiting to acquire a lock,
 * they first must acquire a local per-cpu resource.
 */
static DEFINE_PER_CPU(wait_queue_head_t, mpcpvs_vspin_wait);
static DEFINE_PER_CPU(struct task_struct*, mpcpvs_vspin);

/* called with preemptions off <=> no local modifications */
static void mpcp_vspin_enter(void)
{
	struct task_struct* t = current;

	while (1) {
		if (__get_cpu_var(mpcpvs_vspin) == NULL) {
			/* good, we get to issue our request */
			__get_cpu_var(mpcpvs_vspin) = t;
			break;
		} else {
			/* some job is spinning => enqueue in request queue */
			prio_wait_queue_t wait;
			wait_queue_head_t* vspin = &__get_cpu_var(mpcpvs_vspin_wait);
			unsigned long flags;

			/* ordered by regular priority */
			init_prio_waitqueue_entry(&wait, t, prio_point(get_priority(t)));

			spin_lock_irqsave(&vspin->lock, flags);

			set_task_state(t, TASK_UNINTERRUPTIBLE);

			__add_wait_queue_prio_exclusive(vspin, &wait);

			spin_unlock_irqrestore(&vspin->lock, flags);

			TS_LOCK_SUSPEND;

			preempt_enable_no_resched();

			schedule();

			preempt_disable();

			TS_LOCK_RESUME;
			/* Recheck if we got it --- some higher-priority
			 * process might have swooped in. */
		}
	}
	/* ok, now it is ours */
}

/* called with preemptions off */
static void mpcp_vspin_exit(void)
{
	struct task_struct* t = current, *next;
	unsigned long flags;
	wait_queue_head_t* vspin = &__get_cpu_var(mpcpvs_vspin_wait);

	BUG_ON(__get_cpu_var(mpcpvs_vspin) != t);

	/* no spinning job */
	__get_cpu_var(mpcpvs_vspin) = NULL;

	/* see if anyone is waiting for us to stop "spinning" */
	spin_lock_irqsave(&vspin->lock, flags);
	next = __waitqueue_remove_first(vspin);

	if (next)
		wake_up_process(next);

	spin_unlock_irqrestore(&vspin->lock, flags);
}
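
/* The vspin pair brackets the global lock request (see pfp_mpcp_lock()
 * and pfp_mpcp_unlock() below); a condensed sketch:
 *
 *	preempt_disable();
 *	mpcp_vspin_enter();   // at most one local job may "spin" at a time
 *	...issue the global MPCP request, possibly suspend...
 *	mpcp_vspin_exit();    // wake the next local spinner, if any
 *	preempt_enable();
 *
 * Serializing per-partition requests this way is what makes the
 * "virtual spinning" analysis of the MPCP-VS applicable.
 */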

static inline struct mpcp_semaphore* mpcp_from_lock(struct litmus_lock* lock)
{
	return container_of(lock, struct mpcp_semaphore, litmus_lock);
}

int pfp_mpcp_lock(struct litmus_lock* l)
{
	struct task_struct* t = current;
	struct mpcp_semaphore *sem = mpcp_from_lock(l);
	prio_wait_queue_t wait;
	unsigned long flags;

	if (!is_realtime(t))
		return -EPERM;

	/* prevent nested lock acquisition */
	if (tsk_rt(t)->num_locks_held ||
	    tsk_rt(t)->num_local_locks_held)
		return -EBUSY;

	preempt_disable();

	if (sem->vspin)
		mpcp_vspin_enter();

	/* Priority-boost ourself *before* we suspend so that
	 * our priority is boosted when we resume. Use the priority
	 * ceiling for the local partition. */
	boost_priority(t, sem->prio_ceiling[get_partition(t)]);

	spin_lock_irqsave(&sem->wait.lock, flags);

	preempt_enable_no_resched();

	if (sem->owner) {
		/* resource is not free => must suspend and wait */

		/* ordered by regular priority */
		init_prio_waitqueue_entry(&wait, t, prio_point(get_priority(t)));

		/* FIXME: interruptible would be nice some day */
		set_task_state(t, TASK_UNINTERRUPTIBLE);

		__add_wait_queue_prio_exclusive(&sem->wait, &wait);

		TS_LOCK_SUSPEND;

		/* release lock before sleeping */
		spin_unlock_irqrestore(&sem->wait.lock, flags);

		/* There is exactly one wake-up per release, so we do not
		 * need to recheck ownership when we resume: the lock is
		 * guaranteed to be ours.
		 */

		schedule();

		TS_LOCK_RESUME;

		/* Since we hold the lock, no other task will change
		 * ->owner. We can thus check it without acquiring the spin
		 * lock. */
		BUG_ON(sem->owner != t);
	} else {
		/* it's ours now */
		sem->owner = t;

		spin_unlock_irqrestore(&sem->wait.lock, flags);
	}

	tsk_rt(t)->num_locks_held++;

	return 0;
}

int pfp_mpcp_unlock(struct litmus_lock* l)
{
	struct task_struct *t = current, *next;
	struct mpcp_semaphore *sem = mpcp_from_lock(l);
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&sem->wait.lock, flags);

	if (sem->owner != t) {
		err = -EINVAL;
		goto out;
	}

	tsk_rt(t)->num_locks_held--;

	/* we lose the benefit of priority boosting */
	unboost_priority(t);

	/* check if there are jobs waiting for this resource */
	next = __waitqueue_remove_first(&sem->wait);
	if (next) {
		/* next becomes the resource holder */
		sem->owner = next;

		/* Wake up next. The waiting job is already priority-boosted. */
		wake_up_process(next);
	} else
		/* resource becomes available */
		sem->owner = NULL;

out:
	spin_unlock_irqrestore(&sem->wait.lock, flags);

	if (sem->vspin && err == 0) {
		preempt_disable();
		mpcp_vspin_exit();
		preempt_enable();
	}

	return err;
}

int pfp_mpcp_open(struct litmus_lock* l, void* config)
{
	struct task_struct *t = current;
	struct mpcp_semaphore *sem = mpcp_from_lock(l);
	int cpu, local_cpu;
	unsigned long flags;

	if (!is_realtime(t))
		/* we need to know the real-time priority */
		return -EPERM;

	local_cpu = get_partition(t);

	spin_lock_irqsave(&sem->wait.lock, flags);

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu != local_cpu) {
			sem->prio_ceiling[cpu] = min(sem->prio_ceiling[cpu],
						     get_priority(t));
			TRACE_CUR("priority ceiling for sem %p is now %d on cpu %d\n",
				  sem, sem->prio_ceiling[cpu], cpu);
		}

	spin_unlock_irqrestore(&sem->wait.lock, flags);

	return 0;
}

int pfp_mpcp_close(struct litmus_lock* l)
{
	struct task_struct *t = current;
	struct mpcp_semaphore *sem = mpcp_from_lock(l);
	unsigned long flags;

	int owner;

	spin_lock_irqsave(&sem->wait.lock, flags);

	owner = sem->owner == t;

	spin_unlock_irqrestore(&sem->wait.lock, flags);

	if (owner)
		pfp_mpcp_unlock(l);

	return 0;
}

void pfp_mpcp_free(struct litmus_lock* lock)
{
	kfree(mpcp_from_lock(lock));
}

static struct litmus_lock_ops pfp_mpcp_lock_ops = {
	.close      = pfp_mpcp_close,
	.lock       = pfp_mpcp_lock,
	.open       = pfp_mpcp_open,
	.unlock     = pfp_mpcp_unlock,
	.deallocate = pfp_mpcp_free,
};

static struct litmus_lock* pfp_new_mpcp(int vspin)
{
	struct mpcp_semaphore* sem;
	int cpu;

	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
	if (!sem)
		return NULL;

	sem->owner = NULL;
	init_waitqueue_head(&sem->wait);
	sem->litmus_lock.ops = &pfp_mpcp_lock_ops;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sem->prio_ceiling[cpu] = OMEGA_CEILING;

	/* mark as virtual spinning */
	sem->vspin = vspin;

	return &sem->litmus_lock;
}


/* ******************** PCP support ********************** */


struct pcp_semaphore {
	struct litmus_lock litmus_lock;

	struct list_head ceiling;

	/* current resource holder */
	struct task_struct *owner;

	/* priority ceiling --- can be negative due to DPCP support */
	int prio_ceiling;

	/* on which processor is this PCP semaphore allocated? */
	int on_cpu;
};

static inline struct pcp_semaphore* pcp_from_lock(struct litmus_lock* lock)
{
	return container_of(lock, struct pcp_semaphore, litmus_lock);
}

struct pcp_state {
	struct list_head system_ceiling;

	/* highest-priority waiting task */
	struct task_struct* hp_waiter;

	/* list of jobs waiting to get past the system ceiling */
	wait_queue_head_t ceiling_blocked;
};

static void pcp_init_state(struct pcp_state* s)
{
	INIT_LIST_HEAD(&s->system_ceiling);
	s->hp_waiter = NULL;
	init_waitqueue_head(&s->ceiling_blocked);
}

static DEFINE_PER_CPU(struct pcp_state, pcp_state);

/* assumes preemptions are off */
static struct pcp_semaphore* pcp_get_ceiling(void)
{
	struct list_head* in_use = &__get_cpu_var(pcp_state).system_ceiling;

	/* An empty list head points back to itself, so check emptiness
	 * explicitly rather than testing ->next against NULL. */
	if (list_empty(in_use))
		return NULL;
	return list_entry(in_use->next, struct pcp_semaphore, ceiling);
}

/* assumes preempt off */
static void pcp_add_ceiling(struct pcp_semaphore* sem)
{
	struct list_head *pos;
	struct list_head *in_use = &__get_cpu_var(pcp_state).system_ceiling;
	struct pcp_semaphore* held;

	BUG_ON(sem->on_cpu != smp_processor_id());
	BUG_ON(in_list(&sem->ceiling));

	list_for_each(pos, in_use) {
		held = list_entry(pos, struct pcp_semaphore, ceiling);
		if (held->prio_ceiling >= sem->prio_ceiling) {
			__list_add(&sem->ceiling, pos->prev, pos);
			return;
		}
	}

	/* we hit the end of the list */
	list_add_tail(&sem->ceiling, in_use);
}

/* assumes preempt off */
static int pcp_exceeds_ceiling(struct pcp_semaphore* ceiling,
			       struct task_struct* task,
			       int effective_prio)
{
	return ceiling == NULL ||
	       ceiling->prio_ceiling > effective_prio ||
	       ceiling->owner == task;
}
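
/* Worked example (hypothetical priorities; smaller value = higher
 * priority): suppose the local system ceiling is held by a semaphore
 * with prio_ceiling == 3. Then
 *
 *	a job with effective priority 2 passes  (3 > 2: it beats the ceiling)
 *	a job with effective priority 3 blocks  (ties do not pass)
 *	a job with effective priority 5 blocks
 *	the ceiling owner itself always passes  (needed for nested requests)
 */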

/* assumes preempt off */
static void pcp_priority_inheritance(void)
{
	unsigned long flags;
	pfp_domain_t* pfp = local_pfp;

	struct pcp_semaphore* ceiling = pcp_get_ceiling();
	struct task_struct *blocker, *blocked;

	blocker = ceiling ? ceiling->owner : NULL;
	blocked = __get_cpu_var(pcp_state).hp_waiter;

	raw_spin_lock_irqsave(&pfp->slock, flags);

	/* Current is no longer inheriting anything by default. This should be
	 * the currently scheduled job, and hence not currently queued. */
	BUG_ON(current != pfp->scheduled);

	fp_set_prio_inh(pfp, current, NULL);
	fp_set_prio_inh(pfp, blocked, NULL);
	fp_set_prio_inh(pfp, blocker, NULL);

	/* Let the blocking job inherit the priority of the blocked job, if
	 * required. */
	if (blocker && blocked &&
	    fp_higher_prio(blocked, blocker)) {
		TRACE_TASK(blocker, "PCP inherits from %s/%d (prio %u -> %u)\n",
			   blocked->comm, blocked->pid,
			   get_priority(blocker), get_priority(blocked));
		fp_set_prio_inh(pfp, blocker, blocked);
	}

	/* Check if anything changed. If the blocked job is current, then it is
	 * just blocking and hence is going to call the scheduler anyway. */
	if (blocked != current &&
	    fp_higher_prio(fp_prio_peek(&pfp->ready_queue), pfp->scheduled))
		preempt(pfp);

	raw_spin_unlock_irqrestore(&pfp->slock, flags);
}

/* called with preemptions off */
static void pcp_raise_ceiling(struct pcp_semaphore* sem, int effective_prio)
{
	struct task_struct* t = current;
	struct pcp_semaphore* ceiling;
	prio_wait_queue_t wait;
	unsigned int waiting_higher_prio;

	do {
		ceiling = pcp_get_ceiling();
		if (pcp_exceeds_ceiling(ceiling, t, effective_prio))
			break;

		TRACE_CUR("PCP ceiling-blocked, wanted sem %p, but %s/%d has the ceiling\n",
			  sem, ceiling->owner->comm, ceiling->owner->pid);

		/* we need to wait until the ceiling is lowered */

		/* enqueue in priority order */
		init_prio_waitqueue_entry(&wait, t, prio_point(effective_prio));
		set_task_state(t, TASK_UNINTERRUPTIBLE);
		waiting_higher_prio = add_wait_queue_prio_exclusive(
			&__get_cpu_var(pcp_state).ceiling_blocked, &wait);

		if (waiting_higher_prio == 0) {
			TRACE_CUR("PCP new highest-prio waiter => prio inheritance\n");

			/* we are the new highest-priority waiting job
			 * => update inheritance */
			__get_cpu_var(pcp_state).hp_waiter = t;
			pcp_priority_inheritance();
		}

		TS_LOCK_SUSPEND;

		preempt_enable_no_resched();
		schedule();
		preempt_disable();

		/* pcp_resume_unblocked() removed us from the wait queue */

		TS_LOCK_RESUME;
	} while (1);

	TRACE_CUR("PCP got the ceiling and sem %p\n", sem);

	/* We are good to go. The semaphore should be available. */
	BUG_ON(sem->owner != NULL);

	sem->owner = t;

	pcp_add_ceiling(sem);
}

static void pcp_resume_unblocked(void)
{
	wait_queue_head_t *blocked = &__get_cpu_var(pcp_state).ceiling_blocked;
	unsigned long flags;
	prio_wait_queue_t* q;
	struct task_struct* t = NULL;

	struct pcp_semaphore* ceiling = pcp_get_ceiling();

	spin_lock_irqsave(&blocked->lock, flags);

	while (waitqueue_active(blocked)) {
		/* check first == highest-priority waiting job */
		q = list_entry(blocked->task_list.next,
			       prio_wait_queue_t, wq.task_list);
		t = (struct task_struct*) q->wq.private;

		/* can it proceed now? => let it go */
		if (pcp_exceeds_ceiling(ceiling, t,
					prio_from_point(q->priority))) {
			__remove_wait_queue(blocked, &q->wq);
			wake_up_process(t);
		} else {
			/* We are done. Update highest-priority waiter. */
			__get_cpu_var(pcp_state).hp_waiter = t;
			goto out;
		}
	}
	/* If we get here, then there are no more waiting jobs. */
	__get_cpu_var(pcp_state).hp_waiter = NULL;
out:
	spin_unlock_irqrestore(&blocked->lock, flags);
}

/* assumes preempt off */
static void pcp_lower_ceiling(struct pcp_semaphore* sem)
{
	BUG_ON(!in_list(&sem->ceiling));
	BUG_ON(sem->owner != current);
	BUG_ON(sem->on_cpu != smp_processor_id());

	/* remove from ceiling list */
	list_del(&sem->ceiling);

	/* release */
	sem->owner = NULL;

	TRACE_CUR("PCP released sem %p\n", sem);

	pcp_priority_inheritance();

	/* Wake up all ceiling-blocked jobs that now pass the ceiling. */
	pcp_resume_unblocked();
}

static void pcp_update_prio_ceiling(struct pcp_semaphore* sem,
				    int effective_prio)
{
	/* This needs to be synchronized on something.
	 * Might as well use the wait-queue lock for the processor.
	 * We assume this happens only before the task set starts execution
	 * (i.e., during initialization), but it may happen on multiple
	 * processors at the same time.
	 */
	unsigned long flags;

	struct pcp_state* s = &per_cpu(pcp_state, sem->on_cpu);

	spin_lock_irqsave(&s->ceiling_blocked.lock, flags);

	sem->prio_ceiling = min(sem->prio_ceiling, effective_prio);

	spin_unlock_irqrestore(&s->ceiling_blocked.lock, flags);
}

static void pcp_init_semaphore(struct pcp_semaphore* sem, int cpu)
{
	sem->owner = NULL;
	INIT_LIST_HEAD(&sem->ceiling);
	sem->prio_ceiling = INT_MAX;
	sem->on_cpu = cpu;
}

int pfp_pcp_lock(struct litmus_lock* l)
{
	struct task_struct* t = current;
	struct pcp_semaphore *sem = pcp_from_lock(l);

	int eprio = effective_agent_priority(get_priority(t));
	int from  = get_partition(t);
	int to    = sem->on_cpu;

	if (!is_realtime(t) || from != to)
		return -EPERM;

	/* prevent nested lock acquisition in global critical section */
	if (tsk_rt(t)->num_locks_held)
		return -EBUSY;

	preempt_disable();

	pcp_raise_ceiling(sem, eprio);

	preempt_enable();

	tsk_rt(t)->num_local_locks_held++;

	return 0;
}

int pfp_pcp_unlock(struct litmus_lock* l)
{
	struct task_struct *t = current;
	struct pcp_semaphore *sem = pcp_from_lock(l);

	int err = 0;

	preempt_disable();

	if (sem->on_cpu != smp_processor_id() || sem->owner != t) {
		err = -EINVAL;
		goto out;
	}

	tsk_rt(t)->num_local_locks_held--;

	/* give it back */
	pcp_lower_ceiling(sem);

out:
	preempt_enable();

	return err;
}

int pfp_pcp_open(struct litmus_lock* l, void* __user config)
{
	struct task_struct *t = current;
	struct pcp_semaphore *sem = pcp_from_lock(l);

	int cpu, eprio;

	if (!is_realtime(t))
		/* we need to know the real-time priority */
		return -EPERM;

	if (!config)
		cpu = get_partition(t);
	else if (get_user(cpu, (int*) config))
		return -EFAULT;

	/* make sure the resource location matches */
	if (cpu != sem->on_cpu)
		return -EINVAL;

	eprio = effective_agent_priority(get_priority(t));

	pcp_update_prio_ceiling(sem, eprio);

	return 0;
}

int pfp_pcp_close(struct litmus_lock* l)
{
	struct task_struct *t = current;
	struct pcp_semaphore *sem = pcp_from_lock(l);

	int owner = 0;

	preempt_disable();

	if (sem->on_cpu == smp_processor_id())
		owner = sem->owner == t;

	preempt_enable();

	if (owner)
		pfp_pcp_unlock(l);

	return 0;
}

void pfp_pcp_free(struct litmus_lock* lock)
{
	kfree(pcp_from_lock(lock));
}

static struct litmus_lock_ops pfp_pcp_lock_ops = {
	.close      = pfp_pcp_close,
	.lock       = pfp_pcp_lock,
	.open       = pfp_pcp_open,
	.unlock     = pfp_pcp_unlock,
	.deallocate = pfp_pcp_free,
};

static struct litmus_lock* pfp_new_pcp(int on_cpu)
{
	struct pcp_semaphore* sem;

	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
	if (!sem)
		return NULL;

	sem->litmus_lock.ops = &pfp_pcp_lock_ops;
	pcp_init_semaphore(sem, on_cpu);

	return &sem->litmus_lock;
}

/* ******************** DPCP support ********************** */

struct dpcp_semaphore {
	struct litmus_lock litmus_lock;
	struct pcp_semaphore pcp;
	int owner_cpu;
};

static inline struct dpcp_semaphore* dpcp_from_lock(struct litmus_lock* lock)
{
	return container_of(lock, struct dpcp_semaphore, litmus_lock);
}

/* called with preemptions disabled */
static void pfp_migrate_to(int target_cpu)
{
	struct task_struct* t = current;
	pfp_domain_t *from;

	if (get_partition(t) == target_cpu)
		return;

	/* make sure target_cpu makes sense */
	BUG_ON(!cpu_online(target_cpu));

	local_irq_disable();

	/* scheduled task should not be in any ready or release queue */
	BUG_ON(is_queued(t));

	/* lock the task's current domain while we switch its partition */
	from = task_pfp(t);

	raw_spin_lock(&from->slock);

	/* switch partitions */
	tsk_rt(t)->task_params.cpu = target_cpu;

	raw_spin_unlock(&from->slock);

	/* Don't trace scheduler costs as part of
	 * locking overhead. Scheduling costs are accounted for
	 * explicitly. */
	TS_LOCK_SUSPEND;

	local_irq_enable();
	preempt_enable_no_resched();

	/* deschedule to be migrated */
	schedule();

	/* we are now on the target processor */
	preempt_disable();

	/* start recording costs again */
	TS_LOCK_RESUME;

	BUG_ON(smp_processor_id() != target_cpu);
}
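
/* Under the DPCP, a global request thus unfolds as follows (condensed
 * restatement of pfp_dpcp_lock()/pfp_dpcp_unlock() below; error
 * handling omitted):
 *
 *	boost_priority(t, ...);       // on the home partition
 *	pfp_migrate_to(sem->on_cpu);  // hop to the resource's partition
 *	pcp_raise_ceiling(...);       // run the request as a local PCP agent
 *	...critical section...
 *	pcp_lower_ceiling(...);
 *	unboost_priority(t);
 *	pfp_migrate_to(home);         // hop back
 */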

int pfp_dpcp_lock(struct litmus_lock* l)
{
	struct task_struct* t = current;
	struct dpcp_semaphore *sem = dpcp_from_lock(l);
	int eprio = effective_agent_priority(get_priority(t));
	int from  = get_partition(t);
	int to    = sem->pcp.on_cpu;

	if (!is_realtime(t))
		return -EPERM;

	/* prevent nested lock acquisition */
	if (tsk_rt(t)->num_locks_held ||
	    tsk_rt(t)->num_local_locks_held)
		return -EBUSY;

	preempt_disable();

	/* Priority-boost ourself *before* we suspend so that
	 * our priority is boosted when we resume. */
	boost_priority(t, get_priority(t));

	pfp_migrate_to(to);

	pcp_raise_ceiling(&sem->pcp, eprio);

	/* yep, we got it => execute request */
	sem->owner_cpu = from;

	preempt_enable();

	tsk_rt(t)->num_locks_held++;

	return 0;
}

int pfp_dpcp_unlock(struct litmus_lock* l)
{
	struct task_struct *t = current;
	struct dpcp_semaphore *sem = dpcp_from_lock(l);
	int err = 0;
	int home;

	preempt_disable();

	if (sem->pcp.on_cpu != smp_processor_id() || sem->pcp.owner != t) {
		err = -EINVAL;
		goto out;
	}

	tsk_rt(t)->num_locks_held--;

	home = sem->owner_cpu;

	/* give it back */
	pcp_lower_ceiling(&sem->pcp);

	/* we lose the benefit of priority boosting */
	unboost_priority(t);

	pfp_migrate_to(home);

out:
	preempt_enable();

	return err;
}

int pfp_dpcp_open(struct litmus_lock* l, void* __user config)
{
	struct task_struct *t = current;
	struct dpcp_semaphore *sem = dpcp_from_lock(l);
	int cpu, eprio;

	if (!is_realtime(t))
		/* we need to know the real-time priority */
		return -EPERM;

	if (get_user(cpu, (int*) config))
		return -EFAULT;

	/* make sure the resource location matches */
	if (cpu != sem->pcp.on_cpu)
		return -EINVAL;

	eprio = effective_agent_priority(get_priority(t));

	pcp_update_prio_ceiling(&sem->pcp, eprio);

	return 0;
}

int pfp_dpcp_close(struct litmus_lock* l)
{
	struct task_struct *t = current;
	struct dpcp_semaphore *sem = dpcp_from_lock(l);
	int owner = 0;

	preempt_disable();

	if (sem->pcp.on_cpu == smp_processor_id())
		owner = sem->pcp.owner == t;

	preempt_enable();

	if (owner)
		pfp_dpcp_unlock(l);

	return 0;
}

void pfp_dpcp_free(struct litmus_lock* lock)
{
	kfree(dpcp_from_lock(lock));
}

static struct litmus_lock_ops pfp_dpcp_lock_ops = {
	.close      = pfp_dpcp_close,
	.lock       = pfp_dpcp_lock,
	.open       = pfp_dpcp_open,
	.unlock     = pfp_dpcp_unlock,
	.deallocate = pfp_dpcp_free,
};

static struct litmus_lock* pfp_new_dpcp(int on_cpu)
{
	struct dpcp_semaphore* sem;

	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
	if (!sem)
		return NULL;

	sem->litmus_lock.ops = &pfp_dpcp_lock_ops;
	sem->owner_cpu = NO_CPU;
	pcp_init_semaphore(&sem->pcp, on_cpu);

	return &sem->litmus_lock;
}


/* **** lock constructor **** */


static long pfp_allocate_lock(struct litmus_lock **lock, int type,
			      void* __user config)
{
	int err = -ENXIO, cpu;
	struct srp_semaphore* srp;

	/* P-FP supports the SRP and the PCP for local resources and the
	 * FMLP, MPCP, and DPCP for global resources. */
	switch (type) {
	case FMLP_SEM:
		/* FIFO Mutex Locking Protocol */
		*lock = pfp_new_fmlp();
		if (*lock)
			err = 0;
		else
			err = -ENOMEM;
		break;

	case MPCP_SEM:
		/* Multiprocessor Priority Ceiling Protocol */
		*lock = pfp_new_mpcp(0);
		if (*lock)
			err = 0;
		else
			err = -ENOMEM;
		break;

	case MPCP_VS_SEM:
		/* Multiprocessor Priority Ceiling Protocol with virtual spinning */
		*lock = pfp_new_mpcp(1);
		if (*lock)
			err = 0;
		else
			err = -ENOMEM;
		break;

	case DPCP_SEM:
		/* Distributed Priority Ceiling Protocol */
		if (get_user(cpu, (int*) config))
			return -EFAULT;

		if (!cpu_online(cpu))
			return -EINVAL;

		*lock = pfp_new_dpcp(cpu);
		if (*lock)
			err = 0;
		else
			err = -ENOMEM;
		break;

	case SRP_SEM:
		/* Baker's Stack Resource Policy */
		srp = allocate_srp_semaphore();
		if (srp) {
			*lock = &srp->litmus_lock;
			err = 0;
		} else
			err = -ENOMEM;
		break;

	case PCP_SEM:
		/* Priority Ceiling Protocol */
		if (!config)
			cpu = get_partition(current);
		else if (get_user(cpu, (int*) config))
			return -EFAULT;

		if (!cpu_online(cpu))
			return -EINVAL;

		*lock = pfp_new_pcp(cpu);
		if (*lock)
			err = 0;
		else
			err = -ENOMEM;
		break;
	}

	return err;
}
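
/* Sketch of the expected user-supplied config (hypothetical userspace
 * values; the actual system-call plumbing lives in the fdso layer):
 * for the cpu-local PCP, config may be NULL, meaning "this partition";
 * for the DPCP it must point to the int id of the cpu hosting the
 * resource, e.g.
 *
 *	int cpu = 2;   // resource lives on P2
 *	...open an object descriptor of type DPCP_SEM with config == &cpu...
 *
 * which arrives here via get_user(cpu, (int*) config).
 */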

#endif

static long pfp_admit_task(struct task_struct* tsk)
{
	if (task_cpu(tsk) == tsk->rt_param.task_params.cpu &&
#ifdef CONFIG_RELEASE_MASTER
	    /* don't allow tasks on the release-master CPU */
	    task_cpu(tsk) != remote_dom(task_cpu(tsk))->release_master &&
#endif
	    litmus_is_valid_fixed_prio(get_priority(tsk)))
		return 0;
	else
		return -EINVAL;
}

static long pfp_activate_plugin(void)
{
#if defined(CONFIG_RELEASE_MASTER) || defined(CONFIG_LITMUS_LOCKING)
	int cpu;
#endif

#ifdef CONFIG_RELEASE_MASTER
	for_each_online_cpu(cpu) {
		remote_dom(cpu)->release_master = atomic_read(&release_master_cpu);
	}
#endif

#ifdef CONFIG_LITMUS_LOCKING
	get_srp_prio = pfp_get_srp_prio;

	for_each_online_cpu(cpu) {
		init_waitqueue_head(&per_cpu(mpcpvs_vspin_wait, cpu));
		per_cpu(mpcpvs_vspin, cpu) = NULL;

		pcp_init_state(&per_cpu(pcp_state, cpu));
		pfp_doms[cpu] = remote_pfp(cpu);
	}
#endif

	return 0;
}


/* Plugin object */
static struct sched_plugin pfp_plugin __cacheline_aligned_in_smp = {
	.plugin_name     = "P-FP",
	.tick            = pfp_tick,
	.task_new        = pfp_task_new,
	.complete_job    = complete_job,
	.task_exit       = pfp_task_exit,
	.schedule        = pfp_schedule,
	.task_wake_up    = pfp_task_wake_up,
	.task_block      = pfp_task_block,
	.admit_task      = pfp_admit_task,
	.activate_plugin = pfp_activate_plugin,
#ifdef CONFIG_LITMUS_LOCKING
	.allocate_lock   = pfp_allocate_lock,
	.finish_switch   = pfp_finish_switch,
#endif
};


static int __init init_pfp(void)
{
	int i;

	/* We do not really want to support CPU hotplug, do we? ;)
	 * However, if we were crazy enough to do so, we could not simply
	 * iterate up to num_online_cpus() like this.
	 */
	for (i = 0; i < num_online_cpus(); i++) {
		pfp_domain_init(remote_pfp(i), i);
	}
	return register_sched_plugin(&pfp_plugin);
}

module_init(init_pfp);