author      Bjoern B. Brandenburg <bbb@cs.unc.edu>  2011-01-28 19:06:11 -0500
committer   Bjoern B. Brandenburg <bbb@cs.unc.edu>  2011-02-01 16:30:42 -0500
commit      e705aa52df711112d434ccc87ee5fb5838c205a2 (patch)
tree        dae3f630e54fc50d08d1657845cca2f9e7ed0c66 /litmus
parent      e593c9dbe858c82e284ff85e625837ae3ab32f1c (diff)
PSN-EDF: re-implement FMLP support
Implement the partitioned FMLP with priority boosting based on the
generic lock API.
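
The lock type is exposed to userspace through the generic lock API, i.e. an FDSO object descriptor plus the sys_litmus_lock()/sys_litmus_unlock() system calls touched by this patch. The sketch below illustrates how a PSN-EDF task might use the new FMLP_SEM type; the wrappers open_fmlp_sem(), litmus_lock(), litmus_unlock(), and od_close() are assumed liblitmus-style helpers and are not part of this patch.

/* Hypothetical userspace sketch (not part of this patch).  Assumes
 * liblitmus-style wrappers: open_fmlp_sem() attaches to an FMLP_SEM object
 * via the FDSO layer, and litmus_lock()/litmus_unlock() invoke the
 * sys_litmus_lock()/sys_litmus_unlock() system calls of the generic lock API.
 */
#include <litmus.h>

int access_shared_resource(int ns_fd)
{
        int od, ret;

        /* attach to (or create) FMLP semaphore #0 in the shared namespace */
        od = open_fmlp_sem(ns_fd, 0);
        if (od < 0)
                return od;

        /* may suspend in FIFO order; the job is priority-boosted while it holds the lock */
        ret = litmus_lock(od);
        if (ret == 0) {
                /* ... critical section accessing the shared resource ... */
                litmus_unlock(od);      /* boosting ends, first waiter becomes owner */
        }

        od_close(od);
        return ret;
}

Object descriptors are obtained through the FDSO layer; FMLP_SEM is the new type handled by psnedf_allocate_lock() below.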
Diffstat (limited to 'litmus')
-rw-r--r--  litmus/locking.c         13
-rw-r--r--  litmus/sched_psn_edf.c  244
2 files changed, 251 insertions, 6 deletions
diff --git a/litmus/locking.c b/litmus/locking.c
index ab643475093f..d39afaeefffe 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -108,6 +108,19 @@ asmlinkage long sys_litmus_unlock(int lock_od)
 	return err;
 }
 
+struct task_struct* waitqueue_first(wait_queue_head_t *wq)
+{
+	wait_queue_t *q;
+
+	if (waitqueue_active(wq)) {
+		q = list_entry(wq->task_list.next,
+			       wait_queue_t, task_list);
+		return (struct task_struct*) q->private;
+	} else
+		return NULL;
+}
+
+
 #else
 
 struct fdso_ops generic_lock_ops = {};
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index fc64c1722ae9..801bc92c5835 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -71,6 +71,66 @@ static void preempt(psnedf_domain_t *pedf)
 	preempt_if_preemptable(pedf->scheduled, pedf->cpu);
 }
 
+#ifdef CONFIG_LITMUS_LOCKING
+
+static void boost_priority(struct task_struct* t)
+{
+	unsigned long flags;
+	psnedf_domain_t* pedf = task_pedf(t);
+	lt_t now;
+
+	raw_spin_lock_irqsave(&pedf->slock, flags);
+	now = litmus_clock();
+
+	TRACE_TASK(t, "priority boosted at %llu\n", now);
+
+	tsk_rt(t)->priority_boosted = 1;
+	tsk_rt(t)->boost_start_time = now;
+
+	if (pedf->scheduled != t) {
+		/* holder may be queued: first stop queue changes */
+		raw_spin_lock(&pedf->domain.release_lock);
+		if (is_queued(t) &&
+		    /* If it is queued, then we need to re-order. */
+		    bheap_decrease(edf_ready_order, tsk_rt(t)->heap_node) &&
+		    /* If we bubbled to the top, then we need to check for preemptions. */
+		    edf_preemption_needed(&pedf->domain, pedf->scheduled))
+			preempt(pedf);
+		raw_spin_unlock(&pedf->domain.release_lock);
+	} /* else: nothing to do since the job is not queued while scheduled */
+
+	raw_spin_unlock_irqrestore(&pedf->slock, flags);
+}
+
+static void unboost_priority(struct task_struct* t)
+{
+	unsigned long flags;
+	psnedf_domain_t* pedf = task_pedf(t);
+	lt_t now;
+
+	raw_spin_lock_irqsave(&pedf->slock, flags);
+	now = litmus_clock();
+
+	/* assumption: this only happens when the job is scheduled */
+	BUG_ON(pedf->scheduled != t);
+
+	TRACE_TASK(t, "priority restored at %llu\n", now);
+
+	/* priority boosted jobs must be scheduled */
+	BUG_ON(pedf->scheduled != t);
+
+	tsk_rt(t)->priority_boosted = 0;
+	tsk_rt(t)->boost_start_time = 0;
+
+	/* check if this changes anything */
+	if (edf_preemption_needed(&pedf->domain, pedf->scheduled))
+		preempt(pedf);
+
+	raw_spin_unlock_irqrestore(&pedf->slock, flags);
+}
+
+#endif
+
 /* This check is trivial in partioned systems as we only have to consider
  * the CPU of the partition.
  */
@@ -252,15 +312,16 @@ static void psnedf_task_wake_up(struct task_struct *task)
 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
 	raw_spin_lock_irqsave(&pedf->slock, flags);
 	BUG_ON(is_queued(task));
+	now = litmus_clock();
+	if (is_tardy(task, now)
+#ifdef CONFIG_LITMUS_LOCKING
 	/* We need to take suspensions because of semaphores into
 	 * account! If a job resumes after being suspended due to acquiring
 	 * a semaphore, it should never be treated as a new job release.
-	 *
-	 * FIXME: This should be done in some more predictable and userspace-controlled way.
 	 */
-	now = litmus_clock();
-	if (is_tardy(task, now) &&
-	    get_rt_flags(task) != RT_F_EXIT_SEM) {
+	    && !is_priority_boosted(task)
+#endif
+	    ) {
 		/* new sporadic release */
 		release_at(task, now);
 		sched_trace_task_release(task);
@@ -314,6 +375,8 @@ static void psnedf_task_exit(struct task_struct * t)
 #include <litmus/fdso.h>
 #include <litmus/srp.h>
 
+/* ******************** SRP support ************************ */
+
 static unsigned int psnedf_get_srp_prio(struct task_struct* t)
 {
 	/* assumes implicit deadlines */
@@ -326,14 +389,183 @@ static long psnedf_activate_plugin(void)
 	return 0;
 }
 
+/* ******************** FMLP support ********************** */
+
+/* struct for semaphore with priority inheritance */
+struct fmlp_semaphore {
+	struct litmus_lock litmus_lock;
+
+	/* current resource holder */
+	struct task_struct *owner;
+
+	/* FIFO queue of waiting tasks */
+	wait_queue_head_t wait;
+};
+
+static inline struct fmlp_semaphore* fmlp_from_lock(struct litmus_lock* lock)
+{
+	return container_of(lock, struct fmlp_semaphore, litmus_lock);
+}
+int psnedf_fmlp_lock(struct litmus_lock* l)
+{
+	struct task_struct* t = current;
+	struct fmlp_semaphore *sem = fmlp_from_lock(l);
+	wait_queue_t wait;
+	unsigned long flags;
+
+	if (!is_realtime(t))
+		return -EPERM;
+
+	spin_lock_irqsave(&sem->wait.lock, flags);
+
+	if (sem->owner) {
+		/* resource is not free => must suspend and wait */
+
+		init_waitqueue_entry(&wait, t);
+
+		/* FIXME: interruptible would be nice some day */
+		set_task_state(t, TASK_UNINTERRUPTIBLE);
+
+		__add_wait_queue_tail_exclusive(&sem->wait, &wait);
+
+		/* release lock before sleeping */
+		spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+		/* We depend on the FIFO order.  Thus, we don't need to recheck
+		 * when we wake up; we are guaranteed to have the lock since
+		 * there is only one wake up per release.
+		 */
+
+		schedule();
+
+		/* Since we hold the lock, no other task will change
+		 * ->owner. We can thus check it without acquiring the spin
+		 * lock. */
+		BUG_ON(sem->owner != t);
+
+		/* FIXME: could we punt the dequeuing to the previous job,
+		 * which is holding the spinlock anyway? */
+		remove_wait_queue(&sem->wait, &wait);
+	} else {
+		/* it's ours now */
+		sem->owner = t;
+
+		/* mark the task as priority-boosted. */
+		boost_priority(t);
+
+		spin_unlock_irqrestore(&sem->wait.lock, flags);
+	}
+
+	return 0;
+}
+
+int psnedf_fmlp_unlock(struct litmus_lock* l)
+{
+	struct task_struct *t = current, *next;
+	struct fmlp_semaphore *sem = fmlp_from_lock(l);
+	unsigned long flags;
+	int err = 0;
+
+	spin_lock_irqsave(&sem->wait.lock, flags);
+
+	if (sem->owner != t) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	/* we lose the benefit of priority boosting */
+
+	unboost_priority(t);
+
+	/* check if there are jobs waiting for this resource */
+	next = waitqueue_first(&sem->wait);
+	if (next) {
+		/* boost next job */
+		boost_priority(next);
+
+		/* next becomes the resource holder */
+		sem->owner = next;
+
+		/* wake up next */
+		wake_up_process(next);
+	} else
+		/* resource becomes available */
+		sem->owner = NULL;
+
+out:
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
+	return err;
+}
+
+int psnedf_fmlp_close(struct litmus_lock* l)
+{
+	struct task_struct *t = current;
+	struct fmlp_semaphore *sem = fmlp_from_lock(l);
+	unsigned long flags;
+
+	int owner;
+
+	spin_lock_irqsave(&sem->wait.lock, flags);
+
+	owner = sem->owner == t;
+
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+	if (owner)
+		psnedf_fmlp_unlock(l);
+
+	return 0;
+}
+
+void psnedf_fmlp_free(struct litmus_lock* lock)
+{
+	kfree(fmlp_from_lock(lock));
+}
+
+static struct litmus_lock_ops psnedf_fmlp_lock_ops = {
+	.close     = psnedf_fmlp_close,
+	.lock      = psnedf_fmlp_lock,
+	.unlock    = psnedf_fmlp_unlock,
+	.deallocate = psnedf_fmlp_free,
+};
+
+static struct litmus_lock* psnedf_new_fmlp(void)
+{
+	struct fmlp_semaphore* sem;
+
+	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
+	if (!sem)
+		return NULL;
+
+	sem->owner = NULL;
+	init_waitqueue_head(&sem->wait);
+	sem->litmus_lock.ops = &psnedf_fmlp_lock_ops;
+
+	return &sem->litmus_lock;
+}
+
+/* **** lock constructor **** */
+
+
 static long psnedf_allocate_lock(struct litmus_lock **lock, int type)
 {
 	int err = -ENXIO;
 	struct srp_semaphore* srp;
 
+	/* PSN-EDF currently supports the SRP for local resources and the FMLP
+	 * for global resources. */
 	switch (type) {
+	case FMLP_SEM:
+		/* Flexible Multiprocessor Locking Protocol */
+		*lock = psnedf_new_fmlp();
+		if (*lock)
+			err = 0;
+		else
+			err = -ENOMEM;
+		break;
+
 	case SRP_SEM:
-		/* Baker's SRP */
+		/* Baker's Stack Resource Policy */
 		srp = allocate_srp_semaphore();
 		if (srp) {
 			*lock = &srp->litmus_lock;