Diffstat (limited to 'litmus/sched_gsn_edf.c')
-rw-r--r--	litmus/sched_gsn_edf.c	1162
1 file changed, 1119 insertions(+), 43 deletions(-)
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 6ed504f4750e..3ddab5875c8c 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -12,6 +12,8 @@ | |||
12 | #include <linux/percpu.h> | 12 | #include <linux/percpu.h> |
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/uaccess.h> | ||
16 | |||
15 | 17 | ||
16 | #include <litmus/litmus.h> | 18 | #include <litmus/litmus.h> |
17 | #include <litmus/jobs.h> | 19 | #include <litmus/jobs.h> |
@@ -30,6 +32,24 @@ | |||
30 | 32 | ||
31 | #include <linux/module.h> | 33 | #include <linux/module.h> |
32 | 34 | ||
35 | #ifdef CONFIG_SCHED_CPU_AFFINITY | ||
36 | #include <litmus/affinity.h> | ||
37 | #endif | ||
38 | |||
39 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
40 | #include <litmus/litmus_softirq.h> | ||
41 | #endif | ||
42 | |||
43 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
44 | #include <linux/interrupt.h> | ||
45 | #include <litmus/trace.h> | ||
46 | #endif | ||
47 | |||
48 | #ifdef CONFIG_LITMUS_NVIDIA | ||
49 | #include <litmus/nvidia_info.h> | ||
50 | #endif | ||
51 | |||
52 | |||
33 | /* Overview of GSN-EDF operations. | 53 | /* Overview of GSN-EDF operations. |
34 | * | 54 | * |
35 | * For a detailed explanation of GSN-EDF have a look at the FMLP paper. This | 55 | * For a detailed explanation of GSN-EDF have a look at the FMLP paper. This |
@@ -116,6 +136,16 @@ static struct bheap gsnedf_cpu_heap; | |||
116 | static rt_domain_t gsnedf; | 136 | static rt_domain_t gsnedf; |
117 | #define gsnedf_lock (gsnedf.ready_lock) | 137 | #define gsnedf_lock (gsnedf.ready_lock) |
118 | 138 | ||
139 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
140 | struct tasklet_head | ||
141 | { | ||
142 | struct tasklet_struct *head; | ||
143 | struct tasklet_struct **tail; | ||
144 | }; | ||
145 | |||
146 | struct tasklet_head gsnedf_pending_tasklets; | ||
147 | #endif | ||
148 | |||
119 | 149 | ||
120 | /* Uncomment this if you want to see all scheduling decisions in the | 150 | /* Uncomment this if you want to see all scheduling decisions in the |
121 | * TRACE() log. | 151 | * TRACE() log. |
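Note: the tasklet_head added above is the classic singly linked queue with a pointer-to-pointer tail: tail always points at the slot that holds the next link (&head when the queue is empty, &last->next otherwise), so appends are O(1) and need no empty-list special case. A minimal standalone sketch of that invariant, using hypothetical node/queue names that are not part of the patch:

	/* Sketch only: the tail invariant behind gsnedf_pending_tasklets;
	 * 'node' and 'queue' are made-up names for illustration. */
	struct node { struct node *next; };
	struct queue { struct node *head; struct node **tail; };

	static void queue_init(struct queue *q)
	{
		q->head = NULL;
		q->tail = &q->head;	/* empty: tail refers to head itself */
	}

	static void queue_append(struct queue *q, struct node *n)
	{
		n->next = NULL;
		*q->tail = n;		/* write the new node into the last link */
		q->tail = &n->next;	/* the new node's link is now last */
	}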
@@ -313,7 +343,7 @@ static void check_for_preemptions(void) | |||
313 | static noinline void gsnedf_job_arrival(struct task_struct* task) | 343 | static noinline void gsnedf_job_arrival(struct task_struct* task) |
314 | { | 344 | { |
315 | BUG_ON(!task); | 345 | BUG_ON(!task); |
316 | 346 | ||
317 | requeue(task); | 347 | requeue(task); |
318 | check_for_preemptions(); | 348 | check_for_preemptions(); |
319 | } | 349 | } |
@@ -334,9 +364,13 @@ static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks) | |||
334 | static noinline void job_completion(struct task_struct *t, int forced) | 364 | static noinline void job_completion(struct task_struct *t, int forced) |
335 | { | 365 | { |
336 | BUG_ON(!t); | 366 | BUG_ON(!t); |
337 | 367 | ||
338 | sched_trace_task_completion(t, forced); | 368 | sched_trace_task_completion(t, forced); |
339 | 369 | ||
370 | #ifdef CONFIG_LITMUS_NVIDIA | ||
371 | atomic_set(&tsk_rt(t)->nv_int_count, 0); | ||
372 | #endif | ||
373 | |||
340 | TRACE_TASK(t, "job_completion().\n"); | 374 | TRACE_TASK(t, "job_completion().\n"); |
341 | 375 | ||
342 | /* set flags */ | 376 | /* set flags */ |
@@ -379,6 +413,414 @@ static void gsnedf_tick(struct task_struct* t) | |||
379 | } | 413 | } |
380 | } | 414 | } |
381 | 415 | ||
416 | |||
417 | |||
418 | |||
419 | |||
420 | |||
421 | |||
422 | |||
423 | |||
424 | |||
425 | |||
426 | |||
427 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
428 | |||
429 | |||
430 | static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed) | ||
431 | { | ||
432 | if (!atomic_read(&tasklet->count)) { | ||
433 | sched_trace_tasklet_begin(tasklet->owner); | ||
434 | |||
435 | if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state)) | ||
436 | { | ||
437 | BUG(); | ||
438 | } | ||
439 | TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %lu).\n", __FUNCTION__, tasklet->owner->pid, flushed); | ||
440 | tasklet->func(tasklet->data); | ||
441 | tasklet_unlock(tasklet); | ||
442 | |||
443 | sched_trace_tasklet_end(tasklet->owner, flushed); | ||
444 | } | ||
445 | else { | ||
446 | BUG(); | ||
447 | } | ||
448 | } | ||
449 | |||
450 | |||
451 | static void __extract_tasklets(struct task_struct* task, struct tasklet_head* task_tasklets) | ||
452 | { | ||
453 | struct tasklet_struct* step; | ||
454 | struct tasklet_struct* tasklet; | ||
455 | struct tasklet_struct* prev; | ||
456 | |||
457 | task_tasklets->head = NULL; | ||
458 | task_tasklets->tail = &(task_tasklets->head); | ||
459 | |||
460 | prev = NULL; | ||
461 | for(step = gsnedf_pending_tasklets.head; step != NULL; step = step->next) | ||
462 | { | ||
463 | if(step->owner == task) | ||
464 | { | ||
465 | TRACE("%s: Found tasklet to flush: %d\n", __FUNCTION__, step->owner->pid); | ||
466 | |||
467 | tasklet = step; | ||
468 | |||
469 | if(prev) { | ||
470 | prev->next = tasklet->next; | ||
471 | } | ||
472 | else if(gsnedf_pending_tasklets.head == tasklet) { | ||
473 | // we're at the head. | ||
474 | gsnedf_pending_tasklets.head = tasklet->next; | ||
475 | } | ||
476 | |||
477 | if(gsnedf_pending_tasklets.tail == &(tasklet->next)) { | ||
478 | // we're at the tail | ||
479 | if(prev) { | ||
480 | gsnedf_pending_tasklets.tail = &(prev->next); | ||
481 | } | ||
482 | else { | ||
483 | gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head); | ||
484 | } | ||
485 | } | ||
486 | |||
487 | tasklet->next = NULL; | ||
488 | *(task_tasklets->tail) = tasklet; | ||
489 | task_tasklets->tail = &(tasklet->next); | ||
490 | } | ||
491 | else { | ||
492 | prev = step; | ||
493 | } | ||
494 | } | ||
495 | } | ||
496 | |||
497 | static void flush_tasklets(struct task_struct* task) | ||
498 | { | ||
499 | unsigned long flags; | ||
500 | struct tasklet_head task_tasklets; | ||
501 | struct tasklet_struct* step; | ||
502 | |||
503 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | ||
504 | __extract_tasklets(task, &task_tasklets); | ||
505 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
506 | |||
507 | if(task_tasklets.head != NULL) { | ||
508 | TRACE("%s: Flushing tasklets for %d...\n", __FUNCTION__, task->pid); | ||
509 | } | ||
510 | |||
511 | // now execute any flushed tasklets. | ||
512 | for(step = task_tasklets.head; step != NULL; /**/) | ||
513 | { | ||
514 | struct tasklet_struct* temp = step->next; | ||
515 | |||
516 | step->next = NULL; | ||
517 | __do_lit_tasklet(step, 1ul); | ||
518 | |||
519 | step = temp; | ||
520 | } | ||
521 | } | ||
522 | |||
523 | |||
524 | static void do_lit_tasklets(struct task_struct* sched_task) | ||
525 | { | ||
526 | int work_to_do = 1; | ||
527 | struct tasklet_struct *tasklet = NULL; | ||
528 | //struct tasklet_struct *step; | ||
529 | unsigned long flags; | ||
530 | |||
531 | while(work_to_do) { | ||
532 | |||
533 | TS_NV_SCHED_BOTISR_START; | ||
534 | |||
535 | // remove tasklet at head of list if it has higher priority. | ||
536 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | ||
537 | |||
538 | /* | ||
539 | step = gsnedf_pending_tasklets.head; | ||
540 | TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__); | ||
541 | while(step != NULL){ | ||
542 | TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid); | ||
543 | step = step->next; | ||
544 | } | ||
545 | TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(gsnedf_pending_tasklets.tail), (*(gsnedf_pending_tasklets.tail) != NULL) ? (*(gsnedf_pending_tasklets.tail))->owner->pid : -1); | ||
546 | TRACE("%s: done.\n", __FUNCTION__); | ||
547 | */ | ||
548 | |||
549 | |||
550 | if(gsnedf_pending_tasklets.head != NULL) { | ||
551 | // remove tasklet at head. | ||
552 | tasklet = gsnedf_pending_tasklets.head; | ||
553 | |||
554 | if(edf_higher_prio(tasklet->owner, sched_task)) { | ||
555 | |||
556 | if(NULL == tasklet->next) { | ||
557 | // tasklet is at the head, list only has one element | ||
558 | TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); | ||
559 | gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head); | ||
560 | } | ||
561 | |||
562 | // remove the tasklet from the queue | ||
563 | gsnedf_pending_tasklets.head = tasklet->next; | ||
564 | |||
565 | TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); | ||
566 | } | ||
567 | else { | ||
568 | TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, tasklet->owner->pid, smp_processor_id()); | ||
569 | tasklet = NULL; | ||
570 | } | ||
571 | } | ||
572 | else { | ||
573 | TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__); | ||
574 | } | ||
575 | |||
576 | |||
577 | /* | ||
578 | step = gsnedf_pending_tasklets.head; | ||
579 | TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__); | ||
580 | while(step != NULL){ | ||
581 | TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid); | ||
582 | step = step->next; | ||
583 | } | ||
584 | TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(gsnedf_pending_tasklets.tail), (*(gsnedf_pending_tasklets.tail) != NULL) ? (*(gsnedf_pending_tasklets.tail))->owner->pid : -1); | ||
585 | TRACE("%s: done.\n", __FUNCTION__); | ||
586 | */ | ||
587 | |||
588 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
589 | |||
590 | TS_NV_SCHED_BOTISR_END; | ||
591 | |||
592 | if(tasklet) { | ||
593 | __do_lit_tasklet(tasklet, 0ul); | ||
594 | tasklet = NULL; | ||
595 | } | ||
596 | else { | ||
597 | work_to_do = 0; | ||
598 | } | ||
599 | } | ||
600 | |||
601 | //TRACE("%s: exited.\n", __FUNCTION__); | ||
602 | } | ||
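Note: do_lit_tasklets() is a "dequeue under lock, run outside the lock" drain loop: the head tasklet is popped only while its owner out-prioritizes the locally scheduled task, and the callback runs with gsnedf_lock dropped so it cannot deadlock against the scheduler. A condensed sketch of the same loop, assuming the surrounding gsnedf symbols:

	/* Sketch only: condensed form of the drain loop above. */
	for (;;) {
		struct tasklet_struct *t = NULL;
		unsigned long flags;

		raw_spin_lock_irqsave(&gsnedf_lock, flags);
		if (gsnedf_pending_tasklets.head &&
		    edf_higher_prio(gsnedf_pending_tasklets.head->owner, sched_task)) {
			t = gsnedf_pending_tasklets.head;
			gsnedf_pending_tasklets.head = t->next;
			if (!t->next)	/* queue drained: reset tail to &head */
				gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head);
		}
		raw_spin_unlock_irqrestore(&gsnedf_lock, flags);

		if (!t)
			break;
		__do_lit_tasklet(t, 0ul);	/* run with the lock dropped */
	}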
603 | |||
604 | |||
605 | static void run_tasklets(struct task_struct* sched_task) | ||
606 | { | ||
607 | #if 0 | ||
608 | int task_is_rt = is_realtime(sched_task); | ||
609 | cedf_domain_t* cluster; | ||
610 | |||
611 | if(is_realtime(sched_task)) { | ||
612 | cluster = task_cpu_cluster(sched_task); | ||
613 | } | ||
614 | else { | ||
615 | cluster = remote_cluster(get_cpu()); | ||
616 | } | ||
617 | |||
618 | if(cluster && gsnedf_pending_tasklets.head != NULL) { | ||
619 | TRACE("%s: There are tasklets to process.\n", __FUNCTION__); | ||
620 | |||
621 | do_lit_tasklets(cluster, sched_task); | ||
622 | } | ||
623 | |||
624 | if(!task_is_rt) { | ||
625 | put_cpu_no_resched(); | ||
626 | } | ||
627 | #else | ||
628 | |||
629 | preempt_disable(); | ||
630 | |||
631 | if(gsnedf_pending_tasklets.head != NULL) { | ||
632 | TRACE("%s: There are tasklets to process.\n", __FUNCTION__); | ||
633 | do_lit_tasklets(sched_task); | ||
634 | } | ||
635 | |||
636 | preempt_enable_no_resched(); | ||
637 | |||
638 | #endif | ||
639 | } | ||
640 | |||
641 | |||
642 | static void __add_pai_tasklet(struct tasklet_struct* tasklet) | ||
643 | { | ||
644 | struct tasklet_struct* step; | ||
645 | |||
646 | /* | ||
647 | step = gsnedf_pending_tasklets.head; | ||
648 | TRACE("%s: (BEFORE) dumping tasklet queue...\n", __FUNCTION__); | ||
649 | while(step != NULL){ | ||
650 | TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid); | ||
651 | step = step->next; | ||
652 | } | ||
653 | TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(gsnedf_pending_tasklets.tail), (*(gsnedf_pending_tasklets.tail) != NULL) ? (*(gsnedf_pending_tasklets.tail))->owner->pid : -1); | ||
654 | TRACE("%s: done.\n", __FUNCTION__); | ||
655 | */ | ||
656 | |||
657 | |||
658 | tasklet->next = NULL; // make sure there are no old values floating around | ||
659 | |||
660 | step = gsnedf_pending_tasklets.head; | ||
661 | if(step == NULL) { | ||
662 | TRACE("%s: tasklet queue empty. inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid); | ||
663 | // insert at tail. | ||
664 | *(gsnedf_pending_tasklets.tail) = tasklet; | ||
665 | gsnedf_pending_tasklets.tail = &(tasklet->next); | ||
666 | } | ||
667 | else if((*(gsnedf_pending_tasklets.tail) != NULL) && | ||
668 | edf_higher_prio((*(gsnedf_pending_tasklets.tail))->owner, tasklet->owner)) { | ||
669 | // insert at tail. | ||
670 | TRACE("%s: tasklet belongs at end. inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid); | ||
671 | |||
672 | *(gsnedf_pending_tasklets.tail) = tasklet; | ||
673 | gsnedf_pending_tasklets.tail = &(tasklet->next); | ||
674 | } | ||
675 | else { | ||
676 | // insert somewhere in the middle (possibly at the head). | ||
677 | | ||
678 | TRACE("%s: tasklet belongs somewhere in the middle.\n", __FUNCTION__); | ||
679 | | ||
680 | if(edf_higher_prio(tasklet->owner, gsnedf_pending_tasklets.head->owner)) { | ||
681 | // outranks the current head: insert at the head. | ||
682 | TRACE("%s: %d is the new tasklet queue head.\n", __FUNCTION__, tasklet->owner->pid); | ||
683 | tasklet->next = gsnedf_pending_tasklets.head; | ||
684 | gsnedf_pending_tasklets.head = tasklet; | ||
685 | } | ||
686 | else { | ||
687 | while(step->next && edf_higher_prio(step->next->owner, tasklet->owner)) { | ||
688 | step = step->next; | ||
689 | } | ||
690 | | ||
691 | // insert tasklet right before step->next. | ||
692 | TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, tasklet->owner->pid, step->owner->pid, (step->next) ? step->next->owner->pid : -1); | ||
693 | | ||
694 | tasklet->next = step->next; | ||
695 | step->next = tasklet; | ||
696 | } | ||
697 | } | ||
701 | |||
702 | /* | ||
703 | step = gsnedf_pending_tasklets.head; | ||
704 | TRACE("%s: (AFTER) dumping tasklet queue...\n", __FUNCTION__); | ||
705 | while(step != NULL){ | ||
706 | TRACE("%s: %p (%d)\n", __FUNCTION__, step, step->owner->pid); | ||
707 | step = step->next; | ||
708 | } | ||
709 | TRACE("%s: tail = %p (%d)\n", __FUNCTION__, *(gsnedf_pending_tasklets.tail), (*(gsnedf_pending_tasklets.tail) != NULL) ? (*(gsnedf_pending_tasklets.tail))->owner->pid : -1); | ||
710 | TRACE("%s: done.\n", __FUNCTION__); | ||
711 | */ | ||
712 | |||
717 | } | ||
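Note: the three insertion cases above (empty queue, append at tail, ordered insert) collapse into one loop if the walk itself uses a pointer-to-pointer cursor, which removes the error-prone head patch-up entirely. A sketch under the same hypothetical node/queue shapes as the earlier note:

	/* Sketch only: priority-ordered insert with a pointer-to-pointer
	 * cursor; higher_prio() stands in for edf_higher_prio() on owners. */
	static void sorted_insert(struct queue *q, struct node *n,
	                          int (*higher_prio)(struct node *, struct node *))
	{
		struct node **cursor = &q->head;

		while (*cursor && higher_prio(*cursor, n))
			cursor = &(*cursor)->next;	/* keep descending priority */

		n->next = *cursor;
		*cursor = n;
		if (!n->next)			/* inserted last: fix up tail */
			q->tail = &n->next;
	}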
718 | |||
719 | static int enqueue_pai_tasklet(struct tasklet_struct* tasklet) | ||
720 | { | ||
721 | cpu_entry_t *targetCPU = NULL; | ||
722 | int thisCPU; | ||
723 | int runLocal = 0; | ||
724 | int runNow = 0; | ||
725 | unsigned long flags; | ||
726 | |||
727 | if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner))) | ||
728 | { | ||
729 | TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__); | ||
730 | return 0; | ||
731 | } | ||
732 | |||
733 | |||
734 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | ||
735 | |||
736 | thisCPU = smp_processor_id(); | ||
737 | |||
738 | #if 1 | ||
739 | #ifdef CONFIG_SCHED_CPU_AFFINITY | ||
740 | { | ||
741 | cpu_entry_t* affinity = NULL; | ||
742 | |||
743 | // use this CPU if it isn't running any RT work. | ||
744 | if( | ||
745 | #ifdef CONFIG_RELEASE_MASTER | ||
746 | (thisCPU != gsnedf.release_master) && | ||
747 | #endif | ||
748 | (__get_cpu_var(gsnedf_cpu_entries).linked == NULL)) { | ||
749 | affinity = &(__get_cpu_var(gsnedf_cpu_entries)); | ||
750 | } | ||
751 | else { | ||
752 | // this CPU is busy or shouldn't run the tasklet. | ||
753 | // look for an available CPU nearby. | ||
754 | // NOTE: Affinity towards owner and not this CPU. Is this right? | ||
755 | affinity = | ||
756 | gsnedf_get_nearest_available_cpu( | ||
757 | &per_cpu(gsnedf_cpu_entries, task_cpu(tasklet->owner))); | ||
758 | } | ||
759 | |||
760 | targetCPU = affinity; | ||
761 | } | ||
762 | #endif | ||
763 | #endif | ||
764 | |||
765 | if (targetCPU == NULL) { | ||
766 | targetCPU = lowest_prio_cpu(); | ||
767 | } | ||
768 | |||
769 | if (edf_higher_prio(tasklet->owner, targetCPU->linked)) { | ||
770 | if (thisCPU == targetCPU->cpu) { | ||
771 | TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__); | ||
772 | runLocal = 1; | ||
773 | runNow = 1; | ||
774 | } | ||
775 | else { | ||
776 | TRACE("%s: Run tasklet remotely (and now).\n", __FUNCTION__); | ||
777 | runLocal = 0; | ||
778 | runNow = 1; | ||
779 | } | ||
780 | } | ||
781 | else { | ||
782 | runLocal = 0; | ||
783 | runNow = 0; | ||
784 | } | ||
785 | |||
786 | if(!runLocal) { | ||
787 | // enqueue the tasklet | ||
788 | __add_pai_tasklet(tasklet); | ||
789 | } | ||
790 | |||
791 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
792 | |||
793 | |||
794 | if (runLocal /*&& runNow */) { // runNow == 1 is implied | ||
795 | TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__); | ||
796 | __do_lit_tasklet(tasklet, 0ul); | ||
797 | } | ||
798 | else if (runNow /*&& !runLocal */) { // runLocal == 0 is implied | ||
799 | TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, targetCPU->cpu); | ||
800 | preempt(targetCPU); // need to be protected by gsnedf_lock? | ||
801 | } | ||
802 | else { | ||
803 | TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__); | ||
804 | } | ||
805 | |||
806 | return(1); // success | ||
807 | } | ||
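Note: the runLocal/runNow pair computed above encodes exactly three outcomes: run here immediately, enqueue and kick the target CPU, or enqueue deferred. A standalone restatement of that decision, with hypothetical names:

	/* Sketch only: the three dispatch outcomes of enqueue_pai_tasklet(). */
	enum pai_dispatch { PAI_RUN_LOCAL, PAI_PREEMPT_REMOTE, PAI_DEFER };

	static enum pai_dispatch pai_decide(int this_cpu, int target_cpu,
	                                    int owner_beats_target)
	{
		if (!owner_beats_target)
			return PAI_DEFER;	/* stays queued until it gains priority */
		return (this_cpu == target_cpu) ? PAI_RUN_LOCAL
		                                : PAI_PREEMPT_REMOTE;
	}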
808 | |||
809 | |||
810 | #endif | ||
811 | |||
812 | |||
813 | |||
814 | |||
815 | |||
816 | |||
817 | |||
818 | |||
819 | |||
820 | |||
821 | |||
822 | |||
823 | |||
382 | /* Getting schedule() right is a bit tricky. schedule() may not make any | 824 | /* Getting schedule() right is a bit tricky. schedule() may not make any |
383 | * assumptions on the state of the current task since it may be called for a | 825 | * assumptions on the state of the current task since it may be called for a |
384 | * number of reasons. The reasons include a scheduler_tick() determined that it | 826 | * number of reasons. The reasons include a scheduler_tick() determined that it |
@@ -437,17 +879,19 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
437 | TRACE_TASK(prev, "invoked gsnedf_schedule.\n"); | 879 | TRACE_TASK(prev, "invoked gsnedf_schedule.\n"); |
438 | #endif | 880 | #endif |
439 | 881 | ||
882 | /* | ||
440 | if (exists) | 883 | if (exists) |
441 | TRACE_TASK(prev, | 884 | TRACE_TASK(prev, |
442 | "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d " | 885 | "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d " |
443 | "state:%d sig:%d\n", | 886 | "state:%d sig:%d\n", |
444 | blocks, out_of_time, np, sleep, preempt, | 887 | blocks, out_of_time, np, sleep, preempt, |
445 | prev->state, signal_pending(prev)); | 888 | prev->state, signal_pending(prev)); |
889 | */ | ||
890 | |||
446 | if (entry->linked && preempt) | 891 | if (entry->linked && preempt) |
447 | TRACE_TASK(prev, "will be preempted by %s/%d\n", | 892 | TRACE_TASK(prev, "will be preempted by %s/%d\n", |
448 | entry->linked->comm, entry->linked->pid); | 893 | entry->linked->comm, entry->linked->pid); |
449 | 894 | ||
450 | |||
451 | /* If a task blocks we have no choice but to reschedule. | 895 | /* If a task blocks we have no choice but to reschedule. |
452 | */ | 896 | */ |
453 | if (blocks) | 897 | if (blocks) |
@@ -492,12 +936,15 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
492 | entry->scheduled->rt_param.scheduled_on = NO_CPU; | 936 | entry->scheduled->rt_param.scheduled_on = NO_CPU; |
493 | TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n"); | 937 | TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n"); |
494 | } | 938 | } |
495 | } else | 939 | } |
940 | else | ||
941 | { | ||
496 | /* Only override Linux scheduler if we have a real-time task | 942 | /* Only override Linux scheduler if we have a real-time task |
497 | * scheduled that needs to continue. | 943 | * scheduled that needs to continue. |
498 | */ | 944 | */ |
499 | if (exists) | 945 | if (exists) |
500 | next = prev; | 946 | next = prev; |
947 | } | ||
501 | 948 | ||
502 | sched_state_task_picked(); | 949 | sched_state_task_picked(); |
503 | 950 | ||
@@ -522,8 +969,9 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
522 | static void gsnedf_finish_switch(struct task_struct *prev) | 969 | static void gsnedf_finish_switch(struct task_struct *prev) |
523 | { | 970 | { |
524 | cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); | 971 | cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); |
525 | 972 | ||
526 | entry->scheduled = is_realtime(current) ? current : NULL; | 973 | entry->scheduled = is_realtime(current) ? current : NULL; |
974 | |||
527 | #ifdef WANT_ALL_SCHED_EVENTS | 975 | #ifdef WANT_ALL_SCHED_EVENTS |
528 | TRACE_TASK(prev, "switched away from\n"); | 976 | TRACE_TASK(prev, "switched away from\n"); |
529 | #endif | 977 | #endif |
@@ -572,11 +1020,14 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running) | |||
572 | static void gsnedf_task_wake_up(struct task_struct *task) | 1020 | static void gsnedf_task_wake_up(struct task_struct *task) |
573 | { | 1021 | { |
574 | unsigned long flags; | 1022 | unsigned long flags; |
575 | lt_t now; | 1023 | //lt_t now; |
576 | 1024 | ||
577 | TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); | 1025 | TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); |
578 | 1026 | ||
579 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | 1027 | raw_spin_lock_irqsave(&gsnedf_lock, flags); |
1028 | |||
1029 | |||
1030 | #if 0 // sporadic task model | ||
580 | /* We need to take suspensions because of semaphores into | 1031 | /* We need to take suspensions because of semaphores into |
581 | * account! If a job resumes after being suspended due to acquiring | 1032 | * account! If a job resumes after being suspended due to acquiring |
582 | * a semaphore, it should never be treated as a new job release. | 1033 | * a semaphore, it should never be treated as a new job release. |
@@ -598,19 +1049,26 @@ static void gsnedf_task_wake_up(struct task_struct *task) | |||
598 | } | 1049 | } |
599 | } | 1050 | } |
600 | } | 1051 | } |
1052 | #else // periodic task model | ||
1053 | set_rt_flags(task, RT_F_RUNNING); | ||
1054 | #endif | ||
1055 | |||
601 | gsnedf_job_arrival(task); | 1056 | gsnedf_job_arrival(task); |
602 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | 1057 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); |
603 | } | 1058 | } |
604 | 1059 | ||
605 | static void gsnedf_task_block(struct task_struct *t) | 1060 | static void gsnedf_task_block(struct task_struct *t) |
606 | { | 1061 | { |
1062 | // TODO: is this called on preemption?? | ||
607 | unsigned long flags; | 1063 | unsigned long flags; |
608 | 1064 | ||
609 | TRACE_TASK(t, "block at %llu\n", litmus_clock()); | 1065 | TRACE_TASK(t, "block at %llu\n", litmus_clock()); |
610 | 1066 | ||
611 | /* unlink if necessary */ | 1067 | /* unlink if necessary */ |
612 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | 1068 | raw_spin_lock_irqsave(&gsnedf_lock, flags); |
1069 | |||
613 | unlink(t); | 1070 | unlink(t); |
1071 | |||
614 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | 1072 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); |
615 | 1073 | ||
616 | BUG_ON(!is_realtime(t)); | 1074 | BUG_ON(!is_realtime(t)); |
@@ -621,6 +1079,10 @@ static void gsnedf_task_exit(struct task_struct * t) | |||
621 | { | 1079 | { |
622 | unsigned long flags; | 1080 | unsigned long flags; |
623 | 1081 | ||
1082 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
1083 | flush_tasklets(t); | ||
1084 | #endif | ||
1085 | |||
624 | /* unlink if necessary */ | 1086 | /* unlink if necessary */ |
625 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | 1087 | raw_spin_lock_irqsave(&gsnedf_lock, flags); |
626 | unlink(t); | 1088 | unlink(t); |
@@ -629,7 +1091,7 @@ static void gsnedf_task_exit(struct task_struct * t) | |||
629 | tsk_rt(t)->scheduled_on = NO_CPU; | 1091 | tsk_rt(t)->scheduled_on = NO_CPU; |
630 | } | 1092 | } |
631 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | 1093 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); |
632 | 1094 | ||
633 | BUG_ON(!is_realtime(t)); | 1095 | BUG_ON(!is_realtime(t)); |
634 | TRACE_TASK(t, "RIP\n"); | 1096 | TRACE_TASK(t, "RIP\n"); |
635 | } | 1097 | } |
@@ -644,51 +1106,53 @@ static long gsnedf_admit_task(struct task_struct* tsk) | |||
644 | 1106 | ||
645 | #include <litmus/fdso.h> | 1107 | #include <litmus/fdso.h> |
646 | 1108 | ||
647 | /* called with IRQs off */ | 1109 | |
648 | static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | 1110 | static void __set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) |
649 | { | 1111 | { |
650 | int linked_on; | 1112 | int linked_on; |
651 | int check_preempt = 0; | 1113 | int check_preempt = 0; |
652 | 1114 | ||
653 | raw_spin_lock(&gsnedf_lock); | 1115 | if(prio_inh != NULL) |
654 | 1116 | TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid); | |
655 | TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid); | 1117 | else |
1118 | TRACE_TASK(t, "inherits priority from %p\n", prio_inh); | ||
1119 | |||
1120 | sched_trace_eff_prio_change(t, prio_inh); | ||
1121 | |||
656 | tsk_rt(t)->inh_task = prio_inh; | 1122 | tsk_rt(t)->inh_task = prio_inh; |
657 | 1123 | ||
658 | linked_on = tsk_rt(t)->linked_on; | 1124 | linked_on = tsk_rt(t)->linked_on; |
659 | 1125 | ||
660 | /* If it is scheduled, then we need to reorder the CPU heap. */ | 1126 | /* If it is scheduled, then we need to reorder the CPU heap. */ |
661 | if (linked_on != NO_CPU) { | 1127 | if (linked_on != NO_CPU) { |
662 | TRACE_TASK(t, "%s: linked on %d\n", | 1128 | TRACE_TASK(t, "%s: linked on %d\n", |
663 | __FUNCTION__, linked_on); | 1129 | __FUNCTION__, linked_on); |
664 | /* Holder is scheduled; need to re-order CPUs. | 1130 | /* Holder is scheduled; need to re-order CPUs. |
665 | * We can't use heap_decrease() here since | 1131 | * We can't use heap_decrease() here since |
666 | * the cpu_heap is ordered in reverse direction, so | 1132 | * the cpu_heap is ordered in reverse direction, so |
667 | * it is actually an increase. */ | 1133 | * it is actually an increase. */ |
668 | bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, | 1134 | bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, |
669 | gsnedf_cpus[linked_on]->hn); | 1135 | gsnedf_cpus[linked_on]->hn); |
670 | bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, | 1136 | bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, |
671 | gsnedf_cpus[linked_on]->hn); | 1137 | gsnedf_cpus[linked_on]->hn); |
672 | } else { | 1138 | } else { |
673 | /* holder may be queued: first stop queue changes */ | 1139 | /* holder may be queued: first stop queue changes */ |
674 | raw_spin_lock(&gsnedf.release_lock); | 1140 | raw_spin_lock(&gsnedf.release_lock); |
675 | if (is_queued(t)) { | 1141 | if (is_queued(t)) { |
676 | TRACE_TASK(t, "%s: is queued\n", | 1142 | TRACE_TASK(t, "%s: is queued\n", __FUNCTION__); |
677 | __FUNCTION__); | 1143 | |
678 | /* We need to update the position of holder in some | 1144 | /* We need to update the position of holder in some |
679 | * heap. Note that this could be a release heap if | 1145 | * heap. Note that this could be a release heap if |
680 | * budget enforcement is used and this job overran. */ | 1146 | * budget enforcement is used and this job overran. */ |
681 | check_preempt = | 1147 | check_preempt = !bheap_decrease(edf_ready_order, tsk_rt(t)->heap_node); |
682 | !bheap_decrease(edf_ready_order, | 1148 | |
683 | tsk_rt(t)->heap_node); | ||
684 | } else { | 1149 | } else { |
685 | /* Nothing to do: if it is not queued and not linked | 1150 | /* Nothing to do: if it is not queued and not linked |
686 | * then it is either sleeping or currently being moved | 1151 | * then it is either sleeping or currently being moved |
687 | * by other code (e.g., a timer interrupt handler) that | 1152 | * by other code (e.g., a timer interrupt handler) that |
688 | * will use the correct priority when enqueuing the | 1153 | * will use the correct priority when enqueuing the |
689 | * task. */ | 1154 | * task. */ |
690 | TRACE_TASK(t, "%s: is NOT queued => Done.\n", | 1155 | TRACE_TASK(t, "%s: is NOT queued => Done.\n", __FUNCTION__); |
691 | __FUNCTION__); | ||
692 | } | 1156 | } |
693 | raw_spin_unlock(&gsnedf.release_lock); | 1157 | raw_spin_unlock(&gsnedf.release_lock); |
694 | 1158 | ||
@@ -702,34 +1166,148 @@ static void set_priority_inheritance(struct task_struct* t, struct task_struct* | |||
702 | /* heap_decrease() hit the top level of the heap: make | 1166 | /* heap_decrease() hit the top level of the heap: make |
703 | * sure preemption checks get the right task, not the | 1167 | * sure preemption checks get the right task, not the |
704 | * potentially stale cache. */ | 1168 | * potentially stale cache. */ |
705 | bheap_uncache_min(edf_ready_order, | 1169 | bheap_uncache_min(edf_ready_order, &gsnedf.ready_queue); |
706 | &gsnedf.ready_queue); | ||
707 | check_for_preemptions(); | 1170 | check_for_preemptions(); |
708 | } | 1171 | } |
709 | } | 1172 | } |
1173 | } | ||
1174 | |||
1175 | /* called with IRQs off */ | ||
1176 | static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | ||
1177 | { | ||
1178 | raw_spin_lock(&gsnedf_lock); | ||
710 | 1179 | ||
1180 | __set_priority_inheritance(t, prio_inh); | ||
1181 | |||
1182 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
1183 | if(tsk_rt(t)->cur_klitirqd != NULL) | ||
1184 | { | ||
1185 | TRACE_TASK(t, "%s/%d inherits a new priority!\n", | ||
1186 | tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid); | ||
1187 | |||
1188 | __set_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh); | ||
1189 | } | ||
1190 | #endif | ||
1191 | |||
711 | raw_spin_unlock(&gsnedf_lock); | 1192 | raw_spin_unlock(&gsnedf_lock); |
712 | } | 1193 | } |
713 | 1194 | ||
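Note the propagation step above: when t inherits a new priority, any klitirqd proxy thread currently working on t's behalf must inherit it too, otherwise the interrupt bottom half could be starved below the very task it serves. Both updates happen under one acquisition of gsnedf_lock, which is why the worker is the lock-assumed double-underscore variant. Condensed:

	/* Sketch only: inheritance must follow the proxy chain atomically. */
	raw_spin_lock(&gsnedf_lock);
	__set_priority_inheritance(t, prio_inh);	/* the task itself */
	if (tsk_rt(t)->cur_klitirqd)			/* its IRQ proxy, if any */
		__set_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh);
	raw_spin_unlock(&gsnedf_lock);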
1195 | |||
1196 | /* called with IRQs off */ | ||
1197 | static void __clear_priority_inheritance(struct task_struct* t) | ||
1198 | { | ||
1199 | TRACE_TASK(t, "priority restored\n"); | ||
1200 | |||
1201 | if(tsk_rt(t)->scheduled_on != NO_CPU) | ||
1202 | { | ||
1203 | sched_trace_eff_prio_change(t, NULL); | ||
1204 | |||
1205 | tsk_rt(t)->inh_task = NULL; | ||
1206 | |||
1207 | /* Check if rescheduling is necessary. We can't use heap_decrease() | ||
1208 | * since the priority was effectively lowered. */ | ||
1209 | unlink(t); | ||
1210 | gsnedf_job_arrival(t); | ||
1211 | } | ||
1212 | else | ||
1213 | { | ||
1214 | __set_priority_inheritance(t, NULL); | ||
1215 | } | ||
1216 | |||
1217 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
1218 | if(tsk_rt(t)->cur_klitirqd != NULL) | ||
1219 | { | ||
1220 | TRACE_TASK(t, "%s/%d inheritance set back to owner.\n", | ||
1221 | tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid); | ||
1222 | |||
1223 | if(tsk_rt(tsk_rt(t)->cur_klitirqd)->scheduled_on != NO_CPU) | ||
1224 | { | ||
1225 | sched_trace_eff_prio_change(tsk_rt(t)->cur_klitirqd, t); | ||
1226 | |||
1227 | tsk_rt(tsk_rt(t)->cur_klitirqd)->inh_task = t; | ||
1228 | |||
1229 | /* Check if rescheduling is necessary. We can't use heap_decrease() | ||
1230 | * since the priority was effectively lowered. */ | ||
1231 | unlink(tsk_rt(t)->cur_klitirqd); | ||
1232 | gsnedf_job_arrival(tsk_rt(t)->cur_klitirqd); | ||
1233 | } | ||
1234 | else | ||
1235 | { | ||
1236 | __set_priority_inheritance(tsk_rt(t)->cur_klitirqd, t); | ||
1237 | } | ||
1238 | } | ||
1239 | #endif | ||
1240 | } | ||
1241 | |||
714 | /* called with IRQs off */ | 1242 | /* called with IRQs off */ |
715 | static void clear_priority_inheritance(struct task_struct* t) | 1243 | static void clear_priority_inheritance(struct task_struct* t) |
716 | { | 1244 | { |
717 | raw_spin_lock(&gsnedf_lock); | 1245 | raw_spin_lock(&gsnedf_lock); |
1246 | __clear_priority_inheritance(t); | ||
1247 | raw_spin_unlock(&gsnedf_lock); | ||
1248 | } | ||
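Note: clearing inheritance is deliberately asymmetric to setting it. bheap_decrease() can only move a node toward higher priority, so an effective priority drop is implemented by unlinking the task and re-running job arrival, which re-inserts it at the restored priority and re-checks preemptions. Condensed, the pattern used by __clear_priority_inheritance() is:

	/* Sketch only: demotion = unlink + requeue, never a heap decrease. */
	tsk_rt(t)->inh_task = NULL;	/* drop the inherited priority */
	unlink(t);			/* leave CPU/ready structures */
	gsnedf_job_arrival(t);		/* requeue + check_for_preemptions() */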
718 | 1249 | ||
719 | /* A job only stops inheriting a priority when it releases a | 1250 | #ifdef CONFIG_LITMUS_SOFTIRQD |
720 | * resource. Thus we can make the following assumption.*/ | 1251 | /* called with IRQs off */ |
721 | BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU); | 1252 | static void set_priority_inheritance_klitirqd(struct task_struct* klitirqd, |
722 | 1253 | struct task_struct* old_owner, | |
723 | TRACE_TASK(t, "priority restored\n"); | 1254 | struct task_struct* new_owner) |
724 | tsk_rt(t)->inh_task = NULL; | 1255 | { |
1256 | BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread)); | ||
1257 | |||
1258 | raw_spin_lock(&gsnedf_lock); | ||
1259 | |||
1260 | if(old_owner != new_owner) | ||
1261 | { | ||
1262 | if(old_owner) | ||
1263 | { | ||
1264 | // unreachable? | ||
1265 | tsk_rt(old_owner)->cur_klitirqd = NULL; | ||
1266 | } | ||
1267 | |||
1268 | TRACE_TASK(klitirqd, "giving ownership to %s/%d.\n", | ||
1269 | new_owner->comm, new_owner->pid); | ||
725 | 1270 | ||
726 | /* Check if rescheduling is necessary. We can't use heap_decrease() | 1271 | tsk_rt(new_owner)->cur_klitirqd = klitirqd; |
727 | * since the priority was effectively lowered. */ | 1272 | } |
728 | unlink(t); | 1273 | |
729 | gsnedf_job_arrival(t); | 1274 | __set_priority_inheritance(klitirqd, |
1275 | (tsk_rt(new_owner)->inh_task == NULL) ? | ||
1276 | new_owner : | ||
1277 | tsk_rt(new_owner)->inh_task); | ||
1278 | |||
1279 | raw_spin_unlock(&gsnedf_lock); | ||
1280 | } | ||
730 | 1281 | ||
1282 | /* called with IRQs off */ | ||
1283 | static void clear_priority_inheritance_klitirqd(struct task_struct* klitirqd, | ||
1284 | struct task_struct* old_owner) | ||
1285 | { | ||
1286 | BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread)); | ||
1287 | |||
1288 | raw_spin_lock(&gsnedf_lock); | ||
1289 | |||
1290 | TRACE_TASK(klitirqd, "priority restored\n"); | ||
1291 | |||
1292 | if(tsk_rt(klitirqd)->scheduled_on != NO_CPU) | ||
1293 | { | ||
1294 | tsk_rt(klitirqd)->inh_task = NULL; | ||
1295 | |||
1296 | /* Check if rescheduling is necessary. We can't use heap_decrease() | ||
1297 | * since the priority was effectively lowered. */ | ||
1298 | unlink(klitirqd); | ||
1299 | gsnedf_job_arrival(klitirqd); | ||
1300 | } | ||
1301 | else | ||
1302 | { | ||
1303 | __set_priority_inheritance(klitirqd, NULL); | ||
1304 | } | ||
1305 | |||
1306 | tsk_rt(old_owner)->cur_klitirqd = NULL; | ||
1307 | |||
731 | raw_spin_unlock(&gsnedf_lock); | 1308 | raw_spin_unlock(&gsnedf_lock); |
732 | } | 1309 | } |
1310 | #endif | ||
733 | 1311 | ||
734 | 1312 | ||
735 | /* ******************** FMLP support ********************** */ | 1313 | /* ******************** FMLP support ********************** */ |
@@ -932,11 +1510,483 @@ static struct litmus_lock* gsnedf_new_fmlp(void) | |||
932 | return &sem->litmus_lock; | 1510 | return &sem->litmus_lock; |
933 | } | 1511 | } |
934 | 1512 | ||
1513 | |||
1514 | |||
1515 | |||
1516 | |||
1517 | |||
1518 | |||
1519 | /* ******************** KFMLP support ********************** */ | ||
1520 | |||
1521 | /* struct for semaphore with priority inheritance */ | ||
1522 | struct kfmlp_queue | ||
1523 | { | ||
1524 | wait_queue_head_t wait; | ||
1525 | struct task_struct* owner; | ||
1526 | struct task_struct* hp_waiter; | ||
1527 | int count; /* number of waiters + holder */ | ||
1528 | }; | ||
1529 | |||
1530 | struct kfmlp_semaphore | ||
1531 | { | ||
1532 | struct litmus_lock litmus_lock; | ||
1533 | |||
1534 | spinlock_t lock; | ||
1535 | |||
1536 | int num_resources; /* aka k */ | ||
1537 | |||
1538 | struct kfmlp_queue *queues; /* array */ | ||
1539 | struct kfmlp_queue *shortest_queue; /* pointer to shortest queue */ | ||
1540 | }; | ||
1541 | |||
1542 | static inline struct kfmlp_semaphore* kfmlp_from_lock(struct litmus_lock* lock) | ||
1543 | { | ||
1544 | return container_of(lock, struct kfmlp_semaphore, litmus_lock); | ||
1545 | } | ||
1546 | |||
1547 | static inline int kfmlp_get_idx(struct kfmlp_semaphore* sem, | ||
1548 | struct kfmlp_queue* queue) | ||
1549 | { | ||
1550 | return (queue - &sem->queues[0]); | ||
1551 | } | ||
1552 | |||
1553 | static inline struct kfmlp_queue* kfmlp_get_queue(struct kfmlp_semaphore* sem, | ||
1554 | struct task_struct* holder) | ||
1555 | { | ||
1556 | int i; | ||
1557 | for(i = 0; i < sem->num_resources; ++i) | ||
1558 | if(sem->queues[i].owner == holder) | ||
1559 | return(&sem->queues[i]); | ||
1560 | return(NULL); | ||
1561 | } | ||
1562 | |||
1563 | /* caller is responsible for locking */ | ||
1564 | static struct task_struct* kfmlp_find_hp_waiter(struct kfmlp_queue *kqueue, | ||
1565 | struct task_struct *skip) | ||
1566 | { | ||
1567 | struct list_head *pos; | ||
1568 | struct task_struct *queued, *found = NULL; | ||
1569 | |||
1570 | list_for_each(pos, &kqueue->wait.task_list) { | ||
1571 | queued = (struct task_struct*) list_entry(pos, wait_queue_t, | ||
1572 | task_list)->private; | ||
1573 | |||
1574 | /* Compare task prios, find high prio task. */ | ||
1575 | if (queued != skip && edf_higher_prio(queued, found)) | ||
1576 | found = queued; | ||
1577 | } | ||
1578 | return found; | ||
1579 | } | ||
1580 | |||
1581 | static inline struct kfmlp_queue* kfmlp_find_shortest( | ||
1582 | struct kfmlp_semaphore* sem, | ||
1583 | struct kfmlp_queue* search_start) | ||
1584 | { | ||
1585 | // we start our search at search_start instead of at the beginning of the | ||
1586 | // queue list to load-balance across all resources. | ||
1587 | struct kfmlp_queue* step = search_start; | ||
1588 | struct kfmlp_queue* shortest = sem->shortest_queue; | ||
1589 | |||
1590 | do | ||
1591 | { | ||
1592 | step = (step+1 != &sem->queues[sem->num_resources]) ? | ||
1593 | step+1 : &sem->queues[0]; | ||
1594 | |||
1595 | if(step->count < shortest->count) | ||
1596 | { | ||
1597 | shortest = step; | ||
1598 | if(step->count == 0) | ||
1599 | break; /* can't get any shorter */ | ||
1600 | } | ||
1601 | |||
1602 | }while(step != search_start); | ||
1603 | |||
1604 | return(shortest); | ||
1605 | } | ||
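Note: kfmlp_find_shortest() scans the k queues as a ring starting just past search_start, so successive lookups spread load instead of always favoring queues[0], and it exits early once a zero-length queue is seen. The same scan over plain counters, as a standalone sketch:

	/* Sketch only: circular shortest-queue scan over k counters. */
	static int find_shortest(const int *count, int k, int start, int shortest)
	{
		int i = start;

		do {
			i = (i + 1 == k) ? 0 : i + 1;	/* wrap around the ring */
			if (count[i] < count[shortest]) {
				shortest = i;
				if (count[i] == 0)
					break;		/* cannot get any shorter */
			}
		} while (i != start);

		return shortest;
	}

For example, with k = 4, counts {2, 1, 0, 3}, start = 0 and current shortest 0, the scan visits indices 1 and 2 and stops at 2 because its count is zero.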
1606 | |||
1607 | static struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem) | ||
1608 | { | ||
1609 | /* must hold sem->lock */ | ||
1610 | |||
1611 | struct kfmlp_queue *my_queue = NULL; | ||
1612 | struct task_struct *max_hp = NULL; | ||
1613 | |||
1614 | |||
1615 | struct list_head *pos; | ||
1616 | struct task_struct *queued; | ||
1617 | int i; | ||
1618 | |||
1619 | for(i = 0; i < sem->num_resources; ++i) | ||
1620 | { | ||
1621 | if( (sem->queues[i].count > 1) && | ||
1622 | ((my_queue == NULL) || | ||
1623 | (edf_higher_prio(sem->queues[i].hp_waiter, my_queue->hp_waiter))) ) | ||
1624 | { | ||
1625 | my_queue = &sem->queues[i]; | ||
1626 | } | ||
1627 | } | ||
1628 | |||
1629 | if(my_queue) | ||
1630 | { | ||
1631 | max_hp = my_queue->hp_waiter; | ||
1632 | |||
1633 | BUG_ON(!max_hp); | ||
1634 | |||
1635 | TRACE_CUR("queue %d: stealing %s/%d\n", | ||
1636 | kfmlp_get_idx(sem, my_queue), | ||
1637 | max_hp->comm, max_hp->pid); | ||
1639 | |||
1640 | my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, max_hp); | ||
1641 | |||
1642 | /* | ||
1643 | if(my_queue->hp_waiter) | ||
1644 | TRACE_CUR("queue %d: new hp_waiter is %s/%d\n", | ||
1645 | kfmlp_get_idx(sem, my_queue), | ||
1646 | my_queue->hp_waiter->comm, | ||
1647 | my_queue->hp_waiter->pid); | ||
1648 | else | ||
1649 | TRACE_CUR("queue %d: new hp_waiter is %p\n", | ||
1650 | kfmlp_get_idx(sem, my_queue), NULL); | ||
1651 | */ | ||
1652 | |||
1653 | raw_spin_lock(&gsnedf_lock); | ||
1654 | |||
1655 | /* | ||
1656 | if(my_queue->owner) | ||
1657 | TRACE_CUR("queue %d: owner is %s/%d\n", | ||
1658 | kfmlp_get_idx(sem, my_queue), | ||
1659 | my_queue->owner->comm, | ||
1660 | my_queue->owner->pid); | ||
1661 | else | ||
1662 | TRACE_CUR("queue %d: owner is %p\n", | ||
1663 | kfmlp_get_idx(sem, my_queue), | ||
1664 | NULL); | ||
1665 | */ | ||
1666 | |||
1667 | if(tsk_rt(my_queue->owner)->inh_task == max_hp) | ||
1668 | { | ||
1669 | __clear_priority_inheritance(my_queue->owner); | ||
1670 | if(my_queue->hp_waiter != NULL) | ||
1671 | { | ||
1672 | __set_priority_inheritance(my_queue->owner, my_queue->hp_waiter); | ||
1673 | } | ||
1674 | } | ||
1675 | raw_spin_unlock(&gsnedf_lock); | ||
1676 | |||
1677 | list_for_each(pos, &my_queue->wait.task_list) | ||
1678 | { | ||
1679 | queued = (struct task_struct*) list_entry(pos, wait_queue_t, | ||
1680 | task_list)->private; | ||
1681 | /* Compare task prios, find high prio task. */ | ||
1682 | if (queued == max_hp) | ||
1683 | { | ||
1684 | /* | ||
1685 | TRACE_CUR("queue %d: found entry in wait queue. REMOVING!\n", | ||
1686 | kfmlp_get_idx(sem, my_queue)); | ||
1687 | */ | ||
1688 | __remove_wait_queue(&my_queue->wait, | ||
1689 | list_entry(pos, wait_queue_t, task_list)); | ||
1690 | break; | ||
1691 | } | ||
1692 | } | ||
1693 | --(my_queue->count); | ||
1694 | } | ||
1695 | |||
1696 | return(max_hp); | ||
1697 | } | ||
1698 | |||
1699 | int gsnedf_kfmlp_lock(struct litmus_lock* l) | ||
1700 | { | ||
1701 | struct task_struct* t = current; | ||
1702 | struct kfmlp_semaphore *sem = kfmlp_from_lock(l); | ||
1703 | struct kfmlp_queue* my_queue; | ||
1704 | wait_queue_t wait; | ||
1705 | unsigned long flags; | ||
1706 | |||
1707 | if (!is_realtime(t)) | ||
1708 | return -EPERM; | ||
1709 | |||
1710 | spin_lock_irqsave(&sem->lock, flags); | ||
1711 | |||
1712 | my_queue = sem->shortest_queue; | ||
1713 | |||
1714 | if (my_queue->owner) { | ||
1715 | /* resource is not free => must suspend and wait */ | ||
1716 | TRACE_CUR("queue %d: Resource is not free => must suspend and wait.\n", | ||
1717 | kfmlp_get_idx(sem, my_queue)); | ||
1718 | |||
1719 | init_waitqueue_entry(&wait, t); | ||
1720 | |||
1721 | /* FIXME: interruptible would be nice some day */ | ||
1722 | set_task_state(t, TASK_UNINTERRUPTIBLE); | ||
1723 | |||
1724 | __add_wait_queue_tail_exclusive(&my_queue->wait, &wait); | ||
1725 | |||
1726 | /* check if we need to activate priority inheritance */ | ||
1727 | if (edf_higher_prio(t, my_queue->hp_waiter)) | ||
1728 | { | ||
1729 | my_queue->hp_waiter = t; | ||
1730 | if (edf_higher_prio(t, my_queue->owner)) | ||
1731 | { | ||
1732 | set_priority_inheritance(my_queue->owner, my_queue->hp_waiter); | ||
1733 | } | ||
1734 | } | ||
1735 | |||
1736 | ++(my_queue->count); | ||
1737 | sem->shortest_queue = kfmlp_find_shortest(sem, my_queue); | ||
1738 | |||
1739 | /* release lock before sleeping */ | ||
1740 | spin_unlock_irqrestore(&sem->lock, flags); | ||
1741 | |||
1742 | /* We depend on the FIFO order. Thus, we don't need to recheck | ||
1743 | * when we wake up; we are guaranteed to have the lock since | ||
1744 | * there is only one wake up per release (or steal). | ||
1745 | */ | ||
1746 | schedule(); | ||
1747 | |||
1748 | |||
1749 | if(my_queue->owner == t) | ||
1750 | { | ||
1751 | TRACE_CUR("queue %d: acquired through waiting\n", | ||
1752 | kfmlp_get_idx(sem, my_queue)); | ||
1753 | } | ||
1754 | else | ||
1755 | { | ||
1756 | /* this case may happen if our wait entry was stolen | ||
1757 | between queues. record where we went. */ | ||
1758 | my_queue = kfmlp_get_queue(sem, t); | ||
1759 | |||
1760 | BUG_ON(!my_queue); | ||
1761 | TRACE_CUR("queue %d: acquired through stealing\n", | ||
1762 | kfmlp_get_idx(sem, my_queue)); | ||
1763 | } | ||
1764 | } | ||
1765 | else | ||
1766 | { | ||
1767 | TRACE_CUR("queue %d: acquired immediately\n", | ||
1768 | kfmlp_get_idx(sem, my_queue)); | ||
1769 | |||
1770 | my_queue->owner = t; | ||
1771 | |||
1772 | ++(my_queue->count); | ||
1773 | sem->shortest_queue = kfmlp_find_shortest(sem, my_queue); | ||
1774 | |||
1775 | spin_unlock_irqrestore(&sem->lock, flags); | ||
1776 | } | ||
1777 | |||
1778 | return kfmlp_get_idx(sem, my_queue); | ||
1779 | } | ||
1780 | |||
1781 | int gsnedf_kfmlp_unlock(struct litmus_lock* l) | ||
1782 | { | ||
1783 | struct task_struct *t = current, *next; | ||
1784 | struct kfmlp_semaphore *sem = kfmlp_from_lock(l); | ||
1785 | struct kfmlp_queue *my_queue; | ||
1786 | unsigned long flags; | ||
1787 | int err = 0; | ||
1788 | |||
1789 | spin_lock_irqsave(&sem->lock, flags); | ||
1790 | |||
1791 | my_queue = kfmlp_get_queue(sem, t); | ||
1792 | |||
1793 | if (!my_queue) { | ||
1794 | err = -EINVAL; | ||
1795 | goto out; | ||
1796 | } | ||
1797 | |||
1798 | /* check if there are jobs waiting for this resource */ | ||
1799 | next = __waitqueue_remove_first(&my_queue->wait); | ||
1800 | if (next) { | ||
1801 | /* | ||
1802 | TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - next\n", | ||
1803 | kfmlp_get_idx(sem, my_queue), | ||
1804 | next->comm, next->pid); | ||
1805 | */ | ||
1806 | /* next becomes the resource holder */ | ||
1807 | my_queue->owner = next; | ||
1808 | |||
1809 | --(my_queue->count); | ||
1810 | // the '=' of '<=' is a dumb method to attempt to build | ||
1811 | // affinity until tasks can tell us where they ran last... | ||
1812 | if(my_queue->count <= sem->shortest_queue->count) | ||
1813 | { | ||
1814 | sem->shortest_queue = my_queue; | ||
1815 | } | ||
1816 | |||
1817 | TRACE_CUR("queue %d: lock ownership passed to %s/%d\n", | ||
1818 | kfmlp_get_idx(sem, my_queue), next->comm, next->pid); | ||
1819 | |||
1820 | /* determine new hp_waiter if necessary */ | ||
1821 | if (next == my_queue->hp_waiter) { | ||
1822 | TRACE_TASK(next, "was highest-prio waiter\n"); | ||
1823 | /* next has the highest priority --- it doesn't need to | ||
1824 | * inherit. However, we need to make sure that the | ||
1825 | * next-highest priority in the queue is reflected in | ||
1826 | * hp_waiter. */ | ||
1827 | my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, next); | ||
1828 | if (my_queue->hp_waiter) | ||
1829 | TRACE_TASK(my_queue->hp_waiter, "queue %d: is new highest-prio waiter\n", kfmlp_get_idx(sem, my_queue)); | ||
1830 | else | ||
1831 | TRACE("queue %d: no further waiters\n", kfmlp_get_idx(sem, my_queue)); | ||
1832 | } else { | ||
1833 | /* Well, if next is not the highest-priority waiter, | ||
1834 | * then it ought to inherit the highest-priority | ||
1835 | * waiter's priority. */ | ||
1836 | set_priority_inheritance(next, my_queue->hp_waiter); | ||
1837 | } | ||
1838 | |||
1839 | /* wake up next */ | ||
1840 | wake_up_process(next); | ||
1841 | } | ||
1842 | else | ||
1843 | { | ||
1844 | TRACE_CUR("queue %d: looking to steal someone...\n", kfmlp_get_idx(sem, my_queue)); | ||
1845 | |||
1846 | next = kfmlp_remove_hp_waiter(sem); /* returns NULL if nothing to steal */ | ||
1847 | |||
1848 | /* | ||
1849 | if(next) | ||
1850 | TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - steal\n", | ||
1851 | kfmlp_get_idx(sem, my_queue), | ||
1852 | next->comm, next->pid); | ||
1853 | */ | ||
1854 | |||
1855 | my_queue->owner = next; | ||
1856 | |||
1857 | if(next) | ||
1858 | { | ||
1859 | TRACE_CUR("queue %d: lock ownership passed to %s/%d (which was stolen)\n", | ||
1860 | kfmlp_get_idx(sem, my_queue), | ||
1861 | next->comm, next->pid); | ||
1862 | |||
1863 | /* wake up next */ | ||
1864 | wake_up_process(next); | ||
1865 | } | ||
1866 | else | ||
1867 | { | ||
1868 | TRACE_CUR("queue %d: no one to steal.\n", kfmlp_get_idx(sem, my_queue)); | ||
1869 | |||
1870 | --(my_queue->count); | ||
1871 | // the '=' of '<=' is a dumb method to attempt to build | ||
1872 | // affinity until tasks can tell us where they ran last... | ||
1873 | if(my_queue->count <= sem->shortest_queue->count) | ||
1874 | { | ||
1875 | sem->shortest_queue = my_queue; | ||
1876 | } | ||
1877 | } | ||
1878 | } | ||
1879 | |||
1880 | /* we lose the benefit of priority inheritance (if any) */ | ||
1881 | if (tsk_rt(t)->inh_task) | ||
1882 | clear_priority_inheritance(t); | ||
1883 | |||
1884 | out: | ||
1885 | spin_unlock_irqrestore(&sem->lock, flags); | ||
1886 | |||
1887 | return err; | ||
1888 | } | ||
1889 | |||
1890 | int gsnedf_kfmlp_close(struct litmus_lock* l) | ||
1891 | { | ||
1892 | struct task_struct *t = current; | ||
1893 | struct kfmlp_semaphore *sem = kfmlp_from_lock(l); | ||
1894 | struct kfmlp_queue *my_queue; | ||
1895 | unsigned long flags; | ||
1896 | |||
1897 | int owner; | ||
1898 | |||
1899 | spin_lock_irqsave(&sem->lock, flags); | ||
1900 | |||
1901 | my_queue = kfmlp_get_queue(sem, t); | ||
1902 | owner = (my_queue) ? (my_queue->owner == t) : 0; | ||
1903 | |||
1904 | spin_unlock_irqrestore(&sem->lock, flags); | ||
1905 | |||
1906 | if (owner) | ||
1907 | gsnedf_kfmlp_unlock(l); | ||
1908 | |||
1909 | return 0; | ||
1910 | } | ||
1911 | |||
1912 | void gsnedf_kfmlp_free(struct litmus_lock* l) | ||
1913 | { | ||
1914 | struct kfmlp_semaphore *sem = kfmlp_from_lock(l); | ||
1915 | kfree(sem->queues); | ||
1916 | kfree(sem); | ||
1917 | } | ||
1918 | |||
1919 | static struct litmus_lock_ops gsnedf_kfmlp_lock_ops = { | ||
1920 | .close = gsnedf_kfmlp_close, | ||
1921 | .lock = gsnedf_kfmlp_lock, | ||
1922 | .unlock = gsnedf_kfmlp_unlock, | ||
1923 | .deallocate = gsnedf_kfmlp_free, | ||
1924 | }; | ||
1925 | |||
1926 | static struct litmus_lock* gsnedf_new_kfmlp(void* __user arg, int* ret_code) | ||
1927 | { | ||
1928 | struct kfmlp_semaphore* sem; | ||
1929 | int num_resources = 0; | ||
1930 | int i; | ||
1931 | |||
1932 | if(!access_ok(VERIFY_READ, arg, sizeof(num_resources))) | ||
1933 | { | ||
1934 | *ret_code = -EINVAL; | ||
1935 | return(NULL); | ||
1936 | } | ||
1937 | if(__copy_from_user(&num_resources, arg, sizeof(num_resources))) | ||
1938 | { | ||
1939 | *ret_code = -EINVAL; | ||
1940 | return(NULL); | ||
1941 | } | ||
1942 | if(num_resources < 1) | ||
1943 | { | ||
1944 | *ret_code = -EINVAL; | ||
1945 | return(NULL); | ||
1946 | } | ||
1947 | |||
1948 | sem = kmalloc(sizeof(*sem), GFP_KERNEL); | ||
1949 | if(!sem) | ||
1950 | { | ||
1951 | *ret_code = -ENOMEM; | ||
1952 | return NULL; | ||
1953 | } | ||
1954 | |||
1955 | sem->queues = kmalloc(sizeof(struct kfmlp_queue)*num_resources, GFP_KERNEL); | ||
1956 | if(!sem->queues) | ||
1957 | { | ||
1958 | kfree(sem); | ||
1959 | *ret_code = -ENOMEM; | ||
1960 | return NULL; | ||
1961 | } | ||
1962 | |||
1963 | sem->litmus_lock.ops = &gsnedf_kfmlp_lock_ops; | ||
1964 | spin_lock_init(&sem->lock); | ||
1965 | sem->num_resources = num_resources; | ||
1966 | |||
1967 | for(i = 0; i < num_resources; ++i) | ||
1968 | { | ||
1969 | sem->queues[i].owner = NULL; | ||
1970 | sem->queues[i].hp_waiter = NULL; | ||
1971 | init_waitqueue_head(&sem->queues[i].wait); | ||
1972 | sem->queues[i].count = 0; | ||
1973 | } | ||
1974 | |||
1975 | sem->shortest_queue = &sem->queues[0]; | ||
1976 | |||
1977 | *ret_code = 0; | ||
1978 | return &sem->litmus_lock; | ||
1979 | } | ||
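Note: gsnedf_new_kfmlp() copies the replication count k from the user-supplied pointer, so userspace must pass the address of an int when opening the lock. A hedged sketch of what that looks like with the stock liblitmus API (od_openx()/litmus_lock()/od_close(); names assumed from mainline liblitmus, not verified against this branch):

	/* Sketch only: opening and using a k-replica KFMLP lock. */
	#include <litmus.h>

	int use_kfmlp(int fd, int obj_id)
	{
		int k = 3;	/* replication count; the kernel copies this */
		int od = od_openx(fd, KFMLP_SEM, obj_id, &k);
		int replica;

		if (od < 0)
			return od;

		replica = litmus_lock(od);	/* per this patch, the lock call
						 * returns the index of the
						 * replica that was acquired */
		/* ... use resource instance 'replica' ... */
		litmus_unlock(od);
		return od_close(od);
	}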
1980 | |||
1981 | |||
1982 | |||
1983 | |||
1984 | |||
935 | /* **** lock constructor **** */ | 1985 | /* **** lock constructor **** */ |
936 | 1986 | ||
937 | 1987 | ||
938 | static long gsnedf_allocate_lock(struct litmus_lock **lock, int type, | 1988 | static long gsnedf_allocate_lock(struct litmus_lock **lock, int type, |
939 | void* __user unused) | 1989 | void* __user arg) |
940 | { | 1990 | { |
941 | int err = -ENXIO; | 1991 | int err = -ENXIO; |
942 | 1992 | ||
@@ -951,7 +2001,10 @@ static long gsnedf_allocate_lock(struct litmus_lock **lock, int type, | |||
951 | else | 2001 | else |
952 | err = -ENOMEM; | 2002 | err = -ENOMEM; |
953 | break; | 2003 | break; |
954 | 2004 | ||
2005 | case KFMLP_SEM: | ||
2006 | *lock = gsnedf_new_kfmlp(arg, &err); | ||
2007 | break; | ||
955 | }; | 2008 | }; |
956 | 2009 | ||
957 | return err; | 2010 | return err; |
@@ -959,7 +2012,6 @@ static long gsnedf_allocate_lock(struct litmus_lock **lock, int type, | |||
959 | 2012 | ||
960 | #endif | 2013 | #endif |
961 | 2014 | ||
962 | |||
963 | static long gsnedf_activate_plugin(void) | 2015 | static long gsnedf_activate_plugin(void) |
964 | { | 2016 | { |
965 | int cpu; | 2017 | int cpu; |
@@ -986,6 +2038,20 @@ static long gsnedf_activate_plugin(void) | |||
986 | } | 2038 | } |
987 | #endif | 2039 | #endif |
988 | } | 2040 | } |
2041 | |||
2042 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
2043 | gsnedf_pending_tasklets.head = NULL; | ||
2044 | gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head); | ||
2045 | #endif | ||
2046 | |||
2047 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
2048 | spawn_klitirqd(NULL); | ||
2049 | #endif | ||
2050 | |||
2051 | #ifdef CONFIG_LITMUS_NVIDIA | ||
2052 | init_nvidia_info(); | ||
2053 | #endif | ||
2054 | |||
989 | return 0; | 2055 | return 0; |
990 | } | 2056 | } |
991 | 2057 | ||
@@ -1003,7 +2069,17 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = { | |||
1003 | .admit_task = gsnedf_admit_task, | 2069 | .admit_task = gsnedf_admit_task, |
1004 | .activate_plugin = gsnedf_activate_plugin, | 2070 | .activate_plugin = gsnedf_activate_plugin, |
1005 | #ifdef CONFIG_LITMUS_LOCKING | 2071 | #ifdef CONFIG_LITMUS_LOCKING |
1006 | .allocate_lock = gsnedf_allocate_lock, | 2072 | .allocate_lock = gsnedf_allocate_lock, |
2073 | .set_prio_inh = set_priority_inheritance, | ||
2074 | .clear_prio_inh = clear_priority_inheritance, | ||
2075 | #endif | ||
2076 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
2077 | .set_prio_inh_klitirqd = set_priority_inheritance_klitirqd, | ||
2078 | .clear_prio_inh_klitirqd = clear_priority_inheritance_klitirqd, | ||
2079 | #endif | ||
2080 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
2081 | .enqueue_pai_tasklet = enqueue_pai_tasklet, | ||
2082 | .run_tasklets = run_tasklets, | ||
1007 | #endif | 2083 | #endif |
1008 | }; | 2084 | }; |
1009 | 2085 | ||