Diffstat (limited to 'litmus/sched_gsn_edf.c')
-rw-r--r-- | litmus/sched_gsn_edf.c | 1085
1 file changed, 967 insertions(+), 118 deletions(-)
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index c3344b9d288f..83b2f04b1532 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -12,24 +12,50 @@ | |||
12 | #include <linux/percpu.h> | 12 | #include <linux/percpu.h> |
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/uaccess.h> | ||
16 | #include <linux/module.h> | ||
15 | 17 | ||
16 | #include <litmus/litmus.h> | 18 | #include <litmus/litmus.h> |
17 | #include <litmus/jobs.h> | 19 | #include <litmus/jobs.h> |
18 | #include <litmus/sched_plugin.h> | 20 | #include <litmus/sched_plugin.h> |
19 | #include <litmus/edf_common.h> | 21 | #include <litmus/edf_common.h> |
20 | #include <litmus/sched_trace.h> | 22 | #include <litmus/sched_trace.h> |
21 | #include <litmus/trace.h> | ||
22 | 23 | ||
23 | #include <litmus/preempt.h> | 24 | #include <litmus/preempt.h> |
24 | #include <litmus/budget.h> | 25 | #include <litmus/budget.h> |
25 | 26 | ||
26 | #include <litmus/bheap.h> | 27 | #include <litmus/bheap.h> |
28 | #include <litmus/binheap.h> | ||
29 | |||
30 | #ifdef CONFIG_LITMUS_LOCKING | ||
31 | #include <litmus/kfmlp_lock.h> | ||
32 | #endif | ||
33 | |||
34 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
35 | #include <litmus/rsm_lock.h> | ||
36 | #include <litmus/ikglp_lock.h> | ||
37 | #endif | ||
27 | 38 | ||
28 | #ifdef CONFIG_SCHED_CPU_AFFINITY | 39 | #ifdef CONFIG_SCHED_CPU_AFFINITY |
29 | #include <litmus/affinity.h> | 40 | #include <litmus/affinity.h> |
30 | #endif | 41 | #endif |
31 | 42 | ||
32 | #include <linux/module.h> | 43 | #ifdef CONFIG_LITMUS_SOFTIRQD |
44 | #include <litmus/litmus_softirq.h> | ||
45 | #endif | ||
46 | |||
47 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
48 | #include <linux/interrupt.h> | ||
49 | #include <litmus/trace.h> | ||
50 | #endif | ||
51 | |||
52 | #ifdef CONFIG_LITMUS_NVIDIA | ||
53 | #include <litmus/nvidia_info.h> | ||
54 | #endif | ||
55 | |||
56 | #if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA) | ||
57 | #include <litmus/gpu_affinity.h> | ||
58 | #endif | ||
33 | 59 | ||
34 | /* Overview of GSN-EDF operations. | 60 | /* Overview of GSN-EDF operations. |
35 | * | 61 | * |
@@ -104,52 +130,70 @@ typedef struct { | |||
104 | int cpu; | 130 | int cpu; |
105 | struct task_struct* linked; /* only RT tasks */ | 131 | struct task_struct* linked; /* only RT tasks */ |
106 | struct task_struct* scheduled; /* only RT tasks */ | 132 | struct task_struct* scheduled; /* only RT tasks */ |
107 | struct bheap_node* hn; | 133 | struct binheap_node hn; |
108 | } cpu_entry_t; | 134 | } cpu_entry_t; |
109 | DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries); | 135 | DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries); |
110 | 136 | ||
111 | cpu_entry_t* gsnedf_cpus[NR_CPUS]; | 137 | cpu_entry_t* gsnedf_cpus[NR_CPUS]; |
112 | 138 | ||
113 | /* the cpus queue themselves according to priority in here */ | 139 | /* the cpus queue themselves according to priority in here */ |
114 | static struct bheap_node gsnedf_heap_node[NR_CPUS]; | 140 | static struct binheap gsnedf_cpu_heap; |
115 | static struct bheap gsnedf_cpu_heap; | ||
116 | 141 | ||
117 | static rt_domain_t gsnedf; | 142 | static rt_domain_t gsnedf; |
118 | #define gsnedf_lock (gsnedf.ready_lock) | 143 | #define gsnedf_lock (gsnedf.ready_lock) |
119 | 144 | ||
145 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
146 | static raw_spinlock_t dgl_lock; | ||
147 | |||
148 | static raw_spinlock_t* gsnedf_get_dgl_spinlock(struct task_struct *t) | ||
149 | { | ||
150 | return(&dgl_lock); | ||
151 | } | ||
152 | #endif | ||
153 | |||
154 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
155 | struct tasklet_head | ||
156 | { | ||
157 | struct tasklet_struct *head; | ||
158 | struct tasklet_struct **tail; | ||
159 | }; | ||
160 | |||
161 | struct tasklet_head gsnedf_pending_tasklets; | ||
162 | #endif | ||
163 | |||
120 | 164 | ||
121 | /* Uncomment this if you want to see all scheduling decisions in the | 165 | /* Uncomment this if you want to see all scheduling decisions in the |
122 | * TRACE() log. | 166 | * TRACE() log. |
123 | #define WANT_ALL_SCHED_EVENTS | 167 | #define WANT_ALL_SCHED_EVENTS |
124 | */ | 168 | */ |
125 | 169 | ||
126 | static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b) | 170 | static int cpu_lower_prio(struct binheap_node *_a, struct binheap_node *_b) |
127 | { | 171 | { |
128 | cpu_entry_t *a, *b; | 172 | cpu_entry_t *a = binheap_entry(_a, cpu_entry_t, hn); |
129 | a = _a->value; | 173 | cpu_entry_t *b = binheap_entry(_b, cpu_entry_t, hn); |
130 | b = _b->value; | 174 | |
131 | /* Note that a and b are inverted: we want the lowest-priority CPU at | 175 | /* Note that a and b are inverted: we want the lowest-priority CPU at |
132 | * the top of the heap. | 176 | * the top of the heap. |
133 | */ | 177 | */ |
134 | return edf_higher_prio(b->linked, a->linked); | 178 | return edf_higher_prio(b->linked, a->linked); |
135 | } | 179 | } |
136 | 180 | ||
181 | |||
137 | /* update_cpu_position - Move the cpu entry to the correct place to maintain | 182 | /* update_cpu_position - Move the cpu entry to the correct place to maintain |
138 | * order in the cpu queue. Caller must hold gsnedf lock. | 183 | * order in the cpu queue. Caller must hold gsnedf lock. |
139 | */ | 184 | */ |
140 | static void update_cpu_position(cpu_entry_t *entry) | 185 | static void update_cpu_position(cpu_entry_t *entry) |
141 | { | 186 | { |
142 | if (likely(bheap_node_in_heap(entry->hn))) | 187 | if (likely(binheap_is_in_heap(&entry->hn))) { |
143 | bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn); | 188 | binheap_delete(&entry->hn, &gsnedf_cpu_heap); |
144 | bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn); | 189 | } |
190 | binheap_add(&entry->hn, &gsnedf_cpu_heap, cpu_entry_t, hn); | ||
145 | } | 191 | } |
146 | 192 | ||
147 | /* caller must hold gsnedf lock */ | 193 | /* caller must hold gsnedf lock */ |
148 | static cpu_entry_t* lowest_prio_cpu(void) | 194 | static cpu_entry_t* lowest_prio_cpu(void) |
149 | { | 195 | { |
150 | struct bheap_node* hn; | 196 | return binheap_top_entry(&gsnedf_cpu_heap, cpu_entry_t, hn); |
151 | hn = bheap_peek(cpu_lower_prio, &gsnedf_cpu_heap); | ||
152 | return hn->value; | ||
153 | } | 197 | } |
154 | 198 | ||
155 | 199 | ||
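The hunk above moves the per-CPU priority queue from the old bheap (with separately allocated bheap_node storage) to the intrusive binheap from <litmus/binheap.h>: each cpu_entry_t now embeds its own struct binheap_node, and cpu_lower_prio keeps the lowest-priority CPU at the top of the heap by comparing its arguments in reverse. A minimal sketch of this binheap usage pattern follows; demo_entry_t, demo_order, and the comparator convention are assumptions for illustration only, not part of the patch.

#include <litmus/binheap.h>

typedef struct demo_entry {
	int prio;
	struct binheap_node hn;  /* intrusive node, like cpu_entry_t::hn */
} demo_entry_t;

/* Assumed comparator convention, mirroring cpu_lower_prio's signature:
 * return nonzero if _a should sit closer to the top of the heap than _b.
 * cpu_lower_prio gets its "lowest-priority CPU on top" order by passing
 * b before a to edf_higher_prio(). */
static int demo_order(struct binheap_node *_a, struct binheap_node *_b)
{
	demo_entry_t *a = binheap_entry(_a, demo_entry_t, hn);
	demo_entry_t *b = binheap_entry(_b, demo_entry_t, hn);
	return a->prio < b->prio;
}

static struct binheap demo_heap;

static void demo_heap_init(void)
{
	INIT_BINHEAP_HANDLE(&demo_heap, demo_order);
}

static void demo_add(demo_entry_t *e)
{
	INIT_BINHEAP_NODE(&e->hn);
	binheap_add(&e->hn, &demo_heap, demo_entry_t, hn);
}

/* Reposition after a priority change: the same delete/re-add idiom as
 * update_cpu_position() in the hunk above. */
static void demo_reposition(demo_entry_t *e)
{
	if (binheap_is_in_heap(&e->hn))
		binheap_delete(&e->hn, &demo_heap);
	binheap_add(&e->hn, &demo_heap, demo_entry_t, hn);
}

static demo_entry_t* demo_top(void)
{
	return binheap_top_entry(&demo_heap, demo_entry_t, hn);
}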
@@ -338,6 +382,10 @@ static noinline void job_completion(struct task_struct *t, int forced) | |||
338 | 382 | ||
339 | sched_trace_task_completion(t, forced); | 383 | sched_trace_task_completion(t, forced); |
340 | 384 | ||
385 | #ifdef CONFIG_LITMUS_NVIDIA | ||
386 | atomic_set(&tsk_rt(t)->nv_int_count, 0); | ||
387 | #endif | ||
388 | |||
341 | TRACE_TASK(t, "job_completion().\n"); | 389 | TRACE_TASK(t, "job_completion().\n"); |
342 | 390 | ||
343 | /* set flags */ | 391 | /* set flags */ |
@@ -362,24 +410,344 @@ static noinline void job_completion(struct task_struct *t, int forced) | |||
362 | */ | 410 | */ |
363 | static void gsnedf_tick(struct task_struct* t) | 411 | static void gsnedf_tick(struct task_struct* t) |
364 | { | 412 | { |
365 | if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) { | 413 | if (is_realtime(t) && budget_exhausted(t)) |
366 | if (!is_np(t)) { | 414 | { |
367 | /* np tasks will be preempted when they become | 415 | if (budget_signalled(t) && !sigbudget_sent(t)) { |
368 | * preemptable again | 416 | /* signal exhaustion */ |
369 | */ | 417 | send_sigbudget(t); |
370 | litmus_reschedule_local(); | 418 | } |
371 | TRACE("gsnedf_scheduler_tick: " | 419 | |
372 | "%d is preemptable " | 420 | if (budget_enforced(t)) { |
373 | " => FORCE_RESCHED\n", t->pid); | 421 | if (!is_np(t)) { |
374 | } else if (is_user_np(t)) { | 422 | /* np tasks will be preempted when they become |
375 | TRACE("gsnedf_scheduler_tick: " | 423 | * preemptable again |
376 | "%d is non-preemptable, " | 424 | */ |
377 | "preemption delayed.\n", t->pid); | 425 | litmus_reschedule_local(); |
378 | request_exit_np(t); | 426 | TRACE("gsnedf_scheduler_tick: " |
427 | "%d is preemptable " | ||
428 | " => FORCE_RESCHED\n", t->pid); | ||
429 | } else if (is_user_np(t)) { | ||
430 | TRACE("gsnedf_scheduler_tick: " | ||
431 | "%d is non-preemptable, " | ||
432 | "preemption delayed.\n", t->pid); | ||
433 | request_exit_np(t); | ||
434 | } | ||
435 | } | ||
436 | } | ||
437 | } | ||
438 | |||
439 | |||
440 | |||
441 | |||
442 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
443 | |||
444 | |||
445 | static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed) | ||
446 | { | ||
447 | if (!atomic_read(&tasklet->count)) { | ||
448 | if(tasklet->owner) { | ||
449 | sched_trace_tasklet_begin(tasklet->owner); | ||
450 | } | ||
451 | |||
452 | if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state)) | ||
453 | { | ||
454 | BUG(); | ||
455 | } | ||
456 | TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n", | ||
457 | __FUNCTION__, | ||
458 | (tasklet->owner) ? tasklet->owner->pid : -1, | ||
459 | (tasklet->owner) ? 0 : 1); | ||
460 | tasklet->func(tasklet->data); | ||
461 | tasklet_unlock(tasklet); | ||
462 | |||
463 | if(tasklet->owner) { | ||
464 | sched_trace_tasklet_end(tasklet->owner, flushed); | ||
465 | } | ||
466 | } | ||
467 | else { | ||
468 | BUG(); | ||
469 | } | ||
470 | } | ||
471 | |||
472 | static void do_lit_tasklets(struct task_struct* sched_task) | ||
473 | { | ||
474 | int work_to_do = 1; | ||
475 | struct tasklet_struct *tasklet = NULL; | ||
476 | unsigned long flags; | ||
477 | |||
478 | while(work_to_do) { | ||
479 | |||
480 | TS_NV_SCHED_BOTISR_START; | ||
481 | |||
482 | // execute one tasklet that has higher priority | ||
483 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | ||
484 | |||
485 | if(gsnedf_pending_tasklets.head != NULL) { | ||
486 | struct tasklet_struct *prev = NULL; | ||
487 | tasklet = gsnedf_pending_tasklets.head; | ||
488 | |||
489 | while(tasklet && edf_higher_prio(sched_task, tasklet->owner)) { | ||
490 | prev = tasklet; | ||
491 | tasklet = tasklet->next; | ||
492 | } | ||
493 | |||
494 | // remove the tasklet from the queue | ||
495 | if(prev) { | ||
496 | prev->next = tasklet->next; | ||
497 | if(prev->next == NULL) { | ||
498 | TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); | ||
499 | gsnedf_pending_tasklets.tail = &(prev->next); | ||
500 | } | ||
501 | } | ||
502 | else { | ||
503 | gsnedf_pending_tasklets.head = tasklet->next; | ||
504 | if(tasklet->next == NULL) { | ||
505 | TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); | ||
506 | gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head); | ||
507 | } | ||
508 | } | ||
509 | } | ||
510 | else { | ||
511 | TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__); | ||
512 | } | ||
513 | |||
514 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
515 | |||
516 | if(tasklet) { | ||
517 | __do_lit_tasklet(tasklet, 0ul); | ||
518 | tasklet = NULL; | ||
519 | } | ||
520 | else { | ||
521 | work_to_do = 0; | ||
379 | } | 522 | } |
523 | |||
524 | TS_NV_SCHED_BOTISR_END; | ||
380 | } | 525 | } |
381 | } | 526 | } |
382 | 527 | ||
528 | //static void do_lit_tasklets(struct task_struct* sched_task) | ||
529 | //{ | ||
530 | // int work_to_do = 1; | ||
531 | // struct tasklet_struct *tasklet = NULL; | ||
532 | // //struct tasklet_struct *step; | ||
533 | // unsigned long flags; | ||
534 | // | ||
535 | // while(work_to_do) { | ||
536 | // | ||
537 | // TS_NV_SCHED_BOTISR_START; | ||
538 | // | ||
539 | // // remove tasklet at head of list if it has higher priority. | ||
540 | // raw_spin_lock_irqsave(&gsnedf_lock, flags); | ||
541 | // | ||
542 | // if(gsnedf_pending_tasklets.head != NULL) { | ||
543 | // // remove tasklet at head. | ||
544 | // tasklet = gsnedf_pending_tasklets.head; | ||
545 | // | ||
546 | // if(edf_higher_prio(tasklet->owner, sched_task)) { | ||
547 | // | ||
548 | // if(NULL == tasklet->next) { | ||
549 | // // tasklet is at the head, list only has one element | ||
550 | // TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); | ||
551 | // gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head); | ||
552 | // } | ||
553 | // | ||
554 | // // remove the tasklet from the queue | ||
555 | // gsnedf_pending_tasklets.head = tasklet->next; | ||
556 | // | ||
557 | // TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); | ||
558 | // } | ||
559 | // else { | ||
560 | // TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, tasklet->owner->pid, smp_processor_id()); | ||
561 | // tasklet = NULL; | ||
562 | // } | ||
563 | // } | ||
564 | // else { | ||
565 | // TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__); | ||
566 | // } | ||
567 | // | ||
568 | // raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
569 | // | ||
570 | // TS_NV_SCHED_BOTISR_END; | ||
571 | // | ||
572 | // if(tasklet) { | ||
573 | // __do_lit_tasklet(tasklet, 0ul); | ||
574 | // tasklet = NULL; | ||
575 | // } | ||
576 | // else { | ||
577 | // work_to_do = 0; | ||
578 | // } | ||
579 | // } | ||
580 | // | ||
581 | // //TRACE("%s: exited.\n", __FUNCTION__); | ||
582 | //} | ||
583 | |||
584 | static void __add_pai_tasklet(struct tasklet_struct* tasklet) | ||
585 | { | ||
586 | struct tasklet_struct* step; | ||
587 | |||
588 | tasklet->next = NULL; // make sure there are no old values floating around | ||
589 | |||
590 | step = gsnedf_pending_tasklets.head; | ||
591 | if(step == NULL) { | ||
592 | TRACE("%s: tasklet queue empty. inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid); | ||
593 | // insert at tail. | ||
594 | *(gsnedf_pending_tasklets.tail) = tasklet; | ||
595 | gsnedf_pending_tasklets.tail = &(tasklet->next); | ||
596 | } | ||
597 | else if((*(gsnedf_pending_tasklets.tail) != NULL) && | ||
598 | edf_higher_prio((*(gsnedf_pending_tasklets.tail))->owner, tasklet->owner)) { | ||
599 | // insert at tail. | ||
600 | TRACE("%s: tasklet belongs at end. inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid); | ||
601 | |||
602 | *(gsnedf_pending_tasklets.tail) = tasklet; | ||
603 | gsnedf_pending_tasklets.tail = &(tasklet->next); | ||
604 | } | ||
605 | else { | ||
606 | // insert the tasklet somewhere in the middle. | ||
607 | |||
608 | TRACE("%s: tasklet belongs somewhere in the middle.\n", __FUNCTION__); | ||
609 | |||
610 | while(step->next && edf_higher_prio(step->next->owner, tasklet->owner)) { | ||
611 | step = step->next; | ||
612 | } | ||
613 | |||
614 | // insert tasklet right before step->next. | ||
615 | |||
616 | TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, tasklet->owner->pid, step->owner->pid, (step->next) ? step->next->owner->pid : -1); | ||
617 | |||
618 | tasklet->next = step->next; | ||
619 | step->next = tasklet; | ||
620 | |||
621 | // patch up the head if needed. | ||
622 | if(gsnedf_pending_tasklets.head == step) | ||
623 | { | ||
624 | TRACE("%s: %d is the new tasklet queue head.\n", __FUNCTION__, tasklet->owner->pid); | ||
625 | gsnedf_pending_tasklets.head = tasklet; | ||
626 | } | ||
627 | } | ||
628 | } | ||
629 | |||
630 | static void gsnedf_run_tasklets(struct task_struct* sched_task) | ||
631 | { | ||
632 | preempt_disable(); | ||
633 | |||
634 | if(gsnedf_pending_tasklets.head != NULL) { | ||
635 | TRACE("%s: There are tasklets to process.\n", __FUNCTION__); | ||
636 | do_lit_tasklets(sched_task); | ||
637 | } | ||
638 | |||
639 | preempt_enable_no_resched(); | ||
640 | } | ||
641 | |||
642 | static int gsnedf_enqueue_pai_tasklet(struct tasklet_struct* tasklet) | ||
643 | { | ||
644 | cpu_entry_t *targetCPU = NULL; | ||
645 | int thisCPU; | ||
646 | int runLocal = 0; | ||
647 | int runNow = 0; | ||
648 | unsigned long flags; | ||
649 | |||
650 | if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner))) | ||
651 | { | ||
652 | TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__); | ||
653 | return 0; | ||
654 | } | ||
655 | |||
656 | |||
657 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | ||
658 | |||
659 | thisCPU = smp_processor_id(); | ||
660 | |||
661 | #ifdef CONFIG_SCHED_CPU_AFFINITY | ||
662 | { | ||
663 | cpu_entry_t* affinity = NULL; | ||
664 | |||
665 | // use this CPU if it is in our cluster and isn't running any RT work. | ||
666 | if( | ||
667 | #ifdef CONFIG_RELEASE_MASTER | ||
668 | (thisCPU != gsnedf.release_master) && | ||
669 | #endif | ||
670 | (__get_cpu_var(gsnedf_cpu_entries).linked == NULL)) { | ||
671 | affinity = &(__get_cpu_var(gsnedf_cpu_entries)); | ||
672 | } | ||
673 | else { | ||
674 | // this CPU is busy or shouldn't run tasklets in this cluster. | ||
675 | // look for available nearby CPUs. | ||
676 | // NOTE: Affinity towards owner and not this CPU. Is this right? | ||
677 | affinity = | ||
678 | gsnedf_get_nearest_available_cpu( | ||
679 | &per_cpu(gsnedf_cpu_entries, task_cpu(tasklet->owner))); | ||
680 | } | ||
681 | |||
682 | targetCPU = affinity; | ||
683 | } | ||
684 | #endif | ||
685 | |||
686 | if (targetCPU == NULL) { | ||
687 | targetCPU = lowest_prio_cpu(); | ||
688 | } | ||
689 | |||
690 | if (edf_higher_prio(tasklet->owner, targetCPU->linked)) { | ||
691 | if (thisCPU == targetCPU->cpu) { | ||
692 | TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__); | ||
693 | runLocal = 1; | ||
694 | runNow = 1; | ||
695 | } | ||
696 | else { | ||
697 | TRACE("%s: Run tasklet remotely (and now).\n", __FUNCTION__); | ||
698 | runLocal = 0; | ||
699 | runNow = 1; | ||
700 | } | ||
701 | } | ||
702 | else { | ||
703 | runLocal = 0; | ||
704 | runNow = 0; | ||
705 | } | ||
706 | |||
707 | if(!runLocal) { | ||
708 | // enqueue the tasklet | ||
709 | __add_pai_tasklet(tasklet); | ||
710 | } | ||
711 | |||
712 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
713 | |||
714 | |||
715 | if (runLocal /*&& runNow */) { // runNow == 1 is implied | ||
716 | TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__); | ||
717 | __do_lit_tasklet(tasklet, 0ul); | ||
718 | } | ||
719 | else if (runNow /*&& !runLocal */) { // runLocal == 0 is implied | ||
720 | TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, targetCPU->cpu); | ||
721 | preempt(targetCPU); // need to be protected by gsnedf_lock? | ||
722 | } | ||
723 | else { | ||
724 | TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__); | ||
725 | } | ||
726 | |||
727 | return(1); // success | ||
728 | } | ||
729 | |||
730 | static void gsnedf_change_prio_pai_tasklet(struct task_struct *old_prio, | ||
731 | struct task_struct *new_prio) | ||
732 | { | ||
733 | struct tasklet_struct* step; | ||
734 | unsigned long flags; | ||
735 | |||
736 | if(gsnedf_pending_tasklets.head != NULL) { | ||
737 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | ||
738 | for(step = gsnedf_pending_tasklets.head; step != NULL; step = step->next) { | ||
739 | if(step->owner == old_prio) { | ||
740 | TRACE("%s: Found tasklet to change: %d\n", __FUNCTION__, step->owner->pid); | ||
741 | step->owner = new_prio; | ||
742 | } | ||
743 | } | ||
744 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
745 | } | ||
746 | } | ||
747 | |||
748 | #endif // end PAI | ||
749 | |||
750 | |||
383 | /* Getting schedule() right is a bit tricky. schedule() may not make any | 751 | /* Getting schedule() right is a bit tricky. schedule() may not make any |
384 | * assumptions on the state of the current task since it may be called for a | 752 | * assumptions on the state of the current task since it may be called for a |
385 | * number of reasons. The reasons include a scheduler_tick() determined that it | 753 | * number of reasons. The reasons include a scheduler_tick() determined that it |
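The PAI tasklet queue in the hunk above (gsnedf_pending_tasklets) keeps head as a plain pointer and tail as a pointer to the last node's next field, so __add_pai_tasklet can append with *tail = t; tail = &t->next; without a separate empty-list case, while do_lit_tasklets re-points tail whenever it unlinks the last element. A minimal stand-alone sketch of that tail-pointer idiom; the demo_node/demo_queue names are illustrative, not from the patch.

struct demo_node {
	struct demo_node *next;
};

struct demo_queue {
	struct demo_node *head;
	struct demo_node **tail;  /* &head when empty, else &last->next */
};

static void demo_queue_init(struct demo_queue *q)
{
	q->head = NULL;
	q->tail = &q->head;       /* same shape as the gsnedf_pending_tasklets setup */
}

static void demo_queue_append(struct demo_queue *q, struct demo_node *n)
{
	n->next = NULL;
	*q->tail = n;             /* stores into head, or into the last node's next */
	q->tail = &n->next;
}

static struct demo_node* demo_queue_pop(struct demo_queue *q)
{
	struct demo_node *n = q->head;
	if (n) {
		q->head = n->next;
		if (!q->head)
			q->tail = &q->head;  /* removed the final element */
	}
	return n;
}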
@@ -404,7 +772,7 @@ static void gsnedf_tick(struct task_struct* t) | |||
404 | static struct task_struct* gsnedf_schedule(struct task_struct * prev) | 772 | static struct task_struct* gsnedf_schedule(struct task_struct * prev) |
405 | { | 773 | { |
406 | cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); | 774 | cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); |
407 | int out_of_time, sleep, preempt, np, exists, blocks; | 775 | int out_of_time, signal_budget, sleep, preempt, np, exists, blocks; |
408 | struct task_struct* next = NULL; | 776 | struct task_struct* next = NULL; |
409 | 777 | ||
410 | #ifdef CONFIG_RELEASE_MASTER | 778 | #ifdef CONFIG_RELEASE_MASTER |
@@ -427,8 +795,13 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
427 | /* (0) Determine state */ | 795 | /* (0) Determine state */ |
428 | exists = entry->scheduled != NULL; | 796 | exists = entry->scheduled != NULL; |
429 | blocks = exists && !is_running(entry->scheduled); | 797 | blocks = exists && !is_running(entry->scheduled); |
430 | out_of_time = exists && budget_enforced(entry->scheduled) | 798 | out_of_time = exists && |
431 | && budget_exhausted(entry->scheduled); | 799 | budget_enforced(entry->scheduled) && |
800 | budget_exhausted(entry->scheduled); | ||
801 | signal_budget = exists && | ||
802 | budget_signalled(entry->scheduled) && | ||
803 | budget_exhausted(entry->scheduled) && | ||
804 | !sigbudget_sent(entry->scheduled); | ||
432 | np = exists && is_np(entry->scheduled); | 805 | np = exists && is_np(entry->scheduled); |
433 | sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP; | 806 | sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP; |
434 | preempt = entry->scheduled != entry->linked; | 807 | preempt = entry->scheduled != entry->linked; |
@@ -437,21 +810,36 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
437 | TRACE_TASK(prev, "invoked gsnedf_schedule.\n"); | 810 | TRACE_TASK(prev, "invoked gsnedf_schedule.\n"); |
438 | #endif | 811 | #endif |
439 | 812 | ||
813 | /* | ||
440 | if (exists) | 814 | if (exists) |
441 | TRACE_TASK(prev, | 815 | TRACE_TASK(prev, |
442 | "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d " | 816 | "blocks:%d out_of_time:%d signal_budget: %d np:%d sleep:%d preempt:%d " |
443 | "state:%d sig:%d\n", | 817 | "state:%d sig:%d\n", |
444 | blocks, out_of_time, np, sleep, preempt, | 818 | blocks, out_of_time, signal_budget, np, sleep, preempt, |
445 | prev->state, signal_pending(prev)); | 819 | prev->state, signal_pending(prev)); |
820 | */ | ||
821 | |||
446 | if (entry->linked && preempt) | 822 | if (entry->linked && preempt) |
447 | TRACE_TASK(prev, "will be preempted by %s/%d\n", | 823 | TRACE_TASK(prev, "will be preempted by %s/%d\n", |
448 | entry->linked->comm, entry->linked->pid); | 824 | entry->linked->comm, entry->linked->pid); |
449 | 825 | ||
826 | /* Send the signal that the budget has been exhausted */ | ||
827 | if (signal_budget) | ||
828 | send_sigbudget(entry->scheduled); | ||
450 | 829 | ||
451 | /* If a task blocks we have no choice but to reschedule. | 830 | /* If a task blocks we have no choice but to reschedule. |
452 | */ | 831 | */ |
453 | if (blocks) | 832 | if (blocks) { |
454 | unlink(entry->scheduled); | 833 | unlink(entry->scheduled); |
834 | } | ||
835 | |||
836 | #if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING) | ||
837 | if(exists && is_realtime(entry->scheduled) && tsk_rt(entry->scheduled)->held_gpus) { | ||
838 | if(!blocks || tsk_rt(entry->scheduled)->suspend_gpu_tracker_on_block) { | ||
839 | stop_gpu_tracker(entry->scheduled); | ||
840 | } | ||
841 | } | ||
842 | #endif | ||
455 | 843 | ||
456 | /* Request a sys_exit_np() call if we would like to preempt but cannot. | 844 | /* Request a sys_exit_np() call if we would like to preempt but cannot. |
457 | * We need to make sure to update the link structure anyway in case | 845 | * We need to make sure to update the link structure anyway in case |
@@ -492,12 +880,15 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
492 | entry->scheduled->rt_param.scheduled_on = NO_CPU; | 880 | entry->scheduled->rt_param.scheduled_on = NO_CPU; |
493 | TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n"); | 881 | TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n"); |
494 | } | 882 | } |
495 | } else | 883 | } |
884 | else | ||
885 | { | ||
496 | /* Only override Linux scheduler if we have a real-time task | 886 | /* Only override Linux scheduler if we have a real-time task |
497 | * scheduled that needs to continue. | 887 | * scheduled that needs to continue. |
498 | */ | 888 | */ |
499 | if (exists) | 889 | if (exists) |
500 | next = prev; | 890 | next = prev; |
891 | } | ||
501 | 892 | ||
502 | sched_state_task_picked(); | 893 | sched_state_task_picked(); |
503 | 894 | ||
@@ -524,6 +915,7 @@ static void gsnedf_finish_switch(struct task_struct *prev) | |||
524 | cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); | 915 | cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); |
525 | 916 | ||
526 | entry->scheduled = is_realtime(current) ? current : NULL; | 917 | entry->scheduled = is_realtime(current) ? current : NULL; |
918 | |||
527 | #ifdef WANT_ALL_SCHED_EVENTS | 919 | #ifdef WANT_ALL_SCHED_EVENTS |
528 | TRACE_TASK(prev, "switched away from\n"); | 920 | TRACE_TASK(prev, "switched away from\n"); |
529 | #endif | 921 | #endif |
@@ -572,11 +964,14 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running) | |||
572 | static void gsnedf_task_wake_up(struct task_struct *task) | 964 | static void gsnedf_task_wake_up(struct task_struct *task) |
573 | { | 965 | { |
574 | unsigned long flags; | 966 | unsigned long flags; |
575 | lt_t now; | 967 | //lt_t now; |
576 | 968 | ||
577 | TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); | 969 | TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); |
578 | 970 | ||
579 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | 971 | raw_spin_lock_irqsave(&gsnedf_lock, flags); |
972 | |||
973 | |||
974 | #if 0 // sporadic task model | ||
580 | /* We need to take suspensions because of semaphores into | 975 | /* We need to take suspensions because of semaphores into |
581 | * account! If a job resumes after being suspended due to acquiring | 976 | * account! If a job resumes after being suspended due to acquiring |
582 | * a semaphore, it should never be treated as a new job release. | 977 | * a semaphore, it should never be treated as a new job release. |
@@ -598,19 +993,26 @@ static void gsnedf_task_wake_up(struct task_struct *task) | |||
598 | } | 993 | } |
599 | } | 994 | } |
600 | } | 995 | } |
996 | #else // periodic task model | ||
997 | set_rt_flags(task, RT_F_RUNNING); | ||
998 | #endif | ||
999 | |||
601 | gsnedf_job_arrival(task); | 1000 | gsnedf_job_arrival(task); |
602 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | 1001 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); |
603 | } | 1002 | } |
604 | 1003 | ||
605 | static void gsnedf_task_block(struct task_struct *t) | 1004 | static void gsnedf_task_block(struct task_struct *t) |
606 | { | 1005 | { |
1006 | // TODO: is this called on preemption?? | ||
607 | unsigned long flags; | 1007 | unsigned long flags; |
608 | 1008 | ||
609 | TRACE_TASK(t, "block at %llu\n", litmus_clock()); | 1009 | TRACE_TASK(t, "block at %llu\n", litmus_clock()); |
610 | 1010 | ||
611 | /* unlink if necessary */ | 1011 | /* unlink if necessary */ |
612 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | 1012 | raw_spin_lock_irqsave(&gsnedf_lock, flags); |
1013 | |||
613 | unlink(t); | 1014 | unlink(t); |
1015 | |||
614 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | 1016 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); |
615 | 1017 | ||
616 | BUG_ON(!is_realtime(t)); | 1018 | BUG_ON(!is_realtime(t)); |
@@ -621,6 +1023,10 @@ static void gsnedf_task_exit(struct task_struct * t) | |||
621 | { | 1023 | { |
622 | unsigned long flags; | 1024 | unsigned long flags; |
623 | 1025 | ||
1026 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
1027 | gsnedf_change_prio_pai_tasklet(t, NULL); | ||
1028 | #endif | ||
1029 | |||
624 | /* unlink if necessary */ | 1030 | /* unlink if necessary */ |
625 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | 1031 | raw_spin_lock_irqsave(&gsnedf_lock, flags); |
626 | unlink(t); | 1032 | unlink(t); |
@@ -637,101 +1043,423 @@ static void gsnedf_task_exit(struct task_struct * t) | |||
637 | 1043 | ||
638 | static long gsnedf_admit_task(struct task_struct* tsk) | 1044 | static long gsnedf_admit_task(struct task_struct* tsk) |
639 | { | 1045 | { |
1046 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
1047 | INIT_BINHEAP_HANDLE(&tsk_rt(tsk)->hp_blocked_tasks, | ||
1048 | edf_max_heap_base_priority_order); | ||
1049 | #endif | ||
1050 | |||
640 | return 0; | 1051 | return 0; |
641 | } | 1052 | } |
642 | 1053 | ||
1054 | |||
1055 | |||
1056 | |||
1057 | |||
1058 | |||
643 | #ifdef CONFIG_LITMUS_LOCKING | 1059 | #ifdef CONFIG_LITMUS_LOCKING |
644 | 1060 | ||
645 | #include <litmus/fdso.h> | 1061 | #include <litmus/fdso.h> |
646 | 1062 | ||
647 | /* called with IRQs off */ | 1063 | /* called with IRQs off */ |
648 | static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | 1064 | static void __increase_priority_inheritance(struct task_struct* t, |
1065 | struct task_struct* prio_inh) | ||
649 | { | 1066 | { |
650 | int linked_on; | 1067 | int linked_on; |
651 | int check_preempt = 0; | 1068 | int check_preempt = 0; |
652 | 1069 | ||
1070 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
1071 | /* this sanity check allows for weaker locking in protocols */ | ||
1072 | /* TODO (klitirqd): Skip this check if 't' is a proxy thread (???) */ | ||
1073 | if(__edf_higher_prio(prio_inh, BASE, t, EFFECTIVE)) { | ||
1074 | #endif | ||
1075 | TRACE_TASK(t, "inherits priority from %s/%d\n", | ||
1076 | prio_inh->comm, prio_inh->pid); | ||
1077 | tsk_rt(t)->inh_task = prio_inh; | ||
1078 | |||
1079 | linked_on = tsk_rt(t)->linked_on; | ||
1080 | |||
1081 | /* If it is scheduled, then we need to reorder the CPU heap. */ | ||
1082 | if (linked_on != NO_CPU) { | ||
1083 | TRACE_TASK(t, "%s: linked on %d\n", | ||
1084 | __FUNCTION__, linked_on); | ||
1085 | /* Holder is scheduled; need to re-order CPUs. | ||
1086 | * We can't use heap_decrease() here since | ||
1087 | * the cpu_heap is ordered in reverse direction, so | ||
1088 | * it is actually an increase. */ | ||
1089 | binheap_delete(&gsnedf_cpus[linked_on]->hn, &gsnedf_cpu_heap); | ||
1090 | binheap_add(&gsnedf_cpus[linked_on]->hn, | ||
1091 | &gsnedf_cpu_heap, cpu_entry_t, hn); | ||
1092 | } else { | ||
1093 | /* holder may be queued: first stop queue changes */ | ||
1094 | raw_spin_lock(&gsnedf.release_lock); | ||
1095 | if (is_queued(t)) { | ||
1096 | TRACE_TASK(t, "%s: is queued\n", | ||
1097 | __FUNCTION__); | ||
1098 | /* We need to update the position of holder in some | ||
1099 | * heap. Note that this could be a release heap if | ||
1100 | * budget enforcement is used and this job overran. */ | ||
1101 | check_preempt = | ||
1102 | !bheap_decrease(edf_ready_order, | ||
1103 | tsk_rt(t)->heap_node); | ||
1104 | } else { | ||
1105 | /* Nothing to do: if it is not queued and not linked | ||
1106 | * then it is either sleeping or currently being moved | ||
1107 | * by other code (e.g., a timer interrupt handler) that | ||
1108 | * will use the correct priority when enqueuing the | ||
1109 | * task. */ | ||
1110 | TRACE_TASK(t, "%s: is NOT queued => Done.\n", | ||
1111 | __FUNCTION__); | ||
1112 | } | ||
1113 | raw_spin_unlock(&gsnedf.release_lock); | ||
1114 | |||
1115 | /* If holder was enqueued in a release heap, then the following | ||
1116 | * preemption check is pointless, but we can't easily detect | ||
1117 | * that case. If you want to fix this, then consider that | ||
1118 | * simply adding a state flag requires O(n) time to update when | ||
1119 | * releasing n tasks, which conflicts with the goal to have | ||
1120 | * O(log n) merges. */ | ||
1121 | if (check_preempt) { | ||
1122 | /* heap_decrease() hit the top level of the heap: make | ||
1123 | * sure preemption checks get the right task, not the | ||
1124 | * potentially stale cache. */ | ||
1125 | bheap_uncache_min(edf_ready_order, | ||
1126 | &gsnedf.ready_queue); | ||
1127 | check_for_preemptions(); | ||
1128 | } | ||
1129 | } | ||
1130 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
1131 | } | ||
1132 | else { | ||
1133 | TRACE_TASK(t, "Spurious invalid priority increase. " | ||
1134 | "Inheritance request: %s/%d [eff_prio = %s/%d] to inherit from %s/%d\n" | ||
1135 | "Occurance is likely okay: probably due to (hopefully safe) concurrent priority updates.\n", | ||
1136 | t->comm, t->pid, | ||
1137 | effective_priority(t)->comm, effective_priority(t)->pid, | ||
1138 | (prio_inh) ? prio_inh->comm : "nil", | ||
1139 | (prio_inh) ? prio_inh->pid : -1); | ||
1140 | WARN_ON(!prio_inh); | ||
1141 | } | ||
1142 | #endif | ||
1143 | } | ||
1144 | |||
1145 | /* called with IRQs off */ | ||
1146 | static void increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | ||
1147 | { | ||
653 | raw_spin_lock(&gsnedf_lock); | 1148 | raw_spin_lock(&gsnedf_lock); |
654 | 1149 | ||
655 | TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid); | 1150 | __increase_priority_inheritance(t, prio_inh); |
656 | tsk_rt(t)->inh_task = prio_inh; | 1151 | |
657 | 1152 | #ifdef CONFIG_LITMUS_SOFTIRQD | |
658 | linked_on = tsk_rt(t)->linked_on; | 1153 | if(tsk_rt(t)->cur_klitirqd != NULL) |
659 | 1154 | { | |
660 | /* If it is scheduled, then we need to reorder the CPU heap. */ | 1155 | TRACE_TASK(t, "%s/%d inherits a new priority!\n", |
661 | if (linked_on != NO_CPU) { | 1156 | tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid); |
662 | TRACE_TASK(t, "%s: linked on %d\n", | 1157 | |
663 | __FUNCTION__, linked_on); | 1158 | __increase_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh); |
664 | /* Holder is scheduled; need to re-order CPUs. | 1159 | } |
665 | * We can't use heap_decrease() here since | 1160 | #endif |
666 | * the cpu_heap is ordered in reverse direction, so | 1161 | |
667 | * it is actually an increase. */ | 1162 | raw_spin_unlock(&gsnedf_lock); |
668 | bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, | 1163 | |
669 | gsnedf_cpus[linked_on]->hn); | 1164 | #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) |
670 | bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, | 1165 | if(tsk_rt(t)->held_gpus) { |
671 | gsnedf_cpus[linked_on]->hn); | 1166 | int i; |
672 | } else { | 1167 | for(i = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus)); |
673 | /* holder may be queued: first stop queue changes */ | 1168 | i < NV_DEVICE_NUM; |
674 | raw_spin_lock(&gsnedf.release_lock); | 1169 | i = find_next_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus), i+1)) { |
675 | if (is_queued(t)) { | 1170 | pai_check_priority_increase(t, i); |
676 | TRACE_TASK(t, "%s: is queued\n", | 1171 | } |
677 | __FUNCTION__); | 1172 | } |
678 | /* We need to update the position of holder in some | 1173 | #endif |
679 | * heap. Note that this could be a release heap if we | 1174 | } |
680 | * budget enforcement is used and this job overran. */ | 1175 | |
681 | check_preempt = | 1176 | |
682 | !bheap_decrease(edf_ready_order, | 1177 | /* called with IRQs off */ |
683 | tsk_rt(t)->heap_node); | 1178 | static void __decrease_priority_inheritance(struct task_struct* t, |
684 | } else { | 1179 | struct task_struct* prio_inh) |
685 | /* Nothing to do: if it is not queued and not linked | 1180 | { |
686 | * then it is either sleeping or currently being moved | 1181 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
687 | * by other code (e.g., a timer interrupt handler) that | 1182 | if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) { |
688 | * will use the correct priority when enqueuing the | 1183 | #endif |
689 | * task. */ | 1184 | /* A job only stops inheriting a priority when it releases a |
690 | TRACE_TASK(t, "%s: is NOT queued => Done.\n", | 1185 | * resource. Thus we can make the following assumption.*/ |
691 | __FUNCTION__); | 1186 | if(prio_inh) |
1187 | TRACE_TASK(t, "EFFECTIVE priority decreased to %s/%d\n", | ||
1188 | prio_inh->comm, prio_inh->pid); | ||
1189 | else | ||
1190 | TRACE_TASK(t, "base priority restored.\n"); | ||
1191 | |||
1192 | tsk_rt(t)->inh_task = prio_inh; | ||
1193 | |||
1194 | if(tsk_rt(t)->scheduled_on != NO_CPU) { | ||
1195 | TRACE_TASK(t, "is scheduled.\n"); | ||
1196 | |||
1197 | /* Check if rescheduling is necessary. We can't use heap_decrease() | ||
1198 | * since the priority was effectively lowered. */ | ||
1199 | unlink(t); | ||
1200 | gsnedf_job_arrival(t); | ||
692 | } | 1201 | } |
693 | raw_spin_unlock(&gsnedf.release_lock); | 1202 | else { |
694 | 1203 | /* task is queued */ | |
695 | /* If holder was enqueued in a release heap, then the following | 1204 | raw_spin_lock(&gsnedf.release_lock); |
696 | * preemption check is pointless, but we can't easily detect | 1205 | if (is_queued(t)) { |
697 | * that case. If you want to fix this, then consider that | 1206 | TRACE_TASK(t, "is queued.\n"); |
698 | * simply adding a state flag requires O(n) time to update when | 1207 | |
699 | * releasing n tasks, which conflicts with the goal to have | 1208 | /* decrease in priority, so we have to re-add to binomial heap */ |
700 | * O(log n) merges. */ | 1209 | unlink(t); |
701 | if (check_preempt) { | 1210 | gsnedf_job_arrival(t); |
702 | /* heap_decrease() hit the top level of the heap: make | 1211 | } |
703 | * sure preemption checks get the right task, not the | 1212 | else { |
704 | * potentially stale cache. */ | 1213 | TRACE_TASK(t, "is not in scheduler. Probably on wait queue somewhere.\n"); |
705 | bheap_uncache_min(edf_ready_order, | 1214 | } |
706 | &gsnedf.ready_queue); | 1215 | raw_spin_unlock(&gsnedf.release_lock); |
707 | check_for_preemptions(); | ||
708 | } | 1216 | } |
1217 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
1218 | } | ||
1219 | else { | ||
1220 | TRACE_TASK(t, "Spurious invalid priority decrease. " | ||
1221 | "Inheritance request: %s/%d [eff_prio = %s/%d] to inherit from %s/%d\n" | ||
1222 | "Occurance is likely okay: probably due to (hopefully safe) concurrent priority updates.\n", | ||
1223 | t->comm, t->pid, | ||
1224 | effective_priority(t)->comm, effective_priority(t)->pid, | ||
1225 | (prio_inh) ? prio_inh->comm : "nil", | ||
1226 | (prio_inh) ? prio_inh->pid : -1); | ||
709 | } | 1227 | } |
1228 | #endif | ||
1229 | } | ||
1230 | |||
1231 | static void decrease_priority_inheritance(struct task_struct* t, | ||
1232 | struct task_struct* prio_inh) | ||
1233 | { | ||
1234 | raw_spin_lock(&gsnedf_lock); | ||
1235 | __decrease_priority_inheritance(t, prio_inh); | ||
1236 | |||
1237 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
1238 | if(tsk_rt(t)->cur_klitirqd != NULL) | ||
1239 | { | ||
1240 | TRACE_TASK(t, "%s/%d decreases in priority!\n", | ||
1241 | tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid); | ||
1242 | |||
1243 | __decrease_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh); | ||
1244 | } | ||
1245 | #endif | ||
710 | 1246 | ||
711 | raw_spin_unlock(&gsnedf_lock); | 1247 | raw_spin_unlock(&gsnedf_lock); |
1248 | |||
1249 | #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) | ||
1250 | if(tsk_rt(t)->held_gpus) { | ||
1251 | int i; | ||
1252 | for(i = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus)); | ||
1253 | i < NV_DEVICE_NUM; | ||
1254 | i = find_next_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus), i+1)) { | ||
1255 | pai_check_priority_decrease(t, i); | ||
1256 | } | ||
1257 | } | ||
1258 | #endif | ||
712 | } | 1259 | } |
713 | 1260 | ||
1261 | |||
1262 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
714 | /* called with IRQs off */ | 1263 | /* called with IRQs off */ |
715 | static void clear_priority_inheritance(struct task_struct* t) | 1264 | static void increase_priority_inheritance_klitirqd(struct task_struct* klitirqd, |
1265 | struct task_struct* old_owner, | ||
1266 | struct task_struct* new_owner) | ||
716 | { | 1267 | { |
1268 | BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread)); | ||
1269 | |||
717 | raw_spin_lock(&gsnedf_lock); | 1270 | raw_spin_lock(&gsnedf_lock); |
718 | 1271 | ||
719 | /* A job only stops inheriting a priority when it releases a | 1272 | if(old_owner != new_owner) |
720 | * resource. Thus we can make the following assumption.*/ | 1273 | { |
721 | BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU); | 1274 | if(old_owner) |
1275 | { | ||
1276 | // unreachable? | ||
1277 | tsk_rt(old_owner)->cur_klitirqd = NULL; | ||
1278 | } | ||
722 | 1279 | ||
723 | TRACE_TASK(t, "priority restored\n"); | 1280 | TRACE_TASK(klitirqd, "giving ownership to %s/%d.\n", |
724 | tsk_rt(t)->inh_task = NULL; | 1281 | new_owner->comm, new_owner->pid); |
725 | 1282 | ||
726 | /* Check if rescheduling is necessary. We can't use heap_decrease() | 1283 | tsk_rt(new_owner)->cur_klitirqd = klitirqd; |
727 | * since the priority was effectively lowered. */ | 1284 | } |
728 | unlink(t); | 1285 | |
729 | gsnedf_job_arrival(t); | 1286 | __decrease_priority_inheritance(klitirqd, NULL); // kludge to clear out cur prio. |
1287 | |||
1288 | __increase_priority_inheritance(klitirqd, | ||
1289 | (tsk_rt(new_owner)->inh_task == NULL) ? | ||
1290 | new_owner : | ||
1291 | tsk_rt(new_owner)->inh_task); | ||
730 | 1292 | ||
731 | raw_spin_unlock(&gsnedf_lock); | 1293 | raw_spin_unlock(&gsnedf_lock); |
732 | } | 1294 | } |
733 | 1295 | ||
734 | 1296 | ||
1297 | /* called with IRQs off */ | ||
1298 | static void decrease_priority_inheritance_klitirqd(struct task_struct* klitirqd, | ||
1299 | struct task_struct* old_owner, | ||
1300 | struct task_struct* new_owner) | ||
1301 | { | ||
1302 | BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread)); | ||
1303 | |||
1304 | raw_spin_lock(&gsnedf_lock); | ||
1305 | |||
1306 | TRACE_TASK(klitirqd, "priority restored\n"); | ||
1307 | |||
1308 | __decrease_priority_inheritance(klitirqd, new_owner); | ||
1309 | |||
1310 | tsk_rt(old_owner)->cur_klitirqd = NULL; | ||
1311 | |||
1312 | raw_spin_unlock(&gsnedf_lock); | ||
1313 | } | ||
1314 | #endif | ||
1315 | |||
1316 | |||
1317 | |||
1318 | |||
1319 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
1320 | |||
1321 | /* called with IRQs off */ | ||
1322 | /* preconditions: | ||
1323 | (1) The 'hp_blocked_tasks_lock' of task 't' is held. | ||
1324 | (2) The lock 'to_unlock' is held. | ||
1325 | */ | ||
1326 | static void nested_increase_priority_inheritance(struct task_struct* t, | ||
1327 | struct task_struct* prio_inh, | ||
1328 | raw_spinlock_t *to_unlock, | ||
1329 | unsigned long irqflags) | ||
1330 | { | ||
1331 | struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock; | ||
1332 | |||
1333 | if(tsk_rt(t)->inh_task != prio_inh) { // shield redundant calls. | ||
1334 | increase_priority_inheritance(t, prio_inh); // increase our prio. | ||
1335 | } | ||
1336 | |||
1337 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock the t's heap. | ||
1338 | |||
1339 | |||
1340 | if(blocked_lock) { | ||
1341 | if(blocked_lock->ops->propagate_increase_inheritance) { | ||
1342 | TRACE_TASK(t, "Inheritor is blocked (...perhaps). Checking lock %d.\n", | ||
1343 | blocked_lock->ident); | ||
1344 | |||
1345 | // beware: recursion | ||
1346 | blocked_lock->ops->propagate_increase_inheritance(blocked_lock, | ||
1347 | t, to_unlock, | ||
1348 | irqflags); | ||
1349 | } | ||
1350 | else { | ||
1351 | TRACE_TASK(t, "Inheritor is blocked on lock (%d) that does not support nesting!\n", | ||
1352 | blocked_lock->ident); | ||
1353 | unlock_fine_irqrestore(to_unlock, irqflags); | ||
1354 | } | ||
1355 | } | ||
1356 | else { | ||
1357 | TRACE_TASK(t, "is not blocked. No propagation.\n"); | ||
1358 | unlock_fine_irqrestore(to_unlock, irqflags); | ||
1359 | } | ||
1360 | } | ||
1361 | |||
1362 | /* called with IRQs off */ | ||
1363 | /* preconditions: | ||
1364 | (1) The 'hp_blocked_tasks_lock' of task 't' is held. | ||
1365 | (2) The lock 'to_unlock' is held. | ||
1366 | */ | ||
1367 | static void nested_decrease_priority_inheritance(struct task_struct* t, | ||
1368 | struct task_struct* prio_inh, | ||
1369 | raw_spinlock_t *to_unlock, | ||
1370 | unsigned long irqflags) | ||
1371 | { | ||
1372 | struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock; | ||
1373 | decrease_priority_inheritance(t, prio_inh); | ||
1374 | |||
1375 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock the t's heap. | ||
1376 | |||
1377 | if(blocked_lock) { | ||
1378 | if(blocked_lock->ops->propagate_decrease_inheritance) { | ||
1379 | TRACE_TASK(t, "Inheritor is blocked (...perhaps). Checking lock %d.\n", | ||
1380 | blocked_lock->ident); | ||
1381 | |||
1382 | // beware: recursion | ||
1383 | blocked_lock->ops->propagate_decrease_inheritance(blocked_lock, t, | ||
1384 | to_unlock, | ||
1385 | irqflags); | ||
1386 | } | ||
1387 | else { | ||
1388 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", | ||
1389 | blocked_lock); | ||
1390 | unlock_fine_irqrestore(to_unlock, irqflags); | ||
1391 | } | ||
1392 | } | ||
1393 | else { | ||
1394 | TRACE_TASK(t, "is not blocked. No propagation.\n"); | ||
1395 | unlock_fine_irqrestore(to_unlock, irqflags); | ||
1396 | } | ||
1397 | } | ||
1398 | |||
1399 | |||
1400 | /* ******************** RSM MUTEX ********************** */ | ||
1401 | |||
1402 | static struct litmus_lock_ops gsnedf_rsm_mutex_lock_ops = { | ||
1403 | .lock = rsm_mutex_lock, | ||
1404 | .unlock = rsm_mutex_unlock, | ||
1405 | .close = rsm_mutex_close, | ||
1406 | .deallocate = rsm_mutex_free, | ||
1407 | |||
1408 | .propagate_increase_inheritance = rsm_mutex_propagate_increase_inheritance, | ||
1409 | .propagate_decrease_inheritance = rsm_mutex_propagate_decrease_inheritance, | ||
1410 | |||
1411 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1412 | .dgl_lock = rsm_mutex_dgl_lock, | ||
1413 | .is_owner = rsm_mutex_is_owner, | ||
1414 | .enable_priority = rsm_mutex_enable_priority, | ||
1415 | #endif | ||
1416 | }; | ||
1417 | |||
1418 | static struct litmus_lock* gsnedf_new_rsm_mutex(void) | ||
1419 | { | ||
1420 | return rsm_mutex_new(&gsnedf_rsm_mutex_lock_ops); | ||
1421 | } | ||
1422 | |||
1423 | /* ******************** IKGLP ********************** */ | ||
1424 | |||
1425 | static struct litmus_lock_ops gsnedf_ikglp_lock_ops = { | ||
1426 | .lock = ikglp_lock, | ||
1427 | .unlock = ikglp_unlock, | ||
1428 | .close = ikglp_close, | ||
1429 | .deallocate = ikglp_free, | ||
1430 | |||
1431 | // ikglp can only be an outer-most lock. | ||
1432 | .propagate_increase_inheritance = NULL, | ||
1433 | .propagate_decrease_inheritance = NULL, | ||
1434 | }; | ||
1435 | |||
1436 | static struct litmus_lock* gsnedf_new_ikglp(void* __user arg) | ||
1437 | { | ||
1438 | return ikglp_new(num_online_cpus(), &gsnedf_ikglp_lock_ops, arg); | ||
1439 | } | ||
1440 | |||
1441 | #endif /* CONFIG_LITMUS_NESTED_LOCKING */ | ||
1442 | |||
1443 | |||
1444 | /* ******************** KFMLP support ********************** */ | ||
1445 | |||
1446 | static struct litmus_lock_ops gsnedf_kfmlp_lock_ops = { | ||
1447 | .lock = kfmlp_lock, | ||
1448 | .unlock = kfmlp_unlock, | ||
1449 | .close = kfmlp_close, | ||
1450 | .deallocate = kfmlp_free, | ||
1451 | |||
1452 | // kfmlp can only be an outer-most lock. | ||
1453 | .propagate_increase_inheritance = NULL, | ||
1454 | .propagate_decrease_inheritance = NULL, | ||
1455 | }; | ||
1456 | |||
1457 | |||
1458 | static struct litmus_lock* gsnedf_new_kfmlp(void* __user arg) | ||
1459 | { | ||
1460 | return kfmlp_new(&gsnedf_kfmlp_lock_ops, arg); | ||
1461 | } | ||
1462 | |||
735 | /* ******************** FMLP support ********************** */ | 1463 | /* ******************** FMLP support ********************** */ |
736 | 1464 | ||
737 | /* struct for semaphore with priority inheritance */ | 1465 | /* struct for semaphore with priority inheritance */ |
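The inheritance paths above distinguish a task's base priority from its effective priority and only honor a request when it moves the effective priority in the expected direction (__edf_higher_prio(prio_inh, BASE, t, EFFECTIVE) for increases, the reverse test for decreases); anything else is logged as a spurious update and dropped. A minimal sketch of the effective-priority notion these checks rely on; the helper below is an assumption for illustration and is not defined by this patch.

#include <linux/sched.h>
#include <litmus/litmus.h>

/* Illustrative assumption: a task's effective priority is represented by the
 * task it currently inherits from (inh_task), falling back to the task itself
 * when no inheritance is active; this mirrors how effective_priority() is used
 * in the TRACE_TASK() diagnostics above. An increase request is accepted only
 * if the donor's BASE priority beats t's current EFFECTIVE priority; a
 * decrease only if t's EFFECTIVE priority currently beats the donor's BASE. */
static inline struct task_struct* demo_effective_priority(struct task_struct *t)
{
	return tsk_rt(t)->inh_task ? tsk_rt(t)->inh_task : t;
}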
@@ -797,7 +1525,7 @@ int gsnedf_fmlp_lock(struct litmus_lock* l) | |||
797 | if (edf_higher_prio(t, sem->hp_waiter)) { | 1525 | if (edf_higher_prio(t, sem->hp_waiter)) { |
798 | sem->hp_waiter = t; | 1526 | sem->hp_waiter = t; |
799 | if (edf_higher_prio(t, sem->owner)) | 1527 | if (edf_higher_prio(t, sem->owner)) |
800 | set_priority_inheritance(sem->owner, sem->hp_waiter); | 1528 | increase_priority_inheritance(sem->owner, sem->hp_waiter); |
801 | } | 1529 | } |
802 | 1530 | ||
803 | TS_LOCK_SUSPEND; | 1531 | TS_LOCK_SUSPEND; |
@@ -865,7 +1593,7 @@ int gsnedf_fmlp_unlock(struct litmus_lock* l) | |||
865 | /* Well, if next is not the highest-priority waiter, | 1593 | /* Well, if next is not the highest-priority waiter, |
866 | * then it ought to inherit the highest-priority | 1594 | * then it ought to inherit the highest-priority |
867 | * waiter's priority. */ | 1595 | * waiter's priority. */ |
868 | set_priority_inheritance(next, sem->hp_waiter); | 1596 | increase_priority_inheritance(next, sem->hp_waiter); |
869 | } | 1597 | } |
870 | 1598 | ||
871 | /* wake up next */ | 1599 | /* wake up next */ |
@@ -876,7 +1604,7 @@ int gsnedf_fmlp_unlock(struct litmus_lock* l) | |||
876 | 1604 | ||
877 | /* we lose the benefit of priority inheritance (if any) */ | 1605 | /* we lose the benefit of priority inheritance (if any) */ |
878 | if (tsk_rt(t)->inh_task) | 1606 | if (tsk_rt(t)->inh_task) |
879 | clear_priority_inheritance(t); | 1607 | decrease_priority_inheritance(t, NULL); |
880 | 1608 | ||
881 | out: | 1609 | out: |
882 | spin_unlock_irqrestore(&sem->wait.lock, flags); | 1610 | spin_unlock_irqrestore(&sem->wait.lock, flags); |
@@ -914,6 +1642,11 @@ static struct litmus_lock_ops gsnedf_fmlp_lock_ops = { | |||
914 | .lock = gsnedf_fmlp_lock, | 1642 | .lock = gsnedf_fmlp_lock, |
915 | .unlock = gsnedf_fmlp_unlock, | 1643 | .unlock = gsnedf_fmlp_unlock, |
916 | .deallocate = gsnedf_fmlp_free, | 1644 | .deallocate = gsnedf_fmlp_free, |
1645 | |||
1646 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
1647 | .propagate_increase_inheritance = NULL, | ||
1648 | .propagate_decrease_inheritance = NULL | ||
1649 | #endif | ||
917 | }; | 1650 | }; |
918 | 1651 | ||
919 | static struct litmus_lock* gsnedf_new_fmlp(void) | 1652 | static struct litmus_lock* gsnedf_new_fmlp(void) |
@@ -932,47 +1665,121 @@ static struct litmus_lock* gsnedf_new_fmlp(void) | |||
932 | return &sem->litmus_lock; | 1665 | return &sem->litmus_lock; |
933 | } | 1666 | } |
934 | 1667 | ||
935 | /* **** lock constructor **** */ | ||
936 | |||
937 | 1668 | ||
938 | static long gsnedf_allocate_lock(struct litmus_lock **lock, int type, | 1669 | static long gsnedf_allocate_lock(struct litmus_lock **lock, int type, |
939 | void* __user unused) | 1670 | void* __user args) |
940 | { | 1671 | { |
941 | int err = -ENXIO; | 1672 | int err; |
942 | 1673 | ||
943 | /* GSN-EDF currently only supports the FMLP for global resources. */ | ||
944 | switch (type) { | 1674 | switch (type) { |
945 | 1675 | ||
946 | case FMLP_SEM: | 1676 | case FMLP_SEM: |
947 | /* Flexible Multiprocessor Locking Protocol */ | 1677 | /* Flexible Multiprocessor Locking Protocol */ |
948 | *lock = gsnedf_new_fmlp(); | 1678 | *lock = gsnedf_new_fmlp(); |
949 | if (*lock) | 1679 | break; |
950 | err = 0; | 1680 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
951 | else | 1681 | case RSM_MUTEX: |
952 | err = -ENOMEM; | 1682 | *lock = gsnedf_new_rsm_mutex(); |
953 | break; | 1683 | break; |
954 | 1684 | ||
1685 | case IKGLP_SEM: | ||
1686 | *lock = gsnedf_new_ikglp(args); | ||
1687 | break; | ||
1688 | #endif | ||
1689 | case KFMLP_SEM: | ||
1690 | *lock = gsnedf_new_kfmlp(args); | ||
1691 | break; | ||
1692 | default: | ||
1693 | err = -ENXIO; | ||
1694 | goto UNSUPPORTED_LOCK; | ||
955 | }; | 1695 | }; |
956 | 1696 | ||
1697 | if (*lock) | ||
1698 | err = 0; | ||
1699 | else | ||
1700 | err = -ENOMEM; | ||
1701 | |||
1702 | UNSUPPORTED_LOCK: | ||
957 | return err; | 1703 | return err; |
958 | } | 1704 | } |
959 | 1705 | ||
1706 | #endif // CONFIG_LITMUS_LOCKING | ||
1707 | |||
1708 | |||
1709 | |||
1710 | |||
1711 | |||
1712 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING | ||
1713 | static struct affinity_observer_ops gsnedf_kfmlp_affinity_ops = { | ||
1714 | .close = kfmlp_aff_obs_close, | ||
1715 | .deallocate = kfmlp_aff_obs_free, | ||
1716 | }; | ||
1717 | |||
1718 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
1719 | static struct affinity_observer_ops gsnedf_ikglp_affinity_ops = { | ||
1720 | .close = ikglp_aff_obs_close, | ||
1721 | .deallocate = ikglp_aff_obs_free, | ||
1722 | }; | ||
960 | #endif | 1723 | #endif |
961 | 1724 | ||
1725 | static long gsnedf_allocate_affinity_observer( | ||
1726 | struct affinity_observer **aff_obs, | ||
1727 | int type, | ||
1728 | void* __user args) | ||
1729 | { | ||
1730 | int err; | ||
1731 | |||
1732 | switch (type) { | ||
1733 | |||
1734 | case KFMLP_SIMPLE_GPU_AFF_OBS: | ||
1735 | *aff_obs = kfmlp_simple_gpu_aff_obs_new(&gsnedf_kfmlp_affinity_ops, args); | ||
1736 | break; | ||
1737 | |||
1738 | case KFMLP_GPU_AFF_OBS: | ||
1739 | *aff_obs = kfmlp_gpu_aff_obs_new(&gsnedf_kfmlp_affinity_ops, args); | ||
1740 | break; | ||
1741 | |||
1742 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
1743 | case IKGLP_SIMPLE_GPU_AFF_OBS: | ||
1744 | *aff_obs = ikglp_simple_gpu_aff_obs_new(&gsnedf_ikglp_affinity_ops, args); | ||
1745 | break; | ||
1746 | |||
1747 | case IKGLP_GPU_AFF_OBS: | ||
1748 | *aff_obs = ikglp_gpu_aff_obs_new(&gsnedf_ikglp_affinity_ops, args); | ||
1749 | break; | ||
1750 | #endif | ||
1751 | default: | ||
1752 | err = -ENXIO; | ||
1753 | goto UNSUPPORTED_AFF_OBS; | ||
1754 | }; | ||
1755 | |||
1756 | if (*aff_obs) | ||
1757 | err = 0; | ||
1758 | else | ||
1759 | err = -ENOMEM; | ||
1760 | |||
1761 | UNSUPPORTED_AFF_OBS: | ||
1762 | return err; | ||
1763 | } | ||
1764 | #endif | ||
1765 | |||
1766 | |||
1767 | |||
1768 | |||
962 | 1769 | ||
963 | static long gsnedf_activate_plugin(void) | 1770 | static long gsnedf_activate_plugin(void) |
964 | { | 1771 | { |
965 | int cpu; | 1772 | int cpu; |
966 | cpu_entry_t *entry; | 1773 | cpu_entry_t *entry; |
967 | 1774 | ||
968 | bheap_init(&gsnedf_cpu_heap); | 1775 | INIT_BINHEAP_HANDLE(&gsnedf_cpu_heap, cpu_lower_prio); |
969 | #ifdef CONFIG_RELEASE_MASTER | 1776 | #ifdef CONFIG_RELEASE_MASTER |
970 | gsnedf.release_master = atomic_read(&release_master_cpu); | 1777 | gsnedf.release_master = atomic_read(&release_master_cpu); |
971 | #endif | 1778 | #endif |
972 | 1779 | ||
973 | for_each_online_cpu(cpu) { | 1780 | for_each_online_cpu(cpu) { |
974 | entry = &per_cpu(gsnedf_cpu_entries, cpu); | 1781 | entry = &per_cpu(gsnedf_cpu_entries, cpu); |
975 | bheap_node_init(&entry->hn, entry); | 1782 | INIT_BINHEAP_NODE(&entry->hn); |
976 | entry->linked = NULL; | 1783 | entry->linked = NULL; |
977 | entry->scheduled = NULL; | 1784 | entry->scheduled = NULL; |
978 | #ifdef CONFIG_RELEASE_MASTER | 1785 | #ifdef CONFIG_RELEASE_MASTER |
@@ -986,6 +1793,20 @@ static long gsnedf_activate_plugin(void) | |||
986 | } | 1793 | } |
987 | #endif | 1794 | #endif |
988 | } | 1795 | } |
1796 | |||
1797 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
1798 | gsnedf_pending_tasklets.head = NULL; | ||
1799 | gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head); | ||
1800 | #endif | ||
1801 | |||
1802 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
1803 | spawn_klitirqd(NULL); | ||
1804 | #endif | ||
1805 | |||
1806 | #ifdef CONFIG_LITMUS_NVIDIA | ||
1807 | init_nvidia_info(); | ||
1808 | #endif | ||
1809 | |||
989 | return 0; | 1810 | return 0; |
990 | } | 1811 | } |
991 | 1812 | ||
@@ -1002,8 +1823,31 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = { | |||
1002 | .task_block = gsnedf_task_block, | 1823 | .task_block = gsnedf_task_block, |
1003 | .admit_task = gsnedf_admit_task, | 1824 | .admit_task = gsnedf_admit_task, |
1004 | .activate_plugin = gsnedf_activate_plugin, | 1825 | .activate_plugin = gsnedf_activate_plugin, |
1826 | .compare = edf_higher_prio, | ||
1005 | #ifdef CONFIG_LITMUS_LOCKING | 1827 | #ifdef CONFIG_LITMUS_LOCKING |
1006 | .allocate_lock = gsnedf_allocate_lock, | 1828 | .allocate_lock = gsnedf_allocate_lock, |
1829 | .increase_prio = increase_priority_inheritance, | ||
1830 | .decrease_prio = decrease_priority_inheritance, | ||
1831 | #endif | ||
1832 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
1833 | .nested_increase_prio = nested_increase_priority_inheritance, | ||
1834 | .nested_decrease_prio = nested_decrease_priority_inheritance, | ||
1835 | .__compare = __edf_higher_prio, | ||
1836 | #endif | ||
1837 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1838 | .get_dgl_spinlock = gsnedf_get_dgl_spinlock, | ||
1839 | #endif | ||
1840 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING | ||
1841 | .allocate_aff_obs = gsnedf_allocate_affinity_observer, | ||
1842 | #endif | ||
1843 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
1844 | .increase_prio_klitirqd = increase_priority_inheritance_klitirqd, | ||
1845 | .decrease_prio_klitirqd = decrease_priority_inheritance_klitirqd, | ||
1846 | #endif | ||
1847 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
1848 | .enqueue_pai_tasklet = gsnedf_enqueue_pai_tasklet, | ||
1849 | .change_prio_pai_tasklet = gsnedf_change_prio_pai_tasklet, | ||
1850 | .run_tasklets = gsnedf_run_tasklets, | ||
1007 | #endif | 1851 | #endif |
1008 | }; | 1852 | }; |
1009 | 1853 | ||
@@ -1013,15 +1857,20 @@ static int __init init_gsn_edf(void) | |||
1013 | int cpu; | 1857 | int cpu; |
1014 | cpu_entry_t *entry; | 1858 | cpu_entry_t *entry; |
1015 | 1859 | ||
1016 | bheap_init(&gsnedf_cpu_heap); | 1860 | INIT_BINHEAP_HANDLE(&gsnedf_cpu_heap, cpu_lower_prio); |
1017 | /* initialize CPU state */ | 1861 | /* initialize CPU state */ |
1018 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | 1862 | for (cpu = 0; cpu < NR_CPUS; ++cpu) { |
1019 | entry = &per_cpu(gsnedf_cpu_entries, cpu); | 1863 | entry = &per_cpu(gsnedf_cpu_entries, cpu); |
1020 | gsnedf_cpus[cpu] = entry; | 1864 | gsnedf_cpus[cpu] = entry; |
1021 | entry->cpu = cpu; | 1865 | entry->cpu = cpu; |
1022 | entry->hn = &gsnedf_heap_node[cpu]; | 1866 | |
1023 | bheap_node_init(&entry->hn, entry); | 1867 | INIT_BINHEAP_NODE(&entry->hn); |
1024 | } | 1868 | } |
1869 | |||
1870 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1871 | raw_spin_lock_init(&dgl_lock); | ||
1872 | #endif | ||
1873 | |||
1025 | edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs); | 1874 | edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs); |
1026 | return register_sched_plugin(&gsn_edf_plugin); | 1875 | return register_sched_plugin(&gsn_edf_plugin); |
1027 | } | 1876 | } |