author     Bjoern B. Brandenburg <bbb@cs.unc.edu>    2011-01-28 17:30:14 -0500
committer  Bjoern B. Brandenburg <bbb@cs.unc.edu>    2011-02-01 16:30:40 -0500
commit     fc6482bb7a6a638474565c90159997bd59069297 (patch)
tree       01aaffcba1ad903c89d2b8a90e37ad8d46b1b9f2
parent     e1b81e70c3af9d19d639bc8bdaa5a8fc13bf17a8 (diff)
FMLP: remove old implementation
-rw-r--r--  include/litmus/edf_common.h |   2
-rw-r--r--  litmus/Makefile             |   1
-rw-r--r--  litmus/fdso.c               |   3
-rw-r--r--  litmus/fmlp.c               | 214
-rw-r--r--  litmus/sched_gsn_edf.c      | 155
-rw-r--r--  litmus/sched_psn_edf.c      | 126
6 files changed, 1 insertion(+), 500 deletions(-)
diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h
index 80d4321cc87e..bbaf22ea7f12 100644
--- a/include/litmus/edf_common.h
+++ b/include/litmus/edf_common.h
@@ -22,6 +22,4 @@ int edf_ready_order(struct bheap_node* a, struct bheap_node* b);
 
 int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t);
 
-int edf_set_hp_task(struct pi_semaphore *sem);
-int edf_set_hp_cpu_task(struct pi_semaphore *sem, int cpu);
 #endif
diff --git a/litmus/Makefile b/litmus/Makefile
index 4e019d4a6e0c..62a20e266eeb 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -13,7 +13,6 @@ obj-y = sched_plugin.o litmus.o \
         fdso.o \
         locking.o \
         srp.o \
-        fmlp.o \
         bheap.o \
         ctrldev.o \
         sched_gsn_edf.o \
diff --git a/litmus/fdso.c b/litmus/fdso.c
index 209431f3ce11..b3a95f13d651 100644
--- a/litmus/fdso.c
+++ b/litmus/fdso.c
@@ -18,11 +18,10 @@
 
 #include <litmus/fdso.h>
 
-extern struct fdso_ops fmlp_sem_ops;
 extern struct fdso_ops generic_lock_ops;
 
 static const struct fdso_ops* fdso_ops[] = {
-        &fmlp_sem_ops,
+        &generic_lock_ops, /* FMLP_SEM */
         &generic_lock_ops, /* SRP_SEM */
 };
 
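For context, the fdso.c hunk above carries the commit's single insertion: FMLP_SEM now points at the same generic_lock_ops entry that already serves SRP_SEM, so file-descriptor-attached objects are dispatched through a table of operation structs indexed by object type. The following self-contained sketch (hypothetical names, not the LITMUS^RT fdso API) illustrates that dispatch pattern:

/* Illustrative sketch only; hypothetical names, not the LITMUS^RT fdso API.
 * Shows the "ops table indexed by object type" dispatch used above. */
#include <stdio.h>
#include <stdlib.h>

typedef enum { DEMO_FMLP_SEM, DEMO_SRP_SEM, DEMO_MAX_OBJ } demo_obj_type_t;

struct demo_ops {
        void* (*create)(demo_obj_type_t type);
        void  (*destroy)(demo_obj_type_t type, void *obj);
};

/* one generic implementation serving several object types */
static void *generic_create(demo_obj_type_t type)
{
        printf("creating object of type %d via generic ops\n", type);
        return malloc(1);
}

static void generic_destroy(demo_obj_type_t type, void *obj)
{
        printf("destroying object of type %d\n", type);
        free(obj);
}

static const struct demo_ops generic_lock_ops_demo = {
        .create = generic_create,
        .destroy = generic_destroy,
};

/* analogous to fdso_ops[]: both semaphore types share the generic entry */
static const struct demo_ops *demo_ops_table[DEMO_MAX_OBJ] = {
        &generic_lock_ops_demo, /* DEMO_FMLP_SEM */
        &generic_lock_ops_demo, /* DEMO_SRP_SEM  */
};

int main(void)
{
        void *sem = demo_ops_table[DEMO_FMLP_SEM]->create(DEMO_FMLP_SEM);
        demo_ops_table[DEMO_FMLP_SEM]->destroy(DEMO_FMLP_SEM, sem);
        return 0;
}

Indexing the table by object type is what makes the one-line substitution above sufficient: the FMLP_SEM slot simply points at a different implementation.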
diff --git a/litmus/fmlp.c b/litmus/fmlp.c
deleted file mode 100644
index 6e3ddadbc429..000000000000
--- a/litmus/fmlp.c
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * FMLP implementation.
- * Much of the code here is borrowed from include/asm-i386/semaphore.h
- */
-
-#include <asm/atomic.h>
-
-#include <linux/semaphore.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-
-#include <litmus/litmus.h>
-#include <litmus/sched_plugin.h>
-#include <litmus/edf_common.h>
-
-#include <litmus/fdso.h>
-
-#include <litmus/trace.h>
-
-#ifdef CONFIG_FMLP
-
-static void* create_fmlp_semaphore(obj_type_t type)
-{
-        struct pi_semaphore* sem;
-        int i;
-
-        sem = kmalloc(sizeof(*sem), GFP_KERNEL);
-        if (!sem)
-                return NULL;
-        atomic_set(&sem->count, 1);
-        sem->sleepers = 0;
-        init_waitqueue_head(&sem->wait);
-        sem->hp.task = NULL;
-        sem->holder = NULL;
-        for (i = 0; i < NR_CPUS; i++)
-                sem->hp.cpu_task[i] = NULL;
-        return sem;
-}
-
-static int open_fmlp_semaphore(struct od_table_entry* entry, void* __user arg)
-{
-        if (!fmlp_active())
-                return -EBUSY;
-        return 0;
-}
-
-static void destroy_fmlp_semaphore(obj_type_t type, void* sem)
-{
-        /* XXX assert invariants */
-        kfree(sem);
-}
-
-struct fdso_ops fmlp_sem_ops = {
-        .create = create_fmlp_semaphore,
-        .open = open_fmlp_semaphore,
-        .destroy = destroy_fmlp_semaphore
-};
-
-struct wq_pair {
-        struct task_struct* tsk;
-        struct pi_semaphore* sem;
-};
-
-static int rt_pi_wake_up(wait_queue_t *wait, unsigned mode, int sync,
-                         void *key)
-{
-        struct wq_pair* wqp = (struct wq_pair*) wait->private;
-        set_rt_flags(wqp->tsk, RT_F_EXIT_SEM);
-        litmus->inherit_priority(wqp->sem, wqp->tsk);
-        TRACE_TASK(wqp->tsk,
-                   "woken up by rt_pi_wake_up() (RT_F_SEM_EXIT, PI)\n");
-        /* point to task for default_wake_function() */
-        wait->private = wqp->tsk;
-        default_wake_function(wait, mode, sync, key);
-
-        /* Always return true since we know that if we encountered a task
-         * that was already running the wake_up raced with the schedule in
-         * rt_pi_down(). In that case the task in rt_pi_down() will be scheduled
-         * immediately and own the lock. We must not wake up another task in
-         * any case.
-         */
-        return 1;
-}
-
-/* caller is responsible for locking */
-int edf_set_hp_task(struct pi_semaphore *sem)
-{
-        struct list_head *tmp, *next;
-        struct task_struct *queued;
-        int ret = 0;
-
-        sem->hp.task = NULL;
-        list_for_each_safe(tmp, next, &sem->wait.task_list) {
-                queued = ((struct wq_pair*)
-                          list_entry(tmp, wait_queue_t,
-                                     task_list)->private)->tsk;
-
-                /* Compare task prios, find high prio task. */
-                if (edf_higher_prio(queued, sem->hp.task)) {
-                        sem->hp.task = queued;
-                        ret = 1;
-                }
-        }
-        return ret;
-}
-
-/* caller is responsible for locking */
-int edf_set_hp_cpu_task(struct pi_semaphore *sem, int cpu)
-{
-        struct list_head *tmp, *next;
-        struct task_struct *queued;
-        int ret = 0;
-
-        sem->hp.cpu_task[cpu] = NULL;
-        list_for_each_safe(tmp, next, &sem->wait.task_list) {
-                queued = ((struct wq_pair*)
-                          list_entry(tmp, wait_queue_t,
-                                     task_list)->private)->tsk;
-
-                /* Compare task prios, find high prio task. */
-                if (get_partition(queued) == cpu &&
-                    edf_higher_prio(queued, sem->hp.cpu_task[cpu])) {
-                        sem->hp.cpu_task[cpu] = queued;
-                        ret = 1;
-                }
-        }
-        return ret;
-}
-
-static int do_fmlp_down(struct pi_semaphore* sem)
-{
-        unsigned long flags;
-        struct task_struct *tsk = current;
-        struct wq_pair pair;
-        int suspended = 1;
-        wait_queue_t wait = {
-                .private = &pair,
-                .func = rt_pi_wake_up,
-                .task_list = {NULL, NULL}
-        };
-
-        pair.tsk = tsk;
-        pair.sem = sem;
-        spin_lock_irqsave(&sem->wait.lock, flags);
-
-        if (atomic_dec_return(&sem->count) < 0 ||
-            waitqueue_active(&sem->wait)) {
-                /* we need to suspend */
-                tsk->state = TASK_UNINTERRUPTIBLE;
-                __add_wait_queue_tail_exclusive(&sem->wait, &wait);
-
-                TRACE_CUR("suspends on PI lock %p\n", sem);
-                litmus->pi_block(sem, tsk);
-
-                /* release lock before sleeping */
-                spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-                TS_PI_DOWN_END;
-                preempt_enable_no_resched();
-
-
-                /* we depend on the FIFO order
-                 * Thus, we don't need to recheck when we wake up, we
-                 * are guaranteed to have the lock since there is only one
-                 * wake up per release
-                 */
-                schedule();
-
-                TRACE_CUR("woke up, now owns PI lock %p\n", sem);
-
-                /* try_to_wake_up() set our state to TASK_RUNNING,
-                 * all we need to do is to remove our wait queue entry
-                 */
-                remove_wait_queue(&sem->wait, &wait);
-        } else {
-                /* no priority inheritance necessary, since there are no queued
-                 * tasks.
-                 */
-                suspended = 0;
-                TRACE_CUR("acquired PI lock %p, no contention\n", sem);
-                sem->holder = tsk;
-
-                /* don't know if we're global or partitioned. */
-                sem->hp.task = tsk;
-                sem->hp.cpu_task[get_partition(tsk)] = tsk;
-
-                litmus->inherit_priority(sem, tsk);
-                spin_unlock_irqrestore(&sem->wait.lock, flags);
-        }
-        return suspended;
-}
-
-static void do_fmlp_up(struct pi_semaphore* sem)
-{
-        unsigned long flags;
-
-        spin_lock_irqsave(&sem->wait.lock, flags);
-
-        TRACE_CUR("releases PI lock %p\n", sem);
-        litmus->return_priority(sem);
-        sem->holder = NULL;
-        if (atomic_inc_return(&sem->count) < 1)
-                /* there is a task queued */
-                wake_up_locked(&sem->wait);
-
-        spin_unlock_irqrestore(&sem->wait.lock, flags);
-}
-
-#else
-
-struct fdso_ops fmlp_sem_ops = {};
-
-#endif
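The deleted do_fmlp_down()/do_fmlp_up() pair above implements a suspension-based FIFO semaphore: the atomic counter decides whether the caller must block, each release wakes exactly one waiter, and, as the in-code comment notes, FIFO ordering lets a woken task assume ownership without re-checking. A rough userspace analogue of that counting and FIFO discipline (a sketch with hypothetical names, using pthreads, and without the priority-inheritance hooks) could look like this:

/* Rough userspace analogue of the removed FIFO semaphore logic.
 * Hypothetical names; no priority inheritance, no LITMUS^RT calls. */
#include <pthread.h>

struct fifo_sem {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int count;              /* 1 = free, 0 = held, -n = held with n waiters */
        unsigned long enqueued; /* FIFO slots handed out to waiters */
        unsigned long granted;  /* wake-ups granted so far, one per up() */
};

void fifo_sem_init(struct fifo_sem *s)
{
        pthread_mutex_init(&s->lock, NULL);
        pthread_cond_init(&s->cond, NULL);
        s->count = 1;
        s->enqueued = s->granted = 0;
}

/* down(): like do_fmlp_down(), suspend only if the semaphore is contended;
 * FIFO ordering guarantees the woken waiter owns the lock. */
void fifo_sem_down(struct fifo_sem *s)
{
        pthread_mutex_lock(&s->lock);
        if (--s->count < 0) {
                unsigned long slot = s->enqueued++;
                while (s->granted <= slot)      /* wait for our FIFO turn */
                        pthread_cond_wait(&s->cond, &s->lock);
        }
        pthread_mutex_unlock(&s->lock);
}

/* up(): like do_fmlp_up(), wake exactly one waiter iff somebody is queued. */
void fifo_sem_up(struct fifo_sem *s)
{
        pthread_mutex_lock(&s->lock);
        if (++s->count < 1) {
                s->granted++;                   /* hand the lock to the next waiter */
                pthread_cond_broadcast(&s->cond);
        }
        pthread_mutex_unlock(&s->lock);
}

int main(void)
{
        struct fifo_sem s;

        fifo_sem_init(&s);
        fifo_sem_down(&s);      /* uncontended: no suspension */
        fifo_sem_up(&s);
        return 0;
}

The removed kernel version additionally drives priority inheritance through litmus->pi_block(), litmus->inherit_priority(), and litmus->return_priority(), hooks that have no counterpart in this simplified sketch.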
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 4ad95dba4a04..5de0980e3faa 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -594,161 +594,6 @@ static void gsnedf_task_exit(struct task_struct * t)
         TRACE_TASK(t, "RIP\n");
 }
 
-#if 0
-
-/* Update the queue position of a task that got it's priority boosted via
- * priority inheritance. */
-static void update_queue_position(struct task_struct *holder)
-{
-        /* We don't know whether holder is in the ready queue. It should, but
-         * on a budget overrun it may already be in a release queue. Hence,
-         * calling unlink() is not possible since it assumes that the task is
-         * not in a release queue. However, we can safely check whether
-         * sem->holder is currently in a queue or scheduled after locking both
-         * the release and the ready queue lock. */
-
-        /* Assumption: caller holds gsnedf_lock */
-
-        int check_preempt = 0;
-
-        if (tsk_rt(holder)->linked_on != NO_CPU) {
-                TRACE_TASK(holder, "%s: linked on %d\n",
-                           __FUNCTION__, tsk_rt(holder)->linked_on);
-                /* Holder is scheduled; need to re-order CPUs.
-                 * We can't use heap_decrease() here since
-                 * the cpu_heap is ordered in reverse direction, so
-                 * it is actually an increase. */
-                bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap,
-                             gsnedf_cpus[tsk_rt(holder)->linked_on]->hn);
-                bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap,
-                             gsnedf_cpus[tsk_rt(holder)->linked_on]->hn);
-        } else {
-                /* holder may be queued: first stop queue changes */
-                raw_spin_lock(&gsnedf.release_lock);
-                if (is_queued(holder)) {
-                        TRACE_TASK(holder, "%s: is queued\n",
-                                   __FUNCTION__);
-                        /* We need to update the position
-                         * of holder in some heap. Note that this
-                         * may be a release heap. */
-                        check_preempt =
-                                !bheap_decrease(edf_ready_order,
-                                                tsk_rt(holder)->heap_node);
-                } else {
-                        /* Nothing to do: if it is not queued and not linked
-                         * then it is currently being moved by other code
-                         * (e.g., a timer interrupt handler) that will use the
-                         * correct priority when enqueuing the task. */
-                        TRACE_TASK(holder, "%s: is NOT queued => Done.\n",
-                                   __FUNCTION__);
-                }
-                raw_spin_unlock(&gsnedf.release_lock);
-
-                /* If holder was enqueued in a release heap, then the following
-                 * preemption check is pointless, but we can't easily detect
-                 * that case. If you want to fix this, then consider that
-                 * simply adding a state flag requires O(n) time to update when
-                 * releasing n tasks, which conflicts with the goal to have
-                 * O(log n) merges. */
-                if (check_preempt) {
-                        /* heap_decrease() hit the top level of the heap: make
-                         * sure preemption checks get the right task, not the
-                         * potentially stale cache. */
-                        bheap_uncache_min(edf_ready_order,
-                                          &gsnedf.ready_queue);
-                        check_for_preemptions();
-                }
-        }
-}
-
-static long gsnedf_pi_block(struct pi_semaphore *sem,
-                            struct task_struct *new_waiter)
-{
-        /* This callback has to handle the situation where a new waiter is
-         * added to the wait queue of the semaphore.
-         *
-         * We must check if has a higher priority than the currently
-         * highest-priority task, and then potentially reschedule.
-         */
-
-        BUG_ON(!new_waiter);
-
-        if (edf_higher_prio(new_waiter, sem->hp.task)) {
-                TRACE_TASK(new_waiter, " boosts priority via %p\n", sem);
-                /* called with IRQs disabled */
-                raw_spin_lock(&gsnedf_lock);
-                /* store new highest-priority task */
-                sem->hp.task = new_waiter;
-                if (sem->holder) {
-                        TRACE_TASK(sem->holder,
-                                   " holds %p and will inherit from %s/%d\n",
-                                   sem,
-                                   new_waiter->comm, new_waiter->pid);
-                        /* let holder inherit */
-                        sem->holder->rt_param.inh_task = new_waiter;
-                        update_queue_position(sem->holder);
-                }
-                raw_spin_unlock(&gsnedf_lock);
-        }
-
-        return 0;
-}
-
-static long gsnedf_inherit_priority(struct pi_semaphore *sem,
-                                    struct task_struct *new_owner)
-{
-        /* We don't need to acquire the gsnedf_lock since at the time of this
-         * call new_owner isn't actually scheduled yet (it's still sleeping)
-         * and since the calling function already holds sem->wait.lock, which
-         * prevents concurrent sem->hp.task changes.
-         */
-
-        if (sem->hp.task && sem->hp.task != new_owner) {
-                new_owner->rt_param.inh_task = sem->hp.task;
-                TRACE_TASK(new_owner, "inherited priority from %s/%d\n",
-                           sem->hp.task->comm, sem->hp.task->pid);
-        } else
-                TRACE_TASK(new_owner,
-                           "cannot inherit priority, "
-                           "no higher priority job waits.\n");
-        return 0;
-}
-
-/* This function is called on a semaphore release, and assumes that
- * the current task is also the semaphore holder.
- */
-static long gsnedf_return_priority(struct pi_semaphore *sem)
-{
-        struct task_struct* t = current;
-        int ret = 0;
-
-        /* Find new highest-priority semaphore task
-         * if holder task is the current hp.task.
-         *
-         * Calling function holds sem->wait.lock.
-         */
-        if (t == sem->hp.task)
-                edf_set_hp_task(sem);
-
-        TRACE_CUR("gsnedf_return_priority for lock %p\n", sem);
-
-        if (t->rt_param.inh_task) {
-                /* interrupts already disabled by PI code */
-                raw_spin_lock(&gsnedf_lock);
-
-                /* Reset inh_task to NULL. */
-                t->rt_param.inh_task = NULL;
-
-                /* Check if rescheduling is necessary */
-                unlink(t);
-                gsnedf_job_arrival(t);
-                raw_spin_unlock(&gsnedf_lock);
-        }
-
-        return ret;
-}
-
-#endif
 
 static long gsnedf_admit_task(struct task_struct* tsk)
 {
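The removed (and already #if 0'd) GSN-EDF callbacks above implement priority inheritance under EDF: the semaphore records its highest-priority waiter in sem->hp.task, the lock holder points at that waiter via rt_param.inh_task, and gsnedf_return_priority() clears the pointer and requeues the holder on release. The underlying idea, that a task's effective priority is the earlier of its own deadline and that of the task it inherits from, can be shown with a small stand-alone sketch (hypothetical types, not the plugin API):

/* Illustrative EDF priority-inheritance bookkeeping; hypothetical types. */
#include <stdio.h>

struct demo_task {
        const char *name;
        unsigned long long deadline;        /* absolute deadline; smaller = higher prio */
        const struct demo_task *inh_task;   /* task whose priority we inherit, or NULL */
};

/* effective deadline: own deadline, or the inherited task's if that is earlier */
static unsigned long long effective_deadline(const struct demo_task *t)
{
        if (t->inh_task && t->inh_task->deadline < t->deadline)
                return t->inh_task->deadline;
        return t->deadline;
}

/* edf_higher_prio() analogue, applied to effective deadlines */
static int demo_higher_prio(const struct demo_task *a, const struct demo_task *b)
{
        return effective_deadline(a) < effective_deadline(b);
}

int main(void)
{
        struct demo_task low  = { "low",  1000, NULL };
        struct demo_task high = { "high",  100, NULL };

        /* high blocks on a lock held by low: low inherits high's priority */
        low.inh_task = &high;

        printf("low's effective deadline: %llu\n", effective_deadline(&low));
        printf("low now beats a task with deadline 500: %d\n",
               demo_higher_prio(&low, &(struct demo_task){ "mid", 500, NULL }));
        return 0;
}

In the removed code, the extra work lives in update_queue_position(): once the holder's effective priority changes, its position in the CPU heap or ready queue has to be adjusted and a preemption check issued.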
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index c1e27960576b..fc64c1722ae9 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -309,132 +309,6 @@ static void psnedf_task_exit(struct task_struct * t)
         raw_spin_unlock_irqrestore(&pedf->slock, flags);
 }
 
-#if 0
-static long psnedf_pi_block(struct pi_semaphore *sem,
-                            struct task_struct *new_waiter)
-{
-        psnedf_domain_t* pedf;
-        rt_domain_t* edf;
-        struct task_struct* t;
-        int cpu = get_partition(new_waiter);
-
-        BUG_ON(!new_waiter);
-
-        if (edf_higher_prio(new_waiter, sem->hp.cpu_task[cpu])) {
-                TRACE_TASK(new_waiter, " boosts priority\n");
-                pedf = task_pedf(new_waiter);
-                edf = task_edf(new_waiter);
-
-                /* interrupts already disabled */
-                raw_spin_lock(&pedf->slock);
-
-                /* store new highest-priority task */
-                sem->hp.cpu_task[cpu] = new_waiter;
-                if (sem->holder &&
-                    get_partition(sem->holder) == get_partition(new_waiter)) {
-                        /* let holder inherit */
-                        sem->holder->rt_param.inh_task = new_waiter;
-                        t = sem->holder;
-                        if (is_queued(t)) {
-                                /* queued in domain*/
-                                remove(edf, t);
-                                /* readd to make priority change take place */
-                                /* FIXME: this looks outdated */
-                                if (is_released(t, litmus_clock()))
-                                        __add_ready(edf, t);
-                                else
-                                        add_release(edf, t);
-                        }
-                }
-
-                /* check if we need to reschedule */
-                if (edf_preemption_needed(edf, current))
-                        preempt(pedf);
-
-                raw_spin_unlock(&pedf->slock);
-        }
-
-        return 0;
-}
-
-static long psnedf_inherit_priority(struct pi_semaphore *sem,
-                                    struct task_struct *new_owner)
-{
-        int cpu = get_partition(new_owner);
-
-        new_owner->rt_param.inh_task = sem->hp.cpu_task[cpu];
-        if (sem->hp.cpu_task[cpu] && new_owner != sem->hp.cpu_task[cpu]) {
-                TRACE_TASK(new_owner,
-                           "inherited priority from %s/%d\n",
-                           sem->hp.cpu_task[cpu]->comm,
-                           sem->hp.cpu_task[cpu]->pid);
-        } else
-                TRACE_TASK(new_owner,
-                           "cannot inherit priority: "
-                           "no higher priority job waits on this CPU!\n");
-        /* make new owner non-preemptable as required by FMLP under
-         * PSN-EDF.
-         */
-        make_np(new_owner);
-        return 0;
-}
-
-
-/* This function is called on a semaphore release, and assumes that
- * the current task is also the semaphore holder.
- */
-static long psnedf_return_priority(struct pi_semaphore *sem)
-{
-        struct task_struct* t = current;
-        psnedf_domain_t* pedf = task_pedf(t);
-        rt_domain_t* edf = task_edf(t);
-        int ret = 0;
-        int cpu = get_partition(current);
-        int still_np;
-
-
-        /* Find new highest-priority semaphore task
-         * if holder task is the current hp.cpu_task[cpu].
-         *
-         * Calling function holds sem->wait.lock.
-         */
-        if (t == sem->hp.cpu_task[cpu])
-                edf_set_hp_cpu_task(sem, cpu);
-
-        still_np = take_np(current);
-
-        /* Since we don't nest resources, this
-         * should always be zero */
-        BUG_ON(still_np);
-
-        if (current->rt_param.inh_task) {
-                TRACE_CUR("return priority of %s/%d\n",
-                          current->rt_param.inh_task->comm,
-                          current->rt_param.inh_task->pid);
-        } else
-                TRACE_CUR(" no priority to return %p\n", sem);
-
-
-        /* Always check for delayed preemptions that might have become
-         * necessary due to non-preemptive execution.
-         */
-        raw_spin_lock(&pedf->slock);
-
-        /* Reset inh_task to NULL. */
-        current->rt_param.inh_task = NULL;
-
-        /* check if we need to reschedule */
-        if (edf_preemption_needed(edf, current))
-                preempt(pedf);
-
-        raw_spin_unlock(&pedf->slock);
-
-
-        return ret;
-}
-
-#endif
-
 #ifdef CONFIG_LITMUS_LOCKING
 
 #include <litmus/fdso.h>
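In the PSN-EDF version above, inheritance is combined with non-preemptive execution: psnedf_inherit_priority() makes the new lock owner non-preemptable via make_np(), and psnedf_return_priority() drops that state with take_np() and then re-runs the preemption check, so a preemption that became necessary during the critical section is delivered right after release. A minimal sketch of this "defer while non-preemptive, act on exit" pattern (hypothetical names, a single flag instead of LITMUS^RT's np mechanism) follows:

/* Sketch of deferred preemption around a non-preemptive section.
 * Hypothetical names; the real code uses make_np()/take_np() and
 * edf_preemption_needed()/preempt(). */
#include <stdbool.h>
#include <stdio.h>

struct demo_cpu {
        bool non_preemptive;    /* set while holding the FMLP lock */
        bool preempt_pending;   /* a higher-priority job arrived meanwhile */
};

static void demo_request_preemption(struct demo_cpu *cpu)
{
        if (cpu->non_preemptive)
                cpu->preempt_pending = true;    /* defer: holder must not be preempted */
        else
                printf("preempt now\n");
}

static void demo_lock_acquired(struct demo_cpu *cpu)
{
        cpu->non_preemptive = true;             /* make_np() analogue */
}

static void demo_lock_released(struct demo_cpu *cpu)
{
        cpu->non_preemptive = false;            /* take_np() analogue */
        if (cpu->preempt_pending) {             /* delayed preemption check */
                cpu->preempt_pending = false;
                printf("preempt now (was deferred)\n");
        }
}

int main(void)
{
        struct demo_cpu cpu = { false, false };

        demo_lock_acquired(&cpu);
        demo_request_preemption(&cpu);  /* deferred */
        demo_lock_released(&cpu);       /* acts on the deferred preemption */
        return 0;
}

In the removed code, the counterpart of demo_lock_released() is the tail of psnedf_return_priority(), which performs the check under pedf->slock.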