aboutsummaryrefslogtreecommitdiffstats
path: root/litmus
diff options
context:
space:
mode:
Diffstat (limited to 'litmus')
-rw-r--r--litmus/fp_common.c19
-rw-r--r--litmus/sched_pfp.c7
2 files changed, 11 insertions, 15 deletions
diff --git a/litmus/fp_common.c b/litmus/fp_common.c
index 7898955fd991..8536da9ca98a 100644
--- a/litmus/fp_common.c
+++ b/litmus/fp_common.c
@@ -66,13 +66,9 @@ int fp_higher_prio(struct task_struct* first,
 
 	return !is_realtime(second_task) ||
 
-	       /* is the deadline of the first task earlier?
-	        * Then it has higher priority.
-	        */
 	       get_priority(first_task) < get_priority(second_task) ||
 
-	       /* Do we have a deadline tie?
-	        * Then break by PID.
-	        */
+	       /* Break by PID.
+	        */
 	       (get_priority(first_task) == get_priority(second_task) &&
 	        (first_task->pid < second_task->pid ||
@@ -97,18 +93,19 @@ void fp_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
 
 /* need_to_preempt - check whether the task t needs to be preempted
  */
-int fp_preemption_needed(rt_domain_t* rt, struct task_struct *t)
+int fp_preemption_needed(struct fp_prio_queue *q, struct task_struct *t)
 {
-	/* we need the read lock for edf_ready_queue */
-	/* no need to preempt if there is nothing pending */
-	if (!__jobs_pending(rt))
+	struct task_struct *pending;
+
+	pending = fp_prio_peek(q);
+
+	if (!pending)
 		return 0;
-	/* we need to reschedule if t doesn't exist */
 	if (!t)
 		return 1;
 
 	/* make sure to get non-rt stuff out of the way */
-	return !is_realtime(t) || fp_higher_prio(__next_ready(rt), t);
+	return !is_realtime(t) || fp_higher_prio(pending, t);
 }
 
 void fp_prio_queue_init(struct fp_prio_queue* q)
diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c
index 926c391e7c34..cd9c08c236e8 100644
--- a/litmus/sched_pfp.c
+++ b/litmus/sched_pfp.c
@@ -138,7 +138,6 @@ static void pfp_tick(struct task_struct *t)
 static struct task_struct* pfp_schedule(struct task_struct * prev)
 {
 	pfp_domain_t* 		pfp = local_pfp;
-	rt_domain_t*		dom = &pfp->domain;
 	struct task_struct*	next;
 
 	int out_of_time, sleep, preempt,
@@ -161,7 +160,7 @@ static struct task_struct* pfp_schedule(struct task_struct * prev)
 		      budget_exhausted(pfp->scheduled);
 	np 	    = exists && is_np(pfp->scheduled);
 	sleep	    = exists && get_rt_flags(pfp->scheduled) == RT_F_SLEEP;
-	preempt     = fp_preemption_needed(dom, prev);
+	preempt     = fp_preemption_needed(&pfp->ready_queue, prev);
 
 	/* If we need to preempt do so.
 	 * The following checks set resched to 1 in case of special
@@ -347,7 +346,7 @@ static void boost_priority(struct task_struct* t)
 		    /* If it is queued, then we need to re-order. */
 		    bheap_decrease(fp_ready_order, tsk_rt(t)->heap_node) &&
 		    /* If we bubbled to the top, then we need to check for preemptions. */
-		    fp_preemption_needed(&pfp->domain, pfp->scheduled))
+		    fp_preemption_needed(&pfp->ready_queue, pfp->scheduled))
 			preempt(pfp);
 		raw_spin_unlock(&pfp->domain.release_lock);
 	} /* else: nothing to do since the job is not queued while scheduled */
@@ -376,7 +375,7 @@ static void unboost_priority(struct task_struct* t)
 	tsk_rt(t)->boost_start_time = 0;
 
 	/* check if this changes anything */
-	if (fp_preemption_needed(&pfp->domain, pfp->scheduled))
+	if (fp_preemption_needed(&pfp->ready_queue, pfp->scheduled))
 		preempt(pfp);
 
 	raw_spin_unlock_irqrestore(&pfp->slock, flags);