path: root/litmus/sched_psn_edf.c
author		Glenn Elliott <gelliott@cs.unc.edu>	2013-04-05 18:01:21 -0400
committer	Glenn Elliott <gelliott@cs.unc.edu>	2013-04-05 18:01:21 -0400
commit		bf7f892f22a6a6804f09168256226cc6c2bc230c (patch)
tree		673a806a5a632a0016eb1ef78be01437a49a2a5c /litmus/sched_psn_edf.c
parent		c4954ee90811cde64e11cc71bd01404871126206 (diff)
resolve rebase issues
Diffstat (limited to 'litmus/sched_psn_edf.c')
-rw-r--r--	litmus/sched_psn_edf.c	24
1 file changed, 12 insertions(+), 12 deletions(-)
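
The change is a mechanical rename: every place where litmus/sched_psn_edf.c takes or releases the per-CPU ready-queue lock pedf->slock moves from the raw_spin_lock_* primitives to the corresponding raw_readyq_* wrappers; the unrelated pedf->domain.release_lock stays on raw_spin_unlock(). The wrapper definitions are not part of this diff. A minimal sketch of what they might expand to, assuming they are thin macros over the stock raw spinlock API (everything below other than the raw_readyq_* names is the mainline spinlock API, not something introduced by this commit):

/* Sketch only: assumed wrapper definitions, not taken from this commit.
 * In the simplest case they forward to the raw spinlock calls; the extra
 * level of naming lets the ready-queue lock implementation be changed in
 * one place rather than in every scheduler plugin. */
#define raw_readyq_lock(lock)			raw_spin_lock(lock)
#define raw_readyq_unlock(lock)			raw_spin_unlock(lock)
#define raw_readyq_lock_irqsave(lock, flags)	raw_spin_lock_irqsave(lock, flags)
#define raw_readyq_unlock_irqrestore(lock, flags) \
	raw_spin_unlock_irqrestore(lock, flags)
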
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 720a733aff9e..dffaeb425abf 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -81,7 +81,7 @@ static void boost_priority(struct task_struct* t)
 	psnedf_domain_t* pedf = task_pedf(t);
 	lt_t now;
 
-	raw_spin_lock_irqsave(&pedf->slock, flags);
+	raw_readyq_lock_irqsave(&pedf->slock, flags);
 	now = litmus_clock();
 
 	TRACE_TASK(t, "priority boosted at %llu\n", now);
@@ -101,7 +101,7 @@ static void boost_priority(struct task_struct* t)
 		raw_spin_unlock(&pedf->domain.release_lock);
 	} /* else: nothing to do since the job is not queued while scheduled */
 
-	raw_spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_readyq_unlock_irqrestore(&pedf->slock, flags);
 }
 
 static void unboost_priority(struct task_struct* t)
@@ -110,7 +110,7 @@ static void unboost_priority(struct task_struct* t)
 	psnedf_domain_t* pedf = task_pedf(t);
 	lt_t now;
 
-	raw_spin_lock_irqsave(&pedf->slock, flags);
+	raw_readyq_lock_irqsave(&pedf->slock, flags);
 	now = litmus_clock();
 
 	/* assumption: this only happens when the job is scheduled */
@@ -128,7 +128,7 @@ static void unboost_priority(struct task_struct* t)
 	if (edf_preemption_needed(&pedf->domain, pedf->scheduled))
 		preempt(pedf);
 
-	raw_spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_readyq_unlock_irqrestore(&pedf->slock, flags);
 }
 
 #endif
@@ -219,7 +219,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
 
 	int out_of_time, sleep, preempt, np, exists, blocks, resched;
 
-	raw_spin_lock(&pedf->slock);
+	raw_readyq_lock(&pedf->slock);
 
 	/* sanity checking
 	 * differently from gedf, when a task exits (dead)
@@ -304,7 +304,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
 
 	pedf->scheduled = next;
 	sched_state_task_picked();
-	raw_spin_unlock(&pedf->slock);
+	raw_readyq_unlock(&pedf->slock);
 
 	return next;
 }
@@ -327,7 +327,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running)
 	/* The task should be running in the queue, otherwise signal
 	 * code will try to wake it up with fatal consequences.
 	 */
-	raw_spin_lock_irqsave(&pedf->slock, flags);
+	raw_readyq_lock_irqsave(&pedf->slock, flags);
 	if (running) {
 		/* there shouldn't be anything else running at the time */
 		BUG_ON(pedf->scheduled);
@@ -337,7 +337,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running)
 		/* maybe we have to reschedule */
 		psnedf_preempt_check(pedf);
 	}
-	raw_spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_readyq_unlock_irqrestore(&pedf->slock, flags);
 }
 
 static void psnedf_task_wake_up(struct task_struct *task)
@@ -348,7 +348,7 @@ static void psnedf_task_wake_up(struct task_struct *task)
 	lt_t now;
 
 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
-	raw_spin_lock_irqsave(&pedf->slock, flags);
+	raw_readyq_lock_irqsave(&pedf->slock, flags);
 	BUG_ON(is_queued(task));
 	now = litmus_clock();
 	if (is_sporadic(task) && is_tardy(task, now)
@@ -376,7 +376,7 @@ static void psnedf_task_wake_up(struct task_struct *task)
 		psnedf_preempt_check(pedf);
 	}
 
-	raw_spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_readyq_unlock_irqrestore(&pedf->slock, flags);
 	TRACE_TASK(task, "wake up done\n");
 }
 
@@ -395,7 +395,7 @@ static void psnedf_task_exit(struct task_struct * t)
 	psnedf_domain_t* pedf = task_pedf(t);
 	rt_domain_t* edf;
 
-	raw_spin_lock_irqsave(&pedf->slock, flags);
+	raw_readyq_lock_irqsave(&pedf->slock, flags);
 
 	/* disable budget enforcement */
 	if (tsk_rt(t)->budget.ops)
@@ -412,7 +412,7 @@ static void psnedf_task_exit(struct task_struct * t)
 	TRACE_TASK(t, "RIP, now reschedule\n");
 
 	preempt(pedf);
-	raw_spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_readyq_unlock_irqrestore(&pedf->slock, flags);
 }
 
 #ifdef CONFIG_LITMUS_LOCKING
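
For orientation, here is roughly how the first touched function reads with the patch applied, reconstructed from the two boost_priority() hunks above; the declaration of flags and the boosting/requeue logic in the middle of the function are not visible in this diff and are marked as assumed or elided.

static void boost_priority(struct task_struct* t)
{
	unsigned long flags;	/* assumed: declared above the shown context */
	psnedf_domain_t* pedf = task_pedf(t);
	lt_t now;

	raw_readyq_lock_irqsave(&pedf->slock, flags);
	now = litmus_clock();

	TRACE_TASK(t, "priority boosted at %llu\n", now);

	/* ... boosting/requeue logic not shown in this diff ... */

	raw_readyq_unlock_irqrestore(&pedf->slock, flags);
}

Note the split in lock variants visible across the hunks: callbacks that may be entered with interrupts enabled (task_new, wake_up, exit, priority boosting) use the _irqsave/_irqrestore forms, while psnedf_schedule() takes the plain raw_readyq_lock(), presumably because it is invoked from the core scheduler with interrupts already disabled.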