From bf7f892f22a6a6804f09168256226cc6c2bc230c Mon Sep 17 00:00:00 2001
From: Glenn Elliott
Date: Fri, 5 Apr 2013 18:01:21 -0400
Subject: resolve rebase issues

---
 litmus/sched_psn_edf.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

(limited to 'litmus/sched_psn_edf.c')

diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 720a733aff9e..dffaeb425abf 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -81,7 +81,7 @@ static void boost_priority(struct task_struct* t)
 	psnedf_domain_t*	pedf = task_pedf(t);
 	lt_t			now;
 
-	raw_spin_lock_irqsave(&pedf->slock, flags);
+	raw_readyq_lock_irqsave(&pedf->slock, flags);
 	now = litmus_clock();
 
 	TRACE_TASK(t, "priority boosted at %llu\n", now);
@@ -101,7 +101,7 @@ static void boost_priority(struct task_struct* t)
 		raw_spin_unlock(&pedf->domain.release_lock);
 	} /* else: nothing to do since the job is not queued while scheduled */
 
-	raw_spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_readyq_unlock_irqrestore(&pedf->slock, flags);
 }
 
 static void unboost_priority(struct task_struct* t)
@@ -110,7 +110,7 @@ static void unboost_priority(struct task_struct* t)
 	psnedf_domain_t*	pedf = task_pedf(t);
 	lt_t			now;
 
-	raw_spin_lock_irqsave(&pedf->slock, flags);
+	raw_readyq_lock_irqsave(&pedf->slock, flags);
 	now = litmus_clock();
 
 	/* assumption: this only happens when the job is scheduled */
@@ -128,7 +128,7 @@ static void unboost_priority(struct task_struct* t)
 	if (edf_preemption_needed(&pedf->domain, pedf->scheduled))
 		preempt(pedf);
 
-	raw_spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_readyq_unlock_irqrestore(&pedf->slock, flags);
 }
 
 #endif
@@ -219,7 +219,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
 	int			out_of_time, sleep, preempt,
 				np, exists, blocks, resched;
 
-	raw_spin_lock(&pedf->slock);
+	raw_readyq_lock(&pedf->slock);
 
 	/* sanity checking
 	 * differently from gedf, when a task exits (dead)
@@ -304,7 +304,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
 	pedf->scheduled = next;
 
 	sched_state_task_picked();
-	raw_spin_unlock(&pedf->slock);
+	raw_readyq_unlock(&pedf->slock);
 
 	return next;
 }
@@ -327,7 +327,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running)
 	/* The task should be running in the queue, otherwise signal
 	 * code will try to wake it up with fatal consequences.
 	 */
-	raw_spin_lock_irqsave(&pedf->slock, flags);
+	raw_readyq_lock_irqsave(&pedf->slock, flags);
 	if (running) {
 		/* there shouldn't be anything else running at the time */
 		BUG_ON(pedf->scheduled);
@@ -337,7 +337,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running)
 		/* maybe we have to reschedule */
 		psnedf_preempt_check(pedf);
 	}
-	raw_spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_readyq_unlock_irqrestore(&pedf->slock, flags);
 }
 
 static void psnedf_task_wake_up(struct task_struct *task)
@@ -348,7 +348,7 @@ static void psnedf_task_wake_up(struct task_struct *task)
 	lt_t			now;
 
 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
-	raw_spin_lock_irqsave(&pedf->slock, flags);
+	raw_readyq_lock_irqsave(&pedf->slock, flags);
 	BUG_ON(is_queued(task));
 	now = litmus_clock();
 	if (is_sporadic(task) && is_tardy(task, now)
@@ -376,7 +376,7 @@ static void psnedf_task_wake_up(struct task_struct *task)
 		psnedf_preempt_check(pedf);
 	}
 
-	raw_spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_readyq_unlock_irqrestore(&pedf->slock, flags);
 	TRACE_TASK(task, "wake up done\n");
 }
 
@@ -395,7 +395,7 @@ static void psnedf_task_exit(struct task_struct * t)
 	psnedf_domain_t*	pedf = task_pedf(t);
 	rt_domain_t*		edf;
 
-	raw_spin_lock_irqsave(&pedf->slock, flags);
+	raw_readyq_lock_irqsave(&pedf->slock, flags);
 
 	/* disable budget enforcement */
 	if (tsk_rt(t)->budget.ops)
@@ -412,7 +412,7 @@ static void psnedf_task_exit(struct task_struct * t)
 	TRACE_TASK(t, "RIP, now reschedule\n");
 	preempt(pedf);
 
-	raw_spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_readyq_unlock_irqrestore(&pedf->slock, flags);
 }
 
 #ifdef CONFIG_LITMUS_LOCKING
--
cgit v1.2.2
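
For readers unfamiliar with the raw_readyq_* names used in the new lines: they appear to be LITMUS^RT wrappers around the kernel's raw spinlock primitives, applied here to the per-domain ready-queue lock (pedf->slock). A minimal sketch of how such wrappers could be defined is given below; the macro bodies and their location are assumptions for illustration only, not taken from this patch, and the real definitions in the LITMUS^RT tree may differ (for example by adding lock tracing).

/* Hypothetical sketch only: readyq lock wrappers that forward directly
 * to the kernel's raw spinlock API. Actual LITMUS^RT definitions may
 * add tracing/debugging and live in a project header, not here. */
#include <linux/spinlock.h>

#define raw_readyq_lock(lock)		raw_spin_lock(lock)
#define raw_readyq_unlock(lock)		raw_spin_unlock(lock)
#define raw_readyq_lock_irqsave(lock, flags) \
	raw_spin_lock_irqsave(lock, flags)
#define raw_readyq_unlock_irqrestore(lock, flags) \
	raw_spin_unlock_irqrestore(lock, flags)

Under that assumption the patch is purely mechanical: every acquisition and release of pedf->slock goes through the readyq wrappers, so any later change to how ready-queue locking is implemented or instrumented only needs to touch the wrapper definitions, not each plugin call site.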