Diffstat (limited to 'litmus/sched_psn_edf.c')
-rw-r--r--  litmus/sched_psn_edf.c | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index af0b30cb8b89..e50b27391d21 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -131,7 +131,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
         int out_of_time, sleep, preempt,
                 np, exists, blocks, resched;
 
-        spin_lock(&pedf->slock);
+        raw_spin_lock(&pedf->slock);
 
         /* sanity checking
          * differently from gedf, when a task exits (dead)
@@ -203,7 +203,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
         }
 
         pedf->scheduled = next;
-        spin_unlock(&pedf->slock);
+        raw_spin_unlock(&pedf->slock);
 
         return next;
 }
@@ -226,7 +226,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running)
         /* The task should be running in the queue, otherwise signal
          * code will try to wake it up with fatal consequences.
          */
-        spin_lock_irqsave(&pedf->slock, flags);
+        raw_spin_lock_irqsave(&pedf->slock, flags);
         if (running) {
                 /* there shouldn't be anything else running at the time */
                 BUG_ON(pedf->scheduled);
@@ -236,7 +236,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running)
                 /* maybe we have to reschedule */
                 preempt(pedf);
         }
-        spin_unlock_irqrestore(&pedf->slock, flags);
+        raw_spin_unlock_irqrestore(&pedf->slock, flags);
 }
 
 static void psnedf_task_wake_up(struct task_struct *task)
@@ -247,7 +247,7 @@ static void psnedf_task_wake_up(struct task_struct *task)
         lt_t now;
 
         TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
-        spin_lock_irqsave(&pedf->slock, flags);
+        raw_spin_lock_irqsave(&pedf->slock, flags);
         BUG_ON(is_queued(task));
         /* We need to take suspensions because of semaphores into
          * account! If a job resumes after being suspended due to acquiring
@@ -272,7 +272,7 @@ static void psnedf_task_wake_up(struct task_struct *task)
         if (pedf->scheduled != task)
                 requeue(task, edf);
 
-        spin_unlock_irqrestore(&pedf->slock, flags);
+        raw_spin_unlock_irqrestore(&pedf->slock, flags);
         TRACE_TASK(task, "wake up done\n");
 }
 
@@ -291,7 +291,7 @@ static void psnedf_task_exit(struct task_struct * t)
         psnedf_domain_t* pedf = task_pedf(t);
         rt_domain_t* edf;
 
-        spin_lock_irqsave(&pedf->slock, flags);
+        raw_spin_lock_irqsave(&pedf->slock, flags);
         if (is_queued(t)) {
                 /* dequeue */
                 edf = task_edf(t);
@@ -303,7 +303,7 @@ static void psnedf_task_exit(struct task_struct * t)
         TRACE_TASK(t, "RIP, now reschedule\n");
 
         preempt(pedf);
-        spin_unlock_irqrestore(&pedf->slock, flags);
+        raw_spin_unlock_irqrestore(&pedf->slock, flags);
 }
 
 #ifdef CONFIG_FMLP
@@ -323,7 +323,7 @@ static long psnedf_pi_block(struct pi_semaphore *sem,
                 edf = task_edf(new_waiter);
 
                 /* interrupts already disabled */
-                spin_lock(&pedf->slock);
+                raw_spin_lock(&pedf->slock);
 
                 /* store new highest-priority task */
                 sem->hp.cpu_task[cpu] = new_waiter;
@@ -348,7 +348,7 @@ static long psnedf_pi_block(struct pi_semaphore *sem,
                 if (edf_preemption_needed(edf, current))
                         preempt(pedf);
 
-                spin_unlock(&pedf->slock);
+                raw_spin_unlock(&pedf->slock);
         }
 
         return 0;
@@ -415,7 +415,7 @@ static long psnedf_return_priority(struct pi_semaphore *sem)
         /* Always check for delayed preemptions that might have become
          * necessary due to non-preemptive execution.
          */
-        spin_lock(&pedf->slock);
+        raw_spin_lock(&pedf->slock);
 
         /* Reset inh_task to NULL. */
         current->rt_param.inh_task = NULL;
@@ -424,7 +424,7 @@ static long psnedf_return_priority(struct pi_semaphore *sem)
         if (edf_preemption_needed(edf, current))
                 preempt(pedf);
 
-        spin_unlock(&pedf->slock);
+        raw_spin_unlock(&pedf->slock);
 
 
         return ret;
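
Taken together, the hunks make one mechanical substitution: every spin_lock*/spin_unlock* call on pedf->slock becomes its raw_spin_lock*/raw_spin_unlock* counterpart. This mirrors the mainline spinlock split (merged around Linux 2.6.33), after which spinlock_t may become a sleeping lock under PREEMPT_RT; a lock taken inside schedule() or with interrupts disabled must therefore be a raw_spinlock_t, which always busy-waits. Below is a minimal sketch of the resulting pattern; it is not code from this file: the struct is abbreviated, touch_domain_state is a hypothetical helper, and it assumes the slock field was switched to raw_spinlock_t in the LITMUS^RT headers (a change not shown in this diff).

/*
 * Sketch only: abbreviated psnedf_domain_t, assuming the real header
 * now declares slock as raw_spinlock_t (that change is not part of
 * this diff). touch_domain_state is a hypothetical helper.
 */
#include <linux/sched.h>
#include <linux/spinlock.h>

typedef struct {
        raw_spinlock_t slock;          /* never sleeps, even on PREEMPT_RT */
        struct task_struct *scheduled; /* task currently picked on this CPU */
} psnedf_domain_t;

static void touch_domain_state(psnedf_domain_t *pedf)
{
        unsigned long flags;

        /* process context: disable IRQs ourselves, as the
         * task_new/wake_up/exit callbacks above do (the lock must have
         * been set up once with raw_spin_lock_init()) */
        raw_spin_lock_irqsave(&pedf->slock, flags);
        /* ... inspect or update pedf->scheduled and the ready queue ... */
        raw_spin_unlock_irqrestore(&pedf->slock, flags);
}

Inside psnedf_schedule() and the FMLP paths, interrupts are already off (the original comment "interrupts already disabled" notes this), so the plain raw_spin_lock()/raw_spin_unlock() forms suffice, exactly as the hunks above show.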