path: root/litmus/sched_psn_edf.c
author    Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-29 23:45:13 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-29 23:57:07 -0400
commit    a66246f9e973a68fb9955a2fa7663a2e02afbd30 (patch)
tree      ebdf77a3cf491c0d0b77af3d9622f33013af5856 /litmus/sched_psn_edf.c
parent    6ffc1fee98c4b995eb3a0285f4f8fb467cb0306e (diff)
Change most LitmusRT spinlock_t to raw_spinlock_t
Adapt to the new spinlock schema (tglx 20091217):

- spinlock: the weakest one, which might sleep in RT
- raw_spinlock: a spinlock which always spins, even on RT
- arch_spinlock: the hardware-level, architecture-dependent implementation

----
Most probably, all the spinlocks changed by this commit will be true spinning locks (raw_spinlock) in PreemptRT, so hopefully we'll need few changes when porting Litmus to PreemptRT.

There are a couple of spinlocks that the kernel still defines as spinlock_t (therefore no changes reported in this commit) that might cause us trouble:

- the wait_queue_t lock is defined as spinlock_t; it is used in:
  * fmlp.c -- sem->wait.lock
  * sync.c -- ts_release.wait.lock
- the rwlock_t used in the FIFO implementation in sched_trace.c
  * this probably needs to be changed to something that always spins in RT, at the expense of increased locking time.

----
This commit also fixes warnings and errors caused by the need to include slab.h explicitly when using kmalloc() and friends.

----
This commit does not compile.
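For reference, the conversion pattern described above looks roughly as follows. This is a minimal sketch against the mainline spinlock API of that era; the struct, field, and function names (demo_domain, demo_domain_create, demo_domain_update) are made up for illustration and are not taken from the LITMUS^RT sources.

#include <linux/spinlock.h>
#include <linux/slab.h>	/* kmalloc()/kfree() now require this header explicitly */

/* Hypothetical per-CPU state protected by a lock that must keep spinning
 * under PreemptRT, hence raw_spinlock_t instead of spinlock_t. */
struct demo_domain {
	raw_spinlock_t	lock;		/* was: spinlock_t lock; */
	int		*state;
};

static struct demo_domain *demo_domain_create(void)
{
	struct demo_domain *dom = kmalloc(sizeof(*dom), GFP_KERNEL);

	if (!dom)
		return NULL;
	raw_spin_lock_init(&dom->lock);	/* was: spin_lock_init() */
	dom->state = NULL;
	return dom;
}

static void demo_domain_update(struct demo_domain *dom, int *new_state)
{
	unsigned long flags;

	/* was: spin_lock_irqsave()/spin_unlock_irqrestore() */
	raw_spin_lock_irqsave(&dom->lock, flags);
	dom->state = new_state;
	raw_spin_unlock_irqrestore(&dom->lock, flags);
}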
Diffstat (limited to 'litmus/sched_psn_edf.c')
-rw-r--r--    litmus/sched_psn_edf.c    24
1 file changed, 12 insertions, 12 deletions
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 7f71ecfaaaae..7a548bf5162e 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -131,7 +131,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
 	int out_of_time, sleep, preempt,
 	    np, exists, blocks, resched;
 
-	spin_lock(&pedf->slock);
+	raw_spin_lock(&pedf->slock);
 
 	/* sanity checking
 	 * differently from gedf, when a task exits (dead)
@@ -201,7 +201,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
 	}
 
 	pedf->scheduled = next;
-	spin_unlock(&pedf->slock);
+	raw_spin_unlock(&pedf->slock);
 
 	return next;
 }
@@ -224,7 +224,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running)
 	/* The task should be running in the queue, otherwise signal
 	 * code will try to wake it up with fatal consequences.
 	 */
-	spin_lock_irqsave(&pedf->slock, flags);
+	raw_spin_lock_irqsave(&pedf->slock, flags);
 	if (running) {
 		/* there shouldn't be anything else running at the time */
 		BUG_ON(pedf->scheduled);
@@ -234,7 +234,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running)
 		/* maybe we have to reschedule */
 		preempt(pedf);
 	}
-	spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_spin_unlock_irqrestore(&pedf->slock, flags);
 }
 
 static void psnedf_task_wake_up(struct task_struct *task)
@@ -245,7 +245,7 @@ static void psnedf_task_wake_up(struct task_struct *task)
 	lt_t now;
 
 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
-	spin_lock_irqsave(&pedf->slock, flags);
+	raw_spin_lock_irqsave(&pedf->slock, flags);
 	BUG_ON(is_queued(task));
 	/* We need to take suspensions because of semaphores into
 	 * account! If a job resumes after being suspended due to acquiring
@@ -270,7 +270,7 @@ static void psnedf_task_wake_up(struct task_struct *task)
 	if (pedf->scheduled != task)
 		requeue(task, edf);
 
-	spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_spin_unlock_irqrestore(&pedf->slock, flags);
 	TRACE_TASK(task, "wake up done\n");
 }
 
@@ -289,7 +289,7 @@ static void psnedf_task_exit(struct task_struct * t)
 	psnedf_domain_t* pedf = task_pedf(t);
 	rt_domain_t* edf;
 
-	spin_lock_irqsave(&pedf->slock, flags);
+	raw_spin_lock_irqsave(&pedf->slock, flags);
 	if (is_queued(t)) {
 		/* dequeue */
 		edf = task_edf(t);
@@ -301,7 +301,7 @@ static void psnedf_task_exit(struct task_struct * t)
 	TRACE_TASK(t, "RIP, now reschedule\n");
 
 	preempt(pedf);
-	spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_spin_unlock_irqrestore(&pedf->slock, flags);
 }
 
 #ifdef CONFIG_FMLP
@@ -321,7 +321,7 @@ static long psnedf_pi_block(struct pi_semaphore *sem,
 		edf = task_edf(new_waiter);
 
 		/* interrupts already disabled */
-		spin_lock(&pedf->slock);
+		raw_spin_lock(&pedf->slock);
 
 		/* store new highest-priority task */
 		sem->hp.cpu_task[cpu] = new_waiter;
@@ -346,7 +346,7 @@ static long psnedf_pi_block(struct pi_semaphore *sem,
 		if (edf_preemption_needed(edf, current))
 			preempt(pedf);
 
-		spin_unlock(&pedf->slock);
+		raw_spin_unlock(&pedf->slock);
 	}
 
 	return 0;
@@ -413,7 +413,7 @@ static long psnedf_return_priority(struct pi_semaphore *sem)
 	/* Always check for delayed preemptions that might have become
 	 * necessary due to non-preemptive execution.
 	 */
-	spin_lock(&pedf->slock);
+	raw_spin_lock(&pedf->slock);
 
 	/* Reset inh_task to NULL. */
 	current->rt_param.inh_task = NULL;
@@ -422,7 +422,7 @@ static long psnedf_return_priority(struct pi_semaphore *sem)
 	if (edf_preemption_needed(edf, current))
 		preempt(pedf);
 
-	spin_unlock(&pedf->slock);
+	raw_spin_unlock(&pedf->slock);
 
 
 	return ret;
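Note that this diff only converts the lock/unlock call sites in sched_psn_edf.c; the lock behind pedf->slock is declared elsewhere, and its type and initializer have to move from spinlock_t to raw_spinlock_t as well, presumably in the other files touched by this commit. A hedged sketch of what such a companion change could look like; the struct and field names below are assumptions for illustration, not the actual LITMUS^RT declarations.

/* Hypothetical declaration-side counterpart of the call-site changes above. */
typedef struct {
	raw_spinlock_t		slock;		/* was: spinlock_t slock; */
	struct task_struct	*scheduled;	/* only RT tasks */
} demo_pedf_domain_t;

static void demo_pedf_domain_init(demo_pedf_domain_t *pedf)
{
	raw_spin_lock_init(&pedf->slock);	/* was: spin_lock_init() */
	pedf->scheduled = NULL;
}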