path: root/litmus/sched_pfp.c
Diffstat (limited to 'litmus/sched_pfp.c')
-rw-r--r--  litmus/sched_pfp.c  40
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c
index 01ac97d7f161..251c6cfee130 100644
--- a/litmus/sched_pfp.c
+++ b/litmus/sched_pfp.c
@@ -76,7 +76,7 @@ static void pfp_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 	struct task_struct* t;
 	struct bheap_node* hn;
 
-	raw_spin_lock_irqsave(&pfp->slock, flags);
+	raw_readyq_lock_irqsave(&pfp->slock, flags);
 
 	while (!bheap_empty(tasks)) {
 		hn = bheap_take(fp_ready_order, tasks);
@@ -92,7 +92,7 @@ static void pfp_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 		preempt(pfp);
 	}
 
-	raw_spin_unlock_irqrestore(&pfp->slock, flags);
+	raw_readyq_unlock_irqrestore(&pfp->slock, flags);
 }
 
 static void pfp_preempt_check(pfp_domain_t *pfp)
@@ -164,7 +164,7 @@ static struct task_struct* pfp_schedule(struct task_struct * prev)
 
 	int out_of_time, sleep, preempt, np, exists, blocks, resched, migrate;
 
-	raw_spin_lock(&pfp->slock);
+	raw_readyq_lock(&pfp->slock);
 
 	/* sanity checking
 	 * differently from gedf, when a task exits (dead)
@@ -259,7 +259,7 @@ static struct task_struct* pfp_schedule(struct task_struct * prev)
 
 	pfp->scheduled = next;
 	sched_state_task_picked();
-	raw_spin_unlock(&pfp->slock);
+	raw_readyq_unlock(&pfp->slock);
 
 	return next;
 }
@@ -279,14 +279,14 @@ static void pfp_finish_switch(struct task_struct *prev)
 
 		to = task_pfp(prev);
 
-		raw_spin_lock(&to->slock);
+		raw_readyq_lock(&to->slock);
 
 		TRACE_TASK(prev, "adding to queue on P%d\n", to->cpu);
 		requeue(prev, to);
 		if (fp_preemption_needed(&to->ready_queue, to->scheduled))
 			preempt(to);
 
-		raw_spin_unlock(&to->slock);
+		raw_readyq_unlock(&to->slock);
 
 	}
 }
@@ -306,7 +306,7 @@ static void pfp_task_new(struct task_struct * t, int on_rq, int is_scheduled)
 	/* setup job parameters */
 	release_at(t, litmus_clock());
 
-	raw_spin_lock_irqsave(&pfp->slock, flags);
+	raw_readyq_lock_irqsave(&pfp->slock, flags);
 	if (is_scheduled) {
 		/* there shouldn't be anything else running at the time */
 		BUG_ON(pfp->scheduled);
@@ -316,7 +316,7 @@ static void pfp_task_new(struct task_struct * t, int on_rq, int is_scheduled)
 		/* maybe we have to reschedule */
 		pfp_preempt_check(pfp);
 	}
-	raw_spin_unlock_irqrestore(&pfp->slock, flags);
+	raw_readyq_unlock_irqrestore(&pfp->slock, flags);
 }
 
 static void pfp_task_wake_up(struct task_struct *task)
@@ -326,7 +326,7 @@ static void pfp_task_wake_up(struct task_struct *task)
 	lt_t now;
 
 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
-	raw_spin_lock_irqsave(&pfp->slock, flags);
+	raw_readyq_lock_irqsave(&pfp->slock, flags);
 
 #ifdef CONFIG_LITMUS_LOCKING
 	/* Should only be queued when processing a fake-wake up due to a
@@ -368,7 +368,7 @@ static void pfp_task_wake_up(struct task_struct *task)
 #ifdef CONFIG_LITMUS_LOCKING
 out_unlock:
 #endif
-	raw_spin_unlock_irqrestore(&pfp->slock, flags);
+	raw_readyq_unlock_irqrestore(&pfp->slock, flags);
 	TRACE_TASK(task, "wake up done\n");
 }
 
@@ -397,7 +397,7 @@ static void pfp_task_exit(struct task_struct * t)
 	pfp_domain_t* pfp = task_pfp(t);
 	rt_domain_t* dom;
 
-	raw_spin_lock_irqsave(&pfp->slock, flags);
+	raw_readyq_lock_irqsave(&pfp->slock, flags);
 	if (is_queued(t)) {
 		BUG(); /* This currently doesn't work. */
 		/* dequeue */
@@ -410,7 +410,7 @@ static void pfp_task_exit(struct task_struct * t)
 	}
 	TRACE_TASK(t, "RIP, now reschedule\n");
 
-	raw_spin_unlock_irqrestore(&pfp->slock, flags);
+	raw_readyq_unlock_irqrestore(&pfp->slock, flags);
 }
 
 #ifdef CONFIG_LITMUS_LOCKING
@@ -473,7 +473,7 @@ static void boost_priority(struct task_struct* t, lt_t priority_point)
 	unsigned long flags;
 	pfp_domain_t* pfp = task_pfp(t);
 
-	raw_spin_lock_irqsave(&pfp->slock, flags);
+	raw_readyq_lock_irqsave(&pfp->slock, flags);
 
 
 	TRACE_TASK(t, "priority boosted at %llu\n", litmus_clock());
@@ -487,7 +487,7 @@ static void boost_priority(struct task_struct* t, lt_t priority_point)
 	 * part of lock acquisitions. */
 	BUG_ON(pfp->scheduled != t);
 
-	raw_spin_unlock_irqrestore(&pfp->slock, flags);
+	raw_readyq_unlock_irqrestore(&pfp->slock, flags);
 }
 
 static void unboost_priority(struct task_struct* t)
@@ -496,7 +496,7 @@ static void unboost_priority(struct task_struct* t)
 	pfp_domain_t* pfp = task_pfp(t);
 	lt_t now;
 
-	raw_spin_lock_irqsave(&pfp->slock, flags);
+	raw_readyq_lock_irqsave(&pfp->slock, flags);
 	now = litmus_clock();
 
 	/* assumption: this only happens when the job is scheduled */
@@ -514,7 +514,7 @@ static void unboost_priority(struct task_struct* t)
 	if (fp_preemption_needed(&pfp->ready_queue, pfp->scheduled))
 		preempt(pfp);
 
-	raw_spin_unlock_irqrestore(&pfp->slock, flags);
+	raw_readyq_unlock_irqrestore(&pfp->slock, flags);
 }
 
 /* ******************** SRP support ************************ */
@@ -1084,7 +1084,7 @@ static void pcp_priority_inheritance(void)
 	blocker = ceiling ? ceiling->owner : NULL;
 	blocked = __get_cpu_var(pcp_state).hp_waiter;
 
-	raw_spin_lock_irqsave(&pfp->slock, flags);
+	raw_readyq_lock_irqsave(&pfp->slock, flags);
 
 	/* Current is no longer inheriting anything by default. This should be
 	 * the currently scheduled job, and hence not currently queued. */
@@ -1110,7 +1110,7 @@ static void pcp_priority_inheritance(void)
 	    fp_higher_prio(fp_prio_peek(&pfp->ready_queue), pfp->scheduled))
 		preempt(pfp);
 
-	raw_spin_unlock_irqrestore(&pfp->slock, flags);
+	raw_readyq_unlock_irqrestore(&pfp->slock, flags);
 }
 
 /* called with preemptions off */
@@ -1413,12 +1413,12 @@ static void pfp_migrate_to(int target_cpu)
 	/* lock both pfp domains in order of address */
 	from = task_pfp(t);
 
-	raw_spin_lock(&from->slock);
+	raw_readyq_lock(&from->slock);
 
 	/* switch partitions */
 	tsk_rt(t)->task_params.cpu = target_cpu;
 
-	raw_spin_unlock(&from->slock);
+	raw_readyq_unlock(&from->slock);
 
 	/* Don't trace scheduler costs as part of
 	 * locking overhead. Scheduling costs are accounted for
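
The change is purely mechanical: every acquisition and release of the per-partition ready-queue lock (pfp->slock) now goes through raw_readyq_lock*() wrappers instead of calling the raw_spin_lock*() primitives directly. The wrapper definitions are not part of this diff; as a minimal sketch only, assuming they simply forward to the raw spinlock API so that scheduler behavior is unchanged, they might look like the following (hypothetical macros, not the actual LITMUS^RT definitions):

#include <linux/spinlock.h>

/* Hypothetical sketch -- assumes the raw_readyq_* wrappers are thin
 * macros that forward to the raw spinlock primitives, giving one
 * central point where ready-queue locking could later be instrumented
 * or swapped out without touching every plugin call site. */
#define raw_readyq_lock(lock)		raw_spin_lock(lock)
#define raw_readyq_unlock(lock)		raw_spin_unlock(lock)
#define raw_readyq_lock_irqsave(lock, flags) \
	raw_spin_lock_irqsave(lock, flags)
#define raw_readyq_unlock_irqrestore(lock, flags) \
	raw_spin_unlock_irqrestore(lock, flags)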