path: root/litmus
author		Bjoern B. Brandenburg <bbb@cs.unc.edu>	2011-02-10 00:46:14 -0500
committer	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2011-02-10 00:46:14 -0500
commit		010d0453ddb52948dcf82a1c74d5944754a4c753 (patch)
tree		d6544ea9db944777d877bc9ec586953503d92a87 /litmus
parent		a31e8cf815907a63f6950d275b48258d76b84ff4 (diff)
P-FP: implement bitmask-based priority queue
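
This patch replaces the generic rt_domain_t ready queue on each partition with a dedicated fixed-priority queue: one bheap per priority level plus a bitmask whose set bits mark the non-empty levels, so the highest-priority pending task can be located with a find-first-set scan instead of heap comparisons. The fp_prio_add(), fp_prio_peek(), and fp_prio_take() helpers used in the diff are not part of this patch, so the following user-space sketch only illustrates the assumed technique; the level count, helper names, and the plain FIFO standing in for struct bheap are illustrative, not taken from the patch, and it relies on GCC/Clang's __builtin_ctzl.

/*
 * User-space sketch of the bitmask-based priority-queue idea (assumed,
 * not copied from the patch): the in-kernel struct fp_prio_queue keeps
 * one struct bheap per priority level; here each level is a plain FIFO
 * so the example compiles on its own.
 */
#include <stdio.h>

#define MAX_PRIORITY   128	/* stand-in for LITMUS_MAX_PRIORITY */
#define BITS_PER_WORD  (8 * sizeof(unsigned long))
#define PRIO_BIT_WORDS ((MAX_PRIORITY + BITS_PER_WORD - 1) / BITS_PER_WORD)

struct node {
	int payload;
	struct node *next;
};

struct prio_queue {
	unsigned long bitmask[PRIO_BIT_WORDS];	/* bit i set <=> queue[i] non-empty */
	struct node  *queue[MAX_PRIORITY];	/* one FIFO per priority level */
};

static void prio_add(struct prio_queue *q, unsigned int prio, struct node *n)
{
	struct node **tail = &q->queue[prio];

	/* append to the per-level FIFO and mark the level as occupied */
	n->next = NULL;
	while (*tail)
		tail = &(*tail)->next;
	*tail = n;
	q->bitmask[prio / BITS_PER_WORD] |= 1UL << (prio % BITS_PER_WORD);
}

/* highest priority = lowest index; scan the words, then find-first-set */
static int prio_peek_index(struct prio_queue *q)
{
	unsigned int w;

	for (w = 0; w < PRIO_BIT_WORDS; w++)
		if (q->bitmask[w])
			return w * BITS_PER_WORD + __builtin_ctzl(q->bitmask[w]);
	return -1; /* queue is empty */
}

static struct node *prio_take(struct prio_queue *q)
{
	int prio = prio_peek_index(q);
	struct node *n;

	if (prio < 0)
		return NULL;
	n = q->queue[prio];
	q->queue[prio] = n->next;
	if (!q->queue[prio]) /* last entry at this level: clear its bit */
		q->bitmask[prio / BITS_PER_WORD] &= ~(1UL << (prio % BITS_PER_WORD));
	return n;
}

int main(void)
{
	struct prio_queue q = { { 0 }, { NULL } };
	struct node a = { 42, NULL }, b = { 7, NULL };

	prio_add(&q, 90, &a);
	prio_add(&q, 3, &b);
	/* the lower index (higher priority) entry comes out first */
	printf("took payload %d\n", prio_take(&q)->payload);
	return 0;
}

With this layout, finding the next job to dispatch costs a handful of word scans regardless of how many jobs are queued, which keeps the per-partition ready queue cheap to consult on every scheduling decision.
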
Diffstat (limited to 'litmus')
-rw-r--r--	litmus/fp_common.c	10
-rw-r--r--	litmus/sched_pfp.c	93
2 files changed, 63 insertions(+), 40 deletions(-)
diff --git a/litmus/fp_common.c b/litmus/fp_common.c
index fa36574d5992..7898955fd991 100644
--- a/litmus/fp_common.c
+++ b/litmus/fp_common.c
@@ -110,3 +110,13 @@ int fp_preemption_needed(rt_domain_t* rt, struct task_struct *t)
 	/* make sure to get non-rt stuff out of the way */
 	return !is_realtime(t) || fp_higher_prio(__next_ready(rt), t);
 }
+
+void fp_prio_queue_init(struct fp_prio_queue* q)
+{
+	int i;
+
+	for (i = 0; i < FP_PRIO_BIT_WORDS; i++)
+		q->bitmask[i] = 0;
+	for (i = 0; i < LITMUS_MAX_PRIORITY; i++)
+		bheap_init(&q->queue[i]);
+}
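
The initializer above clears every bitmask word and sets up one empty bheap per priority level, so a freshly initialized queue reports no pending work at any level; presumably fp_prio_add() and fp_prio_take(), which are not part of this diff, maintain the invariant that a bit is set exactly when the corresponding heap is non-empty, matching the sketch given after the commit message.
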
diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c
index 183ceedf8308..fb7433dacf40 100644
--- a/litmus/sched_pfp.c
+++ b/litmus/sched_pfp.c
@@ -21,6 +21,7 @@
 
 typedef struct {
 	rt_domain_t		domain;
+	struct fp_prio_queue	ready_queue;
 	int			cpu;
 	struct task_struct*	scheduled; /* only RT tasks */
 /*
@@ -39,50 +40,65 @@ DEFINE_PER_CPU(pfp_domain_t, pfp_domains);
 #define task_dom(task)		remote_dom(get_partition(task))
 #define task_pfp(task)		remote_pfp(get_partition(task))
 
+/* we assume the lock is being held */
+static void preempt(pfp_domain_t *pfp)
+{
+	preempt_if_preemptable(pfp->scheduled, pfp->cpu);
+}
+
+static unsigned int priority_index(struct task_struct* t)
+{
+#ifdef CONFIG_LOCKING
+	if (is_priority_boosted(t)) {
+		/* zero is reserved for priority-boosted tasks */
+		return 0;
+	} else
+#endif
+		return get_priority(t);
+}
+
+
+static void pfp_release_jobs(rt_domain_t* rt, struct bheap* tasks)
+{
+	pfp_domain_t *pfp = container_of(rt, pfp_domain_t, domain);
+	unsigned long flags;
+	struct task_struct* t;
+	struct bheap_node* hn;
+
+	raw_spin_lock_irqsave(&pfp->slock, flags);
+
+	while (!bheap_empty(tasks)) {
+		hn = bheap_take(fp_ready_order, tasks);
+		t = bheap2task(hn);
+		fp_prio_add(&pfp->ready_queue, t, priority_index(t));
+	}
+
+	/* do we need to preempt? */
+	if (fp_higher_prio(fp_prio_peek(&pfp->ready_queue), pfp->scheduled))
+		preempt(pfp);
+
+	raw_spin_unlock_irqrestore(&pfp->slock, flags);
+}
 
 static void pfp_domain_init(pfp_domain_t* pfp,
-			    check_resched_needed_t check,
-			    release_jobs_t release,
 			    int cpu)
 {
-	fp_domain_init(&pfp->domain, check, release);
+	fp_domain_init(&pfp->domain, NULL, pfp_release_jobs);
 	pfp->cpu       = cpu;
 	pfp->scheduled = NULL;
+	fp_prio_queue_init(&pfp->ready_queue);
 }
 
-static void requeue(struct task_struct* t, rt_domain_t *dom)
+static void requeue(struct task_struct* t, pfp_domain_t *pfp)
 {
 	if (t->state != TASK_RUNNING)
 		TRACE_TASK(t, "requeue: !TASK_RUNNING\n");
 
 	set_rt_flags(t, RT_F_RUNNING);
 	if (is_released(t, litmus_clock()))
-		__add_ready(dom, t);
+		fp_prio_add(&pfp->ready_queue, t, priority_index(t));
 	else
-		add_release(dom, t); /* it has got to wait */
-}
-
-/* we assume the lock is being held */
-static void preempt(pfp_domain_t *pfp)
-{
-	preempt_if_preemptable(pfp->scheduled, pfp->cpu);
-}
-
-/* This check is trivial in partioned systems as we only have to consider
- * the CPU of the partition.
- */
-static int pfp_check_resched(rt_domain_t *dom)
-{
-	pfp_domain_t *pfp = container_of(dom, pfp_domain_t, domain);
-
-	/* because this is a callback from rt_domain_t we already hold
-	 * the necessary lock for the ready queue
-	 */
-	if (fp_preemption_needed(dom, pfp->scheduled)) {
-		preempt(pfp);
-		return 1;
-	} else
-		return 0;
+		add_release(&pfp->domain, t); /* it has got to wait */
 }
 
 static void job_completion(struct task_struct* t, int forced)
@@ -185,8 +201,8 @@ static struct task_struct* pfp_schedule(struct task_struct * prev)
 	 * the appropriate queue.
 	 */
 	if (pfp->scheduled && !blocks)
-		requeue(pfp->scheduled, dom);
-	next = __take_ready(dom);
+		requeue(pfp->scheduled, pfp);
+	next = fp_prio_take(&pfp->ready_queue);
 	} else
 		/* Only override Linux scheduler if we have a real-time task
 		 * scheduled that needs to continue.
@@ -213,7 +229,6 @@ static struct task_struct* pfp_schedule(struct task_struct * prev)
  */
 static void pfp_task_new(struct task_struct * t, int on_rq, int running)
 {
-	rt_domain_t*		dom = task_dom(t);
 	pfp_domain_t*		pfp = task_pfp(t);
 	unsigned long		flags;
 
@@ -232,7 +247,7 @@ static void pfp_task_new(struct task_struct * t, int on_rq, int running)
 		BUG_ON(pfp->scheduled);
 		pfp->scheduled = t;
 	} else {
-		requeue(t, dom);
+		requeue(t, pfp);
 		/* maybe we have to reschedule */
 		preempt(pfp);
 	}
@@ -242,8 +257,7 @@ static void pfp_task_new(struct task_struct * t, int on_rq, int running)
 static void pfp_task_wake_up(struct task_struct *task)
 {
 	unsigned long		flags;
 	pfp_domain_t*		pfp = task_pfp(task);
-	rt_domain_t*		dom = task_dom(task);
 	lt_t			now;
 
 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
@@ -271,7 +285,7 @@ static void pfp_task_wake_up(struct task_struct *task)
 	 * and won.
 	 */
 	if (pfp->scheduled != task)
-		requeue(task, dom);
+		requeue(task, pfp);
 
 	raw_spin_unlock_irqrestore(&pfp->slock, flags);
 	TRACE_TASK(task, "wake up done\n");
@@ -579,7 +593,8 @@ static long pfp_allocate_lock(struct litmus_lock **lock, int type,
 
 static long pfp_admit_task(struct task_struct* tsk)
 {
-	return task_cpu(tsk) == tsk->rt_param.task_params.cpu ? 0 : -EINVAL;
+	return task_cpu(tsk) == tsk->rt_param.task_params.cpu
+		&& get_priority(tsk) > 0 ? 0 : -EINVAL;
 }
 
 /* Plugin object */
@@ -609,9 +624,7 @@ static int __init init_pfp(void)
 	 * we cannot use num_online_cpu()
 	 */
 	for (i = 0; i < num_online_cpus(); i++) {
-		pfp_domain_init(remote_pfp(i),
-				pfp_check_resched,
-				NULL, i);
+		pfp_domain_init(remote_pfp(i), i);
 	}
 	return register_sched_plugin(&pfp_plugin);
 }