-rw-r--r--   include/litmus/fp_common.h | 79
-rw-r--r--   litmus/fp_common.c         | 10
-rw-r--r--   litmus/sched_pfp.c         | 93
3 files changed, 142 insertions, 40 deletions
diff --git a/include/litmus/fp_common.h b/include/litmus/fp_common.h
index f23727ba913d..170cdf6b0e0b 100644
--- a/include/litmus/fp_common.h
+++ b/include/litmus/fp_common.h
@@ -6,6 +6,9 @@
 
 #include <litmus/rt_domain.h>
 
+#include <asm/bitops.h>
+
+
 void fp_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
                     release_jobs_t release);
 
@@ -16,4 +19,80 @@ int fp_ready_order(struct bheap_node* a, struct bheap_node* b);
 
 int fp_preemption_needed(rt_domain_t* rt, struct task_struct *t);
 
+
+
+#define FP_PRIO_BIT_WORDS (LITMUS_MAX_PRIORITY / BITS_PER_LONG)
+
+#if (LITMUS_MAX_PRIORITY % BITS_PER_LONG)
+#error LITMUS_MAX_PRIORITY must be a multiple of BITS_PER_LONG
+#endif
+
+/* bitmask-indexed priority queue */
+struct fp_prio_queue {
+        unsigned long bitmask[FP_PRIO_BIT_WORDS];
+        struct bheap queue[LITMUS_MAX_PRIORITY];
+};
+
+void fp_prio_queue_init(struct fp_prio_queue* q);
+
+static inline void fpq_set(struct fp_prio_queue* q, unsigned int index)
+{
+        unsigned long *word = q->bitmask + (index / BITS_PER_LONG);
+        __set_bit(index % BITS_PER_LONG, word);
+}
+
+static inline void fpq_clear(struct fp_prio_queue* q, unsigned int index)
+{
+        unsigned long *word = q->bitmask + (index / BITS_PER_LONG);
+        __clear_bit(index % BITS_PER_LONG, word);
+}
+
+static inline unsigned int fpq_find(struct fp_prio_queue* q)
+{
+        int i;
+
+        /* loop optimizer should unroll this */
+        for (i = 0; i < FP_PRIO_BIT_WORDS; i++)
+                if (q->bitmask[i])
+                        return __ffs(q->bitmask[i]) + i * BITS_PER_LONG;
+
+        return LITMUS_MAX_PRIORITY; /* nothing found */
+}
+
+static inline void fp_prio_add(struct fp_prio_queue* q, struct task_struct* t, unsigned int index)
+{
+
+        BUG_ON(bheap_node_in_heap(tsk_rt(t)->heap_node));
+
+        fpq_set(q, index);
+        bheap_insert(fp_ready_order, &q->queue[index], tsk_rt(t)->heap_node);
+}
+
+
+static inline struct task_struct* fp_prio_peek(struct fp_prio_queue* q)
+{
+        unsigned int idx = fpq_find(q);
+        struct bheap_node* hn;
+
+        if (idx < LITMUS_MAX_PRIORITY) {
+                hn = bheap_peek(fp_ready_order, &q->queue[idx]);
+                return bheap2task(hn);
+        } else
+                return NULL;
+}
+
+static inline struct task_struct* fp_prio_take(struct fp_prio_queue* q)
+{
+        unsigned int idx = fpq_find(q);
+        struct bheap_node* hn;
+
+        if (idx < LITMUS_MAX_PRIORITY) {
+                hn = bheap_take(fp_ready_order, &q->queue[idx]);
+                if (likely(bheap_empty(&q->queue[idx])))
+                        fpq_clear(q, idx);
+                return bheap2task(hn);
+        } else
+                return NULL;
+}
+
 #endif
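The new header gives P-FP a bitmask-indexed ready queue: one bit per priority level records which of the LITMUS_MAX_PRIORITY per-priority bheaps is non-empty, and fpq_find() scans FP_PRIO_BIT_WORDS words with __ffs() to locate the highest-priority (lowest-index) non-empty heap. A minimal usage sketch, assuming a LITMUS^RT kernel build where these headers are available and the caller already holds the relevant domain lock; the enqueue_and_take() helper is hypothetical and only illustrates the call pattern:

#include <litmus/litmus.h>      /* get_priority(), tsk_rt() */
#include <litmus/fp_common.h>   /* fp_prio_queue, fp_prio_add(), fp_prio_take() */

/* Hypothetical helper: insert a released job at its configured priority and
 * pull the current head of the queue back out. Lower index means higher
 * priority; fp_prio_take() returns NULL when every per-priority heap is empty. */
static struct task_struct* enqueue_and_take(struct fp_prio_queue *q,
                                            struct task_struct *t)
{
        fp_prio_add(q, t, get_priority(t));  /* sets the mask bit, inserts into the bheap */
        return fp_prio_take(q);              /* word scan + __ffs(), then bheap_take() */
}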
diff --git a/litmus/fp_common.c b/litmus/fp_common.c
index fa36574d5992..7898955fd991 100644
--- a/litmus/fp_common.c
+++ b/litmus/fp_common.c
@@ -110,3 +110,13 @@ int fp_preemption_needed(rt_domain_t* rt, struct task_struct *t)
 	/* make sure to get non-rt stuff out of the way */
 	return !is_realtime(t) || fp_higher_prio(__next_ready(rt), t);
 }
+
+void fp_prio_queue_init(struct fp_prio_queue* q)
+{
+        int i;
+
+        for (i = 0; i < FP_PRIO_BIT_WORDS; i++)
+                q->bitmask[i] = 0;
+        for (i = 0; i < LITMUS_MAX_PRIORITY; i++)
+                bheap_init(&q->queue[i]);
+}
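The init function only has to clear FP_PRIO_BIT_WORDS mask words and LITMUS_MAX_PRIORITY heaps because every priority index splits into a word number and a bit offset, exactly as fpq_set()/fpq_find() compute it. A standalone sketch of that index arithmetic; the BITS_PER_LONG and LITMUS_MAX_PRIORITY values below are illustrative assumptions, not taken from this patch:

#include <stdio.h>

#define BITS_PER_LONG       64   /* illustrative: a 64-bit kernel */
#define LITMUS_MAX_PRIORITY 512  /* illustrative value only */
#define FP_PRIO_BIT_WORDS   (LITMUS_MAX_PRIORITY / BITS_PER_LONG)

int main(void)
{
        unsigned int index = 130;                    /* some priority level */
        unsigned int word  = index / BITS_PER_LONG;  /* -> bitmask word 2 */
        unsigned int bit   = index % BITS_PER_LONG;  /* -> bit 2 within that word */

        printf("priority %u lives in bitmask[%u], bit %u of %d words\n",
               index, word, bit, FP_PRIO_BIT_WORDS);
        return 0;
}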
diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c
index 183ceedf8308..fb7433dacf40 100644
--- a/litmus/sched_pfp.c
+++ b/litmus/sched_pfp.c
@@ -21,6 +21,7 @@
 
 typedef struct {
 	rt_domain_t domain;
+	struct fp_prio_queue ready_queue;
 	int cpu;
 	struct task_struct* scheduled; /* only RT tasks */
 /*
@@ -39,50 +40,65 @@ DEFINE_PER_CPU(pfp_domain_t, pfp_domains);
 #define task_dom(task) remote_dom(get_partition(task))
 #define task_pfp(task) remote_pfp(get_partition(task))
 
+/* we assume the lock is being held */
+static void preempt(pfp_domain_t *pfp)
+{
+        preempt_if_preemptable(pfp->scheduled, pfp->cpu);
+}
+
+static unsigned int priority_index(struct task_struct* t)
+{
+#ifdef CONFIG_LOCKING
+        if (is_priority_boosted(t)) {
+                /* zero is reserved for priority-boosted tasks */
+                return 0;
+        } else
+#endif
+                return get_priority(t);
+}
+
+
+static void pfp_release_jobs(rt_domain_t* rt, struct bheap* tasks)
+{
+        pfp_domain_t *pfp = container_of(rt, pfp_domain_t, domain);
+        unsigned long flags;
+        struct task_struct* t;
+        struct bheap_node* hn;
+
+        raw_spin_lock_irqsave(&pfp->slock, flags);
+
+        while (!bheap_empty(tasks)) {
+                hn = bheap_take(fp_ready_order, tasks);
+                t = bheap2task(hn);
+                fp_prio_add(&pfp->ready_queue, t, priority_index(t));
+        }
+
+        /* do we need to preempt? */
+        if (fp_higher_prio(fp_prio_peek(&pfp->ready_queue), pfp->scheduled))
+                preempt(pfp);
+
+        raw_spin_unlock_irqrestore(&pfp->slock, flags);
+}
 
 static void pfp_domain_init(pfp_domain_t* pfp,
-                            check_resched_needed_t check,
-                            release_jobs_t release,
                             int cpu)
 {
-	fp_domain_init(&pfp->domain, check, release);
+	fp_domain_init(&pfp->domain, NULL, pfp_release_jobs);
 	pfp->cpu = cpu;
 	pfp->scheduled = NULL;
+	fp_prio_queue_init(&pfp->ready_queue);
 }
 
-static void requeue(struct task_struct* t, rt_domain_t *dom)
+static void requeue(struct task_struct* t, pfp_domain_t *pfp)
 {
 	if (t->state != TASK_RUNNING)
 		TRACE_TASK(t, "requeue: !TASK_RUNNING\n");
 
 	set_rt_flags(t, RT_F_RUNNING);
 	if (is_released(t, litmus_clock()))
-		__add_ready(dom, t);
+		fp_prio_add(&pfp->ready_queue, t, priority_index(t));
 	else
-		add_release(dom, t); /* it has got to wait */
-}
-
-/* we assume the lock is being held */
-static void preempt(pfp_domain_t *pfp)
-{
-        preempt_if_preemptable(pfp->scheduled, pfp->cpu);
-}
-
-/* This check is trivial in partioned systems as we only have to consider
- * the CPU of the partition.
- */
-static int pfp_check_resched(rt_domain_t *dom)
-{
-        pfp_domain_t *pfp = container_of(dom, pfp_domain_t, domain);
-
-        /* because this is a callback from rt_domain_t we already hold
-         * the necessary lock for the ready queue
-         */
-        if (fp_preemption_needed(dom, pfp->scheduled)) {
-                preempt(pfp);
-                return 1;
-        } else
-                return 0;
+		add_release(&pfp->domain, t); /* it has got to wait */
 }
 
 static void job_completion(struct task_struct* t, int forced)
@@ -185,8 +201,8 @@ static struct task_struct* pfp_schedule(struct task_struct * prev)
 		 * the appropriate queue.
 		 */
 		if (pfp->scheduled && !blocks)
-			requeue(pfp->scheduled, dom);
-		next = __take_ready(dom);
+			requeue(pfp->scheduled, pfp);
+		next = fp_prio_take(&pfp->ready_queue);
 	} else
 		/* Only override Linux scheduler if we have a real-time task
 		 * scheduled that needs to continue.
@@ -213,7 +229,6 @@ static struct task_struct* pfp_schedule(struct task_struct * prev)
  */
 static void pfp_task_new(struct task_struct * t, int on_rq, int running)
 {
-	rt_domain_t* dom = task_dom(t);
 	pfp_domain_t* pfp = task_pfp(t);
 	unsigned long flags;
 
@@ -232,7 +247,7 @@ static void pfp_task_new(struct task_struct * t, int on_rq, int running)
 		BUG_ON(pfp->scheduled);
 		pfp->scheduled = t;
 	} else {
-		requeue(t, dom);
+		requeue(t, pfp);
 		/* maybe we have to reschedule */
 		preempt(pfp);
 	}
@@ -242,8 +257,7 @@ static void pfp_task_new(struct task_struct * t, int on_rq, int running)
 static void pfp_task_wake_up(struct task_struct *task)
 {
 	unsigned long flags;
 	pfp_domain_t* pfp = task_pfp(task);
-	rt_domain_t* dom = task_dom(task);
 	lt_t now;
 
 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
@@ -271,7 +285,7 @@ static void pfp_task_wake_up(struct task_struct *task)
 	 * and won.
 	 */
 	if (pfp->scheduled != task)
-		requeue(task, dom);
+		requeue(task, pfp);
 
 	raw_spin_unlock_irqrestore(&pfp->slock, flags);
 	TRACE_TASK(task, "wake up done\n");
@@ -579,7 +593,8 @@ static long pfp_allocate_lock(struct litmus_lock **lock, int type,
 
 static long pfp_admit_task(struct task_struct* tsk)
 {
-	return task_cpu(tsk) == tsk->rt_param.task_params.cpu ? 0 : -EINVAL;
+	return task_cpu(tsk) == tsk->rt_param.task_params.cpu
+		&& get_priority(tsk) > 0 ? 0 : -EINVAL;
 }
 
 /* Plugin object */
@@ -609,9 +624,7 @@ static int __init init_pfp(void)
 	 * we cannot use num_online_cpu()
 	 */
 	for (i = 0; i < num_online_cpus(); i++) {
-		pfp_domain_init(remote_pfp(i),
-				pfp_check_resched,
-				NULL, i);
+		pfp_domain_init(remote_pfp(i), i);
 	}
 	return register_sched_plugin(&pfp_plugin);
 }
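Because priority_index() reserves index 0 for priority-boosted tasks, pfp_admit_task() now rejects tasks configured with priority 0 in addition to the existing partition check. A user-space sketch of the same decision logic; the struct and would_be_admitted() are hypothetical stand-ins for the fields the kernel function inspects:

#include <stdio.h>
#include <errno.h>

/* Hypothetical stand-in for the fields pfp_admit_task() looks at. */
struct fake_task {
        int cpu_running_on;     /* task_cpu(tsk) */
        int assigned_cpu;       /* tsk->rt_param.task_params.cpu */
        unsigned int priority;  /* get_priority(tsk) */
};

static long would_be_admitted(const struct fake_task *t)
{
        /* same shape as the patched check: the task must already run on its
         * partition and must not claim priority 0 (reserved for boosting) */
        return t->cpu_running_on == t->assigned_cpu
                && t->priority > 0 ? 0 : -EINVAL;
}

int main(void)
{
        struct fake_task ok  = { 1, 1, 3 };
        struct fake_task bad = { 1, 1, 0 };  /* priority 0 -> rejected */

        printf("ok:  %ld\nbad: %ld\n",
               would_be_admitted(&ok), would_be_admitted(&bad));
        return 0;
}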