aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorHennadiy Leontyev <leontyev@jupiter-cs.cs.unc.edu>2007-02-28 01:12:54 -0500
committerHennadiy Leontyev <leontyev@jupiter-cs.cs.unc.edu>2007-02-28 01:12:54 -0500
commit5637daed062ac00ab1b2a672ebeb662c2f05fb98 (patch)
tree3321f3c8266f47e7a8e0037661d7bb37d8e1782b /kernel
parent4300134c74385b82710672cb25f604ade97f334d (diff)
PFAIR scheduling added
Diffstat (limited to 'kernel')
-rw-r--r--kernel/Makefile4
-rw-r--r--kernel/litmus.c8
-rw-r--r--kernel/pfair_common.c241
-rw-r--r--kernel/sched_pfair.c504
4 files changed, 751 insertions, 6 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 13832e645e..5cd2351484 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -10,8 +10,8 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
10 kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ 10 kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
11 hrtimer.o rwsem.o latency.o nsproxy.o srcu.o \ 11 hrtimer.o rwsem.o latency.o nsproxy.o srcu.o \
12 sched_plugin.o litmus.o sched_trace.o \ 12 sched_plugin.o litmus.o sched_trace.o \
13 edf_common.o fifo_common.o \ 13 edf_common.o fifo_common.o pfair_common.o\
14 sched_global_edf.o sched_part_edf.o sched_edf_hsb.o 14 sched_global_edf.o sched_part_edf.o sched_edf_hsb.o sched_pfair.o
15 15
16obj-$(CONFIG_STACKTRACE) += stacktrace.o 16obj-$(CONFIG_STACKTRACE) += stacktrace.o
17obj-y += time/ 17obj-y += time/
diff --git a/kernel/litmus.c b/kernel/litmus.c
index 03b57fe0c3..b420ab1464 100644
--- a/kernel/litmus.c
+++ b/kernel/litmus.c
@@ -344,10 +344,10 @@ sched_plugin_t *init_global_edf_plugin(void);
344sched_plugin_t *init_global_edf_np_plugin(void); 344sched_plugin_t *init_global_edf_np_plugin(void);
345sched_plugin_t *init_part_edf_plugin(void); 345sched_plugin_t *init_part_edf_plugin(void);
346sched_plugin_t *init_edf_hsb_plugin(void); 346sched_plugin_t *init_edf_hsb_plugin(void);
347sched_plugin_t *init_pfair_plugin(void);
347 348
348/* 349/*
349CLEANUP: Add init function when the plugin has been ported. 350CLEANUP: Add init function when the plugin has been ported.
350sched_plugin_t *init_pfair_plugin(void);
351sched_plugin_t *init_desync_pfair_plugin(void); 351sched_plugin_t *init_desync_pfair_plugin(void);
352*/ 352*/
353 353
@@ -370,9 +370,9 @@ static struct {
370 PLUGIN(GLOBAL_EDF_NP, global_edf_np), 370 PLUGIN(GLOBAL_EDF_NP, global_edf_np),
371 PLUGIN(GLOBAL_EDF, global_edf), 371 PLUGIN(GLOBAL_EDF, global_edf),
372 PLUGIN(PART_EDF, part_edf), 372 PLUGIN(PART_EDF, part_edf),
373 PLUGIN(EDF_HSB, edf_hsb) 373 PLUGIN(EDF_HSB, edf_hsb),
374/* CLEANUP: Add when ported. 374 PLUGIN(PFAIR, pfair)
375 PLUGIN(PFAIR, pfair), 375/*
376 PLUGIN(PFAIR_DESYNC, desync_pfair), 376 PLUGIN(PFAIR_DESYNC, desync_pfair),
377*/ 377*/
378 378
diff --git a/kernel/pfair_common.c b/kernel/pfair_common.c
new file mode 100644
index 0000000000..a9e636df93
--- /dev/null
+++ b/kernel/pfair_common.c
@@ -0,0 +1,241 @@
1/*
2 * Common functions for PFAIR based scheduler.
3 */
4
5#include <linux/percpu.h>
6#include <linux/sched.h>
7#include <linux/list.h>
8
9#include <linux/litmus.h>
10#include <linux/sched_plugin.h>
11#include <linux/sched_trace.h>
12
13#include <linux/pfair_common.h>
14#include <linux/pfair_math.h>
15/* Comparison of two tasks whether
16 * the lhs has higher priority than the rhs */
17int is_pfair_hp(struct task_struct *lhs, struct task_struct *rhs)
18{
19 /* Favor subtasks with earlier deadlines */
20 if(time_before(get_deadline(lhs), get_deadline(rhs)))
21 return 1;
22 if(get_deadline(lhs) == get_deadline(rhs)) {
23 /* If deadlines are equal,
24 * favor non-zero b-bit (a heavy task) */
25 if(lhs->rt_param.times.b_bit > rhs->rt_param.times.b_bit)
26 return 1;
27
28 if(lhs->rt_param.times.b_bit == rhs->rt_param.times.b_bit &&
29 lhs->rt_param.times.b_bit == 1)
30 /* If b-bit is 1, favor tasks with later
31 * group deadline */
32 return time_after(lhs->rt_param.times.group_deadline,
33 rhs->rt_param.times.group_deadline);
34
35 }
36 return 0;
37}
38
39void pfair_domain_init(pfair_domain_t *pfair)
40{
41 BUG_ON(!pfair);
42 INIT_LIST_HEAD(&pfair->ready_queue);
43 INIT_LIST_HEAD(&pfair->release_queue);
44 queue_lock_init(&pfair->pfair_lock);
45 cpus_setall(pfair->domain_cpus);
46 /* Use cpu 0 to keep the system alive
47 * TODO: Remove later or make it configurable
48 * */
49 cpu_clear(0, pfair->domain_cpus);
50}
51
52
53/* add_ready - add a real-time task to the PFAIR ready queue.
54 * It must be runnable. Global domain lock must be held before
55 * calling this function.
56 *
57 * @new: the newly released task
58 */
59void pfair_add_ready(pfair_domain_t* pfair, struct task_struct *new)
60{
61 struct list_head *pos;
62 struct task_struct *queued;
63
64 BUG_ON(!new);
65 /* find a spot where our deadline is earlier than the next */
66 list_for_each(pos, &pfair->ready_queue) {
67 queued = list_entry(pos, struct task_struct, rt_list);
68 if (unlikely(is_pfair_hp(new, queued))) {
69 /* the task at pos has a later deadline */
70 /* insert the new task in front of it */
71 __list_add(&new->rt_list, pos->prev, pos);
72 return;
73 }
74 }
75 /* if we get to this point either the list is empty or new has the
76 * lowest priority. Let's add it to the end. */
77 list_add_tail(&new->rt_list, &pfair->ready_queue);
78}
79/**
80 * Extraction function.
81 */
82struct task_struct* __pfair_take_ready(pfair_domain_t* pfair)
83{
84 struct task_struct *t = NULL;
85 /* either not yet released, preempted, or non-rt */
86 if (!list_empty(&pfair->ready_queue)) {
87
88 /* take next rt task */
89 t = list_entry(pfair->ready_queue.next, struct task_struct,
90 rt_list);
91
92 /* kick it out of the ready list */
93 list_del(&t->rt_list);
94 }
95 return t;
96}
97
98
99/* add_release - add a real-time task to the PFAIR release queue.
100 * Domain lock must be acquired before the function is called.
101 *
102 * @task: the sleeping task
103 */
104void pfair_add_release(pfair_domain_t* pfair, struct task_struct *task)
105{
106 struct list_head *pos;
107 struct task_struct *queued;
108
109 BUG_ON(!task);
110 /* find a spot where our deadline is earlier than the next */
111 list_for_each_prev(pos, &pfair->release_queue) {
112 queued = list_entry(pos, struct task_struct, rt_list);
113 if ((unlikely(time_before(queued->rt_param.times.release,
114 task->rt_param.times.release)))) {
115 /* the task at pos has an earlier release */
116 /* insert the new task in behind it */
117 __list_add(&task->rt_list, pos, pos->next);
118 return;
119 }
120 }
121 /* if we get to this point either the list is empty or task has the
122 * earliest release. Let's add it to the front. */
123 list_add(&task->rt_list, &pfair->release_queue);
124}
/**
 * Move every task whose release time has arrived (release <= jiffies)
 * from the release queue to the ready queue.
 *
 * Called from the tick handler; acquires the domain lock itself, so
 * only one processor effectively merges the queues at a time.
 */
void pfair_try_release_pending(pfair_domain_t* pfair)
{
	unsigned long flags;
	struct list_head *pos, *save;
	struct task_struct *queued;
	queue_lock_irqsave(&pfair->pfair_lock, flags);

	list_for_each_safe(pos, save, &pfair->release_queue) {
		queued = list_entry(pos, struct task_struct, rt_list);
		if (likely(time_before_eq(
			queued->rt_param.times.release, jiffies))) {
			/* this one is ready to go*/
			list_del(pos);
			set_rt_flags(queued, RT_F_RUNNING);

			sched_trace_job_release(queued);
			/* compiler barrier: make sure the flag/trace updates
			 * above are not reordered past the point where the
			 * task becomes visible on the ready queue */
			barrier();
			pfair_add_ready(pfair, queued);
		}
		else
			/* the release queue is ordered by release time, so
			 * once one entry is in the future, the rest are too */
			break;
	}
	queue_unlock_irqrestore(&pfair->pfair_lock, flags);
}
155/*
156 * Subtask preparation. Assuming that last_release
157 * denotes the time when the job was released.
158 */
159void pfair_prepare_next_subtask(struct task_struct *t)
160{
161 BUG_ON(!t);
162 /* assign subtask release time, deadline, b-bit,
163 * and group deadline
164 */
165 t->rt_param.times.release = t->rt_param.times.last_release
166 +release_time(t);
167 t->rt_param.times.deadline = t->rt_param.times.last_release
168 +pfair_deadline(t);
169 t->rt_param.times.b_bit = b_bit(t);
170 t->rt_param.times.group_deadline = t->rt_param.times.last_release
171 +group_deadline(t);
172}
173
174void pfair_prepare_next_job(struct task_struct *t)
175{
176 BUG_ON(!t);
177 /* update tardy job ctr */
178 if (jiffies > t->rt_param.times.deadline)
179 t->rt_param.stats.nontardy_jobs_ctr = 0;
180 else
181 t->rt_param.stats.nontardy_jobs_ctr++;
182
183 /* prepare next job release */
184 /* make passed quantums zero so that we could compute new release times
185 * and deadlines for subtasks correctly
186 */
187 t->rt_param.times.exec_time = 0;
188 /* assign job-wide release time,
189 * this is the starting point to
190 * compute subtask releases, deadlines and group deadlines
191 */
192 t->rt_param.times.last_release = t->rt_param.times.last_release
193 +get_rt_period(t);
194 /* Release the first subtask. */
195 pfair_prepare_next_subtask(t);
196 t->first_time_slice = 0;
197}
198
199void __pfair_prepare_new_release(struct task_struct *t, jiffie_t start)
200{
201 t->rt_param.times.release = start;
202 t->rt_param.times.last_release = start;
203 t->rt_param.stats.nontardy_jobs_ctr = 0xf0000000;
204 t->rt_param.times.exec_time = 0;
205 t->first_time_slice = 0;
206 pfair_prepare_next_subtask(t);
207 set_rt_flags(t, RT_F_RUNNING);
208}
209
/* Prepare a synchronized release of all known tasks at time 'start'.
 *
 * Every task is drained from both the release and the ready queue into
 * a private list, reset for a fresh release at 'start', and re-inserted
 * into the release queue. Takes the domain lock itself.
 */
void pfair_prepare_new_releases(pfair_domain_t *pfair, jiffie_t start)
{
	unsigned long flags;
	struct list_head tmp_list;
	struct list_head *pos, *n;
	struct task_struct *t;

	INIT_LIST_HEAD(&tmp_list);

	queue_lock_irqsave(&pfair->pfair_lock, flags);


	/* drain the release queue into tmp_list */
	while (!list_empty(&pfair->release_queue)) {
		pos = pfair->release_queue.next;
		list_del(pos);
		list_add(pos, &tmp_list);
	}
	/* drain the ready queue into tmp_list as well */
	while (!list_empty(&pfair->ready_queue)) {
		pos = pfair->ready_queue.next;
		list_del(pos);
		list_add(pos, &tmp_list);
	}

	/* reset each task and queue it for release at 'start' */
	list_for_each_safe(pos, n, &tmp_list) {
		t = list_entry(pos, struct task_struct, rt_list);
		list_del(pos);
		__pfair_prepare_new_release(t, start);
		pfair_add_release(pfair, t);
	}
	queue_unlock_irqrestore(&pfair->pfair_lock, flags);
}
241
diff --git a/kernel/sched_pfair.c b/kernel/sched_pfair.c
new file mode 100644
index 0000000000..1c36ba5892
--- /dev/null
+++ b/kernel/sched_pfair.c
@@ -0,0 +1,504 @@
1/*
2 *
3 * Implementation of synchronized PFAIR PD2 scheduler
4 *
5 */
6
7#include <linux/percpu.h>
8#include <linux/sched.h>
9#include <linux/list.h>
10
11#include <linux/litmus.h>
12#include <linux/sched_plugin.h>
13#include <linux/pfair_common.h>
14#include <linux/sched_trace.h>
15#include <linux/queuelock.h>
16
/* Per-CPU scheduling state; protected by the pfair domain lock. */
struct cpu_state {
	/* task currently assigned to this CPU, NULL if none */
	struct task_struct * t;
	/* jiffies value of the last quantum this CPU made a decision
	 * for; CPUs whose marker lags jiffies have not rescheduled yet
	 * (see find_participants()) */
	volatile jiffie_t jiffie_marker;
};
/* PFAIR scheduling domain, release and ready queues */
static pfair_domain_t pfair __cacheline_aligned_in_smp;

/* An indicator that quantum boundary was crossed
 * and a decision has to be made; set by the tick handler, consumed
 * (decremented) by pfair_schedule()
 */
static int sync_go[NR_CPUS];


/* A collection of CPU states protected by pfair lock */
DEFINE_PER_CPU(struct cpu_state, states);
32
/*
 * This function gets called by the timer code, with HZ frequency
 * and with interrupts disabled.
 *
 * In real-time mode it merges the release queue into the ready queue,
 * forces a reschedule, and raises sync_go to tell pfair_schedule()
 * that a quantum boundary was crossed.
 *
 * Outside real-time mode it forces currently running real-time tasks
 * to be scheduled off.
 */
static reschedule_check_t pfair_scheduler_tick(void)
{
	int want_resched = NO_RESCHED;
	/* clear any stale quantum-boundary flag for this CPU */
	sync_go[smp_processor_id()] = 0;
	/* CPUs outside the domain never make pfair decisions */
	if (!cpu_isset(smp_processor_id(), pfair.domain_cpus))
		goto out;
	/* Now determine if we want current task to be preempted */
	if (get_rt_mode() == MODE_RT_RUN) {
		pfair_try_release_pending(&pfair);
		want_resched = FORCE_RESCHED;
		/* indicate that the interrupt fired; pfair_schedule()
		 * only runs the pfair selection when this flag is set */
		sync_go[smp_processor_id()] = 1;
		barrier();
	} else if (is_realtime(current) && is_running(current)) {
		/* In non real-time mode we want to
		 * schedule off real-time tasks */
		want_resched = FORCE_RESCHED;
	} else if (is_realtime(current) && !is_running(current)) {
		TRACE("[%d] %d Timer interrupt on not runninng %d\n",
			smp_processor_id(),
			jiffies-rt_start_time, current->pid);
	}
out:
	return want_resched;
}
68
/**
 * Called by the processor that performs rescheduling. For every task
 * still running on a CPU that has not rescheduled yet, it saves the
 * task's timing parameters (so rollback_release() can undo this),
 * charges the quantum, and tentatively releases the next subtask or
 * job, placing the task into the ready or release queue.
 */
static void pretend_release(cpumask_t p)
{
	int i = 0;
	struct task_struct * t = NULL;
	/* for all the tasks increment the number of used quanta
	 * and release next subtask or job depending on the number
	 * of used quanta
	 */
	for_each_cpu_mask(i, p) {
		t = per_cpu(states, i).t;
		if (t != NULL) {
			/* snapshot times for a later rollback */
			backup_times(t);
			inc_passed_quanta(t);
			/* job exhausted its quanta? start the next job,
			 * otherwise just the next subtask */
			if ( get_passed_quanta(t) == get_exec_cost(t)) {
				pfair_prepare_next_job(t);
			} else {
				pfair_prepare_next_subtask(t);
			}
			/*
			TRACE("[%d] %d pretending release %d with (%d, %d)\n",
				smp_processor_id(),
				jiffies-rt_start_time,t->pid,
				get_release(t)-rt_start_time,
				get_deadline(t)-rt_start_time);*/
			/* detect if the job or subtask has to be released now*/
			if (time_before_eq(get_release(t), jiffies))
				pfair_add_ready(&pfair, t);
			else
				pfair_add_release(&pfair, t);
		}
	}
}
108/*
109 * Rollback the the pretended release of tasks.
110 * Timing parameters are restored and tasks are removed
111 * from the queues as it was before calling the schedule() function.
112 *
113 */
114static void rollback_release(cpumask_t p)
115{
116 int i = -1;
117 struct task_struct * t = NULL;
118 /*
119 * Rollback the pretended changes
120 */
121 for_each_cpu_mask(i, p) {
122 t = per_cpu(states, i).t;
123 if (t != NULL) {
124 restore_times(t);
125 if(t->rt_list.prev != LIST_POISON1 ||
126 t->rt_list.next != LIST_POISON2) {
127 /* Delete the task from a queue */
128 list_del(&t->rt_list);
129 }
130 }
131 }
132}
133
134/*
135 * The procedure creates a list of cpu's whose tasks have not been
136 * rescheduled yet. These are CPU's with jiffie marker different from
137 * the value of jiffies.
138 */
139static void find_participants(cpumask_t * target)
140{
141 cpumask_t res;int i;
142 cpus_clear(res);
143 for_each_online_cpu(i) {
144 if(per_cpu(states, i).jiffie_marker != jiffies)
145 cpu_set(i, res);
146 }
147 /* Examine only cpus in the domain */
148 cpus_and(res, pfair.domain_cpus, res);
149 (*target) = res;
150}
151
/*
 * This is the main PFAIR schedule function. Each processor pretends
 * that the currently running tasks are released in the next quantum
 * (pretend_release), peeks at the resulting ready queue to decide
 * whether it should keep its current task (usually the case for heavy
 * tasks), then rolls everything back (rollback_release) and commits
 * only its own decision.
 */
static int pfair_schedule(struct task_struct *prev,
			  struct task_struct **next,
			  runqueue_t * rq)
{
	int cpu =-1;
	int k =-1;
	/* by default prev is taken off the Linux run queue */
	int need_deactivate = 1;
	int keep =0;
	unsigned long flags;
	cpumask_t participants;
	/* A temporary array */
	struct task_struct * rs_old_ptr[NR_CPUS];

	*next = NULL;
	cpu = smp_processor_id();
	/* CPU's not in the domain just bypass */
	if (!cpu_isset(cpu, pfair.domain_cpus)) {
		goto out;
	}
	queue_lock_irqsave(&pfair.pfair_lock, flags);

	/* If we happen to run in non-realtime mode
	 * then we have to schedule off currently running tasks
	 * */
	if (get_rt_mode() != MODE_RT_RUN) {
		if (is_realtime(prev)) {
			per_cpu(states, cpu).t = NULL;
			TRACE("[%d] %d Suspending %d\n",
				cpu, jiffies - rt_start_time,
				prev->pid);
			/* Move the task to the
			 * release queue for future runs
			 * FIXME: Do something smarter.
			 * For example create a set where
			 * prepared or inactive tasks are placed
			 * and then released.
			 * */
			set_release(prev, get_release(prev) + 1000);
			pfair_add_release(&pfair, prev);
		}
		goto out_deactivate;
	}
	/* If the current task stops or dies */
	if (is_realtime(prev) && !is_running(prev)) {
		/* remove it from the running set */
		per_cpu(states, cpu).t = NULL;
	}
	/* Make pfair decisions at quantum boundaries only,
	 * but schedule off stopped or dead tasks;
	 * sync_go was set to 1 by the tick handler, so only the first
	 * schedule() after a tick takes the decision path below */

	if ((sync_go[cpu]--) != 1)
		goto out_deactivate;

	/*TRACE("[%d] %d Scheduler activation", cpu, jiffies-rt_start_time);
	cpus_and(res, pfair.domain_cpus, cpu_online_map);
	for_each_cpu_mask(k, res) {
		TRACE("%d" ,(per_cpu(states, k).jiffie_marker!=jiffies));
	}
	TRACE("\n");*/

	/* Find processors that have not rescheduled yet */
	find_participants(&participants);
	/* For each task on remote cpu's pretend release */
	pretend_release(participants);
	/* Clear temporary array */
	for_each_possible_cpu(k) { rs_old_ptr[k] = NULL; }
	/* Select a new subset of eligible tasks: take as many tasks off
	 * the ready queue as there are participating CPUs */
	for_each_cpu_mask(k, participants) {
		rs_old_ptr[k] = __pfair_take_ready (&pfair);
		/* Check if our current task must be scheduled in the next quantum */
		if (rs_old_ptr[k] == per_cpu(states, cpu).t) {
			/* this is our current task, keep it */
			*next = per_cpu(states, cpu).t;
			need_deactivate = 0;
			keep = 1;
			break;
		}
	}
	/* Put all the extracted tasks back into the ready queue */
	for_each_cpu_mask(k, participants) {
		if (rs_old_ptr[k] != NULL){
			pfair_add_ready(&pfair, rs_old_ptr[k]);
			rs_old_ptr[k] = NULL;
		}
	}
	/* Rollback the pretended release,
	 * task parameters are restored and running tasks are removed
	 * from queues */
	rollback_release(participants);
	/*
	 * If the current task is not scheduled in the next quantum
	 * then select a new pfair task
	 */
	if(!keep) {
		*next = per_cpu(states, cpu).t = __pfair_take_ready(&pfair);
		if (*next != NULL) {
			/*TRACE("[%d] %d Scheduling %d with (%d, %d)\n",
				cpu, jiffies-rt_start_time,
				get_release(*next),
				get_deadline(*next));
			*/
			set_task_cpu(*next, cpu);
			__activate_task(*next, rq);
		}
	} else {
		if (is_realtime(prev)) {
			/*TRACE("[%d] %d prev==next %d\n",
				cpu,jiffies-rt_start_time,
				(prev)->pid);*/

			/* The task will not be switched off but we
			 * need to track the execution time
			 */
			inc_passed_quanta(prev);
		}
	}

	/*Show that our task does not participate in subsequent selections*/
	__get_cpu_var(states).jiffie_marker = jiffies;

out_deactivate:
	if ( is_realtime(prev) && need_deactivate && prev->array) {
		/* take prev out of the linux run queue */
		deactivate_task(prev, rq);
	}
	queue_unlock_irqrestore(&pfair.pfair_lock, flags);
out:
	return 0;
}
288
/* Completion side of a context switch: charge the quantum to the task
 * that was switched off, advance it to its next job or subtask, and
 * re-queue it as ready or pending-release.
 */
static void pfair_finish_task_switch(struct task_struct *t)
{
	queue_lock(&pfair.pfair_lock);
	/* Release in real-time mode only;
	 * if the mode is non real-time, then
	 * the task is already in the release queue
	 * with the time far in the future
	 * (see pfair_schedule()'s non-RT path)
	 */
	if (get_rt_mode() == MODE_RT_RUN) {
		inc_passed_quanta(t);
		/* job finished all its quanta? then it is complete */
		if ( get_passed_quanta(t) == get_exec_cost(t)) {
			sched_trace_job_completion(t);
			pfair_prepare_next_job(t);
		} else {
			pfair_prepare_next_subtask(t);
		}
		/*TRACE("[%d] %d releasing %d with (%d, %d)\n",
			smp_processor_id(),
			jiffies-rt_start_time,
			t->pid,
			get_release(t)-rt_start_time,
			get_deadline(t)-rt_start_time);*/
		/* already due: straight to the ready queue */
		if (time_before_eq(get_release(t), jiffies))
			pfair_add_ready(&pfair, t);
		else
			pfair_add_release(&pfair, t);
	}
	queue_unlock(&pfair.pfair_lock);
}
318
/* Prepare a task for running in RT mode.
 * Enqueues the task into the master queue data structure.
 * Returns
 *	-EPERM if task is not TASK_STOPPED
 *	0 on success
 */
static long pfair_prepare_task(struct task_struct * t)
{
	unsigned long flags;
	TRACE("pfair: prepare task %d\n", t->pid);
	if (t->state == TASK_STOPPED) {
		__setscheduler(t, SCHED_FIFO, MAX_RT_PRIO - 1);

		if (get_rt_mode() == MODE_RT_RUN)
			/* The action is already on.
			 * Prepare immediate release
			 */
			__pfair_prepare_new_release(t, jiffies);
		/* The task should be running in the queue, otherwise signal
		 * code will try to wake it up with fatal consequences.
		 */
		t->state = TASK_RUNNING;
		queue_lock_irqsave(&pfair.pfair_lock, flags);
		/* queued for release; the tick handler will move it to
		 * the ready queue once its release time arrives */
		pfair_add_release(&pfair, t);
		queue_unlock_irqrestore(&pfair.pfair_lock, flags);
		return 0;
	} else
		return -EPERM;
}
347
348
349
/* Wake-up handler: decide whether the woken task goes into the ready
 * queue or the release queue, treating a late wake-up as a new
 * sporadic release.
 */
static void pfair_wake_up_task(struct task_struct *task)
{

	unsigned long flags;

	/* We must determine whether task should go into the release
	 * queue or into the ready queue.
	 * The task enters the ready queue if the previous deadline was missed,
	 * so we treat the invoked job as a new sporadic release.
	 *
	 * The job can also enter the ready queue if it was invoked before its
	 * global deadline, but its budget must be clipped down to one quantum
	 */
	task->state = TASK_RUNNING;
	if (time_after_eq(jiffies, task->rt_param.times.last_release
			+ get_rt_period(task))) {
		/* woke up a full period (or more) after its last release:
		 * new sporadic release anchored at the current time */
		TRACE("[%d] Sporadic release of %d at %d\n",
			smp_processor_id(),
			jiffies-rt_start_time,
			task->pid);
		__pfair_prepare_new_release(task, jiffies);
		queue_lock_irqsave(&pfair.pfair_lock, flags);
		sched_trace_job_release(task);
		pfair_add_ready(&pfair, task);
		queue_unlock_irqrestore(&pfair.pfair_lock, flags);
	} else if (task->time_slice) {
		/* came back in time before deadline:
		 * clip the budget so only the last subtask of the job
		 * (or the next job, if none remains) is left
		 */
		task->rt_param.times.exec_time = get_exec_cost(task) - 1;
		if (task->rt_param.times.exec_time == 0) {
			pfair_prepare_next_job(task);
		} else {
			pfair_prepare_next_subtask(task);
		}
		TRACE("[%d] %d Resume of %d with %d, %d, %d\n",
			smp_processor_id(), jiffies-rt_start_time,
			task->pid, get_release(task)-rt_start_time,
			get_deadline(task)-rt_start_time,
			get_passed_quanta(task));

		set_rt_flags(task, RT_F_RUNNING);
		queue_lock_irqsave(&pfair.pfair_lock, flags);
		sched_trace_job_release(task);
		if (time_after_eq(jiffies, get_release(task))) {
			pfair_add_ready(&pfair, task);
		} else {
			pfair_add_release(&pfair, task);
		}
		queue_unlock_irqrestore(&pfair.pfair_lock, flags);

	} else {
		/* no budget left — unexpected state; park the task on the
		 * release queue and log it */
		TRACE("[%d] %d Strange release of %d with %d, %d, %d\n",
			smp_processor_id(), jiffies-rt_start_time,
			task->pid,
			get_release(task), get_deadline(task),
			get_passed_quanta(task));

		queue_lock_irqsave(&pfair.pfair_lock, flags);
		pfair_add_release(&pfair, task);
		queue_unlock_irqrestore(&pfair.pfair_lock, flags);
	}
}
415
416
/* Blocking handler: remove the task from every per-CPU running slot
 * and, if it is still queued, from the ready/release queues.
 */
static void pfair_task_blocks(struct task_struct *t)
{
	unsigned long flags;
	int i;
	cpumask_t res;
	BUG_ON(!is_realtime(t));
	/* If the task blocks, then it must be removed from the running set */
	queue_lock_irqsave(&pfair.pfair_lock, flags);
	cpus_and(res,pfair.domain_cpus, cpu_online_map);
	for_each_cpu_mask(i, res) {
		if (per_cpu(states, i).t == t)
			per_cpu(states, i).t = NULL;
	}
	/* If the task is running and in some
	 * list it might have been released by another
	 * processor.
	 * list_del() poisons next with LIST_POISON1 and prev with
	 * LIST_POISON2, so an un-poisoned pointer means "still queued".
	 */
	if((t->rt_list.next != LIST_POISON1 ||
		t->rt_list.prev != LIST_POISON2)) {
		TRACE("[%d] %d task %d is deleted from the list\n",
			smp_processor_id(),
			jiffies-rt_start_time, t->pid);
		list_del(&t->rt_list);
	}
	queue_unlock_irqrestore(&pfair.pfair_lock, flags);
	TRACE("[%d] %d task %d blocks with budget=%d state=%d\n",
		smp_processor_id(), jiffies-rt_start_time,
		t->pid, t->time_slice, t->state);
}
446
/* Tear-down hook: the task leaves real-time mode. Sanity-check that
 * it is no longer on the Linux run queue (t->array) and that its
 * rt_list pointers carry the list_del() poison values, i.e. it is not
 * queued in the pfair domain anymore.
 */
static long pfair_tear_down(struct task_struct * t)
{
	BUG_ON(!is_realtime(t));
	TRACE("pfair: tear down called for %d \n", t->pid);
	BUG_ON(t->array);
	BUG_ON(t->rt_list.next != LIST_POISON1);
	BUG_ON(t->rt_list.prev != LIST_POISON2);
	return 0;
}
456
457static int pfair_mode_change(int new_mode)
458{
459 printk(KERN_INFO "[%d] pfair mode change %d\n",
460 smp_processor_id(), new_mode);
461 if (new_mode == MODE_RT_RUN) {
462 pfair_prepare_new_releases(&pfair, jiffies + 10);
463 }
464 printk(KERN_INFO "[%d] pfair: mode change done\n", smp_processor_id());
465 return 0;
466}
467
/* Plugin object; filled in lazily by init_pfair_plugin() */
static sched_plugin_t s_plugin __cacheline_aligned_in_smp = {
	.ready_to_use = 0
};
/*
 * PFAIR plugin initialization macro: a compound literal wiring the
 * plugin callbacks defined above into a sched_plugin value.
 * Unused hooks (sleep_next_period, shutdown_hook) are left as 0.
 */
#define INIT_PFAIR_PLUGIN (struct sched_plugin){\
	.plugin_name = "PFAIR",\
	.ready_to_use = 1,\
	.algo_scheduler_tick = pfair_scheduler_tick,\
	.scheduler_tick = rt_scheduler_tick,\
	.prepare_task = pfair_prepare_task,\
	.sleep_next_period = 0,\
	.tear_down = pfair_tear_down,\
	.shutdown_hook = 0,\
	.schedule = pfair_schedule,\
	.finish_switch = pfair_finish_task_switch,\
	.mode_change = pfair_mode_change,\
	.wake_up_task = pfair_wake_up_task,\
	.task_blocks = pfair_task_blocks \
	}
490
491sched_plugin_t* __init init_pfair_plugin(void)
492{
493 int i=0;
494 if (!s_plugin.ready_to_use) {
495 set_sched_options(SCHED_NONE);
496 pfair_domain_init(&pfair);
497 for (i=0; i<NR_CPUS; i++) {
498 sync_go[i] = 0;
499 per_cpu(states, i).t = NULL;
500 }
501 s_plugin = INIT_PFAIR_PLUGIN;
502 }
503 return &s_plugin;
504}