Diffstat (limited to 'litmus/sched_edzl.c')
 -rw-r--r--  litmus/sched_edzl.c  327
 1 files changed, 327 insertions, 0 deletions
diff --git a/litmus/sched_edzl.c b/litmus/sched_edzl.c
new file mode 100644
index 000000000000..0664b78e540b
--- /dev/null
+++ b/litmus/sched_edzl.c
@@ -0,0 +1,327 @@
/*
 * litmus/sched_edzl.c
 *
 * Implementation of the EDZL scheduling algorithm.
 *
 * This version uses the simple approach and serializes all scheduling
 * decisions by the use of a queue lock. This is probably not the
 * best way to do it, but it should suffice for now.
 */
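/*
 * EDZL (Earliest Deadline Zero Laxity) behaves like global EDF until a
 * job's laxity (time to its deadline minus its remaining budget) reaches
 * zero; zero-laxity jobs are then prioritized over all jobs that still
 * have slack. A per-task hrtimer detects the zero-laxity point of jobs
 * waiting in the ready queue; see on_zero_laxity() below.
 */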

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/sched.h>

#include <litmus/litmus.h>
#include <litmus/jobs.h>
#include <litmus/sched_global_plugin.h>
#include <litmus/edzl_common.h>
#include <litmus/sched_trace.h>

#include <litmus/preempt.h>

#include <litmus/bheap.h>

#include <linux/module.h>

static struct task_struct* __edzl_take_ready(rt_domain_t* rt);
static void __edzl_add_ready(rt_domain_t* rt, struct task_struct *new);
static void edzl_job_arrival(struct task_struct* task);
static void edzl_task_new(struct task_struct * t, int on_rq, int running);
static void edzl_task_wake_up(struct task_struct *task);
static void edzl_task_exit(struct task_struct * t);
static int edzl_preemption_needed(struct task_struct *t);

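/*
 * Note: EDZL reuses the generic global-plugin skeleton (the gblv_*/gbl_*
 * callbacks) and overrides only the EDZL-specific pieces: the priority
 * order and the ready-queue hooks that manage the zero-laxity timers.
 */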
/* EDZL Plugin object */
static struct sched_global_plugin edzl_plugin __cacheline_aligned_in_smp = {
	.plugin = {
		.finish_switch = gblv_finish_switch,
		.tick = gblv_tick,
		.complete_job = complete_job,
		.schedule = gblv_schedule,
		.task_block = gblv_task_block,
		.admit_task = gblv_admit_task,
		.activate_plugin = gbl_activate_plugin,

		.plugin_name = "EDZL",
		.task_new = edzl_task_new,
		.task_wake_up = edzl_task_wake_up,
		.task_exit = edzl_task_exit,
	},

	.job_completion = gbl_job_completion,

	.prio_order = edzl_higher_prio,
	.take_ready = __edzl_take_ready,
	.add_ready = __edzl_add_ready,
	.job_arrival = edzl_job_arrival,
	.preemption_needed = edzl_preemption_needed
};


#define active_gbl_domain (active_gbl_plugin->domain)
#define active_gbl_domain_lock (active_gbl_domain.ready_lock)

DEFINE_PER_CPU(cpu_entry_t, edzl_cpu_entries);

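/*
 * Timer callback: fires when a queued job reaches its zero-laxity point
 * (absolute deadline minus remaining budget). The job is flagged as
 * zero-laxity and moved to its elevated position in the ready queue.
 */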
static enum hrtimer_restart on_zero_laxity(struct hrtimer *timer)
{
	unsigned long flags;
	struct task_struct* t;

	lt_t now = litmus_clock();

	TRACE("Zero-laxity timer went off!\n");

	raw_spin_lock_irqsave(&active_gbl_domain_lock, flags);

	t = container_of(container_of(timer, struct rt_param, zl_timer),
			 struct task_struct,
			 rt_param);

	TRACE_TASK(t, "Reached zero-laxity. (now: %llu, zl-pt: %lld, time remaining (now): %lld)\n",
		   now,
		   get_deadline(t) - budget_remaining(t),
		   get_deadline(t) - now);

	set_zerolaxity(t);
	gbl_update_queue_position(t);

	raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags);

	return HRTIMER_NORESTART;
}

/* __edzl_take_ready - calls __take_ready, cancelling the EDZL zero-laxity timer as a side effect. */
static struct task_struct* __edzl_take_ready(rt_domain_t* rt)
{
	struct task_struct* t = __take_ready(rt);

	if(t)
	{
		if(get_zerolaxity(t) == 0)
		{
			if(hrtimer_active(&tsk_rt(t)->zl_timer))
			{
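				/*
				 * Note: hrtimer_try_to_cancel() returns 1 if
				 * the timer was active and is now cancelled,
				 * 0 if it was not active, and -1 if the
				 * callback is currently executing. Since we
				 * checked hrtimer_active() above, a return
				 * value of 0 would be unexpected; hence the
				 * WARN_ON below.
				 */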
				int cancel_ret;

				TRACE_TASK(t, "Canceling zero-laxity timer.\n");
				cancel_ret = hrtimer_try_to_cancel(&tsk_rt(t)->zl_timer);
				WARN_ON(cancel_ret == 0); /* should never be inactive. */
			}
		}
		else
		{
			TRACE_TASK(t, "Task already has zero-laxity flagged.\n");
		}
	}

	return t;
}

/* __edzl_add_ready - calls __add_ready, arming the EDZL zero-laxity timer as a side effect. */
static void __edzl_add_ready(rt_domain_t* rt, struct task_struct *new)
{
	__add_ready(rt, new);

	if(get_zerolaxity(new) == 0)
	{
		lt_t when_to_fire;

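		/*
		 * Laxity at time t is deadline - t - remaining budget, so
		 * it reaches zero at t = deadline - remaining budget. Arm
		 * an absolute, CPU-pinned timer for that instant.
		 */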
		when_to_fire = get_deadline(new) - budget_remaining(new);

		TRACE_TASK(new, "Setting zero-laxity timer for %llu. (deadline: %llu, remaining: %llu)\n",
			   when_to_fire,
			   get_deadline(new),
			   budget_remaining(new));

		__hrtimer_start_range_ns(&tsk_rt(new)->zl_timer,
					 ns_to_ktime(when_to_fire),
					 0,
					 HRTIMER_MODE_ABS_PINNED,
					 0);
	}
	else
	{
		TRACE_TASK(new, "Already has zero-laxity when added to ready queue. (deadline: %llu, remaining: %llu)\n",
			   get_deadline(new),
			   budget_remaining(new));
	}
}


/* edzl_job_arrival: task is either resumed or released */
static void edzl_job_arrival(struct task_struct* task)
{
	BUG_ON(!task);

	/* clear old laxity flag or tag zero-laxity upon release */
	if(laxity_remaining(task))
		clear_zerolaxity(task);
	else
		set_zerolaxity(task);

	gbl_requeue(task);
	gbl_check_for_preemptions();
}


/* Prepare a task for running in RT mode. */
static void edzl_task_new(struct task_struct * t, int on_rq, int running)
{
	unsigned long flags;
	cpu_entry_t* entry;

	TRACE("edzl: task new %d\n", t->pid);

	raw_spin_lock_irqsave(&active_gbl_domain_lock, flags);

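	/*
	 * The zero-laxity timer is initialized once per task here; it is
	 * armed in __edzl_add_ready() and cancelled in __edzl_take_ready()
	 * and edzl_task_exit().
	 */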
	hrtimer_init(&t->rt_param.zl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	t->rt_param.zl_timer.function = on_zero_laxity;

	/* setup job params */
	release_at(t, litmus_clock());

	if (running) {
		entry = active_gbl_plugin->cpus[task_cpu(t)];
		BUG_ON(entry->scheduled);

#ifdef CONFIG_RELEASE_MASTER
		if (entry->cpu != active_gbl_domain.release_master) {
#endif
			entry->scheduled = t;
			tsk_rt(t)->scheduled_on = task_cpu(t);
#ifdef CONFIG_RELEASE_MASTER
		} else {
			/* do not schedule on release master */
			gbl_preempt(entry); /* force resched */
			tsk_rt(t)->scheduled_on = NO_CPU;
		}
#endif
	} else {
		t->rt_param.scheduled_on = NO_CPU;
	}
	t->rt_param.linked_on = NO_CPU;

	active_gbl_plugin->job_arrival(t);
	raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags);
}


static void edzl_task_wake_up(struct task_struct *task)
{
	unsigned long flags;
	lt_t now;

	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());

	raw_spin_lock_irqsave(&active_gbl_domain_lock, flags);
	/* We need to take suspensions because of semaphores into
	 * account! If a job resumes after being suspended due to acquiring
	 * a semaphore, it should never be treated as a new job release.
	 */
	if (get_rt_flags(task) == RT_F_EXIT_SEM) {
		set_rt_flags(task, RT_F_RUNNING);
	} else {
		now = litmus_clock();
		if (is_tardy(task, now)) {
			/* new sporadic release */
			release_at(task, now);
			sched_trace_task_release(task);
		}
		else {
			if (task->rt.time_slice) {
				/* came back in time before deadline */
				set_rt_flags(task, RT_F_RUNNING);
			}
		}
	}
	active_gbl_plugin->job_arrival(task);
	raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags);
}


static void edzl_task_exit(struct task_struct * t)
{
	unsigned long flags;

	/* unlink if necessary */
	raw_spin_lock_irqsave(&active_gbl_domain_lock, flags);
	gbl_unlink(t);
	if (tsk_rt(t)->scheduled_on != NO_CPU) {
		active_gbl_plugin->cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
		tsk_rt(t)->scheduled_on = NO_CPU;
	}

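	/*
	 * Note: hrtimer_cancel() waits for a running callback to complete.
	 * Because on_zero_laxity() takes active_gbl_domain_lock, which is
	 * held here, cancelling while the callback is in flight would
	 * deadlock; reaching this branch with an armed timer is therefore
	 * suspect (see the trace message below).
	 */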
	if(hrtimer_active(&tsk_rt(t)->zl_timer))
	{
		/* BUG if reached? */
		TRACE_TASK(t, "Canceled armed timer while exiting.\n");
		hrtimer_cancel(&tsk_rt(t)->zl_timer);
	}

	raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags);

	BUG_ON(!is_realtime(t));
	TRACE_TASK(t, "RIP\n");
}


/* edzl_preemption_needed - check whether the task t needs to be preempted
 *                          call only with irqs disabled and with ready_lock acquired
 *                          THIS DOES NOT TAKE NON-PREEMPTIVE SECTIONS INTO ACCOUNT!
 */
static int edzl_preemption_needed(struct task_struct *t)
{
	/* we need the read lock for the ready queue */
	/* no need to preempt if there is nothing pending */
	if (!__jobs_pending(&active_gbl_domain))
		return 0;
	/* we need to reschedule if t doesn't exist */
	if (!t)
		return 1;
	/* make sure to get non-rt stuff out of the way */
	if (!is_realtime(t))
		return 1;

	/* NOTE: We cannot check for non-preemptibility since we
	 * don't know what address space we're currently in.
	 */

	/* Detect zero-laxity as needed. Easier to do it here than in tick.
	 * (No timer is used to detect zero-laxity while a job is running.)
	 */
	if(unlikely(!get_zerolaxity(t) && laxity_remaining(t) == 0))
	{
		set_zerolaxity(t);
	}

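	/* Preempt iff the highest-priority pending job beats t under the
	 * EDZL order: a zero-laxity job outranks one that still has slack;
	 * otherwise the earlier deadline wins.
	 */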
	return edzl_higher_prio(__next_ready(&active_gbl_domain), t);
}


static int __init init_edzl(void)
{
	int cpu;
	cpu_entry_t *entry;

	bheap_init(&edzl_plugin.cpu_heap);
	/* initialize CPU state */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		entry = &per_cpu(edzl_cpu_entries, cpu);
		edzl_plugin.cpus[cpu] = entry;
		entry->cpu = cpu;
		entry->hn = &edzl_plugin.heap_node[cpu];
		bheap_node_init(&entry->hn, entry);
	}
	gbl_domain_init(&edzl_plugin, NULL, gbl_release_jobs);

	return register_sched_plugin(&edzl_plugin.plugin);
}


module_init(init_edzl);