author      Bjoern Brandenburg <bbb@mpi-sws.org>    2015-08-09 07:18:55 -0400
committer   Bjoern Brandenburg <bbb@mpi-sws.org>    2017-05-26 17:12:40 -0400
commit      b410e1d8a4699e4a1c1edc0fc7d442032e1af7da (patch)
tree        b56f64a8eaea7ecd4cb941485fedf43b40fd2dce
parent      8bc21314c60e342460e2e286217df7108b56ecde (diff)
Add GSN-EDF scheduler plugin
GSN-EDF: fix wrong memset()
GSN-EDF: use sched_trace_last_suspension_as_completion()
GSN-EDF use inferred_sporadic_job_release_at()
GSN-EDF: include np.h
-rw-r--r--    litmus/Makefile        |    1
-rw-r--r--    litmus/sched_gsn_edf.c | 1069
2 files changed, 1070 insertions, 0 deletions
diff --git a/litmus/Makefile b/litmus/Makefile
index 1871953d3fc6..c7bf0af79764 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -19,6 +19,7 @@ obj-y = sched_plugin.o litmus.o \
 	binheap.o \
 	ctrldev.o \
 	uncachedev.o \
+	sched_gsn_edf.o \
 	sched_psn_edf.o
 
 
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
new file mode 100644
index 000000000000..980d3ae694f0
--- /dev/null
+++ b/litmus/sched_gsn_edf.c
@@ -0,0 +1,1069 @@
/*
 * litmus/sched_gsn_edf.c
 *
 * Implementation of the GSN-EDF scheduling algorithm.
 *
 * This version uses the simple approach and serializes all scheduling
 * decisions by the use of a queue lock. This is probably not the
 * best way to do it, but it should suffice for now.
 */

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <litmus/litmus.h>
#include <litmus/jobs.h>
#include <litmus/sched_plugin.h>
#include <litmus/edf_common.h>
#include <litmus/sched_trace.h>
#include <litmus/trace.h>

#include <litmus/preempt.h>
#include <litmus/budget.h>
#include <litmus/np.h>

#include <litmus/bheap.h>

#ifdef CONFIG_SCHED_CPU_AFFINITY
#include <litmus/affinity.h>
#endif

/* to set up domain/cpu mappings */
#include <litmus/litmus_proc.h>

#include <linux/module.h>
/* Overview of GSN-EDF operations.
 *
 * For a detailed explanation of GSN-EDF have a look at the FMLP paper. This
 * description only covers how the individual operations are implemented in
 * LITMUS.
 *
 * link_task_to_cpu(T, cpu) - Low-level operation to update the linkage
 *                            structure (NOT the actually scheduled
 *                            task). If there is another linked task To
 *                            already it will set To->linked_on = NO_CPU
 *                            (thereby removing its association with this
 *                            CPU). However, it will not requeue the
 *                            previously linked task (if any). It will set
 *                            T's state to not completed and check whether
 *                            it is already running somewhere else. If T
 *                            is scheduled somewhere else it will link
 *                            it to that CPU instead (and pull the linked
 *                            task to cpu). T may be NULL.
 *
 * unlink(T)                - Unlink removes T from all scheduler data
 *                            structures. If it is linked to some CPU it
 *                            will link NULL to that CPU. If it is
 *                            currently queued in the gsnedf queue it will
 *                            be removed from the rt_domain. It is safe to
 *                            call unlink(T) if T is not linked. T may not
 *                            be NULL.
 *
 * requeue(T)               - Requeue will insert T into the appropriate
 *                            queue. If the system is in real-time mode and
 *                            T is released already, it will go into the
 *                            ready queue. If the system is not in
 *                            real-time mode, T will go into the release
 *                            queue. If T's release time is in the
 *                            future, it will go into the release
 *                            queue. That means that T's release time/job
 *                            no/etc. has to be updated before requeue(T)
 *                            is called. It is not safe to call requeue(T)
 *                            when T is already queued. T may not be NULL.
 *
 * gsnedf_job_arrival(T)    - This is the catch-all function when T enters
 *                            the system after either a suspension or at a
 *                            job release. It will queue T (which means it
 *                            is not safe to call gsnedf_job_arrival(T) if
 *                            T is already queued) and then check whether a
 *                            preemption is necessary. If a preemption is
 *                            necessary it will update the linkage
 *                            accordingly and cause schedule() to be called
 *                            (either with an IPI or need_resched). It is
 *                            safe to call gsnedf_job_arrival(T) if T's
 *                            next job has not been actually released yet
 *                            (release time in the future). T will be put
 *                            on the release queue in that case.
 *
 * curr_job_completion()    - Take care of everything that needs to be done
 *                            to prepare the current task for its next
 *                            release and place it in the right queue with
 *                            gsnedf_job_arrival().
 *
 *
 * When we know that T is linked to a CPU, link_task_to_cpu(NULL, CPU) is
 * equivalent to unlink(T). Note that if you unlink a task from a CPU none of
 * the functions will automatically propagate pending tasks from the ready
 * queue to a linked task. This is the job of the calling function (by means
 * of __take_ready).
 */


/* cpu_entry_t - maintain the linked and scheduled state
 */
typedef struct {
        int                     cpu;
        struct task_struct*     linked;    /* only RT tasks */
        struct task_struct*     scheduled; /* only RT tasks */
        struct bheap_node*      hn;
} cpu_entry_t;
DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries);

cpu_entry_t* gsnedf_cpus[NR_CPUS];

/* the cpus queue themselves according to priority in here */
static struct bheap_node gsnedf_heap_node[NR_CPUS];
static struct bheap      gsnedf_cpu_heap;

static rt_domain_t gsnedf;
#define gsnedf_lock (gsnedf.ready_lock)


/* Uncomment this if you want to see all scheduling decisions in the
 * TRACE() log.
#define WANT_ALL_SCHED_EVENTS
 */
static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
{
        cpu_entry_t *a, *b;
        a = _a->value;
        b = _b->value;
        /* Note that a and b are inverted: we want the lowest-priority CPU at
         * the top of the heap.
         */
        return edf_higher_prio(b->linked, a->linked);
}

/* update_cpu_position - Move the cpu entry to the correct place to maintain
 *                       order in the cpu queue. Caller must hold gsnedf lock.
 */
static void update_cpu_position(cpu_entry_t *entry)
{
        if (likely(bheap_node_in_heap(entry->hn)))
                bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn);
        bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn);
}

/* caller must hold gsnedf lock */
static cpu_entry_t* lowest_prio_cpu(void)
{
        struct bheap_node* hn;
        hn = bheap_peek(cpu_lower_prio, &gsnedf_cpu_heap);
        return hn->value;
}


/* link_task_to_cpu - Update the link of a CPU.
 *                    Handles the case where the to-be-linked task is already
 *                    scheduled on a different CPU.
 */
static noinline void link_task_to_cpu(struct task_struct* linked,
                                      cpu_entry_t *entry)
{
        cpu_entry_t *sched;
        struct task_struct* tmp;
        int on_cpu;

        BUG_ON(linked && !is_realtime(linked));

        /* Currently linked task is set to be unlinked. */
        if (entry->linked) {
                entry->linked->rt_param.linked_on = NO_CPU;
        }

        /* Link new task to CPU. */
        if (linked) {
                /* handle the case where the task is already scheduled somewhere! */
                on_cpu = linked->rt_param.scheduled_on;
                if (on_cpu != NO_CPU) {
                        sched = &per_cpu(gsnedf_cpu_entries, on_cpu);
                        /* this should only happen if not linked already */
                        BUG_ON(sched->linked == linked);

                        /* If we are already scheduled on the CPU to which we
                         * wanted to link, we don't need to do the swap --
                         * we just link ourselves to the CPU and depend on
                         * the caller to get things right.
                         */
                        if (entry != sched) {
                                TRACE_TASK(linked,
                                           "already scheduled on %d, updating link.\n",
                                           sched->cpu);
                                tmp = sched->linked;
                                linked->rt_param.linked_on = sched->cpu;
                                sched->linked = linked;
                                update_cpu_position(sched);
                                linked = tmp;
                        }
                }
                if (linked) /* might be NULL due to swap */
                        linked->rt_param.linked_on = entry->cpu;
        }
        entry->linked = linked;
#ifdef WANT_ALL_SCHED_EVENTS
        if (linked)
                TRACE_TASK(linked, "linked to %d.\n", entry->cpu);
        else
                TRACE("NULL linked to %d.\n", entry->cpu);
#endif
        update_cpu_position(entry);
}

/* unlink - Make sure a task is not linked any longer to an entry
 *          where it was linked before. Must hold gsnedf_lock.
 */
static noinline void unlink(struct task_struct* t)
{
        cpu_entry_t *entry;

        if (t->rt_param.linked_on != NO_CPU) {
                /* unlink */
                entry = &per_cpu(gsnedf_cpu_entries, t->rt_param.linked_on);
                t->rt_param.linked_on = NO_CPU;
                link_task_to_cpu(NULL, entry);
        } else if (is_queued(t)) {
                /* This is an interesting situation: t is scheduled,
                 * but was just recently unlinked. It cannot be
                 * linked anywhere else (because then it would have
                 * been relinked to this CPU), thus it must be in some
                 * queue. We must remove it from the list in this
                 * case.
                 */
                remove(&gsnedf, t);
        }
}


/* preempt - force a CPU to reschedule
 */
static void preempt(cpu_entry_t *entry)
{
        preempt_if_preemptable(entry->scheduled, entry->cpu);
}

/* requeue - Put an unlinked task into gsn-edf domain.
 *           Caller must hold gsnedf_lock.
 */
static noinline void requeue(struct task_struct* task)
{
        BUG_ON(!task);
        /* sanity check before insertion */
        BUG_ON(is_queued(task));

        if (is_early_releasing(task) || is_released(task, litmus_clock()))
                __add_ready(&gsnedf, task);
        else {
                /* it has got to wait */
                add_release(&gsnedf, task);
        }
}

#ifdef CONFIG_SCHED_CPU_AFFINITY
static cpu_entry_t* gsnedf_get_nearest_available_cpu(cpu_entry_t *start)
{
        cpu_entry_t *affinity;

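        /* get_nearest_available_cpu() is a macro; it assigns the nearest
         * available CPU (if any) to 'affinity' */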
        get_nearest_available_cpu(affinity, start, gsnedf_cpu_entries,
#ifdef CONFIG_RELEASE_MASTER
                                  gsnedf.release_master,
#else
                                  NO_CPU,
#endif
                                  cpu_online_mask);

        return(affinity);
}
#endif

/* check for any necessary preemptions */
static void check_for_preemptions(void)
{
        struct task_struct *task;
        cpu_entry_t *last;


#ifdef CONFIG_PREFER_LOCAL_LINKING
        cpu_entry_t *local;

        /* Before linking to other CPUs, check first whether the local CPU is
         * idle. */
        local = this_cpu_ptr(&gsnedf_cpu_entries);
        task = __peek_ready(&gsnedf);

        if (task && !local->linked
#ifdef CONFIG_RELEASE_MASTER
            && likely(local->cpu != gsnedf.release_master)
#endif
                ) {
                task = __take_ready(&gsnedf);
                TRACE_TASK(task, "linking to local CPU %d to avoid IPI\n", local->cpu);
                link_task_to_cpu(task, local);
                preempt(local);
        }
#endif

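        /* Link ready jobs to the lowest-priority CPUs for as long as a
         * higher-priority job is waiting; each iteration links one job
         * and forces the displaced CPU to reschedule. */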
        for (last = lowest_prio_cpu();
             edf_preemption_needed(&gsnedf, last->linked);
             last = lowest_prio_cpu()) {
                /* preemption necessary */
                task = __take_ready(&gsnedf);
                TRACE("check_for_preemptions: attempting to link task %d to %d\n",
                      task->pid, last->cpu);

#ifdef CONFIG_SCHED_CPU_AFFINITY
                {
                        cpu_entry_t *affinity =
                                gsnedf_get_nearest_available_cpu(
                                        &per_cpu(gsnedf_cpu_entries, task_cpu(task)));
                        if (affinity)
                                last = affinity;
                        else if (requeue_preempted_job(last->linked))
                                requeue(last->linked);
                }
#else
                if (requeue_preempted_job(last->linked))
                        requeue(last->linked);
#endif

                link_task_to_cpu(task, last);
                preempt(last);
        }
}

/* gsnedf_job_arrival: task is either resumed or released */
static noinline void gsnedf_job_arrival(struct task_struct* task)
{
        BUG_ON(!task);

        requeue(task);
        check_for_preemptions();
}

static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&gsnedf_lock, flags);

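        /* merge the whole heap of newly released jobs into the ready queue
         * at once, then check whether any CPU must preempt */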
        __merge_ready(rt, tasks);
        check_for_preemptions();

        raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
}

/* caller holds gsnedf_lock */
static noinline void curr_job_completion(int forced)
{
        struct task_struct *t = current;
        BUG_ON(!t);

        sched_trace_task_completion(t, forced);

        TRACE_TASK(t, "job_completion(forced=%d).\n", forced);

        /* set flags */
        tsk_rt(t)->completed = 0;
        /* prepare for next period */
        prepare_for_next_period(t);
        if (is_early_releasing(t) || is_released(t, litmus_clock()))
                sched_trace_task_release(t);
        /* unlink */
        unlink(t);
        /* requeue
         * But don't requeue a blocking task. */
        if (is_current_running())
                gsnedf_job_arrival(t);
}

/* Getting schedule() right is a bit tricky. schedule() may not make any
 * assumptions on the state of the current task since it may be called for a
 * number of reasons. The reasons include: a scheduler_tick() determined that
 * it was necessary, sys_exit_np() was called, some Linux subsystem determined
 * so, or even (in the worst case) there is a bug hidden somewhere. Thus, we
 * must take extreme care to determine what the current state is.
 *
 * The CPU could currently be scheduling a task (or not) and be linked (or not).
 *
 * The following assertions for the scheduled task could hold:
 *
 *      - !is_running(scheduled)        // the job blocks
 *      - scheduled->timeslice == 0     // the job completed (forcefully)
 *      - is_completed()                // the job completed (by syscall)
 *      - linked != scheduled           // we need to reschedule (for any reason)
 *      - is_np(scheduled)              // rescheduling must be delayed,
 *                                         sys_exit_np must be requested
 *
 * Any of these can occur together.
 */
static struct task_struct* gsnedf_schedule(struct task_struct * prev)
{
        cpu_entry_t* entry = this_cpu_ptr(&gsnedf_cpu_entries);
        int out_of_time, sleep, preempt, np, exists, blocks;
        struct task_struct* next = NULL;

#ifdef CONFIG_RELEASE_MASTER
        /* Bail out early if we are the release master.
         * The release master never schedules any real-time tasks.
         */
        if (unlikely(gsnedf.release_master == entry->cpu)) {
                sched_state_task_picked();
                return NULL;
        }
#endif

        raw_spin_lock(&gsnedf_lock);

        /* sanity checking */
        BUG_ON(entry->scheduled && entry->scheduled != prev);
        BUG_ON(entry->scheduled && !is_realtime(prev));
        BUG_ON(is_realtime(prev) && !entry->scheduled);

        /* (0) Determine state */
        exists      = entry->scheduled != NULL;
        blocks      = exists && !is_current_running();
        out_of_time = exists && budget_enforced(entry->scheduled)
                             && budget_exhausted(entry->scheduled);
        np          = exists && is_np(entry->scheduled);
        sleep       = exists && is_completed(entry->scheduled);
        preempt     = entry->scheduled != entry->linked;

#ifdef WANT_ALL_SCHED_EVENTS
        TRACE_TASK(prev, "invoked gsnedf_schedule.\n");
#endif

        if (exists)
                TRACE_TASK(prev,
                           "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d "
                           "state:%d sig:%d\n",
                           blocks, out_of_time, np, sleep, preempt,
                           prev->state, signal_pending(prev));
        if (entry->linked && preempt)
                TRACE_TASK(prev, "will be preempted by %s/%d\n",
                           entry->linked->comm, entry->linked->pid);


        /* If a task blocks we have no choice but to reschedule.
         */
        if (blocks)
                unlink(entry->scheduled);

        /* Request a sys_exit_np() call if we would like to preempt but cannot.
         * We need to make sure to update the link structure anyway in case
         * that we are still linked. Multiple calls to request_exit_np() don't
         * hurt.
         */
        if (np && (out_of_time || preempt || sleep)) {
                unlink(entry->scheduled);
                request_exit_np(entry->scheduled);
        }

        /* Any task that is preemptable and either exhausts its execution
         * budget or wants to sleep completes. We may have to reschedule after
         * this. Don't do a job completion if we block (can't have timers
         * running for blocked jobs).
         */
        if (!np && (out_of_time || sleep))
                curr_job_completion(!sleep);

        /* Link pending task if we became unlinked.
         */
        if (!entry->linked)
                link_task_to_cpu(__take_ready(&gsnedf), entry);

        /* The final scheduling decision. Do we need to switch for some reason?
         * If linked is different from scheduled, then select linked as next.
         */
        if ((!np || blocks) &&
            entry->linked != entry->scheduled) {
                /* Schedule a linked job? */
                if (entry->linked) {
                        entry->linked->rt_param.scheduled_on = entry->cpu;
                        next = entry->linked;
                        TRACE_TASK(next, "scheduled_on = P%d\n", smp_processor_id());
                }
                if (entry->scheduled) {
                        /* not gonna be scheduled soon */
                        entry->scheduled->rt_param.scheduled_on = NO_CPU;
                        TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
                }
        } else
                /* Only override Linux scheduler if we have a real-time task
                 * scheduled that needs to continue.
                 */
                if (exists)
                        next = prev;

        sched_state_task_picked();

        raw_spin_unlock(&gsnedf_lock);

#ifdef WANT_ALL_SCHED_EVENTS
        TRACE("gsnedf_lock released, next=0x%p\n", next);

        if (next)
                TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
        else if (exists && !next)
                TRACE("becomes idle at %llu.\n", litmus_clock());
#endif


        return next;
}


/* _finish_switch - we just finished the switch away from prev
 */
static void gsnedf_finish_switch(struct task_struct *prev)
{
        cpu_entry_t* entry = this_cpu_ptr(&gsnedf_cpu_entries);

        entry->scheduled = is_realtime(current) ? current : NULL;
#ifdef WANT_ALL_SCHED_EVENTS
        TRACE_TASK(prev, "switched away from\n");
#endif
}


/* Prepare a task for running in RT mode
 */
static void gsnedf_task_new(struct task_struct * t, int on_rq, int is_scheduled)
{
        unsigned long flags;
        cpu_entry_t* entry;

        TRACE("gsn edf: task new %d\n", t->pid);

        raw_spin_lock_irqsave(&gsnedf_lock, flags);

        /* setup job params */
        release_at(t, litmus_clock());

        if (is_scheduled) {
                entry = &per_cpu(gsnedf_cpu_entries, task_cpu(t));
                BUG_ON(entry->scheduled);

#ifdef CONFIG_RELEASE_MASTER
                if (entry->cpu != gsnedf.release_master) {
#endif
                        entry->scheduled = t;
                        tsk_rt(t)->scheduled_on = task_cpu(t);
#ifdef CONFIG_RELEASE_MASTER
                } else {
                        /* do not schedule on release master */
                        preempt(entry); /* force resched */
                        tsk_rt(t)->scheduled_on = NO_CPU;
                }
#endif
        } else {
                t->rt_param.scheduled_on = NO_CPU;
        }
        t->rt_param.linked_on = NO_CPU;

        if (on_rq || is_scheduled)
                gsnedf_job_arrival(t);
        raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
}

static void gsnedf_task_wake_up(struct task_struct *task)
{
        unsigned long flags;
        lt_t now;

        TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());

        raw_spin_lock_irqsave(&gsnedf_lock, flags);
        now = litmus_clock();
        if (is_sporadic(task) && is_tardy(task, now)) {
                inferred_sporadic_job_release_at(task, now);
        }
        gsnedf_job_arrival(task);
        raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
}

static void gsnedf_task_block(struct task_struct *t)
{
        unsigned long flags;

        TRACE_TASK(t, "block at %llu\n", litmus_clock());

        /* unlink if necessary */
        raw_spin_lock_irqsave(&gsnedf_lock, flags);
        unlink(t);
        raw_spin_unlock_irqrestore(&gsnedf_lock, flags);

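        /* sanity check: only real-time tasks should reach this callback */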
        BUG_ON(!is_realtime(t));
}


static void gsnedf_task_exit(struct task_struct * t)
{
        unsigned long flags;

        /* unlink if necessary */
        raw_spin_lock_irqsave(&gsnedf_lock, flags);
        unlink(t);
        if (tsk_rt(t)->scheduled_on != NO_CPU) {
                gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
                tsk_rt(t)->scheduled_on = NO_CPU;
        }
        raw_spin_unlock_irqrestore(&gsnedf_lock, flags);

        BUG_ON(!is_realtime(t));
        TRACE_TASK(t, "RIP\n");
}


static long gsnedf_admit_task(struct task_struct* tsk)
{
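        /* no admission restrictions: GSN-EDF accepts any real-time task */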
        return 0;
}

#ifdef CONFIG_LITMUS_LOCKING

#include <litmus/fdso.h>

/* called with IRQs off */
static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
{
        int linked_on;
        int check_preempt = 0;

        raw_spin_lock(&gsnedf_lock);

        TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid);
        tsk_rt(t)->inh_task = prio_inh;

        linked_on = tsk_rt(t)->linked_on;

        /* If it is scheduled, then we need to reorder the CPU heap. */
        if (linked_on != NO_CPU) {
                TRACE_TASK(t, "%s: linked on %d\n",
                           __FUNCTION__, linked_on);
                /* Holder is scheduled; need to re-order CPUs.
                 * We can't use heap_decrease() here since
                 * the cpu_heap is ordered in reverse direction, so
                 * it is actually an increase. */
                bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap,
                             gsnedf_cpus[linked_on]->hn);
                bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap,
                             gsnedf_cpus[linked_on]->hn);
        } else {
                /* holder may be queued: first stop queue changes */
                raw_spin_lock(&gsnedf.release_lock);
                if (is_queued(t)) {
                        TRACE_TASK(t, "%s: is queued\n",
                                   __FUNCTION__);
                        /* We need to update the position of holder in some
                         * heap. Note that this could be a release heap if
                         * budget enforcement is used and this job overran. */
                        check_preempt =
                                !bheap_decrease(edf_ready_order,
                                                tsk_rt(t)->heap_node);
                } else {
                        /* Nothing to do: if it is not queued and not linked
                         * then it is either sleeping or currently being moved
                         * by other code (e.g., a timer interrupt handler) that
                         * will use the correct priority when enqueuing the
                         * task. */
                        TRACE_TASK(t, "%s: is NOT queued => Done.\n",
                                   __FUNCTION__);
                }
                raw_spin_unlock(&gsnedf.release_lock);

                /* If holder was enqueued in a release heap, then the following
                 * preemption check is pointless, but we can't easily detect
                 * that case. If you want to fix this, then consider that
                 * simply adding a state flag requires O(n) time to update when
                 * releasing n tasks, which conflicts with the goal to have
                 * O(log n) merges. */
                if (check_preempt) {
                        /* heap_decrease() hit the top level of the heap: make
                         * sure preemption checks get the right task, not the
                         * potentially stale cache. */
                        bheap_uncache_min(edf_ready_order,
                                          &gsnedf.ready_queue);
                        check_for_preemptions();
                }
        }

        raw_spin_unlock(&gsnedf_lock);
}

/* called with IRQs off */
static void clear_priority_inheritance(struct task_struct* t)
{
        raw_spin_lock(&gsnedf_lock);

        /* A job only stops inheriting a priority when it releases a
         * resource. Thus we can make the following assumption. */
        BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU);

        TRACE_TASK(t, "priority restored\n");
        tsk_rt(t)->inh_task = NULL;

        /* Check if rescheduling is necessary. We can't use heap_decrease()
         * since the priority was effectively lowered. */
        unlink(t);
        gsnedf_job_arrival(t);

        raw_spin_unlock(&gsnedf_lock);
}


/* ******************** FMLP support ********************** */

/* struct for semaphore with priority inheritance */
struct fmlp_semaphore {
        struct litmus_lock litmus_lock;

        /* current resource holder */
        struct task_struct *owner;

        /* highest-priority waiter */
        struct task_struct *hp_waiter;

        /* FIFO queue of waiting tasks */
        wait_queue_head_t wait;
};

static inline struct fmlp_semaphore* fmlp_from_lock(struct litmus_lock* lock)
{
        return container_of(lock, struct fmlp_semaphore, litmus_lock);
}

/* caller is responsible for locking */
struct task_struct* find_hp_waiter(struct fmlp_semaphore *sem,
                                   struct task_struct* skip)
{
        struct list_head *pos;
        struct task_struct *queued, *found = NULL;

        list_for_each(pos, &sem->wait.task_list) {
                queued = (struct task_struct*) list_entry(pos, wait_queue_t,
                                                          task_list)->private;

                /* Compare task prios, find high prio task. */
                if (queued != skip && edf_higher_prio(queued, found))
                        found = queued;
        }
        return found;
}

int gsnedf_fmlp_lock(struct litmus_lock* l)
{
        struct task_struct* t = current;
        struct fmlp_semaphore *sem = fmlp_from_lock(l);
        wait_queue_t wait;
        unsigned long flags;

        if (!is_realtime(t))
                return -EPERM;

        /* prevent nested lock acquisition --- not supported by FMLP */
        if (tsk_rt(t)->num_locks_held)
                return -EBUSY;

        spin_lock_irqsave(&sem->wait.lock, flags);

        if (sem->owner) {
                /* resource is not free => must suspend and wait */

                init_waitqueue_entry(&wait, t);

                /* FIXME: interruptible would be nice some day */
                set_task_state(t, TASK_UNINTERRUPTIBLE);

                __add_wait_queue_tail_exclusive(&sem->wait, &wait);

                /* check if we need to activate priority inheritance */
                if (edf_higher_prio(t, sem->hp_waiter)) {
                        sem->hp_waiter = t;
                        if (edf_higher_prio(t, sem->owner))
                                set_priority_inheritance(sem->owner, sem->hp_waiter);
                }

                TS_LOCK_SUSPEND;

                /* release lock before sleeping */
                spin_unlock_irqrestore(&sem->wait.lock, flags);

                /* We depend on the FIFO order. Thus, we don't need to recheck
                 * when we wake up; we are guaranteed to have the lock since
                 * there is only one wake up per release.
                 */

                schedule();

                TS_LOCK_RESUME;

                /* Since we hold the lock, no other task will change
                 * ->owner. We can thus check it without acquiring the spin
                 * lock. */
                BUG_ON(sem->owner != t);
        } else {
                /* it's ours now */
                sem->owner = t;

                spin_unlock_irqrestore(&sem->wait.lock, flags);
        }

        tsk_rt(t)->num_locks_held++;

        return 0;
}

int gsnedf_fmlp_unlock(struct litmus_lock* l)
{
        struct task_struct *t = current, *next;
        struct fmlp_semaphore *sem = fmlp_from_lock(l);
        unsigned long flags;
        int err = 0;

        spin_lock_irqsave(&sem->wait.lock, flags);

        if (sem->owner != t) {
                err = -EINVAL;
                goto out;
        }

        tsk_rt(t)->num_locks_held--;

        /* check if there are jobs waiting for this resource */
        next = __waitqueue_remove_first(&sem->wait);
        if (next) {
                /* next becomes the resource holder */
                sem->owner = next;
                TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid);

                /* determine new hp_waiter if necessary */
                if (next == sem->hp_waiter) {
                        TRACE_TASK(next, "was highest-prio waiter\n");
                        /* next has the highest priority --- it doesn't need to
                         * inherit. However, we need to make sure that the
                         * next-highest priority in the queue is reflected in
                         * hp_waiter. */
                        sem->hp_waiter = find_hp_waiter(sem, next);
                        if (sem->hp_waiter)
                                TRACE_TASK(sem->hp_waiter, "is new highest-prio waiter\n");
                        else
                                TRACE("no further waiters\n");
                } else {
                        /* Well, if next is not the highest-priority waiter,
                         * then it ought to inherit the highest-priority
                         * waiter's priority. */
                        set_priority_inheritance(next, sem->hp_waiter);
                }

                /* wake up next */
                wake_up_process(next);
        } else
                /* becomes available */
                sem->owner = NULL;

        /* we lose the benefit of priority inheritance (if any) */
        if (tsk_rt(t)->inh_task)
                clear_priority_inheritance(t);

out:
        spin_unlock_irqrestore(&sem->wait.lock, flags);

        return err;
}

int gsnedf_fmlp_close(struct litmus_lock* l)
{
        struct task_struct *t = current;
        struct fmlp_semaphore *sem = fmlp_from_lock(l);
        unsigned long flags;

        int owner;

        spin_lock_irqsave(&sem->wait.lock, flags);

        owner = sem->owner == t;

        spin_unlock_irqrestore(&sem->wait.lock, flags);

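        /* a task may close the lock's file descriptor (e.g., on exit) while
         * still holding the resource: release it on the task's behalf */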
        if (owner)
                gsnedf_fmlp_unlock(l);

        return 0;
}

void gsnedf_fmlp_free(struct litmus_lock* lock)
{
        kfree(fmlp_from_lock(lock));
}

static struct litmus_lock_ops gsnedf_fmlp_lock_ops = {
        .close      = gsnedf_fmlp_close,
        .lock       = gsnedf_fmlp_lock,
        .unlock     = gsnedf_fmlp_unlock,
        .deallocate = gsnedf_fmlp_free,
};

static struct litmus_lock* gsnedf_new_fmlp(void)
{
        struct fmlp_semaphore* sem;

        sem = kmalloc(sizeof(*sem), GFP_KERNEL);
        if (!sem)
                return NULL;

        sem->owner     = NULL;
        sem->hp_waiter = NULL;
        init_waitqueue_head(&sem->wait);
        sem->litmus_lock.ops = &gsnedf_fmlp_lock_ops;

        return &sem->litmus_lock;
}

/* **** lock constructor **** */


static long gsnedf_allocate_lock(struct litmus_lock **lock, int type,
                                 void* __user unused)
{
        int err = -ENXIO;

        /* GSN-EDF currently only supports the FMLP for global resources. */
        switch (type) {

        case FMLP_SEM:
                /* Flexible Multiprocessor Locking Protocol */
                *lock = gsnedf_new_fmlp();
                if (*lock)
                        err = 0;
                else
                        err = -ENOMEM;
                break;

        }

        return err;
}

#endif

static struct domain_proc_info gsnedf_domain_proc_info;
static long gsnedf_get_domain_proc_info(struct domain_proc_info **ret)
{
        *ret = &gsnedf_domain_proc_info;
        return 0;
}

static void gsnedf_setup_domain_proc(void)
{
        int i, cpu;
        int release_master =
#ifdef CONFIG_RELEASE_MASTER
                atomic_read(&release_master_cpu);
#else
                NO_CPU;
#endif
        int num_rt_cpus = num_online_cpus() - (release_master != NO_CPU);
        struct cd_mapping *map;

        memset(&gsnedf_domain_proc_info, 0, sizeof(gsnedf_domain_proc_info));
        init_domain_proc_info(&gsnedf_domain_proc_info, num_rt_cpus, 1);
        gsnedf_domain_proc_info.num_cpus = num_rt_cpus;
        gsnedf_domain_proc_info.num_domains = 1;

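        /* GSN-EDF is global: every RT CPU (all online CPUs except a possible
         * release master) is mapped to the single scheduling domain 0 */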
        gsnedf_domain_proc_info.domain_to_cpus[0].id = 0;
        for (cpu = 0, i = 0; cpu < num_online_cpus(); ++cpu) {
                if (cpu == release_master)
                        continue;
                map = &gsnedf_domain_proc_info.cpu_to_domains[i];
                map->id = cpu;
                cpumask_set_cpu(0, map->mask);
                ++i;

                /* add cpu to the domain */
                cpumask_set_cpu(cpu,
                                gsnedf_domain_proc_info.domain_to_cpus[0].mask);
        }
}

static long gsnedf_activate_plugin(void)
{
        int cpu;
        cpu_entry_t *entry;

        bheap_init(&gsnedf_cpu_heap);
#ifdef CONFIG_RELEASE_MASTER
        gsnedf.release_master = atomic_read(&release_master_cpu);
#endif

        for_each_online_cpu(cpu) {
                entry = &per_cpu(gsnedf_cpu_entries, cpu);
                bheap_node_init(&entry->hn, entry);
                entry->linked    = NULL;
                entry->scheduled = NULL;
#ifdef CONFIG_RELEASE_MASTER
                if (cpu != gsnedf.release_master) {
#endif
                        TRACE("GSN-EDF: Initializing CPU #%d.\n", cpu);
                        update_cpu_position(entry);
#ifdef CONFIG_RELEASE_MASTER
                } else {
                        TRACE("GSN-EDF: CPU %d is release master.\n", cpu);
                }
#endif
        }

        gsnedf_setup_domain_proc();

        return 0;
}

static long gsnedf_deactivate_plugin(void)
{
        destroy_domain_proc_info(&gsnedf_domain_proc_info);
        return 0;
}

/* Plugin object */
static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = {
        .plugin_name          = "GSN-EDF",
        .finish_switch        = gsnedf_finish_switch,
        .task_new             = gsnedf_task_new,
        .complete_job         = complete_job,
        .task_exit            = gsnedf_task_exit,
        .schedule             = gsnedf_schedule,
        .task_wake_up         = gsnedf_task_wake_up,
        .task_block           = gsnedf_task_block,
        .admit_task           = gsnedf_admit_task,
        .activate_plugin      = gsnedf_activate_plugin,
        .deactivate_plugin    = gsnedf_deactivate_plugin,
        .get_domain_proc_info = gsnedf_get_domain_proc_info,
#ifdef CONFIG_LITMUS_LOCKING
        .allocate_lock        = gsnedf_allocate_lock,
#endif
};


static int __init init_gsn_edf(void)
{
        int cpu;
        cpu_entry_t *entry;

        bheap_init(&gsnedf_cpu_heap);
        /* initialize CPU state */
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                entry = &per_cpu(gsnedf_cpu_entries, cpu);
                gsnedf_cpus[cpu] = entry;
                entry->cpu = cpu;
                entry->hn  = &gsnedf_heap_node[cpu];
                bheap_node_init(&entry->hn, entry);
        }
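        /* register release handling: newly released jobs are passed to
         * gsnedf_release_jobs() by the rt_domain */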
        edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs);
        return register_sched_plugin(&gsn_edf_plugin);
}


module_init(init_gsn_edf);