author    Andrea Bastoni <bastoni@cs.unc.edu>    2010-04-15 15:58:12 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>    2010-04-15 15:58:12 -0400
commit    911e93a62a050e68d1e79f0f11edcdd68dbfc959 (patch)
tree      48b25f2eb06b38e9ede13b7dfda16e7fdb7b6fe5
parent    7d791b6850ed52766ac1ed1adad2d03c34d3dc3b (diff)
Update comments
-rw-r--r--  litmus/sched_cedf.c  82
1 file changed, 15 insertions, 67 deletions
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index dace343a62d9..db543f179838 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -3,6 +3,17 @@
  *
  * Implementation of the C-EDF scheduling algorithm.
  *
+ * This implementation is based on G-EDF:
+ * - CPUs are clustered around L2 or L3 caches.
+ * - The cluster topology is detected automatically (this is arch dependent
+ *   and currently works only on x86, and only with modern CPUs that
+ *   export cpuid4 information).
+ * - The plugin _does not_ attempt to put tasks in the right cluster, i.e.
+ *   the programmer needs to be aware of the topology to place tasks
+ *   in the desired cluster.
+ *
+ * For details on functions, take a look at sched_gsn_edf.c.
+ *
  * This version uses the simple approach and serializes all scheduling
  * decisions by the use of a queue lock. This is probably not the
  * best way to do it, but it should suffice for now.
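
Since the plugin does not place tasks in the right cluster on its own, user space has to confine each task to the CPUs of its intended cluster. A minimal user-space sketch of one way to do that, not part of this commit; the CPU range is a placeholder and the real mapping must come from the detected topology:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;
	int cpu;

	CPU_ZERO(&set);
	/* Hypothetical cluster: assume CPUs 0-3 share the target L2/L3 cache. */
	for (cpu = 0; cpu < 4; cpu++)
		CPU_SET(cpu, &set);

	/* Restrict the calling task to that cluster's CPUs. */
	if (sched_setaffinity(0, sizeof(set), &set)) {
		perror("sched_setaffinity");
		return 1;
	}
	/* ... admit the task to the C-EDF plugin as a real-time task here ... */
	return 0;
}
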
@@ -22,76 +33,13 @@
 
 #include <linux/module.h>
 
-/* Overview of C-EDF operations.
- *
- * For a detailed explanation of C-EDF have a look at the FMLP paper. This
- * description only covers how the individual operations are implemented in
- * LITMUS.
- *
- * link_task_to_cpu(T, cpu) - Low-level operation to update the linkage
- *                            structure (NOT the actually scheduled
- *                            task). If there is another linked task To
- *                            already it will set To->linked_on = NO_CPU
- *                            (thereby removing its association with this
- *                            CPU). However, it will not requeue the
- *                            previously linked task (if any). It will set
- *                            T's state to RT_F_RUNNING and check whether
- *                            it is already running somewhere else. If T
- *                            is scheduled somewhere else it will link
- *                            it to that CPU instead (and pull the linked
- *                            task to cpu). T may be NULL.
- *
- * unlink(T)                - Unlink removes T from all scheduler data
- *                            structures. If it is linked to some CPU it
- *                            will link NULL to that CPU. If it is
- *                            currently queued in the cedf queue it will
- *                            be removed from the rt_domain. It is safe to
- *                            call unlink(T) if T is not linked. T may not
- *                            be NULL.
- *
- * requeue(T)               - Requeue will insert T into the appropriate
- *                            queue. If the system is in real-time mode and
- *                            T is already released, it will go into the
- *                            ready queue. If the system is not in
- *                            real-time mode, then T will go into the
- *                            release queue. If T's release time is in the
- *                            future, it will go into the release
- *                            queue. That means that T's release time/job
- *                            no/etc. has to be updated before requeue(T) is
- *                            called. It is not safe to call requeue(T)
- *                            when T is already queued. T may not be NULL.
- *
- * cedf_job_arrival(T)      - This is the catch-all function when T enters
- *                            the system after either a suspension or at a
- *                            job release. It will queue T (which means it
- *                            is not safe to call cedf_job_arrival(T) if
- *                            T is already queued) and then check whether a
- *                            preemption is necessary. If a preemption is
- *                            necessary it will update the linkage
- *                            accordingly and cause schedule() to be called
- *                            (either with an IPI or need_resched). It is
- *                            safe to call cedf_job_arrival(T) if T's
- *                            next job has not been actually released yet
- *                            (release time in the future). T will be put
- *                            on the release queue in that case.
- *
- * job_completion(T)        - Take care of everything that needs to be done
- *                            to prepare T for its next release and place
- *                            it in the right queue with
- *                            cedf_job_arrival().
- *
- *
- * When we know that T is linked to CPU then link_task_to_cpu(NULL, CPU) is
- * equivalent to unlink(T). Note that if you unlink a task from a CPU none of
- * the functions will automatically propagate pending tasks from the ready queue
- * to a linked task. This is the job of the calling function (by means of
- * __take_ready).
- */
-
 /* forward declaration... a funny thing with C ;) */
 struct clusterdomain;
 
 /* cpu_entry_t - maintain the linked and scheduled state
+ *
+ * A cpu also contains a pointer to the cedf_domain_t cluster
+ * that owns it (struct clusterdomain*)
  */
 typedef struct {
 	int cpu;
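
The comment block removed above still describes the linkage model the plugin relies on. As a rough illustration of the core idea of link_task_to_cpu(), a per-CPU "linked" pointer updated independently of what is actually scheduled, here is a simplified sketch; it is not the actual LITMUS^RT code, and the structure and field names are assumptions:

#define SKETCH_NO_CPU (-1)

struct sketch_task {
	int linked_on;                  /* CPU the task is linked to, or SKETCH_NO_CPU */
};

struct sketch_cpu_entry {
	int cpu;
	struct sketch_task *linked;     /* task that should run on this CPU  */
	struct sketch_task *scheduled;  /* task actually running on this CPU */
};

/* Link t (possibly NULL) to entry; the previously linked task, if any,
 * merely loses its link and is NOT requeued here. */
static void sketch_link_task_to_cpu(struct sketch_task *t,
				    struct sketch_cpu_entry *entry)
{
	if (entry->linked)
		entry->linked->linked_on = SKETCH_NO_CPU;
	entry->linked = t;
	if (t)
		t->linked_on = entry->cpu;
	/* The real implementation additionally swaps links when t is already
	 * scheduled on another CPU and reorders the CPU heap afterwards. */
}
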
@@ -131,7 +79,7 @@ typedef struct clusterdomain {
 #define lock domain.ready_lock
 } cedf_domain_t;
 
-/* a cedf_domain per cluster */
+/* a cedf_domain per cluster; allocation is done at init/activation time */
 cedf_domain_t *cedf;
 
 #define remote_cluster(cpu)	((cedf_domain_t *) per_cpu(cedf_cpu_entries, cpu).cluster)
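
With one cedf_domain_t per cluster and the cluster pointer stored in each per-CPU entry, remote_cluster() maps a CPU id to the domain that owns it. A small usage sketch; the helper below is hypothetical and not part of the plugin:

/* Two CPUs belong to the same cluster iff remote_cluster() resolves them
 * to the same cedf_domain_t instance. */
static int sketch_same_cluster(int cpu_a, int cpu_b)
{
	return remote_cluster(cpu_a) == remote_cluster(cpu_b);
}
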