 include/litmus/sched_global_plugin.h |  99 ++++
 include/litmus/sched_plugin.h        |   2 +-
 litmus/Makefile                      |   1 +
 litmus/litmus.c                      |   2 +-
 litmus/sched_cedf.c                  |   2 +-
 litmus/sched_global_plugin.c         | 675 ++++++++++++++
 litmus/sched_gsn_edf.c               | 688 +++----------
 litmus/sched_pfair.c                 |   2 +-
 litmus/sched_plugin.c                |   2 +-
 9 files changed, 833 insertions(+), 640 deletions(-)
diff --git a/include/litmus/sched_global_plugin.h b/include/litmus/sched_global_plugin.h
new file mode 100644
index 000000000000..cac2de63a3ee
--- /dev/null
+++ b/include/litmus/sched_global_plugin.h
@@ -0,0 +1,99 @@
+#ifndef _LITMUS_SCHED_GLOBAL_PLUGIN_H_
+#define _LITMUS_SCHED_GLOBAL_PLUGIN_H_
+
+#include <linux/sched.h>
+
+#include <litmus/litmus.h>
+#include <litmus/sched_plugin.h>
+#include <litmus/rt_domain.h>
+#include <litmus/bheap.h>
+
+/* cpu_entry_t - maintain the linked and scheduled state
+ */
+typedef struct {
+	int			cpu;
+	struct task_struct*	linked;		/* only RT tasks */
+	struct task_struct*	scheduled;	/* only RT tasks */
+	struct bheap_node*	hn;
+} cpu_entry_t;
+
+
+/*
+ * For use by the G-EDF family of plugins (G-EDF, EDZL, etc.).
+ * These plugins differ mainly in their rt_domain_t, their
+ * priority-order comparator function, and a few hooks for
+ * timers.
+ */
+typedef int (*prio_compare_t)(struct task_struct*, struct task_struct*);
+typedef struct task_struct* (*take_ready_t)(rt_domain_t* rt);
+typedef void (*add_ready_t)(rt_domain_t* rt, struct task_struct *new);
+typedef void (*job_arrival_t)(struct task_struct* task);
+typedef void (*job_completion_t)(struct task_struct *t, int forced);
+
+
+struct sched_global_plugin {
+
+	struct sched_plugin plugin;
+
+	/* function pointers MUST be set by the plugin */
+	prio_compare_t		prio_order;
+	take_ready_t		take_ready;
+	add_ready_t		add_ready;
+	job_arrival_t		job_arrival;
+	job_completion_t	job_completion;
+
+	rt_domain_t		domain;
+
+	cpu_entry_t*		cpus[NR_CPUS];
+	struct bheap_node	heap_node[NR_CPUS];
+	struct bheap		cpu_heap;
+
+} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
+
+
+extern struct sched_global_plugin* active_gbl_plugin;
+
+
+/*
+ * "Member" functions for generic global scheduling.
+ * Will call down into "virtual" functions as needed.
+ *
+ * Use prefix "gbl_" (global)
+ */
+int gbl_preemption_needed(struct task_struct *t);
+int gbl_ready_order(struct bheap_node* a, struct bheap_node* b);
+int gbl_cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b);
+void gbl_update_cpu_position(cpu_entry_t *entry);
+cpu_entry_t* gbl_lowest_prio_cpu(void);
+void gbl_link_task_to_cpu(struct task_struct* linked, cpu_entry_t *entry);
+void gbl_unlink(struct task_struct* t);
+void gbl_preempt(cpu_entry_t *entry);
+void gbl_requeue(struct task_struct* task);
+void gbl_check_for_preemptions(void);
+void gbl_release_jobs(rt_domain_t* rt, struct bheap* tasks);
+void gbl_job_completion(struct task_struct *t, int forced);
+
+/* Think of these two functions as "static" member functions */
+void gbl_domain_init(struct sched_global_plugin* gbl_plugin,
+		     check_resched_needed_t resched,
+		     release_jobs_t release);
+long gbl_activate_plugin(void* plugin);
+
+
+/*
+ * "Virtual member" functions for generic global scheduling.
+ * For use with sched_plugin or sched_global_plugin.
+ *
+ * Use prefix "gblv_" (global virtual)
+ */
+void gblv_job_arrival(struct task_struct* task);
+void gblv_tick(struct task_struct* t);
+struct task_struct* gblv_schedule(struct task_struct * prev);
+void gblv_finish_switch(struct task_struct *prev);
+void gblv_task_new(struct task_struct * t, int on_rq, int running);
+void gblv_task_wake_up(struct task_struct *task);
+void gblv_task_block(struct task_struct *t);
+void gblv_task_exit(struct task_struct * t);
+long gblv_admit_task(struct task_struct* tsk);
+
+#endif
\ No newline at end of file
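The header above is the whole inheritance mechanism: a derived scheduler embeds a struct sched_plugin, routes the generic "gblv_" virtual entry points into it, and supplies the five mandatory function pointers. The following is a minimal sketch of the intended usage, not part of the patch; the GSN-EDF changes later in this patch follow exactly this pattern with edf_higher_prio, while the edzl_higher_prio comparator named here is hypothetical:

	#include <litmus/sched_global_plugin.h>

	/* Hypothetical comparator -- a real plugin supplies its own. */
	static int edzl_higher_prio(struct task_struct* a, struct task_struct* b);

	static struct sched_global_plugin edzl_plugin __cacheline_aligned_in_smp = {
		.plugin = {
			.plugin_name	 = "EDZL",
			.schedule	 = gblv_schedule,	/* generic "virtual" hooks */
			.tick		 = gblv_tick,
			.task_new	 = gblv_task_new,
			.task_wake_up	 = gblv_task_wake_up,
			.task_block	 = gblv_task_block,
			.task_exit	 = gblv_task_exit,
			.finish_switch	 = gblv_finish_switch,
			.admit_task	 = gblv_admit_task,
			.activate_plugin = gbl_activate_plugin,
		},
		/* the five pointers that MUST be set by the plugin */
		.prio_order	= edzl_higher_prio,
		.take_ready	= __take_ready,
		.add_ready	= __add_ready,
		.job_arrival	= gblv_job_arrival,
		.job_completion	= gbl_job_completion,
	};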
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
index 2d856d587041..6899816ea321 100644
--- a/include/litmus/sched_plugin.h
+++ b/include/litmus/sched_plugin.h
@@ -23,7 +23,7 @@ struct pi_semaphore {
 
 /************************ setup/tear down ********************/
 
-typedef long (*activate_plugin_t) (void);
+typedef long (*activate_plugin_t) (void* plugin);
 typedef long (*deactivate_plugin_t) (void);
 
 
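The new void* plugin argument exists so that activation code shared between plugins can recover the enclosing plugin object from the embedded sched_plugin member. A condensed view of the pattern, excerpted from gbl_activate_plugin() as added later in this patch (initialization details elided):

	long gbl_activate_plugin(void* plg)
	{
		struct sched_plugin* plugin = (struct sched_plugin*)plg;

		/* C-style "inheritance": recover the wrapping object
		 * from its embedded sched_plugin member. */
		active_gbl_plugin = container_of(plugin,
						 struct sched_global_plugin,
						 plugin);
		/* ... per-CPU entries and the CPU heap are (re)initialized here ... */
		return 0;
	}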
diff --git a/litmus/Makefile b/litmus/Makefile
index b7366b530749..820deb7f2263 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -15,6 +15,7 @@ obj-y = sched_plugin.o litmus.o \
 	fmlp.o \
 	bheap.o \
 	ctrldev.o \
+	sched_global_plugin.o \
 	sched_gsn_edf.o \
 	sched_psn_edf.o
 
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 8efd3f9ef7ee..744880c90eb5 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -416,7 +416,7 @@ int switch_sched_plugin(struct sched_plugin* plugin)
 	ret = litmus->deactivate_plugin();
 	if (0 != ret)
 		goto out;
-	ret = plugin->activate_plugin();
+	ret = plugin->activate_plugin(plugin);
 	if (0 != ret) {
 		printk(KERN_INFO "Can't activate %s (%d).\n",
 		       plugin->plugin_name, ret);
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 098a449c2490..0b88d4713602 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -641,7 +641,7 @@ static void cleanup_cedf(void)
 	}
 }
 
-static long cedf_activate_plugin(void)
+static long cedf_activate_plugin(void* plugin)
 {
 	int i, j, cpu, ccpu, cpu_count;
 	cpu_entry_t *entry;
diff --git a/litmus/sched_global_plugin.c b/litmus/sched_global_plugin.c
new file mode 100644
index 000000000000..22dffa7d62fc
--- /dev/null
+++ b/litmus/sched_global_plugin.c
@@ -0,0 +1,675 @@
+/*
+ * litmus/sched_global_plugin.c
+ *
+ * Implementation of the basic operations and architecture needed by
+ * G-EDF/G-FIFO/EDZL/AEDZL global schedulers.
+ *
+ * This version uses the simple approach and serializes all scheduling
+ * decisions by the use of a queue lock. This is probably not the
+ * best way to do it, but it should suffice for now.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/percpu.h>
+#include <linux/sched.h>
+
+#include <litmus/litmus.h>
+#include <litmus/jobs.h>
+#include <litmus/sched_global_plugin.h>
+#include <litmus/sched_trace.h>
+
+#include <litmus/preempt.h>
+
+#include <linux/module.h>
+
+
+/* Overview of Global operations.
+ *
+ * gbl_link_task_to_cpu(T, cpu) - Low-level operation to update the linkage
+ *                                structure (NOT the actually scheduled
+ *                                task). If there is another linked task To
+ *                                already it will set To->linked_on = NO_CPU
+ *                                (thereby removing its association with this
+ *                                CPU). However, it will not requeue the
+ *                                previously linked task (if any). It will set
+ *                                T's state to RT_F_RUNNING and check whether
+ *                                it is already running somewhere else. If T
+ *                                is scheduled somewhere else it will link
+ *                                it to that CPU instead (and pull the linked
+ *                                task to cpu). T may be NULL.
+ *
+ * gbl_unlink(T)           - Unlink removes T from all scheduler data
+ *                           structures. If it is linked to some CPU it
+ *                           will link NULL to that CPU. If it is
+ *                           currently queued in the global ready queue it will
+ *                           be removed from the rt_domain. It is safe to
+ *                           call gbl_unlink(T) if T is not linked. T may not
+ *                           be NULL.
+ *
+ * gbl_requeue(T)          - Requeue will insert T into the appropriate
+ *                           queue. If the system is in real-time mode and
+ *                           T is released already, it will go into the
+ *                           ready queue. If the system is not in
+ *                           real-time mode, T will go into the release
+ *                           queue. If T's release time is in the
+ *                           future, it will go into the release
+ *                           queue. That means that T's release time/job
+ *                           no/etc. has to be updated before gbl_requeue(T) is
+ *                           called. It is not safe to call gbl_requeue(T)
+ *                           when T is already queued. T may not be NULL.
+ *
+ * job_arrival(T)          - This is the catch-all function when T enters
+ *                           the system after either a suspension or at a
+ *                           job release. It will queue T (which means it
+ *                           is not safe to call job_arrival(T) if
+ *                           T is already queued) and then check whether a
+ *                           preemption is necessary. If a preemption is
+ *                           necessary it will update the linkage
+ *                           accordingly and cause schedule() to be called
+ *                           (either with an IPI or need_resched). It is
+ *                           safe to call job_arrival(T) if T's
+ *                           next job has not been actually released yet
+ *                           (release time in the future). T will be put
+ *                           on the release queue in that case.
+ *
+ * job_completion(T)       - Take care of everything that needs to be done
+ *                           to prepare T for its next release and place
+ *                           it in the right queue with
+ *                           job_arrival().
+ *
+ *
+ * When we know that T is linked to CPU then gbl_link_task_to_cpu(NULL, CPU) is
+ * equivalent to gbl_unlink(T). Note that if you unlink a task from a CPU none of
+ * the functions will automatically propagate pending tasks from the ready queue
+ * to a linked task. This is the job of the calling function (by means of
+ * __take_ready).
+ */
+
+/* Uncomment this if you want to see all scheduling decisions in the
+ * TRACE() log.
+#define WANT_ALL_SCHED_EVENTS
+ */
+
+
+/* Macros to access the current active global plugin. These are
+ * a lot like C++'s 'this' pointer.
+ */
+struct sched_global_plugin* active_gbl_plugin;
+#define active_gbl_domain (active_gbl_plugin->domain)
+#define active_gbl_domain_lock (active_gbl_domain.ready_lock)
+
+
+/*********************************************************************/
+/* "Member" functions for both sched_plugin and sched_global_plugin. */
+/* NOTE: These will automatically call down into "virtual" functions.*/
+/*********************************************************************/
+
+/* Priority-related functions */
+int gbl_preemption_needed(struct task_struct *t)
+{
+	/* we need the read lock for active_gbl_domain's ready_queue */
+	/* no need to preempt if there is nothing pending */
+	if (!__jobs_pending(&active_gbl_domain))
+		return 0;
+	/* we need to reschedule if t doesn't exist */
+	if (!t)
+		return 1;
+
+	/* NOTE: We cannot check for non-preemptibility since we
+	 *       don't know what address space we're currently in.
+	 */
+
+	/* make sure to get non-rt stuff out of the way */
+	return !is_realtime(t) || active_gbl_plugin->prio_order(__next_ready(&active_gbl_domain), t);
+}
+
+int gbl_ready_order(struct bheap_node* a, struct bheap_node* b)
+{
+	return active_gbl_plugin->prio_order(bheap2task(a), bheap2task(b));
+}
+
+
+
+int gbl_cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
+{
+	cpu_entry_t *a, *b;
+	a = _a->value;
+	b = _b->value;
+
+	/* Note that a and b are inverted: we want the lowest-priority CPU at
+	 * the top of the heap.
+	 */
+	return active_gbl_plugin->prio_order(b->linked, a->linked);
+}
+
+/* gbl_update_cpu_position - Move the cpu entry to the correct place to maintain
+ *                           order in the cpu queue. Caller must hold
+ *                           active_gbl_domain_lock.
+ */
+void gbl_update_cpu_position(cpu_entry_t *entry)
+{
+	if (likely(bheap_node_in_heap(entry->hn)))
+		bheap_delete(gbl_cpu_lower_prio, &active_gbl_plugin->cpu_heap, entry->hn);
+	bheap_insert(gbl_cpu_lower_prio, &active_gbl_plugin->cpu_heap, entry->hn);
+}
+
+/* caller must hold active_gbl_domain_lock */
+cpu_entry_t* gbl_lowest_prio_cpu(void)
+{
+	struct bheap_node* hn;
+	hn = bheap_peek(gbl_cpu_lower_prio, &active_gbl_plugin->cpu_heap);
+	return hn->value;
+}
+
+
+/* gbl_link_task_to_cpu - Update the link of a CPU.
+ *                        Handles the case where the to-be-linked task is
+ *                        already scheduled on a different CPU.
+ */
+void gbl_link_task_to_cpu(struct task_struct* linked,
+			  cpu_entry_t *entry)
+{
+	cpu_entry_t *sched;
+	struct task_struct* tmp;
+	int on_cpu;
+
+	BUG_ON(linked && !is_realtime(linked));
+
+	/* Currently linked task is set to be unlinked. */
+	if (entry->linked) {
+		entry->linked->rt_param.linked_on = NO_CPU;
+	}
+
+	/* Link new task to CPU. */
+	if (linked) {
+		set_rt_flags(linked, RT_F_RUNNING);
+		/* handle task is already scheduled somewhere! */
+		on_cpu = linked->rt_param.scheduled_on;
+		if (on_cpu != NO_CPU) {
+			sched = active_gbl_plugin->cpus[on_cpu];
+			/* this should only happen if not linked already */
+			BUG_ON(sched->linked == linked);
+
+			/* If we are already scheduled on the CPU to which we
+			 * wanted to link, we don't need to do the swap --
+			 * we just link ourselves to the CPU and depend on
+			 * the caller to get things right.
+			 */
+			if (entry != sched) {
+				TRACE_TASK(linked,
+					   "already scheduled on %d, updating link.\n",
+					   sched->cpu);
+				tmp = sched->linked;
+				linked->rt_param.linked_on = sched->cpu;
+				sched->linked = linked;
+				gbl_update_cpu_position(sched);
+				linked = tmp;
+			}
+		}
+		if (linked) /* might be NULL due to swap */
+			linked->rt_param.linked_on = entry->cpu;
+	}
+	entry->linked = linked;
+#ifdef WANT_ALL_SCHED_EVENTS
+	if (linked)
+		TRACE_TASK(linked, "linked to %d.\n", entry->cpu);
+	else
+		TRACE("NULL linked to %d.\n", entry->cpu);
+#endif
+	gbl_update_cpu_position(entry);
+}
+
+/* gbl_unlink - Make sure a task is not linked any longer to an entry
+ *              where it was linked before. Must hold
+ *              active_gbl_domain_lock.
+ */
+void gbl_unlink(struct task_struct* t)
+{
+	cpu_entry_t *entry;
+
+	if (t->rt_param.linked_on != NO_CPU) {
+		/* unlink */
+		entry = active_gbl_plugin->cpus[t->rt_param.linked_on];
+		t->rt_param.linked_on = NO_CPU;
+		gbl_link_task_to_cpu(NULL, entry);
+	} else if (is_queued(t)) {
+		/* This is an interesting situation: t is scheduled,
+		 * but was just recently unlinked. It cannot be
+		 * linked anywhere else (because then it would have
+		 * been relinked to this CPU), thus it must be in some
+		 * queue. We must remove it from the list in this
+		 * case.
+		 */
+		remove(&active_gbl_domain, t);
+	}
+}
+
+
+/* gbl_preempt - force a CPU to reschedule
+ */
+void gbl_preempt(cpu_entry_t *entry)
+{
+	preempt_if_preemptable(entry->scheduled, entry->cpu);
+}
+
+/* gbl_requeue - Put an unlinked task into the global domain.
+ *               Caller must hold active_gbl_domain_lock.
+ */
+void gbl_requeue(struct task_struct* task)
+{
+	BUG_ON(!task);
+	/* sanity check before insertion */
+	BUG_ON(is_queued(task));
+
+	if (is_released(task, litmus_clock()))
+		active_gbl_plugin->add_ready(&active_gbl_domain, task);
+	else {
+		/* it has got to wait */
+		add_release(&active_gbl_domain, task);
+	}
+}
+
+
+/* check for any necessary preemptions */
+void gbl_check_for_preemptions(void)
+{
+	struct task_struct *task;
+	cpu_entry_t* last;
+
+	for (last = gbl_lowest_prio_cpu();
+	     gbl_preemption_needed(last->linked);
+	     last = gbl_lowest_prio_cpu())
+	{
+		/* preemption necessary */
+		task = active_gbl_plugin->take_ready(&active_gbl_domain);
+		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
+		      task->pid, last->cpu);
+		if (last->linked)
+			gbl_requeue(last->linked);
+		gbl_link_task_to_cpu(task, last);
+		gbl_preempt(last);
+	}
+}
+
+
+void gbl_release_jobs(rt_domain_t* rt, struct bheap* tasks)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&active_gbl_domain_lock, flags);
+
+	__merge_ready(rt, tasks);
+	gbl_check_for_preemptions();
+
+	raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags);
+}
+
+/* caller holds active_gbl_domain_lock */
+void gbl_job_completion(struct task_struct *t, int forced)
+{
+	BUG_ON(!t);
+
+	sched_trace_task_completion(t, forced);
+
+	TRACE_TASK(t, "job_completion().\n");
+
+	/* set flags */
+	set_rt_flags(t, RT_F_SLEEP);
+	/* prepare for next period */
+	prepare_for_next_period(t);
+	if (is_released(t, litmus_clock()))
+		sched_trace_task_release(t);
+	/* requeue via job_arrival(),
+	 * but don't requeue a blocking task. */
+	gbl_unlink(t);
+	if (is_running(t))
+		active_gbl_plugin->job_arrival(t);
+}
+
+
+/*********************************************************************/
+/* These two functions can't use active_* defines since the 'litmus' */
+/* pointer is undefined/invalid when these are called. Think of them */
+/* as static member functions.                                       */
+/*********************************************************************/
+
+void gbl_domain_init(struct sched_global_plugin* gbl_plugin,
+		     check_resched_needed_t resched,
+		     release_jobs_t release)
+{
+	rt_domain_init(&gbl_plugin->domain, gbl_ready_order, resched, release);
+}
+
+
+long gbl_activate_plugin(void* plg)
+{
+	struct sched_plugin* plugin = (struct sched_plugin*)plg;
+	int cpu;
+	cpu_entry_t *entry;
+
+	/* set the active global plugin */
+	active_gbl_plugin =
+		container_of(plugin,
+			     struct sched_global_plugin,
+			     plugin);
+
+	bheap_init(&active_gbl_plugin->cpu_heap);
+#ifdef CONFIG_RELEASE_MASTER
+	active_gbl_domain.release_master = atomic_read(&release_master_cpu);
+#endif
+
+	for_each_online_cpu(cpu) {
+		entry = active_gbl_plugin->cpus[cpu];
+		bheap_node_init(&entry->hn, entry);
+		entry->linked = NULL;
+		entry->scheduled = NULL;
+#ifdef CONFIG_RELEASE_MASTER
+		if (cpu != active_gbl_domain.release_master) {
+#endif
+			TRACE("Global Plugin: Initializing CPU #%d.\n", cpu);
+			gbl_update_cpu_position(entry);
+#ifdef CONFIG_RELEASE_MASTER
+		} else {
+			TRACE("Global Plugin: CPU %d is release master.\n", cpu);
+		}
+#endif
+	}
+	return 0;
+}
+
+
+/********************************************************************/
+/* "Virtual" functions in both sched_plugin and sched_global_plugin */
+/********************************************************************/
+
+
+/* gblv_job_arrival: task is either resumed or released */
+void gblv_job_arrival(struct task_struct* task)
+{
+	BUG_ON(!task);
+
+	gbl_requeue(task);
+	gbl_check_for_preemptions();
+}
+
+/* gblv_tick - this function is called for every local timer interrupt.
+ *
+ * checks whether the current task has expired and checks
+ * whether we need to preempt it if it has not expired
+ */
+void gblv_tick(struct task_struct* t)
+{
+	if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) {
+		if (!is_np(t)) {
+			/* np tasks will be preempted when they become
+			 * preemptable again
+			 */
+			litmus_reschedule_local();
+			TRACE("gbl_scheduler_tick: "
+			      "%d is preemptable "
+			      " => FORCE_RESCHED\n", t->pid);
+		} else if (is_user_np(t)) {
+			TRACE("gbl_scheduler_tick: "
+			      "%d is non-preemptable, "
+			      "preemption delayed.\n", t->pid);
+			request_exit_np(t);
+		}
+	}
+}
+
+/* Getting schedule() right is a bit tricky. schedule() may not make any
+ * assumptions on the state of the current task since it may be called for a
+ * number of reasons. The reasons include a scheduler_tick() determined that it
+ * was necessary, because sys_exit_np() was called, because some Linux
+ * subsystem determined so, or even (in the worst case) because there is a bug
+ * hidden somewhere. Thus, we must take extreme care to determine what the
+ * current state is.
+ *
+ * The CPU could currently be scheduling a task (or not), be linked (or not).
+ *
+ * The following assertions for the scheduled task could hold:
+ *
+ *	- !is_running(scheduled)	// the job blocks
+ *	- scheduled->timeslice == 0	// the job completed (forcefully)
+ *	- get_rt_flag() == RT_F_SLEEP	// the job completed (by syscall)
+ *	- linked != scheduled		// we need to reschedule (for any reason)
+ *	- is_np(scheduled)		// rescheduling must be delayed,
+ *					   sys_exit_np must be requested
+ *
+ * Any of these can occur together.
+ */
+struct task_struct* gblv_schedule(struct task_struct * prev)
+{
+	cpu_entry_t* entry = active_gbl_plugin->cpus[smp_processor_id()];
+	int out_of_time, sleep, preempt, np, exists, blocks;
+	struct task_struct* next = NULL;
+
+#ifdef CONFIG_RELEASE_MASTER
+	/* Bail out early if we are the release master.
+	 * The release master never schedules any real-time tasks.
+	 */
+	if (active_gbl_domain.release_master == entry->cpu)
+		return NULL;
+#endif
+
+	raw_spin_lock(&active_gbl_domain_lock);
+
+	/* sanity checking */
+	BUG_ON(entry->scheduled && entry->scheduled != prev);
+	BUG_ON(entry->scheduled && !is_realtime(prev));
+	BUG_ON(is_realtime(prev) && !entry->scheduled);
+
+	/* (0) Determine state */
+	exists = entry->scheduled != NULL;
+	blocks = exists && !is_running(entry->scheduled);
+	out_of_time = exists &&
+		budget_enforced(entry->scheduled) &&
+		budget_exhausted(entry->scheduled);
+	np = exists && is_np(entry->scheduled);
+	sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
+	preempt = entry->scheduled != entry->linked;
+
+#ifdef WANT_ALL_SCHED_EVENTS
+	TRACE_TASK(prev, "invoked gblv_schedule.\n");
+#endif
+
+	if (exists)
+		TRACE_TASK(prev,
+			   "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d "
+			   "state:%d sig:%d\n",
+			   blocks, out_of_time, np, sleep, preempt,
+			   prev->state, signal_pending(prev));
+	if (entry->linked && preempt)
+		TRACE_TASK(prev, "will be preempted by %s/%d\n",
+			   entry->linked->comm, entry->linked->pid);
+
+
+	/* If a task blocks we have no choice but to reschedule.
+	 */
+	if (blocks)
+		gbl_unlink(entry->scheduled);
+
+	/* Request a sys_exit_np() call if we would like to preempt but cannot.
+	 * We need to make sure to update the link structure anyway in case
+	 * that we are still linked. Multiple calls to request_exit_np() don't
+	 * hurt.
+	 */
+	if (np && (out_of_time || preempt || sleep)) {
+		gbl_unlink(entry->scheduled);
+		request_exit_np(entry->scheduled);
+	}
+
+	/* Any task that is preemptable and either exhausts its execution
+	 * budget or wants to sleep completes. We may have to reschedule after
+	 * this. Don't do a job completion if we block (can't have timers running
+	 * for blocked jobs). Preemptions go first for the same reason.
+	 */
+	if (!np && (out_of_time || sleep) && !blocks && !preempt)
+		active_gbl_plugin->job_completion(entry->scheduled, !sleep);
+
+	/* Link pending task if we became unlinked.
+	 */
+	if (!entry->linked)
+		gbl_link_task_to_cpu(active_gbl_plugin->take_ready(&active_gbl_domain), entry);
+
+	/* The final scheduling decision. Do we need to switch for some reason?
+	 * If linked is different from scheduled, then select linked as next.
+	 */
+	if ((!np || blocks) &&
+	    entry->linked != entry->scheduled) {
+		/* Schedule a linked job? */
+		if (entry->linked) {
+			entry->linked->rt_param.scheduled_on = entry->cpu;
+			next = entry->linked;
+		}
+		if (entry->scheduled) {
+			/* not gonna be scheduled soon */
+			entry->scheduled->rt_param.scheduled_on = NO_CPU;
+			TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
+		}
+	} else
+		/* Only override Linux scheduler if we have a real-time task
+		 * scheduled that needs to continue.
+		 */
+		if (exists)
+			next = prev;
+
+	sched_state_task_picked();
+
+	raw_spin_unlock(&active_gbl_domain_lock);
+
+#ifdef WANT_ALL_SCHED_EVENTS
+	TRACE("active_gbl_domain_lock released, next=0x%p\n", next);
+
+	if (next)
+		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
+	else if (exists && !next)
+		TRACE("becomes idle at %llu.\n", litmus_clock());
+#endif
+
+
+	return next;
+}
+
+
+/* _finish_switch - we just finished the switch away from prev
+ */
+void gblv_finish_switch(struct task_struct *prev)
+{
+	cpu_entry_t* entry = active_gbl_plugin->cpus[smp_processor_id()];
+
+	entry->scheduled = is_realtime(current) ? current : NULL;
+#ifdef WANT_ALL_SCHED_EVENTS
+	TRACE_TASK(prev, "switched away from\n");
+#endif
+}
+
+
+/* Prepare a task for running in RT mode
+ */
+void gblv_task_new(struct task_struct * t, int on_rq, int running)
+{
+	unsigned long flags;
+	cpu_entry_t* entry;
+
+	TRACE("global plugin: task new %d\n", t->pid);
+
+	raw_spin_lock_irqsave(&active_gbl_domain_lock, flags);
+
+	/* setup job params */
+	release_at(t, litmus_clock());
+
+	if (running) {
+		entry = active_gbl_plugin->cpus[task_cpu(t)];
+		BUG_ON(entry->scheduled);
+
+#ifdef CONFIG_RELEASE_MASTER
+		if (entry->cpu != active_gbl_domain.release_master) {
+#endif
+			entry->scheduled = t;
+			tsk_rt(t)->scheduled_on = task_cpu(t);
+#ifdef CONFIG_RELEASE_MASTER
+		} else {
+			/* do not schedule on release master */
+			gbl_preempt(entry); /* force resched */
+			tsk_rt(t)->scheduled_on = NO_CPU;
+		}
+#endif
+	} else {
+		t->rt_param.scheduled_on = NO_CPU;
+	}
+	t->rt_param.linked_on = NO_CPU;
+
+	active_gbl_plugin->job_arrival(t);
+	raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags);
+}
+
+void gblv_task_wake_up(struct task_struct *task)
+{
+	unsigned long flags;
+	lt_t now;
+
+	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
+
+	raw_spin_lock_irqsave(&active_gbl_domain_lock, flags);
+	/* We need to take suspensions because of semaphores into
+	 * account! If a job resumes after being suspended due to acquiring
+	 * a semaphore, it should never be treated as a new job release.
+	 */
+	if (get_rt_flags(task) == RT_F_EXIT_SEM) {
+		set_rt_flags(task, RT_F_RUNNING);
+	} else {
+		now = litmus_clock();
+		if (is_tardy(task, now)) {
+			/* new sporadic release */
+			release_at(task, now);
+			sched_trace_task_release(task);
+		}
+		else {
+			if (task->rt.time_slice) {
+				/* came back in time before deadline
+				 */
+				set_rt_flags(task, RT_F_RUNNING);
+			}
+		}
+	}
+	active_gbl_plugin->job_arrival(task);
+	raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags);
+}
+
+void gblv_task_block(struct task_struct *t)
+{
+	unsigned long flags;
+
+	TRACE_TASK(t, "block at %llu\n", litmus_clock());
+
+	/* unlink if necessary */
+	raw_spin_lock_irqsave(&active_gbl_domain_lock, flags);
+	gbl_unlink(t);
+	raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags);
+
+	BUG_ON(!is_realtime(t));
+}
+
+
+void gblv_task_exit(struct task_struct * t)
+{
+	unsigned long flags;
+
+	/* unlink if necessary */
+	raw_spin_lock_irqsave(&active_gbl_domain_lock, flags);
+	gbl_unlink(t);
+	if (tsk_rt(t)->scheduled_on != NO_CPU) {
+		active_gbl_plugin->cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
+		tsk_rt(t)->scheduled_on = NO_CPU;
+	}
+	raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags);
+
+	BUG_ON(!is_realtime(t));
+	TRACE_TASK(t, "RIP\n");
+}
+
+long gblv_admit_task(struct task_struct* tsk)
+{
+	return 0;
+}
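Because every scheduling path dispatches through active_gbl_plugin, a derived scheduler can interpose on any of the five mandatory hooks while reusing the generic logic. A hypothetical sketch (not part of the patch; the zero-laxity timer named in the comment is invented for illustration, while gbl_job_completion() is the generic implementation above):

	static void edzl_job_completion(struct task_struct *t, int forced)
	{
		/* hypothetical plugin-specific bookkeeping, e.g. cancelling
		 * a per-job zero-laxity timer, would go here */
		gbl_job_completion(t, forced);
	}

	/* installed in the plugin object via: .job_completion = edzl_job_completion */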
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index e9c5e531b1ae..7876d707d939 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -14,7 +14,7 @@
 
 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
-#include <litmus/sched_plugin.h>
+#include <litmus/sched_global_plugin.h>
 #include <litmus/edf_common.h>
 #include <litmus/sched_trace.h>
 
| @@ -24,578 +24,50 @@ | |||
| 24 | 24 | ||
| 25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
| 26 | 26 | ||
| 27 | /* Overview of GSN-EDF operations. | ||
| 28 | * | ||
| 29 | * For a detailed explanation of GSN-EDF have a look at the FMLP paper. This | ||
| 30 | * description only covers how the individual operations are implemented in | ||
| 31 | * LITMUS. | ||
| 32 | * | ||
| 33 | * link_task_to_cpu(T, cpu) - Low-level operation to update the linkage | ||
| 34 | * structure (NOT the actually scheduled | ||
| 35 | * task). If there is another linked task To | ||
| 36 | * already it will set To->linked_on = NO_CPU | ||
| 37 | * (thereby removing its association with this | ||
| 38 | * CPU). However, it will not requeue the | ||
| 39 | * previously linked task (if any). It will set | ||
| 40 | * T's state to RT_F_RUNNING and check whether | ||
| 41 | * it is already running somewhere else. If T | ||
| 42 | * is scheduled somewhere else it will link | ||
| 43 | * it to that CPU instead (and pull the linked | ||
| 44 | * task to cpu). T may be NULL. | ||
| 45 | * | ||
| 46 | * unlink(T) - Unlink removes T from all scheduler data | ||
| 47 | * structures. If it is linked to some CPU it | ||
| 48 | * will link NULL to that CPU. If it is | ||
| 49 | * currently queued in the gsnedf queue it will | ||
| 50 | * be removed from the rt_domain. It is safe to | ||
| 51 | * call unlink(T) if T is not linked. T may not | ||
| 52 | * be NULL. | ||
| 53 | * | ||
| 54 | * requeue(T) - Requeue will insert T into the appropriate | ||
| 55 | * queue. If the system is in real-time mode and | ||
| 56 | * the T is released already, it will go into the | ||
| 57 | * ready queue. If the system is not in | ||
| 58 | * real-time mode is T, then T will go into the | ||
| 59 | * release queue. If T's release time is in the | ||
| 60 | * future, it will go into the release | ||
| 61 | * queue. That means that T's release time/job | ||
| 62 | * no/etc. has to be updated before requeu(T) is | ||
| 63 | * called. It is not safe to call requeue(T) | ||
| 64 | * when T is already queued. T may not be NULL. | ||
| 65 | * | ||
| 66 | * gsnedf_job_arrival(T) - This is the catch all function when T enters | ||
| 67 | * the system after either a suspension or at a | ||
| 68 | * job release. It will queue T (which means it | ||
| 69 | * is not safe to call gsnedf_job_arrival(T) if | ||
| 70 | * T is already queued) and then check whether a | ||
| 71 | * preemption is necessary. If a preemption is | ||
| 72 | * necessary it will update the linkage | ||
| 73 | * accordingly and cause scheduled to be called | ||
| 74 | * (either with an IPI or need_resched). It is | ||
| 75 | * safe to call gsnedf_job_arrival(T) if T's | ||
| 76 | * next job has not been actually released yet | ||
| 77 | * (releast time in the future). T will be put | ||
| 78 | * on the release queue in that case. | ||
| 79 | * | ||
| 80 | * job_completion(T) - Take care of everything that needs to be done | ||
| 81 | * to prepare T for its next release and place | ||
| 82 | * it in the right queue with | ||
| 83 | * gsnedf_job_arrival(). | ||
| 84 | * | ||
| 85 | * | ||
| 86 | * When we now that T is linked to CPU then link_task_to_cpu(NULL, CPU) is | ||
| 87 | * equivalent to unlink(T). Note that if you unlink a task from a CPU none of | ||
| 88 | * the functions will automatically propagate pending task from the ready queue | ||
| 89 | * to a linked task. This is the job of the calling function ( by means of | ||
| 90 | * __take_ready). | ||
| 91 | */ | ||
| 92 | 27 | ||
| 93 | |||
| 94 | /* cpu_entry_t - maintain the linked and scheduled state | ||
| 95 | */ | ||
| 96 | typedef struct { | ||
| 97 | int cpu; | ||
| 98 | struct task_struct* linked; /* only RT tasks */ | ||
| 99 | struct task_struct* scheduled; /* only RT tasks */ | ||
| 100 | struct bheap_node* hn; | ||
| 101 | } cpu_entry_t; | ||
| 102 | DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries); | 28 | DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries); |
| 103 | 29 | ||
| 104 | cpu_entry_t* gsnedf_cpus[NR_CPUS]; | 30 | #define gsnedf_lock (gsn_edf_plugin.domain.ready_lock) |
| 105 | |||
| 106 | /* the cpus queue themselves according to priority in here */ | ||
| 107 | static struct bheap_node gsnedf_heap_node[NR_CPUS]; | ||
| 108 | static struct bheap gsnedf_cpu_heap; | ||
| 109 | |||
| 110 | static rt_domain_t gsnedf; | ||
| 111 | #define gsnedf_lock (gsnedf.ready_lock) | ||
| 112 | |||
| 113 | |||
| 114 | /* Uncomment this if you want to see all scheduling decisions in the | ||
| 115 | * TRACE() log. | ||
| 116 | #define WANT_ALL_SCHED_EVENTS | ||
| 117 | */ | ||
| 118 | |||
| 119 | static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b) | ||
| 120 | { | ||
| 121 | cpu_entry_t *a, *b; | ||
| 122 | a = _a->value; | ||
| 123 | b = _b->value; | ||
| 124 | /* Note that a and b are inverted: we want the lowest-priority CPU at | ||
| 125 | * the top of the heap. | ||
| 126 | */ | ||
| 127 | return edf_higher_prio(b->linked, a->linked); | ||
| 128 | } | ||
| 129 | |||
| 130 | /* update_cpu_position - Move the cpu entry to the correct place to maintain | ||
| 131 | * order in the cpu queue. Caller must hold gsnedf lock. | ||
| 132 | */ | ||
| 133 | static void update_cpu_position(cpu_entry_t *entry) | ||
| 134 | { | ||
| 135 | if (likely(bheap_node_in_heap(entry->hn))) | ||
| 136 | bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn); | ||
| 137 | bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn); | ||
| 138 | } | ||
| 139 | 31 | ||
| 140 | /* caller must hold gsnedf lock */ | 32 | #ifdef CONFIG_FMLP |
| 141 | static cpu_entry_t* lowest_prio_cpu(void) | 33 | static long gsnedf_pi_block(struct pi_semaphore *sem, |
| 142 | { | 34 | struct task_struct *new_waiter); |
| 143 | struct bheap_node* hn; | 35 | static long gsnedf_inherit_priority(struct pi_semaphore *sem, |
| 144 | hn = bheap_peek(cpu_lower_prio, &gsnedf_cpu_heap); | 36 | struct task_struct *new_owner); |
| 145 | return hn->value; | 37 | static long gsnedf_return_priority(struct pi_semaphore *sem); |
| 146 | } | ||
| 147 | |||
| 148 | |||
| 149 | /* link_task_to_cpu - Update the link of a CPU. | ||
| 150 | * Handles the case where the to-be-linked task is already | ||
| 151 | * scheduled on a different CPU. | ||
| 152 | */ | ||
| 153 | static noinline void link_task_to_cpu(struct task_struct* linked, | ||
| 154 | cpu_entry_t *entry) | ||
| 155 | { | ||
| 156 | cpu_entry_t *sched; | ||
| 157 | struct task_struct* tmp; | ||
| 158 | int on_cpu; | ||
| 159 | |||
| 160 | BUG_ON(linked && !is_realtime(linked)); | ||
| 161 | |||
| 162 | /* Currently linked task is set to be unlinked. */ | ||
| 163 | if (entry->linked) { | ||
| 164 | entry->linked->rt_param.linked_on = NO_CPU; | ||
| 165 | } | ||
| 166 | |||
| 167 | /* Link new task to CPU. */ | ||
| 168 | if (linked) { | ||
| 169 | set_rt_flags(linked, RT_F_RUNNING); | ||
| 170 | /* handle task is already scheduled somewhere! */ | ||
| 171 | on_cpu = linked->rt_param.scheduled_on; | ||
| 172 | if (on_cpu != NO_CPU) { | ||
| 173 | sched = &per_cpu(gsnedf_cpu_entries, on_cpu); | ||
| 174 | /* this should only happen if not linked already */ | ||
| 175 | BUG_ON(sched->linked == linked); | ||
| 176 | |||
| 177 | /* If we are already scheduled on the CPU to which we | ||
| 178 | * wanted to link, we don't need to do the swap -- | ||
| 179 | * we just link ourselves to the CPU and depend on | ||
| 180 | * the caller to get things right. | ||
| 181 | */ | ||
| 182 | if (entry != sched) { | ||
| 183 | TRACE_TASK(linked, | ||
| 184 | "already scheduled on %d, updating link.\n", | ||
| 185 | sched->cpu); | ||
| 186 | tmp = sched->linked; | ||
| 187 | linked->rt_param.linked_on = sched->cpu; | ||
| 188 | sched->linked = linked; | ||
| 189 | update_cpu_position(sched); | ||
| 190 | linked = tmp; | ||
| 191 | } | ||
| 192 | } | ||
| 193 | if (linked) /* might be NULL due to swap */ | ||
| 194 | linked->rt_param.linked_on = entry->cpu; | ||
| 195 | } | ||
| 196 | entry->linked = linked; | ||
| 197 | #ifdef WANT_ALL_SCHED_EVENTS | ||
| 198 | if (linked) | ||
| 199 | TRACE_TASK(linked, "linked to %d.\n", entry->cpu); | ||
| 200 | else | ||
| 201 | TRACE("NULL linked to %d.\n", entry->cpu); | ||
| 202 | #endif | ||
| 203 | update_cpu_position(entry); | ||
| 204 | } | ||
| 205 | |||
| 206 | /* unlink - Make sure a task is not linked any longer to an entry | ||
| 207 | * where it was linked before. Must hold gsnedf_lock. | ||
| 208 | */ | ||
| 209 | static noinline void unlink(struct task_struct* t) | ||
| 210 | { | ||
| 211 | cpu_entry_t *entry; | ||
| 212 | |||
| 213 | if (t->rt_param.linked_on != NO_CPU) { | ||
| 214 | /* unlink */ | ||
| 215 | entry = &per_cpu(gsnedf_cpu_entries, t->rt_param.linked_on); | ||
| 216 | t->rt_param.linked_on = NO_CPU; | ||
| 217 | link_task_to_cpu(NULL, entry); | ||
| 218 | } else if (is_queued(t)) { | ||
| 219 | /* This is an interesting situation: t is scheduled, | ||
| 220 | * but was just recently unlinked. It cannot be | ||
| 221 | * linked anywhere else (because then it would have | ||
| 222 | * been relinked to this CPU), thus it must be in some | ||
| 223 | * queue. We must remove it from the list in this | ||
| 224 | * case. | ||
| 225 | */ | ||
| 226 | remove(&gsnedf, t); | ||
| 227 | } | ||
| 228 | } | ||
| 229 | |||
| 230 | |||
| 231 | /* preempt - force a CPU to reschedule | ||
| 232 | */ | ||
| 233 | static void preempt(cpu_entry_t *entry) | ||
| 234 | { | ||
| 235 | preempt_if_preemptable(entry->scheduled, entry->cpu); | ||
| 236 | } | ||
| 237 | |||
| 238 | /* requeue - Put an unlinked task into gsn-edf domain. | ||
| 239 | * Caller must hold gsnedf_lock. | ||
| 240 | */ | ||
| 241 | static noinline void requeue(struct task_struct* task) | ||
| 242 | { | ||
| 243 | BUG_ON(!task); | ||
| 244 | /* sanity check before insertion */ | ||
| 245 | BUG_ON(is_queued(task)); | ||
| 246 | |||
| 247 | if (is_released(task, litmus_clock())) | ||
| 248 | __add_ready(&gsnedf, task); | ||
| 249 | else { | ||
| 250 | /* it has got to wait */ | ||
| 251 | add_release(&gsnedf, task); | ||
| 252 | } | ||
| 253 | } | ||
| 254 | |||
| 255 | /* check for any necessary preemptions */ | ||
| 256 | static void check_for_preemptions(void) | ||
| 257 | { | ||
| 258 | struct task_struct *task; | ||
| 259 | cpu_entry_t* last; | ||
| 260 | |||
| 261 | for(last = lowest_prio_cpu(); | ||
| 262 | edf_preemption_needed(&gsnedf, last->linked); | ||
| 263 | last = lowest_prio_cpu()) { | ||
| 264 | /* preemption necessary */ | ||
| 265 | task = __take_ready(&gsnedf); | ||
| 266 | TRACE("check_for_preemptions: attempting to link task %d to %d\n", | ||
| 267 | task->pid, last->cpu); | ||
| 268 | if (last->linked) | ||
| 269 | requeue(last->linked); | ||
| 270 | link_task_to_cpu(task, last); | ||
| 271 | preempt(last); | ||
| 272 | } | ||
| 273 | } | ||
| 274 | |||
| 275 | /* gsnedf_job_arrival: task is either resumed or released */ | ||
| 276 | static noinline void gsnedf_job_arrival(struct task_struct* task) | ||
| 277 | { | ||
| 278 | BUG_ON(!task); | ||
| 279 | |||
| 280 | requeue(task); | ||
| 281 | check_for_preemptions(); | ||
| 282 | } | ||
| 283 | |||
| 284 | static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks) | ||
| 285 | { | ||
| 286 | unsigned long flags; | ||
| 287 | |||
| 288 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | ||
| 289 | |||
| 290 | __merge_ready(rt, tasks); | ||
| 291 | check_for_preemptions(); | ||
| 292 | |||
| 293 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
| 294 | } | ||
| 295 | |||
| 296 | /* caller holds gsnedf_lock */ | ||
| 297 | static noinline void job_completion(struct task_struct *t, int forced) | ||
| 298 | { | ||
| 299 | BUG_ON(!t); | ||
| 300 | |||
| 301 | sched_trace_task_completion(t, forced); | ||
| 302 | |||
| 303 | TRACE_TASK(t, "job_completion().\n"); | ||
| 304 | |||
| 305 | /* set flags */ | ||
| 306 | set_rt_flags(t, RT_F_SLEEP); | ||
| 307 | /* prepare for next period */ | ||
| 308 | prepare_for_next_period(t); | ||
| 309 | if (is_released(t, litmus_clock())) | ||
| 310 | sched_trace_task_release(t); | ||
| 311 | /* unlink */ | ||
| 312 | unlink(t); | ||
| 313 | /* requeue | ||
| 314 | * But don't requeue a blocking task. */ | ||
| 315 | if (is_running(t)) | ||
| 316 | gsnedf_job_arrival(t); | ||
| 317 | } | ||
| 318 | |||
| 319 | /* gsnedf_tick - this function is called for every local timer | ||
| 320 | * interrupt. | ||
| 321 | * | ||
| 322 | * checks whether the current task has expired and checks | ||
| 323 | * whether we need to preempt it if it has not expired | ||
| 324 | */ | ||
| 325 | static void gsnedf_tick(struct task_struct* t) | ||
| 326 | { | ||
| 327 | if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) { | ||
| 328 | if (!is_np(t)) { | ||
| 329 | /* np tasks will be preempted when they become | ||
| 330 | * preemptable again | ||
| 331 | */ | ||
| 332 | litmus_reschedule_local(); | ||
| 333 | TRACE("gsnedf_scheduler_tick: " | ||
| 334 | "%d is preemptable " | ||
| 335 | " => FORCE_RESCHED\n", t->pid); | ||
| 336 | } else if (is_user_np(t)) { | ||
| 337 | TRACE("gsnedf_scheduler_tick: " | ||
| 338 | "%d is non-preemptable, " | ||
| 339 | "preemption delayed.\n", t->pid); | ||
| 340 | request_exit_np(t); | ||
| 341 | } | ||
| 342 | } | ||
| 343 | } | ||
| 344 | |||
| 345 | /* Getting schedule() right is a bit tricky. schedule() may not make any | ||
| 346 | * assumptions on the state of the current task since it may be called for a | ||
| 347 | * number of reasons. The reasons include a scheduler_tick() determined that it | ||
| 348 | * was necessary, because sys_exit_np() was called, because some Linux | ||
| 349 | * subsystem determined so, or even (in the worst case) because there is a bug | ||
| 350 | * hidden somewhere. Thus, we must take extreme care to determine what the | ||
| 351 | * current state is. | ||
| 352 | * | ||
| 353 | * The CPU could currently be scheduling a task (or not), be linked (or not). | ||
| 354 | * | ||
| 355 | * The following assertions for the scheduled task could hold: | ||
| 356 | * | ||
| 357 | * - !is_running(scheduled) // the job blocks | ||
| 358 | * - scheduled->timeslice == 0 // the job completed (forcefully) | ||
| 359 | * - get_rt_flag() == RT_F_SLEEP // the job completed (by syscall) | ||
| 360 | * - linked != scheduled // we need to reschedule (for any reason) | ||
| 361 | * - is_np(scheduled) // rescheduling must be delayed, | ||
| 362 | * sys_exit_np must be requested | ||
| 363 | * | ||
| 364 | * Any of these can occur together. | ||
| 365 | */ | ||
| 366 | static struct task_struct* gsnedf_schedule(struct task_struct * prev) | ||
| 367 | { | ||
| 368 | cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); | ||
| 369 | int out_of_time, sleep, preempt, np, exists, blocks; | ||
| 370 | struct task_struct* next = NULL; | ||
| 371 | |||
| 372 | #ifdef CONFIG_RELEASE_MASTER | ||
| 373 | /* Bail out early if we are the release master. | ||
| 374 | * The release master never schedules any real-time tasks. | ||
| 375 | */ | ||
| 376 | if (gsnedf.release_master == entry->cpu) | ||
| 377 | return NULL; | ||
| 378 | #endif | ||
| 379 | |||
| 380 | raw_spin_lock(&gsnedf_lock); | ||
| 381 | |||
| 382 | /* sanity checking */ | ||
| 383 | BUG_ON(entry->scheduled && entry->scheduled != prev); | ||
| 384 | BUG_ON(entry->scheduled && !is_realtime(prev)); | ||
| 385 | BUG_ON(is_realtime(prev) && !entry->scheduled); | ||
| 386 | |||
| 387 | /* (0) Determine state */ | ||
| 388 | exists = entry->scheduled != NULL; | ||
| 389 | blocks = exists && !is_running(entry->scheduled); | ||
| 390 | out_of_time = exists && | ||
| 391 | budget_enforced(entry->scheduled) && | ||
| 392 | budget_exhausted(entry->scheduled); | ||
| 393 | np = exists && is_np(entry->scheduled); | ||
| 394 | sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP; | ||
| 395 | preempt = entry->scheduled != entry->linked; | ||
| 396 | |||
| 397 | #ifdef WANT_ALL_SCHED_EVENTS | ||
| 398 | TRACE_TASK(prev, "invoked gsnedf_schedule.\n"); | ||
| 399 | #endif | ||
| 400 | |||
| 401 | if (exists) | ||
| 402 | TRACE_TASK(prev, | ||
| 403 | "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d " | ||
| 404 | "state:%d sig:%d\n", | ||
| 405 | blocks, out_of_time, np, sleep, preempt, | ||
| 406 | prev->state, signal_pending(prev)); | ||
| 407 | if (entry->linked && preempt) | ||
| 408 | TRACE_TASK(prev, "will be preempted by %s/%d\n", | ||
| 409 | entry->linked->comm, entry->linked->pid); | ||
| 410 | |||
| 411 | |||
| 412 | /* If a task blocks we have no choice but to reschedule. | ||
| 413 | */ | ||
| 414 | if (blocks) | ||
| 415 | unlink(entry->scheduled); | ||
| 416 | |||
| 417 | /* Request a sys_exit_np() call if we would like to preempt but cannot. | ||
| 418 | * We need to make sure to update the link structure anyway in case | ||
| 419 | * that we are still linked. Multiple calls to request_exit_np() don't | ||
| 420 | * hurt. | ||
| 421 | */ | ||
| 422 | if (np && (out_of_time || preempt || sleep)) { | ||
| 423 | unlink(entry->scheduled); | ||
| 424 | request_exit_np(entry->scheduled); | ||
| 425 | } | ||
| 426 | |||
| 427 | /* Any task that is preemptable and either exhausts its execution | ||
| 428 | * budget or wants to sleep completes. We may have to reschedule after | ||
| 429 | * this. Don't do a job completion if we block (can't have timers running | ||
| 430 | * for blocked jobs). Preemption go first for the same reason. | ||
| 431 | */ | ||
| 432 | if (!np && (out_of_time || sleep) && !blocks && !preempt) | ||
| 433 | job_completion(entry->scheduled, !sleep); | ||
| 434 | |||
| 435 | /* Link pending task if we became unlinked. | ||
| 436 | */ | ||
| 437 | if (!entry->linked) | ||
| 438 | link_task_to_cpu(__take_ready(&gsnedf), entry); | ||
| 439 | |||
| 440 | /* The final scheduling decision. Do we need to switch for some reason? | ||
| 441 | * If linked is different from scheduled, then select linked as next. | ||
| 442 | */ | ||
| 443 | if ((!np || blocks) && | ||
| 444 | entry->linked != entry->scheduled) { | ||
| 445 | /* Schedule a linked job? */ | ||
| 446 | if (entry->linked) { | ||
| 447 | entry->linked->rt_param.scheduled_on = entry->cpu; | ||
| 448 | next = entry->linked; | ||
| 449 | } | ||
| 450 | if (entry->scheduled) { | ||
| 451 | /* not gonna be scheduled soon */ | ||
| 452 | entry->scheduled->rt_param.scheduled_on = NO_CPU; | ||
| 453 | TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n"); | ||
| 454 | } | ||
| 455 | } else | ||
| 456 | /* Only override Linux scheduler if we have a real-time task | ||
| 457 | * scheduled that needs to continue. | ||
| 458 | */ | ||
| 459 | if (exists) | ||
| 460 | next = prev; | ||
| 461 | |||
| 462 | sched_state_task_picked(); | ||
| 463 | |||
| 464 | raw_spin_unlock(&gsnedf_lock); | ||
| 465 | |||
| 466 | #ifdef WANT_ALL_SCHED_EVENTS | ||
| 467 | TRACE("gsnedf_lock released, next=0x%p\n", next); | ||
| 468 | |||
| 469 | if (next) | ||
| 470 | TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); | ||
| 471 | else if (exists && !next) | ||
| 472 | TRACE("becomes idle at %llu.\n", litmus_clock()); | ||
| 473 | #endif | ||
| 474 | |||
| 475 | |||
| 476 | return next; | ||
| 477 | } | ||
| 478 | |||
| 479 | |||
| 480 | /* _finish_switch - we just finished the switch away from prev | ||
| 481 | */ | ||
| 482 | static void gsnedf_finish_switch(struct task_struct *prev) | ||
| 483 | { | ||
| 484 | cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); | ||
| 485 | |||
| 486 | entry->scheduled = is_realtime(current) ? current : NULL; | ||
| 487 | #ifdef WANT_ALL_SCHED_EVENTS | ||
| 488 | TRACE_TASK(prev, "switched away from\n"); | ||
| 489 | #endif | ||
| 490 | } | ||
| 491 | |||
| 492 | |||
| 493 | /* Prepare a task for running in RT mode | ||
| 494 | */ | ||
| 495 | static void gsnedf_task_new(struct task_struct * t, int on_rq, int running) | ||
| 496 | { | ||
| 497 | unsigned long flags; | ||
| 498 | cpu_entry_t* entry; | ||
| 499 | |||
| 500 | TRACE("gsn edf: task new %d\n", t->pid); | ||
| 501 | |||
| 502 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | ||
| 503 | |||
| 504 | /* setup job params */ | ||
| 505 | release_at(t, litmus_clock()); | ||
| 506 | |||
| 507 | if (running) { | ||
| 508 | entry = &per_cpu(gsnedf_cpu_entries, task_cpu(t)); | ||
| 509 | BUG_ON(entry->scheduled); | ||
| 510 | |||
| 511 | #ifdef CONFIG_RELEASE_MASTER | ||
| 512 | if (entry->cpu != gsnedf.release_master) { | ||
| 513 | #endif | ||
| 514 | entry->scheduled = t; | ||
| 515 | tsk_rt(t)->scheduled_on = task_cpu(t); | ||
| 516 | #ifdef CONFIG_RELEASE_MASTER | ||
| 517 | } else { | ||
| 518 | /* do not schedule on release master */ | ||
| 519 | preempt(entry); /* force resched */ | ||
| 520 | tsk_rt(t)->scheduled_on = NO_CPU; | ||
| 521 | } | ||
| 522 | #endif | 38 | #endif |
| 523 | } else { | ||
| 524 | t->rt_param.scheduled_on = NO_CPU; | ||
| 525 | } | ||
| 526 | t->rt_param.linked_on = NO_CPU; | ||
| 527 | |||
| 528 | gsnedf_job_arrival(t); | ||
| 529 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
| 530 | } | ||
| 531 | 39 | ||
| 532 | static void gsnedf_task_wake_up(struct task_struct *task) | 40 | /* GSN-EDF Plugin object */ |
| 533 | { | 41 | static struct sched_global_plugin gsn_edf_plugin __cacheline_aligned_in_smp = { |
| 534 | unsigned long flags; | 42 | .plugin = { |
| 535 | lt_t now; | 43 | .plugin_name = "GSN-EDF", |
| 536 | 44 | .finish_switch = gblv_finish_switch, | |
| 537 | TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); | 45 | .tick = gblv_tick, |
| 538 | 46 | .task_new = gblv_task_new, | |
| 539 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | 47 | .complete_job = complete_job, |
| 540 | /* We need to take suspensions caused by semaphores into | 48 | .task_exit = gblv_task_exit, |
| 541 | * account! If a job resumes after being suspended due to acquiring | 49 | .schedule = gblv_schedule, |
| 542 | * a semaphore, it should never be treated as a new job release. | 50 | .task_wake_up = gblv_task_wake_up, |
| 543 | */ | 51 | .task_block = gblv_task_block, |
| 544 | if (get_rt_flags(task) == RT_F_EXIT_SEM) { | 52 | #ifdef CONFIG_FMLP |
| 545 | set_rt_flags(task, RT_F_RUNNING); | 53 | .fmlp_active = 1, |
| 546 | } else { | 54 | .pi_block = gsnedf_pi_block, |
| 547 | now = litmus_clock(); | 55 | .inherit_priority = gsnedf_inherit_priority, |
| 548 | if (is_tardy(task, now)) { | 56 | .return_priority = gsnedf_return_priority, |
| 549 | /* new sporadic release */ | 57 | #endif |
| 550 | release_at(task, now); | 58 | .admit_task = gblv_admit_task, |
| 551 | sched_trace_task_release(task); | 59 | .activate_plugin = gbl_activate_plugin |
| 552 | } | 60 | }, |
| 553 | else { | 61 | |
| 554 | if (task->rt.time_slice) { | 62 | .prio_order = edf_higher_prio, |
| 555 | /* came back in time before deadline | 63 | .take_ready = __take_ready, |
| 556 | */ | 64 | .add_ready = __add_ready, |
| 557 | set_rt_flags(task, RT_F_RUNNING); | 65 | .job_arrival = gblv_job_arrival, |
| 558 | } | 66 | .job_completion = gbl_job_completion |
| 559 | } | 67 | }; |
| 560 | } | ||
| 561 | gsnedf_job_arrival(task); | ||
| 562 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
| 563 | } | ||
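
The wake-up path on the left distinguishes three cases; condensed (a sketch that
drops the time_slice guard and the tracing shown above):

    if (get_rt_flags(task) == RT_F_EXIT_SEM)
            /* resumed after a semaphore suspension: same job continues */
            set_rt_flags(task, RT_F_RUNNING);
    else if (is_tardy(task, litmus_clock()))
            /* deadline already passed: treat as a new sporadic release */
            release_at(task, litmus_clock());
    /* otherwise the current job simply resumes within its window */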
| 564 | |||
| 565 | static void gsnedf_task_block(struct task_struct *t) | ||
| 566 | { | ||
| 567 | unsigned long flags; | ||
| 568 | |||
| 569 | TRACE_TASK(t, "block at %llu\n", litmus_clock()); | ||
| 570 | |||
| 571 | /* unlink if necessary */ | ||
| 572 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | ||
| 573 | unlink(t); | ||
| 574 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
| 575 | |||
| 576 | BUG_ON(!is_realtime(t)); | ||
| 577 | } | ||
| 578 | |||
| 579 | |||
| 580 | static void gsnedf_task_exit(struct task_struct * t) | ||
| 581 | { | ||
| 582 | unsigned long flags; | ||
| 583 | |||
| 584 | /* unlink if necessary */ | ||
| 585 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | ||
| 586 | unlink(t); | ||
| 587 | if (tsk_rt(t)->scheduled_on != NO_CPU) { | ||
| 588 | gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL; | ||
| 589 | tsk_rt(t)->scheduled_on = NO_CPU; | ||
| 590 | } | ||
| 591 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
| 592 | 68 | ||
| 593 | BUG_ON(!is_realtime(t)); | ||
| 594 | TRACE_TASK(t, "RIP\n"); | ||
| 595 | } | ||
| 596 | 69 | ||
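
On the right-hand side of the hunk above, the per-plugin callbacks collapse into
one sched_global_plugin instance: the designated initializers form a C-style
vtable, and the shared gblv_* routines dispatch through active_gbl_plugin. A
generic arrival path might look like this (sketch; the helper name is
hypothetical):

    static void gbl_job_arrival_sketch(struct task_struct *t)
    {
            /* enqueue according to the plugin's priority order ... */
            active_gbl_plugin->add_ready(&active_gbl_plugin->domain, t);
            /* ... and preempt a CPU if the new job has higher priority */
            gbl_check_for_preemptions();
    }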
| 597 | #ifdef CONFIG_FMLP | 70 | #ifdef CONFIG_FMLP |
| 598 | |||
| 599 | /* Update the queue position of a task that got its priority boosted via | 71 | /* Update the queue position of a task that got its priority boosted via |
| 600 | * priority inheritance. */ | 72 | * priority inheritance. */ |
| 601 | static void update_queue_position(struct task_struct *holder) | 73 | static void update_queue_position(struct task_struct *holder) |
| @@ -618,13 +90,13 @@ static void update_queue_position(struct task_struct *holder) | |||
| 618 | * We can't use heap_decrease() here since | 90 | * We can't use heap_decrease() here since |
| 619 | * the cpu_heap is ordered in reverse direction, so | 91 | * the cpu_heap is ordered in reverse direction, so |
| 620 | * it is actually an increase. */ | 92 | * it is actually an increase. */ |
| 621 | bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, | 93 | bheap_delete(gbl_cpu_lower_prio, &gsn_edf_plugin.cpu_heap, |
| 622 | gsnedf_cpus[tsk_rt(holder)->linked_on]->hn); | 94 | gsn_edf_plugin.cpus[tsk_rt(holder)->linked_on]->hn); |
| 623 | bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, | 95 | bheap_insert(gbl_cpu_lower_prio, &gsn_edf_plugin.cpu_heap, |
| 624 | gsnedf_cpus[tsk_rt(holder)->linked_on]->hn); | 96 | gsn_edf_plugin.cpus[tsk_rt(holder)->linked_on]->hn); |
| 625 | } else { | 97 | } else { |
| 626 | /* holder may be queued: first stop queue changes */ | 98 | /* holder may be queued: first stop queue changes */ |
| 627 | raw_spin_lock(&gsnedf.release_lock); | 99 | raw_spin_lock(&gsn_edf_plugin.domain.release_lock); |
| 628 | if (is_queued(holder)) { | 100 | if (is_queued(holder)) { |
| 629 | TRACE_TASK(holder, "%s: is queued\n", | 101 | TRACE_TASK(holder, "%s: is queued\n", |
| 630 | __FUNCTION__); | 102 | __FUNCTION__); |
| @@ -642,7 +114,7 @@ static void update_queue_position(struct task_struct *holder) | |||
| 642 | TRACE_TASK(holder, "%s: is NOT queued => Done.\n", | 114 | TRACE_TASK(holder, "%s: is NOT queued => Done.\n", |
| 643 | __FUNCTION__); | 115 | __FUNCTION__); |
| 644 | } | 116 | } |
| 645 | raw_spin_unlock(&gsnedf.release_lock); | 117 | raw_spin_unlock(&gsn_edf_plugin.domain.release_lock); |
| 646 | 118 | ||
| 647 | /* If holder was enqueued in a release heap, then the following | 119 | /* If holder was enqueued in a release heap, then the following |
| 648 | * preemption check is pointless, but we can't easily detect | 120 | * preemption check is pointless, but we can't easily detect |
| @@ -654,9 +126,9 @@ static void update_queue_position(struct task_struct *holder) | |||
| 654 | /* heap_decrease() hit the top level of the heap: make | 126 | /* heap_decrease() hit the top level of the heap: make |
| 655 | * sure preemption checks get the right task, not the | 127 | * sure preemption checks get the right task, not the |
| 656 | * potentially stale cached minimum. */ | 128 | * potentially stale cached minimum. */ |
| 657 | bheap_uncache_min(edf_ready_order, | 129 | bheap_uncache_min(gbl_ready_order, |
| 658 | &gsnedf.ready_queue); | 130 | &gsn_edf_plugin.domain.ready_queue); |
| 659 | check_for_preemptions(); | 131 | gbl_check_for_preemptions(); |
| 660 | } | 132 | } |
| 661 | } | 133 | } |
| 662 | } | 134 | } |
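
The delete/insert pair above is the standard workaround when a key change can
move a node away from the root of the heap; bheap_decrease() only supports
movement toward the root. Factored out (a sketch; bheap_requeue is a hypothetical
helper, not part of the bheap API):

    static void bheap_requeue(bheap_prio_t higher_prio, struct bheap *heap,
                              struct bheap_node *node)
    {
            bheap_delete(higher_prio, heap, node);
            bheap_insert(higher_prio, heap, node);
    }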
| @@ -740,8 +212,8 @@ static long gsnedf_return_priority(struct pi_semaphore *sem) | |||
| 740 | t->rt_param.inh_task = NULL; | 212 | t->rt_param.inh_task = NULL; |
| 741 | 213 | ||
| 742 | /* Check if rescheduling is necessary */ | 214 | /* Check if rescheduling is necessary */ |
| 743 | unlink(t); | 215 | gbl_unlink(t); |
| 744 | gsnedf_job_arrival(t); | 216 | gsn_edf_plugin.job_arrival(t); |
| 745 | raw_spin_unlock(&gsnedf_lock); | 217 | raw_spin_unlock(&gsnedf_lock); |
| 746 | } | 218 | } |
| 747 | 219 | ||
| @@ -750,78 +222,24 @@ static long gsnedf_return_priority(struct pi_semaphore *sem) | |||
| 750 | 222 | ||
| 751 | #endif | 223 | #endif |
| 752 | 224 | ||
| 753 | static long gsnedf_admit_task(struct task_struct* tsk) | ||
| 754 | { | ||
| 755 | return 0; | ||
| 756 | } | ||
| 757 | |||
| 758 | static long gsnedf_activate_plugin(void) | ||
| 759 | { | ||
| 760 | int cpu; | ||
| 761 | cpu_entry_t *entry; | ||
| 762 | |||
| 763 | bheap_init(&gsnedf_cpu_heap); | ||
| 764 | #ifdef CONFIG_RELEASE_MASTER | ||
| 765 | gsnedf.release_master = atomic_read(&release_master_cpu); | ||
| 766 | #endif | ||
| 767 | |||
| 768 | for_each_online_cpu(cpu) { | ||
| 769 | entry = &per_cpu(gsnedf_cpu_entries, cpu); | ||
| 770 | bheap_node_init(&entry->hn, entry); | ||
| 771 | entry->linked = NULL; | ||
| 772 | entry->scheduled = NULL; | ||
| 773 | #ifdef CONFIG_RELEASE_MASTER | ||
| 774 | if (cpu != gsnedf.release_master) { | ||
| 775 | #endif | ||
| 776 | TRACE("GSN-EDF: Initializing CPU #%d.\n", cpu); | ||
| 777 | update_cpu_position(entry); | ||
| 778 | #ifdef CONFIG_RELEASE_MASTER | ||
| 779 | } else { | ||
| 780 | TRACE("GSN-EDF: CPU %d is release master.\n", cpu); | ||
| 781 | } | ||
| 782 | #endif | ||
| 783 | } | ||
| 784 | return 0; | ||
| 785 | } | ||
| 786 | |||
| 787 | /* Plugin object */ | ||
| 788 | static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = { | ||
| 789 | .plugin_name = "GSN-EDF", | ||
| 790 | .finish_switch = gsnedf_finish_switch, | ||
| 791 | .tick = gsnedf_tick, | ||
| 792 | .task_new = gsnedf_task_new, | ||
| 793 | .complete_job = complete_job, | ||
| 794 | .task_exit = gsnedf_task_exit, | ||
| 795 | .schedule = gsnedf_schedule, | ||
| 796 | .task_wake_up = gsnedf_task_wake_up, | ||
| 797 | .task_block = gsnedf_task_block, | ||
| 798 | #ifdef CONFIG_FMLP | ||
| 799 | .fmlp_active = 1, | ||
| 800 | .pi_block = gsnedf_pi_block, | ||
| 801 | .inherit_priority = gsnedf_inherit_priority, | ||
| 802 | .return_priority = gsnedf_return_priority, | ||
| 803 | #endif | ||
| 804 | .admit_task = gsnedf_admit_task, | ||
| 805 | .activate_plugin = gsnedf_activate_plugin, | ||
| 806 | }; | ||
| 807 | |||
| 808 | 225 | ||
| 809 | static int __init init_gsn_edf(void) | 226 | static int __init init_gsn_edf(void) |
| 810 | { | 227 | { |
| 811 | int cpu; | 228 | int cpu; |
| 812 | cpu_entry_t *entry; | 229 | cpu_entry_t *entry; |
| 813 | 230 | ||
| 814 | bheap_init(&gsnedf_cpu_heap); | 231 | bheap_init(&gsn_edf_plugin.cpu_heap); |
| 815 | /* initialize CPU state */ | 232 | /* initialize CPU state */ |
| 816 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | 233 | for (cpu = 0; cpu < NR_CPUS; cpu++) { |
| 817 | entry = &per_cpu(gsnedf_cpu_entries, cpu); | 234 | entry = &per_cpu(gsnedf_cpu_entries, cpu); |
| 818 | gsnedf_cpus[cpu] = entry; | 235 | gsn_edf_plugin.cpus[cpu] = entry; |
| 819 | entry->cpu = cpu; | 236 | entry->cpu = cpu; |
| 820 | entry->hn = &gsnedf_heap_node[cpu]; | 237 | entry->hn = &gsn_edf_plugin.heap_node[cpu]; |
| 821 | bheap_node_init(&entry->hn, entry); | 238 | bheap_node_init(&entry->hn, entry); |
| 822 | } | 239 | } |
| 823 | edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs); | 240 | gbl_domain_init(&gsn_edf_plugin, NULL, gbl_release_jobs); |
| 824 | return register_sched_plugin(&gsn_edf_plugin); | 241 | |
| 242 | return register_sched_plugin(&gsn_edf_plugin.plugin); | ||
| 825 | } | 243 | } |
| 826 | 244 | ||
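
Since struct sched_plugin is embedded as the first member of struct
sched_global_plugin, only the base object is registered; when a callback later
receives the base pointer, the derived object can be recovered with the usual
container_of pattern (sketch; to_global_plugin is a hypothetical helper):

    static inline struct sched_global_plugin*
    to_global_plugin(struct sched_plugin *plugin)
    {
            return container_of(plugin, struct sched_global_plugin, plugin);
    }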
| 827 | 245 | ||
diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c index c7d5cf7aa2b3..f2788a962fdb 100644 --- a/litmus/sched_pfair.c +++ b/litmus/sched_pfair.c | |||
| @@ -813,7 +813,7 @@ static long pfair_admit_task(struct task_struct* t) | |||
| 813 | return 0; | 813 | return 0; |
| 814 | } | 814 | } |
| 815 | 815 | ||
| 816 | static long pfair_activate_plugin(void) | 816 | static long pfair_activate_plugin(void* plugin) |
| 817 | { | 817 | { |
| 818 | int cpu; | 818 | int cpu; |
| 819 | struct pfair_state* state; | 819 | struct pfair_state* state; |
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c index d912a6494d20..5d14d469d971 100644 --- a/litmus/sched_plugin.c +++ b/litmus/sched_plugin.c | |||
| @@ -111,7 +111,7 @@ static long litmus_dummy_complete_job(void) | |||
| 111 | return -ENOSYS; | 111 | return -ENOSYS; |
| 112 | } | 112 | } |
| 113 | 113 | ||
| 114 | static long litmus_dummy_activate_plugin(void) | 114 | static long litmus_dummy_activate_plugin(void* plugin) |
| 115 | { | 115 | { |
| 116 | return 0; | 116 | return 0; |
| 117 | } | 117 | } |
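
The activate_plugin() callbacks gain a void* parameter so the framework can pass
the plugin object itself to a shared implementation. Presumably gbl_activate_plugin
uses it along these lines (a sketch under that assumption):

    static long gbl_activate_plugin_sketch(void *plg)
    {
            active_gbl_plugin = (struct sched_global_plugin*) plg;
            bheap_init(&active_gbl_plugin->cpu_heap);
            /* per-CPU entries are then re-initialized much like in the
             * old gsnedf_activate_plugin() shown above */
            return 0;
    }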
