 include/litmus/edzl_common.h         |  14 +
 include/litmus/litmus.h              |  21 +
 include/litmus/rt_param.h            |  15 +-
 include/litmus/sched_global_plugin.h |   6 +-
 litmus/Kconfig                       |  13 +-
 litmus/Makefile                      |   1 +
 litmus/edzl_common.c                 |  57 +
 litmus/sched_edzl.c                  | 327 +
 litmus/sched_global_plugin.c         | 106 +-
 litmus/sched_gsn_edf.c               |  70 +-
 10 files changed, 537 insertions(+), 93 deletions(-)
diff --git a/include/litmus/edzl_common.h b/include/litmus/edzl_common.h
new file mode 100644
index 000000000000..d1a89ee08554
--- /dev/null
+++ b/include/litmus/edzl_common.h
@@ -0,0 +1,14 @@
+/*
+ * EDZL common data structures and utility functions shared by all EDZL
+ * based scheduler plugins
+ */
+
+#ifndef __UNC_EDZL_COMMON_H__
+#define __UNC_EDZL_COMMON_H__
+
+#include <litmus/rt_domain.h>
+
+int edzl_higher_prio(struct task_struct* first,
+		     struct task_struct* second);
+
+#endif
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 246483783fc0..3203a0809f96 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -54,6 +54,12 @@ void litmus_exit_task(struct task_struct *tsk);
 #define get_release(t)		(tsk_rt(t)->job_params.release)
 #define get_class(t)		(tsk_rt(t)->task_params.cls)
 
+#ifdef CONFIG_PLUGIN_EDZL
+#define get_zerolaxity(t)	(tsk_rt(t)->job_params.zero_laxity)
+#define set_zerolaxity(t)	(tsk_rt(t)->job_params.zero_laxity=1)
+#define clear_zerolaxity(t)	(tsk_rt(t)->job_params.zero_laxity=0)
+#endif
+
 inline static int budget_exhausted(struct task_struct* t)
 {
 	return get_exec_time(t) >= get_exec_cost(t);
@@ -86,6 +92,21 @@ static inline lt_t litmus_clock(void)
 	return ktime_to_ns(ktime_get());
 }
 
+#ifdef CONFIG_PLUGIN_EDZL
+inline static lt_t laxity_remaining(struct task_struct* t)
+{
+	lt_t now = litmus_clock();
+	lt_t remaining = budget_remaining(t);
+	lt_t deadline = get_deadline(t);
+
+	if(lt_before(now + remaining, deadline))
+		return (deadline - (now + remaining));
+	else
+		return 0;
+}
+#endif
+
+
 /* A macro to convert from nanoseconds to ktime_t. */
 #define ns_to_ktime(t)		ktime_add_ns(ktime_set(0, 0), t)
 
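For intuition, an illustrative standalone sketch (not part of the patch) of the same laxity arithmetic as laxity_remaining() above, with made-up numbers: laxity is the slack between "now plus remaining budget" and the absolute deadline, so it hits zero at deadline minus remaining budget, which is the instant the EDZL plugin later arms zl_timer for.

/* Illustrative sketch only, not from the patch. */
#include <stdio.h>

typedef unsigned long long lt_t;	/* nanoseconds, mirroring litmus' lt_t */

static lt_t laxity_remaining_sketch(lt_t now, lt_t budget_remaining, lt_t deadline)
{
	/* same rule as laxity_remaining() in the hunk above */
	return (now + budget_remaining < deadline) ?
		deadline - (now + budget_remaining) : 0;
}

int main(void)
{
	lt_t deadline  = 100000000ULL;	/* absolute deadline: 100 ms */
	lt_t remaining =  30000000ULL;	/* remaining budget:    30 ms */

	/* 20 ms of laxity left at t = 50 ms ... */
	printf("laxity at t=50ms: %llu ns\n",
	       laxity_remaining_sketch(50000000ULL, remaining, deadline));
	/* ... and zero laxity from t = 70 ms onward */
	printf("laxity at t=70ms: %llu ns\n",
	       laxity_remaining_sketch(70000000ULL, remaining, deadline));
	return 0;
}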
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index a7a183f34a80..41768f446436 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -90,6 +90,14 @@ struct rt_job {
 	 * Increase this sequence number when a job is released.
 	 */
 	unsigned int	job_no;
+
+#ifdef CONFIG_PLUGIN_EDZL
+	/* boolean indicating zero-laxity state.  We will
+	   set this flag explicitly at zero-laxity detection.
+	   This makes priority comparison operations more
+	   predictable since laxity varies with time */
+	unsigned int	zero_laxity:1;
+#endif
 };
 
 struct pfair_param;
@@ -113,6 +121,11 @@ struct rt_param {
 
 	/* timing parameters */
 	struct rt_job job_params;
+
+#ifdef CONFIG_PLUGIN_EDZL
+	/* used to trigger zero-laxity detection */
+	struct hrtimer	zl_timer;
+#endif
 
 	/* task representing the current "inherited" task
 	 * priority, assigned by inherit_priority and
@@ -120,7 +133,7 @@ struct rt_param {
 	 * could point to self if PI does not result in
 	 * an increased task priority.
 	 */
-	struct task_struct*	inh_task;
+	struct task_struct*	inh_task;
 
 #ifdef CONFIG_NP_SECTION
 	/* For the FMLP under PSN-EDF, it is required to make the task
diff --git a/include/litmus/sched_global_plugin.h b/include/litmus/sched_global_plugin.h
index cac2de63a3ee..21a91817eac1 100644
--- a/include/litmus/sched_global_plugin.h
+++ b/include/litmus/sched_global_plugin.h
@@ -29,7 +29,7 @@ typedef struct task_struct* (*take_ready_t)(rt_domain_t* rt);
 typedef void (*add_ready_t)(rt_domain_t* rt, struct task_struct *new);
 typedef void (*job_arrival_t)(struct task_struct* task);
 typedef void (*job_completion_t)(struct task_struct *t, int forced);
-
+typedef int (*preemption_needed_t)(struct task_struct *t);
 
 struct sched_global_plugin {
 
@@ -41,6 +41,7 @@ struct sched_global_plugin {
 	add_ready_t		add_ready;
 	job_arrival_t		job_arrival;
 	job_completion_t	job_completion;
+	preemption_needed_t	preemption_needed;
 
 	rt_domain_t		domain;
 
@@ -60,7 +61,6 @@ extern struct sched_global_plugin* active_gbl_plugin;
  *
  * Use prefix "gbl_" (global)
  */
-int gbl_preemption_needed(struct task_struct *t);
 int gbl_ready_order(struct bheap_node* a, struct bheap_node* b);
 int gbl_cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b);
 void gbl_update_cpu_position(cpu_entry_t *entry);
@@ -69,6 +69,7 @@ void gbl_link_task_to_cpu(struct task_struct* linked, cpu_entry_t *entry);
 void gbl_unlink(struct task_struct* t);
 void gbl_preempt(cpu_entry_t *entry);
 void gbl_requeue(struct task_struct* task);
+void gbl_update_queue_position(struct task_struct *task);
 void gbl_check_for_preemptions(void);
 void gbl_release_jobs(rt_domain_t* rt, struct bheap* tasks);
 void gbl_job_completion(struct task_struct *t, int forced);
@@ -87,6 +88,7 @@ long gbl_activate_plugin(void* plugin);
  * Use prefix "gblv_" (global virtual)
  */
 void gblv_job_arrival(struct task_struct* task);
+int gblv_preemption_needed(struct task_struct *t);
 void gblv_tick(struct task_struct* t);
 struct task_struct* gblv_schedule(struct task_struct * prev);
 void gblv_finish_switch(struct task_struct *prev);
diff --git a/litmus/Kconfig b/litmus/Kconfig
index a2f267870f29..1e571af45e72 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -23,6 +23,17 @@ config PLUGIN_PFAIR
 
 	  If unsure, say Yes.
 
+config PLUGIN_EDZL
+	bool "EDZL"
+	depends on X86 && SYSFS
+	default y
+	help
+	  Include the EDZL (Earliest Deadline, Zero Laxity) plugin in the kernel.
+	  EDZL functions like G-EDF, except jobs with zero laxity are given maximum
+	  priority.
+
+	  If unsure, say Yes.
+
 config RELEASE_MASTER
 	bool "Release-master Support"
 	depends on ARCH_HAS_SEND_PULL_TIMERS
@@ -32,7 +43,7 @@ config RELEASE_MASTER
 	  that services all timer interrupts, but that does not schedule
 	  real-time tasks. See RTSS'09 paper for details
 	  (http://www.cs.unc.edu/~anderson/papers.html).
-	  Currently only supported by GSN-EDF.
+	  Currently only supported by GSN-EDF and EDZL.
 
 endmenu
 
diff --git a/litmus/Makefile b/litmus/Makefile
index 820deb7f2263..ec4e21106886 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -21,6 +21,7 @@ obj-y = sched_plugin.o litmus.o \
 
 obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
 obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
+obj-$(CONFIG_PLUGIN_EDZL) += sched_edzl.o edzl_common.o
 
 obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
 obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
diff --git a/litmus/edzl_common.c b/litmus/edzl_common.c
new file mode 100644
index 000000000000..9e26304a1ea2
--- /dev/null
+++ b/litmus/edzl_common.c
@@ -0,0 +1,57 @@
+/*
+ * kernel/edzl_common.c
+ *
+ * Common functions for EDZL based scheduler.
+ */
+
+#include <linux/percpu.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+
+#include <litmus/litmus.h>
+#include <litmus/sched_plugin.h>
+#include <litmus/sched_trace.h>
+
+#include <litmus/edf_common.h>
+#include <litmus/edzl_common.h>
+
+
+int edzl_higher_prio(struct task_struct* first,
+		     struct task_struct* second)
+{
+	struct task_struct *first_task = first;
+	struct task_struct *second_task = second;
+
+	/* There is no point in comparing a task to itself. */
+	if (first && first == second) {
+		TRACE_TASK(first,
+			   "WARNING: pointless edf priority comparison.\n");
+		return 0;
+	}
+
+
+	/* Check for inherited priorities. Change task
+	 * used for comparison in such a case.
+	 */
+	if (first && first->rt_param.inh_task)
+		first_task = first->rt_param.inh_task;
+	if (second && second->rt_param.inh_task)
+		second_task = second->rt_param.inh_task;
+
+	/* null checks & rt checks */
+	if(!first_task)
+		return 0;
+	else if(!second_task || !is_realtime(second_task))
+		return 1;
+
+
+	if(likely(get_zerolaxity(first_task) == get_zerolaxity(second_task)))
+	{
+		/* edf order if both tasks have the same laxity state */
+		return(edf_higher_prio(first_task, second_task));
+	}
+	else
+	{
+		return(get_zerolaxity(first_task));
+	}
+}
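An illustrative standalone sketch (not from the patch) of the core ordering rule that edzl_higher_prio() implements, stripped of the inheritance and NULL handling above: a job flagged zero-laxity outranks one that is not, and otherwise the earlier absolute deadline wins, as in plain EDF.

/* Illustrative sketch only; names are simplified stand-ins. */
#include <stdio.h>

struct job {
	unsigned long long deadline;	/* absolute deadline in ns */
	int zero_laxity;		/* mirrors rt_job.zero_laxity */
};

static int edzl_higher_prio_sketch(const struct job *a, const struct job *b)
{
	if (a->zero_laxity == b->zero_laxity)
		return a->deadline < b->deadline;	/* EDF tie-break */
	return a->zero_laxity;				/* zero laxity dominates */
}

int main(void)
{
	struct job a = { .deadline = 100000000ULL, .zero_laxity = 1 };
	struct job b = { .deadline =  50000000ULL, .zero_laxity = 0 };

	/* prints 1: a outranks b despite its later deadline */
	printf("%d\n", edzl_higher_prio_sketch(&a, &b));
	return 0;
}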
diff --git a/litmus/sched_edzl.c b/litmus/sched_edzl.c
new file mode 100644
index 000000000000..0664b78e540b
--- /dev/null
+++ b/litmus/sched_edzl.c
@@ -0,0 +1,327 @@
+/*
+ * litmus/sched_edzl.c
+ *
+ * Implementation of the EDZL scheduling algorithm.
+ *
+ * This version uses the simple approach and serializes all scheduling
+ * decisions by the use of a queue lock. This is probably not the
+ * best way to do it, but it should suffice for now.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/percpu.h>
+#include <linux/sched.h>
+
+#include <litmus/litmus.h>
+#include <litmus/jobs.h>
+#include <litmus/sched_global_plugin.h>
+#include <litmus/edzl_common.h>
+#include <litmus/sched_trace.h>
+
+#include <litmus/preempt.h>
+
+#include <litmus/bheap.h>
+
+#include <linux/module.h>
+
+static struct task_struct* __edzl_take_ready(rt_domain_t* rt);
+static void __edzl_add_ready(rt_domain_t* rt, struct task_struct *new);
+static void edzl_job_arrival(struct task_struct* task);
+static void edzl_task_new(struct task_struct * t, int on_rq, int running);
+static void edzl_task_wake_up(struct task_struct *task);
+static void edzl_task_exit(struct task_struct * t);
+static int edzl_preemption_needed(struct task_struct *t);
+
+
+/* EDZL Plugin object */
+static struct sched_global_plugin edzl_plugin __cacheline_aligned_in_smp = {
+	.plugin = {
+		.finish_switch		= gblv_finish_switch,
+		.tick			= gblv_tick,
+		.complete_job		= complete_job,
+		.schedule		= gblv_schedule,
+		.task_block		= gblv_task_block,
+		.admit_task		= gblv_admit_task,
+		.activate_plugin	= gbl_activate_plugin,
+
+		.plugin_name		= "EDZL",
+		.task_new		= edzl_task_new,
+		.task_wake_up		= edzl_task_wake_up,
+		.task_exit		= edzl_task_exit,
+	},
+
+	.job_completion		= gbl_job_completion,
+
+	.prio_order		= edzl_higher_prio,
+	.take_ready		= __edzl_take_ready,
+	.add_ready		= __edzl_add_ready,
+	.job_arrival		= edzl_job_arrival,
+	.preemption_needed	= edzl_preemption_needed
+};
+
+
+#define active_gbl_domain	(active_gbl_plugin->domain)
+#define active_gbl_domain_lock	(active_gbl_domain.ready_lock)
+
+DEFINE_PER_CPU(cpu_entry_t, edzl_cpu_entries);
+
+
+static enum hrtimer_restart on_zero_laxity(struct hrtimer *timer)
+{
+	unsigned long flags;
+	struct task_struct* t;
+
+	lt_t now = litmus_clock();
+
+	TRACE("Zero-laxity timer went off!\n");
+
+	raw_spin_lock_irqsave(&active_gbl_domain_lock, flags);
+
+	t = container_of(container_of(timer, struct rt_param, zl_timer),
+			 struct task_struct,
+			 rt_param);
+
+	TRACE_TASK(t, "Reached zero-laxity. (now: %llu, zl-pt: %lld, time remaining (now): %lld)\n",
+		   now,
+		   get_deadline(t) - budget_remaining(t),
+		   get_deadline(t) - now);
+
+	set_zerolaxity(t);
+	gbl_update_queue_position(t);
+
+	raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags);
+
+	return HRTIMER_NORESTART;
+}
+
+/* __edzl_take_ready - calls __take_ready with an EDZL timer-cancellation side-effect. */
+static struct task_struct* __edzl_take_ready(rt_domain_t* rt)
+{
+	struct task_struct* t = __take_ready(rt);
+
+	if(t)
+	{
+		if(get_zerolaxity(t) == 0)
+		{
+			if(hrtimer_active(&tsk_rt(t)->zl_timer))
+			{
+				int cancel_ret;
+
+				TRACE_TASK(t, "Canceling zero-laxity timer.\n");
+				cancel_ret = hrtimer_try_to_cancel(&tsk_rt(t)->zl_timer);
+				WARN_ON(cancel_ret == 0); /* should never be inactive. */
+			}
+		}
+		else
+		{
+			TRACE_TASK(t, "Task already has zero-laxity flagged.\n");
+		}
+	}
+
+	return t;
+}
+
+/* __edzl_add_ready - calls __add_ready with an EDZL timer-setting side-effect. */
+static void __edzl_add_ready(rt_domain_t* rt, struct task_struct *new)
+{
+	__add_ready(rt, new);
+
+	if(get_zerolaxity(new) == 0)
+	{
+		lt_t when_to_fire;
+
+		when_to_fire = get_deadline(new) - budget_remaining(new);
+
+		TRACE_TASK(new, "Setting zero-laxity timer for %llu. (deadline: %llu, remaining: %llu)\n",
+			   when_to_fire,
+			   get_deadline(new),
+			   budget_remaining(new));
+
+		__hrtimer_start_range_ns(&tsk_rt(new)->zl_timer,
+					 ns_to_ktime(when_to_fire),
+					 0,
+					 HRTIMER_MODE_ABS_PINNED,
+					 0);
+	}
+	else
+	{
+		TRACE_TASK(new, "Already has zero-laxity when added to ready queue. (deadline: %llu, remaining: %llu))\n",
+			   get_deadline(new),
+			   budget_remaining(new));
+	}
+}
+
+
+
+/* edzl_job_arrival: task is either resumed or released */
+static void edzl_job_arrival(struct task_struct* task)
+{
+	BUG_ON(!task);
+
+	/* clear old laxity flag or tag zero-laxity upon release */
+	if(laxity_remaining(task))
+		clear_zerolaxity(task);
+	else
+		set_zerolaxity(task);
+
+	gbl_requeue(task);
+	gbl_check_for_preemptions();
+}
+
+
+/* Prepare a task for running in RT mode
+ */
+static void edzl_task_new(struct task_struct * t, int on_rq, int running)
+{
+	unsigned long flags;
+	cpu_entry_t* entry;
+
+	TRACE("edzl: task new %d\n", t->pid);
+
+	raw_spin_lock_irqsave(&active_gbl_domain_lock, flags);
+
+	hrtimer_init(&t->rt_param.zl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	t->rt_param.zl_timer.function = on_zero_laxity;
+
+	/* setup job params */
+	release_at(t, litmus_clock());
+
+	if (running) {
+		entry = active_gbl_plugin->cpus[task_cpu(t)];
+		BUG_ON(entry->scheduled);
+
+#ifdef CONFIG_RELEASE_MASTER
+		if (entry->cpu != active_gbl_domain.release_master) {
+#endif
+			entry->scheduled = t;
+			tsk_rt(t)->scheduled_on = task_cpu(t);
+#ifdef CONFIG_RELEASE_MASTER
+		} else {
+			/* do not schedule on release master */
+			gbl_preempt(entry); /* force resched */
+			tsk_rt(t)->scheduled_on = NO_CPU;
+		}
+#endif
+	} else {
+		t->rt_param.scheduled_on = NO_CPU;
+	}
+	t->rt_param.linked_on = NO_CPU;
+
+	active_gbl_plugin->job_arrival(t);
+	raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags);
+}
+
+
+static void edzl_task_wake_up(struct task_struct *task)
+{
+	unsigned long flags;
+	lt_t now;
+
+	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
+
+	raw_spin_lock_irqsave(&active_gbl_domain_lock, flags);
+	/* We need to take suspensions because of semaphores into
+	 * account! If a job resumes after being suspended due to acquiring
+	 * a semaphore, it should never be treated as a new job release.
+	 */
+	if (get_rt_flags(task) == RT_F_EXIT_SEM) {
+		set_rt_flags(task, RT_F_RUNNING);
+	} else {
+		now = litmus_clock();
+		if (is_tardy(task, now)) {
+			/* new sporadic release */
+			release_at(task, now);
+			sched_trace_task_release(task);
+		}
+		else {
+			if (task->rt.time_slice) {
+				/* came back in time before deadline
+				 */
+				set_rt_flags(task, RT_F_RUNNING);
+			}
+		}
+	}
+	active_gbl_plugin->job_arrival(task);
+	raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags);
+}
+
+
+static void edzl_task_exit(struct task_struct * t)
+{
+	unsigned long flags;
+
+	/* unlink if necessary */
+	raw_spin_lock_irqsave(&active_gbl_domain_lock, flags);
+	gbl_unlink(t);
+	if (tsk_rt(t)->scheduled_on != NO_CPU) {
+		active_gbl_plugin->cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
+		tsk_rt(t)->scheduled_on = NO_CPU;
+	}
+
+	if(hrtimer_active(&tsk_rt(t)->zl_timer))
+	{
+		/* BUG if reached? */
+		TRACE_TASK(t, "Canceled armed timer while exiting.\n");
+		hrtimer_cancel(&tsk_rt(t)->zl_timer);
+	}
+
+	raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags);
+
+	BUG_ON(!is_realtime(t));
+	TRACE_TASK(t, "RIP\n");
+}
+
+
+/* need_to_preempt - check whether the task t needs to be preempted
+ *                   call only with irqs disabled and with ready_lock acquired
+ *                   THIS DOES NOT TAKE NON-PREEMPTIVE SECTIONS INTO ACCOUNT!
+ */
+static int edzl_preemption_needed(struct task_struct *t)
+{
+	/* we need the read lock for edf_ready_queue */
+	/* no need to preempt if there is nothing pending */
+	if (!__jobs_pending(&active_gbl_domain))
+		return 0;
+	/* we need to reschedule if t doesn't exist */
+	if (!t)
+		return 1;
+	/* make sure to get non-rt stuff out of the way */
+	if (!is_realtime(t))
+		return 1;
+
+	/* NOTE: We cannot check for non-preemptibility since we
+	 *       don't know what address space we're currently in.
+	 */
+
+	/* Detect zero-laxity as needed. Easier to do it here than in tick.
+	   (No timer is used to detect zero-laxity while a job is running.) */
+	if(unlikely(!get_zerolaxity(t) && laxity_remaining(t) == 0))
+	{
+		set_zerolaxity(t);
+	}
+
+	return edzl_higher_prio(__next_ready(&active_gbl_domain), t);
+}
+
+
+static int __init init_edzl(void)
+{
+	int cpu;
+	cpu_entry_t *entry;
+
+	bheap_init(&edzl_plugin.cpu_heap);
+	/* initialize CPU state */
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+		entry = &per_cpu(edzl_cpu_entries, cpu);
+		edzl_plugin.cpus[cpu] = entry;
+		entry->cpu = cpu;
+		entry->hn = &edzl_plugin.heap_node[cpu];
+		bheap_node_init(&entry->hn, entry);
+	}
+	gbl_domain_init(&edzl_plugin, NULL, gbl_release_jobs);
+
+	return register_sched_plugin(&edzl_plugin.plugin);
+}
+
+
+module_init(init_edzl);
diff --git a/litmus/sched_global_plugin.c b/litmus/sched_global_plugin.c
index 22dffa7d62fc..e94247b66b59 100644
--- a/litmus/sched_global_plugin.c
+++ b/litmus/sched_global_plugin.c
@@ -105,31 +105,11 @@ struct sched_global_plugin* active_gbl_plugin;
 /*********************************************************************/
 
 /* Priority-related functions */
-int gbl_preemption_needed(struct task_struct *t)
-{
-	/* we need the read lock for active_gbl_domain's ready_queue */
-	/* no need to preempt if there is nothing pending */
-	if (!__jobs_pending(&active_gbl_domain))
-		return 0;
-	/* we need to reschedule if t doesn't exist */
-	if (!t)
-		return 1;
-
-	/* NOTE: We cannot check for non-preemptibility since we
-	 *       don't know what address space we're currently in.
-	 */
-
-	/* make sure to get non-rt stuff out of the way */
-	return !is_realtime(t) || active_gbl_plugin->prio_order(__next_ready(&active_gbl_domain), t);
-}
-
 int gbl_ready_order(struct bheap_node* a, struct bheap_node* b)
 {
 	return active_gbl_plugin->prio_order(bheap2task(a), bheap2task(b));
 }
 
-
-
 int gbl_cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
 {
 	cpu_entry_t *a, *b;
@@ -243,7 +223,6 @@ void gbl_unlink(struct task_struct* t)
 	}
 }
 
-
 /* preempt - force a CPU to reschedule
  */
 void gbl_preempt(cpu_entry_t *entry)
@@ -268,6 +247,71 @@ void gbl_requeue(struct task_struct* task)
 	}
 }
 
+/*
+ * update_queue_position - call after changing the priority of 'task'.
+ */
+void gbl_update_queue_position(struct task_struct *task)
+{
+	/* We don't know whether task is in the ready queue. It should, but
+	 * on a budget overrun it may already be in a release queue. Hence,
+	 * calling unlink() is not possible since it assumes that the task is
+	 * not in a release queue.
+	 */
+
+	/* Assumption: caller holds active_gbl_domain_lock */
+
+	int check_preempt = 0;
+
+	if (tsk_rt(task)->linked_on != NO_CPU) {
+		TRACE_TASK(task, "%s: linked on %d\n",
+			   __FUNCTION__, tsk_rt(task)->linked_on);
+		/* Task is scheduled; need to re-order CPUs.
+		 * We can't use heap_decrease() here since
+		 * the cpu_heap is ordered in reverse direction, so
+		 * it is actually an increase. */
+		bheap_delete(gbl_cpu_lower_prio, &active_gbl_plugin->cpu_heap,
+			     active_gbl_plugin->cpus[tsk_rt(task)->linked_on]->hn);
+		bheap_insert(gbl_cpu_lower_prio, &active_gbl_plugin->cpu_heap,
+			     active_gbl_plugin->cpus[tsk_rt(task)->linked_on]->hn);
+	} else {
+		/* task may be queued: first stop queue changes */
+		raw_spin_lock(&active_gbl_domain.release_lock);
+		if (is_queued(task)) {
+			TRACE_TASK(task, "%s: is queued\n",
+				   __FUNCTION__);
+			/* We need to update the position
+			 * of task in some heap. Note that this
+			 * may be a release heap. */
+			check_preempt =
+				!bheap_decrease(gbl_ready_order,
+						tsk_rt(task)->heap_node);
+		} else {
+			/* Nothing to do: if it is not queued and not linked
+			 * then it is currently being moved by other code
+			 * (e.g., a timer interrupt handler) that will use the
+			 * correct priority when enqueuing the task. */
+			TRACE_TASK(task, "%s: is NOT queued => Done.\n",
+				   __FUNCTION__);
+		}
+		raw_spin_unlock(&active_gbl_domain.release_lock);
+
+		/* If task was enqueued in a release heap, then the following
+		 * preemption check is pointless, but we can't easily detect
+		 * that case. If you want to fix this, then consider that
+		 * simply adding a state flag requires O(n) time to update when
+		 * releasing n tasks, which conflicts with the goal to have
+		 * O(log n) merges. */
+		if (check_preempt) {
+			/* heap_decrease() hit the top level of the heap: make
+			 * sure preemption checks get the right task, not the
+			 * potentially stale cache. */
+			bheap_uncache_min(gbl_ready_order,
+					  &active_gbl_domain.ready_queue);
+			gbl_check_for_preemptions();
+		}
+	}
+}
+
 
 /* check for any necessary preemptions */
 void gbl_check_for_preemptions(void)
@@ -276,7 +320,7 @@ void gbl_check_for_preemptions(void)
 	cpu_entry_t* last;
 
 	for(last = lowest_prio_cpu();
-	    gbl_preemption_needed(last->linked);
+	    active_gbl_plugin->preemption_needed(last->linked);
 	    last = lowest_prio_cpu())
 	{
 		/* preemption necessary */
@@ -392,6 +436,24 @@ void gblv_job_arrival(struct task_struct* task)
 	gbl_check_for_preemptions();
 }
 
+int gblv_preemption_needed(struct task_struct *t)
+{
+	/* we need the read lock for active_gbl_domain's ready_queue */
+	/* no need to preempt if there is nothing pending */
+	if (!__jobs_pending(&active_gbl_domain))
+		return 0;
+	/* we need to reschedule if t doesn't exist */
+	if (!t)
+		return 1;
+
+	/* NOTE: We cannot check for non-preemptibility since we
+	 *       don't know what address space we're currently in.
+	 */
+
+	/* make sure to get non-rt stuff out of the way */
+	return !is_realtime(t) || active_gbl_plugin->prio_order(__next_ready(&active_gbl_domain), t);
+}
+
 /* gbl_tick - this function is called for every local timer interrupt.
  *
  * checks whether the current task has expired and checks
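A brief illustrative sketch (not from the patch) of the dispatch pattern this refactor relies on: preemption_needed becomes a per-plugin hook, so the generic layer calls through a function pointer, GSN-EDF keeps the stock gblv_ implementation, and EDZL installs its own. The names below are simplified stand-ins, not the real LITMUS types.

/* Illustrative sketch only; compiles standalone. */
#include <stdio.h>

struct global_plugin_sketch {
	const char *name;
	int (*preemption_needed)(int pending_prio, int linked_prio);
};

/* stand-in for gblv_preemption_needed: preempt on strictly higher priority */
static int default_needed(int pending, int linked)  { return pending > linked; }

/* stand-in for an override such as edzl_preemption_needed */
static int override_needed(int pending, int linked) { return pending >= linked; }

static void check(const struct global_plugin_sketch *p, int pending, int linked)
{
	/* generic code never names the plugin; it just calls the hook */
	printf("%s: preempt=%d\n", p->name, p->preemption_needed(pending, linked));
}

int main(void)
{
	struct global_plugin_sketch gsn  = { "GSN-EDF", default_needed };
	struct global_plugin_sketch edzl = { "EDZL",    override_needed };

	check(&gsn, 5, 5);
	check(&edzl, 5, 5);
	return 0;
}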
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 7876d707d939..e4d17da0160a 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -63,76 +63,12 @@ static struct sched_global_plugin gsn_edf_plugin __cacheline_aligned_in_smp = {
 	.take_ready		= __take_ready,
 	.add_ready		= __add_ready,
 	.job_arrival		= gblv_job_arrival,
-	.job_completion		= gbl_job_completion
+	.job_completion		= gbl_job_completion,
+	.preemption_needed	= gblv_preemption_needed
 };
 
 
 #ifdef CONFIG_FMLP
-/* Update the queue position of a task that got it's priority boosted via
- * priority inheritance. */
-static void update_queue_position(struct task_struct *holder)
-{
-	/* We don't know whether holder is in the ready queue. It should, but
-	 * on a budget overrun it may already be in a release queue. Hence,
-	 * calling unlink() is not possible since it assumes that the task is
-	 * not in a release queue. However, we can safely check whether
-	 * sem->holder is currently in a queue or scheduled after locking both
-	 * the release and the ready queue lock. */
-
-	/* Assumption: caller holds gsnedf_lock */
-
-	int check_preempt = 0;
-
-	if (tsk_rt(holder)->linked_on != NO_CPU) {
-		TRACE_TASK(holder, "%s: linked on %d\n",
-			   __FUNCTION__, tsk_rt(holder)->linked_on);
-		/* Holder is scheduled; need to re-order CPUs.
-		 * We can't use heap_decrease() here since
-		 * the cpu_heap is ordered in reverse direction, so
-		 * it is actually an increase. */
-		bheap_delete(gbl_cpu_lower_prio, &gsn_edf_plugin.cpu_heap,
-			     gsn_edf_plugin.cpus[tsk_rt(holder)->linked_on]->hn);
-		bheap_insert(gbl_cpu_lower_prio, &gsn_edf_plugin.cpu_heap,
-			     gsn_edf_plugin.cpus[tsk_rt(holder)->linked_on]->hn);
-	} else {
-		/* holder may be queued: first stop queue changes */
-		raw_spin_lock(&gsn_edf_plugin.domain.release_lock);
-		if (is_queued(holder)) {
-			TRACE_TASK(holder, "%s: is queued\n",
-				   __FUNCTION__);
-			/* We need to update the position
-			 * of holder in some heap. Note that this
-			 * may be a release heap. */
-			check_preempt =
-				!bheap_decrease(edf_ready_order,
-						tsk_rt(holder)->heap_node);
-		} else {
-			/* Nothing to do: if it is not queued and not linked
-			 * then it is currently being moved by other code
-			 * (e.g., a timer interrupt handler) that will use the
-			 * correct priority when enqueuing the task. */
-			TRACE_TASK(holder, "%s: is NOT queued => Done.\n",
-				   __FUNCTION__);
-		}
-		raw_spin_unlock(&gsn_edf_plugin.domain.release_lock);
-
-		/* If holder was enqueued in a release heap, then the following
-		 * preemption check is pointless, but we can't easily detect
-		 * that case. If you want to fix this, then consider that
-		 * simply adding a state flag requires O(n) time to update when
-		 * releasing n tasks, which conflicts with the goal to have
-		 * O(log n) merges. */
-		if (check_preempt) {
-			/* heap_decrease() hit the top level of the heap: make
-			 * sure preemption checks get the right task, not the
-			 * potentially stale cache. */
-			bheap_uncache_min(gbl_ready_order,
-					  &gsn_edf_plugin.domain.ready_queue);
-			gbl_check_for_preemptions();
-		}
-	}
-}
-
 static long gsnedf_pi_block(struct pi_semaphore *sem,
 			    struct task_struct *new_waiter)
 {
@@ -158,7 +94,7 @@ static long gsnedf_pi_block(struct pi_semaphore *sem,
 			   new_waiter->comm, new_waiter->pid);
 		/* let holder inherit */
 		sem->holder->rt_param.inh_task = new_waiter;
-		update_queue_position(sem->holder);
+		gbl_update_queue_position(sem->holder);
 	}
 	raw_spin_unlock(&gsnedf_lock);
 }