author | Glenn Elliott <gelliott@cs.unc.edu> | 2011-01-28 19:15:16 -0500
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2011-01-31 15:08:42 -0500
commit | ae5fade24f7e9009168169b65e677ec554770e42 (patch)
tree | 23dee10a2d89e79367aa695695b02710484c9944
parent | cb9879c0fdadf174f3500fe4283c092677356605 (diff)
Implementation of AEDZL.
-rw-r--r-- | include/litmus/edzl_common.h | 14
-rw-r--r-- | include/litmus/fpmath.h | 136
-rw-r--r-- | include/litmus/litmus.h | 48
-rw-r--r-- | include/litmus/rt_param.h | 24
-rw-r--r-- | include/litmus/sched_trace.h | 13
-rw-r--r-- | include/litmus/sched_zl_plugin.h | 39
-rw-r--r-- | litmus/Kconfig | 14
-rw-r--r-- | litmus/Makefile | 3
-rw-r--r-- | litmus/edzl_common.c | 57
-rw-r--r-- | litmus/litmus.c | 12
-rw-r--r-- | litmus/sched_aedzl.c | 140
-rw-r--r-- | litmus/sched_edzl.c | 310
-rw-r--r-- | litmus/sched_global_plugin.c | 1
-rw-r--r-- | litmus/sched_task_trace.c | 27
-rw-r--r-- | litmus/sched_zl_plugin.c | 301
15 files changed, 786 insertions(+), 353 deletions(-)
diff --git a/include/litmus/edzl_common.h b/include/litmus/edzl_common.h deleted file mode 100644 index d1a89ee08554..000000000000 --- a/include/litmus/edzl_common.h +++ /dev/null | |||
@@ -1,14 +0,0 @@ | |||
1 | /* | ||
2 | * EDZL common data structures and utility functions shared by all EDZL | ||
3 | * based scheduler plugins | ||
4 | */ | ||
5 | |||
6 | #ifndef __UNC_EDZL_COMMON_H__ | ||
7 | #define __UNC_EDZL_COMMON_H__ | ||
8 | |||
9 | #include <litmus/rt_domain.h> | ||
10 | |||
11 | int edzl_higher_prio(struct task_struct* first, | ||
12 | struct task_struct* second); | ||
13 | |||
14 | #endif | ||
diff --git a/include/litmus/fpmath.h b/include/litmus/fpmath.h new file mode 100644 index 000000000000..9a2519523fd5 --- /dev/null +++ b/include/litmus/fpmath.h | |||
@@ -0,0 +1,136 @@ | |||
1 | #ifndef __FP_MATH_H__ | ||
2 | #define __FP_MATH_H__ | ||
3 | |||
4 | #ifdef CONFIG_PLUGIN_AEDZL | ||
5 | #include <litmus/rt_param.h> | ||
6 | #else | ||
7 | typedef long fpbuf_t; | ||
8 | typedef struct | ||
9 | { | ||
10 | fpbuf_t val; | ||
11 | } fp_t; | ||
12 | #endif | ||
13 | |||
14 | #define FP_SHIFT 10 | ||
15 | #define ROUND_BIT (FP_SHIFT - 1) | ||
16 | #define ONE FP(1) | ||
17 | |||
18 | #define _fp(x) ((fp_t) {x}) | ||
19 | |||
20 | static const fp_t LITMUS_FP_ZERO = {.val = 0}; | ||
21 | static const fp_t LITMUS_FP_ONE = {.val = (1 << FP_SHIFT)}; | ||
22 | |||
23 | static inline fp_t FP(fpbuf_t x) | ||
24 | { | ||
25 | return _fp(((fpbuf_t) x) << FP_SHIFT); | ||
26 | } | ||
27 | |||
28 | /* divide two integers to obtain a fixed point value */ | ||
29 | static inline fp_t _frac(fpbuf_t a, fpbuf_t b) | ||
30 | { | ||
31 | return _fp(FP(a).val / (b)); | ||
32 | } | ||
33 | |||
34 | #ifdef __KERNEL__ | ||
35 | |||
36 | static inline fpbuf_t _point(fp_t x) | ||
37 | { | ||
38 | return (x.val % (1 << FP_SHIFT)); | ||
39 | |||
40 | } | ||
41 | |||
42 | #define fp2str(x) x.val | ||
43 | /*(x.val >> FP_SHIFT), (x.val % (1 << FP_SHIFT)) */ | ||
44 | #define _FP_ "%ld/1024" | ||
45 | |||
46 | static inline fpbuf_t _floor(fp_t x) | ||
47 | { | ||
48 | return x.val >> FP_SHIFT; | ||
49 | } | ||
50 | |||
51 | /* FIXME: negative rounding */ | ||
52 | static inline fpbuf_t _round(fp_t x) | ||
53 | { | ||
54 | return _floor(x) + ((x.val >> ROUND_BIT) & 1); | ||
55 | } | ||
56 | |||
57 | /* multiply two fixed point values */ | ||
58 | static inline fp_t _mul(fp_t a, fp_t b) | ||
59 | { | ||
60 | return _fp((a.val * b.val) >> FP_SHIFT); | ||
61 | } | ||
62 | |||
63 | static inline fp_t _div(fp_t a, fp_t b) | ||
64 | { | ||
65 | /* try not to overflow */ | ||
66 | if (unlikely( a.val > (2l << (BITS_PER_LONG - FP_SHIFT)) )) | ||
67 | return _fp((a.val / b.val) << FP_SHIFT); | ||
68 | else | ||
69 | return _fp((a.val << FP_SHIFT) / b.val); | ||
70 | } | ||
71 | |||
72 | static inline fp_t _add(fp_t a, fp_t b) | ||
73 | { | ||
74 | return _fp(a.val + b.val); | ||
75 | } | ||
76 | |||
77 | static inline fp_t _sub(fp_t a, fp_t b) | ||
78 | { | ||
79 | return _fp(a.val - b.val); | ||
80 | } | ||
81 | |||
82 | static inline fp_t _neg(fp_t x) | ||
83 | { | ||
84 | return _fp(-x.val); | ||
85 | } | ||
86 | |||
87 | static inline fp_t _abs(fp_t x) | ||
88 | { | ||
89 | return _fp(abs(x.val)); | ||
90 | } | ||
91 | |||
92 | /* works the same as casting float/double to integer */ | ||
93 | static inline fpbuf_t _fp_to_integer(fp_t x) | ||
94 | { | ||
95 | return _floor(_abs(x)) * ((x.val > 0) ? 1 : -1); | ||
96 | } | ||
97 | |||
98 | static inline fp_t _integer_to_fp(fpbuf_t x) | ||
99 | { | ||
100 | return _frac(x,1); | ||
101 | } | ||
102 | |||
103 | static inline int _leq(fp_t a, fp_t b) | ||
104 | { | ||
105 | return a.val <= b.val; | ||
106 | } | ||
107 | |||
108 | static inline int _geq(fp_t a, fp_t b) | ||
109 | { | ||
110 | return a.val >= b.val; | ||
111 | } | ||
112 | |||
113 | static inline int _lt(fp_t a, fp_t b) | ||
114 | { | ||
115 | return a.val < b.val; | ||
116 | } | ||
117 | |||
118 | static inline int _gt(fp_t a, fp_t b) | ||
119 | { | ||
120 | return a.val > b.val; | ||
121 | } | ||
122 | |||
123 | static inline int _eq(fp_t a, fp_t b) | ||
124 | { | ||
125 | return a.val == b.val; | ||
126 | } | ||
127 | |||
128 | static inline fp_t _max(fp_t a, fp_t b) | ||
129 | { | ||
130 | if (a.val < b.val) | ||
131 | return b; | ||
132 | else | ||
133 | return a; | ||
134 | } | ||
135 | #endif | ||
136 | #endif | ||
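
To make the Q10 representation above concrete, here is a minimal user-space sketch (not part of the patch; it re-declares the few helpers it needs, since the arithmetic ops above are guarded by __KERNEL__, and the sample task numbers are made up):

```c
/* Standalone illustration of the Q10 (FP_SHIFT = 10) fixed-point format:
 * a value v is stored as v * 1024 in an fpbuf_t. */
#include <stdio.h>

typedef long fpbuf_t;
typedef struct { fpbuf_t val; } fp_t;

#define FP_SHIFT 10

static fp_t fp_frac(fpbuf_t a, fpbuf_t b)	/* mirrors _frac() */
{
	return (fp_t){ (a << FP_SHIFT) / b };
}

static fp_t fp_mul(fp_t a, fp_t b)		/* mirrors _mul() */
{
	return (fp_t){ (a.val * b.val) >> FP_SHIFT };
}

int main(void)
{
	/* utilization 1 ms / 4 ms = 0.25 -> stored as 256/1024 */
	fp_t util = fp_frac(1000000, 4000000);
	/* estimated cost = util * period, truncated like _fp_to_integer() */
	fp_t cost = fp_mul(util, (fp_t){ 4000000L << FP_SHIFT });

	printf("util      = %ld/1024\n", util.val);		/* 256/1024   */
	printf("est. cost = %ld ns\n", cost.val >> FP_SHIFT);	/* 1000000 ns */
	return 0;
}
```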
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h index 3203a0809f96..aee1c89e6f9a 100644 --- a/include/litmus/litmus.h +++ b/include/litmus/litmus.h | |||
@@ -8,6 +8,10 @@ | |||
8 | 8 | ||
9 | #include <litmus/debug_trace.h> | 9 | #include <litmus/debug_trace.h> |
10 | 10 | ||
11 | #ifdef CONFIG_PLUGIN_AEDZL | ||
12 | #include <litmus/fpmath.h> | ||
13 | #endif | ||
14 | |||
11 | #ifdef CONFIG_RELEASE_MASTER | 15 | #ifdef CONFIG_RELEASE_MASTER |
12 | extern atomic_t release_master_cpu; | 16 | extern atomic_t release_master_cpu; |
13 | #endif | 17 | #endif |
@@ -106,6 +110,50 @@ inline static lt_t laxity_remaining(struct task_struct* t) | |||
106 | } | 110 | } |
107 | #endif | 111 | #endif |
108 | 112 | ||
113 | #ifdef CONFIG_PLUGIN_AEDZL | ||
114 | #define get_feedback_a(t) (tsk_rt(t)->task_params.a) | ||
115 | #define get_feedback_b(t) (tsk_rt(t)->task_params.b) | ||
116 | |||
117 | inline static lt_t get_exec_cost_est(struct task_struct* t) | ||
118 | { | ||
119 | return (lt_t)( /* express cost in terms of lt_t */ | ||
120 | abs( /* fp_t values can end up with both num and denom < 0; | ||
121 | assume the fraction is non-negative and take abs() */ | ||
122 | _fp_to_integer( /* truncate off fractional part */ | ||
123 | _mul( /* exe = util * period */ | ||
124 | tsk_rt(t)->zl_util_est, _frac(get_rt_period(t), 1) | ||
125 | ) | ||
126 | ) | ||
127 | ) | ||
128 | ); | ||
129 | } | ||
130 | |||
131 | inline static int budget_exhausted_est(struct task_struct* t) | ||
132 | { | ||
133 | return get_exec_time(t) >= get_exec_cost_est(t); | ||
134 | } | ||
135 | |||
136 | inline static lt_t budget_remaining_est(struct task_struct* t) | ||
137 | { | ||
138 | if (!budget_exhausted_est(t)) | ||
139 | return get_exec_cost_est(t) - get_exec_time(t); | ||
140 | else | ||
141 | return 0; /* avoid overflow */ | ||
142 | } | ||
143 | |||
144 | inline static lt_t laxity_remaining_est(struct task_struct* t) | ||
145 | { | ||
146 | lt_t now = litmus_clock(); | ||
147 | lt_t remaining = budget_remaining_est(t); | ||
148 | lt_t deadline = get_deadline(t); | ||
149 | |||
150 | if(lt_before(now + remaining, deadline)) | ||
151 | return (deadline - (now + remaining)); | ||
152 | else | ||
153 | return 0; | ||
154 | } | ||
155 | #endif | ||
156 | |||
109 | 157 | ||
110 | /* A macro to convert from nanoseconds to ktime_t. */ | 158 | /* A macro to convert from nanoseconds to ktime_t. */ |
111 | #define ns_to_ktime(t) ktime_add_ns(ktime_set(0, 0), t) | 159 | #define ns_to_ktime(t) ktime_add_ns(ktime_set(0, 0), t) |
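
In equation form, the estimation helpers just added compute the following (writing u for zl_util_est, T for the period, e for get_exec_time(t), d for the absolute deadline, and t for litmus_clock(); the clamping to zero matches the overflow guards above):

```latex
\hat{C} = \big\lfloor u \cdot T \big\rfloor, \qquad
\hat{R} = \max\!\big(0,\ \hat{C} - e\big), \qquad
\hat{\ell}(t) = \max\!\big(0,\ d - (t + \hat{R})\big)
```

For example, with u = 256/1024 = 0.25 and T = 4 ms, the estimated cost is 1 ms; a job that has already run e = 0.4 ms with 2 ms left to its deadline has an estimated laxity of 2 - 0.6 = 1.4 ms.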
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h index 41768f446436..41ac654dd27e 100644 --- a/include/litmus/rt_param.h +++ b/include/litmus/rt_param.h | |||
@@ -33,6 +33,14 @@ typedef enum { | |||
33 | PRECISE_ENFORCEMENT /* NOT IMPLEMENTED - enforced with hrtimers */ | 33 | PRECISE_ENFORCEMENT /* NOT IMPLEMENTED - enforced with hrtimers */ |
34 | } budget_policy_t; | 34 | } budget_policy_t; |
35 | 35 | ||
36 | #ifdef CONFIG_PLUGIN_AEDZL | ||
37 | typedef long fpbuf_t; | ||
38 | typedef struct | ||
39 | { | ||
40 | fpbuf_t val; | ||
41 | } fp_t; | ||
42 | #endif | ||
43 | |||
36 | struct rt_task { | 44 | struct rt_task { |
37 | lt_t exec_cost; | 45 | lt_t exec_cost; |
38 | lt_t period; | 46 | lt_t period; |
@@ -40,6 +48,12 @@ struct rt_task { | |||
40 | unsigned int cpu; | 48 | unsigned int cpu; |
41 | task_class_t cls; | 49 | task_class_t cls; |
42 | budget_policy_t budget_policy; /* ignored by pfair */ | 50 | budget_policy_t budget_policy; /* ignored by pfair */ |
51 | |||
52 | #ifdef CONFIG_PLUGIN_AEDZL | ||
53 | /* AEDZL - feedback control parameters set by user */ | ||
54 | fp_t a; | ||
55 | fp_t b; | ||
56 | #endif | ||
43 | }; | 57 | }; |
44 | 58 | ||
45 | /* The definition of the data that is shared between the kernel and real-time | 59 | /* The definition of the data that is shared between the kernel and real-time |
@@ -97,7 +111,7 @@ struct rt_job { | |||
97 | This makes priority comparison operations more | 111 | This makes priority comparison operations more |
98 | predictable since laxity varies with time */ | 112 | predictable since laxity varies with time */ |
99 | unsigned int zero_laxity:1; | 113 | unsigned int zero_laxity:1; |
100 | #endif | 114 | #endif |
101 | }; | 115 | }; |
102 | 116 | ||
103 | struct pfair_param; | 117 | struct pfair_param; |
@@ -126,7 +140,13 @@ struct rt_param { | |||
126 | /* used to trigger zero-laxity detection */ | 140 | /* used to trigger zero-laxity detection */ |
127 | struct hrtimer zl_timer; | 141 | struct hrtimer zl_timer; |
128 | #endif | 142 | #endif |
129 | 143 | ||
144 | #ifdef CONFIG_PLUGIN_AEDZL | ||
145 | /* feedback control state */ | ||
146 | fp_t zl_util_est; | ||
147 | fp_t zl_accum_err; | ||
148 | #endif | ||
149 | |||
130 | /* task representing the current "inherited" task | 150 | /* task representing the current "inherited" task |
131 | * priority, assigned by inherit_priority and | 151 | * priority, assigned by inherit_priority and |
132 | * return priority in the scheduler plugins. | 152 | * return priority in the scheduler plugins. |
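
As a usage sketch, a task could populate the new fields like this before calling sys_set_rt_task_param() (fragment only; fields beyond those shown in the hunks above are omitted, the WCET/period values are made up, and the fixed-point constants are Q10 as elsewhere in this patch):

```c
#include <litmus/rt_param.h>

struct rt_task tp = {
	.exec_cost = 1000000,	/* 1 ms WCET, in ns */
	.period    = 4000000,	/* 4 ms, in ns      */
	.a = { .val = 104 },	/* ~0.102 == 104/1024 */
	.b = { .val = 310 },	/* ~0.303 == 310/1024 */
};
/* Leaving a.val and b.val zeroed makes sys_set_rt_task_param()
 * substitute these same defaults (see litmus/litmus.c below). */
```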
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h index a5f73736f7e8..a12ac302f836 100644 --- a/include/litmus/sched_trace.h +++ b/include/litmus/sched_trace.h | |||
@@ -57,6 +57,13 @@ struct st_completion_data { /* A job completed. */ | |||
57 | u8 __unused[3]; | 57 | u8 __unused[3]; |
58 | }; | 58 | }; |
59 | 59 | ||
60 | /* Extra completion data for AEDZL */ | ||
61 | struct st_completion_adaptive_data { | ||
62 | u32 estimated_exe; | ||
63 | s32 accumulated_err; | ||
64 | u8 __unused[8]; | ||
65 | }; | ||
66 | |||
60 | struct st_block_data { /* A task blocks. */ | 67 | struct st_block_data { /* A task blocks. */ |
61 | u64 when; | 68 | u64 when; |
62 | u64 __unused; | 69 | u64 __unused; |
@@ -86,6 +93,7 @@ typedef enum { | |||
86 | ST_BLOCK, | 93 | ST_BLOCK, |
87 | ST_RESUME, | 94 | ST_RESUME, |
88 | ST_SYS_RELEASE, | 95 | ST_SYS_RELEASE, |
96 | ST_COMPLETION_ADAPTIVE | ||
89 | } st_event_record_type_t; | 97 | } st_event_record_type_t; |
90 | 98 | ||
91 | struct st_event_record { | 99 | struct st_event_record { |
@@ -100,6 +108,7 @@ struct st_event_record { | |||
100 | DATA(switch_to); | 108 | DATA(switch_to); |
101 | DATA(switch_away); | 109 | DATA(switch_away); |
102 | DATA(completion); | 110 | DATA(completion); |
111 | DATA(completion_adaptive); | ||
103 | DATA(block); | 112 | DATA(block); |
104 | DATA(resume); | 113 | DATA(resume); |
105 | DATA(sys_release); | 114 | DATA(sys_release); |
@@ -136,6 +145,8 @@ feather_callback void do_sched_trace_task_switch_away(unsigned long id, | |||
136 | feather_callback void do_sched_trace_task_completion(unsigned long id, | 145 | feather_callback void do_sched_trace_task_completion(unsigned long id, |
137 | struct task_struct* task, | 146 | struct task_struct* task, |
138 | unsigned long forced); | 147 | unsigned long forced); |
148 | feather_callback void do_sched_trace_task_completion_adaptive(unsigned long id, | ||
149 | struct task_struct* task); | ||
139 | feather_callback void do_sched_trace_task_block(unsigned long id, | 150 | feather_callback void do_sched_trace_task_block(unsigned long id, |
140 | struct task_struct* task); | 151 | struct task_struct* task); |
141 | feather_callback void do_sched_trace_task_resume(unsigned long id, | 152 | feather_callback void do_sched_trace_task_resume(unsigned long id, |
@@ -175,6 +186,8 @@ feather_callback void do_sched_trace_sys_release(unsigned long id, | |||
175 | /* when is a pointer, it does not need an explicit cast to unsigned long */ | 186 | /* when is a pointer, it does not need an explicit cast to unsigned long */ |
176 | #define sched_trace_sys_release(when) \ | 187 | #define sched_trace_sys_release(when) \ |
177 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 9, do_sched_trace_sys_release, when) | 188 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 9, do_sched_trace_sys_release, when) |
189 | #define sched_trace_task_completion_adaptive(t) \ | ||
190 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 10, do_sched_trace_task_completion_adaptive, t) | ||
178 | 191 | ||
179 | #define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */ | 192 | #define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */ |
180 | 193 | ||
diff --git a/include/litmus/sched_zl_plugin.h b/include/litmus/sched_zl_plugin.h new file mode 100644 index 000000000000..db044e606354 --- /dev/null +++ b/include/litmus/sched_zl_plugin.h | |||
@@ -0,0 +1,39 @@ | |||
1 | /* | ||
2 | * Zero-laxity (ZL) data structures and utility functions shared by | ||
3 | * all EDZL-based scheduler plugins (EDZL, AEDZL) | ||
4 | */ | ||
5 |||
6 | #ifndef __UNC_ZL_PLUGIN_H__ | ||
7 | #define __UNC_ZL_PLUGIN_H__ | ||
8 | |||
9 | #include <litmus/rt_domain.h> | ||
10 | #include <litmus/sched_global_plugin.h> | ||
11 | |||
12 | int edzl_higher_prio(struct task_struct* first, | ||
13 | struct task_struct* second); | ||
14 | |||
15 | |||
16 | typedef lt_t (*laxity_remaining_t)(struct task_struct* t); | ||
17 | typedef lt_t (*budget_remaining_t)(struct task_struct* t); | ||
18 | typedef int (*budget_exhausted_t)(struct task_struct* t); | ||
19 | |||
20 | struct sched_zl_plugin { | ||
21 | struct sched_global_plugin gbl_plugin; | ||
22 | |||
23 | /* function pointers MUST be set by plugin */ | ||
24 | laxity_remaining_t laxity_remaining; | ||
25 | budget_remaining_t budget_remaining; | ||
26 | budget_exhausted_t budget_exhausted; | ||
27 | } __attribute__ ((__aligned__(SMP_CACHE_BYTES))); | ||
28 | |||
29 | /* functions common to EDZL and AEDZL */ | ||
30 | enum hrtimer_restart zl_on_zero_laxity(struct hrtimer *timer); | ||
31 | struct task_struct* __zl_take_ready(rt_domain_t* rt); | ||
32 | void __zl_add_ready(rt_domain_t* rt, struct task_struct *new); | ||
33 | void zl_job_arrival(struct task_struct* task); | ||
34 | void zl_task_new(struct task_struct * t, int on_rq, int running); | ||
35 | void zl_task_wake_up(struct task_struct *task); | ||
36 | void zl_task_exit(struct task_struct * t); | ||
37 | int zl_preemption_needed(struct task_struct *t); | ||
38 | |||
39 | #endif | ||
diff --git a/litmus/Kconfig b/litmus/Kconfig index 1e571af45e72..dda271fbeca5 100644 --- a/litmus/Kconfig +++ b/litmus/Kconfig | |||
@@ -33,6 +33,18 @@ config PLUGIN_EDZL | |||
33 | priority. | 33 | priority. |
34 | 34 | ||
35 | If unsure, say Yes. | 35 | If unsure, say Yes. |
36 | |||
37 | config PLUGIN_AEDZL | ||
38 | bool "AEDZL" | ||
39 | depends on PLUGIN_EDZL | ||
40 | default y | ||
41 | help | ||
42 | Include the AEDZL (Adaptive Earliest Deadline, Zero Laxity) plugin in | ||
43 | the kernel. AEDZL functions like EDZL, except that it uses feedback- | ||
44 | control methods to estimate actual job execution time. This improves | ||
45 | the accuracy of detected zero-laxity points. | ||
46 | |||
47 | If unsure, say Yes. | ||
36 | 48 | ||
37 | config RELEASE_MASTER | 49 | config RELEASE_MASTER |
38 | bool "Release-master Support" | 50 | bool "Release-master Support" |
@@ -43,7 +55,7 @@ config RELEASE_MASTER | |||
43 | that services all timer interrupts, but that does not schedule | 55 | that services all timer interrupts, but that does not schedule |
44 | real-time tasks. See RTSS'09 paper for details | 56 | real-time tasks. See RTSS'09 paper for details |
45 | (http://www.cs.unc.edu/~anderson/papers.html). | 57 | (http://www.cs.unc.edu/~anderson/papers.html). |
46 | Currently only supported by GSN-EDF and EDZL. | 58 | Currently only supported by GSN-EDF, EDZL and AEDZL. |
47 | 59 | ||
48 | endmenu | 60 | endmenu |
49 | 61 | ||
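
With the defaults above, a built kernel configuration would carry both switches; the `depends on PLUGIN_EDZL` line means AEDZL cannot be selected without EDZL:

```
CONFIG_PLUGIN_EDZL=y
CONFIG_PLUGIN_AEDZL=y
```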
diff --git a/litmus/Makefile b/litmus/Makefile index ec4e21106886..15ecd425a6c4 100644 --- a/litmus/Makefile +++ b/litmus/Makefile | |||
@@ -21,7 +21,8 @@ obj-y = sched_plugin.o litmus.o \ | |||
21 | 21 | ||
22 | obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o | 22 | obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o |
23 | obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o | 23 | obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o |
24 | obj-$(CONFIG_PLUGIN_EDZL) += sched_edzl.o edzl_common.o | 24 | obj-$(CONFIG_PLUGIN_EDZL) += sched_edzl.o sched_zl_plugin.o |
25 | obj-$(CONFIG_PLUGIN_AEDZL) += sched_aedzl.o | ||
25 | 26 | ||
26 | obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o | 27 | obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o |
27 | obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o | 28 | obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o |
diff --git a/litmus/edzl_common.c b/litmus/edzl_common.c deleted file mode 100644 index 9e26304a1ea2..000000000000 --- a/litmus/edzl_common.c +++ /dev/null | |||
@@ -1,57 +0,0 @@ | |||
1 | /* | ||
2 | * kernel/edzl_common.c | ||
3 | * | ||
4 | * Common functions for EDZL based scheduler. | ||
5 | */ | ||
6 | |||
7 | #include <linux/percpu.h> | ||
8 | #include <linux/sched.h> | ||
9 | #include <linux/list.h> | ||
10 | |||
11 | #include <litmus/litmus.h> | ||
12 | #include <litmus/sched_plugin.h> | ||
13 | #include <litmus/sched_trace.h> | ||
14 | |||
15 | #include <litmus/edf_common.h> | ||
16 | #include <litmus/edzl_common.h> | ||
17 | |||
18 | |||
19 | int edzl_higher_prio(struct task_struct* first, | ||
20 | struct task_struct* second) | ||
21 | { | ||
22 | struct task_struct *first_task = first; | ||
23 | struct task_struct *second_task = second; | ||
24 | |||
25 | /* There is no point in comparing a task to itself. */ | ||
26 | if (first && first == second) { | ||
27 | TRACE_TASK(first, | ||
28 | "WARNING: pointless edf priority comparison.\n"); | ||
29 | return 0; | ||
30 | } | ||
31 | |||
32 | |||
33 | /* Check for inherited priorities. Change task | ||
34 | * used for comparison in such a case. | ||
35 | */ | ||
36 | if (first && first->rt_param.inh_task) | ||
37 | first_task = first->rt_param.inh_task; | ||
38 | if (second && second->rt_param.inh_task) | ||
39 | second_task = second->rt_param.inh_task; | ||
40 | |||
41 | /* null checks & rt checks */ | ||
42 | if(!first_task) | ||
43 | return 0; | ||
44 | else if(!second_task || !is_realtime(second_task)) | ||
45 | return 1; | ||
46 | |||
47 | |||
48 | if(likely(get_zerolaxity(first_task) == get_zerolaxity(second_task))) | ||
49 | { | ||
50 | /* edf order if both tasks have the same laxity state */ | ||
51 | return(edf_higher_prio(first_task, second_task)); | ||
52 | } | ||
53 | else | ||
54 | { | ||
55 | return(get_zerolaxity(first_task)); | ||
56 | } | ||
57 | } | ||
diff --git a/litmus/litmus.c b/litmus/litmus.c index 744880c90eb5..744e20c382f7 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <litmus/litmus_proc.h> | 17 | #include <litmus/litmus_proc.h> |
18 | #include <litmus/sched_trace.h> | 18 | #include <litmus/sched_trace.h> |
19 | 19 | ||
20 | |||
20 | /* Number of RT tasks that exist in the system */ | 21 | /* Number of RT tasks that exist in the system */ |
21 | atomic_t rt_task_count = ATOMIC_INIT(0); | 22 | atomic_t rt_task_count = ATOMIC_INIT(0); |
22 | static DEFINE_RAW_SPINLOCK(task_transition_lock); | 23 | static DEFINE_RAW_SPINLOCK(task_transition_lock); |
@@ -123,6 +124,17 @@ asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param) | |||
123 | 124 | ||
124 | target->rt_param.task_params = tp; | 125 | target->rt_param.task_params = tp; |
125 | 126 | ||
127 | #ifdef CONFIG_PLUGIN_AEDZL | ||
128 | /* default feedback parameters if not specified */ | ||
129 | if(target->rt_param.task_params.a.val == LITMUS_FP_ZERO.val) | ||
130 | target->rt_param.task_params.a.val = 104; /* ~0.102 (104/1024) */ | ||
131 | if(target->rt_param.task_params.b.val == LITMUS_FP_ZERO.val) | ||
132 | target->rt_param.task_params.b.val = 310; /* ~0.303 (310/1024) */ | ||
133 | |||
134 | target->rt_param.zl_util_est = _frac(tp.exec_cost, tp.period); | ||
135 | target->rt_param.zl_accum_err = LITMUS_FP_ZERO; | ||
136 | #endif | ||
137 | |||
126 | retval = 0; | 138 | retval = 0; |
127 | out_unlock: | 139 | out_unlock: |
128 | read_unlock_irq(&tasklist_lock); | 140 | read_unlock_irq(&tasklist_lock); |
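
A quick check on these defaults (one fixed-point unit is 1/1024, since FP_SHIFT = 10):

```latex
a = \tfrac{104}{1024} \approx 0.1016, \qquad
b = \tfrac{310}{1024} \approx 0.3027, \qquad
u_{0} = \texttt{\_frac}(\mathrm{exec\_cost},\ \mathrm{period})
```

so the estimator starts from the task's stated utilization and applies the a/b gains from the first completed job onward.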
diff --git a/litmus/sched_aedzl.c b/litmus/sched_aedzl.c new file mode 100644 index 000000000000..45949e699bee --- /dev/null +++ b/litmus/sched_aedzl.c | |||
@@ -0,0 +1,140 @@ | |||
1 | /* | ||
2 | * litmus/sched_aedzl.c | ||
3 | * | ||
4 | * Implementation of the AEDZL scheduling algorithm. | ||
5 | * | ||
6 | * This version uses the simple approach and serializes all scheduling | ||
7 | * decisions by the use of a queue lock. This is probably not the | ||
8 | * best way to do it, but it should suffice for now. | ||
9 | */ | ||
10 | |||
11 | #include <linux/spinlock.h> | ||
12 | #include <linux/percpu.h> | ||
13 | #include <linux/sched.h> | ||
14 | |||
15 | #include <litmus/litmus.h> | ||
16 | #include <litmus/jobs.h> | ||
17 | #include <litmus/sched_zl_plugin.h> | ||
18 | #include <litmus/sched_trace.h> | ||
19 | |||
20 | #include <litmus/preempt.h> | ||
21 | |||
22 | #include <litmus/bheap.h> | ||
23 | |||
24 | #include <linux/module.h> | ||
25 | |||
26 | static void aedzl_job_completion(struct task_struct *t, int forced); | ||
27 | |||
28 | /* AEDZL Plugin object */ | ||
29 | static struct sched_zl_plugin aedzl_plugin __cacheline_aligned_in_smp = { | ||
30 | .gbl_plugin = { | ||
31 | .plugin = { | ||
32 | .finish_switch = gblv_finish_switch, | ||
33 | .tick = gblv_tick, | ||
34 | .complete_job = complete_job, | ||
35 | .schedule = gblv_schedule, | ||
36 | .task_block = gblv_task_block, | ||
37 | .admit_task = gblv_admit_task, | ||
38 | .activate_plugin = gbl_activate_plugin, | ||
39 | |||
40 | .plugin_name = "AEDZL", | ||
41 | .task_new = zl_task_new, | ||
42 | .task_wake_up = zl_task_wake_up, | ||
43 | .task_exit = zl_task_exit, | ||
44 | }, | ||
45 | |||
46 | .prio_order = edzl_higher_prio, | ||
47 | .take_ready = __zl_take_ready, | ||
48 | .add_ready = __zl_add_ready, | ||
49 | .job_arrival = zl_job_arrival, | ||
50 | .preemption_needed = zl_preemption_needed, | ||
51 | |||
52 | .job_completion = aedzl_job_completion, | ||
53 | }, | ||
54 | |||
55 | .laxity_remaining = laxity_remaining_est, | ||
56 | .budget_remaining = budget_remaining_est, | ||
57 | .budget_exhausted = budget_exhausted_est | ||
58 | }; | ||
59 | |||
60 | |||
61 | static inline void update_exe_estimate(struct task_struct *t, lt_t actual_cost) | ||
62 | { | ||
63 | fp_t err, new; | ||
64 | fp_t actual_util = _frac(actual_cost, get_rt_period(t)); | ||
65 | |||
66 | TRACE_TASK(t, "OLD cost: %llu, est cost: %llu, est util: %d.%d, est err: %d.%d\n", | ||
67 | tsk_rt(t)->task_params.exec_cost, | ||
68 | get_exec_cost_est(t), | ||
69 | _fp_to_integer(tsk_rt(t)->zl_util_est), _point(tsk_rt(t)->zl_util_est), | ||
70 | _fp_to_integer(tsk_rt(t)->zl_accum_err), _point(tsk_rt(t)->zl_accum_err)); | ||
71 | |||
72 | err = _sub(actual_util, tsk_rt(t)->zl_util_est); | ||
73 | |||
74 | |||
75 | new = _add(_mul(get_feedback_a(t), err), | ||
76 | _mul(get_feedback_b(t), tsk_rt(t)->zl_accum_err)); | ||
77 | |||
78 | tsk_rt(t)->zl_util_est = new; | ||
79 | tsk_rt(t)->zl_accum_err = _add(tsk_rt(t)->zl_accum_err, err); | ||
80 | |||
81 | TRACE_TASK(t, "cost: %llu, est cost: %llu, est util: %d.%d, est err: %d.%d, (delta cost: %d.%d, delta err: %d.%d)\n", | ||
82 | tsk_rt(t)->task_params.exec_cost, | ||
83 | get_exec_cost_est(t), | ||
84 | _fp_to_integer(tsk_rt(t)->zl_util_est), _point(tsk_rt(t)->zl_util_est), | ||
85 | _fp_to_integer(tsk_rt(t)->zl_accum_err), _point(tsk_rt(t)->zl_accum_err), | ||
86 | _fp_to_integer(new), _point(new), | ||
87 | _fp_to_integer(err), _point(err)); | ||
88 | } | ||
89 | |||
90 | /* caller holds active_gbl_domain_lock */ | ||
91 | static void aedzl_job_completion(struct task_struct *t, int forced) | ||
92 | { | ||
93 | BUG_ON(!t); | ||
94 | |||
95 | sched_trace_task_completion(t, forced); | ||
96 | sched_trace_task_completion_adaptive(t); | ||
97 | |||
98 | TRACE_TASK(t, "job_completion().\n"); | ||
99 | |||
100 | /* set flags */ | ||
101 | set_rt_flags(t, RT_F_SLEEP); | ||
102 | |||
103 | /* update exe estimate */ | ||
104 | update_exe_estimate(t, get_exec_time(t)); | ||
105 | |||
106 | /* prepare for next period */ | ||
107 | prepare_for_next_period(t); | ||
108 | if (is_released(t, litmus_clock())) | ||
109 | sched_trace_task_release(t); | ||
110 | /* unlink */ | ||
111 | gbl_unlink(t); | ||
112 | /* requeue | ||
113 | * But don't requeue a blocking task. */ | ||
114 | if (is_running(t)) | ||
115 | active_gbl_plugin->job_arrival(t); | ||
116 | } | ||
117 | |||
118 | |||
119 | DEFINE_PER_CPU(cpu_entry_t, aedzl_cpu_entries); | ||
120 | |||
121 | static int __init init_aedzl(void) | ||
122 | { | ||
123 | int cpu; | ||
124 | cpu_entry_t *entry; | ||
125 | |||
126 | bheap_init(&aedzl_plugin.gbl_plugin.cpu_heap); | ||
127 | /* initialize CPU state */ | ||
128 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | ||
129 | entry = &per_cpu(aedzl_cpu_entries, cpu); | ||
130 | aedzl_plugin.gbl_plugin.cpus[cpu] = entry; | ||
131 | entry->cpu = cpu; | ||
132 | entry->hn = &aedzl_plugin.gbl_plugin.heap_node[cpu]; | ||
133 | bheap_node_init(&entry->hn, entry); | ||
134 | } | ||
135 | gbl_domain_init(&aedzl_plugin.gbl_plugin, NULL, gbl_release_jobs); | ||
136 | |||
137 | return register_sched_plugin(&aedzl_plugin.gbl_plugin.plugin); | ||
138 | } | ||
139 | |||
140 | module_init(init_aedzl); | ||
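
Written out, update_exe_estimate() implements a proportional/accumulated-error feedback step. With u_k the utilization estimate before job k completes, y_k = actual_cost/period the job's observed utilization, and E_k the accumulated error (zl_accum_err):

```latex
e_k = y_k - u_k, \qquad
u_{k+1} = a\, e_k + b\, E_k, \qquad
E_{k+1} = E_k + e_k
```

The next zero-laxity point is then predicted from u_{k+1} via get_exec_cost_est() rather than from the static WCET.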
diff --git a/litmus/sched_edzl.c b/litmus/sched_edzl.c index 0664b78e540b..fb34be7196b4 100644 --- a/litmus/sched_edzl.c +++ b/litmus/sched_edzl.c | |||
@@ -14,8 +14,7 @@ | |||
14 | 14 | ||
15 | #include <litmus/litmus.h> | 15 | #include <litmus/litmus.h> |
16 | #include <litmus/jobs.h> | 16 | #include <litmus/jobs.h> |
17 | #include <litmus/sched_global_plugin.h> | 17 | #include <litmus/sched_zl_plugin.h> |
18 | #include <litmus/edzl_common.h> | ||
19 | #include <litmus/sched_trace.h> | 18 | #include <litmus/sched_trace.h> |
20 | 19 | ||
21 | #include <litmus/preempt.h> | 20 | #include <litmus/preempt.h> |
@@ -24,303 +23,58 @@ | |||
24 | 23 | ||
25 | #include <linux/module.h> | 24 | #include <linux/module.h> |
26 | 25 | ||
27 | static struct task_struct* __edzl_take_ready(rt_domain_t* rt); | ||
28 | static void __edzl_add_ready(rt_domain_t* rt, struct task_struct *new); | ||
29 | static void edzl_job_arrival(struct task_struct* task); | ||
30 | static void edzl_task_new(struct task_struct * t, int on_rq, int running); | ||
31 | static void edzl_task_wake_up(struct task_struct *task); | ||
32 | static void edzl_task_exit(struct task_struct * t); | ||
33 | static int edzl_preemption_needed(struct task_struct *t); | ||
34 | |||
35 | |||
36 | /* EDZL Plugin object */ | 26 | /* EDZL Plugin object */ |
37 | static struct sched_global_plugin edzl_plugin __cacheline_aligned_in_smp = { | 27 | static struct sched_zl_plugin edzl_plugin __cacheline_aligned_in_smp = { |
38 | .plugin = { | 28 | .gbl_plugin = { |
39 | .finish_switch = gblv_finish_switch, | 29 | .plugin = { |
40 | .tick = gblv_tick, | 30 | .finish_switch = gblv_finish_switch, |
41 | .complete_job = complete_job, | 31 | .tick = gblv_tick, |
42 | .schedule = gblv_schedule, | 32 | .complete_job = complete_job, |
43 | .task_block = gblv_task_block, | 33 | .schedule = gblv_schedule, |
44 | .admit_task = gblv_admit_task, | 34 | .task_block = gblv_task_block, |
45 | .activate_plugin = gbl_activate_plugin, | 35 | .admit_task = gblv_admit_task, |
36 | .activate_plugin = gbl_activate_plugin, | ||
37 | |||
38 | .plugin_name = "EDZL", | ||
39 | .task_new = zl_task_new, | ||
40 | .task_wake_up = zl_task_wake_up, | ||
41 | .task_exit = zl_task_exit, | ||
42 | }, | ||
43 | |||
44 | .job_completion = gbl_job_completion, | ||
46 | 45 | ||
47 | .plugin_name = "EDZL", | 46 | .prio_order = edzl_higher_prio, |
48 | .task_new = edzl_task_new, | 47 | .take_ready = __zl_take_ready, |
49 | .task_wake_up = edzl_task_wake_up, | 48 | .add_ready = __zl_add_ready, |
50 | .task_exit = edzl_task_exit, | 49 | .job_arrival = zl_job_arrival, |
50 | .preemption_needed = zl_preemption_needed | ||
51 | }, | 51 | }, |
52 | 52 | ||
53 | .job_completion = gbl_job_completion, | 53 | .laxity_remaining = laxity_remaining, |
54 | 54 | .budget_remaining = budget_remaining, | |
55 | .prio_order = edzl_higher_prio, | 55 | .budget_exhausted = budget_exhausted |
56 | .take_ready = __edzl_take_ready, | ||
57 | .add_ready = __edzl_add_ready, | ||
58 | .job_arrival = edzl_job_arrival, | ||
59 | .preemption_needed = edzl_preemption_needed | ||
60 | }; | 56 | }; |
61 | 57 | ||
62 | 58 | ||
63 | #define active_gbl_domain (active_gbl_plugin->domain) | ||
64 | #define active_gbl_domain_lock (active_gbl_domain.ready_lock) | ||
65 | |||
66 | DEFINE_PER_CPU(cpu_entry_t, edzl_cpu_entries); | 59 | DEFINE_PER_CPU(cpu_entry_t, edzl_cpu_entries); |
67 | 60 | ||
68 | |||
69 | static enum hrtimer_restart on_zero_laxity(struct hrtimer *timer) | ||
70 | { | ||
71 | unsigned long flags; | ||
72 | struct task_struct* t; | ||
73 | |||
74 | lt_t now = litmus_clock(); | ||
75 | |||
76 | TRACE("Zero-laxity timer went off!\n"); | ||
77 | |||
78 | raw_spin_lock_irqsave(&active_gbl_domain_lock, flags); | ||
79 | |||
80 | t = container_of(container_of(timer, struct rt_param, zl_timer), | ||
81 | struct task_struct, | ||
82 | rt_param); | ||
83 | |||
84 | TRACE_TASK(t, "Reached zero-laxity. (now: %llu, zl-pt: %lld, time remaining (now): %lld)\n", | ||
85 | now, | ||
86 | get_deadline(t) - budget_remaining(t), | ||
87 | get_deadline(t) - now); | ||
88 | |||
89 | set_zerolaxity(t); | ||
90 | gbl_update_queue_position(t); | ||
91 | |||
92 | raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags); | ||
93 | |||
94 | return HRTIMER_NORESTART; | ||
95 | } | ||
96 | |||
97 | /* __edzl_take_ready - call's __take_ready with EDZL timer cancelation side-effect. */ | ||
98 | static struct task_struct* __edzl_take_ready(rt_domain_t* rt) | ||
99 | { | ||
100 | struct task_struct* t = __take_ready(rt); | ||
101 | |||
102 | if(t) | ||
103 | { | ||
104 | if(get_zerolaxity(t) == 0) | ||
105 | { | ||
106 | if(hrtimer_active(&tsk_rt(t)->zl_timer)) | ||
107 | { | ||
108 | int cancel_ret; | ||
109 | |||
110 | TRACE_TASK(t, "Canceling zero-laxity timer.\n"); | ||
111 | cancel_ret = hrtimer_try_to_cancel(&tsk_rt(t)->zl_timer); | ||
112 | WARN_ON(cancel_ret == 0); /* should never be inactive. */ | ||
113 | } | ||
114 | } | ||
115 | else | ||
116 | { | ||
117 | TRACE_TASK(t, "Task already has zero-laxity flagged.\n"); | ||
118 | } | ||
119 | } | ||
120 | |||
121 | return t; | ||
122 | } | ||
123 | |||
124 | /* __edzl_add_ready - call's __add_ready with EDZL setting timer side-effect. */ | ||
125 | static void __edzl_add_ready(rt_domain_t* rt, struct task_struct *new) | ||
126 | { | ||
127 | __add_ready(rt, new); | ||
128 | |||
129 | if(get_zerolaxity(new) == 0) | ||
130 | { | ||
131 | lt_t when_to_fire; | ||
132 | |||
133 | when_to_fire = get_deadline(new) - budget_remaining(new); | ||
134 | |||
135 | TRACE_TASK(new, "Setting zero-laxity timer for %llu. (deadline: %llu, remaining: %llu)\n", | ||
136 | when_to_fire, | ||
137 | get_deadline(new), | ||
138 | budget_remaining(new)); | ||
139 | |||
140 | __hrtimer_start_range_ns(&tsk_rt(new)->zl_timer, | ||
141 | ns_to_ktime(when_to_fire), | ||
142 | 0, | ||
143 | HRTIMER_MODE_ABS_PINNED, | ||
144 | 0); | ||
145 | } | ||
146 | else | ||
147 | { | ||
148 | TRACE_TASK(new, "Already has zero-laxity when added to ready queue. (deadline: %llu, remaining: %llu))\n", | ||
149 | get_deadline(new), | ||
150 | budget_remaining(new)); | ||
151 | } | ||
152 | } | ||
153 | |||
154 | |||
155 | |||
156 | /* edzl_job_arrival: task is either resumed or released */ | ||
157 | static void edzl_job_arrival(struct task_struct* task) | ||
158 | { | ||
159 | BUG_ON(!task); | ||
160 | |||
161 | /* clear old laxity flag or tag zero-laxity upon release */ | ||
162 | if(laxity_remaining(task)) | ||
163 | clear_zerolaxity(task); | ||
164 | else | ||
165 | set_zerolaxity(task); | ||
166 | |||
167 | gbl_requeue(task); | ||
168 | gbl_check_for_preemptions(); | ||
169 | } | ||
170 | |||
171 | |||
172 | /* Prepare a task for running in RT mode | ||
173 | */ | ||
174 | static void edzl_task_new(struct task_struct * t, int on_rq, int running) | ||
175 | { | ||
176 | unsigned long flags; | ||
177 | cpu_entry_t* entry; | ||
178 | |||
179 | TRACE("edzl: task new %d\n", t->pid); | ||
180 | |||
181 | raw_spin_lock_irqsave(&active_gbl_domain_lock, flags); | ||
182 | |||
183 | hrtimer_init(&t->rt_param.zl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
184 | t->rt_param.zl_timer.function = on_zero_laxity; | ||
185 | |||
186 | /* setup job params */ | ||
187 | release_at(t, litmus_clock()); | ||
188 | |||
189 | if (running) { | ||
190 | entry = active_gbl_plugin->cpus[task_cpu(t)]; | ||
191 | BUG_ON(entry->scheduled); | ||
192 | |||
193 | #ifdef CONFIG_RELEASE_MASTER | ||
194 | if (entry->cpu != active_gbl_domain.release_master) { | ||
195 | #endif | ||
196 | entry->scheduled = t; | ||
197 | tsk_rt(t)->scheduled_on = task_cpu(t); | ||
198 | #ifdef CONFIG_RELEASE_MASTER | ||
199 | } else { | ||
200 | /* do not schedule on release master */ | ||
201 | gbl_preempt(entry); /* force resched */ | ||
202 | tsk_rt(t)->scheduled_on = NO_CPU; | ||
203 | } | ||
204 | #endif | ||
205 | } else { | ||
206 | t->rt_param.scheduled_on = NO_CPU; | ||
207 | } | ||
208 | t->rt_param.linked_on = NO_CPU; | ||
209 | |||
210 | active_gbl_plugin->job_arrival(t); | ||
211 | raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags); | ||
212 | } | ||
213 | |||
214 | |||
215 | static void edzl_task_wake_up(struct task_struct *task) | ||
216 | { | ||
217 | unsigned long flags; | ||
218 | lt_t now; | ||
219 | |||
220 | TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); | ||
221 | |||
222 | raw_spin_lock_irqsave(&active_gbl_domain_lock, flags); | ||
223 | /* We need to take suspensions because of semaphores into | ||
224 | * account! If a job resumes after being suspended due to acquiring | ||
225 | * a semaphore, it should never be treated as a new job release. | ||
226 | */ | ||
227 | if (get_rt_flags(task) == RT_F_EXIT_SEM) { | ||
228 | set_rt_flags(task, RT_F_RUNNING); | ||
229 | } else { | ||
230 | now = litmus_clock(); | ||
231 | if (is_tardy(task, now)) { | ||
232 | /* new sporadic release */ | ||
233 | release_at(task, now); | ||
234 | sched_trace_task_release(task); | ||
235 | } | ||
236 | else { | ||
237 | if (task->rt.time_slice) { | ||
238 | /* came back in time before deadline | ||
239 | */ | ||
240 | set_rt_flags(task, RT_F_RUNNING); | ||
241 | } | ||
242 | } | ||
243 | } | ||
244 | active_gbl_plugin->job_arrival(task); | ||
245 | raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags); | ||
246 | } | ||
247 | |||
248 | |||
249 | static void edzl_task_exit(struct task_struct * t) | ||
250 | { | ||
251 | unsigned long flags; | ||
252 | |||
253 | /* unlink if necessary */ | ||
254 | raw_spin_lock_irqsave(&active_gbl_domain_lock, flags); | ||
255 | gbl_unlink(t); | ||
256 | if (tsk_rt(t)->scheduled_on != NO_CPU) { | ||
257 | active_gbl_plugin->cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL; | ||
258 | tsk_rt(t)->scheduled_on = NO_CPU; | ||
259 | } | ||
260 | |||
261 | if(hrtimer_active(&tsk_rt(t)->zl_timer)) | ||
262 | { | ||
263 | /* BUG if reached? */ | ||
264 | TRACE_TASK(t, "Canceled armed timer while exiting.\n"); | ||
265 | hrtimer_cancel(&tsk_rt(t)->zl_timer); | ||
266 | } | ||
267 | |||
268 | raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags); | ||
269 | |||
270 | BUG_ON(!is_realtime(t)); | ||
271 | TRACE_TASK(t, "RIP\n"); | ||
272 | } | ||
273 | |||
274 | |||
275 | /* need_to_preempt - check whether the task t needs to be preempted | ||
276 | * call only with irqs disabled and with ready_lock acquired | ||
277 | * THIS DOES NOT TAKE NON-PREEMPTIVE SECTIONS INTO ACCOUNT! | ||
278 | */ | ||
279 | static int edzl_preemption_needed(struct task_struct *t) | ||
280 | { | ||
281 | /* we need the read lock for edf_ready_queue */ | ||
282 | /* no need to preempt if there is nothing pending */ | ||
283 | if (!__jobs_pending(&active_gbl_domain)) | ||
284 | return 0; | ||
285 | /* we need to reschedule if t doesn't exist */ | ||
286 | if (!t) | ||
287 | return 1; | ||
288 | /* make sure to get non-rt stuff out of the way */ | ||
289 | if (!is_realtime(t)) | ||
290 | return 1; | ||
291 | |||
292 | /* NOTE: We cannot check for non-preemptibility since we | ||
293 | * don't know what address space we're currently in. | ||
294 | */ | ||
295 | |||
296 | /* Detect zero-laxity as needed. Easier to do it here than in tick. | ||
297 | (No timer is used to detect zero-laxity while a job is running.) */ | ||
298 | if(unlikely(!get_zerolaxity(t) && laxity_remaining(t) == 0)) | ||
299 | { | ||
300 | set_zerolaxity(t); | ||
301 | } | ||
302 | |||
303 | return edzl_higher_prio(__next_ready(&active_gbl_domain), t); | ||
304 | } | ||
305 | |||
306 | |||
307 | static int __init init_edzl(void) | 61 | static int __init init_edzl(void) |
308 | { | 62 | { |
309 | int cpu; | 63 | int cpu; |
310 | cpu_entry_t *entry; | 64 | cpu_entry_t *entry; |
311 | 65 | ||
312 | bheap_init(&edzl_plugin.cpu_heap); | 66 | bheap_init(&edzl_plugin.gbl_plugin.cpu_heap); |
313 | /* initialize CPU state */ | 67 | /* initialize CPU state */ |
314 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | 68 | for (cpu = 0; cpu < NR_CPUS; cpu++) { |
315 | entry = &per_cpu(edzl_cpu_entries, cpu); | 69 | entry = &per_cpu(edzl_cpu_entries, cpu); |
316 | edzl_plugin.cpus[cpu] = entry; | 70 | edzl_plugin.gbl_plugin.cpus[cpu] = entry; |
317 | entry->cpu = cpu; | 71 | entry->cpu = cpu; |
318 | entry->hn = &edzl_plugin.heap_node[cpu]; | 72 | entry->hn = &edzl_plugin.gbl_plugin.heap_node[cpu]; |
319 | bheap_node_init(&entry->hn, entry); | 73 | bheap_node_init(&entry->hn, entry); |
320 | } | 74 | } |
321 | gbl_domain_init(&edzl_plugin, NULL, gbl_release_jobs); | 75 | gbl_domain_init(&edzl_plugin.gbl_plugin, NULL, gbl_release_jobs); |
322 | 76 | ||
323 | return register_sched_plugin(&edzl_plugin.plugin); | 77 | return register_sched_plugin(&edzl_plugin.gbl_plugin.plugin); |
324 | } | 78 | } |
325 | 79 | ||
326 | 80 | ||
diff --git a/litmus/sched_global_plugin.c b/litmus/sched_global_plugin.c index e94247b66b59..da51eafee1b0 100644 --- a/litmus/sched_global_plugin.c +++ b/litmus/sched_global_plugin.c | |||
@@ -358,6 +358,7 @@ void gbl_job_completion(struct task_struct *t, int forced) | |||
358 | 358 | ||
359 | /* set flags */ | 359 | /* set flags */ |
360 | set_rt_flags(t, RT_F_SLEEP); | 360 | set_rt_flags(t, RT_F_SLEEP); |
361 | |||
361 | /* prepare for next period */ | 362 | /* prepare for next period */ |
362 | prepare_for_next_period(t); | 363 | prepare_for_next_period(t); |
363 | if (is_released(t, litmus_clock())) | 364 | if (is_released(t, litmus_clock())) |
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c index a15b25d21a89..a904fa15ce46 100644 --- a/litmus/sched_task_trace.c +++ b/litmus/sched_task_trace.c | |||
@@ -191,6 +191,33 @@ feather_callback void do_sched_trace_task_completion(unsigned long id, | |||
191 | } | 191 | } |
192 | } | 192 | } |
193 | 193 | ||
194 | #ifdef CONFIG_PLUGIN_AEDZL | ||
195 | feather_callback void do_sched_trace_task_completion_adaptive(unsigned long id, | ||
196 | unsigned long _task) | ||
197 | { | ||
198 | struct task_struct *t = (struct task_struct*) _task; | ||
199 | struct st_event_record* rec = get_record(ST_COMPLETION_ADAPTIVE, t); | ||
200 | if (rec) { | ||
201 | s32 err; | ||
202 | rec->data.completion_adaptive.estimated_exe = get_exec_cost_est(t); | ||
203 | |||
204 | err = _fp_to_integer(_mul(tsk_rt(t)->zl_accum_err, _frac(get_rt_period(t), 1))); | ||
205 | if((err < 0) && _point(tsk_rt(t)->zl_accum_err) < 0) | ||
206 | { | ||
207 | err *= -1; | ||
208 | } | ||
209 | rec->data.completion_adaptive.accumulated_err = err; | ||
210 | |||
211 | put_record(rec); | ||
212 | } | ||
213 | } | ||
214 | #else | ||
215 | feather_callback void do_sched_trace_task_completion_adaptive(unsigned long id, | ||
216 | unsigned long _task) | ||
217 | { | ||
218 | } | ||
219 | #endif | ||
220 | |||
194 | feather_callback void do_sched_trace_task_block(unsigned long id, | 221 | feather_callback void do_sched_trace_task_block(unsigned long id, |
195 | unsigned long _task) | 222 | unsigned long _task) |
196 | { | 223 | { |
diff --git a/litmus/sched_zl_plugin.c b/litmus/sched_zl_plugin.c new file mode 100644 index 000000000000..5dc8b9f451d7 --- /dev/null +++ b/litmus/sched_zl_plugin.c | |||
@@ -0,0 +1,301 @@ | |||
1 | /* | ||
2 | * litmus/sched_zl_plugin.c | ||
3 | * | ||
4 | * Common functions for zero-laxity (ZL) based schedulers (EDZL, AEDZL). | ||
5 | */ | ||
6 | |||
7 | #include <linux/percpu.h> | ||
8 | #include <linux/sched.h> | ||
9 | #include <linux/list.h> | ||
10 | |||
11 | #include <litmus/litmus.h> | ||
12 | #include <litmus/jobs.h> | ||
13 | #include <litmus/sched_zl_plugin.h> | ||
14 | #include <litmus/sched_trace.h> | ||
15 | |||
16 | #include <litmus/edf_common.h> | ||
17 | |||
18 | |||
19 | |||
20 | int edzl_higher_prio(struct task_struct* first, | ||
21 | struct task_struct* second) | ||
22 | { | ||
23 | struct task_struct *first_task = first; | ||
24 | struct task_struct *second_task = second; | ||
25 | |||
26 | /* There is no point in comparing a task to itself. */ | ||
27 | if (first && first == second) { | ||
28 | TRACE_TASK(first, | ||
29 | "WARNING: pointless edf priority comparison.\n"); | ||
30 | return 0; | ||
31 | } | ||
32 | |||
33 | |||
34 | /* Check for inherited priorities. Change task | ||
35 | * used for comparison in such a case. | ||
36 | */ | ||
37 | if (first && first->rt_param.inh_task) | ||
38 | first_task = first->rt_param.inh_task; | ||
39 | if (second && second->rt_param.inh_task) | ||
40 | second_task = second->rt_param.inh_task; | ||
41 | |||
42 | /* null checks & rt checks */ | ||
43 | if(!first_task) | ||
44 | return 0; | ||
45 | else if(!second_task || !is_realtime(second_task)) | ||
46 | return 1; | ||
47 | |||
48 | |||
49 | if(likely(get_zerolaxity(first_task) == get_zerolaxity(second_task))) | ||
50 | { | ||
51 | /* edf order if both tasks have the same laxity state */ | ||
52 | return(edf_higher_prio(first_task, second_task)); | ||
53 | } | ||
54 | else | ||
55 | { | ||
56 | return(get_zerolaxity(first_task)); | ||
57 | } | ||
58 | } | ||
59 | |||
60 | |||
61 | #define active_zl_plugin (container_of(active_gbl_plugin, struct sched_zl_plugin, gbl_plugin)) | ||
62 | |||
63 | #define active_gbl_domain (active_gbl_plugin->domain) | ||
64 | #define active_gbl_domain_lock (active_gbl_domain.ready_lock) | ||
65 | |||
66 | |||
67 | enum hrtimer_restart zl_on_zero_laxity(struct hrtimer *timer) | ||
68 | { | ||
69 | unsigned long flags; | ||
70 | struct task_struct* t; | ||
71 | |||
72 | lt_t now = litmus_clock(); | ||
73 | |||
74 | TRACE("Zero-laxity timer went off!\n"); | ||
75 | |||
76 | raw_spin_lock_irqsave(&active_gbl_domain_lock, flags); | ||
77 | |||
78 | t = container_of(container_of(timer, struct rt_param, zl_timer), | ||
79 | struct task_struct, | ||
80 | rt_param); | ||
81 | |||
82 | TRACE_TASK(t, "Reached zero-laxity. (now: %llu, zl-pt: %lld, time remaining (now): %lld)\n", | ||
83 | now, | ||
84 | get_deadline(t) - active_zl_plugin->budget_remaining(t), | ||
85 | get_deadline(t) - now); | ||
86 | |||
87 | set_zerolaxity(t); | ||
88 | gbl_update_queue_position(t); | ||
89 | |||
90 | raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags); | ||
91 | |||
92 | return HRTIMER_NORESTART; | ||
93 | } | ||
94 | |||
95 | |||
96 | /* __zl_take_ready - calls __take_ready with a timer-cancellation side-effect. */ | ||
97 | struct task_struct* __zl_take_ready(rt_domain_t* rt) | ||
98 | { | ||
99 | struct task_struct* t = __take_ready(rt); | ||
100 | |||
101 | if(t) | ||
102 | { | ||
103 | if(get_zerolaxity(t) == 0) | ||
104 | { | ||
105 | if(hrtimer_active(&tsk_rt(t)->zl_timer)) | ||
106 | { | ||
107 | int cancel_ret; | ||
108 | |||
109 | TRACE_TASK(t, "Canceling zero-laxity timer.\n"); | ||
110 | cancel_ret = hrtimer_try_to_cancel(&tsk_rt(t)->zl_timer); | ||
111 | WARN_ON(cancel_ret == 0); /* should never be inactive. */ | ||
112 | } | ||
113 | } | ||
114 | else | ||
115 | { | ||
116 | TRACE_TASK(t, "Task already has zero-laxity flagged.\n"); | ||
117 | } | ||
118 | } | ||
119 | |||
120 | return t; | ||
121 | } | ||
122 | |||
123 | |||
124 | /* __zl_add_ready - calls __add_ready with a timer-arming side-effect. */ | ||
125 | void __zl_add_ready(rt_domain_t* rt, struct task_struct *new) | ||
126 | { | ||
127 | __add_ready(rt, new); | ||
128 | |||
129 | if(get_zerolaxity(new) == 0) | ||
130 | { | ||
131 | lt_t when_to_fire; | ||
132 | |||
133 | when_to_fire = get_deadline(new) - active_zl_plugin->budget_remaining(new); | ||
134 | |||
135 | TRACE_TASK(new, "Setting zero-laxity timer for %llu. (deadline: %llu, remaining: %llu)\n", | ||
136 | when_to_fire, | ||
137 | get_deadline(new), | ||
138 | active_zl_plugin->budget_remaining(new)); | ||
139 | |||
140 | __hrtimer_start_range_ns(&tsk_rt(new)->zl_timer, | ||
141 | ns_to_ktime(when_to_fire), | ||
142 | 0, | ||
143 | HRTIMER_MODE_ABS_PINNED, | ||
144 | 0); | ||
145 | } | ||
146 | else | ||
147 | { | ||
148 | TRACE_TASK(new, "Already has zero-laxity when added to ready queue. (deadline: %llu, remaining: %llu))\n", | ||
149 | get_deadline(new), | ||
150 | active_zl_plugin->budget_remaining(new)); | ||
151 | } | ||
152 | } | ||
153 | |||
154 | |||
155 | /* zl_job_arrival: task is either resumed or released */ | ||
156 | void zl_job_arrival(struct task_struct* task) | ||
157 | { | ||
158 | BUG_ON(!task); | ||
159 | |||
160 | /* clear old laxity flag or tag zero-laxity upon release */ | ||
161 | if(active_zl_plugin->laxity_remaining(task)) | ||
162 | clear_zerolaxity(task); | ||
163 | else | ||
164 | set_zerolaxity(task); | ||
165 | |||
166 | gbl_requeue(task); | ||
167 | gbl_check_for_preemptions(); | ||
168 | } | ||
169 | |||
170 | |||
171 | void zl_task_new(struct task_struct * t, int on_rq, int running) | ||
172 | { | ||
173 | unsigned long flags; | ||
174 | cpu_entry_t* entry; | ||
175 | |||
176 | TRACE("zl: task new %d\n", t->pid); | ||
177 | |||
178 | raw_spin_lock_irqsave(&active_gbl_domain_lock, flags); | ||
179 | |||
180 | hrtimer_init(&t->rt_param.zl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
181 | t->rt_param.zl_timer.function = zl_on_zero_laxity; | ||
182 | |||
183 | /* setup job params */ | ||
184 | release_at(t, litmus_clock()); | ||
185 | |||
186 | if (running) { | ||
187 | entry = active_gbl_plugin->cpus[task_cpu(t)]; | ||
188 | BUG_ON(entry->scheduled); | ||
189 | |||
190 | #ifdef CONFIG_RELEASE_MASTER | ||
191 | if (entry->cpu != active_gbl_domain.release_master) { | ||
192 | #endif | ||
193 | entry->scheduled = t; | ||
194 | tsk_rt(t)->scheduled_on = task_cpu(t); | ||
195 | #ifdef CONFIG_RELEASE_MASTER | ||
196 | } else { | ||
197 | /* do not schedule on release master */ | ||
198 | gbl_preempt(entry); /* force resched */ | ||
199 | tsk_rt(t)->scheduled_on = NO_CPU; | ||
200 | } | ||
201 | #endif | ||
202 | } else { | ||
203 | t->rt_param.scheduled_on = NO_CPU; | ||
204 | } | ||
205 | t->rt_param.linked_on = NO_CPU; | ||
206 | |||
207 | active_gbl_plugin->job_arrival(t); | ||
208 | raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags); | ||
209 | } | ||
210 | |||
211 | |||
212 | void zl_task_wake_up(struct task_struct *task) | ||
213 | { | ||
214 | unsigned long flags; | ||
215 | lt_t now; | ||
216 | |||
217 | TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); | ||
218 | |||
219 | raw_spin_lock_irqsave(&active_gbl_domain_lock, flags); | ||
220 | /* We need to take suspensions because of semaphores into | ||
221 | * account! If a job resumes after being suspended due to acquiring | ||
222 | * a semaphore, it should never be treated as a new job release. | ||
223 | */ | ||
224 | if (get_rt_flags(task) == RT_F_EXIT_SEM) { | ||
225 | set_rt_flags(task, RT_F_RUNNING); | ||
226 | } else { | ||
227 | now = litmus_clock(); | ||
228 | if (is_tardy(task, now)) { | ||
229 | /* new sporadic release */ | ||
230 | release_at(task, now); | ||
231 | sched_trace_task_release(task); | ||
232 | } | ||
233 | else { | ||
234 | if (task->rt.time_slice) { | ||
235 | /* came back in time before deadline | ||
236 | */ | ||
237 | set_rt_flags(task, RT_F_RUNNING); | ||
238 | } | ||
239 | } | ||
240 | } | ||
241 | active_gbl_plugin->job_arrival(task); | ||
242 | raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags); | ||
243 | } | ||
244 | |||
245 | |||
246 | void zl_task_exit(struct task_struct * t) | ||
247 | { | ||
248 | unsigned long flags; | ||
249 | |||
250 | /* unlink if necessary */ | ||
251 | raw_spin_lock_irqsave(&active_gbl_domain_lock, flags); | ||
252 | gbl_unlink(t); | ||
253 | if (tsk_rt(t)->scheduled_on != NO_CPU) { | ||
254 | active_gbl_plugin->cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL; | ||
255 | tsk_rt(t)->scheduled_on = NO_CPU; | ||
256 | } | ||
257 | |||
258 | if(hrtimer_active(&tsk_rt(t)->zl_timer)) | ||
259 | { | ||
260 | /* BUG if reached? */ | ||
261 | TRACE_TASK(t, "Canceled armed timer while exiting.\n"); | ||
262 | hrtimer_cancel(&tsk_rt(t)->zl_timer); | ||
263 | } | ||
264 | |||
265 | raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags); | ||
266 | |||
267 | BUG_ON(!is_realtime(t)); | ||
268 | TRACE_TASK(t, "RIP\n"); | ||
269 | } | ||
270 | |||
271 | |||
272 | /* need_to_preempt - check whether the task t needs to be preempted | ||
273 | * call only with irqs disabled and with ready_lock acquired | ||
274 | * THIS DOES NOT TAKE NON-PREEMPTIVE SECTIONS INTO ACCOUNT! | ||
275 | */ | ||
276 | int zl_preemption_needed(struct task_struct *t) | ||
277 | { | ||
278 | /* we need the read lock for edf_ready_queue */ | ||
279 | /* no need to preempt if there is nothing pending */ | ||
280 | if (!__jobs_pending(&active_gbl_domain)) | ||
281 | return 0; | ||
282 | /* we need to reschedule if t doesn't exist */ | ||
283 | if (!t) | ||
284 | return 1; | ||
285 | /* make sure to get non-rt stuff out of the way */ | ||
286 | if (!is_realtime(t)) | ||
287 | return 1; | ||
288 | |||
289 | /* NOTE: We cannot check for non-preemptibility since we | ||
290 | * don't know what address space we're currently in. | ||
291 | */ | ||
292 | |||
293 | /* Detect zero-laxity as needed. Easier to do it here than in tick. | ||
294 | (No timer is used to detect zero-laxity while a job is running.) */ | ||
295 | if(unlikely(!get_zerolaxity(t) && active_zl_plugin->laxity_remaining(t) == 0)) | ||
296 | { | ||
297 | set_zerolaxity(t); | ||
298 | } | ||
299 | |||
300 | return active_zl_plugin->gbl_plugin.prio_order(__next_ready(&active_gbl_domain), t); | ||
301 | }
\ No newline at end of file
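
For reference, the comparison performed by edzl_higher_prio() reduces to the following rule, with ZL(J) a job's zero-laxity flag and the EDF relation given by the existing edf_higher_prio():

```latex
J_1 \succ J_2 \iff
\big(\mathrm{ZL}(J_1) \wedge \neg \mathrm{ZL}(J_2)\big) \;\vee\;
\big(\mathrm{ZL}(J_1) = \mathrm{ZL}(J_2) \wedge J_1 \succ_{\mathrm{EDF}} J_2\big)
```

That is, zero-laxity jobs outrank all others, and EDF order breaks ties within each class (after the priority-inheritance and non-real-time checks at the top of the function).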