-rw-r--r--  include/litmus/mc2_common.h |   31
-rw-r--r--  include/litmus/rt_param.h   |    4
-rw-r--r--  include/litmus/unistd_32.h  |    3
-rw-r--r--  include/litmus/unistd_64.h  |    5
-rw-r--r--  litmus/Makefile             |    3
-rw-r--r--  litmus/mc2_common.c         |   78
-rw-r--r--  litmus/reservation.c        |    6
-rw-r--r--  litmus/sched_mc2.c          | 1634
8 files changed, 1757 insertions, 7 deletions
diff --git a/include/litmus/mc2_common.h b/include/litmus/mc2_common.h
new file mode 100644
index 000000000000..e3c0af28f1b9
--- /dev/null
+++ b/include/litmus/mc2_common.h
@@ -0,0 +1,31 @@ | |||
1 | /* | ||
2 | * MC^2 common data structures | ||
3 | */ | ||
4 | |||
5 | #ifndef __UNC_MC2_COMMON_H__ | ||
6 | #define __UNC_MC2_COMMON_H__ | ||
7 | |||
8 | enum crit_level { | ||
9 | CRIT_LEVEL_A = 0, | ||
10 | CRIT_LEVEL_B = 1, | ||
11 | CRIT_LEVEL_C = 2, | ||
12 | NUM_CRIT_LEVELS = 3, | ||
13 | }; | ||
14 | |||
15 | struct mc2_task { | ||
16 | enum crit_level crit; | ||
17 | unsigned int res_id; | ||
18 | }; | ||
19 | |||
20 | #ifdef __KERNEL__ | ||
21 | |||
22 | #include <litmus/reservation.h> | ||
23 | |||
24 | #define tsk_mc2_data(t) (tsk_rt(t)->mc2_data) | ||
25 | |||
26 | long mc2_task_client_init(struct task_client *tc, struct mc2_task *mc2_param, struct task_struct *tsk, | ||
27 | struct reservation *res); | ||
28 | |||
29 | #endif /* __KERNEL__ */ | ||
30 | |||
31 | #endif \ No newline at end of file | ||
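For kernel code, the intended access path to these per-task parameters is the tsk_mc2_data() macro defined above. A minimal sketch of that pattern, for illustration only (the helper name example_crit_of is not part of the patch; litmus/sched_mc2.c below implements the same idea as get_task_crit_level()):

/* Illustrative sketch: read a task's MC^2 criticality level, using
 * NUM_CRIT_LEVELS as the "no MC^2 parameters set" marker. */
static enum crit_level example_crit_of(struct task_struct *t)
{
	struct mc2_task *mp = tsk_mc2_data(t);	/* i.e., tsk_rt(t)->mc2_data */

	return mp ? mp->crit : NUM_CRIT_LEVELS;
}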
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index e626bbbe60d5..26dfa33c1e5e 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -206,6 +206,7 @@ struct rt_job { | |||
206 | }; | 206 | }; |
207 | 207 | ||
208 | struct pfair_param; | 208 | struct pfair_param; |
209 | struct mc2_task; | ||
209 | 210 | ||
210 | /* RT task parameters for scheduling extensions | 211 | /* RT task parameters for scheduling extensions |
211 | * These parameters are inherited during clone and therefore must | 212 | * These parameters are inherited during clone and therefore must |
@@ -322,6 +323,9 @@ struct rt_param { | |||
322 | 323 | ||
323 | /* Pointer to the page shared between userspace and kernel. */ | 324 | /* Pointer to the page shared between userspace and kernel. */ |
324 | struct control_page * ctrl_page; | 325 | struct control_page * ctrl_page; |
326 | |||
327 | /* Mixed-criticality specific data */ | ||
328 | struct mc2_task* mc2_data; | ||
325 | }; | 329 | }; |
326 | 330 | ||
327 | #endif | 331 | #endif |
diff --git a/include/litmus/unistd_32.h b/include/litmus/unistd_32.h
index 5f6a2749c6a7..202f439a62ae 100644
--- a/include/litmus/unistd_32.h
+++ b/include/litmus/unistd_32.h
@@ -19,5 +19,6 @@ | |||
19 | #define __NR_null_call __LSC(11) | 19 | #define __NR_null_call __LSC(11) |
20 | #define __NR_reservation_create __LSC(12) | 20 | #define __NR_reservation_create __LSC(12) |
21 | #define __NR_reservation_destroy __LSC(13) | 21 | #define __NR_reservation_destroy __LSC(13) |
22 | #define __NR_set_mc2_task_param __LSC(14) | ||
22 | 23 | ||
23 | #define NR_litmus_syscalls 14 | 24 | #define NR_litmus_syscalls 15 |
diff --git a/include/litmus/unistd_64.h b/include/litmus/unistd_64.h
index 3e6b1d330336..ba2c91c5bf8c 100644
--- a/include/litmus/unistd_64.h
+++ b/include/litmus/unistd_64.h
@@ -33,6 +33,7 @@ __SYSCALL(__NR_null_call, sys_null_call) | |||
33 | __SYSCALL(__NR_reservation_create, sys_reservation_create) | 33 | __SYSCALL(__NR_reservation_create, sys_reservation_create) |
34 | #define __NR_reservation_destroy __LSC(13) | 34 | #define __NR_reservation_destroy __LSC(13) |
35 | __SYSCALL(__NR_reservation_destroy, sys_reservation_destroy) | 35 | __SYSCALL(__NR_reservation_destroy, sys_reservation_destroy) |
36 | #define __NR_set_mc2_task_param __LSC(14) | ||
37 | __SYSCALL(__NR_set_mc2_task_param, sys_set_mc2_task_param) | ||
36 | 38 | ||
37 | 39 | #define NR_litmus_syscalls 15 | |
38 | #define NR_litmus_syscalls 14 | ||
diff --git a/litmus/Makefile b/litmus/Makefile
index 05021f553eda..70c77b3e9b53 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -35,4 +35,5 @@ obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o | |||
35 | 35 | ||
36 | obj-y += reservation.o polling_reservations.o | 36 | obj-y += reservation.o polling_reservations.o |
37 | 37 | ||
38 | obj-y += sched_pres.o \ No newline at end of file | 38 | obj-y += sched_pres.o |
39 | obj-y += mc2_common.o sched_mc2.o | ||
diff --git a/litmus/mc2_common.c b/litmus/mc2_common.c
new file mode 100644
index 000000000000..a8ea5d9889f3
--- /dev/null
+++ b/litmus/mc2_common.c
@@ -0,0 +1,78 @@ | |||
1 | /* | ||
2 | * litmus/mc2_common.c | ||
3 | * | ||
4 | * Common functions for MC2 plugin. | ||
5 | */ | ||
6 | |||
7 | #include <linux/percpu.h> | ||
8 | #include <linux/sched.h> | ||
9 | #include <linux/list.h> | ||
10 | #include <linux/slab.h> | ||
11 | #include <asm/uaccess.h> | ||
12 | |||
13 | #include <litmus/litmus.h> | ||
14 | #include <litmus/sched_plugin.h> | ||
15 | #include <litmus/sched_trace.h> | ||
16 | |||
17 | #include <litmus/mc2_common.h> | ||
18 | |||
19 | long mc2_task_client_init(struct task_client *tc, struct mc2_task *mc2_param, struct task_struct *tsk, struct reservation *res) | ||
20 | { | ||
21 | task_client_init(tc, tsk, res); | ||
22 | if ((mc2_param->crit < CRIT_LEVEL_A) || | ||
23 | (mc2_param->crit > CRIT_LEVEL_C)) | ||
24 | return -EINVAL; | ||
25 | |||
26 | TRACE_TASK(tsk, "mc2_task_client_init: crit_level = %d\n", mc2_param->crit); | ||
27 | |||
28 | return 0; | ||
29 | } | ||
30 | |||
31 | asmlinkage long sys_set_mc2_task_param(pid_t pid, struct mc2_task __user * param) | ||
32 | { | ||
33 | struct task_struct *target; | ||
34 | int retval = -EINVAL; | ||
35 | struct mc2_task *mp = kzalloc(sizeof(*mp), GFP_KERNEL); | ||
36 | |||
37 | if (!mp) | ||
38 | return -ENOMEM; | ||
39 | |||
40 | printk("Setting up mc^2 task parameters for process %d.\n", pid); | ||
41 | |||
42 | if (pid < 0 || param == 0) { | ||
43 | goto out; | ||
44 | } | ||
45 | if (copy_from_user(mp, param, sizeof(*mp))) { | ||
46 | retval = -EFAULT; | ||
47 | goto out; | ||
48 | } | ||
49 | |||
50 | /* Task search and manipulation must be protected */ | ||
51 | read_lock_irq(&tasklist_lock); | ||
52 | if (!(target = find_task_by_vpid(pid))) { | ||
53 | retval = -ESRCH; | ||
54 | goto out_unlock; | ||
55 | } | ||
56 | |||
57 | if (is_realtime(target)) { | ||
58 | /* The task is already a real-time task. | ||
59 | * We cannot allow parameter changes at this point. | ||
60 | */ | ||
61 | retval = -EBUSY; | ||
62 | goto out_unlock; | ||
63 | } | ||
64 | if (mp->crit < CRIT_LEVEL_A || mp->crit >= NUM_CRIT_LEVELS) { | ||
65 | printk(KERN_INFO "litmus: real-time task %d rejected " | ||
66 | "because of invalid criticality level\n", pid); | ||
67 | goto out_unlock; | ||
68 | } | ||
69 | |||
70 | //target->rt_param.plugin_state = mp; | ||
71 | target->rt_param.mc2_data = mp; | ||
72 | |||
73 | retval = 0; | ||
74 | out_unlock: | ||
75 | read_unlock_irq(&tasklist_lock); | ||
76 | out: | ||
77 | return retval; | ||
78 | } \ No newline at end of file | ||
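Together with the syscall numbers added above, the expected userspace flow is to register the MC^2 parameters before the task switches to real-time mode (otherwise sys_set_mc2_task_param() returns -EBUSY) and to reference a reservation that was created beforehand. A minimal userspace sketch, under the following assumptions: the header's non-__KERNEL__ part is available to userspace, the numeric value of __NR_set_mc2_task_param comes from the LITMUS^RT syscall base for the architecture, and the reservation id 42 as well as the follow-up liblitmus steps are placeholders not defined in this patch:

#include <unistd.h>
#include <sys/syscall.h>
#include <litmus/mc2_common.h>	/* enum crit_level, struct mc2_task */

/* Hypothetical wrapper around the new syscall. */
static int set_mc2_task_param(pid_t pid, struct mc2_task *mp)
{
	return syscall(__NR_set_mc2_task_param, pid, mp);
}

int main(void)
{
	struct mc2_task mp = {
		.crit   = CRIT_LEVEL_B,	/* level B: partitioned, per-CPU reservation */
		.res_id = 42,		/* id of a reservation created earlier */
	};

	if (set_mc2_task_param(getpid(), &mp) < 0)
		return 1;

	/* ...then set the usual rt_task parameters and switch to real-time
	 * mode via liblitmus (not part of this patch)... */
	return 0;
}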
diff --git a/litmus/reservation.c b/litmus/reservation.c
index 08c74f9005b3..d11003af279a 100644
--- a/litmus/reservation.c
+++ b/litmus/reservation.c
@@ -217,7 +217,7 @@ static void sup_charge_budget( | |||
217 | /* stop at the first ACTIVE reservation */ | 217 | /* stop at the first ACTIVE reservation */ |
218 | //break; | 218 | //break; |
219 | } | 219 | } |
220 | TRACE("finished charging budgets\n"); | 220 | //TRACE("finished charging budgets\n"); |
221 | } | 221 | } |
222 | 222 | ||
223 | static void sup_replenish_budgets(struct sup_reservation_environment* sup_env) | 223 | static void sup_replenish_budgets(struct sup_reservation_environment* sup_env) |
@@ -234,7 +234,7 @@ static void sup_replenish_budgets(struct sup_reservation_environment* sup_env) | |||
234 | break; | 234 | break; |
235 | } | 235 | } |
236 | } | 236 | } |
237 | TRACE("finished replenishing budgets\n"); | 237 | //TRACE("finished replenishing budgets\n"); |
238 | 238 | ||
239 | /* request a scheduler update at the next replenishment instant */ | 239 | /* request a scheduler update at the next replenishment instant */ |
240 | res = list_first_entry_or_null(&sup_env->depleted_reservations, | 240 | res = list_first_entry_or_null(&sup_env->depleted_reservations, |
@@ -252,7 +252,7 @@ void sup_update_time( | |||
252 | /* If the time didn't advance, there is nothing to do. | 252 | /* If the time didn't advance, there is nothing to do. |
253 | * This check makes it safe to call sup_advance_time() potentially | 253 | * This check makes it safe to call sup_advance_time() potentially |
254 | * multiple times (e.g., via different code paths. */ | 254 | * multiple times (e.g., via different code paths. */ |
255 | TRACE("(sup_update_time) now: %llu, current_time: %llu\n", now, sup_env->env.current_time); | 255 | //TRACE("(sup_update_time) now: %llu, current_time: %llu\n", now, sup_env->env.current_time); |
256 | if (unlikely(now <= sup_env->env.current_time)) | 256 | if (unlikely(now <= sup_env->env.current_time)) |
257 | return; | 257 | return; |
258 | 258 | ||
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
new file mode 100644
index 000000000000..09b5ebed2be5
--- /dev/null
+++ b/litmus/sched_mc2.c
@@ -0,0 +1,1634 @@ | |||
1 | /* | ||
2 | * litmus/sched_mc2.c | ||
3 | * | ||
4 | * Implementation of the Mixed-Criticality on MultiCore scheduler | ||
5 | * | ||
6 | * This plugin implements the scheduling algorithm proposed in the | ||
7 | * paper "Mixed-Criticality Real-Time Scheduling for Multicore System". | ||
8 | */ | ||
9 | |||
10 | #include <linux/percpu.h> | ||
11 | #include <linux/slab.h> | ||
12 | #include <asm/uaccess.h> | ||
13 | |||
14 | #include <litmus/sched_plugin.h> | ||
15 | #include <litmus/preempt.h> | ||
16 | #include <litmus/debug_trace.h> | ||
17 | |||
18 | #include <litmus/litmus.h> | ||
19 | #include <litmus/jobs.h> | ||
20 | #include <litmus/budget.h> | ||
21 | #include <litmus/litmus_proc.h> | ||
22 | #include <litmus/sched_trace.h> | ||
23 | |||
24 | #include <litmus/mc2_common.h> | ||
25 | #include <litmus/reservation.h> | ||
26 | #include <litmus/polling_reservations.h> | ||
27 | |||
28 | /* _global_env - reservation container for level-C tasks */ | ||
29 | struct gmp_reservation_environment _global_env; | ||
30 | |||
31 | /* cpu_entry - keep track of a running task on a cpu | ||
32 | * This state is used to decide the lowest priority cpu | ||
33 | */ | ||
34 | struct cpu_entry { | ||
35 | struct task_struct *scheduled; | ||
36 | lt_t deadline; | ||
37 | int cpu; | ||
38 | enum crit_level lv; | ||
39 | /* if will_schedule is true, this cpu has already been selected and | ||
40 | will call mc2_schedule() soon. */ | ||
41 | bool will_schedule; | ||
42 | }; | ||
43 | |||
44 | /* cpu_priority - a global state for choosing the lowest priority CPU */ | ||
45 | struct cpu_priority { | ||
46 | raw_spinlock_t lock; | ||
47 | struct cpu_entry cpu_entries[NR_CPUS]; | ||
48 | }; | ||
49 | |||
50 | struct cpu_priority _lowest_prio_cpu; | ||
51 | |||
52 | /* mc2_task_state - a task state structure */ | ||
53 | struct mc2_task_state { | ||
54 | struct task_client res_info; | ||
55 | /* if cpu == -1, this task is a global task (level C) */ | ||
56 | int cpu; | ||
57 | bool has_departed; | ||
58 | struct mc2_task mc2_param; | ||
59 | }; | ||
60 | |||
61 | /* crit_entry - maintain the logically running job (ghost job) */ | ||
62 | struct crit_entry { | ||
63 | enum crit_level level; | ||
64 | struct task_struct *running; | ||
65 | }; | ||
66 | |||
67 | /* mc2_cpu_state - maintain the scheduled state and ghost jobs | ||
68 | * timer : timer for partitioned tasks (level A and B) | ||
69 | * g_timer : timer for global tasks (level C) | ||
70 | */ | ||
71 | struct mc2_cpu_state { | ||
72 | raw_spinlock_t lock; | ||
73 | |||
74 | struct sup_reservation_environment sup_env; | ||
75 | struct hrtimer timer; | ||
76 | |||
77 | int cpu; | ||
78 | struct task_struct* scheduled; | ||
79 | struct crit_entry crit_entries[NUM_CRIT_LEVELS]; | ||
80 | }; | ||
81 | |||
82 | static DEFINE_PER_CPU(struct mc2_cpu_state, mc2_cpu_state); | ||
83 | |||
84 | #define cpu_state_for(cpu_id) (&per_cpu(mc2_cpu_state, cpu_id)) | ||
85 | #define local_cpu_state() (&__get_cpu_var(mc2_cpu_state)) | ||
86 | |||
87 | /* get_mc2_state - get the task's state */ | ||
88 | static struct mc2_task_state* get_mc2_state(struct task_struct *tsk) | ||
89 | { | ||
90 | struct mc2_task_state* tinfo; | ||
91 | |||
92 | tinfo = (struct mc2_task_state*)tsk_rt(tsk)->plugin_state; | ||
93 | |||
94 | if (tinfo) | ||
95 | return tinfo; | ||
96 | else | ||
97 | return NULL; | ||
98 | } | ||
99 | |||
100 | /* get_task_crit_level - return the criticality level of a task */ | ||
101 | static enum crit_level get_task_crit_level(struct task_struct *tsk) | ||
102 | { | ||
103 | struct mc2_task *mp; | ||
104 | |||
105 | if (!tsk || !is_realtime(tsk)) | ||
106 | return NUM_CRIT_LEVELS; | ||
107 | |||
108 | mp = tsk_rt(tsk)->mc2_data; | ||
109 | |||
110 | if (!mp) | ||
111 | return NUM_CRIT_LEVELS; | ||
112 | else | ||
113 | return mp->crit; | ||
114 | } | ||
115 | |||
116 | /* res_find_by_id - find reservation by id */ | ||
117 | static struct reservation* res_find_by_id(struct mc2_cpu_state *state, | ||
118 | unsigned int id) | ||
119 | { | ||
120 | struct reservation *res; | ||
121 | |||
122 | res = sup_find_by_id(&state->sup_env, id); | ||
123 | if (!res) | ||
124 | res = gmp_find_by_id(&_global_env, id); | ||
125 | |||
126 | return res; | ||
127 | } | ||
128 | |||
129 | /* mc2_update_time - update time for a given criticality level. | ||
130 | * caller must hold a proper lock | ||
131 | * (cpu_state lock or global lock) | ||
132 | */ | ||
133 | static void mc2_update_time(enum crit_level lv, | ||
134 | struct mc2_cpu_state *state, lt_t time) | ||
135 | { | ||
136 | if (lv < CRIT_LEVEL_C) | ||
137 | sup_update_time(&state->sup_env, time); | ||
138 | else if (lv == CRIT_LEVEL_C) | ||
139 | gmp_update_time(&_global_env, time); | ||
140 | else | ||
141 | TRACE("update_time(): Criticality level error!!!!\n"); | ||
142 | } | ||
143 | |||
144 | /* task_departs - remove a task from its reservation | ||
145 | * If the job has remaining budget, convert it to a ghost job | ||
146 | * and update crit_entries[] | ||
147 | * | ||
148 | * @job_complete indicates whether the job has completed | ||
149 | */ | ||
150 | static void task_departs(struct task_struct *tsk, int job_complete) | ||
151 | { | ||
152 | struct mc2_task_state* tinfo = get_mc2_state(tsk); | ||
153 | struct mc2_cpu_state* state = local_cpu_state(); | ||
154 | struct reservation* res; | ||
155 | struct reservation_client *client; | ||
156 | |||
157 | BUG_ON(!is_realtime(tsk)); | ||
158 | |||
159 | res = tinfo->res_info.client.reservation; | ||
160 | client = &tinfo->res_info.client; | ||
161 | |||
162 | res->ops->client_departs(res, client, job_complete); | ||
163 | tinfo->has_departed = true; | ||
164 | TRACE_TASK(tsk, "CLIENT DEPART with budget %llu\n", res->cur_budget); | ||
165 | |||
166 | if (job_complete && res->cur_budget) { | ||
167 | struct crit_entry* ce; | ||
168 | enum crit_level lv = tinfo->mc2_param.crit; | ||
169 | |||
170 | ce = &state->crit_entries[lv]; | ||
171 | ce->running = tsk; | ||
172 | res->is_ghost = 1; | ||
173 | TRACE_TASK(tsk, "BECOME GHOST at %llu\n", litmus_clock()); | ||
174 | |||
175 | } | ||
176 | } | ||
177 | |||
178 | /* task_arrives - put a task into its reservation | ||
179 | * If the job was a ghost job, remove it from crit_entries[] | ||
180 | */ | ||
181 | static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk) | ||
182 | { | ||
183 | struct mc2_task_state* tinfo = get_mc2_state(tsk); | ||
184 | struct reservation* res; | ||
185 | struct reservation_client *client; | ||
186 | enum crit_level lv = get_task_crit_level(tsk); | ||
187 | |||
188 | res = tinfo->res_info.client.reservation; | ||
189 | client = &tinfo->res_info.client; | ||
190 | |||
191 | tinfo->has_departed = false; | ||
192 | res->ops->client_arrives(res, client); | ||
193 | |||
194 | sched_trace_task_release(tsk); | ||
195 | |||
196 | if (lv != NUM_CRIT_LEVELS) { | ||
197 | struct crit_entry *ce; | ||
198 | ce = &state->crit_entries[lv]; | ||
199 | /* if the current task is a ghost job, remove it */ | ||
200 | if (ce->running == tsk) | ||
201 | ce->running = NULL; | ||
202 | } | ||
203 | } | ||
204 | |||
205 | /* get_lowest_prio_cpu - return the lowest priority cpu | ||
206 | * This will be used for scheduling level-C tasks. | ||
207 | * If all CPUs are running tasks that have | ||
208 | * higher priority than level C, return NO_CPU. | ||
209 | */ | ||
210 | static int get_lowest_prio_cpu(lt_t priority) | ||
211 | { | ||
212 | struct cpu_entry *ce; | ||
213 | int cpu, ret = NO_CPU; | ||
214 | lt_t latest_deadline = 0; | ||
215 | |||
216 | raw_spin_lock(&_lowest_prio_cpu.lock); | ||
217 | ce = &_lowest_prio_cpu.cpu_entries[local_cpu_state()->cpu]; | ||
218 | if (!ce->will_schedule && !ce->scheduled) { | ||
219 | raw_spin_unlock(&_lowest_prio_cpu.lock); | ||
220 | TRACE("CPU %d (local) is the lowest!\n", ce->cpu); | ||
221 | return ce->cpu; | ||
222 | } else { | ||
223 | TRACE("Local CPU will_schedule=%d, scheduled=(%s/%d)\n", ce->will_schedule, ce->scheduled ? (ce->scheduled)->comm : "null", ce->scheduled ? (ce->scheduled)->pid : 0); | ||
224 | } | ||
225 | |||
226 | for_each_online_cpu(cpu) { | ||
227 | ce = &_lowest_prio_cpu.cpu_entries[cpu]; | ||
228 | /* If a CPU will call schedule() in the near future, we don't | ||
229 | return that CPU. */ | ||
230 | TRACE("CPU %d will_schedule=%d, scheduled=(%s/%d:%d)\n", cpu, ce->will_schedule, | ||
231 | ce->scheduled ? (ce->scheduled)->comm : "null", | ||
232 | ce->scheduled ? (ce->scheduled)->pid : 0, | ||
233 | ce->scheduled ? (ce->scheduled)->rt_param.job_params.job_no : 0); | ||
234 | if (!ce->will_schedule) { | ||
235 | if (!ce->scheduled) { | ||
236 | /* Idle cpu, return this. */ | ||
237 | raw_spin_unlock(&_lowest_prio_cpu.lock); | ||
238 | TRACE("CPU %d is the lowest!\n", ce->cpu); | ||
239 | return ce->cpu; | ||
240 | } else if (ce->lv == CRIT_LEVEL_C && | ||
241 | ce->deadline > latest_deadline) { | ||
242 | latest_deadline = ce->deadline; | ||
243 | ret = ce->cpu; | ||
244 | } | ||
245 | } | ||
246 | } | ||
247 | |||
248 | raw_spin_unlock(&_lowest_prio_cpu.lock); | ||
249 | |||
250 | if (priority >= latest_deadline) | ||
251 | ret = NO_CPU; | ||
252 | |||
253 | TRACE("CPU %d is the lowest!\n", ret); | ||
254 | |||
255 | return ret; | ||
256 | } | ||
257 | |||
258 | /* NOTE: drops state->lock */ | ||
259 | /* mc2_update_timer_and_unlock - set a timer and g_timer and unlock | ||
260 | * Whenever res_env.current_time is updated, | ||
261 | * we check next_scheduler_update and set | ||
262 | * a timer. | ||
263 | * If there exists a global event which is | ||
264 | * not armed on any CPU and g_timer is not | ||
265 | * active, set a g_timer for that event. | ||
266 | */ | ||
267 | static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state) | ||
268 | { | ||
269 | int local; | ||
270 | lt_t update, now; | ||
271 | enum crit_level lv = get_task_crit_level(state->scheduled); | ||
272 | struct next_timer_event *event, *next; | ||
273 | |||
274 | //TRACE_TASK(state->scheduled, "update_timer!\n"); | ||
275 | if (lv != NUM_CRIT_LEVELS) | ||
276 | TRACE_TASK(state->scheduled, "UPDATE_TIMER LV = %d\n", lv); | ||
277 | |||
278 | update = state->sup_env.next_scheduler_update; | ||
279 | now = state->sup_env.env.current_time; | ||
280 | |||
281 | /* Be sure we're actually running on the right core, | ||
282 | * as mc2_update_timer_and_unlock() is also called from mc2_task_resume(), | ||
283 | * which might be called on any CPU when a thread resumes. | ||
284 | */ | ||
285 | local = local_cpu_state() == state; | ||
286 | |||
287 | list_for_each_entry_safe(event, next, &_global_env.next_events, list) { | ||
288 | /* If the event time has already passed, we call schedule() on | ||
289 | the lowest priority cpu */ | ||
290 | if (event->next_update >= update) { | ||
291 | break; | ||
292 | } | ||
293 | |||
294 | if (event->next_update < litmus_clock()) { | ||
295 | if (event->timer_armed_on == NO_CPU) { | ||
296 | struct reservation *res = gmp_find_by_id(&_global_env, event->id); | ||
297 | int cpu = get_lowest_prio_cpu(res?res->priority:0); | ||
298 | TRACE("GLOBAL EVENT PASSED!! poking CPU %d to reschedule\n", cpu); | ||
299 | list_del(&event->list); | ||
300 | kfree(event); | ||
301 | if (cpu != NO_CPU) { | ||
302 | raw_spin_lock(&_lowest_prio_cpu.lock); | ||
303 | _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; | ||
304 | raw_spin_unlock(&_lowest_prio_cpu.lock); | ||
305 | litmus_reschedule(cpu); | ||
306 | } | ||
307 | } | ||
308 | } else if (event->next_update < update && event->timer_armed_on == NO_CPU) { | ||
309 | event->timer_armed_on = state->cpu; | ||
310 | update = event->next_update; | ||
311 | break; | ||
312 | } | ||
313 | } | ||
314 | |||
315 | /* Must drop state lock before calling into hrtimer_start(), which | ||
316 | * may raise a softirq, which in turn may wake ksoftirqd. */ | ||
317 | raw_spin_unlock(&state->lock); | ||
318 | raw_spin_unlock(&_global_env.lock); | ||
319 | |||
320 | if (update <= now) { | ||
321 | litmus_reschedule(state->cpu); | ||
322 | } else if (likely(local && update != SUP_NO_SCHEDULER_UPDATE)) { | ||
323 | /* Reprogram only if not already set correctly. */ | ||
324 | if (!hrtimer_active(&state->timer) || | ||
325 | ktime_to_ns(hrtimer_get_expires(&state->timer)) != update) { | ||
326 | TRACE("canceling timer...at %llu\n", | ||
327 | ktime_to_ns(hrtimer_get_expires(&state->timer))); | ||
328 | hrtimer_cancel(&state->timer); | ||
329 | TRACE("setting scheduler timer for %llu\n", update); | ||
330 | /* We cannot use hrtimer_start() here because the | ||
331 | * wakeup flag must be set to zero. */ | ||
332 | __hrtimer_start_range_ns(&state->timer, | ||
333 | ns_to_ktime(update), | ||
334 | 0 /* timer coalescing slack */, | ||
335 | HRTIMER_MODE_ABS_PINNED, | ||
336 | 0 /* wakeup */); | ||
337 | } | ||
338 | } else if (unlikely(!local && update != SUP_NO_SCHEDULER_UPDATE)) { | ||
339 | /* Poke remote core only if timer needs to be set earlier than | ||
340 | * it is currently set. | ||
341 | */ | ||
342 | TRACE("mc2_update_timer for remote CPU %d (update=%llu, " | ||
343 | "active:%d, set:%llu)\n", | ||
344 | state->cpu, | ||
345 | update, | ||
346 | hrtimer_active(&state->timer), | ||
347 | ktime_to_ns(hrtimer_get_expires(&state->timer))); | ||
348 | if (!hrtimer_active(&state->timer) || | ||
349 | ktime_to_ns(hrtimer_get_expires(&state->timer)) > update) { | ||
350 | TRACE("poking CPU %d so that it can update its " | ||
351 | "scheduling timer (active:%d, set:%llu)\n", | ||
352 | state->cpu, | ||
353 | hrtimer_active(&state->timer), | ||
354 | ktime_to_ns(hrtimer_get_expires(&state->timer))); | ||
355 | litmus_reschedule(state->cpu); | ||
356 | } | ||
357 | } | ||
358 | } | ||
359 | |||
360 | /* mc2_update_ghost_state - Update crit_entries[] to track ghost jobs | ||
361 | * If the budget of a ghost is exhausted, | ||
362 | * clear is_ghost and reschedule | ||
363 | */ | ||
364 | static lt_t mc2_update_ghost_state(struct mc2_cpu_state *state) | ||
365 | { | ||
366 | int lv = 0; | ||
367 | struct crit_entry* ce; | ||
368 | struct reservation *res; | ||
369 | struct mc2_task_state *tinfo; | ||
370 | lt_t ret = ULLONG_MAX; | ||
371 | |||
372 | BUG_ON(!state); | ||
373 | |||
374 | for (lv = 0; lv < NUM_CRIT_LEVELS; lv++) { | ||
375 | ce = &state->crit_entries[lv]; | ||
376 | if (ce->running != NULL) { | ||
377 | //printk(KERN_ALERT "P%d ce->running : %s/%d\n", state->cpu, ce->running ? (ce->running)->comm : "null", ce->running ? (ce->running)->pid : 0); | ||
378 | tinfo = get_mc2_state(ce->running); | ||
379 | if (!tinfo) | ||
380 | continue; | ||
381 | |||
382 | res = res_find_by_id(state, tinfo->mc2_param.res_id); | ||
383 | BUG_ON(!res); | ||
384 | //printk(KERN_ALERT "R%d found!\n", res->id); | ||
385 | TRACE("LV %d running id %d budget %llu\n", | ||
386 | lv, tinfo->mc2_param.res_id, res->cur_budget); | ||
387 | /* If the budget is exhausted, clear is_ghost and reschedule */ | ||
388 | if (!res->cur_budget) { | ||
389 | struct sup_reservation_environment* sup_env = &state->sup_env; | ||
390 | |||
391 | TRACE("GHOST FINISH id %d at %llu\n", | ||
392 | tinfo->mc2_param.res_id, litmus_clock()); | ||
393 | ce->running = NULL; | ||
394 | res->is_ghost = 0; | ||
395 | |||
396 | if (lv < CRIT_LEVEL_C) { | ||
397 | res = list_first_entry_or_null( | ||
398 | &sup_env->active_reservations, | ||
399 | struct reservation, list); | ||
400 | if (res) | ||
401 | litmus_reschedule_local(); | ||
402 | } else if (lv == CRIT_LEVEL_C) { | ||
403 | res = list_first_entry_or_null( | ||
404 | &_global_env.active_reservations, | ||
405 | struct reservation, list); | ||
406 | if (res) | ||
407 | litmus_reschedule(state->cpu); | ||
408 | } | ||
409 | } else { | ||
410 | //TRACE("GHOST NOT FINISH id %d budget %llu\n", res->id, res->cur_budget); | ||
411 | //gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN); | ||
412 | if (ret > res->cur_budget) { | ||
413 | ret = res->cur_budget; | ||
414 | } | ||
415 | } | ||
416 | } | ||
417 | } | ||
418 | |||
419 | return ret; | ||
420 | } | ||
421 | |||
422 | /* update_cpu_prio - Update cpu's priority | ||
423 | * When a cpu picks a new task, call this function | ||
424 | * to update cpu priorities. | ||
425 | */ | ||
426 | static void update_cpu_prio(struct mc2_cpu_state *state) | ||
427 | { | ||
428 | struct cpu_entry *ce = &_lowest_prio_cpu.cpu_entries[state->cpu]; | ||
429 | enum crit_level lv = get_task_crit_level(state->scheduled); | ||
430 | |||
431 | if (!state->scheduled) { | ||
432 | /* cpu is idle. */ | ||
433 | ce->scheduled = NULL; | ||
434 | ce->deadline = ULLONG_MAX; | ||
435 | ce->lv = NUM_CRIT_LEVELS; | ||
436 | } else if (lv == CRIT_LEVEL_C) { | ||
437 | ce->scheduled = state->scheduled; | ||
438 | ce->deadline = get_deadline(state->scheduled); | ||
439 | ce->lv = lv; | ||
440 | } else if (lv < CRIT_LEVEL_C) { | ||
441 | /* If cpu is running level A or B tasks, it is not eligible | ||
442 | to run level-C tasks */ | ||
443 | ce->scheduled = state->scheduled; | ||
444 | ce->deadline = 0; | ||
445 | ce->lv = lv; | ||
446 | } | ||
447 | } | ||
448 | |||
449 | /* on_scheduling_timer - timer event for partitioned tasks | ||
450 | */ | ||
451 | static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer) | ||
452 | { | ||
453 | unsigned long flags; | ||
454 | enum hrtimer_restart restart = HRTIMER_NORESTART; | ||
455 | struct mc2_cpu_state *state; | ||
456 | lt_t update, now; | ||
457 | int global_schedule_now; | ||
458 | lt_t remain_budget; | ||
459 | |||
460 | state = container_of(timer, struct mc2_cpu_state, timer); | ||
461 | |||
462 | /* The scheduling timer should only fire on the local CPU, because | ||
463 | * otherwise deadlocks via timer_cancel() are possible. | ||
464 | * Note: this does not interfere with dedicated interrupt handling, as | ||
465 | * even under dedicated interrupt handling scheduling timers for | ||
466 | * budget enforcement must occur locally on each CPU. | ||
467 | */ | ||
468 | BUG_ON(state->cpu != raw_smp_processor_id()); | ||
469 | |||
470 | TRACE("TIMER FIRED at %llu\n", litmus_clock()); | ||
471 | raw_spin_lock_irqsave(&_global_env.lock, flags); | ||
472 | raw_spin_lock(&state->lock); | ||
473 | //printk(KERN_ALERT "P%d on_scheduling_timer() hold lock %s/%d\n", state->cpu, current ? (current)->comm : "null", current ? (current)->pid : 0); | ||
474 | now = litmus_clock(); | ||
475 | sup_update_time(&state->sup_env, now); | ||
476 | global_schedule_now = gmp_update_time(&_global_env, now); | ||
477 | //printk(KERN_ALERT "P%d update_time in timer() %s/%d\n", state->cpu, current ? (current)->comm : "null", current ? (current)->pid : 0); | ||
478 | remain_budget = mc2_update_ghost_state(state); | ||
479 | |||
480 | update = state->sup_env.next_scheduler_update; | ||
481 | now = state->sup_env.env.current_time; | ||
482 | |||
483 | if (remain_budget != ULLONG_MAX && update > now + remain_budget) { | ||
484 | update = now + remain_budget; | ||
485 | } | ||
486 | |||
487 | //TRACE_CUR("on_scheduling_timer at %llu, upd:%llu (for cpu=%d) g_schedule_now:%d\n", now, update, state->cpu, global_schedule_now); | ||
488 | //printk(KERN_ALERT "on_scheduling_timer at %llu, upd:%llu (for cpu=%d) g_schedule_now:%d\n", now, update, state->cpu, global_schedule_now); | ||
489 | if (update <= now) { | ||
490 | litmus_reschedule_local(); | ||
491 | } else if (update != SUP_NO_SCHEDULER_UPDATE) { | ||
492 | hrtimer_set_expires(timer, ns_to_ktime(update)); | ||
493 | restart = HRTIMER_RESTART; | ||
494 | } | ||
495 | |||
496 | BUG_ON(global_schedule_now < 0 || global_schedule_now > 4); | ||
497 | |||
498 | /* Find the lowest cpu, and call reschedule */ | ||
499 | while (global_schedule_now--) { | ||
500 | int cpu = get_lowest_prio_cpu(0); | ||
501 | if (cpu != NO_CPU) { | ||
502 | raw_spin_lock(&_lowest_prio_cpu.lock); | ||
503 | _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; | ||
504 | raw_spin_unlock(&_lowest_prio_cpu.lock); | ||
505 | //TRACE("LOWEST CPU = P%d\n", cpu); | ||
506 | litmus_reschedule(cpu); | ||
507 | } | ||
508 | } | ||
509 | |||
510 | raw_spin_unlock(&state->lock); | ||
511 | raw_spin_unlock_irqrestore(&_global_env.lock, flags); | ||
512 | //printk(KERN_ALERT "P%d on_scheduling_timer() release lock %s/%d\n", state->cpu, current ? (current)->comm : "null", current ? (current)->pid : 0); | ||
513 | return restart; | ||
514 | } | ||
515 | |||
516 | /* mc2_dispatch - Select the next task to schedule. | ||
517 | */ | ||
518 | struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, struct mc2_cpu_state* state) | ||
519 | { | ||
520 | struct reservation *res, *next; | ||
521 | struct task_struct *tsk = NULL; | ||
522 | struct crit_entry *ce; | ||
523 | enum crit_level lv; | ||
524 | lt_t time_slice, cur_priority; | ||
525 | |||
526 | list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) { | ||
527 | if (res->state == RESERVATION_ACTIVE) { | ||
528 | tsk = res->ops->dispatch_client(res, &time_slice); | ||
529 | if (likely(tsk)) { | ||
530 | lv = get_task_crit_level(tsk); | ||
531 | if (lv == NUM_CRIT_LEVELS) { | ||
532 | sup_scheduler_update_after(sup_env, res->cur_budget); | ||
533 | return tsk; | ||
534 | } else { | ||
535 | ce = &state->crit_entries[lv]; | ||
536 | if (likely(!ce->running)) { | ||
537 | /* If we found the next task, clear all flags */ | ||
538 | sup_scheduler_update_after(sup_env, res->cur_budget); | ||
539 | res->blocked_by_ghost = 0; | ||
540 | res->is_ghost = 0; | ||
541 | return tsk; | ||
542 | } else { | ||
543 | /* We cannot schedule a task of the same criticality | ||
544 | because a ghost job exists. Set the blocked_by_ghost | ||
545 | flag so that no budget is charged */ | ||
546 | res->blocked_by_ghost = 1; | ||
547 | TRACE_TASK(ce->running, " is GHOST\n"); | ||
548 | } | ||
549 | } | ||
550 | } | ||
551 | } | ||
552 | } | ||
553 | |||
554 | /* no eligible level-A or level-B task exists */ | ||
555 | /* check the ghost job */ | ||
556 | ce = &state->crit_entries[CRIT_LEVEL_C]; | ||
557 | if (ce->running) { | ||
558 | TRACE_TASK(ce->running," is GHOST\n"); | ||
559 | return NULL; | ||
560 | } | ||
561 | |||
562 | cur_priority = _lowest_prio_cpu.cpu_entries[state->cpu].deadline; | ||
563 | |||
564 | TRACE("****** ACTIVE LIST ******\n"); | ||
565 | TRACE_TASK(_lowest_prio_cpu.cpu_entries[state->cpu].scheduled, "** CURRENT JOB deadline %llu **\n", cur_priority); | ||
566 | list_for_each_entry_safe(res, next, &_global_env.active_reservations, list) { | ||
567 | TRACE("R%d deadline=%llu, scheduled_on=%d\n", res->id, res->priority, res->scheduled_on); | ||
568 | if (res->state == RESERVATION_ACTIVE && res->scheduled_on == NO_CPU) { | ||
569 | tsk = res->ops->dispatch_client(res, &time_slice); | ||
570 | if (likely(tsk)) { | ||
571 | lv = get_task_crit_level(tsk); | ||
572 | if (lv == NUM_CRIT_LEVELS) { | ||
573 | gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN); | ||
574 | //res->event_added = 1; | ||
575 | return tsk; | ||
576 | } else if (lv == CRIT_LEVEL_C) { | ||
577 | //ce = &state->crit_entries[lv]; | ||
578 | //if (likely(!ce->running)) { | ||
579 | gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN); | ||
580 | res->event_added = 1; | ||
581 | res->blocked_by_ghost = 0; | ||
582 | res->is_ghost = 0; | ||
583 | res->scheduled_on = state->cpu; | ||
584 | return tsk; | ||
585 | //} else { | ||
586 | // res->blocked_by_ghost = 1; | ||
587 | // TRACE_TASK(ce->running, " is GHOST\n"); | ||
588 | // return NULL; | ||
589 | //} | ||
590 | } else { | ||
591 | BUG(); | ||
592 | } | ||
593 | } | ||
594 | } | ||
595 | } | ||
596 | |||
597 | return NULL; | ||
598 | } | ||
599 | |||
600 | /* not used now */ | ||
601 | static void pre_schedule(struct task_struct *prev) | ||
602 | { | ||
603 | enum crit_level lv; | ||
604 | if (!prev || !is_realtime(prev)) | ||
605 | return; | ||
606 | |||
607 | lv = get_task_crit_level(prev); | ||
608 | } | ||
609 | |||
610 | /* not used now */ | ||
611 | static void post_schedule(struct task_struct *next) | ||
612 | { | ||
613 | enum crit_level lv; | ||
614 | if (!next || !is_realtime(next)) | ||
615 | return; | ||
616 | |||
617 | lv = get_task_crit_level(next); | ||
618 | } | ||
619 | |||
620 | /* mc2_schedule - main scheduler function. pick the next task to run | ||
621 | */ | ||
622 | static struct task_struct* mc2_schedule(struct task_struct * prev) | ||
623 | { | ||
624 | /* next == NULL means "schedule background work". */ | ||
625 | lt_t now; | ||
626 | struct mc2_cpu_state *state = local_cpu_state(); | ||
627 | |||
628 | pre_schedule(prev); | ||
629 | |||
630 | raw_spin_lock(&_global_env.lock); | ||
631 | raw_spin_lock(&state->lock); | ||
632 | |||
633 | //BUG_ON(state->scheduled && state->scheduled != prev); | ||
634 | //BUG_ON(state->scheduled && !is_realtime(prev)); | ||
635 | if (state->scheduled && state->scheduled != prev) | ||
636 | printk(KERN_ALERT "BUG1!!!!!!!! %s %s\n", state->scheduled ? (state->scheduled)->comm : "null", prev ? (prev)->comm : "null"); | ||
637 | if (state->scheduled && !is_realtime(prev)) | ||
638 | printk(KERN_ALERT "BUG2!!!!!!!! \n"); | ||
639 | |||
640 | /* update time */ | ||
641 | state->sup_env.will_schedule = true; | ||
642 | |||
643 | now = litmus_clock(); | ||
644 | sup_update_time(&state->sup_env, now); | ||
645 | gmp_update_time(&_global_env, now); | ||
646 | |||
647 | mc2_update_ghost_state(state); | ||
648 | |||
649 | /* remove task from reservation if it blocks */ | ||
650 | if (is_realtime(prev) && !is_running(prev)) | ||
651 | task_departs(prev, is_completed(prev)); | ||
652 | |||
653 | raw_spin_lock(&_lowest_prio_cpu.lock); | ||
654 | _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false; | ||
655 | |||
656 | /* figure out what to schedule next */ | ||
657 | state->scheduled = mc2_dispatch(&state->sup_env, state); | ||
658 | if (state->scheduled && is_realtime(state->scheduled)) | ||
659 | TRACE_TASK(state->scheduled, "mc2_dispatch picked me!\n"); | ||
660 | |||
661 | update_cpu_prio(state); | ||
662 | raw_spin_unlock(&_lowest_prio_cpu.lock); | ||
663 | |||
664 | /* Notify LITMUS^RT core that we've arrived at a scheduling decision. */ | ||
665 | sched_state_task_picked(); | ||
666 | |||
667 | /* program scheduler timer */ | ||
668 | state->sup_env.will_schedule = false; | ||
669 | |||
670 | /* NOTE: drops state->lock */ | ||
671 | mc2_update_timer_and_unlock(state); | ||
672 | |||
673 | |||
674 | |||
675 | if (prev != state->scheduled && is_realtime(prev)) { | ||
676 | struct mc2_task_state* tinfo = get_mc2_state(prev); | ||
677 | struct reservation* res = tinfo->res_info.client.reservation; | ||
678 | TRACE_TASK(prev, "PREV JOB scheduled_on = P%d\n", res->scheduled_on); | ||
679 | res->scheduled_on = NO_CPU; | ||
680 | TRACE_TASK(prev, "descheduled.\n"); | ||
681 | /* if prev is preempted and a global task, find the lowest cpu and reschedule */ | ||
682 | if (tinfo->has_departed == false && get_task_crit_level(prev) == CRIT_LEVEL_C) { | ||
683 | int cpu; | ||
684 | raw_spin_lock(&_global_env.lock); | ||
685 | cpu = get_lowest_prio_cpu(res?res->priority:0); | ||
686 | //TRACE("LEVEL-C TASK PREEMPTED!! poking CPU %d to reschedule\n", cpu); | ||
687 | if (cpu != NO_CPU) { | ||
688 | raw_spin_lock(&_lowest_prio_cpu.lock); | ||
689 | _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; | ||
690 | raw_spin_unlock(&_lowest_prio_cpu.lock); | ||
691 | litmus_reschedule(cpu); | ||
692 | } | ||
693 | raw_spin_unlock(&_global_env.lock); | ||
694 | } | ||
695 | } | ||
696 | if (state->scheduled) { | ||
697 | TRACE_TASK(state->scheduled, "scheduled.\n"); | ||
698 | } | ||
699 | |||
700 | post_schedule(state->scheduled); | ||
701 | |||
702 | return state->scheduled; | ||
703 | } | ||
704 | |||
705 | static void resume_legacy_task_model_updates(struct task_struct *tsk) | ||
706 | { | ||
707 | lt_t now; | ||
708 | if (is_sporadic(tsk)) { | ||
709 | /* If this sporadic task was gone for a "long" time and woke up past | ||
710 | * its deadline, then give it a new budget by triggering a job | ||
711 | * release. This is purely cosmetic and has no effect on the | ||
712 | * MC2 scheduler. */ | ||
713 | |||
714 | now = litmus_clock(); | ||
715 | if (is_tardy(tsk, now)) { | ||
716 | release_at(tsk, now); | ||
717 | sched_trace_task_release(tsk); | ||
718 | } | ||
719 | } | ||
720 | } | ||
721 | |||
722 | /* mc2_task_resume - Called when the state of tsk changes back to | ||
723 | * TASK_RUNNING. We need to requeue the task. | ||
724 | */ | ||
725 | static void mc2_task_resume(struct task_struct *tsk) | ||
726 | { | ||
727 | unsigned long flags; | ||
728 | struct mc2_task_state* tinfo = get_mc2_state(tsk); | ||
729 | struct mc2_cpu_state *state; | ||
730 | |||
731 | TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock()); | ||
732 | |||
733 | local_irq_save(flags); | ||
734 | if (tinfo->cpu != -1) | ||
735 | state = cpu_state_for(tinfo->cpu); | ||
736 | else | ||
737 | state = local_cpu_state(); | ||
738 | |||
739 | raw_spin_lock(&_global_env.lock); | ||
740 | //printk(KERN_ALERT "P%d resume() hold lock\n", state->cpu); | ||
741 | /* Requeue only if self-suspension was already processed. */ | ||
742 | if (tinfo->has_departed) | ||
743 | { | ||
744 | raw_spin_lock(&state->lock); | ||
745 | /* Assumption: litmus_clock() is synchronized across cores, | ||
746 | * since we might not actually be executing on tinfo->cpu | ||
747 | * at the moment. */ | ||
748 | if (tinfo->cpu != -1) { | ||
749 | sup_update_time(&state->sup_env, litmus_clock()); | ||
750 | } else { | ||
751 | //TRACE("RESUME UPDATE ####\n"); | ||
752 | gmp_update_time(&_global_env, litmus_clock()); | ||
753 | //TRACE("RESUME UPDATE $$$$\n"); | ||
754 | } | ||
755 | |||
756 | mc2_update_ghost_state(state); | ||
757 | task_arrives(state, tsk); | ||
758 | /* NOTE: drops state->lock */ | ||
759 | TRACE_TASK(tsk, "mc2_resume()\n"); | ||
760 | mc2_update_timer_and_unlock(state); | ||
761 | //printk(KERN_ALERT "P%d resume() dropped lock\n", state->cpu); | ||
762 | } else { | ||
763 | TRACE_TASK(tsk, "resume event ignored, still scheduled\n"); | ||
764 | raw_spin_unlock(&_global_env.lock); | ||
765 | //printk(KERN_ALERT "P%d resume() release lock\n", state->cpu); | ||
766 | } | ||
767 | |||
768 | local_irq_restore(flags); | ||
769 | |||
770 | resume_legacy_task_model_updates(tsk); | ||
771 | } | ||
772 | |||
773 | /* mc2_complete_job - syscall backend for job completions | ||
774 | */ | ||
775 | static long mc2_complete_job(void) | ||
776 | { | ||
777 | ktime_t next_release; | ||
778 | long err; | ||
779 | |||
780 | TRACE_CUR("mc2_complete_job at %llu (deadline: %llu)\n", litmus_clock(), | ||
781 | get_deadline(current)); | ||
782 | |||
783 | tsk_rt(current)->completed = 1; | ||
784 | |||
785 | /* If this is the first job instance, we need to reset the | ||
786 | replenishment time to the next release time */ | ||
787 | if (tsk_rt(current)->sporadic_release) { | ||
788 | struct mc2_cpu_state *state; | ||
789 | struct reservation_environment *env; | ||
790 | struct mc2_task_state *tinfo; | ||
791 | struct reservation *res; | ||
792 | unsigned long flags; | ||
793 | |||
794 | preempt_disable(); | ||
795 | local_irq_save(flags); | ||
796 | |||
797 | tinfo = get_mc2_state(current); | ||
798 | |||
799 | if (get_task_crit_level(current) < CRIT_LEVEL_C) | ||
800 | state = cpu_state_for(tinfo->cpu); | ||
801 | else | ||
802 | state = local_cpu_state(); | ||
803 | |||
804 | raw_spin_lock(&_global_env.lock); | ||
805 | raw_spin_lock(&state->lock); | ||
806 | //printk(KERN_ALERT "P%d complete() hold lock\n", state->cpu); | ||
807 | env = &(state->sup_env.env); | ||
808 | |||
809 | res = res_find_by_id(state, tinfo->mc2_param.res_id); | ||
810 | |||
811 | if (get_task_crit_level(current) < CRIT_LEVEL_C) { | ||
812 | env->time_zero = tsk_rt(current)->sporadic_release_time; | ||
813 | } else { | ||
814 | _global_env.env.time_zero = tsk_rt(current)->sporadic_release_time; | ||
815 | } | ||
816 | |||
817 | /* set next_replenishment to the synchronous release time */ | ||
818 | res->next_replenishment = tsk_rt(current)->sporadic_release_time; | ||
819 | |||
820 | if (get_task_crit_level(current) == CRIT_LEVEL_A) { | ||
821 | struct table_driven_reservation *tdres; | ||
822 | tdres = container_of(res, struct table_driven_reservation, res); | ||
823 | tdres->next_interval = 0; | ||
824 | tdres->major_cycle_start = tsk_rt(current)->sporadic_release_time; | ||
825 | res->next_replenishment += tdres->intervals[0].start; | ||
826 | } | ||
827 | res->cur_budget = 0; | ||
828 | res->env->change_state(res->env, res, RESERVATION_DEPLETED); | ||
829 | |||
830 | //TRACE_CUR("CHANGE NEXT_REP = %llu\n NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env.next_scheduler_update); | ||
831 | |||
832 | raw_spin_unlock(&state->lock); | ||
833 | raw_spin_unlock(&_global_env.lock); | ||
834 | //printk(KERN_ALERT "P%d complete() release lock\n", state->cpu); | ||
835 | local_irq_restore(flags); | ||
836 | preempt_enable(); | ||
837 | } | ||
838 | sched_trace_task_completion(current, 0); | ||
839 | |||
840 | /* update the next release time and deadline */ | ||
841 | prepare_for_next_period(current); | ||
842 | |||
843 | next_release = ns_to_ktime(get_release(current)); | ||
844 | preempt_disable(); | ||
845 | TRACE_CUR("next_release=%llu\n", get_release(current)); | ||
846 | if (get_release(current) > litmus_clock()) { | ||
847 | /* sleep until next_release */ | ||
848 | set_current_state(TASK_INTERRUPTIBLE); | ||
849 | preempt_enable_no_resched(); | ||
850 | err = schedule_hrtimeout(&next_release, HRTIMER_MODE_ABS); | ||
851 | if (get_task_crit_level(current) == CRIT_LEVEL_A) | ||
852 | sched_trace_task_release(current); | ||
853 | } else { | ||
854 | /* release the next job immediately */ | ||
855 | err = 0; | ||
856 | TRACE_CUR("TARDY: release=%llu now=%llu\n", get_release(current), litmus_clock()); | ||
857 | preempt_enable(); | ||
858 | if (get_task_crit_level(current) == CRIT_LEVEL_A) | ||
859 | sched_trace_task_release(current); | ||
860 | } | ||
861 | |||
862 | TRACE_CUR("mc2_complete_job returns at %llu\n", litmus_clock()); | ||
863 | |||
864 | return err; | ||
865 | } | ||
866 | |||
867 | /* mc2_admit_task - Set up mc2 task parameters | ||
868 | */ | ||
869 | static long mc2_admit_task(struct task_struct *tsk) | ||
870 | { | ||
871 | long err = -ESRCH; | ||
872 | unsigned long flags; | ||
873 | struct reservation *res; | ||
874 | struct mc2_cpu_state *state; | ||
875 | struct mc2_task_state *tinfo = kzalloc(sizeof(*tinfo), GFP_ATOMIC); | ||
876 | struct mc2_task *mp = tsk_rt(tsk)->mc2_data; | ||
877 | enum crit_level lv; | ||
878 | |||
879 | if (!tinfo) | ||
880 | return -ENOMEM; | ||
881 | |||
882 | if (!mp) { | ||
883 | printk(KERN_ERR "mc2_admit_task: criticality level has not been set\n"); | ||
884 | return err; | ||
885 | } | ||
886 | |||
887 | lv = mp->crit; | ||
888 | preempt_disable(); | ||
889 | |||
890 | if (lv < CRIT_LEVEL_C) { | ||
891 | state = cpu_state_for(task_cpu(tsk)); | ||
892 | raw_spin_lock_irqsave(&state->lock, flags); | ||
893 | |||
894 | res = sup_find_by_id(&state->sup_env, mp->res_id); | ||
895 | |||
896 | /* found the appropriate reservation */ | ||
897 | if (res) { | ||
898 | TRACE_TASK(tsk, "SUP FOUND RES ID\n"); | ||
899 | tinfo->mc2_param.crit = mp->crit; | ||
900 | tinfo->mc2_param.res_id = mp->res_id; | ||
901 | |||
902 | /* initial values */ | ||
903 | err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res); | ||
904 | tinfo->cpu = task_cpu(tsk); | ||
905 | tinfo->has_departed = true; | ||
906 | tsk_rt(tsk)->plugin_state = tinfo; | ||
907 | |||
908 | /* disable LITMUS^RT's per-thread budget enforcement */ | ||
909 | tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT; | ||
910 | } | ||
911 | |||
912 | raw_spin_unlock_irqrestore(&state->lock, flags); | ||
913 | } else if (lv == CRIT_LEVEL_C) { | ||
914 | raw_spin_lock_irqsave(&_global_env.lock, flags); | ||
915 | //printk(KERN_ALERT "admit() hold lock\n"); | ||
916 | state = local_cpu_state(); | ||
917 | |||
918 | raw_spin_lock(&state->lock); | ||
919 | |||
920 | res = gmp_find_by_id(&_global_env, mp->res_id); | ||
921 | |||
922 | /* found the appropriate reservation (or vCPU) */ | ||
923 | if (res) { | ||
924 | TRACE_TASK(tsk, "GMP FOUND RES ID\n"); | ||
925 | tinfo->mc2_param.crit = mp->crit; | ||
926 | tinfo->mc2_param.res_id = mp->res_id; | ||
927 | |||
928 | /* initial values */ | ||
929 | err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res); | ||
930 | tinfo->cpu = -1; | ||
931 | tinfo->has_departed = true; | ||
932 | tsk_rt(tsk)->plugin_state = tinfo; | ||
933 | |||
934 | /* disable LITMUS^RT's per-thread budget enforcement */ | ||
935 | tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT; | ||
936 | } | ||
937 | |||
938 | raw_spin_unlock(&state->lock); | ||
939 | raw_spin_unlock_irqrestore(&_global_env.lock, flags); | ||
940 | //printk(KERN_ALERT "admit() release lock\n"); | ||
941 | } | ||
942 | |||
943 | preempt_enable(); | ||
944 | |||
945 | if (err) | ||
946 | kfree(tinfo); | ||
947 | |||
948 | return err; | ||
949 | } | ||
950 | |||
951 | /* mc2_task_new - A new real-time task has arrived. Release the next job | ||
952 | * at the next reservation replenishment time | ||
953 | */ | ||
954 | static void mc2_task_new(struct task_struct *tsk, int on_runqueue, | ||
955 | int is_running) | ||
956 | { | ||
957 | unsigned long flags; | ||
958 | struct mc2_task_state* tinfo = get_mc2_state(tsk); | ||
959 | struct mc2_cpu_state *state; // = cpu_state_for(tinfo->cpu); | ||
960 | struct reservation *res; | ||
961 | enum crit_level lv = get_task_crit_level(tsk); | ||
962 | lt_t release = 0; | ||
963 | |||
964 | TRACE_TASK(tsk, "new RT task %llu (on_rq:%d, running:%d)\n", | ||
965 | litmus_clock(), on_runqueue, is_running); | ||
966 | |||
967 | local_irq_save(flags); | ||
968 | if (tinfo->cpu == -1) | ||
969 | state = local_cpu_state(); | ||
970 | else | ||
971 | state = cpu_state_for(tinfo->cpu); | ||
972 | |||
973 | /* acquire the lock protecting the state and disable interrupts */ | ||
974 | raw_spin_lock(&_global_env.lock); | ||
975 | raw_spin_lock(&state->lock); | ||
976 | //printk(KERN_ALERT "new() hold lock R%d\n", tinfo->mc2_param.res_id); | ||
977 | if (is_running) { | ||
978 | state->scheduled = tsk; | ||
979 | /* make sure this task should actually be running */ | ||
980 | litmus_reschedule_local(); | ||
981 | } | ||
982 | |||
983 | res = res_find_by_id(state, tinfo->mc2_param.res_id); | ||
984 | release = res->next_replenishment; | ||
985 | |||
986 | if (on_runqueue || is_running) { | ||
987 | /* Assumption: litmus_clock() is synchronized across cores | ||
988 | * [see comment in mc2_task_resume()] */ | ||
989 | mc2_update_time(lv, state, litmus_clock()); | ||
990 | mc2_update_ghost_state(state); | ||
991 | task_arrives(state, tsk); | ||
992 | /* NOTE: drops state->lock */ | ||
993 | TRACE("mc2_new()\n"); | ||
994 | |||
995 | mc2_update_timer_and_unlock(state); | ||
996 | //printk(KERN_ALERT "new() dropped lock R%d\n",tinfo->mc2_param.res_id); | ||
997 | } else { | ||
998 | raw_spin_unlock(&state->lock); | ||
999 | raw_spin_unlock(&_global_env.lock); | ||
1000 | //printk(KERN_ALERT "new() release lock R%d\n",tinfo->mc2_param.res_id); | ||
1001 | } | ||
1002 | local_irq_restore(flags); | ||
1003 | |||
1004 | if (release) { | ||
1005 | TRACE_TASK(tsk, "mc2_task_new() next_release = %llu\n", release); | ||
1006 | release_at(tsk, release); | ||
1007 | } | ||
1008 | else | ||
1009 | TRACE_TASK(tsk, "mc2_task_new() next_release = NULL\n"); | ||
1010 | } | ||
1011 | |||
1012 | /* mc2_reservation_destroy - reservation_destroy system call backend | ||
1013 | */ | ||
1014 | static long mc2_reservation_destroy(unsigned int reservation_id, int cpu) | ||
1015 | { | ||
1016 | long ret = -EINVAL; | ||
1017 | struct mc2_cpu_state *state; | ||
1018 | struct reservation *res = NULL, *next; | ||
1019 | struct sup_reservation_environment *sup_env; | ||
1020 | int found = 0; | ||
1021 | enum crit_level lv = get_task_crit_level(current); | ||
1022 | unsigned long flags; | ||
1023 | |||
1024 | if (cpu == -1) { | ||
1025 | /* if the reservation is a global reservation */ | ||
1026 | local_irq_save(flags); | ||
1027 | state = local_cpu_state(); | ||
1028 | raw_spin_lock(&_global_env.lock); | ||
1029 | raw_spin_lock(&state->lock); | ||
1030 | |||
1031 | list_for_each_entry_safe(res, next, &_global_env.depleted_reservations, list) { | ||
1032 | if (res->id == reservation_id) { | ||
1033 | TRACE("DESTROY RES FOUND!!!\n"); | ||
1034 | list_del(&res->list); | ||
1035 | kfree(res); | ||
1036 | found = 1; | ||
1037 | ret = 0; | ||
1038 | } | ||
1039 | } | ||
1040 | if (!found) { | ||
1041 | list_for_each_entry_safe(res, next, &_global_env.inactive_reservations, list) { | ||
1042 | if (res->id == reservation_id) { | ||
1043 | TRACE("DESTROY RES FOUND!!!\n"); | ||
1044 | list_del(&res->list); | ||
1045 | kfree(res); | ||
1046 | found = 1; | ||
1047 | ret = 0; | ||
1048 | } | ||
1049 | } | ||
1050 | } | ||
1051 | if (!found) { | ||
1052 | list_for_each_entry_safe(res, next, &_global_env.active_reservations, list) { | ||
1053 | if (res->id == reservation_id) { | ||
1054 | TRACE("DESTROY RES FOUND!!!\n"); | ||
1055 | list_del(&res->list); | ||
1056 | kfree(res); | ||
1057 | found = 1; | ||
1058 | ret = 0; | ||
1059 | } | ||
1060 | } | ||
1061 | } | ||
1062 | |||
1063 | raw_spin_unlock(&state->lock); | ||
1064 | raw_spin_unlock(&_global_env.lock); | ||
1065 | local_irq_restore(flags); | ||
1066 | } else { | ||
1067 | /* if the reservation is a partitioned reservation */ | ||
1068 | state = cpu_state_for(cpu); | ||
1069 | raw_spin_lock_irqsave(&state->lock, flags); | ||
1070 | |||
1071 | // res = sup_find_by_id(&state->sup_env, reservation_id); | ||
1072 | sup_env = &state->sup_env; | ||
1073 | list_for_each_entry_safe(res, next, &sup_env->depleted_reservations, list) { | ||
1074 | if (res->id == reservation_id) { | ||
1075 | if (lv == CRIT_LEVEL_A) { | ||
1076 | struct table_driven_reservation *tdres; | ||
1077 | tdres = container_of(res, struct table_driven_reservation, res); | ||
1078 | kfree(tdres->intervals); | ||
1079 | } | ||
1080 | list_del(&res->list); | ||
1081 | kfree(res); | ||
1082 | found = 1; | ||
1083 | ret = 0; | ||
1084 | } | ||
1085 | } | ||
1086 | if (!found) { | ||
1087 | list_for_each_entry_safe(res, next, &sup_env->inactive_reservations, list) { | ||
1088 | if (res->id == reservation_id) { | ||
1089 | if (lv == CRIT_LEVEL_A) { | ||
1090 | struct table_driven_reservation *tdres; | ||
1091 | tdres = container_of(res, struct table_driven_reservation, res); | ||
1092 | kfree(tdres->intervals); | ||
1093 | } | ||
1094 | list_del(&res->list); | ||
1095 | kfree(res); | ||
1096 | found = 1; | ||
1097 | ret = 0; | ||
1098 | } | ||
1099 | } | ||
1100 | } | ||
1101 | if (!found) { | ||
1102 | list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) { | ||
1103 | if (res->id == reservation_id) { | ||
1104 | if (lv == CRIT_LEVEL_A) { | ||
1105 | struct table_driven_reservation *tdres; | ||
1106 | tdres = container_of(res, struct table_driven_reservation, res); | ||
1107 | kfree(tdres->intervals); | ||
1108 | } | ||
1109 | list_del(&res->list); | ||
1110 | kfree(res); | ||
1111 | found = 1; | ||
1112 | ret = 0; | ||
1113 | } | ||
1114 | } | ||
1115 | } | ||
1116 | |||
1117 | raw_spin_unlock_irqrestore(&state->lock, flags); | ||
1118 | } | ||
1119 | |||
1120 | TRACE("RESERVATION_DESTROY ret = %d\n", ret); | ||
1121 | return ret; | ||
1122 | } | ||
1123 | |||
1124 | /* mc2_task_exit - Task becomes a normal (non-real-time) task | ||
1125 | */ | ||
1126 | static void mc2_task_exit(struct task_struct *tsk) | ||
1127 | { | ||
1128 | unsigned long flags; | ||
1129 | struct mc2_task_state* tinfo = get_mc2_state(tsk); | ||
1130 | struct mc2_cpu_state *state; | ||
1131 | enum crit_level lv = tinfo->mc2_param.crit; | ||
1132 | struct crit_entry* ce; | ||
1133 | int cpu; | ||
1134 | |||
1135 | local_irq_save(flags); | ||
1136 | if (tinfo->cpu != -1) | ||
1137 | state = cpu_state_for(tinfo->cpu); | ||
1138 | else | ||
1139 | state = local_cpu_state(); | ||
1140 | |||
1141 | raw_spin_lock(&_global_env.lock); | ||
1142 | raw_spin_lock(&state->lock); | ||
1143 | |||
1144 | if (state->scheduled == tsk) | ||
1145 | state->scheduled = NULL; | ||
1146 | |||
1147 | ce = &state->crit_entries[lv]; | ||
1148 | if (ce->running == tsk) | ||
1149 | ce->running = NULL; | ||
1150 | |||
1151 | /* remove from queues */ | ||
1152 | if (is_running(tsk)) { | ||
1153 | /* Assumption: litmus_clock() is synchronized across cores | ||
1154 | * [see comment in mc2_task_resume()] */ | ||
1155 | |||
1156 | /* update both global and partitioned */ | ||
1157 | mc2_update_time(lv, state, litmus_clock()); | ||
1158 | mc2_update_ghost_state(state); | ||
1159 | task_departs(tsk, 0); | ||
1160 | |||
1161 | /* NOTE: drops state->lock */ | ||
1162 | TRACE("mc2_exit()\n"); | ||
1163 | |||
1164 | mc2_update_timer_and_unlock(state); | ||
1165 | } else { | ||
1166 | raw_spin_unlock(&state->lock); | ||
1167 | raw_spin_unlock(&_global_env.lock); | ||
1168 | } | ||
1169 | |||
1170 | for_each_online_cpu(cpu) { | ||
1171 | state = cpu_state_for(cpu); | ||
1172 | if (state == local_cpu_state()) | ||
1173 | continue; | ||
1174 | raw_spin_lock(&state->lock); | ||
1175 | |||
1176 | if (state->scheduled == tsk) | ||
1177 | state->scheduled = NULL; | ||
1178 | |||
1179 | ce = &state->crit_entries[lv]; | ||
1180 | if (ce->running == tsk) | ||
1181 | ce->running = NULL; | ||
1182 | |||
1183 | raw_spin_unlock(&state->lock); | ||
1184 | } | ||
1185 | |||
1186 | local_irq_restore(flags); | ||
1187 | |||
1188 | kfree(tsk_rt(tsk)->plugin_state); | ||
1189 | tsk_rt(tsk)->plugin_state = NULL; | ||
1190 | kfree(tsk_rt(tsk)->mc2_data); | ||
1191 | tsk_rt(tsk)->mc2_data = NULL; | ||
1192 | } | ||
1193 | |||
1194 | /* create_polling_reservation - create a new polling reservation | ||
1195 | */ | ||
1196 | static long create_polling_reservation( | ||
1197 | int res_type, | ||
1198 | struct reservation_config *config) | ||
1199 | { | ||
1200 | struct mc2_cpu_state *state; | ||
1201 | struct reservation* res; | ||
1202 | struct polling_reservation *pres; | ||
1203 | unsigned long flags; | ||
1204 | int use_edf = config->priority == LITMUS_NO_PRIORITY; | ||
1205 | int periodic = res_type == PERIODIC_POLLING; | ||
1206 | long err = -EINVAL; | ||
1207 | |||
1208 | /* sanity checks */ | ||
1209 | if (config->polling_params.budget > | ||
1210 | config->polling_params.period) { | ||
1211 | printk(KERN_ERR "invalid polling reservation (%u): " | ||
1212 | "budget > period\n", config->id); | ||
1213 | return -EINVAL; | ||
1214 | } | ||
1215 | if (config->polling_params.budget > | ||
1216 | config->polling_params.relative_deadline | ||
1217 | && config->polling_params.relative_deadline) { | ||
1218 | printk(KERN_ERR "invalid polling reservation (%u): " | ||
1219 | "budget > deadline\n", config->id); | ||
1220 | return -EINVAL; | ||
1221 | } | ||
1222 | if (config->polling_params.offset > | ||
1223 | config->polling_params.period) { | ||
1224 | printk(KERN_ERR "invalid polling reservation (%u): " | ||
1225 | "offset > period\n", config->id); | ||
1226 | return -EINVAL; | ||
1227 | } | ||
1228 | |||
1229 | /* Allocate before we grab a spin lock. | ||
1230 | * Todo: would be nice to use a core-local allocation. | ||
1231 | */ | ||
1232 | pres = kzalloc(sizeof(*pres), GFP_KERNEL); | ||
1233 | if (!pres) | ||
1234 | return -ENOMEM; | ||
1235 | |||
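| /* cpu != -1 selects a partitioned reservation in that CPU's sup_env; | ||
|  * cpu == -1 selects a globally scheduled reservation in _global_env. | ||
|  */ | ||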
1236 | if (config->cpu != -1) { | ||
1237 | |||
1238 | raw_spin_lock_irqsave(&_global_env.lock, flags); | ||
1239 | state = cpu_state_for(config->cpu); | ||
1240 | raw_spin_lock(&state->lock); | ||
1241 | |||
1242 | res = sup_find_by_id(&state->sup_env, config->id); | ||
1243 | if (!res) { | ||
1244 | polling_reservation_init(pres, use_edf, periodic, | ||
1245 | config->polling_params.budget, | ||
1246 | config->polling_params.period, | ||
1247 | config->polling_params.relative_deadline, | ||
1248 | config->polling_params.offset); | ||
1249 | pres->res.id = config->id; | ||
1250 | pres->res.blocked_by_ghost = 0; | ||
1251 | pres->res.is_ghost = 0; | ||
1252 | if (!use_edf) | ||
1253 | pres->res.priority = config->priority; | ||
1254 | sup_add_new_reservation(&state->sup_env, &pres->res); | ||
1255 | err = config->id; | ||
1256 | } else { | ||
1257 | err = -EEXIST; | ||
1258 | } | ||
1259 | |||
1260 | raw_spin_unlock(&state->lock); | ||
1261 | raw_spin_unlock_irqrestore(&_global_env.lock, flags); | ||
1262 | |||
1263 | } else { | ||
1264 | raw_spin_lock_irqsave(&_global_env.lock, flags); | ||
1265 | |||
1266 | res = gmp_find_by_id(&_global_env, config->id); | ||
1267 | if (!res) { | ||
1268 | polling_reservation_init(pres, use_edf, periodic, | ||
1269 | config->polling_params.budget, | ||
1270 | config->polling_params.period, | ||
1271 | config->polling_params.relative_deadline, | ||
1272 | config->polling_params.offset); | ||
1273 | pres->res.id = config->id; | ||
1274 | pres->res.blocked_by_ghost = 0; | ||
1275 | pres->res.scheduled_on = NO_CPU; | ||
1276 | pres->res.is_ghost = 0; | ||
1277 | if (!use_edf) | ||
1278 | pres->res.priority = config->priority; | ||
1279 | gmp_add_new_reservation(&_global_env, &pres->res); | ||
1280 | TRACE("GMP_ADD_NEW_RESERVATION R%d\n", pres->res.id); | ||
1281 | err = config->id; | ||
1282 | } else { | ||
1283 | err = -EEXIST; | ||
1284 | } | ||
1285 | raw_spin_unlock_irqrestore(&_global_env.lock, flags); | ||
1286 | } | ||
1287 | |||
1288 | if (err < 0) | ||
1289 | kfree(pres); | ||
1290 | |||
1291 | return err; | ||
1292 | } | ||
1293 | |||
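| /* Cap on the number of user-supplied intervals so the slot table | ||
|  * allocated below stays a bounded size. | ||
|  */ | ||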
1294 | #define MAX_INTERVALS 1024 | ||
1295 | |||
1296 | /* create_table_driven_reservation - create a table_driven reservation | ||
1297 | */ | ||
1298 | static long create_table_driven_reservation( | ||
1299 | struct reservation_config *config) | ||
1300 | { | ||
1301 | struct mc2_cpu_state *state; | ||
1302 | struct reservation* res; | ||
1303 | struct table_driven_reservation *td_res = NULL; | ||
1304 | struct lt_interval *slots = NULL; | ||
1305 | size_t slots_size; | ||
1306 | unsigned int i, num_slots; | ||
1307 | unsigned long flags; | ||
1308 | long err = -EINVAL; | ||
1309 | |||
1310 | |||
1311 | if (!config->table_driven_params.num_intervals) { | ||
1312 | printk(KERN_ERR "invalid table-driven reservation (%u): " | ||
1313 | "no intervals\n", config->id); | ||
1314 | return -EINVAL; | ||
1315 | } | ||
1316 | |||
1317 | if (config->table_driven_params.num_intervals > MAX_INTERVALS) { | ||
1318 | printk(KERN_ERR "invalid table-driven reservation (%u): " | ||
1319 | "too many intervals (max: %d)\n", config->id, MAX_INTERVALS); | ||
1320 | return -EINVAL; | ||
1321 | } | ||
1322 | |||
1323 | num_slots = config->table_driven_params.num_intervals; | ||
1324 | slots_size = sizeof(slots[0]) * num_slots; | ||
1325 | slots = kzalloc(slots_size, GFP_KERNEL); | ||
1326 | if (!slots) | ||
1327 | return -ENOMEM; | ||
1328 | |||
1329 | td_res = kzalloc(sizeof(*td_res), GFP_KERNEL); | ||
1330 | if (!td_res) | ||
1331 | err = -ENOMEM; | ||
1332 | else | ||
1333 | err = copy_from_user(slots, | ||
1334 | config->table_driven_params.intervals, slots_size) ? -EFAULT : 0; | ||
1335 | |||
1336 | if (!err) { | ||
1337 | /* sanity checks */ | ||
1338 | for (i = 0; !err && i < num_slots; i++) | ||
1339 | if (slots[i].end <= slots[i].start) { | ||
1340 | printk(KERN_ERR | ||
1341 | "invalid table-driven reservation (%u): " | ||
1342 | "invalid interval %u => [%llu, %llu]\n", | ||
1343 | config->id, i, | ||
1344 | slots[i].start, slots[i].end); | ||
1345 | err = -EINVAL; | ||
1346 | } | ||
1347 | |||
1348 | for (i = 0; !err && i + 1 < num_slots; i++) | ||
1349 | if (slots[i + 1].start <= slots[i].end) { | ||
1350 | printk(KERN_ERR | ||
1351 | "invalid table-driven reservation (%u): " | ||
1352 | "overlapping intervals %u, %u\n", | ||
1353 | config->id, i, i + 1); | ||
1354 | err = -EINVAL; | ||
1355 | } | ||
1356 | |||
1357 | if (slots[num_slots - 1].end > | ||
1358 | config->table_driven_params.major_cycle_length) { | ||
1359 | printk(KERN_ERR | ||
1360 | "invalid table-driven reservation (%u): last " | ||
1361 | "interval ends past major cycle %llu > %llu\n", | ||
1362 | config->id, | ||
1363 | slots[num_slots - 1].end, | ||
1364 | config->table_driven_params.major_cycle_length); | ||
1365 | err = -EINVAL; | ||
1366 | } | ||
1367 | } | ||
1368 | |||
1369 | if (!err) { | ||
1370 | state = cpu_state_for(config->cpu); | ||
1371 | raw_spin_lock_irqsave(&state->lock, flags); | ||
1372 | |||
1373 | res = sup_find_by_id(&state->sup_env, config->id); | ||
1374 | if (!res) { | ||
1375 | table_driven_reservation_init(td_res, | ||
1376 | config->table_driven_params.major_cycle_length, | ||
1377 | slots, num_slots); | ||
1378 | td_res->res.id = config->id; | ||
1379 | td_res->res.priority = config->priority; | ||
1380 | td_res->res.blocked_by_ghost = 0; | ||
1381 | sup_add_new_reservation(&state->sup_env, &td_res->res); | ||
1382 | err = config->id; | ||
1383 | } else { | ||
1384 | err = -EEXIST; | ||
1385 | } | ||
1386 | |||
1387 | raw_spin_unlock_irqrestore(&state->lock, flags); | ||
1388 | } | ||
1389 | |||
1390 | if (err < 0) { | ||
1391 | kfree(slots); | ||
1392 | kfree(td_res); | ||
1393 | } | ||
1394 | |||
1395 | return err; | ||
1396 | } | ||
1397 | |||
1398 | /* mc2_reservation_create - reservation_create system call backend | ||
1399 | */ | ||
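| /* Userspace presumably fills in a struct reservation_config (id, cpu, | ||
|  * priority, and the polling or table-driven parameters) and passes it | ||
|  * here via the reservation_create() system call. | ||
|  */ | ||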
1400 | static long mc2_reservation_create(int res_type, void* __user _config) | ||
1401 | { | ||
1402 | long ret = -EINVAL; | ||
1403 | struct reservation_config config; | ||
1404 | |||
1405 | TRACE("Attempt to create reservation (%d)\n", res_type); | ||
1406 | |||
1407 | if (copy_from_user(&config, _config, sizeof(config))) | ||
1408 | return -EFAULT; | ||
1409 | |||
1410 | if (config.cpu != -1) { | ||
1411 | if (config.cpu < 0 || !cpu_online(config.cpu)) { | ||
1412 | printk(KERN_ERR "invalid reservation (%u): " | ||
1413 | "CPU %d offline\n", config.id, config.cpu); | ||
1414 | return -EINVAL; | ||
1415 | } | ||
1416 | } | ||
1417 | |||
1418 | switch (res_type) { | ||
1419 | case PERIODIC_POLLING: | ||
1420 | case SPORADIC_POLLING: | ||
1421 | ret = create_polling_reservation(res_type, &config); | ||
1422 | break; | ||
1423 | |||
1424 | case TABLE_DRIVEN: | ||
1425 | ret = create_table_driven_reservation(&config); | ||
1426 | break; | ||
1427 | |||
1428 | default: | ||
1429 | return -EINVAL; | ||
1430 | } | ||
1431 | |||
1432 | return ret; | ||
1433 | } | ||
1434 | |||
1435 | static struct domain_proc_info mc2_domain_proc_info; | ||
1436 | |||
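| /* Return the domain mapping built by mc2_setup_domain_proc() for the | ||
|  * /proc interface. | ||
|  */ | ||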
1437 | static long mc2_get_domain_proc_info(struct domain_proc_info **ret) | ||
1438 | { | ||
1439 | *ret = &mc2_domain_proc_info; | ||
1440 | return 0; | ||
1441 | } | ||
1442 | |||
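| /* Publish a 1:1 CPU-to-domain mapping: every online CPU is treated as | ||
|  * its own scheduling domain. | ||
|  */ | ||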
1443 | static void mc2_setup_domain_proc(void) | ||
1444 | { | ||
1445 | int i, cpu; | ||
1446 | int num_rt_cpus = num_online_cpus(); | ||
1447 | |||
1448 | struct cd_mapping *cpu_map, *domain_map; | ||
1449 | |||
1450 | memset(&mc2_domain_proc_info, 0, sizeof(mc2_domain_proc_info)); | ||
1451 | init_domain_proc_info(&mc2_domain_proc_info, num_rt_cpus, num_rt_cpus); | ||
1452 | mc2_domain_proc_info.num_cpus = num_rt_cpus; | ||
1453 | mc2_domain_proc_info.num_domains = num_rt_cpus; | ||
1454 | |||
1455 | i = 0; | ||
1456 | for_each_online_cpu(cpu) { | ||
1457 | cpu_map = &mc2_domain_proc_info.cpu_to_domains[i]; | ||
1458 | domain_map = &mc2_domain_proc_info.domain_to_cpus[i]; | ||
1459 | |||
1460 | cpu_map->id = cpu; | ||
1461 | domain_map->id = i; | ||
1462 | cpumask_set_cpu(i, cpu_map->mask); | ||
1463 | cpumask_set_cpu(cpu, domain_map->mask); | ||
1464 | ++i; | ||
1465 | } | ||
1466 | } | ||
1467 | |||
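| /* Plugin activation: initialize the global environment, the lowest-priority | ||
|  * CPU tracking, and each CPU's state, criticality entries, and timer. | ||
|  */ | ||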
1468 | static long mc2_activate_plugin(void) | ||
1469 | { | ||
1470 | int cpu, lv; | ||
1471 | struct mc2_cpu_state *state; | ||
1472 | struct cpu_entry *ce; | ||
1473 | |||
1474 | gmp_init(&_global_env); | ||
1475 | raw_spin_lock_init(&_lowest_prio_cpu.lock); | ||
1476 | |||
1477 | for_each_online_cpu(cpu) { | ||
1478 | TRACE("Initializing CPU%d...\n", cpu); | ||
1479 | |||
1480 | state = cpu_state_for(cpu); | ||
1481 | ce = &_lowest_prio_cpu.cpu_entries[cpu]; | ||
1482 | |||
1483 | ce->cpu = cpu; | ||
1484 | ce->scheduled = NULL; | ||
1485 | ce->deadline = ULLONG_MAX; | ||
1486 | ce->lv = NUM_CRIT_LEVELS; | ||
1487 | ce->will_schedule = false; | ||
1488 | |||
1489 | raw_spin_lock_init(&state->lock); | ||
1490 | state->cpu = cpu; | ||
1491 | state->scheduled = NULL; | ||
1492 | for (lv = 0; lv < NUM_CRIT_LEVELS; lv++) { | ||
1493 | struct crit_entry *cr_entry = &state->crit_entries[lv]; | ||
1494 | cr_entry->level = lv; | ||
1495 | cr_entry->running = NULL; | ||
1496 | } | ||
1497 | sup_init(&state->sup_env); | ||
1498 | |||
1499 | hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); | ||
1500 | state->timer.function = on_scheduling_timer; | ||
1501 | } | ||
1502 | |||
1503 | mc2_setup_domain_proc(); | ||
1504 | |||
1505 | return 0; | ||
1506 | } | ||
1507 | |||
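| /* Called after a context switch completes; remember which real-time task | ||
|  * (if any) is now running on this CPU. | ||
|  */ | ||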
1508 | static void mc2_finish_switch(struct task_struct *prev) | ||
1509 | { | ||
1510 | struct mc2_cpu_state *state = local_cpu_state(); | ||
1511 | |||
1512 | state->scheduled = is_realtime(current) ? current : NULL; | ||
1513 | TRACE("FINISH CXS! from %s/%d to %s/%d\n", prev ? (prev)->comm : "null", prev ? (prev)->pid : 0, current ? (current)->comm : "null", current ? (current)->pid : 0); | ||
1514 | } | ||
1515 | |||
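| /* Plugin deactivation: cancel all per-CPU timers and free every remaining | ||
|  * reservation and pending timer event, both per-CPU and global. | ||
|  */ | ||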
1516 | static long mc2_deactivate_plugin(void) | ||
1517 | { | ||
1518 | int cpu; | ||
1519 | struct mc2_cpu_state *state; | ||
1520 | struct reservation *res; | ||
1521 | struct next_timer_event *event; | ||
1522 | struct cpu_entry *ce; | ||
1523 | |||
1524 | raw_spin_lock(&_global_env.lock); | ||
1525 | |||
1526 | for_each_online_cpu(cpu) { | ||
1527 | state = cpu_state_for(cpu); | ||
1528 | raw_spin_lock(&state->lock); | ||
1529 | |||
1530 | hrtimer_cancel(&state->timer); | ||
1531 | |||
1532 | ce = &_lowest_prio_cpu.cpu_entries[cpu]; | ||
1533 | |||
1534 | ce->cpu = cpu; | ||
1535 | ce->scheduled = NULL; | ||
1536 | ce->deadline = ULLONG_MAX; | ||
1537 | ce->lv = NUM_CRIT_LEVELS; | ||
1538 | ce->will_schedule = false; | ||
1539 | |||
1540 | /* Delete all reservations --- assumes struct reservation | ||
1541 | * is prefix of containing struct. */ | ||
1542 | |||
1543 | while (!list_empty(&state->sup_env.active_reservations)) { | ||
1544 | res = list_first_entry( | ||
1545 | &state->sup_env.active_reservations, | ||
1546 | struct reservation, list); | ||
1547 | list_del(&res->list); | ||
1548 | kfree(res); | ||
1549 | } | ||
1550 | |||
1551 | while (!list_empty(&state->sup_env.inactive_reservations)) { | ||
1552 | res = list_first_entry( | ||
1553 | &state->sup_env.inactive_reservations, | ||
1554 | struct reservation, list); | ||
1555 | list_del(&res->list); | ||
1556 | kfree(res); | ||
1557 | } | ||
1558 | |||
1559 | while (!list_empty(&state->sup_env.depleted_reservations)) { | ||
1560 | res = list_first_entry( | ||
1561 | &state->sup_env.depleted_reservations, | ||
1562 | struct reservation, list); | ||
1563 | list_del(&res->list); | ||
1564 | kfree(res); | ||
1565 | } | ||
1566 | |||
1567 | raw_spin_unlock(&state->lock); | ||
1568 | } | ||
1569 | |||
1570 | |||
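| /* Drain whatever reservations and timer events remain in the global | ||
|  * environment. | ||
|  */ | ||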
1571 | while (!list_empty(&_global_env.active_reservations)) { | ||
1572 | TRACE("RES FOUND!!!\n"); | ||
1573 | res = list_first_entry( | ||
1574 | &_global_env.active_reservations, | ||
1575 | struct reservation, list); | ||
1576 | list_del(&res->list); | ||
1577 | kfree(res); | ||
1578 | } | ||
1579 | |||
1580 | while (!list_empty(&_global_env.inactive_reservations)) { | ||
1581 | TRACE("RES FOUND!!!\n"); | ||
1582 | res = list_first_entry( | ||
1583 | &_global_env.inactive_reservations, | ||
1584 | struct reservation, list); | ||
1585 | list_del(&res->list); | ||
1586 | kfree(res); | ||
1587 | } | ||
1588 | |||
1589 | while (!list_empty(&_global_env.depleted_reservations)) { | ||
1590 | TRACE("RES FOUND!!!\n"); | ||
1591 | res = list_first_entry( | ||
1592 | &_global_env.depleted_reservations, | ||
1593 | struct reservation, list); | ||
1594 | list_del(&res->list); | ||
1595 | kfree(res); | ||
1596 | } | ||
1597 | |||
1598 | while (!list_empty(&_global_env.next_events)) { | ||
1599 | TRACE("EVENT FOUND!!!\n"); | ||
1600 | event = list_first_entry( | ||
1601 | &_global_env.next_events, | ||
1602 | struct next_timer_event, list); | ||
1603 | list_del(&event->list); | ||
1604 | kfree(event); | ||
1605 | } | ||
1606 | |||
1607 | raw_spin_unlock(&_global_env.lock); | ||
1608 | |||
1609 | destroy_domain_proc_info(&mc2_domain_proc_info); | ||
1610 | return 0; | ||
1611 | } | ||
1612 | |||
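| /* MC^2 callbacks registered with the LITMUS^RT plugin infrastructure. */ | ||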
1613 | static struct sched_plugin mc2_plugin = { | ||
1614 | .plugin_name = "MC2", | ||
1615 | .schedule = mc2_schedule, | ||
1616 | .finish_switch = mc2_finish_switch, | ||
1617 | .task_wake_up = mc2_task_resume, | ||
1618 | .admit_task = mc2_admit_task, | ||
1619 | .task_new = mc2_task_new, | ||
1620 | .task_exit = mc2_task_exit, | ||
1621 | .complete_job = mc2_complete_job, | ||
1622 | .get_domain_proc_info = mc2_get_domain_proc_info, | ||
1623 | .activate_plugin = mc2_activate_plugin, | ||
1624 | .deactivate_plugin = mc2_deactivate_plugin, | ||
1625 | .reservation_create = mc2_reservation_create, | ||
1626 | .reservation_destroy = mc2_reservation_destroy, | ||
1627 | }; | ||
1628 | |||
1629 | static int __init init_mc2(void) | ||
1630 | { | ||
1631 | return register_sched_plugin(&mc2_plugin); | ||
1632 | } | ||
1633 | |||
1634 | module_init(init_mc2); | ||