author	Jonathan Herman <hermanjl@cs.unc.edu>	2011-09-27 20:15:32 -0400
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2011-09-27 20:36:04 -0400
commit	23a00b911b968c6290251913ecc34171836b4d32 (patch)
tree	f6c8289054d2961902931e89bdc11ccc01bc3a73 /litmus/sched_mc_ce.c
parent	f21e1d0ef90c2e88ae6a563afc31ea601ed968c7 (diff)
parent	609c45f71b7a2405230fd2f8436837d6389ec599 (diff)
Merged with ce domains
Diffstat (limited to 'litmus/sched_mc_ce.c')
-rw-r--r--	litmus/sched_mc_ce.c	365
1 file changed, 211 insertions(+), 154 deletions(-)
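This commit replaces the plugin-private per-CPU `domain_t` variables with a shared per-CPU `struct domain_data` pointer table plus a per-CPU `struct ce_pid_table`, so the merged ce-domain code and the standalone MC-CE plugin can drive the same scheduling logic. A minimal sketch of how a container plugin might install its own domains before activation, mirroring the new mc_ce_activate_plugin() wrapper in the diff below (the caller name is hypothetical; mc_ce_set_domains() and _mc_ce_doms are introduced by this patch):

	/* Hypothetical caller: hand per-CPU domain_data to MC-CE while the
	 * Linux plugin is still active; mc_ce_set_domains() returns -EINVAL
	 * otherwise. */
	static long example_install_domains(void)
	{
		struct domain_data *doms[NR_CPUS];
		int cpu, n = 0;

		for_each_online_cpu(cpu) {
			doms[cpu] = &per_cpu(_mc_ce_doms, cpu);
			n++;
		}
		return mc_ce_set_domains(n, doms);
	}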
diff --git a/litmus/sched_mc_ce.c b/litmus/sched_mc_ce.c
index dcb74f4ca67b..63b0470e1f52 100644
--- a/litmus/sched_mc_ce.c
+++ b/litmus/sched_mc_ce.c
@@ -23,27 +23,86 @@
 #include <litmus/sched_trace.h>
 #include <litmus/jobs.h>
 #include <litmus/sched_mc.h>
+#include <litmus/ce_domain.h>
 
 static struct sched_plugin mc_ce_plugin __cacheline_aligned_in_smp;
 
-#define is_active_plugin() (litmus == &mc_ce_plugin)
+#define using_linux_plugin() (litmus == &linux_sched_plugin)
+
+/* get a reference to struct domain for a CPU */
+#define get_domain_for(cpu) (&per_cpu(domains, cpu)->domain)
+
+#define get_pid_table(cpu) (&per_cpu(ce_pid_table, cpu))
+#define get_pid_entry(cpu, idx) (&(get_pid_table(cpu)->entries[idx]))
 
 static atomic_t start_time_set = ATOMIC_INIT(-1);
 static atomic64_t start_time = ATOMIC64_INIT(0);
 static struct proc_dir_entry *mc_ce_dir = NULL, *ce_file = NULL;
 
+/*
+ * Cache the budget along with the struct PID for a task so that we don't need
+ * to fetch its task_struct every time we check to see what should be
+ * scheduled.
+ */
+struct ce_pid_entry {
+	struct pid *pid;
+	lt_t budget;
+	/* accumulated (summed) budgets, including this one */
+	lt_t acc_time;
+	unsigned int expected_job;
+};
+
+struct ce_pid_table {
+	struct ce_pid_entry entries[CONFIG_PLUGIN_MC_LEVEL_A_MAX_TASKS];
+	int num_pid_entries;
+	lt_t cycle_time;
+};
+
+DEFINE_PER_CPU(struct ce_pid_table, ce_pid_table);
+
+/*
+ * How we get the domain for a given CPU locally. Set with the
+ * mc_ce_set_domains function. Must be done before activating plugins. Be
+ * careful when using domains as a variable elsewhere in this file.
+ */
+
+DEFINE_PER_CPU(struct domain_data*, domains);
 
-DEFINE_PER_CPU(domain_t, mc_ce_doms);
-DEFINE_PER_CPU(rt_domain_t, mc_ce_rts);
+/*
+ * The domains and other data used by the MC-CE plugin when it runs alone.
+ */
+DEFINE_PER_CPU(struct domain_data, _mc_ce_doms);
 DEFINE_PER_CPU(struct ce_dom_data, _mc_ce_dom_data);
+DEFINE_PER_CPU(raw_spinlock_t, _mc_ce_dom_locks);
 
-/* Return the address of the domain_t for this CPU, used by the
- * mixed-criticality plugin. */
-domain_t *ce_domain_for(int cpu)
+long mc_ce_set_domains(const int n, struct domain_data *domains_in[])
 {
-	return &per_cpu(mc_ce_doms, cpu);
+	const int max = (NR_CPUS < n) ? NR_CPUS : n;
+	struct domain_data *new_dom = NULL;
+	int i, ret;
+	if (!using_linux_plugin()) {
+		printk(KERN_WARNING "can't set MC-CE domains when not using "
+				"Linux scheduler.\n");
+		ret = -EINVAL;
+		goto out;
+	}
+	for (i = 0; i < max; ++i) {
+		new_dom = domains_in[i];
+		per_cpu(domains, i) = new_dom;
+	}
+	ret = 0;
+out:
+	return ret;
 }
 
+unsigned int mc_ce_get_expected_job(const int cpu, const int idx)
+{
+	const struct ce_pid_table *pid_table = get_pid_table(cpu);
+	BUG_ON(0 > cpu);
+	BUG_ON(0 > idx);
+	BUG_ON(pid_table->num_pid_entries <= idx);
+	return pid_table->entries[idx].expected_job;
+}
 
 /*
  * Get the offset into the cycle taking the start time into account.
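The new ce_pid_table makes the cyclic schedule table-driven: each entry stores its slot budget and the running sum acc_time, and the sums partition one cycle. A worked example (values invented) of what setup_pid_entry() further down builds for two tasks on one CPU:

	/* budgets of 2 ms and 3 ms give:
	 *   entries[0] = { .budget = 2000000, .acc_time = 2000000 }
	 *   entries[1] = { .budget = 3000000, .acc_time = 5000000 }
	 *   cycle_time = 5000000
	 * so cycle offsets [0, 2 ms) run task 0 and [2 ms, 5 ms) run task 1.
	 */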
@@ -63,16 +122,14 @@ static inline lt_t get_cycle_offset(const lt_t when, const lt_t cycle_time)
  *
  * Do not call prepare_for_next_period on Level-A tasks!
  */
-static void mc_ce_job_completion(struct task_struct *ts)
+static void mc_ce_job_completion(struct domain *dom, struct task_struct *ts)
 {
-	const domain_t *dom = &per_cpu(mc_ce_doms, smp_processor_id());
-	const struct ce_dom_data *ce_data = dom->data;
+	const int cpu = task_cpu(ts);
 	const int idx = tsk_mc_data(ts)->mc_task.lvl_a_id;
-	const struct ce_dom_pid_entry *pid_entry =
-		&ce_data->pid_entries[idx];
-	int just_finished;
+	const struct ce_pid_entry *pid_entry = get_pid_entry(cpu, idx);
+	unsigned int just_finished;
 
-	TRACE_TASK(ts, "completed\n");
+	TRACE_TASK(ts, "Completed\n");
 
 	sched_trace_task_completion(ts, 0);
 	/* post-increment is important here */
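The helper named in the hunk header above is not itself part of this diff; a plausible body, assuming the atomic start_time declared earlier anchors the schedule, is time-since-start modulo the table length:

	/* Sketch only; the real get_cycle_offset() is outside this hunk, and a
	 * 32-bit kernel would need do_div() rather than a plain %. */
	static inline lt_t get_cycle_offset(const lt_t when, const lt_t cycle_time)
	{
		return (when - atomic64_read(&start_time)) % cycle_time;
	}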
@@ -85,11 +142,11 @@ static void mc_ce_job_completion(struct task_struct *ts)
 	if (just_finished < pid_entry->expected_job) {
 		/* this job is already released because it's running behind */
 		set_rt_flags(ts, RT_F_RUNNING);
-		TRACE_TASK(ts, "appears behind: the expected job is %d but "
-				"job %d just completed\n",
+		TRACE_TASK(ts, "appears behind: the expected job is %u but "
+				"job %u just completed\n",
 				pid_entry->expected_job, just_finished);
 	} else if (pid_entry->expected_job < just_finished) {
-		printk(KERN_CRIT "job %d completed in expected job %d which "
+		printk(KERN_CRIT "job %u completed in expected job %u which "
 				"seems too early\n", just_finished,
 				pid_entry->expected_job);
 		BUG();
@@ -104,31 +161,32 @@ static void mc_ce_job_completion(struct task_struct *ts)
  *
  * TODO Currently O(n) in the number of tasks on the CPU. Binary search?
  */
-static int mc_ce_schedule_at(const domain_t *dom, lt_t offset)
+static int mc_ce_schedule_at(const struct domain *dom, lt_t offset)
 {
 	const struct ce_dom_data *ce_data = dom->data;
-	const struct ce_dom_pid_entry *pid_entry = NULL;
-	int i;
+	struct ce_pid_table *pid_table = get_pid_table(ce_data->cpu);
+	const struct ce_pid_entry *pid_entry = NULL;
+	int idx;
 
-	BUG_ON(ce_data->cycle_time < 1);
-	BUG_ON(ce_data->num_pid_entries < 1);
+	BUG_ON(pid_table->cycle_time < 1);
+	BUG_ON(pid_table->num_pid_entries < 1);
 
-	for (i = 0; i < ce_data->num_pid_entries; ++i) {
-		pid_entry = &ce_data->pid_entries[i];
+	for (idx = 0; idx < pid_table->num_pid_entries; ++idx) {
+		pid_entry = &pid_table->entries[idx];
 		if (offset < pid_entry->acc_time) {
 			/* found task to schedule in this window */
 			break;
 		}
 	}
 	/* can only happen if cycle_time is not right */
-	BUG_ON(pid_entry->acc_time > ce_data->cycle_time);
-	TRACE("schedule at returned task %d for CPU %d\n", i, ce_data->cpu);
-	return i;
+	BUG_ON(pid_entry->acc_time > pid_table->cycle_time);
+	TRACE("schedule at returning task %d for CPU %d\n", idx, ce_data->cpu);
+	return idx;
 }
 
 static struct task_struct *mc_ce_schedule(struct task_struct *prev)
 {
-	domain_t *dom = &per_cpu(mc_ce_doms, smp_processor_id());
+	struct domain *dom = get_domain_for(smp_processor_id());
 	struct ce_dom_data *ce_data = dom->data;
 	struct task_struct *next = NULL;
 	int exists, sleep, should_sched_exists, should_sched_blocked,
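Since acc_time is non-decreasing across entries, the TODO above could be answered with a lower-bound binary search; a sketch, not part of the patch:

	/* Find the first entry with offset < acc_time in O(log n). */
	static int mc_ce_schedule_at_bsearch(const struct ce_pid_table *pid_table,
			lt_t offset)
	{
		int lo = 0, hi = pid_table->num_pid_entries - 1;
		int idx = hi;

		while (lo <= hi) {
			int mid = lo + (hi - lo) / 2;
			if (offset < pid_table->entries[mid].acc_time) {
				idx = mid;	/* candidate; try earlier */
				hi = mid - 1;
			} else {
				lo = mid + 1;
			}
		}
		return idx;
	}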
@@ -147,7 +205,7 @@ static struct task_struct *mc_ce_schedule(struct task_struct *prev)
 	TRACE("exists: %d, sleep: %d\n", exists, sleep);
 
 	if (sleep)
-		mc_ce_job_completion(ce_data->scheduled);
+		mc_ce_job_completion(dom, ce_data->scheduled);
 
 	/* these checks must go after the call to mc_ce_job_completion in case
 	 * a late task needs to be scheduled again right away and its the only
@@ -178,7 +236,7 @@ static struct task_struct *mc_ce_schedule(struct task_struct *prev)
 
 static void mc_ce_finish_switch(struct task_struct *prev)
 {
-	domain_t *dom = &per_cpu(mc_ce_doms, smp_processor_id());
+	struct domain *dom = get_domain_for(smp_processor_id());
 	struct ce_dom_data *ce_data = dom->data;
 
 	TRACE("finish switch\n");
@@ -190,41 +248,21 @@ static void mc_ce_finish_switch(struct task_struct *prev)
 }
 
 /*
- * Called for every local timer interrupt.
- * Linux calls this with interrupts disabled, AFAIK.
- */
-static void mc_ce_tick(struct task_struct *ts)
-{
-	domain_t *dom = &per_cpu(mc_ce_doms, smp_processor_id());
-	struct ce_dom_data *ce_data = dom->data;
-	struct task_struct *should_schedule;
-
-	if (is_realtime(ts) && CRIT_LEVEL_A == tsk_mc_crit(ts)) {
-		raw_spin_lock(dom->lock);
-		should_schedule = ce_data->should_schedule;
-		raw_spin_unlock(dom->lock);
-
-		if (!is_np(ts) && ts != should_schedule) {
-			litmus_reschedule_local();
-		} else if (is_user_np(ts)) {
-			request_exit_np(ts);
-		}
-	}
-}
-
-/*
  * Admit task called to see if this task is permitted to enter the system.
  * Here we look up the task's PID structure and save it in the proper slot on
  * the CPU this task will run on.
  */
-static long __mc_ce_admit_task(struct task_struct *ts)
+long mc_ce_admit_task_common(struct task_struct *ts)
 {
-	domain_t *dom = &per_cpu(mc_ce_doms, get_partition(ts));
+	struct domain *dom = get_domain_for(get_partition(ts));
 	struct ce_dom_data *ce_data = dom->data;
 	struct mc_data *mcd = tsk_mc_data(ts);
 	struct pid *pid = NULL;
 	long retval = -EINVAL;
 	const int lvl_a_id = mcd->mc_task.lvl_a_id;
+	struct ce_pid_table *pid_table = get_pid_table(ce_data->cpu);
+
+	BUG_ON(get_partition(ts) != ce_data->cpu);
 
 	/* check the task has migrated to the right CPU (like in sched_cedf) */
 	if (task_cpu(ts) != get_partition(ts)) {
@@ -248,26 +286,26 @@ static long __mc_ce_admit_task(struct task_struct *ts)
 		goto out;
 	}
 
-	if (lvl_a_id >= ce_data->num_pid_entries) {
+	if (lvl_a_id >= pid_table->num_pid_entries) {
 		printk(KERN_INFO "litmus: level A id greater than expected "
 				"number of tasks %d for %d cpu %d\n",
-				ce_data->num_pid_entries, ts->pid,
+				pid_table->num_pid_entries, ts->pid,
 				get_partition(ts));
 		goto out_put_pid;
 	}
-	if (ce_data->pid_entries[lvl_a_id].pid) {
+	if (pid_table->entries[lvl_a_id].pid) {
 		printk(KERN_INFO "litmus: have saved pid info id: %d cpu: %d\n",
 			lvl_a_id, get_partition(ts));
 		goto out_put_pid;
 	}
-	if (get_exec_cost(ts) >= ce_data->pid_entries[lvl_a_id].budget) {
+	if (get_exec_cost(ts) >= pid_table->entries[lvl_a_id].budget) {
 		printk(KERN_INFO "litmus: execution cost %llu is larger than "
 				"the budget %llu\n",
 				get_exec_cost(ts),
-				ce_data->pid_entries[lvl_a_id].budget);
+				pid_table->entries[lvl_a_id].budget);
 		goto out_put_pid;
 	}
-	ce_data->pid_entries[lvl_a_id].pid = pid;
+	pid_table->entries[lvl_a_id].pid = pid;
 	retval = 0;
 	/* don't call put_pid if we are successful */
 	goto out;
@@ -280,10 +318,10 @@ out:
 
 static long mc_ce_admit_task(struct task_struct *ts)
 {
-	domain_t *dom = &per_cpu(mc_ce_doms, get_partition(ts));
+	struct domain *dom = get_domain_for(get_partition(ts));
 	unsigned long flags, retval;
 	raw_spin_lock_irqsave(dom->lock, flags);
-	retval = __mc_ce_admit_task(ts);
+	retval = mc_ce_admit_task_common(ts);
 	raw_spin_unlock_irqrestore(dom->lock, flags);
 	return retval;
 }
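The rename from __mc_ce_admit_task() to mc_ce_admit_task_common() follows the pattern used throughout this merge: the plugin entry point takes dom->lock, while the _common body can also be invoked by the merged ce-domain code with the lock already held, e.g.:

	/* Hypothetical caller that already holds dom->lock: */
	retval = mc_ce_admit_task_common(ts);	/* no extra locking here */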
@@ -295,26 +333,26 @@ static long mc_ce_admit_task(struct task_struct *ts)
  */
 static void mc_ce_task_new(struct task_struct *ts, int on_rq, int running)
 {
-	domain_t *dom = &per_cpu(mc_ce_doms, task_cpu(ts));
+	const int cpu = task_cpu(ts);
+	struct domain *dom = get_domain_for(cpu);
 	struct ce_dom_data *ce_data = dom->data;
+	struct ce_pid_table *pid_table = get_pid_table(cpu);
 	struct pid *pid_should_be_running;
-	struct ce_dom_pid_entry *pid_entry;
+	struct ce_pid_entry *pid_entry;
 	unsigned long flags;
 	int idx, should_be_running;
 	lt_t offset;
 
-	/* have to call mc_ce_schedule_at because the task only gets a PID
-	 * entry after calling admit_task */
-
 	raw_spin_lock_irqsave(dom->lock, flags);
-	pid_entry = &ce_data->pid_entries[tsk_mc_data(ts)->mc_task.lvl_a_id];
+	pid_entry = get_pid_entry(cpu, tsk_mc_data(ts)->mc_task.lvl_a_id);
 	/* initialize some task state */
 	set_rt_flags(ts, RT_F_RUNNING);
-	tsk_rt(ts)->job_params.job_no = 0;
 
-	offset = get_cycle_offset(litmus_clock(), ce_data->cycle_time);
+	/* have to call mc_ce_schedule_at because the task only gets a PID
+	 * entry after calling admit_task */
+	offset = get_cycle_offset(litmus_clock(), pid_table->cycle_time);
 	idx = mc_ce_schedule_at(dom, offset);
-	pid_should_be_running = ce_data->pid_entries[idx].pid;
+	pid_should_be_running = get_pid_entry(cpu, idx)->pid;
 	rcu_read_lock();
 	should_be_running = (ts == pid_task(pid_should_be_running, PIDTYPE_PID));
 	rcu_read_unlock();
@@ -341,7 +379,7 @@ static void mc_ce_task_new(struct task_struct *ts, int on_rq, int running)
  */
 static void mc_ce_task_wake_up(struct task_struct *ts)
 {
-	domain_t *dom = &per_cpu(mc_ce_doms, smp_processor_id());
+	struct domain *dom = get_domain_for(get_partition(ts));
 	struct ce_dom_data *ce_data = dom->data;
 	unsigned long flags;
 
@@ -366,25 +404,25 @@ static void mc_ce_task_block(struct task_struct *ts)
 /*
  * Called when a task switches from RT mode back to normal mode.
  */
-static void mc_ce_task_exit(struct task_struct *ts)
+void mc_ce_task_exit_common(struct task_struct *ts)
 {
-	domain_t *dom = &per_cpu(mc_ce_doms, get_partition(ts));
+	struct domain *dom = get_domain_for(get_partition(ts));
 	struct ce_dom_data *ce_data = dom->data;
 	unsigned long flags;
 	struct pid *pid;
 	const int lvl_a_id = tsk_mc_data(ts)->mc_task.lvl_a_id;
-
-	TRACE_TASK(ts, "exited\n");
+	struct ce_pid_table *pid_table = get_pid_table(ce_data->cpu);
 
 	BUG_ON(task_cpu(ts) != get_partition(ts));
 	BUG_ON(CRIT_LEVEL_A != tsk_mc_crit(ts));
-	BUG_ON(lvl_a_id >= ce_data->num_pid_entries);
+	BUG_ON(lvl_a_id >= pid_table->num_pid_entries);
+	BUG_ON(ce_data->cpu != task_cpu(ts));
 
 	raw_spin_lock_irqsave(dom->lock, flags);
-	pid = ce_data->pid_entries[lvl_a_id].pid;
+	pid = pid_table->entries[lvl_a_id].pid;
 	BUG_ON(!pid);
 	put_pid(pid);
-	ce_data->pid_entries[lvl_a_id].pid = NULL;
+	pid_table->entries[lvl_a_id].pid = NULL;
 	if (ce_data->scheduled == ts)
 		ce_data->scheduled = NULL;
 	if (ce_data->should_schedule == ts)
@@ -396,32 +434,32 @@ static void mc_ce_task_exit(struct task_struct *ts)
  * Timer stuff
  **********************************************************/
 
-void __mc_ce_timer_callback(struct hrtimer *timer)
+void mc_ce_timer_callback_common(struct domain *dom, struct hrtimer *timer)
 {
 	/* relative and absolute times for cycles */
 	lt_t now, offset_rel, cycle_start_abs, next_timer_abs;
 	struct task_struct *should_schedule;
-	struct ce_dom_pid_entry *pid_entry;
+	struct ce_pid_table *pid_table;
+	struct ce_pid_entry *pid_entry;
 	struct ce_dom_data *ce_data;
-	domain_t *dom;
 	int idx, budget_overrun;
 
-	ce_data = container_of(timer, struct ce_dom_data, timer);
-	dom = container_of(((void*)ce_data), domain_t, data);
+	ce_data = dom->data;
+	pid_table = get_pid_table(ce_data->cpu);
 
 	/* Based off of the current time, figure out the offset into the cycle
 	 * and the cycle's start time, and determine what should be scheduled.
 	 */
 	now = litmus_clock();
-	offset_rel = get_cycle_offset(now, ce_data->cycle_time);
+	offset_rel = get_cycle_offset(now, pid_table->cycle_time);
 	cycle_start_abs = now - offset_rel;
 	idx = mc_ce_schedule_at(dom, offset_rel);
-	pid_entry = &ce_data->pid_entries[idx];
+	pid_entry = get_pid_entry(ce_data->cpu, idx);
 	/* set the timer to fire at the next cycle start */
 	next_timer_abs = cycle_start_abs + pid_entry->acc_time;
 	hrtimer_set_expires(timer, ns_to_ktime(next_timer_abs));
 
-	TRACE("timer: now: %llu offset_rel: %llu cycle_start_abs: %llu "
+	STRACE("timer: now: %llu offset_rel: %llu cycle_start_abs: %llu "
 			"next_timer_abs: %llu\n", now, offset_rel,
 			cycle_start_abs, next_timer_abs);
 
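A worked example of the re-arming arithmetic above (values invented): with cycle_time = 5 ms and now falling 12 ms after the release anchor, offset_rel = 2 ms and cycle_start_abs = now - 2 ms. With the two-entry table from the earlier example (acc_time = 2 ms and 5 ms), mc_ce_schedule_at() returns index 1, so the timer is set to cycle_start_abs + 5 ms, i.e. the boundary that ends task 1's window and begins the next cycle.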
@@ -440,10 +478,11 @@ void __mc_ce_timer_callback(struct hrtimer *timer)
 		budget_overrun = pid_entry->expected_job !=
 				tsk_rt(should_schedule)->job_params.job_no;
 		if (budget_overrun)
-			TRACE_TASK(should_schedule, "timer expected job number: %d "
-					"but current job: %d\n",
-					pid_entry->expected_job,
-					tsk_rt(should_schedule)->job_params.job_no);
+			TRACE_MC_TASK(should_schedule,
+					"timer expected job number: %u "
+					"but current job: %u",
+					pid_entry->expected_job,
+					tsk_rt(should_schedule)->job_params.job_no);
 	}
 
 	if (ce_data->should_schedule) {
@@ -466,15 +505,15 @@ static enum hrtimer_restart mc_ce_timer_callback(struct hrtimer *timer)
 {
 	struct ce_dom_data *ce_data;
 	unsigned long flags;
-	domain_t *dom;
+	struct domain *dom;
 
 	ce_data = container_of(timer, struct ce_dom_data, timer);
-	dom = container_of(((void*)ce_data), domain_t, data);
+	dom = get_domain_for(ce_data->cpu);
 
 	TRACE("timer callback on CPU %d (before lock)\n", ce_data->cpu);
 
 	raw_spin_lock_irqsave(dom->lock, flags);
-	__mc_ce_timer_callback(timer);
+	mc_ce_timer_callback_common(dom, timer);
 
 	if (ce_data->scheduled != ce_data->should_schedule)
 		preempt_if_preemptable(ce_data->scheduled, ce_data->cpu);
@@ -490,13 +529,13 @@ static enum hrtimer_restart mc_ce_timer_callback(struct hrtimer *timer)
 static int cancel_all_timers(void)
 {
 	struct ce_dom_data *ce_data;
-	domain_t *dom;
-	int cpu, ret = 0, cancel_res;
+	struct domain *dom;
+	int cpu, cancel_res, ret = 0;
 
 	TRACE("cancel all timers\n");
 
 	for_each_online_cpu(cpu) {
-		dom = &per_cpu(mc_ce_doms, cpu);
+		dom = get_domain_for(cpu);
 		ce_data = dom->data;
 		ce_data->should_schedule = NULL;
 		cancel_res = hrtimer_cancel(&ce_data->timer);
@@ -514,20 +553,22 @@ static int cancel_all_timers(void)
  */
 static void arm_all_timers(void)
 {
+	struct domain *dom;
 	struct ce_dom_data *ce_data;
-	domain_t *dom;
+	struct ce_pid_table *pid_table;
 	int cpu, idx;
 	const lt_t start = atomic64_read(&start_time);
 
 	TRACE("arm all timers\n");
 
 	for_each_online_cpu(cpu) {
-		dom = &per_cpu(mc_ce_doms, cpu);
+		dom = get_domain_for(cpu);
 		ce_data = dom->data;
-		if (0 == ce_data->num_pid_entries)
+		pid_table = get_pid_table(cpu);
+		if (0 == pid_table->num_pid_entries)
 			continue;
-		for (idx = 0; idx < ce_data->num_pid_entries; idx++) {
-			ce_data->pid_entries[idx].expected_job = -1;
+		for (idx = 0; idx < pid_table->num_pid_entries; idx++) {
+			pid_table->entries[idx].expected_job = 0;
 		}
 		TRACE("arming timer for CPU %d\n", cpu);
 		hrtimer_start_on(cpu, &ce_data->timer_info, &ce_data->timer,
@@ -540,7 +581,7 @@ static void arm_all_timers(void)
 * call this. We can re-set our notion of the CE period start to make
 * the schedule look pretty.
 */
-void mc_ce_release_at(struct task_struct *ts, lt_t start)
+void mc_ce_release_at_common(struct task_struct *ts, lt_t start)
 {
 	TRACE_TASK(ts, "release at\n");
 	if (atomic_inc_and_test(&start_time_set)) {
@@ -552,14 +593,14 @@ void mc_ce_release_at(struct task_struct *ts, lt_t start)
 	atomic_dec(&start_time_set);
 }
 
-long mc_ce_activate_plugin(void)
+long mc_ce_activate_plugin_common(void)
 {
 	struct ce_dom_data *ce_data;
-	domain_t *dom;
+	struct domain *dom;
 	int cpu;
 
 	for_each_online_cpu(cpu) {
-		dom = &per_cpu(mc_ce_doms, cpu);
+		dom = get_domain_for(cpu);
 		ce_data = dom->data;
 		ce_data->scheduled = NULL;
 		ce_data->should_schedule = NULL;
@@ -572,33 +613,54 @@ long mc_ce_activate_plugin(void)
 	return 0;
 }
 
+static long mc_ce_activate_plugin(void)
+{
+	struct domain_data *our_domains[NR_CPUS];
+	int cpu, n = 0;
+	long ret;
+
+	for_each_online_cpu(cpu) {
+		BUG_ON(NR_CPUS <= n);
+		our_domains[cpu] = &per_cpu(_mc_ce_doms, cpu);
+		n++;
+	}
+	ret = mc_ce_set_domains(n, our_domains);
+	if (ret)
+		goto out;
+	ret = mc_ce_activate_plugin_common();
+out:
+	return ret;
+}
+
 static void clear_pid_entries(void)
 {
+	struct ce_pid_table *pid_table = NULL;
 	int cpu, entry;
-	domain_t *dom;
-	struct ce_dom_data *ce_data;
 
 	for_each_online_cpu(cpu) {
-		dom = &per_cpu(mc_ce_doms, cpu);
-		ce_data = dom->data;
-		ce_data->num_pid_entries = 0;
-		ce_data->cycle_time = 0;
+		pid_table = get_pid_table(cpu);
+		pid_table->num_pid_entries = 0;
+		pid_table->cycle_time = 0;
 		for (entry = 0; entry < CONFIG_PLUGIN_MC_LEVEL_A_MAX_TASKS;
 				++entry) {
-			if (NULL != ce_data->pid_entries[entry].pid) {
-				put_pid(ce_data->pid_entries[entry].pid);
-				ce_data->pid_entries[entry].pid = NULL;
+			if (NULL != pid_table->entries[entry].pid) {
+				put_pid(pid_table->entries[entry].pid);
+				pid_table->entries[entry].pid = NULL;
 			}
-			ce_data->pid_entries[entry].budget = 0;
-			ce_data->pid_entries[entry].acc_time = 0;
-			ce_data->pid_entries[entry].expected_job = -1;
+			pid_table->entries[entry].budget = 0;
+			pid_table->entries[entry].acc_time = 0;
+			pid_table->entries[entry].expected_job = 0;
 		}
 	}
 }
 
-long mc_ce_deactivate_plugin(void)
+long mc_ce_deactivate_plugin_common(void)
 {
+	int cpu;
 	cancel_all_timers();
+	for_each_online_cpu(cpu) {
+		per_cpu(domains, cpu) = NULL;
+	}
 	return 0;
 }
 
@@ -608,35 +670,33 @@ static struct sched_plugin mc_ce_plugin __cacheline_aligned_in_smp = {
 	.admit_task = mc_ce_admit_task,
 	.task_new = mc_ce_task_new,
 	.complete_job = complete_job,
-	.release_at = mc_ce_release_at,
-	.task_exit = mc_ce_task_exit,
+	.release_at = mc_ce_release_at_common,
+	.task_exit = mc_ce_task_exit_common,
 	.schedule = mc_ce_schedule,
 	.finish_switch = mc_ce_finish_switch,
-	.tick = mc_ce_tick,
 	.task_wake_up = mc_ce_task_wake_up,
 	.task_block = mc_ce_task_block,
 	.activate_plugin = mc_ce_activate_plugin,
-	.deactivate_plugin = mc_ce_deactivate_plugin,
+	.deactivate_plugin = mc_ce_deactivate_plugin_common,
 };
 
 static int setup_proc(void);
 static int __init init_sched_mc_ce(void)
 {
-	struct ce_dom_data *ce_data;
-	domain_t *dom;
-	rt_domain_t *rt;
+	raw_spinlock_t *ce_lock;
+	struct domain_data *dom_data;
+	struct domain *dom;
 	int cpu, err;
 
 	for_each_online_cpu(cpu) {
-		dom = &per_cpu(mc_ce_doms, cpu);
-		rt = &per_cpu(mc_ce_rts, cpu);
-		pd_domain_init(dom, rt, NULL, NULL, NULL, NULL, NULL);
-		dom->data = &per_cpu(_mc_ce_dom_data, cpu);
-		ce_data = dom->data;
-		hrtimer_init(&ce_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-		hrtimer_start_on_info_init(&ce_data->timer_info);
-		ce_data->cpu = cpu;
-		ce_data->timer.function = mc_ce_timer_callback;
+		per_cpu(domains, cpu) = NULL;
+		ce_lock = &per_cpu(_mc_ce_dom_locks, cpu);
+		raw_spin_lock_init(ce_lock);
+		dom_data = &per_cpu(_mc_ce_doms, cpu);
+		dom = &dom_data->domain;
+		ce_domain_init(dom, ce_lock, NULL, NULL, NULL, NULL, NULL,
+				&per_cpu(_mc_ce_dom_data, cpu), cpu,
+				mc_ce_timer_callback);
 	}
 	clear_pid_entries();
 	err = setup_proc();
@@ -678,7 +738,7 @@ out:
 #define PID_SPACE 15
 #define TASK_INFO_BUF (PID_SPACE + TASK_COMM_LEN)
 static int write_pid_entry(char *page, const int count, const int cpu,
-		const int task, struct ce_dom_pid_entry *pid_entry)
+		const int task, struct ce_pid_entry *pid_entry)
 {
 	static char task_info[TASK_INFO_BUF];
 	struct task_struct *ts;
@@ -735,8 +795,7 @@ static int proc_read_ce_file(char *page, char **start, off_t off, int count,
 		int *eof, void *data)
 {
 	int n = 0, err, cpu, t;
-	struct ce_dom_data *ce_data;
-	domain_t *dom;
+	struct ce_pid_table *pid_table;
 
 	if (off > 0) {
 		printk(KERN_INFO "litmus: MC-CE called read with off > 0\n");
@@ -744,11 +803,10 @@ static int proc_read_ce_file(char *page, char **start, off_t off, int count,
 	}
 
 	for_each_online_cpu(cpu) {
-		dom = &per_cpu(mc_ce_doms, cpu);
-		ce_data = dom->data;
-		for (t = 0; t < ce_data->num_pid_entries; ++t) {
+		pid_table = get_pid_table(cpu);
+		for (t = 0; t < pid_table->num_pid_entries; ++t) {
 			err = write_pid_entry(page + n, count - n,
-					cpu, t, &ce_data->pid_entries[t]);
+					cpu, t, get_pid_entry(cpu, t));
 			if (err < 0) {
 				n = -ENOSPC;
 				goto out;
@@ -785,9 +843,8 @@ static int skip_comment(const char *buf, const unsigned long max)
 #define BUDGET_THRESHOLD 5000000ULL
 static int setup_pid_entry(const int cpu, const int task, const lt_t budget)
 {
-	domain_t *dom = &per_cpu(mc_ce_doms, cpu);
-	struct ce_dom_data *ce_data = dom->data;
-	struct ce_dom_pid_entry *new_entry;
+	struct ce_pid_table *pid_table = get_pid_table(cpu);
+	struct ce_pid_entry *new_entry = NULL;
 	int err = 0;
 
 	/* check the inputs */
@@ -805,20 +862,20 @@ static int setup_pid_entry(const int cpu, const int task, const lt_t budget)
 			"MC-CE task; that might be an issue.\n");
 	}
 	/* check that we have space for a new entry */
-	if (CONFIG_PLUGIN_MC_LEVEL_A_MAX_TASKS <= ce_data->num_pid_entries) {
+	if (CONFIG_PLUGIN_MC_LEVEL_A_MAX_TASKS <= pid_table->num_pid_entries) {
 		printk(KERN_INFO "litmus: too many MC-CE tasks for cpu "
 				"%d\n", cpu);
 		err = -EINVAL;
 		goto out;
 	}
 	/* add the new entry */
-	new_entry = &ce_data->pid_entries[ce_data->num_pid_entries];
+	new_entry = get_pid_entry(cpu, pid_table->num_pid_entries);
 	BUG_ON(NULL != new_entry->pid);
 	new_entry->budget = budget;
-	new_entry->acc_time = ce_data->cycle_time + budget;
+	new_entry->acc_time = pid_table->cycle_time + budget;
 	/* update the domain entry */
-	ce_data->cycle_time += budget;
-	ce_data->num_pid_entries++;
+	pid_table->cycle_time += budget;
+	pid_table->num_pid_entries++;
 out:
 	return err;
 }
@@ -839,9 +896,9 @@ static int proc_write_ce_file(struct file *file, const char __user *buffer,
 	int cpu, task, cnt = 0, chars_read, converted, err;
 	lt_t budget;
 
-	if (is_active_plugin()) {
-		printk(KERN_INFO "litmus: can't edit MC-CE proc when plugin "
-				"active\n");
+	if (!using_linux_plugin()) {
+		printk(KERN_INFO "litmus: can only edit MC-CE proc under Linux "
+				"plugin\n");
 		cnt = -EINVAL;
 		goto out;
 	}
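For reference, the writer parses one cpu/task/budget triple at a time and feeds it to setup_pid_entry(); skip_comment() suggests '#' comment lines are allowed. The exact tokenizing is outside this diff, but a plausible table for a two-CPU setup would look like:

	# cpu task budget-in-ns
	0 0 2000000
	0 1 3000000
	1 0 5000000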