author    Jonathan Herman <hermanjl@cs.unc.edu>  2012-03-15 12:11:32 -0400
committer Jonathan Herman <hermanjl@cs.unc.edu>  2012-03-30 17:18:46 -0400
commit    bbdb7896e6a6df8bcc3aa1c1746ab0f898fedfd8 (patch)
tree      659e4b4d88708f347509dbe7317b5d66427e7760
parent    26bafa3b7880a323d83b8ea71bdb8e2118a5cba0 (diff)
First container trace iteration
Conflicts:
	include/litmus/rt_param.h
	include/litmus/sched_trace.h
	litmus/budget.c
	litmus/sched_mc.c
	litmus/sched_mc_ce.c
 include/litmus/litmus.h       |    2
 include/litmus/rt_param.h     |    2
 include/litmus/sched_plugin.h |    5
 include/litmus/sched_trace.h  |   42
 include/trace/events/litmus.h |  163
 litmus/sched_mc_ce.c          | 1052
 litmus/sched_plugin.c         |    6
 litmus/sched_psn_edf.c        |    3
 litmus/sync.c                 |    3
 9 files changed, 1258 insertions(+), 20 deletions(-)
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 0b071fd359f9..2776470bb897 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -44,6 +44,8 @@ void litmus_exit_task(struct task_struct *tsk);
 
 #define tsk_rt(t)		(&(t)->rt_param)
 
+#define get_server_job(t)	(tsk_rt(t)->job_params.fake_job_no)
+
 /* Realtime utility macros */
 #define get_rt_flags(t)		(tsk_rt(t)->flags)
 #define set_rt_flags(t,f)	(tsk_rt(t)->flags=(f))
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index d6d799174160..ba62e10d6f2c 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -89,7 +89,7 @@ struct rt_job {
 	lt_t	exec_time;
 
 	/* Which job is this. This is used to let user space
 	 * specify which job to wait for, which is important if jobs
 	 * overrun. If we just call sys_sleep_next_period() then we
 	 * will unintentionally miss jobs after an overrun.
 	 *
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
index 6e7cabdddae8..01786b57a4a9 100644
--- a/include/litmus/sched_plugin.h
+++ b/include/litmus/sched_plugin.h
@@ -67,6 +67,9 @@ typedef long (*admit_task_t)(struct task_struct* tsk);
 
 typedef void (*release_at_t)(struct task_struct *t, lt_t start);
 
+/* TODO remove me */
+typedef void (*release_ts_t)(lt_t time);
+
 struct sched_plugin {
 	struct list_head	list;
 	/* basic info */
@@ -93,6 +96,8 @@ struct sched_plugin {
 	task_block_t		task_block;
 	task_exit_t		task_exit;
 
+	release_ts_t		release_ts;
+
 #ifdef CONFIG_LITMUS_LOCKING
 	/* locking protocols */
 	allocate_lock_t		allocate_lock;
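
The release_ts hook above is explicitly marked as temporary: until it is removed, a plugin either supplies its own handler or inherits litmus_dummy_release_ts through register_sched_plugin() (see the litmus/sched_plugin.c hunk below). A minimal sketch of the wiring; the plugin name and handler here are placeholders, not part of this patch:

    /* sketch only: a plugin-provided release_ts handler */
    static void demo_release_ts(lt_t time)
    {
            /* re-align the plugin's notion of "time zero" with the
             * synchronous task-system release requested from user space */
            TRACE("synchronous release at %llu\n", time);
    }

    static struct sched_plugin demo_plugin = {
            .plugin_name = "DEMO",
            /* ... the usual callbacks ... */
            .release_ts  = demo_release_ts,
    };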
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
index 82bde8241298..c6897f799867 100644
--- a/include/litmus/sched_trace.h
+++ b/include/litmus/sched_trace.h
@@ -180,6 +180,13 @@ feather_callback void do_sched_trace_sys_release(unsigned long id,
 #define trace_litmus_task_resume(t)
 #define trace_litmus_sys_release(start)
 
+#define trace_litmus_container_param(cid, name)
+#define trace_litmus_server_param(sid, cid, wcet, time)
+#define trace_litmus_server_switch_to(sid, job, tid)
+#define trace_litmus_server_switch_away(sid, job, tid)
+#define trace_litmus_server_release(sid, job, release, deadline)
+#define trace_litmus_server_completion(sid, job)
+
 #endif
 
 
@@ -252,6 +259,41 @@ feather_callback void do_sched_trace_sys_release(unsigned long id,
 		trace_litmus_sys_release(when);		\
 	} while (0)
 
+#define QT_START lt_t _qt_start = litmus_clock()
+#define QT_END \
+	sched_trace_log_message("%d P%d [%s@%s:%d]: Took %llu\n\n", \
+			TRACE_ARGS, litmus_clock() - _qt_start)
+
+#define sched_trace_container_param(cid, name) \
+	do { \
+		trace_litmus_container_param(cid, name); \
+	} while (0)
+
+#define sched_trace_server_param(sid, cid, wcet, period) \
+	do { \
+		trace_litmus_server_param(sid, cid, wcet, period); \
+	} while(0)
+
+#define sched_trace_server_switch_to(sid, job, tid) \
+	do { \
+		trace_litmus_server_switch_to(sid, job, tid); \
+	} while(0)
+
+#define sched_trace_server_switch_away(sid, job, tid) \
+	do { \
+		trace_litmus_server_switch_away(sid, job, tid); \
+	} while (0)
+
+#define sched_trace_server_release(sid, job, rel, dead) \
+	do { \
+		trace_litmus_server_release(sid, job, rel, dead); \
+	} while (0)
+
+#define sched_trace_server_completion(sid, job) \
+	do { \
+		trace_litmus_server_completion(sid, job); \
+	} while (0)
+
 #define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */
 
 #endif /* __KERNEL__ */
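
A rough sketch of how a plugin is expected to call the new server macros. The negative-ID convention (server ID = -pid, container ID = -1 - cpu) is taken from the litmus/sched_psn_edf.c hunk below; everything else here is illustrative, not part of this patch:

    /* once, when the per-CPU container and the task's server are set up */
    sched_trace_container_param(-1 - cpu, "psn-edf-cpu");
    sched_trace_server_param(0 - t->pid, -1 - cpu,
                             get_exec_cost(t), get_rt_period(t));

    /* bracketing every interval in which the server runs the task */
    sched_trace_server_switch_to(0 - t->pid, get_server_job(t), t->pid);
    /* ... task executes ... */
    sched_trace_server_switch_away(0 - t->pid, get_server_job(t), t->pid);

QT_START/QT_END are a separate debugging aid: they time the region between them and report the duration through sched_trace_log_message().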
diff --git a/include/trace/events/litmus.h b/include/trace/events/litmus.h
index 0fffcee02be0..5ca4bef205f0 100644
--- a/include/trace/events/litmus.h
+++ b/include/trace/events/litmus.h
@@ -11,10 +11,6 @@
 
 #include <litmus/litmus.h>
 #include <litmus/rt_param.h>
-
-/*
- * Tracing task admission
- */
 TRACE_EVENT(litmus_task_param,
 
 	TP_PROTO(struct task_struct *t),
@@ -24,9 +20,9 @@ TRACE_EVENT(litmus_task_param,
 	TP_STRUCT__entry(
 		__field( pid_t, pid )
 		__field( unsigned int, job )
-		__field( lt_t, wcet )
-		__field( lt_t, period )
-		__field( lt_t, phase )
+		__field( unsigned long long, wcet )
+		__field( unsigned long long, period )
+		__field( unsigned long long, phase )
 		__field( int, partition )
 	),
 
@@ -56,8 +52,8 @@ TRACE_EVENT(litmus_task_release,
 	TP_STRUCT__entry(
 		__field( pid_t, pid )
 		__field( unsigned int, job )
-		__field( lt_t, release )
-		__field( lt_t, deadline )
+		__field( unsigned long long, release )
+		__field( unsigned long long, deadline )
 	),
 
 	TP_fast_assign(
@@ -84,8 +80,8 @@ TRACE_EVENT(litmus_switch_to,
 	TP_STRUCT__entry(
 		__field( pid_t, pid )
 		__field( unsigned int, job )
-		__field( lt_t, when )
-		__field( lt_t, exec_time )
+		__field( unsigned long long, when )
+		__field( unsigned long long, exec_time )
 	),
 
 	TP_fast_assign(
@@ -112,8 +108,8 @@ TRACE_EVENT(litmus_switch_away,
 	TP_STRUCT__entry(
 		__field( pid_t, pid )
 		__field( unsigned int, job )
-		__field( lt_t, when )
-		__field( lt_t, exec_time )
+		__field( unsigned long long, when )
+		__field( unsigned long long, exec_time )
 	),
 
 	TP_fast_assign(
@@ -140,7 +136,7 @@ TRACE_EVENT(litmus_task_completion,
 	TP_STRUCT__entry(
 		__field( pid_t, pid )
 		__field( unsigned int, job )
-		__field( lt_t, when )
+		__field( unsigned long long, when )
 		__field( unsigned long, forced )
 	),
 
@@ -167,7 +163,7 @@ TRACE_EVENT(litmus_task_block,
 
 	TP_STRUCT__entry(
 		__field( pid_t, pid )
-		__field( lt_t, when )
+		__field( unsigned long long, when )
 	),
 
 	TP_fast_assign(
@@ -190,7 +186,7 @@ TRACE_EVENT(litmus_task_resume,
 	TP_STRUCT__entry(
 		__field( pid_t, pid )
 		__field( unsigned int, job )
-		__field( lt_t, when )
+		__field( unsigned long long, when )
 	),
 
 	TP_fast_assign(
@@ -208,13 +204,13 @@ TRACE_EVENT(litmus_task_resume,
  */
 TRACE_EVENT(litmus_sys_release,
 
-	TP_PROTO(lt_t *start),
+	TP_PROTO(unsigned long long *start),
 
 	TP_ARGS(start),
 
 	TP_STRUCT__entry(
-		__field( lt_t, rel )
-		__field( lt_t, when )
+		__field( unsigned long long, rel )
+		__field( unsigned long long, when )
 	),
 
 	TP_fast_assign(
@@ -225,6 +221,135 @@ TRACE_EVENT(litmus_sys_release,
 	TP_printk("SynRelease(%Lu) at %Lu\n", __entry->rel, __entry->when)
 );
 
224/*
225 * Containers
226 */
227TRACE_EVENT(litmus_container_param,
228
229 TP_PROTO(int cid, const char *name),
230
231 TP_ARGS(cid, name),
232
233 TP_STRUCT__entry(
234 __field( int, cid )
235 __array( char, name, TASK_COMM_LEN )
236 ),
237
238 TP_fast_assign(
239 memcpy(__entry->name, name, TASK_COMM_LEN);
240 __entry->cid = cid;
241 ),
242
243 TP_printk("container, name: %s, id: %d\n", __entry->name, __entry->cid)
244);
245
246TRACE_EVENT(litmus_server_param,
247
248 TP_PROTO(int sid, int cid, unsigned long long wcet, unsigned long long period),
249
250 TP_ARGS(sid, cid, wcet, period),
251
252 TP_STRUCT__entry(
253 __field( int, sid )
254 __field( int, cid )
255 __field( unsigned long long, wcet )
256 __field( unsigned long long, period )
257 ),
258
259 TP_fast_assign(
260 __entry->cid = cid;
261 __entry->sid = sid;
262 __entry->wcet = wcet;
263 __entry->period = period;
264 ),
265
266 TP_printk("server(%llu, %llu), sid: %llu, cont: %llu\n",
267 __entry->wcet, __entry->period, __entry->sid, __entry->cid)
268);
269
270TRACE_EVENT(litmus_server_switch_to,
271
272 TP_PROTO(int sid, unsigned int job, int tid),
273
274 TP_ARGS(sid, job, tid),
275
276 TP_STRUCT__entry(
277 __field( int, sid)
278 __field( unsigned int, job)
279 __field( int, tid)
280 ),
281
282 TP_fast_assign(
283 __entry->sid = sid;
284 __entry->tid = tid;
285 __entry->job = job;
286 ),
287
288 TP_printk("switch_to(server(%d, %u)): %d\n", __entry->sid, __entry->job, __entry->tid)
289);
290
291TRACE_EVENT(litmus_server_switch_away,
292
293 TP_PROTO(int sid, unsigned int job, int tid),
294
295 TP_ARGS(sid, job, tid),
296
297 TP_STRUCT__entry(
298 __field( int, sid)
299 __field( unsigned int, job)
300 __field( int, tid)
301 ),
302
303 TP_fast_assign(
304 __entry->sid = sid;
 305		__entry->tid = tid;
		__entry->job = job;
306 ),
307
308 TP_printk("switch_away(server(%d, %u)): %d\n", __entry->sid, __entry->job, __entry->tid)
309);
310
311TRACE_EVENT(litmus_server_release,
312
313 TP_PROTO(int sid, unsigned int job, unsigned long long release, unsigned long long deadline),
314
315 TP_ARGS(sid, job, release, deadline),
316
317 TP_STRUCT__entry(
318 __field( int, sid)
319 __field( unsigned int, job)
320 __field( unsigned long long, release)
321 __field( unsigned long long, deadline)
322 ),
323
324 TP_fast_assign(
325 __entry->sid = sid;
326 __entry->job = job;
327 __entry->release = release;
328 __entry->deadline = deadline;
329 ),
330
331 TP_printk("release(server(%d, %u)), release: %llu, deadline: %llu\n", __entry->sid, __entry->job, __entry->release, __entry->deadline)
332);
333
334TRACE_EVENT(litmus_server_completion,
335
 336	TP_PROTO(int sid, unsigned int job),
337
338 TP_ARGS(sid, job),
339
340 TP_STRUCT__entry(
341 __field( int, sid)
342 __field( unsigned int, job)
343 ),
344
345 TP_fast_assign(
346 __entry->sid = sid;
347 __entry->job = job;
348 ),
349
350 TP_printk("completion(server(%d, %d))\n", __entry->sid, __entry->job)
351);
352
 #endif /* _SCHED_TASK_TRACEPOINT_H */
 
 /* Must stay outside the protection */
diff --git a/litmus/sched_mc_ce.c b/litmus/sched_mc_ce.c
new file mode 100644
index 000000000000..4808377b9bb7
--- /dev/null
+++ b/litmus/sched_mc_ce.c
@@ -0,0 +1,1052 @@
1/**
2 * litmus/sched_mc_ce.c
3 *
4 * The Cyclic Executive (CE) scheduler used by the mixed criticality scheduling
5 * algorithm.
6 */
7
8#include <asm/atomic.h>
9#include <asm/uaccess.h>
10
11#include <linux/module.h>
12#include <linux/percpu.h>
13#include <linux/hrtimer.h>
14#include <linux/pid.h>
15#include <linux/sched.h>
16#include <linux/proc_fs.h>
17
18#include <litmus/litmus.h>
19#include <litmus/sched_plugin.h>
20#include <litmus/rt_domain.h>
21#include <litmus/rt_param.h>
22#include <litmus/litmus_proc.h>
23#include <litmus/sched_trace.h>
24#include <litmus/jobs.h>
25#include <litmus/sched_mc.h>
26#include <litmus/ce_domain.h>
27
28static struct sched_plugin mc_ce_plugin __cacheline_aligned_in_smp;
29
30#define using_linux_plugin() (litmus == &linux_sched_plugin)
31
32/* get a reference to struct domain for a CPU */
33#define get_domain_for(cpu) (&per_cpu(domains, cpu)->domain)
34
35#define get_pid_table(cpu) (&per_cpu(ce_pid_table, cpu))
36#define get_pid_entry(cpu, idx) (&(get_pid_table(cpu)->entries[idx]))
37
38static atomic_t start_time_set = ATOMIC_INIT(-1);
39static atomic64_t start_time = ATOMIC64_INIT(0);
40static struct proc_dir_entry *mc_ce_dir = NULL, *ce_file = NULL;
41
42/*
43 * Cache the budget along with the struct PID for a task so that we don't need
44 * to fetch its task_struct every time we check to see what should be
45 * scheduled.
46 */
47struct ce_pid_entry {
48 struct pid *pid;
49 lt_t budget;
50 /* accumulated (summed) budgets, including this one */
51 lt_t acc_time;
52 unsigned int expected_job;
53};
54
55/*
56 * Each CPU needs a mapping of level A ID (integer) to struct pid so that we
57 * can get its task struct.
58 */
59struct ce_pid_table {
60 struct ce_pid_entry entries[CONFIG_PLUGIN_MC_LEVEL_A_MAX_TASKS];
61 int num_pid_entries;
62 lt_t cycle_time;
63};
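/*
 * Illustrative example (values assumed, not from this patch): two level-A
 * tasks on one CPU with budgets of 10 ms and 5 ms give
 *   entries[0] = { .budget = 10ms, .acc_time = 10ms }
 *   entries[1] = { .budget =  5ms, .acc_time = 15ms }
 *   cycle_time = 15ms
 * i.e. the table encodes the static cyclic schedule: slot [0, 10ms) runs
 * task 0 and slot [10ms, 15ms) runs task 1, repeating every cycle_time.
 */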
64
65DEFINE_PER_CPU(struct ce_pid_table, ce_pid_table);
66
67/*
68 * How we get the domain for a given CPU locally. Set with the
69 * mc_ce_set_domains function. Must be done before activating plugins. Be
70 * careful when using domains as a variable elsewhere in this file.
71 */
72
73DEFINE_PER_CPU(struct domain_data*, domains);
74
75/*
76 * The domains and other data used by the MC-CE plugin when it runs alone.
77 */
78DEFINE_PER_CPU(struct domain_data, _mc_ce_doms);
79DEFINE_PER_CPU(struct ce_dom_data, _mc_ce_dom_data);
80DEFINE_PER_CPU(raw_spinlock_t, _mc_ce_dom_locks);
81
82#ifdef CONFIG_PLUGIN_MC_RELEASE_MASTER
83static int interrupt_cpu;
84#endif
85
86long mc_ce_set_domains(const int n, struct domain_data *domains_in[])
87{
88 const int max = (NR_CPUS < n) ? NR_CPUS : n;
89 struct domain_data *new_dom = NULL;
90 int i, ret;
91 if (!using_linux_plugin()) {
92 printk(KERN_WARNING "can't set MC-CE domains when not using "
93 "Linux scheduler.\n");
94 ret = -EINVAL;
95 goto out;
96 }
97 for (i = 0; i < max; ++i) {
98 new_dom = domains_in[i];
99 per_cpu(domains, i) = new_dom;
100 }
101 ret = 0;
102out:
103 return ret;
104}
105
106unsigned int mc_ce_get_expected_job(const int cpu, const int idx)
107{
108 const struct ce_pid_table *pid_table = get_pid_table(cpu);
109 BUG_ON(0 > cpu);
110 BUG_ON(0 > idx);
111 BUG_ON(pid_table->num_pid_entries <= idx);
112 return pid_table->entries[idx].expected_job;
113}
114
115/*
116 * Get the offset into the cycle taking the start time into account.
117 */
118static inline lt_t get_cycle_offset(const lt_t when, const lt_t cycle_time)
119{
120 long long st = atomic64_read(&start_time);
121 lt_t offset = (when - st) % cycle_time;
122 TRACE("when: %llu cycle_time: %llu start_time: %lld offset %llu\n",
123 when, cycle_time, st, offset);
124 return offset;
125}
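/*
 * Illustrative example (numbers assumed): start_time = 100ms, when = 137ms,
 * cycle_time = 15ms gives offset = (137ms - 100ms) % 15ms = 7ms, i.e. we
 * are 7ms into the current cycle.
 */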
126
127/*
128 * The user land job completion call will set the RT_F_SLEEP flag and then
129 * call schedule. This function is used when schedule sleeps a task.
130 *
131 * Do not call prepare_for_next_period on Level-A tasks!
132 */
133static void mc_ce_job_completion(struct domain *dom, struct task_struct *ts)
134{
135 const int cpu = task_cpu(ts);
136 const int idx = tsk_mc_data(ts)->mc_task.lvl_a_id;
137 const struct ce_pid_entry *pid_entry = get_pid_entry(cpu, idx);
138 unsigned int just_finished;
139
140 TRACE_TASK(ts, "Completed\n");
141
142 sched_trace_task_completion(ts, 0);
143 /* post-increment is important here */
144 just_finished = (tsk_rt(ts)->job_params.job_no)++;
145
146 /* Job completes in expected window: everything is normal.
147 * Job completes in an earlier window: BUG(), that's wrong.
148 * Job completes in a later window: The job is behind.
149 */
150 if (just_finished < pid_entry->expected_job) {
151 /* this job is already released because it's running behind */
152 set_rt_flags(ts, RT_F_RUNNING);
153 TRACE_TASK(ts, "appears behind: the expected job is %u but "
154 "job %u just completed\n",
155 pid_entry->expected_job, just_finished);
156 } else if (pid_entry->expected_job < just_finished) {
157 printk(KERN_CRIT "job %u completed in expected job %u which "
158 "seems too early\n", just_finished,
159 pid_entry->expected_job);
160 BUG();
161 }
162}
163
164
165/*
166 * Return the index into the PID entries table of what to schedule next.
167 * Don't call if the table is empty. Assumes the caller has the domain lock.
168 * The offset parameter is the offset into the cycle.
169 *
170 * TODO Currently O(n) in the number of tasks on the CPU. Binary search?
171 */
172static int mc_ce_schedule_at(const struct domain *dom, lt_t offset)
173{
174 const struct ce_dom_data *ce_data = dom->data;
175 struct ce_pid_table *pid_table = get_pid_table(ce_data->cpu);
176 const struct ce_pid_entry *pid_entry = NULL;
177 int idx;
178
179 BUG_ON(pid_table->cycle_time < 1);
180 BUG_ON(pid_table->num_pid_entries < 1);
181
182 for (idx = 0; idx < pid_table->num_pid_entries; ++idx) {
183 pid_entry = &pid_table->entries[idx];
184 if (offset < pid_entry->acc_time) {
185 /* found task to schedule in this window */
186 break;
187 }
188 }
189 /* can only happen if cycle_time is not right */
190 BUG_ON(pid_entry->acc_time > pid_table->cycle_time);
191 TRACE("schedule at returning task %d for CPU %d\n", idx, ce_data->cpu);
192 return idx;
193}
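/*
 * Illustrative example (same assumed table as above): with acc_time values
 * {10ms, 15ms} and offset = 12ms, the loop breaks at idx = 1 because
 * 12ms < 15ms, so the entry owning the window [10ms, 15ms) is returned.
 */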
194
195static struct task_struct *mc_ce_schedule(struct task_struct *prev)
196{
197 struct domain *dom = get_domain_for(smp_processor_id());
198 struct ce_dom_data *ce_data = dom->data;
199 struct task_struct *next = NULL;
200 int exists, sleep, should_sched_exists, should_sched_blocked,
201 should_sched_asleep;
202
203 raw_spin_lock(dom->lock);
204
205 /* sanity checking */
206 BUG_ON(ce_data->scheduled && ce_data->scheduled != prev);
207 BUG_ON(ce_data->scheduled && !is_realtime(prev));
208 BUG_ON(is_realtime(prev) && !ce_data->scheduled);
209
210 exists = NULL != ce_data->scheduled;
211 sleep = exists && RT_F_SLEEP == get_rt_flags(ce_data->scheduled);
212
213 TRACE("exists: %d, sleep: %d\n", exists, sleep);
214
215 if (sleep)
216 mc_ce_job_completion(dom, ce_data->scheduled);
217
218 /* these checks must go after the call to mc_ce_job_completion in case
219 * a late task needs to be scheduled again right away and its the only
220 * task on a core
221 */
222 should_sched_exists = NULL != ce_data->should_schedule;
223 should_sched_blocked = should_sched_exists &&
224 !is_running(ce_data->should_schedule);
225 should_sched_asleep = should_sched_exists &&
226 RT_F_SLEEP == get_rt_flags(ce_data->should_schedule);
227
228 TRACE("should_sched_exists: %d, should_sched_blocked: %d, "
229 "should_sched_asleep: %d\n", should_sched_exists,
230 should_sched_blocked, should_sched_asleep);
231
232 if (should_sched_exists && !should_sched_blocked &&
233 !should_sched_asleep) {
234 /*
235 * schedule the task that should be executing in the cyclic
236 * schedule if it is not blocked and not sleeping
237 */
238 next = ce_data->should_schedule;
239 }
240 sched_state_task_picked();
241 raw_spin_unlock(dom->lock);
242 return next;
243}
244
245static void mc_ce_finish_switch(struct task_struct *prev)
246{
247 struct domain *dom = get_domain_for(smp_processor_id());
248 struct ce_dom_data *ce_data = dom->data;
249
250 TRACE("finish switch\n");
251
252 if (is_realtime(current) && CRIT_LEVEL_A == tsk_mc_crit(current))
253 ce_data->scheduled = current;
254 else
255 ce_data->scheduled = NULL;
256}
257
258/*
259 * Admit task called to see if this task is permitted to enter the system.
260 * Here we look up the task's PID structure and save it in the proper slot on
261 * the CPU this task will run on.
262 */
263long mc_ce_admit_task_common(struct task_struct *ts)
264{
265 struct domain *dom = get_domain_for(get_partition(ts));
266 struct ce_dom_data *ce_data = dom->data;
267 struct mc_data *mcd = tsk_mc_data(ts);
268 struct pid *pid = NULL;
269 long retval = -EINVAL;
270 const int lvl_a_id = mcd->mc_task.lvl_a_id;
271 struct ce_pid_table *pid_table = get_pid_table(ce_data->cpu);
272
273 BUG_ON(get_partition(ts) != ce_data->cpu);
274
275 /* check the task has migrated to the right CPU (like in sched_cedf) */
276 if (task_cpu(ts) != get_partition(ts)) {
277 printk(KERN_INFO "litmus: %d admitted on CPU %d but want %d ",
278 ts->pid, task_cpu(ts), get_partition(ts));
279 goto out;
280 }
281
282 /* only level A tasks can be CE */
283 if (!mcd || CRIT_LEVEL_A != tsk_mc_crit(ts)) {
284 printk(KERN_INFO "litmus: non-MC or non level A task %d\n",
285 ts->pid);
286 goto out;
287 }
288
289 /* try and get the task's PID structure */
290 pid = get_task_pid(ts, PIDTYPE_PID);
291 if (IS_ERR_OR_NULL(pid)) {
292 printk(KERN_INFO "litmus: couldn't get pid struct for %d\n",
293 ts->pid);
294 goto out;
 295	}
296
297 if (lvl_a_id >= pid_table->num_pid_entries) {
298 printk(KERN_INFO "litmus: level A id greater than expected "
299 "number of tasks %d for %d cpu %d\n",
300 pid_table->num_pid_entries, ts->pid,
301 get_partition(ts));
302 goto out_put_pid;
303 }
304 if (pid_table->entries[lvl_a_id].pid) {
305 printk(KERN_INFO "litmus: have saved pid info id: %d cpu: %d\n",
306 lvl_a_id, get_partition(ts));
307 goto out_put_pid;
308 }
309 if (get_exec_cost(ts) >= pid_table->entries[lvl_a_id].budget) {
310 printk(KERN_INFO "litmus: execution cost %llu is larger than "
311 "the budget %llu\n",
312 get_exec_cost(ts),
313 pid_table->entries[lvl_a_id].budget);
314 goto out_put_pid;
315 }
316 pid_table->entries[lvl_a_id].pid = pid;
317 retval = 0;
318 /* don't call put_pid if we are successful */
319 goto out;
320
321out_put_pid:
322 put_pid(pid);
323out:
324 return retval;
325}
326
327static long mc_ce_admit_task(struct task_struct *ts)
328{
329 struct domain *dom = get_domain_for(get_partition(ts));
330 unsigned long flags, retval;
331 raw_spin_lock_irqsave(dom->lock, flags);
332 retval = mc_ce_admit_task_common(ts);
333 raw_spin_unlock_irqrestore(dom->lock, flags);
334 return retval;
335}
336
337/*
338 * Called to set up a new real-time task (after the admit_task callback).
339 * At this point the task's struct PID is already hooked up on the destination
340 * CPU. The task may already be running.
341 */
342static void mc_ce_task_new(struct task_struct *ts, int on_rq, int running)
343{
344 const int cpu = task_cpu(ts);
345 struct domain *dom = get_domain_for(cpu);
346 struct ce_dom_data *ce_data = dom->data;
347 struct ce_pid_table *pid_table = get_pid_table(cpu);
348 struct pid *pid_should_be_running;
349 struct ce_pid_entry *pid_entry;
350 unsigned long flags;
351 int idx, should_be_running;
352 lt_t offset;
353
354 raw_spin_lock_irqsave(dom->lock, flags);
355 pid_entry = get_pid_entry(cpu, tsk_mc_data(ts)->mc_task.lvl_a_id);
356 /* initialize some task state */
357 set_rt_flags(ts, RT_F_RUNNING);
358
359 /* have to call mc_ce_schedule_at because the task only gets a PID
360 * entry after calling admit_task */
361 offset = get_cycle_offset(litmus_clock(), pid_table->cycle_time);
362 idx = mc_ce_schedule_at(dom, offset);
363 pid_should_be_running = get_pid_entry(cpu, idx)->pid;
364 rcu_read_lock();
365 should_be_running = (ts == pid_task(pid_should_be_running, PIDTYPE_PID));
366 rcu_read_unlock();
367 if (running) {
368 /* admit task checks that the task is not on the wrong CPU */
369 BUG_ON(task_cpu(ts) != get_partition(ts));
370 BUG_ON(ce_data->scheduled);
371 ce_data->scheduled = ts;
372
373 if (should_be_running)
374 ce_data->should_schedule = ts;
375 else
376 preempt_if_preemptable(ce_data->scheduled, ce_data->cpu);
377 } else if (!running && should_be_running) {
378 ce_data->should_schedule = ts;
379 preempt_if_preemptable(ce_data->scheduled, ce_data->cpu);
380 }
381 raw_spin_unlock_irqrestore(dom->lock, flags);
382}
383
384/*
385 * Called to re-introduce a task after blocking.
 386 * Can potentially be called multiple times.
387 */
388static void mc_ce_task_wake_up(struct task_struct *ts)
389{
390 struct domain *dom = get_domain_for(get_partition(ts));
391 struct ce_dom_data *ce_data = dom->data;
392 unsigned long flags;
393
394 TRACE_TASK(ts, "wake up\n");
395
396 raw_spin_lock_irqsave(dom->lock, flags);
397 if (ts == ce_data->should_schedule && ts != ce_data->scheduled)
398 preempt_if_preemptable(ts, ce_data->cpu);
399 raw_spin_unlock_irqrestore(dom->lock, flags);
400}
401
402/*
 403 * Called to notify the plugin of a blocking real-time task. Only called for
404 * real-time tasks and before schedule is called.
405 */
406static void mc_ce_task_block(struct task_struct *ts)
407{
408 /* nothing to do because it will be taken care of in schedule */
409 TRACE_TASK(ts, "blocked\n");
410}
411
412/*
413 * Called when a task switches from RT mode back to normal mode.
414 */
415void mc_ce_task_exit_common(struct task_struct *ts)
416{
417 struct domain *dom = get_domain_for(get_partition(ts));
418 struct ce_dom_data *ce_data = dom->data;
419 unsigned long flags;
420 struct pid *pid;
421 const int lvl_a_id = tsk_mc_data(ts)->mc_task.lvl_a_id;
422 struct ce_pid_table *pid_table = get_pid_table(ce_data->cpu);
423
424 BUG_ON(CRIT_LEVEL_A != tsk_mc_crit(ts));
425 BUG_ON(lvl_a_id >= pid_table->num_pid_entries);
426
427 raw_spin_lock_irqsave(dom->lock, flags);
428 pid = pid_table->entries[lvl_a_id].pid;
429 BUG_ON(!pid);
430 put_pid(pid);
431 pid_table->entries[lvl_a_id].pid = NULL;
432 if (ce_data->scheduled == ts)
433 ce_data->scheduled = NULL;
434 if (ce_data->should_schedule == ts)
435 ce_data->should_schedule = NULL;
436 raw_spin_unlock_irqrestore(dom->lock, flags);
437}
438
439/***********************************************************
440 * Timer stuff
441 **********************************************************/
442
443/*
444 * Returns the next absolute time that the timer should fire.
445 */
446lt_t mc_ce_timer_callback_common(struct domain *dom)
447{
448 /* relative and absolute times for cycles */
449 lt_t now, offset_rel, cycle_start_abs, next_timer_abs;
450 struct task_struct *should_schedule;
451 struct ce_pid_table *pid_table;
452 struct ce_pid_entry *pid_entry;
453 struct ce_dom_data *ce_data;
454 int idx, budget_overrun;
455
456 ce_data = dom->data;
457 pid_table = get_pid_table(ce_data->cpu);
458
459 /* Based off of the current time, figure out the offset into the cycle
460 * and the cycle's start time, and determine what should be scheduled.
461 */
462 now = litmus_clock();
463 offset_rel = get_cycle_offset(now, pid_table->cycle_time);
464 cycle_start_abs = now - offset_rel;
465 idx = mc_ce_schedule_at(dom, offset_rel);
466 pid_entry = get_pid_entry(ce_data->cpu, idx);
467 next_timer_abs = cycle_start_abs + pid_entry->acc_time;
468
469 STRACE("timer: now: %llu offset_rel: %llu cycle_start_abs: %llu "
470 "next_timer_abs: %llu\n", now, offset_rel,
471 cycle_start_abs, next_timer_abs);
472
473 /* get the task_struct (pid_task can accept a NULL) */
474 rcu_read_lock();
475 should_schedule = pid_task(pid_entry->pid, PIDTYPE_PID);
476 rcu_read_unlock();
477 ce_data->should_schedule = should_schedule;
478
479 if (should_schedule && 0 == atomic_read(&start_time_set)) {
480 /*
481 * If jobs are not overrunning their budgets, then this
482 * should not happen.
483 */
484 pid_entry->expected_job++;
485 budget_overrun = pid_entry->expected_job !=
486 tsk_rt(should_schedule)->job_params.job_no;
487 if (budget_overrun)
488 TRACE_MC_TASK(should_schedule,
489 "timer expected job number: %u "
490 "but current job: %u\n",
491 pid_entry->expected_job,
492 tsk_rt(should_schedule)->job_params.job_no);
493 }
494
495 if (ce_data->should_schedule) {
496 tsk_rt(should_schedule)->job_params.deadline =
497 cycle_start_abs + pid_entry->acc_time;
498 tsk_rt(should_schedule)->job_params.release =
499 tsk_rt(should_schedule)->job_params.deadline -
500 pid_entry->budget;
501 tsk_rt(should_schedule)->job_params.exec_time = 0;
502 sched_trace_task_release(should_schedule);
503 set_rt_flags(ce_data->should_schedule, RT_F_RUNNING);
504 }
505 return next_timer_abs;
506}
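/*
 * Illustrative example (assumed values): with the 10ms/5ms table from above
 * and now = start_time + 37ms:
 *   offset_rel      = 7ms                  (see get_cycle_offset)
 *   cycle_start_abs = now - 7ms
 *   idx             = 0                    (7ms < 10ms)
 *   next_timer_abs  = cycle_start_abs + 10ms
 * so the timer next fires at the boundary between the two slots.
 */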
507
508/*
509 * What to do when a timer fires. The timer should only be armed if the number
510 * of PID entries is positive.
511 */
512#ifdef CONFIG_MERGE_TIMERS
513static void mc_ce_timer_callback(struct rt_event *e)
514#else
515static enum hrtimer_restart mc_ce_timer_callback(struct hrtimer *timer)
516#endif
517{
518 struct ce_dom_data *ce_data;
519 unsigned long flags;
520 struct domain *dom;
521 lt_t next_timer_abs;
522#ifdef CONFIG_MERGE_TIMERS
523 struct event_group *event_group;
524 ce_data = container_of(e, struct ce_dom_data, event);
 525	/* use the same CPU the callback is executing on by passing NO_CPU */
526 event_group = get_event_group_for(NO_CPU);
527#else /* CONFIG_MERGE_TIMERS */
528 ce_data = container_of(timer, struct ce_dom_data, timer);
529#endif
530 dom = get_domain_for(ce_data->cpu);
531
532 TRACE("timer callback on CPU %d (before lock)\n", ce_data->cpu);
533
534 raw_spin_lock_irqsave(dom->lock, flags);
535 next_timer_abs = mc_ce_timer_callback_common(dom);
536
537 /* setup an event or timer for the next release in the CE schedule */
538#ifdef CONFIG_MERGE_TIMERS
539 add_event(event_group, e, next_timer_abs);
540#else
541 hrtimer_set_expires(timer, ns_to_ktime(next_timer_abs));
542#endif
543
544 if (ce_data->scheduled != ce_data->should_schedule)
545 preempt_if_preemptable(ce_data->scheduled, ce_data->cpu);
546
547 raw_spin_unlock_irqrestore(dom->lock, flags);
548
549#ifndef CONFIG_MERGE_TIMERS
550 return HRTIMER_RESTART;
551#endif
552}
553
554/*
555 * Cancel timers on all CPUs. Returns 1 if any were active.
556 */
557static int cancel_all_timers(void)
558{
559 struct ce_dom_data *ce_data;
560 struct domain *dom;
561 int cpu, ret = 0;
562#ifndef CONFIG_MERGE_TIMERS
563 int cancel_res;
564#endif
565
566 TRACE("cancel all timers\n");
567
568 for_each_online_cpu(cpu) {
569 dom = get_domain_for(cpu);
570 ce_data = dom->data;
571 ce_data->should_schedule = NULL;
572#ifdef CONFIG_MERGE_TIMERS
573 cancel_event(&ce_data->event);
574#else
575 cancel_res = hrtimer_cancel(&ce_data->timer);
576 atomic_set(&ce_data->timer_info.state,
577 HRTIMER_START_ON_INACTIVE);
578 ret = ret || cancel_res;
579#endif
580 }
581 return ret;
582}
583
584/*
585 * Arm all timers so that they start at the new value of start time.
586 * Any CPU without CE PID entries won't have a timer armed.
587 * All timers should be canceled before calling this.
588 */
589static void arm_all_timers(void)
590{
591 struct domain *dom;
592 struct ce_dom_data *ce_data;
593 struct ce_pid_table *pid_table;
594 int cpu, idx, cpu_for_timer;
595 const lt_t start = atomic64_read(&start_time);
596
597 TRACE("arm all timers\n");
598
599 for_each_online_cpu(cpu) {
600 dom = get_domain_for(cpu);
601 ce_data = dom->data;
602 pid_table = get_pid_table(cpu);
603 if (0 == pid_table->num_pid_entries)
604 continue;
605 for (idx = 0; idx < pid_table->num_pid_entries; idx++) {
606 pid_table->entries[idx].expected_job = 0;
607 }
608#ifdef CONFIG_PLUGIN_MC_RELEASE_MASTER
609 cpu_for_timer = interrupt_cpu;
610#else
611 cpu_for_timer = cpu;
612#endif
613
614#ifdef CONFIG_MERGE_TIMERS
615 add_event(get_event_group_for(cpu_for_timer),
616 &ce_data->event, start);
617#else
618 hrtimer_start_on(cpu_for_timer, &ce_data->timer_info,
619 &ce_data->timer, ns_to_ktime(start),
620 HRTIMER_MODE_ABS_PINNED);
621#endif
622 }
623}
624
625/*
626 * There are no real releases in the CE, but the task release syscall will
627 * call this. We can re-set our notion of the CE period start to make
628 * the schedule look pretty.
629 */
630void mc_ce_release_at_common(struct task_struct *ts, lt_t start)
631{
632 TRACE_TASK(ts, "release at\n");
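	/* start_time_set is initialised to -1, so the first caller's
	 * atomic_inc_and_test() hits zero and returns true; that caller
	 * re-arms the timers.  Every later caller sees a non-zero result,
	 * gets false, and undoes its increment in the else branch below.
	 */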
633 if (atomic_inc_and_test(&start_time_set)) {
634 /* in this case, we won the race */
635 cancel_all_timers();
636 atomic64_set(&start_time, start);
637 arm_all_timers();
638 } else
639 atomic_dec(&start_time_set);
640}
641
642long mc_ce_activate_plugin_common(void)
643{
644 struct ce_dom_data *ce_data;
645 struct domain *dom;
646 long ret;
647 int cpu;
648
649#ifdef CONFIG_PLUGIN_MC_RELEASE_MASTER
650 interrupt_cpu = atomic_read(&release_master_cpu);
651 if (NO_CPU == interrupt_cpu) {
652 printk(KERN_ERR "LITMUS: MC-CE needs a release master\n");
653 ret = -EINVAL;
654 goto out;
655 }
656#endif
657
658 for_each_online_cpu(cpu) {
659 dom = get_domain_for(cpu);
660 ce_data = dom->data;
661 ce_data->scheduled = NULL;
662 ce_data->should_schedule = NULL;
663 }
664
665 atomic_set(&start_time_set, -1);
666 atomic64_set(&start_time, litmus_clock());
667 /* may not want to arm timers on activation, just after release */
668 arm_all_timers();
669 ret = 0;
670out:
671 return ret;
672}
673
674static long mc_ce_activate_plugin(void)
675{
676 struct domain_data *our_domains[NR_CPUS];
677 int cpu, n = 0;
678 long ret;
679
680 for_each_online_cpu(cpu) {
681 BUG_ON(NR_CPUS <= n);
682 our_domains[cpu] = &per_cpu(_mc_ce_doms, cpu);
683 n++;
684 }
685 ret = mc_ce_set_domains(n, our_domains);
686 if (ret)
687 goto out;
688 ret = mc_ce_activate_plugin_common();
689out:
690 return ret;
691}
692
693static void clear_pid_entries(void)
694{
695 struct ce_pid_table *pid_table = NULL;
696 int cpu, entry;
697
698 for_each_online_cpu(cpu) {
699 pid_table = get_pid_table(cpu);
700 pid_table->num_pid_entries = 0;
701 pid_table->cycle_time = 0;
702 for (entry = 0; entry < CONFIG_PLUGIN_MC_LEVEL_A_MAX_TASKS;
703 ++entry) {
704 if (NULL != pid_table->entries[entry].pid) {
705 put_pid(pid_table->entries[entry].pid);
706 pid_table->entries[entry].pid = NULL;
707 }
708 pid_table->entries[entry].budget = 0;
709 pid_table->entries[entry].acc_time = 0;
710 pid_table->entries[entry].expected_job = 0;
711 }
712 }
713}
714
715long mc_ce_deactivate_plugin_common(void)
716{
717 int cpu;
718 cancel_all_timers();
719 for_each_online_cpu(cpu) {
720 per_cpu(domains, cpu) = NULL;
721 }
722 return 0;
723}
724
725/* Plugin object */
726static struct sched_plugin mc_ce_plugin __cacheline_aligned_in_smp = {
727 .plugin_name = "MC-CE",
728 .admit_task = mc_ce_admit_task,
729 .task_new = mc_ce_task_new,
730 .complete_job = complete_job,
731 .release_at = mc_ce_release_at_common,
732 .task_exit = mc_ce_task_exit_common,
733 .schedule = mc_ce_schedule,
734 .finish_switch = mc_ce_finish_switch,
735 .task_wake_up = mc_ce_task_wake_up,
736 .task_block = mc_ce_task_block,
737 .activate_plugin = mc_ce_activate_plugin,
738 .deactivate_plugin = mc_ce_deactivate_plugin_common,
739};
740
741static int setup_proc(void);
742static int __init init_sched_mc_ce(void)
743{
744 raw_spinlock_t *ce_lock;
745 struct domain_data *dom_data;
746 struct domain *dom;
747 int cpu, err;
748
749 for_each_online_cpu(cpu) {
750 per_cpu(domains, cpu) = NULL;
751 ce_lock = &per_cpu(_mc_ce_dom_locks, cpu);
752 raw_spin_lock_init(ce_lock);
753 dom_data = &per_cpu(_mc_ce_doms, cpu);
754 dom = &dom_data->domain;
755 ce_domain_init(dom, ce_lock, NULL, NULL, NULL, NULL, NULL,
756 &per_cpu(_mc_ce_dom_data, cpu), cpu,
757 mc_ce_timer_callback);
758 }
759 clear_pid_entries();
760 err = setup_proc();
761 if (!err)
762 err = register_sched_plugin(&mc_ce_plugin);
763 return err;
764}
765
766#define BUF_SIZE PAGE_SIZE
767static int write_into_proc(char *proc_buf, const int proc_size, char *fmt, ...)
768{
769 static char buf[BUF_SIZE];
770 int n;
771 va_list args;
772
773 /* When writing to procfs, we don't care about the trailing null that
774 * is not included in the count returned by vscnprintf.
775 */
776 va_start(args, fmt);
777 n = vsnprintf(buf, BUF_SIZE, fmt, args);
778 va_end(args);
779 if (BUF_SIZE <= n || proc_size <= n) {
780 /* too big for formatting buffer or proc (less null byte) */
781 n = -EINVAL;
782 goto out;
783 }
784 memcpy(proc_buf, buf, n);
785out:
786 return n;
787}
788#undef BUF_SIZE
789
790/*
791 * Writes a PID entry to the procfs.
792 *
793 * @page buffer to write into.
794 * @count bytes available in the buffer
795 */
796#define PID_SPACE 15
797#define TASK_INFO_BUF (PID_SPACE + TASK_COMM_LEN)
798static int write_pid_entry(char *page, const int count, const int cpu,
799 const int task, struct ce_pid_entry *pid_entry)
800{
801 static char task_info[TASK_INFO_BUF];
802 struct task_struct *ts;
803 int n = 0, err, ti_n;
804 char *ti_b;
805
806 if (pid_entry->pid) {
807 rcu_read_lock();
808 ts = pid_task(pid_entry->pid, PIDTYPE_PID);
809 rcu_read_unlock();
810
811 /* get some information about the task */
812 if (ts) {
813 ti_b = task_info;
814 ti_n = snprintf(ti_b, PID_SPACE, "%d", ts->pid);
815 if (PID_SPACE <= ti_n)
816 ti_n = PID_SPACE - 1;
817 ti_b += ti_n;
818 *ti_b = ' '; /* nuke the null byte */
819 ti_b++;
820 get_task_comm(ti_b, ts);
821 } else {
822 strncpy(task_info, "pid_task() failed :(",
823 TASK_INFO_BUF);
824 }
825
826 } else
827 strncpy(task_info, "no", TASK_INFO_BUF);
828 task_info[TASK_INFO_BUF - 1] = '\0'; /* just to be sure */
829
830 err = write_into_proc(page + n, count - n, "# task: %s\n", task_info);
831 if (err < 0) {
832 n = -ENOSPC;
833 goto out;
834 }
835 n += err;
836 err = write_into_proc(page + n, count - n, "%d, %d, %llu\n",
837 cpu, task, pid_entry->budget);
838 if (err < 0) {
839 n = -ENOSPC;
840 goto out;
841 }
842 n += err;
843out:
844 return n;
845}
846#undef PID_SPACE
847#undef TASK_INFO_BUF
848
849/*
850 * Called when the user-land reads from proc.
851 */
852static int proc_read_ce_file(char *page, char **start, off_t off, int count,
853 int *eof, void *data)
854{
855 int n = 0, err, cpu, t;
856 struct ce_pid_table *pid_table;
857
858 if (off > 0) {
859 printk(KERN_INFO "litmus: MC-CE called read with off > 0\n");
860 goto out;
861 }
862
863 for_each_online_cpu(cpu) {
864 pid_table = get_pid_table(cpu);
865 for (t = 0; t < pid_table->num_pid_entries; ++t) {
866 err = write_pid_entry(page + n, count - n,
867 cpu, t, get_pid_entry(cpu, t));
868 if (err < 0) {
869 n = -ENOSPC;
870 goto out;
871 }
872 n += err;
873 }
874 }
875out:
876 *eof = 1;
877 return n;
878}
879
880/*
881 * Skip a commented line.
882 */
883static int skip_comment(const char *buf, const unsigned long max)
884{
885 unsigned long i = 0;
886 const char *c = buf;
887 if (0 == max || !c || *c != '#')
888 return 0;
889 ++c; ++i;
890 for (; i < max; ++i) {
891 if (*c == '\n') {
892 ++c; ++i;
893 break;
894 }
895 ++c;
896 }
897 return i;
898}
899
900/* a budget of 5 milliseconds is probably reasonable */
901#define BUDGET_THRESHOLD 5000000ULL
902static int setup_pid_entry(const int cpu, const int task, const lt_t budget)
903{
904 struct ce_pid_table *pid_table = get_pid_table(cpu);
905 struct ce_pid_entry *new_entry = NULL;
906 int err = 0;
907
908 /* check the inputs */
909 if (cpu < 0 || NR_CPUS <= cpu || task < 0 ||
910 CONFIG_PLUGIN_MC_LEVEL_A_MAX_TASKS <= task ||
911 budget < 1) {
912 printk(KERN_INFO "litmus: bad cpu, task ID, or budget sent to "
913 "MC-CE proc\n");
914 err = -EINVAL;
915 goto out;
916 }
917 /* check for small budgets */
918 if (BUDGET_THRESHOLD > budget) {
919 printk(KERN_CRIT "litmus: you gave a small budget for an "
920 "MC-CE task; that might be an issue.\n");
921 }
922 /* check that we have space for a new entry */
923 if (CONFIG_PLUGIN_MC_LEVEL_A_MAX_TASKS <= pid_table->num_pid_entries) {
924 printk(KERN_INFO "litmus: too many MC-CE tasks for cpu "
925 "%d\n", cpu);
926 err = -EINVAL;
927 goto out;
928 }
929 /* add the new entry */
930 new_entry = get_pid_entry(cpu, pid_table->num_pid_entries);
931 BUG_ON(NULL != new_entry->pid);
932 new_entry->budget = budget;
933 new_entry->acc_time = pid_table->cycle_time + budget;
934 /* update the domain entry */
935 pid_table->cycle_time += budget;
936 pid_table->num_pid_entries++;
937out:
938 return err;
939}
940#undef BUDGET_THRESHOLD
941
942/*
943 * Called when the user-land writes to proc.
944 *
945 * Error checking is quite minimal. Format is:
946 * <cpu>, <process ID>, <budget>
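 *
 * Illustrative example (the proc path is assumed from make_plugin_proc_dir();
 * budgets are in nanoseconds, and all entries must arrive in a single write()
 * because each write clears the table first):
 *   printf "0, 0, 10000000\n0, 1, 5000000\n" > /proc/litmus/plugins/MC-CE/ce_file
 * gives CPU 0 the 10 ms / 5 ms cycle used in the examples above.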
947 */
948#define PROCFS_MAX_SIZE PAGE_SIZE
949static int proc_write_ce_file(struct file *file, const char __user *buffer,
950 unsigned long count, void *data)
951{
952 static char kbuf[PROCFS_MAX_SIZE];
953 char *c = kbuf, *c_skipped;
954 int cpu, task, cnt = 0, chars_read, converted, err;
955 lt_t budget;
956
957 if (!using_linux_plugin()) {
958 printk(KERN_INFO "litmus: can only edit MC-CE proc under Linux "
959 "plugin\n");
960 cnt = -EINVAL;
961 goto out;
962 }
963
964 if (count > PROCFS_MAX_SIZE) {
965 printk(KERN_INFO "litmus: MC-CE procfs got too many bytes "
966 "from user-space.\n");
967 cnt = -EINVAL;
968 goto out;
969 }
970
971 if (copy_from_user(kbuf, buffer, count)) {
972 printk(KERN_INFO "litmus: couldn't copy from user %s\n",
973 __FUNCTION__);
974 cnt = -EFAULT;
975 goto out;
976 }
977 clear_pid_entries();
978 while (cnt < count) {
979 c_skipped = skip_spaces(c);
980 if (c_skipped != c) {
981 chars_read = c_skipped - c;
982 cnt += chars_read;
983 c += chars_read;
984 continue;
985 }
986 if (*c == '#') {
987 chars_read = skip_comment(c, count - cnt);
988 cnt += chars_read;
989 c += chars_read;
990 continue;
991 }
992 converted = sscanf(c, "%d, %d, %llu%n", &cpu, &task, &budget,
993 &chars_read);
994 if (3 != converted) {
995 printk(KERN_INFO "litmus: MC-CE procfs expected three "
996 "arguments, but got %d.\n", converted);
997 cnt = -EINVAL;
998 goto out;
999 }
1000 cnt += chars_read;
1001 c += chars_read;
1002 err = setup_pid_entry(cpu, task, budget);
1003 if (err) {
1004 cnt = -EINVAL;
1005 goto out;
1006 }
1007 }
1008out:
1009 return cnt;
1010}
1011#undef PROCFS_MAX_SIZE
1012
1013#define CE_FILE_PROC_NAME "ce_file"
1014static void tear_down_proc(void)
1015{
1016 if (ce_file)
1017 remove_proc_entry(CE_FILE_PROC_NAME, mc_ce_dir);
1018 if (mc_ce_dir)
1019 remove_plugin_proc_dir(&mc_ce_plugin);
1020}
1021
1022static int setup_proc(void)
1023{
1024 int err;
1025 err = make_plugin_proc_dir(&mc_ce_plugin, &mc_ce_dir);
1026 if (err) {
1027 printk(KERN_ERR "could not create MC-CE procfs dir.\n");
1028 goto out;
1029 }
1030 ce_file = create_proc_entry(CE_FILE_PROC_NAME, 0644, mc_ce_dir);
1031 if (!ce_file) {
1032 printk(KERN_ERR "could not create MC-CE procfs file.\n");
1033 err = -EIO;
1034 goto out_remove_proc;
1035 }
1036 ce_file->read_proc = proc_read_ce_file;
1037 ce_file->write_proc = proc_write_ce_file;
1038 goto out;
1039out_remove_proc:
1040 tear_down_proc();
1041out:
1042 return err;
1043}
1044#undef CE_FILE_PROC_NAME
1045
1046static void clean_sched_mc_ce(void)
1047{
1048 tear_down_proc();
1049}
1050
1051module_init(init_sched_mc_ce);
1052module_exit(clean_sched_mc_ce);
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c
index 00a1900d6457..123c7516fb76 100644
--- a/litmus/sched_plugin.c
+++ b/litmus/sched_plugin.c
@@ -95,6 +95,10 @@ static void litmus_dummy_task_exit(struct task_struct *task)
 {
 }
 
+static void litmus_dummy_release_ts(lt_t time)
+{
+}
+
 static long litmus_dummy_complete_job(void)
 {
 	return -ENOSYS;
@@ -136,6 +140,7 @@ struct sched_plugin linux_sched_plugin = {
 	.finish_switch = litmus_dummy_finish_switch,
 	.activate_plugin = litmus_dummy_activate_plugin,
 	.deactivate_plugin = litmus_dummy_deactivate_plugin,
+	.release_ts = litmus_dummy_release_ts,
 #ifdef CONFIG_LITMUS_LOCKING
 	.allocate_lock = litmus_dummy_allocate_lock,
 #endif
@@ -174,6 +179,7 @@ int register_sched_plugin(struct sched_plugin* plugin)
 	CHECK(complete_job);
 	CHECK(activate_plugin);
 	CHECK(deactivate_plugin);
+	CHECK(release_ts);
 #ifdef CONFIG_LITMUS_LOCKING
 	CHECK(allocate_lock);
 #endif
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 8e4a22dd8d6a..a4ffb9bb9970 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -284,6 +284,9 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running)
 	TRACE_TASK(t, "psn edf: task new, cpu = %d\n",
 		   t->rt_param.task_params.cpu);
 
+	trace_litmus_server_param(0 - t->pid, -1 - get_partition(t),
+				  get_exec_cost(t), get_rt_period(t));
+
 	/* setup job parameters */
 	release_at(t, litmus_clock());
 
diff --git a/litmus/sync.c b/litmus/sync.c
index bf75fde5450b..f3c9262f7022 100644
--- a/litmus/sync.c
+++ b/litmus/sync.c
@@ -73,6 +73,9 @@ static long do_release_ts(lt_t start)
 
 	complete_n(&ts_release, task_count);
 
+	/* TODO: remove this hack */
+	litmus->release_ts(start);
+
 	return task_count;
 }
 