diff options
author | Andrea Bastoni <bastoni@cs.unc.edu> | 2009-12-17 21:23:36 -0500 |
---|---|---|
committer | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-05-29 17:05:45 -0400 |
commit | 4b38febbd59fd33542a343991262119eb9860f5e (patch) | |
tree | 1af88a0d354abe344c2c2869631f76a1806d75c3 /litmus/sched_litmus.c | |
parent | 22763c5cf3690a681551162c15d34d935308c8d7 (diff) |
[ported from 2008.3] Core LITMUS^RT infrastructure
Port 2008.3 Core LITMUS^RT infrastructure to Linux 2.6.32
litmus_sched_class implements 4 new methods:
- prio_changed:
void
- switched_to:
void
- get_rr_interval:
return infinity (i.e., 0)
- select_task_rq:
return current cpu
Diffstat (limited to 'litmus/sched_litmus.c')
-rw-r--r-- | litmus/sched_litmus.c | 275 |
1 file changed, 275 insertions, 0 deletions
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c new file mode 100644 index 000000000000..ccedd3670ac5 --- /dev/null +++ b/litmus/sched_litmus.c | |||
@@ -0,0 +1,275 @@ | |||
1 | /* This file is included from kernel/sched.c */ | ||
2 | |||
3 | #include <litmus/litmus.h> | ||
4 | #include <litmus/sched_plugin.h> | ||
5 | |||
6 | static void update_time_litmus(struct rq *rq, struct task_struct *p) | ||
7 | { | ||
8 | u64 delta = rq->clock - p->se.exec_start; | ||
9 | if (unlikely((s64)delta < 0)) | ||
10 | delta = 0; | ||
11 | /* per job counter */ | ||
12 | p->rt_param.job_params.exec_time += delta; | ||
13 | /* task counter */ | ||
14 | p->se.sum_exec_runtime += delta; | ||
15 | /* sched_clock() */ | ||
16 | p->se.exec_start = rq->clock; | ||
17 | cpuacct_charge(p, delta); | ||
18 | } | ||
19 | |||
/* Forward declarations: defined in kernel/sched.c, which textually
 * includes this file (see header comment above).
 */
static void double_rq_lock(struct rq *rq1, struct rq *rq2);
static void double_rq_unlock(struct rq *rq1, struct rq *rq2);
22 | |||
/* Scheduler-tick hook for LITMUS^RT: update execution-time accounting
 * for real-time tasks, then forward the tick to the active plugin.
 * Note: litmus->tick() is invoked for every task, not only real-time ones.
 */
static void litmus_tick(struct rq *rq, struct task_struct *p)
{
	if (is_realtime(p))
		update_time_litmus(rq, p);
	litmus->tick(p);
}
29 | |||
/* Obtain the next task from the active LITMUS^RT plugin and, if a global
 * plugin selected a task that currently belongs to another runqueue,
 * migrate it here.  The chosen task is stashed in rq->litmus_next for
 * pick_next_task_litmus() to hand to the core scheduler.
 *
 * The migration path temporarily drops rq->lock; the re-checks below
 * exist to cope with state changes during that unlocked window.
 */
static void litmus_schedule(struct rq *rq, struct task_struct *prev)
{
	struct rq* other_rq;
	long was_running;
	lt_t _maybe_deadlock = 0;
	/* WARNING: rq is _not_ locked! */
	if (is_realtime(prev)) {
		update_time_litmus(rq, prev);
		if (!is_running(prev))
			tsk_rt(prev)->present = 0;
	}

	/* let the plugin schedule */
	rq->litmus_next = litmus->schedule(prev);

	/* check if a global plugin pulled a task from a different RQ */
	if (rq->litmus_next && task_rq(rq->litmus_next) != rq) {
		/* we need to migrate the task */
		other_rq = task_rq(rq->litmus_next);
		TRACE_TASK(rq->litmus_next, "migrate from %d\n", other_rq->cpu);

		/* while we drop the lock, the prev task could change its
		 * state
		 */
		was_running = is_running(prev);
		mb();
		spin_unlock(&rq->lock);

		/* Don't race with a concurrent switch. This could deadlock in
		 * the case of cross or circular migrations. It's the job of
		 * the plugin to make sure that doesn't happen.
		 */
		TRACE_TASK(rq->litmus_next, "stack_in_use=%d\n",
			   rq->litmus_next->rt_param.stack_in_use);
		if (rq->litmus_next->rt_param.stack_in_use != NO_CPU) {
			TRACE_TASK(rq->litmus_next, "waiting to deschedule\n");
			_maybe_deadlock = litmus_clock();
		}
		/* spin until the task's stack is free on its old CPU */
		while (rq->litmus_next->rt_param.stack_in_use != NO_CPU) {
			cpu_relax();
			mb();
			if (rq->litmus_next->rt_param.stack_in_use == NO_CPU)
				TRACE_TASK(rq->litmus_next,
					   "descheduled. Proceeding.\n");
			/* 10000000 ns == 10ms spin budget */
			if (lt_before(_maybe_deadlock + 10000000,
				      litmus_clock())) {
				/* We've been spinning for 10ms.
				 * Something can't be right!
				 * Let's abandon the task and bail out; at least
				 * we will have debug info instead of a hard
				 * deadlock.
				 */
				TRACE_TASK(rq->litmus_next,
					   "stack too long in use. "
					   "Deadlock?\n");
				rq->litmus_next = NULL;

				/* bail out */
				spin_lock(&rq->lock);
				return;
			}
		}
#ifdef __ARCH_WANT_UNLOCKED_CTXSW
		/* on unlocked-ctxsw archs, also wait until the task has
		 * actually left its old CPU
		 */
		if (rq->litmus_next->oncpu)
			TRACE_TASK(rq->litmus_next, "waiting for !oncpu");
		while (rq->litmus_next->oncpu) {
			cpu_relax();
			mb();
		}
#endif
		double_rq_lock(rq, other_rq);
		mb();
		if (is_realtime(prev) && is_running(prev) != was_running) {
			TRACE_TASK(prev,
				   "state changed while we dropped"
				   " the lock: is_running=%d, was_running=%d\n",
				   is_running(prev), was_running);
			if (is_running(prev) && !was_running) {
				/* prev task became unblocked
				 * we need to simulate normal sequence of events
				 * to scheduler plugins.
				 */
				litmus->task_block(prev);
				litmus->task_wake_up(prev);
			}
		}

		set_task_cpu(rq->litmus_next, smp_processor_id());

		/* DEBUG: now that we have the lock we need to make sure a
		 * couple of things still hold:
		 * - it is still a real-time task
		 * - it is still runnable (could have been stopped)
		 * If either is violated, then the active plugin is
		 * doing something wrong.
		 */
		if (!is_realtime(rq->litmus_next) ||
		    !is_running(rq->litmus_next)) {
			/* BAD BAD BAD */
			TRACE_TASK(rq->litmus_next,
				   "BAD: migration invariant FAILED: "
				   "rt=%d running=%d\n",
				   is_realtime(rq->litmus_next),
				   is_running(rq->litmus_next));
			/* drop the task */
			rq->litmus_next = NULL;
		}
		/* release the other CPU's runqueue, but keep ours */
		spin_unlock(&other_rq->lock);
	}
	/* record that the chosen task's stack is now in use on this CPU */
	if (rq->litmus_next)
		rq->litmus_next->rt_param.stack_in_use = rq->cpu;
}
143 | |||
144 | static void enqueue_task_litmus(struct rq *rq, struct task_struct *p, | ||
145 | int wakeup) | ||
146 | { | ||
147 | if (wakeup) { | ||
148 | sched_trace_task_resume(p); | ||
149 | tsk_rt(p)->present = 1; | ||
150 | litmus->task_wake_up(p); | ||
151 | } else | ||
152 | TRACE_TASK(p, "ignoring an enqueue, not a wake up.\n"); | ||
153 | } | ||
154 | |||
155 | static void dequeue_task_litmus(struct rq *rq, struct task_struct *p, int sleep) | ||
156 | { | ||
157 | if (sleep) { | ||
158 | litmus->task_block(p); | ||
159 | tsk_rt(p)->present = 0; | ||
160 | sched_trace_task_block(p); | ||
161 | } else | ||
162 | TRACE_TASK(p, "ignoring a dequeue, not going to sleep.\n"); | ||
163 | } | ||
164 | |||
/* sched_yield() for a LITMUS^RT task: interpreted as completion of the
 * current job (litmus->complete_job()); the plugin decides what to run
 * next.  May only be invoked by the currently running task.
 */
static void yield_task_litmus(struct rq *rq)
{
	BUG_ON(rq->curr != current);
	litmus->complete_job();
}
170 | |||
/* Preemption checks are the responsibility of the active plugin, so
 * this sched_class hook is intentionally a no-op.
 */
static void check_preempt_curr_litmus(struct rq *rq, struct task_struct *p, int flags)
{
}
176 | |||
/* Nothing to do: the previous task has already been taken care of
 * (accounting happens in litmus_schedule()/litmus_tick()).
 */
static void put_prev_task_litmus(struct rq *rq, struct task_struct *p)
{
}
181 | |||
182 | static struct task_struct *pick_next_task_litmus(struct rq *rq) | ||
183 | { | ||
184 | struct task_struct* picked = rq->litmus_next; | ||
185 | rq->litmus_next = NULL; | ||
186 | if (picked) | ||
187 | picked->se.exec_start = rq->clock; | ||
188 | return picked; | ||
189 | } | ||
190 | |||
/* Intentionally empty: tick processing for LITMUS^RT tasks is driven
 * from litmus_tick() instead of this sched_class hook.
 */
static void task_tick_litmus(struct rq *rq, struct task_struct *p, int queued)
{
}
194 | |||
/* Required by the sched_class interface; class transitions are handled
 * by the LITMUS^RT core/plugins, so nothing to do here.
 */
static void switched_to_litmus(struct rq *rq, struct task_struct *p, int running)
{
}
198 | |||
/* Required by the sched_class interface; LITMUS^RT does not use Linux
 * priorities, so a priority change is a no-op here.
 */
static void prio_changed_litmus(struct rq *rq, struct task_struct *p,
				int oldprio, int running)
{
}
203 | |||
/* Round-robin interval for sched_rr_get_interval(): LITMUS^RT tasks are
 * not time-sliced by the core scheduler, so report an infinite slice
 * (encoded as 0).
 *
 * Made static for consistency with every other function in this file;
 * the symbol is only referenced by litmus_sched_class below, and this
 * file is included into kernel/sched.c, so a global symbol would
 * needlessly pollute the kernel namespace.
 */
static unsigned int get_rr_interval_litmus(struct task_struct *p)
{
	/* return infinity */
	return 0;
}
209 | |||
/* This is called when a task became a real-time task, either due to a SCHED_*
 * class transition or due to PI mutex inheritance. We don't handle Linux PI
 * mutex inheritance yet (and probably never will). Use LITMUS provided
 * synchronization primitives instead.
 */
static void set_curr_task_litmus(struct rq *rq)
{
	/* restart execution-time accounting for the current task */
	rq->curr->se.exec_start = rq->clock;
}
219 | |||
220 | |||
221 | #ifdef CONFIG_SMP | ||
/* execve tries to rebalance task in this scheduling domain.
 * LITMUS^RT plugins own all placement decisions, so always keep the
 * task on the current CPU.
 */
static int select_task_rq_litmus(struct task_struct *p, int sd_flag, int flags)
{
	/* preemption is already disabled.
	 * We don't want to change cpu here
	 */
	return smp_processor_id();
}
230 | |||
/* we don't repartition at runtime */

/* Load balancing is disabled for LITMUS^RT tasks: report zero load
 * moved so the balancer leaves this class alone.
 */
static unsigned long
load_balance_litmus(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move,
		struct sched_domain *sd, enum cpu_idle_type idle,
		int *all_pinned, int *this_best_prio)
{
	return 0;
}
241 | |||
/* As with load_balance_litmus(): never let the balancer pull a
 * LITMUS^RT task; report that no task was moved.
 */
static int
move_one_task_litmus(struct rq *this_rq, int this_cpu, struct rq *busiest,
		struct sched_domain *sd, enum cpu_idle_type idle)
{
	return 0;
}
248 | #endif | ||
249 | |||
/* The LITMUS^RT scheduling class.  Chained above the RT class via
 * .next, so LITMUS^RT tasks take precedence in pick_next_task().
 * Hooks not listed here are left NULL / unused by this class.
 */
const struct sched_class litmus_sched_class = {
	.next			= &rt_sched_class,
	.enqueue_task		= enqueue_task_litmus,
	.dequeue_task		= dequeue_task_litmus,
	.yield_task		= yield_task_litmus,

	.check_preempt_curr	= check_preempt_curr_litmus,

	.pick_next_task		= pick_next_task_litmus,
	.put_prev_task		= put_prev_task_litmus,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_litmus,

	.load_balance		= load_balance_litmus,
	.move_one_task		= move_one_task_litmus,
#endif

	.set_curr_task		= set_curr_task_litmus,
	.task_tick		= task_tick_litmus,

	.get_rr_interval	= get_rr_interval_litmus,

	.prio_changed		= prio_changed_litmus,
	.switched_to		= switched_to_litmus,
};