author    Glenn Elliott <gelliott@cs.unc.edu>    2011-06-02 16:06:05 -0400
committer Glenn Elliott <gelliott@cs.unc.edu>    2011-06-02 16:06:05 -0400
commit    3d5537c160c1484e8d562b9828baf679cc53f67a (patch)
tree      b595364f1b0f94ac2426c8315bc5967debc7bbb0 /litmus/sched_task_trace.c
parent    7d754596756240fa918b94cd0c3011c77a638987 (diff)
Full patch for klitirqd with Nvidia GPU support.
Diffstat (limited to 'litmus/sched_task_trace.c')
-rw-r--r--    litmus/sched_task_trace.c    216
1 file changed, 209 insertions, 7 deletions
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
index 5ef8d09ab41f..7aeb99b668d3 100644
--- a/litmus/sched_task_trace.c
+++ b/litmus/sched_task_trace.c
@@ -7,6 +7,7 @@
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/percpu.h>
+#include <linux/hardirq.h>
 
 #include <litmus/ftdev.h>
 #include <litmus/litmus.h>
@@ -16,13 +17,13 @@
 #include <litmus/ftdev.h>
 
 
-#define NO_EVENTS	(1 << CONFIG_SCHED_TASK_TRACE_SHIFT)
+#define NUM_EVENTS	(1 << (CONFIG_SCHED_TASK_TRACE_SHIFT+11))
 
 #define now() litmus_clock()
 
 struct local_buffer {
-	struct st_event_record record[NO_EVENTS];
-	char flag[NO_EVENTS];
+	struct st_event_record record[NUM_EVENTS];
+	char flag[NUM_EVENTS];
 	struct ft_buffer ftbuf;
 };
 
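The rename from NO_EVENTS to NUM_EVENTS also grows the buffer: the extra +11 in the shift multiplies the per-CPU slot count by 2^11 = 2048, presumably to absorb the much higher event rate once non-real-time tasks and interrupt handlers are traced as well (see the hunks below). A back-of-the-envelope sizing check, where the config value and record size are assumptions for illustration, not taken from this patch:

    /* Rough sizing of struct local_buffer after this change. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long shift = 9;       /* assumed CONFIG_SCHED_TASK_TRACE_SHIFT */
        unsigned long num_events = 1UL << (shift + 11);
        unsigned long rec_size = 24;   /* assumed sizeof(struct st_event_record) */
        /* local_buffer = record array plus one flag byte per slot */
        unsigned long bytes = num_events * (rec_size + 1);

        printf("NUM_EVENTS = %lu (~%lu MiB per CPU)\n", num_events, bytes >> 20);
        return 0;
    }

With these assumed values, NUM_EVENTS comes to 2^20 records and roughly 25 MiB per CPU, 2048 times the old footprint.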
@@ -41,7 +42,7 @@ static int __init init_sched_task_trace(void)
 	int i, ok = 0, err;
 	printk("Allocated %u sched_trace_xxx() events per CPU "
 	       "(buffer size: %d bytes)\n",
-	       NO_EVENTS, (int) sizeof(struct local_buffer));
+	       NUM_EVENTS, (int) sizeof(struct local_buffer));
 
 	err = ftdev_init(&st_dev, THIS_MODULE,
 			num_online_cpus(), "sched_trace");
@@ -50,7 +51,7 @@ static int __init init_sched_task_trace(void)
 
 	for (i = 0; i < st_dev.minor_cnt; i++) {
 		buf = &per_cpu(st_event_buffer, i);
-		ok += init_ft_buffer(&buf->ftbuf, NO_EVENTS,
+		ok += init_ft_buffer(&buf->ftbuf, NUM_EVENTS,
 				     sizeof(struct st_event_record),
 				     buf->flag,
 				     buf->record);
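The enlarged buffers are still exported the same way: ftdev_init() registers one minor device per online CPU under the name "sched_trace", and init_ft_buffer() carves each CPU's local_buffer into its record and flag arrays. A minimal sketch of a userspace drain loop for one such device; the device path, and the assumption that a read delivers whole fixed-size records, describe the surrounding LITMUS^RT setup as I understand it and are not defined in this file:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>

    #define REC_SIZE 24   /* assumed sizeof(struct st_event_record) */

    int main(void)
    {
        unsigned char rec[REC_SIZE];
        int fd = open("/dev/litmus/sched_trace0", O_RDONLY);  /* hypothetical path */
        if (fd < 0) {
            perror("open");
            return 1;
        }
        /* assume the ftdev delivers whole records; print each header type byte */
        while (read(fd, rec, sizeof(rec)) == sizeof(rec))
            printf("event type %u\n", rec[0]);
        close(fd);
        return 0;
    }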
@@ -154,7 +155,8 @@ feather_callback void do_sched_trace_task_switch_to(unsigned long id,
 {
 	struct task_struct *t = (struct task_struct*) _task;
 	struct st_event_record* rec;
-	if (is_realtime(t)) {
+	//if (is_realtime(t))  /* comment out to trace EVERYTHING */
+	{
 		rec = get_record(ST_SWITCH_TO, t);
 		if (rec) {
 			rec->data.switch_to.when = now();
@@ -169,7 +171,8 @@ feather_callback void do_sched_trace_task_switch_away(unsigned long id,
 {
 	struct task_struct *t = (struct task_struct*) _task;
 	struct st_event_record* rec;
-	if (is_realtime(t)) {
+	//if (is_realtime(t))  /* comment out to trace EVERYTHING */
+	{
 		rec = get_record(ST_SWITCH_AWAY, t);
 		if (rec) {
 			rec->data.switch_away.when = now();
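With the is_realtime() guard commented out in both hunks above, ST_SWITCH_TO/ST_SWITCH_AWAY records are emitted for every context switch, not just for real-time tasks, so filtering moves to the trace consumer. A sketch of such a filter; the header layout and the event ID values mirror what one would expect from LITMUS^RT's sched_trace.h but should be treated as assumptions here:

    #include <stdint.h>
    #include <stdio.h>

    struct st_trace_header {
        uint8_t  type;   /* ST_SWITCH_TO, ST_SWITCH_AWAY, ... */
        uint8_t  cpu;
        uint16_t pid;
        uint32_t job;
    };

    #define ST_SWITCH_TO   5   /* hypothetical event IDs */
    #define ST_SWITCH_AWAY 6

    /* keep only context-switch records for one task of interest */
    static int keep(const struct st_trace_header *h, uint16_t watched_pid)
    {
        return (h->type == ST_SWITCH_TO || h->type == ST_SWITCH_AWAY)
                && h->pid == watched_pid;
    }

    int main(void)
    {
        struct st_trace_header h = { ST_SWITCH_TO, 0, 1234, 7 };
        printf("keep? %d\n", keep(&h, 1234));
        return 0;
    }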
@@ -188,6 +191,7 @@ feather_callback void do_sched_trace_task_completion(unsigned long id,
 	if (rec) {
 		rec->data.completion.when = now();
 		rec->data.completion.forced = forced;
+		rec->data.completion.nv_int_count = (u16)atomic_read(&tsk_rt(t)->nv_int_count);
 		put_record(rec);
 	}
 }
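The completion record now also logs how many NVIDIA interrupts were charged to the task during the job. Note the cast: atomic_read() returns an int, and narrowing to u16 silently wraps once a job accumulates more than 65535 interrupts, as this quick check in plain C shows:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int nv_int_count = 70000;                    /* e.g., a long-running job */
        uint16_t logged = (uint16_t) nv_int_count;   /* same narrowing as the patch */
        printf("counted %d, logged %u\n", nv_int_count, logged);  /* 70000 vs 4464 */
        return 0;
    }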
@@ -239,3 +243,201 @@ feather_callback void do_sched_trace_action(unsigned long id,
 		put_record(rec);
 	}
 }
+
+
+feather_callback void do_sched_trace_tasklet_release(unsigned long id,
+						unsigned long _owner)
+{
+	struct task_struct *t = (struct task_struct*) _owner;
+	struct st_event_record *rec = get_record(ST_TASKLET_RELEASE, t);
+
+	if (rec) {
+		rec->data.tasklet_release.when = now();
+		put_record(rec);
+	}
+}
+
+
+feather_callback void do_sched_trace_tasklet_begin(unsigned long id,
+						unsigned long _owner)
+{
+	struct task_struct *t = (struct task_struct*) _owner;
+	struct st_event_record *rec = get_record(ST_TASKLET_BEGIN, t);
+
+	if (rec) {
+		rec->data.tasklet_begin.when = now();
+
+		if(!in_interrupt())
+			rec->data.tasklet_begin.exe_pid = current->pid;
+		else
+			rec->data.tasklet_begin.exe_pid = 0;
+
+		put_record(rec);
+	}
+}
+EXPORT_SYMBOL(do_sched_trace_tasklet_begin);
+
+
+feather_callback void do_sched_trace_tasklet_end(unsigned long id,
+						unsigned long _owner,
+						unsigned long _flushed)
+{
+	struct task_struct *t = (struct task_struct*) _owner;
+	struct st_event_record *rec = get_record(ST_TASKLET_END, t);
+
+	if (rec) {
+		rec->data.tasklet_end.when = now();
+		rec->data.tasklet_end.flushed = _flushed;
+
+		if(!in_interrupt())
+			rec->data.tasklet_end.exe_pid = current->pid;
+		else
+			rec->data.tasklet_end.exe_pid = 0;
+
+		put_record(rec);
+	}
+}
+EXPORT_SYMBOL(do_sched_trace_tasklet_end);
+
+
+feather_callback void do_sched_trace_work_release(unsigned long id,
+						unsigned long _owner)
+{
+	struct task_struct *t = (struct task_struct*) _owner;
+	struct st_event_record *rec = get_record(ST_WORK_RELEASE, t);
+
+	if (rec) {
+		rec->data.work_release.when = now();
+		put_record(rec);
+	}
+}
+
+
+feather_callback void do_sched_trace_work_begin(unsigned long id,
+						unsigned long _owner,
+						unsigned long _exe)
+{
+	struct task_struct *t = (struct task_struct*) _owner;
+	struct st_event_record *rec = get_record(ST_WORK_BEGIN, t);
+
+	if (rec) {
+		struct task_struct *exe = (struct task_struct*) _exe;
+		rec->data.work_begin.exe_pid = exe->pid;
+		rec->data.work_begin.when = now();
+		put_record(rec);
+	}
+}
+EXPORT_SYMBOL(do_sched_trace_work_begin);
+
+
+feather_callback void do_sched_trace_work_end(unsigned long id,
+						unsigned long _owner,
+						unsigned long _exe,
+						unsigned long _flushed)
+{
+	struct task_struct *t = (struct task_struct*) _owner;
+	struct st_event_record *rec = get_record(ST_WORK_END, t);
+
+	if (rec) {
+		struct task_struct *exe = (struct task_struct*) _exe;
+		rec->data.work_end.exe_pid = exe->pid;
+		rec->data.work_end.flushed = _flushed;
+		rec->data.work_end.when = now();
+		put_record(rec);
+	}
+}
+EXPORT_SYMBOL(do_sched_trace_work_end);
+
+
+feather_callback void do_sched_trace_eff_prio_change(unsigned long id,
+						unsigned long _task,
+						unsigned long _inh)
+{
+	struct task_struct *t = (struct task_struct*) _task;
+	struct st_event_record *rec = get_record(ST_EFF_PRIO_CHANGE, t);
+
+	if (rec) {
+		struct task_struct *inh = (struct task_struct*) _inh;
+		rec->data.effective_priority_change.when = now();
+		rec->data.effective_priority_change.inh_pid = (inh != NULL) ?
+			inh->pid :
+			0xffff;
+
+		put_record(rec);
+	}
+}
+
+
+/* pray for no nesting of nv interrupts on same CPU... */
+struct tracing_interrupt_map
+{
+	int active;
+	int count;
+	unsigned long data[128]; // assume nesting less than 128...
+};
+DEFINE_PER_CPU(struct tracing_interrupt_map, active_interrupt_tracing);
+
+feather_callback void do_sched_trace_nv_interrupt_begin(unsigned long id,
+						unsigned long _device)
+{
+	struct st_event_record *rec;
+
+	{
+		struct tracing_interrupt_map* int_map = &per_cpu(active_interrupt_tracing, smp_processor_id());
+		if(int_map->active == 0xcafebabe)
+		{
+			int_map->count++;
+		}
+		else
+		{
+			int_map->active = 0xcafebabe;
+			int_map->count = 1;
+		}
+		int_map->data[int_map->count-1] = _device;
+	}
+
+	rec = get_record(ST_NV_INTERRUPT_BEGIN, NULL);
+	if(rec) {
+		u32 device = _device;
+		rec->data.nv_interrupt_begin.when = now();
+		rec->data.nv_interrupt_begin.device = device;
+		put_record(rec);
+	}
+}
+EXPORT_SYMBOL(do_sched_trace_nv_interrupt_begin);
+
+/*
+int is_interrupt_tracing_active(void)
+{
+	struct tracing_interrupt_map* int_map = &per_cpu(active_interrupt_tracing, smp_processor_id());
+	if(int_map->active == 0xcafebabe)
+		return 1;
+	return 0;
+}
+*/
+
+feather_callback void do_sched_trace_nv_interrupt_end(unsigned long id, unsigned long unused)
+{
+	struct tracing_interrupt_map* int_map = &per_cpu(active_interrupt_tracing, smp_processor_id());
+	if(int_map->active == 0xcafebabe)
+	{
+		struct st_event_record *rec = get_record(ST_NV_INTERRUPT_END, NULL);
+
+		int_map->count--;
+		if(int_map->count == 0)
+			int_map->active = 0;
+
+		if(rec) {
+			rec->data.nv_interrupt_end.when = now();
+			rec->data.nv_interrupt_end.device = int_map->data[int_map->count];
+			put_record(rec);
+		}
+	}
+}
+EXPORT_SYMBOL(do_sched_trace_nv_interrupt_end);
+
+
+
+
+
+
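The tracing_interrupt_map used by the two nv_interrupt callbacks is a small per-CPU stack guarded by the 0xcafebabe sentinel: begin pushes a device ID, end pops it and stamps it into the ST_NV_INTERRUPT_END record. A userspace mock of that push/pop pairing (illustration only, not the kernel code); it also shows why the "pray for no nesting" comment matters, since the count/data updates are not re-entrant if a second interrupt lands mid-update on the same CPU:

    #include <stdio.h>

    struct tracing_interrupt_map {
        unsigned int active;   /* int in the kernel struct; unsigned here so
                                * the sentinel comparison is clean */
        int count;
        unsigned long data[128];
    };

    static struct tracing_interrupt_map m;  /* stands in for one per-CPU copy */

    static void begin(unsigned long device)
    {
        if (m.active == 0xcafebabe)
            m.count++;
        else {
            m.active = 0xcafebabe;
            m.count = 1;
        }
        m.data[m.count - 1] = device;   /* push */
    }

    static void end(void)
    {
        if (m.active != 0xcafebabe)
            return;
        m.count--;
        if (m.count == 0)
            m.active = 0;
        printf("logged device %lu\n", m.data[m.count]);  /* pop */
    }

    int main(void)
    {
        begin(0);  /* outer interrupt, device 0 */
        begin(1);  /* nested interrupt, device 1 */
        end();     /* logs device 1 */
        end();     /* logs device 0 */
        return 0;
    }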