author     Andrea Bastoni <bastoni@cs.unc.edu>   2009-12-17 21:26:50 -0500
committer  Andrea Bastoni <bastoni@cs.unc.edu>   2010-05-29 17:12:27 -0400
commit     96979188007a0671d3f067d7edf144742d7433ee (patch)
tree       8b93dacea74499926cc4fcaa0879dbfe3ace9d7f /litmus/sched_task_trace.c
parent     cf3f4bd8db320f3f487d66bdec924e926f004787 (diff)
[ported from 2008.3] Add tracing support and hook up Litmus KConfig for x86

- fix requests for more than 2^11 pages (MAX_ORDER) to the system
  allocator (see the allocation-guard sketch below)

Still to be merged:
- feather-trace generic implementation
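
For context, the MAX_ORDER item refers to the buddy allocator's size cap:
__get_free_pages() only serves allocation orders below MAX_ORDER, so a single
trace buffer spanning more pages than that must be rejected or split. A
minimal sketch of such a guard follows; it is an illustration only, and the
helper name ft_alloc_pages is hypothetical rather than part of this commit:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical guard (not in this commit): refuse requests the buddy
 * allocator cannot serve, rather than letting an order >= MAX_ORDER
 * allocation fail inside __get_free_pages(). */
static void *ft_alloc_pages(size_t bytes)
{
	unsigned int order = get_order(bytes);

	if (order >= MAX_ORDER)
		return NULL;	/* caller must shrink or split the buffer */
	return (void *) __get_free_pages(GFP_KERNEL, order);
}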
Diffstat (limited to 'litmus/sched_task_trace.c')

-rw-r--r--   litmus/sched_task_trace.c   202
1 file changed, 202 insertions, 0 deletions
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
new file mode 100644
index 000000000000..b7ea6d4e6e57
--- /dev/null
+++ b/litmus/sched_task_trace.c
@@ -0,0 +1,202 @@
/*
 * sched_task_trace.c -- record scheduling events to a byte stream
 */

#define NO_TASK_TRACE_DECLS

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/percpu.h>

#include <litmus/ftdev.h>
#include <litmus/litmus.h>

#include <litmus/sched_trace.h>
#include <litmus/feather_trace.h>

#define FT_TASK_TRACE_MAJOR	253
#define NO_EVENTS		4096 /* this is a buffer of 12 4k pages per CPU */

#define now() litmus_clock()

struct local_buffer {
	struct st_event_record record[NO_EVENTS];
	char flag[NO_EVENTS];
	struct ft_buffer ftbuf;
};
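
/* Layout note: each CPU gets one struct local_buffer; 'record' holds the
 * event slots, 'flag' one status byte per slot, and 'ftbuf' the
 * feather-trace bookkeeping that init_ft_buffer() ties to both arrays. */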

DEFINE_PER_CPU(struct local_buffer, st_event_buffer);

static struct ftdev st_dev;

static int st_dev_can_open(struct ftdev *dev, unsigned int cpu)
{
	return cpu_online(cpu) ? 0 : -ENODEV;
}

static int __init init_sched_task_trace(void)
{
	struct local_buffer* buf;
	int i, ok = 0;
	ftdev_init(&st_dev, THIS_MODULE);
	for (i = 0; i < NR_CPUS; i++) {
		buf = &per_cpu(st_event_buffer, i);
		ok += init_ft_buffer(&buf->ftbuf, NO_EVENTS,
				     sizeof(struct st_event_record),
				     buf->flag,
				     buf->record);
		st_dev.minor[i].buf = &buf->ftbuf;
	}
	if (ok == NR_CPUS) {
		st_dev.minor_cnt = NR_CPUS;
		st_dev.can_open = st_dev_can_open;
		return register_ftdev(&st_dev, "sched_trace", FT_TASK_TRACE_MAJOR);
	} else {
		return -EINVAL;
	}
}

module_init(init_sched_task_trace);


static inline struct st_event_record* get_record(u8 type, struct task_struct* t)
{
	struct st_event_record* rec;
	struct local_buffer* buf;

	buf = &get_cpu_var(st_event_buffer);
	if (ft_buffer_start_write(&buf->ftbuf, (void**) &rec)) {
		rec->hdr.type = type;
		rec->hdr.cpu = smp_processor_id();
		rec->hdr.pid = t ? t->pid : 0;
		rec->hdr.job = t ? t->rt_param.job_params.job_no : 0;
	} else {
		put_cpu_var(st_event_buffer);
	}
	/* rec will be NULL if it failed */
	return rec;
}

static inline void put_record(struct st_event_record* rec)
{
	struct local_buffer* buf;
	buf = &__get_cpu_var(st_event_buffer);
	ft_buffer_finish_write(&buf->ftbuf, rec);
	put_cpu_var(st_event_buffer);
}
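
/* Pairing note (inferred from the code above): get_cpu_var() in
 * get_record() disables preemption, so a successful get_record() returns
 * with preemption still off and the matching put_record() re-enables it
 * via put_cpu_var(). On a failed reservation, get_record() re-enables
 * preemption itself and returns NULL, which is why callers only invoke
 * put_record() for non-NULL records. */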

feather_callback void do_sched_trace_task_name(unsigned long id, unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec = get_record(ST_NAME, t);
	int i;
	if (rec) {
		for (i = 0; i < min(TASK_COMM_LEN, ST_NAME_LEN); i++)
			rec->data.name.cmd[i] = t->comm[i];
		put_record(rec);
	}
}

feather_callback void do_sched_trace_task_param(unsigned long id, unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec = get_record(ST_PARAM, t);
	if (rec) {
		rec->data.param.wcet = get_exec_cost(t);
		rec->data.param.period = get_rt_period(t);
		rec->data.param.phase = get_rt_phase(t);
		rec->data.param.partition = get_partition(t);
		put_record(rec);
	}
}

feather_callback void do_sched_trace_task_release(unsigned long id, unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec = get_record(ST_RELEASE, t);
	if (rec) {
		rec->data.release.release = get_release(t);
		rec->data.release.deadline = get_deadline(t);
		put_record(rec);
	}
}

/* skipped: st_assigned_data, we don't use it atm */

feather_callback void do_sched_trace_task_switch_to(unsigned long id,
						    unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec;
	if (is_realtime(t)) {
		rec = get_record(ST_SWITCH_TO, t);
		if (rec) {
			rec->data.switch_to.when = now();
			rec->data.switch_to.exec_time = get_exec_time(t);
			put_record(rec);
		}
	}
}

feather_callback void do_sched_trace_task_switch_away(unsigned long id,
						      unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec;
	if (is_realtime(t)) {
		rec = get_record(ST_SWITCH_AWAY, t);
		if (rec) {
			rec->data.switch_away.when = now();
			rec->data.switch_away.exec_time = get_exec_time(t);
			put_record(rec);
		}
	}
}

feather_callback void do_sched_trace_task_completion(unsigned long id,
						     unsigned long _task,
						     unsigned long forced)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec = get_record(ST_COMPLETION, t);
	if (rec) {
		rec->data.completion.when = now();
		rec->data.completion.forced = forced;
		put_record(rec);
	}
}

feather_callback void do_sched_trace_task_block(unsigned long id,
						unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec = get_record(ST_BLOCK, t);
	if (rec) {
		rec->data.block.when = now();
		put_record(rec);
	}
}

feather_callback void do_sched_trace_task_resume(unsigned long id,
						 unsigned long _task)
{
	struct task_struct *t = (struct task_struct*) _task;
	struct st_event_record* rec = get_record(ST_RESUME, t);
	if (rec) {
		rec->data.resume.when = now();
		put_record(rec);
	}
}

feather_callback void do_sched_trace_sys_release(unsigned long id,
						 unsigned long _start)
{
	lt_t *start = (lt_t*) _start;
	struct st_event_record* rec = get_record(ST_SYS_RELEASE, NULL);
	if (rec) {
		rec->data.sys_release.when = now();
		rec->data.sys_release.release = *start;
		put_record(rec);
	}
}
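
Beyond the diff itself: the records written above can be drained from
userspace by reading the per-CPU character devices that register_ftdev()
exposes. The sketch below is an illustration under stated assumptions, not
the project's actual tooling: the node name /dev/sched_trace0 depends on how
udev maps the ftdev minors, and the 16-byte payload mirrors the
st_event_record data union only if litmus/sched_trace.h defines it that way.

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

/* Mirrors the 8-byte record header filled in by get_record() above. */
struct st_trace_header {
	uint8_t  type;		/* ST_NAME, ST_RELEASE, ...           */
	uint8_t  cpu;		/* CPU that recorded the event        */
	uint16_t pid;		/* task PID, 0 for system-wide events */
	uint32_t job;		/* job sequence number                */
};

struct raw_record {
	struct st_trace_header hdr;
	char payload[16];	/* ASSUMPTION: size of the data union */
};

int main(void)
{
	struct raw_record rec;
	/* ASSUMPTION: device node name; depends on udev/ftdev setup. */
	int fd = open("/dev/sched_trace0", O_RDONLY);

	if (fd < 0)
		return 1;
	while (read(fd, &rec, sizeof(rec)) == (ssize_t) sizeof(rec))
		printf("type=%u cpu=%u pid=%u job=%u\n",
		       (unsigned) rec.hdr.type, (unsigned) rec.hdr.cpu,
		       (unsigned) rec.hdr.pid, (unsigned) rec.hdr.job);
	close(fd);
	return 0;
}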