author		Bjoern B. Brandenburg <bbb@cs.unc.edu>	2009-12-08 10:42:39 -0500
committer	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2009-12-08 12:44:49 -0500
commit		f043ca3a785750091f6a73b6625b6b7a5a5d19f6 (patch)
tree		5f919ae5968d0c41066edc770ddb825440f3f611
parent		f369d531a5add5bd978a5c41fdb9fd607038e51b (diff)
Add tracing support.
(Kconfig entries are still missing.)

Fix sched_trace printk hooks (to be merged).
Fix the Feather-Trace generic implementation (to be merged).
-rw-r--r--	include/asm-x86/feather_trace.h	 11
-rw-r--r--	include/litmus/feather_trace.h	 14
-rw-r--r--	litmus/sched_task_trace.c	200
-rw-r--r--	litmus/sched_trace.c	498
-rw-r--r--	litmus/trace.c	 98
5 files changed, 821 insertions(+), 0 deletions(-)
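
The pieces fit together as follows: TRACE()-style debug messages are formatted by sched_trace_log_message() into a global ring buffer and read back through a character device, while binary event records and timestamps flow through per-CPU Feather-Trace buffers. As a hedged sketch of the debug-logging entry point (the TRACE() macro itself lives in litmus/sched_trace.h, which this commit does not show, so its exact signature is an assumption):

	/* somewhere in scheduler code; assumed to forward to
	 * sched_trace_log_message() */
	TRACE("task %d: released job %u\n", t->pid,
	      t->rt_param.job_params.job_no);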
diff --git a/include/asm-x86/feather_trace.h b/include/asm-x86/feather_trace.h
new file mode 100644
index 000000000000..f60fbed07afb
--- /dev/null
+++ b/include/asm-x86/feather_trace.h
@@ -0,0 +1,11 @@
+#ifndef _ARCH_FEATHER_TRACE_H
+#define _ARCH_FEATHER_TRACE_H
+
+static inline unsigned long long ft_timestamp(void)
+{
+	unsigned long long ret;
+	__asm__ __volatile__("rdtsc" : "=A" (ret));
+	return ret;
+}
+
+#endif
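
A minimal usage sketch of the new primitive follows (do_work() is a placeholder, not part of the patch). Note that the "=A" (edx:eax) constraint reads the full 64-bit TSC only on 32-bit x86, so the sketch assumes a 32-bit build:

	unsigned long long start, end;

	start = ft_timestamp();		/* serializing fences omitted for brevity */
	do_work();			/* placeholder for the code being timed */
	end = ft_timestamp();
	printk("delta: %llu cycles\n", end - start);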
diff --git a/include/litmus/feather_trace.h b/include/litmus/feather_trace.h
index 3ac1ee5e0277..eef8af7a414e 100644
--- a/include/litmus/feather_trace.h
+++ b/include/litmus/feather_trace.h
@@ -1,6 +1,7 @@
 #ifndef _FEATHER_TRACE_H_
 #define _FEATHER_TRACE_H_
 
+#include <asm/feather_trace.h>
 
 int ft_enable_event(unsigned long id);
 int ft_disable_event(unsigned long id);
@@ -30,6 +31,19 @@ extern int ft_events[MAX_EVENTS];
 
 #define ft_event3(id, callback, p, p2, p3) \
 	if (ft_events[id]) callback(id, p, p2, p3);
+
+#include <asm/atomic.h>
+
+static inline int fetch_and_inc(int *val)
+{
+	return atomic_add_return(1, (atomic_t*) val) - 1;
+}
+
+static inline int fetch_and_dec(int *val)
+{
+	return atomic_sub_return(1, (atomic_t*) val) + 1;
+}
+
 #endif
 
 
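
The two new helpers return the value as it was before the update, i.e. they behave like an atomic fetch-and-add of +/-1. A small illustrative sketch (counter and demo() are made up for illustration, not part of the patch):

	static int counter = 0;

	static void demo(void)
	{
		int a = fetch_and_inc(&counter); /* a == 0, counter is now 1 */
		int b = fetch_and_dec(&counter); /* b == 1, counter is 0 again */
		(void) a; (void) b;
	}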
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
new file mode 100644
index 000000000000..c9dbfb22c2c6
--- /dev/null
+++ b/litmus/sched_task_trace.c
@@ -0,0 +1,200 @@
+/* sched_task_trace.c -- record scheduling events to a byte stream
+ *
+ */
+
+#define NO_TASK_TRACE_DECLS
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/percpu.h>
+
+#include <litmus/ftdev.h>
+#include <litmus/litmus.h>
+
+#include <litmus/sched_trace.h>
+#include <litmus/feather_trace.h>
+#include <litmus/ftdev.h>
+
+#define FT_TASK_TRACE_MAJOR	253
+#define NO_EVENTS		4096 /* this is a buffer of 12 4k pages per CPU */
+
+#define now() litmus_clock()
+
+struct local_buffer {
+	struct st_event_record record[NO_EVENTS];
+	char flag[NO_EVENTS];
+	struct ft_buffer ftbuf;
+};
+
+DEFINE_PER_CPU(struct local_buffer, st_event_buffer);
+
+static struct ftdev st_dev;
+
+static int st_dev_can_open(struct ftdev *dev, unsigned int cpu)
+{
+	return cpu_online(cpu) ? 0 : -ENODEV;
+}
+
+static int __init init_sched_task_trace(void)
+{
+	struct local_buffer* buf;
+	int i, ok = 0;
+	ftdev_init(&st_dev, THIS_MODULE);
+	for (i = 0; i < NR_CPUS; i++) {
+		buf = &per_cpu(st_event_buffer, i);
+		ok += init_ft_buffer(&buf->ftbuf, NO_EVENTS,
+				     sizeof(struct st_event_record),
+				     buf->flag,
+				     buf->record);
+		st_dev.minor[i].buf = &buf->ftbuf;
+	}
+	if (ok == NR_CPUS) {
+		st_dev.minor_cnt = NR_CPUS;
+		st_dev.can_open = st_dev_can_open;
+		return register_ftdev(&st_dev, "sched_trace", FT_TASK_TRACE_MAJOR);
+	} else
+		return -EINVAL;
+}
+
+module_init(init_sched_task_trace);
+
+
+static inline struct st_event_record* get_record(u8 type, struct task_struct* t)
+{
+	struct st_event_record* rec;
+	struct local_buffer* buf;
+
+	buf = &get_cpu_var(st_event_buffer);
+	if (ft_buffer_start_write(&buf->ftbuf, (void**) &rec)) {
+		rec->hdr.type = type;
+		rec->hdr.cpu = smp_processor_id();
+		rec->hdr.pid = t ? t->pid : 0;
+		rec->hdr.job = t ? t->rt_param.job_params.job_no : 0;
+	} else
+		put_cpu_var(st_event_buffer);
+	/* rec will be NULL if it failed */
+	return rec;
+}
+
+static inline void put_record(struct st_event_record* rec)
+{
+	struct local_buffer* buf;
+	buf = &__get_cpu_var(st_event_buffer);
+	ft_buffer_finish_write(&buf->ftbuf, rec);
+	put_cpu_var(st_event_buffer);
+}
+
+feather_callback void do_sched_trace_task_name(unsigned long id, unsigned long _task)
+{
+	struct task_struct *t = (struct task_struct*) _task;
+	struct st_event_record* rec = get_record(ST_NAME, t);
+	int i;
+	if (rec) {
+		for (i = 0; i < min(TASK_COMM_LEN, ST_NAME_LEN); i++)
+			rec->data.name.cmd[i] = t->comm[i];
+		put_record(rec);
+	}
+}
+
+feather_callback void do_sched_trace_task_param(unsigned long id, unsigned long _task)
+{
+	struct task_struct *t = (struct task_struct*) _task;
+	struct st_event_record* rec = get_record(ST_PARAM, t);
+	if (rec) {
+		rec->data.param.wcet      = get_exec_cost(t);
+		rec->data.param.period    = get_rt_period(t);
+		rec->data.param.phase     = get_rt_phase(t);
+		rec->data.param.partition = get_partition(t);
+		put_record(rec);
+	}
+}
+
+feather_callback void do_sched_trace_task_release(unsigned long id, unsigned long _task)
+{
+	struct task_struct *t = (struct task_struct*) _task;
+	struct st_event_record* rec = get_record(ST_RELEASE, t);
+	if (rec) {
+		rec->data.release.release  = get_release(t);
+		rec->data.release.deadline = get_deadline(t);
+		put_record(rec);
+	}
+}
+
+/* skipped: st_assigned_data, we don't use it atm */
+
+feather_callback void do_sched_trace_task_switch_to(unsigned long id,
+						    unsigned long _task)
+{
+	struct task_struct *t = (struct task_struct*) _task;
+	struct st_event_record* rec;
+	if (is_realtime(t)) {
+		rec = get_record(ST_SWITCH_TO, t);
+		if (rec) {
+			rec->data.switch_to.when      = now();
+			rec->data.switch_to.exec_time = get_exec_time(t);
+			put_record(rec);
+		}
+	}
+}
+
+feather_callback void do_sched_trace_task_switch_away(unsigned long id,
+						      unsigned long _task)
+{
+	struct task_struct *t = (struct task_struct*) _task;
+	struct st_event_record* rec;
+	if (is_realtime(t)) {
+		rec = get_record(ST_SWITCH_AWAY, t);
+		if (rec) {
+			rec->data.switch_away.when      = now();
+			rec->data.switch_away.exec_time = get_exec_time(t);
+			put_record(rec);
+		}
+	}
+}
+
+feather_callback void do_sched_trace_task_completion(unsigned long id,
+						     unsigned long _task,
+						     unsigned long forced)
+{
+	struct task_struct *t = (struct task_struct*) _task;
+	struct st_event_record* rec = get_record(ST_COMPLETION, t);
+	if (rec) {
+		rec->data.completion.when   = now();
+		rec->data.completion.forced = forced;
+		put_record(rec);
+	}
+}
+
+feather_callback void do_sched_trace_task_block(unsigned long id,
+						unsigned long _task)
+{
+	struct task_struct *t = (struct task_struct*) _task;
+	struct st_event_record* rec = get_record(ST_BLOCK, t);
+	if (rec) {
+		rec->data.block.when = now();
+		put_record(rec);
+	}
+}
+
+feather_callback void do_sched_trace_task_resume(unsigned long id,
+						 unsigned long _task)
+{
+	struct task_struct *t = (struct task_struct*) _task;
+	struct st_event_record* rec = get_record(ST_RESUME, t);
+	if (rec) {
+		rec->data.resume.when = now();
+		put_record(rec);
+	}
+}
+
+feather_callback void do_sched_trace_sys_release(unsigned long id,
+						 unsigned long _start)
+{
+	lt_t *start = (lt_t*) _start;
+	struct st_event_record* rec = get_record(ST_SYS_RELEASE, NULL);
+	if (rec) {
+		rec->data.sys_release.when    = now();
+		rec->data.sys_release.release = *start;
+		put_record(rec);
+	}
+}
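
Every writer above follows the same pattern: get_record() pins the local CPU and reserves a slot, the caller fills in the event-specific payload, and put_record() publishes the slot and unpins the CPU. Adding a new event type is therefore mechanical, as in this hedged sketch (ST_FOO and the data.foo payload are hypothetical, not defined by this patch):

	feather_callback void do_sched_trace_task_foo(unsigned long id,
						      unsigned long _task)
	{
		struct task_struct *t = (struct task_struct*) _task;
		struct st_event_record* rec = get_record(ST_FOO, t); /* hypothetical */
		if (rec) {
			rec->data.foo.when = now();  /* hypothetical payload */
			put_record(rec);
		}
	}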
diff --git a/litmus/sched_trace.c b/litmus/sched_trace.c
new file mode 100644
index 000000000000..8581c03bb137
--- /dev/null
+++ b/litmus/sched_trace.c
@@ -0,0 +1,498 @@
+/* sched_trace.c -- record scheduling events to a byte stream.
+ *
+ * TODO: Move the ring buffer to a lock-free implementation.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <asm/semaphore.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/sysrq.h>
+
+#include <litmus/sched_trace.h>
+#include <litmus/litmus.h>
+
+typedef struct {
+	/* guards the read and write pointers */
+	spinlock_t lock;
+	/* guards against concurrent freeing of the buffer */
+	rwlock_t del_lock;
+
+	/* memory allocated for the ring buffer */
+	unsigned long order;
+	char* buf;
+	char* end;
+
+	/* Read/write pointers; they may not cross. writep points to the
+	 * position of the next write, readp to that of the last read.
+	 */
+	char* writep;
+	char* readp;
+
+} ring_buffer_t;
+
+#define EMPTY_RING_BUFFER { \
+	.lock = SPIN_LOCK_UNLOCKED, \
+	.del_lock = RW_LOCK_UNLOCKED, \
+	.buf = NULL, \
+	.end = NULL, \
+	.writep = NULL, \
+	.readp = NULL \
+}
+
+void rb_init(ring_buffer_t* buf)
+{
+	*buf = (ring_buffer_t) EMPTY_RING_BUFFER;
+}
+
+int rb_alloc_buf(ring_buffer_t* buf, unsigned long order)
+{
+	unsigned long flags;
+	int error = 0;
+	char *mem;
+
+	/* do the memory allocation while not atomic */
+	mem = (char *) __get_free_pages(GFP_KERNEL, order);
+	if (!mem)
+		return -ENOMEM;
+	write_lock_irqsave(&buf->del_lock, flags);
+	BUG_ON(buf->buf);
+	buf->buf = mem;
+	buf->end = buf->buf + PAGE_SIZE * (1 << order) - 1;
+	memset(buf->buf, 0xff, buf->end - buf->buf);
+	buf->order = order;
+	buf->writep = buf->buf + 1;
+	buf->readp = buf->buf;
+	write_unlock_irqrestore(&buf->del_lock, flags);
+	return error;
+}
+
+int rb_free_buf(ring_buffer_t* buf)
+{
+	unsigned long flags;
+	int error = 0;
+	write_lock_irqsave(&buf->del_lock, flags);
+	BUG_ON(!buf->buf);
+	free_pages((unsigned long) buf->buf, buf->order);
+	buf->buf = NULL;
+	buf->end = NULL;
+	buf->writep = NULL;
+	buf->readp = NULL;
+	write_unlock_irqrestore(&buf->del_lock, flags);
+	return error;
+}
+
+/* Assumption: concurrent writes are serialized externally.
+ *
+ * Will only succeed if there is enough space for all len bytes.
+ */
+int rb_put(ring_buffer_t* buf, char* mem, size_t len)
+{
+	unsigned long flags;
+	char *r, *w;
+	int error = 0;
+	read_lock_irqsave(&buf->del_lock, flags);
+	if (!buf->buf) {
+		error = -ENODEV;
+		goto out;
+	}
+	spin_lock(&buf->lock);
+	r = buf->readp;
+	w = buf->writep;
+	spin_unlock(&buf->lock);
+	if (r < w && buf->end - w >= len - 1) {
+		/* easy case: there is enough space in the buffer
+		 * to write it in one continuous chunk */
+		memcpy(w, mem, len);
+		w += len;
+		if (w > buf->end)
+			/* special case: fit exactly into buffer
+			 * w is now buf->end + 1
+			 */
+			w = buf->buf;
+	} else if (w < r && r - w >= len) { /* >= len because the pointers may not cross */
+		/* we are constrained by the read pointer, but there
+		 * is enough space
+		 */
+		memcpy(w, mem, len);
+		w += len;
+	} else if (r <= w && buf->end - w < len - 1) {
+		/* the wrap-around case: there may or may not be space */
+		if ((buf->end - w) + (r - buf->buf) >= len - 1) {
+			/* copy the chunk that fits at the end */
+			memcpy(w, mem, buf->end - w + 1);
+			mem += buf->end - w + 1;
+			len -= (buf->end - w + 1);
+			w = buf->buf;
+			/* copy the rest */
+			memcpy(w, mem, len);
+			w += len;
+		}
+		else
+			error = -ENOMEM;
+	} else {
+		error = -ENOMEM;
+	}
+	if (!error) {
+		spin_lock(&buf->lock);
+		buf->writep = w;
+		spin_unlock(&buf->lock);
+	}
+ out:
+	read_unlock_irqrestore(&buf->del_lock, flags);
+	return error;
+}
+
+/* Assumption: concurrent reads are serialized externally. */
+int rb_get(ring_buffer_t* buf, char* mem, size_t len)
+{
+	unsigned long flags;
+	char *r, *w;
+	int error = 0;
+	read_lock_irqsave(&buf->del_lock, flags);
+	if (!buf->buf) {
+		error = -ENODEV;
+		goto out;
+	}
+	spin_lock(&buf->lock);
+	r = buf->readp;
+	w = buf->writep;
+	spin_unlock(&buf->lock);
+
+	if (w <= r && buf->end - r >= len) {
+		/* easy case: there is enough data in the buffer
+		 * to get it in one chunk */
+		memcpy(mem, r + 1, len);
+		r += len;
+		error = len;
+
+	} else if (r + 1 < w && w - r - 1 >= len) {
+		/* we are constrained by the write pointer, but
+		 * there is enough data
+		 */
+		memcpy(mem, r + 1, len);
+		r += len;
+		error = len;
+
+	} else if (r + 1 < w && w - r - 1 < len) {
+		/* we are constrained by the write pointer and
+		 * there is not enough data
+		 */
+		memcpy(mem, r + 1, w - r - 1);
+		error = w - r - 1;
+		r += w - r - 1;
+
+	} else if (w <= r && buf->end - r < len) {
+		/* the wrap-around case: there may or may not be enough data;
+		 * first, let's get what is available
+		 */
+		memcpy(mem, r + 1, buf->end - r);
+		error += (buf->end - r);
+		mem += (buf->end - r);
+		len -= (buf->end - r);
+		r += (buf->end - r);
+
+		if (w > buf->buf) {
+			/* there is more to get */
+			r = buf->buf - 1;
+			if (w - r >= len) {
+				/* plenty */
+				memcpy(mem, r + 1, len);
+				error += len;
+				r += len;
+			} else {
+				memcpy(mem, r + 1, w - r - 1);
+				error += w - r - 1;
+				r += w - r - 1;
+			}
+		}
+	} /* nothing available */
+
+	if (error > 0) {
+		spin_lock(&buf->lock);
+		buf->readp = r;
+		spin_unlock(&buf->lock);
+	}
+ out:
+	read_unlock_irqrestore(&buf->del_lock, flags);
+	return error;
+}
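
To make the pointer discipline concrete, an illustrative state trace for a freshly allocated buffer B of N bytes (not part of the patch):

	/* after rb_alloc_buf():  readp = B, writep = B + 1      (empty)
	 * rb_put(len = 3):       writes at writep,      writep becomes B + 4
	 * rb_get(len = 3):       copies from readp + 1, readp  becomes B + 3
	 *
	 * "Empty" is writep == readp + 1 and "full" is writep == readp; a put
	 * that would push writep past readp (the pointers "crossing") fails
	 * with -ENOMEM, so at most N - 1 bytes are ever buffered.
	 */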
+
+
+
+/******************************************************************************/
+/*                           DEVICE FILE DRIVER                               */
+/******************************************************************************/
+
+
+
+/* Allocate a buffer of about 1 MB. */
+#define BUFFER_ORDER 8
+
+typedef struct {
+	ring_buffer_t buf;
+	atomic_t reader_cnt;
+	struct semaphore reader_mutex;
+} trace_buffer_t;
+
+
+/* This does not initialize the semaphore!! */
+
+#define EMPTY_TRACE_BUFFER \
+	{ .buf = EMPTY_RING_BUFFER, .reader_cnt = ATOMIC_INIT(0) }
+
+static spinlock_t log_buffer_lock = SPIN_LOCK_UNLOCKED;
+static trace_buffer_t log_buffer = EMPTY_TRACE_BUFFER;
+
+static void init_log_buffer(void)
+{
+	/* only initialize the mutex, the rest was initialized as part
+	 * of the static initialization macro
+	 */
+	init_MUTEX(&log_buffer.reader_mutex);
+}
+
+static ssize_t log_read(struct file *filp, char __user *to, size_t len,
+			loff_t *f_pos)
+{
+	/* we ignore f_pos, this is strictly sequential */
+
+	ssize_t error = -EINVAL;
+	char* mem;
+	trace_buffer_t *buf = filp->private_data;
+
+	if (down_interruptible(&buf->reader_mutex)) {
+		error = -ERESTARTSYS;
+		goto out;
+	}
+
+	if (len > 64 * 1024)
+		len = 64 * 1024;
+	mem = kmalloc(len, GFP_KERNEL);
+	if (!mem) {
+		error = -ENOMEM;
+		goto out_unlock;
+	}
+
+	error = rb_get(&buf->buf, mem, len);
+	while (!error) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(110); /* sleep ~110 jiffies, then poll again */
+		if (signal_pending(current))
+			error = -ERESTARTSYS;
+		else
+			error = rb_get(&buf->buf, mem, len);
+	}
+
+	if (error > 0 && copy_to_user(to, mem, error))
+		error = -EFAULT;
+
+	kfree(mem);
+ out_unlock:
+	up(&buf->reader_mutex);
+ out:
+	return error;
+}
+
+
+
+
+/* log_open - open the global log message ring buffer.
+ */
+static int log_open(struct inode *in, struct file *filp)
+{
+	int error = -EINVAL;
+	trace_buffer_t* buf;
+
+	buf = &log_buffer;
+
+	if (down_interruptible(&buf->reader_mutex)) {
+		error = -ERESTARTSYS;
+		goto out;
+	}
+
+	/* first open must allocate buffers */
+	if (atomic_inc_return(&buf->reader_cnt) == 1) {
+		if ((error = rb_alloc_buf(&buf->buf, BUFFER_ORDER)))
+		{
+			atomic_dec(&buf->reader_cnt);
+			goto out_unlock;
+		}
+	}
+
+	error = 0;
+	filp->private_data = buf;
+	printk(KERN_DEBUG "sched_trace buf: from 0x%p to 0x%p length: %x\n",
+	       buf->buf.buf, buf->buf.end,
+	       (unsigned int) (buf->buf.end - buf->buf.buf));
+ out_unlock:
+	up(&buf->reader_mutex);
+ out:
+	return error;
+}
+
+static int log_release(struct inode *in, struct file *filp)
+{
+	int error = -EINVAL;
+	trace_buffer_t* buf = filp->private_data;
+
+	BUG_ON(!filp->private_data);
+
+	if (down_interruptible(&buf->reader_mutex)) {
+		error = -ERESTARTSYS;
+		goto out;
+	}
+
+	/* last release must deallocate buffers */
+	if (atomic_dec_return(&buf->reader_cnt) == 0) {
+		error = rb_free_buf(&buf->buf);
+	}
+
+	up(&buf->reader_mutex);
+ out:
+	return error;
+}
+
+/******************************************************************************/
+/*                           Device Registration                              */
+/******************************************************************************/
+
+/* The major numbers are from the unassigned/local use block.
+ *
+ * This should be converted to dynamic allocation at some point...
+ */
+#define LOG_MAJOR 251
+
+/* log_fops - The file operations for accessing the global LITMUS log message
+ *            buffer.
+ *
+ * Except for opening the device file it uses the same operations as trace_fops.
+ */
+struct file_operations log_fops = {
+	.owner = THIS_MODULE,
+	.open = log_open,
+	.release = log_release,
+	.read = log_read,
+};
+
+static int __init register_buffer_dev(const char* name,
+				      struct file_operations* fops,
+				      int major, int count)
+{
+	dev_t trace_dev;
+	struct cdev *cdev;
+	int error = 0;
+
+	trace_dev = MKDEV(major, 0);
+	error = register_chrdev_region(trace_dev, count, name);
+	if (error)
+	{
+		printk(KERN_WARNING "sched trace: "
+		       "Could not register major/minor number %d\n", major);
+		return error;
+	}
+	cdev = cdev_alloc();
+	if (!cdev) {
+		printk(KERN_WARNING "sched trace: "
+		       "Could not get a cdev for %s.\n", name);
+		return -ENOMEM;
+	}
+	cdev->owner = THIS_MODULE;
+	cdev->ops = fops;
+	error = cdev_add(cdev, trace_dev, count);
+	if (error) {
+		printk(KERN_WARNING "sched trace: "
+		       "add_cdev failed for %s.\n", name);
+		return -ENOMEM;
+	}
+	return error;
+
+}
+
+#ifdef CONFIG_MAGIC_SYSRQ
+
+static void sysrq_dump_trace_buffer(int key, struct tty_struct *tty)
+{
+	dump_trace_buffer(100);
+}
+
+static struct sysrq_key_op sysrq_dump_trace_buffer_op = {
+	.handler = sysrq_dump_trace_buffer,
+	.help_msg = "dump-trace-buffer(Y)",
+	.action_msg = "writing content of TRACE() buffer",
+};
+
+#endif
+
+static int __init init_sched_trace(void)
+{
+	printk("Initializing TRACE() device\n");
+	init_log_buffer();
+
+#ifdef CONFIG_MAGIC_SYSRQ
+	/* offer some debugging help */
+	if (!register_sysrq_key('y', &sysrq_dump_trace_buffer_op))
+		printk("Registered dump-trace-buffer(Y) magic sysrq.\n");
+	else
+		printk("Could not register dump-trace-buffer(Y) magic sysrq.\n");
+#endif
+
+
+	return register_buffer_dev("litmus_log", &log_fops,
+				   LOG_MAJOR, 1);
+}
+
+module_init(init_sched_trace);
+
+#define MSG_SIZE 255
+static DEFINE_PER_CPU(char[MSG_SIZE], fmt_buffer);
+
+/* sched_trace_log_message - This is the only function that accesses the
+ *                           log buffer inside the kernel for writing.
+ *                           Concurrent access to it is serialized via the
+ *                           log_buffer_lock.
+ *
+ *                           The maximum length of a formatted message is 255.
+ */
+void sched_trace_log_message(const char* fmt, ...)
+{
+	unsigned long flags;
+	va_list args;
+	size_t len;
+	char* buf;
+
+	va_start(args, fmt);
+	local_irq_save(flags);
+
+	/* format message */
+	buf = __get_cpu_var(fmt_buffer);
+	len = vscnprintf(buf, MSG_SIZE, fmt, args);
+
+	spin_lock(&log_buffer_lock);
+	/* Don't copy the trailing null byte, we don't want null bytes
+	 * in a text file.
+	 */
+	rb_put(&log_buffer.buf, buf, len);
+	spin_unlock(&log_buffer_lock);
+
+	local_irq_restore(flags);
+	va_end(args);
+}
+
+void dump_trace_buffer(int max)
+{
+	char line[80];
+	int len;
+	int count = 0;
+
+	while ((max == 0 || count++ < max) &&
+	       (len = rb_get(&log_buffer.buf, line, sizeof(line) - 1)) > 0) {
+		line[len] = '\0';
+		printk("%s", line);
+	}
+}
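
From user space, the log is consumed as a plain sequential byte stream. A hedged reader sketch (the device node is an assumption; with the static LOG_MAJOR above it would have to be created by hand, e.g. `mknod /dev/litmus_log c 251 0`):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		int fd = open("/dev/litmus_log", O_RDONLY);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* log_read() blocks and polls until data arrives */
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);
		close(fd);
		return 0;
	}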
diff --git a/litmus/trace.c b/litmus/trace.c
new file mode 100644
index 000000000000..6a134121e18c
--- /dev/null
+++ b/litmus/trace.c
@@ -0,0 +1,98 @@
+#include <linux/module.h>
+
+#include <litmus/ftdev.h>
+#include <litmus/litmus.h>
+#include <litmus/trace.h>
+
+/******************************************************************************/
+/*                               Allocation                                   */
+/******************************************************************************/
+
+static struct ftdev overhead_dev;
+
+#define trace_ts_buf overhead_dev.minor[0].buf
+
+static unsigned int ts_seq_no = 0;
+
+static inline void __save_timestamp_cpu(unsigned long event,
+					uint8_t type, uint8_t cpu)
+{
+	unsigned int seq_no;
+	struct timestamp *ts;
+	seq_no = fetch_and_inc((int *) &ts_seq_no);
+	if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) {
+		ts->event = event;
+		ts->timestamp = ft_timestamp();
+		ts->seq_no = seq_no;
+		ts->cpu = cpu;
+		ts->task_type = type;
+		ft_buffer_finish_write(trace_ts_buf, ts);
+	}
+}
+
+static inline void __save_timestamp(unsigned long event,
+				    uint8_t type)
+{
+	__save_timestamp_cpu(event, type, raw_smp_processor_id());
+}
+
+feather_callback void save_timestamp(unsigned long event)
+{
+	__save_timestamp(event, TSK_UNKNOWN);
+}
+
+feather_callback void save_timestamp_def(unsigned long event,
+					 unsigned long type)
+{
+	__save_timestamp(event, (uint8_t) type);
+}
+
+feather_callback void save_timestamp_task(unsigned long event,
+					  unsigned long t_ptr)
+{
+	int rt = is_realtime((struct task_struct *) t_ptr);
+	__save_timestamp(event, rt ? TSK_RT : TSK_BE);
+}
+
+feather_callback void save_timestamp_cpu(unsigned long event,
+					 unsigned long cpu)
+{
+	__save_timestamp_cpu(event, TSK_UNKNOWN, cpu);
+}
+
+/******************************************************************************/
+/*                           DEVICE FILE DRIVER                               */
+/******************************************************************************/
+
+#define NO_TIMESTAMPS (2 << 19) /* that should be 8 MB of RAM; we may not get
+				 * that much */
+#define FT_TRACE_MAJOR 252
+
+static int alloc_timestamp_buffer(struct ftdev* ftdev, unsigned int idx)
+{
+	unsigned int count = NO_TIMESTAMPS;
+	while (count && !trace_ts_buf) {
+		printk("time stamp buffer: trying to allocate %u time stamps.\n", count);
+		ftdev->minor[idx].buf = alloc_ft_buffer(count, sizeof(struct timestamp));
+		count /= 2;
+	}
+	return ftdev->minor[idx].buf ? 0 : -ENOMEM;
+}
+
+static void free_timestamp_buffer(struct ftdev* ftdev, unsigned int idx)
+{
+	free_ft_buffer(ftdev->minor[idx].buf);
+	ftdev->minor[idx].buf = NULL;
+}
+
+static int __init init_ft_overhead_trace(void)
+{
+	printk("Initializing Feather-Trace overhead tracing device.\n");
+	ftdev_init(&overhead_dev, THIS_MODULE);
+	overhead_dev.minor_cnt = 1; /* only one buffer */
+	overhead_dev.alloc = alloc_timestamp_buffer;
+	overhead_dev.free = free_timestamp_buffer;
+	return register_ftdev(&overhead_dev, "ft_trace", FT_TRACE_MAJOR);
+}
+
+module_init(init_ft_overhead_trace);
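
For context, a hedged sketch of how these callbacks are meant to be wired into a hot path. It assumes that feather_trace.h also provides a zero-argument ft_event0() alongside the ft_event3() shown above; TS_EXAMPLE_START/END are made-up event ids:

	#define TS_EXAMPLE_START 170	/* made-up event ids */
	#define TS_EXAMPLE_END   171

	void some_hot_path(void)
	{
		/* records the event id, TSC value, global seq_no, and CPU */
		ft_event0(TS_EXAMPLE_START, save_timestamp);
		/* ... work being timed ... */
		ft_event0(TS_EXAMPLE_END, save_timestamp);
	}

Because seq_no comes from a single fetch_and_inc() shared by all CPUs, matching START/END pairs can be ordered globally in post-processing even when the timestamps are taken on different processors.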