author     Andrea Bastoni <bastoni@cs.unc.edu>  2009-12-17 21:26:50 -0500
committer  Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-29 17:12:27 -0400
commit     96979188007a0671d3f067d7edf144742d7433ee (patch)
tree       8b93dacea74499926cc4fcaa0879dbfe3ace9d7f
parent     cf3f4bd8db320f3f487d66bdec924e926f004787 (diff)

[ported from 2008.3] Add tracing support and hook up Litmus KConfig for x86

- fix requesting more than 2^11 pages (MAX_ORDER) to system allocator

Still to be merged:
- feather-trace generic implementation
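The MAX_ORDER limit referred to above caps a single buddy-allocator request at 2^(MAX_ORDER - 1) contiguous pages (MAX_ORDER defaults to 11, i.e. at most 4 MiB with 4 KiB pages), so asking for more simply fails. Below is a minimal sketch of the back-off pattern the patch adopts in litmus/trace.c; the function and variable names here are illustrative only, not part of the patch:

	/* Sketch: retry with a smaller buffer until the allocator can satisfy
	 * the request, instead of asking for more than MAX_ORDER pages at once. */
	static struct ft_buffer* alloc_shrinking_buffer(unsigned int want)
	{
		struct ft_buffer* buf = NULL;
		unsigned int count = want;

		while (count && !buf) {
			buf = alloc_ft_buffer(count, sizeof(struct timestamp));
			if (!buf)
				count /= 2;	/* halve and retry */
		}
		return buf;
	}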
-rw-r--r--  arch/x86/Kconfig                      |   2
-rw-r--r--  arch/x86/include/asm/feather_trace.h  |  11
-rw-r--r--  include/litmus/feather_trace.h        |  14
-rw-r--r--  include/litmus/sched_trace.h          |   3
-rw-r--r--  kernel/printk.c                       |  14
-rw-r--r--  litmus/sched_task_trace.c             | 202
-rw-r--r--  litmus/sched_trace.c                  | 510
-rw-r--r--  litmus/trace.c                        | 102
8 files changed, 855 insertions, 3 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 72ace9515a07..e2cd95ebeeb1 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2092,3 +2092,5 @@ source "crypto/Kconfig"
 source "arch/x86/kvm/Kconfig"
 
 source "lib/Kconfig"
+
+source "litmus/Kconfig"
diff --git a/arch/x86/include/asm/feather_trace.h b/arch/x86/include/asm/feather_trace.h
new file mode 100644
index 000000000000..f60fbed07afb
--- /dev/null
+++ b/arch/x86/include/asm/feather_trace.h
@@ -0,0 +1,11 @@
+#ifndef _ARCH_FEATHER_TRACE_H
+#define _ARCH_FEATHER_TRACE_H
+
+static inline unsigned long long ft_timestamp(void)
+{
+	unsigned long long ret;
+	__asm__ __volatile__("rdtsc" : "=A" (ret));
+	return ret;
+}
+
+#endif
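ft_timestamp() reads the x86 time stamp counter; on 32-bit x86 the "=A" constraint captures the EDX:EAX pair that rdtsc fills, yielding a 64-bit cycle count. A minimal sketch (not part of the patch) of its typical use, with illustrative names only:

	/* Sketch: bracket a code section with two TSC reads to measure its cost. */
	static unsigned long long measure_cycles(void (*section)(void))
	{
		unsigned long long start, end;

		start = ft_timestamp();
		section();
		end = ft_timestamp();
		return end - start;	/* elapsed cycles (assumes a stable TSC) */
	}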
diff --git a/include/litmus/feather_trace.h b/include/litmus/feather_trace.h
index 3ac1ee5e0277..eef8af7a414e 100644
--- a/include/litmus/feather_trace.h
+++ b/include/litmus/feather_trace.h
@@ -1,6 +1,7 @@
 #ifndef _FEATHER_TRACE_H_
 #define _FEATHER_TRACE_H_
 
+#include <asm/feather_trace.h>
 
 int ft_enable_event(unsigned long id);
 int ft_disable_event(unsigned long id);
@@ -30,6 +31,19 @@ extern int ft_events[MAX_EVENTS];
 
 #define ft_event3(id, callback, p, p2, p3) \
 	if (ft_events[id]) callback(id, p, p2, p3);
+
+#include <asm/atomic.h>
+
+static inline int fetch_and_inc(int *val)
+{
+	return atomic_add_return(1, (atomic_t*) val) - 1;
+}
+
+static inline int fetch_and_dec(int *val)
+{
+	return atomic_sub_return(1, (atomic_t*) val) + 1;
+}
+
 #endif
 
 
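A minimal sketch (not part of the patch) of how fetch_and_inc() is meant to be used; litmus/trace.c below relies on exactly this pattern to hand out globally ordered sequence numbers for timestamps:

	static int seq_no = 0;	/* shared counter, updated atomically */

	static unsigned int next_seq_no(void)
	{
		/* returns the pre-increment value, so concurrent callers each
		 * obtain a unique, monotonically increasing number */
		return fetch_and_inc(&seq_no);
	}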
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
index afd0391d127b..aae6ac27fe1b 100644
--- a/include/litmus/sched_trace.h
+++ b/include/litmus/sched_trace.h
@@ -1,4 +1,5 @@
-/* sched_trace.h -- record scheduler events to a byte stream for offline analysis.
+/*
+ * sched_trace.h -- record scheduler events to a byte stream for offline analysis.
  */
 #ifndef _LINUX_SCHED_TRACE_H_
 #define _LINUX_SCHED_TRACE_H_
diff --git a/kernel/printk.c b/kernel/printk.c
index f38b07f78a4e..6712a252b306 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -70,6 +70,13 @@ int console_printk[4] = {
 static int saved_console_loglevel = -1;
 
 /*
+ * divert printk() messages when there is a LITMUS^RT debug listener
+ */
+#include <litmus/litmus.h>
+int trace_override = 0;
+int trace_recurse = 0;
+
+/*
  * Low level drivers may need that to know if they can schedule in
  * their unblank() callback or not. So let's export it.
  */
@@ -713,6 +720,9 @@ asmlinkage int vprintk(const char *fmt, va_list args)
 	/* Emit the output into the temporary buffer */
 	printed_len += vscnprintf(printk_buf + printed_len,
 				  sizeof(printk_buf) - printed_len, fmt, args);
+	/* if LITMUS^RT tracer is active divert printk() msgs */
+	if (trace_override && !trace_recurse)
+		TRACE("%s", printk_buf);
 
 
 	p = printk_buf;
@@ -782,7 +792,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
 	 * Try to acquire and then immediately release the
 	 * console semaphore. The release will do all the
 	 * actual magic (print out buffers, wake up klogd,
 	 * etc).
 	 *
 	 * The acquire_console_semaphore_for_printk() function
 	 * will release 'logbuf_lock' regardless of whether it
@@ -1019,7 +1029,7 @@ int printk_needs_cpu(int cpu)
 
 void wake_up_klogd(void)
 {
-	if (waitqueue_active(&log_wait))
+	if (!trace_override && waitqueue_active(&log_wait))
 		__raw_get_cpu_var(printk_pending) = 1;
 }
 
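Taken together, the two flags work as follows: trace_override is raised while a reader holds the LITMUS^RT log device open, so vprintk() copies every message into the TRACE() buffer and wake_up_klogd() stays quiet; trace_recurse suppresses that diversion while the buffer itself is being dumped via printk(). A minimal sketch (not part of the patch) of the guard pattern, mirroring dump_trace_buffer() in litmus/sched_trace.c below:

	static void drain_log_to_console(void)
	{
		char line[80];
		int len;

		trace_recurse = 1;	/* these printk()s must not be diverted back */
		while ((len = rb_get(&log_buffer.buf, line, sizeof(line) - 1)) > 0) {
			line[len] = '\0';
			printk("%s", line);
		}
		trace_recurse = 0;
	}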
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
new file mode 100644
index 000000000000..b7ea6d4e6e57
--- /dev/null
+++ b/litmus/sched_task_trace.c
@@ -0,0 +1,202 @@
+/*
+ * sched_task_trace.c -- record scheduling events to a byte stream
+ */
+
+#define NO_TASK_TRACE_DECLS
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/percpu.h>
+
+#include <litmus/ftdev.h>
+#include <litmus/litmus.h>
+
+#include <litmus/sched_trace.h>
+#include <litmus/feather_trace.h>
+#include <litmus/ftdev.h>
+
+#define FT_TASK_TRACE_MAJOR	253
+#define NO_EVENTS		4096 /* this is a buffer of 12 4k pages per CPU */
+
+#define now() litmus_clock()
+
+struct local_buffer {
+	struct st_event_record record[NO_EVENTS];
+	char flag[NO_EVENTS];
+	struct ft_buffer ftbuf;
+};
+
+DEFINE_PER_CPU(struct local_buffer, st_event_buffer);
+
+static struct ftdev st_dev;
+
+static int st_dev_can_open(struct ftdev *dev, unsigned int cpu)
+{
+	return cpu_online(cpu) ? 0 : -ENODEV;
+}
+
+static int __init init_sched_task_trace(void)
+{
+	struct local_buffer* buf;
+	int i, ok = 0;
+	ftdev_init(&st_dev, THIS_MODULE);
+	for (i = 0; i < NR_CPUS; i++) {
+		buf = &per_cpu(st_event_buffer, i);
+		ok += init_ft_buffer(&buf->ftbuf, NO_EVENTS,
+				     sizeof(struct st_event_record),
+				     buf->flag,
+				     buf->record);
+		st_dev.minor[i].buf = &buf->ftbuf;
+	}
+	if (ok == NR_CPUS) {
+		st_dev.minor_cnt = NR_CPUS;
+		st_dev.can_open = st_dev_can_open;
+		return register_ftdev(&st_dev, "sched_trace", FT_TASK_TRACE_MAJOR);
+	} else {
+		return -EINVAL;
+	}
+}
+
+module_init(init_sched_task_trace);
+
+
+static inline struct st_event_record* get_record(u8 type, struct task_struct* t)
+{
+	struct st_event_record* rec;
+	struct local_buffer* buf;
+
+	buf = &get_cpu_var(st_event_buffer);
+	if (ft_buffer_start_write(&buf->ftbuf, (void**) &rec)) {
+		rec->hdr.type = type;
+		rec->hdr.cpu = smp_processor_id();
+		rec->hdr.pid = t ? t->pid : 0;
+		rec->hdr.job = t ? t->rt_param.job_params.job_no : 0;
+	} else {
+		put_cpu_var(st_event_buffer);
+	}
+	/* rec will be NULL if it failed */
+	return rec;
+}
+
+static inline void put_record(struct st_event_record* rec)
+{
+	struct local_buffer* buf;
+	buf = &__get_cpu_var(st_event_buffer);
+	ft_buffer_finish_write(&buf->ftbuf, rec);
+	put_cpu_var(st_event_buffer);
+}
+
+feather_callback void do_sched_trace_task_name(unsigned long id, unsigned long _task)
+{
+	struct task_struct *t = (struct task_struct*) _task;
+	struct st_event_record* rec = get_record(ST_NAME, t);
+	int i;
+	if (rec) {
+		for (i = 0; i < min(TASK_COMM_LEN, ST_NAME_LEN); i++)
+			rec->data.name.cmd[i] = t->comm[i];
+		put_record(rec);
+	}
+}
+
+feather_callback void do_sched_trace_task_param(unsigned long id, unsigned long _task)
+{
+	struct task_struct *t = (struct task_struct*) _task;
+	struct st_event_record* rec = get_record(ST_PARAM, t);
+	if (rec) {
+		rec->data.param.wcet = get_exec_cost(t);
+		rec->data.param.period = get_rt_period(t);
+		rec->data.param.phase = get_rt_phase(t);
+		rec->data.param.partition = get_partition(t);
+		put_record(rec);
+	}
+}
+
+feather_callback void do_sched_trace_task_release(unsigned long id, unsigned long _task)
+{
+	struct task_struct *t = (struct task_struct*) _task;
+	struct st_event_record* rec = get_record(ST_RELEASE, t);
+	if (rec) {
+		rec->data.release.release = get_release(t);
+		rec->data.release.deadline = get_deadline(t);
+		put_record(rec);
+	}
+}
+
+/* skipped: st_assigned_data, we don't use it atm */
+
+feather_callback void do_sched_trace_task_switch_to(unsigned long id,
+						    unsigned long _task)
+{
+	struct task_struct *t = (struct task_struct*) _task;
+	struct st_event_record* rec;
+	if (is_realtime(t)) {
+		rec = get_record(ST_SWITCH_TO, t);
+		if (rec) {
+			rec->data.switch_to.when = now();
+			rec->data.switch_to.exec_time = get_exec_time(t);
+			put_record(rec);
+		}
+	}
+}
+
+feather_callback void do_sched_trace_task_switch_away(unsigned long id,
+						      unsigned long _task)
+{
+	struct task_struct *t = (struct task_struct*) _task;
+	struct st_event_record* rec;
+	if (is_realtime(t)) {
+		rec = get_record(ST_SWITCH_AWAY, t);
+		if (rec) {
+			rec->data.switch_away.when = now();
+			rec->data.switch_away.exec_time = get_exec_time(t);
+			put_record(rec);
+		}
+	}
+}
+
+feather_callback void do_sched_trace_task_completion(unsigned long id,
+						     unsigned long _task,
+						     unsigned long forced)
+{
+	struct task_struct *t = (struct task_struct*) _task;
+	struct st_event_record* rec = get_record(ST_COMPLETION, t);
+	if (rec) {
+		rec->data.completion.when = now();
+		rec->data.completion.forced = forced;
+		put_record(rec);
+	}
+}
+
+feather_callback void do_sched_trace_task_block(unsigned long id,
+						unsigned long _task)
+{
+	struct task_struct *t = (struct task_struct*) _task;
+	struct st_event_record* rec = get_record(ST_BLOCK, t);
+	if (rec) {
+		rec->data.block.when = now();
+		put_record(rec);
+	}
+}
+
+feather_callback void do_sched_trace_task_resume(unsigned long id,
+						 unsigned long _task)
+{
+	struct task_struct *t = (struct task_struct*) _task;
+	struct st_event_record* rec = get_record(ST_RESUME, t);
+	if (rec) {
+		rec->data.resume.when = now();
+		put_record(rec);
+	}
+}
+
+feather_callback void do_sched_trace_sys_release(unsigned long id,
+						 unsigned long _start)
+{
+	lt_t *start = (lt_t*) _start;
+	struct st_event_record* rec = get_record(ST_SYS_RELEASE, NULL);
+	if (rec) {
+		rec->data.sys_release.when = now();
+		rec->data.sys_release.release = *start;
+		put_record(rec);
+	}
+}
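A brief sketch (not part of the patch) of how a scheduler plugin is expected to reach the feather_callback functions above. The assumption here is that include/litmus/sched_trace.h supplies sched_trace_*() wrapper macros which compile down to Feather-Trace events bound to these callbacks; the plugin function name is illustrative only:

	/* Sketch: a plugin's release path emits an ST_RELEASE record. */
	static void example_release_job(struct task_struct *t)
	{
		/* ... plugin-specific release bookkeeping for t ... */
		sched_trace_task_release(t);	/* assumed wrapper macro */
	}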
diff --git a/litmus/sched_trace.c b/litmus/sched_trace.c
new file mode 100644
index 000000000000..5befc1e21b1f
--- /dev/null
+++ b/litmus/sched_trace.c
@@ -0,0 +1,510 @@
+/*
+ * sched_trace.c -- record scheduling events to a byte stream.
+ *
+ * TODO: Move ring buffer to a lockfree implementation.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/semaphore.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/sysrq.h>
+
+#include <litmus/sched_trace.h>
+#include <litmus/litmus.h>
+
+typedef struct {
+	/* guard read and write pointers */
+	spinlock_t lock;
+	/* guard against concurrent freeing of buffer */
+	rwlock_t del_lock;
+
+	/* memory allocated for ring buffer */
+	unsigned long order;
+	char* buf;
+	char* end;
+
+	/* Read/write pointer. May not cross.
+	 * They point to the position of the next write and
+	 * the last read.
+	 */
+	char* writep;
+	char* readp;
+
+} ring_buffer_t;
+
+#define EMPTY_RING_BUFFER { \
+	.lock = SPIN_LOCK_UNLOCKED, \
+	.del_lock = RW_LOCK_UNLOCKED, \
+	.buf = NULL, \
+	.end = NULL, \
+	.writep = NULL, \
+	.readp = NULL \
+}
+
+void rb_init(ring_buffer_t* buf)
+{
+	*buf = (ring_buffer_t) EMPTY_RING_BUFFER;
+}
+
+int rb_alloc_buf(ring_buffer_t* buf, unsigned long order)
+{
+	unsigned long flags;
+	int error = 0;
+	char *mem;
+
+	/* do memory allocation while not atomic */
+	mem = (char *) __get_free_pages(GFP_KERNEL, order);
+	if (!mem)
+		return -ENOMEM;
+	write_lock_irqsave(&buf->del_lock, flags);
+	BUG_ON(buf->buf);
+	buf->buf = mem;
+	buf->end = buf->buf + PAGE_SIZE * (1 << order) - 1;
+	memset(buf->buf, 0xff, buf->end - buf->buf);
+	buf->order = order;
+	buf->writep = buf->buf + 1;
+	buf->readp = buf->buf;
+	write_unlock_irqrestore(&buf->del_lock, flags);
+	return error;
+}
+
+int rb_free_buf(ring_buffer_t* buf)
+{
+	unsigned long flags;
+	int error = 0;
+	write_lock_irqsave(&buf->del_lock, flags);
+	BUG_ON(!buf->buf);
+	free_pages((unsigned long) buf->buf, buf->order);
+	buf->buf = NULL;
+	buf->end = NULL;
+	buf->writep = NULL;
+	buf->readp = NULL;
+	write_unlock_irqrestore(&buf->del_lock, flags);
+	return error;
+}
+
+/* Assumption: concurrent writes are serialized externally
+ *
+ * Will only succeed if there is enough space for all len bytes.
+ */
+int rb_put(ring_buffer_t* buf, char* mem, size_t len)
+{
+	unsigned long flags;
+	char* r, *w;
+	int error = 0;
+	read_lock_irqsave(&buf->del_lock, flags);
+	if (!buf->buf) {
+		error = -ENODEV;
+		goto out;
+	}
+	spin_lock(&buf->lock);
+	r = buf->readp;
+	w = buf->writep;
+	spin_unlock(&buf->lock);
+	if (r < w && buf->end - w >= len - 1) {
+		/* easy case: there is enough space in the buffer
+		 * to write it in one continuous chunk */
+		memcpy(w, mem, len);
+		w += len;
+		if (w > buf->end)
+			/* special case: fit exactly into buffer
+			 * w is now buf->end + 1
+			 */
+			w = buf->buf;
+	} else if (w < r && r - w >= len) { /* >= len because may not cross */
+		/* we are constrained by the read pointer but there
+		 * is enough space
+		 */
+		memcpy(w, mem, len);
+		w += len;
+	} else if (r <= w && buf->end - w < len - 1) {
+		/* the wrap around case: there may or may not be space */
+		if ((buf->end - w) + (r - buf->buf) >= len - 1) {
+			/* copy chunk that fits at the end */
+			memcpy(w, mem, buf->end - w + 1);
+			mem += buf->end - w + 1;
+			len -= (buf->end - w + 1);
+			w = buf->buf;
+			/* copy the rest */
+			memcpy(w, mem, len);
+			w += len;
+		}
+		else
+			error = -ENOMEM;
+	} else {
+		error = -ENOMEM;
+	}
+	if (!error) {
+		spin_lock(&buf->lock);
+		buf->writep = w;
+		spin_unlock(&buf->lock);
+	}
+ out:
+	read_unlock_irqrestore(&buf->del_lock, flags);
+	return error;
+}
+
+/* Assumption: concurrent reads are serialized externally */
+int rb_get(ring_buffer_t* buf, char* mem, size_t len)
+{
+	unsigned long flags;
+	char* r, *w;
+	int error = 0;
+	read_lock_irqsave(&buf->del_lock, flags);
+	if (!buf->buf) {
+		error = -ENODEV;
+		goto out;
+	}
+	spin_lock(&buf->lock);
+	r = buf->readp;
+	w = buf->writep;
+	spin_unlock(&buf->lock);
+
+	if (w <= r && buf->end - r >= len) {
+		/* easy case: there is enough data in the buffer
+		 * to get it in one chunk */
+		memcpy(mem, r + 1, len);
+		r += len;
+		error = len;
+
+	} else if (r + 1 < w && w - r - 1 >= len) {
+		/* we are constrained by the write pointer but
+		 * there is enough data
+		 */
+		memcpy(mem, r + 1, len);
+		r += len;
+		error = len;
+
+	} else if (r + 1 < w && w - r - 1 < len) {
+		/* we are constrained by the write pointer and there
+		 * is not enough data
+		 */
+		memcpy(mem, r + 1, w - r - 1);
+		error = w - r - 1;
+		r += w - r - 1;
+
+	} else if (w <= r && buf->end - r < len) {
+		/* the wrap around case: there may or may not be enough data
+		 * first let's get what is available
+		 */
+		memcpy(mem, r + 1, buf->end - r);
+		error += (buf->end - r);
+		mem += (buf->end - r);
+		len -= (buf->end - r);
+		r += (buf->end - r);
+
+		if (w > buf->buf) {
+			/* there is more to get */
+			r = buf->buf - 1;
+			if (w - r >= len) {
+				/* plenty */
+				memcpy(mem, r + 1, len);
+				error += len;
+				r += len;
+			} else {
+				memcpy(mem, r + 1, w - r - 1);
+				error += w - r - 1;
+				r += w - r - 1;
+			}
+		}
+	} /* nothing available */
+
+	if (error > 0) {
+		spin_lock(&buf->lock);
+		buf->readp = r;
+		spin_unlock(&buf->lock);
+	}
+ out:
+	read_unlock_irqrestore(&buf->del_lock, flags);
+	return error;
+}
+
+
+
+/******************************************************************************/
+/*				DEVICE FILE DRIVER			      */
+/******************************************************************************/
+
+
+
+/* Allocate a buffer of about 1 MB per CPU.
+ *
+ */
+#define BUFFER_ORDER 8
+
+typedef struct {
+	ring_buffer_t buf;
+	atomic_t reader_cnt;
+	struct semaphore reader_mutex;
+} trace_buffer_t;
+
+
+/* This does not initialize the semaphore!! */
+
+#define EMPTY_TRACE_BUFFER \
+	{ .buf = EMPTY_RING_BUFFER, .reader_cnt = ATOMIC_INIT(0)}
+
+static spinlock_t log_buffer_lock = SPIN_LOCK_UNLOCKED;
+static trace_buffer_t log_buffer = EMPTY_TRACE_BUFFER;
+
+static void init_log_buffer(void)
+{
+	/* only initialize the mutex, the rest was initialized as part
+	 * of the static initialization macro
+	 */
+	init_MUTEX(&log_buffer.reader_mutex);
+}
+
+static ssize_t log_read(struct file *filp, char __user *to, size_t len,
+			loff_t *f_pos)
+{
+	/* we ignore f_pos, this is strictly sequential */
+
+	ssize_t error = -EINVAL;
+	char* mem;
+	trace_buffer_t *buf = filp->private_data;
+
+	if (down_interruptible(&buf->reader_mutex)) {
+		error = -ERESTARTSYS;
+		goto out;
+	}
+
+	if (len > 64 * 1024)
+		len = 64 * 1024;
+	mem = kmalloc(len, GFP_KERNEL);
+	if (!mem) {
+		error = -ENOMEM;
+		goto out_unlock;
+	}
+
+	error = rb_get(&buf->buf, mem, len);
+	while (!error) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(110);
+		if (signal_pending(current))
+			error = -ERESTARTSYS;
+		else
+			error = rb_get(&buf->buf, mem, len);
+	}
+
+	if (error > 0 && copy_to_user(to, mem, error))
+		error = -EFAULT;
+
+	kfree(mem);
+ out_unlock:
+	up(&buf->reader_mutex);
+ out:
+	return error;
+}
+
+/* defined in kernel/printk.c */
+extern int trace_override;
+extern int trace_recurse;
+
+/* log_open - open the global log message ring buffer.
+ */
+static int log_open(struct inode *in, struct file *filp)
+{
+	int error = -EINVAL;
+	trace_buffer_t* buf;
+
+	buf = &log_buffer;
+
+	if (down_interruptible(&buf->reader_mutex)) {
+		error = -ERESTARTSYS;
+		goto out;
+	}
+
+	/* first open must allocate buffers */
+	if (atomic_inc_return(&buf->reader_cnt) == 1) {
+		if ((error = rb_alloc_buf(&buf->buf, BUFFER_ORDER)))
+		{
+			atomic_dec(&buf->reader_cnt);
+			goto out_unlock;
+		}
+	}
+
+	error = 0;
+	filp->private_data = buf;
+	printk(KERN_DEBUG "sched_trace buf: from 0x%p to 0x%p length: %x\n",
+	       buf->buf.buf, buf->buf.end,
+	       (unsigned int) (buf->buf.end - buf->buf.buf));
+
+	/* override printk() */
+	trace_override++;
+
+ out_unlock:
+	up(&buf->reader_mutex);
+ out:
+	return error;
+}
+
+static int log_release(struct inode *in, struct file *filp)
+{
+	int error = -EINVAL;
+	trace_buffer_t* buf = filp->private_data;
+
+	BUG_ON(!filp->private_data);
+
+	if (down_interruptible(&buf->reader_mutex)) {
+		error = -ERESTARTSYS;
+		goto out;
+	}
+
+	/* last release must deallocate buffers */
+	if (atomic_dec_return(&buf->reader_cnt) == 0) {
+		error = rb_free_buf(&buf->buf);
+	}
+
+	/* release printk() overriding */
+	trace_override--;
+
+	up(&buf->reader_mutex);
+ out:
+	return error;
+}
+
+/******************************************************************************/
+/*				Device Registration			      */
+/******************************************************************************/
+
+/* the major numbers are from the unassigned/local use block
+ *
+ * This should be converted to dynamic allocation at some point...
+ */
+#define LOG_MAJOR 251
+
+/* log_fops - The file operations for accessing the global LITMUS log message
+ *            buffer.
+ *
+ * Except for opening the device file it uses the same operations as trace_fops.
+ */
+struct file_operations log_fops = {
+	.owner = THIS_MODULE,
+	.open = log_open,
+	.release = log_release,
+	.read = log_read,
+};
+
+static int __init register_buffer_dev(const char* name,
+				      struct file_operations* fops,
+				      int major, int count)
+{
+	dev_t trace_dev;
+	struct cdev *cdev;
+	int error = 0;
+
+	trace_dev = MKDEV(major, 0);
+	error = register_chrdev_region(trace_dev, count, name);
+	if (error)
+	{
+		printk(KERN_WARNING "sched trace: "
+		       "Could not register major/minor number %d\n", major);
+		return error;
+	}
+	cdev = cdev_alloc();
+	if (!cdev) {
+		printk(KERN_WARNING "sched trace: "
+		       "Could not get a cdev for %s.\n", name);
+		return -ENOMEM;
+	}
+	cdev->owner = THIS_MODULE;
+	cdev->ops = fops;
+	error = cdev_add(cdev, trace_dev, count);
+	if (error) {
+		printk(KERN_WARNING "sched trace: "
+		       "add_cdev failed for %s.\n", name);
+		return -ENOMEM;
+	}
+	return error;
+
+}
+
+#ifdef CONFIG_MAGIC_SYSRQ
+
+static void sysrq_dump_trace_buffer(int key, struct tty_struct *tty)
+{
+	dump_trace_buffer(100);
+}
+
+static struct sysrq_key_op sysrq_dump_trace_buffer_op = {
+	.handler	= sysrq_dump_trace_buffer,
+	.help_msg	= "dump-trace-buffer(Y)",
+	.action_msg	= "writing content of TRACE() buffer",
+};
+
+#endif
+
+static int __init init_sched_trace(void)
+{
+	printk("Initializing TRACE() device\n");
+	init_log_buffer();
+
+#ifdef CONFIG_MAGIC_SYSRQ
+	/* offer some debugging help */
+	if (!register_sysrq_key('y', &sysrq_dump_trace_buffer_op))
+		printk("Registered dump-trace-buffer(Y) magic sysrq.\n");
+	else
+		printk("Could not register dump-trace-buffer(Y) magic sysrq.\n");
+#endif
+
+
+	return register_buffer_dev("litmus_log", &log_fops,
+				   LOG_MAJOR, 1);
+}
+
+module_init(init_sched_trace);
+
+#define MSG_SIZE 255
+static DEFINE_PER_CPU(char[MSG_SIZE], fmt_buffer);
+
+/* sched_trace_log_message - This is the only function that accesses the
+ *                           log buffer inside the kernel for writing.
+ *                           Concurrent access to it is serialized via the
+ *                           log_buffer_lock.
+ *
+ *                           The maximum length of a formatted message is 255.
+ */
+void sched_trace_log_message(const char* fmt, ...)
+{
+	unsigned long flags;
+	va_list args;
+	size_t len;
+	char* buf;
+
+	va_start(args, fmt);
+	local_irq_save(flags);
+
+	/* format message */
+	buf = __get_cpu_var(fmt_buffer);
+	len = vscnprintf(buf, MSG_SIZE, fmt, args);
+
+	spin_lock(&log_buffer_lock);
+	/* Don't copy the trailing null byte, we don't want null bytes
+	 * in a text file.
+	 */
+	rb_put(&log_buffer.buf, buf, len);
+	spin_unlock(&log_buffer_lock);
+
+	local_irq_restore(flags);
+	va_end(args);
+}
+
+void dump_trace_buffer(int max)
+{
+	char line[80];
+	int len;
+	int count = 0;
+
+	/* potential, but very unlikely, race... */
+	trace_recurse = 1;
+	while ((max == 0 || count++ < max) &&
+	       (len = rb_get(&log_buffer.buf, line, sizeof(line) - 1)) > 0) {
+		line[len] = '\0';
+		printk("%s", line);
+	}
+	trace_recurse = 0;
+}
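A userspace sketch (not part of the patch) for draining the TRACE() log exposed by the litmus_log device above. The device node path is an assumption; with the static LOG_MAJOR of 251 it can be created by hand, e.g. "mknod /dev/litmus_log c 251 0":

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		int fd = open("/dev/litmus_log", O_RDONLY);	/* assumed node */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* log_read() blocks until messages are available */
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, (size_t) n, stdout);
		close(fd);
		return 0;
	}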
diff --git a/litmus/trace.c b/litmus/trace.c
new file mode 100644
index 000000000000..5735d28f5e30
--- /dev/null
+++ b/litmus/trace.c
@@ -0,0 +1,102 @@
+#include <linux/module.h>
+
+#include <litmus/ftdev.h>
+#include <litmus/litmus.h>
+#include <litmus/trace.h>
+
+/******************************************************************************/
+/*				Allocation				      */
+/******************************************************************************/
+
+static struct ftdev overhead_dev;
+
+#define trace_ts_buf overhead_dev.minor[0].buf
+
+static unsigned int ts_seq_no = 0;
+
+static inline void __save_timestamp_cpu(unsigned long event,
+					uint8_t type, uint8_t cpu)
+{
+	unsigned int seq_no;
+	struct timestamp *ts;
+	seq_no = fetch_and_inc((int *) &ts_seq_no);
+	if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) {
+		ts->event = event;
+		ts->timestamp = ft_timestamp();
+		ts->seq_no = seq_no;
+		ts->cpu = cpu;
+		ts->task_type = type;
+		ft_buffer_finish_write(trace_ts_buf, ts);
+	}
+}
+
+static inline void __save_timestamp(unsigned long event,
+				    uint8_t type)
+{
+	__save_timestamp_cpu(event, type, raw_smp_processor_id());
+}
+
+feather_callback void save_timestamp(unsigned long event)
+{
+	__save_timestamp(event, TSK_UNKNOWN);
+}
+
+feather_callback void save_timestamp_def(unsigned long event,
+					 unsigned long type)
+{
+	__save_timestamp(event, (uint8_t) type);
+}
+
+feather_callback void save_timestamp_task(unsigned long event,
+					  unsigned long t_ptr)
+{
+	int rt = is_realtime((struct task_struct *) t_ptr);
+	__save_timestamp(event, rt ? TSK_RT : TSK_BE);
+}
+
+feather_callback void save_timestamp_cpu(unsigned long event,
+					 unsigned long cpu)
+{
+	__save_timestamp_cpu(event, TSK_UNKNOWN, cpu);
+}
+
+/******************************************************************************/
+/*				DEVICE FILE DRIVER			      */
+/******************************************************************************/
+
+/*
+ * should be 8M; it is the most we can ask of the buddy system allocator
+ * (MAX_ORDER), and we might not get as much
+ */
+#define NO_TIMESTAMPS (2 << 11)
+
+#define FT_TRACE_MAJOR 252
+
+static int alloc_timestamp_buffer(struct ftdev* ftdev, unsigned int idx)
+{
+	unsigned int count = NO_TIMESTAMPS;
+	while (count && !trace_ts_buf) {
+		printk("time stamp buffer: trying to allocate %u time stamps.\n", count);
+		ftdev->minor[idx].buf = alloc_ft_buffer(count, sizeof(struct timestamp));
+		count /= 2;
+	}
+	return ftdev->minor[idx].buf ? 0 : -ENOMEM;
+}
+
+static void free_timestamp_buffer(struct ftdev* ftdev, unsigned int idx)
+{
+	free_ft_buffer(ftdev->minor[idx].buf);
+	ftdev->minor[idx].buf = NULL;
+}
+
+static int __init init_ft_overhead_trace(void)
+{
+	printk("Initializing Feather-Trace overhead tracing device.\n");
+	ftdev_init(&overhead_dev, THIS_MODULE);
+	overhead_dev.minor_cnt = 1; /* only one buffer */
+	overhead_dev.alloc = alloc_timestamp_buffer;
+	overhead_dev.free = free_timestamp_buffer;
+	return register_ftdev(&overhead_dev, "ft_trace", FT_TRACE_MAJOR);
+}
+
+module_init(init_ft_overhead_trace);
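A final sketch (not part of the patch) of how an overhead trace point would be wired to the save_timestamp() callback above. Assumptions: a generic ft_event0(id, callback) macro analogous to the ft_event3() shown in include/litmus/feather_trace.h, and an event id that would normally come from litmus/trace.h; the id used here is illustrative only:

	#define TS_EXAMPLE_START 100	/* illustrative event id */

	static void traced_operation(void)
	{
		/* records a struct timestamp (TSC value, seq_no, cpu) when
		 * the event is enabled via ft_enable_event() */
		ft_event0(TS_EXAMPLE_START, save_timestamp);
		/* ... code whose overhead is being measured ... */
	}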