Diffstat (limited to 'litmus/sched_trace.c')
-rw-r--r--   litmus/sched_trace.c | 378
1 file changed, 378 insertions, 0 deletions
diff --git a/litmus/sched_trace.c b/litmus/sched_trace.c
new file mode 100644
index 000000000000..ad0b138d4b01
--- /dev/null
+++ b/litmus/sched_trace.c
@@ -0,0 +1,378 @@
/*
 * sched_trace.c -- record scheduling events to a byte stream.
 */
#include <linux/spinlock.h>
#include <linux/semaphore.h>

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/sysrq.h>

#include <linux/kfifo.h>

#include <litmus/sched_trace.h>
#include <litmus/litmus.h>

#define SCHED_TRACE_NAME "litmus/log"

/* Allocate a buffer of about 32k per CPU */
#define LITMUS_TRACE_BUF_PAGES 8
#define LITMUS_TRACE_BUF_SIZE (PAGE_SIZE * LITMUS_TRACE_BUF_PAGES * NR_CPUS)
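/* For example, with 4 KiB pages and NR_CPUS == 4 this works out to
 * 8 * 4 KiB * 4 = 128 KiB for the single shared buffer. */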

/* Max length of one read from the buffer */
#define MAX_READ_LEN (64 * 1024)

/* Max length for one write --- from kernel --- to the buffer */
#define MSG_SIZE 255

/* Inner ring buffer structure */
typedef struct {
	rwlock_t del_lock;

	/* the buffer */
	struct kfifo *kfifo;
} ring_buffer_t;

/* Main buffer structure */
typedef struct {
	ring_buffer_t buf;
	atomic_t reader_cnt;
	struct semaphore reader_mutex;
} trace_buffer_t;
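
/* The kfifo backing a trace_buffer_t is allocated when the first reader
 * opens the device (log_open) and freed when the last reader closes it
 * (log_release); reader_cnt tracks the number of open readers, and
 * reader_mutex serializes open, release, and read operations. */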

/*
 * Inner buffer management functions
 */
void rb_init(ring_buffer_t* buf)
{
	rwlock_init(&buf->del_lock);
	buf->kfifo = NULL;
}

int rb_alloc_buf(ring_buffer_t* buf, unsigned int size)
{
	unsigned long flags;

	write_lock_irqsave(&buf->del_lock, flags);

	buf->kfifo = kfifo_alloc(size, GFP_ATOMIC, NULL);

	write_unlock_irqrestore(&buf->del_lock, flags);

	if (IS_ERR(buf->kfifo)) {
		printk(KERN_ERR "kfifo_alloc failed\n");
		return PTR_ERR(buf->kfifo);
	}

	return 0;
}

int rb_free_buf(ring_buffer_t* buf)
{
	unsigned long flags;

	write_lock_irqsave(&buf->del_lock, flags);

	BUG_ON(!buf->kfifo);
	kfifo_free(buf->kfifo);

	buf->kfifo = NULL;

	write_unlock_irqrestore(&buf->del_lock, flags);

	return 0;
}
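
/* Note on locking: rb_alloc_buf() and rb_free_buf() take the write side of
 * del_lock, while rb_put() and rb_get() below take the read side. The lock
 * therefore only guards against the kfifo being freed while it is in use;
 * it does not serialize concurrent writers or readers, which must be
 * serialized externally (see log_buffer_lock and reader_mutex). */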

/*
 * Assumption: concurrent writes are serialized externally.
 *
 * Will only succeed if there is enough space for all len bytes.
 */
int rb_put(ring_buffer_t* buf, char* mem, size_t len)
{
	unsigned long flags;
	int error = 0;

	read_lock_irqsave(&buf->del_lock, flags);

	if (!buf->kfifo) {
		error = -ENODEV;
		goto out;
	}

	if (__kfifo_put(buf->kfifo, (unsigned char*)mem, len) < len) {
		error = -ENOMEM;
		goto out;
	}

out:
	read_unlock_irqrestore(&buf->del_lock, flags);
	return error;
}

/* Assumption: concurrent reads are serialized externally */
int rb_get(ring_buffer_t* buf, char* mem, size_t len)
{
	unsigned long flags;
	int error = 0;

	read_lock_irqsave(&buf->del_lock, flags);
	if (!buf->kfifo) {
		error = -ENODEV;
		goto out;
	}

	error = __kfifo_get(buf->kfifo, (unsigned char*)mem, len);

out:
	read_unlock_irqrestore(&buf->del_lock, flags);
	return error;
}
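
/* Return conventions: rb_put() returns 0 on success, -ENODEV if no buffer
 * has been allocated, and -ENOMEM if not all len bytes fit. rb_get() returns
 * the number of bytes actually copied out (0 if the buffer is empty), or
 * -ENODEV if no buffer has been allocated. */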

/*
 * Device Driver management
 */
static DEFINE_SPINLOCK(log_buffer_lock);
static trace_buffer_t log_buffer;

static void init_log_buffer(void)
{
	rb_init(&log_buffer.buf);
	atomic_set(&log_buffer.reader_cnt, 0);
	sema_init(&log_buffer.reader_mutex, 1);
}

static DEFINE_PER_CPU(char[MSG_SIZE], fmt_buffer);
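
/* A per-CPU scratch buffer is used for formatting: sched_trace_log_message()
 * disables interrupts on the local CPU before using it, so each CPU formats
 * into its own MSG_SIZE-byte buffer and no further locking is needed while
 * vscnprintf() runs. */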

/*
 * sched_trace_log_message - Write to the trace buffer (log_buffer).
 *
 * This is the only function that writes to log_buffer from inside the
 * kernel. Concurrent calls are serialized via log_buffer_lock; the per-CPU
 * format buffer is protected by disabling local interrupts.
 * Formatted messages are truncated to at most MSG_SIZE - 1 bytes.
 */
void sched_trace_log_message(const char* fmt, ...)
{
	unsigned long flags;
	va_list args;
	size_t len;
	char* buf;

	va_start(args, fmt);
	local_irq_save(flags);

	/* format message */
	buf = __get_cpu_var(fmt_buffer);
	len = vscnprintf(buf, MSG_SIZE, fmt, args);

	spin_lock(&log_buffer_lock);
	/* Don't copy the trailing null byte, we don't want null bytes
	 * in a text file.
	 */
	rb_put(&log_buffer.buf, buf, len);
	spin_unlock(&log_buffer_lock);

	local_irq_restore(flags);
	va_end(args);
}
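
/* Example (hypothetical call site; within LITMUS^RT this function is
 * normally reached through the TRACE() logging macro rather than being
 * called directly):
 *
 *	sched_trace_log_message("P%d: picked task %d\n",
 *				raw_smp_processor_id(), next->pid);
 */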

/*
 * log_read - Read the trace buffer
 *
 * This function is called as a file operation from userspace.
 * Readers can sleep. Access is serialized through reader_mutex.
 */
static ssize_t log_read(struct file *filp, char __user *to, size_t len,
			loff_t *f_pos)
{
	/* we ignore f_pos, this is strictly sequential */

	ssize_t error = -EINVAL;
	char* mem;
	trace_buffer_t *tbuf = filp->private_data;

	if (down_interruptible(&tbuf->reader_mutex)) {
		error = -ERESTARTSYS;
		goto out;
	}

	if (len > MAX_READ_LEN)
		len = MAX_READ_LEN;

	mem = kmalloc(len, GFP_KERNEL);
	if (!mem) {
		error = -ENOMEM;
		goto out_unlock;
	}

	/* If the buffer is empty, poll it: sleep for ~110 jiffies between
	 * attempts until data becomes available or a signal is pending. */
	error = rb_get(&tbuf->buf, mem, len);
	while (!error) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(110);
		if (signal_pending(current))
			error = -ERESTARTSYS;
		else
			error = rb_get(&tbuf->buf, mem, len);
	}

	if (error > 0 && copy_to_user(to, mem, error))
		error = -EFAULT;

	kfree(mem);
out_unlock:
	up(&tbuf->reader_mutex);
out:
	return error;
}

/*
 * Enable redirection of printk() messages to the trace buffer.
 * Defined in kernel/printk.c
 */
extern int trace_override;
extern int trace_recurse;

/*
 * log_open - open the global log message ring buffer.
 */
static int log_open(struct inode *in, struct file *filp)
{
	int error = -EINVAL;
	trace_buffer_t* tbuf;

	tbuf = &log_buffer;

	if (down_interruptible(&tbuf->reader_mutex)) {
		error = -ERESTARTSYS;
		goto out;
	}

	/* first open must allocate buffers */
	if (atomic_inc_return(&tbuf->reader_cnt) == 1) {
		if ((error = rb_alloc_buf(&tbuf->buf, LITMUS_TRACE_BUF_SIZE)))
		{
			atomic_dec(&tbuf->reader_cnt);
			goto out_unlock;
		}
	}

	error = 0;
	filp->private_data = tbuf;

	printk(KERN_DEBUG
	       "sched_trace kfifo at 0x%p with buffer starting at: 0x%p\n",
	       tbuf->buf.kfifo, (tbuf->buf.kfifo)->buffer);

	/* override printk() */
	trace_override++;

out_unlock:
	up(&tbuf->reader_mutex);
out:
	return error;
}

static int log_release(struct inode *in, struct file *filp)
{
	int error = -EINVAL;
	trace_buffer_t* tbuf = filp->private_data;

	BUG_ON(!filp->private_data);

	if (down_interruptible(&tbuf->reader_mutex)) {
		error = -ERESTARTSYS;
		goto out;
	}

	/* last release must deallocate buffers; otherwise there is
	 * nothing to do and we report success */
	error = 0;
	if (atomic_dec_return(&tbuf->reader_cnt) == 0) {
		error = rb_free_buf(&tbuf->buf);
	}

	/* release printk() overriding */
	trace_override--;

	printk(KERN_DEBUG "sched_trace kfifo released\n");

	up(&tbuf->reader_mutex);
out:
	return error;
}

/*
 * log_fops - The file operations for accessing the global LITMUS log message
 *            buffer.
 *
 * Except for opening the device file it uses the same operations as trace_fops.
 */
static struct file_operations log_fops = {
	.owner = THIS_MODULE,
	.open = log_open,
	.release = log_release,
	.read = log_read,
};

static struct miscdevice litmus_log_dev = {
	.name = SCHED_TRACE_NAME,
	.minor = MISC_DYNAMIC_MINOR,
	.fops = &log_fops,
};
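
/* With MISC_DYNAMIC_MINOR and the name "litmus/log", the device node is
 * typically created by udev as /dev/litmus/log. A simple (hypothetical)
 * way to capture the log from userspace is:
 *
 *	cat /dev/litmus/log > trace.txt
 */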

#ifdef CONFIG_MAGIC_SYSRQ
void dump_trace_buffer(int max)
{
	char line[80];
	int len;
	int count = 0;

	/* potential, but very unlikely, race... */
	trace_recurse = 1;
	while ((max == 0 || count++ < max) &&
	       (len = rb_get(&log_buffer.buf, line, sizeof(line) - 1)) > 0) {
		line[len] = '\0';
		printk("%s", line);
	}
	trace_recurse = 0;
}

static void sysrq_dump_trace_buffer(int key, struct tty_struct *tty)
{
	dump_trace_buffer(100);
}

static struct sysrq_key_op sysrq_dump_trace_buffer_op = {
	.handler = sysrq_dump_trace_buffer,
	.help_msg = "dump-trace-buffer(Y)",
	.action_msg = "writing content of TRACE() buffer",
};
#endif
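
/* Once registered in init_sched_trace() below, the dump can be triggered
 * via the magic SysRq 'y' key (e.g., Alt-SysRq-y on a console or, on a
 * typical setup, "echo y > /proc/sysrq-trigger"); sysrq_dump_trace_buffer()
 * then prints up to 100 chunks of at most 79 bytes each via printk(). */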

static int __init init_sched_trace(void)
{
	printk("Initializing TRACE() device\n");
	init_log_buffer();

#ifdef CONFIG_MAGIC_SYSRQ
	/* offer some debugging help */
	if (!register_sysrq_key('y', &sysrq_dump_trace_buffer_op))
		printk("Registered dump-trace-buffer(Y) magic sysrq.\n");
	else
		printk("Could not register dump-trace-buffer(Y) magic sysrq.\n");
#endif

	return misc_register(&litmus_log_dev);
}

static void __exit exit_sched_trace(void)
{
	misc_deregister(&litmus_log_dev);
}

module_init(init_sched_trace);
module_exit(exit_sched_trace);