author    Bjoern B. Brandenburg <bbb@cs.unc.edu>    2008-09-16 16:04:15 -0400
committer Bjoern B. Brandenburg <bbb@cs.unc.edu>    2008-09-16 16:04:15 -0400
commit    74784b9c59b5e41ebed5a9634b45ec154802a5fd
tree      87838c4b9f158af3e768cbb4baa84b29b1ca4796
parent    e5e7046b446c9e709701cec312f9de120d7f94ff
sched_trace: make inclusion optional
We don't always need sched_trace.c compiled into the kernel, so make its inclusion depend on CONFIG_SCHED_DEBUG_TRACE.
-rw-r--r--  litmus/Makefile       |   3
-rw-r--r--  litmus/sched_trace.c  | 123
2 files changed, 10 insertions(+), 116 deletions(-)
diff --git a/litmus/Makefile b/litmus/Makefile
index 545203876a..3c39899f93 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -2,7 +2,7 @@
 # Makefile for LITMUS^RT
 #
 
-obj-y = sched_plugin.o litmus.o sched_trace.o \
+obj-y = sched_plugin.o litmus.o \
        edf_common.o jobs.o \
        rt_domain.o fdso.o sync.o \
        fmlp.o srp.o norqlock.o \
@@ -12,3 +12,4 @@ obj-y = sched_plugin.o litmus.o sched_trace.o \
        sched_pfair.o
 
 obj-$(CONFIG_FEATHER_TRACE) += trace.o ft_event.o
+obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o
\ No newline at end of file
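Note: with sched_trace.o moved under obj-$(CONFIG_SCHED_DEBUG_TRACE), litmus/sched_trace.c is compiled only when that option is enabled, so callers of the TRACE() logging API must still build with it off. A minimal sketch of how the header side can absorb this, assuming the declarations live in include/litmus/sched_trace.h (the header is not touched by this patch, so this is illustrative only):

/* Sketch only -- the actual contents of litmus/sched_trace.h
 * are not part of this patch. */
#ifdef CONFIG_SCHED_DEBUG_TRACE
/* Implemented in litmus/sched_trace.c, which is now built only
 * when CONFIG_SCHED_DEBUG_TRACE=y (see the Makefile change). */
void sched_trace_log_message(const char *fmt, ...);
#define TRACE(fmt, args...) sched_trace_log_message(fmt, ## args)
#else
/* Option disabled: trace statements compile away entirely, so
 * nothing references the now-absent sched_trace.o. */
#define TRACE(fmt, args...) do { } while (0)
#endif

Compiling trace statements away, rather than stubbing them with empty functions, keeps the disabled configuration free of both the object file and any call overhead.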
diff --git a/litmus/sched_trace.c b/litmus/sched_trace.c
index 43447851da..527a58b696 100644
--- a/litmus/sched_trace.c
+++ b/litmus/sched_trace.c
@@ -13,7 +13,6 @@
 #include <litmus/sched_trace.h>
 #include <litmus/litmus.h>
 
-
 typedef struct {
        /* guard read and write pointers */
        spinlock_t lock;
@@ -246,51 +245,18 @@ typedef struct {
 #define EMPTY_TRACE_BUFFER \
        { .buf = EMPTY_RING_BUFFER, .reader_cnt = ATOMIC_INIT(0)}
 
-static DEFINE_PER_CPU(trace_buffer_t, trace_buffer);
-
-#ifdef CONFIG_SCHED_DEBUG_TRACE
 static spinlock_t log_buffer_lock = SPIN_LOCK_UNLOCKED;
-#endif
 static trace_buffer_t log_buffer = EMPTY_TRACE_BUFFER;
 
-static void init_buffers(void)
+static void init_log_buffer(void)
 {
-       int i;
-
-       for (i = 0; i < NR_CPUS; i++) {
-               rb_init(&per_cpu(trace_buffer, i).buf);
-               init_MUTEX(&per_cpu(trace_buffer, i).reader_mutex);
-               atomic_set(&per_cpu(trace_buffer, i).reader_cnt, 0);
-       }
        /* only initialize the mutex, the rest was initialized as part
         * of the static initialization macro
         */
        init_MUTEX(&log_buffer.reader_mutex);
 }
 
-static int trace_release(struct inode *in, struct file *filp)
-{
-       int error = -EINVAL;
-       trace_buffer_t* buf = filp->private_data;
-
-       BUG_ON(!filp->private_data);
-
-       if (down_interruptible(&buf->reader_mutex)) {
-               error = -ERESTARTSYS;
-               goto out;
-       }
-
-       /* last release must deallocate buffers */
-       if (atomic_dec_return(&buf->reader_cnt) == 0) {
-               error = rb_free_buf(&buf->buf);
-       }
-
-       up(&buf->reader_mutex);
- out:
-       return error;
-}
-
-static ssize_t trace_read(struct file *filp, char __user *to, size_t len,
+static ssize_t log_read(struct file *filp, char __user *to, size_t len,
        loff_t *f_pos)
 {
        /* we ignore f_pos, this is strictly sequential */
@@ -333,46 +299,6 @@ static ssize_t trace_read(struct file *filp, char __user *to, size_t len,
 }
 
 
-/* trace_open - Open one of the per-CPU sched_trace buffers.
- */
-static int trace_open(struct inode *in, struct file *filp)
-{
-       int error = -EINVAL;
-       int cpu = MINOR(in->i_rdev);
-       trace_buffer_t* buf;
-
-       if (!cpu_online(cpu)) {
-               printk(KERN_WARNING "sched trace: "
-                      "CPU #%d is not online. (open failed)\n", cpu);
-               error = -ENODEV;
-               goto out;
-       }
-
-       buf = &per_cpu(trace_buffer, cpu);
-
-       if (down_interruptible(&buf->reader_mutex)) {
-               error = -ERESTARTSYS;
-               goto out;
-       }
-
-       /* first open must allocate buffers */
-       if (atomic_inc_return(&buf->reader_cnt) == 1) {
-               if ((error = rb_alloc_buf(&buf->buf, BUFFER_ORDER)))
-               {
-                       atomic_dec(&buf->reader_cnt);
-                       goto out_unlock;
-               }
-       }
-
-       error = 0;
-       filp->private_data = buf;
-
- out_unlock:
-       up(&buf->reader_mutex);
- out:
-       return error;
-}
-
 
 extern int trace_override;
 
@@ -401,7 +327,7 @@ static int log_open(struct inode *in, struct file *filp)
 
        error = 0;
        filp->private_data = buf;
-       printk(KERN_DEBUG "sched_trace buf: from 0x%p to 0x%p length: %lx\n",
+       printk(KERN_DEBUG "sched_trace buf: from 0x%p to 0x%p length: %x\n",
               buf->buf.buf, buf->buf.end, buf->buf.end - buf->buf.buf);
        trace_override++;
  out_unlock:
@@ -441,19 +367,8 @@ static int log_release(struct inode *in, struct file *filp)
  *
  * This should be converted to dynamic allocation at some point...
  */
-#define TRACE_MAJOR 250
 #define LOG_MAJOR 251
 
-/* trace_fops - The file operations for accessing the per-CPU scheduling event
- * trace buffers.
- */
-struct file_operations trace_fops = {
-       .owner = THIS_MODULE,
-       .open = trace_open,
-       .release = trace_release,
-       .read = trace_read,
-};
-
 /* log_fops - The file operations for accessing the global LITMUS log message
  * buffer.
  *
@@ -463,7 +378,7 @@ struct file_operations log_fops = {
        .owner = THIS_MODULE,
        .open = log_open,
        .release = log_release,
-       .read = trace_read,
+       .read = log_read,
 };
 
 static int __init register_buffer_dev(const char* name,
@@ -502,34 +417,15 @@ static int __init register_buffer_dev(const char* name,
 
 static int __init init_sched_trace(void)
 {
-       int error1 = 0, error2 = 0;
-
-       printk("Initializing scheduler trace device\n");
-       init_buffers();
+       printk("Initializing TRACE() device\n");
+       init_log_buffer();
 
-       error1 = register_buffer_dev("schedtrace", &trace_fops,
-                                    TRACE_MAJOR, NR_CPUS);
-
-       error2 = register_buffer_dev("litmus_log", &log_fops,
-                                    LOG_MAJOR, 1);
-       if (error1 || error2)
-               return min(error1, error2);
-       else
-               return 0;
+       return register_buffer_dev("litmus_log", &log_fops,
+                                  LOG_MAJOR, 1);
 }
 
 module_init(init_sched_trace);
 
-/******************************************************************************/
-/*                                 KERNEL API                                 */
-/******************************************************************************/
-
-/* The per-CPU LITMUS log buffer. Don't put it on the stack, it is too big for
- * that and the kernel gets very picky with nested interrupts and small stacks.
- */
-
-#ifdef CONFIG_SCHED_DEBUG_TRACE
-
 #define MSG_SIZE 255
 static DEFINE_PER_CPU(char[MSG_SIZE], fmt_buffer);
 
@@ -564,6 +460,3 @@ void sched_trace_log_message(const char* fmt, ...)
        local_irq_restore(flags);
        va_end(args);
 }
-
-#endif
-
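The remaining litmus_log device is a plain character device registered at LOG_MAJOR (251) with a single minor. For completeness, a minimal userspace reader as a sketch: the /dev/litmus_log node name and minor 0 are assumptions (e.g. created via mknod /dev/litmus_log c 251 0); only the major number appears in the code above.

/* logdump.c -- sketch of a litmus_log reader; assumes the node
 * was created with: mknod /dev/litmus_log c 251 0 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        ssize_t n;
        int fd = open("/dev/litmus_log", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/litmus_log");
                return 1;
        }
        /* log_read() ignores f_pos and hands out log messages
         * strictly sequentially, so a plain read loop suffices. */
        while ((n = read(fd, buf, sizeof(buf))) > 0)
                fwrite(buf, 1, (size_t)n, stdout);
        close(fd);
        return 0;
}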