From 96979188007a0671d3f067d7edf144742d7433ee Mon Sep 17 00:00:00 2001 From: Andrea Bastoni Date: Thu, 17 Dec 2009 21:26:50 -0500 Subject: [ported from 2008.3] Add tracing support and hook up Litmus KConfig for x86 - fix requesting more than 2^11 pages (MAX_ORDER) to system allocator Still to be merged: - feather-trace generic implementation --- arch/x86/Kconfig | 2 + arch/x86/include/asm/feather_trace.h | 11 + include/litmus/feather_trace.h | 14 + include/litmus/sched_trace.h | 3 +- kernel/printk.c | 14 +- litmus/sched_task_trace.c | 202 ++++++++++++++ litmus/sched_trace.c | 510 +++++++++++++++++++++++++++++++++++ litmus/trace.c | 102 +++++++ 8 files changed, 855 insertions(+), 3 deletions(-) create mode 100644 arch/x86/include/asm/feather_trace.h create mode 100644 litmus/sched_task_trace.c create mode 100644 litmus/sched_trace.c create mode 100644 litmus/trace.c diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 72ace9515a07..e2cd95ebeeb1 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2092,3 +2092,5 @@ source "crypto/Kconfig" source "arch/x86/kvm/Kconfig" source "lib/Kconfig" + +source "litmus/Kconfig" diff --git a/arch/x86/include/asm/feather_trace.h b/arch/x86/include/asm/feather_trace.h new file mode 100644 index 000000000000..f60fbed07afb --- /dev/null +++ b/arch/x86/include/asm/feather_trace.h @@ -0,0 +1,11 @@ +#ifndef _ARCH_FEATHER_TRACE_H +#define _ARCH_FEATHER_TRACE_H + +static inline unsigned long long ft_timestamp(void) +{ + unsigned long long ret; + __asm__ __volatile__("rdtsc" : "=A" (ret)); + return ret; +} + +#endif diff --git a/include/litmus/feather_trace.h b/include/litmus/feather_trace.h index 3ac1ee5e0277..eef8af7a414e 100644 --- a/include/litmus/feather_trace.h +++ b/include/litmus/feather_trace.h @@ -1,6 +1,7 @@ #ifndef _FEATHER_TRACE_H_ #define _FEATHER_TRACE_H_ +#include int ft_enable_event(unsigned long id); int ft_disable_event(unsigned long id); @@ -30,6 +31,19 @@ extern int ft_events[MAX_EVENTS]; #define ft_event3(id, callback, p, p2, p3) \ if (ft_events[id]) callback(id, p, p2, p3); + +#include + +static inline int fetch_and_inc(int *val) +{ + return atomic_add_return(1, (atomic_t*) val) - 1; +} + +static inline int fetch_and_dec(int *val) +{ + return atomic_sub_return(1, (atomic_t*) val) + 1; +} + #endif diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h index afd0391d127b..aae6ac27fe1b 100644 --- a/include/litmus/sched_trace.h +++ b/include/litmus/sched_trace.h @@ -1,4 +1,5 @@ -/* sched_trace.h -- record scheduler events to a byte stream for offline analysis. +/* + * sched_trace.h -- record scheduler events to a byte stream for offline analysis. */ #ifndef _LINUX_SCHED_TRACE_H_ #define _LINUX_SCHED_TRACE_H_ diff --git a/kernel/printk.c b/kernel/printk.c index f38b07f78a4e..6712a252b306 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -69,6 +69,13 @@ int console_printk[4] = { static int saved_console_loglevel = -1; +/* + * divert printk() messages when there is a LITMUS^RT debug listener + */ +#include +int trace_override = 0; +int trace_recurse = 0; + /* * Low level drivers may need that to know if they can schedule in * their unblank() callback or not. So let's export it. 
@@ -713,6 +720,9 @@ asmlinkage int vprintk(const char *fmt, va_list args) /* Emit the output into the temporary buffer */ printed_len += vscnprintf(printk_buf + printed_len, sizeof(printk_buf) - printed_len, fmt, args); + /* if LITMUS^RT tracer is active divert printk() msgs */ + if (trace_override && !trace_recurse) + TRACE("%s", printk_buf); p = printk_buf; @@ -782,7 +792,7 @@ asmlinkage int vprintk(const char *fmt, va_list args) * Try to acquire and then immediately release the * console semaphore. The release will do all the * actual magic (print out buffers, wake up klogd, - * etc). + * etc). * * The acquire_console_semaphore_for_printk() function * will release 'logbuf_lock' regardless of whether it @@ -1019,7 +1029,7 @@ int printk_needs_cpu(int cpu) void wake_up_klogd(void) { - if (waitqueue_active(&log_wait)) + if (!trace_override && waitqueue_active(&log_wait)) __raw_get_cpu_var(printk_pending) = 1; } diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c new file mode 100644 index 000000000000..b7ea6d4e6e57 --- /dev/null +++ b/litmus/sched_task_trace.c @@ -0,0 +1,202 @@ +/* + * sched_task_trace.c -- record scheduling events to a byte stream + */ + +#define NO_TASK_TRACE_DECLS + +#include +#include +#include + +#include +#include + +#include +#include +#include + +#define FT_TASK_TRACE_MAJOR 253 +#define NO_EVENTS 4096 /* this is a buffer of 12 4k pages per CPU */ + +#define now() litmus_clock() + +struct local_buffer { + struct st_event_record record[NO_EVENTS]; + char flag[NO_EVENTS]; + struct ft_buffer ftbuf; +}; + +DEFINE_PER_CPU(struct local_buffer, st_event_buffer); + +static struct ftdev st_dev; + +static int st_dev_can_open(struct ftdev *dev, unsigned int cpu) +{ + return cpu_online(cpu) ? 0 : -ENODEV; +} + +static int __init init_sched_task_trace(void) +{ + struct local_buffer* buf; + int i, ok = 0; + ftdev_init(&st_dev, THIS_MODULE); + for (i = 0; i < NR_CPUS; i++) { + buf = &per_cpu(st_event_buffer, i); + ok += init_ft_buffer(&buf->ftbuf, NO_EVENTS, + sizeof(struct st_event_record), + buf->flag, + buf->record); + st_dev.minor[i].buf = &buf->ftbuf; + } + if (ok == NR_CPUS) { + st_dev.minor_cnt = NR_CPUS; + st_dev.can_open = st_dev_can_open; + return register_ftdev(&st_dev, "sched_trace", FT_TASK_TRACE_MAJOR); + } else { + return -EINVAL; + } +} + +module_init(init_sched_task_trace); + + +static inline struct st_event_record* get_record(u8 type, struct task_struct* t) +{ + struct st_event_record* rec; + struct local_buffer* buf; + + buf = &get_cpu_var(st_event_buffer); + if (ft_buffer_start_write(&buf->ftbuf, (void**) &rec)) { + rec->hdr.type = type; + rec->hdr.cpu = smp_processor_id(); + rec->hdr.pid = t ? t->pid : 0; + rec->hdr.job = t ? 
t->rt_param.job_params.job_no : 0; + } else { + put_cpu_var(st_event_buffer); + } + /* rec will be NULL if it failed */ + return rec; +} + +static inline void put_record(struct st_event_record* rec) +{ + struct local_buffer* buf; + buf = &__get_cpu_var(st_event_buffer); + ft_buffer_finish_write(&buf->ftbuf, rec); + put_cpu_var(st_event_buffer); +} + +feather_callback void do_sched_trace_task_name(unsigned long id, unsigned long _task) +{ + struct task_struct *t = (struct task_struct*) _task; + struct st_event_record* rec = get_record(ST_NAME, t); + int i; + if (rec) { + for (i = 0; i < min(TASK_COMM_LEN, ST_NAME_LEN); i++) + rec->data.name.cmd[i] = t->comm[i]; + put_record(rec); + } +} + +feather_callback void do_sched_trace_task_param(unsigned long id, unsigned long _task) +{ + struct task_struct *t = (struct task_struct*) _task; + struct st_event_record* rec = get_record(ST_PARAM, t); + if (rec) { + rec->data.param.wcet = get_exec_cost(t); + rec->data.param.period = get_rt_period(t); + rec->data.param.phase = get_rt_phase(t); + rec->data.param.partition = get_partition(t); + put_record(rec); + } +} + +feather_callback void do_sched_trace_task_release(unsigned long id, unsigned long _task) +{ + struct task_struct *t = (struct task_struct*) _task; + struct st_event_record* rec = get_record(ST_RELEASE, t); + if (rec) { + rec->data.release.release = get_release(t); + rec->data.release.deadline = get_deadline(t); + put_record(rec); + } +} + +/* skipped: st_assigned_data, we don't use it atm */ + +feather_callback void do_sched_trace_task_switch_to(unsigned long id, + unsigned long _task) +{ + struct task_struct *t = (struct task_struct*) _task; + struct st_event_record* rec; + if (is_realtime(t)) { + rec = get_record(ST_SWITCH_TO, t); + if (rec) { + rec->data.switch_to.when = now(); + rec->data.switch_to.exec_time = get_exec_time(t); + put_record(rec); + } + } +} + +feather_callback void do_sched_trace_task_switch_away(unsigned long id, + unsigned long _task) +{ + struct task_struct *t = (struct task_struct*) _task; + struct st_event_record* rec; + if (is_realtime(t)) { + rec = get_record(ST_SWITCH_AWAY, t); + if (rec) { + rec->data.switch_away.when = now(); + rec->data.switch_away.exec_time = get_exec_time(t); + put_record(rec); + } + } +} + +feather_callback void do_sched_trace_task_completion(unsigned long id, + unsigned long _task, + unsigned long forced) +{ + struct task_struct *t = (struct task_struct*) _task; + struct st_event_record* rec = get_record(ST_COMPLETION, t); + if (rec) { + rec->data.completion.when = now(); + rec->data.completion.forced = forced; + put_record(rec); + } +} + +feather_callback void do_sched_trace_task_block(unsigned long id, + unsigned long _task) +{ + struct task_struct *t = (struct task_struct*) _task; + struct st_event_record* rec = get_record(ST_BLOCK, t); + if (rec) { + rec->data.block.when = now(); + put_record(rec); + } +} + +feather_callback void do_sched_trace_task_resume(unsigned long id, + unsigned long _task) +{ + struct task_struct *t = (struct task_struct*) _task; + struct st_event_record* rec = get_record(ST_RESUME, t); + if (rec) { + rec->data.resume.when = now(); + put_record(rec); + } +} + +feather_callback void do_sched_trace_sys_release(unsigned long id, + unsigned long _start) +{ + lt_t *start = (lt_t*) _start; + struct st_event_record* rec = get_record(ST_SYS_RELEASE, NULL); + if (rec) { + rec->data.sys_release.when = now(); + rec->data.sys_release.release = *start; + put_record(rec); + } +} diff --git a/litmus/sched_trace.c 
b/litmus/sched_trace.c new file mode 100644 index 000000000000..5befc1e21b1f --- /dev/null +++ b/litmus/sched_trace.c @@ -0,0 +1,510 @@ +/* + * sched_trace.c -- record scheduling events to a byte stream. + * + * TODO: Move ring buffer to a lockfree implementation. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +typedef struct { + /* guard read and write pointers */ + spinlock_t lock; + /* guard against concurrent freeing of buffer */ + rwlock_t del_lock; + + /* memory allocated for ring buffer */ + unsigned long order; + char* buf; + char* end; + + /* Read/write pointer. May not cross. + * They point to the position of next write and + * last read. + */ + char* writep; + char* readp; + +} ring_buffer_t; + +#define EMPTY_RING_BUFFER { \ + .lock = SPIN_LOCK_UNLOCKED, \ + .del_lock = RW_LOCK_UNLOCKED, \ + .buf = NULL, \ + .end = NULL, \ + .writep = NULL, \ + .readp = NULL \ +} + +void rb_init(ring_buffer_t* buf) +{ + *buf = (ring_buffer_t) EMPTY_RING_BUFFER; +} + +int rb_alloc_buf(ring_buffer_t* buf, unsigned long order) +{ + unsigned long flags; + int error = 0; + char *mem; + + /* do memory allocation while not atomic */ + mem = (char *) __get_free_pages(GFP_KERNEL, order); + if (!mem) + return -ENOMEM; + write_lock_irqsave(&buf->del_lock, flags); + BUG_ON(buf->buf); + buf->buf = mem; + buf->end = buf->buf + PAGE_SIZE * (1 << order) - 1; + memset(buf->buf, 0xff, buf->end - buf->buf); + buf->order = order; + buf->writep = buf->buf + 1; + buf->readp = buf->buf; + write_unlock_irqrestore(&buf->del_lock, flags); + return error; +} + +int rb_free_buf(ring_buffer_t* buf) +{ + unsigned long flags; + int error = 0; + write_lock_irqsave(&buf->del_lock, flags); + BUG_ON(!buf->buf); + free_pages((unsigned long) buf->buf, buf->order); + buf->buf = NULL; + buf->end = NULL; + buf->writep = NULL; + buf->readp = NULL; + write_unlock_irqrestore(&buf->del_lock, flags); + return error; +} + +/* Assumption: concurrent writes are serialized externally + * + * Will only succeed if there is enough space for all len bytes. 
+ */ +int rb_put(ring_buffer_t* buf, char* mem, size_t len) +{ + unsigned long flags; + char* r , *w; + int error = 0; + read_lock_irqsave(&buf->del_lock, flags); + if (!buf->buf) { + error = -ENODEV; + goto out; + } + spin_lock(&buf->lock); + r = buf->readp; + w = buf->writep; + spin_unlock(&buf->lock); + if (r < w && buf->end - w >= len - 1) { + /* easy case: there is enough space in the buffer + * to write it in one continuous chunk */ + memcpy(w, mem, len); + w += len; + if (w > buf->end) + /* special case: fit exactly into buffer + * w is now buf->end + 1 + */ + w = buf->buf; + } else if (w < r && r - w >= len) { /* >= len because may not cross */ + /* we are constrained by the read pointer but there + * is enough space + */ + memcpy(w, mem, len); + w += len; + } else if (r <= w && buf->end - w < len - 1) { + /* the wrap around case: there may or may not be space */ + if ((buf->end - w) + (r - buf->buf) >= len - 1) { + /* copy chunk that fits at the end */ + memcpy(w, mem, buf->end - w + 1); + mem += buf->end - w + 1; + len -= (buf->end - w + 1); + w = buf->buf; + /* copy the rest */ + memcpy(w, mem, len); + w += len; + } + else + error = -ENOMEM; + } else { + error = -ENOMEM; + } + if (!error) { + spin_lock(&buf->lock); + buf->writep = w; + spin_unlock(&buf->lock); + } + out: + read_unlock_irqrestore(&buf->del_lock, flags); + return error; +} + +/* Assumption: concurrent reads are serialized externally */ +int rb_get(ring_buffer_t* buf, char* mem, size_t len) +{ + unsigned long flags; + char* r , *w; + int error = 0; + read_lock_irqsave(&buf->del_lock, flags); + if (!buf->buf) { + error = -ENODEV; + goto out; + } + spin_lock(&buf->lock); + r = buf->readp; + w = buf->writep; + spin_unlock(&buf->lock); + + if (w <= r && buf->end - r >= len) { + /* easy case: there is enough data in the buffer + * to get it in one chunk */ + memcpy(mem, r + 1, len); + r += len; + error = len; + + } else if (r + 1 < w && w - r - 1 >= len) { + /* we are constrained by the write pointer but + * there is enough data + */ + memcpy(mem, r + 1, len); + r += len; + error = len; + + } else if (r + 1 < w && w - r - 1 < len) { + /* we are constrained by the write pointer and there + * is not enough data + */ + memcpy(mem, r + 1, w - r - 1); + error = w - r - 1; + r += w - r - 1; + + } else if (w <= r && buf->end - r < len) { + /* the wrap around case: there may or may not be enough data + * first let's get what is available + */ + memcpy(mem, r + 1, buf->end - r); + error += (buf->end - r); + mem += (buf->end - r); + len -= (buf->end - r); + r += (buf->end - r); + + if (w > buf->buf) { + /* there is more to get */ + r = buf->buf - 1; + if (w - r >= len) { + /* plenty */ + memcpy(mem, r + 1, len); + error += len; + r += len; + } else { + memcpy(mem, r + 1, w - r - 1); + error += w - r - 1; + r += w - r - 1; + } + } + } /* nothing available */ + + if (error > 0) { + spin_lock(&buf->lock); + buf->readp = r; + spin_unlock(&buf->lock); + } + out: + read_unlock_irqrestore(&buf->del_lock, flags); + return error; +} + + + +/******************************************************************************/ +/* DEVICE FILE DRIVER */ +/******************************************************************************/ + + + +/* Allocate a buffer of about 1 MB per CPU. + * + */ +#define BUFFER_ORDER 8 + +typedef struct { + ring_buffer_t buf; + atomic_t reader_cnt; + struct semaphore reader_mutex; +} trace_buffer_t; + + +/* This does not initialize the semaphore!!
*/ + +#define EMPTY_TRACE_BUFFER \ + { .buf = EMPTY_RING_BUFFER, .reader_cnt = ATOMIC_INIT(0)} + +static spinlock_t log_buffer_lock = SPIN_LOCK_UNLOCKED; +static trace_buffer_t log_buffer = EMPTY_TRACE_BUFFER; + +static void init_log_buffer(void) +{ + /* only initialize the mutex, the rest was initialized as part + * of the static initialization macro + */ + init_MUTEX(&log_buffer.reader_mutex); +} + +static ssize_t log_read(struct file *filp, char __user *to, size_t len, + loff_t *f_pos) +{ + /* we ignore f_pos, this is strictly sequential */ + + ssize_t error = -EINVAL; + char* mem; + trace_buffer_t *buf = filp->private_data; + + if (down_interruptible(&buf->reader_mutex)) { + error = -ERESTARTSYS; + goto out; + } + + if (len > 64 * 1024) + len = 64 * 1024; + mem = kmalloc(len, GFP_KERNEL); + if (!mem) { + error = -ENOMEM; + goto out_unlock; + } + + error = rb_get(&buf->buf, mem, len); + while (!error) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(110); + if (signal_pending(current)) + error = -ERESTARTSYS; + else + error = rb_get(&buf->buf, mem, len); + } + + if (error > 0 && copy_to_user(to, mem, error)) + error = -EFAULT; + + kfree(mem); + out_unlock: + up(&buf->reader_mutex); + out: + return error; +} + +/* defined in kernel/printk.c */ +extern int trace_override; +extern int trace_recurse; + +/* log_open - open the global log message ring buffer. + */ +static int log_open(struct inode *in, struct file *filp) +{ + int error = -EINVAL; + trace_buffer_t* buf; + + buf = &log_buffer; + + if (down_interruptible(&buf->reader_mutex)) { + error = -ERESTARTSYS; + goto out; + } + + /* first open must allocate buffers */ + if (atomic_inc_return(&buf->reader_cnt) == 1) { + if ((error = rb_alloc_buf(&buf->buf, BUFFER_ORDER))) + { + atomic_dec(&buf->reader_cnt); + goto out_unlock; + } + } + + error = 0; + filp->private_data = buf; + printk(KERN_DEBUG "sched_trace buf: from 0x%p to 0x%p length: %x\n", + buf->buf.buf, buf->buf.end, + (unsigned int) (buf->buf.end - buf->buf.buf)); + + /* override printk() */ + trace_override++; + + out_unlock: + up(&buf->reader_mutex); + out: + return error; +} + +static int log_release(struct inode *in, struct file *filp) +{ + int error = -EINVAL; + trace_buffer_t* buf = filp->private_data; + + BUG_ON(!filp->private_data); + + if (down_interruptible(&buf->reader_mutex)) { + error = -ERESTARTSYS; + goto out; + } + + /* last release must deallocate buffers */ + if (atomic_dec_return(&buf->reader_cnt) == 0) { + error = rb_free_buf(&buf->buf); + } + + /* release printk() overriding */ + trace_override--; + + up(&buf->reader_mutex); + out: + return error; +} + +/******************************************************************************/ +/* Device Registration */ +/******************************************************************************/ + +/* the major numbers are from the unassigned/local use block + * + * This should be converted to dynamic allocation at some point... + */ +#define LOG_MAJOR 251 + +/* log_fops - The file operations for accessing the global LITMUS log message + * buffer. + * + * Except for opening the device file, it uses the same operations as trace_fops.
+ */ +struct file_operations log_fops = { + .owner = THIS_MODULE, + .open = log_open, + .release = log_release, + .read = log_read, +}; + +static int __init register_buffer_dev(const char* name, + struct file_operations* fops, + int major, int count) +{ + dev_t trace_dev; + struct cdev *cdev; + int error = 0; + + trace_dev = MKDEV(major, 0); + error = register_chrdev_region(trace_dev, count, name); + if (error) + { + printk(KERN_WARNING "sched trace: " + "Could not register major/minor number %d\n", major); + return error; + } + cdev = cdev_alloc(); + if (!cdev) { + printk(KERN_WARNING "sched trace: " + "Could not get a cdev for %s.\n", name); + return -ENOMEM; + } + cdev->owner = THIS_MODULE; + cdev->ops = fops; + error = cdev_add(cdev, trace_dev, count); + if (error) { + printk(KERN_WARNING "sched trace: " + "cdev_add failed for %s.\n", name); + return -ENOMEM; + } + return error; + +} + +#ifdef CONFIG_MAGIC_SYSRQ + +static void sysrq_dump_trace_buffer(int key, struct tty_struct *tty) +{ + dump_trace_buffer(100); +} + +static struct sysrq_key_op sysrq_dump_trace_buffer_op = { + .handler = sysrq_dump_trace_buffer, + .help_msg = "dump-trace-buffer(Y)", + .action_msg = "writing content of TRACE() buffer", +}; + +#endif + +static int __init init_sched_trace(void) +{ + printk("Initializing TRACE() device\n"); + init_log_buffer(); + +#ifdef CONFIG_MAGIC_SYSRQ + /* offer some debugging help */ + if (!register_sysrq_key('y', &sysrq_dump_trace_buffer_op)) + printk("Registered dump-trace-buffer(Y) magic sysrq.\n"); + else + printk("Could not register dump-trace-buffer(Y) magic sysrq.\n"); +#endif + + + return register_buffer_dev("litmus_log", &log_fops, + LOG_MAJOR, 1); +} + +module_init(init_sched_trace); + +#define MSG_SIZE 255 +static DEFINE_PER_CPU(char[MSG_SIZE], fmt_buffer); + +/* sched_trace_log_message - This is the only function that accesses the + * log buffer inside the kernel for writing. + * Concurrent access to it is serialized via the + * log_buffer_lock. + * + * The maximum length of a formatted message is 255. + */ +void sched_trace_log_message(const char* fmt, ...) +{ + unsigned long flags; + va_list args; + size_t len; + char* buf; + + va_start(args, fmt); + local_irq_save(flags); + + /* format message */ + buf = __get_cpu_var(fmt_buffer); + len = vscnprintf(buf, MSG_SIZE, fmt, args); + + spin_lock(&log_buffer_lock); + /* Don't copy the trailing null byte, we don't want null bytes + * in a text file. + */ + rb_put(&log_buffer.buf, buf, len); + spin_unlock(&log_buffer_lock); + + local_irq_restore(flags); + va_end(args); +} + +void dump_trace_buffer(int max) +{ + char line[80]; + int len; + int count = 0; + + /* potential, but very unlikely, race...
*/ + trace_recurse = 1; + while ((max == 0 || count++ < max) && + (len = rb_get(&log_buffer.buf, line, sizeof(line) - 1)) > 0) { + line[len] = '\0'; + printk("%s", line); + } + trace_recurse = 0; +} diff --git a/litmus/trace.c b/litmus/trace.c new file mode 100644 index 000000000000..5735d28f5e30 --- /dev/null +++ b/litmus/trace.c @@ -0,0 +1,102 @@ +#include + +#include +#include +#include + +/******************************************************************************/ +/* Allocation */ +/******************************************************************************/ + +static struct ftdev overhead_dev; + +#define trace_ts_buf overhead_dev.minor[0].buf + +static unsigned int ts_seq_no = 0; + +static inline void __save_timestamp_cpu(unsigned long event, + uint8_t type, uint8_t cpu) +{ + unsigned int seq_no; + struct timestamp *ts; + seq_no = fetch_and_inc((int *) &ts_seq_no); + if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) { + ts->event = event; + ts->timestamp = ft_timestamp(); + ts->seq_no = seq_no; + ts->cpu = cpu; + ts->task_type = type; + ft_buffer_finish_write(trace_ts_buf, ts); + } +} + +static inline void __save_timestamp(unsigned long event, + uint8_t type) +{ + __save_timestamp_cpu(event, type, raw_smp_processor_id()); +} + +feather_callback void save_timestamp(unsigned long event) +{ + __save_timestamp(event, TSK_UNKNOWN); +} + +feather_callback void save_timestamp_def(unsigned long event, + unsigned long type) +{ + __save_timestamp(event, (uint8_t) type); +} + +feather_callback void save_timestamp_task(unsigned long event, + unsigned long t_ptr) +{ + int rt = is_realtime((struct task_struct *) t_ptr); + __save_timestamp(event, rt ? TSK_RT : TSK_BE); +} + +feather_callback void save_timestamp_cpu(unsigned long event, + unsigned long cpu) +{ + __save_timestamp_cpu(event, TSK_UNKNOWN, cpu); +} + +/******************************************************************************/ +/* DEVICE FILE DRIVER */ +/******************************************************************************/ + +/* + * should be 8M; this is the most we can ask of the buddy system allocator (MAX_ORDER), + * and we might not get even that much + */ +#define NO_TIMESTAMPS (2 << 11) + +#define FT_TRACE_MAJOR 252 + +static int alloc_timestamp_buffer(struct ftdev* ftdev, unsigned int idx) +{ + unsigned int count = NO_TIMESTAMPS; + while (count && !trace_ts_buf) { + printk("time stamp buffer: trying to allocate %u time stamps.\n", count); + ftdev->minor[idx].buf = alloc_ft_buffer(count, sizeof(struct timestamp)); + count /= 2; + } + return ftdev->minor[idx].buf ? 0 : -ENOMEM; +} + +static void free_timestamp_buffer(struct ftdev* ftdev, unsigned int idx) +{ + free_ft_buffer(ftdev->minor[idx].buf); + ftdev->minor[idx].buf = NULL; +} + +static int __init init_ft_overhead_trace(void) +{ + printk("Initializing Feather-Trace overhead tracing device.\n"); + ftdev_init(&overhead_dev, THIS_MODULE); + overhead_dev.minor_cnt = 1; /* only one buffer */ + overhead_dev.alloc = alloc_timestamp_buffer; + overhead_dev.free = free_timestamp_buffer; + return register_ftdev(&overhead_dev, "ft_trace", FT_TRACE_MAJOR); +} + +module_init(init_ft_overhead_trace); -- cgit v1.2.2
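
Usage note (not part of the patch): a minimal sketch of a user-space reader for the TRACE() log stream exported by the litmus_log device registered above. The /dev/litmus_log path and the mknod step are assumptions for illustration; the patch only reserves major number 251 and does not create a device node.

    /* create the node once, e.g.: mknod /dev/litmus_log c 251 0  (assumed path) */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[4096];
            ssize_t n;
            /* the first open allocates the log ring buffer; every open raises trace_override */
            int fd = open("/dev/litmus_log", O_RDONLY);

            if (fd < 0) {
                    perror("open /dev/litmus_log");
                    return 1;
            }
            /* log_read() retries roughly every 110 jiffies until data arrives or a signal is pending */
            while ((n = read(fd, buf, sizeof(buf))) > 0)
                    fwrite(buf, 1, n, stdout);

            close(fd);      /* the last release frees the ring buffer */
            return 0;
    }

While such a reader holds the device open, trace_override is non-zero, so printk() messages are also copied into the TRACE() buffer and klogd is not woken.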