From efbaae0016a8bc98cc6d24e17ee242a52b356f17 Mon Sep 17 00:00:00 2001
From: Bjoern Brandenburg
Date: Sun, 23 Jun 2013 11:41:27 +0200
Subject: Feather-Trace: add platform independent implementation

This patch adds the simple fallback implementation and creates dummy
hooks in the x86 and ARM Kconfig files.
---
 arch/arm/Kconfig                |   3 +
 arch/x86/Kconfig                |   3 +
 include/litmus/feather_buffer.h | 118 ++++++++++++++++++++++++++++++++++++++++
 include/litmus/feather_trace.h  |  69 +++++++++++++++++++++++
 litmus/Kconfig                  |  25 +++++++++
 litmus/Makefile                 |   2 +
 litmus/ft_event.c               |  43 +++++++++++++++
 7 files changed, 263 insertions(+)
 create mode 100644 include/litmus/feather_buffer.h
 create mode 100644 include/litmus/feather_trace.h
 create mode 100644 litmus/ft_event.c

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 131ec84cc694..ecfd73522b16 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2270,5 +2270,8 @@ source "lib/Kconfig"
 
 source "arch/arm/kvm/Kconfig"
 
+config ARCH_HAS_FEATHER_TRACE
+	def_bool n
+
 source "litmus/Kconfig"
 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index bd67fd1a045f..0216c9394949 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2347,4 +2347,7 @@ source "arch/x86/kvm/Kconfig"
 
 source "lib/Kconfig"
 
+config ARCH_HAS_FEATHER_TRACE
+	def_bool n
+
 source "litmus/Kconfig"
diff --git a/include/litmus/feather_buffer.h b/include/litmus/feather_buffer.h
new file mode 100644
index 000000000000..38de95b73553
--- /dev/null
+++ b/include/litmus/feather_buffer.h
@@ -0,0 +1,118 @@
+#ifndef _FEATHER_BUFFER_H_
+#define _FEATHER_BUFFER_H_
+
+/* requires UINT_MAX and memcpy */
+
+#define SLOT_FREE	0
+#define SLOT_BUSY	1
+#define SLOT_READY	2
+
+struct ft_buffer {
+	unsigned int	slot_count;
+	unsigned int	slot_size;
+
+	int		free_count;
+	unsigned int	write_idx;
+	unsigned int	read_idx;
+
+	char*		slots;
+	void*		buffer_mem;
+	unsigned int	failed_writes;
+};
+
+static inline int init_ft_buffer(struct ft_buffer* buf,
+				 unsigned int slot_count,
+				 unsigned int slot_size,
+				 char* slots,
+				 void* buffer_mem)
+{
+	int i = 0;
+	if (!slot_count || UINT_MAX % slot_count != slot_count - 1) {
+		/* The slot count must divide UINT_MAX + 1 so that when it
+		 * wraps around the index correctly points to 0.
+		 */
+		return 0;
+	} else {
+		buf->slot_count    = slot_count;
+		buf->slot_size     = slot_size;
+		buf->slots         = slots;
+		buf->buffer_mem    = buffer_mem;
+		buf->free_count    = slot_count;
+		buf->write_idx     = 0;
+		buf->read_idx      = 0;
+		buf->failed_writes = 0;
+		for (i = 0; i < slot_count; i++)
+			buf->slots[i] = SLOT_FREE;
+		return 1;
+	}
+}
+
+static inline int ft_buffer_start_write(struct ft_buffer* buf, void **ptr)
+{
+	int free = fetch_and_dec(&buf->free_count);
+	unsigned int idx;
+	if (free <= 0) {
+		fetch_and_inc(&buf->free_count);
+		*ptr = 0;
+		fetch_and_inc((int*) &buf->failed_writes);
+		return 0;
+	} else {
+		idx = fetch_and_inc((int*) &buf->write_idx) % buf->slot_count;
+		buf->slots[idx] = SLOT_BUSY;
+		*ptr = ((char*) buf->buffer_mem) + idx * buf->slot_size;
+		return 1;
+	}
+}
+
+/* For single writer scenarios, with fewer atomic ops. */
+static inline int ft_buffer_start_single_write(struct ft_buffer* buf, void **ptr)
+{
+	unsigned int idx;
+
+	if (buf->free_count <= 0) {
+		*ptr = 0;
+		/* single writer: no atomicity needed */
+		buf->failed_writes++;
+		return 0;
+	} else {
+		/* free_count is positive, and can only increase since we are
+		 * (by assumption) the only writer accessing the buffer.
+		 */
+
+		idx = buf->write_idx++ % buf->slot_count;
+		buf->slots[idx] = SLOT_BUSY;
+		*ptr = ((char*) buf->buffer_mem) + idx * buf->slot_size;
+
+		ft_atomic_dec(&buf->free_count);
+		return 1;
+	}
+}
+
+static inline void ft_buffer_finish_write(struct ft_buffer* buf, void *ptr)
+{
+	unsigned int idx = ((char*) ptr - (char*) buf->buffer_mem) / buf->slot_size;
+	buf->slots[idx] = SLOT_READY;
+}
+
+
+/* exclusive reader access is assumed */
+static inline int ft_buffer_read(struct ft_buffer* buf, void* dest)
+{
+	unsigned int idx;
+	if (buf->free_count == buf->slot_count)
+		/* nothing available */
+		return 0;
+	idx = buf->read_idx % buf->slot_count;
+	if (buf->slots[idx] == SLOT_READY) {
+		memcpy(dest, ((char*) buf->buffer_mem) + idx * buf->slot_size,
+		       buf->slot_size);
+		buf->slots[idx] = SLOT_FREE;
+		buf->read_idx++;
+		fetch_and_inc(&buf->free_count);
+		return 1;
+	} else
+		return 0;
+}
+
+
+#endif
diff --git a/include/litmus/feather_trace.h b/include/litmus/feather_trace.h
new file mode 100644
index 000000000000..dbeca46c01f5
--- /dev/null
+++ b/include/litmus/feather_trace.h
@@ -0,0 +1,69 @@
+#ifndef _FEATHER_TRACE_H_
+#define _FEATHER_TRACE_H_
+
+#include <asm/atomic.h>
+
+int ft_enable_event(unsigned long id);
+int ft_disable_event(unsigned long id);
+int ft_is_event_enabled(unsigned long id);
+int ft_disable_all_events(void);
+
+/* atomic_* functions are inline anyway */
+static inline int fetch_and_inc(int *val)
+{
+	return atomic_add_return(1, (atomic_t*) val) - 1;
+}
+
+static inline int fetch_and_dec(int *val)
+{
+	return atomic_sub_return(1, (atomic_t*) val) + 1;
+}
+
+static inline void ft_atomic_dec(int *val)
+{
+	atomic_sub(1, (atomic_t*) val);
+}
+
+/* Don't use the rewriting implementation if kernel text pages are read-only.
+ * Ftrace gets around this by using the identity mapping, but that's more
+ * effort than is warranted right now for Feather-Trace.
+ * Eventually, it may make sense to replace Feather-Trace with ftrace.
+ */
+#if defined(CONFIG_ARCH_HAS_FEATHER_TRACE) && !defined(CONFIG_DEBUG_RODATA)
+
+#include <asm/feather_trace.h>
+
+#else /* !__ARCH_HAS_FEATHER_TRACE */
+
+/* provide default implementation */
+#include <asm/timex.h> /* for get_cycles() */
+
+static inline unsigned long long ft_timestamp(void)
+{
+	return get_cycles();
+}
+
+#define feather_callback
+
+#define MAX_EVENTS 1024
+
+extern int ft_events[MAX_EVENTS];
+
+#define ft_event(id, callback) \
+	if (ft_events[id]) callback();
+
+#define ft_event0(id, callback) \
+	if (ft_events[id]) callback(id);
+
+#define ft_event1(id, callback, param) \
+	if (ft_events[id]) callback(id, param);
+
+#define ft_event2(id, callback, param, param2) \
+	if (ft_events[id]) callback(id, param, param2);
+
+#define ft_event3(id, callback, p, p2, p3) \
+	if (ft_events[id]) callback(id, p, p2, p3);
+
+#endif /* __ARCH_HAS_FEATHER_TRACE */
+
+#endif
diff --git a/litmus/Kconfig b/litmus/Kconfig
index 382b2e426437..70ddbaddc06f 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -1,3 +1,28 @@
 menu "LITMUS^RT"
 
+menu "Tracing"
+
+config FEATHER_TRACE
+	bool "Feather-Trace Infrastructure"
+	default y
+	help
+	  Feather-Trace basic tracing infrastructure. Includes device file
+	  driver and instrumentation point support.
+
+	  There are actually two implementations of Feather-Trace.
+	  1) A slower, but portable, default implementation.
+	  2) Architecture-specific implementations that rewrite kernel .text at runtime.
+
+	  If enabled, Feather-Trace will be based on 2) if available (currently only for x86).
+	  However, if DEBUG_RODATA=y, then Feather-Trace will choose option 1) in any case
+	  to avoid problems with write-protected .text pages.
+
+	  Bottom line: to avoid increased overheads, choose DEBUG_RODATA=n.
+
+	  Note that this option only enables the basic Feather-Trace infrastructure;
+	  you still need to enable SCHED_TASK_TRACE and/or SCHED_OVERHEAD_TRACE to
+	  actually enable any events.
+
+endmenu
+
 endmenu
diff --git a/litmus/Makefile b/litmus/Makefile
index f0ed31faf582..4c6130b58bae 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -1,3 +1,5 @@
 #
 # Makefile for LITMUS^RT
 #
+
+obj-$(CONFIG_FEATHER_TRACE) += ft_event.o
diff --git a/litmus/ft_event.c b/litmus/ft_event.c
new file mode 100644
index 000000000000..399a07becca5
--- /dev/null
+++ b/litmus/ft_event.c
@@ -0,0 +1,43 @@
+#include <linux/types.h>
+
+#include <litmus/feather_trace.h>
+
+#if !defined(CONFIG_ARCH_HAS_FEATHER_TRACE) || defined(CONFIG_DEBUG_RODATA)
+/* provide dummy implementation */
+
+int ft_events[MAX_EVENTS];
+
+int ft_enable_event(unsigned long id)
+{
+	if (id < MAX_EVENTS) {
+		ft_events[id]++;
+		return 1;
+	} else
+		return 0;
+}
+
+int ft_disable_event(unsigned long id)
+{
+	if (id < MAX_EVENTS && ft_events[id]) {
+		ft_events[id]--;
+		return 1;
+	} else
+		return 0;
+}
+
+int ft_disable_all_events(void)
+{
+	int i;
+
+	for (i = 0; i < MAX_EVENTS; i++)
+		ft_events[i] = 0;
+
+	return MAX_EVENTS;
+}
+
+int ft_is_event_enabled(unsigned long id)
+{
+	return id < MAX_EVENTS && ft_events[id];
+}
+
+#endif
--
cgit v1.2.2
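
A usage note (editorial addition, not part of the patch): the ring
buffer added in include/litmus/feather_buffer.h is driven as
reserve -> fill -> publish on the writer side and as a plain copy-out
on the reader side. The minimal user-space sketch below exercises that
sequence. It is illustrative only: the single-threaded stand-ins for
fetch_and_inc()/fetch_and_dec()/ft_atomic_dec() (in the kernel these
come from litmus/feather_trace.h) and the local include path are
assumptions made for the demo.

	/* demo.c -- single-threaded sketch of the ft_buffer API.
	 * The atomic helpers are replaced by plain integer operations,
	 * which is safe here only because one thread acts as both the
	 * writer and the reader.
	 */
	#include <limits.h>	/* UINT_MAX, needed by feather_buffer.h */
	#include <stdio.h>
	#include <string.h>	/* memcpy, needed by feather_buffer.h */

	/* Non-atomic stand-ins for the helpers from feather_trace.h. */
	static inline int fetch_and_inc(int *val) { return (*val)++; }
	static inline int fetch_and_dec(int *val) { return (*val)--; }
	static inline void ft_atomic_dec(int *val) { (*val)--; }

	#include "feather_buffer.h"	/* assumed copied next to demo.c */

	#define SLOTS 4	/* must divide UINT_MAX + 1, i.e., a power of two */

	int main(void)
	{
		struct ft_buffer buf;
		static char states[SLOTS];
		static unsigned long long mem[SLOTS];
		unsigned long long *rec, out;

		if (!init_ft_buffer(&buf, SLOTS, sizeof(*rec), states, mem))
			return 1;

		/* Writer: reserve a slot, fill it, mark it ready. */
		if (ft_buffer_start_single_write(&buf, (void **) &rec)) {
			*rec = 42;
			ft_buffer_finish_write(&buf, rec);
		}

		/* Reader: drain all ready slots, oldest first. */
		while (ft_buffer_read(&buf, &out))
			printf("read record: %llu\n", out);

		return 0;
	}

The power-of-two restriction enforced by init_ft_buffer() matters here
because write_idx and read_idx are never reset: the "% slot_count"
reduction stays correct across the unsigned integer wrap-around only if
slot_count divides UINT_MAX + 1.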