author     Bjoern Brandenburg <bbb@mpi-sws.org>	2015-08-09 07:18:44 -0400
committer  Bjoern Brandenburg <bbb@mpi-sws.org>	2015-08-09 06:21:15 -0400
commit     5459c945fd381af0c587a04a7d8d468fa348257d
tree       9a0804b412623bdbd16235f14cd0e4ab48f24c02
parent     6c10dcd2d019bb63026f1fdbd158788cdf7b8b0a
Feather-Trace: add platform independent implementation
This patch adds the simple fallback implementation and creates dummy hooks in the x86 and ARM Kconfig files.
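
To illustrate how an instrumentation point is meant to use the fallback added by this patch, here is a minimal sketch; the callback, the hook function, and the event ID are made up for this example and are not part of the patch:

	#include <litmus/feather_trace.h>

	/* hypothetical callback; with the fallback implementation,
	 * "feather_callback" expands to nothing, so this is a plain
	 * C function that receives the event ID plus one parameter */
	static feather_callback void note_event(unsigned long id,
						unsigned long long when)
	{
		/* e.g., hand (id, when) to a buffer or printk() it */
	}

	static void some_hook(void)
	{
		/* ft_event1(id, cb, p) expands to
		 *     if (ft_events[id]) cb(id, p);
		 * so the callback runs only once ft_enable_event(42)
		 * has been called at least once */
		ft_event1(42, note_event, ft_timestamp());
	}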
-rw-r--r--  arch/arm/Kconfig                   3
-rw-r--r--  arch/x86/Kconfig                   3
-rw-r--r--  include/litmus/feather_buffer.h  118
-rw-r--r--  include/litmus/feather_trace.h    69
-rw-r--r--  litmus/Kconfig                    25
-rw-r--r--  litmus/Makefile                    2
-rw-r--r--  litmus/ft_event.c                 43
7 files changed, 263 insertions, 0 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 9fee3674d914..ce948d46a1b3 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2120,5 +2120,8 @@ source "lib/Kconfig"
 
 source "arch/arm/kvm/Kconfig"
 
+config ARCH_HAS_FEATHER_TRACE
+	def_bool n
+
 source "litmus/Kconfig"
 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 9f305d6e1246..eda7a8806be0 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2595,4 +2595,7 @@ source "arch/x86/kvm/Kconfig"
 
 source "lib/Kconfig"
 
+config ARCH_HAS_FEATHER_TRACE
+	def_bool n
+
 source "litmus/Kconfig"
diff --git a/include/litmus/feather_buffer.h b/include/litmus/feather_buffer.h
new file mode 100644
index 000000000000..38de95b73553
--- /dev/null
+++ b/include/litmus/feather_buffer.h
@@ -0,0 +1,118 @@
+#ifndef _FEATHER_BUFFER_H_
+#define _FEATHER_BUFFER_H_
+
+/* requires UINT_MAX and memcpy */
+
+#define SLOT_FREE	0
+#define SLOT_BUSY	1
+#define SLOT_READY	2
+
+struct ft_buffer {
+	unsigned int	slot_count;
+	unsigned int	slot_size;
+
+	int		free_count;
+	unsigned int	write_idx;
+	unsigned int	read_idx;
+
+	char*		slots;
+	void*		buffer_mem;
+	unsigned int	failed_writes;
+};
+
+static inline int init_ft_buffer(struct ft_buffer* buf,
+				 unsigned int slot_count,
+				 unsigned int slot_size,
+				 char* slots,
+				 void* buffer_mem)
+{
+	int i = 0;
+	if (!slot_count || UINT_MAX % slot_count != slot_count - 1) {
+		/* The slot count must divide UINT_MAX + 1 so that when
+		 * write_idx wraps around, the slot index correctly continues at 0.
+		 */
+		return 0;
+	} else {
+		buf->slot_count    = slot_count;
+		buf->slot_size     = slot_size;
+		buf->slots         = slots;
+		buf->buffer_mem    = buffer_mem;
+		buf->free_count    = slot_count;
+		buf->write_idx     = 0;
+		buf->read_idx      = 0;
+		buf->failed_writes = 0;
+		for (i = 0; i < slot_count; i++)
+			buf->slots[i] = SLOT_FREE;
+		return 1;
+	}
+}
+
+static inline int ft_buffer_start_write(struct ft_buffer* buf, void **ptr)
+{
+	int free = fetch_and_dec(&buf->free_count);
+	unsigned int idx;
+	if (free <= 0) {
+		fetch_and_inc(&buf->free_count);
+		*ptr = 0;
+		fetch_and_inc(&buf->failed_writes);
+		return 0;
+	} else {
+		idx = fetch_and_inc((int*) &buf->write_idx) % buf->slot_count;
+		buf->slots[idx] = SLOT_BUSY;
+		*ptr = ((char*) buf->buffer_mem) + idx * buf->slot_size;
+		return 1;
+	}
+}
+
+/* For single-writer scenarios, with fewer atomic ops. */
+static inline int ft_buffer_start_single_write(struct ft_buffer* buf, void **ptr)
+{
+	unsigned int idx;
+
+	if (buf->free_count <= 0) {
+		*ptr = 0;
+		/* single writer: no atomicity needed */
+		buf->failed_writes++;
+		return 0;
+	} else {
+		/* free_count is positive, and can only increase since we are
+		 * (by assumption) the only writer accessing the buffer.
+		 */
+
+		idx = buf->write_idx++ % buf->slot_count;
+		buf->slots[idx] = SLOT_BUSY;
+		*ptr = ((char*) buf->buffer_mem) + idx * buf->slot_size;
+
+		ft_atomic_dec(&buf->free_count);
+		return 1;
+	}
+}
+
+static inline void ft_buffer_finish_write(struct ft_buffer* buf, void *ptr)
+{
+	unsigned int idx = ((char*) ptr - (char*) buf->buffer_mem) / buf->slot_size;
+	buf->slots[idx] = SLOT_READY;
+}
+
+
+/* exclusive reader access is assumed */
+static inline int ft_buffer_read(struct ft_buffer* buf, void* dest)
+{
+	unsigned int idx;
+	if (buf->free_count == buf->slot_count)
+		/* nothing available */
+		return 0;
+	idx = buf->read_idx % buf->slot_count;
+	if (buf->slots[idx] == SLOT_READY) {
+		memcpy(dest, ((char*) buf->buffer_mem) + idx * buf->slot_size,
+		       buf->slot_size);
+		buf->slots[idx] = SLOT_FREE;
+		buf->read_idx++;
+		fetch_and_inc(&buf->free_count);
+		return 1;
+	} else
+		return 0;
+}
+
+
+#endif
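
A note on the buffer above, plus a usage sketch. The check "UINT_MAX % slot_count == slot_count - 1" holds exactly when slot_count divides UINT_MAX + 1, so with the usual 32-bit unsigned int the slot count must be a power of two; otherwise the slot index derived from write_idx would jump when write_idx wraps around. The storage names, slot count, and helper functions below are made up for illustration and are not part of the patch:

	#include <litmus/feather_trace.h>	/* fetch_and_inc(), ft_timestamp() */
	#include <litmus/feather_buffer.h>

	#define N_SLOTS   256				/* power of two, divides 2^32 */
	#define SLOT_SIZE sizeof(unsigned long long)

	static char               slot_states[N_SLOTS];
	static unsigned long long slot_data[N_SLOTS];
	static struct ft_buffer   trace_buf;

	static int trace_buf_setup(void)
	{
		/* returns 1 on success, 0 if N_SLOTS does not divide UINT_MAX + 1 */
		return init_ft_buffer(&trace_buf, N_SLOTS, SLOT_SIZE,
				      slot_states, slot_data);
	}

	/* producer side: claim a slot, fill it, publish it */
	static void record_timestamp(void)
	{
		void *slot;

		if (ft_buffer_start_single_write(&trace_buf, &slot)) {
			*((unsigned long long *) slot) = ft_timestamp();
			ft_buffer_finish_write(&trace_buf, slot);
		}
		/* on failure the buffer was full and failed_writes was bumped */
	}

	/* consumer side (ft_buffer_read() assumes a single reader) */
	static int drain_one(unsigned long long *dest)
	{
		return ft_buffer_read(&trace_buf, dest);
	}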
diff --git a/include/litmus/feather_trace.h b/include/litmus/feather_trace.h
new file mode 100644
index 000000000000..dbeca46c01f5
--- /dev/null
+++ b/include/litmus/feather_trace.h
@@ -0,0 +1,69 @@
+#ifndef _FEATHER_TRACE_H_
+#define _FEATHER_TRACE_H_
+
+#include <asm/atomic.h>
+
+int ft_enable_event(unsigned long id);
+int ft_disable_event(unsigned long id);
+int ft_is_event_enabled(unsigned long id);
+int ft_disable_all_events(void);
+
+/* atomic_* functions are inline anyway */
+static inline int fetch_and_inc(int *val)
+{
+	return atomic_add_return(1, (atomic_t*) val) - 1;
+}
+
+static inline int fetch_and_dec(int *val)
+{
+	return atomic_sub_return(1, (atomic_t*) val) + 1;
+}
+
+static inline void ft_atomic_dec(int *val)
+{
+	atomic_sub(1, (atomic_t*) val);
+}
+
+/* Don't use the rewriting implementation if kernel text pages are read-only.
+ * Ftrace gets around this by using the identity mapping, but that's more
+ * effort than is warranted right now for Feather-Trace.
+ * Eventually, it may make sense to replace Feather-Trace with ftrace.
+ */
+#if defined(CONFIG_ARCH_HAS_FEATHER_TRACE) && !defined(CONFIG_DEBUG_RODATA)
+
+#include <asm/feather_trace.h>
+
+#else /* !CONFIG_ARCH_HAS_FEATHER_TRACE || CONFIG_DEBUG_RODATA */
+
+/* provide default implementation */
+#include <linux/timex.h> /* for get_cycles() */
+
+static inline unsigned long long ft_timestamp(void)
+{
+	return get_cycles();
+}
+
+#define feather_callback
+
+#define MAX_EVENTS 1024
+
+extern int ft_events[MAX_EVENTS];
+
+#define ft_event(id, callback) \
+	if (ft_events[id]) callback();
+
+#define ft_event0(id, callback) \
+	if (ft_events[id]) callback(id);
+
+#define ft_event1(id, callback, param) \
+	if (ft_events[id]) callback(id, param);
+
+#define ft_event2(id, callback, param, param2) \
+	if (ft_events[id]) callback(id, param, param2);
+
+#define ft_event3(id, callback, p, p2, p3) \
+	if (ft_events[id]) callback(id, p, p2, p3);
+
+#endif /* CONFIG_ARCH_HAS_FEATHER_TRACE && !CONFIG_DEBUG_RODATA */
+
+#endif
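
One detail of the helpers above that feather_buffer.h relies on: fetch_and_inc() and fetch_and_dec() both return the value the counter held *before* the update. A small standalone sketch of that convention (not part of the patch; the variable and function names are arbitrary):

	static int counter = 3;

	static void fetch_and_x_demo(void)
	{
		int before;

		before = fetch_and_inc(&counter);	/* before == 3, counter == 4 */
		before = fetch_and_dec(&counter);	/* before == 4, counter == 3 */

		/* ft_buffer_start_write() uses exactly this: if the value
		 * free_count held before the decrement was <= 0, the buffer
		 * was already full and the writer undoes its decrement. */
	}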
diff --git a/litmus/Kconfig b/litmus/Kconfig
index 382b2e426437..70ddbaddc06f 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -1,3 +1,28 @@
1menu "LITMUS^RT" 1menu "LITMUS^RT"
2 2
3menu "Tracing"
4
5config FEATHER_TRACE
6 bool "Feather-Trace Infrastructure"
7 default y
8 help
9 Feather-Trace basic tracing infrastructure. Includes device file
10 driver and instrumentation point support.
11
12 There are actually two implementations of Feather-Trace.
13 1) A slower, but portable, default implementation.
14 2) Architecture-specific implementations that rewrite kernel .text at runtime.
15
16 If enabled, Feather-Trace will be based on 2) if available (currently only for x86).
17 However, if DEBUG_RODATA=y, then Feather-Trace will choose option 1) in any case
18 to avoid problems with write-protected .text pages.
19
20 Bottom line: to avoid increased overheads, choose DEBUG_RODATA=n.
21
22 Note that this option only enables the basic Feather-Trace infrastructure;
23 you still need to enable SCHED_TASK_TRACE and/or SCHED_OVERHEAD_TRACE to
24 actually enable any events.
25
26endmenu
27
3endmenu 28endmenu
diff --git a/litmus/Makefile b/litmus/Makefile
index f0ed31faf582..4c6130b58bae 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -1,3 +1,5 @@
 #
 # Makefile for LITMUS^RT
 #
+
+obj-$(CONFIG_FEATHER_TRACE) += ft_event.o
diff --git a/litmus/ft_event.c b/litmus/ft_event.c
new file mode 100644
index 000000000000..399a07becca5
--- /dev/null
+++ b/litmus/ft_event.c
@@ -0,0 +1,43 @@
+#include <linux/types.h>
+
+#include <litmus/feather_trace.h>
+
+#if !defined(CONFIG_ARCH_HAS_FEATHER_TRACE) || defined(CONFIG_DEBUG_RODATA)
+/* provide dummy implementation */
+
+int ft_events[MAX_EVENTS];
+
+int ft_enable_event(unsigned long id)
+{
+	if (id < MAX_EVENTS) {
+		ft_events[id]++;
+		return 1;
+	} else
+		return 0;
+}
+
+int ft_disable_event(unsigned long id)
+{
+	if (id < MAX_EVENTS && ft_events[id]) {
+		ft_events[id]--;
+		return 1;
+	} else
+		return 0;
+}
+
+int ft_disable_all_events(void)
+{
+	int i;
+
+	for (i = 0; i < MAX_EVENTS; i++)
+		ft_events[i] = 0;
+
+	return MAX_EVENTS;
+}
+
+int ft_is_event_enabled(unsigned long id)
+{
+	return id < MAX_EVENTS && ft_events[id];
+}
+
+#endif
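
Note that in this dummy implementation ft_events[] holds a counter rather than a flag, so enable requests nest: an event stays enabled until it has been disabled as many times as it was enabled, or until ft_disable_all_events() resets everything. A brief sketch of that behavior (the event ID and function name are arbitrary, not part of the patch):

	static void event_counting_demo(void)
	{
		ft_enable_event(100);		/* ft_events[100]: 0 -> 1 */
		ft_enable_event(100);		/* ft_events[100]: 1 -> 2 */
		ft_disable_event(100);		/* ft_events[100]: 2 -> 1 */

		/* still enabled: ft_event*() instrumentation points fire */
		if (ft_is_event_enabled(100))
			ft_disable_event(100);	/* ft_events[100]: 1 -> 0 */

		ft_disable_all_events();	/* clears every counter */
	}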