aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBjoern Brandenburg <bbb@mpi-sws.org>2013-06-23 08:40:52 -0400
committerBjoern Brandenburg <bbb@mpi-sws.org>2013-08-07 03:46:37 -0400
commite089fc170eb4c33c8703bf09cacdff7323cf7575 (patch)
tree206ea5e20ea0552077d7076f007b4d8b22fdfb40
parenta20a7105a2206eb4b9c791aa276f4233bb453214 (diff)
Feather-Trace: add LITMUS^RT overhead tracing infrastructure
This patch adds the main infrastructure for tracing overheads in LITMUS^RT. It does not yet introduce any tracepoints into the kernel.
-rw-r--r--include/litmus/trace.h137
-rw-r--r--litmus/Kconfig25
-rw-r--r--litmus/Makefile1
-rw-r--r--litmus/trace.c284
4 files changed, 447 insertions, 0 deletions
diff --git a/include/litmus/trace.h b/include/litmus/trace.h
new file mode 100644
index 000000000000..489b3d84f880
--- /dev/null
+++ b/include/litmus/trace.h
@@ -0,0 +1,137 @@
1#ifndef _SYS_TRACE_H_
2#define _SYS_TRACE_H_
3
4#ifdef CONFIG_SCHED_OVERHEAD_TRACE
5
6
7#include <litmus/feather_trace.h>
8#include <litmus/feather_buffer.h>
9
10
11/*********************** TIMESTAMPS ************************/
12
/* Classification of the task that was running when a timestamp was
 * recorded; stored in the 2-bit task_type field of struct timestamp. */
enum task_type_marker {
	TSK_BE,		/* best-effort (non-real-time) task */
	TSK_RT,		/* real-time task */
	TSK_UNKNOWN	/* task type not determined at the tracepoint */
};
18
/* One overhead-trace record as exported to userspace (read via the
 * ft_trace0 device). The layout is ABI: it must be exactly 16 bytes --
 * enforced by the BUILD_BUG_ON() in litmus/trace.c. */
struct timestamp {
	uint64_t	timestamp:48;	/* cycle-counter reading (ft_timestamp()), truncated to 48 bits */
	uint64_t	pid:16;		/* low 16 bits of the traced task's PID */
	uint32_t	seq_no;		/* global sequence number; lets tools reorder the stream */
	uint8_t		cpu;		/* CPU on which the record was written */
	uint8_t		event;		/* event id (see TS_* macros below) */
	uint8_t		task_type:2;	/* enum task_type_marker */
	uint8_t		irq_flag:1;	/* set iff irq_count > 0; survives irq_count wrap-around */
	uint8_t		irq_count:5;	/* #interrupts since the previous record (mod 32) */
};
29
30/* tracing callbacks */
31feather_callback void save_timestamp(unsigned long event);
32feather_callback void save_timestamp_def(unsigned long event, unsigned long type);
33feather_callback void save_timestamp_task(unsigned long event, unsigned long t_ptr);
34feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu);
35feather_callback void save_timestamp_time(unsigned long event, unsigned long time_ptr);
36feather_callback void save_timestamp_irq(unsigned long event, unsigned long irq_count_ptr);
37feather_callback void save_timestamp_hide_irq(unsigned long event);
38
39#define TIMESTAMP(id) ft_event0(id, save_timestamp)
40
41#define DTIMESTAMP(id, def) ft_event1(id, save_timestamp_def, (unsigned long) def)
42
43#define TIMESTAMP_CUR(id) DTIMESTAMP(id, is_realtime(current) ? TSK_RT : TSK_BE)
44
45#define TTIMESTAMP(id, task) \
46 ft_event1(id, save_timestamp_task, (unsigned long) task)
47
48#define CTIMESTAMP(id, cpu) \
49 ft_event1(id, save_timestamp_cpu, (unsigned long) cpu)
50
51#define TIMESTAMP_TIME(id, time_ptr) \
52 ft_event1(id, save_timestamp_time, (unsigned long) time_ptr)
53
54#define TIMESTAMP_IRQ(id, irq_count_ptr) \
55 ft_event1(id, save_timestamp_irq, (unsigned long) irq_count_ptr)
56
57#define TIMESTAMP_IN_IRQ(id) \
58 ft_event0(id, save_timestamp_hide_irq)
59
60#else /* !CONFIG_SCHED_OVERHEAD_TRACE */
61
62#define TIMESTAMP(id) /* no tracing */
63
64#define DTIMESTAMP(id, def) /* no tracing */
65
66#define TIMESTAMP_CUR(id) /* no tracing */
67
68#define TTIMESTAMP(id, task) /* no tracing */
69
70#define CTIMESTAMP(id, cpu) /* no tracing */
71
72#define TIMESTAMP_TIME(id, time_ptr) /* no tracing */
73
74#define TIMESTAMP_IRQ(id, irq_count_ptr) /* no tracing */
75
76#define TIMESTAMP_IN_IRQ(id) /* no tracing */
77
78#endif
79
80
81/* Convention for timestamps
82 * =========================
83 *
84 * In order to process the trace files with a common tool, we use the following
85 * convention to measure execution times: The end time id of a code segment is
86 * always the next number after the start time event id.
87 */
88
89#define __TS_SYSCALL_IN_START(p) TIMESTAMP_TIME(10, p)
90#define __TS_SYSCALL_IN_END(p) TIMESTAMP_IRQ(11, p)
91
92#define TS_SYSCALL_OUT_START TIMESTAMP_CUR(20)
93#define TS_SYSCALL_OUT_END TIMESTAMP_CUR(21)
94
95#define TS_LOCK_START TIMESTAMP_CUR(30)
96#define TS_LOCK_END TIMESTAMP_CUR(31)
97
98#define TS_LOCK_SUSPEND TIMESTAMP_CUR(38)
99#define TS_LOCK_RESUME TIMESTAMP_CUR(39)
100
101#define TS_UNLOCK_START TIMESTAMP_CUR(40)
102#define TS_UNLOCK_END TIMESTAMP_CUR(41)
103
104#define TS_SCHED_START DTIMESTAMP(100, TSK_UNKNOWN) /* we only
105 * care
106 * about
107 * next */
108#define TS_SCHED_END(t) TTIMESTAMP(101, t)
109#define TS_SCHED2_START(t) TTIMESTAMP(102, t)
110#define TS_SCHED2_END(t) TTIMESTAMP(103, t)
111
112#define TS_CXS_START(t) TTIMESTAMP(104, t)
113#define TS_CXS_END(t) TTIMESTAMP(105, t)
114
115#define TS_RELEASE_START DTIMESTAMP(106, TSK_RT)
116#define TS_RELEASE_END DTIMESTAMP(107, TSK_RT)
117
118#define TS_TICK_START(t) TTIMESTAMP(110, t)
119#define TS_TICK_END(t) TTIMESTAMP(111, t)
120
121
122#define TS_PLUGIN_SCHED_START /* TIMESTAMP(120) */ /* currently unused */
123#define TS_PLUGIN_SCHED_END /* TIMESTAMP(121) */
124
125#define TS_PLUGIN_TICK_START /* TIMESTAMP(130) */
126#define TS_PLUGIN_TICK_END /* TIMESTAMP(131) */
127
128#define TS_ENTER_NP_START TIMESTAMP(140)
129#define TS_ENTER_NP_END TIMESTAMP(141)
130
131#define TS_EXIT_NP_START TIMESTAMP(150)
132#define TS_EXIT_NP_END TIMESTAMP(151)
133
134#define TS_SEND_RESCHED_START(c) CTIMESTAMP(190, c)
135#define TS_SEND_RESCHED_END TIMESTAMP_IN_IRQ(191)
136
137#endif /* !_SYS_TRACE_H_ */
diff --git a/litmus/Kconfig b/litmus/Kconfig
index 7456eb209cf7..0c7e06b922a1 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -24,6 +24,31 @@ config FEATHER_TRACE
24 you still need to enable SCHED_TASK_TRACE and/or SCHED_OVERHEAD_TRACE to 24 you still need to enable SCHED_TASK_TRACE and/or SCHED_OVERHEAD_TRACE to
25 actually enable any events. 25 actually enable any events.
26 26
27config SCHED_OVERHEAD_TRACE
28 bool "Record timestamps for overhead measurements"
29 depends on FEATHER_TRACE
30 default y
31 help
32 Export event stream for overhead tracing.
33 Say Yes for overhead tracing.
34
35config SCHED_OVERHEAD_TRACE_SHIFT
36 int "Buffer size for Feather-Trace overhead data"
37 depends on SCHED_OVERHEAD_TRACE
38 range 15 32
39 default 22
40 help
41
42 Select the buffer size for the Feather-Trace overhead tracing
43 infrastructure (/dev/litmus/ft_trace0 & ftcat) as a power of two. The
44 larger the buffer, the less likely the chance of buffer overflows if
45 the ftcat process is starved by real-time activity. In machines with
46 large memories, large buffer sizes are recommended.
47
48 Examples: 16 => 2 MB
49 24 => 512 MB
50	  26 => 2 GB
51
27endmenu 52endmenu
28 53
29endmenu 54endmenu
diff --git a/litmus/Makefile b/litmus/Makefile
index bca61e6deb71..99f90c34f141 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -3,3 +3,4 @@
3# 3#
4 4
5obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o 5obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
6obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o
diff --git a/litmus/trace.c b/litmus/trace.c
new file mode 100644
index 000000000000..7d29590252fd
--- /dev/null
+++ b/litmus/trace.c
@@ -0,0 +1,284 @@
1#include <linux/sched.h>
2#include <linux/module.h>
3#include <linux/uaccess.h>
4
5#include <litmus/ftdev.h>
6#include <litmus/trace.h>
7
8/* dummy definition of is_realtime() */
9#define is_realtime(t) (0)
10
11/******************************************************************************/
12/* Allocation */
13/******************************************************************************/
14
15static struct ftdev overhead_dev;
16
17#define trace_ts_buf overhead_dev.minor[0].buf
18
19static unsigned int ts_seq_no = 0;
20
21DEFINE_PER_CPU(atomic_t, irq_fired_count);
22
/* Hook invoked from the interrupt path to count an interrupt on this
 * CPU; the count is later folded into timestamps (see save_irq_flags())
 * so disturbed samples can be filtered during post-processing. */
void ft_irq_fired(void)
{
	/* Only called with preemptions disabled. */
	atomic_inc(&__get_cpu_var(irq_fired_count));
}
28
/* Reset the interrupt counter. NOTE(review): __raw_get_cpu_var() only
 * reaches the counter of the CPU this runs on -- calling it from one
 * CPU does not clear the counters of other CPUs. */
static inline void clear_irq_fired(void)
{
	atomic_set(&__raw_get_cpu_var(irq_fired_count), 0);
}
33
/* Atomically read and zero this CPU's interrupt counter; returns the
 * number of interrupts recorded since the previous call. */
static inline unsigned int get_and_clear_irq_fired(void)
{
	/* This is potentially not atomic since we might migrate if
	 * preemptions are not disabled. As a tradeoff between
	 * accuracy and tracing overheads, this seems acceptable.
	 * If it proves to be a problem, then one could add a callback
	 * from the migration code to invalidate irq_fired_count.
	 */
	return atomic_xchg(&__raw_get_cpu_var(irq_fired_count), 0);
}
44
45static inline void save_irq_flags(struct timestamp *ts, unsigned int irq_count)
46{
47 /* Store how many interrupts occurred. */
48 ts->irq_count = irq_count;
49 /* Extra flag because ts->irq_count overflows quickly. */
50 ts->irq_flag = irq_count > 0;
51
52}
53
/* Core tracepoint helper: reserve a slot in the trace buffer and fill
 * in one timestamp record.
 *
 * @event:            event id to record
 * @type:             enum task_type_marker value for the running task
 * @cpu:              CPU id stored in the record
 * @pid_fragment:     low 16 bits of the relevant PID
 * @irq_count:        caller-supplied interrupt count (used if !record_irq)
 * @record_irq:       if non-zero, read this CPU's counter instead
 * @hide_irq:         number of expected interrupts to subtract (e.g., 1
 *                    when called from within an anticipated IPI)
 * @timestamp:        caller-supplied time value (used if !record_timestamp)
 * @record_timestamp: if non-zero, read ft_timestamp() just before commit
 */
static inline void write_timestamp(uint8_t event,
				   uint8_t type,
				   uint8_t cpu,
				   uint16_t pid_fragment,
				   unsigned int irq_count,
				   int record_irq,
				   int hide_irq,
				   uint64_t timestamp,
				   int record_timestamp)
{
	unsigned long flags;
	unsigned int seq_no;
	struct timestamp *ts;

	/* Avoid preemptions while recording the timestamp. This reduces the
	 * number of "out of order" timestamps in the stream and makes
	 * post-processing easier. */

	local_irq_save(flags);

	/* Draw the sequence number before writing so records can be
	 * re-ordered by consumers even if buffer slots complete out of
	 * order. */
	seq_no = fetch_and_inc((int *) &ts_seq_no);
	if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) {
		ts->event = event;
		ts->seq_no = seq_no;

		ts->task_type = type;
		ts->pid = pid_fragment;

		ts->cpu = cpu;

		if (record_irq)
			irq_count = get_and_clear_irq_fired();

		save_irq_flags(ts, irq_count - hide_irq);

		/* Take the actual time reading last to keep the measured
		 * tracing overhead out of the sample. */
		if (record_timestamp)
			timestamp = ft_timestamp();

		ts->timestamp = timestamp;
		ft_buffer_finish_write(trace_ts_buf, ts);
	}

	local_irq_restore(flags);
}
98
/* Inject a timestamp that was pre-recorded in userspace (arriving via
 * the device write path) into the trace stream. The sequence number,
 * CPU id, and interrupt count are overwritten with kernel-observed
 * values; all other fields are taken from the user-supplied record. */
static void __add_timestamp_user(struct timestamp *pre_recorded)
{
	unsigned long flags;
	unsigned int seq_no;
	struct timestamp *ts;


	local_irq_save(flags);

	seq_no = fetch_and_inc((int *) &ts_seq_no);
	if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) {
		*ts = *pre_recorded;
		ts->seq_no = seq_no;
		ts->cpu = raw_smp_processor_id();
		save_irq_flags(ts, get_and_clear_irq_fired());
		ft_buffer_finish_write(trace_ts_buf, ts);
	}

	local_irq_restore(flags);
}
119
120feather_callback void save_timestamp(unsigned long event)
121{
122 write_timestamp(event, TSK_UNKNOWN,
123 raw_smp_processor_id(),
124 current->pid,
125 0, 1, 0,
126 0, 1);
127}
128
129feather_callback void save_timestamp_def(unsigned long event,
130 unsigned long type)
131{
132 write_timestamp(event, type,
133 raw_smp_processor_id(),
134 current->pid,
135 0, 1, 0,
136 0, 1);
137}
138
139feather_callback void save_timestamp_task(unsigned long event,
140 unsigned long t_ptr)
141{
142 struct task_struct *t = (struct task_struct *) t_ptr;
143 int rt = is_realtime(t);
144
145 write_timestamp(event, rt ? TSK_RT : TSK_BE,
146 raw_smp_processor_id(),
147 t->pid,
148 0, 1, 0,
149 0, 1);
150}
151
152feather_callback void save_timestamp_cpu(unsigned long event,
153 unsigned long cpu)
154{
155 write_timestamp(event, TSK_UNKNOWN, cpu, current->pid,
156 0, 1, 0,
157 0, 1);
158}
159
160/* fake timestamp to user-reported time */
161feather_callback void save_timestamp_time(unsigned long event,
162 unsigned long ptr)
163{
164 uint64_t* time = (uint64_t*) ptr;
165
166 write_timestamp(event, is_realtime(current) ? TSK_RT : TSK_BE,
167 raw_smp_processor_id(), current->pid,
168 0, 1, 0,
169 *time, 0);
170}
171
172/* Record user-reported IRQ count */
173feather_callback void save_timestamp_irq(unsigned long event,
174 unsigned long irq_counter_ptr)
175{
176 uint64_t* irqs = (uint64_t*) irq_counter_ptr;
177
178 write_timestamp(event, is_realtime(current) ? TSK_RT : TSK_BE,
179 raw_smp_processor_id(), current->pid,
180 *irqs, 0, 0,
181 0, 1);
182}
183
184/* Suppress one IRQ from the irq count. Used by TS_SEND_RESCHED_END, which is
185 * called from within an interrupt that is expected. */
186feather_callback void save_timestamp_hide_irq(unsigned long event)
187{
188 write_timestamp(event, is_realtime(current) ? TSK_RT : TSK_BE,
189 raw_smp_processor_id(), current->pid,
190 0, 1, 1,
191 0, 1);
192}
193
194/******************************************************************************/
195/* DEVICE FILE DRIVER */
196/******************************************************************************/
197
198#define NO_TIMESTAMPS (2 << CONFIG_SCHED_OVERHEAD_TRACE_SHIFT)
199
200static int alloc_timestamp_buffer(struct ftdev* ftdev, unsigned int idx)
201{
202 unsigned int count = NO_TIMESTAMPS;
203
204 /* An overhead-tracing timestamp should be exactly 16 bytes long. */
205 BUILD_BUG_ON(sizeof(struct timestamp) != 16);
206
207 while (count && !trace_ts_buf) {
208 printk("time stamp buffer: trying to allocate %u time stamps.\n", count);
209 ftdev->minor[idx].buf = alloc_ft_buffer(count, sizeof(struct timestamp));
210 count /= 2;
211 }
212 return ftdev->minor[idx].buf ? 0 : -ENOMEM;
213}
214
215static void free_timestamp_buffer(struct ftdev* ftdev, unsigned int idx)
216{
217 free_ft_buffer(ftdev->minor[idx].buf);
218 ftdev->minor[idx].buf = NULL;
219}
220
221static ssize_t write_timestamp_from_user(struct ft_buffer* buf, size_t len,
222 const char __user *from)
223{
224 ssize_t consumed = 0;
225 struct timestamp ts;
226
227 /* don't give us partial timestamps */
228 if (len % sizeof(ts))
229 return -EINVAL;
230
231 while (len >= sizeof(ts)) {
232 if (copy_from_user(&ts, from, sizeof(ts))) {
233 consumed = -EFAULT;
234 goto out;
235 }
236 len -= sizeof(ts);
237 from += sizeof(ts);
238 consumed += sizeof(ts);
239
240 __add_timestamp_user(&ts);
241 }
242
243out:
244 return consumed;
245}
246
247static int __init init_ft_overhead_trace(void)
248{
249 int err, cpu;
250
251 printk("Initializing Feather-Trace overhead tracing device.\n");
252 err = ftdev_init(&overhead_dev, THIS_MODULE, 1, "ft_trace");
253 if (err)
254 goto err_out;
255
256 overhead_dev.alloc = alloc_timestamp_buffer;
257 overhead_dev.free = free_timestamp_buffer;
258 overhead_dev.write = write_timestamp_from_user;
259
260 err = register_ftdev(&overhead_dev);
261 if (err)
262 goto err_dealloc;
263
264 /* initialize IRQ flags */
265 for (cpu = 0; cpu < NR_CPUS; cpu++) {
266 clear_irq_fired();
267 }
268
269 return 0;
270
271err_dealloc:
272 ftdev_exit(&overhead_dev);
273err_out:
274 printk(KERN_WARNING "Could not register ft_trace module.\n");
275 return err;
276}
277
/* Module exit: unregister the device and release its buffers. */
static void __exit exit_ft_overhead_trace(void)
{
	ftdev_exit(&overhead_dev);
}
282
283module_init(init_ft_overhead_trace);
284module_exit(exit_ft_overhead_trace);