path: root/litmus/trace.c
author		Jonathan Herman <hermanjl@cs.unc.edu>	2013-04-19 17:31:52 -0400
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2013-04-19 17:31:52 -0400
commit		f70a290e8a889caa905ab7650c696f2bb299be1a (patch)
tree		56f0886d839499e9f522f189999024b3e86f9be2 /litmus/trace.c
parent		fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (diff)
parent		7ef4a793a624c6e66c16ca1051847f75161f5bec (diff)
Merge branch 'wip-nested-locking' into tegra-nested-locking

Conflicts:
	Makefile
	include/linux/fs.h
Diffstat (limited to 'litmus/trace.c')
-rw-r--r--	litmus/trace.c	300
1 file changed, 300 insertions, 0 deletions
diff --git a/litmus/trace.c b/litmus/trace.c
new file mode 100644
index 00000000000..7dbb98e4a3c
--- /dev/null
+++ b/litmus/trace.c
@@ -0,0 +1,300 @@
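/* litmus/trace.c -- Feather-Trace overhead tracing for LITMUS^RT: records
 * 16-byte timestamp samples into a feather-trace buffer and exports them to
 * user space through the "ft_trace" ftdev device registered below.
 */
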
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <litmus/ftdev.h>
#include <litmus/litmus.h>
#include <litmus/trace.h>

/******************************************************************************/
/*                                Allocation                                  */
/******************************************************************************/

static struct ftdev overhead_dev;

#define trace_ts_buf overhead_dev.minor[0].buf

static unsigned int ts_seq_no = 0;

DEFINE_PER_CPU(atomic_t, irq_fired_count);

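/* Per-CPU count of interrupts that fired since the counter was last cleared.
 * write_timestamp() uses it to flag samples that may have been disturbed by
 * interrupt handling. ft_irq_fired() is presumably invoked from interrupt
 * entry instrumentation elsewhere; this file only does the bookkeeping and
 * additionally mirrors the count into the task's user-space control page.
 */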
void ft_irq_fired(void)
{
	/* Only called with preemptions disabled. */
	atomic_inc(&__get_cpu_var(irq_fired_count));

	if (has_control_page(current))
		get_control_page(current)->irq_count++;
}

static inline void clear_irq_fired(void)
{
	atomic_set(&__raw_get_cpu_var(irq_fired_count), 0);
}

static inline unsigned int get_and_clear_irq_fired(void)
{
	/* This is potentially not atomic since we might migrate if
	 * preemptions are not disabled. As a tradeoff between
	 * accuracy and tracing overheads, this seems acceptable.
	 * If it proves to be a problem, then one could add a callback
	 * from the migration code to invalidate irq_fired_count.
	 */
	return atomic_xchg(&__raw_get_cpu_var(irq_fired_count), 0);
}

static inline void save_irq_flags(struct timestamp *ts, unsigned int irq_count)
{
	/* Store how many interrupts occurred. */
	ts->irq_count = irq_count;
	/* Extra flag because ts->irq_count overflows quickly. */
	ts->irq_flag = irq_count > 0;
}

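/* Common writer for all trace points. The flag arguments select where the
 * IRQ count and the timestamp come from:
 *  - record_irq: sample the per-CPU counter via get_and_clear_irq_fired()
 *    instead of using the irq_count argument;
 *  - hide_irq: subtract one expected interrupt from the recorded count
 *    (see save_timestamp_hide_irq() below);
 *  - record_timestamp: read the clock via ft_timestamp() instead of using
 *    the timestamp argument.
 * For example, save_timestamp() below passes (0, 1, 0, 0, 1): sample the
 * IRQ count here, hide no interrupt, and take the timestamp at the trace
 * point itself.
 */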
static inline void write_timestamp(uint8_t event,
				   uint8_t type,
				   uint8_t cpu,
				   uint16_t pid_fragment,
				   unsigned int irq_count,
				   int record_irq,
				   int hide_irq,
				   uint64_t timestamp,
				   int record_timestamp)
{
	unsigned long flags;
	unsigned int seq_no;
	struct timestamp *ts;

	/* Avoid preemptions while recording the timestamp. This reduces the
	 * number of "out of order" timestamps in the stream and makes
	 * post-processing easier. */

	local_irq_save(flags);

	seq_no = fetch_and_inc((int *) &ts_seq_no);
	if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) {
		ts->event = event;
		ts->seq_no = seq_no;

		ts->task_type = type;
		ts->pid = pid_fragment;

		ts->cpu = cpu;

		if (record_irq)
			irq_count = get_and_clear_irq_fired();

		save_irq_flags(ts, irq_count - hide_irq);

		if (record_timestamp)
			timestamp = ft_timestamp();

		ts->timestamp = timestamp;
		ft_buffer_finish_write(trace_ts_buf, ts);
	}

	local_irq_restore(flags);
}

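/* Inject a timestamp that was recorded in user space (see
 * write_timestamp_from_user() below). The sequence number, CPU, and IRQ
 * flags are overwritten so that the record sorts and filters consistently
 * with kernel-generated samples; all other fields are copied verbatim.
 */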
static void __add_timestamp_user(struct timestamp *pre_recorded)
{
	unsigned long flags;
	unsigned int seq_no;
	struct timestamp *ts;

	local_irq_save(flags);

	seq_no = fetch_and_inc((int *) &ts_seq_no);
	if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) {
		*ts = *pre_recorded;
		ts->seq_no = seq_no;
		ts->cpu = raw_smp_processor_id();
		save_irq_flags(ts, get_and_clear_irq_fired());
		ft_buffer_finish_write(trace_ts_buf, ts);
	}

	local_irq_restore(flags);
}

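/* The save_timestamp*() variants below are Feather-Trace event callbacks;
 * each fills in a different subset of write_timestamp()'s arguments.
 * Presumably they are attached to trace points via the TS_* macros in
 * <litmus/trace.h>, which this file includes.
 */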
feather_callback void save_timestamp(unsigned long event)
{
	write_timestamp(event, TSK_UNKNOWN,
			raw_smp_processor_id(),
			current->pid,
			0, 1, 0,
			0, 1);
}

feather_callback void save_timestamp_def(unsigned long event,
					 unsigned long type)
{
	write_timestamp(event, type,
			raw_smp_processor_id(),
			current->pid,
			0, 1, 0,
			0, 1);
}

feather_callback void save_timestamp_task(unsigned long event,
					  unsigned long t_ptr)
{
	struct task_struct *t = (struct task_struct *) t_ptr;
	int rt = is_realtime(t);

	write_timestamp(event, rt ? TSK_RT : TSK_BE,
			raw_smp_processor_id(),
			t->pid,
			0, 1, 0,
			0, 1);
}

feather_callback void save_timestamp_cpu(unsigned long event,
					 unsigned long cpu)
{
	write_timestamp(event, TSK_UNKNOWN, cpu, current->pid,
			0, 1, 0,
			0, 1);
}

feather_callback void save_task_latency(unsigned long event,
					unsigned long when_ptr)
{
	lt_t now = litmus_clock();
	lt_t *when = (lt_t*) when_ptr;

	write_timestamp(event, TSK_RT, raw_smp_processor_id(), 0,
			0, 1, 0,
			now - *when, 0);
}

/* fake timestamp to user-reported time */
feather_callback void save_timestamp_time(unsigned long event,
					  unsigned long ptr)
{
	uint64_t* time = (uint64_t*) ptr;

	write_timestamp(event, is_realtime(current) ? TSK_RT : TSK_BE,
			raw_smp_processor_id(), current->pid,
			0, 1, 0,
			*time, 0);
}

/* Record user-reported IRQ count */
feather_callback void save_timestamp_irq(unsigned long event,
					 unsigned long irq_counter_ptr)
{
	uint64_t* irqs = (uint64_t*) irq_counter_ptr;

	write_timestamp(event, is_realtime(current) ? TSK_RT : TSK_BE,
			raw_smp_processor_id(), current->pid,
			*irqs, 0, 0,
			0, 1);
}

/* Suppress one IRQ from the irq count. Used by TS_SEND_RESCHED_END, which is
 * called from within an interrupt that is expected. */
feather_callback void save_timestamp_hide_irq(unsigned long event)
{
	write_timestamp(event, is_realtime(current) ? TSK_RT : TSK_BE,
			raw_smp_processor_id(), current->pid,
			0, 1, 1,
			0, 1);
}

/******************************************************************************/
/*                             DEVICE FILE DRIVER                             */
/******************************************************************************/

/*
 * Should be 8M; this is the maximum that we can request from the buddy-system
 * allocator (MAX_ORDER), and we might not get that much.
 */
#define NO_TIMESTAMPS (2 << 16)

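/* ftdev buffer-allocation hook: request NO_TIMESTAMPS entries and halve the
 * count until the buddy allocator can satisfy the request (or fail with
 * -ENOMEM once it reaches zero). At 16 bytes per record, the initial request
 * of 2 << 16 = 131072 timestamps comes to 2 MiB of buffer space.
 */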
static int alloc_timestamp_buffer(struct ftdev* ftdev, unsigned int idx)
{
	unsigned int count = NO_TIMESTAMPS;

	/* An overhead-tracing timestamp should be exactly 16 bytes long. */
	BUILD_BUG_ON(sizeof(struct timestamp) != 16);

	while (count && !trace_ts_buf) {
		printk("time stamp buffer: trying to allocate %u time stamps.\n",
		       count);
		ftdev->minor[idx].buf = alloc_ft_buffer(count, sizeof(struct timestamp));
		count /= 2;
	}
	return ftdev->minor[idx].buf ? 0 : -ENOMEM;
}

static void free_timestamp_buffer(struct ftdev* ftdev, unsigned int idx)
{
	free_ft_buffer(ftdev->minor[idx].buf);
	ftdev->minor[idx].buf = NULL;
}

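/* ftdev write hook: lets user space push pre-recorded struct timestamp
 * records into the stream (for example, samples taken in user space; the
 * kernel validates only that whole records are written). Each record is
 * re-sequenced by __add_timestamp_user().
 */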
static ssize_t write_timestamp_from_user(struct ft_buffer* buf, size_t len,
					 const char __user *from)
{
	ssize_t consumed = 0;
	struct timestamp ts;

	/* don't give us partial timestamps */
	if (len % sizeof(ts))
		return -EINVAL;

	while (len >= sizeof(ts)) {
		if (copy_from_user(&ts, from, sizeof(ts))) {
			consumed = -EFAULT;
			goto out;
		}
		len -= sizeof(ts);
		from += sizeof(ts);
		consumed += sizeof(ts);

		__add_timestamp_user(&ts);
	}

out:
	return consumed;
}

static int __init init_ft_overhead_trace(void)
{
	int err, cpu;

	printk("Initializing Feather-Trace overhead tracing device.\n");
	err = ftdev_init(&overhead_dev, THIS_MODULE, 1, "ft_trace");
	if (err)
		goto err_out;

	overhead_dev.alloc = alloc_timestamp_buffer;
	overhead_dev.free = free_timestamp_buffer;
	overhead_dev.write = write_timestamp_from_user;

	err = register_ftdev(&overhead_dev);
	if (err)
		goto err_dealloc;

	/* Initialize IRQ flags: clear_irq_fired() only resets the counter of
	 * the CPU it happens to run on, so clear each CPU's counter directly. */
	for_each_possible_cpu(cpu)
		atomic_set(&per_cpu(irq_fired_count, cpu), 0);

	return 0;

err_dealloc:
	ftdev_exit(&overhead_dev);
err_out:
	printk(KERN_WARNING "Could not register ft_trace module.\n");
	return err;
}

static void __exit exit_ft_overhead_trace(void)
{
	ftdev_exit(&overhead_dev);
}

module_init(init_ft_overhead_trace);
module_exit(exit_ft_overhead_trace);
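
/*
 * Usage sketch (user space; the specifics here are assumptions, not part of
 * this file): once the module is loaded, the ftdev layer exposes minor 0 of
 * the "ft_trace" device as a character device node. A tracer (e.g., ftcat
 * from the feather-trace-tools suite) opens that node, enables the desired
 * events, and reads raw 16-byte struct timestamp records; writing complete
 * records back injects user-space samples via write_timestamp_from_user().
 */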