author	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2012-01-24 03:36:12 -0500
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2012-01-30 13:59:46 -0500
commit	dff289454b7f15885eabe7579248fc5a2d6c35e8 (patch)
tree	79180eb8e7094014dced059873a741698e28090b
parent	5bd89a34d89f252619d83fef3c9325e24311389e (diff)
Feather-Trace: keep track of interrupt-related interference. (wip-litmus3.0-2011.2)
Increment a processor-local counter whenever an interrupt is handled. This allows Feather-Trace to record, with each timestamp, a (truncated) interrupt counter and a flag that reports interference from interrupts. Post-processing tools can use these fields to filter out samples that were disturbed by interrupts.
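For example, a post-processing tool could simply discard every sample whose irq_flag bit is set. A minimal sketch in C: the record layout mirrors struct timestamp as modified below, but the placement of the 64-bit timestamp field before seq_no, the exact bitfield packing, and the raw-dump input format are assumptions, not part of this patch.

#include <stdint.h>
#include <stdio.h>

/* Assumed mirror of the kernel's struct timestamp (16 bytes; see the
 * BUILD_BUG_ON added to litmus/trace.c below). */
struct timestamp {
	uint64_t timestamp;
	uint32_t seq_no;
	uint8_t  cpu;
	uint8_t  event;
	uint8_t  task_type:2;
	uint8_t  irq_flag:1;
	uint8_t  irq_count:5;
};

int main(void)
{
	struct timestamp ts;

	/* Read raw 16-byte records from stdin; keep undisturbed samples. */
	while (fread(&ts, sizeof(ts), 1, stdin) == 1) {
		if (ts.irq_flag)
			continue; /* at least one interrupt fired: drop it */
		printf("event=%u cpu=%u seq=%u ts=%llu\n",
		       (unsigned)ts.event, (unsigned)ts.cpu,
		       (unsigned)ts.seq_no,
		       (unsigned long long)ts.timestamp);
	}
	return 0;
}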
 include/linux/hardirq.h    |  4 ++++
 include/litmus/trace.h     |  4 +++-
 include/litmus/trace_irq.h | 21 +++++++++++++++++++++
 litmus/trace.c             | 47 +++++++++++++++++++++++++++++++++++++++++++---
 4 files changed, 73 insertions(+), 3 deletions(-)
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index ba362171e8ae..e6dd5a456bae 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -6,6 +6,8 @@
 #include <linux/ftrace_irq.h>
 #include <asm/hardirq.h>
 
+#include <litmus/trace_irq.h>
+
 /*
  * We put the hardirq and softirq counter into the preemption
  * counter. The bitmask has the following meaning:
@@ -186,6 +188,7 @@ extern void rcu_nmi_exit(void);
 		account_system_vtime(current);		\
 		add_preempt_count(HARDIRQ_OFFSET);	\
 		trace_hardirq_enter();			\
+		ft_irq_fired();				\
 	} while (0)
 
 /*
@@ -216,6 +219,7 @@ extern void irq_exit(void);
 		lockdep_off();					\
 		rcu_nmi_enter();				\
 		trace_hardirq_enter();				\
+		ft_irq_fired();					\
 	} while (0)
 
 #define nmi_exit()						\
diff --git a/include/litmus/trace.h b/include/litmus/trace.h
index e2926a08c2f4..e809376d6487 100644
--- a/include/litmus/trace.h
+++ b/include/litmus/trace.h
@@ -20,7 +20,9 @@ struct timestamp {
 	uint32_t seq_no;
 	uint8_t cpu;
 	uint8_t event;
-	uint8_t task_type;
+	uint8_t task_type:2;
+	uint8_t irq_flag:1;
+	uint8_t irq_count:5;
 };
 
 /* tracing callbacks */
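Note that irq_count is only five bits wide, so the stored count wraps modulo 32 and a heavily disturbed sample could look lightly disturbed; the separate one-bit irq_flag preserves the unambiguous "was disturbed at all" answer (cf. the "overflows quickly" comment in __save_irq_flags below). A toy illustration of the truncation, using the same bitfield widths as the patch:

#include <stdint.h>
#include <stdio.h>

struct flags {                 /* just the packed byte of struct timestamp */
	uint8_t task_type:2;
	uint8_t irq_flag:1;
	uint8_t irq_count:5;
};

int main(void)
{
	struct flags f = {0};
	unsigned int fired = 33;   /* interrupts since the last readout */

	f.irq_count = fired;       /* 5-bit field keeps only 33 % 32 = 1 */
	f.irq_flag  = fired > 0;   /* flag still records the disturbance */

	printf("irq_count=%u irq_flag=%u\n",
	       (unsigned)f.irq_count, (unsigned)f.irq_flag);
	return 0;
}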
diff --git a/include/litmus/trace_irq.h b/include/litmus/trace_irq.h
new file mode 100644
index 000000000000..f18b127a089d
--- /dev/null
+++ b/include/litmus/trace_irq.h
@@ -0,0 +1,21 @@
+#ifndef _LITMUS_TRACE_IRQ_H_
+#define _LITMUS_TRACE_IRQ_H_
+
+#ifdef CONFIG_SCHED_OVERHEAD_TRACE
+
+extern DEFINE_PER_CPU(atomic_t, irq_fired_count);
+
+static inline void ft_irq_fired(void)
+{
+	/* Only called with preemptions disabled. */
+	atomic_inc(&__get_cpu_var(irq_fired_count));
+}
+
+
+#else
+
+#define ft_irq_fired() /* nothing to do */
+
+#endif
+
+#endif
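The counter is later read out with an atomic exchange (get_and_clear_irq_fired() in litmus/trace.c below), so an increment racing with the readout is carried over into the next sample rather than lost. A userspace analogue of this read-and-reset pattern, sketched with C11 atomics in place of the kernel's atomic_inc/atomic_xchg and without the per-CPU dimension:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint irq_fired_count;   /* kernel: one instance per CPU */

static void ft_irq_fired(void)        /* kernel: on every IRQ/NMI entry */
{
	atomic_fetch_add(&irq_fired_count, 1);
}

static unsigned int get_and_clear_irq_fired(void)
{
	/* Exchange with zero: a concurrent increment lands either before
	 * or after the swap, but is never silently dropped. */
	return atomic_exchange(&irq_fired_count, 0);
}

int main(void)
{
	ft_irq_fired();
	ft_irq_fired();
	printf("fired: %u\n", get_and_clear_irq_fired());  /* prints 2 */
	printf("fired: %u\n", get_and_clear_irq_fired());  /* prints 0 */
	return 0;
}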
diff --git a/litmus/trace.c b/litmus/trace.c
index 5d77806da647..3c35c527e805 100644
--- a/litmus/trace.c
+++ b/litmus/trace.c
@@ -16,6 +16,35 @@ static struct ftdev overhead_dev;
 
 static unsigned int ts_seq_no = 0;
 
+DEFINE_PER_CPU(atomic_t, irq_fired_count);
+
+static inline void clear_irq_fired(void)
+{
+	atomic_set(&__raw_get_cpu_var(irq_fired_count), 0);
+}
+
+static inline unsigned int get_and_clear_irq_fired(void)
+{
+	/* This is potentially not atomic since we might migrate if
+	 * preemptions are not disabled. As a tradeoff between
+	 * accuracy and tracing overheads, this seems acceptable.
+	 * If it proves to be a problem, then one could add a callback
+	 * from the migration code to invalidate irq_fired_count.
+	 */
+	return atomic_xchg(&__raw_get_cpu_var(irq_fired_count), 0);
+}
+
+static inline void __save_irq_flags(struct timestamp *ts)
+{
+	unsigned int irq_count;
+
+	irq_count = get_and_clear_irq_fired();
+	/* Store how many interrupts occurred. */
+	ts->irq_count = irq_count;
+	/* Extra flag because ts->irq_count overflows quickly. */
+	ts->irq_flag = irq_count > 0;
+}
+
 static inline void __save_timestamp_cpu(unsigned long event,
 					uint8_t type, uint8_t cpu)
 {
@@ -24,10 +53,13 @@ static inline void __save_timestamp_cpu(unsigned long event,
 	seq_no = fetch_and_inc((int *) &ts_seq_no);
 	if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) {
 		ts->event = event;
-		ts->timestamp = ft_timestamp();
 		ts->seq_no = seq_no;
 		ts->cpu = cpu;
 		ts->task_type = type;
+		__save_irq_flags(ts);
+		barrier();
+		/* prevent re-ordering of ft_timestamp() */
+		ts->timestamp = ft_timestamp();
 		ft_buffer_finish_write(trace_ts_buf, ts);
 	}
 }
@@ -40,6 +72,7 @@ static void __add_timestamp_user(struct timestamp *pre_recorded)
 	if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) {
 		*ts = *pre_recorded;
 		ts->seq_no = seq_no;
+		__save_irq_flags(ts);
 		ft_buffer_finish_write(trace_ts_buf, ts);
 	}
 }
@@ -90,6 +123,7 @@ feather_callback void save_task_latency(unsigned long event,
 		ts->seq_no = seq_no;
 		ts->cpu = cpu;
 		ts->task_type = TSK_RT;
+		__save_irq_flags(ts);
 		ft_buffer_finish_write(trace_ts_buf, ts);
 	}
 }
@@ -107,6 +141,10 @@ feather_callback void save_task_latency(unsigned long event,
 static int alloc_timestamp_buffer(struct ftdev* ftdev, unsigned int idx)
 {
 	unsigned int count = NO_TIMESTAMPS;
+
+	/* An overhead-tracing timestamp should be exactly 16 bytes long. */
+	BUILD_BUG_ON(sizeof(struct timestamp) != 16);
+
 	while (count && !trace_ts_buf) {
 		printk("time stamp buffer: trying to allocate %u time stamps.\n", count);
 		ftdev->minor[idx].buf = alloc_ft_buffer(count, sizeof(struct timestamp));
@@ -149,7 +187,7 @@ out:
 
 static int __init init_ft_overhead_trace(void)
 {
-	int err;
+	int err, cpu;
 
 	printk("Initializing Feather-Trace overhead tracing device.\n");
 	err = ftdev_init(&overhead_dev, THIS_MODULE, 1, "ft_trace");
@@ -164,6 +202,11 @@ static int __init init_ft_overhead_trace(void)
 	if (err)
 		goto err_dealloc;
 
+	/* initialize IRQ flags */
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+		clear_irq_fired();
+	}
+
 	return 0;
 
 err_dealloc: