Diffstat (limited to 'litmus')

 litmus/trace.c | 47 +++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 45 insertions(+), 2 deletions(-)
diff --git a/litmus/trace.c b/litmus/trace.c
index 5d77806da647..3c35c527e805 100644
--- a/litmus/trace.c
+++ b/litmus/trace.c
@@ -16,6 +16,35 @@ static struct ftdev overhead_dev;
 
 static unsigned int ts_seq_no = 0;
 
+DEFINE_PER_CPU(atomic_t, irq_fired_count);
+
+static inline void clear_irq_fired(void)
+{
+	atomic_set(&__raw_get_cpu_var(irq_fired_count), 0);
+}
+
+static inline unsigned int get_and_clear_irq_fired(void)
+{
+	/* This is potentially not atomic since we might migrate if
+	 * preemptions are not disabled. As a tradeoff between
+	 * accuracy and tracing overheads, this seems acceptable.
+	 * If it proves to be a problem, then one could add a callback
+	 * from the migration code to invalidate irq_fired_count.
+	 */
+	return atomic_xchg(&__raw_get_cpu_var(irq_fired_count), 0);
+}
+
+static inline void __save_irq_flags(struct timestamp *ts)
+{
+	unsigned int irq_count;
+
+	irq_count = get_and_clear_irq_fired();
+	/* Store how many interrupts occurred. */
+	ts->irq_count = irq_count;
+	/* Extra flag because ts->irq_count overflows quickly. */
+	ts->irq_flag = irq_count > 0;
+}
+
 static inline void __save_timestamp_cpu(unsigned long event,
 					uint8_t type, uint8_t cpu)
 {
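The hunk above only drains the new per-CPU counter; the increment side is not part of this diff. A minimal sketch of the counting hook, assuming it is invoked once per handled interrupt from the IRQ entry path (the name ft_irq_fired() and its call site are assumptions, not shown in this commit):

/* Assumed hook (not in this diff): called from the interrupt
 * entry path, where preemption is already disabled, so the
 * local-CPU reference cannot race with a migration. */
void ft_irq_fired(void)
{
	atomic_inc(&__get_cpu_var(irq_fired_count));
}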
@@ -24,10 +53,13 @@ static inline void __save_timestamp_cpu(unsigned long event,
 	seq_no = fetch_and_inc((int *) &ts_seq_no);
 	if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) {
 		ts->event = event;
-		ts->timestamp = ft_timestamp();
 		ts->seq_no = seq_no;
 		ts->cpu = cpu;
 		ts->task_type = type;
+		__save_irq_flags(ts);
+		barrier();
+		/* prevent re-ordering of ft_timestamp() */
+		ts->timestamp = ft_timestamp();
 		ft_buffer_finish_write(trace_ts_buf, ts);
 	}
 }
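This hunk also reorders the writes: ts->timestamp used to be taken first and is now taken last, after __save_irq_flags(), so the cycle counter is read only once all bookkeeping is done. barrier() is the kernel's compiler-only fence; roughly, it expands as sketched below, and without it the compiler could hoist the ft_timestamp() read above the preceding stores, folding record-keeping cost into the measured interval:

/* Compiler-only fence, approximately as defined in the kernel: */
#define barrier() __asm__ __volatile__("" : : : "memory")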
@@ -40,6 +72,7 @@ static void __add_timestamp_user(struct timestamp *pre_recorded)
 	if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) {
 		*ts = *pre_recorded;
 		ts->seq_no = seq_no;
+		__save_irq_flags(ts);
 		ft_buffer_finish_write(trace_ts_buf, ts);
 	}
 }
@@ -90,6 +123,7 @@ feather_callback void save_task_latency(unsigned long event,
 		ts->seq_no = seq_no;
 		ts->cpu = cpu;
 		ts->task_type = TSK_RT;
+		__save_irq_flags(ts);
 		ft_buffer_finish_write(trace_ts_buf, ts);
 	}
 }
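With all three writers (__save_timestamp_cpu(), __add_timestamp_user(), and save_task_latency()) now stamping IRQ information, post-processing can discard samples that were disturbed by interrupts. A tiny illustrative filter (the name is hypothetical, not part of this commit):

/* Hypothetical post-processing predicate: a sample is usable
 * only if no interrupt fired while it was being recorded. */
static int sample_is_clean(const struct timestamp *ts)
{
	return !ts->irq_flag;
}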
@@ -107,6 +141,10 @@ feather_callback void save_task_latency(unsigned long event,
 static int alloc_timestamp_buffer(struct ftdev* ftdev, unsigned int idx)
 {
 	unsigned int count = NO_TIMESTAMPS;
+
+	/* An overhead-tracing timestamp should be exactly 16 bytes long. */
+	BUILD_BUG_ON(sizeof(struct timestamp) != 16);
+
 	while (count && !trace_ts_buf) {
 		printk("time stamp buffer: trying to allocate %u time stamps.\n", count);
 		ftdev->minor[idx].buf = alloc_ft_buffer(count, sizeof(struct timestamp));
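The BUILD_BUG_ON pins struct timestamp at exactly 16 bytes so records pack densely and evenly into the trace buffer. The definition lives in a header this diff does not touch; one layout that is consistent with the fields assigned above (the exact field widths are an assumption):

/* Hypothetical 16-byte layout matching the BUILD_BUG_ON: */
struct timestamp {
	uint64_t timestamp;   /* cycle counter, written last         */
	uint32_t seq_no;      /* global sequence number              */
	uint8_t  cpu;         /* CPU that recorded the event         */
	uint8_t  event;       /* event ID                            */
	uint8_t  task_type;   /* e.g., TSK_RT                        */
	uint8_t  irq_flag:1,  /* any interrupts during recording?    */
		 irq_count:7; /* how many (hence the quick overflow) */
};                            /* 8 + 4 + 4*1 = 16 bytes            */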
@@ -149,7 +187,7 @@ out:
 
 static int __init init_ft_overhead_trace(void)
 {
-	int err;
+	int err, cpu;
 
 	printk("Initializing Feather-Trace overhead tracing device.\n");
 	err = ftdev_init(&overhead_dev, THIS_MODULE, 1, "ft_trace");
@@ -164,6 +202,11 @@ static int __init init_ft_overhead_trace(void)
 	if (err)
 		goto err_dealloc;
 
+	/* initialize IRQ flags */
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+		clear_irq_fired();
+	}
+
 	return 0;
 
 err_dealloc:
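One caveat in the final hunk: clear_irq_fired() resets only the counter of the CPU the init code happens to run on, so the NR_CPUS loop clears the same local counter repeatedly rather than one counter per CPU. In practice the counters start out zeroed anyway (per-CPU static data is zero-initialized), but an explicit per-CPU reset would look more like this sketch (standard accessors; the intent is assumed):

/* Sketch: reset every CPU's counter, not just the local one. */
for_each_possible_cpu(cpu)
	atomic_set(&per_cpu(irq_fired_count, cpu), 0);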