aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBjoern Brandenburg <bbb@mpi-sws.org>2012-09-04 05:27:56 -0400
committerBjoern Brandenburg <bbb@mpi-sws.org>2012-10-18 16:14:04 -0400
commitb1a59c1438fa81ba1eca68fea665a913188076b2 (patch)
treeabc7c7b5262ecbe1902f36f3284fd2f307b52402
parent00b4b17dc1f65a1e5de4648200c826cc2eef37f4 (diff)
Make Feather-Trace timestamp collection non-preemptive
If the tracing code is interrupted / preempted in between the time that a sequence number is drawn and the time that the trace record is allocated, then the trace file will contain "out of order" events. These are difficult to detect during post-processing and can create artificial "outliers". This patch briefly disables local interrupts to avoid this. While at it, de-duplicate the timestamp recording code.
-rw-r--r--litmus/trace.c138
1 file changed, 71 insertions, 67 deletions
diff --git a/litmus/trace.c b/litmus/trace.c
index 45cd662c021d..4e3d116fe050 100644
--- a/litmus/trace.c
+++ b/litmus/trace.c
@@ -43,68 +43,97 @@ static inline unsigned int get_and_clear_irq_fired(void)
43 return atomic_xchg(&__raw_get_cpu_var(irq_fired_count), 0); 43 return atomic_xchg(&__raw_get_cpu_var(irq_fired_count), 0);
44} 44}
45 45
46static inline void __save_irq_flags(struct timestamp *ts) 46static inline void save_irq_flags(struct timestamp *ts, unsigned int irq_count)
47{ 47{
48 unsigned int irq_count;
49
50 irq_count = get_and_clear_irq_fired();
51 /* Store how many interrupts occurred. */ 48 /* Store how many interrupts occurred. */
52 ts->irq_count = irq_count; 49 ts->irq_count = irq_count;
53 /* Extra flag because ts->irq_count overflows quickly. */ 50 /* Extra flag because ts->irq_count overflows quickly. */
54 ts->irq_flag = irq_count > 0; 51 ts->irq_flag = irq_count > 0;
52
55} 53}
56 54
57static inline void __save_timestamp_cpu(unsigned long event, 55static inline void write_timestamp(uint8_t event,
58 uint8_t type, uint8_t cpu, 56 uint8_t type,
59 uint16_t pid_fragment) 57 uint8_t cpu,
58 uint16_t pid_fragment,
59 unsigned int irq_count,
60 int record_irq,
61 uint64_t timestamp,
62 int record_timestamp)
60{ 63{
64 unsigned long flags;
61 unsigned int seq_no; 65 unsigned int seq_no;
62 struct timestamp *ts; 66 struct timestamp *ts;
67
68 /* Avoid preemptions while recording the timestamp. This reduces the
69 * number of "out of order" timestamps in the stream and makes
70 * post-processing easier. */
71
72 local_irq_save(flags);
73
63 seq_no = fetch_and_inc((int *) &ts_seq_no); 74 seq_no = fetch_and_inc((int *) &ts_seq_no);
64 if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) { 75 if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) {
65 ts->event = event; 76 ts->event = event;
66 ts->seq_no = seq_no; 77 ts->seq_no = seq_no;
78
79 ts->task_type = type;
67 ts->pid = pid_fragment; 80 ts->pid = pid_fragment;
81
68 ts->cpu = cpu; 82 ts->cpu = cpu;
69 ts->task_type = type; 83
70 __save_irq_flags(ts); 84 if (record_irq)
71 barrier(); 85 irq_count = get_and_clear_irq_fired();
72 /* prevent re-ordering of ft_timestamp() */ 86
73 ts->timestamp = ft_timestamp(); 87 save_irq_flags(ts, irq_count);
88
89 if (record_timestamp)
90 timestamp = ft_timestamp();
91
92 ts->timestamp = timestamp;
74 ft_buffer_finish_write(trace_ts_buf, ts); 93 ft_buffer_finish_write(trace_ts_buf, ts);
75 } 94 }
95
96 local_irq_restore(flags);
76} 97}
77 98
78static void __add_timestamp_user(struct timestamp *pre_recorded) 99static void __add_timestamp_user(struct timestamp *pre_recorded)
79{ 100{
101 unsigned long flags;
80 unsigned int seq_no; 102 unsigned int seq_no;
81 struct timestamp *ts; 103 struct timestamp *ts;
104
105
106 local_irq_save(flags);
107
82 seq_no = fetch_and_inc((int *) &ts_seq_no); 108 seq_no = fetch_and_inc((int *) &ts_seq_no);
83 if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) { 109 if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) {
84 *ts = *pre_recorded; 110 *ts = *pre_recorded;
85 ts->seq_no = seq_no; 111 ts->seq_no = seq_no;
86 ts->cpu = raw_smp_processor_id(); 112 ts->cpu = raw_smp_processor_id();
87 __save_irq_flags(ts); 113 save_irq_flags(ts, get_and_clear_irq_fired());
88 ft_buffer_finish_write(trace_ts_buf, ts); 114 ft_buffer_finish_write(trace_ts_buf, ts);
89 } 115 }
90}
91 116
92static inline void __save_timestamp(unsigned long event, 117 local_irq_restore(flags);
93 uint8_t type)
94{
95 __save_timestamp_cpu(event, type, raw_smp_processor_id(),
96 current->pid);
97} 118}
98 119
99feather_callback void save_timestamp(unsigned long event) 120feather_callback void save_timestamp(unsigned long event)
100{ 121{
101 __save_timestamp(event, TSK_UNKNOWN); 122 write_timestamp(event, TSK_UNKNOWN,
123 raw_smp_processor_id(),
124 current->pid,
125 0, 1,
126 0, 1);
102} 127}
103 128
104feather_callback void save_timestamp_def(unsigned long event, 129feather_callback void save_timestamp_def(unsigned long event,
105 unsigned long type) 130 unsigned long type)
106{ 131{
107 __save_timestamp(event, (uint8_t) type); 132 write_timestamp(event, type,
133 raw_smp_processor_id(),
134 current->pid,
135 0, 1,
136 0, 1);
108} 137}
109 138
110feather_callback void save_timestamp_task(unsigned long event, 139feather_callback void save_timestamp_task(unsigned long event,
@@ -112,16 +141,19 @@ feather_callback void save_timestamp_task(unsigned long event,
112{ 141{
113 struct task_struct *t = (struct task_struct *) t_ptr; 142 struct task_struct *t = (struct task_struct *) t_ptr;
114 int rt = is_realtime(t); 143 int rt = is_realtime(t);
115 __save_timestamp_cpu(event, rt ? TSK_RT : TSK_BE, 144
116 raw_smp_processor_id(), 145 write_timestamp(event, rt ? TSK_RT : TSK_BE,
117 t->pid); 146 raw_smp_processor_id(),
147 t->pid,
148 0, 1,
149 0, 1);
118} 150}
119 151
120feather_callback void save_timestamp_cpu(unsigned long event, 152feather_callback void save_timestamp_cpu(unsigned long event,
121 unsigned long cpu) 153 unsigned long cpu)
122{ 154{
123 __save_timestamp_cpu(event, TSK_UNKNOWN, cpu, 155 write_timestamp(event, TSK_UNKNOWN, cpu, current->pid,
124 current->pid); 156 0, 1, 0, 1);
125} 157}
126 158
127feather_callback void save_task_latency(unsigned long event, 159feather_callback void save_task_latency(unsigned long event,
@@ -129,21 +161,10 @@ feather_callback void save_task_latency(unsigned long event,
129{ 161{
130 lt_t now = litmus_clock(); 162 lt_t now = litmus_clock();
131 lt_t *when = (lt_t*) when_ptr; 163 lt_t *when = (lt_t*) when_ptr;
132 unsigned int seq_no;
133 int cpu = raw_smp_processor_id();
134 struct timestamp *ts;
135 164
136 seq_no = fetch_and_inc((int *) &ts_seq_no); 165 write_timestamp(event, TSK_RT, raw_smp_processor_id(), 0,
137 if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) { 166 0, 1,
138 ts->event = event; 167 now - *when, 0);
139 ts->timestamp = now - *when;
140 ts->seq_no = seq_no;
141 ts->cpu = cpu;
142 ts->pid = 0;
143 ts->task_type = TSK_RT;
144 __save_irq_flags(ts);
145 ft_buffer_finish_write(trace_ts_buf, ts);
146 }
147} 168}
148 169
149/* fake timestamp to user-reported time */ 170/* fake timestamp to user-reported time */
@@ -151,19 +172,11 @@ void save_timestamp_time(unsigned long event,
151 unsigned long ptr) 172 unsigned long ptr)
152{ 173{
153 uint64_t* time = (uint64_t*) ptr; 174 uint64_t* time = (uint64_t*) ptr;
154 unsigned int seq_no; 175
155 struct timestamp *ts; 176 write_timestamp(event, is_realtime(current) ? TSK_RT : TSK_BE,
156 seq_no = fetch_and_inc((int *) &ts_seq_no); 177 raw_smp_processor_id(), current->pid,
157 if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) { 178 0, 1,
158 ts->event = event; 179 *time, 0);
159 ts->seq_no = seq_no;
160 ts->pid = current->pid;
161 ts->cpu = raw_smp_processor_id();
162 ts->task_type = is_realtime(current) ? TSK_RT : TSK_BE;
163 __save_irq_flags(ts);
164 ts->timestamp = *time;
165 ft_buffer_finish_write(trace_ts_buf, ts);
166 }
167} 180}
168 181
169/* Record user-reported IRQ count */ 182/* Record user-reported IRQ count */
@@ -171,20 +184,11 @@ void save_timestamp_irq(unsigned long event,
171 unsigned long irq_counter_ptr) 184 unsigned long irq_counter_ptr)
172{ 185{
173 uint64_t* irqs = (uint64_t*) irq_counter_ptr; 186 uint64_t* irqs = (uint64_t*) irq_counter_ptr;
174 unsigned int seq_no; 187
175 struct timestamp *ts; 188 write_timestamp(event, is_realtime(current) ? TSK_RT : TSK_BE,
176 seq_no = fetch_and_inc((int *) &ts_seq_no); 189 raw_smp_processor_id(), current->pid,
177 if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) { 190 *irqs, 0,
178 ts->event = event; 191 0, 1);
179 ts->seq_no = seq_no;
180 ts->pid = current->pid;
181 ts->cpu = raw_smp_processor_id();
182 ts->task_type = is_realtime(current) ? TSK_RT : TSK_BE;
183 ts->irq_count = *irqs;
184 ts->irq_flag = *irqs > 0;
185 ts->timestamp = ft_timestamp();
186 ft_buffer_finish_write(trace_ts_buf, ts);
187 }
188} 192}
189 193
190/******************************************************************************/ 194/******************************************************************************/