diff options
author | Robert Richter <robert.richter@amd.com> | 2008-12-08 05:59:52 -0500 |
---|---|---|
committer | Robert Richter <robert.richter@amd.com> | 2008-12-10 14:03:35 -0500 |
commit | 1d7503b5dccf2b95babca050e4960e10d2633f2b (patch) | |
tree | 99e032e01e451a4dfff5514486275d4be7b13dce /drivers/oprofile/cpu_buffer.c | |
parent | 6dad828b76c7224a22ddc9ce7aa495d994f03b31 (diff) |
oprofile: remove nr_available_slots()
This function is no longer available after the port to the new ring
buffer. Its removal can lead to incomplete sampling sequences since
IBS samples and backtraces are transferred in multiple samples. Due to
a full buffer, samples could be lost any time. The userspace daemon
has to live with such incomplete sampling sequences as long as the
data within one sample is consistent.
This will be fixed by changing the internal buffer data so that all data
of one IBS sample or a backtrace is packed in a single ring buffer
entry. This is possible since the new ring buffer supports variable
data size.
Signed-off-by: Robert Richter <robert.richter@amd.com>
Diffstat (limited to 'drivers/oprofile/cpu_buffer.c')
-rw-r--r-- | drivers/oprofile/cpu_buffer.c | 34 |
1 files changed, 0 insertions, 34 deletions
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index eb280ec96e24..7f7fc958297a 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c | |||
@@ -145,18 +145,6 @@ void end_cpu_work(void) | |||
145 | flush_scheduled_work(); | 145 | flush_scheduled_work(); |
146 | } | 146 | } |
147 | 147 | ||
148 | /* compute number of available slots in cpu_buffer queue */ | ||
149 | static unsigned long nr_available_slots(struct oprofile_cpu_buffer const *b) | ||
150 | { | ||
151 | unsigned long head = b->head_pos; | ||
152 | unsigned long tail = b->tail_pos; | ||
153 | |||
154 | if (tail > head) | ||
155 | return (tail - head) - 1; | ||
156 | |||
157 | return tail + (b->buffer_size - head) - 1; | ||
158 | } | ||
159 | |||
160 | static inline void | 148 | static inline void |
161 | add_sample(struct oprofile_cpu_buffer *cpu_buf, | 149 | add_sample(struct oprofile_cpu_buffer *cpu_buf, |
162 | unsigned long pc, unsigned long event) | 150 | unsigned long pc, unsigned long event) |
@@ -206,11 +194,6 @@ static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc, | |||
206 | return 0; | 194 | return 0; |
207 | } | 195 | } |
208 | 196 | ||
209 | if (nr_available_slots(cpu_buf) < 3) { | ||
210 | cpu_buf->sample_lost_overflow++; | ||
211 | return 0; | ||
212 | } | ||
213 | |||
214 | is_kernel = !!is_kernel; | 197 | is_kernel = !!is_kernel; |
215 | 198 | ||
216 | task = current; | 199 | task = current; |
@@ -233,11 +216,6 @@ static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc, | |||
233 | 216 | ||
234 | static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf) | 217 | static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf) |
235 | { | 218 | { |
236 | if (nr_available_slots(cpu_buf) < 4) { | ||
237 | cpu_buf->sample_lost_overflow++; | ||
238 | return 0; | ||
239 | } | ||
240 | |||
241 | add_code(cpu_buf, CPU_TRACE_BEGIN); | 219 | add_code(cpu_buf, CPU_TRACE_BEGIN); |
242 | cpu_buf->tracing = 1; | 220 | cpu_buf->tracing = 1; |
243 | return 1; | 221 | return 1; |
@@ -291,12 +269,6 @@ void oprofile_add_ibs_sample(struct pt_regs * const regs, | |||
291 | 269 | ||
292 | cpu_buf->sample_received++; | 270 | cpu_buf->sample_received++; |
293 | 271 | ||
294 | if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) { | ||
295 | /* we can't backtrace since we lost the source of this event */ | ||
296 | cpu_buf->sample_lost_overflow++; | ||
297 | return; | ||
298 | } | ||
299 | |||
300 | /* notice a switch from user->kernel or vice versa */ | 272 | /* notice a switch from user->kernel or vice versa */ |
301 | if (cpu_buf->last_is_kernel != is_kernel) { | 273 | if (cpu_buf->last_is_kernel != is_kernel) { |
302 | cpu_buf->last_is_kernel = is_kernel; | 274 | cpu_buf->last_is_kernel = is_kernel; |
@@ -342,12 +314,6 @@ void oprofile_add_trace(unsigned long pc) | |||
342 | if (!cpu_buf->tracing) | 314 | if (!cpu_buf->tracing) |
343 | return; | 315 | return; |
344 | 316 | ||
345 | if (nr_available_slots(cpu_buf) < 1) { | ||
346 | cpu_buf->tracing = 0; | ||
347 | cpu_buf->sample_lost_overflow++; | ||
348 | return; | ||
349 | } | ||
350 | |||
351 | /* | 317 | /* |
352 | * broken frame can give an eip with the same value as an | 318 | * broken frame can give an eip with the same value as an |
353 | * escape code, abort the trace if we get it | 319 | * escape code, abort the trace if we get it |