diff options
| author | Robert Richter <robert.richter@amd.com> | 2008-12-08 20:13:25 -0500 |
|---|---|---|
| committer | Robert Richter <robert.richter@amd.com> | 2008-12-10 14:25:15 -0500 |
| commit | 211117ff09b7d81d91b7857651587128ed8b13d9 (patch) | |
| tree | 21145ea0cddbc3de803fd8621631e46b7c628057 | |
| parent | 1d7503b5dccf2b95babca050e4960e10d2633f2b (diff) | |
oprofile: fix lost sample counter
The number of lost samples could be greater than the number of
received samples. This patch fixes this. The implementation
introduces return values for add_sample() and add_code().
Signed-off-by: Robert Richter <robert.richter@amd.com>
| -rw-r--r-- | drivers/oprofile/cpu_buffer.c | 83 |
1 file changed, 53 insertions, 30 deletions
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index 7f7fc958297a..61090969158f 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c | |||
| @@ -145,32 +145,31 @@ void end_cpu_work(void) | |||
| 145 | flush_scheduled_work(); | 145 | flush_scheduled_work(); |
| 146 | } | 146 | } |
| 147 | 147 | ||
| 148 | static inline void | 148 | static inline int |
| 149 | add_sample(struct oprofile_cpu_buffer *cpu_buf, | 149 | add_sample(struct oprofile_cpu_buffer *cpu_buf, |
| 150 | unsigned long pc, unsigned long event) | 150 | unsigned long pc, unsigned long event) |
| 151 | { | 151 | { |
| 152 | struct op_entry entry; | 152 | struct op_entry entry; |
| 153 | int ret; | ||
| 153 | 154 | ||
| 154 | if (cpu_buffer_write_entry(&entry)) | 155 | ret = cpu_buffer_write_entry(&entry); |
| 155 | goto Error; | 156 | if (ret) |
| 157 | return ret; | ||
| 156 | 158 | ||
| 157 | entry.sample->eip = pc; | 159 | entry.sample->eip = pc; |
| 158 | entry.sample->event = event; | 160 | entry.sample->event = event; |
| 159 | 161 | ||
| 160 | if (cpu_buffer_write_commit(&entry)) | 162 | ret = cpu_buffer_write_commit(&entry); |
| 161 | goto Error; | 163 | if (ret) |
| 164 | return ret; | ||
| 162 | 165 | ||
| 163 | return; | 166 | return 0; |
| 164 | |||
| 165 | Error: | ||
| 166 | cpu_buf->sample_lost_overflow++; | ||
| 167 | return; | ||
| 168 | } | 167 | } |
| 169 | 168 | ||
| 170 | static inline void | 169 | static inline int |
| 171 | add_code(struct oprofile_cpu_buffer *buffer, unsigned long value) | 170 | add_code(struct oprofile_cpu_buffer *buffer, unsigned long value) |
| 172 | { | 171 | { |
| 173 | add_sample(buffer, ESCAPE_CODE, value); | 172 | return add_sample(buffer, ESCAPE_CODE, value); |
| 174 | } | 173 | } |
| 175 | 174 | ||
| 176 | /* This must be safe from any context. It's safe writing here | 175 | /* This must be safe from any context. It's safe writing here |
| @@ -201,17 +200,25 @@ static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc, | |||
| 201 | /* notice a switch from user->kernel or vice versa */ | 200 | /* notice a switch from user->kernel or vice versa */ |
| 202 | if (cpu_buf->last_is_kernel != is_kernel) { | 201 | if (cpu_buf->last_is_kernel != is_kernel) { |
| 203 | cpu_buf->last_is_kernel = is_kernel; | 202 | cpu_buf->last_is_kernel = is_kernel; |
| 204 | add_code(cpu_buf, is_kernel); | 203 | if (add_code(cpu_buf, is_kernel)) |
| 204 | goto fail; | ||
| 205 | } | 205 | } |
| 206 | 206 | ||
| 207 | /* notice a task switch */ | 207 | /* notice a task switch */ |
| 208 | if (cpu_buf->last_task != task) { | 208 | if (cpu_buf->last_task != task) { |
| 209 | cpu_buf->last_task = task; | 209 | cpu_buf->last_task = task; |
| 210 | add_code(cpu_buf, (unsigned long)task); | 210 | if (add_code(cpu_buf, (unsigned long)task)) |
| 211 | goto fail; | ||
| 211 | } | 212 | } |
| 212 | 213 | ||
| 213 | add_sample(cpu_buf, pc, event); | 214 | if (add_sample(cpu_buf, pc, event)) |
| 215 | goto fail; | ||
| 216 | |||
| 214 | return 1; | 217 | return 1; |
| 218 | |||
| 219 | fail: | ||
| 220 | cpu_buf->sample_lost_overflow++; | ||
| 221 | return 0; | ||
| 215 | } | 222 | } |
| 216 | 223 | ||
| 217 | static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf) | 224 | static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf) |
| @@ -266,37 +273,49 @@ void oprofile_add_ibs_sample(struct pt_regs * const regs, | |||
| 266 | int is_kernel = !user_mode(regs); | 273 | int is_kernel = !user_mode(regs); |
| 267 | struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); | 274 | struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); |
| 268 | struct task_struct *task; | 275 | struct task_struct *task; |
| 276 | int fail = 0; | ||
| 269 | 277 | ||
| 270 | cpu_buf->sample_received++; | 278 | cpu_buf->sample_received++; |
| 271 | 279 | ||
| 272 | /* notice a switch from user->kernel or vice versa */ | 280 | /* notice a switch from user->kernel or vice versa */ |
| 273 | if (cpu_buf->last_is_kernel != is_kernel) { | 281 | if (cpu_buf->last_is_kernel != is_kernel) { |
| 282 | if (add_code(cpu_buf, is_kernel)) | ||
| 283 | goto fail; | ||
| 274 | cpu_buf->last_is_kernel = is_kernel; | 284 | cpu_buf->last_is_kernel = is_kernel; |
| 275 | add_code(cpu_buf, is_kernel); | ||
| 276 | } | 285 | } |
| 277 | 286 | ||
| 278 | /* notice a task switch */ | 287 | /* notice a task switch */ |
| 279 | if (!is_kernel) { | 288 | if (!is_kernel) { |
| 280 | task = current; | 289 | task = current; |
| 281 | if (cpu_buf->last_task != task) { | 290 | if (cpu_buf->last_task != task) { |
| 291 | if (add_code(cpu_buf, (unsigned long)task)) | ||
| 292 | goto fail; | ||
| 282 | cpu_buf->last_task = task; | 293 | cpu_buf->last_task = task; |
| 283 | add_code(cpu_buf, (unsigned long)task); | ||
| 284 | } | 294 | } |
| 285 | } | 295 | } |
| 286 | 296 | ||
| 287 | add_code(cpu_buf, ibs_code); | 297 | fail = fail || add_code(cpu_buf, ibs_code); |
| 288 | add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]); | 298 | fail = fail || add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]); |
| 289 | add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]); | 299 | fail = fail || add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]); |
| 290 | add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]); | 300 | fail = fail || add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]); |
| 291 | 301 | ||
| 292 | if (ibs_code == IBS_OP_BEGIN) { | 302 | if (ibs_code == IBS_OP_BEGIN) { |
| 293 | add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]); | 303 | fail = fail || add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]); |
| 294 | add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]); | 304 | fail = fail || add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]); |
| 295 | add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]); | 305 | fail = fail || add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]); |
| 296 | } | 306 | } |
| 297 | 307 | ||
| 308 | if (fail) | ||
| 309 | goto fail; | ||
| 310 | |||
| 298 | if (backtrace_depth) | 311 | if (backtrace_depth) |
| 299 | oprofile_ops.backtrace(regs, backtrace_depth); | 312 | oprofile_ops.backtrace(regs, backtrace_depth); |
| 313 | |||
| 314 | return; | ||
| 315 | |||
| 316 | fail: | ||
| 317 | cpu_buf->sample_lost_overflow++; | ||
| 318 | return; | ||
| 300 | } | 319 | } |
| 301 | 320 | ||
| 302 | #endif | 321 | #endif |
| @@ -318,13 +337,17 @@ void oprofile_add_trace(unsigned long pc) | |||
| 318 | * broken frame can give an eip with the same value as an | 337 | * broken frame can give an eip with the same value as an |
| 319 | * escape code, abort the trace if we get it | 338 | * escape code, abort the trace if we get it |
| 320 | */ | 339 | */ |
| 321 | if (pc == ESCAPE_CODE) { | 340 | if (pc == ESCAPE_CODE) |
| 322 | cpu_buf->tracing = 0; | 341 | goto fail; |
| 323 | cpu_buf->backtrace_aborted++; | 342 | |
| 324 | return; | 343 | if (add_sample(cpu_buf, pc, 0)) |
| 325 | } | 344 | goto fail; |
| 326 | 345 | ||
| 327 | add_sample(cpu_buf, pc, 0); | 346 | return; |
| 347 | fail: | ||
| 348 | cpu_buf->tracing = 0; | ||
| 349 | cpu_buf->backtrace_aborted++; | ||
| 350 | return; | ||
| 328 | } | 351 | } |
| 329 | 352 | ||
| 330 | /* | 353 | /* |
