author		Ingo Molnar <mingo@elte.hu>	2008-01-11 07:35:54 -0500
committer	Jens Axboe <jens.axboe@oracle.com>	2008-01-11 07:35:54 -0500
commit		2997c8c4a0b179e8b834a7f30ba4323f2c60ccf4 (patch)
tree		319fa1c24c380544233890d6ff480a181bf80e96	/block/blktrace.c
parent		a24eab1ed506f3e0bcbcd3f619558935549d4ace (diff)
block: fix blktrace timestamps
David Dillow reported broken blktrace timestamps. The reason
is cpu_clock(), which is not a global time source.
Fix blktrace timestamps by using ktime_get(), like the networking
code does for packet timestamps. This also removes a whole lot
of complexity from blktrace.c and shrinks the code by 500 bytes:
   text	   data	    bss	    dec	    hex	filename
   2888	    124	     44	   3056	    bf0	blktrace.o.before
   2390	    116	     44	   2550	    9f6	blktrace.o.after
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
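
For context, a minimal sketch of the timestamping the patch switches to; this is not part of the patch, and the helper name blk_trace_timestamp and the header choice are assumptions. ktime_get() returns a monotonic ktime_t that is consistent across CPUs, and ktime_to_ns() converts it to nanoseconds:

#include <linux/ktime.h>	/* ktime_get(), ktime_to_ns(); assumed header */
#include <linux/types.h>

/*
 * Hypothetical helper: one global monotonic clock replaces the
 * per-CPU cpu_clock() value corrected by a calibrated offset.
 */
static inline u64 blk_trace_timestamp(void)
{
	return ktime_to_ns(ktime_get());
}

Because every CPU reads the same monotonic clock, trace records emitted on different CPUs can be merged and sorted by t->time without any per-CPU offset calibration, which is why the calibration code below can simply be deleted.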
Diffstat (limited to 'block/blktrace.c')
-rw-r--r--	block/blktrace.c	69
1 file changed, 2 insertions(+), 67 deletions(-)
diff --git a/block/blktrace.c b/block/blktrace.c
index 498a0a54a6aa..7471621d4ded 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -25,7 +25,6 @@
 #include <linux/time.h>
 #include <asm/uaccess.h>
 
-static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, };
 static unsigned int blktrace_seq __read_mostly = 1;
 
 /*
@@ -41,7 +40,7 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
 	const int cpu = smp_processor_id();
 
 	t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
-	t->time = cpu_clock(cpu) - per_cpu(blk_trace_cpu_offset, cpu);
+	t->time = ktime_to_ns(ktime_get());
 	t->device = bt->dev;
 	t->action = action;
 	t->pid = pid;
@@ -159,7 +158,7 @@ void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 
 	t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
 	t->sequence = ++(*sequence);
-	t->time = cpu_clock(cpu) - per_cpu(blk_trace_cpu_offset, cpu);
+	t->time = ktime_to_ns(ktime_get());
 	t->sector = sector;
 	t->bytes = bytes;
 	t->action = what;
@@ -506,73 +505,9 @@ void blk_trace_shutdown(struct request_queue *q)
 	}
 }
 
-/*
- * Average offset over two calls to cpu_clock() with a gettimeofday()
- * in the middle
- */
-static void blk_check_time(unsigned long long *t, int this_cpu)
-{
-	unsigned long long a, b;
-	struct timeval tv;
-
-	a = cpu_clock(this_cpu);
-	do_gettimeofday(&tv);
-	b = cpu_clock(this_cpu);
-
-	*t = tv.tv_sec * 1000000000 + tv.tv_usec * 1000;
-	*t -= (a + b) / 2;
-}
-
-/*
- * calibrate our inter-CPU timings
- */
-static void blk_trace_check_cpu_time(void *data)
-{
-	unsigned long long *t;
-	int this_cpu = get_cpu();
-
-	t = &per_cpu(blk_trace_cpu_offset, this_cpu);
-
-	/*
-	 * Just call it twice, hopefully the second call will be cache hot
-	 * and a little more precise
-	 */
-	blk_check_time(t, this_cpu);
-	blk_check_time(t, this_cpu);
-
-	put_cpu();
-}
-
-static void blk_trace_set_ht_offsets(void)
-{
-#if defined(CONFIG_SCHED_SMT)
-	int cpu, i;
-
-	/*
-	 * now make sure HT siblings have the same time offset
-	 */
-	preempt_disable();
-	for_each_online_cpu(cpu) {
-		unsigned long long *cpu_off, *sibling_off;
-
-		for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu)) {
-			if (i == cpu)
-				continue;
-
-			cpu_off = &per_cpu(blk_trace_cpu_offset, cpu);
-			sibling_off = &per_cpu(blk_trace_cpu_offset, i);
-			*sibling_off = *cpu_off;
-		}
-	}
-	preempt_enable();
-#endif
-}
-
 static __init int blk_trace_init(void)
 {
 	mutex_init(&blk_tree_mutex);
-	on_each_cpu(blk_trace_check_cpu_time, NULL, 1, 1);
-	blk_trace_set_ht_offsets();
 
 	return 0;
 }