path: root/include/trace/events
author    Linus Torvalds <torvalds@linux-foundation.org>  2013-11-15 19:33:41 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-11-15 19:33:41 -0500
commit    f412f2c60b480fa5140a4b4cb321cd48c64e1812 (patch)
tree      aafd5a5922b43daca4abdfa9bb723fc1f334108d /include/trace/events
parent    cd1177f25069cb494680eedd718e7c6d8fd85d10 (diff)
parent    1cf7e9c68fe84248174e998922b39e508375e7c1 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull second round of block driver updates from Jens Axboe:
 "As mentioned in the original pull request, the bcache bits were pulled
  because of their dependency on the immutable bio vecs. Kent re-did this
  part and resubmitted it, so here's the 2nd round of (mostly) driver
  updates for 3.13. It contains:

  - The bcache work from Kent.

  - Conversion of virtio-blk to blk-mq. This removes the bio and request
    path and substitutes the blk-mq path instead. The end result is almost
    200 deleted lines. The patch is acked by Asias and Christoph, who both
    did a bunch of testing.

  - Removal of the bootmem.h include from Grygorii Strashko, part of a
    larger series of his killing the dependency on that header file.

  - Removal of __cpuinit from blk-mq from Paul Gortmaker"

* 'for-linus' of git://git.kernel.dk/linux-block: (56 commits)
  virtio_blk: blk-mq support
  blk-mq: remove newly added instances of __cpuinit
  bcache: defensively handle format strings
  bcache: Bypass torture test
  bcache: Delete some slower inline asm
  bcache: Use ida for bcache block dev minor
  bcache: Fix sysfs splat on shutdown with flash only devs
  bcache: Better full stripe scanning
  bcache: Have btree_split() insert into parent directly
  bcache: Move spinlock into struct time_stats
  bcache: Kill sequential_merge option
  bcache: Kill bch_next_recurse_key()
  bcache: Avoid deadlocking in garbage collection
  bcache: Incremental gc
  bcache: Add make_btree_freeing_key()
  bcache: Add btree_node_write_sync()
  bcache: PRECEDING_KEY()
  bcache: bch_(btree|extent)_ptr_invalid()
  bcache: Don't bother with bucket refcount for btree node allocations
  bcache: Debug code improvements
  ...
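For readers unfamiliar with the blk-mq path mentioned above: the conversion itself is not part of the diff below, but the core shape of a blk-mq driver is that it supplies a queue_rq() hook instead of its own bio/request handling. The fragment below is only a hedged sketch of that shape; the "sketch_*" names are placeholders, and the prototype shown matches current kernels rather than the 3.13-era interface, which differed in detail (queue_rq() returned int back then, for example).

#include <linux/blk-mq.h>

/* Sketch of a blk-mq ->queue_rq() hook: the block layer hands the driver
 * fully formed requests; the driver starts them, services them, and
 * completes them.  Here the request is completed inline for brevity. */
static blk_status_t sketch_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);
	/* hand the request's segments to the device here */
	blk_mq_end_request(rq, BLK_STS_OK);
	return BLK_STS_OK;
}

static const struct blk_mq_ops sketch_mq_ops = {
	.queue_rq	= sketch_queue_rq,
};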
Diffstat (limited to 'include/trace/events')
-rw-r--r--  include/trace/events/bcache.h | 47
1 file changed, 37 insertions, 10 deletions
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index 5ebda976ea93..e2b9576d00e2 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -6,11 +6,9 @@
 
 #include <linux/tracepoint.h>
 
-struct search;
-
 DECLARE_EVENT_CLASS(bcache_request,
-	TP_PROTO(struct search *s, struct bio *bio),
-	TP_ARGS(s, bio),
+	TP_PROTO(struct bcache_device *d, struct bio *bio),
+	TP_ARGS(d, bio),
 
 	TP_STRUCT__entry(
 		__field(dev_t,		dev			)
@@ -24,8 +22,8 @@ DECLARE_EVENT_CLASS(bcache_request,
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->orig_major	= s->d->disk->major;
-		__entry->orig_minor	= s->d->disk->first_minor;
+		__entry->orig_major	= d->disk->major;
+		__entry->orig_minor	= d->disk->first_minor;
 		__entry->sector		= bio->bi_sector;
 		__entry->orig_sector	= bio->bi_sector - 16;
 		__entry->nr_sector	= bio->bi_size >> 9;
@@ -79,13 +77,13 @@ DECLARE_EVENT_CLASS(btree_node,
 /* request.c */
 
 DEFINE_EVENT(bcache_request, bcache_request_start,
-	TP_PROTO(struct search *s, struct bio *bio),
-	TP_ARGS(s, bio)
+	TP_PROTO(struct bcache_device *d, struct bio *bio),
+	TP_ARGS(d, bio)
 );
 
 DEFINE_EVENT(bcache_request, bcache_request_end,
-	TP_PROTO(struct search *s, struct bio *bio),
-	TP_ARGS(s, bio)
+	TP_PROTO(struct bcache_device *d, struct bio *bio),
+	TP_ARGS(d, bio)
 );
 
 DECLARE_EVENT_CLASS(bcache_bio,
@@ -370,6 +368,35 @@ DEFINE_EVENT(btree_node, bcache_btree_set_root,
 	TP_ARGS(b)
 );
 
+TRACE_EVENT(bcache_keyscan,
+	TP_PROTO(unsigned nr_found,
+		 unsigned start_inode, uint64_t start_offset,
+		 unsigned end_inode, uint64_t end_offset),
+	TP_ARGS(nr_found,
+		start_inode, start_offset,
+		end_inode, end_offset),
+
+	TP_STRUCT__entry(
+		__field(__u32,	nr_found			)
+		__field(__u32,	start_inode			)
+		__field(__u64,	start_offset			)
+		__field(__u32,	end_inode			)
+		__field(__u64,	end_offset			)
+	),
+
+	TP_fast_assign(
+		__entry->nr_found	= nr_found;
+		__entry->start_inode	= start_inode;
+		__entry->start_offset	= start_offset;
+		__entry->end_inode	= end_inode;
+		__entry->end_offset	= end_offset;
+	),
+
+	TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found,
+		  __entry->start_inode, __entry->start_offset,
+		  __entry->end_inode, __entry->end_offset)
+);
+
 /* Allocator */
 
 TRACE_EVENT(bcache_alloc_invalidate,
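The header above only declares the events; DECLARE_EVENT_CLASS/DEFINE_EVENT and TRACE_EVENT generate trace_<event>() wrappers with the TP_PROTO signatures, and the call sites live in drivers/md/bcache, outside this diff. The fragment below is a hedged sketch of how callers would invoke the reworked and the new tracepoints under the changed prototypes; the s->d field and the KEY_INODE()/KEY_OFFSET() accessors are assumptions about the surrounding bcache code, not something shown here.

/* Sketch only: illustrative call sites for the events declared above. */

static void sketch_request_tracing(struct search *s, struct bio *bio)
{
	/* pass the bcache_device directly, per the new TP_PROTO */
	trace_bcache_request_start(s->d, bio);
	/* ... service the request ... */
	trace_bcache_request_end(s->d, bio);
}

static void sketch_keyscan_tracing(unsigned nr_found,
				   struct bkey *start, struct bkey *end)
{
	/* report the number of keys found and the inode:offset range scanned */
	trace_bcache_keyscan(nr_found,
			     KEY_INODE(start), KEY_OFFSET(start),
			     KEY_INODE(end), KEY_OFFSET(end));
}

With the TP_printk() format above, a bcache_keyscan event would render in the trace buffer roughly as "found 16 keys from 5:4096 to 5:262144", i.e. nr_found followed by the start and end positions as inode:offset pairs.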