diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-28 15:52:24 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-28 15:52:24 -0500 |
| commit | ee89f81252179dcbf6cd65bd48299f5e52292d88 (patch) | |
| tree | 805846cd12821f84cfe619d44c9e3e36e0b0f9e6 /include | |
| parent | 21f3b24da9328415792efc780f50b9f434c12465 (diff) | |
| parent | de33127d8d3f1d570aad8c2223cd81b206636bc1 (diff) | |
Merge branch 'for-3.9/core' of git://git.kernel.dk/linux-block
Pull block IO core bits from Jens Axboe:
"Below are the core block IO bits for 3.9. It was delayed a few days
since my workstation kept crashing every 2-8h after pulling it into
current -git, but turns out it is a bug in the new pstate code (divide
by zero, will report separately). In any case, it contains:
- The big cfq/blkcg update from Tejun and Vivek.
- Additional block and writeback tracepoints from Tejun.
- Improvement of the should sort (based on queues) logic in the plug
flushing.
- _io() variants of the wait_for_completion() interface, using
io_schedule() instead of schedule() to contribute to io wait
properly.
- Various little fixes.
You'll get two trivial merge conflicts, which should be easy enough to
fix up"
Fix up the trivial conflicts due to hlist traversal cleanups (commit
b67bfe0d42ca: "hlist: drop the node parameter from iterators").
* 'for-3.9/core' of git://git.kernel.dk/linux-block: (39 commits)
block: remove redundant check to bd_openers()
block: use i_size_write() in bd_set_size()
cfq: fix lock imbalance with failed allocations
drivers/block/swim3.c: fix null pointer dereference
block: don't select PERCPU_RWSEM
block: account iowait time when waiting for completion of IO request
sched: add wait_for_completion_io[_timeout]
writeback: add more tracepoints
block: add block_{touch|dirty}_buffer tracepoint
buffer: make touch_buffer() an exported function
block: add @req to bio_{front|back}_merge tracepoints
block: add missing block_bio_complete() tracepoint
block: Remove should_sort judgement when flush blk_plug
block,elevator: use new hashtable implementation
cfq-iosched: add hierarchical cfq_group statistics
cfq-iosched: collect stats from dead cfqgs
cfq-iosched: separate out cfqg_stats_reset() from cfq_pd_reset_stats()
blkcg: make blkcg_print_blkgs() grab q locks instead of blkcg lock
block: RCU free request_queue
blkcg: implement blkg_[rw]stat_recursive_sum() and blkg_[rw]stat_merge()
...
Diffstat (limited to 'include')
| -rw-r--r-- | include/linux/blkdev.h | 3 | ||||
| -rw-r--r-- | include/linux/blktrace_api.h | 1 | ||||
| -rw-r--r-- | include/linux/buffer_head.h | 2 | ||||
| -rw-r--r-- | include/linux/completion.h | 3 | ||||
| -rw-r--r-- | include/linux/elevator.h | 5 | ||||
| -rw-r--r-- | include/trace/events/block.h | 104 | ||||
| -rw-r--r-- | include/trace/events/writeback.h | 116 |
7 files changed, 216 insertions, 18 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index f94bc83011ed..78feda9bbae2 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/gfp.h> | 19 | #include <linux/gfp.h> |
| 20 | #include <linux/bsg.h> | 20 | #include <linux/bsg.h> |
| 21 | #include <linux/smp.h> | 21 | #include <linux/smp.h> |
| 22 | #include <linux/rcupdate.h> | ||
| 22 | 23 | ||
| 23 | #include <asm/scatterlist.h> | 24 | #include <asm/scatterlist.h> |
| 24 | 25 | ||
| @@ -437,6 +438,7 @@ struct request_queue { | |||
| 437 | /* Throttle data */ | 438 | /* Throttle data */ |
| 438 | struct throtl_data *td; | 439 | struct throtl_data *td; |
| 439 | #endif | 440 | #endif |
| 441 | struct rcu_head rcu_head; | ||
| 440 | }; | 442 | }; |
| 441 | 443 | ||
| 442 | #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ | 444 | #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ |
| @@ -974,7 +976,6 @@ struct blk_plug { | |||
| 974 | unsigned long magic; /* detect uninitialized use-cases */ | 976 | unsigned long magic; /* detect uninitialized use-cases */ |
| 975 | struct list_head list; /* requests */ | 977 | struct list_head list; /* requests */ |
| 976 | struct list_head cb_list; /* md requires an unplug callback */ | 978 | struct list_head cb_list; /* md requires an unplug callback */ |
| 977 | unsigned int should_sort; /* list to be sorted before flushing? */ | ||
| 978 | }; | 979 | }; |
| 979 | #define BLK_MAX_REQUEST_COUNT 16 | 980 | #define BLK_MAX_REQUEST_COUNT 16 |
| 980 | 981 | ||
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 7c2e030e72f1..0ea61e07a91c 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | 12 | ||
| 13 | struct blk_trace { | 13 | struct blk_trace { |
| 14 | int trace_state; | 14 | int trace_state; |
| 15 | bool rq_based; | ||
| 15 | struct rchan *rchan; | 16 | struct rchan *rchan; |
| 16 | unsigned long __percpu *sequence; | 17 | unsigned long __percpu *sequence; |
| 17 | unsigned char __percpu *msg_data; | 18 | unsigned char __percpu *msg_data; |
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 458f497738a4..5afc4f94d110 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h | |||
| @@ -126,7 +126,6 @@ BUFFER_FNS(Write_EIO, write_io_error) | |||
| 126 | BUFFER_FNS(Unwritten, unwritten) | 126 | BUFFER_FNS(Unwritten, unwritten) |
| 127 | 127 | ||
| 128 | #define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK) | 128 | #define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK) |
| 129 | #define touch_buffer(bh) mark_page_accessed(bh->b_page) | ||
| 130 | 129 | ||
| 131 | /* If we *know* page->private refers to buffer_heads */ | 130 | /* If we *know* page->private refers to buffer_heads */ |
| 132 | #define page_buffers(page) \ | 131 | #define page_buffers(page) \ |
| @@ -142,6 +141,7 @@ BUFFER_FNS(Unwritten, unwritten) | |||
| 142 | 141 | ||
| 143 | void mark_buffer_dirty(struct buffer_head *bh); | 142 | void mark_buffer_dirty(struct buffer_head *bh); |
| 144 | void init_buffer(struct buffer_head *, bh_end_io_t *, void *); | 143 | void init_buffer(struct buffer_head *, bh_end_io_t *, void *); |
| 144 | void touch_buffer(struct buffer_head *bh); | ||
| 145 | void set_bh_page(struct buffer_head *bh, | 145 | void set_bh_page(struct buffer_head *bh, |
| 146 | struct page *page, unsigned long offset); | 146 | struct page *page, unsigned long offset); |
| 147 | int try_to_free_buffers(struct page *); | 147 | int try_to_free_buffers(struct page *); |
diff --git a/include/linux/completion.h b/include/linux/completion.h index 51494e6b5548..33f0280fd533 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h | |||
| @@ -77,10 +77,13 @@ static inline void init_completion(struct completion *x) | |||
| 77 | } | 77 | } |
| 78 | 78 | ||
| 79 | extern void wait_for_completion(struct completion *); | 79 | extern void wait_for_completion(struct completion *); |
| 80 | extern void wait_for_completion_io(struct completion *); | ||
| 80 | extern int wait_for_completion_interruptible(struct completion *x); | 81 | extern int wait_for_completion_interruptible(struct completion *x); |
| 81 | extern int wait_for_completion_killable(struct completion *x); | 82 | extern int wait_for_completion_killable(struct completion *x); |
| 82 | extern unsigned long wait_for_completion_timeout(struct completion *x, | 83 | extern unsigned long wait_for_completion_timeout(struct completion *x, |
| 83 | unsigned long timeout); | 84 | unsigned long timeout); |
| 85 | extern unsigned long wait_for_completion_io_timeout(struct completion *x, | ||
| 86 | unsigned long timeout); | ||
| 84 | extern long wait_for_completion_interruptible_timeout( | 87 | extern long wait_for_completion_interruptible_timeout( |
| 85 | struct completion *x, unsigned long timeout); | 88 | struct completion *x, unsigned long timeout); |
| 86 | extern long wait_for_completion_killable_timeout( | 89 | extern long wait_for_completion_killable_timeout( |
diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 186620631750..acd0312d46fb 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | #define _LINUX_ELEVATOR_H | 2 | #define _LINUX_ELEVATOR_H |
| 3 | 3 | ||
| 4 | #include <linux/percpu.h> | 4 | #include <linux/percpu.h> |
| 5 | #include <linux/hashtable.h> | ||
| 5 | 6 | ||
| 6 | #ifdef CONFIG_BLOCK | 7 | #ifdef CONFIG_BLOCK |
| 7 | 8 | ||
| @@ -96,6 +97,8 @@ struct elevator_type | |||
| 96 | struct list_head list; | 97 | struct list_head list; |
| 97 | }; | 98 | }; |
| 98 | 99 | ||
| 100 | #define ELV_HASH_BITS 6 | ||
| 101 | |||
| 99 | /* | 102 | /* |
| 100 | * each queue has an elevator_queue associated with it | 103 | * each queue has an elevator_queue associated with it |
| 101 | */ | 104 | */ |
| @@ -105,8 +108,8 @@ struct elevator_queue | |||
| 105 | void *elevator_data; | 108 | void *elevator_data; |
| 106 | struct kobject kobj; | 109 | struct kobject kobj; |
| 107 | struct mutex sysfs_lock; | 110 | struct mutex sysfs_lock; |
| 108 | struct hlist_head *hash; | ||
| 109 | unsigned int registered:1; | 111 | unsigned int registered:1; |
| 112 | DECLARE_HASHTABLE(hash, ELV_HASH_BITS); | ||
| 110 | }; | 113 | }; |
| 111 | 114 | ||
| 112 | /* | 115 | /* |
diff --git a/include/trace/events/block.h b/include/trace/events/block.h index 05c5e61f0a7c..9961726523d0 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h | |||
| @@ -6,10 +6,61 @@ | |||
| 6 | 6 | ||
| 7 | #include <linux/blktrace_api.h> | 7 | #include <linux/blktrace_api.h> |
| 8 | #include <linux/blkdev.h> | 8 | #include <linux/blkdev.h> |
| 9 | #include <linux/buffer_head.h> | ||
| 9 | #include <linux/tracepoint.h> | 10 | #include <linux/tracepoint.h> |
| 10 | 11 | ||
| 11 | #define RWBS_LEN 8 | 12 | #define RWBS_LEN 8 |
| 12 | 13 | ||
| 14 | DECLARE_EVENT_CLASS(block_buffer, | ||
| 15 | |||
| 16 | TP_PROTO(struct buffer_head *bh), | ||
| 17 | |||
| 18 | TP_ARGS(bh), | ||
| 19 | |||
| 20 | TP_STRUCT__entry ( | ||
| 21 | __field( dev_t, dev ) | ||
| 22 | __field( sector_t, sector ) | ||
| 23 | __field( size_t, size ) | ||
| 24 | ), | ||
| 25 | |||
| 26 | TP_fast_assign( | ||
| 27 | __entry->dev = bh->b_bdev->bd_dev; | ||
| 28 | __entry->sector = bh->b_blocknr; | ||
| 29 | __entry->size = bh->b_size; | ||
| 30 | ), | ||
| 31 | |||
| 32 | TP_printk("%d,%d sector=%llu size=%zu", | ||
| 33 | MAJOR(__entry->dev), MINOR(__entry->dev), | ||
| 34 | (unsigned long long)__entry->sector, __entry->size | ||
| 35 | ) | ||
| 36 | ); | ||
| 37 | |||
| 38 | /** | ||
| 39 | * block_touch_buffer - mark a buffer accessed | ||
| 40 | * @bh: buffer_head being touched | ||
| 41 | * | ||
| 42 | * Called from touch_buffer(). | ||
| 43 | */ | ||
| 44 | DEFINE_EVENT(block_buffer, block_touch_buffer, | ||
| 45 | |||
| 46 | TP_PROTO(struct buffer_head *bh), | ||
| 47 | |||
| 48 | TP_ARGS(bh) | ||
| 49 | ); | ||
| 50 | |||
| 51 | /** | ||
| 52 | * block_dirty_buffer - mark a buffer dirty | ||
| 53 | * @bh: buffer_head being dirtied | ||
| 54 | * | ||
| 55 | * Called from mark_buffer_dirty(). | ||
| 56 | */ | ||
| 57 | DEFINE_EVENT(block_buffer, block_dirty_buffer, | ||
| 58 | |||
| 59 | TP_PROTO(struct buffer_head *bh), | ||
| 60 | |||
| 61 | TP_ARGS(bh) | ||
| 62 | ); | ||
| 63 | |||
| 13 | DECLARE_EVENT_CLASS(block_rq_with_error, | 64 | DECLARE_EVENT_CLASS(block_rq_with_error, |
| 14 | 65 | ||
| 15 | TP_PROTO(struct request_queue *q, struct request *rq), | 66 | TP_PROTO(struct request_queue *q, struct request *rq), |
| @@ -206,7 +257,6 @@ TRACE_EVENT(block_bio_bounce, | |||
| 206 | 257 | ||
| 207 | /** | 258 | /** |
| 208 | * block_bio_complete - completed all work on the block operation | 259 | * block_bio_complete - completed all work on the block operation |
| 209 | * @q: queue holding the block operation | ||
| 210 | * @bio: block operation completed | 260 | * @bio: block operation completed |
| 211 | * @error: io error value | 261 | * @error: io error value |
| 212 | * | 262 | * |
| @@ -215,9 +265,9 @@ TRACE_EVENT(block_bio_bounce, | |||
| 215 | */ | 265 | */ |
| 216 | TRACE_EVENT(block_bio_complete, | 266 | TRACE_EVENT(block_bio_complete, |
| 217 | 267 | ||
| 218 | TP_PROTO(struct request_queue *q, struct bio *bio, int error), | 268 | TP_PROTO(struct bio *bio, int error), |
| 219 | 269 | ||
| 220 | TP_ARGS(q, bio, error), | 270 | TP_ARGS(bio, error), |
| 221 | 271 | ||
| 222 | TP_STRUCT__entry( | 272 | TP_STRUCT__entry( |
| 223 | __field( dev_t, dev ) | 273 | __field( dev_t, dev ) |
| @@ -228,7 +278,8 @@ TRACE_EVENT(block_bio_complete, | |||
| 228 | ), | 278 | ), |
| 229 | 279 | ||
| 230 | TP_fast_assign( | 280 | TP_fast_assign( |
| 231 | __entry->dev = bio->bi_bdev->bd_dev; | 281 | __entry->dev = bio->bi_bdev ? |
| 282 | bio->bi_bdev->bd_dev : 0; | ||
| 232 | __entry->sector = bio->bi_sector; | 283 | __entry->sector = bio->bi_sector; |
| 233 | __entry->nr_sector = bio->bi_size >> 9; | 284 | __entry->nr_sector = bio->bi_size >> 9; |
| 234 | __entry->error = error; | 285 | __entry->error = error; |
| @@ -241,11 +292,11 @@ TRACE_EVENT(block_bio_complete, | |||
| 241 | __entry->nr_sector, __entry->error) | 292 | __entry->nr_sector, __entry->error) |
| 242 | ); | 293 | ); |
| 243 | 294 | ||
| 244 | DECLARE_EVENT_CLASS(block_bio, | 295 | DECLARE_EVENT_CLASS(block_bio_merge, |
| 245 | 296 | ||
| 246 | TP_PROTO(struct request_queue *q, struct bio *bio), | 297 | TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio), |
| 247 | 298 | ||
| 248 | TP_ARGS(q, bio), | 299 | TP_ARGS(q, rq, bio), |
| 249 | 300 | ||
| 250 | TP_STRUCT__entry( | 301 | TP_STRUCT__entry( |
| 251 | __field( dev_t, dev ) | 302 | __field( dev_t, dev ) |
| @@ -272,31 +323,33 @@ DECLARE_EVENT_CLASS(block_bio, | |||
| 272 | /** | 323 | /** |
| 273 | * block_bio_backmerge - merging block operation to the end of an existing operation | 324 | * block_bio_backmerge - merging block operation to the end of an existing operation |
| 274 | * @q: queue holding operation | 325 | * @q: queue holding operation |
| 326 | * @rq: request bio is being merged into | ||
| 275 | * @bio: new block operation to merge | 327 | * @bio: new block operation to merge |
| 276 | * | 328 | * |
| 277 | * Merging block request @bio to the end of an existing block request | 329 | * Merging block request @bio to the end of an existing block request |
| 278 | * in queue @q. | 330 | * in queue @q. |
| 279 | */ | 331 | */ |
| 280 | DEFINE_EVENT(block_bio, block_bio_backmerge, | 332 | DEFINE_EVENT(block_bio_merge, block_bio_backmerge, |
| 281 | 333 | ||
| 282 | TP_PROTO(struct request_queue *q, struct bio *bio), | 334 | TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio), |
| 283 | 335 | ||
| 284 | TP_ARGS(q, bio) | 336 | TP_ARGS(q, rq, bio) |
| 285 | ); | 337 | ); |
| 286 | 338 | ||
| 287 | /** | 339 | /** |
| 288 | * block_bio_frontmerge - merging block operation to the beginning of an existing operation | 340 | * block_bio_frontmerge - merging block operation to the beginning of an existing operation |
| 289 | * @q: queue holding operation | 341 | * @q: queue holding operation |
| 342 | * @rq: request bio is being merged into | ||
| 290 | * @bio: new block operation to merge | 343 | * @bio: new block operation to merge |
| 291 | * | 344 | * |
| 292 | * Merging block IO operation @bio to the beginning of an existing block | 345 | * Merging block IO operation @bio to the beginning of an existing block |
| 293 | * operation in queue @q. | 346 | * operation in queue @q. |
| 294 | */ | 347 | */ |
| 295 | DEFINE_EVENT(block_bio, block_bio_frontmerge, | 348 | DEFINE_EVENT(block_bio_merge, block_bio_frontmerge, |
| 296 | 349 | ||
| 297 | TP_PROTO(struct request_queue *q, struct bio *bio), | 350 | TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio), |
| 298 | 351 | ||
| 299 | TP_ARGS(q, bio) | 352 | TP_ARGS(q, rq, bio) |
| 300 | ); | 353 | ); |
| 301 | 354 | ||
| 302 | /** | 355 | /** |
| @@ -306,11 +359,32 @@ DEFINE_EVENT(block_bio, block_bio_frontmerge, | |||
| 306 | * | 359 | * |
| 307 | * About to place the block IO operation @bio into queue @q. | 360 | * About to place the block IO operation @bio into queue @q. |
| 308 | */ | 361 | */ |
| 309 | DEFINE_EVENT(block_bio, block_bio_queue, | 362 | TRACE_EVENT(block_bio_queue, |
| 310 | 363 | ||
| 311 | TP_PROTO(struct request_queue *q, struct bio *bio), | 364 | TP_PROTO(struct request_queue *q, struct bio *bio), |
| 312 | 365 | ||
| 313 | TP_ARGS(q, bio) | 366 | TP_ARGS(q, bio), |
| 367 | |||
| 368 | TP_STRUCT__entry( | ||
| 369 | __field( dev_t, dev ) | ||
| 370 | __field( sector_t, sector ) | ||
| 371 | __field( unsigned int, nr_sector ) | ||
| 372 | __array( char, rwbs, RWBS_LEN ) | ||
| 373 | __array( char, comm, TASK_COMM_LEN ) | ||
| 374 | ), | ||
| 375 | |||
| 376 | TP_fast_assign( | ||
| 377 | __entry->dev = bio->bi_bdev->bd_dev; | ||
| 378 | __entry->sector = bio->bi_sector; | ||
| 379 | __entry->nr_sector = bio->bi_size >> 9; | ||
| 380 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); | ||
| 381 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
| 382 | ), | ||
| 383 | |||
| 384 | TP_printk("%d,%d %s %llu + %u [%s]", | ||
| 385 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | ||
| 386 | (unsigned long long)__entry->sector, | ||
| 387 | __entry->nr_sector, __entry->comm) | ||
| 314 | ); | 388 | ); |
| 315 | 389 | ||
| 316 | DECLARE_EVENT_CLASS(block_get_rq, | 390 | DECLARE_EVENT_CLASS(block_get_rq, |
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index b453d92c2253..6a16fd2e70ed 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h | |||
| @@ -32,6 +32,115 @@ | |||
| 32 | 32 | ||
| 33 | struct wb_writeback_work; | 33 | struct wb_writeback_work; |
| 34 | 34 | ||
| 35 | TRACE_EVENT(writeback_dirty_page, | ||
| 36 | |||
| 37 | TP_PROTO(struct page *page, struct address_space *mapping), | ||
| 38 | |||
| 39 | TP_ARGS(page, mapping), | ||
| 40 | |||
| 41 | TP_STRUCT__entry ( | ||
| 42 | __array(char, name, 32) | ||
| 43 | __field(unsigned long, ino) | ||
| 44 | __field(pgoff_t, index) | ||
| 45 | ), | ||
| 46 | |||
| 47 | TP_fast_assign( | ||
| 48 | strncpy(__entry->name, | ||
| 49 | mapping ? dev_name(mapping->backing_dev_info->dev) : "(unknown)", 32); | ||
| 50 | __entry->ino = mapping ? mapping->host->i_ino : 0; | ||
| 51 | __entry->index = page->index; | ||
| 52 | ), | ||
| 53 | |||
| 54 | TP_printk("bdi %s: ino=%lu index=%lu", | ||
| 55 | __entry->name, | ||
| 56 | __entry->ino, | ||
| 57 | __entry->index | ||
| 58 | ) | ||
| 59 | ); | ||
| 60 | |||
| 61 | DECLARE_EVENT_CLASS(writeback_dirty_inode_template, | ||
| 62 | |||
| 63 | TP_PROTO(struct inode *inode, int flags), | ||
| 64 | |||
| 65 | TP_ARGS(inode, flags), | ||
| 66 | |||
| 67 | TP_STRUCT__entry ( | ||
| 68 | __array(char, name, 32) | ||
| 69 | __field(unsigned long, ino) | ||
| 70 | __field(unsigned long, flags) | ||
| 71 | ), | ||
| 72 | |||
| 73 | TP_fast_assign( | ||
| 74 | struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info; | ||
| 75 | |||
| 76 | /* may be called for files on pseudo FSes w/ unregistered bdi */ | ||
| 77 | strncpy(__entry->name, | ||
| 78 | bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32); | ||
| 79 | __entry->ino = inode->i_ino; | ||
| 80 | __entry->flags = flags; | ||
| 81 | ), | ||
| 82 | |||
| 83 | TP_printk("bdi %s: ino=%lu flags=%s", | ||
| 84 | __entry->name, | ||
| 85 | __entry->ino, | ||
| 86 | show_inode_state(__entry->flags) | ||
| 87 | ) | ||
| 88 | ); | ||
| 89 | |||
| 90 | DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start, | ||
| 91 | |||
| 92 | TP_PROTO(struct inode *inode, int flags), | ||
| 93 | |||
| 94 | TP_ARGS(inode, flags) | ||
| 95 | ); | ||
| 96 | |||
| 97 | DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode, | ||
| 98 | |||
| 99 | TP_PROTO(struct inode *inode, int flags), | ||
| 100 | |||
| 101 | TP_ARGS(inode, flags) | ||
| 102 | ); | ||
| 103 | |||
| 104 | DECLARE_EVENT_CLASS(writeback_write_inode_template, | ||
| 105 | |||
| 106 | TP_PROTO(struct inode *inode, struct writeback_control *wbc), | ||
| 107 | |||
| 108 | TP_ARGS(inode, wbc), | ||
| 109 | |||
| 110 | TP_STRUCT__entry ( | ||
| 111 | __array(char, name, 32) | ||
| 112 | __field(unsigned long, ino) | ||
| 113 | __field(int, sync_mode) | ||
| 114 | ), | ||
| 115 | |||
| 116 | TP_fast_assign( | ||
| 117 | strncpy(__entry->name, | ||
| 118 | dev_name(inode->i_mapping->backing_dev_info->dev), 32); | ||
| 119 | __entry->ino = inode->i_ino; | ||
| 120 | __entry->sync_mode = wbc->sync_mode; | ||
| 121 | ), | ||
| 122 | |||
| 123 | TP_printk("bdi %s: ino=%lu sync_mode=%d", | ||
| 124 | __entry->name, | ||
| 125 | __entry->ino, | ||
| 126 | __entry->sync_mode | ||
| 127 | ) | ||
| 128 | ); | ||
| 129 | |||
| 130 | DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start, | ||
| 131 | |||
| 132 | TP_PROTO(struct inode *inode, struct writeback_control *wbc), | ||
| 133 | |||
| 134 | TP_ARGS(inode, wbc) | ||
| 135 | ); | ||
| 136 | |||
| 137 | DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode, | ||
| 138 | |||
| 139 | TP_PROTO(struct inode *inode, struct writeback_control *wbc), | ||
| 140 | |||
| 141 | TP_ARGS(inode, wbc) | ||
| 142 | ); | ||
| 143 | |||
| 35 | DECLARE_EVENT_CLASS(writeback_work_class, | 144 | DECLARE_EVENT_CLASS(writeback_work_class, |
| 36 | TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), | 145 | TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), |
| 37 | TP_ARGS(bdi, work), | 146 | TP_ARGS(bdi, work), |
| @@ -479,6 +588,13 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template, | |||
| 479 | ) | 588 | ) |
| 480 | ); | 589 | ); |
| 481 | 590 | ||
| 591 | DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start, | ||
| 592 | TP_PROTO(struct inode *inode, | ||
| 593 | struct writeback_control *wbc, | ||
| 594 | unsigned long nr_to_write), | ||
| 595 | TP_ARGS(inode, wbc, nr_to_write) | ||
| 596 | ); | ||
| 597 | |||
| 482 | DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode, | 598 | DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode, |
| 483 | TP_PROTO(struct inode *inode, | 599 | TP_PROTO(struct inode *inode, |
| 484 | struct writeback_control *wbc, | 600 | struct writeback_control *wbc, |
