author     Jens Axboe <jens.axboe@oracle.com>  2009-01-23 04:54:44 -0500
committer  Jens Axboe <jens.axboe@oracle.com>  2009-01-30 06:34:38 -0500
commit     bc58ba9468d94d62c56ab9b47173583ec140b165 (patch)
tree       e31bee7a5001efdd40ed568151d5fdfa8b1a746a
parent     7598909e3ee2a08726276d6415b69dadb52d0d76 (diff)
block: add sysfs file for controlling io stats accounting
This allows us to turn off disk stat accounting completely, for cases
where the 0.5-1% reduction in system time is important.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
-rw-r--r--  block/blk-core.c        | 90
-rw-r--r--  block/blk-sysfs.c       | 28
-rw-r--r--  include/linux/blkdev.h  |  6
3 files changed, 88 insertions(+), 36 deletions(-)
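
For orientation before the diff: once this patch is applied, the knob is an
ordinary queue sysfs attribute, so it can be driven from userspace. A minimal
sketch in C, assuming the attribute lands at /sys/block/<dev>/queue/iostats
(the name "iostats" comes from the patch below; the device sda is an
assumption):

/* Minimal sketch: disable I/O stat accounting on one queue.
 * The sysfs path and device name are illustrative assumptions. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/sys/block/sda/queue/iostats", O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, "0", 1) != 1)     /* "0" disables, "1" re-enables */
                perror("write");
        close(fd);
        return 0;
}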
diff --git a/block/blk-core.c b/block/blk-core.c
index ae75c047f45d..ca69f3d94100 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -64,11 +64,12 @@ static struct workqueue_struct *kblockd_workqueue;
 
 static void drive_stat_acct(struct request *rq, int new_io)
 {
+        struct gendisk *disk = rq->rq_disk;
         struct hd_struct *part;
         int rw = rq_data_dir(rq);
         int cpu;
 
-        if (!blk_fs_request(rq) || !rq->rq_disk)
+        if (!blk_fs_request(rq) || !disk || !blk_queue_io_stat(disk->queue))
                 return;
 
         cpu = part_stat_lock();
@@ -599,8 +600,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
         q->request_fn = rfn;
         q->prep_rq_fn = NULL;
         q->unplug_fn = generic_unplug_device;
-        q->queue_flags = (1 << QUEUE_FLAG_CLUSTER |
-                          1 << QUEUE_FLAG_STACKABLE);
+        q->queue_flags = QUEUE_FLAG_DEFAULT;
         q->queue_lock = lock;
 
         blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
@@ -1663,6 +1663,55 @@ void blkdev_dequeue_request(struct request *req)
 }
 EXPORT_SYMBOL(blkdev_dequeue_request);
 
+static void blk_account_io_completion(struct request *req, unsigned int bytes)
+{
+        struct gendisk *disk = req->rq_disk;
+
+        if (!disk || !blk_queue_io_stat(disk->queue))
+                return;
+
+        if (blk_fs_request(req)) {
+                const int rw = rq_data_dir(req);
+                struct hd_struct *part;
+                int cpu;
+
+                cpu = part_stat_lock();
+                part = disk_map_sector_rcu(req->rq_disk, req->sector);
+                part_stat_add(cpu, part, sectors[rw], bytes >> 9);
+                part_stat_unlock();
+        }
+}
+
+static void blk_account_io_done(struct request *req)
+{
+        struct gendisk *disk = req->rq_disk;
+
+        if (!disk || !blk_queue_io_stat(disk->queue))
+                return;
+
+        /*
+         * Account IO completion. bar_rq isn't accounted as a normal
+         * IO on queueing nor completion. Accounting the containing
+         * request is enough.
+         */
+        if (blk_fs_request(req) && req != &req->q->bar_rq) {
+                unsigned long duration = jiffies - req->start_time;
+                const int rw = rq_data_dir(req);
+                struct hd_struct *part;
+                int cpu;
+
+                cpu = part_stat_lock();
+                part = disk_map_sector_rcu(disk, req->sector);
+
+                part_stat_inc(cpu, part, ios[rw]);
+                part_stat_add(cpu, part, ticks[rw], duration);
+                part_round_stats(cpu, part);
+                part_dec_in_flight(part);
+
+                part_stat_unlock();
+        }
+}
+
 /**
  * __end_that_request_first - end I/O on a request
  * @req: the request being processed
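
(Note: the two helpers above centralize accounting that was previously
open-coded at the completion sites; the hunks below shrink those sites to
single calls. Both helpers return early when QUEUE_FLAG_IO_STAT is clear, so
a queue with accounting disabled pays only the flag test per completion. The
"bytes >> 9" converts bytes to 512-byte sectors, e.g. a 4096-byte completion
adds 8 to sectors[rw].)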
@@ -1698,16 +1747,7 @@ static int __end_that_request_first(struct request *req, int error,
                         (unsigned long long)req->sector);
         }
 
-        if (blk_fs_request(req) && req->rq_disk) {
-                const int rw = rq_data_dir(req);
-                struct hd_struct *part;
-                int cpu;
-
-                cpu = part_stat_lock();
-                part = disk_map_sector_rcu(req->rq_disk, req->sector);
-                part_stat_add(cpu, part, sectors[rw], nr_bytes >> 9);
-                part_stat_unlock();
-        }
+        blk_account_io_completion(req, nr_bytes);
 
         total_bytes = bio_nbytes = 0;
         while ((bio = req->bio) != NULL) {
@@ -1787,8 +1827,6 @@ static int __end_that_request_first(struct request *req, int error,
  */
 static void end_that_request_last(struct request *req, int error)
 {
-        struct gendisk *disk = req->rq_disk;
-
         if (blk_rq_tagged(req))
                 blk_queue_end_tag(req->q, req);
 
@@ -1800,27 +1838,7 @@ static void end_that_request_last(struct request *req, int error)
 
         blk_delete_timer(req);
 
-        /*
-         * Account IO completion. bar_rq isn't accounted as a normal
-         * IO on queueing nor completion. Accounting the containing
-         * request is enough.
-         */
-        if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
-                unsigned long duration = jiffies - req->start_time;
-                const int rw = rq_data_dir(req);
-                struct hd_struct *part;
-                int cpu;
-
-                cpu = part_stat_lock();
-                part = disk_map_sector_rcu(disk, req->sector);
-
-                part_stat_inc(cpu, part, ios[rw]);
-                part_stat_add(cpu, part, ticks[rw], duration);
-                part_round_stats(cpu, part);
-                part_dec_in_flight(part);
-
-                part_stat_unlock();
-        }
+        blk_account_io_done(req);
 
         if (req->end_io)
                 req->end_io(req, error);
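
QUEUE_FLAG_DEFAULT is applied here only in blk_init_queue_node(), so a queue
constructed through another path would not get I/O accounting by default. A
one-line sketch of an explicit opt-in, assuming the queue_flag_set_unlocked()
helper from blkdev.h of this era:

/* Hypothetical opt-in for a queue not built via blk_init_queue_node();
 * the unlocked variant is fine before the queue goes live. */
queue_flag_set_unlocked(QUEUE_FLAG_IO_STAT, q);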
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index b538ab8dbbd1..e29ddfc73cf4 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -197,6 +197,27 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
         return ret;
 }
 
+static ssize_t queue_iostats_show(struct request_queue *q, char *page)
+{
+        return queue_var_show(blk_queue_io_stat(q), page);
+}
+
+static ssize_t queue_iostats_store(struct request_queue *q, const char *page,
+                                   size_t count)
+{
+        unsigned long stats;
+        ssize_t ret = queue_var_store(&stats, page, count);
+
+        spin_lock_irq(q->queue_lock);
+        if (stats)
+                queue_flag_set(QUEUE_FLAG_IO_STAT, q);
+        else
+                queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
+        spin_unlock_irq(q->queue_lock);
+
+        return ret;
+}
+
 static struct queue_sysfs_entry queue_requests_entry = {
         .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
         .show = queue_requests_show,
@@ -249,6 +270,12 @@ static struct queue_sysfs_entry queue_rq_affinity_entry = {
         .store = queue_rq_affinity_store,
 };
 
+static struct queue_sysfs_entry queue_iostats_entry = {
+        .attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
+        .show = queue_iostats_show,
+        .store = queue_iostats_store,
+};
+
 static struct attribute *default_attrs[] = {
         &queue_requests_entry.attr,
         &queue_ra_entry.attr,
@@ -259,6 +286,7 @@ static struct attribute *default_attrs[] = {
         &queue_nonrot_entry.attr,
         &queue_nomerges_entry.attr,
         &queue_rq_affinity_entry.attr,
+        &queue_iostats_entry.attr,
         NULL,
 };
 
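
With the entry wired into default_attrs[], the file shows up alongside
nr_requests and the other queue attributes: reads report the current flag
through queue_var_show(), and writes parse the value with queue_var_store()
and flip QUEUE_FLAG_IO_STAT under queue_lock with interrupts disabled,
matching the locking of the other writable queue attributes.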
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 75426e4b8cdf..d08c4b8219a6 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -451,6 +451,11 @@ struct request_queue
 #define QUEUE_FLAG_STACKABLE   13      /* supports request stacking */
 #define QUEUE_FLAG_NONROT      14      /* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
+#define QUEUE_FLAG_IO_STAT     15      /* do IO stats */
+
+#define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |    \
+                                (1 << QUEUE_FLAG_CLUSTER) |    \
+                                 1 << QUEUE_FLAG_STACKABLE)
 
 static inline int queue_is_locked(struct request_queue *q)
 {
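
One note on the new mask: the final term is unparenthesized, which is still
correct since << binds tighter than |, but a fully parenthesized equivalent
reads more clearly:

#define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |    \
                                (1 << QUEUE_FLAG_CLUSTER) |    \
                                (1 << QUEUE_FLAG_STACKABLE))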
@@ -567,6 +572,7 @@ enum {
 #define blk_queue_stopped(q)   test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_nomerges(q)  test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_nonrot(q)    test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
+#define blk_queue_io_stat(q)   test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
 #define blk_queue_flushing(q)  ((q)->ordseq)
 #define blk_queue_stackable(q) \
         test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)