author    Jens Axboe <axboe@fb.com>	2016-08-05 10:11:04 -0400
committer Jens Axboe <axboe@fb.com>	2016-08-07 16:41:02 -0400
commit    c11f0c0b5bb949673e4fc16c742f0316ae4ced20
tree      86b0c6102a4b01c4609a199b783f990e78959b72
parent    52ddb7e9dd735c1a10722c58d3e069af4d3e6df2

block/mm: make bdev_ops->rw_page() take a bool for read/write

Commit abf545484d31 changed it from an 'rw' flags type to the newer ops
based interface, but now we're effectively leaking some bdev internals
to the rest of the kernel. Since we only care about whether it's a read
or a write at that level, just pass in a bool 'is_write' parameter
instead.

Then we can also move op_is_write() and friends back under CONFIG_BLOCK
protection.

Reviewed-by: Mike Christie <mchristi@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
 drivers/block/brd.c           | 16
 drivers/block/zram/zram_drv.c | 23
 drivers/nvdimm/btt.c          | 12
 drivers/nvdimm/pmem.c         | 12
 fs/block_dev.c                |  6
 fs/mpage.c                    |  2
 include/linux/blk_types.h     | 22
 include/linux/blkdev.h        |  2
 include/linux/fs.h            |  3
 include/linux/pagemap.h       |  2
 mm/filemap.c                  |  4
 11 files changed, 51 insertions(+), 53 deletions(-)
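For reference, here is a minimal driver-side sketch of what the interface looks like after this patch. The ->rw_page() signature, the bool convention (true for a write, false for a read, as passed by bdev_write_page()/bdev_read_page() in the fs/block_dev.c hunks below) and the page_endio() call mirror the brd and pmem changes in the diff; the mydrv_* names and the mydrv_do_transfer() helper are hypothetical placeholders, not part of this patch.

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/pagemap.h>

struct mydrv_device;

/* Hypothetical transfer helper; stands in for brd_do_bvec()/pmem_do_bvec(). */
static int mydrv_do_transfer(struct mydrv_device *dev, struct page *page,
			     unsigned int len, unsigned int off,
			     bool is_write, sector_t sector);

/* ->rw_page() now receives a bool instead of a REQ_OP_* value. */
static int mydrv_rw_page(struct block_device *bdev, sector_t sector,
			 struct page *page, bool is_write)
{
	struct mydrv_device *dev = bdev->bd_disk->private_data;
	int err;

	err = mydrv_do_transfer(dev, page, PAGE_SIZE, 0, is_write, sector);

	/* page_endio() takes the same bool after this change. */
	page_endio(page, is_write, err);
	return err;
}

static const struct block_device_operations mydrv_fops = {
	.owner		= THIS_MODULE,
	.rw_page	= mydrv_rw_page,
	/* .open, .release, .ioctl, etc. elided */
};

The core supplies the flag itself: bdev_read_page() calls ->rw_page(..., false) and bdev_write_page() calls ->rw_page(..., true), as shown in the fs/block_dev.c hunks below.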
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 3439b28cce8b..0c76d4016eeb 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -300,20 +300,20 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
  * Process a single bvec of a bio.
  */
 static int brd_do_bvec(struct brd_device *brd, struct page *page,
-			unsigned int len, unsigned int off, int op,
+			unsigned int len, unsigned int off, bool is_write,
 			sector_t sector)
 {
 	void *mem;
 	int err = 0;
 
-	if (op_is_write(op)) {
+	if (is_write) {
 		err = copy_to_brd_setup(brd, sector, len);
 		if (err)
 			goto out;
 	}
 
 	mem = kmap_atomic(page);
-	if (!op_is_write(op)) {
+	if (!is_write) {
 		copy_from_brd(mem + off, brd, sector, len);
 		flush_dcache_page(page);
 	} else {
@@ -350,8 +350,8 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
 		unsigned int len = bvec.bv_len;
 		int err;
 
-		err = brd_do_bvec(brd, bvec.bv_page, len,
-					bvec.bv_offset, bio_op(bio), sector);
+		err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
+					op_is_write(bio_op(bio)), sector);
 		if (err)
 			goto io_error;
 		sector += len >> SECTOR_SHIFT;
@@ -366,11 +366,11 @@ io_error:
 }
 
 static int brd_rw_page(struct block_device *bdev, sector_t sector,
-		       struct page *page, int op)
+		       struct page *page, bool is_write)
 {
 	struct brd_device *brd = bdev->bd_disk->private_data;
-	int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, op, sector);
-	page_endio(page, op, err);
+	int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector);
+	page_endio(page, is_write, err);
 	return err;
 }
 
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index ca29649c4b08..04365b17ee67 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -843,15 +843,16 @@ static void zram_bio_discard(struct zram *zram, u32 index,
 }
 
 static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
-			int offset, int op)
+			int offset, bool is_write)
 {
 	unsigned long start_time = jiffies;
+	int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
 	int ret;
 
-	generic_start_io_acct(op, bvec->bv_len >> SECTOR_SHIFT,
+	generic_start_io_acct(rw_acct, bvec->bv_len >> SECTOR_SHIFT,
 			&zram->disk->part0);
 
-	if (!op_is_write(op)) {
+	if (!is_write) {
 		atomic64_inc(&zram->stats.num_reads);
 		ret = zram_bvec_read(zram, bvec, index, offset);
 	} else {
@@ -859,10 +860,10 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
 		ret = zram_bvec_write(zram, bvec, index, offset);
 	}
 
-	generic_end_io_acct(op, &zram->disk->part0, start_time);
+	generic_end_io_acct(rw_acct, &zram->disk->part0, start_time);
 
 	if (unlikely(ret)) {
-		if (!op_is_write(op))
+		if (!is_write)
 			atomic64_inc(&zram->stats.failed_reads);
 		else
 			atomic64_inc(&zram->stats.failed_writes);
@@ -903,17 +904,17 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
 			bv.bv_offset = bvec.bv_offset;
 
 			if (zram_bvec_rw(zram, &bv, index, offset,
-					bio_op(bio)) < 0)
+					op_is_write(bio_op(bio))) < 0)
 				goto out;
 
 			bv.bv_len = bvec.bv_len - max_transfer_size;
 			bv.bv_offset += max_transfer_size;
 			if (zram_bvec_rw(zram, &bv, index + 1, 0,
-					bio_op(bio)) < 0)
+					op_is_write(bio_op(bio))) < 0)
 				goto out;
 		} else
 			if (zram_bvec_rw(zram, &bvec, index, offset,
-					bio_op(bio)) < 0)
+					op_is_write(bio_op(bio))) < 0)
 				goto out;
 
 		update_position(&index, &offset, &bvec);
@@ -970,7 +971,7 @@ static void zram_slot_free_notify(struct block_device *bdev,
 }
 
 static int zram_rw_page(struct block_device *bdev, sector_t sector,
-		       struct page *page, int op)
+		       struct page *page, bool is_write)
 {
 	int offset, err = -EIO;
 	u32 index;
@@ -994,7 +995,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
 	bv.bv_len = PAGE_SIZE;
 	bv.bv_offset = 0;
 
-	err = zram_bvec_rw(zram, &bv, index, offset, op);
+	err = zram_bvec_rw(zram, &bv, index, offset, is_write);
 put_zram:
 	zram_meta_put(zram);
 out:
@@ -1007,7 +1008,7 @@ out:
 	 * (e.g., SetPageError, set_page_dirty and extra works).
 	 */
 	if (err == 0)
-		page_endio(page, op, 0);
+		page_endio(page, is_write, 0);
 	return err;
 }
 
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 7cf3bdfaf809..88e91666f145 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1133,11 +1133,11 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
 
 static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
 			struct page *page, unsigned int len, unsigned int off,
-			int op, sector_t sector)
+			bool is_write, sector_t sector)
 {
 	int ret;
 
-	if (!op_is_write(op)) {
+	if (!is_write) {
 		ret = btt_read_pg(btt, bip, page, off, sector, len);
 		flush_dcache_page(page);
 	} else {
@@ -1180,7 +1180,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
 		BUG_ON(len % btt->sector_size);
 
 		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
-				  bio_op(bio), iter.bi_sector);
+				  op_is_write(bio_op(bio)), iter.bi_sector);
 		if (err) {
 			dev_info(&btt->nd_btt->dev,
 					"io error in %s sector %lld, len %d,\n",
@@ -1200,12 +1200,12 @@ out:
 }
 
 static int btt_rw_page(struct block_device *bdev, sector_t sector,
-		struct page *page, int op)
+		struct page *page, bool is_write)
 {
 	struct btt *btt = bdev->bd_disk->private_data;
 
-	btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, op, sector);
-	page_endio(page, op, 0);
+	btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, is_write, sector);
+	page_endio(page, is_write, 0);
 	return 0;
 }
 
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index d64d92481c1d..20bae50c231d 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -67,7 +67,7 @@ static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
 }
 
 static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
-			unsigned int len, unsigned int off, int op,
+			unsigned int len, unsigned int off, bool is_write,
 			sector_t sector)
 {
 	int rc = 0;
@@ -79,7 +79,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
 	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
 		bad_pmem = true;
 
-	if (!op_is_write(op)) {
+	if (!is_write) {
 		if (unlikely(bad_pmem))
 			rc = -EIO;
 		else {
@@ -134,7 +134,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 	do_acct = nd_iostat_start(bio, &start);
 	bio_for_each_segment(bvec, bio, iter) {
 		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
-				bvec.bv_offset, bio_op(bio),
+				bvec.bv_offset, op_is_write(bio_op(bio)),
 				iter.bi_sector);
 		if (rc) {
 			bio->bi_error = rc;
@@ -152,12 +152,12 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 }
 
 static int pmem_rw_page(struct block_device *bdev, sector_t sector,
-		       struct page *page, int op)
+		       struct page *page, bool is_write)
 {
 	struct pmem_device *pmem = bdev->bd_queue->queuedata;
 	int rc;
 
-	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, op, sector);
+	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, is_write, sector);
 
 	/*
 	 * The ->rw_page interface is subtle and tricky.  The core
@@ -166,7 +166,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
 	 * caused by double completion.
 	 */
 	if (rc == 0)
-		page_endio(page, op, 0);
+		page_endio(page, is_write, 0);
 
 	return rc;
 }
diff --git a/fs/block_dev.c b/fs/block_dev.c
index d402899ba135..c3cdde87cc8c 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -416,8 +416,7 @@ int bdev_read_page(struct block_device *bdev, sector_t sector,
 	result = blk_queue_enter(bdev->bd_queue, false);
 	if (result)
 		return result;
-	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
-			      REQ_OP_READ);
+	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, false);
 	blk_queue_exit(bdev->bd_queue);
 	return result;
 }
@@ -455,8 +454,7 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
 		return result;
 
 	set_page_writeback(page);
-	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
-			      REQ_OP_WRITE);
+	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true);
 	if (result)
 		end_page_writeback(page);
 	else
diff --git a/fs/mpage.c b/fs/mpage.c
index 7a09c55b4bd0..d2413af0823a 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -50,7 +50,7 @@ static void mpage_end_io(struct bio *bio)
 
 	bio_for_each_segment_all(bv, bio, i) {
 		struct page *page = bv->bv_page;
-		page_endio(page, bio_op(bio), bio->bi_error);
+		page_endio(page, op_is_write(bio_op(bio)), bio->bi_error);
 	}
 
 	bio_put(bio);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 14b28ff2caf8..f254eb264924 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -18,17 +18,6 @@ struct cgroup_subsys_state;
 typedef void (bio_end_io_t) (struct bio *);
 typedef void (bio_destructor_t) (struct bio *);
 
-enum req_op {
-	REQ_OP_READ,
-	REQ_OP_WRITE,
-	REQ_OP_DISCARD,		/* request to discard sectors */
-	REQ_OP_SECURE_ERASE,	/* request to securely erase sectors */
-	REQ_OP_WRITE_SAME,	/* write same block many times */
-	REQ_OP_FLUSH,		/* request for cache flush */
-};
-
-#define REQ_OP_BITS 3
-
 #ifdef CONFIG_BLOCK
 /*
  * main unit of I/O for the block layer and lower layers (ie drivers and
@@ -239,6 +228,17 @@ enum rq_flag_bits {
 #define REQ_HASHED		(1ULL << __REQ_HASHED)
 #define REQ_MQ_INFLIGHT		(1ULL << __REQ_MQ_INFLIGHT)
 
+enum req_op {
+	REQ_OP_READ,
+	REQ_OP_WRITE,
+	REQ_OP_DISCARD,		/* request to discard sectors */
+	REQ_OP_SECURE_ERASE,	/* request to securely erase sectors */
+	REQ_OP_WRITE_SAME,	/* write same block many times */
+	REQ_OP_FLUSH,		/* request for cache flush */
+};
+
+#define REQ_OP_BITS 3
+
 typedef unsigned int blk_qc_t;
 #define BLK_QC_T_NONE		-1U
 #define BLK_QC_T_SHIFT		16
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ccd68c0d01de..2c210b6a7bcf 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1672,7 +1672,7 @@ struct blk_dax_ctl {
 struct block_device_operations {
 	int (*open) (struct block_device *, fmode_t);
 	void (*release) (struct gendisk *, fmode_t);
-	int (*rw_page)(struct block_device *, sector_t, struct page *, int op);
+	int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 33f0e96db06f..3523bf62f328 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2480,13 +2480,12 @@ extern void init_special_inode(struct inode *, umode_t, dev_t);
 extern void make_bad_inode(struct inode *);
 extern bool is_bad_inode(struct inode *);
 
+#ifdef CONFIG_BLOCK
 static inline bool op_is_write(unsigned int op)
 {
 	return op == REQ_OP_READ ? false : true;
 }
 
-#ifdef CONFIG_BLOCK
-
 /*
  * return data direction, READ or WRITE
  */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 45786374abbd..66a1260b33de 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -510,7 +510,7 @@ static inline void wait_on_page_writeback(struct page *page)
 extern void end_page_writeback(struct page *page);
 void wait_for_stable_page(struct page *page);
 
-void page_endio(struct page *page, int op, int err);
+void page_endio(struct page *page, bool is_write, int err);
 
 /*
  * Add an arbitrary waiter to a page's wait queue
diff --git a/mm/filemap.c b/mm/filemap.c
index daef091d4c50..8a287dfc5372 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -887,9 +887,9 @@ EXPORT_SYMBOL(end_page_writeback);
  * After completing I/O on a page, call this routine to update the page
  * flags appropriately
  */
-void page_endio(struct page *page, int op, int err)
+void page_endio(struct page *page, bool is_write, int err)
 {
-	if (!op_is_write(op)) {
+	if (!is_write) {
 		if (!err) {
 			SetPageUptodate(page);
 		} else {