summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorMike Christie <mchristi@redhat.com>2016-08-04 16:23:34 -0400
committerJens Axboe <axboe@fb.com>2016-08-04 16:25:33 -0400
commitabf545484d31b68777a85c5c8f5b4bcde08283eb (patch)
treeee91e0a9e679e13cff54ede6b6b489627d72c1b1
parentc1c87c2ba9ec06d8ba9e8a26c18c67a2ba9cd9c1 (diff)
mm/block: convert rw_page users to bio op use
The rw_page users were not converted to use bio/req ops. As a result,
bdev_write_page is not passing down REQ_OP_WRITE and the IOs will be
sent down as reads.

Signed-off-by: Mike Christie <mchristi@redhat.com>
Fixes: 4e1b2d52a80d ("block, fs, drivers: remove REQ_OP compat defs and related code")

Modified by me to:

1) Drop op_flags passing into ->rw_page(), as we don't use it.

2) Make op_is_write() and friends safe to use for !CONFIG_BLOCK

Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--drivers/block/brd.c17
-rw-r--r--drivers/block/zram/zram_drv.c28
-rw-r--r--drivers/nvdimm/btt.c18
-rw-r--r--drivers/nvdimm/pmem.c12
-rw-r--r--fs/block_dev.c7
-rw-r--r--fs/mpage.c2
-rw-r--r--include/linux/blk_types.h22
-rw-r--r--include/linux/blkdev.h2
-rw-r--r--include/linux/fs.h3
-rw-r--r--include/linux/pagemap.h2
-rw-r--r--mm/filemap.c6
11 files changed, 60 insertions, 59 deletions
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 3022dad24071..3439b28cce8b 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -300,20 +300,20 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
300 * Process a single bvec of a bio. 300 * Process a single bvec of a bio.
301 */ 301 */
302static int brd_do_bvec(struct brd_device *brd, struct page *page, 302static int brd_do_bvec(struct brd_device *brd, struct page *page,
303 unsigned int len, unsigned int off, int rw, 303 unsigned int len, unsigned int off, int op,
304 sector_t sector) 304 sector_t sector)
305{ 305{
306 void *mem; 306 void *mem;
307 int err = 0; 307 int err = 0;
308 308
309 if (rw != READ) { 309 if (op_is_write(op)) {
310 err = copy_to_brd_setup(brd, sector, len); 310 err = copy_to_brd_setup(brd, sector, len);
311 if (err) 311 if (err)
312 goto out; 312 goto out;
313 } 313 }
314 314
315 mem = kmap_atomic(page); 315 mem = kmap_atomic(page);
316 if (rw == READ) { 316 if (!op_is_write(op)) {
317 copy_from_brd(mem + off, brd, sector, len); 317 copy_from_brd(mem + off, brd, sector, len);
318 flush_dcache_page(page); 318 flush_dcache_page(page);
319 } else { 319 } else {
@@ -330,7 +330,6 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
330{ 330{
331 struct block_device *bdev = bio->bi_bdev; 331 struct block_device *bdev = bio->bi_bdev;
332 struct brd_device *brd = bdev->bd_disk->private_data; 332 struct brd_device *brd = bdev->bd_disk->private_data;
333 int rw;
334 struct bio_vec bvec; 333 struct bio_vec bvec;
335 sector_t sector; 334 sector_t sector;
336 struct bvec_iter iter; 335 struct bvec_iter iter;
@@ -347,14 +346,12 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
347 goto out; 346 goto out;
348 } 347 }
349 348
350 rw = bio_data_dir(bio);
351
352 bio_for_each_segment(bvec, bio, iter) { 349 bio_for_each_segment(bvec, bio, iter) {
353 unsigned int len = bvec.bv_len; 350 unsigned int len = bvec.bv_len;
354 int err; 351 int err;
355 352
356 err = brd_do_bvec(brd, bvec.bv_page, len, 353 err = brd_do_bvec(brd, bvec.bv_page, len,
357 bvec.bv_offset, rw, sector); 354 bvec.bv_offset, bio_op(bio), sector);
358 if (err) 355 if (err)
359 goto io_error; 356 goto io_error;
360 sector += len >> SECTOR_SHIFT; 357 sector += len >> SECTOR_SHIFT;
@@ -369,11 +366,11 @@ io_error:
369} 366}
370 367
371static int brd_rw_page(struct block_device *bdev, sector_t sector, 368static int brd_rw_page(struct block_device *bdev, sector_t sector,
372 struct page *page, int rw) 369 struct page *page, int op)
373{ 370{
374 struct brd_device *brd = bdev->bd_disk->private_data; 371 struct brd_device *brd = bdev->bd_disk->private_data;
375 int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, rw, sector); 372 int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, op, sector);
376 page_endio(page, rw & WRITE, err); 373 page_endio(page, op, err);
377 return err; 374 return err;
378} 375}
379 376
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 7454cf188c8e..ca29649c4b08 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -843,15 +843,15 @@ static void zram_bio_discard(struct zram *zram, u32 index,
843} 843}
844 844
845static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index, 845static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
846 int offset, int rw) 846 int offset, int op)
847{ 847{
848 unsigned long start_time = jiffies; 848 unsigned long start_time = jiffies;
849 int ret; 849 int ret;
850 850
851 generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT, 851 generic_start_io_acct(op, bvec->bv_len >> SECTOR_SHIFT,
852 &zram->disk->part0); 852 &zram->disk->part0);
853 853
854 if (rw == READ) { 854 if (!op_is_write(op)) {
855 atomic64_inc(&zram->stats.num_reads); 855 atomic64_inc(&zram->stats.num_reads);
856 ret = zram_bvec_read(zram, bvec, index, offset); 856 ret = zram_bvec_read(zram, bvec, index, offset);
857 } else { 857 } else {
@@ -859,10 +859,10 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
859 ret = zram_bvec_write(zram, bvec, index, offset); 859 ret = zram_bvec_write(zram, bvec, index, offset);
860 } 860 }
861 861
862 generic_end_io_acct(rw, &zram->disk->part0, start_time); 862 generic_end_io_acct(op, &zram->disk->part0, start_time);
863 863
864 if (unlikely(ret)) { 864 if (unlikely(ret)) {
865 if (rw == READ) 865 if (!op_is_write(op))
866 atomic64_inc(&zram->stats.failed_reads); 866 atomic64_inc(&zram->stats.failed_reads);
867 else 867 else
868 atomic64_inc(&zram->stats.failed_writes); 868 atomic64_inc(&zram->stats.failed_writes);
@@ -873,7 +873,7 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
873 873
874static void __zram_make_request(struct zram *zram, struct bio *bio) 874static void __zram_make_request(struct zram *zram, struct bio *bio)
875{ 875{
876 int offset, rw; 876 int offset;
877 u32 index; 877 u32 index;
878 struct bio_vec bvec; 878 struct bio_vec bvec;
879 struct bvec_iter iter; 879 struct bvec_iter iter;
@@ -888,7 +888,6 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
888 return; 888 return;
889 } 889 }
890 890
891 rw = bio_data_dir(bio);
892 bio_for_each_segment(bvec, bio, iter) { 891 bio_for_each_segment(bvec, bio, iter) {
893 int max_transfer_size = PAGE_SIZE - offset; 892 int max_transfer_size = PAGE_SIZE - offset;
894 893
@@ -903,15 +902,18 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
903 bv.bv_len = max_transfer_size; 902 bv.bv_len = max_transfer_size;
904 bv.bv_offset = bvec.bv_offset; 903 bv.bv_offset = bvec.bv_offset;
905 904
906 if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0) 905 if (zram_bvec_rw(zram, &bv, index, offset,
906 bio_op(bio)) < 0)
907 goto out; 907 goto out;
908 908
909 bv.bv_len = bvec.bv_len - max_transfer_size; 909 bv.bv_len = bvec.bv_len - max_transfer_size;
910 bv.bv_offset += max_transfer_size; 910 bv.bv_offset += max_transfer_size;
911 if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0) 911 if (zram_bvec_rw(zram, &bv, index + 1, 0,
912 bio_op(bio)) < 0)
912 goto out; 913 goto out;
913 } else 914 } else
914 if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0) 915 if (zram_bvec_rw(zram, &bvec, index, offset,
916 bio_op(bio)) < 0)
915 goto out; 917 goto out;
916 918
917 update_position(&index, &offset, &bvec); 919 update_position(&index, &offset, &bvec);
@@ -968,7 +970,7 @@ static void zram_slot_free_notify(struct block_device *bdev,
968} 970}
969 971
970static int zram_rw_page(struct block_device *bdev, sector_t sector, 972static int zram_rw_page(struct block_device *bdev, sector_t sector,
971 struct page *page, int rw) 973 struct page *page, int op)
972{ 974{
973 int offset, err = -EIO; 975 int offset, err = -EIO;
974 u32 index; 976 u32 index;
@@ -992,7 +994,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
992 bv.bv_len = PAGE_SIZE; 994 bv.bv_len = PAGE_SIZE;
993 bv.bv_offset = 0; 995 bv.bv_offset = 0;
994 996
995 err = zram_bvec_rw(zram, &bv, index, offset, rw); 997 err = zram_bvec_rw(zram, &bv, index, offset, op);
996put_zram: 998put_zram:
997 zram_meta_put(zram); 999 zram_meta_put(zram);
998out: 1000out:
@@ -1005,7 +1007,7 @@ out:
1005 * (e.g., SetPageError, set_page_dirty and extra works). 1007 * (e.g., SetPageError, set_page_dirty and extra works).
1006 */ 1008 */
1007 if (err == 0) 1009 if (err == 0)
1008 page_endio(page, rw, 0); 1010 page_endio(page, op, 0);
1009 return err; 1011 return err;
1010} 1012}
1011 1013
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 9dce03f420eb..7cf3bdfaf809 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1133,11 +1133,11 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
1133 1133
1134static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip, 1134static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
1135 struct page *page, unsigned int len, unsigned int off, 1135 struct page *page, unsigned int len, unsigned int off,
1136 int rw, sector_t sector) 1136 int op, sector_t sector)
1137{ 1137{
1138 int ret; 1138 int ret;
1139 1139
1140 if (rw == READ) { 1140 if (!op_is_write(op)) {
1141 ret = btt_read_pg(btt, bip, page, off, sector, len); 1141 ret = btt_read_pg(btt, bip, page, off, sector, len);
1142 flush_dcache_page(page); 1142 flush_dcache_page(page);
1143 } else { 1143 } else {
@@ -1155,7 +1155,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
1155 struct bvec_iter iter; 1155 struct bvec_iter iter;
1156 unsigned long start; 1156 unsigned long start;
1157 struct bio_vec bvec; 1157 struct bio_vec bvec;
1158 int err = 0, rw; 1158 int err = 0;
1159 bool do_acct; 1159 bool do_acct;
1160 1160
1161 /* 1161 /*
@@ -1170,7 +1170,6 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
1170 } 1170 }
1171 1171
1172 do_acct = nd_iostat_start(bio, &start); 1172 do_acct = nd_iostat_start(bio, &start);
1173 rw = bio_data_dir(bio);
1174 bio_for_each_segment(bvec, bio, iter) { 1173 bio_for_each_segment(bvec, bio, iter) {
1175 unsigned int len = bvec.bv_len; 1174 unsigned int len = bvec.bv_len;
1176 1175
@@ -1181,11 +1180,12 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
1181 BUG_ON(len % btt->sector_size); 1180 BUG_ON(len % btt->sector_size);
1182 1181
1183 err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset, 1182 err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
1184 rw, iter.bi_sector); 1183 bio_op(bio), iter.bi_sector);
1185 if (err) { 1184 if (err) {
1186 dev_info(&btt->nd_btt->dev, 1185 dev_info(&btt->nd_btt->dev,
1187 "io error in %s sector %lld, len %d,\n", 1186 "io error in %s sector %lld, len %d,\n",
1188 (rw == READ) ? "READ" : "WRITE", 1187 (op_is_write(bio_op(bio))) ? "WRITE" :
1188 "READ",
1189 (unsigned long long) iter.bi_sector, len); 1189 (unsigned long long) iter.bi_sector, len);
1190 bio->bi_error = err; 1190 bio->bi_error = err;
1191 break; 1191 break;
@@ -1200,12 +1200,12 @@ out:
1200} 1200}
1201 1201
1202static int btt_rw_page(struct block_device *bdev, sector_t sector, 1202static int btt_rw_page(struct block_device *bdev, sector_t sector,
1203 struct page *page, int rw) 1203 struct page *page, int op)
1204{ 1204{
1205 struct btt *btt = bdev->bd_disk->private_data; 1205 struct btt *btt = bdev->bd_disk->private_data;
1206 1206
1207 btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, rw, sector); 1207 btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, op, sector);
1208 page_endio(page, rw & WRITE, 0); 1208 page_endio(page, op, 0);
1209 return 0; 1209 return 0;
1210} 1210}
1211 1211
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index b511099457db..d64d92481c1d 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -67,7 +67,7 @@ static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
67} 67}
68 68
69static int pmem_do_bvec(struct pmem_device *pmem, struct page *page, 69static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
70 unsigned int len, unsigned int off, int rw, 70 unsigned int len, unsigned int off, int op,
71 sector_t sector) 71 sector_t sector)
72{ 72{
73 int rc = 0; 73 int rc = 0;
@@ -79,7 +79,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
79 if (unlikely(is_bad_pmem(&pmem->bb, sector, len))) 79 if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
80 bad_pmem = true; 80 bad_pmem = true;
81 81
82 if (rw == READ) { 82 if (!op_is_write(op)) {
83 if (unlikely(bad_pmem)) 83 if (unlikely(bad_pmem))
84 rc = -EIO; 84 rc = -EIO;
85 else { 85 else {
@@ -134,7 +134,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
134 do_acct = nd_iostat_start(bio, &start); 134 do_acct = nd_iostat_start(bio, &start);
135 bio_for_each_segment(bvec, bio, iter) { 135 bio_for_each_segment(bvec, bio, iter) {
136 rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len, 136 rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
137 bvec.bv_offset, bio_data_dir(bio), 137 bvec.bv_offset, bio_op(bio),
138 iter.bi_sector); 138 iter.bi_sector);
139 if (rc) { 139 if (rc) {
140 bio->bi_error = rc; 140 bio->bi_error = rc;
@@ -152,12 +152,12 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
152} 152}
153 153
154static int pmem_rw_page(struct block_device *bdev, sector_t sector, 154static int pmem_rw_page(struct block_device *bdev, sector_t sector,
155 struct page *page, int rw) 155 struct page *page, int op)
156{ 156{
157 struct pmem_device *pmem = bdev->bd_queue->queuedata; 157 struct pmem_device *pmem = bdev->bd_queue->queuedata;
158 int rc; 158 int rc;
159 159
160 rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector); 160 rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, op, sector);
161 161
162 /* 162 /*
163 * The ->rw_page interface is subtle and tricky. The core 163 * The ->rw_page interface is subtle and tricky. The core
@@ -166,7 +166,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
166 * caused by double completion. 166 * caused by double completion.
167 */ 167 */
168 if (rc == 0) 168 if (rc == 0)
169 page_endio(page, rw & WRITE, 0); 169 page_endio(page, op, 0);
170 170
171 return rc; 171 return rc;
172} 172}
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 2033a3f91d58..d402899ba135 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -416,7 +416,8 @@ int bdev_read_page(struct block_device *bdev, sector_t sector,
416 result = blk_queue_enter(bdev->bd_queue, false); 416 result = blk_queue_enter(bdev->bd_queue, false);
417 if (result) 417 if (result)
418 return result; 418 return result;
419 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ); 419 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
420 REQ_OP_READ);
420 blk_queue_exit(bdev->bd_queue); 421 blk_queue_exit(bdev->bd_queue);
421 return result; 422 return result;
422} 423}
@@ -445,7 +446,6 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
445 struct page *page, struct writeback_control *wbc) 446 struct page *page, struct writeback_control *wbc)
446{ 447{
447 int result; 448 int result;
448 int rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE;
449 const struct block_device_operations *ops = bdev->bd_disk->fops; 449 const struct block_device_operations *ops = bdev->bd_disk->fops;
450 450
451 if (!ops->rw_page || bdev_get_integrity(bdev)) 451 if (!ops->rw_page || bdev_get_integrity(bdev))
@@ -455,7 +455,8 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
455 return result; 455 return result;
456 456
457 set_page_writeback(page); 457 set_page_writeback(page);
458 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, rw); 458 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
459 REQ_OP_WRITE);
459 if (result) 460 if (result)
460 end_page_writeback(page); 461 end_page_writeback(page);
461 else 462 else
diff --git a/fs/mpage.c b/fs/mpage.c
index 2ca1f39c8cba..7a09c55b4bd0 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -50,7 +50,7 @@ static void mpage_end_io(struct bio *bio)
50 50
51 bio_for_each_segment_all(bv, bio, i) { 51 bio_for_each_segment_all(bv, bio, i) {
52 struct page *page = bv->bv_page; 52 struct page *page = bv->bv_page;
53 page_endio(page, bio_data_dir(bio), bio->bi_error); 53 page_endio(page, bio_op(bio), bio->bi_error);
54 } 54 }
55 55
56 bio_put(bio); 56 bio_put(bio);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index f254eb264924..14b28ff2caf8 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -18,6 +18,17 @@ struct cgroup_subsys_state;
18typedef void (bio_end_io_t) (struct bio *); 18typedef void (bio_end_io_t) (struct bio *);
19typedef void (bio_destructor_t) (struct bio *); 19typedef void (bio_destructor_t) (struct bio *);
20 20
21enum req_op {
22 REQ_OP_READ,
23 REQ_OP_WRITE,
24 REQ_OP_DISCARD, /* request to discard sectors */
25 REQ_OP_SECURE_ERASE, /* request to securely erase sectors */
26 REQ_OP_WRITE_SAME, /* write same block many times */
27 REQ_OP_FLUSH, /* request for cache flush */
28};
29
30#define REQ_OP_BITS 3
31
21#ifdef CONFIG_BLOCK 32#ifdef CONFIG_BLOCK
22/* 33/*
23 * main unit of I/O for the block layer and lower layers (ie drivers and 34 * main unit of I/O for the block layer and lower layers (ie drivers and
@@ -228,17 +239,6 @@ enum rq_flag_bits {
228#define REQ_HASHED (1ULL << __REQ_HASHED) 239#define REQ_HASHED (1ULL << __REQ_HASHED)
229#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) 240#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT)
230 241
231enum req_op {
232 REQ_OP_READ,
233 REQ_OP_WRITE,
234 REQ_OP_DISCARD, /* request to discard sectors */
235 REQ_OP_SECURE_ERASE, /* request to securely erase sectors */
236 REQ_OP_WRITE_SAME, /* write same block many times */
237 REQ_OP_FLUSH, /* request for cache flush */
238};
239
240#define REQ_OP_BITS 3
241
242typedef unsigned int blk_qc_t; 242typedef unsigned int blk_qc_t;
243#define BLK_QC_T_NONE -1U 243#define BLK_QC_T_NONE -1U
244#define BLK_QC_T_SHIFT 16 244#define BLK_QC_T_SHIFT 16
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index de7935961c27..ccd68c0d01de 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1672,7 +1672,7 @@ struct blk_dax_ctl {
1672struct block_device_operations { 1672struct block_device_operations {
1673 int (*open) (struct block_device *, fmode_t); 1673 int (*open) (struct block_device *, fmode_t);
1674 void (*release) (struct gendisk *, fmode_t); 1674 void (*release) (struct gendisk *, fmode_t);
1675 int (*rw_page)(struct block_device *, sector_t, struct page *, int rw); 1675 int (*rw_page)(struct block_device *, sector_t, struct page *, int op);
1676 int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); 1676 int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1677 int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); 1677 int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1678 long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *, 1678 long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index f3f0b4c8e8ac..498255e6914e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2480,12 +2480,13 @@ extern void init_special_inode(struct inode *, umode_t, dev_t);
2480extern void make_bad_inode(struct inode *); 2480extern void make_bad_inode(struct inode *);
2481extern bool is_bad_inode(struct inode *); 2481extern bool is_bad_inode(struct inode *);
2482 2482
2483#ifdef CONFIG_BLOCK
2484static inline bool op_is_write(unsigned int op) 2483static inline bool op_is_write(unsigned int op)
2485{ 2484{
2486 return op == REQ_OP_READ ? false : true; 2485 return op == REQ_OP_READ ? false : true;
2487} 2486}
2488 2487
2488#ifdef CONFIG_BLOCK
2489
2489/* 2490/*
2490 * return data direction, READ or WRITE 2491 * return data direction, READ or WRITE
2491 */ 2492 */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 81363b834900..45786374abbd 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -510,7 +510,7 @@ static inline void wait_on_page_writeback(struct page *page)
510extern void end_page_writeback(struct page *page); 510extern void end_page_writeback(struct page *page);
511void wait_for_stable_page(struct page *page); 511void wait_for_stable_page(struct page *page);
512 512
513void page_endio(struct page *page, int rw, int err); 513void page_endio(struct page *page, int op, int err);
514 514
515/* 515/*
516 * Add an arbitrary waiter to a page's wait queue 516 * Add an arbitrary waiter to a page's wait queue
diff --git a/mm/filemap.c b/mm/filemap.c
index 3083ded98b15..daef091d4c50 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -887,9 +887,9 @@ EXPORT_SYMBOL(end_page_writeback);
887 * After completing I/O on a page, call this routine to update the page 887 * After completing I/O on a page, call this routine to update the page
888 * flags appropriately 888 * flags appropriately
889 */ 889 */
890void page_endio(struct page *page, int rw, int err) 890void page_endio(struct page *page, int op, int err)
891{ 891{
892 if (rw == READ) { 892 if (!op_is_write(op)) {
893 if (!err) { 893 if (!err) {
894 SetPageUptodate(page); 894 SetPageUptodate(page);
895 } else { 895 } else {
@@ -897,7 +897,7 @@ void page_endio(struct page *page, int rw, int err)
897 SetPageError(page); 897 SetPageError(page);
898 } 898 }
899 unlock_page(page); 899 unlock_page(page);
900 } else { /* rw == WRITE */ 900 } else {
901 if (err) { 901 if (err) {
902 SetPageError(page); 902 SetPageError(page);
903 if (page->mapping) 903 if (page->mapping)