-rw-r--r--  drivers/block/brd.c                            4
-rw-r--r--  drivers/block/drbd/drbd_req.c                 34
-rw-r--r--  drivers/block/drbd/drbd_worker.c              30
-rw-r--r--  drivers/block/null_blk.c                       2
-rw-r--r--  drivers/block/umem.c                           6
-rw-r--r--  drivers/lightnvm/rrpc.c                        4
-rw-r--r--  drivers/md/dm-raid1.c                          8
-rw-r--r--  drivers/md/dm-snap.c                          13
-rw-r--r--  drivers/md/dm-zero.c                          15
-rw-r--r--  drivers/md/dm.c                                2
-rw-r--r--  drivers/md/raid1.c                             2
-rw-r--r--  drivers/md/raid5.c                             2
-rw-r--r--  drivers/nvme/host/lightnvm.c                   2
-rw-r--r--  drivers/staging/lustre/lustre/llite/lloop.c   13
-rw-r--r--  fs/buffer.c                                   12
-rw-r--r--  fs/f2fs/checkpoint.c                           2
-rw-r--r--  fs/f2fs/gc.c                                   3
-rw-r--r--  fs/f2fs/node.c                                 2
-rw-r--r--  fs/gfs2/bmap.c                                 3
-rw-r--r--  fs/gfs2/dir.c                                  2
-rw-r--r--  fs/gfs2/meta_io.c                              2
-rw-r--r--  fs/reiserfs/stree.c                            2
-rw-r--r--  fs/udf/dir.c                                   2
-rw-r--r--  fs/udf/directory.c                             2
-rw-r--r--  include/linux/fs.h                            18
-rw-r--r--  include/trace/events/f2fs.h                    4
26 files changed, 95 insertions, 96 deletions
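
Every hunk below makes the same conversion: the removed bio_rw() helper folded READ, READA, and WRITE into a single return value; its callers now combine bio_op() (the request operation), bio_data_dir() (the data direction), and the REQ_RAHEAD flag in bi_rw (read-ahead). A minimal sketch of the before/after idiom, assuming a tree with this series applied; the helper name classify_bio() and the pr_debug() calls are illustrative only, not part of the patch:

/* Sketch only: how a driver distinguishes the three old bio_rw() cases
 * once bio_rw() and READA are gone. */
#include <linux/bio.h>
#include <linux/fs.h>	/* op_is_write(), READ/WRITE */

static void classify_bio(struct bio *bio)	/* hypothetical helper */
{
	if (op_is_write(bio_op(bio)))
		pr_debug("write\n");		/* was: bio_rw(bio) == WRITE */
	else if (bio->bi_rw & REQ_RAHEAD)
		pr_debug("read-ahead\n");	/* was: bio_rw(bio) == READA */
	else
		pr_debug("read\n");		/* was: bio_rw(bio) == READ */
}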
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index f5b0d6f4e09f..41a6c4c9da9f 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -347,9 +347,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
 		goto out;
 	}
 
-	rw = bio_rw(bio);
-	if (rw == READA)
-		rw = READ;
+	rw = bio_data_dir(bio);
 
 	bio_for_each_segment(bvec, bio, iter) {
 		unsigned int len = bvec.bv_len;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 787536a0ee7c..66b8e4bb74d8 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -219,7 +219,6 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
 {
 	const unsigned s = req->rq_state;
 	struct drbd_device *device = req->device;
-	int rw;
 	int error, ok;
 
 	/* we must not complete the master bio, while it is
@@ -243,8 +242,6 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
 		return;
 	}
 
-	rw = bio_rw(req->master_bio);
-
 	/*
 	 * figure out whether to report success or failure.
 	 *
@@ -268,7 +265,7 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
 	 * epoch number. If they match, increase the current_tle_nr,
 	 * and reset the transfer log epoch write_cnt.
 	 */
-	if (rw == WRITE &&
+	if (op_is_write(bio_op(req->master_bio)) &&
 	    req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr))
 		start_new_tl_epoch(first_peer_device(device)->connection);
 
@@ -285,11 +282,14 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
 	 * because no path was available, in which case
 	 * it was not even added to the transfer_log.
 	 *
-	 * READA may fail, and will not be retried.
+	 * read-ahead may fail, and will not be retried.
 	 *
 	 * WRITE should have used all available paths already.
 	 */
-	if (!ok && rw == READ && !list_empty(&req->tl_requests))
+	if (!ok &&
+	    bio_op(req->master_bio) == REQ_OP_READ &&
+	    !(req->master_bio->bi_rw & REQ_RAHEAD) &&
+	    !list_empty(&req->tl_requests))
 		req->rq_state |= RQ_POSTPONED;
 
 	if (!(req->rq_state & RQ_POSTPONED)) {
@@ -645,7 +645,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 			__drbd_chk_io_error(device, DRBD_READ_ERROR);
 		/* fall through. */
 	case READ_AHEAD_COMPLETED_WITH_ERROR:
-		/* it is legal to fail READA, no __drbd_chk_io_error in that case. */
+		/* it is legal to fail read-ahead, no __drbd_chk_io_error in that case. */
 		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
 		break;
 
@@ -657,7 +657,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		break;
 
 	case QUEUE_FOR_NET_READ:
-		/* READ or READA, and
+		/* READ, and
 		 * no local disk,
 		 * or target area marked as invalid,
 		 * or just got an io-error. */
@@ -1172,7 +1172,14 @@ drbd_submit_req_private_bio(struct drbd_request *req)
 {
 	struct drbd_device *device = req->device;
 	struct bio *bio = req->private_bio;
-	const int rw = bio_rw(bio);
+	unsigned int type;
+
+	if (bio_op(bio) != REQ_OP_READ)
+		type = DRBD_FAULT_DT_WR;
+	else if (bio->bi_rw & REQ_RAHEAD)
+		type = DRBD_FAULT_DT_RA;
+	else
+		type = DRBD_FAULT_DT_RD;
 
 	bio->bi_bdev = device->ldev->backing_bdev;
 
@@ -1182,10 +1189,7 @@ drbd_submit_req_private_bio(struct drbd_request *req)
 	 * stable storage, and this is a WRITE, we may not even submit
 	 * this bio. */
 	if (get_ldev(device)) {
-		if (drbd_insert_fault(device,
-				      rw == WRITE ? DRBD_FAULT_DT_WR
-				    : rw == READ ? DRBD_FAULT_DT_RD
-				    : DRBD_FAULT_DT_RA))
+		if (drbd_insert_fault(device, type))
 			bio_io_error(bio);
 		else if (bio_op(bio) == REQ_OP_DISCARD)
 			drbd_process_discard_req(req);
@@ -1278,7 +1282,7 @@ static bool may_do_writes(struct drbd_device *device)
 static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
 {
 	struct drbd_resource *resource = device->resource;
-	const int rw = bio_rw(req->master_bio);
+	const int rw = bio_data_dir(req->master_bio);
 	struct bio_and_error m = { NULL, };
 	bool no_remote = false;
 	bool submit_private_bio = false;
@@ -1308,7 +1312,7 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
 		goto out;
 	}
 
-	/* We fail READ/READA early, if we can not serve it.
+	/* We fail READ early, if we can not serve it.
 	 * We must do this before req is registered on any lists.
 	 * Otherwise, drbd_req_complete() will queue failed READ for retry. */
 	if (rw != WRITE) {
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index b3fa5575bc0e..35dbb3dca47e 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -248,18 +248,26 @@ void drbd_request_endio(struct bio *bio)
 
 	/* to avoid recursion in __req_mod */
 	if (unlikely(bio->bi_error)) {
-		if (bio_op(bio) == REQ_OP_DISCARD)
-			what = (bio->bi_error == -EOPNOTSUPP)
-				? DISCARD_COMPLETED_NOTSUPP
-				: DISCARD_COMPLETED_WITH_ERROR;
-		else
-			what = (bio_data_dir(bio) == WRITE)
-			? WRITE_COMPLETED_WITH_ERROR
-			: (bio_rw(bio) == READ)
-			  ? READ_COMPLETED_WITH_ERROR
-			  : READ_AHEAD_COMPLETED_WITH_ERROR;
-	} else
+		switch (bio_op(bio)) {
+		case REQ_OP_DISCARD:
+			if (bio->bi_error == -EOPNOTSUPP)
+				what = DISCARD_COMPLETED_NOTSUPP;
+			else
+				what = DISCARD_COMPLETED_WITH_ERROR;
+			break;
+		case REQ_OP_READ:
+			if (bio->bi_rw & REQ_RAHEAD)
+				what = READ_AHEAD_COMPLETED_WITH_ERROR;
+			else
+				what = READ_COMPLETED_WITH_ERROR;
+			break;
+		default:
+			what = WRITE_COMPLETED_WITH_ERROR;
+			break;
+		}
+	} else {
 		what = COMPLETED_OK;
+	}
 
 	bio_put(req->private_bio);
 	req->private_bio = ERR_PTR(bio->bi_error);
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index cab97593ba54..75a7f88d6717 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -448,7 +448,7 @@ static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 	struct request *rq;
 	struct bio *bio = rqd->bio;
 
-	rq = blk_mq_alloc_request(q, bio_rw(bio), 0);
+	rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
 	if (IS_ERR(rq))
 		return -ENOMEM;
 
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 4b3ba74e9d22..d0a3e6d4515f 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -344,7 +344,6 @@ static int add_bio(struct cardinfo *card)
 	int offset;
 	struct bio *bio;
 	struct bio_vec vec;
-	int rw;
 
 	bio = card->currentbio;
 	if (!bio && card->bio) {
@@ -359,7 +358,6 @@ static int add_bio(struct cardinfo *card)
 	if (!bio)
 		return 0;
 
-	rw = bio_rw(bio);
 	if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE)
 		return 0;
 
@@ -369,7 +367,7 @@ static int add_bio(struct cardinfo *card)
 				       vec.bv_page,
 				       vec.bv_offset,
 				       vec.bv_len,
-				       (rw == READ) ?
+				       bio_op(bio) == REQ_OP_READ ?
 				       PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
 
 	p = &card->mm_pages[card->Ready];
@@ -398,7 +396,7 @@ static int add_bio(struct cardinfo *card)
 				 DMASCR_CHAIN_EN |
 				 DMASCR_SEM_EN |
 				 pci_cmds);
-	if (rw == WRITE)
+	if (bio_op(bio) == REQ_OP_WRITE)
 		desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ);
 	desc->sem_control_bits = desc->control_bits;
 
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index fa1ab0421489..37fcaadbf80c 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -853,14 +853,14 @@ static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
 			return NVM_IO_ERR;
 		}
 
-		if (bio_rw(bio) == WRITE)
+		if (bio_op(bio) == REQ_OP_WRITE)
 			return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
 								npages);
 
 		return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
 	}
 
-	if (bio_rw(bio) == WRITE)
+	if (bio_op(bio) == REQ_OP_WRITE)
 		return rrpc_write_rq(rrpc, bio, rqd, flags);
 
 	return rrpc_read_rq(rrpc, bio, rqd, flags);
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9f5f460c0e92..dac55b254a09 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -528,7 +528,7 @@ static void read_callback(unsigned long error, void *context)
 		DMWARN_LIMIT("Read failure on mirror device %s. "
 			     "Trying alternative device.",
 			     m->dev->name);
-		queue_bio(m->ms, bio, bio_rw(bio));
+		queue_bio(m->ms, bio, bio_data_dir(bio));
 		return;
 	}
 
@@ -1193,7 +1193,7 @@ static void mirror_dtr(struct dm_target *ti)
  */
 static int mirror_map(struct dm_target *ti, struct bio *bio)
 {
-	int r, rw = bio_rw(bio);
+	int r, rw = bio_data_dir(bio);
 	struct mirror *m;
 	struct mirror_set *ms = ti->private;
 	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
@@ -1217,7 +1217,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
 	 * If region is not in-sync queue the bio.
 	 */
 	if (!r || (r == -EWOULDBLOCK)) {
-		if (rw == READA)
+		if (bio->bi_rw & REQ_RAHEAD)
 			return -EWOULDBLOCK;
 
 		queue_bio(ms, bio, rw);
@@ -1242,7 +1242,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
 
 static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
 {
-	int rw = bio_rw(bio);
+	int rw = bio_data_dir(bio);
 	struct mirror_set *ms = (struct mirror_set *) ti->private;
 	struct mirror *m = NULL;
 	struct dm_bio_details *bd = NULL;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 69ab1ff5f5c9..cc2f14b42ba4 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1696,7 +1696,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 	 * to copy an exception */
 	down_write(&s->lock);
 
-	if (!s->valid || (unlikely(s->snapshot_overflowed) && bio_rw(bio) == WRITE)) {
+	if (!s->valid || (unlikely(s->snapshot_overflowed) &&
+			  bio_data_dir(bio) == WRITE)) {
 		r = -EIO;
 		goto out_unlock;
 	}
@@ -1713,7 +1714,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 		 * flags so we should only get this if we are
 		 * writeable.
 		 */
-		if (bio_rw(bio) == WRITE) {
+		if (bio_data_dir(bio) == WRITE) {
 			pe = __lookup_pending_exception(s, chunk);
 			if (!pe) {
 				up_write(&s->lock);
@@ -1819,7 +1820,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
 	e = dm_lookup_exception(&s->complete, chunk);
 	if (e) {
 		/* Queue writes overlapping with chunks being merged */
-		if (bio_rw(bio) == WRITE &&
+		if (bio_data_dir(bio) == WRITE &&
 		    chunk >= s->first_merging_chunk &&
 		    chunk < (s->first_merging_chunk +
 			     s->num_merging_chunks)) {
@@ -1831,7 +1832,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
 
 		remap_exception(s, e, bio, chunk);
 
-		if (bio_rw(bio) == WRITE)
+		if (bio_data_dir(bio) == WRITE)
 			track_chunk(s, bio, chunk);
 		goto out_unlock;
 	}
@@ -1839,7 +1840,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
 redirect_to_origin:
 	bio->bi_bdev = s->origin->bdev;
 
-	if (bio_rw(bio) == WRITE) {
+	if (bio_data_dir(bio) == WRITE) {
 		up_write(&s->lock);
 		return do_origin(s->origin, bio);
 	}
@@ -2288,7 +2289,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
 	if (unlikely(bio->bi_rw & REQ_PREFLUSH))
 		return DM_MAPIO_REMAPPED;
 
-	if (bio_rw(bio) != WRITE)
+	if (bio_data_dir(bio) != WRITE)
 		return DM_MAPIO_REMAPPED;
 
 	available_sectors = o->split_boundary -
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index 766bc93006e6..618b8752dcf1 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -35,16 +35,19 @@ static int zero_ctr(struct dm_target *ti, unsigned int argc, char **argv)
  */
 static int zero_map(struct dm_target *ti, struct bio *bio)
 {
-	switch(bio_rw(bio)) {
-	case READ:
+	switch (bio_op(bio)) {
+	case REQ_OP_READ:
+		if (bio->bi_rw & REQ_RAHEAD) {
+			/* readahead of null bytes only wastes buffer cache */
+			return -EIO;
+		}
 		zero_fill_bio(bio);
 		break;
-	case READA:
-		/* readahead of null bytes only wastes buffer cache */
-		return -EIO;
-	case WRITE:
+	case REQ_OP_WRITE:
 		/* writes get silently dropped */
 		break;
+	default:
+		return -EIO;
 	}
 
 	bio_endio(bio);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index aba7ed9abb3a..812fd5984eea 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1833,7 +1833,7 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
 	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
 		dm_put_live_table(md, srcu_idx);
 
-		if (bio_rw(bio) != READA)
+		if (!(bio->bi_rw & REQ_RAHEAD))
 			queue_io(md, bio);
 		else
 			bio_io_error(bio);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 41d9c31da3b3..4e6da4497553 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1105,7 +1105,7 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
 	bitmap = mddev->bitmap;
 
 	/*
-	 * make_request() can abort the operation when READA is being
+	 * make_request() can abort the operation when read-ahead is being
 	 * used and no empty request is available.
 	 *
 	 */
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7aacf5b55e15..6953d78297b0 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5233,7 +5233,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
 				(unsigned long long)logical_sector);
 
 		sh = raid5_get_active_stripe(conf, new_sector, previous,
-				       (bi->bi_rw&RWA_MASK), 0);
+				       (bi->bi_rw & REQ_RAHEAD), 0);
 		if (sh) {
 			if (unlikely(previous)) {
 				/* expansion might have moved on while waiting for a
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 97fe6109c98f..63f483daf930 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -500,7 +500,7 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 	struct bio *bio = rqd->bio;
 	struct nvme_nvm_command *cmd;
 
-	rq = blk_mq_alloc_request(q, bio_rw(bio), 0);
+	rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
 	if (IS_ERR(rq))
 		return -ENOMEM;
 
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index b677930a8a3b..56ae8ac32708 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -336,7 +336,6 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
 static blk_qc_t loop_make_request(struct request_queue *q, struct bio *old_bio)
 {
 	struct lloop_device *lo = q->queuedata;
-	int rw = bio_rw(old_bio);
 	int inactive;
 
 	blk_queue_split(q, &old_bio, q->bio_split);
@@ -354,13 +353,15 @@ static blk_qc_t loop_make_request(struct request_queue *q, struct bio *old_bio)
 	if (inactive)
 		goto err;
 
-	if (rw == WRITE) {
+	switch (bio_op(old_bio)) {
+	case REQ_OP_WRITE:
 		if (lo->lo_flags & LO_FLAGS_READ_ONLY)
 			goto err;
-	} else if (rw == READA) {
-		rw = READ;
-	} else if (rw != READ) {
-		CERROR("lloop: unknown command (%x)\n", rw);
+		break;
+	case REQ_OP_READ:
+		break;
+	default:
+		CERROR("lloop: unknown command (%x)\n", bio_op(old_bio));
 		goto err;
 	}
 	loop_add_bio(lo, old_bio);
diff --git a/fs/buffer.c b/fs/buffer.c
index 373aacb4f4c1..c24201e78492 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -153,7 +153,7 @@ static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
 	if (uptodate) {
 		set_buffer_uptodate(bh);
 	} else {
-		/* This happens, due to failed READA attempts. */
+		/* This happens, due to failed read-ahead attempts. */
 		clear_buffer_uptodate(bh);
 	}
 	unlock_buffer(bh);
@@ -1395,7 +1395,7 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
 {
 	struct buffer_head *bh = __getblk(bdev, block, size);
 	if (likely(bh)) {
-		ll_rw_block(REQ_OP_READ, READA, 1, &bh);
+		ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh);
 		brelse(bh);
 	}
 }
@@ -3052,14 +3052,14 @@ EXPORT_SYMBOL(submit_bh);
 /**
  * ll_rw_block: low-level access to block devices (DEPRECATED)
  * @op: whether to %READ or %WRITE
- * @op_flags: rq_flag_bits or %READA (readahead)
+ * @op_flags: rq_flag_bits
  * @nr: number of &struct buffer_heads in the array
  * @bhs: array of pointers to &struct buffer_head
  *
  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
- * requests an I/O operation on them, either a %READ or a %WRITE.  The third
- * %READA option is described in the documentation for generic_make_request()
- * which ll_rw_block() calls.
+ * requests an I/O operation on them, either a %REQ_OP_READ or a %REQ_OP_WRITE.
+ * @op_flags contains flags modifying the detailed I/O behavior, most notably
+ * %REQ_RAHEAD.
  *
  * This function drops any buffer that it cannot get a lock on (with the
  * BH_Lock state bit), any buffer that appears to be clean when doing a write
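
Per the updated kerneldoc, read-ahead is now requested by passing REQ_RAHEAD in @op_flags instead of the removed READA value. A caller-side sketch under the same assumptions; the wrapper readahead_pair() is hypothetical, and the buffer_head references come from elsewhere (e.g. __getblk()):

#include <linux/buffer_head.h>

/* Sketch only: batched read-ahead through the (deprecated) ll_rw_block()
 * interface after this patch. */
static void readahead_pair(struct buffer_head *bh0, struct buffer_head *bh1)
{
	struct buffer_head *bhs[] = { bh0, bh1 };

	/* was: ll_rw_block(REQ_OP_READ, READA, 2, bhs); */
	ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 2, bhs);
}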
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index b6d600e91f39..124b4a3017b5 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -159,7 +159,7 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
 		.sbi = sbi,
 		.type = META,
 		.op = REQ_OP_READ,
-		.op_flags = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : READA,
+		.op_flags = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : REQ_RAHEAD,
 		.encrypted_page = NULL,
 	};
 	struct blk_plug plug;
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 3649d86bb431..f06ed73adf99 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -733,7 +733,8 @@ next_step:
 
 			start_bidx = start_bidx_of_node(nofs, inode);
 			data_page = get_read_data_page(inode,
-					start_bidx + ofs_in_node, READA, true);
+					start_bidx + ofs_in_node, REQ_RAHEAD,
+					true);
 			if (IS_ERR(data_page)) {
 				iput(inode);
 				continue;
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index e53403987f6d..d1867698e601 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1119,7 +1119,7 @@ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
 	if (!apage)
 		return;
 
-	err = read_node_page(apage, READA);
+	err = read_node_page(apage, REQ_RAHEAD);
 	f2fs_put_page(apage, err ? 1 : 0);
 }
 
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index fd6389cf0f14..6e2bec1cd289 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -285,7 +285,8 @@ static void gfs2_metapath_ra(struct gfs2_glock *gl,
 		if (trylock_buffer(rabh)) {
 			if (!buffer_uptodate(rabh)) {
 				rabh->b_end_io = end_buffer_read_sync;
-				submit_bh(REQ_OP_READ, READA | REQ_META, rabh);
+				submit_bh(REQ_OP_READ, REQ_RAHEAD | REQ_META,
+					  rabh);
 				continue;
 			}
 			unlock_buffer(rabh);
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 0fbb42679cef..f077cf5796ee 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1513,7 +1513,7 @@ static void gfs2_dir_readahead(struct inode *inode, unsigned hsize, u32 index,
 			continue;
 		}
 		bh->b_end_io = end_buffer_read_sync;
-		submit_bh(REQ_OP_READ, READA | REQ_META, bh);
+		submit_bh(REQ_OP_READ, REQ_RAHEAD | REQ_META, bh);
 		continue;
 	}
 	brelse(bh);
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 052c1132e5b6..950b8be68e41 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -459,7 +459,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
 		bh = gfs2_getbuf(gl, dblock, CREATE);
 
 		if (!buffer_uptodate(bh) && !buffer_locked(bh))
-			ll_rw_block(REQ_OP_READ, READA | REQ_META, 1, &bh);
+			ll_rw_block(REQ_OP_READ, REQ_RAHEAD | REQ_META, 1, &bh);
 		brelse(bh);
 		dblock++;
 		extlen--;
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 64b29b592d86..4032d1e87c8f 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -551,7 +551,7 @@ static int search_by_key_reada(struct super_block *s,
 		if (!buffer_uptodate(bh[j])) {
 			if (depth == -1)
 				depth = reiserfs_write_unlock_nested(s);
-			ll_rw_block(REQ_OP_READ, READA, 1, bh + j);
+			ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, bh + j);
 		}
 		brelse(bh[j]);
 	}
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index 80c8a21daed9..aaec13c95253 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -113,7 +113,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
 			brelse(tmp);
 	}
 	if (num) {
-		ll_rw_block(REQ_OP_READ, READA, num, bha);
+		ll_rw_block(REQ_OP_READ, REQ_RAHEAD, num, bha);
 		for (i = 0; i < num; i++)
 			brelse(bha[i]);
 	}
diff --git a/fs/udf/directory.c b/fs/udf/directory.c
index 71f3e0b5b8ab..988d5352bdb8 100644
--- a/fs/udf/directory.c
+++ b/fs/udf/directory.c
@@ -87,7 +87,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
 			brelse(tmp);
 	}
 	if (num) {
-		ll_rw_block(REQ_OP_READ, READA, num, bha);
+		ll_rw_block(REQ_OP_READ, REQ_RAHEAD, num, bha);
 		for (i = 0; i < num; i++)
 			brelse(bha[i]);
 	}
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 183024525d40..dc488662ce0b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -178,9 +178,6 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
  * READ_SYNC		A synchronous read. Device is not plugged, caller can
  *			immediately wait on this read without caring about
  *			unplugging.
- * READA		Used for read-ahead operations. Lower priority, and the
- *			block layer could (in theory) choose to ignore this
- *			request if it runs into resource problems.
  * WRITE		A normal async write. Device will be plugged.
  * WRITE_SYNC		Synchronous write. Identical to WRITE, but passes down
  *			the hint that someone will be waiting on this IO
@@ -195,11 +192,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
  *
  */
 #define RW_MASK			REQ_OP_WRITE
-#define RWA_MASK		REQ_RAHEAD
 
 #define READ			REQ_OP_READ
-#define WRITE			RW_MASK
-#define READA			RWA_MASK
+#define WRITE			REQ_OP_WRITE
 
 #define READ_SYNC		REQ_SYNC
 #define WRITE_SYNC		(REQ_SYNC | REQ_NOIDLE)
@@ -2471,17 +2466,6 @@ static inline bool op_is_write(unsigned int op)
 }
 
 /*
- * return READ, READA, or WRITE
- */
-static inline int bio_rw(struct bio *bio)
-{
-	if (op_is_write(bio_op(bio)))
-		return WRITE;
-
-	return bio->bi_rw & RWA_MASK;
-}
-
-/*
  * return data direction, READ or WRITE
  */
 static inline int bio_data_dir(struct bio *bio)
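
With bio_rw() removed, the two questions it used to answer are asked separately: bio_data_dir() reports only the data direction, and read-ahead is a flag on a read. A hedged sketch of the distinction; the bio_data_dir() body below is paraphrased from this version of fs.h (treat it as an assumption, not a quotation), and the sketch_ helpers are hypothetical:

#include <linux/bio.h>
#include <linux/fs.h>

/* Assumed shape of the surviving helper: direction only, no READA case. */
static inline int sketch_bio_data_dir(struct bio *bio)
{
	return op_is_write(bio_op(bio)) ? WRITE : READ;
}

/* Read-ahead is now an attribute of a read, not a third direction. */
static inline bool sketch_bio_is_readahead(struct bio *bio)
{
	return bio_op(bio) == REQ_OP_READ && (bio->bi_rw & REQ_RAHEAD);
}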
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 878963a1f058..ff95fd02116f 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -55,7 +55,7 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
 		{ IPU,	"IN-PLACE" },					\
 		{ OPU,	"OUT-OF-PLACE" })
 
-#define F2FS_BIO_FLAG_MASK(t)	(t & (READA | WRITE_FLUSH_FUA))
+#define F2FS_BIO_FLAG_MASK(t)	(t & (REQ_RAHEAD | WRITE_FLUSH_FUA))
 #define F2FS_BIO_EXTRA_MASK(t)	(t & (REQ_META | REQ_PRIO))
 
 #define show_bio_type(op, op_flags)	show_bio_op(op),		\
@@ -68,7 +68,7 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
 
 #define show_bio_op_flags(flags)					\
 	__print_symbolic(F2FS_BIO_FLAG_MASK(flags),			\
-		{ READA,	"READAHEAD" },				\
+		{ REQ_RAHEAD,	"READAHEAD" },				\
 		{ READ_SYNC,	"READ_SYNC" },				\
 		{ WRITE_SYNC,	"WRITE_SYNC" },				\
 		{ WRITE_FLUSH,	"WRITE_FLUSH" },			\