author    | Mike Christie <mchristi@redhat.com> | 2016-06-05 15:32:07 -0400
committer | Jens Axboe <axboe@fb.com>           | 2016-06-07 15:41:38 -0400
commit    | 796a5cf083c2631180ad209c3ebb7d11d776cd72 (patch)
tree      | 7724b594e515b7ded26eb00aaa054e716e50e9db /drivers/md/raid10.c
parent    | bb3cc85e16431bb4b19606cbcf5fca8cce4e5200 (diff)
md: use bio op accessors
Separate the op from the rq_flag_bits and have md set/get the
bio op using bio_set_op_attrs/bio_op.
Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers/md/raid10.c')
-rw-r--r-- | drivers/md/raid10.c | 48
1 file changed, 23 insertions, 25 deletions
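
The patch below is a mechanical conversion that follows one pattern: stop OR-ing the operation into bi_rw alongside the rq_flag_bits, and go through the accessors instead. Here is a minimal illustrative sketch of that pattern; the two helper functions are hypothetical and exist only for illustration, while bio_op(), bio_set_op_attrs() and the REQ_OP_* constants are the accessors the patch actually switches to.

#include <linux/types.h>
#include <linux/bio.h>
#include <linux/blk_types.h>

/* Hypothetical helpers, not part of this patch; they only sketch the
 * accessor pattern the raid10 code is converted to below.
 */

/* Before: bio->bi_rw = WRITE | do_sync | do_fua | do_sec;
 * After:  set the op and its flag bits together via the accessor.
 */
static void example_prep_write(struct bio *bio, unsigned int op_flags)
{
	bio_set_op_attrs(bio, REQ_OP_WRITE, op_flags);
}

/* Before: if (bio->bi_rw & REQ_DISCARD) ...
 * After:  read the op back with bio_op() and compare against REQ_OP_*.
 */
static bool example_is_discard(struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD;
}

The sync_page_io() calls change shape for the same reason: the operation and the op flags become separate arguments instead of one combined rw value.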
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 0be6497d8e34..615045a11bac 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -865,7 +865,7 @@ static void flush_pending_writes(struct r10conf *conf)
 		while (bio) { /* submit pending writes */
 			struct bio *next = bio->bi_next;
 			bio->bi_next = NULL;
-			if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+			if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
 			    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
 				/* Just ignore it */
 				bio_endio(bio);
@@ -1041,7 +1041,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
 	while (bio) { /* submit pending writes */
 		struct bio *next = bio->bi_next;
 		bio->bi_next = NULL;
-		if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+		if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
 		    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
 			/* Just ignore it */
 			bio_endio(bio);
@@ -1058,12 +1058,11 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
 	struct r10bio *r10_bio;
 	struct bio *read_bio;
 	int i;
+	const int op = bio_op(bio);
 	const int rw = bio_data_dir(bio);
 	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
 	const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
-	const unsigned long do_discard = (bio->bi_rw
-					  & (REQ_DISCARD | REQ_SECURE));
-	const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
+	const unsigned long do_sec = (bio->bi_rw & REQ_SECURE);
 	unsigned long flags;
 	struct md_rdev *blocked_rdev;
 	struct blk_plug_cb *cb;
@@ -1156,7 +1155,7 @@ read_again:
 			choose_data_offset(r10_bio, rdev);
 		read_bio->bi_bdev = rdev->bdev;
 		read_bio->bi_end_io = raid10_end_read_request;
-		read_bio->bi_rw = READ | do_sync;
+		bio_set_op_attrs(read_bio, op, do_sync);
 		read_bio->bi_private = r10_bio;
 
 		if (max_sectors < r10_bio->sectors) {
@@ -1363,8 +1362,7 @@ retry_write:
 							      rdev));
 			mbio->bi_bdev = rdev->bdev;
 			mbio->bi_end_io = raid10_end_write_request;
-			mbio->bi_rw =
-				WRITE | do_sync | do_fua | do_discard | do_same;
+			bio_set_op_attrs(mbio, op, do_sync | do_fua | do_sec);
 			mbio->bi_private = r10_bio;
 
 			atomic_inc(&r10_bio->remaining);
@@ -1406,8 +1404,7 @@ retry_write:
 							   r10_bio, rdev));
 			mbio->bi_bdev = rdev->bdev;
 			mbio->bi_end_io = raid10_end_write_request;
-			mbio->bi_rw =
-				WRITE | do_sync | do_fua | do_discard | do_same;
+			bio_set_op_attrs(mbio, op, do_sync | do_fua | do_sec);
 			mbio->bi_private = r10_bio;
 
 			atomic_inc(&r10_bio->remaining);
@@ -1992,10 +1989,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 
 		tbio->bi_vcnt = vcnt;
 		tbio->bi_iter.bi_size = fbio->bi_iter.bi_size;
-		tbio->bi_rw = WRITE;
 		tbio->bi_private = r10_bio;
 		tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
 		tbio->bi_end_io = end_sync_write;
+		bio_set_op_attrs(tbio, REQ_OP_WRITE, 0);
 
 		bio_copy_data(tbio, fbio);
 
@@ -2078,7 +2075,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
 				  addr,
 				  s << 9,
 				  bio->bi_io_vec[idx].bv_page,
-				  READ, false);
+				  REQ_OP_READ, 0, false);
 		if (ok) {
 			rdev = conf->mirrors[dw].rdev;
 			addr = r10_bio->devs[1].addr + sect;
@@ -2086,7 +2083,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
 					  addr,
 					  s << 9,
 					  bio->bi_io_vec[idx].bv_page,
-					  WRITE, false);
+					  REQ_OP_WRITE, 0, false);
 			if (!ok) {
 				set_bit(WriteErrorSeen, &rdev->flags);
 				if (!test_and_set_bit(WantReplacement,
@@ -2213,7 +2210,7 @@ static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
 	if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
 	    && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
 		return -1;
-	if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
+	if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
 		/* success */
 		return 1;
 	if (rw == WRITE) {
@@ -2299,7 +2296,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
 						       r10_bio->devs[sl].addr +
 						       sect,
 						       s<<9,
-						       conf->tmppage, READ, false);
+						       conf->tmppage,
+						       REQ_OP_READ, 0, false);
 			rdev_dec_pending(rdev, mddev);
 			rcu_read_lock();
 			if (success)
@@ -2474,7 +2472,7 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
 				   choose_data_offset(r10_bio, rdev) +
 				   (sector - r10_bio->sector));
 		wbio->bi_bdev = rdev->bdev;
-		wbio->bi_rw = WRITE;
+		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
 
 		if (submit_bio_wait(wbio) < 0)
 			/* Failure! */
@@ -2550,7 +2548,7 @@ read_more:
 	bio->bi_iter.bi_sector = r10_bio->devs[slot].addr
 		+ choose_data_offset(r10_bio, rdev);
 	bio->bi_bdev = rdev->bdev;
-	bio->bi_rw = READ | do_sync;
+	bio_set_op_attrs(bio, REQ_OP_READ, do_sync);
 	bio->bi_private = r10_bio;
 	bio->bi_end_io = raid10_end_read_request;
 	if (max_sectors < r10_bio->sectors) {
@@ -3040,7 +3038,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 				biolist = bio;
 				bio->bi_private = r10_bio;
 				bio->bi_end_io = end_sync_read;
-				bio->bi_rw = READ;
+				bio_set_op_attrs(bio, REQ_OP_READ, 0);
 				from_addr = r10_bio->devs[j].addr;
 				bio->bi_iter.bi_sector = from_addr +
 					rdev->data_offset;
@@ -3066,7 +3064,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 					biolist = bio;
 					bio->bi_private = r10_bio;
 					bio->bi_end_io = end_sync_write;
-					bio->bi_rw = WRITE;
+					bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 					bio->bi_iter.bi_sector = to_addr
 						+ rdev->data_offset;
 					bio->bi_bdev = rdev->bdev;
@@ -3095,7 +3093,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 					biolist = bio;
 					bio->bi_private = r10_bio;
 					bio->bi_end_io = end_sync_write;
-					bio->bi_rw = WRITE;
+					bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 					bio->bi_iter.bi_sector = to_addr +
 						rdev->data_offset;
 					bio->bi_bdev = rdev->bdev;
@@ -3215,7 +3213,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 			biolist = bio;
 			bio->bi_private = r10_bio;
 			bio->bi_end_io = end_sync_read;
-			bio->bi_rw = READ;
+			bio_set_op_attrs(bio, REQ_OP_READ, 0);
 			bio->bi_iter.bi_sector = sector +
 				conf->mirrors[d].rdev->data_offset;
 			bio->bi_bdev = conf->mirrors[d].rdev->bdev;
@@ -3237,7 +3235,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 			biolist = bio;
 			bio->bi_private = r10_bio;
 			bio->bi_end_io = end_sync_write;
-			bio->bi_rw = WRITE;
+			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 			bio->bi_iter.bi_sector = sector +
 				conf->mirrors[d].replacement->data_offset;
 			bio->bi_bdev = conf->mirrors[d].replacement->bdev;
@@ -4322,7 +4320,7 @@ read_more:
 			       + rdev->data_offset);
 	read_bio->bi_private = r10_bio;
 	read_bio->bi_end_io = end_sync_read;
-	read_bio->bi_rw = READ;
+	bio_set_op_attrs(read_bio, REQ_OP_READ, 0);
 	read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
 	read_bio->bi_error = 0;
 	read_bio->bi_vcnt = 0;
@@ -4356,7 +4354,7 @@ read_more:
 			rdev2->new_data_offset;
 		b->bi_private = r10_bio;
 		b->bi_end_io = end_reshape_write;
-		b->bi_rw = WRITE;
+		bio_set_op_attrs(b, REQ_OP_WRITE, 0);
 		b->bi_next = blist;
 		blist = b;
 	}
@@ -4524,7 +4522,7 @@ static int handle_reshape_read_error(struct mddev *mddev,
 				  addr,
 				  s << 9,
 				  bvec[idx].bv_page,
-				  READ, false);
+				  REQ_OP_READ, 0, false);
 		if (success)
 			break;
 	failed: