author     Linus Torvalds <torvalds@linux-foundation.org>  2016-07-26 18:03:07 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-07-26 18:03:07 -0400
commit     d05d7f40791ccbb6e543cc5dd6a6aa08fc71d635 (patch)
tree       dc0039fe490a41a70de10d58fe8e6136db46463a
parent     75a442efb1ca613f8d1cc71a32c2c9b0aefae4a5 (diff)
parent     17007f3994cdb4643355c73f54f0adad006cf59e (diff)
Merge branch 'for-4.8/core' of git://git.kernel.dk/linux-block
Pull core block updates from Jens Axboe:
- the big change is the cleanup from Mike Christie, cleaning up our
  uses of command types and modified flags (a short sketch of the new
  interface follows this list). This is what will throw some merge
  conflicts
- regression fix for the above for btrfs, from Vincent
- following up to the above, better packing of struct request from
Christoph
- a 2038 fix for blktrace from Arnd
- a few trivial/spelling fixes from Bart Van Assche
- a front merge check fix from Damien, which could cause issues on
SMR drives
- Atari partition fix from Gabriel
- convert cfq to highres timers, since jiffies isn't granular enough
for some devices these days. From Jan and Jeff
- a CFQ priority boost fix for idle classes, from me
- cleanup series from Ming, improving our bio/bvec iteration
- a direct issue fix for blk-mq from Omar
- fix for plug merging not involving the IO scheduler, like we do for
other types of merges. From Tahsin
- expose DAX type internally and through sysfs. From Toshi and Yigal
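
To make the first item concrete: before this series, callers passed the
direction and flags as an argument to submit_bio(); afterwards the operation
lives in the bio itself and is queried with bio_op()/req_op(). A minimal
caller-side sketch, using only the interfaces visible in the diff below (the
surrounding setup and the handle_discard() helper are illustrative, not
taken from any one driver):

	/* old world: the op travelled in the rw argument */
	/*   submit_bio(WRITE | REQ_SYNC, bio);          */

	/* new world: set the op and flags on the bio, then submit */
	bio->bi_bdev = bdev;
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC);
	submit_bio(bio);

	/* tests against bi_rw bits become op comparisons, e.g. */
	if (bio_op(bio) == REQ_OP_DISCARD)
		handle_discard(bio);	/* hypothetical helper */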
* 'for-4.8/core' of git://git.kernel.dk/linux-block: (76 commits)
block: Fix front merge check
block: do not merge requests without consulting with io scheduler
block: Fix spelling in a source code comment
block: expose QUEUE_FLAG_DAX in sysfs
block: add QUEUE_FLAG_DAX for devices to advertise their DAX support
Btrfs: fix comparison in __btrfs_map_block()
block: atari: Return early for unsupported sector size
Doc: block: Fix a typo in queue-sysfs.txt
cfq-iosched: Charge at least 1 jiffie instead of 1 ns
cfq-iosched: Fix regression in bonnie++ rewrite performance
cfq-iosched: Convert slice_resid from u64 to s64
block: Convert fifo_time from ulong to u64
blktrace: avoid using timespec
block/blk-cgroup.c: Declare local symbols static
block/bio-integrity.c: Add #include "blk.h"
block/partition-generic.c: Remove a set-but-not-used variable
block: bio: kill BIO_MAX_SIZE
cfq-iosched: temporarily boost queue priority for idle classes
block: drbd: avoid to use BIO_MAX_SIZE
block: bio: remove BIO_MAX_SECTORS
...
199 files changed, 1918 insertions, 1523 deletions
diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
index dce25d848d92..d515d58962b9 100644
--- a/Documentation/block/queue-sysfs.txt
+++ b/Documentation/block/queue-sysfs.txt
@@ -53,7 +53,7 @@ disk.
 
 logical_block_size (RO)
 -----------------------
-This is the logcal block size of the device, in bytes.
+This is the logical block size of the device, in bytes.
 
 max_hw_sectors_kb (RO)
 ----------------------
diff --git a/Documentation/block/writeback_cache_control.txt b/Documentation/block/writeback_cache_control.txt
index 59e0516cbf6b..8a6bdada5f6b 100644
--- a/Documentation/block/writeback_cache_control.txt
+++ b/Documentation/block/writeback_cache_control.txt
@@ -20,11 +20,11 @@ a forced cache flush, and the Force Unit Access (FUA) flag for requests.
 Explicit cache flushes
 ----------------------
 
-The REQ_FLUSH flag can be OR ed into the r/w flags of a bio submitted from
+The REQ_PREFLUSH flag can be OR ed into the r/w flags of a bio submitted from
 the filesystem and will make sure the volatile cache of the storage device
 has been flushed before the actual I/O operation is started.  This explicitly
 guarantees that previously completed write requests are on non-volatile
-storage before the flagged bio starts. In addition the REQ_FLUSH flag can be
+storage before the flagged bio starts. In addition the REQ_PREFLUSH flag can be
 set on an otherwise empty bio structure, which causes only an explicit cache
 flush without any dependent I/O.  It is recommend to use
 the blkdev_issue_flush() helper for a pure cache flush.
@@ -41,21 +41,21 @@ signaled after the data has been committed to non-volatile storage.
 Implementation details for filesystems
 --------------------------------------
 
-Filesystems can simply set the REQ_FLUSH and REQ_FUA bits and do not have to
+Filesystems can simply set the REQ_PREFLUSH and REQ_FUA bits and do not have to
 worry if the underlying devices need any explicit cache flushing and how
-the Forced Unit Access is implemented. The REQ_FLUSH and REQ_FUA flags
+the Forced Unit Access is implemented. The REQ_PREFLUSH and REQ_FUA flags
 may both be set on a single bio.
 
 
 Implementation details for make_request_fn based block drivers
 --------------------------------------------------------------
 
-These drivers will always see the REQ_FLUSH and REQ_FUA bits as they sit
+These drivers will always see the REQ_PREFLUSH and REQ_FUA bits as they sit
 directly below the submit_bio interface. For remapping drivers the REQ_FUA
 bits need to be propagated to underlying devices, and a global flush needs
-to be implemented for bios with the REQ_FLUSH bit set. For real device
-drivers that do not have a volatile cache the REQ_FLUSH and REQ_FUA bits
-on non-empty bios can simply be ignored, and REQ_FLUSH requests without
+to be implemented for bios with the REQ_PREFLUSH bit set. For real device
+drivers that do not have a volatile cache the REQ_PREFLUSH and REQ_FUA bits
+on non-empty bios can simply be ignored, and REQ_PREFLUSH requests without
 data can be completed successfully without doing any work. Drivers for
 devices with volatile caches need to implement the support for these
 flags themselves without any help from the block layer.
@@ -65,17 +65,17 @@ Implementation details for request_fn based block drivers
 --------------------------------------------------------------
 
 For devices that do not support volatile write caches there is no driver
-support required, the block layer completes empty REQ_FLUSH requests before
-entering the driver and strips off the REQ_FLUSH and REQ_FUA bits from
+support required, the block layer completes empty REQ_PREFLUSH requests before
+entering the driver and strips off the REQ_PREFLUSH and REQ_FUA bits from
 requests that have a payload. For devices with volatile write caches the
 driver needs to tell the block layer that it supports flushing caches by
 doing:
 
 	blk_queue_write_cache(sdkp->disk->queue, true, false);
 
-and handle empty REQ_FLUSH requests in its prep_fn/request_fn.  Note that
-REQ_FLUSH requests with a payload are automatically turned into a sequence
-of an empty REQ_FLUSH request followed by the actual write by the block
+and handle empty REQ_OP_FLUSH requests in its prep_fn/request_fn.  Note that
+REQ_PREFLUSH requests with a payload are automatically turned into a sequence
+of an empty REQ_OP_FLUSH request followed by the actual write by the block
 layer. For devices that also support the FUA bit the block layer needs
 to be told to pass through the REQ_FUA bit using:
 
@@ -83,4 +83,4 @@ to be told to pass through the REQ_FUA bit using:
 
 and the driver must handle write requests that have the REQ_FUA bit set
 in prep_fn/request_fn. If the FUA bit is not natively supported the block
-layer turns it into an empty REQ_FLUSH request after the actual write.
+layer turns it into an empty REQ_OP_FLUSH request after the actual write.
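
The "pure cache flush" case recommended above can be sketched with the
post-series interfaces; this mirrors the updated blkdev_issue_flush() body
later in this diff, with error handling elided:

	struct bio *bio = bio_alloc(GFP_KERNEL, 0);	/* bio with no data */

	bio->bi_bdev = bdev;
	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);	/* sets REQ_PREFLUSH */
	ret = submit_bio_wait(bio);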
diff --git a/Documentation/device-mapper/log-writes.txt b/Documentation/device-mapper/log-writes.txt
index c10f30c9b534..f4ebcbaf50f3 100644
--- a/Documentation/device-mapper/log-writes.txt
+++ b/Documentation/device-mapper/log-writes.txt
@@ -14,14 +14,14 @@ Log Ordering
 
 We log things in order of completion once we are sure the write is no longer in
 cache.  This means that normal WRITE requests are not actually logged until the
-next REQ_FLUSH request.  This is to make it easier for userspace to replay the
-log in a way that correlates to what is on disk and not what is in cache, to
-make it easier to detect improper waiting/flushing.
+next REQ_PREFLUSH request.  This is to make it easier for userspace to replay
+the log in a way that correlates to what is on disk and not what is in cache,
+to make it easier to detect improper waiting/flushing.
 
 This works by attaching all WRITE requests to a list once the write completes.
-Once we see a REQ_FLUSH request we splice this list onto the request and once
+Once we see a REQ_PREFLUSH request we splice this list onto the request and once
 the FLUSH request completes we log all of the WRITEs and then the FLUSH.  Only
-completed WRITEs, at the time the REQ_FLUSH is issued, are added in order to
+completed WRITEs, at the time the REQ_PREFLUSH is issued, are added in order to
 simulate the worst case scenario with regard to power failures.  Consider the
 following example (W means write, C means complete):
 
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 17e96dc29596..ef6b4d960bad 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -1286,7 +1286,7 @@ static void do_ubd_request(struct request_queue *q)
 
 		req = dev->request;
 
-		if (req->cmd_flags & REQ_FLUSH) {
+		if (req_op(req) == REQ_OP_FLUSH) {
 			io_req = kmalloc(sizeof(struct io_thread_req),
 					 GFP_ATOMIC);
 			if (io_req == NULL) {
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 711e4d8de6fa..15d37b1cd500 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -26,6 +26,7 @@
 #include <linux/bio.h>
 #include <linux/workqueue.h>
 #include <linux/slab.h>
+#include "blk.h"
 
 #define BIP_INLINE_VECS	4
 
diff --git a/block/bio.c b/block/bio.c
index 0e4aa42bc30d..848cd351513b 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -656,16 +656,15 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
 	bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
 	if (!bio)
 		return NULL;
-
 	bio->bi_bdev = bio_src->bi_bdev;
 	bio->bi_rw = bio_src->bi_rw;
 	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
 	bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
 
-	if (bio->bi_rw & REQ_DISCARD)
+	if (bio_op(bio) == REQ_OP_DISCARD)
 		goto integrity_clone;
 
-	if (bio->bi_rw & REQ_WRITE_SAME) {
+	if (bio_op(bio) == REQ_OP_WRITE_SAME) {
 		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
 		goto integrity_clone;
 	}
@@ -854,21 +853,20 @@ static void submit_bio_wait_endio(struct bio *bio)
 
 /**
  * submit_bio_wait - submit a bio, and wait until it completes
- * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
  * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 * bio_endio() on failure.
 */
-int submit_bio_wait(int rw, struct bio *bio)
+int submit_bio_wait(struct bio *bio)
 {
 	struct submit_bio_ret ret;
 
-	rw |= REQ_SYNC;
 	init_completion(&ret.event);
 	bio->bi_private = &ret;
 	bio->bi_end_io = submit_bio_wait_endio;
-	submit_bio(rw, bio);
+	bio->bi_rw |= REQ_SYNC;
+	submit_bio(bio);
 	wait_for_completion_io(&ret.event);
 
 	return ret.error;
@@ -1167,7 +1165,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
 		goto out_bmd;
 
 	if (iter->type & WRITE)
-		bio->bi_rw |= REQ_WRITE;
+		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
 	ret = 0;
 
@@ -1337,7 +1335,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 	 * set data direction, and check if mapped pages need bouncing
 	 */
 	if (iter->type & WRITE)
-		bio->bi_rw |= REQ_WRITE;
+		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
 	bio_set_flag(bio, BIO_USER_MAPPED);
 
@@ -1530,7 +1528,7 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
 		bio->bi_private = data;
 	} else {
 		bio->bi_end_io = bio_copy_kern_endio;
-		bio->bi_rw |= REQ_WRITE;
+		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 	}
 
 	return bio;
@@ -1785,7 +1783,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
 	 * Discards need a mutable bio_vec to accommodate the payload
 	 * required by the DSM TRIM and UNMAP commands.
 	 */
-	if (bio->bi_rw & REQ_DISCARD)
+	if (bio_op(bio) == REQ_OP_DISCARD)
 		split = bio_clone_bioset(bio, gfp, bs);
 	else
 		split = bio_clone_fast(bio, gfp, bs);
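
Since submit_bio_wait() loses its rw argument here (and now folds in
REQ_SYNC itself), every caller migrates the same way; a sketch of the
pattern, using a discard as the example op:

	/* old: ret = submit_bio_wait(REQ_WRITE | REQ_DISCARD, bio); */
	bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
	ret = submit_bio_wait(bio);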
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 66e6f1aae02e..dd38e5ced4a3 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -905,7 +905,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
 	return 0;
 }
 
-struct cftype blkcg_files[] = {
+static struct cftype blkcg_files[] = {
 	{
 		.name = "stat",
 		.flags = CFTYPE_NOT_ON_ROOT,
@@ -914,7 +914,7 @@ struct cftype blkcg_files[] = {
 	{ }	/* terminate */
 };
 
-struct cftype blkcg_legacy_files[] = {
+static struct cftype blkcg_legacy_files[] = {
 	{
 		.name = "reset_stats",
 		.write_u64 = blkcg_reset_stats,
diff --git a/block/blk-core.c b/block/blk-core.c
index 2475b1c72773..3cfd67d006fb 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -959,10 +959,10 @@ static void __freed_request(struct request_list *rl, int sync)
  * A request has just been released.  Account for it, update the full and
  * congestion status, wake up any waiters.  Called under q->queue_lock.
  */
-static void freed_request(struct request_list *rl, unsigned int flags)
+static void freed_request(struct request_list *rl, int op, unsigned int flags)
 {
 	struct request_queue *q = rl->q;
-	int sync = rw_is_sync(flags);
+	int sync = rw_is_sync(op, flags);
 
 	q->nr_rqs[sync]--;
 	rl->count[sync]--;
@@ -1029,7 +1029,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
 	 * Flush requests do not use the elevator so skip initialization.
 	 * This allows a request to share the flush and elevator data.
 	 */
-	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA))
+	if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA))
 		return false;
 
 	return true;
@@ -1054,7 +1054,8 @@ static struct io_context *rq_ioc(struct bio *bio)
 /**
  * __get_request - get a free request
  * @rl: request list to allocate from
- * @rw_flags: RW and SYNC flags
+ * @op: REQ_OP_READ/REQ_OP_WRITE
+ * @op_flags: rq_flag_bits
  * @bio: bio to allocate request for (can be %NULL)
 * @gfp_mask: allocation mask
 *
@@ -1065,21 +1066,22 @@ static struct io_context *rq_ioc(struct bio *bio)
 * Returns ERR_PTR on failure, with @q->queue_lock held.
 * Returns request pointer on success, with @q->queue_lock *not held*.
 */
-static struct request *__get_request(struct request_list *rl, int rw_flags,
-				     struct bio *bio, gfp_t gfp_mask)
+static struct request *__get_request(struct request_list *rl, int op,
+				     int op_flags, struct bio *bio,
+				     gfp_t gfp_mask)
 {
 	struct request_queue *q = rl->q;
 	struct request *rq;
 	struct elevator_type *et = q->elevator->type;
 	struct io_context *ioc = rq_ioc(bio);
 	struct io_cq *icq = NULL;
-	const bool is_sync = rw_is_sync(rw_flags) != 0;
+	const bool is_sync = rw_is_sync(op, op_flags) != 0;
 	int may_queue;
 
 	if (unlikely(blk_queue_dying(q)))
 		return ERR_PTR(-ENODEV);
 
-	may_queue = elv_may_queue(q, rw_flags);
+	may_queue = elv_may_queue(q, op, op_flags);
 	if (may_queue == ELV_MQUEUE_NO)
 		goto rq_starved;
 
@@ -1123,7 +1125,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
 
 	/*
 	 * Decide whether the new request will be managed by elevator.  If
-	 * so, mark @rw_flags and increment elvpriv.  Non-zero elvpriv will
+	 * so, mark @op_flags and increment elvpriv.  Non-zero elvpriv will
 	 * prevent the current elevator from being destroyed until the new
 	 * request is freed.  This guarantees icq's won't be destroyed and
 	 * makes creating new ones safe.
@@ -1132,14 +1134,14 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
 	 * it will be created after releasing queue_lock.
 	 */
 	if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
-		rw_flags |= REQ_ELVPRIV;
+		op_flags |= REQ_ELVPRIV;
 		q->nr_rqs_elvpriv++;
 		if (et->icq_cache && ioc)
 			icq = ioc_lookup_icq(ioc, q);
 	}
 
 	if (blk_queue_io_stat(q))
-		rw_flags |= REQ_IO_STAT;
+		op_flags |= REQ_IO_STAT;
 	spin_unlock_irq(q->queue_lock);
 
 	/* allocate and init request */
@@ -1149,10 +1151,10 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
 
 	blk_rq_init(q, rq);
 	blk_rq_set_rl(rq, rl);
-	rq->cmd_flags = rw_flags | REQ_ALLOCED;
+	req_set_op_attrs(rq, op, op_flags | REQ_ALLOCED);
 
 	/* init elvpriv */
-	if (rw_flags & REQ_ELVPRIV) {
+	if (op_flags & REQ_ELVPRIV) {
 		if (unlikely(et->icq_cache && !icq)) {
 			if (ioc)
 				icq = ioc_create_icq(ioc, q, gfp_mask);
@@ -1178,7 +1180,7 @@ out:
 	if (ioc_batching(q, ioc))
 		ioc->nr_batch_requests--;
 
-	trace_block_getrq(q, bio, rw_flags & 1);
+	trace_block_getrq(q, bio, op);
 	return rq;
 
 fail_elvpriv:
@@ -1208,7 +1210,7 @@ fail_alloc:
 	 * queue, but this is pretty rare.
 	 */
 	spin_lock_irq(q->queue_lock);
-	freed_request(rl, rw_flags);
+	freed_request(rl, op, op_flags);
 
 	/*
 	 * in the very unlikely event that allocation failed and no
@@ -1226,7 +1228,8 @@ rq_starved:
 /**
  * get_request - get a free request
  * @q: request_queue to allocate request from
- * @rw_flags: RW and SYNC flags
+ * @op: REQ_OP_READ/REQ_OP_WRITE
+ * @op_flags: rq_flag_bits
 * @bio: bio to allocate request for (can be %NULL)
 * @gfp_mask: allocation mask
 *
@@ -1237,17 +1240,18 @@ rq_starved:
 * Returns ERR_PTR on failure, with @q->queue_lock held.
 * Returns request pointer on success, with @q->queue_lock *not held*.
 */
-static struct request *get_request(struct request_queue *q, int rw_flags,
-				   struct bio *bio, gfp_t gfp_mask)
+static struct request *get_request(struct request_queue *q, int op,
+				   int op_flags, struct bio *bio,
+				   gfp_t gfp_mask)
 {
-	const bool is_sync = rw_is_sync(rw_flags) != 0;
+	const bool is_sync = rw_is_sync(op, op_flags) != 0;
 	DEFINE_WAIT(wait);
 	struct request_list *rl;
 	struct request *rq;
 
 	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
 retry:
-	rq = __get_request(rl, rw_flags, bio, gfp_mask);
+	rq = __get_request(rl, op, op_flags, bio, gfp_mask);
 	if (!IS_ERR(rq))
 		return rq;
 
@@ -1260,7 +1264,7 @@ retry:
 	prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
 				  TASK_UNINTERRUPTIBLE);
 
-	trace_block_sleeprq(q, bio, rw_flags & 1);
+	trace_block_sleeprq(q, bio, op);
 
 	spin_unlock_irq(q->queue_lock);
 	io_schedule();
@@ -1289,7 +1293,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
 	create_io_context(gfp_mask, q->node);
 
 	spin_lock_irq(q->queue_lock);
-	rq = get_request(q, rw, NULL, gfp_mask);
+	rq = get_request(q, rw, 0, NULL, gfp_mask);
 	if (IS_ERR(rq))
 		spin_unlock_irq(q->queue_lock);
 	/* q->queue_lock is unlocked at this point */
@@ -1491,13 +1495,14 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 	 */
 	if (req->cmd_flags & REQ_ALLOCED) {
 		unsigned int flags = req->cmd_flags;
+		int op = req_op(req);
 		struct request_list *rl = blk_rq_rl(req);
 
 		BUG_ON(!list_empty(&req->queuelist));
 		BUG_ON(ELV_ON_HASH(req));
 
 		blk_free_request(rl, req);
-		freed_request(rl, flags);
+		freed_request(rl, op, flags);
 		blk_put_rl(rl);
 	}
 }
@@ -1712,7 +1717,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
 	const bool sync = !!(bio->bi_rw & REQ_SYNC);
 	struct blk_plug *plug;
-	int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
+	int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
 	struct request *req;
 	unsigned int request_count = 0;
 
@@ -1731,7 +1736,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 		return BLK_QC_T_NONE;
 	}
 
-	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
+	if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) {
 		spin_lock_irq(q->queue_lock);
 		where = ELEVATOR_INSERT_FLUSH;
 		goto get_rq;
@@ -1772,15 +1777,19 @@ get_rq:
 	 * but we need to set it earlier to expose the sync flag to the
 	 * rq allocator and io schedulers.
 	 */
-	rw_flags = bio_data_dir(bio);
 	if (sync)
 		rw_flags |= REQ_SYNC;
 
 	/*
+	 * Add in META/PRIO flags, if set, before we get to the IO scheduler
+	 */
+	rw_flags |= (bio->bi_rw & (REQ_META | REQ_PRIO));
+
+	/*
 	 * Grab a free request. This is might sleep but can not fail.
 	 * Returns with the queue unlocked.
 	 */
-	req = get_request(q, rw_flags, bio, GFP_NOIO);
+	req = get_request(q, bio_data_dir(bio), rw_flags, bio, GFP_NOIO);
 	if (IS_ERR(req)) {
 		bio->bi_error = PTR_ERR(req);
 		bio_endio(bio);
@@ -1849,7 +1858,7 @@ static void handle_bad_sector(struct bio *bio)
 	char b[BDEVNAME_SIZE];
 
 	printk(KERN_INFO "attempt to access beyond end of device\n");
-	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
+	printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
 	       bdevname(bio->bi_bdev, b),
 	       bio->bi_rw,
 	       (unsigned long long)bio_end_sector(bio),
@@ -1964,23 +1973,23 @@ generic_make_request_checks(struct bio *bio)
 	 * drivers without flush support don't have to worry
 	 * about them.
 	 */
-	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
+	if ((bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) &&
 	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
-		bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
+		bio->bi_rw &= ~(REQ_PREFLUSH | REQ_FUA);
 		if (!nr_sectors) {
 			err = 0;
 			goto end_io;
 		}
 	}
 
-	if ((bio->bi_rw & REQ_DISCARD) &&
+	if ((bio_op(bio) == REQ_OP_DISCARD) &&
 	    (!blk_queue_discard(q) ||
 	     ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) {
 		err = -EOPNOTSUPP;
 		goto end_io;
 	}
 
-	if (bio->bi_rw & REQ_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) {
+	if (bio_op(bio) == REQ_OP_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) {
 		err = -EOPNOTSUPP;
 		goto end_io;
 	}
@@ -2094,7 +2103,6 @@ EXPORT_SYMBOL(generic_make_request);
 
 /**
  * submit_bio - submit a bio to the block device layer for I/O
- * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
  * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is very similar in purpose to generic_make_request(), and
@@ -2102,10 +2110,8 @@ EXPORT_SYMBOL(generic_make_request);
 * interfaces; @bio must be presetup and ready for I/O.
 *
 */
-blk_qc_t submit_bio(int rw, struct bio *bio)
+blk_qc_t submit_bio(struct bio *bio)
 {
-	bio->bi_rw |= rw;
-
 	/*
 	 * If it's a regular read/write or a barrier with data attached,
 	 * go through the normal accounting stuff before submission.
@@ -2113,12 +2119,12 @@ blk_qc_t submit_bio(int rw, struct bio *bio)
 	if (bio_has_data(bio)) {
 		unsigned int count;
 
-		if (unlikely(rw & REQ_WRITE_SAME))
+		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
 			count = bdev_logical_block_size(bio->bi_bdev) >> 9;
 		else
 			count = bio_sectors(bio);
 
-		if (rw & WRITE) {
+		if (op_is_write(bio_op(bio))) {
 			count_vm_events(PGPGOUT, count);
 		} else {
 			task_io_account_read(bio->bi_iter.bi_size);
@@ -2129,7 +2135,7 @@ blk_qc_t submit_bio(int rw, struct bio *bio)
 			char b[BDEVNAME_SIZE];
 			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
 				current->comm, task_pid_nr(current),
-				(rw & WRITE) ? "WRITE" : "READ",
+				op_is_write(bio_op(bio)) ? "WRITE" : "READ",
 				(unsigned long long)bio->bi_iter.bi_sector,
 				bdevname(bio->bi_bdev, b),
 				count);
@@ -2160,7 +2166,7 @@ EXPORT_SYMBOL(submit_bio);
 static int blk_cloned_rq_check_limits(struct request_queue *q,
 				      struct request *rq)
 {
-	if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {
+	if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
 		printk(KERN_ERR "%s: over max size limit.\n", __func__);
 		return -EIO;
 	}
@@ -2216,7 +2222,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 	 */
 	BUG_ON(blk_queued_rq(rq));
 
-	if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA))
+	if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
 		where = ELEVATOR_INSERT_FLUSH;
 
 	add_acct_request(q, rq, where);
@@ -2979,8 +2985,7 @@ EXPORT_SYMBOL_GPL(__blk_end_request_err);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		     struct bio *bio)
 {
-	/* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
-	rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
+	req_set_op(rq, bio_op(bio));
 
 	if (bio_has_data(bio))
 		rq->nr_phys_segments = bio_phys_segments(q, bio);
@@ -3065,7 +3070,8 @@ EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
 static void __blk_rq_prep_clone(struct request *dst, struct request *src)
 {
 	dst->cpu = src->cpu;
-	dst->cmd_flags |= (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE;
+	req_set_op_attrs(dst, req_op(src),
+			 (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE);
 	dst->cmd_type = src->cmd_type;
 	dst->__sector = blk_rq_pos(src);
 	dst->__data_len = blk_rq_bytes(src);
@@ -3310,7 +3316,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		/*
 		 * rq is already accounted, so use raw insert
 		 */
-		if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA))
+		if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
 			__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
 		else
 			__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
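
At the request level the same split shows up as req_op()/req_set_op_attrs(),
as used throughout the hunks above. A minimal sketch of initializing a
request under the new convention (the example_init_rq helper is
illustrative, not from the tree):

	static void example_init_rq(struct request *rq, int op, int op_flags)
	{
		/* was: rq->cmd_flags = op | op_flags | REQ_ALLOCED; */
		req_set_op_attrs(rq, op, op_flags | REQ_ALLOCED);

		if (op_is_write(req_op(rq)))
			;	/* write-side accounting would go here */
	}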
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 3fec8a29d0fa..7ea04325d02f 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -62,7 +62,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 
 	/*
 	 * don't check dying flag for MQ because the request won't
-	 * be resued after dying flag is set
+	 * be reused after dying flag is set
 	 */
 	if (q->mq_ops) {
 		blk_mq_insert_request(rq, at_head, true, false);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index b1c91d229e5e..d308def812db 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -10,8 +10,8 @@
 * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
 * properties and hardware capability.
 *
- * If a request doesn't have data, only REQ_FLUSH makes sense, which
- * indicates a simple flush request.  If there is data, REQ_FLUSH indicates
+ * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
+ * indicates a simple flush request.  If there is data, REQ_PREFLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
@@ -20,16 +20,16 @@
 * difference.  The requests are either completed immediately if there's no
 * data or executed as normal requests otherwise.
 *
- * If the device has writeback cache and supports FUA, REQ_FLUSH is
+ * If the device has writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
- * If the device has writeback cache and doesn't support FUA, REQ_FLUSH is
- * translated to PREFLUSH and REQ_FUA to POSTFLUSH.
+ * If the device has writeback cache and doesn't support FUA, REQ_PREFLUSH
+ * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
- * flush is issued and the pending_idx is toggled.  When the flush
+ * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending are proceeded to the next
 * step.  This allows arbitrary merging of different types of FLUSH/FUA
 * requests.
@@ -103,7 +103,7 @@ static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
 		policy |= REQ_FSEQ_DATA;
 
 	if (fflags & (1UL << QUEUE_FLAG_WC)) {
-		if (rq->cmd_flags & REQ_FLUSH)
+		if (rq->cmd_flags & REQ_PREFLUSH)
 			policy |= REQ_FSEQ_PREFLUSH;
 		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
 		    (rq->cmd_flags & REQ_FUA))
@@ -330,7 +330,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
 	}
 
 	flush_rq->cmd_type = REQ_TYPE_FS;
-	flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
+	req_set_op_attrs(flush_rq, REQ_OP_FLUSH, WRITE_FLUSH | REQ_FLUSH_SEQ);
 	flush_rq->rq_disk = first_rq->rq_disk;
 	flush_rq->end_io = flush_end_io;
 
@@ -391,9 +391,9 @@ void blk_insert_flush(struct request *rq)
 
 	/*
 	 * @policy now records what operations need to be done.  Adjust
-	 * REQ_FLUSH and FUA for the driver.
+	 * REQ_PREFLUSH and FUA for the driver.
 	 */
-	rq->cmd_flags &= ~REQ_FLUSH;
+	rq->cmd_flags &= ~REQ_PREFLUSH;
 	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
 		rq->cmd_flags &= ~REQ_FUA;
 
@@ -485,8 +485,9 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 
 	bio = bio_alloc(gfp_mask, 0);
 	bio->bi_bdev = bdev;
+	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
 
-	ret = submit_bio_wait(WRITE_FLUSH, bio);
+	ret = submit_bio_wait(bio);
 
 	/*
 	 * The driver must store the error location in ->bi_sector, if
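
The comment block above describes how a single REQ_PREFLUSH|REQ_FUA write
decomposes into PREFLUSH/DATA/POSTFLUSH steps. A simplified sketch of that
decision, condensed from the blk_flush_policy() hunk above (a
writeback-cache device without native FUA takes all three steps):

	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;
	if (fflags & (1UL << QUEUE_FLAG_WC)) {
		if (rq->cmd_flags & REQ_PREFLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
		    (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}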
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 9e29dc351695..9031d2af0b47 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -9,21 +9,22 @@
 
 #include "blk.h"
 
-static struct bio *next_bio(struct bio *bio, int rw, unsigned int nr_pages,
+static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
 		gfp_t gfp)
 {
 	struct bio *new = bio_alloc(gfp, nr_pages);
 
 	if (bio) {
 		bio_chain(bio, new);
-		submit_bio(rw, bio);
+		submit_bio(bio);
 	}
 
 	return new;
 }
 
 int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
-		sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop)
+		sector_t nr_sects, gfp_t gfp_mask, int op_flags,
+		struct bio **biop)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
 	struct bio *bio = *biop;
@@ -34,7 +35,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		return -ENXIO;
 	if (!blk_queue_discard(q))
 		return -EOPNOTSUPP;
-	if ((type & REQ_SECURE) && !blk_queue_secdiscard(q))
+	if ((op_flags & REQ_SECURE) && !blk_queue_secdiscard(q))
 		return -EOPNOTSUPP;
 
 	/* Zero-sector (unknown) and one-sector granularities are the same. */
@@ -62,9 +63,10 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 			req_sects = end_sect - sector;
 		}
 
-		bio = next_bio(bio, type, 1, gfp_mask);
+		bio = next_bio(bio, 1, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
 		bio->bi_bdev = bdev;
+		bio_set_op_attrs(bio, REQ_OP_DISCARD, op_flags);
 
 		bio->bi_iter.bi_size = req_sects << 9;
 		nr_sects -= req_sects;
@@ -98,19 +100,19 @@ EXPORT_SYMBOL(__blkdev_issue_discard);
 int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
 {
-	int type = REQ_WRITE | REQ_DISCARD;
+	int op_flags = 0;
 	struct bio *bio = NULL;
 	struct blk_plug plug;
 	int ret;
 
 	if (flags & BLKDEV_DISCARD_SECURE)
-		type |= REQ_SECURE;
+		op_flags |= REQ_SECURE;
 
 	blk_start_plug(&plug);
-	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, type,
+	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, op_flags,
 			&bio);
 	if (!ret && bio) {
-		ret = submit_bio_wait(type, bio);
+		ret = submit_bio_wait(bio);
 		if (ret == -EOPNOTSUPP)
 			ret = 0;
 		bio_put(bio);
@@ -148,13 +150,14 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 		max_write_same_sectors = UINT_MAX >> 9;
 
 	while (nr_sects) {
-		bio = next_bio(bio, REQ_WRITE | REQ_WRITE_SAME, 1, gfp_mask);
+		bio = next_bio(bio, 1, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
 		bio->bi_bdev = bdev;
 		bio->bi_vcnt = 1;
 		bio->bi_io_vec->bv_page = page;
 		bio->bi_io_vec->bv_offset = 0;
 		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
+		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);
 
 		if (nr_sects > max_write_same_sectors) {
 			bio->bi_iter.bi_size = max_write_same_sectors << 9;
@@ -167,7 +170,7 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 	}
 
 	if (bio) {
-		ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
+		ret = submit_bio_wait(bio);
 		bio_put(bio);
 	}
 	return ret != -EOPNOTSUPP ? ret : 0;
@@ -193,11 +196,11 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 	unsigned int sz;
 
 	while (nr_sects != 0) {
-		bio = next_bio(bio, WRITE,
-				min(nr_sects, (sector_t)BIO_MAX_PAGES),
+		bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES),
 				gfp_mask);
 		bio->bi_iter.bi_sector = sector;
 		bio->bi_bdev = bdev;
+		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
 		while (nr_sects != 0) {
 			sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
@@ -210,7 +213,7 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 	}
 
 	if (bio) {
-		ret = submit_bio_wait(WRITE, bio);
+		ret = submit_bio_wait(bio);
 		bio_put(bio);
 		return ret;
 	}
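
Callers of the exported helpers are unaffected by this conversion: the
signature of blkdev_issue_discard() and friends is unchanged, only the
internals moved to the op-based interface. A caller-side sketch (values
illustrative):

	err = blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL,
				   BLKDEV_DISCARD_SECURE);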
diff --git a/block/blk-map.c b/block/blk-map.c
index b9f88b7751fb..61733a660c3a 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -224,7 +224,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 		return PTR_ERR(bio);
 
 	if (!reading)
-		bio->bi_rw |= REQ_WRITE;
+		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
 	if (do_copy)
 		rq->cmd_flags |= REQ_COPY_USER;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 261353166dcf..5e4d93edeaf7 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -172,9 +172,9 @@ void blk_queue_split(struct request_queue *q, struct bio **bio,
 	struct bio *split, *res;
 	unsigned nsegs;
 
-	if ((*bio)->bi_rw & REQ_DISCARD)
+	if (bio_op(*bio) == REQ_OP_DISCARD)
 		split = blk_bio_discard_split(q, *bio, bs, &nsegs);
-	else if ((*bio)->bi_rw & REQ_WRITE_SAME)
+	else if (bio_op(*bio) == REQ_OP_WRITE_SAME)
 		split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
 	else
 		split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
@@ -213,10 +213,10 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	 * This should probably be returning 0, but blk_add_request_payload()
 	 * (Christoph!!!!)
 	 */
-	if (bio->bi_rw & REQ_DISCARD)
+	if (bio_op(bio) == REQ_OP_DISCARD)
 		return 1;
 
-	if (bio->bi_rw & REQ_WRITE_SAME)
+	if (bio_op(bio) == REQ_OP_WRITE_SAME)
 		return 1;
 
 	fbio = bio;
@@ -385,7 +385,7 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
 	nsegs = 0;
 	cluster = blk_queue_cluster(q);
 
-	if (bio->bi_rw & REQ_DISCARD) {
+	if (bio_op(bio) == REQ_OP_DISCARD) {
 		/*
 		 * This is a hack - drivers should be neither modifying the
 		 * biovec, nor relying on bi_vcnt - but because of
@@ -400,7 +400,7 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
 		return 0;
 	}
 
-	if (bio->bi_rw & REQ_WRITE_SAME) {
+	if (bio_op(bio) == REQ_OP_WRITE_SAME) {
 single_segment:
 		*sg = sglist;
 		bvec = bio_iovec(bio);
@@ -439,7 +439,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	}
 
 	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
-		if (rq->cmd_flags & REQ_WRITE)
+		if (op_is_write(req_op(rq)))
 			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
 
 		sg_unmark_end(sg);
@@ -500,7 +500,7 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 	    integrity_req_gap_back_merge(req, bio))
 		return 0;
 	if (blk_rq_sectors(req) + bio_sectors(bio) >
-	    blk_rq_get_max_sectors(req)) {
+	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
 		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -524,7 +524,7 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 	    integrity_req_gap_front_merge(req, bio))
 		return 0;
 	if (blk_rq_sectors(req) + bio_sectors(bio) >
-	    blk_rq_get_max_sectors(req)) {
+	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
 		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -570,7 +570,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	 * Will it become too large?
 	 */
 	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
-	    blk_rq_get_max_sectors(req))
+	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
 		return 0;
 
 	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
@@ -649,7 +649,8 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 	if (!rq_mergeable(req) || !rq_mergeable(next))
 		return 0;
 
-	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
+	if (!blk_check_merge_flags(req->cmd_flags, req_op(req), next->cmd_flags,
+				   req_op(next)))
 		return 0;
 
 	/*
@@ -663,7 +664,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 	    || req_no_special_merge(next))
 		return 0;
 
-	if (req->cmd_flags & REQ_WRITE_SAME &&
+	if (req_op(req) == REQ_OP_WRITE_SAME &&
 	    !blk_write_same_mergeable(req->bio, next->bio))
 		return 0;
 
@@ -743,6 +744,12 @@ int attempt_front_merge(struct request_queue *q, struct request *rq)
 int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 			  struct request *next)
 {
+	struct elevator_queue *e = q->elevator;
+
+	if (e->type->ops.elevator_allow_rq_merge_fn)
750 | if (!e->type->ops.elevator_allow_rq_merge_fn(q, rq, next)) | ||
751 | return 0; | ||
752 | |||
746 | return attempt_merge(q, rq, next); | 753 | return attempt_merge(q, rq, next); |
747 | } | 754 | } |
748 | 755 | ||
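blk_attempt_req_merge() now gives the I/O scheduler a veto over request-request merges via the new elevator_allow_rq_merge_fn hook; cfq's implementation, which only allows merges within the same cfq queue, appears further down in this diff. A hedged sketch of the hook's shape; same_internal_queue() is a hypothetical helper, not a real API:

    /* Sketch only: an elevator may refuse a request-request merge.
     * same_internal_queue() is hypothetical. Nonzero means "allow". */
    static int my_allow_rq_merge(struct request_queue *q,
                                 struct request *rq, struct request *next)
    {
            return same_internal_queue(rq, next);
    }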
@@ -751,7 +758,8 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio) | |||
751 | if (!rq_mergeable(rq) || !bio_mergeable(bio)) | 758 | if (!rq_mergeable(rq) || !bio_mergeable(bio)) |
752 | return false; | 759 | return false; |
753 | 760 | ||
754 | if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw)) | 761 | if (!blk_check_merge_flags(rq->cmd_flags, req_op(rq), bio->bi_rw, |
762 | bio_op(bio))) | ||
755 | return false; | 763 | return false; |
756 | 764 | ||
757 | /* different data direction or already started, don't merge */ | 765 | /* different data direction or already started, don't merge */ |
@@ -767,7 +775,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio) | |||
767 | return false; | 775 | return false; |
768 | 776 | ||
769 | /* must be using the same buffer */ | 777 | /* must be using the same buffer */ |
770 | if (rq->cmd_flags & REQ_WRITE_SAME && | 778 | if (req_op(rq) == REQ_OP_WRITE_SAME && |
771 | !blk_write_same_mergeable(rq->bio, bio)) | 779 | !blk_write_same_mergeable(rq->bio, bio)) |
772 | return false; | 780 | return false; |
773 | 781 | ||
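The pattern across blk-merge.c is uniform: discard and write-same stop being flag bits tested in bi_rw and become enumerated operations read back with bio_op()/req_op(), while data-direction tests go through op_is_write(). A compact sketch of the accessors (not code from this merge):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    /* The op is an enum value, so it is compared, never masked. */
    static bool bio_is_special(struct bio *bio)
    {
            return bio_op(bio) == REQ_OP_DISCARD ||
                   bio_op(bio) == REQ_OP_WRITE_SAME;
    }

    /* Direction derives from the op, not from a REQ_WRITE bit. */
    static bool rq_is_write(struct request *rq)
    {
            return op_is_write(req_op(rq));
    }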
diff --git a/block/blk-mq.c b/block/blk-mq.c index f9b9049b1284..2a1920c6d6e5 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -159,16 +159,17 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) | |||
159 | EXPORT_SYMBOL(blk_mq_can_queue); | 159 | EXPORT_SYMBOL(blk_mq_can_queue); |
160 | 160 | ||
161 | static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx, | 161 | static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx, |
162 | struct request *rq, unsigned int rw_flags) | 162 | struct request *rq, int op, |
163 | unsigned int op_flags) | ||
163 | { | 164 | { |
164 | if (blk_queue_io_stat(q)) | 165 | if (blk_queue_io_stat(q)) |
165 | rw_flags |= REQ_IO_STAT; | 166 | op_flags |= REQ_IO_STAT; |
166 | 167 | ||
167 | INIT_LIST_HEAD(&rq->queuelist); | 168 | INIT_LIST_HEAD(&rq->queuelist); |
168 | /* csd/requeue_work/fifo_time is initialized before use */ | 169 | /* csd/requeue_work/fifo_time is initialized before use */ |
169 | rq->q = q; | 170 | rq->q = q; |
170 | rq->mq_ctx = ctx; | 171 | rq->mq_ctx = ctx; |
171 | rq->cmd_flags |= rw_flags; | 172 | req_set_op_attrs(rq, op, op_flags); |
172 | /* do not touch atomic flags, it needs atomic ops against the timer */ | 173 | /* do not touch atomic flags, it needs atomic ops against the timer */ |
173 | rq->cpu = -1; | 174 | rq->cpu = -1; |
174 | INIT_HLIST_NODE(&rq->hash); | 175 | INIT_HLIST_NODE(&rq->hash); |
@@ -203,11 +204,11 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx, | |||
203 | rq->end_io_data = NULL; | 204 | rq->end_io_data = NULL; |
204 | rq->next_rq = NULL; | 205 | rq->next_rq = NULL; |
205 | 206 | ||
206 | ctx->rq_dispatched[rw_is_sync(rw_flags)]++; | 207 | ctx->rq_dispatched[rw_is_sync(op, op_flags)]++; |
207 | } | 208 | } |
208 | 209 | ||
209 | static struct request * | 210 | static struct request * |
210 | __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw) | 211 | __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int op, int op_flags) |
211 | { | 212 | { |
212 | struct request *rq; | 213 | struct request *rq; |
213 | unsigned int tag; | 214 | unsigned int tag; |
@@ -222,7 +223,7 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw) | |||
222 | } | 223 | } |
223 | 224 | ||
224 | rq->tag = tag; | 225 | rq->tag = tag; |
225 | blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw); | 226 | blk_mq_rq_ctx_init(data->q, data->ctx, rq, op, op_flags); |
226 | return rq; | 227 | return rq; |
227 | } | 228 | } |
228 | 229 | ||
@@ -246,7 +247,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, | |||
246 | hctx = q->mq_ops->map_queue(q, ctx->cpu); | 247 | hctx = q->mq_ops->map_queue(q, ctx->cpu); |
247 | blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx); | 248 | blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx); |
248 | 249 | ||
249 | rq = __blk_mq_alloc_request(&alloc_data, rw); | 250 | rq = __blk_mq_alloc_request(&alloc_data, rw, 0); |
250 | if (!rq && !(flags & BLK_MQ_REQ_NOWAIT)) { | 251 | if (!rq && !(flags & BLK_MQ_REQ_NOWAIT)) { |
251 | __blk_mq_run_hw_queue(hctx); | 252 | __blk_mq_run_hw_queue(hctx); |
252 | blk_mq_put_ctx(ctx); | 253 | blk_mq_put_ctx(ctx); |
@@ -254,7 +255,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, | |||
254 | ctx = blk_mq_get_ctx(q); | 255 | ctx = blk_mq_get_ctx(q); |
255 | hctx = q->mq_ops->map_queue(q, ctx->cpu); | 256 | hctx = q->mq_ops->map_queue(q, ctx->cpu); |
256 | blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx); | 257 | blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx); |
257 | rq = __blk_mq_alloc_request(&alloc_data, rw); | 258 | rq = __blk_mq_alloc_request(&alloc_data, rw, 0); |
258 | ctx = alloc_data.ctx; | 259 | ctx = alloc_data.ctx; |
259 | } | 260 | } |
260 | blk_mq_put_ctx(ctx); | 261 | blk_mq_put_ctx(ctx); |
@@ -784,7 +785,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) | |||
784 | switch (ret) { | 785 | switch (ret) { |
785 | case BLK_MQ_RQ_QUEUE_OK: | 786 | case BLK_MQ_RQ_QUEUE_OK: |
786 | queued++; | 787 | queued++; |
787 | continue; | 788 | break; |
788 | case BLK_MQ_RQ_QUEUE_BUSY: | 789 | case BLK_MQ_RQ_QUEUE_BUSY: |
789 | list_add(&rq->queuelist, &rq_list); | 790 | list_add(&rq->queuelist, &rq_list); |
790 | __blk_mq_requeue_request(rq); | 791 | __blk_mq_requeue_request(rq); |
@@ -1169,28 +1170,29 @@ static struct request *blk_mq_map_request(struct request_queue *q, | |||
1169 | struct blk_mq_hw_ctx *hctx; | 1170 | struct blk_mq_hw_ctx *hctx; |
1170 | struct blk_mq_ctx *ctx; | 1171 | struct blk_mq_ctx *ctx; |
1171 | struct request *rq; | 1172 | struct request *rq; |
1172 | int rw = bio_data_dir(bio); | 1173 | int op = bio_data_dir(bio); |
1174 | int op_flags = 0; | ||
1173 | struct blk_mq_alloc_data alloc_data; | 1175 | struct blk_mq_alloc_data alloc_data; |
1174 | 1176 | ||
1175 | blk_queue_enter_live(q); | 1177 | blk_queue_enter_live(q); |
1176 | ctx = blk_mq_get_ctx(q); | 1178 | ctx = blk_mq_get_ctx(q); |
1177 | hctx = q->mq_ops->map_queue(q, ctx->cpu); | 1179 | hctx = q->mq_ops->map_queue(q, ctx->cpu); |
1178 | 1180 | ||
1179 | if (rw_is_sync(bio->bi_rw)) | 1181 | if (rw_is_sync(bio_op(bio), bio->bi_rw)) |
1180 | rw |= REQ_SYNC; | 1182 | op_flags |= REQ_SYNC; |
1181 | 1183 | ||
1182 | trace_block_getrq(q, bio, rw); | 1184 | trace_block_getrq(q, bio, op); |
1183 | blk_mq_set_alloc_data(&alloc_data, q, BLK_MQ_REQ_NOWAIT, ctx, hctx); | 1185 | blk_mq_set_alloc_data(&alloc_data, q, BLK_MQ_REQ_NOWAIT, ctx, hctx); |
1184 | rq = __blk_mq_alloc_request(&alloc_data, rw); | 1186 | rq = __blk_mq_alloc_request(&alloc_data, op, op_flags); |
1185 | if (unlikely(!rq)) { | 1187 | if (unlikely(!rq)) { |
1186 | __blk_mq_run_hw_queue(hctx); | 1188 | __blk_mq_run_hw_queue(hctx); |
1187 | blk_mq_put_ctx(ctx); | 1189 | blk_mq_put_ctx(ctx); |
1188 | trace_block_sleeprq(q, bio, rw); | 1190 | trace_block_sleeprq(q, bio, op); |
1189 | 1191 | ||
1190 | ctx = blk_mq_get_ctx(q); | 1192 | ctx = blk_mq_get_ctx(q); |
1191 | hctx = q->mq_ops->map_queue(q, ctx->cpu); | 1193 | hctx = q->mq_ops->map_queue(q, ctx->cpu); |
1192 | blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx); | 1194 | blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx); |
1193 | rq = __blk_mq_alloc_request(&alloc_data, rw); | 1195 | rq = __blk_mq_alloc_request(&alloc_data, op, op_flags); |
1194 | ctx = alloc_data.ctx; | 1196 | ctx = alloc_data.ctx; |
1195 | hctx = alloc_data.hctx; | 1197 | hctx = alloc_data.hctx; |
1196 | } | 1198 | } |
@@ -1244,8 +1246,8 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie) | |||
1244 | */ | 1246 | */ |
1245 | static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) | 1247 | static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) |
1246 | { | 1248 | { |
1247 | const int is_sync = rw_is_sync(bio->bi_rw); | 1249 | const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw); |
1248 | const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA); | 1250 | const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA); |
1249 | struct blk_map_ctx data; | 1251 | struct blk_map_ctx data; |
1250 | struct request *rq; | 1252 | struct request *rq; |
1251 | unsigned int request_count = 0; | 1253 | unsigned int request_count = 0; |
@@ -1338,8 +1340,8 @@ done: | |||
1338 | */ | 1340 | */ |
1339 | static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio) | 1341 | static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio) |
1340 | { | 1342 | { |
1341 | const int is_sync = rw_is_sync(bio->bi_rw); | 1343 | const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw); |
1342 | const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA); | 1344 | const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA); |
1343 | struct blk_plug *plug; | 1345 | struct blk_plug *plug; |
1344 | unsigned int request_count = 0; | 1346 | unsigned int request_count = 0; |
1345 | struct blk_map_ctx data; | 1347 | struct blk_map_ctx data; |
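In blk-mq the op and its modifier flags now travel separately through the allocation path until req_set_op_attrs() folds them into rq->cmd_flags; rw_is_sync() takes both, and the flush test is spelled REQ_PREFLUSH rather than REQ_FLUSH. A sketch of the new setup shape, assuming rq and bio come from a driver's make_request path (not code from this merge):

    #include <linux/blkdev.h>

    /* Hedged sketch of the op/op_flags split at request init time. */
    static void init_rq_from_bio(struct request *rq, struct bio *bio)
    {
            int op = bio_data_dir(bio); /* REQ_OP_READ or REQ_OP_WRITE */
            int op_flags = 0;

            if (rw_is_sync(bio_op(bio), bio->bi_rw))
                    op_flags |= REQ_SYNC;
            req_set_op_attrs(rq, op, op_flags); /* fills rq->cmd_flags */
    }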
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 99205965f559..f87a7e747d36 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c | |||
@@ -379,6 +379,11 @@ static ssize_t queue_wc_store(struct request_queue *q, const char *page, | |||
379 | return count; | 379 | return count; |
380 | } | 380 | } |
381 | 381 | ||
382 | static ssize_t queue_dax_show(struct request_queue *q, char *page) | ||
383 | { | ||
384 | return queue_var_show(blk_queue_dax(q), page); | ||
385 | } | ||
386 | |||
382 | static struct queue_sysfs_entry queue_requests_entry = { | 387 | static struct queue_sysfs_entry queue_requests_entry = { |
383 | .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR }, | 388 | .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR }, |
384 | .show = queue_requests_show, | 389 | .show = queue_requests_show, |
@@ -516,6 +521,11 @@ static struct queue_sysfs_entry queue_wc_entry = { | |||
516 | .store = queue_wc_store, | 521 | .store = queue_wc_store, |
517 | }; | 522 | }; |
518 | 523 | ||
524 | static struct queue_sysfs_entry queue_dax_entry = { | ||
525 | .attr = {.name = "dax", .mode = S_IRUGO }, | ||
526 | .show = queue_dax_show, | ||
527 | }; | ||
528 | |||
519 | static struct attribute *default_attrs[] = { | 529 | static struct attribute *default_attrs[] = { |
520 | &queue_requests_entry.attr, | 530 | &queue_requests_entry.attr, |
521 | &queue_ra_entry.attr, | 531 | &queue_ra_entry.attr, |
@@ -542,6 +552,7 @@ static struct attribute *default_attrs[] = { | |||
542 | &queue_random_entry.attr, | 552 | &queue_random_entry.attr, |
543 | &queue_poll_entry.attr, | 553 | &queue_poll_entry.attr, |
544 | &queue_wc_entry.attr, | 554 | &queue_wc_entry.attr, |
555 | &queue_dax_entry.attr, | ||
545 | NULL, | 556 | NULL, |
546 | }; | 557 | }; |
547 | 558 | ||
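The new read-only "dax" attribute just reports QUEUE_FLAG_DAX, which a driver with directly addressable media (pmem, elsewhere in this series) sets on its queue; userspace then reads 1 from /sys/block/<dev>/queue/dax. A hedged sketch of both sides (illustrative, not taken from this hunk):

    /* Driver side: advertise DAX capability on the queue. */
    queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);

    /* Consumer side: test it with the helper. */
    if (blk_queue_dax(q))
            ;       /* DAX paths may be used */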
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 4a349787bc62..acabba198de9 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
@@ -10,7 +10,7 @@ | |||
10 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
11 | #include <linux/blkdev.h> | 11 | #include <linux/blkdev.h> |
12 | #include <linux/elevator.h> | 12 | #include <linux/elevator.h> |
13 | #include <linux/jiffies.h> | 13 | #include <linux/ktime.h> |
14 | #include <linux/rbtree.h> | 14 | #include <linux/rbtree.h> |
15 | #include <linux/ioprio.h> | 15 | #include <linux/ioprio.h> |
16 | #include <linux/blktrace_api.h> | 16 | #include <linux/blktrace_api.h> |
@@ -22,28 +22,28 @@ | |||
22 | */ | 22 | */ |
23 | /* max queue in one round of service */ | 23 | /* max queue in one round of service */ |
24 | static const int cfq_quantum = 8; | 24 | static const int cfq_quantum = 8; |
25 | static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 }; | 25 | static const u64 cfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 }; |
26 | /* maximum backwards seek, in KiB */ | 26 | /* maximum backwards seek, in KiB */ |
27 | static const int cfq_back_max = 16 * 1024; | 27 | static const int cfq_back_max = 16 * 1024; |
28 | /* penalty of a backwards seek */ | 28 | /* penalty of a backwards seek */ |
29 | static const int cfq_back_penalty = 2; | 29 | static const int cfq_back_penalty = 2; |
30 | static const int cfq_slice_sync = HZ / 10; | 30 | static const u64 cfq_slice_sync = NSEC_PER_SEC / 10; |
31 | static int cfq_slice_async = HZ / 25; | 31 | static u64 cfq_slice_async = NSEC_PER_SEC / 25; |
32 | static const int cfq_slice_async_rq = 2; | 32 | static const int cfq_slice_async_rq = 2; |
33 | static int cfq_slice_idle = HZ / 125; | 33 | static u64 cfq_slice_idle = NSEC_PER_SEC / 125; |
34 | static int cfq_group_idle = HZ / 125; | 34 | static u64 cfq_group_idle = NSEC_PER_SEC / 125; |
35 | static const int cfq_target_latency = HZ * 3/10; /* 300 ms */ | 35 | static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */ |
36 | static const int cfq_hist_divisor = 4; | 36 | static const int cfq_hist_divisor = 4; |
37 | 37 | ||
38 | /* | 38 | /* |
39 | * offset from end of service tree | 39 | * offset from end of service tree |
40 | */ | 40 | */ |
41 | #define CFQ_IDLE_DELAY (HZ / 5) | 41 | #define CFQ_IDLE_DELAY (NSEC_PER_SEC / 5) |
42 | 42 | ||
43 | /* | 43 | /* |
44 | * below this threshold, we consider thinktime immediate | 44 | * below this threshold, we consider thinktime immediate |
45 | */ | 45 | */ |
46 | #define CFQ_MIN_TT (2) | 46 | #define CFQ_MIN_TT (2 * NSEC_PER_SEC / HZ) |
47 | 47 | ||
48 | #define CFQ_SLICE_SCALE (5) | 48 | #define CFQ_SLICE_SCALE (5) |
49 | #define CFQ_HW_QUEUE_MIN (5) | 49 | #define CFQ_HW_QUEUE_MIN (5) |
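The tunable conversion preserves every default's wall-clock value and changes only the unit: HZ-based jiffies expressions become NSEC_PER_SEC-based nanosecond counts, decoupling them from the kernel tick rate. Illustrative arithmetic (not code from the patch):

    /* old (jiffies)        new (nanoseconds)       wall clock
     * HZ / 8               NSEC_PER_SEC / 8        125 ms
     * HZ / 10              NSEC_PER_SEC / 10       100 ms
     * A single jiffy survives as a floor in a few places: */
    u64 floor = jiffies_to_nsecs(1);        /* 4,000,000 ns at HZ=250 */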
@@ -73,11 +73,11 @@ static struct kmem_cache *cfq_pool; | |||
73 | #define CFQ_WEIGHT_LEGACY_MAX 1000 | 73 | #define CFQ_WEIGHT_LEGACY_MAX 1000 |
74 | 74 | ||
75 | struct cfq_ttime { | 75 | struct cfq_ttime { |
76 | unsigned long last_end_request; | 76 | u64 last_end_request; |
77 | 77 | ||
78 | unsigned long ttime_total; | 78 | u64 ttime_total; |
79 | u64 ttime_mean; | ||
79 | unsigned long ttime_samples; | 80 | unsigned long ttime_samples; |
80 | unsigned long ttime_mean; | ||
81 | }; | 81 | }; |
82 | 82 | ||
83 | /* | 83 | /* |
@@ -94,7 +94,7 @@ struct cfq_rb_root { | |||
94 | struct cfq_ttime ttime; | 94 | struct cfq_ttime ttime; |
95 | }; | 95 | }; |
96 | #define CFQ_RB_ROOT (struct cfq_rb_root) { .rb = RB_ROOT, \ | 96 | #define CFQ_RB_ROOT (struct cfq_rb_root) { .rb = RB_ROOT, \ |
97 | .ttime = {.last_end_request = jiffies,},} | 97 | .ttime = {.last_end_request = ktime_get_ns(),},} |
98 | 98 | ||
99 | /* | 99 | /* |
100 | * Per process-grouping structure | 100 | * Per process-grouping structure |
@@ -109,7 +109,7 @@ struct cfq_queue { | |||
109 | /* service_tree member */ | 109 | /* service_tree member */ |
110 | struct rb_node rb_node; | 110 | struct rb_node rb_node; |
111 | /* service_tree key */ | 111 | /* service_tree key */ |
112 | unsigned long rb_key; | 112 | u64 rb_key; |
113 | /* prio tree member */ | 113 | /* prio tree member */ |
114 | struct rb_node p_node; | 114 | struct rb_node p_node; |
115 | /* prio tree root we belong to, if any */ | 115 | /* prio tree root we belong to, if any */ |
@@ -126,13 +126,13 @@ struct cfq_queue { | |||
126 | struct list_head fifo; | 126 | struct list_head fifo; |
127 | 127 | ||
128 | /* time when queue got scheduled in to dispatch first request. */ | 128 | /* time when queue got scheduled in to dispatch first request. */ |
129 | unsigned long dispatch_start; | 129 | u64 dispatch_start; |
130 | unsigned int allocated_slice; | 130 | u64 allocated_slice; |
131 | unsigned int slice_dispatch; | 131 | u64 slice_dispatch; |
132 | /* time when first request from queue completed and slice started. */ | 132 | /* time when first request from queue completed and slice started. */ |
133 | unsigned long slice_start; | 133 | u64 slice_start; |
134 | unsigned long slice_end; | 134 | u64 slice_end; |
135 | long slice_resid; | 135 | s64 slice_resid; |
136 | 136 | ||
137 | /* pending priority requests */ | 137 | /* pending priority requests */ |
138 | int prio_pending; | 138 | int prio_pending; |
@@ -141,7 +141,7 @@ struct cfq_queue { | |||
141 | 141 | ||
142 | /* io prio of this group */ | 142 | /* io prio of this group */ |
143 | unsigned short ioprio, org_ioprio; | 143 | unsigned short ioprio, org_ioprio; |
144 | unsigned short ioprio_class; | 144 | unsigned short ioprio_class, org_ioprio_class; |
145 | 145 | ||
146 | pid_t pid; | 146 | pid_t pid; |
147 | 147 | ||
@@ -290,7 +290,7 @@ struct cfq_group { | |||
290 | struct cfq_rb_root service_trees[2][3]; | 290 | struct cfq_rb_root service_trees[2][3]; |
291 | struct cfq_rb_root service_tree_idle; | 291 | struct cfq_rb_root service_tree_idle; |
292 | 292 | ||
293 | unsigned long saved_wl_slice; | 293 | u64 saved_wl_slice; |
294 | enum wl_type_t saved_wl_type; | 294 | enum wl_type_t saved_wl_type; |
295 | enum wl_class_t saved_wl_class; | 295 | enum wl_class_t saved_wl_class; |
296 | 296 | ||
@@ -329,7 +329,7 @@ struct cfq_data { | |||
329 | */ | 329 | */ |
330 | enum wl_class_t serving_wl_class; | 330 | enum wl_class_t serving_wl_class; |
331 | enum wl_type_t serving_wl_type; | 331 | enum wl_type_t serving_wl_type; |
332 | unsigned long workload_expires; | 332 | u64 workload_expires; |
333 | struct cfq_group *serving_group; | 333 | struct cfq_group *serving_group; |
334 | 334 | ||
335 | /* | 335 | /* |
@@ -362,7 +362,7 @@ struct cfq_data { | |||
362 | /* | 362 | /* |
363 | * idle window management | 363 | * idle window management |
364 | */ | 364 | */ |
365 | struct timer_list idle_slice_timer; | 365 | struct hrtimer idle_slice_timer; |
366 | struct work_struct unplug_work; | 366 | struct work_struct unplug_work; |
367 | 367 | ||
368 | struct cfq_queue *active_queue; | 368 | struct cfq_queue *active_queue; |
@@ -374,22 +374,22 @@ struct cfq_data { | |||
374 | * tunables, see top of file | 374 | * tunables, see top of file |
375 | */ | 375 | */ |
376 | unsigned int cfq_quantum; | 376 | unsigned int cfq_quantum; |
377 | unsigned int cfq_fifo_expire[2]; | ||
378 | unsigned int cfq_back_penalty; | 377 | unsigned int cfq_back_penalty; |
379 | unsigned int cfq_back_max; | 378 | unsigned int cfq_back_max; |
380 | unsigned int cfq_slice[2]; | ||
381 | unsigned int cfq_slice_async_rq; | 379 | unsigned int cfq_slice_async_rq; |
382 | unsigned int cfq_slice_idle; | ||
383 | unsigned int cfq_group_idle; | ||
384 | unsigned int cfq_latency; | 380 | unsigned int cfq_latency; |
385 | unsigned int cfq_target_latency; | 381 | u64 cfq_fifo_expire[2]; |
382 | u64 cfq_slice[2]; | ||
383 | u64 cfq_slice_idle; | ||
384 | u64 cfq_group_idle; | ||
385 | u64 cfq_target_latency; | ||
386 | 386 | ||
387 | /* | 387 | /* |
388 | * Fallback dummy cfqq for extreme OOM conditions | 388 | * Fallback dummy cfqq for extreme OOM conditions |
389 | */ | 389 | */ |
390 | struct cfq_queue oom_cfqq; | 390 | struct cfq_queue oom_cfqq; |
391 | 391 | ||
392 | unsigned long last_delayed_sync; | 392 | u64 last_delayed_sync; |
393 | }; | 393 | }; |
394 | 394 | ||
395 | static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd); | 395 | static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd); |
@@ -667,15 +667,16 @@ static inline void cfqg_put(struct cfq_group *cfqg) | |||
667 | } while (0) | 667 | } while (0) |
668 | 668 | ||
669 | static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg, | 669 | static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg, |
670 | struct cfq_group *curr_cfqg, int rw) | 670 | struct cfq_group *curr_cfqg, int op, |
671 | int op_flags) | ||
671 | { | 672 | { |
672 | blkg_rwstat_add(&cfqg->stats.queued, rw, 1); | 673 | blkg_rwstat_add(&cfqg->stats.queued, op, op_flags, 1); |
673 | cfqg_stats_end_empty_time(&cfqg->stats); | 674 | cfqg_stats_end_empty_time(&cfqg->stats); |
674 | cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg); | 675 | cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg); |
675 | } | 676 | } |
676 | 677 | ||
677 | static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg, | 678 | static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg, |
678 | unsigned long time, unsigned long unaccounted_time) | 679 | uint64_t time, unsigned long unaccounted_time) |
679 | { | 680 | { |
680 | blkg_stat_add(&cfqg->stats.time, time); | 681 | blkg_stat_add(&cfqg->stats.time, time); |
681 | #ifdef CONFIG_DEBUG_BLK_CGROUP | 682 | #ifdef CONFIG_DEBUG_BLK_CGROUP |
@@ -683,26 +684,30 @@ static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg, | |||
683 | #endif | 684 | #endif |
684 | } | 685 | } |
685 | 686 | ||
686 | static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) | 687 | static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int op, |
688 | int op_flags) | ||
687 | { | 689 | { |
688 | blkg_rwstat_add(&cfqg->stats.queued, rw, -1); | 690 | blkg_rwstat_add(&cfqg->stats.queued, op, op_flags, -1); |
689 | } | 691 | } |
690 | 692 | ||
691 | static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) | 693 | static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int op, |
694 | int op_flags) | ||
692 | { | 695 | { |
693 | blkg_rwstat_add(&cfqg->stats.merged, rw, 1); | 696 | blkg_rwstat_add(&cfqg->stats.merged, op, op_flags, 1); |
694 | } | 697 | } |
695 | 698 | ||
696 | static inline void cfqg_stats_update_completion(struct cfq_group *cfqg, | 699 | static inline void cfqg_stats_update_completion(struct cfq_group *cfqg, |
697 | uint64_t start_time, uint64_t io_start_time, int rw) | 700 | uint64_t start_time, uint64_t io_start_time, int op, |
701 | int op_flags) | ||
698 | { | 702 | { |
699 | struct cfqg_stats *stats = &cfqg->stats; | 703 | struct cfqg_stats *stats = &cfqg->stats; |
700 | unsigned long long now = sched_clock(); | 704 | unsigned long long now = sched_clock(); |
701 | 705 | ||
702 | if (time_after64(now, io_start_time)) | 706 | if (time_after64(now, io_start_time)) |
703 | blkg_rwstat_add(&stats->service_time, rw, now - io_start_time); | 707 | blkg_rwstat_add(&stats->service_time, op, op_flags, |
708 | now - io_start_time); | ||
704 | if (time_after64(io_start_time, start_time)) | 709 | if (time_after64(io_start_time, start_time)) |
705 | blkg_rwstat_add(&stats->wait_time, rw, | 710 | blkg_rwstat_add(&stats->wait_time, op, op_flags, |
706 | io_start_time - start_time); | 711 | io_start_time - start_time); |
707 | } | 712 | } |
708 | 713 | ||
@@ -781,13 +786,16 @@ static inline void cfqg_put(struct cfq_group *cfqg) { } | |||
781 | #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0) | 786 | #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0) |
782 | 787 | ||
783 | static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg, | 788 | static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg, |
784 | struct cfq_group *curr_cfqg, int rw) { } | 789 | struct cfq_group *curr_cfqg, int op, int op_flags) { } |
785 | static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg, | 790 | static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg, |
786 | unsigned long time, unsigned long unaccounted_time) { } | 791 | uint64_t time, unsigned long unaccounted_time) { } |
787 | static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { } | 792 | static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int op, |
788 | static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { } | 793 | int op_flags) { } |
794 | static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int op, | ||
795 | int op_flags) { } | ||
789 | static inline void cfqg_stats_update_completion(struct cfq_group *cfqg, | 796 | static inline void cfqg_stats_update_completion(struct cfq_group *cfqg, |
790 | uint64_t start_time, uint64_t io_start_time, int rw) { } | 797 | uint64_t start_time, uint64_t io_start_time, int op, |
798 | int op_flags) { } | ||
791 | 799 | ||
792 | #endif /* CONFIG_CFQ_GROUP_IOSCHED */ | 800 | #endif /* CONFIG_CFQ_GROUP_IOSCHED */ |
793 | 801 | ||
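Every cfqg_stats_* wrapper (and its !CONFIG_CFQ_GROUP_IOSCHED stub) grows the extra op argument because blkg_rwstat_add() now needs the operation and the flags separately to pick the read or write bucket. The call shape, as used in the hunks above:

    /* queued += 1 for this op; op/op_flags select the rwstat bucket */
    blkg_rwstat_add(&cfqg->stats.queued, op, op_flags, 1);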
@@ -807,7 +815,7 @@ static inline void cfqg_stats_update_completion(struct cfq_group *cfqg, | |||
807 | static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd, | 815 | static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd, |
808 | struct cfq_ttime *ttime, bool group_idle) | 816 | struct cfq_ttime *ttime, bool group_idle) |
809 | { | 817 | { |
810 | unsigned long slice; | 818 | u64 slice; |
811 | if (!sample_valid(ttime->ttime_samples)) | 819 | if (!sample_valid(ttime->ttime_samples)) |
812 | return false; | 820 | return false; |
813 | if (group_idle) | 821 | if (group_idle) |
@@ -930,17 +938,18 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) | |||
930 | * if a queue is marked sync and has sync io queued. A sync queue with async | 938 | * if a queue is marked sync and has sync io queued. A sync queue with async |
931 | * io only, should not get full sync slice length. | 939 | * io only, should not get full sync slice length. |
932 | */ | 940 | */ |
933 | static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync, | 941 | static inline u64 cfq_prio_slice(struct cfq_data *cfqd, bool sync, |
934 | unsigned short prio) | 942 | unsigned short prio) |
935 | { | 943 | { |
936 | const int base_slice = cfqd->cfq_slice[sync]; | 944 | u64 base_slice = cfqd->cfq_slice[sync]; |
945 | u64 slice = div_u64(base_slice, CFQ_SLICE_SCALE); | ||
937 | 946 | ||
938 | WARN_ON(prio >= IOPRIO_BE_NR); | 947 | WARN_ON(prio >= IOPRIO_BE_NR); |
939 | 948 | ||
940 | return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio)); | 949 | return base_slice + (slice * (4 - prio)); |
941 | } | 950 | } |
942 | 951 | ||
943 | static inline int | 952 | static inline u64 |
944 | cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) | 953 | cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) |
945 | { | 954 | { |
946 | return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio); | 955 | return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio); |
@@ -958,15 +967,14 @@ cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) | |||
958 | * | 967 | * |
959 | * The result is also in fixed point w/ CFQ_SERVICE_SHIFT. | 968 | * The result is also in fixed point w/ CFQ_SERVICE_SHIFT. |
960 | */ | 969 | */ |
961 | static inline u64 cfqg_scale_charge(unsigned long charge, | 970 | static inline u64 cfqg_scale_charge(u64 charge, |
962 | unsigned int vfraction) | 971 | unsigned int vfraction) |
963 | { | 972 | { |
964 | u64 c = charge << CFQ_SERVICE_SHIFT; /* make it fixed point */ | 973 | u64 c = charge << CFQ_SERVICE_SHIFT; /* make it fixed point */ |
965 | 974 | ||
966 | /* charge / vfraction */ | 975 | /* charge / vfraction */ |
967 | c <<= CFQ_SERVICE_SHIFT; | 976 | c <<= CFQ_SERVICE_SHIFT; |
968 | do_div(c, vfraction); | 977 | return div_u64(c, vfraction); |
969 | return c; | ||
970 | } | 978 | } |
971 | 979 | ||
972 | static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime) | 980 | static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime) |
@@ -1019,16 +1027,16 @@ static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd, | |||
1019 | return cfqg->busy_queues_avg[rt]; | 1027 | return cfqg->busy_queues_avg[rt]; |
1020 | } | 1028 | } |
1021 | 1029 | ||
1022 | static inline unsigned | 1030 | static inline u64 |
1023 | cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg) | 1031 | cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg) |
1024 | { | 1032 | { |
1025 | return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT; | 1033 | return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT; |
1026 | } | 1034 | } |
1027 | 1035 | ||
1028 | static inline unsigned | 1036 | static inline u64 |
1029 | cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) | 1037 | cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) |
1030 | { | 1038 | { |
1031 | unsigned slice = cfq_prio_to_slice(cfqd, cfqq); | 1039 | u64 slice = cfq_prio_to_slice(cfqd, cfqq); |
1032 | if (cfqd->cfq_latency) { | 1040 | if (cfqd->cfq_latency) { |
1033 | /* | 1041 | /* |
1034 | * interested queues (we consider only the ones with the same | 1042 | * interested queues (we consider only the ones with the same |
@@ -1036,20 +1044,22 @@ cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) | |||
1036 | */ | 1044 | */ |
1037 | unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg, | 1045 | unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg, |
1038 | cfq_class_rt(cfqq)); | 1046 | cfq_class_rt(cfqq)); |
1039 | unsigned sync_slice = cfqd->cfq_slice[1]; | 1047 | u64 sync_slice = cfqd->cfq_slice[1]; |
1040 | unsigned expect_latency = sync_slice * iq; | 1048 | u64 expect_latency = sync_slice * iq; |
1041 | unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg); | 1049 | u64 group_slice = cfq_group_slice(cfqd, cfqq->cfqg); |
1042 | 1050 | ||
1043 | if (expect_latency > group_slice) { | 1051 | if (expect_latency > group_slice) { |
1044 | unsigned base_low_slice = 2 * cfqd->cfq_slice_idle; | 1052 | u64 base_low_slice = 2 * cfqd->cfq_slice_idle; |
1053 | u64 low_slice; | ||
1054 | |||
1045 | /* scale low_slice according to IO priority | 1055 | /* scale low_slice according to IO priority |
1046 | * and sync vs async */ | 1056 | * and sync vs async */ |
1047 | unsigned low_slice = | 1057 | low_slice = div64_u64(base_low_slice*slice, sync_slice); |
1048 | min(slice, base_low_slice * slice / sync_slice); | 1058 | low_slice = min(slice, low_slice); |
1049 | /* the adapted slice value is scaled to fit all iqs | 1059 | /* the adapted slice value is scaled to fit all iqs |
1050 | * into the target latency */ | 1060 | * into the target latency */ |
1051 | slice = max(slice * group_slice / expect_latency, | 1061 | slice = div64_u64(slice*group_slice, expect_latency); |
1052 | low_slice); | 1062 | slice = max(slice, low_slice); |
1053 | } | 1063 | } |
1054 | } | 1064 | } |
1055 | return slice; | 1065 | return slice; |
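With slices held as u64 nanoseconds, an open-coded divide would fail to link on 32-bit architectures, which have no native 64-bit division; hence the conversions above route everything through the math64 helpers (div_u64() for u64/u32, div64_u64() for u64/u64). A sketch mirroring cfq_scaled_cfqq_slice():

    #include <linux/math64.h>

    /* Scale a nanosecond slice by num/den without a raw 64-bit divide. */
    static u64 scale_slice(u64 slice, u64 num, u64 den)
    {
            return div64_u64(slice * num, den);
    }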
@@ -1058,12 +1068,13 @@ cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) | |||
1058 | static inline void | 1068 | static inline void |
1059 | cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) | 1069 | cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) |
1060 | { | 1070 | { |
1061 | unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq); | 1071 | u64 slice = cfq_scaled_cfqq_slice(cfqd, cfqq); |
1072 | u64 now = ktime_get_ns(); | ||
1062 | 1073 | ||
1063 | cfqq->slice_start = jiffies; | 1074 | cfqq->slice_start = now; |
1064 | cfqq->slice_end = jiffies + slice; | 1075 | cfqq->slice_end = now + slice; |
1065 | cfqq->allocated_slice = slice; | 1076 | cfqq->allocated_slice = slice; |
1066 | cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies); | 1077 | cfq_log_cfqq(cfqd, cfqq, "set_slice=%llu", cfqq->slice_end - now); |
1067 | } | 1078 | } |
1068 | 1079 | ||
1069 | /* | 1080 | /* |
@@ -1075,7 +1086,7 @@ static inline bool cfq_slice_used(struct cfq_queue *cfqq) | |||
1075 | { | 1086 | { |
1076 | if (cfq_cfqq_slice_new(cfqq)) | 1087 | if (cfq_cfqq_slice_new(cfqq)) |
1077 | return false; | 1088 | return false; |
1078 | if (time_before(jiffies, cfqq->slice_end)) | 1089 | if (ktime_get_ns() < cfqq->slice_end) |
1079 | return false; | 1090 | return false; |
1080 | 1091 | ||
1081 | return true; | 1092 | return true; |
@@ -1241,8 +1252,8 @@ cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
1241 | return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last)); | 1252 | return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last)); |
1242 | } | 1253 | } |
1243 | 1254 | ||
1244 | static unsigned long cfq_slice_offset(struct cfq_data *cfqd, | 1255 | static u64 cfq_slice_offset(struct cfq_data *cfqd, |
1245 | struct cfq_queue *cfqq) | 1256 | struct cfq_queue *cfqq) |
1246 | { | 1257 | { |
1247 | /* | 1258 | /* |
1248 | * just an approximation, should be ok. | 1259 | * just an approximation, should be ok. |
@@ -1435,31 +1446,32 @@ cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg) | |||
1435 | cfqg_stats_update_dequeue(cfqg); | 1446 | cfqg_stats_update_dequeue(cfqg); |
1436 | } | 1447 | } |
1437 | 1448 | ||
1438 | static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq, | 1449 | static inline u64 cfq_cfqq_slice_usage(struct cfq_queue *cfqq, |
1439 | unsigned int *unaccounted_time) | 1450 | u64 *unaccounted_time) |
1440 | { | 1451 | { |
1441 | unsigned int slice_used; | 1452 | u64 slice_used; |
1453 | u64 now = ktime_get_ns(); | ||
1442 | 1454 | ||
1443 | /* | 1455 | /* |
1444 | * Queue got expired before even a single request completed or | 1456 | * Queue got expired before even a single request completed or |
1445 | * got expired immediately after first request completion. | 1457 | * got expired immediately after first request completion. |
1446 | */ | 1458 | */ |
1447 | if (!cfqq->slice_start || cfqq->slice_start == jiffies) { | 1459 | if (!cfqq->slice_start || cfqq->slice_start == now) { |
1448 | /* | 1460 | /* |
1449 | * Also charge the seek time incurred to the group, otherwise | 1461 | * Also charge the seek time incurred to the group, otherwise |
1450 | * if there are multiple queues in the group, each can dispatch | 1462 |

1451 | * a single request on seeky media and cause lots of seek time | 1463 | * a single request on seeky media and cause lots of seek time |
1452 | * and group will never know it. | 1464 | * and group will never know it. |
1453 | */ | 1465 | */ |
1454 | slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start), | 1466 | slice_used = max_t(u64, (now - cfqq->dispatch_start), |
1455 | 1); | 1467 | jiffies_to_nsecs(1)); |
1456 | } else { | 1468 | } else { |
1457 | slice_used = jiffies - cfqq->slice_start; | 1469 | slice_used = now - cfqq->slice_start; |
1458 | if (slice_used > cfqq->allocated_slice) { | 1470 | if (slice_used > cfqq->allocated_slice) { |
1459 | *unaccounted_time = slice_used - cfqq->allocated_slice; | 1471 | *unaccounted_time = slice_used - cfqq->allocated_slice; |
1460 | slice_used = cfqq->allocated_slice; | 1472 | slice_used = cfqq->allocated_slice; |
1461 | } | 1473 | } |
1462 | if (time_after(cfqq->slice_start, cfqq->dispatch_start)) | 1474 | if (cfqq->slice_start > cfqq->dispatch_start) |
1463 | *unaccounted_time += cfqq->slice_start - | 1475 | *unaccounted_time += cfqq->slice_start - |
1464 | cfqq->dispatch_start; | 1476 | cfqq->dispatch_start; |
1465 | } | 1477 | } |
@@ -1471,10 +1483,11 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg, | |||
1471 | struct cfq_queue *cfqq) | 1483 | struct cfq_queue *cfqq) |
1472 | { | 1484 | { |
1473 | struct cfq_rb_root *st = &cfqd->grp_service_tree; | 1485 | struct cfq_rb_root *st = &cfqd->grp_service_tree; |
1474 | unsigned int used_sl, charge, unaccounted_sl = 0; | 1486 | u64 used_sl, charge, unaccounted_sl = 0; |
1475 | int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg) | 1487 | int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg) |
1476 | - cfqg->service_tree_idle.count; | 1488 | - cfqg->service_tree_idle.count; |
1477 | unsigned int vfr; | 1489 | unsigned int vfr; |
1490 | u64 now = ktime_get_ns(); | ||
1478 | 1491 | ||
1479 | BUG_ON(nr_sync < 0); | 1492 | BUG_ON(nr_sync < 0); |
1480 | used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl); | 1493 | used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl); |
@@ -1496,9 +1509,8 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg, | |||
1496 | cfq_group_service_tree_add(st, cfqg); | 1509 | cfq_group_service_tree_add(st, cfqg); |
1497 | 1510 | ||
1498 | /* This group is being expired. Save the context */ | 1511 | /* This group is being expired. Save the context */ |
1499 | if (time_after(cfqd->workload_expires, jiffies)) { | 1512 | if (cfqd->workload_expires > now) { |
1500 | cfqg->saved_wl_slice = cfqd->workload_expires | 1513 | cfqg->saved_wl_slice = cfqd->workload_expires - now; |
1501 | - jiffies; | ||
1502 | cfqg->saved_wl_type = cfqd->serving_wl_type; | 1514 | cfqg->saved_wl_type = cfqd->serving_wl_type; |
1503 | cfqg->saved_wl_class = cfqd->serving_wl_class; | 1515 | cfqg->saved_wl_class = cfqd->serving_wl_class; |
1504 | } else | 1516 | } else |
@@ -1507,7 +1519,7 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg, | |||
1507 | cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime, | 1519 | cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime, |
1508 | st->min_vdisktime); | 1520 | st->min_vdisktime); |
1509 | cfq_log_cfqq(cfqq->cfqd, cfqq, | 1521 | cfq_log_cfqq(cfqq->cfqd, cfqq, |
1510 | "sl_used=%u disp=%u charge=%u iops=%u sect=%lu", | 1522 | "sl_used=%llu disp=%llu charge=%llu iops=%u sect=%lu", |
1511 | used_sl, cfqq->slice_dispatch, charge, | 1523 | used_sl, cfqq->slice_dispatch, charge, |
1512 | iops_mode(cfqd), cfqq->nr_sectors); | 1524 | iops_mode(cfqd), cfqq->nr_sectors); |
1513 | cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl); | 1525 | cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl); |
@@ -1530,7 +1542,7 @@ static void cfq_init_cfqg_base(struct cfq_group *cfqg) | |||
1530 | *st = CFQ_RB_ROOT; | 1542 | *st = CFQ_RB_ROOT; |
1531 | RB_CLEAR_NODE(&cfqg->rb_node); | 1543 | RB_CLEAR_NODE(&cfqg->rb_node); |
1532 | 1544 | ||
1533 | cfqg->ttime.last_end_request = jiffies; | 1545 | cfqg->ttime.last_end_request = ktime_get_ns(); |
1534 | } | 1546 | } |
1535 | 1547 | ||
1536 | #ifdef CONFIG_CFQ_GROUP_IOSCHED | 1548 | #ifdef CONFIG_CFQ_GROUP_IOSCHED |
@@ -2213,10 +2225,11 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
2213 | { | 2225 | { |
2214 | struct rb_node **p, *parent; | 2226 | struct rb_node **p, *parent; |
2215 | struct cfq_queue *__cfqq; | 2227 | struct cfq_queue *__cfqq; |
2216 | unsigned long rb_key; | 2228 | u64 rb_key; |
2217 | struct cfq_rb_root *st; | 2229 | struct cfq_rb_root *st; |
2218 | int left; | 2230 | int left; |
2219 | int new_cfqq = 1; | 2231 | int new_cfqq = 1; |
2232 | u64 now = ktime_get_ns(); | ||
2220 | 2233 | ||
2221 | st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq)); | 2234 | st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq)); |
2222 | if (cfq_class_idle(cfqq)) { | 2235 | if (cfq_class_idle(cfqq)) { |
@@ -2226,7 +2239,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
2226 | __cfqq = rb_entry(parent, struct cfq_queue, rb_node); | 2239 | __cfqq = rb_entry(parent, struct cfq_queue, rb_node); |
2227 | rb_key += __cfqq->rb_key; | 2240 | rb_key += __cfqq->rb_key; |
2228 | } else | 2241 | } else |
2229 | rb_key += jiffies; | 2242 | rb_key += now; |
2230 | } else if (!add_front) { | 2243 | } else if (!add_front) { |
2231 | /* | 2244 | /* |
2232 | * Get our rb key offset. Subtract any residual slice | 2245 | * Get our rb key offset. Subtract any residual slice |
@@ -2234,13 +2247,13 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
2234 | * count indicates slice overrun, and this should position | 2247 | * count indicates slice overrun, and this should position |
2235 | * the next service time further away in the tree. | 2248 | * the next service time further away in the tree. |
2236 | */ | 2249 | */ |
2237 | rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies; | 2250 | rb_key = cfq_slice_offset(cfqd, cfqq) + now; |
2238 | rb_key -= cfqq->slice_resid; | 2251 | rb_key -= cfqq->slice_resid; |
2239 | cfqq->slice_resid = 0; | 2252 | cfqq->slice_resid = 0; |
2240 | } else { | 2253 | } else { |
2241 | rb_key = -HZ; | 2254 | rb_key = -NSEC_PER_SEC; |
2242 | __cfqq = cfq_rb_first(st); | 2255 | __cfqq = cfq_rb_first(st); |
2243 | rb_key += __cfqq ? __cfqq->rb_key : jiffies; | 2256 | rb_key += __cfqq ? __cfqq->rb_key : now; |
2244 | } | 2257 | } |
2245 | 2258 | ||
2246 | if (!RB_EMPTY_NODE(&cfqq->rb_node)) { | 2259 | if (!RB_EMPTY_NODE(&cfqq->rb_node)) { |
@@ -2266,7 +2279,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
2266 | /* | 2279 | /* |
2267 | * sort by key, that represents service time. | 2280 | * sort by key, that represents service time. |
2268 | */ | 2281 | */ |
2269 | if (time_before(rb_key, __cfqq->rb_key)) | 2282 | if (rb_key < __cfqq->rb_key) |
2270 | p = &parent->rb_left; | 2283 | p = &parent->rb_left; |
2271 | else { | 2284 | else { |
2272 | p = &parent->rb_right; | 2285 | p = &parent->rb_right; |
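time_before() disappears along with jiffies for a reason: jiffies counters wrap and must be compared through the wrap-safe macros, whereas 64-bit ktime_get_ns() values are monotonic for roughly 584 years, so a plain compare is safe. Sketch:

    u64 now = ktime_get_ns();

    if (now < cfqq->slice_end)
            ;       /* slice still running; no time_before() needed */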
@@ -2461,10 +2474,10 @@ static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq) | |||
2461 | { | 2474 | { |
2462 | elv_rb_del(&cfqq->sort_list, rq); | 2475 | elv_rb_del(&cfqq->sort_list, rq); |
2463 | cfqq->queued[rq_is_sync(rq)]--; | 2476 | cfqq->queued[rq_is_sync(rq)]--; |
2464 | cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags); | 2477 | cfqg_stats_update_io_remove(RQ_CFQG(rq), req_op(rq), rq->cmd_flags); |
2465 | cfq_add_rq_rb(rq); | 2478 | cfq_add_rq_rb(rq); |
2466 | cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group, | 2479 | cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group, |
2467 | rq->cmd_flags); | 2480 | req_op(rq), rq->cmd_flags); |
2468 | } | 2481 | } |
2469 | 2482 | ||
2470 | static struct request * | 2483 | static struct request * |
@@ -2517,7 +2530,7 @@ static void cfq_remove_request(struct request *rq) | |||
2517 | cfq_del_rq_rb(rq); | 2530 | cfq_del_rq_rb(rq); |
2518 | 2531 | ||
2519 | cfqq->cfqd->rq_queued--; | 2532 | cfqq->cfqd->rq_queued--; |
2520 | cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags); | 2533 | cfqg_stats_update_io_remove(RQ_CFQG(rq), req_op(rq), rq->cmd_flags); |
2521 | if (rq->cmd_flags & REQ_PRIO) { | 2534 | if (rq->cmd_flags & REQ_PRIO) { |
2522 | WARN_ON(!cfqq->prio_pending); | 2535 | WARN_ON(!cfqq->prio_pending); |
2523 | cfqq->prio_pending--; | 2536 | cfqq->prio_pending--; |
@@ -2531,7 +2544,7 @@ static int cfq_merge(struct request_queue *q, struct request **req, | |||
2531 | struct request *__rq; | 2544 | struct request *__rq; |
2532 | 2545 | ||
2533 | __rq = cfq_find_rq_fmerge(cfqd, bio); | 2546 | __rq = cfq_find_rq_fmerge(cfqd, bio); |
2534 | if (__rq && elv_rq_merge_ok(__rq, bio)) { | 2547 | if (__rq && elv_bio_merge_ok(__rq, bio)) { |
2535 | *req = __rq; | 2548 | *req = __rq; |
2536 | return ELEVATOR_FRONT_MERGE; | 2549 | return ELEVATOR_FRONT_MERGE; |
2537 | } | 2550 | } |
@@ -2552,7 +2565,7 @@ static void cfq_merged_request(struct request_queue *q, struct request *req, | |||
2552 | static void cfq_bio_merged(struct request_queue *q, struct request *req, | 2565 | static void cfq_bio_merged(struct request_queue *q, struct request *req, |
2553 | struct bio *bio) | 2566 | struct bio *bio) |
2554 | { | 2567 | { |
2555 | cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw); | 2568 | cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_rw); |
2556 | } | 2569 | } |
2557 | 2570 | ||
2558 | static void | 2571 | static void |
@@ -2566,7 +2579,7 @@ cfq_merged_requests(struct request_queue *q, struct request *rq, | |||
2566 | * reposition in fifo if next is older than rq | 2579 | * reposition in fifo if next is older than rq |
2567 | */ | 2580 | */ |
2568 | if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && | 2581 | if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && |
2569 | time_before(next->fifo_time, rq->fifo_time) && | 2582 | next->fifo_time < rq->fifo_time && |
2570 | cfqq == RQ_CFQQ(next)) { | 2583 | cfqq == RQ_CFQQ(next)) { |
2571 | list_move(&rq->queuelist, &next->queuelist); | 2584 | list_move(&rq->queuelist, &next->queuelist); |
2572 | rq->fifo_time = next->fifo_time; | 2585 | rq->fifo_time = next->fifo_time; |
@@ -2575,7 +2588,7 @@ cfq_merged_requests(struct request_queue *q, struct request *rq, | |||
2575 | if (cfqq->next_rq == next) | 2588 | if (cfqq->next_rq == next) |
2576 | cfqq->next_rq = rq; | 2589 | cfqq->next_rq = rq; |
2577 | cfq_remove_request(next); | 2590 | cfq_remove_request(next); |
2578 | cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags); | 2591 | cfqg_stats_update_io_merged(RQ_CFQG(rq), req_op(next), next->cmd_flags); |
2579 | 2592 | ||
2580 | cfqq = RQ_CFQQ(next); | 2593 | cfqq = RQ_CFQQ(next); |
2581 | /* | 2594 | /* |
@@ -2588,8 +2601,8 @@ cfq_merged_requests(struct request_queue *q, struct request *rq, | |||
2588 | cfq_del_cfqq_rr(cfqd, cfqq); | 2601 | cfq_del_cfqq_rr(cfqd, cfqq); |
2589 | } | 2602 | } |
2590 | 2603 | ||
2591 | static int cfq_allow_merge(struct request_queue *q, struct request *rq, | 2604 | static int cfq_allow_bio_merge(struct request_queue *q, struct request *rq, |
2592 | struct bio *bio) | 2605 | struct bio *bio) |
2593 | { | 2606 | { |
2594 | struct cfq_data *cfqd = q->elevator->elevator_data; | 2607 | struct cfq_data *cfqd = q->elevator->elevator_data; |
2595 | struct cfq_io_cq *cic; | 2608 | struct cfq_io_cq *cic; |
@@ -2613,9 +2626,15 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq, | |||
2613 | return cfqq == RQ_CFQQ(rq); | 2626 | return cfqq == RQ_CFQQ(rq); |
2614 | } | 2627 | } |
2615 | 2628 | ||
2629 | static int cfq_allow_rq_merge(struct request_queue *q, struct request *rq, | ||
2630 | struct request *next) | ||
2631 | { | ||
2632 | return RQ_CFQQ(rq) == RQ_CFQQ(next); | ||
2633 | } | ||
2634 | |||
2616 | static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) | 2635 | static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) |
2617 | { | 2636 | { |
2618 | del_timer(&cfqd->idle_slice_timer); | 2637 | hrtimer_try_to_cancel(&cfqd->idle_slice_timer); |
2619 | cfqg_stats_update_idle_time(cfqq->cfqg); | 2638 | cfqg_stats_update_idle_time(cfqq->cfqg); |
2620 | } | 2639 | } |
2621 | 2640 | ||
@@ -2627,7 +2646,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd, | |||
2627 | cfqd->serving_wl_class, cfqd->serving_wl_type); | 2646 | cfqd->serving_wl_class, cfqd->serving_wl_type); |
2628 | cfqg_stats_update_avg_queue_size(cfqq->cfqg); | 2647 | cfqg_stats_update_avg_queue_size(cfqq->cfqg); |
2629 | cfqq->slice_start = 0; | 2648 | cfqq->slice_start = 0; |
2630 | cfqq->dispatch_start = jiffies; | 2649 | cfqq->dispatch_start = ktime_get_ns(); |
2631 | cfqq->allocated_slice = 0; | 2650 | cfqq->allocated_slice = 0; |
2632 | cfqq->slice_end = 0; | 2651 | cfqq->slice_end = 0; |
2633 | cfqq->slice_dispatch = 0; | 2652 | cfqq->slice_dispatch = 0; |
@@ -2676,8 +2695,8 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
2676 | if (cfq_cfqq_slice_new(cfqq)) | 2695 | if (cfq_cfqq_slice_new(cfqq)) |
2677 | cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq); | 2696 | cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq); |
2678 | else | 2697 | else |
2679 | cfqq->slice_resid = cfqq->slice_end - jiffies; | 2698 | cfqq->slice_resid = cfqq->slice_end - ktime_get_ns(); |
2680 | cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid); | 2699 | cfq_log_cfqq(cfqd, cfqq, "resid=%lld", cfqq->slice_resid); |
2681 | } | 2700 | } |
2682 | 2701 | ||
2683 | cfq_group_served(cfqd, cfqq->cfqg, cfqq); | 2702 | cfq_group_served(cfqd, cfqq->cfqg, cfqq); |
@@ -2911,7 +2930,8 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) | |||
2911 | struct cfq_queue *cfqq = cfqd->active_queue; | 2930 | struct cfq_queue *cfqq = cfqd->active_queue; |
2912 | struct cfq_rb_root *st = cfqq->service_tree; | 2931 | struct cfq_rb_root *st = cfqq->service_tree; |
2913 | struct cfq_io_cq *cic; | 2932 | struct cfq_io_cq *cic; |
2914 | unsigned long sl, group_idle = 0; | 2933 | u64 sl, group_idle = 0; |
2934 | u64 now = ktime_get_ns(); | ||
2915 | 2935 | ||
2916 | /* | 2936 | /* |
2917 | * SSD device without seek penalty, disable idling. But only do so | 2937 | * SSD device without seek penalty, disable idling. But only do so |
@@ -2954,8 +2974,8 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) | |||
2954 | * time slice. | 2974 | * time slice. |
2955 | */ | 2975 | */ |
2956 | if (sample_valid(cic->ttime.ttime_samples) && | 2976 | if (sample_valid(cic->ttime.ttime_samples) && |
2957 | (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) { | 2977 | (cfqq->slice_end - now < cic->ttime.ttime_mean)) { |
2958 | cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu", | 2978 | cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%llu", |
2959 | cic->ttime.ttime_mean); | 2979 | cic->ttime.ttime_mean); |
2960 | return; | 2980 | return; |
2961 | } | 2981 | } |
@@ -2976,9 +2996,10 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) | |||
2976 | else | 2996 | else |
2977 | sl = cfqd->cfq_slice_idle; | 2997 | sl = cfqd->cfq_slice_idle; |
2978 | 2998 | ||
2979 | mod_timer(&cfqd->idle_slice_timer, jiffies + sl); | 2999 | hrtimer_start(&cfqd->idle_slice_timer, ns_to_ktime(sl), |
3000 | HRTIMER_MODE_REL); | ||
2980 | cfqg_stats_set_start_idle_time(cfqq->cfqg); | 3001 | cfqg_stats_set_start_idle_time(cfqq->cfqg); |
2981 | cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl, | 3002 | cfq_log_cfqq(cfqd, cfqq, "arm_idle: %llu group_idle: %d", sl, |
2982 | group_idle ? 1 : 0); | 3003 | group_idle ? 1 : 0); |
2983 | } | 3004 | } |
2984 | 3005 | ||
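Arming the idle window becomes an hrtimer_start() with a relative nanosecond delay instead of mod_timer() with a jiffies deadline; the matching cancel appears earlier as hrtimer_try_to_cancel(). A minimal sketch of the lifecycle this implies (the callback name is hypothetical):

    #include <linux/hrtimer.h>

    static enum hrtimer_restart idle_slice_fn(struct hrtimer *t)
    {
            /* ... expire the idle slice ... */
            return HRTIMER_NORESTART;
    }

    /* init time */
    hrtimer_init(&cfqd->idle_slice_timer, CLOCK_MONOTONIC,
                 HRTIMER_MODE_REL);
    cfqd->idle_slice_timer.function = idle_slice_fn;

    /* arming: replaces mod_timer(timer, jiffies + sl) */
    hrtimer_start(&cfqd->idle_slice_timer, ns_to_ktime(sl),
                  HRTIMER_MODE_REL);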
@@ -3018,7 +3039,7 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq) | |||
3018 | return NULL; | 3039 | return NULL; |
3019 | 3040 | ||
3020 | rq = rq_entry_fifo(cfqq->fifo.next); | 3041 | rq = rq_entry_fifo(cfqq->fifo.next); |
3021 | if (time_before(jiffies, rq->fifo_time)) | 3042 | if (ktime_get_ns() < rq->fifo_time) |
3022 | rq = NULL; | 3043 | rq = NULL; |
3023 | 3044 | ||
3024 | cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq); | 3045 | cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq); |
@@ -3096,14 +3117,14 @@ static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd, | |||
3096 | struct cfq_queue *queue; | 3117 | struct cfq_queue *queue; |
3097 | int i; | 3118 | int i; |
3098 | bool key_valid = false; | 3119 | bool key_valid = false; |
3099 | unsigned long lowest_key = 0; | 3120 | u64 lowest_key = 0; |
3100 | enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD; | 3121 | enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD; |
3101 | 3122 | ||
3102 | for (i = 0; i <= SYNC_WORKLOAD; ++i) { | 3123 | for (i = 0; i <= SYNC_WORKLOAD; ++i) { |
3103 | /* select the one with lowest rb_key */ | 3124 | /* select the one with lowest rb_key */ |
3104 | queue = cfq_rb_first(st_for(cfqg, wl_class, i)); | 3125 | queue = cfq_rb_first(st_for(cfqg, wl_class, i)); |
3105 | if (queue && | 3126 | if (queue && |
3106 | (!key_valid || time_before(queue->rb_key, lowest_key))) { | 3127 | (!key_valid || queue->rb_key < lowest_key)) { |
3107 | lowest_key = queue->rb_key; | 3128 | lowest_key = queue->rb_key; |
3108 | cur_best = i; | 3129 | cur_best = i; |
3109 | key_valid = true; | 3130 | key_valid = true; |
@@ -3116,11 +3137,12 @@ static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd, | |||
3116 | static void | 3137 | static void |
3117 | choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg) | 3138 | choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg) |
3118 | { | 3139 | { |
3119 | unsigned slice; | 3140 | u64 slice; |
3120 | unsigned count; | 3141 | unsigned count; |
3121 | struct cfq_rb_root *st; | 3142 | struct cfq_rb_root *st; |
3122 | unsigned group_slice; | 3143 | u64 group_slice; |
3123 | enum wl_class_t original_class = cfqd->serving_wl_class; | 3144 | enum wl_class_t original_class = cfqd->serving_wl_class; |
3145 | u64 now = ktime_get_ns(); | ||
3124 | 3146 | ||
3125 | /* Choose next priority. RT > BE > IDLE */ | 3147 | /* Choose next priority. RT > BE > IDLE */ |
3126 | if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg)) | 3148 | if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg)) |
@@ -3129,7 +3151,7 @@ choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg) | |||
3129 | cfqd->serving_wl_class = BE_WORKLOAD; | 3151 | cfqd->serving_wl_class = BE_WORKLOAD; |
3130 | else { | 3152 | else { |
3131 | cfqd->serving_wl_class = IDLE_WORKLOAD; | 3153 | cfqd->serving_wl_class = IDLE_WORKLOAD; |
3132 | cfqd->workload_expires = jiffies + 1; | 3154 | cfqd->workload_expires = now + jiffies_to_nsecs(1); |
3133 | return; | 3155 | return; |
3134 | } | 3156 | } |
3135 | 3157 | ||
@@ -3147,7 +3169,7 @@ choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg) | |||
3147 | /* | 3169 | /* |
3148 | * check workload expiration, and that we still have other queues ready | 3170 | * check workload expiration, and that we still have other queues ready |
3149 | */ | 3171 | */ |
3150 | if (count && !time_after(jiffies, cfqd->workload_expires)) | 3172 | if (count && !(now > cfqd->workload_expires)) |
3151 | return; | 3173 | return; |
3152 | 3174 | ||
3153 | new_workload: | 3175 | new_workload: |
@@ -3164,13 +3186,13 @@ new_workload: | |||
3164 | */ | 3186 | */ |
3165 | group_slice = cfq_group_slice(cfqd, cfqg); | 3187 | group_slice = cfq_group_slice(cfqd, cfqg); |
3166 | 3188 | ||
3167 | slice = group_slice * count / | 3189 | slice = div_u64(group_slice * count, |
3168 | max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class], | 3190 | max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class], |
3169 | cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd, | 3191 | cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd, |
3170 | cfqg)); | 3192 | cfqg))); |
3171 | 3193 | ||
3172 | if (cfqd->serving_wl_type == ASYNC_WORKLOAD) { | 3194 | if (cfqd->serving_wl_type == ASYNC_WORKLOAD) { |
3173 | unsigned int tmp; | 3195 | u64 tmp; |
3174 | 3196 | ||
3175 | /* | 3197 | /* |
3176 | * Async queues are currently system wide. Just taking | 3198 | * Async queues are currently system wide. Just taking |
@@ -3181,19 +3203,19 @@ new_workload: | |||
3181 | */ | 3203 | */ |
3182 | tmp = cfqd->cfq_target_latency * | 3204 | tmp = cfqd->cfq_target_latency * |
3183 | cfqg_busy_async_queues(cfqd, cfqg); | 3205 | cfqg_busy_async_queues(cfqd, cfqg); |
3184 | tmp = tmp/cfqd->busy_queues; | 3206 | tmp = div_u64(tmp, cfqd->busy_queues); |
3185 | slice = min_t(unsigned, slice, tmp); | 3207 | slice = min_t(u64, slice, tmp); |
3186 | 3208 | ||
3187 | /* async workload slice is scaled down according to | 3209 | /* async workload slice is scaled down according to |
3188 | * the sync/async slice ratio. */ | 3210 | * the sync/async slice ratio. */ |
3189 | slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1]; | 3211 | slice = div64_u64(slice*cfqd->cfq_slice[0], cfqd->cfq_slice[1]); |
3190 | } else | 3212 | } else |
3191 | /* sync workload slice is at least 2 * cfq_slice_idle */ | 3213 | /* sync workload slice is at least 2 * cfq_slice_idle */ |
3192 | slice = max(slice, 2 * cfqd->cfq_slice_idle); | 3214 | slice = max(slice, 2 * cfqd->cfq_slice_idle); |
3193 | 3215 | ||
3194 | slice = max_t(unsigned, slice, CFQ_MIN_TT); | 3216 | slice = max_t(u64, slice, CFQ_MIN_TT); |
3195 | cfq_log(cfqd, "workload slice:%d", slice); | 3217 | cfq_log(cfqd, "workload slice:%llu", slice); |
3196 | cfqd->workload_expires = jiffies + slice; | 3218 | cfqd->workload_expires = now + slice; |
3197 | } | 3219 | } |
3198 | 3220 | ||
3199 | static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd) | 3221 | static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd) |
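The slice arithmetic above also switches from `/` to div_u64()/div64_u64(): an open-coded division of a u64 compiles to a call to __udivdi3 on 32-bit targets, which the kernel does not provide. A minimal sketch (illustration only; the names are hypothetical):

    #include <linux/math64.h>

    static u64 scale(u64 total, u32 num, u32 den)
    {
            /* return total * num / den;   -- breaks 32-bit builds */
            return div_u64(total * num, den);  /* u64 dividend, u32 divisor */
    }

div64_u64() is the variant for a full 64-bit divisor, as used for the sync/async slice ratio in the hunk above.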
@@ -3211,16 +3233,17 @@ static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd) | |||
3211 | static void cfq_choose_cfqg(struct cfq_data *cfqd) | 3233 | static void cfq_choose_cfqg(struct cfq_data *cfqd) |
3212 | { | 3234 | { |
3213 | struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd); | 3235 | struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd); |
3236 | u64 now = ktime_get_ns(); | ||
3214 | 3237 | ||
3215 | cfqd->serving_group = cfqg; | 3238 | cfqd->serving_group = cfqg; |
3216 | 3239 | ||
3217 | /* Restore the workload type data */ | 3240 | /* Restore the workload type data */ |
3218 | if (cfqg->saved_wl_slice) { | 3241 | if (cfqg->saved_wl_slice) { |
3219 | cfqd->workload_expires = jiffies + cfqg->saved_wl_slice; | 3242 | cfqd->workload_expires = now + cfqg->saved_wl_slice; |
3220 | cfqd->serving_wl_type = cfqg->saved_wl_type; | 3243 | cfqd->serving_wl_type = cfqg->saved_wl_type; |
3221 | cfqd->serving_wl_class = cfqg->saved_wl_class; | 3244 | cfqd->serving_wl_class = cfqg->saved_wl_class; |
3222 | } else | 3245 | } else |
3223 | cfqd->workload_expires = jiffies - 1; | 3246 | cfqd->workload_expires = now - 1; |
3224 | 3247 | ||
3225 | choose_wl_class_and_type(cfqd, cfqg); | 3248 | choose_wl_class_and_type(cfqd, cfqg); |
3226 | } | 3249 | } |
@@ -3232,6 +3255,7 @@ static void cfq_choose_cfqg(struct cfq_data *cfqd) | |||
3232 | static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) | 3255 | static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) |
3233 | { | 3256 | { |
3234 | struct cfq_queue *cfqq, *new_cfqq = NULL; | 3257 | struct cfq_queue *cfqq, *new_cfqq = NULL; |
3258 | u64 now = ktime_get_ns(); | ||
3235 | 3259 | ||
3236 | cfqq = cfqd->active_queue; | 3260 | cfqq = cfqd->active_queue; |
3237 | if (!cfqq) | 3261 | if (!cfqq) |
@@ -3292,7 +3316,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) | |||
3292 | * flight or is idling for a new request, allow either of these | 3316 | * flight or is idling for a new request, allow either of these |
3293 | * conditions to happen (or time out) before selecting a new queue. | 3317 | * conditions to happen (or time out) before selecting a new queue. |
3294 | */ | 3318 | */ |
3295 | if (timer_pending(&cfqd->idle_slice_timer)) { | 3319 | if (hrtimer_active(&cfqd->idle_slice_timer)) { |
3296 | cfqq = NULL; | 3320 | cfqq = NULL; |
3297 | goto keep_queue; | 3321 | goto keep_queue; |
3298 | } | 3322 | } |
@@ -3303,7 +3327,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) | |||
3303 | **/ | 3327 | **/ |
3304 | if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) && | 3328 | if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) && |
3305 | (cfq_cfqq_slice_new(cfqq) || | 3329 | (cfq_cfqq_slice_new(cfqq) || |
3306 | (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) { | 3330 | (cfqq->slice_end - now > now - cfqq->slice_start))) { |
3307 | cfq_clear_cfqq_deep(cfqq); | 3331 | cfq_clear_cfqq_deep(cfqq); |
3308 | cfq_clear_cfqq_idle_window(cfqq); | 3332 | cfq_clear_cfqq_idle_window(cfqq); |
3309 | } | 3333 | } |
@@ -3381,11 +3405,12 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd) | |||
3381 | static inline bool cfq_slice_used_soon(struct cfq_data *cfqd, | 3405 | static inline bool cfq_slice_used_soon(struct cfq_data *cfqd, |
3382 | struct cfq_queue *cfqq) | 3406 | struct cfq_queue *cfqq) |
3383 | { | 3407 | { |
3408 | u64 now = ktime_get_ns(); | ||
3409 | |||
3384 | /* the queue hasn't finished any request, can't estimate */ | 3410 | /* the queue hasn't finished any request, can't estimate */ |
3385 | if (cfq_cfqq_slice_new(cfqq)) | 3411 | if (cfq_cfqq_slice_new(cfqq)) |
3386 | return true; | 3412 | return true; |
3387 | if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched, | 3413 | if (now + cfqd->cfq_slice_idle * cfqq->dispatched > cfqq->slice_end) |
3388 | cfqq->slice_end)) | ||
3389 | return true; | 3414 | return true; |
3390 | 3415 | ||
3391 | return false; | 3416 | return false; |
@@ -3460,10 +3485,10 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq) | |||
3460 | * based on the last sync IO we serviced | 3485 | * based on the last sync IO we serviced |
3461 | */ | 3486 | */ |
3462 | if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) { | 3487 | if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) { |
3463 | unsigned long last_sync = jiffies - cfqd->last_delayed_sync; | 3488 | u64 last_sync = ktime_get_ns() - cfqd->last_delayed_sync; |
3464 | unsigned int depth; | 3489 | unsigned int depth; |
3465 | 3490 | ||
3466 | depth = last_sync / cfqd->cfq_slice[1]; | 3491 | depth = div64_u64(last_sync, cfqd->cfq_slice[1]); |
3467 | if (!depth && !cfqq->dispatched) | 3492 | if (!depth && !cfqq->dispatched) |
3468 | depth = 1; | 3493 | depth = 1; |
3469 | if (depth < max_dispatch) | 3494 | if (depth < max_dispatch) |
@@ -3546,7 +3571,7 @@ static int cfq_dispatch_requests(struct request_queue *q, int force) | |||
3546 | if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) && | 3571 | if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) && |
3547 | cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) || | 3572 | cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) || |
3548 | cfq_class_idle(cfqq))) { | 3573 | cfq_class_idle(cfqq))) { |
3549 | cfqq->slice_end = jiffies + 1; | 3574 | cfqq->slice_end = ktime_get_ns() + 1; |
3550 | cfq_slice_expired(cfqd, 0); | 3575 | cfq_slice_expired(cfqd, 0); |
3551 | } | 3576 | } |
3552 | 3577 | ||
@@ -3624,7 +3649,7 @@ static void cfq_init_icq(struct io_cq *icq) | |||
3624 | { | 3649 | { |
3625 | struct cfq_io_cq *cic = icq_to_cic(icq); | 3650 | struct cfq_io_cq *cic = icq_to_cic(icq); |
3626 | 3651 | ||
3627 | cic->ttime.last_end_request = jiffies; | 3652 | cic->ttime.last_end_request = ktime_get_ns(); |
3628 | } | 3653 | } |
3629 | 3654 | ||
3630 | static void cfq_exit_icq(struct io_cq *icq) | 3655 | static void cfq_exit_icq(struct io_cq *icq) |
@@ -3682,6 +3707,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic) | |||
3682 | * elevate the priority of this queue | 3707 | * elevate the priority of this queue |
3683 | */ | 3708 | */ |
3684 | cfqq->org_ioprio = cfqq->ioprio; | 3709 | cfqq->org_ioprio = cfqq->ioprio; |
3710 | cfqq->org_ioprio_class = cfqq->ioprio_class; | ||
3685 | cfq_clear_cfqq_prio_changed(cfqq); | 3711 | cfq_clear_cfqq_prio_changed(cfqq); |
3686 | } | 3712 | } |
3687 | 3713 | ||
@@ -3845,14 +3871,15 @@ out: | |||
3845 | } | 3871 | } |
3846 | 3872 | ||
3847 | static void | 3873 | static void |
3848 | __cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle) | 3874 | __cfq_update_io_thinktime(struct cfq_ttime *ttime, u64 slice_idle) |
3849 | { | 3875 | { |
3850 | unsigned long elapsed = jiffies - ttime->last_end_request; | 3876 | u64 elapsed = ktime_get_ns() - ttime->last_end_request; |
3851 | elapsed = min(elapsed, 2UL * slice_idle); | 3877 | elapsed = min(elapsed, 2UL * slice_idle); |
3852 | 3878 | ||
3853 | ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8; | 3879 | ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8; |
3854 | ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8; | 3880 | ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8); |
3855 | ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples; | 3881 | ttime->ttime_mean = div64_ul(ttime->ttime_total + 128, |
3882 | ttime->ttime_samples); | ||
3856 | } | 3883 | } |
3857 | 3884 | ||
3858 | static void | 3885 | static void |
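The think-time update above is an exponentially weighted moving average kept in integer fixed point: each sample is scaled by 256 so the 7/8 decay retains sub-unit precision, and the mean divides the scaled total by the identically decayed sample count. A compact sketch of the same arithmetic (illustration only):

    /* new_total = 7/8 * old_total + 256 * sample / 8 */
    total   = div_u64(7 * total + 256 * sample, 8);
    samples = (7 * samples + 256) / 8;            /* converges to 256 */
    mean    = div64_ul(total + 128, samples);     /* +128 rounds to nearest */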
@@ -4105,10 +4132,10 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq) | |||
4105 | cfq_log_cfqq(cfqd, cfqq, "insert_request"); | 4132 | cfq_log_cfqq(cfqd, cfqq, "insert_request"); |
4106 | cfq_init_prio_data(cfqq, RQ_CIC(rq)); | 4133 | cfq_init_prio_data(cfqq, RQ_CIC(rq)); |
4107 | 4134 | ||
4108 | rq->fifo_time = jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]; | 4135 | rq->fifo_time = ktime_get_ns() + cfqd->cfq_fifo_expire[rq_is_sync(rq)]; |
4109 | list_add_tail(&rq->queuelist, &cfqq->fifo); | 4136 | list_add_tail(&rq->queuelist, &cfqq->fifo); |
4110 | cfq_add_rq_rb(rq); | 4137 | cfq_add_rq_rb(rq); |
4111 | cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group, | 4138 | cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group, req_op(rq), |
4112 | rq->cmd_flags); | 4139 | rq->cmd_flags); |
4113 | cfq_rq_enqueued(cfqd, cfqq, rq); | 4140 | cfq_rq_enqueued(cfqd, cfqq, rq); |
4114 | } | 4141 | } |
@@ -4153,6 +4180,7 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd) | |||
4153 | static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq) | 4180 | static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq) |
4154 | { | 4181 | { |
4155 | struct cfq_io_cq *cic = cfqd->active_cic; | 4182 | struct cfq_io_cq *cic = cfqd->active_cic; |
4183 | u64 now = ktime_get_ns(); | ||
4156 | 4184 | ||
4157 | /* If the queue already has requests, don't wait */ | 4185 | /* If the queue already has requests, don't wait */ |
4158 | if (!RB_EMPTY_ROOT(&cfqq->sort_list)) | 4186 | if (!RB_EMPTY_ROOT(&cfqq->sort_list)) |
@@ -4171,7 +4199,7 @@ static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq) | |||
4171 | 4199 | ||
4172 | /* if slice left is less than think time, wait busy */ | 4200 | /* if slice left is less than think time, wait busy */ |
4173 | if (cic && sample_valid(cic->ttime.ttime_samples) | 4201 | if (cic && sample_valid(cic->ttime.ttime_samples) |
4174 | && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) | 4202 | && (cfqq->slice_end - now < cic->ttime.ttime_mean)) |
4175 | return true; | 4203 | return true; |
4176 | 4204 | ||
4177 | /* | 4205 | /* |
@@ -4181,7 +4209,7 @@ static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq) | |||
4181 | * case where think time is less than a jiffy, mark the queue wait | 4209 | * case where think time is less than a jiffy, mark the queue wait |
4182 | * busy if only 1 jiffy is left in the slice. | 4210 | * busy if only 1 jiffy is left in the slice. |
4183 | */ | 4211 | */ |
4184 | if (cfqq->slice_end - jiffies == 1) | 4212 | if (cfqq->slice_end - now <= jiffies_to_nsecs(1)) |
4185 | return true; | 4213 | return true; |
4186 | 4214 | ||
4187 | return false; | 4215 | return false; |
@@ -4192,9 +4220,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) | |||
4192 | struct cfq_queue *cfqq = RQ_CFQQ(rq); | 4220 | struct cfq_queue *cfqq = RQ_CFQQ(rq); |
4193 | struct cfq_data *cfqd = cfqq->cfqd; | 4221 | struct cfq_data *cfqd = cfqq->cfqd; |
4194 | const int sync = rq_is_sync(rq); | 4222 | const int sync = rq_is_sync(rq); |
4195 | unsigned long now; | 4223 | u64 now = ktime_get_ns(); |
4196 | 4224 | ||
4197 | now = jiffies; | ||
4198 | cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", | 4225 | cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", |
4199 | !!(rq->cmd_flags & REQ_NOIDLE)); | 4226 | !!(rq->cmd_flags & REQ_NOIDLE)); |
4200 | 4227 | ||
@@ -4206,7 +4233,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) | |||
4206 | cfqq->dispatched--; | 4233 | cfqq->dispatched--; |
4207 | (RQ_CFQG(rq))->dispatched--; | 4234 | (RQ_CFQG(rq))->dispatched--; |
4208 | cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq), | 4235 | cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq), |
4209 | rq_io_start_time_ns(rq), rq->cmd_flags); | 4236 | rq_io_start_time_ns(rq), req_op(rq), |
4237 | rq->cmd_flags); | ||
4210 | 4238 | ||
4211 | cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--; | 4239 | cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--; |
4212 | 4240 | ||
@@ -4222,7 +4250,16 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) | |||
4222 | cfqq_type(cfqq)); | 4250 | cfqq_type(cfqq)); |
4223 | 4251 | ||
4224 | st->ttime.last_end_request = now; | 4252 | st->ttime.last_end_request = now; |
4225 | if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now)) | 4253 | /* |
4254 | * We have to do this check in jiffies since start_time is in | ||
4255 | * jiffies and it is not trivial to convert to ns. If | ||
4256 | * cfq_fifo_expire[1] ever comes close to 1 jiffie, this test | ||
4257 | * will become problematic but so far we are fine (the default | ||
4258 | * is 128 ms). | ||
4259 | */ | ||
4260 | if (!time_after(rq->start_time + | ||
4261 | nsecs_to_jiffies(cfqd->cfq_fifo_expire[1]), | ||
4262 | jiffies)) | ||
4226 | cfqd->last_delayed_sync = now; | 4263 | cfqd->last_delayed_sync = now; |
4227 | } | 4264 | } |
4228 | 4265 | ||
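The comment in the hunk above flags the one spot where units still mix: rq->start_time stays in jiffies while cfq_fifo_expire[1] is now nanoseconds, so the tunable is converted down with nsecs_to_jiffies() for the comparison. That conversion floors to whole ticks, which only matters if the tunable ever shrinks toward one jiffy; the shipped default of 128 ms is far above that. Sketch of the shape (illustration only, with hypothetical names):

    /* start_time: jiffies;  expire_ns: nanoseconds */
    if (time_after_eq(jiffies,
                      start_time + nsecs_to_jiffies(expire_ns)))
            /* the sync request was delayed */;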
@@ -4247,10 +4284,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) | |||
4247 | * the queue. | 4284 | * the queue. |
4248 | */ | 4285 | */ |
4249 | if (cfq_should_wait_busy(cfqd, cfqq)) { | 4286 | if (cfq_should_wait_busy(cfqd, cfqq)) { |
4250 | unsigned long extend_sl = cfqd->cfq_slice_idle; | 4287 | u64 extend_sl = cfqd->cfq_slice_idle; |
4251 | if (!cfqd->cfq_slice_idle) | 4288 | if (!cfqd->cfq_slice_idle) |
4252 | extend_sl = cfqd->cfq_group_idle; | 4289 | extend_sl = cfqd->cfq_group_idle; |
4253 | cfqq->slice_end = jiffies + extend_sl; | 4290 | cfqq->slice_end = now + extend_sl; |
4254 | cfq_mark_cfqq_wait_busy(cfqq); | 4291 | cfq_mark_cfqq_wait_busy(cfqq); |
4255 | cfq_log_cfqq(cfqd, cfqq, "will busy wait"); | 4292 | cfq_log_cfqq(cfqd, cfqq, "will busy wait"); |
4256 | } | 4293 | } |
@@ -4275,6 +4312,24 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) | |||
4275 | cfq_schedule_dispatch(cfqd); | 4312 | cfq_schedule_dispatch(cfqd); |
4276 | } | 4313 | } |
4277 | 4314 | ||
4315 | static void cfqq_boost_on_prio(struct cfq_queue *cfqq, int op_flags) | ||
4316 | { | ||
4317 | /* | ||
4318 | * If REQ_PRIO is set, boost class and prio level, if it's below | ||
4319 | * BE/NORM. If prio is not set, restore the potentially boosted | ||
4320 | * class/prio level. | ||
4321 | */ | ||
4322 | if (!(op_flags & REQ_PRIO)) { | ||
4323 | cfqq->ioprio_class = cfqq->org_ioprio_class; | ||
4324 | cfqq->ioprio = cfqq->org_ioprio; | ||
4325 | } else { | ||
4326 | if (cfq_class_idle(cfqq)) | ||
4327 | cfqq->ioprio_class = IOPRIO_CLASS_BE; | ||
4328 | if (cfqq->ioprio > IOPRIO_NORM) | ||
4329 | cfqq->ioprio = IOPRIO_NORM; | ||
4330 | } | ||
4331 | } | ||
4332 | |||
4278 | static inline int __cfq_may_queue(struct cfq_queue *cfqq) | 4333 | static inline int __cfq_may_queue(struct cfq_queue *cfqq) |
4279 | { | 4334 | { |
4280 | if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) { | 4335 | if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) { |
@@ -4285,7 +4340,7 @@ static inline int __cfq_may_queue(struct cfq_queue *cfqq) | |||
4285 | return ELV_MQUEUE_MAY; | 4340 | return ELV_MQUEUE_MAY; |
4286 | } | 4341 | } |
4287 | 4342 | ||
4288 | static int cfq_may_queue(struct request_queue *q, int rw) | 4343 | static int cfq_may_queue(struct request_queue *q, int op, int op_flags) |
4289 | { | 4344 | { |
4290 | struct cfq_data *cfqd = q->elevator->elevator_data; | 4345 | struct cfq_data *cfqd = q->elevator->elevator_data; |
4291 | struct task_struct *tsk = current; | 4346 | struct task_struct *tsk = current; |
@@ -4302,9 +4357,10 @@ static int cfq_may_queue(struct request_queue *q, int rw) | |||
4302 | if (!cic) | 4357 | if (!cic) |
4303 | return ELV_MQUEUE_MAY; | 4358 | return ELV_MQUEUE_MAY; |
4304 | 4359 | ||
4305 | cfqq = cic_to_cfqq(cic, rw_is_sync(rw)); | 4360 | cfqq = cic_to_cfqq(cic, rw_is_sync(op, op_flags)); |
4306 | if (cfqq) { | 4361 | if (cfqq) { |
4307 | cfq_init_prio_data(cfqq, cic); | 4362 | cfq_init_prio_data(cfqq, cic); |
4363 | cfqq_boost_on_prio(cfqq, op_flags); | ||
4308 | 4364 | ||
4309 | return __cfq_may_queue(cfqq); | 4365 | return __cfq_may_queue(cfqq); |
4310 | } | 4366 | } |
@@ -4435,9 +4491,10 @@ static void cfq_kick_queue(struct work_struct *work) | |||
4435 | /* | 4491 | /* |
4436 | * Timer running if the active_queue is currently idling inside its time slice | 4492 | * Timer running if the active_queue is currently idling inside its time slice |
4437 | */ | 4493 | */ |
4438 | static void cfq_idle_slice_timer(unsigned long data) | 4494 | static enum hrtimer_restart cfq_idle_slice_timer(struct hrtimer *timer) |
4439 | { | 4495 | { |
4440 | struct cfq_data *cfqd = (struct cfq_data *) data; | 4496 | struct cfq_data *cfqd = container_of(timer, struct cfq_data, |
4497 | idle_slice_timer); | ||
4441 | struct cfq_queue *cfqq; | 4498 | struct cfq_queue *cfqq; |
4442 | unsigned long flags; | 4499 | unsigned long flags; |
4443 | int timed_out = 1; | 4500 | int timed_out = 1; |
@@ -4486,11 +4543,12 @@ out_kick: | |||
4486 | cfq_schedule_dispatch(cfqd); | 4543 | cfq_schedule_dispatch(cfqd); |
4487 | out_cont: | 4544 | out_cont: |
4488 | spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); | 4545 | spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); |
4546 | return HRTIMER_NORESTART; | ||
4489 | } | 4547 | } |
4490 | 4548 | ||
4491 | static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) | 4549 | static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) |
4492 | { | 4550 | { |
4493 | del_timer_sync(&cfqd->idle_slice_timer); | 4551 | hrtimer_cancel(&cfqd->idle_slice_timer); |
4494 | cancel_work_sync(&cfqd->unplug_work); | 4552 | cancel_work_sync(&cfqd->unplug_work); |
4495 | } | 4553 | } |
4496 | 4554 | ||
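The two hunks above are the standard timer_list-to-hrtimer conversion: the callback receives the hrtimer pointer (container_of() replaces the old .data word), must return an enum hrtimer_restart, and del_timer_sync() becomes hrtimer_cancel(). A minimal sketch assuming a hypothetical struct my_ctx that embeds the timer:

    #include <linux/hrtimer.h>

    static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
    {
            struct my_ctx *ctx = container_of(t, struct my_ctx, timer);

            /* ... handle expiry ... */
            return HRTIMER_NORESTART;     /* one-shot, like the idle timer */
    }

    hrtimer_init(&ctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    ctx->timer.function = my_timer_fn;
    hrtimer_start(&ctx->timer, ns_to_ktime(delay_ns), HRTIMER_MODE_REL);
    /* ... */
    hrtimer_cancel(&ctx->timer);          /* replaces del_timer_sync() */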
@@ -4586,9 +4644,9 @@ static int cfq_init_queue(struct request_queue *q, struct elevator_type *e) | |||
4586 | cfqg_put(cfqd->root_group); | 4644 | cfqg_put(cfqd->root_group); |
4587 | spin_unlock_irq(q->queue_lock); | 4645 | spin_unlock_irq(q->queue_lock); |
4588 | 4646 | ||
4589 | init_timer(&cfqd->idle_slice_timer); | 4647 | hrtimer_init(&cfqd->idle_slice_timer, CLOCK_MONOTONIC, |
4648 | HRTIMER_MODE_REL); | ||
4590 | cfqd->idle_slice_timer.function = cfq_idle_slice_timer; | 4649 | cfqd->idle_slice_timer.function = cfq_idle_slice_timer; |
4591 | cfqd->idle_slice_timer.data = (unsigned long) cfqd; | ||
4592 | 4650 | ||
4593 | INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); | 4651 | INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); |
4594 | 4652 | ||
@@ -4609,7 +4667,7 @@ static int cfq_init_queue(struct request_queue *q, struct elevator_type *e) | |||
4609 | * we optimistically start assuming sync ops weren't delayed in last | 4667 | * we optimistically start assuming sync ops weren't delayed in last |
4610 | * second, in order to have larger depth for async operations. | 4668 | * second, in order to have larger depth for async operations. |
4611 | */ | 4669 | */ |
4612 | cfqd->last_delayed_sync = jiffies - HZ; | 4670 | cfqd->last_delayed_sync = ktime_get_ns() - NSEC_PER_SEC; |
4613 | return 0; | 4671 | return 0; |
4614 | 4672 | ||
4615 | out_free: | 4673 | out_free: |
@@ -4652,9 +4710,9 @@ cfq_var_store(unsigned int *var, const char *page, size_t count) | |||
4652 | static ssize_t __FUNC(struct elevator_queue *e, char *page) \ | 4710 | static ssize_t __FUNC(struct elevator_queue *e, char *page) \ |
4653 | { \ | 4711 | { \ |
4654 | struct cfq_data *cfqd = e->elevator_data; \ | 4712 | struct cfq_data *cfqd = e->elevator_data; \ |
4655 | unsigned int __data = __VAR; \ | 4713 | u64 __data = __VAR; \ |
4656 | if (__CONV) \ | 4714 | if (__CONV) \ |
4657 | __data = jiffies_to_msecs(__data); \ | 4715 | __data = div_u64(__data, NSEC_PER_MSEC); \ |
4658 | return cfq_var_show(__data, (page)); \ | 4716 | return cfq_var_show(__data, (page)); \ |
4659 | } | 4717 | } |
4660 | SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0); | 4718 | SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0); |
@@ -4671,6 +4729,21 @@ SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0); | |||
4671 | SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1); | 4729 | SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1); |
4672 | #undef SHOW_FUNCTION | 4730 | #undef SHOW_FUNCTION |
4673 | 4731 | ||
4732 | #define USEC_SHOW_FUNCTION(__FUNC, __VAR) \ | ||
4733 | static ssize_t __FUNC(struct elevator_queue *e, char *page) \ | ||
4734 | { \ | ||
4735 | struct cfq_data *cfqd = e->elevator_data; \ | ||
4736 | u64 __data = __VAR; \ | ||
4737 | __data = div_u64(__data, NSEC_PER_USEC); \ | ||
4738 | return cfq_var_show(__data, (page)); \ | ||
4739 | } | ||
4740 | USEC_SHOW_FUNCTION(cfq_slice_idle_us_show, cfqd->cfq_slice_idle); | ||
4741 | USEC_SHOW_FUNCTION(cfq_group_idle_us_show, cfqd->cfq_group_idle); | ||
4742 | USEC_SHOW_FUNCTION(cfq_slice_sync_us_show, cfqd->cfq_slice[1]); | ||
4743 | USEC_SHOW_FUNCTION(cfq_slice_async_us_show, cfqd->cfq_slice[0]); | ||
4744 | USEC_SHOW_FUNCTION(cfq_target_latency_us_show, cfqd->cfq_target_latency); | ||
4745 | #undef USEC_SHOW_FUNCTION | ||
4746 | |||
4674 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ | 4747 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ |
4675 | static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ | 4748 | static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ |
4676 | { \ | 4749 | { \ |
@@ -4682,7 +4755,7 @@ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) | |||
4682 | else if (__data > (MAX)) \ | 4755 | else if (__data > (MAX)) \ |
4683 | __data = (MAX); \ | 4756 | __data = (MAX); \ |
4684 | if (__CONV) \ | 4757 | if (__CONV) \ |
4685 | *(__PTR) = msecs_to_jiffies(__data); \ | 4758 | *(__PTR) = (u64)__data * NSEC_PER_MSEC; \ |
4686 | else \ | 4759 | else \ |
4687 | *(__PTR) = __data; \ | 4760 | *(__PTR) = __data; \ |
4688 | return ret; \ | 4761 | return ret; \ |
@@ -4705,6 +4778,26 @@ STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0); | |||
4705 | STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1); | 4778 | STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1); |
4706 | #undef STORE_FUNCTION | 4779 | #undef STORE_FUNCTION |
4707 | 4780 | ||
4781 | #define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \ | ||
4782 | static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ | ||
4783 | { \ | ||
4784 | struct cfq_data *cfqd = e->elevator_data; \ | ||
4785 | unsigned int __data; \ | ||
4786 | int ret = cfq_var_store(&__data, (page), count); \ | ||
4787 | if (__data < (MIN)) \ | ||
4788 | __data = (MIN); \ | ||
4789 | else if (__data > (MAX)) \ | ||
4790 | __data = (MAX); \ | ||
4791 | *(__PTR) = (u64)__data * NSEC_PER_USEC; \ | ||
4792 | return ret; \ | ||
4793 | } | ||
4794 | USEC_STORE_FUNCTION(cfq_slice_idle_us_store, &cfqd->cfq_slice_idle, 0, UINT_MAX); | ||
4795 | USEC_STORE_FUNCTION(cfq_group_idle_us_store, &cfqd->cfq_group_idle, 0, UINT_MAX); | ||
4796 | USEC_STORE_FUNCTION(cfq_slice_sync_us_store, &cfqd->cfq_slice[1], 1, UINT_MAX); | ||
4797 | USEC_STORE_FUNCTION(cfq_slice_async_us_store, &cfqd->cfq_slice[0], 1, UINT_MAX); | ||
4798 | USEC_STORE_FUNCTION(cfq_target_latency_us_store, &cfqd->cfq_target_latency, 1, UINT_MAX); | ||
4799 | #undef USEC_STORE_FUNCTION | ||
4800 | |||
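The two macro blocks above give every time-based tunable a second, microsecond-granularity sysfs view over the same nanosecond field, so the legacy millisecond files keep working while highres users get finer control. Both names window one variable (illustration only; the paths assume CFQ is the active scheduler for the device):

    /* echo 10 > .../queue/iosched/slice_idle   stores 10 * NSEC_PER_MSEC */
    /* cat .../queue/iosched/slice_idle_us      then reads back 10000     */
    u64 ns = cfqd->cfq_slice_idle;            /* canonical storage: ns */
    u64 ms = div_u64(ns, NSEC_PER_MSEC);      /* slice_idle view       */
    u64 us = div_u64(ns, NSEC_PER_USEC);      /* slice_idle_us view    */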
4708 | #define CFQ_ATTR(name) \ | 4801 | #define CFQ_ATTR(name) \ |
4709 | __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store) | 4802 | __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store) |
4710 | 4803 | ||
@@ -4715,12 +4808,17 @@ static struct elv_fs_entry cfq_attrs[] = { | |||
4715 | CFQ_ATTR(back_seek_max), | 4808 | CFQ_ATTR(back_seek_max), |
4716 | CFQ_ATTR(back_seek_penalty), | 4809 | CFQ_ATTR(back_seek_penalty), |
4717 | CFQ_ATTR(slice_sync), | 4810 | CFQ_ATTR(slice_sync), |
4811 | CFQ_ATTR(slice_sync_us), | ||
4718 | CFQ_ATTR(slice_async), | 4812 | CFQ_ATTR(slice_async), |
4813 | CFQ_ATTR(slice_async_us), | ||
4719 | CFQ_ATTR(slice_async_rq), | 4814 | CFQ_ATTR(slice_async_rq), |
4720 | CFQ_ATTR(slice_idle), | 4815 | CFQ_ATTR(slice_idle), |
4816 | CFQ_ATTR(slice_idle_us), | ||
4721 | CFQ_ATTR(group_idle), | 4817 | CFQ_ATTR(group_idle), |
4818 | CFQ_ATTR(group_idle_us), | ||
4722 | CFQ_ATTR(low_latency), | 4819 | CFQ_ATTR(low_latency), |
4723 | CFQ_ATTR(target_latency), | 4820 | CFQ_ATTR(target_latency), |
4821 | CFQ_ATTR(target_latency_us), | ||
4724 | __ATTR_NULL | 4822 | __ATTR_NULL |
4725 | }; | 4823 | }; |
4726 | 4824 | ||
@@ -4729,7 +4827,8 @@ static struct elevator_type iosched_cfq = { | |||
4729 | .elevator_merge_fn = cfq_merge, | 4827 | .elevator_merge_fn = cfq_merge, |
4730 | .elevator_merged_fn = cfq_merged_request, | 4828 | .elevator_merged_fn = cfq_merged_request, |
4731 | .elevator_merge_req_fn = cfq_merged_requests, | 4829 | .elevator_merge_req_fn = cfq_merged_requests, |
4732 | .elevator_allow_merge_fn = cfq_allow_merge, | 4830 | .elevator_allow_bio_merge_fn = cfq_allow_bio_merge, |
4831 | .elevator_allow_rq_merge_fn = cfq_allow_rq_merge, | ||
4733 | .elevator_bio_merged_fn = cfq_bio_merged, | 4832 | .elevator_bio_merged_fn = cfq_bio_merged, |
4734 | .elevator_dispatch_fn = cfq_dispatch_requests, | 4833 | .elevator_dispatch_fn = cfq_dispatch_requests, |
4735 | .elevator_add_req_fn = cfq_insert_request, | 4834 | .elevator_add_req_fn = cfq_insert_request, |
@@ -4776,18 +4875,7 @@ static int __init cfq_init(void) | |||
4776 | { | 4875 | { |
4777 | int ret; | 4876 | int ret; |
4778 | 4877 | ||
4779 | /* | ||
4780 | * could be 0 on HZ < 1000 setups | ||
4781 | */ | ||
4782 | if (!cfq_slice_async) | ||
4783 | cfq_slice_async = 1; | ||
4784 | if (!cfq_slice_idle) | ||
4785 | cfq_slice_idle = 1; | ||
4786 | |||
4787 | #ifdef CONFIG_CFQ_GROUP_IOSCHED | 4878 | #ifdef CONFIG_CFQ_GROUP_IOSCHED |
4788 | if (!cfq_group_idle) | ||
4789 | cfq_group_idle = 1; | ||
4790 | |||
4791 | ret = blkcg_policy_register(&blkcg_policy_cfq); | 4879 | ret = blkcg_policy_register(&blkcg_policy_cfq); |
4792 | if (ret) | 4880 | if (ret) |
4793 | return ret; | 4881 | return ret; |
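The deleted init-time fixups existed because the old defaults were computed in ticks, roughly `static int cfq_slice_idle = HZ / 125;`, which truncates to 0 on HZ=100 kernels. With the tunables stored as nanoseconds that truncation cannot happen, so the guard goes away. Sketch of the new-style default, assuming the same 8 ms target the old code aimed for:

    static u64 cfq_slice_idle = NSEC_PER_SEC / 125;   /* 8 ms on any HZ */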
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c index d0dd7882d8c7..55e0bb6d7da7 100644 --- a/block/deadline-iosched.c +++ b/block/deadline-iosched.c | |||
@@ -137,7 +137,7 @@ deadline_merge(struct request_queue *q, struct request **req, struct bio *bio) | |||
137 | if (__rq) { | 137 | if (__rq) { |
138 | BUG_ON(sector != blk_rq_pos(__rq)); | 138 | BUG_ON(sector != blk_rq_pos(__rq)); |
139 | 139 | ||
140 | if (elv_rq_merge_ok(__rq, bio)) { | 140 | if (elv_bio_merge_ok(__rq, bio)) { |
141 | ret = ELEVATOR_FRONT_MERGE; | 141 | ret = ELEVATOR_FRONT_MERGE; |
142 | goto out; | 142 | goto out; |
143 | } | 143 | } |
@@ -173,7 +173,8 @@ deadline_merged_requests(struct request_queue *q, struct request *req, | |||
173 | * and move into next position (next will be deleted) in fifo | 173 | * and move into next position (next will be deleted) in fifo |
174 | */ | 174 | */ |
175 | if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) { | 175 | if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) { |
176 | if (time_before(next->fifo_time, req->fifo_time)) { | 176 | if (time_before((unsigned long)next->fifo_time, |
177 | (unsigned long)req->fifo_time)) { | ||
177 | list_move(&req->queuelist, &next->queuelist); | 178 | list_move(&req->queuelist, &next->queuelist); |
178 | req->fifo_time = next->fifo_time; | 179 | req->fifo_time = next->fifo_time; |
179 | } | 180 | } |
@@ -227,7 +228,7 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir) | |||
227 | /* | 228 | /* |
228 | * rq is expired! | 229 | * rq is expired! |
229 | */ | 230 | */ |
230 | if (time_after_eq(jiffies, rq->fifo_time)) | 231 | if (time_after_eq(jiffies, (unsigned long)rq->fifo_time)) |
231 | return 1; | 232 | return 1; |
232 | 233 | ||
233 | return 0; | 234 | return 0; |
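The casts above exist because struct request's fifo_time field became a u64 for CFQ's nanosecond stamps, but deadline still stores jiffies in it; its wrap-safe comparisons therefore truncate back to unsigned long. Sketch of both halves (illustration only, following deadline's insertion path):

    /* insertion: still plain jiffies, despite the u64 field */
    rq->fifo_time = jiffies + dd->fifo_expire[data_dir];

    /* expiry check: cast back before the wrap-safe compare */
    if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
            /* rq is expired */;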
diff --git a/block/elevator.c b/block/elevator.c index c3555c9c672f..7096c22041e7 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
@@ -53,13 +53,13 @@ static LIST_HEAD(elv_list); | |||
53 | * Query io scheduler to see if the current process issuing bio may be | 53 | * Query io scheduler to see if the current process issuing bio may be |
54 | * merged with rq. | 54 | * merged with rq. |
55 | */ | 55 | */ |
56 | static int elv_iosched_allow_merge(struct request *rq, struct bio *bio) | 56 | static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio) |
57 | { | 57 | { |
58 | struct request_queue *q = rq->q; | 58 | struct request_queue *q = rq->q; |
59 | struct elevator_queue *e = q->elevator; | 59 | struct elevator_queue *e = q->elevator; |
60 | 60 | ||
61 | if (e->type->ops.elevator_allow_merge_fn) | 61 | if (e->type->ops.elevator_allow_bio_merge_fn) |
62 | return e->type->ops.elevator_allow_merge_fn(q, rq, bio); | 62 | return e->type->ops.elevator_allow_bio_merge_fn(q, rq, bio); |
63 | 63 | ||
64 | return 1; | 64 | return 1; |
65 | } | 65 | } |
@@ -67,17 +67,17 @@ static int elv_iosched_allow_merge(struct request *rq, struct bio *bio) | |||
67 | /* | 67 | /* |
68 | * can we safely merge with this request? | 68 | * can we safely merge with this request? |
69 | */ | 69 | */ |
70 | bool elv_rq_merge_ok(struct request *rq, struct bio *bio) | 70 | bool elv_bio_merge_ok(struct request *rq, struct bio *bio) |
71 | { | 71 | { |
72 | if (!blk_rq_merge_ok(rq, bio)) | 72 | if (!blk_rq_merge_ok(rq, bio)) |
73 | return 0; | 73 | return false; |
74 | 74 | ||
75 | if (!elv_iosched_allow_merge(rq, bio)) | 75 | if (!elv_iosched_allow_bio_merge(rq, bio)) |
76 | return 0; | 76 | return false; |
77 | 77 | ||
78 | return 1; | 78 | return true; |
79 | } | 79 | } |
80 | EXPORT_SYMBOL(elv_rq_merge_ok); | 80 | EXPORT_SYMBOL(elv_bio_merge_ok); |
81 | 81 | ||
82 | static struct elevator_type *elevator_find(const char *name) | 82 | static struct elevator_type *elevator_find(const char *name) |
83 | { | 83 | { |
@@ -366,8 +366,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq) | |||
366 | list_for_each_prev(entry, &q->queue_head) { | 366 | list_for_each_prev(entry, &q->queue_head) { |
367 | struct request *pos = list_entry_rq(entry); | 367 | struct request *pos = list_entry_rq(entry); |
368 | 368 | ||
369 | if ((rq->cmd_flags & REQ_DISCARD) != | 369 | if ((req_op(rq) == REQ_OP_DISCARD) != (req_op(pos) == REQ_OP_DISCARD)) |
370 | (pos->cmd_flags & REQ_DISCARD)) | ||
371 | break; | 370 | break; |
372 | if (rq_data_dir(rq) != rq_data_dir(pos)) | 371 | if (rq_data_dir(rq) != rq_data_dir(pos)) |
373 | break; | 372 | break; |
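This hunk shows the flag-to-op migration: DISCARD is no longer a bit that can be masked out of cmd_flags but an operation value, so "is this a discard?" becomes an equality test on req_op() (bio_op() for bios). The dispatch sort compares the two booleans so discard and non-discard requests are never interleaved. Illustration only:

    bool is_discard = (req_op(rq) == REQ_OP_DISCARD);  /* was: cmd_flags & REQ_DISCARD */

    if ((req_op(rq) == REQ_OP_DISCARD) != (req_op(pos) == REQ_OP_DISCARD))
            break;    /* don't sort across the discard boundary */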
@@ -426,7 +425,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio) | |||
426 | /* | 425 | /* |
427 | * First try one-hit cache. | 426 | * First try one-hit cache. |
428 | */ | 427 | */ |
429 | if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) { | 428 | if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) { |
430 | ret = blk_try_merge(q->last_merge, bio); | 429 | ret = blk_try_merge(q->last_merge, bio); |
431 | if (ret != ELEVATOR_NO_MERGE) { | 430 | if (ret != ELEVATOR_NO_MERGE) { |
432 | *req = q->last_merge; | 431 | *req = q->last_merge; |
@@ -441,7 +440,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio) | |||
441 | * See if our hash lookup can find a potential backmerge. | 440 | * See if our hash lookup can find a potential backmerge. |
442 | */ | 441 | */ |
443 | __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector); | 442 | __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector); |
444 | if (__rq && elv_rq_merge_ok(__rq, bio)) { | 443 | if (__rq && elv_bio_merge_ok(__rq, bio)) { |
445 | *req = __rq; | 444 | *req = __rq; |
446 | return ELEVATOR_BACK_MERGE; | 445 | return ELEVATOR_BACK_MERGE; |
447 | } | 446 | } |
@@ -717,12 +716,12 @@ void elv_put_request(struct request_queue *q, struct request *rq) | |||
717 | e->type->ops.elevator_put_req_fn(rq); | 716 | e->type->ops.elevator_put_req_fn(rq); |
718 | } | 717 | } |
719 | 718 | ||
720 | int elv_may_queue(struct request_queue *q, int rw) | 719 | int elv_may_queue(struct request_queue *q, int op, int op_flags) |
721 | { | 720 | { |
722 | struct elevator_queue *e = q->elevator; | 721 | struct elevator_queue *e = q->elevator; |
723 | 722 | ||
724 | if (e->type->ops.elevator_may_queue_fn) | 723 | if (e->type->ops.elevator_may_queue_fn) |
725 | return e->type->ops.elevator_may_queue_fn(q, rw); | 724 | return e->type->ops.elevator_may_queue_fn(q, op, op_flags); |
726 | 725 | ||
727 | return ELV_MQUEUE_MAY; | 726 | return ELV_MQUEUE_MAY; |
728 | } | 727 | } |
diff --git a/block/partition-generic.c b/block/partition-generic.c index d7eb77e1e3a8..71d9ed9df8da 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c | |||
@@ -495,7 +495,6 @@ rescan: | |||
495 | /* add partitions */ | 495 | /* add partitions */ |
496 | for (p = 1; p < state->limit; p++) { | 496 | for (p = 1; p < state->limit; p++) { |
497 | sector_t size, from; | 497 | sector_t size, from; |
498 | struct partition_meta_info *info = NULL; | ||
499 | 498 | ||
500 | size = state->parts[p].size; | 499 | size = state->parts[p].size; |
501 | if (!size) | 500 | if (!size) |
@@ -530,8 +529,6 @@ rescan: | |||
530 | } | 529 | } |
531 | } | 530 | } |
532 | 531 | ||
533 | if (state->parts[p].has_info) | ||
534 | info = &state->parts[p].info; | ||
535 | part = add_partition(disk, p, from, size, | 532 | part = add_partition(disk, p, from, size, |
536 | state->parts[p].flags, | 533 | state->parts[p].flags, |
537 | &state->parts[p].info); | 534 | &state->parts[p].info); |
diff --git a/block/partitions/atari.c b/block/partitions/atari.c index 9875b05e80a2..ff1fb93712c1 100644 --- a/block/partitions/atari.c +++ b/block/partitions/atari.c | |||
@@ -42,6 +42,13 @@ int atari_partition(struct parsed_partitions *state) | |||
42 | int part_fmt = 0; /* 0:unknown, 1:AHDI, 2:ICD/Supra */ | 42 | int part_fmt = 0; /* 0:unknown, 1:AHDI, 2:ICD/Supra */ |
43 | #endif | 43 | #endif |
44 | 44 | ||
45 | /* | ||
46 | * ATARI partition scheme supports 512 lba only. If this is not | ||
47 | * the case, bail early to avoid miscalculating hd_size. | ||
48 | */ | ||
49 | if (bdev_logical_block_size(state->bdev) != 512) | ||
50 | return 0; | ||
51 | |||
45 | rs = read_part_sector(state, 0, §); | 52 | rs = read_part_sector(state, 0, §); |
46 | if (!rs) | 53 | if (!rs) |
47 | return -1; | 54 | return -1; |
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 2bdb5dab922b..e207b33e4ce9 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c | |||
@@ -1190,7 +1190,7 @@ static int atapi_drain_needed(struct request *rq) | |||
1190 | if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC)) | 1190 | if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC)) |
1191 | return 0; | 1191 | return 0; |
1192 | 1192 | ||
1193 | if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_WRITE)) | 1193 | if (!blk_rq_bytes(rq) || op_is_write(req_op(rq))) |
1194 | return 0; | 1194 | return 0; |
1195 | 1195 | ||
1196 | return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC; | 1196 | return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC; |
diff --git a/drivers/block/brd.c b/drivers/block/brd.c index c04bd9bc39fd..dd96a935fba0 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c | |||
@@ -339,7 +339,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio) | |||
339 | if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) | 339 | if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) |
340 | goto io_error; | 340 | goto io_error; |
341 | 341 | ||
342 | if (unlikely(bio->bi_rw & REQ_DISCARD)) { | 342 | if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) { |
343 | if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) || | 343 | if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) || |
344 | bio->bi_iter.bi_size & ~PAGE_MASK) | 344 | bio->bi_iter.bi_size & ~PAGE_MASK) |
345 | goto io_error; | 345 | goto io_error; |
@@ -509,7 +509,9 @@ static struct brd_device *brd_alloc(int i) | |||
509 | blk_queue_max_discard_sectors(brd->brd_queue, UINT_MAX); | 509 | blk_queue_max_discard_sectors(brd->brd_queue, UINT_MAX); |
510 | brd->brd_queue->limits.discard_zeroes_data = 1; | 510 | brd->brd_queue->limits.discard_zeroes_data = 1; |
511 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue); | 511 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue); |
512 | 512 | #ifdef CONFIG_BLK_DEV_RAM_DAX | |
513 | queue_flag_set_unlocked(QUEUE_FLAG_DAX, brd->brd_queue); | ||
514 | #endif | ||
513 | disk = brd->brd_disk = alloc_disk(max_part); | 515 | disk = brd->brd_disk = alloc_disk(max_part); |
514 | if (!disk) | 516 | if (!disk) |
515 | goto out_free_queue; | 517 | goto out_free_queue; |
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index 10459a145062..d524973f94b3 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c | |||
@@ -137,19 +137,19 @@ void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_b | |||
137 | 137 | ||
138 | static int _drbd_md_sync_page_io(struct drbd_device *device, | 138 | static int _drbd_md_sync_page_io(struct drbd_device *device, |
139 | struct drbd_backing_dev *bdev, | 139 | struct drbd_backing_dev *bdev, |
140 | sector_t sector, int rw) | 140 | sector_t sector, int op) |
141 | { | 141 | { |
142 | struct bio *bio; | 142 | struct bio *bio; |
143 | /* we do all our meta data IO in aligned 4k blocks. */ | 143 | /* we do all our meta data IO in aligned 4k blocks. */ |
144 | const int size = 4096; | 144 | const int size = 4096; |
145 | int err; | 145 | int err, op_flags = 0; |
146 | 146 | ||
147 | device->md_io.done = 0; | 147 | device->md_io.done = 0; |
148 | device->md_io.error = -ENODEV; | 148 | device->md_io.error = -ENODEV; |
149 | 149 | ||
150 | if ((rw & WRITE) && !test_bit(MD_NO_FUA, &device->flags)) | 150 | if ((op == REQ_OP_WRITE) && !test_bit(MD_NO_FUA, &device->flags)) |
151 | rw |= REQ_FUA | REQ_FLUSH; | 151 | op_flags |= REQ_FUA | REQ_PREFLUSH; |
152 | rw |= REQ_SYNC | REQ_NOIDLE; | 152 | op_flags |= REQ_SYNC | REQ_NOIDLE; |
153 | 153 | ||
154 | bio = bio_alloc_drbd(GFP_NOIO); | 154 | bio = bio_alloc_drbd(GFP_NOIO); |
155 | bio->bi_bdev = bdev->md_bdev; | 155 | bio->bi_bdev = bdev->md_bdev; |
@@ -159,9 +159,9 @@ static int _drbd_md_sync_page_io(struct drbd_device *device, | |||
159 | goto out; | 159 | goto out; |
160 | bio->bi_private = device; | 160 | bio->bi_private = device; |
161 | bio->bi_end_io = drbd_md_endio; | 161 | bio->bi_end_io = drbd_md_endio; |
162 | bio->bi_rw = rw; | 162 | bio_set_op_attrs(bio, op, op_flags); |
163 | 163 | ||
164 | if (!(rw & WRITE) && device->state.disk == D_DISKLESS && device->ldev == NULL) | 164 | if (op != REQ_OP_WRITE && device->state.disk == D_DISKLESS && device->ldev == NULL) |
165 | /* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */ | 165 | /* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */ |
166 | ; | 166 | ; |
167 | else if (!get_ldev_if_state(device, D_ATTACHING)) { | 167 | else if (!get_ldev_if_state(device, D_ATTACHING)) { |
@@ -174,10 +174,10 @@ static int _drbd_md_sync_page_io(struct drbd_device *device, | |||
174 | bio_get(bio); /* one bio_put() is in the completion handler */ | 174 | bio_get(bio); /* one bio_put() is in the completion handler */ |
175 | atomic_inc(&device->md_io.in_use); /* drbd_md_put_buffer() is in the completion handler */ | 175 | atomic_inc(&device->md_io.in_use); /* drbd_md_put_buffer() is in the completion handler */ |
176 | device->md_io.submit_jif = jiffies; | 176 | device->md_io.submit_jif = jiffies; |
177 | if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) | 177 | if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) |
178 | bio_io_error(bio); | 178 | bio_io_error(bio); |
179 | else | 179 | else |
180 | submit_bio(rw, bio); | 180 | submit_bio(bio); |
181 | wait_until_done_or_force_detached(device, bdev, &device->md_io.done); | 181 | wait_until_done_or_force_detached(device, bdev, &device->md_io.done); |
182 | if (!bio->bi_error) | 182 | if (!bio->bi_error) |
183 | err = device->md_io.error; | 183 | err = device->md_io.error; |
@@ -188,7 +188,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device, | |||
188 | } | 188 | } |
189 | 189 | ||
190 | int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev, | 190 | int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev, |
191 | sector_t sector, int rw) | 191 | sector_t sector, int op) |
192 | { | 192 | { |
193 | int err; | 193 | int err; |
194 | D_ASSERT(device, atomic_read(&device->md_io.in_use) == 1); | 194 | D_ASSERT(device, atomic_read(&device->md_io.in_use) == 1); |
@@ -197,19 +197,21 @@ int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bd | |||
197 | 197 | ||
198 | dynamic_drbd_dbg(device, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n", | 198 | dynamic_drbd_dbg(device, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n", |
199 | current->comm, current->pid, __func__, | 199 | current->comm, current->pid, __func__, |
200 | (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", | 200 | (unsigned long long)sector, (op == REQ_OP_WRITE) ? "WRITE" : "READ", |
201 | (void*)_RET_IP_ ); | 201 | (void*)_RET_IP_ ); |
202 | 202 | ||
203 | if (sector < drbd_md_first_sector(bdev) || | 203 | if (sector < drbd_md_first_sector(bdev) || |
204 | sector + 7 > drbd_md_last_sector(bdev)) | 204 | sector + 7 > drbd_md_last_sector(bdev)) |
205 | drbd_alert(device, "%s [%d]:%s(,%llus,%s) out of range md access!\n", | 205 | drbd_alert(device, "%s [%d]:%s(,%llus,%s) out of range md access!\n", |
206 | current->comm, current->pid, __func__, | 206 | current->comm, current->pid, __func__, |
207 | (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ"); | 207 | (unsigned long long)sector, |
208 | (op == REQ_OP_WRITE) ? "WRITE" : "READ"); | ||
208 | 209 | ||
209 | err = _drbd_md_sync_page_io(device, bdev, sector, rw); | 210 | err = _drbd_md_sync_page_io(device, bdev, sector, op); |
210 | if (err) { | 211 | if (err) { |
211 | drbd_err(device, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n", | 212 | drbd_err(device, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n", |
212 | (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", err); | 213 | (unsigned long long)sector, |
214 | (op == REQ_OP_WRITE) ? "WRITE" : "READ", err); | ||
213 | } | 215 | } |
214 | return err; | 216 | return err; |
215 | } | 217 | } |
@@ -845,7 +847,7 @@ int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size, | |||
845 | unsigned long count = 0; | 847 | unsigned long count = 0; |
846 | sector_t esector, nr_sectors; | 848 | sector_t esector, nr_sectors; |
847 | 849 | ||
848 | /* This would be an empty REQ_FLUSH, be silent. */ | 850 | /* This would be an empty REQ_PREFLUSH, be silent. */ |
849 | if ((mode == SET_OUT_OF_SYNC) && size == 0) | 851 | if ((mode == SET_OUT_OF_SYNC) && size == 0) |
850 | return 0; | 852 | return 0; |
851 | 853 | ||
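The drbd_actlog.c changes above illustrate the new submission contract: callers keep the operation (REQ_OP_READ, REQ_OP_WRITE, ...) separate from modifier flags (REQ_FUA, REQ_PREFLUSH, REQ_SYNC, ...), stamp both onto the bio with bio_set_op_attrs(), and call the now single-argument submit_bio(). A minimal sketch (illustration only):

    int op = REQ_OP_WRITE;                  /* what to do   */
    int op_flags = REQ_SYNC | REQ_FUA;      /* how to do it */

    bio_set_op_attrs(bio, op, op_flags);    /* encodes both in the bio */
    submit_bio(bio);                        /* the old rw argument is gone */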
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 92d6fc020a65..e5d89f623b90 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c | |||
@@ -980,7 +980,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho | |||
980 | struct drbd_bitmap *b = device->bitmap; | 980 | struct drbd_bitmap *b = device->bitmap; |
981 | struct page *page; | 981 | struct page *page; |
982 | unsigned int len; | 982 | unsigned int len; |
983 | unsigned int rw = (ctx->flags & BM_AIO_READ) ? READ : WRITE; | 983 | unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE; |
984 | 984 | ||
985 | sector_t on_disk_sector = | 985 | sector_t on_disk_sector = |
986 | device->ldev->md.md_offset + device->ldev->md.bm_offset; | 986 | device->ldev->md.md_offset + device->ldev->md.bm_offset; |
@@ -1011,12 +1011,12 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho | |||
1011 | bio_add_page(bio, page, len, 0); | 1011 | bio_add_page(bio, page, len, 0); |
1012 | bio->bi_private = ctx; | 1012 | bio->bi_private = ctx; |
1013 | bio->bi_end_io = drbd_bm_endio; | 1013 | bio->bi_end_io = drbd_bm_endio; |
1014 | bio_set_op_attrs(bio, op, 0); | ||
1014 | 1015 | ||
1015 | if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) { | 1016 | if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) { |
1016 | bio->bi_rw |= rw; | ||
1017 | bio_io_error(bio); | 1017 | bio_io_error(bio); |
1018 | } else { | 1018 | } else { |
1019 | submit_bio(rw, bio); | 1019 | submit_bio(bio); |
1020 | /* this should not count as user activity and cause the | 1020 | /* this should not count as user activity and cause the |
1021 | * resync to throttle -- see drbd_rs_should_slow_down(). */ | 1021 | * resync to throttle -- see drbd_rs_should_slow_down(). */ |
1022 | atomic_add(len >> 9, &device->rs_sect_ev); | 1022 | atomic_add(len >> 9, &device->rs_sect_ev); |
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 7a1cf7eaa71d..a64c645b4184 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h | |||
@@ -1327,14 +1327,14 @@ struct bm_extent { | |||
1327 | #endif | 1327 | #endif |
1328 | #endif | 1328 | #endif |
1329 | 1329 | ||
1330 | /* BIO_MAX_SIZE is 256 * PAGE_SIZE, | 1330 | /* Estimate max bio size as 256 * PAGE_SIZE, |
1331 | * so for typical PAGE_SIZE of 4k, that is (1<<20) Byte. | 1331 | * so for typical PAGE_SIZE of 4k, that is (1<<20) Byte. |
1332 | * Since we may live in a mixed-platform cluster, | 1332 | * Since we may live in a mixed-platform cluster, |
1333 | * we limit us to a platform agnostic constant here for now. | 1333 | * we limit us to a platform agnostic constant here for now. |
1334 | * A followup commit may allow even bigger BIO sizes, | 1334 | * A followup commit may allow even bigger BIO sizes, |
1335 | * once we thought that through. */ | 1335 | * once we thought that through. */ |
1336 | #define DRBD_MAX_BIO_SIZE (1U << 20) | 1336 | #define DRBD_MAX_BIO_SIZE (1U << 20) |
1337 | #if DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE | 1337 | #if DRBD_MAX_BIO_SIZE > (BIO_MAX_PAGES << PAGE_SHIFT) |
1338 | #error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE | 1338 | #error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE |
1339 | #endif | 1339 | #endif |
1340 | #define DRBD_MAX_BIO_SIZE_SAFE (1U << 12) /* Works always = 4k */ | 1340 | #define DRBD_MAX_BIO_SIZE_SAFE (1U << 12) /* Works always = 4k */ |
@@ -1507,7 +1507,7 @@ extern int drbd_resync_finished(struct drbd_device *device); | |||
1507 | extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent); | 1507 | extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent); |
1508 | extern void drbd_md_put_buffer(struct drbd_device *device); | 1508 | extern void drbd_md_put_buffer(struct drbd_device *device); |
1509 | extern int drbd_md_sync_page_io(struct drbd_device *device, | 1509 | extern int drbd_md_sync_page_io(struct drbd_device *device, |
1510 | struct drbd_backing_dev *bdev, sector_t sector, int rw); | 1510 | struct drbd_backing_dev *bdev, sector_t sector, int op); |
1511 | extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int); | 1511 | extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int); |
1512 | extern void wait_until_done_or_force_detached(struct drbd_device *device, | 1512 | extern void wait_until_done_or_force_detached(struct drbd_device *device, |
1513 | struct drbd_backing_dev *bdev, unsigned int *done); | 1513 | struct drbd_backing_dev *bdev, unsigned int *done); |
@@ -1557,7 +1557,7 @@ extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector | |||
1557 | bool throttle_if_app_is_waiting); | 1557 | bool throttle_if_app_is_waiting); |
1558 | extern int drbd_submit_peer_request(struct drbd_device *, | 1558 | extern int drbd_submit_peer_request(struct drbd_device *, |
1559 | struct drbd_peer_request *, const unsigned, | 1559 | struct drbd_peer_request *, const unsigned, |
1560 | const int); | 1560 | const unsigned, const int); |
1561 | extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *); | 1561 | extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *); |
1562 | extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64, | 1562 | extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64, |
1563 | sector_t, unsigned int, | 1563 | sector_t, unsigned int, |
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 2ba1494b2799..2b37744db0fa 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c | |||
@@ -1603,15 +1603,16 @@ static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device, | |||
1603 | return 0; | 1603 | return 0; |
1604 | } | 1604 | } |
1605 | 1605 | ||
1606 | static u32 bio_flags_to_wire(struct drbd_connection *connection, unsigned long bi_rw) | 1606 | static u32 bio_flags_to_wire(struct drbd_connection *connection, |
1607 | struct bio *bio) | ||
1607 | { | 1608 | { |
1608 | if (connection->agreed_pro_version >= 95) | 1609 | if (connection->agreed_pro_version >= 95) |
1609 | return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) | | 1610 | return (bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) | |
1610 | (bi_rw & REQ_FUA ? DP_FUA : 0) | | 1611 | (bio->bi_rw & REQ_FUA ? DP_FUA : 0) | |
1611 | (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) | | 1612 | (bio->bi_rw & REQ_PREFLUSH ? DP_FLUSH : 0) | |
1612 | (bi_rw & REQ_DISCARD ? DP_DISCARD : 0); | 1613 | (bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0); |
1613 | else | 1614 | else |
1614 | return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0; | 1615 | return bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0; |
1615 | } | 1616 | } |
1616 | 1617 | ||
1617 | /* Used to send write or TRIM aka REQ_DISCARD requests | 1618 | /* Used to send write or TRIM aka REQ_DISCARD requests |
@@ -1636,7 +1637,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request * | |||
1636 | p->sector = cpu_to_be64(req->i.sector); | 1637 | p->sector = cpu_to_be64(req->i.sector); |
1637 | p->block_id = (unsigned long)req; | 1638 | p->block_id = (unsigned long)req; |
1638 | p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq)); | 1639 | p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq)); |
1639 | dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw); | 1640 | dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio); |
1640 | if (device->state.conn >= C_SYNC_SOURCE && | 1641 | if (device->state.conn >= C_SYNC_SOURCE && |
1641 | device->state.conn <= C_PAUSED_SYNC_T) | 1642 | device->state.conn <= C_PAUSED_SYNC_T) |
1642 | dp_flags |= DP_MAY_SET_IN_SYNC; | 1643 | dp_flags |= DP_MAY_SET_IN_SYNC; |
@@ -3061,7 +3062,7 @@ void drbd_md_write(struct drbd_device *device, void *b) | |||
3061 | D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset); | 3062 | D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset); |
3062 | sector = device->ldev->md.md_offset; | 3063 | sector = device->ldev->md.md_offset; |
3063 | 3064 | ||
3064 | if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) { | 3065 | if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) { |
3065 | /* this was a try anyways ... */ | 3066 | /* this was a try anyways ... */ |
3066 | drbd_err(device, "meta data update failed!\n"); | 3067 | drbd_err(device, "meta data update failed!\n"); |
3067 | drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR); | 3068 | drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR); |
@@ -3263,7 +3264,8 @@ int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev) | |||
3263 | * Affects the paranoia out-of-range access check in drbd_md_sync_page_io(). */ | 3264 | * Affects the paranoia out-of-range access check in drbd_md_sync_page_io(). */ |
3264 | bdev->md.md_size_sect = 8; | 3265 | bdev->md.md_size_sect = 8; |
3265 | 3266 | ||
3266 | if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset, READ)) { | 3267 | if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset, |
3268 | REQ_OP_READ)) { | ||
3267 | /* NOTE: can't do normal error processing here as this is | 3269 | /* NOTE: can't do normal error processing here as this is |
3268 | called BEFORE disk is attached */ | 3270 | called BEFORE disk is attached */ |
3269 | drbd_err(device, "Error while reading metadata.\n"); | 3271 | drbd_err(device, "Error while reading metadata.\n"); |
diff --git a/drivers/block/drbd/drbd_protocol.h b/drivers/block/drbd/drbd_protocol.h index ef9245363dcc..129f8c76c9b1 100644 --- a/drivers/block/drbd/drbd_protocol.h +++ b/drivers/block/drbd/drbd_protocol.h | |||
@@ -112,7 +112,7 @@ struct p_header100 { | |||
112 | #define DP_MAY_SET_IN_SYNC 4 | 112 | #define DP_MAY_SET_IN_SYNC 4 |
113 | #define DP_UNPLUG 8 /* not used anymore */ | 113 | #define DP_UNPLUG 8 /* not used anymore */ |
114 | #define DP_FUA 16 /* equals REQ_FUA */ | 114 | #define DP_FUA 16 /* equals REQ_FUA */ |
115 | #define DP_FLUSH 32 /* equals REQ_FLUSH */ | 115 | #define DP_FLUSH 32 /* equals REQ_PREFLUSH */ |
116 | #define DP_DISCARD 64 /* equals REQ_DISCARD */ | 116 | #define DP_DISCARD 64 /* equals REQ_DISCARD */ |
117 | #define DP_SEND_RECEIVE_ACK 128 /* This is a proto B write request */ | 117 | #define DP_SEND_RECEIVE_ACK 128 /* This is a proto B write request */ |
118 | #define DP_SEND_WRITE_ACK 256 /* This is a proto C write request */ | 118 | #define DP_SEND_WRITE_ACK 256 /* This is a proto C write request */ |
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 050aaa1c0350..1ee002352ea2 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c | |||
@@ -1398,7 +1398,8 @@ void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backin | |||
1398 | /* TODO allocate from our own bio_set. */ | 1398 | /* TODO allocate from our own bio_set. */ |
1399 | int drbd_submit_peer_request(struct drbd_device *device, | 1399 | int drbd_submit_peer_request(struct drbd_device *device, |
1400 | struct drbd_peer_request *peer_req, | 1400 | struct drbd_peer_request *peer_req, |
1401 | const unsigned rw, const int fault_type) | 1401 | const unsigned op, const unsigned op_flags, |
1402 | const int fault_type) | ||
1402 | { | 1403 | { |
1403 | struct bio *bios = NULL; | 1404 | struct bio *bios = NULL; |
1404 | struct bio *bio; | 1405 | struct bio *bio; |
@@ -1450,7 +1451,7 @@ next_bio: | |||
1450 | /* > peer_req->i.sector, unless this is the first bio */ | 1451 | /* > peer_req->i.sector, unless this is the first bio */ |
1451 | bio->bi_iter.bi_sector = sector; | 1452 | bio->bi_iter.bi_sector = sector; |
1452 | bio->bi_bdev = device->ldev->backing_bdev; | 1453 | bio->bi_bdev = device->ldev->backing_bdev; |
1453 | bio->bi_rw = rw; | 1454 | bio_set_op_attrs(bio, op, op_flags); |
1454 | bio->bi_private = peer_req; | 1455 | bio->bi_private = peer_req; |
1455 | bio->bi_end_io = drbd_peer_request_endio; | 1456 | bio->bi_end_io = drbd_peer_request_endio; |
1456 | 1457 | ||
@@ -1458,7 +1459,7 @@ next_bio: | |||
1458 | bios = bio; | 1459 | bios = bio; |
1459 | ++n_bios; | 1460 | ++n_bios; |
1460 | 1461 | ||
1461 | if (rw & REQ_DISCARD) { | 1462 | if (op == REQ_OP_DISCARD) { |
1462 | bio->bi_iter.bi_size = data_size; | 1463 | bio->bi_iter.bi_size = data_size; |
1463 | goto submit; | 1464 | goto submit; |
1464 | } | 1465 | } |
@@ -1830,7 +1831,8 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto | |||
1830 | spin_unlock_irq(&device->resource->req_lock); | 1831 | spin_unlock_irq(&device->resource->req_lock); |
1831 | 1832 | ||
1832 | atomic_add(pi->size >> 9, &device->rs_sect_ev); | 1833 | atomic_add(pi->size >> 9, &device->rs_sect_ev); |
1833 | if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0) | 1834 | if (drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE, 0, |
1835 | DRBD_FAULT_RS_WR) == 0) | ||
1834 | return 0; | 1836 | return 0; |
1835 | 1837 | ||
1836 | /* don't care for the reason here */ | 1838 | /* don't care for the reason here */ |
@@ -2152,12 +2154,19 @@ static int wait_for_and_update_peer_seq(struct drbd_peer_device *peer_device, co | |||
2152 | /* see also bio_flags_to_wire() | 2154 | /* see also bio_flags_to_wire() |
2153 | * DRBD_REQ_*, because we need to semantically map the flags to data packet | 2155 | * DRBD_REQ_*, because we need to semantically map the flags to data packet |
2154 | * flags and back. We may replicate to other kernel versions. */ | 2156 | * flags and back. We may replicate to other kernel versions. */ |
2155 | static unsigned long wire_flags_to_bio(u32 dpf) | 2157 | static unsigned long wire_flags_to_bio_flags(u32 dpf) |
2156 | { | 2158 | { |
2157 | return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) | | 2159 | return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) | |
2158 | (dpf & DP_FUA ? REQ_FUA : 0) | | 2160 | (dpf & DP_FUA ? REQ_FUA : 0) | |
2159 | (dpf & DP_FLUSH ? REQ_FLUSH : 0) | | 2161 | (dpf & DP_FLUSH ? REQ_PREFLUSH : 0); |
2160 | (dpf & DP_DISCARD ? REQ_DISCARD : 0); | 2162 | } |
2163 | |||
2164 | static unsigned long wire_flags_to_bio_op(u32 dpf) | ||
2165 | { | ||
2166 | if (dpf & DP_DISCARD) | ||
2167 | return REQ_OP_DISCARD; | ||
2168 | else | ||
2169 | return REQ_OP_WRITE; | ||
2161 | } | 2170 | } |
2162 | 2171 | ||
2163 | static void fail_postponed_requests(struct drbd_device *device, sector_t sector, | 2172 | static void fail_postponed_requests(struct drbd_device *device, sector_t sector, |
@@ -2303,7 +2312,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info * | |||
2303 | struct drbd_peer_request *peer_req; | 2312 | struct drbd_peer_request *peer_req; |
2304 | struct p_data *p = pi->data; | 2313 | struct p_data *p = pi->data; |
2305 | u32 peer_seq = be32_to_cpu(p->seq_num); | 2314 | u32 peer_seq = be32_to_cpu(p->seq_num); |
2306 | int rw = WRITE; | 2315 | int op, op_flags; |
2307 | u32 dp_flags; | 2316 | u32 dp_flags; |
2308 | int err, tp; | 2317 | int err, tp; |
2309 | 2318 | ||
@@ -2342,14 +2351,15 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info * | |||
2342 | peer_req->flags |= EE_APPLICATION; | 2351 | peer_req->flags |= EE_APPLICATION; |
2343 | 2352 | ||
2344 | dp_flags = be32_to_cpu(p->dp_flags); | 2353 | dp_flags = be32_to_cpu(p->dp_flags); |
2345 | rw |= wire_flags_to_bio(dp_flags); | 2354 | op = wire_flags_to_bio_op(dp_flags); |
2355 | op_flags = wire_flags_to_bio_flags(dp_flags); | ||
2346 | if (pi->cmd == P_TRIM) { | 2356 | if (pi->cmd == P_TRIM) { |
2347 | struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev); | 2357 | struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev); |
2348 | peer_req->flags |= EE_IS_TRIM; | 2358 | peer_req->flags |= EE_IS_TRIM; |
2349 | if (!blk_queue_discard(q)) | 2359 | if (!blk_queue_discard(q)) |
2350 | peer_req->flags |= EE_IS_TRIM_USE_ZEROOUT; | 2360 | peer_req->flags |= EE_IS_TRIM_USE_ZEROOUT; |
2351 | D_ASSERT(peer_device, peer_req->i.size > 0); | 2361 | D_ASSERT(peer_device, peer_req->i.size > 0); |
2352 | D_ASSERT(peer_device, rw & REQ_DISCARD); | 2362 | D_ASSERT(peer_device, op == REQ_OP_DISCARD); |
2353 | D_ASSERT(peer_device, peer_req->pages == NULL); | 2363 | D_ASSERT(peer_device, peer_req->pages == NULL); |
2354 | } else if (peer_req->pages == NULL) { | 2364 | } else if (peer_req->pages == NULL) { |
2355 | D_ASSERT(device, peer_req->i.size == 0); | 2365 | D_ASSERT(device, peer_req->i.size == 0); |
@@ -2433,7 +2443,8 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info * | |||
2433 | peer_req->flags |= EE_CALL_AL_COMPLETE_IO; | 2443 | peer_req->flags |= EE_CALL_AL_COMPLETE_IO; |
2434 | } | 2444 | } |
2435 | 2445 | ||
2436 | err = drbd_submit_peer_request(device, peer_req, rw, DRBD_FAULT_DT_WR); | 2446 | err = drbd_submit_peer_request(device, peer_req, op, op_flags, |
2447 | DRBD_FAULT_DT_WR); | ||
2437 | if (!err) | 2448 | if (!err) |
2438 | return 0; | 2449 | return 0; |
2439 | 2450 | ||
@@ -2723,7 +2734,8 @@ submit_for_resync: | |||
2723 | submit: | 2734 | submit: |
2724 | update_receiver_timing_details(connection, drbd_submit_peer_request); | 2735 | update_receiver_timing_details(connection, drbd_submit_peer_request); |
2725 | inc_unacked(device); | 2736 | inc_unacked(device); |
2726 | if (drbd_submit_peer_request(device, peer_req, READ, fault_type) == 0) | 2737 | if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0, |
2738 | fault_type) == 0) | ||
2727 | return 0; | 2739 | return 0; |
2728 | 2740 | ||
2729 | /* don't care for the reason here */ | 2741 | /* don't care for the reason here */ |
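[Editor's note] Throughout drbd_receiver.c above, drbd_submit_peer_request() now takes the operation and its flags as two arguments, and the wire-flag decoder is split to match. A hedged sketch of the caller pattern — submit_from_wire() is a hypothetical wrapper; the helpers and DRBD_FAULT_DT_WR come from the hunks above:

	static int submit_from_wire(struct drbd_device *device,
				    struct drbd_peer_request *peer_req,
				    u32 dp_flags)
	{
		/* op selects WRITE vs DISCARD; op_flags carry SYNC/FUA/PREFLUSH */
		int op = wire_flags_to_bio_op(dp_flags);
		int op_flags = wire_flags_to_bio_flags(dp_flags);

		return drbd_submit_peer_request(device, peer_req, op, op_flags,
						DRBD_FAULT_DT_WR);
	}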
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 2255dcfebd2b..eef6e9575b4e 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c | |||
@@ -1132,7 +1132,7 @@ static int drbd_process_write_request(struct drbd_request *req) | |||
1132 | * replicating, in which case there is no point. */ | 1132 | * replicating, in which case there is no point. */ |
1133 | if (unlikely(req->i.size == 0)) { | 1133 | if (unlikely(req->i.size == 0)) { |
1134 | /* The only size==0 bios we expect are empty flushes. */ | 1134 | /* The only size==0 bios we expect are empty flushes. */ |
1135 | D_ASSERT(device, req->master_bio->bi_rw & REQ_FLUSH); | 1135 | D_ASSERT(device, req->master_bio->bi_rw & REQ_PREFLUSH); |
1136 | if (remote) | 1136 | if (remote) |
1137 | _req_mod(req, QUEUE_AS_DRBD_BARRIER); | 1137 | _req_mod(req, QUEUE_AS_DRBD_BARRIER); |
1138 | return remote; | 1138 | return remote; |
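[Editor's note] The drbd_req.c hunk re-tags the empty-flush assertion with REQ_PREFLUSH, the new name for a pre-write cache flush. As a sketch, an empty preflush bio — the only size==0 bio drbd expects — assuming a 4.8-era tree where WRITE_FLUSH still expands to a preflushing write; bdev and done are placeholders:

	static void issue_empty_flush(struct block_device *bdev,
				      bio_end_io_t *done)
	{
		struct bio *bio = bio_alloc(GFP_NOIO, 0);

		bio->bi_bdev = bdev;
		bio->bi_end_io = done;
		/* no data pages: the bio only asks the device to flush its cache */
		bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
		submit_bio(bio);
	}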
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 4d87499f0d54..51fab978eb61 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c | |||
@@ -174,7 +174,7 @@ void drbd_peer_request_endio(struct bio *bio) | |||
174 | struct drbd_peer_request *peer_req = bio->bi_private; | 174 | struct drbd_peer_request *peer_req = bio->bi_private; |
175 | struct drbd_device *device = peer_req->peer_device->device; | 175 | struct drbd_device *device = peer_req->peer_device->device; |
176 | int is_write = bio_data_dir(bio) == WRITE; | 176 | int is_write = bio_data_dir(bio) == WRITE; |
177 | int is_discard = !!(bio->bi_rw & REQ_DISCARD); | 177 | int is_discard = !!(bio_op(bio) == REQ_OP_DISCARD); |
178 | 178 | ||
179 | if (bio->bi_error && __ratelimit(&drbd_ratelimit_state)) | 179 | if (bio->bi_error && __ratelimit(&drbd_ratelimit_state)) |
180 | drbd_warn(device, "%s: error=%d s=%llus\n", | 180 | drbd_warn(device, "%s: error=%d s=%llus\n", |
@@ -248,7 +248,7 @@ void drbd_request_endio(struct bio *bio) | |||
248 | 248 | ||
249 | /* to avoid recursion in __req_mod */ | 249 | /* to avoid recursion in __req_mod */ |
250 | if (unlikely(bio->bi_error)) { | 250 | if (unlikely(bio->bi_error)) { |
251 | if (bio->bi_rw & REQ_DISCARD) | 251 | if (bio_op(bio) == REQ_OP_DISCARD) |
252 | what = (bio->bi_error == -EOPNOTSUPP) | 252 | what = (bio->bi_error == -EOPNOTSUPP) |
253 | ? DISCARD_COMPLETED_NOTSUPP | 253 | ? DISCARD_COMPLETED_NOTSUPP |
254 | : DISCARD_COMPLETED_WITH_ERROR; | 254 | : DISCARD_COMPLETED_WITH_ERROR; |
@@ -397,7 +397,8 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector, | |||
397 | spin_unlock_irq(&device->resource->req_lock); | 397 | spin_unlock_irq(&device->resource->req_lock); |
398 | 398 | ||
399 | atomic_add(size >> 9, &device->rs_sect_ev); | 399 | atomic_add(size >> 9, &device->rs_sect_ev); |
400 | if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0) | 400 | if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0, |
401 | DRBD_FAULT_RS_RD) == 0) | ||
401 | return 0; | 402 | return 0; |
402 | 403 | ||
403 | /* If it failed because of ENOMEM, retry should help. If it failed | 404 | /* If it failed because of ENOMEM, retry should help. If it failed |
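[Editor's note] On the completion side, drbd_worker.c reads the operation back with bio_op(), so "was this a discard?" becomes an equality test rather than a bitmask probe of bi_rw. A small hypothetical endio sketch of that idiom:

	static void sample_endio(struct bio *bio)
	{
		/* ops are an enumeration now, so compare, don't mask */
		if (bio->bi_error == -EOPNOTSUPP &&
		    bio_op(bio) == REQ_OP_DISCARD)
			pr_warn_ratelimited("discard unsupported, ignoring\n");
	}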
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 84708a5f8c52..f9bfecd733a8 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c | |||
@@ -3822,8 +3822,9 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive) | |||
3822 | bio.bi_flags |= (1 << BIO_QUIET); | 3822 | bio.bi_flags |= (1 << BIO_QUIET); |
3823 | bio.bi_private = &cbdata; | 3823 | bio.bi_private = &cbdata; |
3824 | bio.bi_end_io = floppy_rb0_cb; | 3824 | bio.bi_end_io = floppy_rb0_cb; |
3825 | bio_set_op_attrs(&bio, REQ_OP_READ, 0); | ||
3825 | 3826 | ||
3826 | submit_bio(READ, &bio); | 3827 | submit_bio(&bio); |
3827 | process_fd_request(); | 3828 | process_fd_request(); |
3828 | 3829 | ||
3829 | init_completion(&cbdata.complete); | 3830 | init_completion(&cbdata.complete); |
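[Editor's note] floppy.c shows the new submission convention: the operation is stamped on the bio itself, so submit_bio() shrinks to a single argument. A sketch of an on-stack read bio in that style — read_block0, bdev, page, len and my_endio are placeholder names:

	static void read_block0(struct block_device *bdev, struct page *page,
				unsigned int len, bio_end_io_t *my_endio)
	{
		struct bio bio;
		struct bio_vec bvec;

		bio_init(&bio);
		bio.bi_io_vec = &bvec;
		bio.bi_max_vecs = 1;
		bio.bi_bdev = bdev;
		bio.bi_iter.bi_sector = 0;
		bio_add_page(&bio, page, len, 0);
		bio.bi_end_io = my_endio;
		bio_set_op_attrs(&bio, REQ_OP_READ, 0);
		submit_bio(&bio);	/* was: submit_bio(READ, &bio) */
	}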
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 1fa8cc235977..364d491d4bdd 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -447,7 +447,7 @@ static int lo_req_flush(struct loop_device *lo, struct request *rq) | |||
447 | 447 | ||
448 | static inline void handle_partial_read(struct loop_cmd *cmd, long bytes) | 448 | static inline void handle_partial_read(struct loop_cmd *cmd, long bytes) |
449 | { | 449 | { |
450 | if (bytes < 0 || (cmd->rq->cmd_flags & REQ_WRITE)) | 450 | if (bytes < 0 || op_is_write(req_op(cmd->rq))) |
451 | return; | 451 | return; |
452 | 452 | ||
453 | if (unlikely(bytes < blk_rq_bytes(cmd->rq))) { | 453 | if (unlikely(bytes < blk_rq_bytes(cmd->rq))) { |
@@ -541,10 +541,10 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq) | |||
541 | 541 | ||
542 | pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset; | 542 | pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset; |
543 | 543 | ||
544 | if (rq->cmd_flags & REQ_WRITE) { | 544 | if (op_is_write(req_op(rq))) { |
545 | if (rq->cmd_flags & REQ_FLUSH) | 545 | if (req_op(rq) == REQ_OP_FLUSH) |
546 | ret = lo_req_flush(lo, rq); | 546 | ret = lo_req_flush(lo, rq); |
547 | else if (rq->cmd_flags & REQ_DISCARD) | 547 | else if (req_op(rq) == REQ_OP_DISCARD) |
548 | ret = lo_discard(lo, rq, pos); | 548 | ret = lo_discard(lo, rq, pos); |
549 | else if (lo->transfer) | 549 | else if (lo->transfer) |
550 | ret = lo_write_transfer(lo, rq, pos); | 550 | ret = lo_write_transfer(lo, rq, pos); |
@@ -1659,8 +1659,8 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
1659 | if (lo->lo_state != Lo_bound) | 1659 | if (lo->lo_state != Lo_bound) |
1660 | return -EIO; | 1660 | return -EIO; |
1661 | 1661 | ||
1662 | if (lo->use_dio && !(cmd->rq->cmd_flags & (REQ_FLUSH | | 1662 | if (lo->use_dio && (req_op(cmd->rq) != REQ_OP_FLUSH || |
1663 | REQ_DISCARD))) | 1663 | req_op(cmd->rq) == REQ_OP_DISCARD)) |
1664 | cmd->use_aio = true; | 1664 | cmd->use_aio = true; |
1665 | else | 1665 | else |
1666 | cmd->use_aio = false; | 1666 | cmd->use_aio = false; |
@@ -1672,7 +1672,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
1672 | 1672 | ||
1673 | static void loop_handle_cmd(struct loop_cmd *cmd) | 1673 | static void loop_handle_cmd(struct loop_cmd *cmd) |
1674 | { | 1674 | { |
1675 | const bool write = cmd->rq->cmd_flags & REQ_WRITE; | 1675 | const bool write = op_is_write(req_op(cmd->rq)); |
1676 | struct loop_device *lo = cmd->rq->q->queuedata; | 1676 | struct loop_device *lo = cmd->rq->q->queuedata; |
1677 | int ret = 0; | 1677 | int ret = 0; |
1678 | 1678 | ||
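[Editor's note] The loop driver illustrates the request-side accessors: req_op() yields one operation value and op_is_write() classifies it, replacing tests against REQ_WRITE/REQ_FLUSH/REQ_DISCARD bits. A hedged dispatch sketch — do_flush, do_discard, do_write and do_read are hypothetical handlers, not loop.c functions:

	static int dispatch(struct loop_device *lo, struct request *rq)
	{
		switch (req_op(rq)) {
		case REQ_OP_FLUSH:
			return do_flush(lo, rq);
		case REQ_OP_DISCARD:
			return do_discard(lo, rq);
		default:
			return op_is_write(req_op(rq)) ? do_write(lo, rq)
						       : do_read(lo, rq);
		}
	}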
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 6053e4659fa2..8e3e708cb9ee 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c | |||
@@ -3765,7 +3765,7 @@ static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq) | |||
3765 | return -ENODATA; | 3765 | return -ENODATA; |
3766 | } | 3766 | } |
3767 | 3767 | ||
3768 | if (rq->cmd_flags & REQ_DISCARD) { | 3768 | if (req_op(rq) == REQ_OP_DISCARD) { |
3769 | int err; | 3769 | int err; |
3770 | 3770 | ||
3771 | err = mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq)); | 3771 | err = mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq)); |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 6a48ed41963f..6f55b262b5ce 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
@@ -282,9 +282,9 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req) | |||
282 | 282 | ||
283 | if (req->cmd_type == REQ_TYPE_DRV_PRIV) | 283 | if (req->cmd_type == REQ_TYPE_DRV_PRIV) |
284 | type = NBD_CMD_DISC; | 284 | type = NBD_CMD_DISC; |
285 | else if (req->cmd_flags & REQ_DISCARD) | 285 | else if (req_op(req) == REQ_OP_DISCARD) |
286 | type = NBD_CMD_TRIM; | 286 | type = NBD_CMD_TRIM; |
287 | else if (req->cmd_flags & REQ_FLUSH) | 287 | else if (req_op(req) == REQ_OP_FLUSH) |
288 | type = NBD_CMD_FLUSH; | 288 | type = NBD_CMD_FLUSH; |
289 | else if (rq_data_dir(req) == WRITE) | 289 | else if (rq_data_dir(req) == WRITE) |
290 | type = NBD_CMD_WRITE; | 290 | type = NBD_CMD_WRITE; |
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c index c2854a2bfdb0..92900f5f0b47 100644 --- a/drivers/block/osdblk.c +++ b/drivers/block/osdblk.c | |||
@@ -321,7 +321,7 @@ static void osdblk_rq_fn(struct request_queue *q) | |||
321 | * driver-specific, etc. | 321 | * driver-specific, etc. |
322 | */ | 322 | */ |
323 | 323 | ||
324 | do_flush = rq->cmd_flags & REQ_FLUSH; | 324 | do_flush = (req_op(rq) == REQ_OP_FLUSH); |
325 | do_write = (rq_data_dir(rq) == WRITE); | 325 | do_write = (rq_data_dir(rq) == WRITE); |
326 | 326 | ||
327 | if (!do_flush) { /* osd_flush does not use a bio */ | 327 | if (!do_flush) { /* osd_flush does not use a bio */ |
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index d06c62eccdf0..9393bc730acf 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c | |||
@@ -1074,7 +1074,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) | |||
1074 | BUG(); | 1074 | BUG(); |
1075 | 1075 | ||
1076 | atomic_inc(&pkt->io_wait); | 1076 | atomic_inc(&pkt->io_wait); |
1077 | bio->bi_rw = READ; | 1077 | bio_set_op_attrs(bio, REQ_OP_READ, 0); |
1078 | pkt_queue_bio(pd, bio); | 1078 | pkt_queue_bio(pd, bio); |
1079 | frames_read++; | 1079 | frames_read++; |
1080 | } | 1080 | } |
@@ -1336,7 +1336,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) | |||
1336 | 1336 | ||
1337 | /* Start the write request */ | 1337 | /* Start the write request */ |
1338 | atomic_set(&pkt->io_wait, 1); | 1338 | atomic_set(&pkt->io_wait, 1); |
1339 | pkt->w_bio->bi_rw = WRITE; | 1339 | bio_set_op_attrs(pkt->w_bio, REQ_OP_WRITE, 0); |
1340 | pkt_queue_bio(pd, pkt->w_bio); | 1340 | pkt_queue_bio(pd, pkt->w_bio); |
1341 | } | 1341 | } |
1342 | 1342 | ||
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index 4b7e405830d7..acb44529c05e 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c | |||
@@ -196,7 +196,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev, | |||
196 | dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__); | 196 | dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__); |
197 | 197 | ||
198 | while ((req = blk_fetch_request(q))) { | 198 | while ((req = blk_fetch_request(q))) { |
199 | if (req->cmd_flags & REQ_FLUSH) { | 199 | if (req_op(req) == REQ_OP_FLUSH) { |
200 | if (ps3disk_submit_flush_request(dev, req)) | 200 | if (ps3disk_submit_flush_request(dev, req)) |
201 | break; | 201 | break; |
202 | } else if (req->cmd_type == REQ_TYPE_FS) { | 202 | } else if (req->cmd_type == REQ_TYPE_FS) { |
@@ -256,7 +256,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data) | |||
256 | return IRQ_HANDLED; | 256 | return IRQ_HANDLED; |
257 | } | 257 | } |
258 | 258 | ||
259 | if (req->cmd_flags & REQ_FLUSH) { | 259 | if (req_op(req) == REQ_OP_FLUSH) { |
260 | read = 0; | 260 | read = 0; |
261 | op = "flush"; | 261 | op = "flush"; |
262 | } else { | 262 | } else { |
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 81666a56415e..450662055d97 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
@@ -3286,9 +3286,9 @@ static void rbd_queue_workfn(struct work_struct *work) | |||
3286 | goto err; | 3286 | goto err; |
3287 | } | 3287 | } |
3288 | 3288 | ||
3289 | if (rq->cmd_flags & REQ_DISCARD) | 3289 | if (req_op(rq) == REQ_OP_DISCARD) |
3290 | op_type = OBJ_OP_DISCARD; | 3290 | op_type = OBJ_OP_DISCARD; |
3291 | else if (rq->cmd_flags & REQ_WRITE) | 3291 | else if (req_op(rq) == REQ_OP_WRITE) |
3292 | op_type = OBJ_OP_WRITE; | 3292 | op_type = OBJ_OP_WRITE; |
3293 | else | 3293 | else |
3294 | op_type = OBJ_OP_READ; | 3294 | op_type = OBJ_OP_READ; |
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c index cf8cd293abb5..5a20385f87d0 100644 --- a/drivers/block/rsxx/dma.c +++ b/drivers/block/rsxx/dma.c | |||
@@ -705,7 +705,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, | |||
705 | dma_cnt[i] = 0; | 705 | dma_cnt[i] = 0; |
706 | } | 706 | } |
707 | 707 | ||
708 | if (bio->bi_rw & REQ_DISCARD) { | 708 | if (bio_op(bio) == REQ_OP_DISCARD) { |
709 | bv_len = bio->bi_iter.bi_size; | 709 | bv_len = bio->bi_iter.bi_size; |
710 | 710 | ||
711 | while (bv_len > 0) { | 711 | while (bv_len > 0) { |
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index 910e065918af..5c07a23e2ada 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c | |||
@@ -597,7 +597,7 @@ static void skd_request_fn(struct request_queue *q) | |||
597 | data_dir = rq_data_dir(req); | 597 | data_dir = rq_data_dir(req); |
598 | io_flags = req->cmd_flags; | 598 | io_flags = req->cmd_flags; |
599 | 599 | ||
600 | if (io_flags & REQ_FLUSH) | 600 | if (req_op(req) == REQ_OP_FLUSH) |
601 | flush++; | 601 | flush++; |
602 | 602 | ||
603 | if (io_flags & REQ_FUA) | 603 | if (io_flags & REQ_FUA) |
diff --git a/drivers/block/umem.c b/drivers/block/umem.c index 7939b9f87441..4b3ba74e9d22 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c | |||
@@ -462,7 +462,7 @@ static void process_page(unsigned long data) | |||
462 | le32_to_cpu(desc->local_addr)>>9, | 462 | le32_to_cpu(desc->local_addr)>>9, |
463 | le32_to_cpu(desc->transfer_size)); | 463 | le32_to_cpu(desc->transfer_size)); |
464 | dump_dmastat(card, control); | 464 | dump_dmastat(card, control); |
465 | } else if ((bio->bi_rw & REQ_WRITE) && | 465 | } else if (op_is_write(bio_op(bio)) && |
466 | le32_to_cpu(desc->local_addr) >> 9 == | 466 | le32_to_cpu(desc->local_addr) >> 9 == |
467 | card->init_size) { | 467 | card->init_size) { |
468 | card->init_size += le32_to_cpu(desc->transfer_size) >> 9; | 468 | card->init_size += le32_to_cpu(desc->transfer_size) >> 9; |
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 42758b52768c..18e4069dd24b 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c | |||
@@ -172,7 +172,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
172 | BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); | 172 | BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); |
173 | 173 | ||
174 | vbr->req = req; | 174 | vbr->req = req; |
175 | if (req->cmd_flags & REQ_FLUSH) { | 175 | if (req_op(req) == REQ_OP_FLUSH) { |
176 | vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH); | 176 | vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH); |
177 | vbr->out_hdr.sector = 0; | 177 | vbr->out_hdr.sector = 0; |
178 | vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req)); | 178 | vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req)); |
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 4809c1501d7e..4a80ee752597 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c | |||
@@ -501,7 +501,7 @@ static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif, | |||
501 | struct xen_vbd *vbd = &blkif->vbd; | 501 | struct xen_vbd *vbd = &blkif->vbd; |
502 | int rc = -EACCES; | 502 | int rc = -EACCES; |
503 | 503 | ||
504 | if ((operation != READ) && vbd->readonly) | 504 | if ((operation != REQ_OP_READ) && vbd->readonly) |
505 | goto out; | 505 | goto out; |
506 | 506 | ||
507 | if (likely(req->nr_sects)) { | 507 | if (likely(req->nr_sects)) { |
@@ -1014,7 +1014,7 @@ static int dispatch_discard_io(struct xen_blkif_ring *ring, | |||
1014 | preq.sector_number = req->u.discard.sector_number; | 1014 | preq.sector_number = req->u.discard.sector_number; |
1015 | preq.nr_sects = req->u.discard.nr_sectors; | 1015 | preq.nr_sects = req->u.discard.nr_sectors; |
1016 | 1016 | ||
1017 | err = xen_vbd_translate(&preq, blkif, WRITE); | 1017 | err = xen_vbd_translate(&preq, blkif, REQ_OP_WRITE); |
1018 | if (err) { | 1018 | if (err) { |
1019 | pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n", | 1019 | pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n", |
1020 | preq.sector_number, | 1020 | preq.sector_number, |
@@ -1229,6 +1229,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, | |||
1229 | struct bio **biolist = pending_req->biolist; | 1229 | struct bio **biolist = pending_req->biolist; |
1230 | int i, nbio = 0; | 1230 | int i, nbio = 0; |
1231 | int operation; | 1231 | int operation; |
1232 | int operation_flags = 0; | ||
1232 | struct blk_plug plug; | 1233 | struct blk_plug plug; |
1233 | bool drain = false; | 1234 | bool drain = false; |
1234 | struct grant_page **pages = pending_req->segments; | 1235 | struct grant_page **pages = pending_req->segments; |
@@ -1247,17 +1248,19 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, | |||
1247 | switch (req_operation) { | 1248 | switch (req_operation) { |
1248 | case BLKIF_OP_READ: | 1249 | case BLKIF_OP_READ: |
1249 | ring->st_rd_req++; | 1250 | ring->st_rd_req++; |
1250 | operation = READ; | 1251 | operation = REQ_OP_READ; |
1251 | break; | 1252 | break; |
1252 | case BLKIF_OP_WRITE: | 1253 | case BLKIF_OP_WRITE: |
1253 | ring->st_wr_req++; | 1254 | ring->st_wr_req++; |
1254 | operation = WRITE_ODIRECT; | 1255 | operation = REQ_OP_WRITE; |
1256 | operation_flags = WRITE_ODIRECT; | ||
1255 | break; | 1257 | break; |
1256 | case BLKIF_OP_WRITE_BARRIER: | 1258 | case BLKIF_OP_WRITE_BARRIER: |
1257 | drain = true; | 1259 | drain = true; |
1258 | case BLKIF_OP_FLUSH_DISKCACHE: | 1260 | case BLKIF_OP_FLUSH_DISKCACHE: |
1259 | ring->st_f_req++; | 1261 | ring->st_f_req++; |
1260 | operation = WRITE_FLUSH; | 1262 | operation = REQ_OP_WRITE; |
1263 | operation_flags = WRITE_FLUSH; | ||
1261 | break; | 1264 | break; |
1262 | default: | 1265 | default: |
1263 | operation = 0; /* make gcc happy */ | 1266 | operation = 0; /* make gcc happy */ |
@@ -1269,7 +1272,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, | |||
1269 | nseg = req->operation == BLKIF_OP_INDIRECT ? | 1272 | nseg = req->operation == BLKIF_OP_INDIRECT ? |
1270 | req->u.indirect.nr_segments : req->u.rw.nr_segments; | 1273 | req->u.indirect.nr_segments : req->u.rw.nr_segments; |
1271 | 1274 | ||
1272 | if (unlikely(nseg == 0 && operation != WRITE_FLUSH) || | 1275 | if (unlikely(nseg == 0 && operation_flags != WRITE_FLUSH) || |
1273 | unlikely((req->operation != BLKIF_OP_INDIRECT) && | 1276 | unlikely((req->operation != BLKIF_OP_INDIRECT) && |
1274 | (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) || | 1277 | (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) || |
1275 | unlikely((req->operation == BLKIF_OP_INDIRECT) && | 1278 | unlikely((req->operation == BLKIF_OP_INDIRECT) && |
@@ -1310,7 +1313,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, | |||
1310 | 1313 | ||
1311 | if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) { | 1314 | if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) { |
1312 | pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n", | 1315 | pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n", |
1313 | operation == READ ? "read" : "write", | 1316 | operation == REQ_OP_READ ? "read" : "write", |
1314 | preq.sector_number, | 1317 | preq.sector_number, |
1315 | preq.sector_number + preq.nr_sects, | 1318 | preq.sector_number + preq.nr_sects, |
1316 | ring->blkif->vbd.pdevice); | 1319 | ring->blkif->vbd.pdevice); |
@@ -1369,6 +1372,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, | |||
1369 | bio->bi_private = pending_req; | 1372 | bio->bi_private = pending_req; |
1370 | bio->bi_end_io = end_block_io_op; | 1373 | bio->bi_end_io = end_block_io_op; |
1371 | bio->bi_iter.bi_sector = preq.sector_number; | 1374 | bio->bi_iter.bi_sector = preq.sector_number; |
1375 | bio_set_op_attrs(bio, operation, operation_flags); | ||
1372 | } | 1376 | } |
1373 | 1377 | ||
1374 | preq.sector_number += seg[i].nsec; | 1378 | preq.sector_number += seg[i].nsec; |
@@ -1376,7 +1380,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, | |||
1376 | 1380 | ||
1377 | /* This will be hit if the operation was a flush or discard. */ | 1381 | /* This will be hit if the operation was a flush or discard. */ |
1378 | if (!bio) { | 1382 | if (!bio) { |
1379 | BUG_ON(operation != WRITE_FLUSH); | 1383 | BUG_ON(operation_flags != WRITE_FLUSH); |
1380 | 1384 | ||
1381 | bio = bio_alloc(GFP_KERNEL, 0); | 1385 | bio = bio_alloc(GFP_KERNEL, 0); |
1382 | if (unlikely(bio == NULL)) | 1386 | if (unlikely(bio == NULL)) |
@@ -1386,20 +1390,21 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, | |||
1386 | bio->bi_bdev = preq.bdev; | 1390 | bio->bi_bdev = preq.bdev; |
1387 | bio->bi_private = pending_req; | 1391 | bio->bi_private = pending_req; |
1388 | bio->bi_end_io = end_block_io_op; | 1392 | bio->bi_end_io = end_block_io_op; |
1393 | bio_set_op_attrs(bio, operation, operation_flags); | ||
1389 | } | 1394 | } |
1390 | 1395 | ||
1391 | atomic_set(&pending_req->pendcnt, nbio); | 1396 | atomic_set(&pending_req->pendcnt, nbio); |
1392 | blk_start_plug(&plug); | 1397 | blk_start_plug(&plug); |
1393 | 1398 | ||
1394 | for (i = 0; i < nbio; i++) | 1399 | for (i = 0; i < nbio; i++) |
1395 | submit_bio(operation, biolist[i]); | 1400 | submit_bio(biolist[i]); |
1396 | 1401 | ||
1397 | /* Let the I/Os go.. */ | 1402 | /* Let the I/Os go.. */ |
1398 | blk_finish_plug(&plug); | 1403 | blk_finish_plug(&plug); |
1399 | 1404 | ||
1400 | if (operation == READ) | 1405 | if (operation == REQ_OP_READ) |
1401 | ring->st_rd_sect += preq.nr_sects; | 1406 | ring->st_rd_sect += preq.nr_sects; |
1402 | else if (operation & WRITE) | 1407 | else if (operation == REQ_OP_WRITE) |
1403 | ring->st_wr_sect += preq.nr_sects; | 1408 | ring->st_wr_sect += preq.nr_sects; |
1404 | 1409 | ||
1405 | return 0; | 1410 | return 0; |
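[Editor's note] xen-blkback now carries the REQ_OP_* value and the legacy WRITE_* flag word in separate variables until bio_set_op_attrs() stamps both onto each bio. A sketch of the decode step, mirroring the switch in dispatch_rw_block_io() above — decode_blkif_op() itself is hypothetical:

	static void decode_blkif_op(unsigned int req_operation,
				    int *op, int *op_flags)
	{
		switch (req_operation) {
		case BLKIF_OP_READ:
			*op = REQ_OP_READ;
			*op_flags = 0;
			break;
		case BLKIF_OP_WRITE:
			*op = REQ_OP_WRITE;
			*op_flags = WRITE_ODIRECT;	/* as in the hunk above */
			break;
		case BLKIF_OP_WRITE_BARRIER:	/* barrier handled as a flush */
		case BLKIF_OP_FLUSH_DISKCACHE:
			*op = REQ_OP_WRITE;
			*op_flags = WRITE_FLUSH;
			break;
		default:
			*op = 0;	/* rejected earlier for valid requests */
			*op_flags = 0;
		}
	}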
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index fcc5b4e0aef2..da05d3f9bad2 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
@@ -196,6 +196,7 @@ struct blkfront_info | |||
196 | unsigned int nr_ring_pages; | 196 | unsigned int nr_ring_pages; |
197 | struct request_queue *rq; | 197 | struct request_queue *rq; |
198 | unsigned int feature_flush; | 198 | unsigned int feature_flush; |
199 | unsigned int feature_fua; | ||
199 | unsigned int feature_discard:1; | 200 | unsigned int feature_discard:1; |
200 | unsigned int feature_secdiscard:1; | 201 | unsigned int feature_secdiscard:1; |
201 | unsigned int discard_granularity; | 202 | unsigned int discard_granularity; |
@@ -746,7 +747,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri | |||
746 | * The indirect operation can only be a BLKIF_OP_READ or | 747 | * The indirect operation can only be a BLKIF_OP_READ or |
747 | * BLKIF_OP_WRITE | 748 | * BLKIF_OP_WRITE |
748 | */ | 749 | */ |
749 | BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA)); | 750 | BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA); |
750 | ring_req->operation = BLKIF_OP_INDIRECT; | 751 | ring_req->operation = BLKIF_OP_INDIRECT; |
751 | ring_req->u.indirect.indirect_op = rq_data_dir(req) ? | 752 | ring_req->u.indirect.indirect_op = rq_data_dir(req) ? |
752 | BLKIF_OP_WRITE : BLKIF_OP_READ; | 753 | BLKIF_OP_WRITE : BLKIF_OP_READ; |
@@ -758,7 +759,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri | |||
758 | ring_req->u.rw.handle = info->handle; | 759 | ring_req->u.rw.handle = info->handle; |
759 | ring_req->operation = rq_data_dir(req) ? | 760 | ring_req->operation = rq_data_dir(req) ? |
760 | BLKIF_OP_WRITE : BLKIF_OP_READ; | 761 | BLKIF_OP_WRITE : BLKIF_OP_READ; |
761 | if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) { | 762 | if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) { |
762 | /* | 763 | /* |
763 | * Ideally we can do an unordered flush-to-disk. | 764 | * Ideally we can do an unordered flush-to-disk. |
764 | * In case the backend only supports barriers, use that. | 765 | * In case the backend only supports barriers, use that. |
@@ -766,19 +767,14 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri | |||
766 | * implement it the same way. (It's also a FLUSH+FUA, | 767 | * implement it the same way. (It's also a FLUSH+FUA, |
767 | * since it is guaranteed ordered WRT previous writes.) | 768 | * since it is guaranteed ordered WRT previous writes.) |
768 | */ | 769 | */ |
769 | switch (info->feature_flush & | 770 | if (info->feature_flush && info->feature_fua) |
770 | ((REQ_FLUSH|REQ_FUA))) { | ||
771 | case REQ_FLUSH|REQ_FUA: | ||
772 | ring_req->operation = | 771 | ring_req->operation = |
773 | BLKIF_OP_WRITE_BARRIER; | 772 | BLKIF_OP_WRITE_BARRIER; |
774 | break; | 773 | else if (info->feature_flush) |
775 | case REQ_FLUSH: | ||
776 | ring_req->operation = | 774 | ring_req->operation = |
777 | BLKIF_OP_FLUSH_DISKCACHE; | 775 | BLKIF_OP_FLUSH_DISKCACHE; |
778 | break; | 776 | else |
779 | default: | ||
780 | ring_req->operation = 0; | 777 | ring_req->operation = 0; |
781 | } | ||
782 | } | 778 | } |
783 | ring_req->u.rw.nr_segments = num_grant; | 779 | ring_req->u.rw.nr_segments = num_grant; |
784 | if (unlikely(require_extra_req)) { | 780 | if (unlikely(require_extra_req)) { |
@@ -847,7 +843,8 @@ static int blkif_queue_request(struct request *req, struct blkfront_ring_info *r | |||
847 | if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED)) | 843 | if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED)) |
848 | return 1; | 844 | return 1; |
849 | 845 | ||
850 | if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) | 846 | if (unlikely(req_op(req) == REQ_OP_DISCARD || |
847 | req->cmd_flags & REQ_SECURE)) | ||
851 | return blkif_queue_discard_req(req, rinfo); | 848 | return blkif_queue_discard_req(req, rinfo); |
852 | else | 849 | else |
853 | return blkif_queue_rw_req(req, rinfo); | 850 | return blkif_queue_rw_req(req, rinfo); |
@@ -867,10 +864,10 @@ static inline bool blkif_request_flush_invalid(struct request *req, | |||
867 | struct blkfront_info *info) | 864 | struct blkfront_info *info) |
868 | { | 865 | { |
869 | return ((req->cmd_type != REQ_TYPE_FS) || | 866 | return ((req->cmd_type != REQ_TYPE_FS) || |
870 | ((req->cmd_flags & REQ_FLUSH) && | 867 | ((req_op(req) == REQ_OP_FLUSH) && |
871 | !(info->feature_flush & REQ_FLUSH)) || | 868 | !info->feature_flush) || |
872 | ((req->cmd_flags & REQ_FUA) && | 869 | ((req->cmd_flags & REQ_FUA) && |
873 | !(info->feature_flush & REQ_FUA))); | 870 | !info->feature_fua)); |
874 | } | 871 | } |
875 | 872 | ||
876 | static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx, | 873 | static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx, |
@@ -981,24 +978,22 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, | |||
981 | return 0; | 978 | return 0; |
982 | } | 979 | } |
983 | 980 | ||
984 | static const char *flush_info(unsigned int feature_flush) | 981 | static const char *flush_info(struct blkfront_info *info) |
985 | { | 982 | { |
986 | switch (feature_flush & ((REQ_FLUSH | REQ_FUA))) { | 983 | if (info->feature_flush && info->feature_fua) |
987 | case REQ_FLUSH|REQ_FUA: | ||
988 | return "barrier: enabled;"; | 984 | return "barrier: enabled;"; |
989 | case REQ_FLUSH: | 985 | else if (info->feature_flush) |
990 | return "flush diskcache: enabled;"; | 986 | return "flush diskcache: enabled;"; |
991 | default: | 987 | else |
992 | return "barrier or flush: disabled;"; | 988 | return "barrier or flush: disabled;"; |
993 | } | ||
994 | } | 989 | } |
995 | 990 | ||
996 | static void xlvbd_flush(struct blkfront_info *info) | 991 | static void xlvbd_flush(struct blkfront_info *info) |
997 | { | 992 | { |
998 | blk_queue_write_cache(info->rq, info->feature_flush & REQ_FLUSH, | 993 | blk_queue_write_cache(info->rq, info->feature_flush ? true : false, |
999 | info->feature_flush & REQ_FUA); | 994 | info->feature_fua ? true : false); |
1000 | pr_info("blkfront: %s: %s %s %s %s %s\n", | 995 | pr_info("blkfront: %s: %s %s %s %s %s\n", |
1001 | info->gd->disk_name, flush_info(info->feature_flush), | 996 | info->gd->disk_name, flush_info(info), |
1002 | "persistent grants:", info->feature_persistent ? | 997 | "persistent grants:", info->feature_persistent ? |
1003 | "enabled;" : "disabled;", "indirect descriptors:", | 998 | "enabled;" : "disabled;", "indirect descriptors:", |
1004 | info->max_indirect_segments ? "enabled;" : "disabled;"); | 999 | info->max_indirect_segments ? "enabled;" : "disabled;"); |
@@ -1617,6 +1612,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) | |||
1617 | if (unlikely(error)) { | 1612 | if (unlikely(error)) { |
1618 | if (error == -EOPNOTSUPP) | 1613 | if (error == -EOPNOTSUPP) |
1619 | error = 0; | 1614 | error = 0; |
1615 | info->feature_fua = 0; | ||
1620 | info->feature_flush = 0; | 1616 | info->feature_flush = 0; |
1621 | xlvbd_flush(info); | 1617 | xlvbd_flush(info); |
1622 | } | 1618 | } |
@@ -2064,7 +2060,7 @@ static int blkif_recover(struct blkfront_info *info) | |||
2064 | bio_trim(cloned_bio, offset, size); | 2060 | bio_trim(cloned_bio, offset, size); |
2065 | cloned_bio->bi_private = split_bio; | 2061 | cloned_bio->bi_private = split_bio; |
2066 | cloned_bio->bi_end_io = split_bio_end; | 2062 | cloned_bio->bi_end_io = split_bio_end; |
2067 | submit_bio(cloned_bio->bi_rw, cloned_bio); | 2063 | submit_bio(cloned_bio); |
2068 | } | 2064 | } |
2069 | /* | 2065 | /* |
2070 | * Now we have to wait for all those smaller bios to | 2066 | * Now we have to wait for all those smaller bios to |
@@ -2073,7 +2069,7 @@ static int blkif_recover(struct blkfront_info *info) | |||
2073 | continue; | 2069 | continue; |
2074 | } | 2070 | } |
2075 | /* We don't need to split this bio */ | 2071 | /* We don't need to split this bio */ |
2076 | submit_bio(bio->bi_rw, bio); | 2072 | submit_bio(bio); |
2077 | } | 2073 | } |
2078 | 2074 | ||
2079 | return 0; | 2075 | return 0; |
@@ -2108,8 +2104,10 @@ static int blkfront_resume(struct xenbus_device *dev) | |||
2108 | /* | 2104 | /* |
2109 | * Get the bios in the request so we can re-queue them. | 2105 | * Get the bios in the request so we can re-queue them. |
2110 | */ | 2106 | */ |
2111 | if (shadow[j].request->cmd_flags & | 2107 | if (req_op(shadow[i].request) == REQ_OP_FLUSH || |
2112 | (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) { | 2108 | req_op(shadow[i].request) == REQ_OP_DISCARD || |
2109 | shadow[j].request->cmd_flags & (REQ_FUA | REQ_SECURE)) { | ||
2110 | |||
2113 | /* | 2111 | /* |
2114 | * Flush operations don't contain bios, so | 2112 | * Flush operations don't contain bios, so |
2115 | * we need to requeue the whole request | 2113 | * we need to requeue the whole request |
@@ -2298,6 +2296,7 @@ static void blkfront_gather_backend_features(struct blkfront_info *info) | |||
2298 | unsigned int indirect_segments; | 2296 | unsigned int indirect_segments; |
2299 | 2297 | ||
2300 | info->feature_flush = 0; | 2298 | info->feature_flush = 0; |
2299 | info->feature_fua = 0; | ||
2301 | 2300 | ||
2302 | err = xenbus_gather(XBT_NIL, info->xbdev->otherend, | 2301 | err = xenbus_gather(XBT_NIL, info->xbdev->otherend, |
2303 | "feature-barrier", "%d", &barrier, | 2302 | "feature-barrier", "%d", &barrier, |
@@ -2310,8 +2309,11 @@ static void blkfront_gather_backend_features(struct blkfront_info *info) | |||
2310 | * | 2309 | * |
2311 | * If there are barriers, then we use flush. | 2310 | * If there are barriers, then we use flush. |
2312 | */ | 2311 | */ |
2313 | if (!err && barrier) | 2312 | if (!err && barrier) { |
2314 | info->feature_flush = REQ_FLUSH | REQ_FUA; | 2313 | info->feature_flush = 1; |
2314 | info->feature_fua = 1; | ||
2315 | } | ||
2316 | |||
2315 | /* | 2317 | /* |
2316 | * And if there is "feature-flush-cache" use that above | 2318 | * And if there is "feature-flush-cache" use that above |
2317 | * barriers. | 2319 | * barriers. |
@@ -2320,8 +2322,10 @@ static void blkfront_gather_backend_features(struct blkfront_info *info) | |||
2320 | "feature-flush-cache", "%d", &flush, | 2322 | "feature-flush-cache", "%d", &flush, |
2321 | NULL); | 2323 | NULL); |
2322 | 2324 | ||
2323 | if (!err && flush) | 2325 | if (!err && flush) { |
2324 | info->feature_flush = REQ_FLUSH; | 2326 | info->feature_flush = 1; |
2327 | info->feature_fua = 0; | ||
2328 | } | ||
2325 | 2329 | ||
2326 | err = xenbus_gather(XBT_NIL, info->xbdev->otherend, | 2330 | err = xenbus_gather(XBT_NIL, info->xbdev->otherend, |
2327 | "feature-discard", "%d", &discard, | 2331 | "feature-discard", "%d", &discard, |
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 8fcad8b761f1..e5e5d19f2172 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c | |||
@@ -874,7 +874,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio) | |||
874 | offset = (bio->bi_iter.bi_sector & | 874 | offset = (bio->bi_iter.bi_sector & |
875 | (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; | 875 | (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; |
876 | 876 | ||
877 | if (unlikely(bio->bi_rw & REQ_DISCARD)) { | 877 | if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) { |
878 | zram_bio_discard(zram, index, offset, bio); | 878 | zram_bio_discard(zram, index, offset, bio); |
879 | bio_endio(bio); | 879 | bio_endio(bio); |
880 | return; | 880 | return; |
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c index 474173eb31bb..5887a7a09e37 100644 --- a/drivers/ide/ide-cd_ioctl.c +++ b/drivers/ide/ide-cd_ioctl.c | |||
@@ -459,9 +459,6 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi, | |||
459 | layer. the packet must be complete, as we do not | 459 | layer. the packet must be complete, as we do not |
460 | touch it at all. */ | 460 | touch it at all. */ |
461 | 461 | ||
462 | if (cgc->data_direction == CGC_DATA_WRITE) | ||
463 | flags |= REQ_WRITE; | ||
464 | |||
465 | if (cgc->sense) | 462 | if (cgc->sense) |
466 | memset(cgc->sense, 0, sizeof(struct request_sense)); | 463 | memset(cgc->sense, 0, sizeof(struct request_sense)); |
467 | 464 | ||
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 05dbcce70b0e..e378ef70ed63 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c | |||
@@ -431,7 +431,7 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq) | |||
431 | ide_drive_t *drive = q->queuedata; | 431 | ide_drive_t *drive = q->queuedata; |
432 | struct ide_cmd *cmd; | 432 | struct ide_cmd *cmd; |
433 | 433 | ||
434 | if (!(rq->cmd_flags & REQ_FLUSH)) | 434 | if (req_op(rq) != REQ_OP_FLUSH) |
435 | return BLKPREP_OK; | 435 | return BLKPREP_OK; |
436 | 436 | ||
437 | if (rq->special) { | 437 | if (rq->special) { |
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c index 2fb5350c5410..f079d8d1d856 100644 --- a/drivers/ide/ide-floppy.c +++ b/drivers/ide/ide-floppy.c | |||
@@ -206,7 +206,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive, | |||
206 | memcpy(rq->cmd, pc->c, 12); | 206 | memcpy(rq->cmd, pc->c, 12); |
207 | 207 | ||
208 | pc->rq = rq; | 208 | pc->rq = rq; |
209 | if (rq->cmd_flags & REQ_WRITE) | 209 | if (cmd == WRITE) |
210 | pc->flags |= PC_FLAG_WRITING; | 210 | pc->flags |= PC_FLAG_WRITING; |
211 | 211 | ||
212 | pc->flags |= PC_FLAG_DMA_OK; | 212 | pc->flags |= PC_FLAG_DMA_OK; |
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index 2103e97a974f..de86d72dcdf0 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c | |||
@@ -342,7 +342,7 @@ try: | |||
342 | 342 | ||
343 | /* Perform read to do GC */ | 343 | /* Perform read to do GC */ |
344 | bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr); | 344 | bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr); |
345 | bio->bi_rw = READ; | 345 | bio_set_op_attrs(bio, REQ_OP_READ, 0); |
346 | bio->bi_private = &wait; | 346 | bio->bi_private = &wait; |
347 | bio->bi_end_io = rrpc_end_sync_bio; | 347 | bio->bi_end_io = rrpc_end_sync_bio; |
348 | 348 | ||
@@ -364,7 +364,7 @@ try: | |||
364 | reinit_completion(&wait); | 364 | reinit_completion(&wait); |
365 | 365 | ||
366 | bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr); | 366 | bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr); |
367 | bio->bi_rw = WRITE; | 367 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); |
368 | bio->bi_private = &wait; | 368 | bio->bi_private = &wait; |
369 | bio->bi_end_io = rrpc_end_sync_bio; | 369 | bio->bi_end_io = rrpc_end_sync_bio; |
370 | 370 | ||
@@ -908,7 +908,7 @@ static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio) | |||
908 | struct nvm_rq *rqd; | 908 | struct nvm_rq *rqd; |
909 | int err; | 909 | int err; |
910 | 910 | ||
911 | if (bio->bi_rw & REQ_DISCARD) { | 911 | if (bio_op(bio) == REQ_OP_DISCARD) { |
912 | rrpc_discard(rrpc, bio); | 912 | rrpc_discard(rrpc, bio); |
913 | return BLK_QC_T_NONE; | 913 | return BLK_QC_T_NONE; |
914 | } | 914 | } |
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index eab505ee0027..76f7534d1dd1 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c | |||
@@ -294,10 +294,10 @@ static void bch_btree_node_read(struct btree *b) | |||
294 | closure_init_stack(&cl); | 294 | closure_init_stack(&cl); |
295 | 295 | ||
296 | bio = bch_bbio_alloc(b->c); | 296 | bio = bch_bbio_alloc(b->c); |
297 | bio->bi_rw = REQ_META|READ_SYNC; | ||
298 | bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9; | 297 | bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9; |
299 | bio->bi_end_io = btree_node_read_endio; | 298 | bio->bi_end_io = btree_node_read_endio; |
300 | bio->bi_private = &cl; | 299 | bio->bi_private = &cl; |
300 | bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC); | ||
301 | 301 | ||
302 | bch_bio_map(bio, b->keys.set[0].data); | 302 | bch_bio_map(bio, b->keys.set[0].data); |
303 | 303 | ||
@@ -396,8 +396,8 @@ static void do_btree_node_write(struct btree *b) | |||
396 | 396 | ||
397 | b->bio->bi_end_io = btree_node_write_endio; | 397 | b->bio->bi_end_io = btree_node_write_endio; |
398 | b->bio->bi_private = cl; | 398 | b->bio->bi_private = cl; |
399 | b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA; | ||
400 | b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c)); | 399 | b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c)); |
400 | bio_set_op_attrs(b->bio, REQ_OP_WRITE, REQ_META|WRITE_SYNC|REQ_FUA); | ||
401 | bch_bio_map(b->bio, i); | 401 | bch_bio_map(b->bio, i); |
402 | 402 | ||
403 | /* | 403 | /* |
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index 8b1f1d5c1819..c28df164701e 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c | |||
@@ -52,9 +52,10 @@ void bch_btree_verify(struct btree *b) | |||
52 | bio->bi_bdev = PTR_CACHE(b->c, &b->key, 0)->bdev; | 52 | bio->bi_bdev = PTR_CACHE(b->c, &b->key, 0)->bdev; |
53 | bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); | 53 | bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); |
54 | bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9; | 54 | bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9; |
55 | bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC); | ||
55 | bch_bio_map(bio, sorted); | 56 | bch_bio_map(bio, sorted); |
56 | 57 | ||
57 | submit_bio_wait(REQ_META|READ_SYNC, bio); | 58 | submit_bio_wait(bio); |
58 | bch_bbio_free(bio, b->c); | 59 | bch_bbio_free(bio, b->c); |
59 | 60 | ||
60 | memcpy(ondisk, sorted, KEY_SIZE(&v->key) << 9); | 61 | memcpy(ondisk, sorted, KEY_SIZE(&v->key) << 9); |
@@ -113,11 +114,12 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio) | |||
113 | check = bio_clone(bio, GFP_NOIO); | 114 | check = bio_clone(bio, GFP_NOIO); |
114 | if (!check) | 115 | if (!check) |
115 | return; | 116 | return; |
117 | bio_set_op_attrs(check, REQ_OP_READ, READ_SYNC); | ||
116 | 118 | ||
117 | if (bio_alloc_pages(check, GFP_NOIO)) | 119 | if (bio_alloc_pages(check, GFP_NOIO)) |
118 | goto out_put; | 120 | goto out_put; |
119 | 121 | ||
120 | submit_bio_wait(READ_SYNC, check); | 122 | submit_bio_wait(check); |
121 | 123 | ||
122 | bio_for_each_segment(bv, bio, iter) { | 124 | bio_for_each_segment(bv, bio, iter) { |
123 | void *p1 = kmap_atomic(bv.bv_page); | 125 | void *p1 = kmap_atomic(bv.bv_page); |
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index 86a0bb87124e..fd885cc2afad 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c | |||
@@ -111,7 +111,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio, | |||
111 | struct bbio *b = container_of(bio, struct bbio, bio); | 111 | struct bbio *b = container_of(bio, struct bbio, bio); |
112 | struct cache *ca = PTR_CACHE(c, &b->key, 0); | 112 | struct cache *ca = PTR_CACHE(c, &b->key, 0); |
113 | 113 | ||
114 | unsigned threshold = bio->bi_rw & REQ_WRITE | 114 | unsigned threshold = op_is_write(bio_op(bio)) |
115 | ? c->congested_write_threshold_us | 115 | ? c->congested_write_threshold_us |
116 | : c->congested_read_threshold_us; | 116 | : c->congested_read_threshold_us; |
117 | 117 | ||
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 29eba7219b01..6925023e12d4 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c | |||
@@ -54,11 +54,11 @@ reread: left = ca->sb.bucket_size - offset; | |||
54 | bio_reset(bio); | 54 | bio_reset(bio); |
55 | bio->bi_iter.bi_sector = bucket + offset; | 55 | bio->bi_iter.bi_sector = bucket + offset; |
56 | bio->bi_bdev = ca->bdev; | 56 | bio->bi_bdev = ca->bdev; |
57 | bio->bi_rw = READ; | ||
58 | bio->bi_iter.bi_size = len << 9; | 57 | bio->bi_iter.bi_size = len << 9; |
59 | 58 | ||
60 | bio->bi_end_io = journal_read_endio; | 59 | bio->bi_end_io = journal_read_endio; |
61 | bio->bi_private = &cl; | 60 | bio->bi_private = &cl; |
61 | bio_set_op_attrs(bio, REQ_OP_READ, 0); | ||
62 | bch_bio_map(bio, data); | 62 | bch_bio_map(bio, data); |
63 | 63 | ||
64 | closure_bio_submit(bio, &cl); | 64 | closure_bio_submit(bio, &cl); |
@@ -418,7 +418,7 @@ static void journal_discard_work(struct work_struct *work) | |||
418 | struct journal_device *ja = | 418 | struct journal_device *ja = |
419 | container_of(work, struct journal_device, discard_work); | 419 | container_of(work, struct journal_device, discard_work); |
420 | 420 | ||
421 | submit_bio(0, &ja->discard_bio); | 421 | submit_bio(&ja->discard_bio); |
422 | } | 422 | } |
423 | 423 | ||
424 | static void do_journal_discard(struct cache *ca) | 424 | static void do_journal_discard(struct cache *ca) |
@@ -449,10 +449,10 @@ static void do_journal_discard(struct cache *ca) | |||
449 | atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT); | 449 | atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT); |
450 | 450 | ||
451 | bio_init(bio); | 451 | bio_init(bio); |
452 | bio_set_op_attrs(bio, REQ_OP_DISCARD, 0); | ||
452 | bio->bi_iter.bi_sector = bucket_to_sector(ca->set, | 453 | bio->bi_iter.bi_sector = bucket_to_sector(ca->set, |
453 | ca->sb.d[ja->discard_idx]); | 454 | ca->sb.d[ja->discard_idx]); |
454 | bio->bi_bdev = ca->bdev; | 455 | bio->bi_bdev = ca->bdev; |
455 | bio->bi_rw = REQ_WRITE|REQ_DISCARD; | ||
456 | bio->bi_max_vecs = 1; | 456 | bio->bi_max_vecs = 1; |
457 | bio->bi_io_vec = bio->bi_inline_vecs; | 457 | bio->bi_io_vec = bio->bi_inline_vecs; |
458 | bio->bi_iter.bi_size = bucket_bytes(ca); | 458 | bio->bi_iter.bi_size = bucket_bytes(ca); |
@@ -626,11 +626,12 @@ static void journal_write_unlocked(struct closure *cl) | |||
626 | bio_reset(bio); | 626 | bio_reset(bio); |
627 | bio->bi_iter.bi_sector = PTR_OFFSET(k, i); | 627 | bio->bi_iter.bi_sector = PTR_OFFSET(k, i); |
628 | bio->bi_bdev = ca->bdev; | 628 | bio->bi_bdev = ca->bdev; |
629 | bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA; | ||
630 | bio->bi_iter.bi_size = sectors << 9; | 629 | bio->bi_iter.bi_size = sectors << 9; |
631 | 630 | ||
632 | bio->bi_end_io = journal_write_endio; | 631 | bio->bi_end_io = journal_write_endio; |
633 | bio->bi_private = w; | 632 | bio->bi_private = w; |
633 | bio_set_op_attrs(bio, REQ_OP_WRITE, | ||
634 | REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA); | ||
634 | bch_bio_map(bio, w->data); | 635 | bch_bio_map(bio, w->data); |
635 | 636 | ||
636 | trace_bcache_journal_write(bio); | 637 | trace_bcache_journal_write(bio); |
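[Editor's note] The bcache journal write above carries REQ_PREFLUSH|REQ_FUA: the preflush drains previously completed writes out of the device cache before this write, and FUA forces the journal block itself to stable media before completion. A tiny sketch of that flag combination, assuming the bio is otherwise prepared as in the hunk; submit_journal_bio() is a placeholder name:

	static void submit_journal_bio(struct bio *bio)
	{
		/* PREFLUSH orders prior writes; FUA makes this write durable */
		bio_set_op_attrs(bio, REQ_OP_WRITE,
				 REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
		submit_bio(bio);
	}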
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c index b929fc944e9c..1881319f2298 100644 --- a/drivers/md/bcache/movinggc.c +++ b/drivers/md/bcache/movinggc.c | |||
@@ -163,7 +163,7 @@ static void read_moving(struct cache_set *c) | |||
163 | moving_init(io); | 163 | moving_init(io); |
164 | bio = &io->bio.bio; | 164 | bio = &io->bio.bio; |
165 | 165 | ||
166 | bio->bi_rw = READ; | 166 | bio_set_op_attrs(bio, REQ_OP_READ, 0); |
167 | bio->bi_end_io = read_moving_endio; | 167 | bio->bi_end_io = read_moving_endio; |
168 | 168 | ||
169 | if (bio_alloc_pages(bio, GFP_KERNEL)) | 169 | if (bio_alloc_pages(bio, GFP_KERNEL)) |
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 25fa8445bb24..69f16f43f8ab 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c | |||
@@ -205,10 +205,10 @@ static void bch_data_insert_start(struct closure *cl) | |||
205 | return bch_data_invalidate(cl); | 205 | return bch_data_invalidate(cl); |
206 | 206 | ||
207 | /* | 207 | /* |
208 | * Journal writes are marked REQ_FLUSH; if the original write was a | 208 | * Journal writes are marked REQ_PREFLUSH; if the original write was a |
209 | * flush, it'll wait on the journal write. | 209 | * flush, it'll wait on the journal write. |
210 | */ | 210 | */ |
211 | bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA); | 211 | bio->bi_rw &= ~(REQ_PREFLUSH|REQ_FUA); |
212 | 212 | ||
213 | do { | 213 | do { |
214 | unsigned i; | 214 | unsigned i; |
@@ -253,7 +253,7 @@ static void bch_data_insert_start(struct closure *cl) | |||
253 | trace_bcache_cache_insert(k); | 253 | trace_bcache_cache_insert(k); |
254 | bch_keylist_push(&op->insert_keys); | 254 | bch_keylist_push(&op->insert_keys); |
255 | 255 | ||
256 | n->bi_rw |= REQ_WRITE; | 256 | bio_set_op_attrs(n, REQ_OP_WRITE, 0); |
257 | bch_submit_bbio(n, op->c, k, 0); | 257 | bch_submit_bbio(n, op->c, k, 0); |
258 | } while (n != bio); | 258 | } while (n != bio); |
259 | 259 | ||
@@ -378,12 +378,12 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) | |||
378 | 378 | ||
379 | if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || | 379 | if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || |
380 | c->gc_stats.in_use > CUTOFF_CACHE_ADD || | 380 | c->gc_stats.in_use > CUTOFF_CACHE_ADD || |
381 | (bio->bi_rw & REQ_DISCARD)) | 381 | (bio_op(bio) == REQ_OP_DISCARD)) |
382 | goto skip; | 382 | goto skip; |
383 | 383 | ||
384 | if (mode == CACHE_MODE_NONE || | 384 | if (mode == CACHE_MODE_NONE || |
385 | (mode == CACHE_MODE_WRITEAROUND && | 385 | (mode == CACHE_MODE_WRITEAROUND && |
386 | (bio->bi_rw & REQ_WRITE))) | 386 | op_is_write(bio_op(bio)))) |
387 | goto skip; | 387 | goto skip; |
388 | 388 | ||
389 | if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) || | 389 | if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) || |
@@ -404,7 +404,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) | |||
404 | 404 | ||
405 | if (!congested && | 405 | if (!congested && |
406 | mode == CACHE_MODE_WRITEBACK && | 406 | mode == CACHE_MODE_WRITEBACK && |
407 | (bio->bi_rw & REQ_WRITE) && | 407 | op_is_write(bio_op(bio)) && |
408 | (bio->bi_rw & REQ_SYNC)) | 408 | (bio->bi_rw & REQ_SYNC)) |
409 | goto rescale; | 409 | goto rescale; |
410 | 410 | ||
@@ -657,7 +657,7 @@ static inline struct search *search_alloc(struct bio *bio, | |||
657 | s->cache_miss = NULL; | 657 | s->cache_miss = NULL; |
658 | s->d = d; | 658 | s->d = d; |
659 | s->recoverable = 1; | 659 | s->recoverable = 1; |
660 | s->write = (bio->bi_rw & REQ_WRITE) != 0; | 660 | s->write = op_is_write(bio_op(bio)); |
661 | s->read_dirty_data = 0; | 661 | s->read_dirty_data = 0; |
662 | s->start_time = jiffies; | 662 | s->start_time = jiffies; |
663 | 663 | ||
@@ -668,7 +668,7 @@ static inline struct search *search_alloc(struct bio *bio, | |||
668 | s->iop.write_prio = 0; | 668 | s->iop.write_prio = 0; |
669 | s->iop.error = 0; | 669 | s->iop.error = 0; |
670 | s->iop.flags = 0; | 670 | s->iop.flags = 0; |
671 | s->iop.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0; | 671 | s->iop.flush_journal = (bio->bi_rw & (REQ_PREFLUSH|REQ_FUA)) != 0; |
672 | s->iop.wq = bcache_wq; | 672 | s->iop.wq = bcache_wq; |
673 | 673 | ||
674 | return s; | 674 | return s; |
@@ -899,7 +899,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s) | |||
899 | * But check_overlapping drops dirty keys for which io hasn't started, | 899 | * But check_overlapping drops dirty keys for which io hasn't started, |
900 | * so we still want to call it. | 900 | * so we still want to call it. |
901 | */ | 901 | */ |
902 | if (bio->bi_rw & REQ_DISCARD) | 902 | if (bio_op(bio) == REQ_OP_DISCARD) |
903 | s->iop.bypass = true; | 903 | s->iop.bypass = true; |
904 | 904 | ||
905 | if (should_writeback(dc, s->orig_bio, | 905 | if (should_writeback(dc, s->orig_bio, |
@@ -913,22 +913,22 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s) | |||
913 | s->iop.bio = s->orig_bio; | 913 | s->iop.bio = s->orig_bio; |
914 | bio_get(s->iop.bio); | 914 | bio_get(s->iop.bio); |
915 | 915 | ||
916 | if (!(bio->bi_rw & REQ_DISCARD) || | 916 | if ((bio_op(bio) != REQ_OP_DISCARD) || |
917 | blk_queue_discard(bdev_get_queue(dc->bdev))) | 917 | blk_queue_discard(bdev_get_queue(dc->bdev))) |
918 | closure_bio_submit(bio, cl); | 918 | closure_bio_submit(bio, cl); |
919 | } else if (s->iop.writeback) { | 919 | } else if (s->iop.writeback) { |
920 | bch_writeback_add(dc); | 920 | bch_writeback_add(dc); |
921 | s->iop.bio = bio; | 921 | s->iop.bio = bio; |
922 | 922 | ||
923 | if (bio->bi_rw & REQ_FLUSH) { | 923 | if (bio->bi_rw & REQ_PREFLUSH) { |
924 | /* Also need to send a flush to the backing device */ | 924 | /* Also need to send a flush to the backing device */ |
925 | struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0, | 925 | struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0, |
926 | dc->disk.bio_split); | 926 | dc->disk.bio_split); |
927 | 927 | ||
928 | flush->bi_rw = WRITE_FLUSH; | ||
929 | flush->bi_bdev = bio->bi_bdev; | 928 | flush->bi_bdev = bio->bi_bdev; |
930 | flush->bi_end_io = request_endio; | 929 | flush->bi_end_io = request_endio; |
931 | flush->bi_private = cl; | 930 | flush->bi_private = cl; |
931 | bio_set_op_attrs(flush, REQ_OP_WRITE, WRITE_FLUSH); | ||
932 | 932 | ||
933 | closure_bio_submit(flush, cl); | 933 | closure_bio_submit(flush, cl); |
934 | } | 934 | } |
@@ -992,7 +992,7 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q, | |||
992 | cached_dev_read(dc, s); | 992 | cached_dev_read(dc, s); |
993 | } | 993 | } |
994 | } else { | 994 | } else { |
995 | if ((bio->bi_rw & REQ_DISCARD) && | 995 | if ((bio_op(bio) == REQ_OP_DISCARD) && |
996 | !blk_queue_discard(bdev_get_queue(dc->bdev))) | 996 | !blk_queue_discard(bdev_get_queue(dc->bdev))) |
997 | bio_endio(bio); | 997 | bio_endio(bio); |
998 | else | 998 | else |
@@ -1103,7 +1103,7 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q, | |||
1103 | &KEY(d->id, bio->bi_iter.bi_sector, 0), | 1103 | &KEY(d->id, bio->bi_iter.bi_sector, 0), |
1104 | &KEY(d->id, bio_end_sector(bio), 0)); | 1104 | &KEY(d->id, bio_end_sector(bio), 0)); |
1105 | 1105 | ||
1106 | s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0; | 1106 | s->iop.bypass = (bio_op(bio) == REQ_OP_DISCARD) != 0; |
1107 | s->iop.writeback = true; | 1107 | s->iop.writeback = true; |
1108 | s->iop.bio = bio; | 1108 | s->iop.bio = bio; |
1109 | 1109 | ||
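The bcache request-path hunks above show the core pattern of this series: the operation is read back with bio_op() and classified with op_is_write(), while modifier flags such as REQ_SYNC stay in bi_rw. A minimal sketch of the new idiom; example_bypasses_cache() is a made-up name for illustration only:

    #include <linux/bio.h>

    static bool example_bypasses_cache(struct bio *bio)
    {
            /* discard is now an operation code, not a flag bit in bi_rw */
            if (bio_op(bio) == REQ_OP_DISCARD)
                    return true;

            /* op_is_write() classifies the op; REQ_SYNC remains a flag */
            return op_is_write(bio_op(bio)) && (bio->bi_rw & REQ_SYNC);
    }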
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index f5dbb4e884d8..c944daf75dd0 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c | |||
@@ -212,8 +212,8 @@ static void __write_super(struct cache_sb *sb, struct bio *bio) | |||
212 | unsigned i; | 212 | unsigned i; |
213 | 213 | ||
214 | bio->bi_iter.bi_sector = SB_SECTOR; | 214 | bio->bi_iter.bi_sector = SB_SECTOR; |
215 | bio->bi_rw = REQ_SYNC|REQ_META; | ||
216 | bio->bi_iter.bi_size = SB_SIZE; | 215 | bio->bi_iter.bi_size = SB_SIZE; |
216 | bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META); | ||
217 | bch_bio_map(bio, NULL); | 217 | bch_bio_map(bio, NULL); |
218 | 218 | ||
219 | out->offset = cpu_to_le64(sb->offset); | 219 | out->offset = cpu_to_le64(sb->offset); |
@@ -238,7 +238,7 @@ static void __write_super(struct cache_sb *sb, struct bio *bio) | |||
238 | pr_debug("ver %llu, flags %llu, seq %llu", | 238 | pr_debug("ver %llu, flags %llu, seq %llu", |
239 | sb->version, sb->flags, sb->seq); | 239 | sb->version, sb->flags, sb->seq); |
240 | 240 | ||
241 | submit_bio(REQ_WRITE, bio); | 241 | submit_bio(bio); |
242 | } | 242 | } |
243 | 243 | ||
244 | static void bch_write_bdev_super_unlock(struct closure *cl) | 244 | static void bch_write_bdev_super_unlock(struct closure *cl) |
@@ -333,7 +333,7 @@ static void uuid_io_unlock(struct closure *cl) | |||
333 | up(&c->uuid_write_mutex); | 333 | up(&c->uuid_write_mutex); |
334 | } | 334 | } |
335 | 335 | ||
336 | static void uuid_io(struct cache_set *c, unsigned long rw, | 336 | static void uuid_io(struct cache_set *c, int op, unsigned long op_flags, |
337 | struct bkey *k, struct closure *parent) | 337 | struct bkey *k, struct closure *parent) |
338 | { | 338 | { |
339 | struct closure *cl = &c->uuid_write; | 339 | struct closure *cl = &c->uuid_write; |
@@ -348,21 +348,22 @@ static void uuid_io(struct cache_set *c, unsigned long rw, | |||
348 | for (i = 0; i < KEY_PTRS(k); i++) { | 348 | for (i = 0; i < KEY_PTRS(k); i++) { |
349 | struct bio *bio = bch_bbio_alloc(c); | 349 | struct bio *bio = bch_bbio_alloc(c); |
350 | 350 | ||
351 | bio->bi_rw = REQ_SYNC|REQ_META|rw; | 351 | bio->bi_rw = REQ_SYNC|REQ_META|op_flags; |
352 | bio->bi_iter.bi_size = KEY_SIZE(k) << 9; | 352 | bio->bi_iter.bi_size = KEY_SIZE(k) << 9; |
353 | 353 | ||
354 | bio->bi_end_io = uuid_endio; | 354 | bio->bi_end_io = uuid_endio; |
355 | bio->bi_private = cl; | 355 | bio->bi_private = cl; |
356 | bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags); | ||
356 | bch_bio_map(bio, c->uuids); | 357 | bch_bio_map(bio, c->uuids); |
357 | 358 | ||
358 | bch_submit_bbio(bio, c, k, i); | 359 | bch_submit_bbio(bio, c, k, i); |
359 | 360 | ||
360 | if (!(rw & WRITE)) | 361 | if (op != REQ_OP_WRITE) |
361 | break; | 362 | break; |
362 | } | 363 | } |
363 | 364 | ||
364 | bch_extent_to_text(buf, sizeof(buf), k); | 365 | bch_extent_to_text(buf, sizeof(buf), k); |
365 | pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", buf); | 366 | pr_debug("%s UUIDs at %s", op == REQ_OP_WRITE ? "wrote" : "read", buf); |
366 | 367 | ||
367 | for (u = c->uuids; u < c->uuids + c->nr_uuids; u++) | 368 | for (u = c->uuids; u < c->uuids + c->nr_uuids; u++) |
368 | if (!bch_is_zero(u->uuid, 16)) | 369 | if (!bch_is_zero(u->uuid, 16)) |
@@ -381,7 +382,7 @@ static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl) | |||
381 | return "bad uuid pointer"; | 382 | return "bad uuid pointer"; |
382 | 383 | ||
383 | bkey_copy(&c->uuid_bucket, k); | 384 | bkey_copy(&c->uuid_bucket, k); |
384 | uuid_io(c, READ_SYNC, k, cl); | 385 | uuid_io(c, REQ_OP_READ, READ_SYNC, k, cl); |
385 | 386 | ||
386 | if (j->version < BCACHE_JSET_VERSION_UUIDv1) { | 387 | if (j->version < BCACHE_JSET_VERSION_UUIDv1) { |
387 | struct uuid_entry_v0 *u0 = (void *) c->uuids; | 388 | struct uuid_entry_v0 *u0 = (void *) c->uuids; |
@@ -426,7 +427,7 @@ static int __uuid_write(struct cache_set *c) | |||
426 | return 1; | 427 | return 1; |
427 | 428 | ||
428 | SET_KEY_SIZE(&k.key, c->sb.bucket_size); | 429 | SET_KEY_SIZE(&k.key, c->sb.bucket_size); |
429 | uuid_io(c, REQ_WRITE, &k.key, &cl); | 430 | uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl); |
430 | closure_sync(&cl); | 431 | closure_sync(&cl); |
431 | 432 | ||
432 | bkey_copy(&c->uuid_bucket, &k.key); | 433 | bkey_copy(&c->uuid_bucket, &k.key); |
@@ -498,7 +499,8 @@ static void prio_endio(struct bio *bio) | |||
498 | closure_put(&ca->prio); | 499 | closure_put(&ca->prio); |
499 | } | 500 | } |
500 | 501 | ||
501 | static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw) | 502 | static void prio_io(struct cache *ca, uint64_t bucket, int op, |
503 | unsigned long op_flags) | ||
502 | { | 504 | { |
503 | struct closure *cl = &ca->prio; | 505 | struct closure *cl = &ca->prio; |
504 | struct bio *bio = bch_bbio_alloc(ca->set); | 506 | struct bio *bio = bch_bbio_alloc(ca->set); |
@@ -507,11 +509,11 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw) | |||
507 | 509 | ||
508 | bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; | 510 | bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; |
509 | bio->bi_bdev = ca->bdev; | 511 | bio->bi_bdev = ca->bdev; |
510 | bio->bi_rw = REQ_SYNC|REQ_META|rw; | ||
511 | bio->bi_iter.bi_size = bucket_bytes(ca); | 512 | bio->bi_iter.bi_size = bucket_bytes(ca); |
512 | 513 | ||
513 | bio->bi_end_io = prio_endio; | 514 | bio->bi_end_io = prio_endio; |
514 | bio->bi_private = ca; | 515 | bio->bi_private = ca; |
516 | bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags); | ||
515 | bch_bio_map(bio, ca->disk_buckets); | 517 | bch_bio_map(bio, ca->disk_buckets); |
516 | 518 | ||
517 | closure_bio_submit(bio, &ca->prio); | 519 | closure_bio_submit(bio, &ca->prio); |
@@ -557,7 +559,7 @@ void bch_prio_write(struct cache *ca) | |||
557 | BUG_ON(bucket == -1); | 559 | BUG_ON(bucket == -1); |
558 | 560 | ||
559 | mutex_unlock(&ca->set->bucket_lock); | 561 | mutex_unlock(&ca->set->bucket_lock); |
560 | prio_io(ca, bucket, REQ_WRITE); | 562 | prio_io(ca, bucket, REQ_OP_WRITE, 0); |
561 | mutex_lock(&ca->set->bucket_lock); | 563 | mutex_lock(&ca->set->bucket_lock); |
562 | 564 | ||
563 | ca->prio_buckets[i] = bucket; | 565 | ca->prio_buckets[i] = bucket; |
@@ -599,7 +601,7 @@ static void prio_read(struct cache *ca, uint64_t bucket) | |||
599 | ca->prio_last_buckets[bucket_nr] = bucket; | 601 | ca->prio_last_buckets[bucket_nr] = bucket; |
600 | bucket_nr++; | 602 | bucket_nr++; |
601 | 603 | ||
602 | prio_io(ca, bucket, READ_SYNC); | 604 | prio_io(ca, bucket, REQ_OP_READ, READ_SYNC); |
603 | 605 | ||
604 | if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8)) | 606 | if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8)) |
605 | pr_warn("bad csum reading priorities"); | 607 | pr_warn("bad csum reading priorities"); |
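uuid_io() and prio_io() show the other half of the conversion: helpers that used to take a single rw word now take an op plus op_flags, and the pair is stamped onto the bio with bio_set_op_attrs() once the completion fields are set. A sketch under those assumptions; example_io() and example_endio() are invented names:

    static void example_endio(struct bio *bio)
    {
            /* completion handling would go here */
    }

    static void example_io(struct bio *bio, int op, unsigned long op_flags)
    {
            bio->bi_end_io  = example_endio;  /* assumed completion handler */
            bio->bi_private = NULL;
            /* metadata flags are folded in with the caller's op_flags */
            bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
            submit_bio(bio);                  /* the op now travels in the bio */
    }

    /* Callers name the operation explicitly, e.g.
     *      example_io(bio, REQ_OP_READ, READ_SYNC);
     *      example_io(bio, REQ_OP_WRITE, 0);
     */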
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 60123677b382..d9fd2a62e5f6 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c | |||
@@ -182,7 +182,7 @@ static void write_dirty(struct closure *cl) | |||
182 | struct keybuf_key *w = io->bio.bi_private; | 182 | struct keybuf_key *w = io->bio.bi_private; |
183 | 183 | ||
184 | dirty_init(w); | 184 | dirty_init(w); |
185 | io->bio.bi_rw = WRITE; | 185 | bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0); |
186 | io->bio.bi_iter.bi_sector = KEY_START(&w->key); | 186 | io->bio.bi_iter.bi_sector = KEY_START(&w->key); |
187 | io->bio.bi_bdev = io->dc->bdev; | 187 | io->bio.bi_bdev = io->dc->bdev; |
188 | io->bio.bi_end_io = dirty_endio; | 188 | io->bio.bi_end_io = dirty_endio; |
@@ -251,10 +251,10 @@ static void read_dirty(struct cached_dev *dc) | |||
251 | io->dc = dc; | 251 | io->dc = dc; |
252 | 252 | ||
253 | dirty_init(w); | 253 | dirty_init(w); |
254 | bio_set_op_attrs(&io->bio, REQ_OP_READ, 0); | ||
254 | io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0); | 255 | io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0); |
255 | io->bio.bi_bdev = PTR_CACHE(dc->disk.c, | 256 | io->bio.bi_bdev = PTR_CACHE(dc->disk.c, |
256 | &w->key, 0)->bdev; | 257 | &w->key, 0)->bdev; |
257 | io->bio.bi_rw = READ; | ||
258 | io->bio.bi_end_io = read_dirty_endio; | 258 | io->bio.bi_end_io = read_dirty_endio; |
259 | 259 | ||
260 | if (bio_alloc_pages(&io->bio, GFP_KERNEL)) | 260 | if (bio_alloc_pages(&io->bio, GFP_KERNEL)) |
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index d8129ec93ebd..6fff794e0c72 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c | |||
@@ -162,7 +162,7 @@ static int read_sb_page(struct mddev *mddev, loff_t offset, | |||
162 | 162 | ||
163 | if (sync_page_io(rdev, target, | 163 | if (sync_page_io(rdev, target, |
164 | roundup(size, bdev_logical_block_size(rdev->bdev)), | 164 | roundup(size, bdev_logical_block_size(rdev->bdev)), |
165 | page, READ, true)) { | 165 | page, REQ_OP_READ, 0, true)) { |
166 | page->index = index; | 166 | page->index = index; |
167 | return 0; | 167 | return 0; |
168 | } | 168 | } |
@@ -297,7 +297,7 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait) | |||
297 | atomic_inc(&bitmap->pending_writes); | 297 | atomic_inc(&bitmap->pending_writes); |
298 | set_buffer_locked(bh); | 298 | set_buffer_locked(bh); |
299 | set_buffer_mapped(bh); | 299 | set_buffer_mapped(bh); |
300 | submit_bh(WRITE | REQ_SYNC, bh); | 300 | submit_bh(REQ_OP_WRITE, REQ_SYNC, bh); |
301 | bh = bh->b_this_page; | 301 | bh = bh->b_this_page; |
302 | } | 302 | } |
303 | 303 | ||
@@ -392,7 +392,7 @@ static int read_page(struct file *file, unsigned long index, | |||
392 | atomic_inc(&bitmap->pending_writes); | 392 | atomic_inc(&bitmap->pending_writes); |
393 | set_buffer_locked(bh); | 393 | set_buffer_locked(bh); |
394 | set_buffer_mapped(bh); | 394 | set_buffer_mapped(bh); |
395 | submit_bh(READ, bh); | 395 | submit_bh(REQ_OP_READ, 0, bh); |
396 | } | 396 | } |
397 | block++; | 397 | block++; |
398 | bh = bh->b_this_page; | 398 | bh = bh->b_this_page; |
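The md bitmap code applies the same split to two more entry points: submit_bh() grew a separate op argument, and sync_page_io() now takes op and op_flags instead of a combined rw value. A short sketch against those kernel-internal interfaces; example_read_sb() is hypothetical, and struct md_rdev / sync_page_io() come from drivers/md/md.h:

    #include <linux/buffer_head.h>

    static void example_write_bh(struct buffer_head *bh)
    {
            /* was: submit_bh(WRITE | REQ_SYNC, bh) */
            submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
    }

    static int example_read_sb(struct md_rdev *rdev, struct page *page, int size)
    {
            /* was: sync_page_io(rdev, 0, size, page, READ, true) */
            if (!sync_page_io(rdev, 0, size, page, REQ_OP_READ, 0, true))
                    return -EIO;
            return 0;
    }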
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index cd77216beff1..6571c81465e1 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c | |||
@@ -574,7 +574,8 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t block, | |||
574 | { | 574 | { |
575 | int r; | 575 | int r; |
576 | struct dm_io_request io_req = { | 576 | struct dm_io_request io_req = { |
577 | .bi_rw = rw, | 577 | .bi_op = rw, |
578 | .bi_op_flags = 0, | ||
578 | .notify.fn = dmio_complete, | 579 | .notify.fn = dmio_complete, |
579 | .notify.context = b, | 580 | .notify.context = b, |
580 | .client = b->c->dm_io, | 581 | .client = b->c->dm_io, |
@@ -634,6 +635,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block, | |||
634 | * the dm_buffer's inline bio is local to bufio. | 635 | * the dm_buffer's inline bio is local to bufio. |
635 | */ | 636 | */ |
636 | b->bio.bi_private = end_io; | 637 | b->bio.bi_private = end_io; |
638 | bio_set_op_attrs(&b->bio, rw, 0); | ||
637 | 639 | ||
638 | /* | 640 | /* |
639 | * We assume that if len >= PAGE_SIZE ptr is page-aligned. | 641 | * We assume that if len >= PAGE_SIZE ptr is page-aligned. |
@@ -660,7 +662,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block, | |||
660 | ptr += PAGE_SIZE; | 662 | ptr += PAGE_SIZE; |
661 | } while (len > 0); | 663 | } while (len > 0); |
662 | 664 | ||
663 | submit_bio(rw, &b->bio); | 665 | submit_bio(&b->bio); |
664 | } | 666 | } |
665 | 667 | ||
666 | static void submit_io(struct dm_buffer *b, int rw, sector_t block, | 668 | static void submit_io(struct dm_buffer *b, int rw, sector_t block, |
@@ -1326,7 +1328,8 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers); | |||
1326 | int dm_bufio_issue_flush(struct dm_bufio_client *c) | 1328 | int dm_bufio_issue_flush(struct dm_bufio_client *c) |
1327 | { | 1329 | { |
1328 | struct dm_io_request io_req = { | 1330 | struct dm_io_request io_req = { |
1329 | .bi_rw = WRITE_FLUSH, | 1331 | .bi_op = REQ_OP_WRITE, |
1332 | .bi_op_flags = WRITE_FLUSH, | ||
1330 | .mem.type = DM_IO_KMEM, | 1333 | .mem.type = DM_IO_KMEM, |
1331 | .mem.ptr.addr = NULL, | 1334 | .mem.ptr.addr = NULL, |
1332 | .client = c->dm_io, | 1335 | .client = c->dm_io, |
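dm-bufio is the first of several device-mapper clients converted to the widened dm_io interface: struct dm_io_request replaces its single bi_rw field with bi_op and bi_op_flags. A hedged sketch of a zero-length flush built that way, with the client and region assumed to be set up elsewhere:

    #include <linux/dm-io.h>

    static int example_issue_flush(struct dm_io_client *client,
                                   struct dm_io_region *where)
    {
            struct dm_io_request io_req = {
                    .bi_op        = REQ_OP_WRITE,   /* the operation   */
                    .bi_op_flags  = WRITE_FLUSH,    /* modifier flags  */
                    .mem.type     = DM_IO_KMEM,
                    .mem.ptr.addr = NULL,           /* no data payload */
                    .client       = client,
            };

            /* where->count == 0: only the preflush itself is sent */
            return dm_io(&io_req, 1, where, NULL);
    }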
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index ee0510f9a85e..718744db62df 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c | |||
@@ -788,7 +788,8 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) | |||
788 | 788 | ||
789 | spin_lock_irqsave(&cache->lock, flags); | 789 | spin_lock_irqsave(&cache->lock, flags); |
790 | if (cache->need_tick_bio && | 790 | if (cache->need_tick_bio && |
791 | !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) { | 791 | !(bio->bi_rw & (REQ_FUA | REQ_PREFLUSH)) && |
792 | bio_op(bio) != REQ_OP_DISCARD) { | ||
792 | pb->tick = true; | 793 | pb->tick = true; |
793 | cache->need_tick_bio = false; | 794 | cache->need_tick_bio = false; |
794 | } | 795 | } |
@@ -829,7 +830,7 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio) | |||
829 | 830 | ||
830 | static int bio_triggers_commit(struct cache *cache, struct bio *bio) | 831 | static int bio_triggers_commit(struct cache *cache, struct bio *bio) |
831 | { | 832 | { |
832 | return bio->bi_rw & (REQ_FLUSH | REQ_FUA); | 833 | return bio->bi_rw & (REQ_PREFLUSH | REQ_FUA); |
833 | } | 834 | } |
834 | 835 | ||
835 | /* | 836 | /* |
@@ -851,7 +852,7 @@ static void inc_ds(struct cache *cache, struct bio *bio, | |||
851 | static bool accountable_bio(struct cache *cache, struct bio *bio) | 852 | static bool accountable_bio(struct cache *cache, struct bio *bio) |
852 | { | 853 | { |
853 | return ((bio->bi_bdev == cache->origin_dev->bdev) && | 854 | return ((bio->bi_bdev == cache->origin_dev->bdev) && |
854 | !(bio->bi_rw & REQ_DISCARD)); | 855 | bio_op(bio) != REQ_OP_DISCARD); |
855 | } | 856 | } |
856 | 857 | ||
857 | static void accounted_begin(struct cache *cache, struct bio *bio) | 858 | static void accounted_begin(struct cache *cache, struct bio *bio) |
@@ -1067,7 +1068,8 @@ static void dec_io_migrations(struct cache *cache) | |||
1067 | 1068 | ||
1068 | static bool discard_or_flush(struct bio *bio) | 1069 | static bool discard_or_flush(struct bio *bio) |
1069 | { | 1070 | { |
1070 | return bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD); | 1071 | return bio_op(bio) == REQ_OP_DISCARD || |
1072 | bio->bi_rw & (REQ_PREFLUSH | REQ_FUA); | ||
1071 | } | 1073 | } |
1072 | 1074 | ||
1073 | static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell) | 1075 | static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell) |
@@ -1612,8 +1614,8 @@ static void process_flush_bio(struct cache *cache, struct bio *bio) | |||
1612 | remap_to_cache(cache, bio, 0); | 1614 | remap_to_cache(cache, bio, 0); |
1613 | 1615 | ||
1614 | /* | 1616 | /* |
1615 | * REQ_FLUSH is not directed at any particular block so we don't | 1617 | * REQ_PREFLUSH is not directed at any particular block so we don't |
1616 | * need to inc_ds(). REQ_FUA's are split into a write + REQ_FLUSH | 1618 | * need to inc_ds(). REQ_FUA's are split into a write + REQ_PREFLUSH |
1617 | * by dm-core. | 1619 | * by dm-core. |
1618 | */ | 1620 | */ |
1619 | issue(cache, bio); | 1621 | issue(cache, bio); |
@@ -1978,9 +1980,9 @@ static void process_deferred_bios(struct cache *cache) | |||
1978 | 1980 | ||
1979 | bio = bio_list_pop(&bios); | 1981 | bio = bio_list_pop(&bios); |
1980 | 1982 | ||
1981 | if (bio->bi_rw & REQ_FLUSH) | 1983 | if (bio->bi_rw & REQ_PREFLUSH) |
1982 | process_flush_bio(cache, bio); | 1984 | process_flush_bio(cache, bio); |
1983 | else if (bio->bi_rw & REQ_DISCARD) | 1985 | else if (bio_op(bio) == REQ_OP_DISCARD) |
1984 | process_discard_bio(cache, &structs, bio); | 1986 | process_discard_bio(cache, &structs, bio); |
1985 | else | 1987 | else |
1986 | process_bio(cache, &structs, bio); | 1988 | process_bio(cache, &structs, bio); |
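Because discard moved out of the flag word, predicates that used to test one mask, like discard_or_flush() above, now have to look in two places. A compact restatement of that shape:

    static bool example_discard_or_flush(struct bio *bio)
    {
            return bio_op(bio) == REQ_OP_DISCARD ||          /* op field  */
                   (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA));  /* flag word */
    }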
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 4f3cb3554944..96dd5d7e454a 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -1136,7 +1136,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone) | |||
1136 | clone->bi_private = io; | 1136 | clone->bi_private = io; |
1137 | clone->bi_end_io = crypt_endio; | 1137 | clone->bi_end_io = crypt_endio; |
1138 | clone->bi_bdev = cc->dev->bdev; | 1138 | clone->bi_bdev = cc->dev->bdev; |
1139 | clone->bi_rw = io->base_bio->bi_rw; | 1139 | bio_set_op_attrs(clone, bio_op(io->base_bio), io->base_bio->bi_rw); |
1140 | } | 1140 | } |
1141 | 1141 | ||
1142 | static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) | 1142 | static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) |
@@ -1911,11 +1911,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) | |||
1911 | struct crypt_config *cc = ti->private; | 1911 | struct crypt_config *cc = ti->private; |
1912 | 1912 | ||
1913 | /* | 1913 | /* |
1914 | * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues. | 1914 | * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues. |
1915 | * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight | 1915 | * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight |
1916 | * - for REQ_DISCARD caller must use flush if IO ordering matters | 1916 | * - for REQ_OP_DISCARD caller must use flush if IO ordering matters |
1917 | */ | 1917 | */ |
1918 | if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) { | 1918 | if (unlikely(bio->bi_rw & REQ_PREFLUSH || |
1919 | bio_op(bio) == REQ_OP_DISCARD)) { | ||
1919 | bio->bi_bdev = cc->dev->bdev; | 1920 | bio->bi_bdev = cc->dev->bdev; |
1920 | if (bio_sectors(bio)) | 1921 | if (bio_sectors(bio)) |
1921 | bio->bi_iter.bi_sector = cc->start + | 1922 | bio->bi_iter.bi_sector = cc->start + |
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c index 665bf3285618..2faf49d8f4d7 100644 --- a/drivers/md/dm-era-target.c +++ b/drivers/md/dm-era-target.c | |||
@@ -1540,9 +1540,9 @@ static int era_map(struct dm_target *ti, struct bio *bio) | |||
1540 | remap_to_origin(era, bio); | 1540 | remap_to_origin(era, bio); |
1541 | 1541 | ||
1542 | /* | 1542 | /* |
1543 | * REQ_FLUSH bios carry no data, so we're not interested in them. | 1543 | * REQ_PREFLUSH bios carry no data, so we're not interested in them. |
1544 | */ | 1544 | */ |
1545 | if (!(bio->bi_rw & REQ_FLUSH) && | 1545 | if (!(bio->bi_rw & REQ_PREFLUSH) && |
1546 | (bio_data_dir(bio) == WRITE) && | 1546 | (bio_data_dir(bio) == WRITE) && |
1547 | !metadata_current_marked(era->md, block)) { | 1547 | !metadata_current_marked(era->md, block)) { |
1548 | defer_bio(era, bio); | 1548 | defer_bio(era, bio); |
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index b7341de87015..29b99fb6a16a 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c | |||
@@ -266,7 +266,7 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) | |||
266 | data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value; | 266 | data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value; |
267 | 267 | ||
268 | DMDEBUG("Corrupting data bio=%p by writing %u to byte %u " | 268 | DMDEBUG("Corrupting data bio=%p by writing %u to byte %u " |
269 | "(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n", | 269 | "(rw=%c bi_rw=%u bi_sector=%llu cur_bytes=%u)\n", |
270 | bio, fc->corrupt_bio_value, fc->corrupt_bio_byte, | 270 | bio, fc->corrupt_bio_value, fc->corrupt_bio_byte, |
271 | (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw, | 271 | (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw, |
272 | (unsigned long long)bio->bi_iter.bi_sector, bio_bytes); | 272 | (unsigned long long)bio->bi_iter.bi_sector, bio_bytes); |
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 06d426eb5a30..0e225fd4a8d1 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c | |||
@@ -278,8 +278,9 @@ static void km_dp_init(struct dpages *dp, void *data) | |||
278 | /*----------------------------------------------------------------- | 278 | /*----------------------------------------------------------------- |
279 | * IO routines that accept a list of pages. | 279 | * IO routines that accept a list of pages. |
280 | *---------------------------------------------------------------*/ | 280 | *---------------------------------------------------------------*/ |
281 | static void do_region(int rw, unsigned region, struct dm_io_region *where, | 281 | static void do_region(int op, int op_flags, unsigned region, |
282 | struct dpages *dp, struct io *io) | 282 | struct dm_io_region *where, struct dpages *dp, |
283 | struct io *io) | ||
283 | { | 284 | { |
284 | struct bio *bio; | 285 | struct bio *bio; |
285 | struct page *page; | 286 | struct page *page; |
@@ -295,24 +296,25 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, | |||
295 | /* | 296 | /* |
296 | * Reject unsupported discard and write same requests. | 297 | * Reject unsupported discard and write same requests. |
297 | */ | 298 | */ |
298 | if (rw & REQ_DISCARD) | 299 | if (op == REQ_OP_DISCARD) |
299 | special_cmd_max_sectors = q->limits.max_discard_sectors; | 300 | special_cmd_max_sectors = q->limits.max_discard_sectors; |
300 | else if (rw & REQ_WRITE_SAME) | 301 | else if (op == REQ_OP_WRITE_SAME) |
301 | special_cmd_max_sectors = q->limits.max_write_same_sectors; | 302 | special_cmd_max_sectors = q->limits.max_write_same_sectors; |
302 | if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) { | 303 | if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_SAME) && |
304 | special_cmd_max_sectors == 0) { | ||
303 | dec_count(io, region, -EOPNOTSUPP); | 305 | dec_count(io, region, -EOPNOTSUPP); |
304 | return; | 306 | return; |
305 | } | 307 | } |
306 | 308 | ||
307 | /* | 309 | /* |
308 | * where->count may be zero if rw holds a flush and we need to | 310 | * where->count may be zero if op holds a flush and we need to |
309 | * send a zero-sized flush. | 311 | * send a zero-sized flush. |
310 | */ | 312 | */ |
311 | do { | 313 | do { |
312 | /* | 314 | /* |
313 | * Allocate a suitably sized-bio. | 315 | * Allocate a suitably sized-bio. |
314 | */ | 316 | */ |
315 | if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME)) | 317 | if ((op == REQ_OP_DISCARD) || (op == REQ_OP_WRITE_SAME)) |
316 | num_bvecs = 1; | 318 | num_bvecs = 1; |
317 | else | 319 | else |
318 | num_bvecs = min_t(int, BIO_MAX_PAGES, | 320 | num_bvecs = min_t(int, BIO_MAX_PAGES, |
@@ -322,13 +324,14 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, | |||
322 | bio->bi_iter.bi_sector = where->sector + (where->count - remaining); | 324 | bio->bi_iter.bi_sector = where->sector + (where->count - remaining); |
323 | bio->bi_bdev = where->bdev; | 325 | bio->bi_bdev = where->bdev; |
324 | bio->bi_end_io = endio; | 326 | bio->bi_end_io = endio; |
327 | bio_set_op_attrs(bio, op, op_flags); | ||
325 | store_io_and_region_in_bio(bio, io, region); | 328 | store_io_and_region_in_bio(bio, io, region); |
326 | 329 | ||
327 | if (rw & REQ_DISCARD) { | 330 | if (op == REQ_OP_DISCARD) { |
328 | num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining); | 331 | num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining); |
329 | bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; | 332 | bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; |
330 | remaining -= num_sectors; | 333 | remaining -= num_sectors; |
331 | } else if (rw & REQ_WRITE_SAME) { | 334 | } else if (op == REQ_OP_WRITE_SAME) { |
332 | /* | 335 | /* |
333 | * WRITE SAME only uses a single page. | 336 | * WRITE SAME only uses a single page. |
334 | */ | 337 | */ |
@@ -355,11 +358,11 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, | |||
355 | } | 358 | } |
356 | 359 | ||
357 | atomic_inc(&io->count); | 360 | atomic_inc(&io->count); |
358 | submit_bio(rw, bio); | 361 | submit_bio(bio); |
359 | } while (remaining); | 362 | } while (remaining); |
360 | } | 363 | } |
361 | 364 | ||
362 | static void dispatch_io(int rw, unsigned int num_regions, | 365 | static void dispatch_io(int op, int op_flags, unsigned int num_regions, |
363 | struct dm_io_region *where, struct dpages *dp, | 366 | struct dm_io_region *where, struct dpages *dp, |
364 | struct io *io, int sync) | 367 | struct io *io, int sync) |
365 | { | 368 | { |
@@ -369,7 +372,7 @@ static void dispatch_io(int rw, unsigned int num_regions, | |||
369 | BUG_ON(num_regions > DM_IO_MAX_REGIONS); | 372 | BUG_ON(num_regions > DM_IO_MAX_REGIONS); |
370 | 373 | ||
371 | if (sync) | 374 | if (sync) |
372 | rw |= REQ_SYNC; | 375 | op_flags |= REQ_SYNC; |
373 | 376 | ||
374 | /* | 377 | /* |
375 | * For multiple regions we need to be careful to rewind | 378 | * For multiple regions we need to be careful to rewind |
@@ -377,8 +380,8 @@ static void dispatch_io(int rw, unsigned int num_regions, | |||
377 | */ | 380 | */ |
378 | for (i = 0; i < num_regions; i++) { | 381 | for (i = 0; i < num_regions; i++) { |
379 | *dp = old_pages; | 382 | *dp = old_pages; |
380 | if (where[i].count || (rw & REQ_FLUSH)) | 383 | if (where[i].count || (op_flags & REQ_PREFLUSH)) |
381 | do_region(rw, i, where + i, dp, io); | 384 | do_region(op, op_flags, i, where + i, dp, io); |
382 | } | 385 | } |
383 | 386 | ||
384 | /* | 387 | /* |
@@ -402,13 +405,13 @@ static void sync_io_complete(unsigned long error, void *context) | |||
402 | } | 405 | } |
403 | 406 | ||
404 | static int sync_io(struct dm_io_client *client, unsigned int num_regions, | 407 | static int sync_io(struct dm_io_client *client, unsigned int num_regions, |
405 | struct dm_io_region *where, int rw, struct dpages *dp, | 408 | struct dm_io_region *where, int op, int op_flags, |
406 | unsigned long *error_bits) | 409 | struct dpages *dp, unsigned long *error_bits) |
407 | { | 410 | { |
408 | struct io *io; | 411 | struct io *io; |
409 | struct sync_io sio; | 412 | struct sync_io sio; |
410 | 413 | ||
411 | if (num_regions > 1 && (rw & RW_MASK) != WRITE) { | 414 | if (num_regions > 1 && !op_is_write(op)) { |
412 | WARN_ON(1); | 415 | WARN_ON(1); |
413 | return -EIO; | 416 | return -EIO; |
414 | } | 417 | } |
@@ -425,7 +428,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, | |||
425 | io->vma_invalidate_address = dp->vma_invalidate_address; | 428 | io->vma_invalidate_address = dp->vma_invalidate_address; |
426 | io->vma_invalidate_size = dp->vma_invalidate_size; | 429 | io->vma_invalidate_size = dp->vma_invalidate_size; |
427 | 430 | ||
428 | dispatch_io(rw, num_regions, where, dp, io, 1); | 431 | dispatch_io(op, op_flags, num_regions, where, dp, io, 1); |
429 | 432 | ||
430 | wait_for_completion_io(&sio.wait); | 433 | wait_for_completion_io(&sio.wait); |
431 | 434 | ||
@@ -436,12 +439,12 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, | |||
436 | } | 439 | } |
437 | 440 | ||
438 | static int async_io(struct dm_io_client *client, unsigned int num_regions, | 441 | static int async_io(struct dm_io_client *client, unsigned int num_regions, |
439 | struct dm_io_region *where, int rw, struct dpages *dp, | 442 | struct dm_io_region *where, int op, int op_flags, |
440 | io_notify_fn fn, void *context) | 443 | struct dpages *dp, io_notify_fn fn, void *context) |
441 | { | 444 | { |
442 | struct io *io; | 445 | struct io *io; |
443 | 446 | ||
444 | if (num_regions > 1 && (rw & RW_MASK) != WRITE) { | 447 | if (num_regions > 1 && !op_is_write(op)) { |
445 | WARN_ON(1); | 448 | WARN_ON(1); |
446 | fn(1, context); | 449 | fn(1, context); |
447 | return -EIO; | 450 | return -EIO; |
@@ -457,7 +460,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions, | |||
457 | io->vma_invalidate_address = dp->vma_invalidate_address; | 460 | io->vma_invalidate_address = dp->vma_invalidate_address; |
458 | io->vma_invalidate_size = dp->vma_invalidate_size; | 461 | io->vma_invalidate_size = dp->vma_invalidate_size; |
459 | 462 | ||
460 | dispatch_io(rw, num_regions, where, dp, io, 0); | 463 | dispatch_io(op, op_flags, num_regions, where, dp, io, 0); |
461 | return 0; | 464 | return 0; |
462 | } | 465 | } |
463 | 466 | ||
@@ -480,7 +483,7 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp, | |||
480 | 483 | ||
481 | case DM_IO_VMA: | 484 | case DM_IO_VMA: |
482 | flush_kernel_vmap_range(io_req->mem.ptr.vma, size); | 485 | flush_kernel_vmap_range(io_req->mem.ptr.vma, size); |
483 | if ((io_req->bi_rw & RW_MASK) == READ) { | 486 | if (io_req->bi_op == REQ_OP_READ) { |
484 | dp->vma_invalidate_address = io_req->mem.ptr.vma; | 487 | dp->vma_invalidate_address = io_req->mem.ptr.vma; |
485 | dp->vma_invalidate_size = size; | 488 | dp->vma_invalidate_size = size; |
486 | } | 489 | } |
@@ -518,10 +521,12 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions, | |||
518 | 521 | ||
519 | if (!io_req->notify.fn) | 522 | if (!io_req->notify.fn) |
520 | return sync_io(io_req->client, num_regions, where, | 523 | return sync_io(io_req->client, num_regions, where, |
521 | io_req->bi_rw, &dp, sync_error_bits); | 524 | io_req->bi_op, io_req->bi_op_flags, &dp, |
525 | sync_error_bits); | ||
522 | 526 | ||
523 | return async_io(io_req->client, num_regions, where, io_req->bi_rw, | 527 | return async_io(io_req->client, num_regions, where, io_req->bi_op, |
524 | &dp, io_req->notify.fn, io_req->notify.context); | 528 | io_req->bi_op_flags, &dp, io_req->notify.fn, |
529 | io_req->notify.context); | ||
525 | } | 530 | } |
526 | EXPORT_SYMBOL(dm_io); | 531 | EXPORT_SYMBOL(dm_io); |
527 | 532 | ||
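Within dm-io the (op, op_flags) pair is carried end to end: sync-ness is ORed into the flags, and a zero-length region is still dispatched when it carries a preflush. A sketch of that dispatch shape, assuming the dpages/io types and do_region() from the hunks above:

    static void example_dispatch(int op, int op_flags, unsigned num_regions,
                                 struct dm_io_region *where, struct dpages *dp,
                                 struct io *io, int sync)
    {
            unsigned i;

            if (sync)
                    op_flags |= REQ_SYNC;   /* sync-ness is a flag, not an op */

            for (i = 0; i < num_regions; i++) {
                    /* a zero-length region still goes out if it is a preflush */
                    if (where[i].count || (op_flags & REQ_PREFLUSH))
                            do_region(op, op_flags, i, where + i, dp, io);
            }
    }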
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index 1452ed9aacb4..9da1d54ac6cb 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c | |||
@@ -465,7 +465,7 @@ static void complete_io(unsigned long error, void *context) | |||
465 | io_job_finish(kc->throttle); | 465 | io_job_finish(kc->throttle); |
466 | 466 | ||
467 | if (error) { | 467 | if (error) { |
468 | if (job->rw & WRITE) | 468 | if (op_is_write(job->rw)) |
469 | job->write_err |= error; | 469 | job->write_err |= error; |
470 | else | 470 | else |
471 | job->read_err = 1; | 471 | job->read_err = 1; |
@@ -477,7 +477,7 @@ static void complete_io(unsigned long error, void *context) | |||
477 | } | 477 | } |
478 | } | 478 | } |
479 | 479 | ||
480 | if (job->rw & WRITE) | 480 | if (op_is_write(job->rw)) |
481 | push(&kc->complete_jobs, job); | 481 | push(&kc->complete_jobs, job); |
482 | 482 | ||
483 | else { | 483 | else { |
@@ -496,7 +496,8 @@ static int run_io_job(struct kcopyd_job *job) | |||
496 | { | 496 | { |
497 | int r; | 497 | int r; |
498 | struct dm_io_request io_req = { | 498 | struct dm_io_request io_req = { |
499 | .bi_rw = job->rw, | 499 | .bi_op = job->rw, |
500 | .bi_op_flags = 0, | ||
500 | .mem.type = DM_IO_PAGE_LIST, | 501 | .mem.type = DM_IO_PAGE_LIST, |
501 | .mem.ptr.pl = job->pages, | 502 | .mem.ptr.pl = job->pages, |
502 | .mem.offset = 0, | 503 | .mem.offset = 0, |
@@ -550,7 +551,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc, | |||
550 | 551 | ||
551 | if (r < 0) { | 552 | if (r < 0) { |
552 | /* error this rogue job */ | 553 | /* error this rogue job */ |
553 | if (job->rw & WRITE) | 554 | if (op_is_write(job->rw)) |
554 | job->write_err = (unsigned long) -1L; | 555 | job->write_err = (unsigned long) -1L; |
555 | else | 556 | else |
556 | job->read_err = 1; | 557 | job->read_err = 1; |
@@ -734,7 +735,7 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from, | |||
734 | /* | 735 | /* |
735 | * Use WRITE SAME to optimize zeroing if all dests support it. | 736 | * Use WRITE SAME to optimize zeroing if all dests support it. |
736 | */ | 737 | */ |
737 | job->rw = WRITE | REQ_WRITE_SAME; | 738 | job->rw = REQ_OP_WRITE_SAME; |
738 | for (i = 0; i < job->num_dests; i++) | 739 | for (i = 0; i < job->num_dests; i++) |
739 | if (!bdev_write_same(job->dests[i].bdev)) { | 740 | if (!bdev_write_same(job->dests[i].bdev)) { |
740 | job->rw = WRITE; | 741 | job->rw = WRITE; |
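In kcopyd, job->rw now holds either a plain direction (WRITE) or a specific operation such as REQ_OP_WRITE_SAME, and op_is_write() classifies both forms, which is what lets the fallback above work. A sketch of the zeroing fallback, with struct kcopyd_job taken from dm-kcopyd.c and dest_bdev standing in for the per-destination device:

    static int example_pick_op(struct kcopyd_job *job,
                               struct block_device *dest_bdev)
    {
            /* prefer WRITE SAME for zeroing, fall back to plain writes */
            job->rw = REQ_OP_WRITE_SAME;
            if (!bdev_write_same(dest_bdev))
                    job->rw = WRITE;

            return op_is_write(job->rw);    /* true either way */
    }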
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c index 608302e222af..b5dbf7a0515e 100644 --- a/drivers/md/dm-log-writes.c +++ b/drivers/md/dm-log-writes.c | |||
@@ -205,6 +205,7 @@ static int write_metadata(struct log_writes_c *lc, void *entry, | |||
205 | bio->bi_bdev = lc->logdev->bdev; | 205 | bio->bi_bdev = lc->logdev->bdev; |
206 | bio->bi_end_io = log_end_io; | 206 | bio->bi_end_io = log_end_io; |
207 | bio->bi_private = lc; | 207 | bio->bi_private = lc; |
208 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); | ||
208 | 209 | ||
209 | page = alloc_page(GFP_KERNEL); | 210 | page = alloc_page(GFP_KERNEL); |
210 | if (!page) { | 211 | if (!page) { |
@@ -226,7 +227,7 @@ static int write_metadata(struct log_writes_c *lc, void *entry, | |||
226 | DMERR("Couldn't add page to the log block"); | 227 | DMERR("Couldn't add page to the log block"); |
227 | goto error_bio; | 228 | goto error_bio; |
228 | } | 229 | } |
229 | submit_bio(WRITE, bio); | 230 | submit_bio(bio); |
230 | return 0; | 231 | return 0; |
231 | error_bio: | 232 | error_bio: |
232 | bio_put(bio); | 233 | bio_put(bio); |
@@ -269,6 +270,7 @@ static int log_one_block(struct log_writes_c *lc, | |||
269 | bio->bi_bdev = lc->logdev->bdev; | 270 | bio->bi_bdev = lc->logdev->bdev; |
270 | bio->bi_end_io = log_end_io; | 271 | bio->bi_end_io = log_end_io; |
271 | bio->bi_private = lc; | 272 | bio->bi_private = lc; |
273 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); | ||
272 | 274 | ||
273 | for (i = 0; i < block->vec_cnt; i++) { | 275 | for (i = 0; i < block->vec_cnt; i++) { |
274 | /* | 276 | /* |
@@ -279,7 +281,7 @@ static int log_one_block(struct log_writes_c *lc, | |||
279 | block->vecs[i].bv_len, 0); | 281 | block->vecs[i].bv_len, 0); |
280 | if (ret != block->vecs[i].bv_len) { | 282 | if (ret != block->vecs[i].bv_len) { |
281 | atomic_inc(&lc->io_blocks); | 283 | atomic_inc(&lc->io_blocks); |
282 | submit_bio(WRITE, bio); | 284 | submit_bio(bio); |
283 | bio = bio_alloc(GFP_KERNEL, block->vec_cnt - i); | 285 | bio = bio_alloc(GFP_KERNEL, block->vec_cnt - i); |
284 | if (!bio) { | 286 | if (!bio) { |
285 | DMERR("Couldn't alloc log bio"); | 287 | DMERR("Couldn't alloc log bio"); |
@@ -290,6 +292,7 @@ static int log_one_block(struct log_writes_c *lc, | |||
290 | bio->bi_bdev = lc->logdev->bdev; | 292 | bio->bi_bdev = lc->logdev->bdev; |
291 | bio->bi_end_io = log_end_io; | 293 | bio->bi_end_io = log_end_io; |
292 | bio->bi_private = lc; | 294 | bio->bi_private = lc; |
295 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); | ||
293 | 296 | ||
294 | ret = bio_add_page(bio, block->vecs[i].bv_page, | 297 | ret = bio_add_page(bio, block->vecs[i].bv_page, |
295 | block->vecs[i].bv_len, 0); | 298 | block->vecs[i].bv_len, 0); |
@@ -301,7 +304,7 @@ static int log_one_block(struct log_writes_c *lc, | |||
301 | } | 304 | } |
302 | sector += block->vecs[i].bv_len >> SECTOR_SHIFT; | 305 | sector += block->vecs[i].bv_len >> SECTOR_SHIFT; |
303 | } | 306 | } |
304 | submit_bio(WRITE, bio); | 307 | submit_bio(bio); |
305 | out: | 308 | out: |
306 | kfree(block->data); | 309 | kfree(block->data); |
307 | kfree(block); | 310 | kfree(block); |
@@ -552,9 +555,9 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio) | |||
552 | struct bio_vec bv; | 555 | struct bio_vec bv; |
553 | size_t alloc_size; | 556 | size_t alloc_size; |
554 | int i = 0; | 557 | int i = 0; |
555 | bool flush_bio = (bio->bi_rw & REQ_FLUSH); | 558 | bool flush_bio = (bio->bi_rw & REQ_PREFLUSH); |
556 | bool fua_bio = (bio->bi_rw & REQ_FUA); | 559 | bool fua_bio = (bio->bi_rw & REQ_FUA); |
557 | bool discard_bio = (bio->bi_rw & REQ_DISCARD); | 560 | bool discard_bio = (bio_op(bio) == REQ_OP_DISCARD); |
558 | 561 | ||
559 | pb->block = NULL; | 562 | pb->block = NULL; |
560 | 563 | ||
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index 627d19186d5a..4ca2d1df5b44 100644 --- a/drivers/md/dm-log.c +++ b/drivers/md/dm-log.c | |||
@@ -293,7 +293,7 @@ static void header_from_disk(struct log_header_core *core, struct log_header_dis | |||
293 | 293 | ||
294 | static int rw_header(struct log_c *lc, int rw) | 294 | static int rw_header(struct log_c *lc, int rw) |
295 | { | 295 | { |
296 | lc->io_req.bi_rw = rw; | 296 | lc->io_req.bi_op = rw; |
297 | 297 | ||
298 | return dm_io(&lc->io_req, 1, &lc->header_location, NULL); | 298 | return dm_io(&lc->io_req, 1, &lc->header_location, NULL); |
299 | } | 299 | } |
@@ -306,7 +306,8 @@ static int flush_header(struct log_c *lc) | |||
306 | .count = 0, | 306 | .count = 0, |
307 | }; | 307 | }; |
308 | 308 | ||
309 | lc->io_req.bi_rw = WRITE_FLUSH; | 309 | lc->io_req.bi_op = REQ_OP_WRITE; |
310 | lc->io_req.bi_op_flags = WRITE_FLUSH; | ||
310 | 311 | ||
311 | return dm_io(&lc->io_req, 1, &null_location, NULL); | 312 | return dm_io(&lc->io_req, 1, &null_location, NULL); |
312 | } | 313 | } |
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 52532745a50f..8cbac62b1602 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c | |||
@@ -792,7 +792,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size) | |||
792 | if (rdev->sb_loaded) | 792 | if (rdev->sb_loaded) |
793 | return 0; | 793 | return 0; |
794 | 794 | ||
795 | if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) { | 795 | if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, 1)) { |
796 | DMERR("Failed to read superblock of device at position %d", | 796 | DMERR("Failed to read superblock of device at position %d", |
797 | rdev->raid_disk); | 797 | rdev->raid_disk); |
798 | md_error(rdev->mddev, rdev); | 798 | md_error(rdev->mddev, rdev); |
@@ -1651,7 +1651,8 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs) | |||
1651 | for (i = 0; i < rs->md.raid_disks; i++) { | 1651 | for (i = 0; i < rs->md.raid_disks; i++) { |
1652 | r = &rs->dev[i].rdev; | 1652 | r = &rs->dev[i].rdev; |
1653 | if (test_bit(Faulty, &r->flags) && r->sb_page && | 1653 | if (test_bit(Faulty, &r->flags) && r->sb_page && |
1654 | sync_page_io(r, 0, r->sb_size, r->sb_page, READ, 1)) { | 1654 | sync_page_io(r, 0, r->sb_size, r->sb_page, REQ_OP_READ, 0, |
1655 | 1)) { | ||
1655 | DMINFO("Faulty %s device #%d has readable super block." | 1656 | DMINFO("Faulty %s device #%d has readable super block." |
1656 | " Attempting to revive it.", | 1657 | " Attempting to revive it.", |
1657 | rs->raid_type->name, i); | 1658 | rs->raid_type->name, i); |
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index b3ccf1e0d4f2..9f5f460c0e92 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c | |||
@@ -260,7 +260,8 @@ static int mirror_flush(struct dm_target *ti) | |||
260 | struct dm_io_region io[ms->nr_mirrors]; | 260 | struct dm_io_region io[ms->nr_mirrors]; |
261 | struct mirror *m; | 261 | struct mirror *m; |
262 | struct dm_io_request io_req = { | 262 | struct dm_io_request io_req = { |
263 | .bi_rw = WRITE_FLUSH, | 263 | .bi_op = REQ_OP_WRITE, |
264 | .bi_op_flags = WRITE_FLUSH, | ||
264 | .mem.type = DM_IO_KMEM, | 265 | .mem.type = DM_IO_KMEM, |
265 | .mem.ptr.addr = NULL, | 266 | .mem.ptr.addr = NULL, |
266 | .client = ms->io_client, | 267 | .client = ms->io_client, |
@@ -541,7 +542,8 @@ static void read_async_bio(struct mirror *m, struct bio *bio) | |||
541 | { | 542 | { |
542 | struct dm_io_region io; | 543 | struct dm_io_region io; |
543 | struct dm_io_request io_req = { | 544 | struct dm_io_request io_req = { |
544 | .bi_rw = READ, | 545 | .bi_op = REQ_OP_READ, |
546 | .bi_op_flags = 0, | ||
545 | .mem.type = DM_IO_BIO, | 547 | .mem.type = DM_IO_BIO, |
546 | .mem.ptr.bio = bio, | 548 | .mem.ptr.bio = bio, |
547 | .notify.fn = read_callback, | 549 | .notify.fn = read_callback, |
@@ -624,7 +626,7 @@ static void write_callback(unsigned long error, void *context) | |||
624 | * If the bio is discard, return an error, but do not | 626 | * If the bio is discard, return an error, but do not |
625 | * degrade the array. | 627 | * degrade the array. |
626 | */ | 628 | */ |
627 | if (bio->bi_rw & REQ_DISCARD) { | 629 | if (bio_op(bio) == REQ_OP_DISCARD) { |
628 | bio->bi_error = -EOPNOTSUPP; | 630 | bio->bi_error = -EOPNOTSUPP; |
629 | bio_endio(bio); | 631 | bio_endio(bio); |
630 | return; | 632 | return; |
@@ -654,7 +656,8 @@ static void do_write(struct mirror_set *ms, struct bio *bio) | |||
654 | struct dm_io_region io[ms->nr_mirrors], *dest = io; | 656 | struct dm_io_region io[ms->nr_mirrors], *dest = io; |
655 | struct mirror *m; | 657 | struct mirror *m; |
656 | struct dm_io_request io_req = { | 658 | struct dm_io_request io_req = { |
657 | .bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA), | 659 | .bi_op = REQ_OP_WRITE, |
660 | .bi_op_flags = bio->bi_rw & WRITE_FLUSH_FUA, | ||
658 | .mem.type = DM_IO_BIO, | 661 | .mem.type = DM_IO_BIO, |
659 | .mem.ptr.bio = bio, | 662 | .mem.ptr.bio = bio, |
660 | .notify.fn = write_callback, | 663 | .notify.fn = write_callback, |
@@ -662,8 +665,8 @@ static void do_write(struct mirror_set *ms, struct bio *bio) | |||
662 | .client = ms->io_client, | 665 | .client = ms->io_client, |
663 | }; | 666 | }; |
664 | 667 | ||
665 | if (bio->bi_rw & REQ_DISCARD) { | 668 | if (bio_op(bio) == REQ_OP_DISCARD) { |
666 | io_req.bi_rw |= REQ_DISCARD; | 669 | io_req.bi_op = REQ_OP_DISCARD; |
667 | io_req.mem.type = DM_IO_KMEM; | 670 | io_req.mem.type = DM_IO_KMEM; |
668 | io_req.mem.ptr.addr = NULL; | 671 | io_req.mem.ptr.addr = NULL; |
669 | } | 672 | } |
@@ -701,8 +704,8 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) | |||
701 | bio_list_init(&requeue); | 704 | bio_list_init(&requeue); |
702 | 705 | ||
703 | while ((bio = bio_list_pop(writes))) { | 706 | while ((bio = bio_list_pop(writes))) { |
704 | if ((bio->bi_rw & REQ_FLUSH) || | 707 | if ((bio->bi_rw & REQ_PREFLUSH) || |
705 | (bio->bi_rw & REQ_DISCARD)) { | 708 | (bio_op(bio) == REQ_OP_DISCARD)) { |
706 | bio_list_add(&sync, bio); | 709 | bio_list_add(&sync, bio); |
707 | continue; | 710 | continue; |
708 | } | 711 | } |
@@ -1250,7 +1253,8 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error) | |||
1250 | * We need to dec pending if this was a write. | 1253 | * We need to dec pending if this was a write. |
1251 | */ | 1254 | */ |
1252 | if (rw == WRITE) { | 1255 | if (rw == WRITE) { |
1253 | if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) | 1256 | if (!(bio->bi_rw & REQ_PREFLUSH) && |
1257 | bio_op(bio) != REQ_OP_DISCARD) | ||
1254 | dm_rh_dec(ms->rh, bio_record->write_region); | 1258 | dm_rh_dec(ms->rh, bio_record->write_region); |
1255 | return error; | 1259 | return error; |
1256 | } | 1260 | } |
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c index 74cb7b991d41..b11813431f31 100644 --- a/drivers/md/dm-region-hash.c +++ b/drivers/md/dm-region-hash.c | |||
@@ -398,12 +398,12 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio) | |||
398 | region_t region = dm_rh_bio_to_region(rh, bio); | 398 | region_t region = dm_rh_bio_to_region(rh, bio); |
399 | int recovering = 0; | 399 | int recovering = 0; |
400 | 400 | ||
401 | if (bio->bi_rw & REQ_FLUSH) { | 401 | if (bio->bi_rw & REQ_PREFLUSH) { |
402 | rh->flush_failure = 1; | 402 | rh->flush_failure = 1; |
403 | return; | 403 | return; |
404 | } | 404 | } |
405 | 405 | ||
406 | if (bio->bi_rw & REQ_DISCARD) | 406 | if (bio_op(bio) == REQ_OP_DISCARD) |
407 | return; | 407 | return; |
408 | 408 | ||
409 | /* We must inform the log that the sync count has changed. */ | 409 | /* We must inform the log that the sync count has changed. */ |
@@ -526,7 +526,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios) | |||
526 | struct bio *bio; | 526 | struct bio *bio; |
527 | 527 | ||
528 | for (bio = bios->head; bio; bio = bio->bi_next) { | 528 | for (bio = bios->head; bio; bio = bio->bi_next) { |
529 | if (bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)) | 529 | if (bio->bi_rw & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD) |
530 | continue; | 530 | continue; |
531 | rh_inc(rh, dm_rh_bio_to_region(rh, bio)); | 531 | rh_inc(rh, dm_rh_bio_to_region(rh, bio)); |
532 | } | 532 | } |
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 4d3909393f2c..b8cf956b577b 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c | |||
@@ -226,8 +226,8 @@ static void do_metadata(struct work_struct *work) | |||
226 | /* | 226 | /* |
227 | * Read or write a chunk aligned and sized block of data from a device. | 227 | * Read or write a chunk aligned and sized block of data from a device. |
228 | */ | 228 | */ |
229 | static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw, | 229 | static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int op, |
230 | int metadata) | 230 | int op_flags, int metadata) |
231 | { | 231 | { |
232 | struct dm_io_region where = { | 232 | struct dm_io_region where = { |
233 | .bdev = dm_snap_cow(ps->store->snap)->bdev, | 233 | .bdev = dm_snap_cow(ps->store->snap)->bdev, |
@@ -235,7 +235,8 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw, | |||
235 | .count = ps->store->chunk_size, | 235 | .count = ps->store->chunk_size, |
236 | }; | 236 | }; |
237 | struct dm_io_request io_req = { | 237 | struct dm_io_request io_req = { |
238 | .bi_rw = rw, | 238 | .bi_op = op, |
239 | .bi_op_flags = op_flags, | ||
239 | .mem.type = DM_IO_VMA, | 240 | .mem.type = DM_IO_VMA, |
240 | .mem.ptr.vma = area, | 241 | .mem.ptr.vma = area, |
241 | .client = ps->io_client, | 242 | .client = ps->io_client, |
@@ -281,14 +282,14 @@ static void skip_metadata(struct pstore *ps) | |||
281 | * Read or write a metadata area. Remembering to skip the first | 282 | * Read or write a metadata area. Remembering to skip the first |
282 | * chunk which holds the header. | 283 | * chunk which holds the header. |
283 | */ | 284 | */ |
284 | static int area_io(struct pstore *ps, int rw) | 285 | static int area_io(struct pstore *ps, int op, int op_flags) |
285 | { | 286 | { |
286 | int r; | 287 | int r; |
287 | chunk_t chunk; | 288 | chunk_t chunk; |
288 | 289 | ||
289 | chunk = area_location(ps, ps->current_area); | 290 | chunk = area_location(ps, ps->current_area); |
290 | 291 | ||
291 | r = chunk_io(ps, ps->area, chunk, rw, 0); | 292 | r = chunk_io(ps, ps->area, chunk, op, op_flags, 0); |
292 | if (r) | 293 | if (r) |
293 | return r; | 294 | return r; |
294 | 295 | ||
@@ -302,7 +303,8 @@ static void zero_memory_area(struct pstore *ps) | |||
302 | 303 | ||
303 | static int zero_disk_area(struct pstore *ps, chunk_t area) | 304 | static int zero_disk_area(struct pstore *ps, chunk_t area) |
304 | { | 305 | { |
305 | return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0); | 306 | return chunk_io(ps, ps->zero_area, area_location(ps, area), |
307 | REQ_OP_WRITE, 0, 0); | ||
306 | } | 308 | } |
307 | 309 | ||
308 | static int read_header(struct pstore *ps, int *new_snapshot) | 310 | static int read_header(struct pstore *ps, int *new_snapshot) |
@@ -334,7 +336,7 @@ static int read_header(struct pstore *ps, int *new_snapshot) | |||
334 | if (r) | 336 | if (r) |
335 | return r; | 337 | return r; |
336 | 338 | ||
337 | r = chunk_io(ps, ps->header_area, 0, READ, 1); | 339 | r = chunk_io(ps, ps->header_area, 0, REQ_OP_READ, 0, 1); |
338 | if (r) | 340 | if (r) |
339 | goto bad; | 341 | goto bad; |
340 | 342 | ||
@@ -395,7 +397,7 @@ static int write_header(struct pstore *ps) | |||
395 | dh->version = cpu_to_le32(ps->version); | 397 | dh->version = cpu_to_le32(ps->version); |
396 | dh->chunk_size = cpu_to_le32(ps->store->chunk_size); | 398 | dh->chunk_size = cpu_to_le32(ps->store->chunk_size); |
397 | 399 | ||
398 | return chunk_io(ps, ps->header_area, 0, WRITE, 1); | 400 | return chunk_io(ps, ps->header_area, 0, REQ_OP_WRITE, 0, 1); |
399 | } | 401 | } |
400 | 402 | ||
401 | /* | 403 | /* |
@@ -739,7 +741,7 @@ static void persistent_commit_exception(struct dm_exception_store *store, | |||
739 | /* | 741 | /* |
740 | * Commit exceptions to disk. | 742 | * Commit exceptions to disk. |
741 | */ | 743 | */ |
742 | if (ps->valid && area_io(ps, WRITE_FLUSH_FUA)) | 744 | if (ps->valid && area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA)) |
743 | ps->valid = 0; | 745 | ps->valid = 0; |
744 | 746 | ||
745 | /* | 747 | /* |
@@ -779,7 +781,7 @@ static int persistent_prepare_merge(struct dm_exception_store *store, | |||
779 | return 0; | 781 | return 0; |
780 | 782 | ||
781 | ps->current_area--; | 783 | ps->current_area--; |
782 | r = area_io(ps, READ); | 784 | r = area_io(ps, REQ_OP_READ, 0); |
783 | if (r < 0) | 785 | if (r < 0) |
784 | return r; | 786 | return r; |
785 | ps->current_committed = ps->exceptions_per_area; | 787 | ps->current_committed = ps->exceptions_per_area; |
@@ -816,7 +818,7 @@ static int persistent_commit_merge(struct dm_exception_store *store, | |||
816 | for (i = 0; i < nr_merged; i++) | 818 | for (i = 0; i < nr_merged; i++) |
817 | clear_exception(ps, ps->current_committed - 1 - i); | 819 | clear_exception(ps, ps->current_committed - 1 - i); |
818 | 820 | ||
819 | r = area_io(ps, WRITE_FLUSH_FUA); | 821 | r = area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA); |
820 | if (r < 0) | 822 | if (r < 0) |
821 | return r; | 823 | return r; |
822 | 824 | ||
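The persistent snapshot store shows why the separate flags argument matters: commits need the full WRITE_FLUSH_FUA ordering, while the merge rewind path reads with no flags at all. A sketch using the converted area_io() above, with struct pstore as defined in dm-snap-persistent.c:

    static int example_area_write(struct pstore *ps, bool commit)
    {
            /* commits require flush + FUA ordering; other writes do not */
            return area_io(ps, REQ_OP_WRITE,
                           commit ? WRITE_FLUSH_FUA : 0);
    }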
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 70bb0e8b62ce..69ab1ff5f5c9 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c | |||
@@ -1680,7 +1680,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) | |||
1680 | 1680 | ||
1681 | init_tracked_chunk(bio); | 1681 | init_tracked_chunk(bio); |
1682 | 1682 | ||
1683 | if (bio->bi_rw & REQ_FLUSH) { | 1683 | if (bio->bi_rw & REQ_PREFLUSH) { |
1684 | bio->bi_bdev = s->cow->bdev; | 1684 | bio->bi_bdev = s->cow->bdev; |
1685 | return DM_MAPIO_REMAPPED; | 1685 | return DM_MAPIO_REMAPPED; |
1686 | } | 1686 | } |
@@ -1799,7 +1799,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio) | |||
1799 | 1799 | ||
1800 | init_tracked_chunk(bio); | 1800 | init_tracked_chunk(bio); |
1801 | 1801 | ||
1802 | if (bio->bi_rw & REQ_FLUSH) { | 1802 | if (bio->bi_rw & REQ_PREFLUSH) { |
1803 | if (!dm_bio_get_target_bio_nr(bio)) | 1803 | if (!dm_bio_get_target_bio_nr(bio)) |
1804 | bio->bi_bdev = s->origin->bdev; | 1804 | bio->bi_bdev = s->origin->bdev; |
1805 | else | 1805 | else |
@@ -2285,7 +2285,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio) | |||
2285 | 2285 | ||
2286 | bio->bi_bdev = o->dev->bdev; | 2286 | bio->bi_bdev = o->dev->bdev; |
2287 | 2287 | ||
2288 | if (unlikely(bio->bi_rw & REQ_FLUSH)) | 2288 | if (unlikely(bio->bi_rw & REQ_PREFLUSH)) |
2289 | return DM_MAPIO_REMAPPED; | 2289 | return DM_MAPIO_REMAPPED; |
2290 | 2290 | ||
2291 | if (bio_rw(bio) != WRITE) | 2291 | if (bio_rw(bio) != WRITE) |
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c index 8289804ccd99..4fba26cd6bdb 100644 --- a/drivers/md/dm-stats.c +++ b/drivers/md/dm-stats.c | |||
@@ -514,11 +514,10 @@ static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared, | |||
514 | } | 514 | } |
515 | 515 | ||
516 | static void dm_stat_for_entry(struct dm_stat *s, size_t entry, | 516 | static void dm_stat_for_entry(struct dm_stat *s, size_t entry, |
517 | unsigned long bi_rw, sector_t len, | 517 | int idx, sector_t len, |
518 | struct dm_stats_aux *stats_aux, bool end, | 518 | struct dm_stats_aux *stats_aux, bool end, |
519 | unsigned long duration_jiffies) | 519 | unsigned long duration_jiffies) |
520 | { | 520 | { |
521 | unsigned long idx = bi_rw & REQ_WRITE; | ||
522 | struct dm_stat_shared *shared = &s->stat_shared[entry]; | 521 | struct dm_stat_shared *shared = &s->stat_shared[entry]; |
523 | struct dm_stat_percpu *p; | 522 | struct dm_stat_percpu *p; |
524 | 523 | ||
@@ -584,7 +583,7 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry, | |||
584 | #endif | 583 | #endif |
585 | } | 584 | } |
586 | 585 | ||
587 | static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw, | 586 | static void __dm_stat_bio(struct dm_stat *s, int bi_rw, |
588 | sector_t bi_sector, sector_t end_sector, | 587 | sector_t bi_sector, sector_t end_sector, |
589 | bool end, unsigned long duration_jiffies, | 588 | bool end, unsigned long duration_jiffies, |
590 | struct dm_stats_aux *stats_aux) | 589 | struct dm_stats_aux *stats_aux) |
@@ -645,8 +644,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw, | |||
645 | last = raw_cpu_ptr(stats->last); | 644 | last = raw_cpu_ptr(stats->last); |
646 | stats_aux->merged = | 645 | stats_aux->merged = |
647 | (bi_sector == (ACCESS_ONCE(last->last_sector) && | 646 | (bi_sector == (ACCESS_ONCE(last->last_sector) && |
648 | ((bi_rw & (REQ_WRITE | REQ_DISCARD)) == | 647 | ((bi_rw == WRITE) == |
649 | (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD))) | 648 | (ACCESS_ONCE(last->last_rw) == WRITE)) |
650 | )); | 649 | )); |
651 | ACCESS_ONCE(last->last_sector) = end_sector; | 650 | ACCESS_ONCE(last->last_sector) = end_sector; |
652 | ACCESS_ONCE(last->last_rw) = bi_rw; | 651 | ACCESS_ONCE(last->last_rw) = bi_rw; |
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index 797ddb900b06..48f1c01d7b9f 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c | |||
@@ -286,14 +286,14 @@ static int stripe_map(struct dm_target *ti, struct bio *bio) | |||
286 | uint32_t stripe; | 286 | uint32_t stripe; |
287 | unsigned target_bio_nr; | 287 | unsigned target_bio_nr; |
288 | 288 | ||
289 | if (bio->bi_rw & REQ_FLUSH) { | 289 | if (bio->bi_rw & REQ_PREFLUSH) { |
290 | target_bio_nr = dm_bio_get_target_bio_nr(bio); | 290 | target_bio_nr = dm_bio_get_target_bio_nr(bio); |
291 | BUG_ON(target_bio_nr >= sc->stripes); | 291 | BUG_ON(target_bio_nr >= sc->stripes); |
292 | bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev; | 292 | bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev; |
293 | return DM_MAPIO_REMAPPED; | 293 | return DM_MAPIO_REMAPPED; |
294 | } | 294 | } |
295 | if (unlikely(bio->bi_rw & REQ_DISCARD) || | 295 | if (unlikely(bio_op(bio) == REQ_OP_DISCARD) || |
296 | unlikely(bio->bi_rw & REQ_WRITE_SAME)) { | 296 | unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) { |
297 | target_bio_nr = dm_bio_get_target_bio_nr(bio); | 297 | target_bio_nr = dm_bio_get_target_bio_nr(bio); |
298 | BUG_ON(target_bio_nr >= sc->stripes); | 298 | BUG_ON(target_bio_nr >= sc->stripes); |
299 | return stripe_map_range(sc, bio, target_bio_nr); | 299 | return stripe_map_range(sc, bio, target_bio_nr); |
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index fc803d50f9f0..5f9e3d799d66 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
@@ -360,7 +360,7 @@ static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t da | |||
360 | sector_t len = block_to_sectors(tc->pool, data_e - data_b); | 360 | sector_t len = block_to_sectors(tc->pool, data_e - data_b); |
361 | 361 | ||
362 | return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, | 362 | return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, |
363 | GFP_NOWAIT, REQ_WRITE | REQ_DISCARD, &op->bio); | 363 | GFP_NOWAIT, 0, &op->bio); |
364 | } | 364 | } |
365 | 365 | ||
366 | static void end_discard(struct discard_op *op, int r) | 366 | static void end_discard(struct discard_op *op, int r) |
@@ -371,7 +371,8 @@ static void end_discard(struct discard_op *op, int r) | |||
371 | * need to wait for the chain to complete. | 371 | * need to wait for the chain to complete. |
372 | */ | 372 | */ |
373 | bio_chain(op->bio, op->parent_bio); | 373 | bio_chain(op->bio, op->parent_bio); |
374 | submit_bio(REQ_WRITE | REQ_DISCARD, op->bio); | 374 | bio_set_op_attrs(op->bio, REQ_OP_DISCARD, 0); |
375 | submit_bio(op->bio); | ||
375 | } | 376 | } |
376 | 377 | ||
377 | blk_finish_plug(&op->plug); | 378 | blk_finish_plug(&op->plug); |
@@ -696,7 +697,7 @@ static void remap_to_origin(struct thin_c *tc, struct bio *bio) | |||
696 | 697 | ||
697 | static int bio_triggers_commit(struct thin_c *tc, struct bio *bio) | 698 | static int bio_triggers_commit(struct thin_c *tc, struct bio *bio) |
698 | { | 699 | { |
699 | return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && | 700 | return (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) && |
700 | dm_thin_changed_this_transaction(tc->td); | 701 | dm_thin_changed_this_transaction(tc->td); |
701 | } | 702 | } |
702 | 703 | ||
@@ -704,7 +705,7 @@ static void inc_all_io_entry(struct pool *pool, struct bio *bio) | |||
704 | { | 705 | { |
705 | struct dm_thin_endio_hook *h; | 706 | struct dm_thin_endio_hook *h; |
706 | 707 | ||
707 | if (bio->bi_rw & REQ_DISCARD) | 708 | if (bio_op(bio) == REQ_OP_DISCARD) |
708 | return; | 709 | return; |
709 | 710 | ||
710 | h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); | 711 | h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); |
@@ -867,7 +868,8 @@ static void __inc_remap_and_issue_cell(void *context, | |||
867 | struct bio *bio; | 868 | struct bio *bio; |
868 | 869 | ||
869 | while ((bio = bio_list_pop(&cell->bios))) { | 870 | while ((bio = bio_list_pop(&cell->bios))) { |
870 | if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) | 871 | if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) || |
872 | bio_op(bio) == REQ_OP_DISCARD) | ||
871 | bio_list_add(&info->defer_bios, bio); | 873 | bio_list_add(&info->defer_bios, bio); |
872 | else { | 874 | else { |
873 | inc_all_io_entry(info->tc->pool, bio); | 875 | inc_all_io_entry(info->tc->pool, bio); |
@@ -1639,7 +1641,8 @@ static void __remap_and_issue_shared_cell(void *context, | |||
1639 | 1641 | ||
1640 | while ((bio = bio_list_pop(&cell->bios))) { | 1642 | while ((bio = bio_list_pop(&cell->bios))) { |
1641 | if ((bio_data_dir(bio) == WRITE) || | 1643 | if ((bio_data_dir(bio) == WRITE) || |
1642 | (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA))) | 1644 | (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) || |
1645 | bio_op(bio) == REQ_OP_DISCARD)) | ||
1643 | bio_list_add(&info->defer_bios, bio); | 1646 | bio_list_add(&info->defer_bios, bio); |
1644 | else { | 1647 | else { |
1645 | struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));; | 1648 | struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));; |
@@ -2028,7 +2031,7 @@ static void process_thin_deferred_bios(struct thin_c *tc) | |||
2028 | break; | 2031 | break; |
2029 | } | 2032 | } |
2030 | 2033 | ||
2031 | if (bio->bi_rw & REQ_DISCARD) | 2034 | if (bio_op(bio) == REQ_OP_DISCARD) |
2032 | pool->process_discard(tc, bio); | 2035 | pool->process_discard(tc, bio); |
2033 | else | 2036 | else |
2034 | pool->process_bio(tc, bio); | 2037 | pool->process_bio(tc, bio); |
@@ -2115,7 +2118,7 @@ static void process_thin_deferred_cells(struct thin_c *tc) | |||
2115 | return; | 2118 | return; |
2116 | } | 2119 | } |
2117 | 2120 | ||
2118 | if (cell->holder->bi_rw & REQ_DISCARD) | 2121 | if (bio_op(cell->holder) == REQ_OP_DISCARD) |
2119 | pool->process_discard_cell(tc, cell); | 2122 | pool->process_discard_cell(tc, cell); |
2120 | else | 2123 | else |
2121 | pool->process_cell(tc, cell); | 2124 | pool->process_cell(tc, cell); |
@@ -2553,7 +2556,8 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) | |||
2553 | return DM_MAPIO_SUBMITTED; | 2556 | return DM_MAPIO_SUBMITTED; |
2554 | } | 2557 | } |
2555 | 2558 | ||
2556 | if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) { | 2559 | if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) || |
2560 | bio_op(bio) == REQ_OP_DISCARD) { | ||
2557 | thin_defer_bio_with_throttle(tc, bio); | 2561 | thin_defer_bio_with_throttle(tc, bio); |
2558 | return DM_MAPIO_SUBMITTED; | 2562 | return DM_MAPIO_SUBMITTED; |
2559 | } | 2563 | } |
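Two related conversions show up in dm-thin: __blkdev_issue_discard() takes a plain flags argument (0 here) instead of REQ_WRITE | REQ_DISCARD, and submit_bio() loses its rw parameter entirely, since the operation now travels on the bio. The end_discard() change reduces to the before/after below:

    /* before: operation chosen at submission time */
    submit_bio(REQ_WRITE | REQ_DISCARD, op->bio);

    /* after: operation stored in the bio itself, one-argument submit */
    bio_set_op_attrs(op->bio, REQ_OP_DISCARD, 0);
    submit_bio(op->bio);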
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 1b2f96205361..aba7ed9abb3a 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -723,8 +723,9 @@ static void start_io_acct(struct dm_io *io) | |||
723 | atomic_inc_return(&md->pending[rw])); | 723 | atomic_inc_return(&md->pending[rw])); |
724 | 724 | ||
725 | if (unlikely(dm_stats_used(&md->stats))) | 725 | if (unlikely(dm_stats_used(&md->stats))) |
726 | dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, | 726 | dm_stats_account_io(&md->stats, bio_data_dir(bio), |
727 | bio_sectors(bio), false, 0, &io->stats_aux); | 727 | bio->bi_iter.bi_sector, bio_sectors(bio), |
728 | false, 0, &io->stats_aux); | ||
728 | } | 729 | } |
729 | 730 | ||
730 | static void end_io_acct(struct dm_io *io) | 731 | static void end_io_acct(struct dm_io *io) |
@@ -738,8 +739,9 @@ static void end_io_acct(struct dm_io *io) | |||
738 | generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time); | 739 | generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time); |
739 | 740 | ||
740 | if (unlikely(dm_stats_used(&md->stats))) | 741 | if (unlikely(dm_stats_used(&md->stats))) |
741 | dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, | 742 | dm_stats_account_io(&md->stats, bio_data_dir(bio), |
742 | bio_sectors(bio), true, duration, &io->stats_aux); | 743 | bio->bi_iter.bi_sector, bio_sectors(bio), |
744 | true, duration, &io->stats_aux); | ||
743 | 745 | ||
744 | /* | 746 | /* |
745 | * After this is decremented the bio must not be touched if it is | 747 | * After this is decremented the bio must not be touched if it is |
@@ -1001,12 +1003,12 @@ static void dec_pending(struct dm_io *io, int error) | |||
1001 | if (io_error == DM_ENDIO_REQUEUE) | 1003 | if (io_error == DM_ENDIO_REQUEUE) |
1002 | return; | 1004 | return; |
1003 | 1005 | ||
1004 | if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) { | 1006 | if ((bio->bi_rw & REQ_PREFLUSH) && bio->bi_iter.bi_size) { |
1005 | /* | 1007 | /* |
1006 | * Preflush done for flush with data, reissue | 1008 | * Preflush done for flush with data, reissue |
1007 | * without REQ_FLUSH. | 1009 | * without REQ_PREFLUSH. |
1008 | */ | 1010 | */ |
1009 | bio->bi_rw &= ~REQ_FLUSH; | 1011 | bio->bi_rw &= ~REQ_PREFLUSH; |
1010 | queue_io(md, bio); | 1012 | queue_io(md, bio); |
1011 | } else { | 1013 | } else { |
1012 | /* done with normal IO or empty flush */ | 1014 | /* done with normal IO or empty flush */ |
@@ -1051,7 +1053,7 @@ static void clone_endio(struct bio *bio) | |||
1051 | } | 1053 | } |
1052 | } | 1054 | } |
1053 | 1055 | ||
1054 | if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) && | 1056 | if (unlikely(r == -EREMOTEIO && (bio_op(bio) == REQ_OP_WRITE_SAME) && |
1055 | !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)) | 1057 | !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)) |
1056 | disable_write_same(md); | 1058 | disable_write_same(md); |
1057 | 1059 | ||
@@ -1121,9 +1123,9 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig) | |||
1121 | if (unlikely(dm_stats_used(&md->stats))) { | 1123 | if (unlikely(dm_stats_used(&md->stats))) { |
1122 | struct dm_rq_target_io *tio = tio_from_request(orig); | 1124 | struct dm_rq_target_io *tio = tio_from_request(orig); |
1123 | tio->duration_jiffies = jiffies - tio->duration_jiffies; | 1125 | tio->duration_jiffies = jiffies - tio->duration_jiffies; |
1124 | dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig), | 1126 | dm_stats_account_io(&md->stats, rq_data_dir(orig), |
1125 | tio->n_sectors, true, tio->duration_jiffies, | 1127 | blk_rq_pos(orig), tio->n_sectors, true, |
1126 | &tio->stats_aux); | 1128 | tio->duration_jiffies, &tio->stats_aux); |
1127 | } | 1129 | } |
1128 | } | 1130 | } |
1129 | 1131 | ||
@@ -1320,7 +1322,7 @@ static void dm_done(struct request *clone, int error, bool mapped) | |||
1320 | r = rq_end_io(tio->ti, clone, error, &tio->info); | 1322 | r = rq_end_io(tio->ti, clone, error, &tio->info); |
1321 | } | 1323 | } |
1322 | 1324 | ||
1323 | if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) && | 1325 | if (unlikely(r == -EREMOTEIO && (req_op(clone) == REQ_OP_WRITE_SAME) && |
1324 | !clone->q->limits.max_write_same_sectors)) | 1326 | !clone->q->limits.max_write_same_sectors)) |
1325 | disable_write_same(tio->md); | 1327 | disable_write_same(tio->md); |
1326 | 1328 | ||
@@ -1475,7 +1477,7 @@ EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); | |||
1475 | 1477 | ||
1476 | /* | 1478 | /* |
1477 | * A target may call dm_accept_partial_bio only from the map routine. It is | 1479 | * A target may call dm_accept_partial_bio only from the map routine. It is |
1478 | * allowed for all bio types except REQ_FLUSH. | 1480 | * allowed for all bio types except REQ_PREFLUSH. |
1479 | * | 1481 | * |
1480 | * dm_accept_partial_bio informs the dm that the target only wants to process | 1482 | * dm_accept_partial_bio informs the dm that the target only wants to process |
1481 | * additional n_sectors sectors of the bio and the rest of the data should be | 1483 | * additional n_sectors sectors of the bio and the rest of the data should be |
@@ -1505,7 +1507,7 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) | |||
1505 | { | 1507 | { |
1506 | struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); | 1508 | struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); |
1507 | unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; | 1509 | unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; |
1508 | BUG_ON(bio->bi_rw & REQ_FLUSH); | 1510 | BUG_ON(bio->bi_rw & REQ_PREFLUSH); |
1509 | BUG_ON(bi_size > *tio->len_ptr); | 1511 | BUG_ON(bi_size > *tio->len_ptr); |
1510 | BUG_ON(n_sectors > bi_size); | 1512 | BUG_ON(n_sectors > bi_size); |
1511 | *tio->len_ptr -= bi_size - n_sectors; | 1513 | *tio->len_ptr -= bi_size - n_sectors; |
@@ -1746,9 +1748,9 @@ static int __split_and_process_non_flush(struct clone_info *ci) | |||
1746 | unsigned len; | 1748 | unsigned len; |
1747 | int r; | 1749 | int r; |
1748 | 1750 | ||
1749 | if (unlikely(bio->bi_rw & REQ_DISCARD)) | 1751 | if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) |
1750 | return __send_discard(ci); | 1752 | return __send_discard(ci); |
1751 | else if (unlikely(bio->bi_rw & REQ_WRITE_SAME)) | 1753 | else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) |
1752 | return __send_write_same(ci); | 1754 | return __send_write_same(ci); |
1753 | 1755 | ||
1754 | ti = dm_table_find_target(ci->map, ci->sector); | 1756 | ti = dm_table_find_target(ci->map, ci->sector); |
@@ -1793,7 +1795,7 @@ static void __split_and_process_bio(struct mapped_device *md, | |||
1793 | 1795 | ||
1794 | start_io_acct(ci.io); | 1796 | start_io_acct(ci.io); |
1795 | 1797 | ||
1796 | if (bio->bi_rw & REQ_FLUSH) { | 1798 | if (bio->bi_rw & REQ_PREFLUSH) { |
1797 | ci.bio = &ci.md->flush_bio; | 1799 | ci.bio = &ci.md->flush_bio; |
1798 | ci.sector_count = 0; | 1800 | ci.sector_count = 0; |
1799 | error = __send_empty_flush(&ci); | 1801 | error = __send_empty_flush(&ci); |
@@ -2082,8 +2084,9 @@ static void dm_start_request(struct mapped_device *md, struct request *orig) | |||
2082 | struct dm_rq_target_io *tio = tio_from_request(orig); | 2084 | struct dm_rq_target_io *tio = tio_from_request(orig); |
2083 | tio->duration_jiffies = jiffies; | 2085 | tio->duration_jiffies = jiffies; |
2084 | tio->n_sectors = blk_rq_sectors(orig); | 2086 | tio->n_sectors = blk_rq_sectors(orig); |
2085 | dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig), | 2087 | dm_stats_account_io(&md->stats, rq_data_dir(orig), |
2086 | tio->n_sectors, false, 0, &tio->stats_aux); | 2088 | blk_rq_pos(orig), tio->n_sectors, false, 0, |
2089 | &tio->stats_aux); | ||
2087 | } | 2090 | } |
2088 | 2091 | ||
2089 | /* | 2092 | /* |
@@ -2168,7 +2171,7 @@ static void dm_request_fn(struct request_queue *q) | |||
2168 | 2171 | ||
2169 | /* always use block 0 to find the target for flushes for now */ | 2172 | /* always use block 0 to find the target for flushes for now */ |
2170 | pos = 0; | 2173 | pos = 0; |
2171 | if (!(rq->cmd_flags & REQ_FLUSH)) | 2174 | if (req_op(rq) != REQ_OP_FLUSH) |
2172 | pos = blk_rq_pos(rq); | 2175 | pos = blk_rq_pos(rq); |
2173 | 2176 | ||
2174 | if ((dm_request_peeked_before_merge_deadline(md) && | 2177 | if ((dm_request_peeked_before_merge_deadline(md) && |
@@ -2412,7 +2415,7 @@ static struct mapped_device *alloc_dev(int minor) | |||
2412 | 2415 | ||
2413 | bio_init(&md->flush_bio); | 2416 | bio_init(&md->flush_bio); |
2414 | md->flush_bio.bi_bdev = md->bdev; | 2417 | md->flush_bio.bi_bdev = md->bdev; |
2415 | md->flush_bio.bi_rw = WRITE_FLUSH; | 2418 | bio_set_op_attrs(&md->flush_bio, REQ_OP_WRITE, WRITE_FLUSH); |
2416 | 2419 | ||
2417 | dm_stats_init(&md->stats); | 2420 | dm_stats_init(&md->stats); |
2418 | 2421 | ||
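Note the split personality of flush after this series: on a bio, preflush remains a flag (REQ_PREFLUSH) that can accompany data, while an empty flush request reaching dm_request_fn() is its own operation (REQ_OP_FLUSH, tested with req_op(rq) above). A flush carrying a payload is therefore completed in two stages; once the preflush part finishes, dec_pending() strips the flag and requeues the same bio as ordinary I/O:

    if ((bio->bi_rw & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
            /* preflush done for a flush with data;
             * reissue the payload without REQ_PREFLUSH */
            bio->bi_rw &= ~REQ_PREFLUSH;
            queue_io(md, bio);
    }

The dm_stats_account_io() callers likewise switch from passing raw bi_rw/cmd_flags to bio_data_dir(bio) and rq_data_dir(orig), since the flag word no longer encodes a usable READ/WRITE classification on its own.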
diff --git a/drivers/md/linear.c b/drivers/md/linear.c index b7fe7e9fc777..70ff888d25d0 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c | |||
@@ -221,7 +221,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio) | |||
221 | struct bio *split; | 221 | struct bio *split; |
222 | sector_t start_sector, end_sector, data_offset; | 222 | sector_t start_sector, end_sector, data_offset; |
223 | 223 | ||
224 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { | 224 | if (unlikely(bio->bi_rw & REQ_PREFLUSH)) { |
225 | md_flush_request(mddev, bio); | 225 | md_flush_request(mddev, bio); |
226 | return; | 226 | return; |
227 | } | 227 | } |
@@ -252,7 +252,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio) | |||
252 | split->bi_iter.bi_sector = split->bi_iter.bi_sector - | 252 | split->bi_iter.bi_sector = split->bi_iter.bi_sector - |
253 | start_sector + data_offset; | 253 | start_sector + data_offset; |
254 | 254 | ||
255 | if (unlikely((split->bi_rw & REQ_DISCARD) && | 255 | if (unlikely((bio_op(split) == REQ_OP_DISCARD) && |
256 | !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) { | 256 | !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) { |
257 | /* Just ignore it */ | 257 | /* Just ignore it */ |
258 | bio_endio(split); | 258 | bio_endio(split); |
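linear keeps md's long-standing convention that a discard aimed at a member whose queue does not advertise discard support is completed silently rather than failed; only the test changes, from a bi_rw mask to bio_op(). The guard, sketched with the else branch from the surrounding (unshown) context:

    if (unlikely(bio_op(split) == REQ_OP_DISCARD &&
                 !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
            /* just ignore it */
            bio_endio(split);
    } else
            generic_make_request(split);

raid0 carries the same pattern in its make_request path, and raid1/raid10 in their pending-write flush paths below.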
diff --git a/drivers/md/md.c b/drivers/md/md.c index 866825f10b4c..1f123f5a29da 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -394,8 +394,9 @@ static void submit_flushes(struct work_struct *ws) | |||
394 | bi->bi_end_io = md_end_flush; | 394 | bi->bi_end_io = md_end_flush; |
395 | bi->bi_private = rdev; | 395 | bi->bi_private = rdev; |
396 | bi->bi_bdev = rdev->bdev; | 396 | bi->bi_bdev = rdev->bdev; |
397 | bio_set_op_attrs(bi, REQ_OP_WRITE, WRITE_FLUSH); | ||
397 | atomic_inc(&mddev->flush_pending); | 398 | atomic_inc(&mddev->flush_pending); |
398 | submit_bio(WRITE_FLUSH, bi); | 399 | submit_bio(bi); |
399 | rcu_read_lock(); | 400 | rcu_read_lock(); |
400 | rdev_dec_pending(rdev, mddev); | 401 | rdev_dec_pending(rdev, mddev); |
401 | } | 402 | } |
@@ -413,7 +414,7 @@ static void md_submit_flush_data(struct work_struct *ws) | |||
413 | /* an empty barrier - all done */ | 414 | /* an empty barrier - all done */ |
414 | bio_endio(bio); | 415 | bio_endio(bio); |
415 | else { | 416 | else { |
416 | bio->bi_rw &= ~REQ_FLUSH; | 417 | bio->bi_rw &= ~REQ_PREFLUSH; |
417 | mddev->pers->make_request(mddev, bio); | 418 | mddev->pers->make_request(mddev, bio); |
418 | } | 419 | } |
419 | 420 | ||
@@ -742,9 +743,10 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, | |||
742 | bio_add_page(bio, page, size, 0); | 743 | bio_add_page(bio, page, size, 0); |
743 | bio->bi_private = rdev; | 744 | bio->bi_private = rdev; |
744 | bio->bi_end_io = super_written; | 745 | bio->bi_end_io = super_written; |
746 | bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH_FUA); | ||
745 | 747 | ||
746 | atomic_inc(&mddev->pending_writes); | 748 | atomic_inc(&mddev->pending_writes); |
747 | submit_bio(WRITE_FLUSH_FUA, bio); | 749 | submit_bio(bio); |
748 | } | 750 | } |
749 | 751 | ||
750 | void md_super_wait(struct mddev *mddev) | 752 | void md_super_wait(struct mddev *mddev) |
@@ -754,13 +756,14 @@ void md_super_wait(struct mddev *mddev) | |||
754 | } | 756 | } |
755 | 757 | ||
756 | int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, | 758 | int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, |
757 | struct page *page, int rw, bool metadata_op) | 759 | struct page *page, int op, int op_flags, bool metadata_op) |
758 | { | 760 | { |
759 | struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev); | 761 | struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev); |
760 | int ret; | 762 | int ret; |
761 | 763 | ||
762 | bio->bi_bdev = (metadata_op && rdev->meta_bdev) ? | 764 | bio->bi_bdev = (metadata_op && rdev->meta_bdev) ? |
763 | rdev->meta_bdev : rdev->bdev; | 765 | rdev->meta_bdev : rdev->bdev; |
766 | bio_set_op_attrs(bio, op, op_flags); | ||
764 | if (metadata_op) | 767 | if (metadata_op) |
765 | bio->bi_iter.bi_sector = sector + rdev->sb_start; | 768 | bio->bi_iter.bi_sector = sector + rdev->sb_start; |
766 | else if (rdev->mddev->reshape_position != MaxSector && | 769 | else if (rdev->mddev->reshape_position != MaxSector && |
@@ -770,7 +773,8 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, | |||
770 | else | 773 | else |
771 | bio->bi_iter.bi_sector = sector + rdev->data_offset; | 774 | bio->bi_iter.bi_sector = sector + rdev->data_offset; |
772 | bio_add_page(bio, page, size, 0); | 775 | bio_add_page(bio, page, size, 0); |
773 | submit_bio_wait(rw, bio); | 776 | |
777 | submit_bio_wait(bio); | ||
774 | 778 | ||
775 | ret = !bio->bi_error; | 779 | ret = !bio->bi_error; |
776 | bio_put(bio); | 780 | bio_put(bio); |
@@ -785,7 +789,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size) | |||
785 | if (rdev->sb_loaded) | 789 | if (rdev->sb_loaded) |
786 | return 0; | 790 | return 0; |
787 | 791 | ||
788 | if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true)) | 792 | if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true)) |
789 | goto fail; | 793 | goto fail; |
790 | rdev->sb_loaded = 1; | 794 | rdev->sb_loaded = 1; |
791 | return 0; | 795 | return 0; |
@@ -1471,7 +1475,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_ | |||
1471 | return -EINVAL; | 1475 | return -EINVAL; |
1472 | bb_sector = (long long)offset; | 1476 | bb_sector = (long long)offset; |
1473 | if (!sync_page_io(rdev, bb_sector, sectors << 9, | 1477 | if (!sync_page_io(rdev, bb_sector, sectors << 9, |
1474 | rdev->bb_page, READ, true)) | 1478 | rdev->bb_page, REQ_OP_READ, 0, true)) |
1475 | return -EIO; | 1479 | return -EIO; |
1476 | bbp = (u64 *)page_address(rdev->bb_page); | 1480 | bbp = (u64 *)page_address(rdev->bb_page); |
1477 | rdev->badblocks.shift = sb->bblog_shift; | 1481 | rdev->badblocks.shift = sb->bblog_shift; |
diff --git a/drivers/md/md.h b/drivers/md/md.h index b5c4be73e6e4..b4f335245bd6 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
@@ -424,7 +424,7 @@ struct mddev { | |||
424 | 424 | ||
425 | /* Generic flush handling. | 425 | /* Generic flush handling. |
426 | * The last to finish preflush schedules a worker to submit | 426 | * The last to finish preflush schedules a worker to submit |
427 | * the rest of the request (without the REQ_FLUSH flag). | 427 | * the rest of the request (without the REQ_PREFLUSH flag). |
428 | */ | 428 | */ |
429 | struct bio *flush_bio; | 429 | struct bio *flush_bio; |
430 | atomic_t flush_pending; | 430 | atomic_t flush_pending; |
@@ -618,7 +618,8 @@ extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev, | |||
618 | sector_t sector, int size, struct page *page); | 618 | sector_t sector, int size, struct page *page); |
619 | extern void md_super_wait(struct mddev *mddev); | 619 | extern void md_super_wait(struct mddev *mddev); |
620 | extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, | 620 | extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, |
621 | struct page *page, int rw, bool metadata_op); | 621 | struct page *page, int op, int op_flags, |
622 | bool metadata_op); | ||
622 | extern void md_do_sync(struct md_thread *thread); | 623 | extern void md_do_sync(struct md_thread *thread); |
623 | extern void md_new_event(struct mddev *mddev); | 624 | extern void md_new_event(struct mddev *mddev); |
624 | extern int md_allow_write(struct mddev *mddev); | 625 | extern int md_allow_write(struct mddev *mddev); |
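sync_page_io() is the md helper the RAID personalities use for synchronous superblock, bad-block and recovery page I/O; its combined rw word is split into op and op_flags to match the bio changes. A caller migrating to the new prototype (illustrative values):

    /* before */
    ok = sync_page_io(rdev, sector, size, page, READ, true);

    /* after: the operation and its modifier flags are separate arguments */
    ok = sync_page_io(rdev, sector, size, page, REQ_OP_READ, 0, true);

    /* a flagged write, as in r5l_log_write_empty_meta_block() below */
    ok = sync_page_io(rdev, sector, size, page, REQ_OP_WRITE, WRITE_FUA, true);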
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index dd483bb2e111..72ea98e89e57 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c | |||
@@ -111,7 +111,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio) | |||
111 | struct multipath_bh * mp_bh; | 111 | struct multipath_bh * mp_bh; |
112 | struct multipath_info *multipath; | 112 | struct multipath_info *multipath; |
113 | 113 | ||
114 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { | 114 | if (unlikely(bio->bi_rw & REQ_PREFLUSH)) { |
115 | md_flush_request(mddev, bio); | 115 | md_flush_request(mddev, bio); |
116 | return; | 116 | return; |
117 | } | 117 | } |
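Every md personality filters flushes at the top of its make_request method and hands them to the shared md flush machinery before making any mapping decisions; multipath, linear, raid0 and raid10 all gain the same one-word change:

    if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
            md_flush_request(mddev, bio);   /* generic md flush handling */
            return;
    }

md_submit_flush_data() then clears REQ_PREFLUSH (see the md.c hunk above) before resubmitting the data portion, mirroring the dm behaviour.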
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 34783a3c8b3c..c3d439083212 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
@@ -458,7 +458,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) | |||
458 | struct md_rdev *tmp_dev; | 458 | struct md_rdev *tmp_dev; |
459 | struct bio *split; | 459 | struct bio *split; |
460 | 460 | ||
461 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { | 461 | if (unlikely(bio->bi_rw & REQ_PREFLUSH)) { |
462 | md_flush_request(mddev, bio); | 462 | md_flush_request(mddev, bio); |
463 | return; | 463 | return; |
464 | } | 464 | } |
@@ -488,7 +488,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) | |||
488 | split->bi_iter.bi_sector = sector + zone->dev_start + | 488 | split->bi_iter.bi_sector = sector + zone->dev_start + |
489 | tmp_dev->data_offset; | 489 | tmp_dev->data_offset; |
490 | 490 | ||
491 | if (unlikely((split->bi_rw & REQ_DISCARD) && | 491 | if (unlikely((bio_op(split) == REQ_OP_DISCARD) && |
492 | !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) { | 492 | !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) { |
493 | /* Just ignore it */ | 493 | /* Just ignore it */ |
494 | bio_endio(split); | 494 | bio_endio(split); |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index c7c8cde0ab21..10e53cd6a995 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -759,7 +759,7 @@ static void flush_pending_writes(struct r1conf *conf) | |||
759 | while (bio) { /* submit pending writes */ | 759 | while (bio) { /* submit pending writes */ |
760 | struct bio *next = bio->bi_next; | 760 | struct bio *next = bio->bi_next; |
761 | bio->bi_next = NULL; | 761 | bio->bi_next = NULL; |
762 | if (unlikely((bio->bi_rw & REQ_DISCARD) && | 762 | if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && |
763 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) | 763 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) |
764 | /* Just ignore it */ | 764 | /* Just ignore it */ |
765 | bio_endio(bio); | 765 | bio_endio(bio); |
@@ -1033,7 +1033,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule) | |||
1033 | while (bio) { /* submit pending writes */ | 1033 | while (bio) { /* submit pending writes */ |
1034 | struct bio *next = bio->bi_next; | 1034 | struct bio *next = bio->bi_next; |
1035 | bio->bi_next = NULL; | 1035 | bio->bi_next = NULL; |
1036 | if (unlikely((bio->bi_rw & REQ_DISCARD) && | 1036 | if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && |
1037 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) | 1037 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) |
1038 | /* Just ignore it */ | 1038 | /* Just ignore it */ |
1039 | bio_endio(bio); | 1039 | bio_endio(bio); |
@@ -1053,12 +1053,12 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio) | |||
1053 | int i, disks; | 1053 | int i, disks; |
1054 | struct bitmap *bitmap; | 1054 | struct bitmap *bitmap; |
1055 | unsigned long flags; | 1055 | unsigned long flags; |
1056 | const int op = bio_op(bio); | ||
1056 | const int rw = bio_data_dir(bio); | 1057 | const int rw = bio_data_dir(bio); |
1057 | const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); | 1058 | const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); |
1058 | const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); | 1059 | const unsigned long do_flush_fua = (bio->bi_rw & |
1059 | const unsigned long do_discard = (bio->bi_rw | 1060 | (REQ_PREFLUSH | REQ_FUA)); |
1060 | & (REQ_DISCARD | REQ_SECURE)); | 1061 | const unsigned long do_sec = (bio->bi_rw & REQ_SECURE); |
1061 | const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME); | ||
1062 | struct md_rdev *blocked_rdev; | 1062 | struct md_rdev *blocked_rdev; |
1063 | struct blk_plug_cb *cb; | 1063 | struct blk_plug_cb *cb; |
1064 | struct raid1_plug_cb *plug = NULL; | 1064 | struct raid1_plug_cb *plug = NULL; |
@@ -1166,7 +1166,7 @@ read_again: | |||
1166 | mirror->rdev->data_offset; | 1166 | mirror->rdev->data_offset; |
1167 | read_bio->bi_bdev = mirror->rdev->bdev; | 1167 | read_bio->bi_bdev = mirror->rdev->bdev; |
1168 | read_bio->bi_end_io = raid1_end_read_request; | 1168 | read_bio->bi_end_io = raid1_end_read_request; |
1169 | read_bio->bi_rw = READ | do_sync; | 1169 | bio_set_op_attrs(read_bio, op, do_sync); |
1170 | read_bio->bi_private = r1_bio; | 1170 | read_bio->bi_private = r1_bio; |
1171 | 1171 | ||
1172 | if (max_sectors < r1_bio->sectors) { | 1172 | if (max_sectors < r1_bio->sectors) { |
@@ -1376,8 +1376,7 @@ read_again: | |||
1376 | conf->mirrors[i].rdev->data_offset); | 1376 | conf->mirrors[i].rdev->data_offset); |
1377 | mbio->bi_bdev = conf->mirrors[i].rdev->bdev; | 1377 | mbio->bi_bdev = conf->mirrors[i].rdev->bdev; |
1378 | mbio->bi_end_io = raid1_end_write_request; | 1378 | mbio->bi_end_io = raid1_end_write_request; |
1379 | mbio->bi_rw = | 1379 | bio_set_op_attrs(mbio, op, do_flush_fua | do_sync | do_sec); |
1380 | WRITE | do_flush_fua | do_sync | do_discard | do_same; | ||
1381 | mbio->bi_private = r1_bio; | 1380 | mbio->bi_private = r1_bio; |
1382 | 1381 | ||
1383 | atomic_inc(&r1_bio->remaining); | 1382 | atomic_inc(&r1_bio->remaining); |
@@ -1771,7 +1770,7 @@ static void end_sync_write(struct bio *bio) | |||
1771 | static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector, | 1770 | static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector, |
1772 | int sectors, struct page *page, int rw) | 1771 | int sectors, struct page *page, int rw) |
1773 | { | 1772 | { |
1774 | if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) | 1773 | if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false)) |
1775 | /* success */ | 1774 | /* success */ |
1776 | return 1; | 1775 | return 1; |
1777 | if (rw == WRITE) { | 1776 | if (rw == WRITE) { |
@@ -1825,7 +1824,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio) | |||
1825 | rdev = conf->mirrors[d].rdev; | 1824 | rdev = conf->mirrors[d].rdev; |
1826 | if (sync_page_io(rdev, sect, s<<9, | 1825 | if (sync_page_io(rdev, sect, s<<9, |
1827 | bio->bi_io_vec[idx].bv_page, | 1826 | bio->bi_io_vec[idx].bv_page, |
1828 | READ, false)) { | 1827 | REQ_OP_READ, 0, false)) { |
1829 | success = 1; | 1828 | success = 1; |
1830 | break; | 1829 | break; |
1831 | } | 1830 | } |
@@ -2030,7 +2029,7 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) | |||
2030 | !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)))) | 2029 | !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)))) |
2031 | continue; | 2030 | continue; |
2032 | 2031 | ||
2033 | wbio->bi_rw = WRITE; | 2032 | bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); |
2034 | wbio->bi_end_io = end_sync_write; | 2033 | wbio->bi_end_io = end_sync_write; |
2035 | atomic_inc(&r1_bio->remaining); | 2034 | atomic_inc(&r1_bio->remaining); |
2036 | md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio)); | 2035 | md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio)); |
@@ -2090,7 +2089,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk, | |||
2090 | is_badblock(rdev, sect, s, | 2089 | is_badblock(rdev, sect, s, |
2091 | &first_bad, &bad_sectors) == 0 && | 2090 | &first_bad, &bad_sectors) == 0 && |
2092 | sync_page_io(rdev, sect, s<<9, | 2091 | sync_page_io(rdev, sect, s<<9, |
2093 | conf->tmppage, READ, false)) | 2092 | conf->tmppage, REQ_OP_READ, 0, false)) |
2094 | success = 1; | 2093 | success = 1; |
2095 | else { | 2094 | else { |
2096 | d++; | 2095 | d++; |
@@ -2201,14 +2200,15 @@ static int narrow_write_error(struct r1bio *r1_bio, int i) | |||
2201 | wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev); | 2200 | wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev); |
2202 | } | 2201 | } |
2203 | 2202 | ||
2204 | wbio->bi_rw = WRITE; | 2203 | bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); |
2205 | wbio->bi_iter.bi_sector = r1_bio->sector; | 2204 | wbio->bi_iter.bi_sector = r1_bio->sector; |
2206 | wbio->bi_iter.bi_size = r1_bio->sectors << 9; | 2205 | wbio->bi_iter.bi_size = r1_bio->sectors << 9; |
2207 | 2206 | ||
2208 | bio_trim(wbio, sector - r1_bio->sector, sectors); | 2207 | bio_trim(wbio, sector - r1_bio->sector, sectors); |
2209 | wbio->bi_iter.bi_sector += rdev->data_offset; | 2208 | wbio->bi_iter.bi_sector += rdev->data_offset; |
2210 | wbio->bi_bdev = rdev->bdev; | 2209 | wbio->bi_bdev = rdev->bdev; |
2211 | if (submit_bio_wait(WRITE, wbio) < 0) | 2210 | |
2211 | if (submit_bio_wait(wbio) < 0) | ||
2212 | /* failure! */ | 2212 | /* failure! */ |
2213 | ok = rdev_set_badblocks(rdev, sector, | 2213 | ok = rdev_set_badblocks(rdev, sector, |
2214 | sectors, 0) | 2214 | sectors, 0) |
@@ -2343,7 +2343,7 @@ read_more: | |||
2343 | bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset; | 2343 | bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset; |
2344 | bio->bi_bdev = rdev->bdev; | 2344 | bio->bi_bdev = rdev->bdev; |
2345 | bio->bi_end_io = raid1_end_read_request; | 2345 | bio->bi_end_io = raid1_end_read_request; |
2346 | bio->bi_rw = READ | do_sync; | 2346 | bio_set_op_attrs(bio, REQ_OP_READ, do_sync); |
2347 | bio->bi_private = r1_bio; | 2347 | bio->bi_private = r1_bio; |
2348 | if (max_sectors < r1_bio->sectors) { | 2348 | if (max_sectors < r1_bio->sectors) { |
2349 | /* Drat - have to split this up more */ | 2349 | /* Drat - have to split this up more */ |
@@ -2571,7 +2571,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, | |||
2571 | if (i < conf->raid_disks) | 2571 | if (i < conf->raid_disks) |
2572 | still_degraded = 1; | 2572 | still_degraded = 1; |
2573 | } else if (!test_bit(In_sync, &rdev->flags)) { | 2573 | } else if (!test_bit(In_sync, &rdev->flags)) { |
2574 | bio->bi_rw = WRITE; | 2574 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); |
2575 | bio->bi_end_io = end_sync_write; | 2575 | bio->bi_end_io = end_sync_write; |
2576 | write_targets ++; | 2576 | write_targets ++; |
2577 | } else { | 2577 | } else { |
@@ -2598,7 +2598,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, | |||
2598 | if (disk < 0) | 2598 | if (disk < 0) |
2599 | disk = i; | 2599 | disk = i; |
2600 | } | 2600 | } |
2601 | bio->bi_rw = READ; | 2601 | bio_set_op_attrs(bio, REQ_OP_READ, 0); |
2602 | bio->bi_end_io = end_sync_read; | 2602 | bio->bi_end_io = end_sync_read; |
2603 | read_targets++; | 2603 | read_targets++; |
2604 | } else if (!test_bit(WriteErrorSeen, &rdev->flags) && | 2604 | } else if (!test_bit(WriteErrorSeen, &rdev->flags) && |
@@ -2610,7 +2610,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, | |||
2610 | * if we are doing resync or repair. Otherwise, leave | 2610 | * if we are doing resync or repair. Otherwise, leave |
2611 | * this device alone for this sync request. | 2611 | * this device alone for this sync request. |
2612 | */ | 2612 | */ |
2613 | bio->bi_rw = WRITE; | 2613 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); |
2614 | bio->bi_end_io = end_sync_write; | 2614 | bio->bi_end_io = end_sync_write; |
2615 | write_targets++; | 2615 | write_targets++; |
2616 | } | 2616 | } |
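raid1_make_request() previously OR-ed everything into a single rw word per mirror write; it now samples the operation once with bio_op() and keeps the modifier flags separate, so REQ_DISCARD drops out of the flag set (discard is an op) while REQ_SECURE survives as a flag. Applied to each mirror bio, roughly:

    const int op = bio_op(bio);
    const unsigned long do_sync = bio->bi_rw & REQ_SYNC;
    const unsigned long do_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
    const unsigned long do_sec = bio->bi_rw & REQ_SECURE;
    /* ... */
    bio_set_op_attrs(mbio, op, do_flush_fua | do_sync | do_sec);

The resync paths get the simpler fixed forms, bio_set_op_attrs(bio, REQ_OP_READ, 0) and bio_set_op_attrs(bio, REQ_OP_WRITE, 0), in place of direct bi_rw assignment.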
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index c7de2a53e625..245640b50153 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -865,7 +865,7 @@ static void flush_pending_writes(struct r10conf *conf) | |||
865 | while (bio) { /* submit pending writes */ | 865 | while (bio) { /* submit pending writes */ |
866 | struct bio *next = bio->bi_next; | 866 | struct bio *next = bio->bi_next; |
867 | bio->bi_next = NULL; | 867 | bio->bi_next = NULL; |
868 | if (unlikely((bio->bi_rw & REQ_DISCARD) && | 868 | if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && |
869 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) | 869 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) |
870 | /* Just ignore it */ | 870 | /* Just ignore it */ |
871 | bio_endio(bio); | 871 | bio_endio(bio); |
@@ -1041,7 +1041,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) | |||
1041 | while (bio) { /* submit pending writes */ | 1041 | while (bio) { /* submit pending writes */ |
1042 | struct bio *next = bio->bi_next; | 1042 | struct bio *next = bio->bi_next; |
1043 | bio->bi_next = NULL; | 1043 | bio->bi_next = NULL; |
1044 | if (unlikely((bio->bi_rw & REQ_DISCARD) && | 1044 | if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && |
1045 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) | 1045 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) |
1046 | /* Just ignore it */ | 1046 | /* Just ignore it */ |
1047 | bio_endio(bio); | 1047 | bio_endio(bio); |
@@ -1058,12 +1058,11 @@ static void __make_request(struct mddev *mddev, struct bio *bio) | |||
1058 | struct r10bio *r10_bio; | 1058 | struct r10bio *r10_bio; |
1059 | struct bio *read_bio; | 1059 | struct bio *read_bio; |
1060 | int i; | 1060 | int i; |
1061 | const int op = bio_op(bio); | ||
1061 | const int rw = bio_data_dir(bio); | 1062 | const int rw = bio_data_dir(bio); |
1062 | const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); | 1063 | const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); |
1063 | const unsigned long do_fua = (bio->bi_rw & REQ_FUA); | 1064 | const unsigned long do_fua = (bio->bi_rw & REQ_FUA); |
1064 | const unsigned long do_discard = (bio->bi_rw | 1065 | const unsigned long do_sec = (bio->bi_rw & REQ_SECURE); |
1065 | & (REQ_DISCARD | REQ_SECURE)); | ||
1066 | const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME); | ||
1067 | unsigned long flags; | 1066 | unsigned long flags; |
1068 | struct md_rdev *blocked_rdev; | 1067 | struct md_rdev *blocked_rdev; |
1069 | struct blk_plug_cb *cb; | 1068 | struct blk_plug_cb *cb; |
@@ -1156,7 +1155,7 @@ read_again: | |||
1156 | choose_data_offset(r10_bio, rdev); | 1155 | choose_data_offset(r10_bio, rdev); |
1157 | read_bio->bi_bdev = rdev->bdev; | 1156 | read_bio->bi_bdev = rdev->bdev; |
1158 | read_bio->bi_end_io = raid10_end_read_request; | 1157 | read_bio->bi_end_io = raid10_end_read_request; |
1159 | read_bio->bi_rw = READ | do_sync; | 1158 | bio_set_op_attrs(read_bio, op, do_sync); |
1160 | read_bio->bi_private = r10_bio; | 1159 | read_bio->bi_private = r10_bio; |
1161 | 1160 | ||
1162 | if (max_sectors < r10_bio->sectors) { | 1161 | if (max_sectors < r10_bio->sectors) { |
@@ -1363,8 +1362,7 @@ retry_write: | |||
1363 | rdev)); | 1362 | rdev)); |
1364 | mbio->bi_bdev = rdev->bdev; | 1363 | mbio->bi_bdev = rdev->bdev; |
1365 | mbio->bi_end_io = raid10_end_write_request; | 1364 | mbio->bi_end_io = raid10_end_write_request; |
1366 | mbio->bi_rw = | 1365 | bio_set_op_attrs(mbio, op, do_sync | do_fua | do_sec); |
1367 | WRITE | do_sync | do_fua | do_discard | do_same; | ||
1368 | mbio->bi_private = r10_bio; | 1366 | mbio->bi_private = r10_bio; |
1369 | 1367 | ||
1370 | atomic_inc(&r10_bio->remaining); | 1368 | atomic_inc(&r10_bio->remaining); |
@@ -1406,8 +1404,7 @@ retry_write: | |||
1406 | r10_bio, rdev)); | 1404 | r10_bio, rdev)); |
1407 | mbio->bi_bdev = rdev->bdev; | 1405 | mbio->bi_bdev = rdev->bdev; |
1408 | mbio->bi_end_io = raid10_end_write_request; | 1406 | mbio->bi_end_io = raid10_end_write_request; |
1409 | mbio->bi_rw = | 1407 | bio_set_op_attrs(mbio, op, do_sync | do_fua | do_sec); |
1410 | WRITE | do_sync | do_fua | do_discard | do_same; | ||
1411 | mbio->bi_private = r10_bio; | 1408 | mbio->bi_private = r10_bio; |
1412 | 1409 | ||
1413 | atomic_inc(&r10_bio->remaining); | 1410 | atomic_inc(&r10_bio->remaining); |
@@ -1450,7 +1447,7 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio) | |||
1450 | 1447 | ||
1451 | struct bio *split; | 1448 | struct bio *split; |
1452 | 1449 | ||
1453 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { | 1450 | if (unlikely(bio->bi_rw & REQ_PREFLUSH)) { |
1454 | md_flush_request(mddev, bio); | 1451 | md_flush_request(mddev, bio); |
1455 | return; | 1452 | return; |
1456 | } | 1453 | } |
@@ -1992,10 +1989,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) | |||
1992 | 1989 | ||
1993 | tbio->bi_vcnt = vcnt; | 1990 | tbio->bi_vcnt = vcnt; |
1994 | tbio->bi_iter.bi_size = fbio->bi_iter.bi_size; | 1991 | tbio->bi_iter.bi_size = fbio->bi_iter.bi_size; |
1995 | tbio->bi_rw = WRITE; | ||
1996 | tbio->bi_private = r10_bio; | 1992 | tbio->bi_private = r10_bio; |
1997 | tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; | 1993 | tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; |
1998 | tbio->bi_end_io = end_sync_write; | 1994 | tbio->bi_end_io = end_sync_write; |
1995 | bio_set_op_attrs(tbio, REQ_OP_WRITE, 0); | ||
1999 | 1996 | ||
2000 | bio_copy_data(tbio, fbio); | 1997 | bio_copy_data(tbio, fbio); |
2001 | 1998 | ||
@@ -2078,7 +2075,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio) | |||
2078 | addr, | 2075 | addr, |
2079 | s << 9, | 2076 | s << 9, |
2080 | bio->bi_io_vec[idx].bv_page, | 2077 | bio->bi_io_vec[idx].bv_page, |
2081 | READ, false); | 2078 | REQ_OP_READ, 0, false); |
2082 | if (ok) { | 2079 | if (ok) { |
2083 | rdev = conf->mirrors[dw].rdev; | 2080 | rdev = conf->mirrors[dw].rdev; |
2084 | addr = r10_bio->devs[1].addr + sect; | 2081 | addr = r10_bio->devs[1].addr + sect; |
@@ -2086,7 +2083,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio) | |||
2086 | addr, | 2083 | addr, |
2087 | s << 9, | 2084 | s << 9, |
2088 | bio->bi_io_vec[idx].bv_page, | 2085 | bio->bi_io_vec[idx].bv_page, |
2089 | WRITE, false); | 2086 | REQ_OP_WRITE, 0, false); |
2090 | if (!ok) { | 2087 | if (!ok) { |
2091 | set_bit(WriteErrorSeen, &rdev->flags); | 2088 | set_bit(WriteErrorSeen, &rdev->flags); |
2092 | if (!test_and_set_bit(WantReplacement, | 2089 | if (!test_and_set_bit(WantReplacement, |
@@ -2213,7 +2210,7 @@ static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector, | |||
2213 | if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors) | 2210 | if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors) |
2214 | && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags))) | 2211 | && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags))) |
2215 | return -1; | 2212 | return -1; |
2216 | if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) | 2213 | if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false)) |
2217 | /* success */ | 2214 | /* success */ |
2218 | return 1; | 2215 | return 1; |
2219 | if (rw == WRITE) { | 2216 | if (rw == WRITE) { |
@@ -2299,7 +2296,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 | |||
2299 | r10_bio->devs[sl].addr + | 2296 | r10_bio->devs[sl].addr + |
2300 | sect, | 2297 | sect, |
2301 | s<<9, | 2298 | s<<9, |
2302 | conf->tmppage, READ, false); | 2299 | conf->tmppage, |
2300 | REQ_OP_READ, 0, false); | ||
2303 | rdev_dec_pending(rdev, mddev); | 2301 | rdev_dec_pending(rdev, mddev); |
2304 | rcu_read_lock(); | 2302 | rcu_read_lock(); |
2305 | if (success) | 2303 | if (success) |
@@ -2474,7 +2472,9 @@ static int narrow_write_error(struct r10bio *r10_bio, int i) | |||
2474 | choose_data_offset(r10_bio, rdev) + | 2472 | choose_data_offset(r10_bio, rdev) + |
2475 | (sector - r10_bio->sector)); | 2473 | (sector - r10_bio->sector)); |
2476 | wbio->bi_bdev = rdev->bdev; | 2474 | wbio->bi_bdev = rdev->bdev; |
2477 | if (submit_bio_wait(WRITE, wbio) < 0) | 2475 | bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); |
2476 | |||
2477 | if (submit_bio_wait(wbio) < 0) | ||
2478 | /* Failure! */ | 2478 | /* Failure! */ |
2479 | ok = rdev_set_badblocks(rdev, sector, | 2479 | ok = rdev_set_badblocks(rdev, sector, |
2480 | sectors, 0) | 2480 | sectors, 0) |
@@ -2548,7 +2548,7 @@ read_more: | |||
2548 | bio->bi_iter.bi_sector = r10_bio->devs[slot].addr | 2548 | bio->bi_iter.bi_sector = r10_bio->devs[slot].addr |
2549 | + choose_data_offset(r10_bio, rdev); | 2549 | + choose_data_offset(r10_bio, rdev); |
2550 | bio->bi_bdev = rdev->bdev; | 2550 | bio->bi_bdev = rdev->bdev; |
2551 | bio->bi_rw = READ | do_sync; | 2551 | bio_set_op_attrs(bio, REQ_OP_READ, do_sync); |
2552 | bio->bi_private = r10_bio; | 2552 | bio->bi_private = r10_bio; |
2553 | bio->bi_end_io = raid10_end_read_request; | 2553 | bio->bi_end_io = raid10_end_read_request; |
2554 | if (max_sectors < r10_bio->sectors) { | 2554 | if (max_sectors < r10_bio->sectors) { |
@@ -3038,7 +3038,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, | |||
3038 | biolist = bio; | 3038 | biolist = bio; |
3039 | bio->bi_private = r10_bio; | 3039 | bio->bi_private = r10_bio; |
3040 | bio->bi_end_io = end_sync_read; | 3040 | bio->bi_end_io = end_sync_read; |
3041 | bio->bi_rw = READ; | 3041 | bio_set_op_attrs(bio, REQ_OP_READ, 0); |
3042 | from_addr = r10_bio->devs[j].addr; | 3042 | from_addr = r10_bio->devs[j].addr; |
3043 | bio->bi_iter.bi_sector = from_addr + | 3043 | bio->bi_iter.bi_sector = from_addr + |
3044 | rdev->data_offset; | 3044 | rdev->data_offset; |
@@ -3064,7 +3064,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, | |||
3064 | biolist = bio; | 3064 | biolist = bio; |
3065 | bio->bi_private = r10_bio; | 3065 | bio->bi_private = r10_bio; |
3066 | bio->bi_end_io = end_sync_write; | 3066 | bio->bi_end_io = end_sync_write; |
3067 | bio->bi_rw = WRITE; | 3067 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); |
3068 | bio->bi_iter.bi_sector = to_addr | 3068 | bio->bi_iter.bi_sector = to_addr |
3069 | + rdev->data_offset; | 3069 | + rdev->data_offset; |
3070 | bio->bi_bdev = rdev->bdev; | 3070 | bio->bi_bdev = rdev->bdev; |
@@ -3093,7 +3093,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, | |||
3093 | biolist = bio; | 3093 | biolist = bio; |
3094 | bio->bi_private = r10_bio; | 3094 | bio->bi_private = r10_bio; |
3095 | bio->bi_end_io = end_sync_write; | 3095 | bio->bi_end_io = end_sync_write; |
3096 | bio->bi_rw = WRITE; | 3096 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); |
3097 | bio->bi_iter.bi_sector = to_addr + | 3097 | bio->bi_iter.bi_sector = to_addr + |
3098 | rdev->data_offset; | 3098 | rdev->data_offset; |
3099 | bio->bi_bdev = rdev->bdev; | 3099 | bio->bi_bdev = rdev->bdev; |
@@ -3213,7 +3213,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, | |||
3213 | biolist = bio; | 3213 | biolist = bio; |
3214 | bio->bi_private = r10_bio; | 3214 | bio->bi_private = r10_bio; |
3215 | bio->bi_end_io = end_sync_read; | 3215 | bio->bi_end_io = end_sync_read; |
3216 | bio->bi_rw = READ; | 3216 | bio_set_op_attrs(bio, REQ_OP_READ, 0); |
3217 | bio->bi_iter.bi_sector = sector + | 3217 | bio->bi_iter.bi_sector = sector + |
3218 | conf->mirrors[d].rdev->data_offset; | 3218 | conf->mirrors[d].rdev->data_offset; |
3219 | bio->bi_bdev = conf->mirrors[d].rdev->bdev; | 3219 | bio->bi_bdev = conf->mirrors[d].rdev->bdev; |
@@ -3235,7 +3235,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, | |||
3235 | biolist = bio; | 3235 | biolist = bio; |
3236 | bio->bi_private = r10_bio; | 3236 | bio->bi_private = r10_bio; |
3237 | bio->bi_end_io = end_sync_write; | 3237 | bio->bi_end_io = end_sync_write; |
3238 | bio->bi_rw = WRITE; | 3238 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); |
3239 | bio->bi_iter.bi_sector = sector + | 3239 | bio->bi_iter.bi_sector = sector + |
3240 | conf->mirrors[d].replacement->data_offset; | 3240 | conf->mirrors[d].replacement->data_offset; |
3241 | bio->bi_bdev = conf->mirrors[d].replacement->bdev; | 3241 | bio->bi_bdev = conf->mirrors[d].replacement->bdev; |
@@ -4320,7 +4320,7 @@ read_more: | |||
4320 | + rdev->data_offset); | 4320 | + rdev->data_offset); |
4321 | read_bio->bi_private = r10_bio; | 4321 | read_bio->bi_private = r10_bio; |
4322 | read_bio->bi_end_io = end_sync_read; | 4322 | read_bio->bi_end_io = end_sync_read; |
4323 | read_bio->bi_rw = READ; | 4323 | bio_set_op_attrs(read_bio, REQ_OP_READ, 0); |
4324 | read_bio->bi_flags &= (~0UL << BIO_RESET_BITS); | 4324 | read_bio->bi_flags &= (~0UL << BIO_RESET_BITS); |
4325 | read_bio->bi_error = 0; | 4325 | read_bio->bi_error = 0; |
4326 | read_bio->bi_vcnt = 0; | 4326 | read_bio->bi_vcnt = 0; |
@@ -4354,7 +4354,7 @@ read_more: | |||
4354 | rdev2->new_data_offset; | 4354 | rdev2->new_data_offset; |
4355 | b->bi_private = r10_bio; | 4355 | b->bi_private = r10_bio; |
4356 | b->bi_end_io = end_reshape_write; | 4356 | b->bi_end_io = end_reshape_write; |
4357 | b->bi_rw = WRITE; | 4357 | bio_set_op_attrs(b, REQ_OP_WRITE, 0); |
4358 | b->bi_next = blist; | 4358 | b->bi_next = blist; |
4359 | blist = b; | 4359 | blist = b; |
4360 | } | 4360 | } |
@@ -4522,7 +4522,7 @@ static int handle_reshape_read_error(struct mddev *mddev, | |||
4522 | addr, | 4522 | addr, |
4523 | s << 9, | 4523 | s << 9, |
4524 | bvec[idx].bv_page, | 4524 | bvec[idx].bv_page, |
4525 | READ, false); | 4525 | REQ_OP_READ, 0, false); |
4526 | if (success) | 4526 | if (success) |
4527 | break; | 4527 | break; |
4528 | failed: | 4528 | failed: |
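submit_bio_wait() follows submit_bio() in dropping its rw argument, so narrow_write_error() in both raid1 and raid10 sets the op on the cloned bio before waiting on it. The bad-block recording pattern, condensed:

    bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
    if (submit_bio_wait(wbio) < 0)
            /* failure: record the bad range rather than failing the rdev */
            ok = rdev_set_badblocks(rdev, sector, sectors, 0) && ok;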
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index e889e2deb7b3..5504ce2bac06 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c | |||
@@ -254,14 +254,14 @@ static void r5l_submit_current_io(struct r5l_log *log) | |||
254 | __r5l_set_io_unit_state(io, IO_UNIT_IO_START); | 254 | __r5l_set_io_unit_state(io, IO_UNIT_IO_START); |
255 | spin_unlock_irqrestore(&log->io_list_lock, flags); | 255 | spin_unlock_irqrestore(&log->io_list_lock, flags); |
256 | 256 | ||
257 | submit_bio(WRITE, io->current_bio); | 257 | submit_bio(io->current_bio); |
258 | } | 258 | } |
259 | 259 | ||
260 | static struct bio *r5l_bio_alloc(struct r5l_log *log) | 260 | static struct bio *r5l_bio_alloc(struct r5l_log *log) |
261 | { | 261 | { |
262 | struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs); | 262 | struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs); |
263 | 263 | ||
264 | bio->bi_rw = WRITE; | 264 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); |
265 | bio->bi_bdev = log->rdev->bdev; | 265 | bio->bi_bdev = log->rdev->bdev; |
266 | bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start; | 266 | bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start; |
267 | 267 | ||
@@ -373,7 +373,7 @@ static void r5l_append_payload_page(struct r5l_log *log, struct page *page) | |||
373 | io->current_bio = r5l_bio_alloc(log); | 373 | io->current_bio = r5l_bio_alloc(log); |
374 | bio_chain(io->current_bio, prev); | 374 | bio_chain(io->current_bio, prev); |
375 | 375 | ||
376 | submit_bio(WRITE, prev); | 376 | submit_bio(prev); |
377 | } | 377 | } |
378 | 378 | ||
379 | if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0)) | 379 | if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0)) |
@@ -536,7 +536,7 @@ int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio) | |||
536 | bio_endio(bio); | 536 | bio_endio(bio); |
537 | return 0; | 537 | return 0; |
538 | } | 538 | } |
539 | bio->bi_rw &= ~REQ_FLUSH; | 539 | bio->bi_rw &= ~REQ_PREFLUSH; |
540 | return -EAGAIN; | 540 | return -EAGAIN; |
541 | } | 541 | } |
542 | 542 | ||
@@ -686,7 +686,8 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log) | |||
686 | bio_reset(&log->flush_bio); | 686 | bio_reset(&log->flush_bio); |
687 | log->flush_bio.bi_bdev = log->rdev->bdev; | 687 | log->flush_bio.bi_bdev = log->rdev->bdev; |
688 | log->flush_bio.bi_end_io = r5l_log_flush_endio; | 688 | log->flush_bio.bi_end_io = r5l_log_flush_endio; |
689 | submit_bio(WRITE_FLUSH, &log->flush_bio); | 689 | bio_set_op_attrs(&log->flush_bio, REQ_OP_WRITE, WRITE_FLUSH); |
690 | submit_bio(&log->flush_bio); | ||
690 | } | 691 | } |
691 | 692 | ||
692 | static void r5l_write_super(struct r5l_log *log, sector_t cp); | 693 | static void r5l_write_super(struct r5l_log *log, sector_t cp); |
@@ -881,7 +882,8 @@ static int r5l_read_meta_block(struct r5l_log *log, | |||
881 | struct r5l_meta_block *mb; | 882 | struct r5l_meta_block *mb; |
882 | u32 crc, stored_crc; | 883 | u32 crc, stored_crc; |
883 | 884 | ||
884 | if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, READ, false)) | 885 | if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, REQ_OP_READ, 0, |
886 | false)) | ||
885 | return -EIO; | 887 | return -EIO; |
886 | 888 | ||
887 | mb = page_address(page); | 889 | mb = page_address(page); |
@@ -926,7 +928,8 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log, | |||
926 | &disk_index, sh); | 928 | &disk_index, sh); |
927 | 929 | ||
928 | sync_page_io(log->rdev, *log_offset, PAGE_SIZE, | 930 | sync_page_io(log->rdev, *log_offset, PAGE_SIZE, |
929 | sh->dev[disk_index].page, READ, false); | 931 | sh->dev[disk_index].page, REQ_OP_READ, 0, |
932 | false); | ||
930 | sh->dev[disk_index].log_checksum = | 933 | sh->dev[disk_index].log_checksum = |
931 | le32_to_cpu(payload->checksum[0]); | 934 | le32_to_cpu(payload->checksum[0]); |
932 | set_bit(R5_Wantwrite, &sh->dev[disk_index].flags); | 935 | set_bit(R5_Wantwrite, &sh->dev[disk_index].flags); |
@@ -934,7 +937,8 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log, | |||
934 | } else { | 937 | } else { |
935 | disk_index = sh->pd_idx; | 938 | disk_index = sh->pd_idx; |
936 | sync_page_io(log->rdev, *log_offset, PAGE_SIZE, | 939 | sync_page_io(log->rdev, *log_offset, PAGE_SIZE, |
937 | sh->dev[disk_index].page, READ, false); | 940 | sh->dev[disk_index].page, REQ_OP_READ, 0, |
941 | false); | ||
938 | sh->dev[disk_index].log_checksum = | 942 | sh->dev[disk_index].log_checksum = |
939 | le32_to_cpu(payload->checksum[0]); | 943 | le32_to_cpu(payload->checksum[0]); |
940 | set_bit(R5_Wantwrite, &sh->dev[disk_index].flags); | 944 | set_bit(R5_Wantwrite, &sh->dev[disk_index].flags); |
@@ -944,7 +948,7 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log, | |||
944 | sync_page_io(log->rdev, | 948 | sync_page_io(log->rdev, |
945 | r5l_ring_add(log, *log_offset, BLOCK_SECTORS), | 949 | r5l_ring_add(log, *log_offset, BLOCK_SECTORS), |
946 | PAGE_SIZE, sh->dev[disk_index].page, | 950 | PAGE_SIZE, sh->dev[disk_index].page, |
947 | READ, false); | 951 | REQ_OP_READ, 0, false); |
948 | sh->dev[disk_index].log_checksum = | 952 | sh->dev[disk_index].log_checksum = |
949 | le32_to_cpu(payload->checksum[1]); | 953 | le32_to_cpu(payload->checksum[1]); |
950 | set_bit(R5_Wantwrite, | 954 | set_bit(R5_Wantwrite, |
@@ -986,11 +990,13 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log, | |||
986 | rdev = rcu_dereference(conf->disks[disk_index].rdev); | 990 | rdev = rcu_dereference(conf->disks[disk_index].rdev); |
987 | if (rdev) | 991 | if (rdev) |
988 | sync_page_io(rdev, stripe_sect, PAGE_SIZE, | 992 | sync_page_io(rdev, stripe_sect, PAGE_SIZE, |
989 | sh->dev[disk_index].page, WRITE, false); | 993 | sh->dev[disk_index].page, REQ_OP_WRITE, 0, |
994 | false); | ||
990 | rrdev = rcu_dereference(conf->disks[disk_index].replacement); | 995 | rrdev = rcu_dereference(conf->disks[disk_index].replacement); |
991 | if (rrdev) | 996 | if (rrdev) |
992 | sync_page_io(rrdev, stripe_sect, PAGE_SIZE, | 997 | sync_page_io(rrdev, stripe_sect, PAGE_SIZE, |
993 | sh->dev[disk_index].page, WRITE, false); | 998 | sh->dev[disk_index].page, REQ_OP_WRITE, 0, |
999 | false); | ||
994 | } | 1000 | } |
995 | raid5_release_stripe(sh); | 1001 | raid5_release_stripe(sh); |
996 | return 0; | 1002 | return 0; |
@@ -1062,7 +1068,8 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos, | |||
1062 | crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE); | 1068 | crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE); |
1063 | mb->checksum = cpu_to_le32(crc); | 1069 | mb->checksum = cpu_to_le32(crc); |
1064 | 1070 | ||
1065 | if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, WRITE_FUA, false)) { | 1071 | if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE, |
1072 | WRITE_FUA, false)) { | ||
1066 | __free_page(page); | 1073 | __free_page(page); |
1067 | return -EIO; | 1074 | return -EIO; |
1068 | } | 1075 | } |
@@ -1137,7 +1144,7 @@ static int r5l_load_log(struct r5l_log *log) | |||
1137 | if (!page) | 1144 | if (!page) |
1138 | return -ENOMEM; | 1145 | return -ENOMEM; |
1139 | 1146 | ||
1140 | if (!sync_page_io(rdev, cp, PAGE_SIZE, page, READ, false)) { | 1147 | if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, 0, false)) { |
1141 | ret = -EIO; | 1148 | ret = -EIO; |
1142 | goto ioerr; | 1149 | goto ioerr; |
1143 | } | 1150 | } |
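The raid5 write-back log preserves its ordering guarantees through flush and FUA writes; both are now expressed as op_flags beside REQ_OP_WRITE rather than folded into the submission call. The log flush path reduces to:

    bio_reset(&log->flush_bio);
    log->flush_bio.bi_bdev = log->rdev->bdev;
    log->flush_bio.bi_end_io = r5l_log_flush_endio;
    bio_set_op_attrs(&log->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
    submit_bio(&log->flush_bio);

while the empty meta block is written synchronously with sync_page_io(..., REQ_OP_WRITE, WRITE_FUA, false), so a crash cannot leave it cached but unwritten.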
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 8959e6dd31dd..7aacf5b55e15 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -806,7 +806,8 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh | |||
806 | dd_idx = 0; | 806 | dd_idx = 0; |
807 | while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) | 807 | while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) |
808 | dd_idx++; | 808 | dd_idx++; |
809 | if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw) | 809 | if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw || |
810 | bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite)) | ||
810 | goto unlock_out; | 811 | goto unlock_out; |
811 | 812 | ||
812 | if (head->batch_head) { | 813 | if (head->batch_head) { |
@@ -891,29 +892,28 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) | |||
891 | if (r5l_write_stripe(conf->log, sh) == 0) | 892 | if (r5l_write_stripe(conf->log, sh) == 0) |
892 | return; | 893 | return; |
893 | for (i = disks; i--; ) { | 894 | for (i = disks; i--; ) { |
894 | int rw; | 895 | int op, op_flags = 0; |
895 | int replace_only = 0; | 896 | int replace_only = 0; |
896 | struct bio *bi, *rbi; | 897 | struct bio *bi, *rbi; |
897 | struct md_rdev *rdev, *rrdev = NULL; | 898 | struct md_rdev *rdev, *rrdev = NULL; |
898 | 899 | ||
899 | sh = head_sh; | 900 | sh = head_sh; |
900 | if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { | 901 | if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { |
902 | op = REQ_OP_WRITE; | ||
901 | if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) | 903 | if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) |
902 | rw = WRITE_FUA; | 904 | op_flags = WRITE_FUA; |
903 | else | ||
904 | rw = WRITE; | ||
905 | if (test_bit(R5_Discard, &sh->dev[i].flags)) | 905 | if (test_bit(R5_Discard, &sh->dev[i].flags)) |
906 | rw |= REQ_DISCARD; | 906 | op = REQ_OP_DISCARD; |
907 | } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) | 907 | } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) |
908 | rw = READ; | 908 | op = REQ_OP_READ; |
909 | else if (test_and_clear_bit(R5_WantReplace, | 909 | else if (test_and_clear_bit(R5_WantReplace, |
910 | &sh->dev[i].flags)) { | 910 | &sh->dev[i].flags)) { |
911 | rw = WRITE; | 911 | op = REQ_OP_WRITE; |
912 | replace_only = 1; | 912 | replace_only = 1; |
913 | } else | 913 | } else |
914 | continue; | 914 | continue; |
915 | if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags)) | 915 | if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags)) |
916 | rw |= REQ_SYNC; | 916 | op_flags |= REQ_SYNC; |
917 | 917 | ||
918 | again: | 918 | again: |
919 | bi = &sh->dev[i].req; | 919 | bi = &sh->dev[i].req; |
@@ -927,7 +927,7 @@ again: | |||
927 | rdev = rrdev; | 927 | rdev = rrdev; |
928 | rrdev = NULL; | 928 | rrdev = NULL; |
929 | } | 929 | } |
930 | if (rw & WRITE) { | 930 | if (op_is_write(op)) { |
931 | if (replace_only) | 931 | if (replace_only) |
932 | rdev = NULL; | 932 | rdev = NULL; |
933 | if (rdev == rrdev) | 933 | if (rdev == rrdev) |
@@ -953,7 +953,7 @@ again: | |||
953 | * need to check for writes. We never accept write errors | 953 | * need to check for writes. We never accept write errors |
954 | * on the replacement, so we don't to check rrdev. | 954 | * on the replacement, so we don't to check rrdev. |
955 | */ | 955 | */ |
956 | while ((rw & WRITE) && rdev && | 956 | while (op_is_write(op) && rdev && |
957 | test_bit(WriteErrorSeen, &rdev->flags)) { | 957 | test_bit(WriteErrorSeen, &rdev->flags)) { |
958 | sector_t first_bad; | 958 | sector_t first_bad; |
959 | int bad_sectors; | 959 | int bad_sectors; |
@@ -995,13 +995,13 @@ again: | |||
995 | 995 | ||
996 | bio_reset(bi); | 996 | bio_reset(bi); |
997 | bi->bi_bdev = rdev->bdev; | 997 | bi->bi_bdev = rdev->bdev; |
998 | bi->bi_rw = rw; | 998 | bio_set_op_attrs(bi, op, op_flags); |
999 | bi->bi_end_io = (rw & WRITE) | 999 | bi->bi_end_io = op_is_write(op) |
1000 | ? raid5_end_write_request | 1000 | ? raid5_end_write_request |
1001 | : raid5_end_read_request; | 1001 | : raid5_end_read_request; |
1002 | bi->bi_private = sh; | 1002 | bi->bi_private = sh; |
1003 | 1003 | ||
1004 | pr_debug("%s: for %llu schedule op %ld on disc %d\n", | 1004 | pr_debug("%s: for %llu schedule op %d on disc %d\n", |
1005 | __func__, (unsigned long long)sh->sector, | 1005 | __func__, (unsigned long long)sh->sector, |
1006 | bi->bi_rw, i); | 1006 | bi->bi_rw, i); |
1007 | atomic_inc(&sh->count); | 1007 | atomic_inc(&sh->count); |
@@ -1027,7 +1027,7 @@ again: | |||
1027 | * If this is a discard request, set bi_vcnt to 0. We don't | 1027 | * If this is a discard request, set bi_vcnt to 0. We don't |
1028 | * want to confuse SCSI because SCSI will replace the payload | 1028 | * want to confuse SCSI because SCSI will replace the payload |
1029 | */ | 1029 | */ |
1030 | if (rw & REQ_DISCARD) | 1030 | if (op == REQ_OP_DISCARD) |
1031 | bi->bi_vcnt = 0; | 1031 | bi->bi_vcnt = 0; |
1032 | if (rrdev) | 1032 | if (rrdev) |
1033 | set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); | 1033 | set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); |
@@ -1047,12 +1047,12 @@ again: | |||
1047 | 1047 | ||
1048 | bio_reset(rbi); | 1048 | bio_reset(rbi); |
1049 | rbi->bi_bdev = rrdev->bdev; | 1049 | rbi->bi_bdev = rrdev->bdev; |
1050 | rbi->bi_rw = rw; | 1050 | bio_set_op_attrs(rbi, op, op_flags); |
1051 | BUG_ON(!(rw & WRITE)); | 1051 | BUG_ON(!op_is_write(op)); |
1052 | rbi->bi_end_io = raid5_end_write_request; | 1052 | rbi->bi_end_io = raid5_end_write_request; |
1053 | rbi->bi_private = sh; | 1053 | rbi->bi_private = sh; |
1054 | 1054 | ||
1055 | pr_debug("%s: for %llu schedule op %ld on " | 1055 | pr_debug("%s: for %llu schedule op %d on " |
1056 | "replacement disc %d\n", | 1056 | "replacement disc %d\n", |
1057 | __func__, (unsigned long long)sh->sector, | 1057 | __func__, (unsigned long long)sh->sector, |
1058 | rbi->bi_rw, i); | 1058 | rbi->bi_rw, i); |
@@ -1076,7 +1076,7 @@ again: | |||
1076 | * If this is a discard request, set bi_vcnt to 0. We don't | 1076 | * If this is a discard request, set bi_vcnt to 0. We don't |
1077 | * want to confuse SCSI because SCSI will replace the payload | 1077 | * want to confuse SCSI because SCSI will replace the payload |
1078 | */ | 1078 | */ |
1079 | if (rw & REQ_DISCARD) | 1079 | if (op == REQ_OP_DISCARD) |
1080 | rbi->bi_vcnt = 0; | 1080 | rbi->bi_vcnt = 0; |
1081 | if (conf->mddev->gendisk) | 1081 | if (conf->mddev->gendisk) |
1082 | trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), | 1082 | trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), |
@@ -1085,9 +1085,9 @@ again: | |||
1085 | generic_make_request(rbi); | 1085 | generic_make_request(rbi); |
1086 | } | 1086 | } |
1087 | if (!rdev && !rrdev) { | 1087 | if (!rdev && !rrdev) { |
1088 | if (rw & WRITE) | 1088 | if (op_is_write(op)) |
1089 | set_bit(STRIPE_DEGRADED, &sh->state); | 1089 | set_bit(STRIPE_DEGRADED, &sh->state); |
1090 | pr_debug("skip op %ld on disc %d for sector %llu\n", | 1090 | pr_debug("skip op %d on disc %d for sector %llu\n", |
1091 | bi->bi_rw, i, (unsigned long long)sh->sector); | 1091 | bi->bi_rw, i, (unsigned long long)sh->sector); |
1092 | clear_bit(R5_LOCKED, &sh->dev[i].flags); | 1092 | clear_bit(R5_LOCKED, &sh->dev[i].flags); |
1093 | set_bit(STRIPE_HANDLE, &sh->state); | 1093 | set_bit(STRIPE_HANDLE, &sh->state); |
@@ -1623,7 +1623,7 @@ again: | |||
1623 | set_bit(R5_WantFUA, &dev->flags); | 1623 | set_bit(R5_WantFUA, &dev->flags); |
1624 | if (wbi->bi_rw & REQ_SYNC) | 1624 | if (wbi->bi_rw & REQ_SYNC) |
1625 | set_bit(R5_SyncIO, &dev->flags); | 1625 | set_bit(R5_SyncIO, &dev->flags); |
1626 | if (wbi->bi_rw & REQ_DISCARD) | 1626 | if (bio_op(wbi) == REQ_OP_DISCARD) |
1627 | set_bit(R5_Discard, &dev->flags); | 1627 | set_bit(R5_Discard, &dev->flags); |
1628 | else { | 1628 | else { |
1629 | tx = async_copy_data(1, wbi, &dev->page, | 1629 | tx = async_copy_data(1, wbi, &dev->page, |
@@ -5150,7 +5150,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi) | |||
5150 | DEFINE_WAIT(w); | 5150 | DEFINE_WAIT(w); |
5151 | bool do_prepare; | 5151 | bool do_prepare; |
5152 | 5152 | ||
5153 | if (unlikely(bi->bi_rw & REQ_FLUSH)) { | 5153 | if (unlikely(bi->bi_rw & REQ_PREFLUSH)) { |
5154 | int ret = r5l_handle_flush_request(conf->log, bi); | 5154 | int ret = r5l_handle_flush_request(conf->log, bi); |
5155 | 5155 | ||
5156 | if (ret == 0) | 5156 | if (ret == 0) |
@@ -5176,7 +5176,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi) | |||
5176 | return; | 5176 | return; |
5177 | } | 5177 | } |
5178 | 5178 | ||
5179 | if (unlikely(bi->bi_rw & REQ_DISCARD)) { | 5179 | if (unlikely(bio_op(bi) == REQ_OP_DISCARD)) { |
5180 | make_discard_request(mddev, bi); | 5180 | make_discard_request(mddev, bi); |
5181 | return; | 5181 | return; |
5182 | } | 5182 | } |
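
The raid5 hunks above show the shape of the whole series: the old rw bitmask is split into an operation (REQ_OP_READ, REQ_OP_WRITE, REQ_OP_DISCARD) plus separate op_flags (REQ_SYNC, the WRITE_FUA set), and direction tests become op_is_write() instead of masking with WRITE. A minimal sketch of the pattern against the 4.8-era API; the helper name and its bool parameters are illustrative, not from the patch:

#include <linux/bio.h>

/* Sketch: pick op and op_flags the way ops_run_io() now does,
 * then stamp both onto the bio in a single call. */
static void prep_stripe_bio(struct bio *bio, bool write, bool fua, bool sync)
{
        int op = write ? REQ_OP_WRITE : REQ_OP_READ;
        int op_flags = 0;

        if (write && fua)
                op_flags = WRITE_FUA;   /* flags no longer encode direction */
        if (sync)
                op_flags |= REQ_SYNC;

        bio_set_op_attrs(bio, op, op_flags);    /* replaces bi->bi_rw = rw */
}
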
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index c5472e3c9231..11ee4145983b 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c | |||
@@ -1724,8 +1724,8 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req) | |||
1724 | !IS_ALIGNED(blk_rq_sectors(next), 8)) | 1724 | !IS_ALIGNED(blk_rq_sectors(next), 8)) |
1725 | break; | 1725 | break; |
1726 | 1726 | ||
1727 | if (next->cmd_flags & REQ_DISCARD || | 1727 | if (req_op(next) == REQ_OP_DISCARD || |
1728 | next->cmd_flags & REQ_FLUSH) | 1728 | req_op(next) == REQ_OP_FLUSH) |
1729 | break; | 1729 | break; |
1730 | 1730 | ||
1731 | if (rq_data_dir(cur) != rq_data_dir(next)) | 1731 | if (rq_data_dir(cur) != rq_data_dir(next)) |
@@ -2150,7 +2150,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) | |||
2150 | struct mmc_card *card = md->queue.card; | 2150 | struct mmc_card *card = md->queue.card; |
2151 | struct mmc_host *host = card->host; | 2151 | struct mmc_host *host = card->host; |
2152 | unsigned long flags; | 2152 | unsigned long flags; |
2153 | unsigned int cmd_flags = req ? req->cmd_flags : 0; | ||
2154 | 2153 | ||
2155 | if (req && !mq->mqrq_prev->req) | 2154 | if (req && !mq->mqrq_prev->req) |
2156 | /* claim host only for the first request */ | 2155 | /* claim host only for the first request */ |
@@ -2166,7 +2165,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) | |||
2166 | } | 2165 | } |
2167 | 2166 | ||
2168 | mq->flags &= ~MMC_QUEUE_NEW_REQUEST; | 2167 | mq->flags &= ~MMC_QUEUE_NEW_REQUEST; |
2169 | if (cmd_flags & REQ_DISCARD) { | 2168 | if (req && req_op(req) == REQ_OP_DISCARD) { |
2170 | /* complete ongoing async transfer before issuing discard */ | 2169 | /* complete ongoing async transfer before issuing discard */ |
2171 | if (card->host->areq) | 2170 | if (card->host->areq) |
2172 | mmc_blk_issue_rw_rq(mq, NULL); | 2171 | mmc_blk_issue_rw_rq(mq, NULL); |
@@ -2174,7 +2173,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) | |||
2174 | ret = mmc_blk_issue_secdiscard_rq(mq, req); | 2173 | ret = mmc_blk_issue_secdiscard_rq(mq, req); |
2175 | else | 2174 | else |
2176 | ret = mmc_blk_issue_discard_rq(mq, req); | 2175 | ret = mmc_blk_issue_discard_rq(mq, req); |
2177 | } else if (cmd_flags & REQ_FLUSH) { | 2176 | } else if (req && req_op(req) == REQ_OP_FLUSH) { |
2178 | /* complete ongoing async transfer before issuing flush */ | 2177 | /* complete ongoing async transfer before issuing flush */ |
2179 | if (card->host->areq) | 2178 | if (card->host->areq) |
2180 | mmc_blk_issue_rw_rq(mq, NULL); | 2179 | mmc_blk_issue_rw_rq(mq, NULL); |
@@ -2190,7 +2189,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) | |||
2190 | 2189 | ||
2191 | out: | 2190 | out: |
2192 | if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || | 2191 | if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || |
2193 | (cmd_flags & MMC_REQ_SPECIAL_MASK)) | 2192 | mmc_req_is_special(req)) |
2194 | /* | 2193 | /* |
2195 | * Release host when there are no more requests | 2194 | * Release host when there are no more requests |
2196 | * and after special request(discard, flush) is done. | 2195 | * and after special request(discard, flush) is done. |
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index 6f4323c6d653..c2d5f6f35145 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c | |||
@@ -33,7 +33,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req) | |||
33 | /* | 33 | /* |
34 | * We only like normal block requests and discards. | 34 | * We only like normal block requests and discards. |
35 | */ | 35 | */ |
36 | if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) { | 36 | if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD) { |
37 | blk_dump_rq_flags(req, "MMC bad request"); | 37 | blk_dump_rq_flags(req, "MMC bad request"); |
38 | return BLKPREP_KILL; | 38 | return BLKPREP_KILL; |
39 | } | 39 | } |
@@ -56,7 +56,6 @@ static int mmc_queue_thread(void *d) | |||
56 | down(&mq->thread_sem); | 56 | down(&mq->thread_sem); |
57 | do { | 57 | do { |
58 | struct request *req = NULL; | 58 | struct request *req = NULL; |
59 | unsigned int cmd_flags = 0; | ||
60 | 59 | ||
61 | spin_lock_irq(q->queue_lock); | 60 | spin_lock_irq(q->queue_lock); |
62 | set_current_state(TASK_INTERRUPTIBLE); | 61 | set_current_state(TASK_INTERRUPTIBLE); |
@@ -66,7 +65,6 @@ static int mmc_queue_thread(void *d) | |||
66 | 65 | ||
67 | if (req || mq->mqrq_prev->req) { | 66 | if (req || mq->mqrq_prev->req) { |
68 | set_current_state(TASK_RUNNING); | 67 | set_current_state(TASK_RUNNING); |
69 | cmd_flags = req ? req->cmd_flags : 0; | ||
70 | mq->issue_fn(mq, req); | 68 | mq->issue_fn(mq, req); |
71 | cond_resched(); | 69 | cond_resched(); |
72 | if (mq->flags & MMC_QUEUE_NEW_REQUEST) { | 70 | if (mq->flags & MMC_QUEUE_NEW_REQUEST) { |
@@ -81,7 +79,7 @@ static int mmc_queue_thread(void *d) | |||
81 | * has been finished. Do not assign it to previous | 79 | * has been finished. Do not assign it to previous |
82 | * request. | 80 | * request. |
83 | */ | 81 | */ |
84 | if (cmd_flags & MMC_REQ_SPECIAL_MASK) | 82 | if (mmc_req_is_special(req)) |
85 | mq->mqrq_cur->req = NULL; | 83 | mq->mqrq_cur->req = NULL; |
86 | 84 | ||
87 | mq->mqrq_prev->brq.mrq.data = NULL; | 85 | mq->mqrq_prev->brq.mrq.data = NULL; |
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h index 36cddab57d77..d62531124d54 100644 --- a/drivers/mmc/card/queue.h +++ b/drivers/mmc/card/queue.h | |||
@@ -1,7 +1,11 @@ | |||
1 | #ifndef MMC_QUEUE_H | 1 | #ifndef MMC_QUEUE_H |
2 | #define MMC_QUEUE_H | 2 | #define MMC_QUEUE_H |
3 | 3 | ||
4 | #define MMC_REQ_SPECIAL_MASK (REQ_DISCARD | REQ_FLUSH) | 4 | static inline bool mmc_req_is_special(struct request *req) |
5 | { | ||
6 | return req && | ||
7 | (req_op(req) == REQ_OP_FLUSH || req_op(req) == REQ_OP_DISCARD); | ||
8 | } | ||
5 | 9 | ||
6 | struct request; | 10 | struct request; |
7 | struct task_struct; | 11 | struct task_struct; |
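
The new helper centralizes the NULL test that callers previously open-coded via cmd_flags snapshots, so both call sites above reduce to a single predicate. A hedged sketch of the call-site shape; the wrapper name is invented for illustration:

/* Sketch: mirrors the release-host test in mmc_blk_issue_rq() above. */
static bool mmc_should_release_host(struct mmc_queue *mq, struct request *req)
{
        /* NULL-safe: mmc_req_is_special(NULL) returns false. */
        return (!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
               mmc_req_is_special(req);
}
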
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 74ae24364a8d..78b3eb45faf6 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c | |||
@@ -87,14 +87,14 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, | |||
87 | if (req->cmd_type != REQ_TYPE_FS) | 87 | if (req->cmd_type != REQ_TYPE_FS) |
88 | return -EIO; | 88 | return -EIO; |
89 | 89 | ||
90 | if (req->cmd_flags & REQ_FLUSH) | 90 | if (req_op(req) == REQ_OP_FLUSH) |
91 | return tr->flush(dev); | 91 | return tr->flush(dev); |
92 | 92 | ||
93 | if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > | 93 | if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > |
94 | get_capacity(req->rq_disk)) | 94 | get_capacity(req->rq_disk)) |
95 | return -EIO; | 95 | return -EIO; |
96 | 96 | ||
97 | if (req->cmd_flags & REQ_DISCARD) | 97 | if (req_op(req) == REQ_OP_DISCARD) |
98 | return tr->discard(dev, block, nsect); | 98 | return tr->discard(dev, block, nsect); |
99 | 99 | ||
100 | if (rq_data_dir(req) == READ) { | 100 | if (rq_data_dir(req) == READ) { |
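
do_blktrans_request() now dispatches on req_op() rather than testing cmd_flags bits. Because an operation is a single enumerated value instead of independent flags, the cases are mutually exclusive by construction. A sketch of the same dispatch shape; the handler names are hypothetical, standing in for tr->flush(), tr->discard() and the read/write path:

/* Sketch: one op value per request, so exactly one case can match. */
static int dispatch_by_op(struct request *req)
{
        switch (req_op(req)) {
        case REQ_OP_FLUSH:
                return handle_flush(req);       /* hypothetical handler */
        case REQ_OP_DISCARD:
                return handle_discard(req);     /* hypothetical handler */
        case REQ_OP_READ:
        case REQ_OP_WRITE:
                return handle_rw(req);          /* hypothetical handler */
        default:
                return -EIO;
        }
}
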
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 608fc4464574..53b701b2f73e 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c | |||
@@ -283,6 +283,7 @@ static int pmem_attach_disk(struct device *dev, | |||
283 | blk_queue_max_hw_sectors(q, UINT_MAX); | 283 | blk_queue_max_hw_sectors(q, UINT_MAX); |
284 | blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); | 284 | blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); |
285 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); | 285 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); |
286 | queue_flag_set_unlocked(QUEUE_FLAG_DAX, q); | ||
286 | q->queuedata = pmem; | 287 | q->queuedata = pmem; |
287 | 288 | ||
288 | disk = alloc_disk_node(0, nid); | 289 | disk = alloc_disk_node(0, nid); |
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index d5fb55c0a9d9..1c5a032d490d 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c | |||
@@ -290,9 +290,9 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req, | |||
290 | 290 | ||
291 | if (req->cmd_type == REQ_TYPE_DRV_PRIV) | 291 | if (req->cmd_type == REQ_TYPE_DRV_PRIV) |
292 | memcpy(cmd, req->cmd, sizeof(*cmd)); | 292 | memcpy(cmd, req->cmd, sizeof(*cmd)); |
293 | else if (req->cmd_flags & REQ_FLUSH) | 293 | else if (req_op(req) == REQ_OP_FLUSH) |
294 | nvme_setup_flush(ns, cmd); | 294 | nvme_setup_flush(ns, cmd); |
295 | else if (req->cmd_flags & REQ_DISCARD) | 295 | else if (req_op(req) == REQ_OP_DISCARD) |
296 | ret = nvme_setup_discard(ns, req, cmd); | 296 | ret = nvme_setup_discard(ns, req, cmd); |
297 | else | 297 | else |
298 | nvme_setup_rw(ns, req, cmd); | 298 | nvme_setup_rw(ns, req, cmd); |
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 1daa0482de0e..4d196d2d57da 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h | |||
@@ -177,7 +177,7 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector) | |||
177 | 177 | ||
178 | static inline unsigned nvme_map_len(struct request *rq) | 178 | static inline unsigned nvme_map_len(struct request *rq) |
179 | { | 179 | { |
180 | if (rq->cmd_flags & REQ_DISCARD) | 180 | if (req_op(rq) == REQ_OP_DISCARD) |
181 | return sizeof(struct nvme_dsm_range); | 181 | return sizeof(struct nvme_dsm_range); |
182 | else | 182 | else |
183 | return blk_rq_bytes(rq); | 183 | return blk_rq_bytes(rq); |
@@ -185,7 +185,7 @@ static inline unsigned nvme_map_len(struct request *rq) | |||
185 | 185 | ||
186 | static inline void nvme_cleanup_cmd(struct request *req) | 186 | static inline void nvme_cleanup_cmd(struct request *req) |
187 | { | 187 | { |
188 | if (req->cmd_flags & REQ_DISCARD) | 188 | if (req_op(req) == REQ_OP_DISCARD) |
189 | kfree(req->completion_data); | 189 | kfree(req->completion_data); |
190 | } | 190 | } |
191 | 191 | ||
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index bed53c46dd90..093e9e18e7e7 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c | |||
@@ -618,6 +618,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char | |||
618 | dev_info->gd->driverfs_dev = &dev_info->dev; | 618 | dev_info->gd->driverfs_dev = &dev_info->dev; |
619 | blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request); | 619 | blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request); |
620 | blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096); | 620 | blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096); |
621 | queue_flag_set_unlocked(QUEUE_FLAG_DAX, dev_info->dcssblk_queue); | ||
621 | 622 | ||
622 | seg_byte_size = (dev_info->end - dev_info->start + 1); | 623 | seg_byte_size = (dev_info->end - dev_info->start + 1); |
623 | set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors | 624 | set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors |
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index 3b11aad03752..daa4dc17f172 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c | |||
@@ -726,7 +726,7 @@ static int _osd_req_list_objects(struct osd_request *or, | |||
726 | return PTR_ERR(bio); | 726 | return PTR_ERR(bio); |
727 | } | 727 | } |
728 | 728 | ||
729 | bio->bi_rw &= ~REQ_WRITE; | 729 | bio_set_op_attrs(bio, REQ_OP_READ, 0); |
730 | or->in.bio = bio; | 730 | or->in.bio = bio; |
731 | or->in.total_bytes = bio->bi_iter.bi_size; | 731 | or->in.total_bytes = bio->bi_iter.bi_size; |
732 | return 0; | 732 | return 0; |
@@ -824,7 +824,7 @@ void osd_req_write(struct osd_request *or, | |||
824 | { | 824 | { |
825 | _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len); | 825 | _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len); |
826 | WARN_ON(or->out.bio || or->out.total_bytes); | 826 | WARN_ON(or->out.bio || or->out.total_bytes); |
827 | WARN_ON(0 == (bio->bi_rw & REQ_WRITE)); | 827 | WARN_ON(!op_is_write(bio_op(bio))); |
828 | or->out.bio = bio; | 828 | or->out.bio = bio; |
829 | or->out.total_bytes = len; | 829 | or->out.total_bytes = len; |
830 | } | 830 | } |
@@ -839,7 +839,7 @@ int osd_req_write_kern(struct osd_request *or, | |||
839 | if (IS_ERR(bio)) | 839 | if (IS_ERR(bio)) |
840 | return PTR_ERR(bio); | 840 | return PTR_ERR(bio); |
841 | 841 | ||
842 | bio->bi_rw |= REQ_WRITE; /* FIXME: bio_set_dir() */ | 842 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); |
843 | osd_req_write(or, obj, offset, bio, len); | 843 | osd_req_write(or, obj, offset, bio, len); |
844 | return 0; | 844 | return 0; |
845 | } | 845 | } |
@@ -875,7 +875,7 @@ void osd_req_read(struct osd_request *or, | |||
875 | { | 875 | { |
876 | _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len); | 876 | _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len); |
877 | WARN_ON(or->in.bio || or->in.total_bytes); | 877 | WARN_ON(or->in.bio || or->in.total_bytes); |
878 | WARN_ON(bio->bi_rw & REQ_WRITE); | 878 | WARN_ON(op_is_write(bio_op(bio))); |
879 | or->in.bio = bio; | 879 | or->in.bio = bio; |
880 | or->in.total_bytes = len; | 880 | or->in.total_bytes = len; |
881 | } | 881 | } |
@@ -956,7 +956,7 @@ static int _osd_req_finalize_cdb_cont(struct osd_request *or, const u8 *cap_key) | |||
956 | if (IS_ERR(bio)) | 956 | if (IS_ERR(bio)) |
957 | return PTR_ERR(bio); | 957 | return PTR_ERR(bio); |
958 | 958 | ||
959 | bio->bi_rw |= REQ_WRITE; | 959 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); |
960 | 960 | ||
961 | /* integrity check the continuation before the bio is linked | 961 | /* integrity check the continuation before the bio is linked |
962 | * with the other data segments since the continuation | 962 | * with the other data segments since the continuation |
@@ -1077,7 +1077,7 @@ int osd_req_write_sg_kern(struct osd_request *or, | |||
1077 | if (IS_ERR(bio)) | 1077 | if (IS_ERR(bio)) |
1078 | return PTR_ERR(bio); | 1078 | return PTR_ERR(bio); |
1079 | 1079 | ||
1080 | bio->bi_rw |= REQ_WRITE; | 1080 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); |
1081 | osd_req_write_sg(or, obj, bio, sglist, numentries); | 1081 | osd_req_write_sg(or, obj, bio, sglist, numentries); |
1082 | 1082 | ||
1083 | return 0; | 1083 | return 0; |
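
osd stops poking REQ_WRITE in and out of bi_rw by hand; bio_set_op_attrs() assigns the whole op/flags tuple, which also retires the old "FIXME: bio_set_dir()" note. A minimal sketch of flipping a bio's direction under the new API; the wrapper is illustrative:

#include <linux/bio.h>

/* Sketch: direction is now an op to assign, not a bit to twiddle. */
static void set_bio_dir(struct bio *bio, bool write)
{
        /* Replaces bi_rw |= REQ_WRITE and bi_rw &= ~REQ_WRITE. */
        bio_set_op_attrs(bio, write ? REQ_OP_WRITE : REQ_OP_READ, 0);
        WARN_ON(op_is_write(bio_op(bio)) != write);
}
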
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 60bff78e9ead..0609d6802d93 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -1012,7 +1012,8 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt) | |||
1012 | } else if (rq_data_dir(rq) == READ) { | 1012 | } else if (rq_data_dir(rq) == READ) { |
1013 | SCpnt->cmnd[0] = READ_6; | 1013 | SCpnt->cmnd[0] = READ_6; |
1014 | } else { | 1014 | } else { |
1015 | scmd_printk(KERN_ERR, SCpnt, "Unknown command %llx\n", (unsigned long long) rq->cmd_flags); | 1015 | scmd_printk(KERN_ERR, SCpnt, "Unknown command %llu,%llx\n", |
1016 | req_op(rq), (unsigned long long) rq->cmd_flags); | ||
1016 | goto out; | 1017 | goto out; |
1017 | } | 1018 | } |
1018 | 1019 | ||
@@ -1137,21 +1138,26 @@ static int sd_init_command(struct scsi_cmnd *cmd) | |||
1137 | { | 1138 | { |
1138 | struct request *rq = cmd->request; | 1139 | struct request *rq = cmd->request; |
1139 | 1140 | ||
1140 | if (rq->cmd_flags & REQ_DISCARD) | 1141 | switch (req_op(rq)) { |
1142 | case REQ_OP_DISCARD: | ||
1141 | return sd_setup_discard_cmnd(cmd); | 1143 | return sd_setup_discard_cmnd(cmd); |
1142 | else if (rq->cmd_flags & REQ_WRITE_SAME) | 1144 | case REQ_OP_WRITE_SAME: |
1143 | return sd_setup_write_same_cmnd(cmd); | 1145 | return sd_setup_write_same_cmnd(cmd); |
1144 | else if (rq->cmd_flags & REQ_FLUSH) | 1146 | case REQ_OP_FLUSH: |
1145 | return sd_setup_flush_cmnd(cmd); | 1147 | return sd_setup_flush_cmnd(cmd); |
1146 | else | 1148 | case REQ_OP_READ: |
1149 | case REQ_OP_WRITE: | ||
1147 | return sd_setup_read_write_cmnd(cmd); | 1150 | return sd_setup_read_write_cmnd(cmd); |
1151 | default: | ||
1152 | BUG(); | ||
1153 | } | ||
1148 | } | 1154 | } |
1149 | 1155 | ||
1150 | static void sd_uninit_command(struct scsi_cmnd *SCpnt) | 1156 | static void sd_uninit_command(struct scsi_cmnd *SCpnt) |
1151 | { | 1157 | { |
1152 | struct request *rq = SCpnt->request; | 1158 | struct request *rq = SCpnt->request; |
1153 | 1159 | ||
1154 | if (rq->cmd_flags & REQ_DISCARD) | 1160 | if (req_op(rq) == REQ_OP_DISCARD) |
1155 | __free_page(rq->completion_data); | 1161 | __free_page(rq->completion_data); |
1156 | 1162 | ||
1157 | if (SCpnt->cmnd != rq->cmd) { | 1163 | if (SCpnt->cmnd != rq->cmd) { |
@@ -1774,7 +1780,7 @@ static int sd_done(struct scsi_cmnd *SCpnt) | |||
1774 | unsigned char op = SCpnt->cmnd[0]; | 1780 | unsigned char op = SCpnt->cmnd[0]; |
1775 | unsigned char unmap = SCpnt->cmnd[1] & 8; | 1781 | unsigned char unmap = SCpnt->cmnd[1] & 8; |
1776 | 1782 | ||
1777 | if (req->cmd_flags & REQ_DISCARD || req->cmd_flags & REQ_WRITE_SAME) { | 1783 | if (req_op(req) == REQ_OP_DISCARD || req_op(req) == REQ_OP_WRITE_SAME) { |
1778 | if (!result) { | 1784 | if (!result) { |
1779 | good_bytes = blk_rq_bytes(req); | 1785 | good_bytes = blk_rq_bytes(req); |
1780 | scsi_set_resid(SCpnt, 0); | 1786 | scsi_set_resid(SCpnt, 0); |
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 7c4efb4417b0..22af12f8b8eb 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c | |||
@@ -312,7 +312,8 @@ static void iblock_bio_done(struct bio *bio) | |||
312 | } | 312 | } |
313 | 313 | ||
314 | static struct bio * | 314 | static struct bio * |
315 | iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num) | 315 | iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op, |
316 | int op_flags) | ||
316 | { | 317 | { |
317 | struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev); | 318 | struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev); |
318 | struct bio *bio; | 319 | struct bio *bio; |
@@ -334,18 +335,19 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num) | |||
334 | bio->bi_private = cmd; | 335 | bio->bi_private = cmd; |
335 | bio->bi_end_io = &iblock_bio_done; | 336 | bio->bi_end_io = &iblock_bio_done; |
336 | bio->bi_iter.bi_sector = lba; | 337 | bio->bi_iter.bi_sector = lba; |
338 | bio_set_op_attrs(bio, op, op_flags); | ||
337 | 339 | ||
338 | return bio; | 340 | return bio; |
339 | } | 341 | } |
340 | 342 | ||
341 | static void iblock_submit_bios(struct bio_list *list, int rw) | 343 | static void iblock_submit_bios(struct bio_list *list) |
342 | { | 344 | { |
343 | struct blk_plug plug; | 345 | struct blk_plug plug; |
344 | struct bio *bio; | 346 | struct bio *bio; |
345 | 347 | ||
346 | blk_start_plug(&plug); | 348 | blk_start_plug(&plug); |
347 | while ((bio = bio_list_pop(list))) | 349 | while ((bio = bio_list_pop(list))) |
348 | submit_bio(rw, bio); | 350 | submit_bio(bio); |
349 | blk_finish_plug(&plug); | 351 | blk_finish_plug(&plug); |
350 | } | 352 | } |
351 | 353 | ||
@@ -387,9 +389,10 @@ iblock_execute_sync_cache(struct se_cmd *cmd) | |||
387 | bio = bio_alloc(GFP_KERNEL, 0); | 389 | bio = bio_alloc(GFP_KERNEL, 0); |
388 | bio->bi_end_io = iblock_end_io_flush; | 390 | bio->bi_end_io = iblock_end_io_flush; |
389 | bio->bi_bdev = ib_dev->ibd_bd; | 391 | bio->bi_bdev = ib_dev->ibd_bd; |
392 | bio->bi_rw = WRITE_FLUSH; | ||
390 | if (!immed) | 393 | if (!immed) |
391 | bio->bi_private = cmd; | 394 | bio->bi_private = cmd; |
392 | submit_bio(WRITE_FLUSH, bio); | 395 | submit_bio(bio); |
393 | return 0; | 396 | return 0; |
394 | } | 397 | } |
395 | 398 | ||
@@ -478,7 +481,7 @@ iblock_execute_write_same(struct se_cmd *cmd) | |||
478 | goto fail; | 481 | goto fail; |
479 | cmd->priv = ibr; | 482 | cmd->priv = ibr; |
480 | 483 | ||
481 | bio = iblock_get_bio(cmd, block_lba, 1); | 484 | bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE, 0); |
482 | if (!bio) | 485 | if (!bio) |
483 | goto fail_free_ibr; | 486 | goto fail_free_ibr; |
484 | 487 | ||
@@ -491,7 +494,8 @@ iblock_execute_write_same(struct se_cmd *cmd) | |||
491 | while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) | 494 | while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) |
492 | != sg->length) { | 495 | != sg->length) { |
493 | 496 | ||
494 | bio = iblock_get_bio(cmd, block_lba, 1); | 497 | bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE, |
498 | 0); | ||
495 | if (!bio) | 499 | if (!bio) |
496 | goto fail_put_bios; | 500 | goto fail_put_bios; |
497 | 501 | ||
@@ -504,7 +508,7 @@ iblock_execute_write_same(struct se_cmd *cmd) | |||
504 | sectors -= 1; | 508 | sectors -= 1; |
505 | } | 509 | } |
506 | 510 | ||
507 | iblock_submit_bios(&list, WRITE); | 511 | iblock_submit_bios(&list); |
508 | return 0; | 512 | return 0; |
509 | 513 | ||
510 | fail_put_bios: | 514 | fail_put_bios: |
@@ -677,8 +681,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
677 | struct scatterlist *sg; | 681 | struct scatterlist *sg; |
678 | u32 sg_num = sgl_nents; | 682 | u32 sg_num = sgl_nents; |
679 | unsigned bio_cnt; | 683 | unsigned bio_cnt; |
680 | int rw = 0; | 684 | int i, op, op_flags = 0; |
681 | int i; | ||
682 | 685 | ||
683 | if (data_direction == DMA_TO_DEVICE) { | 686 | if (data_direction == DMA_TO_DEVICE) { |
684 | struct iblock_dev *ib_dev = IBLOCK_DEV(dev); | 687 | struct iblock_dev *ib_dev = IBLOCK_DEV(dev); |
@@ -687,18 +690,15 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
687 | * Force writethrough using WRITE_FUA if a volatile write cache | 690 | * Force writethrough using WRITE_FUA if a volatile write cache |
688 | * is not enabled, or if initiator set the Force Unit Access bit. | 691 | * is not enabled, or if initiator set the Force Unit Access bit. |
689 | */ | 692 | */ |
693 | op = REQ_OP_WRITE; | ||
690 | if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) { | 694 | if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) { |
691 | if (cmd->se_cmd_flags & SCF_FUA) | 695 | if (cmd->se_cmd_flags & SCF_FUA) |
692 | rw = WRITE_FUA; | 696 | op_flags = WRITE_FUA; |
693 | else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) | 697 | else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) |
694 | rw = WRITE_FUA; | 698 | op_flags = WRITE_FUA; |
695 | else | ||
696 | rw = WRITE; | ||
697 | } else { | ||
698 | rw = WRITE; | ||
699 | } | 699 | } |
700 | } else { | 700 | } else { |
701 | rw = READ; | 701 | op = REQ_OP_READ; |
702 | } | 702 | } |
703 | 703 | ||
704 | ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); | 704 | ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); |
@@ -712,7 +712,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
712 | return 0; | 712 | return 0; |
713 | } | 713 | } |
714 | 714 | ||
715 | bio = iblock_get_bio(cmd, block_lba, sgl_nents); | 715 | bio = iblock_get_bio(cmd, block_lba, sgl_nents, op, op_flags); |
716 | if (!bio) | 716 | if (!bio) |
717 | goto fail_free_ibr; | 717 | goto fail_free_ibr; |
718 | 718 | ||
@@ -732,11 +732,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
732 | while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) | 732 | while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) |
733 | != sg->length) { | 733 | != sg->length) { |
734 | if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) { | 734 | if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) { |
735 | iblock_submit_bios(&list, rw); | 735 | iblock_submit_bios(&list); |
736 | bio_cnt = 0; | 736 | bio_cnt = 0; |
737 | } | 737 | } |
738 | 738 | ||
739 | bio = iblock_get_bio(cmd, block_lba, sg_num); | 739 | bio = iblock_get_bio(cmd, block_lba, sg_num, op, |
740 | op_flags); | ||
740 | if (!bio) | 741 | if (!bio) |
741 | goto fail_put_bios; | 742 | goto fail_put_bios; |
742 | 743 | ||
@@ -756,7 +757,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
756 | goto fail_put_bios; | 757 | goto fail_put_bios; |
757 | } | 758 | } |
758 | 759 | ||
759 | iblock_submit_bios(&list, rw); | 760 | iblock_submit_bios(&list); |
760 | iblock_complete_cmd(cmd); | 761 | iblock_complete_cmd(cmd); |
761 | return 0; | 762 | return 0; |
762 | 763 | ||
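
With the operation stored in the bio, submit_bio() loses its rw argument; iblock_submit_bios() no longer threads rw through, and the sync-cache path stamps WRITE_FLUSH on the bio before submitting it. A sketch of the new flush pattern, assuming the caller supplies the completion callback:

/* Sketch: an empty flush bio carries its own op/flags to submit_bio(). */
static int send_empty_flush(struct block_device *bdev, bio_end_io_t *done,
                            void *priv)
{
        struct bio *bio = bio_alloc(GFP_KERNEL, 0);

        if (!bio)
                return -ENOMEM;
        bio->bi_bdev = bdev;
        bio->bi_end_io = done;
        bio->bi_private = priv;
        bio->bi_rw = WRITE_FLUSH;       /* was: submit_bio(WRITE_FLUSH, bio) */
        submit_bio(bio);                /* single-argument form */
        return 0;
}
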
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index de18790eb21c..81564c87f24b 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c | |||
@@ -922,7 +922,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
922 | goto fail; | 922 | goto fail; |
923 | 923 | ||
924 | if (rw) | 924 | if (rw) |
925 | bio->bi_rw |= REQ_WRITE; | 925 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); |
926 | 926 | ||
927 | pr_debug("PSCSI: Allocated bio: %p," | 927 | pr_debug("PSCSI: Allocated bio: %p," |
928 | " dir: %s nr_vecs: %d\n", bio, | 928 | " dir: %s nr_vecs: %d\n", bio, |
diff --git a/fs/block_dev.c b/fs/block_dev.c index 71ccab1d22c6..d012be4ab977 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -493,7 +493,7 @@ long bdev_direct_access(struct block_device *bdev, struct blk_dax_ctl *dax) | |||
493 | 493 | ||
494 | if (size < 0) | 494 | if (size < 0) |
495 | return size; | 495 | return size; |
496 | if (!ops->direct_access) | 496 | if (!blk_queue_dax(bdev_get_queue(bdev)) || !ops->direct_access) |
497 | return -EOPNOTSUPP; | 497 | return -EOPNOTSUPP; |
498 | if ((sector + DIV_ROUND_UP(size, 512)) > | 498 | if ((sector + DIV_ROUND_UP(size, 512)) > |
499 | part_nr_sects_read(bdev->bd_part)) | 499 | part_nr_sects_read(bdev->bd_part)) |
@@ -1287,7 +1287,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
1287 | bdev->bd_disk = disk; | 1287 | bdev->bd_disk = disk; |
1288 | bdev->bd_queue = disk->queue; | 1288 | bdev->bd_queue = disk->queue; |
1289 | bdev->bd_contains = bdev; | 1289 | bdev->bd_contains = bdev; |
1290 | if (IS_ENABLED(CONFIG_BLK_DEV_DAX) && disk->fops->direct_access) | 1290 | if (IS_ENABLED(CONFIG_BLK_DEV_DAX) && |
1291 | blk_queue_dax(disk->queue)) | ||
1291 | bdev->bd_inode->i_flags = S_DAX; | 1292 | bdev->bd_inode->i_flags = S_DAX; |
1292 | else | 1293 | else |
1293 | bdev->bd_inode->i_flags = 0; | 1294 | bdev->bd_inode->i_flags = 0; |
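
DAX support is now an explicit queue capability rather than something inferred from ->direct_access alone: pmem and dcssblk set QUEUE_FLAG_DAX at queue setup (above), and both bdev_direct_access() and __blkdev_get() gate on blk_queue_dax(). A sketch of the consumer-side test, with the helper name invented:

/* Sketch: DAX requires both the queue flag and the method. */
static bool bdev_supports_dax(struct block_device *bdev)
{
        return blk_queue_dax(bdev_get_queue(bdev)) &&
               bdev->bd_disk->fops->direct_access;
}
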
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c index 7706c8dc5fa6..5d5cae05818d 100644 --- a/fs/btrfs/check-integrity.c +++ b/fs/btrfs/check-integrity.c | |||
@@ -1673,6 +1673,7 @@ static int btrfsic_read_block(struct btrfsic_state *state, | |||
1673 | } | 1673 | } |
1674 | bio->bi_bdev = block_ctx->dev->bdev; | 1674 | bio->bi_bdev = block_ctx->dev->bdev; |
1675 | bio->bi_iter.bi_sector = dev_bytenr >> 9; | 1675 | bio->bi_iter.bi_sector = dev_bytenr >> 9; |
1676 | bio_set_op_attrs(bio, REQ_OP_READ, 0); | ||
1676 | 1677 | ||
1677 | for (j = i; j < num_pages; j++) { | 1678 | for (j = i; j < num_pages; j++) { |
1678 | ret = bio_add_page(bio, block_ctx->pagev[j], | 1679 | ret = bio_add_page(bio, block_ctx->pagev[j], |
@@ -1685,7 +1686,7 @@ static int btrfsic_read_block(struct btrfsic_state *state, | |||
1685 | "btrfsic: error, failed to add a single page!\n"); | 1686 | "btrfsic: error, failed to add a single page!\n"); |
1686 | return -1; | 1687 | return -1; |
1687 | } | 1688 | } |
1688 | if (submit_bio_wait(READ, bio)) { | 1689 | if (submit_bio_wait(bio)) { |
1689 | printk(KERN_INFO | 1690 | printk(KERN_INFO |
1690 | "btrfsic: read error at logical %llu dev %s!\n", | 1691 | "btrfsic: read error at logical %llu dev %s!\n", |
1691 | block_ctx->start, block_ctx->dev->name); | 1692 | block_ctx->start, block_ctx->dev->name); |
@@ -2206,7 +2207,7 @@ static void btrfsic_bio_end_io(struct bio *bp) | |||
2206 | block->dev_bytenr, block->mirror_num); | 2207 | block->dev_bytenr, block->mirror_num); |
2207 | next_block = block->next_in_same_bio; | 2208 | next_block = block->next_in_same_bio; |
2208 | block->iodone_w_error = iodone_w_error; | 2209 | block->iodone_w_error = iodone_w_error; |
2209 | if (block->submit_bio_bh_rw & REQ_FLUSH) { | 2210 | if (block->submit_bio_bh_rw & REQ_PREFLUSH) { |
2210 | dev_state->last_flush_gen++; | 2211 | dev_state->last_flush_gen++; |
2211 | if ((dev_state->state->print_mask & | 2212 | if ((dev_state->state->print_mask & |
2212 | BTRFSIC_PRINT_MASK_END_IO_BIO_BH)) | 2213 | BTRFSIC_PRINT_MASK_END_IO_BIO_BH)) |
@@ -2242,7 +2243,7 @@ static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate) | |||
2242 | block->dev_bytenr, block->mirror_num); | 2243 | block->dev_bytenr, block->mirror_num); |
2243 | 2244 | ||
2244 | block->iodone_w_error = iodone_w_error; | 2245 | block->iodone_w_error = iodone_w_error; |
2245 | if (block->submit_bio_bh_rw & REQ_FLUSH) { | 2246 | if (block->submit_bio_bh_rw & REQ_PREFLUSH) { |
2246 | dev_state->last_flush_gen++; | 2247 | dev_state->last_flush_gen++; |
2247 | if ((dev_state->state->print_mask & | 2248 | if ((dev_state->state->print_mask & |
2248 | BTRFSIC_PRINT_MASK_END_IO_BIO_BH)) | 2249 | BTRFSIC_PRINT_MASK_END_IO_BIO_BH)) |
@@ -2855,12 +2856,12 @@ static struct btrfsic_dev_state *btrfsic_dev_state_lookup( | |||
2855 | return ds; | 2856 | return ds; |
2856 | } | 2857 | } |
2857 | 2858 | ||
2858 | int btrfsic_submit_bh(int rw, struct buffer_head *bh) | 2859 | int btrfsic_submit_bh(int op, int op_flags, struct buffer_head *bh) |
2859 | { | 2860 | { |
2860 | struct btrfsic_dev_state *dev_state; | 2861 | struct btrfsic_dev_state *dev_state; |
2861 | 2862 | ||
2862 | if (!btrfsic_is_initialized) | 2863 | if (!btrfsic_is_initialized) |
2863 | return submit_bh(rw, bh); | 2864 | return submit_bh(op, op_flags, bh); |
2864 | 2865 | ||
2865 | mutex_lock(&btrfsic_mutex); | 2866 | mutex_lock(&btrfsic_mutex); |
2866 | /* since btrfsic_submit_bh() might also be called before | 2867 | /* since btrfsic_submit_bh() might also be called before |
@@ -2869,26 +2870,26 @@ int btrfsic_submit_bh(int rw, struct buffer_head *bh) | |||
2869 | 2870 | ||
2870 | /* Only called to write the superblock (incl. FLUSH/FUA) */ | 2871 | /* Only called to write the superblock (incl. FLUSH/FUA) */ |
2871 | if (NULL != dev_state && | 2872 | if (NULL != dev_state && |
2872 | (rw & WRITE) && bh->b_size > 0) { | 2873 | (op == REQ_OP_WRITE) && bh->b_size > 0) { |
2873 | u64 dev_bytenr; | 2874 | u64 dev_bytenr; |
2874 | 2875 | ||
2875 | dev_bytenr = 4096 * bh->b_blocknr; | 2876 | dev_bytenr = 4096 * bh->b_blocknr; |
2876 | if (dev_state->state->print_mask & | 2877 | if (dev_state->state->print_mask & |
2877 | BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) | 2878 | BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) |
2878 | printk(KERN_INFO | 2879 | printk(KERN_INFO |
2879 | "submit_bh(rw=0x%x, blocknr=%llu (bytenr %llu)," | 2880 | "submit_bh(op=0x%x,0x%x, blocknr=%llu " |
2880 | " size=%zu, data=%p, bdev=%p)\n", | 2881 | "(bytenr %llu), size=%zu, data=%p, bdev=%p)\n", |
2881 | rw, (unsigned long long)bh->b_blocknr, | 2882 | op, op_flags, (unsigned long long)bh->b_blocknr, |
2882 | dev_bytenr, bh->b_size, bh->b_data, bh->b_bdev); | 2883 | dev_bytenr, bh->b_size, bh->b_data, bh->b_bdev); |
2883 | btrfsic_process_written_block(dev_state, dev_bytenr, | 2884 | btrfsic_process_written_block(dev_state, dev_bytenr, |
2884 | &bh->b_data, 1, NULL, | 2885 | &bh->b_data, 1, NULL, |
2885 | NULL, bh, rw); | 2886 | NULL, bh, op_flags); |
2886 | } else if (NULL != dev_state && (rw & REQ_FLUSH)) { | 2887 | } else if (NULL != dev_state && (op_flags & REQ_PREFLUSH)) { |
2887 | if (dev_state->state->print_mask & | 2888 | if (dev_state->state->print_mask & |
2888 | BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) | 2889 | BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) |
2889 | printk(KERN_INFO | 2890 | printk(KERN_INFO |
2890 | "submit_bh(rw=0x%x FLUSH, bdev=%p)\n", | 2891 | "submit_bh(op=0x%x,0x%x FLUSH, bdev=%p)\n", |
2891 | rw, bh->b_bdev); | 2892 | op, op_flags, bh->b_bdev); |
2892 | if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) { | 2893 | if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) { |
2893 | if ((dev_state->state->print_mask & | 2894 | if ((dev_state->state->print_mask & |
2894 | (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH | | 2895 | (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH | |
@@ -2906,7 +2907,7 @@ int btrfsic_submit_bh(int rw, struct buffer_head *bh) | |||
2906 | block->never_written = 0; | 2907 | block->never_written = 0; |
2907 | block->iodone_w_error = 0; | 2908 | block->iodone_w_error = 0; |
2908 | block->flush_gen = dev_state->last_flush_gen + 1; | 2909 | block->flush_gen = dev_state->last_flush_gen + 1; |
2909 | block->submit_bio_bh_rw = rw; | 2910 | block->submit_bio_bh_rw = op_flags; |
2910 | block->orig_bio_bh_private = bh->b_private; | 2911 | block->orig_bio_bh_private = bh->b_private; |
2911 | block->orig_bio_bh_end_io.bh = bh->b_end_io; | 2912 | block->orig_bio_bh_end_io.bh = bh->b_end_io; |
2912 | block->next_in_same_bio = NULL; | 2913 | block->next_in_same_bio = NULL; |
@@ -2915,10 +2916,10 @@ int btrfsic_submit_bh(int rw, struct buffer_head *bh) | |||
2915 | } | 2916 | } |
2916 | } | 2917 | } |
2917 | mutex_unlock(&btrfsic_mutex); | 2918 | mutex_unlock(&btrfsic_mutex); |
2918 | return submit_bh(rw, bh); | 2919 | return submit_bh(op, op_flags, bh); |
2919 | } | 2920 | } |
2920 | 2921 | ||
2921 | static void __btrfsic_submit_bio(int rw, struct bio *bio) | 2922 | static void __btrfsic_submit_bio(struct bio *bio) |
2922 | { | 2923 | { |
2923 | struct btrfsic_dev_state *dev_state; | 2924 | struct btrfsic_dev_state *dev_state; |
2924 | 2925 | ||
@@ -2930,7 +2931,7 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio) | |||
2930 | * btrfsic_mount(), this might return NULL */ | 2931 | * btrfsic_mount(), this might return NULL */ |
2931 | dev_state = btrfsic_dev_state_lookup(bio->bi_bdev); | 2932 | dev_state = btrfsic_dev_state_lookup(bio->bi_bdev); |
2932 | if (NULL != dev_state && | 2933 | if (NULL != dev_state && |
2933 | (rw & WRITE) && NULL != bio->bi_io_vec) { | 2934 | (bio_op(bio) == REQ_OP_WRITE) && NULL != bio->bi_io_vec) { |
2934 | unsigned int i; | 2935 | unsigned int i; |
2935 | u64 dev_bytenr; | 2936 | u64 dev_bytenr; |
2936 | u64 cur_bytenr; | 2937 | u64 cur_bytenr; |
@@ -2942,9 +2943,9 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio) | |||
2942 | if (dev_state->state->print_mask & | 2943 | if (dev_state->state->print_mask & |
2943 | BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) | 2944 | BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) |
2944 | printk(KERN_INFO | 2945 | printk(KERN_INFO |
2945 | "submit_bio(rw=0x%x, bi_vcnt=%u," | 2946 | "submit_bio(rw=%d,0x%x, bi_vcnt=%u," |
2946 | " bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n", | 2947 | " bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n", |
2947 | rw, bio->bi_vcnt, | 2948 | bio_op(bio), bio->bi_rw, bio->bi_vcnt, |
2948 | (unsigned long long)bio->bi_iter.bi_sector, | 2949 | (unsigned long long)bio->bi_iter.bi_sector, |
2949 | dev_bytenr, bio->bi_bdev); | 2950 | dev_bytenr, bio->bi_bdev); |
2950 | 2951 | ||
@@ -2975,18 +2976,18 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio) | |||
2975 | btrfsic_process_written_block(dev_state, dev_bytenr, | 2976 | btrfsic_process_written_block(dev_state, dev_bytenr, |
2976 | mapped_datav, bio->bi_vcnt, | 2977 | mapped_datav, bio->bi_vcnt, |
2977 | bio, &bio_is_patched, | 2978 | bio, &bio_is_patched, |
2978 | NULL, rw); | 2979 | NULL, bio->bi_rw); |
2979 | while (i > 0) { | 2980 | while (i > 0) { |
2980 | i--; | 2981 | i--; |
2981 | kunmap(bio->bi_io_vec[i].bv_page); | 2982 | kunmap(bio->bi_io_vec[i].bv_page); |
2982 | } | 2983 | } |
2983 | kfree(mapped_datav); | 2984 | kfree(mapped_datav); |
2984 | } else if (NULL != dev_state && (rw & REQ_FLUSH)) { | 2985 | } else if (NULL != dev_state && (bio->bi_rw & REQ_PREFLUSH)) { |
2985 | if (dev_state->state->print_mask & | 2986 | if (dev_state->state->print_mask & |
2986 | BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) | 2987 | BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) |
2987 | printk(KERN_INFO | 2988 | printk(KERN_INFO |
2988 | "submit_bio(rw=0x%x FLUSH, bdev=%p)\n", | 2989 | "submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n", |
2989 | rw, bio->bi_bdev); | 2990 | bio_op(bio), bio->bi_rw, bio->bi_bdev); |
2990 | if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) { | 2991 | if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) { |
2991 | if ((dev_state->state->print_mask & | 2992 | if ((dev_state->state->print_mask & |
2992 | (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH | | 2993 | (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH | |
@@ -3004,7 +3005,7 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio) | |||
3004 | block->never_written = 0; | 3005 | block->never_written = 0; |
3005 | block->iodone_w_error = 0; | 3006 | block->iodone_w_error = 0; |
3006 | block->flush_gen = dev_state->last_flush_gen + 1; | 3007 | block->flush_gen = dev_state->last_flush_gen + 1; |
3007 | block->submit_bio_bh_rw = rw; | 3008 | block->submit_bio_bh_rw = bio->bi_rw; |
3008 | block->orig_bio_bh_private = bio->bi_private; | 3009 | block->orig_bio_bh_private = bio->bi_private; |
3009 | block->orig_bio_bh_end_io.bio = bio->bi_end_io; | 3010 | block->orig_bio_bh_end_io.bio = bio->bi_end_io; |
3010 | block->next_in_same_bio = NULL; | 3011 | block->next_in_same_bio = NULL; |
@@ -3016,16 +3017,16 @@ leave: | |||
3016 | mutex_unlock(&btrfsic_mutex); | 3017 | mutex_unlock(&btrfsic_mutex); |
3017 | } | 3018 | } |
3018 | 3019 | ||
3019 | void btrfsic_submit_bio(int rw, struct bio *bio) | 3020 | void btrfsic_submit_bio(struct bio *bio) |
3020 | { | 3021 | { |
3021 | __btrfsic_submit_bio(rw, bio); | 3022 | __btrfsic_submit_bio(bio); |
3022 | submit_bio(rw, bio); | 3023 | submit_bio(bio); |
3023 | } | 3024 | } |
3024 | 3025 | ||
3025 | int btrfsic_submit_bio_wait(int rw, struct bio *bio) | 3026 | int btrfsic_submit_bio_wait(struct bio *bio) |
3026 | { | 3027 | { |
3027 | __btrfsic_submit_bio(rw, bio); | 3028 | __btrfsic_submit_bio(bio); |
3028 | return submit_bio_wait(rw, bio); | 3029 | return submit_bio_wait(bio); |
3029 | } | 3030 | } |
3030 | 3031 | ||
3031 | int btrfsic_mount(struct btrfs_root *root, | 3032 | int btrfsic_mount(struct btrfs_root *root, |
diff --git a/fs/btrfs/check-integrity.h b/fs/btrfs/check-integrity.h index 13b8566c97ab..f78dff1c7e86 100644 --- a/fs/btrfs/check-integrity.h +++ b/fs/btrfs/check-integrity.h | |||
@@ -20,9 +20,9 @@ | |||
20 | #define __BTRFS_CHECK_INTEGRITY__ | 20 | #define __BTRFS_CHECK_INTEGRITY__ |
21 | 21 | ||
22 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY | 22 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY |
23 | int btrfsic_submit_bh(int rw, struct buffer_head *bh); | 23 | int btrfsic_submit_bh(int op, int op_flags, struct buffer_head *bh); |
24 | void btrfsic_submit_bio(int rw, struct bio *bio); | 24 | void btrfsic_submit_bio(struct bio *bio); |
25 | int btrfsic_submit_bio_wait(int rw, struct bio *bio); | 25 | int btrfsic_submit_bio_wait(struct bio *bio); |
26 | #else | 26 | #else |
27 | #define btrfsic_submit_bh submit_bh | 27 | #define btrfsic_submit_bh submit_bh |
28 | #define btrfsic_submit_bio submit_bio | 28 | #define btrfsic_submit_bio submit_bio |
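
The buffer-head wrapper keeps an explicit (op, op_flags) pair because submit_bh() grew the same split signature, while the bio wrappers drop rw entirely now that bios carry their op. A hedged sketch of a superblock-style write through the checker, eliding the bh state and end_io setup that write_dev_supers() performs:

/* Sketch: op and flags are passed separately to btrfsic_submit_bh(). */
static int write_super_bh(struct buffer_head *bh, bool fua)
{
        /* ... lock the bh and set b_end_io as write_dev_supers() does ... */
        return btrfsic_submit_bh(REQ_OP_WRITE,
                                 fua ? WRITE_FUA : WRITE_SYNC, bh);
}
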
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 658c39b70fba..cefedabf0a92 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c | |||
@@ -363,6 +363,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, | |||
363 | kfree(cb); | 363 | kfree(cb); |
364 | return -ENOMEM; | 364 | return -ENOMEM; |
365 | } | 365 | } |
366 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); | ||
366 | bio->bi_private = cb; | 367 | bio->bi_private = cb; |
367 | bio->bi_end_io = end_compressed_bio_write; | 368 | bio->bi_end_io = end_compressed_bio_write; |
368 | atomic_inc(&cb->pending_bios); | 369 | atomic_inc(&cb->pending_bios); |
@@ -373,7 +374,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, | |||
373 | page = compressed_pages[pg_index]; | 374 | page = compressed_pages[pg_index]; |
374 | page->mapping = inode->i_mapping; | 375 | page->mapping = inode->i_mapping; |
375 | if (bio->bi_iter.bi_size) | 376 | if (bio->bi_iter.bi_size) |
376 | ret = io_tree->ops->merge_bio_hook(WRITE, page, 0, | 377 | ret = io_tree->ops->merge_bio_hook(page, 0, |
377 | PAGE_SIZE, | 378 | PAGE_SIZE, |
378 | bio, 0); | 379 | bio, 0); |
379 | else | 380 | else |
@@ -401,13 +402,14 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, | |||
401 | BUG_ON(ret); /* -ENOMEM */ | 402 | BUG_ON(ret); /* -ENOMEM */ |
402 | } | 403 | } |
403 | 404 | ||
404 | ret = btrfs_map_bio(root, WRITE, bio, 0, 1); | 405 | ret = btrfs_map_bio(root, bio, 0, 1); |
405 | BUG_ON(ret); /* -ENOMEM */ | 406 | BUG_ON(ret); /* -ENOMEM */ |
406 | 407 | ||
407 | bio_put(bio); | 408 | bio_put(bio); |
408 | 409 | ||
409 | bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS); | 410 | bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS); |
410 | BUG_ON(!bio); | 411 | BUG_ON(!bio); |
412 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); | ||
411 | bio->bi_private = cb; | 413 | bio->bi_private = cb; |
412 | bio->bi_end_io = end_compressed_bio_write; | 414 | bio->bi_end_io = end_compressed_bio_write; |
413 | bio_add_page(bio, page, PAGE_SIZE, 0); | 415 | bio_add_page(bio, page, PAGE_SIZE, 0); |
@@ -431,7 +433,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, | |||
431 | BUG_ON(ret); /* -ENOMEM */ | 433 | BUG_ON(ret); /* -ENOMEM */ |
432 | } | 434 | } |
433 | 435 | ||
434 | ret = btrfs_map_bio(root, WRITE, bio, 0, 1); | 436 | ret = btrfs_map_bio(root, bio, 0, 1); |
435 | BUG_ON(ret); /* -ENOMEM */ | 437 | BUG_ON(ret); /* -ENOMEM */ |
436 | 438 | ||
437 | bio_put(bio); | 439 | bio_put(bio); |
@@ -646,6 +648,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
646 | comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS); | 648 | comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS); |
647 | if (!comp_bio) | 649 | if (!comp_bio) |
648 | goto fail2; | 650 | goto fail2; |
651 | bio_set_op_attrs(comp_bio, REQ_OP_READ, 0); | ||
649 | comp_bio->bi_private = cb; | 652 | comp_bio->bi_private = cb; |
650 | comp_bio->bi_end_io = end_compressed_bio_read; | 653 | comp_bio->bi_end_io = end_compressed_bio_read; |
651 | atomic_inc(&cb->pending_bios); | 654 | atomic_inc(&cb->pending_bios); |
@@ -656,7 +659,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
656 | page->index = em_start >> PAGE_SHIFT; | 659 | page->index = em_start >> PAGE_SHIFT; |
657 | 660 | ||
658 | if (comp_bio->bi_iter.bi_size) | 661 | if (comp_bio->bi_iter.bi_size) |
659 | ret = tree->ops->merge_bio_hook(READ, page, 0, | 662 | ret = tree->ops->merge_bio_hook(page, 0, |
660 | PAGE_SIZE, | 663 | PAGE_SIZE, |
661 | comp_bio, 0); | 664 | comp_bio, 0); |
662 | else | 665 | else |
@@ -687,8 +690,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
687 | sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size, | 690 | sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size, |
688 | root->sectorsize); | 691 | root->sectorsize); |
689 | 692 | ||
690 | ret = btrfs_map_bio(root, READ, comp_bio, | 693 | ret = btrfs_map_bio(root, comp_bio, mirror_num, 0); |
691 | mirror_num, 0); | ||
692 | if (ret) { | 694 | if (ret) { |
693 | bio->bi_error = ret; | 695 | bio->bi_error = ret; |
694 | bio_endio(comp_bio); | 696 | bio_endio(comp_bio); |
@@ -699,6 +701,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
699 | comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, | 701 | comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, |
700 | GFP_NOFS); | 702 | GFP_NOFS); |
701 | BUG_ON(!comp_bio); | 703 | BUG_ON(!comp_bio); |
704 | bio_set_op_attrs(comp_bio, REQ_OP_READ, 0); | ||
702 | comp_bio->bi_private = cb; | 705 | comp_bio->bi_private = cb; |
703 | comp_bio->bi_end_io = end_compressed_bio_read; | 706 | comp_bio->bi_end_io = end_compressed_bio_read; |
704 | 707 | ||
@@ -717,7 +720,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
717 | BUG_ON(ret); /* -ENOMEM */ | 720 | BUG_ON(ret); /* -ENOMEM */ |
718 | } | 721 | } |
719 | 722 | ||
720 | ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0); | 723 | ret = btrfs_map_bio(root, comp_bio, mirror_num, 0); |
721 | if (ret) { | 724 | if (ret) { |
722 | bio->bi_error = ret; | 725 | bio->bi_error = ret; |
723 | bio_endio(comp_bio); | 726 | bio_endio(comp_bio); |
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 4274a7bfdaed..b2620d1f883f 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -3091,7 +3091,7 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, | |||
3091 | struct btrfs_root *new_root, | 3091 | struct btrfs_root *new_root, |
3092 | struct btrfs_root *parent_root, | 3092 | struct btrfs_root *parent_root, |
3093 | u64 new_dirid); | 3093 | u64 new_dirid); |
3094 | int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset, | 3094 | int btrfs_merge_bio_hook(struct page *page, unsigned long offset, |
3095 | size_t size, struct bio *bio, | 3095 | size_t size, struct bio *bio, |
3096 | unsigned long bio_flags); | 3096 | unsigned long bio_flags); |
3097 | int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); | 3097 | int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 60ce1190307b..9a726ded2c6d 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -124,7 +124,6 @@ struct async_submit_bio { | |||
124 | struct list_head list; | 124 | struct list_head list; |
125 | extent_submit_bio_hook_t *submit_bio_start; | 125 | extent_submit_bio_hook_t *submit_bio_start; |
126 | extent_submit_bio_hook_t *submit_bio_done; | 126 | extent_submit_bio_hook_t *submit_bio_done; |
127 | int rw; | ||
128 | int mirror_num; | 127 | int mirror_num; |
129 | unsigned long bio_flags; | 128 | unsigned long bio_flags; |
130 | /* | 129 | /* |
@@ -727,7 +726,7 @@ static void end_workqueue_bio(struct bio *bio) | |||
727 | fs_info = end_io_wq->info; | 726 | fs_info = end_io_wq->info; |
728 | end_io_wq->error = bio->bi_error; | 727 | end_io_wq->error = bio->bi_error; |
729 | 728 | ||
730 | if (bio->bi_rw & REQ_WRITE) { | 729 | if (bio_op(bio) == REQ_OP_WRITE) { |
731 | if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) { | 730 | if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) { |
732 | wq = fs_info->endio_meta_write_workers; | 731 | wq = fs_info->endio_meta_write_workers; |
733 | func = btrfs_endio_meta_write_helper; | 732 | func = btrfs_endio_meta_write_helper; |
@@ -797,7 +796,7 @@ static void run_one_async_start(struct btrfs_work *work) | |||
797 | int ret; | 796 | int ret; |
798 | 797 | ||
799 | async = container_of(work, struct async_submit_bio, work); | 798 | async = container_of(work, struct async_submit_bio, work); |
800 | ret = async->submit_bio_start(async->inode, async->rw, async->bio, | 799 | ret = async->submit_bio_start(async->inode, async->bio, |
801 | async->mirror_num, async->bio_flags, | 800 | async->mirror_num, async->bio_flags, |
802 | async->bio_offset); | 801 | async->bio_offset); |
803 | if (ret) | 802 | if (ret) |
@@ -830,9 +829,8 @@ static void run_one_async_done(struct btrfs_work *work) | |||
830 | return; | 829 | return; |
831 | } | 830 | } |
832 | 831 | ||
833 | async->submit_bio_done(async->inode, async->rw, async->bio, | 832 | async->submit_bio_done(async->inode, async->bio, async->mirror_num, |
834 | async->mirror_num, async->bio_flags, | 833 | async->bio_flags, async->bio_offset); |
835 | async->bio_offset); | ||
836 | } | 834 | } |
837 | 835 | ||
838 | static void run_one_async_free(struct btrfs_work *work) | 836 | static void run_one_async_free(struct btrfs_work *work) |
@@ -844,7 +842,7 @@ static void run_one_async_free(struct btrfs_work *work) | |||
844 | } | 842 | } |
845 | 843 | ||
846 | int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, | 844 | int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, |
847 | int rw, struct bio *bio, int mirror_num, | 845 | struct bio *bio, int mirror_num, |
848 | unsigned long bio_flags, | 846 | unsigned long bio_flags, |
849 | u64 bio_offset, | 847 | u64 bio_offset, |
850 | extent_submit_bio_hook_t *submit_bio_start, | 848 | extent_submit_bio_hook_t *submit_bio_start, |
@@ -857,7 +855,6 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, | |||
857 | return -ENOMEM; | 855 | return -ENOMEM; |
858 | 856 | ||
859 | async->inode = inode; | 857 | async->inode = inode; |
860 | async->rw = rw; | ||
861 | async->bio = bio; | 858 | async->bio = bio; |
862 | async->mirror_num = mirror_num; | 859 | async->mirror_num = mirror_num; |
863 | async->submit_bio_start = submit_bio_start; | 860 | async->submit_bio_start = submit_bio_start; |
@@ -873,7 +870,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, | |||
873 | 870 | ||
874 | atomic_inc(&fs_info->nr_async_submits); | 871 | atomic_inc(&fs_info->nr_async_submits); |
875 | 872 | ||
876 | if (rw & REQ_SYNC) | 873 | if (bio->bi_rw & REQ_SYNC) |
877 | btrfs_set_work_high_priority(&async->work); | 874 | btrfs_set_work_high_priority(&async->work); |
878 | 875 | ||
879 | btrfs_queue_work(fs_info->workers, &async->work); | 876 | btrfs_queue_work(fs_info->workers, &async->work); |
@@ -903,9 +900,8 @@ static int btree_csum_one_bio(struct bio *bio) | |||
903 | return ret; | 900 | return ret; |
904 | } | 901 | } |
905 | 902 | ||
906 | static int __btree_submit_bio_start(struct inode *inode, int rw, | 903 | static int __btree_submit_bio_start(struct inode *inode, struct bio *bio, |
907 | struct bio *bio, int mirror_num, | 904 | int mirror_num, unsigned long bio_flags, |
908 | unsigned long bio_flags, | ||
909 | u64 bio_offset) | 905 | u64 bio_offset) |
910 | { | 906 | { |
911 | /* | 907 | /* |
@@ -915,7 +911,7 @@ static int __btree_submit_bio_start(struct inode *inode, int rw, | |||
915 | return btree_csum_one_bio(bio); | 911 | return btree_csum_one_bio(bio); |
916 | } | 912 | } |
917 | 913 | ||
918 | static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio, | 914 | static int __btree_submit_bio_done(struct inode *inode, struct bio *bio, |
919 | int mirror_num, unsigned long bio_flags, | 915 | int mirror_num, unsigned long bio_flags, |
920 | u64 bio_offset) | 916 | u64 bio_offset) |
921 | { | 917 | { |
@@ -925,7 +921,7 @@ static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio, | |||
925 | * when we're called for a write, we're already in the async | 921 | * when we're called for a write, we're already in the async |
926 | * submission context. Just jump into btrfs_map_bio | 922 | * submission context. Just jump into btrfs_map_bio |
927 | */ | 923 | */ |
928 | ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1); | 924 | ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 1); |
929 | if (ret) { | 925 | if (ret) { |
930 | bio->bi_error = ret; | 926 | bio->bi_error = ret; |
931 | bio_endio(bio); | 927 | bio_endio(bio); |
@@ -944,14 +940,14 @@ static int check_async_write(struct inode *inode, unsigned long bio_flags) | |||
944 | return 1; | 940 | return 1; |
945 | } | 941 | } |
946 | 942 | ||
947 | static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, | 943 | static int btree_submit_bio_hook(struct inode *inode, struct bio *bio, |
948 | int mirror_num, unsigned long bio_flags, | 944 | int mirror_num, unsigned long bio_flags, |
949 | u64 bio_offset) | 945 | u64 bio_offset) |
950 | { | 946 | { |
951 | int async = check_async_write(inode, bio_flags); | 947 | int async = check_async_write(inode, bio_flags); |
952 | int ret; | 948 | int ret; |
953 | 949 | ||
954 | if (!(rw & REQ_WRITE)) { | 950 | if (bio_op(bio) != REQ_OP_WRITE) { |
955 | /* | 951 | /* |
956 | * called for a read, do the setup so that checksum validation | 952 | * called for a read, do the setup so that checksum validation |
957 | * can happen in the async kernel threads | 953 | * can happen in the async kernel threads |
@@ -960,21 +956,19 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, | |||
960 | bio, BTRFS_WQ_ENDIO_METADATA); | 956 | bio, BTRFS_WQ_ENDIO_METADATA); |
961 | if (ret) | 957 | if (ret) |
962 | goto out_w_error; | 958 | goto out_w_error; |
963 | ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, | 959 | ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 0); |
964 | mirror_num, 0); | ||
965 | } else if (!async) { | 960 | } else if (!async) { |
966 | ret = btree_csum_one_bio(bio); | 961 | ret = btree_csum_one_bio(bio); |
967 | if (ret) | 962 | if (ret) |
968 | goto out_w_error; | 963 | goto out_w_error; |
969 | ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, | 964 | ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 0); |
970 | mirror_num, 0); | ||
971 | } else { | 965 | } else { |
972 | /* | 966 | /* |
973 | * kthread helpers are used to submit writes so that | 967 | * kthread helpers are used to submit writes so that |
974 | * checksumming can happen in parallel across all CPUs | 968 | * checksumming can happen in parallel across all CPUs |
975 | */ | 969 | */ |
976 | ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, | 970 | ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, |
977 | inode, rw, bio, mirror_num, 0, | 971 | inode, bio, mirror_num, 0, |
978 | bio_offset, | 972 | bio_offset, |
979 | __btree_submit_bio_start, | 973 | __btree_submit_bio_start, |
980 | __btree_submit_bio_done); | 974 | __btree_submit_bio_done); |
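
Direction is no longer a REQ_WRITE bit test on a separate rw word; bio_op() extracts the operation from the bio, and operations are enumerated values compared with == and !=, as in the hunk above. A sketch of the dispatch shape, with hypothetical stub handlers:

    #include <linux/bio.h>

    static int handle_read(struct bio *bio)  { return 0; } /* stub */
    static int handle_write(struct bio *bio) { return 0; } /* stub */

    /* Sketch: op-based dispatch; my_dispatch() is illustrative only. */
    static int my_dispatch(struct bio *bio)
    {
            /* ops are values, not bits, so equality replaces the old
             * (rw & REQ_WRITE) mask test */
            if (bio_op(bio) != REQ_OP_WRITE)
                    return handle_read(bio);
            return handle_write(bio);
    }
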
@@ -3418,9 +3412,9 @@ static int write_dev_supers(struct btrfs_device *device, | |||
3418 | * to go down lazy. | 3412 | * to go down lazy. |
3419 | */ | 3413 | */ |
3420 | if (i == 0) | 3414 | if (i == 0) |
3421 | ret = btrfsic_submit_bh(WRITE_FUA, bh); | 3415 | ret = btrfsic_submit_bh(REQ_OP_WRITE, WRITE_FUA, bh); |
3422 | else | 3416 | else |
3423 | ret = btrfsic_submit_bh(WRITE_SYNC, bh); | 3417 | ret = btrfsic_submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh); |
3424 | if (ret) | 3418 | if (ret) |
3425 | errors++; | 3419 | errors++; |
3426 | } | 3420 | } |
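
btrfsic_submit_bh() now mirrors submit_bh(), which gained a split (op, op_flags) signature in this series. A sketch of the superblock-write pattern from the hunk above using the generic helper; my_write_super_bh() and its first_copy parameter are illustrative:

    #include <linux/buffer_head.h>

    /* Sketch: FUA for the primary copy, a plain sync write otherwise. */
    static int my_write_super_bh(struct buffer_head *bh, bool first_copy)
    {
            int op_flags = first_copy ? WRITE_FUA : WRITE_SYNC;

            return submit_bh(REQ_OP_WRITE, op_flags, bh);
    }
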
@@ -3484,12 +3478,13 @@ static int write_dev_flush(struct btrfs_device *device, int wait) | |||
3484 | 3478 | ||
3485 | bio->bi_end_io = btrfs_end_empty_barrier; | 3479 | bio->bi_end_io = btrfs_end_empty_barrier; |
3486 | bio->bi_bdev = device->bdev; | 3480 | bio->bi_bdev = device->bdev; |
3481 | bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH); | ||
3487 | init_completion(&device->flush_wait); | 3482 | init_completion(&device->flush_wait); |
3488 | bio->bi_private = &device->flush_wait; | 3483 | bio->bi_private = &device->flush_wait; |
3489 | device->flush_bio = bio; | 3484 | device->flush_bio = bio; |
3490 | 3485 | ||
3491 | bio_get(bio); | 3486 | bio_get(bio); |
3492 | btrfsic_submit_bio(WRITE_FLUSH, bio); | 3487 | btrfsic_submit_bio(bio); |
3493 | 3488 | ||
3494 | return 0; | 3489 | return 0; |
3495 | } | 3490 | } |
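
The flush path shows the new submission idiom end to end: the op and its flags are stamped on the bio with bio_set_op_attrs(), after which submit_bio() takes the bio alone. A condensed sketch of an empty-barrier bio, following write_dev_flush() above; the function and parameter names are illustrative:

    #include <linux/bio.h>

    static int my_issue_flush(struct block_device *bdev, bio_end_io_t *done,
                              void *priv)
    {
            struct bio *bio = bio_alloc(GFP_NOFS, 0);

            if (!bio)
                    return -ENOMEM;
            bio->bi_bdev = bdev;
            bio->bi_end_io = done;
            bio->bi_private = priv;
            /* the op and flags live on the bio itself ... */
            bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
            /* ... so submit_bio() no longer takes an rw argument */
            submit_bio(bio);
            return 0;
    }
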
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index acba821499a9..dbf3e1aab69e 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h | |||
@@ -122,7 +122,7 @@ void btrfs_csum_final(u32 crc, char *result); | |||
122 | int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, | 122 | int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, |
123 | enum btrfs_wq_endio_type metadata); | 123 | enum btrfs_wq_endio_type metadata); |
124 | int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, | 124 | int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, |
125 | int rw, struct bio *bio, int mirror_num, | 125 | struct bio *bio, int mirror_num, |
126 | unsigned long bio_flags, u64 bio_offset, | 126 | unsigned long bio_flags, u64 bio_offset, |
127 | extent_submit_bio_hook_t *submit_bio_start, | 127 | extent_submit_bio_hook_t *submit_bio_start, |
128 | extent_submit_bio_hook_t *submit_bio_done); | 128 | extent_submit_bio_hook_t *submit_bio_done); |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 82b912a293ab..b480fd555774 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -2048,7 +2048,7 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, | |||
2048 | */ | 2048 | */ |
2049 | btrfs_bio_counter_inc_blocked(root->fs_info); | 2049 | btrfs_bio_counter_inc_blocked(root->fs_info); |
2050 | /* Tell the block device(s) that the sectors can be discarded */ | 2050 | /* Tell the block device(s) that the sectors can be discarded */ |
2051 | ret = btrfs_map_block(root->fs_info, REQ_DISCARD, | 2051 | ret = btrfs_map_block(root->fs_info, REQ_OP_DISCARD, |
2052 | bytenr, &num_bytes, &bbio, 0); | 2052 | bytenr, &num_bytes, &bbio, 0); |
2053 | /* Error condition is -ENOMEM */ | 2053 | /* Error condition is -ENOMEM */ |
2054 | if (!ret) { | 2054 | if (!ret) { |
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 75533adef998..27c214941004 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
@@ -2049,9 +2049,10 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical, | |||
2049 | return -EIO; | 2049 | return -EIO; |
2050 | } | 2050 | } |
2051 | bio->bi_bdev = dev->bdev; | 2051 | bio->bi_bdev = dev->bdev; |
2052 | bio->bi_rw = WRITE_SYNC; | ||
2052 | bio_add_page(bio, page, length, pg_offset); | 2053 | bio_add_page(bio, page, length, pg_offset); |
2053 | 2054 | ||
2054 | if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) { | 2055 | if (btrfsic_submit_bio_wait(bio)) { |
2055 | /* try to remap that extent elsewhere? */ | 2056 | /* try to remap that extent elsewhere? */ |
2056 | btrfs_bio_counter_dec(fs_info); | 2057 | btrfs_bio_counter_dec(fs_info); |
2057 | bio_put(bio); | 2058 | bio_put(bio); |
@@ -2386,7 +2387,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset, | |||
2386 | int read_mode; | 2387 | int read_mode; |
2387 | int ret; | 2388 | int ret; |
2388 | 2389 | ||
2389 | BUG_ON(failed_bio->bi_rw & REQ_WRITE); | 2390 | BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE); |
2390 | 2391 | ||
2391 | ret = btrfs_get_io_failure_record(inode, start, end, &failrec); | 2392 | ret = btrfs_get_io_failure_record(inode, start, end, &failrec); |
2392 | if (ret) | 2393 | if (ret) |
@@ -2412,12 +2413,12 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset, | |||
2412 | free_io_failure(inode, failrec); | 2413 | free_io_failure(inode, failrec); |
2413 | return -EIO; | 2414 | return -EIO; |
2414 | } | 2415 | } |
2416 | bio_set_op_attrs(bio, REQ_OP_READ, read_mode); | ||
2415 | 2417 | ||
2416 | pr_debug("Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d\n", | 2418 | pr_debug("Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d\n", |
2417 | read_mode, failrec->this_mirror, failrec->in_validation); | 2419 | read_mode, failrec->this_mirror, failrec->in_validation); |
2418 | 2420 | ||
2419 | ret = tree->ops->submit_bio_hook(inode, read_mode, bio, | 2421 | ret = tree->ops->submit_bio_hook(inode, bio, failrec->this_mirror, |
2420 | failrec->this_mirror, | ||
2421 | failrec->bio_flags, 0); | 2422 | failrec->bio_flags, 0); |
2422 | if (ret) { | 2423 | if (ret) { |
2423 | free_io_failure(inode, failrec); | 2424 | free_io_failure(inode, failrec); |
@@ -2723,8 +2724,8 @@ struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs) | |||
2723 | } | 2724 | } |
2724 | 2725 | ||
2725 | 2726 | ||
2726 | static int __must_check submit_one_bio(int rw, struct bio *bio, | 2727 | static int __must_check submit_one_bio(struct bio *bio, int mirror_num, |
2727 | int mirror_num, unsigned long bio_flags) | 2728 | unsigned long bio_flags) |
2728 | { | 2729 | { |
2729 | int ret = 0; | 2730 | int ret = 0; |
2730 | struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; | 2731 | struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; |
@@ -2735,33 +2736,32 @@ static int __must_check submit_one_bio(int rw, struct bio *bio, | |||
2735 | start = page_offset(page) + bvec->bv_offset; | 2736 | start = page_offset(page) + bvec->bv_offset; |
2736 | 2737 | ||
2737 | bio->bi_private = NULL; | 2738 | bio->bi_private = NULL; |
2738 | |||
2739 | bio_get(bio); | 2739 | bio_get(bio); |
2740 | 2740 | ||
2741 | if (tree->ops && tree->ops->submit_bio_hook) | 2741 | if (tree->ops && tree->ops->submit_bio_hook) |
2742 | ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio, | 2742 | ret = tree->ops->submit_bio_hook(page->mapping->host, bio, |
2743 | mirror_num, bio_flags, start); | 2743 | mirror_num, bio_flags, start); |
2744 | else | 2744 | else |
2745 | btrfsic_submit_bio(rw, bio); | 2745 | btrfsic_submit_bio(bio); |
2746 | 2746 | ||
2747 | bio_put(bio); | 2747 | bio_put(bio); |
2748 | return ret; | 2748 | return ret; |
2749 | } | 2749 | } |
2750 | 2750 | ||
2751 | static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page, | 2751 | static int merge_bio(struct extent_io_tree *tree, struct page *page, |
2752 | unsigned long offset, size_t size, struct bio *bio, | 2752 | unsigned long offset, size_t size, struct bio *bio, |
2753 | unsigned long bio_flags) | 2753 | unsigned long bio_flags) |
2754 | { | 2754 | { |
2755 | int ret = 0; | 2755 | int ret = 0; |
2756 | if (tree->ops && tree->ops->merge_bio_hook) | 2756 | if (tree->ops && tree->ops->merge_bio_hook) |
2757 | ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio, | 2757 | ret = tree->ops->merge_bio_hook(page, offset, size, bio, |
2758 | bio_flags); | 2758 | bio_flags); |
2759 | BUG_ON(ret < 0); | 2759 | BUG_ON(ret < 0); |
2760 | return ret; | 2760 | return ret; |
2761 | 2761 | ||
2762 | } | 2762 | } |
2763 | 2763 | ||
2764 | static int submit_extent_page(int rw, struct extent_io_tree *tree, | 2764 | static int submit_extent_page(int op, int op_flags, struct extent_io_tree *tree, |
2765 | struct writeback_control *wbc, | 2765 | struct writeback_control *wbc, |
2766 | struct page *page, sector_t sector, | 2766 | struct page *page, sector_t sector, |
2767 | size_t size, unsigned long offset, | 2767 | size_t size, unsigned long offset, |
@@ -2789,10 +2789,9 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, | |||
2789 | 2789 | ||
2790 | if (prev_bio_flags != bio_flags || !contig || | 2790 | if (prev_bio_flags != bio_flags || !contig || |
2791 | force_bio_submit || | 2791 | force_bio_submit || |
2792 | merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) || | 2792 | merge_bio(tree, page, offset, page_size, bio, bio_flags) || |
2793 | bio_add_page(bio, page, page_size, offset) < page_size) { | 2793 | bio_add_page(bio, page, page_size, offset) < page_size) { |
2794 | ret = submit_one_bio(rw, bio, mirror_num, | 2794 | ret = submit_one_bio(bio, mirror_num, prev_bio_flags); |
2795 | prev_bio_flags); | ||
2796 | if (ret < 0) { | 2795 | if (ret < 0) { |
2797 | *bio_ret = NULL; | 2796 | *bio_ret = NULL; |
2798 | return ret; | 2797 | return ret; |
@@ -2813,6 +2812,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, | |||
2813 | bio_add_page(bio, page, page_size, offset); | 2812 | bio_add_page(bio, page, page_size, offset); |
2814 | bio->bi_end_io = end_io_func; | 2813 | bio->bi_end_io = end_io_func; |
2815 | bio->bi_private = tree; | 2814 | bio->bi_private = tree; |
2815 | bio_set_op_attrs(bio, op, op_flags); | ||
2816 | if (wbc) { | 2816 | if (wbc) { |
2817 | wbc_init_bio(wbc, bio); | 2817 | wbc_init_bio(wbc, bio); |
2818 | wbc_account_io(wbc, page, page_size); | 2818 | wbc_account_io(wbc, page, page_size); |
@@ -2821,7 +2821,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, | |||
2821 | if (bio_ret) | 2821 | if (bio_ret) |
2822 | *bio_ret = bio; | 2822 | *bio_ret = bio; |
2823 | else | 2823 | else |
2824 | ret = submit_one_bio(rw, bio, mirror_num, bio_flags); | 2824 | ret = submit_one_bio(bio, mirror_num, bio_flags); |
2825 | 2825 | ||
2826 | return ret; | 2826 | return ret; |
2827 | } | 2827 | } |
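
submit_extent_page() now takes the operation and its flags as separate arguments and applies them with bio_set_op_attrs() as soon as the bio is assembled, so every later hop (merge checks, submit hooks, btrfsic) can recover them from the bio. A condensed sketch of that build step under the same assumptions; my_build_page_bio() is illustrative, not the btrfs helper:

    #include <linux/bio.h>

    static struct bio *my_build_page_bio(struct block_device *bdev,
                                         sector_t sector, struct page *page,
                                         int op, int op_flags)
    {
            struct bio *bio = bio_alloc(GFP_NOFS, 1);

            if (!bio)
                    return NULL;
            bio->bi_bdev = bdev;
            bio->bi_iter.bi_sector = sector;
            bio_add_page(bio, page, PAGE_SIZE, 0);
            /* stamped once at creation; callers stop passing rw around */
            bio_set_op_attrs(bio, op, op_flags);
            return bio;
    }
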
@@ -2885,7 +2885,7 @@ static int __do_readpage(struct extent_io_tree *tree, | |||
2885 | get_extent_t *get_extent, | 2885 | get_extent_t *get_extent, |
2886 | struct extent_map **em_cached, | 2886 | struct extent_map **em_cached, |
2887 | struct bio **bio, int mirror_num, | 2887 | struct bio **bio, int mirror_num, |
2888 | unsigned long *bio_flags, int rw, | 2888 | unsigned long *bio_flags, int read_flags, |
2889 | u64 *prev_em_start) | 2889 | u64 *prev_em_start) |
2890 | { | 2890 | { |
2891 | struct inode *inode = page->mapping->host; | 2891 | struct inode *inode = page->mapping->host; |
@@ -3068,8 +3068,8 @@ static int __do_readpage(struct extent_io_tree *tree, | |||
3068 | } | 3068 | } |
3069 | 3069 | ||
3070 | pnr -= page->index; | 3070 | pnr -= page->index; |
3071 | ret = submit_extent_page(rw, tree, NULL, page, | 3071 | ret = submit_extent_page(REQ_OP_READ, read_flags, tree, NULL, |
3072 | sector, disk_io_size, pg_offset, | 3072 | page, sector, disk_io_size, pg_offset, |
3073 | bdev, bio, pnr, | 3073 | bdev, bio, pnr, |
3074 | end_bio_extent_readpage, mirror_num, | 3074 | end_bio_extent_readpage, mirror_num, |
3075 | *bio_flags, | 3075 | *bio_flags, |
@@ -3100,7 +3100,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree, | |||
3100 | get_extent_t *get_extent, | 3100 | get_extent_t *get_extent, |
3101 | struct extent_map **em_cached, | 3101 | struct extent_map **em_cached, |
3102 | struct bio **bio, int mirror_num, | 3102 | struct bio **bio, int mirror_num, |
3103 | unsigned long *bio_flags, int rw, | 3103 | unsigned long *bio_flags, |
3104 | u64 *prev_em_start) | 3104 | u64 *prev_em_start) |
3105 | { | 3105 | { |
3106 | struct inode *inode; | 3106 | struct inode *inode; |
@@ -3121,7 +3121,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree, | |||
3121 | 3121 | ||
3122 | for (index = 0; index < nr_pages; index++) { | 3122 | for (index = 0; index < nr_pages; index++) { |
3123 | __do_readpage(tree, pages[index], get_extent, em_cached, bio, | 3123 | __do_readpage(tree, pages[index], get_extent, em_cached, bio, |
3124 | mirror_num, bio_flags, rw, prev_em_start); | 3124 | mirror_num, bio_flags, 0, prev_em_start); |
3125 | put_page(pages[index]); | 3125 | put_page(pages[index]); |
3126 | } | 3126 | } |
3127 | } | 3127 | } |
@@ -3131,7 +3131,7 @@ static void __extent_readpages(struct extent_io_tree *tree, | |||
3131 | int nr_pages, get_extent_t *get_extent, | 3131 | int nr_pages, get_extent_t *get_extent, |
3132 | struct extent_map **em_cached, | 3132 | struct extent_map **em_cached, |
3133 | struct bio **bio, int mirror_num, | 3133 | struct bio **bio, int mirror_num, |
3134 | unsigned long *bio_flags, int rw, | 3134 | unsigned long *bio_flags, |
3135 | u64 *prev_em_start) | 3135 | u64 *prev_em_start) |
3136 | { | 3136 | { |
3137 | u64 start = 0; | 3137 | u64 start = 0; |
@@ -3153,7 +3153,7 @@ static void __extent_readpages(struct extent_io_tree *tree, | |||
3153 | index - first_index, start, | 3153 | index - first_index, start, |
3154 | end, get_extent, em_cached, | 3154 | end, get_extent, em_cached, |
3155 | bio, mirror_num, bio_flags, | 3155 | bio, mirror_num, bio_flags, |
3156 | rw, prev_em_start); | 3156 | prev_em_start); |
3157 | start = page_start; | 3157 | start = page_start; |
3158 | end = start + PAGE_SIZE - 1; | 3158 | end = start + PAGE_SIZE - 1; |
3159 | first_index = index; | 3159 | first_index = index; |
@@ -3164,7 +3164,7 @@ static void __extent_readpages(struct extent_io_tree *tree, | |||
3164 | __do_contiguous_readpages(tree, &pages[first_index], | 3164 | __do_contiguous_readpages(tree, &pages[first_index], |
3165 | index - first_index, start, | 3165 | index - first_index, start, |
3166 | end, get_extent, em_cached, bio, | 3166 | end, get_extent, em_cached, bio, |
3167 | mirror_num, bio_flags, rw, | 3167 | mirror_num, bio_flags, |
3168 | prev_em_start); | 3168 | prev_em_start); |
3169 | } | 3169 | } |
3170 | 3170 | ||
@@ -3172,7 +3172,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, | |||
3172 | struct page *page, | 3172 | struct page *page, |
3173 | get_extent_t *get_extent, | 3173 | get_extent_t *get_extent, |
3174 | struct bio **bio, int mirror_num, | 3174 | struct bio **bio, int mirror_num, |
3175 | unsigned long *bio_flags, int rw) | 3175 | unsigned long *bio_flags, int read_flags) |
3176 | { | 3176 | { |
3177 | struct inode *inode = page->mapping->host; | 3177 | struct inode *inode = page->mapping->host; |
3178 | struct btrfs_ordered_extent *ordered; | 3178 | struct btrfs_ordered_extent *ordered; |
@@ -3192,7 +3192,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, | |||
3192 | } | 3192 | } |
3193 | 3193 | ||
3194 | ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num, | 3194 | ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num, |
3195 | bio_flags, rw, NULL); | 3195 | bio_flags, read_flags, NULL); |
3196 | return ret; | 3196 | return ret; |
3197 | } | 3197 | } |
3198 | 3198 | ||
@@ -3204,9 +3204,9 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page, | |||
3204 | int ret; | 3204 | int ret; |
3205 | 3205 | ||
3206 | ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num, | 3206 | ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num, |
3207 | &bio_flags, READ); | 3207 | &bio_flags, 0); |
3208 | if (bio) | 3208 | if (bio) |
3209 | ret = submit_one_bio(READ, bio, mirror_num, bio_flags); | 3209 | ret = submit_one_bio(bio, mirror_num, bio_flags); |
3210 | return ret; | 3210 | return ret; |
3211 | } | 3211 | } |
3212 | 3212 | ||
@@ -3440,8 +3440,8 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, | |||
3440 | page->index, cur, end); | 3440 | page->index, cur, end); |
3441 | } | 3441 | } |
3442 | 3442 | ||
3443 | ret = submit_extent_page(write_flags, tree, wbc, page, | 3443 | ret = submit_extent_page(REQ_OP_WRITE, write_flags, tree, wbc, |
3444 | sector, iosize, pg_offset, | 3444 | page, sector, iosize, pg_offset, |
3445 | bdev, &epd->bio, max_nr, | 3445 | bdev, &epd->bio, max_nr, |
3446 | end_bio_extent_writepage, | 3446 | end_bio_extent_writepage, |
3447 | 0, 0, 0, false); | 3447 | 0, 0, 0, false); |
@@ -3480,13 +3480,11 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, | |||
3480 | size_t pg_offset = 0; | 3480 | size_t pg_offset = 0; |
3481 | loff_t i_size = i_size_read(inode); | 3481 | loff_t i_size = i_size_read(inode); |
3482 | unsigned long end_index = i_size >> PAGE_SHIFT; | 3482 | unsigned long end_index = i_size >> PAGE_SHIFT; |
3483 | int write_flags; | 3483 | int write_flags = 0; |
3484 | unsigned long nr_written = 0; | 3484 | unsigned long nr_written = 0; |
3485 | 3485 | ||
3486 | if (wbc->sync_mode == WB_SYNC_ALL) | 3486 | if (wbc->sync_mode == WB_SYNC_ALL) |
3487 | write_flags = WRITE_SYNC; | 3487 | write_flags = WRITE_SYNC; |
3488 | else | ||
3489 | write_flags = WRITE; | ||
3490 | 3488 | ||
3491 | trace___extent_writepage(page, inode, wbc); | 3489 | trace___extent_writepage(page, inode, wbc); |
3492 | 3490 | ||
@@ -3730,7 +3728,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb, | |||
3730 | u64 offset = eb->start; | 3728 | u64 offset = eb->start; |
3731 | unsigned long i, num_pages; | 3729 | unsigned long i, num_pages; |
3732 | unsigned long bio_flags = 0; | 3730 | unsigned long bio_flags = 0; |
3733 | int rw = (epd->sync_io ? WRITE_SYNC : WRITE) | REQ_META; | 3731 | int write_flags = (epd->sync_io ? WRITE_SYNC : 0) | REQ_META; |
3734 | int ret = 0; | 3732 | int ret = 0; |
3735 | 3733 | ||
3736 | clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); | 3734 | clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); |
@@ -3744,9 +3742,10 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb, | |||
3744 | 3742 | ||
3745 | clear_page_dirty_for_io(p); | 3743 | clear_page_dirty_for_io(p); |
3746 | set_page_writeback(p); | 3744 | set_page_writeback(p); |
3747 | ret = submit_extent_page(rw, tree, wbc, p, offset >> 9, | 3745 | ret = submit_extent_page(REQ_OP_WRITE, write_flags, tree, wbc, |
3748 | PAGE_SIZE, 0, bdev, &epd->bio, | 3746 | p, offset >> 9, PAGE_SIZE, 0, bdev, |
3749 | -1, end_bio_extent_buffer_writepage, | 3747 | &epd->bio, -1, |
3748 | end_bio_extent_buffer_writepage, | ||
3750 | 0, epd->bio_flags, bio_flags, false); | 3749 | 0, epd->bio_flags, bio_flags, false); |
3751 | epd->bio_flags = bio_flags; | 3750 | epd->bio_flags = bio_flags; |
3752 | if (ret) { | 3751 | if (ret) { |
@@ -4056,13 +4055,12 @@ retry: | |||
4056 | static void flush_epd_write_bio(struct extent_page_data *epd) | 4055 | static void flush_epd_write_bio(struct extent_page_data *epd) |
4057 | { | 4056 | { |
4058 | if (epd->bio) { | 4057 | if (epd->bio) { |
4059 | int rw = WRITE; | ||
4060 | int ret; | 4058 | int ret; |
4061 | 4059 | ||
4062 | if (epd->sync_io) | 4060 | bio_set_op_attrs(epd->bio, REQ_OP_WRITE, |
4063 | rw = WRITE_SYNC; | 4061 | epd->sync_io ? WRITE_SYNC : 0); |
4064 | 4062 | ||
4065 | ret = submit_one_bio(rw, epd->bio, 0, epd->bio_flags); | 4063 | ret = submit_one_bio(epd->bio, 0, epd->bio_flags); |
4066 | BUG_ON(ret < 0); /* -ENOMEM */ | 4064 | BUG_ON(ret < 0); /* -ENOMEM */ |
4067 | epd->bio = NULL; | 4065 | epd->bio = NULL; |
4068 | } | 4066 | } |
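
Where the write flags are not known until submission time (here, whether the writeback is synchronous), the attrs can be applied late, immediately before the bio is handed off. A minimal sketch of the pattern in flush_epd_write_bio() above, with an illustrative name:

    #include <linux/bio.h>

    /* Sketch: stamp op/flags just before submission. */
    static void my_flush_pending(struct bio *bio, bool sync_io)
    {
            bio_set_op_attrs(bio, REQ_OP_WRITE, sync_io ? WRITE_SYNC : 0);
            submit_bio(bio);
    }
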
@@ -4189,19 +4187,19 @@ int extent_readpages(struct extent_io_tree *tree, | |||
4189 | if (nr < ARRAY_SIZE(pagepool)) | 4187 | if (nr < ARRAY_SIZE(pagepool)) |
4190 | continue; | 4188 | continue; |
4191 | __extent_readpages(tree, pagepool, nr, get_extent, &em_cached, | 4189 | __extent_readpages(tree, pagepool, nr, get_extent, &em_cached, |
4192 | &bio, 0, &bio_flags, READ, &prev_em_start); | 4190 | &bio, 0, &bio_flags, &prev_em_start); |
4193 | nr = 0; | 4191 | nr = 0; |
4194 | } | 4192 | } |
4195 | if (nr) | 4193 | if (nr) |
4196 | __extent_readpages(tree, pagepool, nr, get_extent, &em_cached, | 4194 | __extent_readpages(tree, pagepool, nr, get_extent, &em_cached, |
4197 | &bio, 0, &bio_flags, READ, &prev_em_start); | 4195 | &bio, 0, &bio_flags, &prev_em_start); |
4198 | 4196 | ||
4199 | if (em_cached) | 4197 | if (em_cached) |
4200 | free_extent_map(em_cached); | 4198 | free_extent_map(em_cached); |
4201 | 4199 | ||
4202 | BUG_ON(!list_empty(pages)); | 4200 | BUG_ON(!list_empty(pages)); |
4203 | if (bio) | 4201 | if (bio) |
4204 | return submit_one_bio(READ, bio, 0, bio_flags); | 4202 | return submit_one_bio(bio, 0, bio_flags); |
4205 | return 0; | 4203 | return 0; |
4206 | } | 4204 | } |
4207 | 4205 | ||
@@ -5236,7 +5234,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, | |||
5236 | err = __extent_read_full_page(tree, page, | 5234 | err = __extent_read_full_page(tree, page, |
5237 | get_extent, &bio, | 5235 | get_extent, &bio, |
5238 | mirror_num, &bio_flags, | 5236 | mirror_num, &bio_flags, |
5239 | READ | REQ_META); | 5237 | REQ_META); |
5240 | if (err) | 5238 | if (err) |
5241 | ret = err; | 5239 | ret = err; |
5242 | } else { | 5240 | } else { |
@@ -5245,8 +5243,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, | |||
5245 | } | 5243 | } |
5246 | 5244 | ||
5247 | if (bio) { | 5245 | if (bio) { |
5248 | err = submit_one_bio(READ | REQ_META, bio, mirror_num, | 5246 | err = submit_one_bio(bio, mirror_num, bio_flags); |
5249 | bio_flags); | ||
5250 | if (err) | 5247 | if (err) |
5251 | return err; | 5248 | return err; |
5252 | } | 5249 | } |
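
On the metadata read path only REQ_META survives as an argument: REQ_OP_READ is the zero-valued op, so the old READ macro drops out and bare flags are enough. A sketch of the resulting submission, assuming the 4.8-era enum req_op layout; my_submit_meta_read() is illustrative:

    #include <linux/bio.h>

    static void my_submit_meta_read(struct bio *bio)
    {
            /* REQ_OP_READ is op value 0; REQ_META rides along as a flag */
            bio_set_op_attrs(bio, REQ_OP_READ, REQ_META);
            submit_bio(bio);
    }
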
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index c0c1c4fef6ce..bc2729a7612d 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h | |||
@@ -63,16 +63,16 @@ struct btrfs_root; | |||
63 | struct btrfs_io_bio; | 63 | struct btrfs_io_bio; |
64 | struct io_failure_record; | 64 | struct io_failure_record; |
65 | 65 | ||
66 | typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw, | 66 | typedef int (extent_submit_bio_hook_t)(struct inode *inode, struct bio *bio, |
67 | struct bio *bio, int mirror_num, | 67 | int mirror_num, unsigned long bio_flags, |
68 | unsigned long bio_flags, u64 bio_offset); | 68 | u64 bio_offset); |
69 | struct extent_io_ops { | 69 | struct extent_io_ops { |
70 | int (*fill_delalloc)(struct inode *inode, struct page *locked_page, | 70 | int (*fill_delalloc)(struct inode *inode, struct page *locked_page, |
71 | u64 start, u64 end, int *page_started, | 71 | u64 start, u64 end, int *page_started, |
72 | unsigned long *nr_written); | 72 | unsigned long *nr_written); |
73 | int (*writepage_start_hook)(struct page *page, u64 start, u64 end); | 73 | int (*writepage_start_hook)(struct page *page, u64 start, u64 end); |
74 | extent_submit_bio_hook_t *submit_bio_hook; | 74 | extent_submit_bio_hook_t *submit_bio_hook; |
75 | int (*merge_bio_hook)(int rw, struct page *page, unsigned long offset, | 75 | int (*merge_bio_hook)(struct page *page, unsigned long offset, |
76 | size_t size, struct bio *bio, | 76 | size_t size, struct bio *bio, |
77 | unsigned long bio_flags); | 77 | unsigned long bio_flags); |
78 | int (*readpage_io_failed_hook)(struct page *page, int failed_mirror); | 78 | int (*readpage_io_failed_hook)(struct page *page, int failed_mirror); |
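
Implementations of extent_submit_bio_hook_t drop the rw parameter and recover direction from the bio. A sketch of a hook matching the new signature; my_submit_bio_hook() and its branches are illustrative, not btrfs code:

    #include <linux/bio.h>
    #include <linux/fs.h>

    static int my_submit_bio_hook(struct inode *inode, struct bio *bio,
                                  int mirror_num, unsigned long bio_flags,
                                  u64 bio_offset)
    {
            /* direction comes from the bio, not a separate rw argument */
            if (bio_op(bio) == REQ_OP_WRITE)
                    return 0;       /* e.g. queue async checksumming */
            return 0;               /* e.g. set up read-side csum checks */
    }
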
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 4421954720b8..df731c0ebec7 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -1823,7 +1823,7 @@ static void btrfs_clear_bit_hook(struct inode *inode, | |||
1823 | * extent_io.c merge_bio_hook, this must check the chunk tree to make sure | 1823 | * extent_io.c merge_bio_hook, this must check the chunk tree to make sure |
1824 | * we don't create bios that span stripes or chunks | 1824 | * we don't create bios that span stripes or chunks |
1825 | */ | 1825 | */ |
1826 | int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset, | 1826 | int btrfs_merge_bio_hook(struct page *page, unsigned long offset, |
1827 | size_t size, struct bio *bio, | 1827 | size_t size, struct bio *bio, |
1828 | unsigned long bio_flags) | 1828 | unsigned long bio_flags) |
1829 | { | 1829 | { |
@@ -1838,7 +1838,7 @@ int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset, | |||
1838 | 1838 | ||
1839 | length = bio->bi_iter.bi_size; | 1839 | length = bio->bi_iter.bi_size; |
1840 | map_length = length; | 1840 | map_length = length; |
1841 | ret = btrfs_map_block(root->fs_info, rw, logical, | 1841 | ret = btrfs_map_block(root->fs_info, bio_op(bio), logical, |
1842 | &map_length, NULL, 0); | 1842 | &map_length, NULL, 0); |
1843 | /* Will always return 0 with map_multi == NULL */ | 1843 | /* Will always return 0 with map_multi == NULL */ |
1844 | BUG_ON(ret < 0); | 1844 | BUG_ON(ret < 0); |
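
btrfs_map_block() now wants the operation rather than a flag word, so merge hooks that used to forward rw derive it from the bio with bio_op(). A sketch of that call shape, mirroring btrfs_merge_bio_hook() above; my_can_merge() is illustrative and error handling is elided:

    static int my_can_merge(struct btrfs_fs_info *fs_info, struct bio *bio,
                            u64 logical, u64 *map_length)
    {
            /* the op slot is fed from the bio itself */
            return btrfs_map_block(fs_info, bio_op(bio), logical,
                                   map_length, NULL, 0);
    }
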
@@ -1855,9 +1855,8 @@ int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset, | |||
1855 | * At IO completion time the csums attached on the ordered extent record | 1855 | * At IO completion time the csums attached on the ordered extent record |
1856 | * are inserted into the btree | 1856 | * are inserted into the btree |
1857 | */ | 1857 | */ |
1858 | static int __btrfs_submit_bio_start(struct inode *inode, int rw, | 1858 | static int __btrfs_submit_bio_start(struct inode *inode, struct bio *bio, |
1859 | struct bio *bio, int mirror_num, | 1859 | int mirror_num, unsigned long bio_flags, |
1860 | unsigned long bio_flags, | ||
1861 | u64 bio_offset) | 1860 | u64 bio_offset) |
1862 | { | 1861 | { |
1863 | struct btrfs_root *root = BTRFS_I(inode)->root; | 1862 | struct btrfs_root *root = BTRFS_I(inode)->root; |
@@ -1876,14 +1875,14 @@ static int __btrfs_submit_bio_start(struct inode *inode, int rw, | |||
1876 | * At IO completion time the csums attached on the ordered extent record | 1875 | * At IO completion time the csums attached on the ordered extent record |
1877 | * are inserted into the btree | 1876 | * are inserted into the btree |
1878 | */ | 1877 | */ |
1879 | static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio, | 1878 | static int __btrfs_submit_bio_done(struct inode *inode, struct bio *bio, |
1880 | int mirror_num, unsigned long bio_flags, | 1879 | int mirror_num, unsigned long bio_flags, |
1881 | u64 bio_offset) | 1880 | u64 bio_offset) |
1882 | { | 1881 | { |
1883 | struct btrfs_root *root = BTRFS_I(inode)->root; | 1882 | struct btrfs_root *root = BTRFS_I(inode)->root; |
1884 | int ret; | 1883 | int ret; |
1885 | 1884 | ||
1886 | ret = btrfs_map_bio(root, rw, bio, mirror_num, 1); | 1885 | ret = btrfs_map_bio(root, bio, mirror_num, 1); |
1887 | if (ret) { | 1886 | if (ret) { |
1888 | bio->bi_error = ret; | 1887 | bio->bi_error = ret; |
1889 | bio_endio(bio); | 1888 | bio_endio(bio); |
@@ -1895,7 +1894,7 @@ static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio, | |||
1895 | * extent_io.c submission hook. This does the right thing for csum calculation | 1894 | * extent_io.c submission hook. This does the right thing for csum calculation |
1896 | * on write, or reading the csums from the tree before a read | 1895 | * on write, or reading the csums from the tree before a read |
1897 | */ | 1896 | */ |
1898 | static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, | 1897 | static int btrfs_submit_bio_hook(struct inode *inode, struct bio *bio, |
1899 | int mirror_num, unsigned long bio_flags, | 1898 | int mirror_num, unsigned long bio_flags, |
1900 | u64 bio_offset) | 1899 | u64 bio_offset) |
1901 | { | 1900 | { |
@@ -1910,7 +1909,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, | |||
1910 | if (btrfs_is_free_space_inode(inode)) | 1909 | if (btrfs_is_free_space_inode(inode)) |
1911 | metadata = BTRFS_WQ_ENDIO_FREE_SPACE; | 1910 | metadata = BTRFS_WQ_ENDIO_FREE_SPACE; |
1912 | 1911 | ||
1913 | if (!(rw & REQ_WRITE)) { | 1912 | if (bio_op(bio) != REQ_OP_WRITE) { |
1914 | ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata); | 1913 | ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata); |
1915 | if (ret) | 1914 | if (ret) |
1916 | goto out; | 1915 | goto out; |
@@ -1932,7 +1931,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, | |||
1932 | goto mapit; | 1931 | goto mapit; |
1933 | /* we're doing a write, do the async checksumming */ | 1932 | /* we're doing a write, do the async checksumming */ |
1934 | ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, | 1933 | ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, |
1935 | inode, rw, bio, mirror_num, | 1934 | inode, bio, mirror_num, |
1936 | bio_flags, bio_offset, | 1935 | bio_flags, bio_offset, |
1937 | __btrfs_submit_bio_start, | 1936 | __btrfs_submit_bio_start, |
1938 | __btrfs_submit_bio_done); | 1937 | __btrfs_submit_bio_done); |
@@ -1944,7 +1943,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, | |||
1944 | } | 1943 | } |
1945 | 1944 | ||
1946 | mapit: | 1945 | mapit: |
1947 | ret = btrfs_map_bio(root, rw, bio, mirror_num, 0); | 1946 | ret = btrfs_map_bio(root, bio, mirror_num, 0); |
1948 | 1947 | ||
1949 | out: | 1948 | out: |
1950 | if (ret < 0) { | 1949 | if (ret < 0) { |
@@ -7790,12 +7789,12 @@ err: | |||
7790 | } | 7789 | } |
7791 | 7790 | ||
7792 | static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio, | 7791 | static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio, |
7793 | int rw, int mirror_num) | 7792 | int mirror_num) |
7794 | { | 7793 | { |
7795 | struct btrfs_root *root = BTRFS_I(inode)->root; | 7794 | struct btrfs_root *root = BTRFS_I(inode)->root; |
7796 | int ret; | 7795 | int ret; |
7797 | 7796 | ||
7798 | BUG_ON(rw & REQ_WRITE); | 7797 | BUG_ON(bio_op(bio) == REQ_OP_WRITE); |
7799 | 7798 | ||
7800 | bio_get(bio); | 7799 | bio_get(bio); |
7801 | 7800 | ||
@@ -7804,7 +7803,7 @@ static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio, | |||
7804 | if (ret) | 7803 | if (ret) |
7805 | goto err; | 7804 | goto err; |
7806 | 7805 | ||
7807 | ret = btrfs_map_bio(root, rw, bio, mirror_num, 0); | 7806 | ret = btrfs_map_bio(root, bio, mirror_num, 0); |
7808 | err: | 7807 | err: |
7809 | bio_put(bio); | 7808 | bio_put(bio); |
7810 | return ret; | 7809 | return ret; |
@@ -7855,7 +7854,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio, | |||
7855 | int read_mode; | 7854 | int read_mode; |
7856 | int ret; | 7855 | int ret; |
7857 | 7856 | ||
7858 | BUG_ON(failed_bio->bi_rw & REQ_WRITE); | 7857 | BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE); |
7859 | 7858 | ||
7860 | ret = btrfs_get_io_failure_record(inode, start, end, &failrec); | 7859 | ret = btrfs_get_io_failure_record(inode, start, end, &failrec); |
7861 | if (ret) | 7860 | if (ret) |
@@ -7883,13 +7882,13 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio, | |||
7883 | free_io_failure(inode, failrec); | 7882 | free_io_failure(inode, failrec); |
7884 | return -EIO; | 7883 | return -EIO; |
7885 | } | 7884 | } |
7885 | bio_set_op_attrs(bio, REQ_OP_READ, read_mode); | ||
7886 | 7886 | ||
7887 | btrfs_debug(BTRFS_I(inode)->root->fs_info, | 7887 | btrfs_debug(BTRFS_I(inode)->root->fs_info, |
7888 | "Repair DIO Read Error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d\n", | 7888 | "Repair DIO Read Error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d\n", |
7889 | read_mode, failrec->this_mirror, failrec->in_validation); | 7889 | read_mode, failrec->this_mirror, failrec->in_validation); |
7890 | 7890 | ||
7891 | ret = submit_dio_repair_bio(inode, bio, read_mode, | 7891 | ret = submit_dio_repair_bio(inode, bio, failrec->this_mirror); |
7892 | failrec->this_mirror); | ||
7893 | if (ret) { | 7892 | if (ret) { |
7894 | free_io_failure(inode, failrec); | 7893 | free_io_failure(inode, failrec); |
7895 | bio_put(bio); | 7894 | bio_put(bio); |
@@ -8179,7 +8178,7 @@ static void btrfs_endio_direct_write(struct bio *bio) | |||
8179 | bio_put(bio); | 8178 | bio_put(bio); |
8180 | } | 8179 | } |
8181 | 8180 | ||
8182 | static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw, | 8181 | static int __btrfs_submit_bio_start_direct_io(struct inode *inode, |
8183 | struct bio *bio, int mirror_num, | 8182 | struct bio *bio, int mirror_num, |
8184 | unsigned long bio_flags, u64 offset) | 8183 | unsigned long bio_flags, u64 offset) |
8185 | { | 8184 | { |
@@ -8197,8 +8196,8 @@ static void btrfs_end_dio_bio(struct bio *bio) | |||
8197 | 8196 | ||
8198 | if (err) | 8197 | if (err) |
8199 | btrfs_warn(BTRFS_I(dip->inode)->root->fs_info, | 8198 | btrfs_warn(BTRFS_I(dip->inode)->root->fs_info, |
8200 | "direct IO failed ino %llu rw %lu sector %#Lx len %u err no %d", | 8199 | "direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d", |
8201 | btrfs_ino(dip->inode), bio->bi_rw, | 8200 | btrfs_ino(dip->inode), bio_op(bio), bio->bi_rw, |
8202 | (unsigned long long)bio->bi_iter.bi_sector, | 8201 | (unsigned long long)bio->bi_iter.bi_sector, |
8203 | bio->bi_iter.bi_size, err); | 8202 | bio->bi_iter.bi_size, err); |
8204 | 8203 | ||
@@ -8272,11 +8271,11 @@ static inline int btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root, | |||
8272 | } | 8271 | } |
8273 | 8272 | ||
8274 | static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, | 8273 | static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, |
8275 | int rw, u64 file_offset, int skip_sum, | 8274 | u64 file_offset, int skip_sum, |
8276 | int async_submit) | 8275 | int async_submit) |
8277 | { | 8276 | { |
8278 | struct btrfs_dio_private *dip = bio->bi_private; | 8277 | struct btrfs_dio_private *dip = bio->bi_private; |
8279 | int write = rw & REQ_WRITE; | 8278 | bool write = bio_op(bio) == REQ_OP_WRITE; |
8280 | struct btrfs_root *root = BTRFS_I(inode)->root; | 8279 | struct btrfs_root *root = BTRFS_I(inode)->root; |
8281 | int ret; | 8280 | int ret; |
8282 | 8281 | ||
@@ -8297,8 +8296,7 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, | |||
8297 | 8296 | ||
8298 | if (write && async_submit) { | 8297 | if (write && async_submit) { |
8299 | ret = btrfs_wq_submit_bio(root->fs_info, | 8298 | ret = btrfs_wq_submit_bio(root->fs_info, |
8300 | inode, rw, bio, 0, 0, | 8299 | inode, bio, 0, 0, file_offset, |
8301 | file_offset, | ||
8302 | __btrfs_submit_bio_start_direct_io, | 8300 | __btrfs_submit_bio_start_direct_io, |
8303 | __btrfs_submit_bio_done); | 8301 | __btrfs_submit_bio_done); |
8304 | goto err; | 8302 | goto err; |
@@ -8317,13 +8315,13 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, | |||
8317 | goto err; | 8315 | goto err; |
8318 | } | 8316 | } |
8319 | map: | 8317 | map: |
8320 | ret = btrfs_map_bio(root, rw, bio, 0, async_submit); | 8318 | ret = btrfs_map_bio(root, bio, 0, async_submit); |
8321 | err: | 8319 | err: |
8322 | bio_put(bio); | 8320 | bio_put(bio); |
8323 | return ret; | 8321 | return ret; |
8324 | } | 8322 | } |
8325 | 8323 | ||
8326 | static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, | 8324 | static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip, |
8327 | int skip_sum) | 8325 | int skip_sum) |
8328 | { | 8326 | { |
8329 | struct inode *inode = dip->inode; | 8327 | struct inode *inode = dip->inode; |
@@ -8342,8 +8340,8 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, | |||
8342 | int i; | 8340 | int i; |
8343 | 8341 | ||
8344 | map_length = orig_bio->bi_iter.bi_size; | 8342 | map_length = orig_bio->bi_iter.bi_size; |
8345 | ret = btrfs_map_block(root->fs_info, rw, start_sector << 9, | 8343 | ret = btrfs_map_block(root->fs_info, bio_op(orig_bio), |
8346 | &map_length, NULL, 0); | 8344 | start_sector << 9, &map_length, NULL, 0); |
8347 | if (ret) | 8345 | if (ret) |
8348 | return -EIO; | 8346 | return -EIO; |
8349 | 8347 | ||
@@ -8363,6 +8361,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, | |||
8363 | if (!bio) | 8361 | if (!bio) |
8364 | return -ENOMEM; | 8362 | return -ENOMEM; |
8365 | 8363 | ||
8364 | bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_rw); | ||
8366 | bio->bi_private = dip; | 8365 | bio->bi_private = dip; |
8367 | bio->bi_end_io = btrfs_end_dio_bio; | 8366 | bio->bi_end_io = btrfs_end_dio_bio; |
8368 | btrfs_io_bio(bio)->logical = file_offset; | 8367 | btrfs_io_bio(bio)->logical = file_offset; |
@@ -8382,7 +8381,7 @@ next_block: | |||
8382 | * before we're done setting it up | 8381 | * before we're done setting it up |
8383 | */ | 8382 | */ |
8384 | atomic_inc(&dip->pending_bios); | 8383 | atomic_inc(&dip->pending_bios); |
8385 | ret = __btrfs_submit_dio_bio(bio, inode, rw, | 8384 | ret = __btrfs_submit_dio_bio(bio, inode, |
8386 | file_offset, skip_sum, | 8385 | file_offset, skip_sum, |
8387 | async_submit); | 8386 | async_submit); |
8388 | if (ret) { | 8387 | if (ret) { |
@@ -8400,12 +8399,13 @@ next_block: | |||
8400 | start_sector, GFP_NOFS); | 8399 | start_sector, GFP_NOFS); |
8401 | if (!bio) | 8400 | if (!bio) |
8402 | goto out_err; | 8401 | goto out_err; |
8402 | bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_rw); | ||
8403 | bio->bi_private = dip; | 8403 | bio->bi_private = dip; |
8404 | bio->bi_end_io = btrfs_end_dio_bio; | 8404 | bio->bi_end_io = btrfs_end_dio_bio; |
8405 | btrfs_io_bio(bio)->logical = file_offset; | 8405 | btrfs_io_bio(bio)->logical = file_offset; |
8406 | 8406 | ||
8407 | map_length = orig_bio->bi_iter.bi_size; | 8407 | map_length = orig_bio->bi_iter.bi_size; |
8408 | ret = btrfs_map_block(root->fs_info, rw, | 8408 | ret = btrfs_map_block(root->fs_info, bio_op(orig_bio), |
8409 | start_sector << 9, | 8409 | start_sector << 9, |
8410 | &map_length, NULL, 0); | 8410 | &map_length, NULL, 0); |
8411 | if (ret) { | 8411 | if (ret) { |
@@ -8425,7 +8425,7 @@ next_block: | |||
8425 | } | 8425 | } |
8426 | 8426 | ||
8427 | submit: | 8427 | submit: |
8428 | ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, | 8428 | ret = __btrfs_submit_dio_bio(bio, inode, file_offset, skip_sum, |
8429 | async_submit); | 8429 | async_submit); |
8430 | if (!ret) | 8430 | if (!ret) |
8431 | return 0; | 8431 | return 0; |
@@ -8445,14 +8445,14 @@ out_err: | |||
8445 | return 0; | 8445 | return 0; |
8446 | } | 8446 | } |
8447 | 8447 | ||
8448 | static void btrfs_submit_direct(int rw, struct bio *dio_bio, | 8448 | static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode, |
8449 | struct inode *inode, loff_t file_offset) | 8449 | loff_t file_offset) |
8450 | { | 8450 | { |
8451 | struct btrfs_dio_private *dip = NULL; | 8451 | struct btrfs_dio_private *dip = NULL; |
8452 | struct bio *io_bio = NULL; | 8452 | struct bio *io_bio = NULL; |
8453 | struct btrfs_io_bio *btrfs_bio; | 8453 | struct btrfs_io_bio *btrfs_bio; |
8454 | int skip_sum; | 8454 | int skip_sum; |
8455 | int write = rw & REQ_WRITE; | 8455 | bool write = (bio_op(dio_bio) == REQ_OP_WRITE); |
8456 | int ret = 0; | 8456 | int ret = 0; |
8457 | 8457 | ||
8458 | skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; | 8458 | skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; |
@@ -8503,7 +8503,7 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio, | |||
8503 | dio_data->unsubmitted_oe_range_end; | 8503 | dio_data->unsubmitted_oe_range_end; |
8504 | } | 8504 | } |
8505 | 8505 | ||
8506 | ret = btrfs_submit_direct_hook(rw, dip, skip_sum); | 8506 | ret = btrfs_submit_direct_hook(dip, skip_sum); |
8507 | if (!ret) | 8507 | if (!ret) |
8508 | return; | 8508 | return; |
8509 | 8509 | ||
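
Split direct-I/O bios no longer receive an rw value from the caller; each clone copies the operation and flags from the original bio, so every fragment is self-describing when it reaches __btrfs_submit_dio_bio(). A one-line sketch of that inheritance, as in btrfs_submit_direct_hook() above; my_inherit_op() is illustrative:

    #include <linux/bio.h>

    static void my_inherit_op(struct bio *child, struct bio *orig)
    {
            /* copy the REQ_OP_* value and the REQ_* flags in one step */
            bio_set_op_attrs(child, bio_op(orig), orig->bi_rw);
    }
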
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index f8b6d411a034..cd8d302a1f61 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c | |||
@@ -1320,7 +1320,9 @@ write_data: | |||
1320 | 1320 | ||
1321 | bio->bi_private = rbio; | 1321 | bio->bi_private = rbio; |
1322 | bio->bi_end_io = raid_write_end_io; | 1322 | bio->bi_end_io = raid_write_end_io; |
1323 | submit_bio(WRITE, bio); | 1323 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); |
1324 | |||
1325 | submit_bio(bio); | ||
1324 | } | 1326 | } |
1325 | return; | 1327 | return; |
1326 | 1328 | ||
@@ -1573,11 +1575,12 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio) | |||
1573 | 1575 | ||
1574 | bio->bi_private = rbio; | 1576 | bio->bi_private = rbio; |
1575 | bio->bi_end_io = raid_rmw_end_io; | 1577 | bio->bi_end_io = raid_rmw_end_io; |
1578 | bio_set_op_attrs(bio, REQ_OP_READ, 0); | ||
1576 | 1579 | ||
1577 | btrfs_bio_wq_end_io(rbio->fs_info, bio, | 1580 | btrfs_bio_wq_end_io(rbio->fs_info, bio, |
1578 | BTRFS_WQ_ENDIO_RAID56); | 1581 | BTRFS_WQ_ENDIO_RAID56); |
1579 | 1582 | ||
1580 | submit_bio(READ, bio); | 1583 | submit_bio(bio); |
1581 | } | 1584 | } |
1582 | /* the actual write will happen once the reads are done */ | 1585 | /* the actual write will happen once the reads are done */ |
1583 | return 0; | 1586 | return 0; |
@@ -2097,11 +2100,12 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) | |||
2097 | 2100 | ||
2098 | bio->bi_private = rbio; | 2101 | bio->bi_private = rbio; |
2099 | bio->bi_end_io = raid_recover_end_io; | 2102 | bio->bi_end_io = raid_recover_end_io; |
2103 | bio_set_op_attrs(bio, REQ_OP_READ, 0); | ||
2100 | 2104 | ||
2101 | btrfs_bio_wq_end_io(rbio->fs_info, bio, | 2105 | btrfs_bio_wq_end_io(rbio->fs_info, bio, |
2102 | BTRFS_WQ_ENDIO_RAID56); | 2106 | BTRFS_WQ_ENDIO_RAID56); |
2103 | 2107 | ||
2104 | submit_bio(READ, bio); | 2108 | submit_bio(bio); |
2105 | } | 2109 | } |
2106 | out: | 2110 | out: |
2107 | return 0; | 2111 | return 0; |
@@ -2433,7 +2437,9 @@ submit_write: | |||
2433 | 2437 | ||
2434 | bio->bi_private = rbio; | 2438 | bio->bi_private = rbio; |
2435 | bio->bi_end_io = raid_write_end_io; | 2439 | bio->bi_end_io = raid_write_end_io; |
2436 | submit_bio(WRITE, bio); | 2440 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); |
2441 | |||
2442 | submit_bio(bio); | ||
2437 | } | 2443 | } |
2438 | return; | 2444 | return; |
2439 | 2445 | ||
@@ -2610,11 +2616,12 @@ static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio) | |||
2610 | 2616 | ||
2611 | bio->bi_private = rbio; | 2617 | bio->bi_private = rbio; |
2612 | bio->bi_end_io = raid56_parity_scrub_end_io; | 2618 | bio->bi_end_io = raid56_parity_scrub_end_io; |
2619 | bio_set_op_attrs(bio, REQ_OP_READ, 0); | ||
2613 | 2620 | ||
2614 | btrfs_bio_wq_end_io(rbio->fs_info, bio, | 2621 | btrfs_bio_wq_end_io(rbio->fs_info, bio, |
2615 | BTRFS_WQ_ENDIO_RAID56); | 2622 | BTRFS_WQ_ENDIO_RAID56); |
2616 | 2623 | ||
2617 | submit_bio(READ, bio); | 2624 | submit_bio(bio); |
2618 | } | 2625 | } |
2619 | /* the actual write will happen once the reads are done */ | 2626 | /* the actual write will happen once the reads are done */ |
2620 | return; | 2627 | return; |
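
All of the raid56 paths converge on the same two-step: stamp the op on the bio, then call submit_bio() with the bio alone. A sketch of the recurring read-side shape; the name and parameters are illustrative:

    #include <linux/bio.h>

    static void my_submit_stripe_read(struct bio *bio, bio_end_io_t *done,
                                      void *priv)
    {
            bio->bi_private = priv;
            bio->bi_end_io = done;
            bio_set_op_attrs(bio, REQ_OP_READ, 0);
            submit_bio(bio);
    }
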
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 70427ef66b04..e08b6bc676e3 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c | |||
@@ -1504,8 +1504,9 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info, | |||
1504 | sblock->no_io_error_seen = 0; | 1504 | sblock->no_io_error_seen = 0; |
1505 | } else { | 1505 | } else { |
1506 | bio->bi_iter.bi_sector = page->physical >> 9; | 1506 | bio->bi_iter.bi_sector = page->physical >> 9; |
1507 | bio_set_op_attrs(bio, REQ_OP_READ, 0); | ||
1507 | 1508 | ||
1508 | if (btrfsic_submit_bio_wait(READ, bio)) | 1509 | if (btrfsic_submit_bio_wait(bio)) |
1509 | sblock->no_io_error_seen = 0; | 1510 | sblock->no_io_error_seen = 0; |
1510 | } | 1511 | } |
1511 | 1512 | ||
@@ -1583,6 +1584,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad, | |||
1583 | return -EIO; | 1584 | return -EIO; |
1584 | bio->bi_bdev = page_bad->dev->bdev; | 1585 | bio->bi_bdev = page_bad->dev->bdev; |
1585 | bio->bi_iter.bi_sector = page_bad->physical >> 9; | 1586 | bio->bi_iter.bi_sector = page_bad->physical >> 9; |
1587 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); | ||
1586 | 1588 | ||
1587 | ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0); | 1589 | ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0); |
1588 | if (PAGE_SIZE != ret) { | 1590 | if (PAGE_SIZE != ret) { |
@@ -1590,7 +1592,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad, | |||
1590 | return -EIO; | 1592 | return -EIO; |
1591 | } | 1593 | } |
1592 | 1594 | ||
1593 | if (btrfsic_submit_bio_wait(WRITE, bio)) { | 1595 | if (btrfsic_submit_bio_wait(bio)) { |
1594 | btrfs_dev_stat_inc_and_print(page_bad->dev, | 1596 | btrfs_dev_stat_inc_and_print(page_bad->dev, |
1595 | BTRFS_DEV_STAT_WRITE_ERRS); | 1597 | BTRFS_DEV_STAT_WRITE_ERRS); |
1596 | btrfs_dev_replace_stats_inc( | 1598 | btrfs_dev_replace_stats_inc( |
@@ -1684,6 +1686,7 @@ again: | |||
1684 | bio->bi_end_io = scrub_wr_bio_end_io; | 1686 | bio->bi_end_io = scrub_wr_bio_end_io; |
1685 | bio->bi_bdev = sbio->dev->bdev; | 1687 | bio->bi_bdev = sbio->dev->bdev; |
1686 | bio->bi_iter.bi_sector = sbio->physical >> 9; | 1688 | bio->bi_iter.bi_sector = sbio->physical >> 9; |
1689 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); | ||
1687 | sbio->err = 0; | 1690 | sbio->err = 0; |
1688 | } else if (sbio->physical + sbio->page_count * PAGE_SIZE != | 1691 | } else if (sbio->physical + sbio->page_count * PAGE_SIZE != |
1689 | spage->physical_for_dev_replace || | 1692 | spage->physical_for_dev_replace || |
@@ -1731,7 +1734,7 @@ static void scrub_wr_submit(struct scrub_ctx *sctx) | |||
1731 | * orders the requests before sending them to the driver which | 1734 | * orders the requests before sending them to the driver which |
1732 | * doubled the write performance on spinning disks when measured | 1735 | * doubled the write performance on spinning disks when measured |
1733 | * with Linux 3.5 */ | 1736 | * with Linux 3.5 */ |
1734 | btrfsic_submit_bio(WRITE, sbio->bio); | 1737 | btrfsic_submit_bio(sbio->bio); |
1735 | } | 1738 | } |
1736 | 1739 | ||
1737 | static void scrub_wr_bio_end_io(struct bio *bio) | 1740 | static void scrub_wr_bio_end_io(struct bio *bio) |
@@ -2041,7 +2044,7 @@ static void scrub_submit(struct scrub_ctx *sctx) | |||
2041 | sbio = sctx->bios[sctx->curr]; | 2044 | sbio = sctx->bios[sctx->curr]; |
2042 | sctx->curr = -1; | 2045 | sctx->curr = -1; |
2043 | scrub_pending_bio_inc(sctx); | 2046 | scrub_pending_bio_inc(sctx); |
2044 | btrfsic_submit_bio(READ, sbio->bio); | 2047 | btrfsic_submit_bio(sbio->bio); |
2045 | } | 2048 | } |
2046 | 2049 | ||
2047 | static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx, | 2050 | static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx, |
@@ -2088,6 +2091,7 @@ again: | |||
2088 | bio->bi_end_io = scrub_bio_end_io; | 2091 | bio->bi_end_io = scrub_bio_end_io; |
2089 | bio->bi_bdev = sbio->dev->bdev; | 2092 | bio->bi_bdev = sbio->dev->bdev; |
2090 | bio->bi_iter.bi_sector = sbio->physical >> 9; | 2093 | bio->bi_iter.bi_sector = sbio->physical >> 9; |
2094 | bio_set_op_attrs(bio, REQ_OP_READ, 0); | ||
2091 | sbio->err = 0; | 2095 | sbio->err = 0; |
2092 | } else if (sbio->physical + sbio->page_count * PAGE_SIZE != | 2096 | } else if (sbio->physical + sbio->page_count * PAGE_SIZE != |
2093 | spage->physical || | 2097 | spage->physical || |
@@ -4436,6 +4440,7 @@ static int write_page_nocow(struct scrub_ctx *sctx, | |||
4436 | bio->bi_iter.bi_size = 0; | 4440 | bio->bi_iter.bi_size = 0; |
4437 | bio->bi_iter.bi_sector = physical_for_dev_replace >> 9; | 4441 | bio->bi_iter.bi_sector = physical_for_dev_replace >> 9; |
4438 | bio->bi_bdev = dev->bdev; | 4442 | bio->bi_bdev = dev->bdev; |
4443 | bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC); | ||
4439 | ret = bio_add_page(bio, page, PAGE_SIZE, 0); | 4444 | ret = bio_add_page(bio, page, PAGE_SIZE, 0); |
4440 | if (ret != PAGE_SIZE) { | 4445 | if (ret != PAGE_SIZE) { |
4441 | leave_with_eio: | 4446 | leave_with_eio: |
@@ -4444,7 +4449,7 @@ leave_with_eio: | |||
4444 | return -EIO; | 4449 | return -EIO; |
4445 | } | 4450 | } |
4446 | 4451 | ||
4447 | if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) | 4452 | if (btrfsic_submit_bio_wait(bio)) |
4448 | goto leave_with_eio; | 4453 | goto leave_with_eio; |
4449 | 4454 | ||
4450 | bio_put(bio); | 4455 | bio_put(bio); |
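
btrfsic_submit_bio_wait() likewise takes only the bio now; a synchronous write stamps REQ_OP_WRITE with WRITE_SYNC before waiting. A sketch of a single-page variant following write_page_nocow() above, but using the generic submit_bio_wait(); my_write_page_sync() is illustrative:

    #include <linux/bio.h>

    static int my_write_page_sync(struct block_device *bdev, sector_t sector,
                                  struct page *page)
    {
            struct bio *bio = bio_alloc(GFP_NOFS, 1);
            int ret = 0;

            if (!bio)
                    return -ENOMEM;
            bio->bi_bdev = bdev;
            bio->bi_iter.bi_sector = sector;
            bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
            if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
                    ret = -EIO;
            else if (submit_bio_wait(bio))
                    ret = -EIO;
            bio_put(bio);
            return ret;
    }
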
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 589f128173b1..0fb4a959012e 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -462,7 +462,7 @@ loop_lock: | |||
462 | sync_pending = 0; | 462 | sync_pending = 0; |
463 | } | 463 | } |
464 | 464 | ||
465 | btrfsic_submit_bio(cur->bi_rw, cur); | 465 | btrfsic_submit_bio(cur); |
466 | num_run++; | 466 | num_run++; |
467 | batch_run++; | 467 | batch_run++; |
468 | 468 | ||
@@ -5260,7 +5260,7 @@ void btrfs_put_bbio(struct btrfs_bio *bbio) | |||
5260 | kfree(bbio); | 5260 | kfree(bbio); |
5261 | } | 5261 | } |
5262 | 5262 | ||
5263 | static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | 5263 | static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op, |
5264 | u64 logical, u64 *length, | 5264 | u64 logical, u64 *length, |
5265 | struct btrfs_bio **bbio_ret, | 5265 | struct btrfs_bio **bbio_ret, |
5266 | int mirror_num, int need_raid_map) | 5266 | int mirror_num, int need_raid_map) |
@@ -5346,7 +5346,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | |||
5346 | raid56_full_stripe_start *= full_stripe_len; | 5346 | raid56_full_stripe_start *= full_stripe_len; |
5347 | } | 5347 | } |
5348 | 5348 | ||
5349 | if (rw & REQ_DISCARD) { | 5349 | if (op == REQ_OP_DISCARD) { |
5350 | /* we don't discard raid56 yet */ | 5350 | /* we don't discard raid56 yet */ |
5351 | if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { | 5351 | if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { |
5352 | ret = -EOPNOTSUPP; | 5352 | ret = -EOPNOTSUPP; |
@@ -5359,7 +5359,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | |||
5359 | For other RAID types and for RAID[56] reads, just allow a single | 5359 | For other RAID types and for RAID[56] reads, just allow a single |
5360 | stripe (on a single disk). */ | 5360 | stripe (on a single disk). */ |
5361 | if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && | 5361 | if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && |
5362 | (rw & REQ_WRITE)) { | 5362 | (op == REQ_OP_WRITE)) { |
5363 | max_len = stripe_len * nr_data_stripes(map) - | 5363 | max_len = stripe_len * nr_data_stripes(map) - |
5364 | (offset - raid56_full_stripe_start); | 5364 | (offset - raid56_full_stripe_start); |
5365 | } else { | 5365 | } else { |
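
Because operations are values rather than bits, __btrfs_map_block() replaces every bitmask test with explicit equality checks, and btrfs's private REQ_GET_READ_MIRRORS is passed through the same op parameter and compared the same way. A sketch of one such predicate; my_op_changes_data() is illustrative only:

    #include <linux/blk_types.h>

    /* old: if (rw & (REQ_WRITE | REQ_DISCARD)) ...
     * new: value comparisons against the enumerated ops */
    static bool my_op_changes_data(int op)
    {
            return op == REQ_OP_WRITE || op == REQ_OP_DISCARD;
    }
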
@@ -5384,8 +5384,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | |||
5384 | btrfs_dev_replace_set_lock_blocking(dev_replace); | 5384 | btrfs_dev_replace_set_lock_blocking(dev_replace); |
5385 | 5385 | ||
5386 | if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 && | 5386 | if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 && |
5387 | !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) && | 5387 | op != REQ_OP_WRITE && op != REQ_OP_DISCARD && |
5388 | dev_replace->tgtdev != NULL) { | 5388 | op != REQ_GET_READ_MIRRORS && dev_replace->tgtdev != NULL) { |
5389 | /* | 5389 | /* |
5390 | * in dev-replace case, for repair case (that's the only | 5390 | * in dev-replace case, for repair case (that's the only |
5391 | * case where the mirror is selected explicitly when | 5391 | * case where the mirror is selected explicitly when |
@@ -5472,15 +5472,17 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | |||
5472 | (offset + *length); | 5472 | (offset + *length); |
5473 | 5473 | ||
5474 | if (map->type & BTRFS_BLOCK_GROUP_RAID0) { | 5474 | if (map->type & BTRFS_BLOCK_GROUP_RAID0) { |
5475 | if (rw & REQ_DISCARD) | 5475 | if (op == REQ_OP_DISCARD) |
5476 | num_stripes = min_t(u64, map->num_stripes, | 5476 | num_stripes = min_t(u64, map->num_stripes, |
5477 | stripe_nr_end - stripe_nr_orig); | 5477 | stripe_nr_end - stripe_nr_orig); |
5478 | stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, | 5478 | stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, |
5479 | &stripe_index); | 5479 | &stripe_index); |
5480 | if (!(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))) | 5480 | if (op != REQ_OP_WRITE && op != REQ_OP_DISCARD && |
5481 | op != REQ_GET_READ_MIRRORS) | ||
5481 | mirror_num = 1; | 5482 | mirror_num = 1; |
5482 | } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) { | 5483 | } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) { |
5483 | if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) | 5484 | if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD || |
5485 | op == REQ_GET_READ_MIRRORS) | ||
5484 | num_stripes = map->num_stripes; | 5486 | num_stripes = map->num_stripes; |
5485 | else if (mirror_num) | 5487 | else if (mirror_num) |
5486 | stripe_index = mirror_num - 1; | 5488 | stripe_index = mirror_num - 1; |
@@ -5493,7 +5495,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | |||
5493 | } | 5495 | } |
5494 | 5496 | ||
5495 | } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { | 5497 | } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { |
5496 | if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) { | 5498 | if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD || |
5499 | op == REQ_GET_READ_MIRRORS) { | ||
5497 | num_stripes = map->num_stripes; | 5500 | num_stripes = map->num_stripes; |
5498 | } else if (mirror_num) { | 5501 | } else if (mirror_num) { |
5499 | stripe_index = mirror_num - 1; | 5502 | stripe_index = mirror_num - 1; |
@@ -5507,9 +5510,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | |||
5507 | stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); | 5510 | stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); |
5508 | stripe_index *= map->sub_stripes; | 5511 | stripe_index *= map->sub_stripes; |
5509 | 5512 | ||
5510 | if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) | 5513 | if (op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS) |
5511 | num_stripes = map->sub_stripes; | 5514 | num_stripes = map->sub_stripes; |
5512 | else if (rw & REQ_DISCARD) | 5515 | else if (op == REQ_OP_DISCARD) |
5513 | num_stripes = min_t(u64, map->sub_stripes * | 5516 | num_stripes = min_t(u64, map->sub_stripes * |
5514 | (stripe_nr_end - stripe_nr_orig), | 5517 | (stripe_nr_end - stripe_nr_orig), |
5515 | map->num_stripes); | 5518 | map->num_stripes); |
@@ -5527,7 +5530,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | |||
5527 | 5530 | ||
5528 | } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { | 5531 | } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { |
5529 | if (need_raid_map && | 5532 | if (need_raid_map && |
5530 | ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) || | 5533 | (op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS || |
5531 | mirror_num > 1)) { | 5534 | mirror_num > 1)) { |
5532 | /* push stripe_nr back to the start of the full stripe */ | 5535 | /* push stripe_nr back to the start of the full stripe */ |
5533 | stripe_nr = div_u64(raid56_full_stripe_start, | 5536 | stripe_nr = div_u64(raid56_full_stripe_start, |
@@ -5555,8 +5558,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | |||
5555 | /* We distribute the parity blocks across stripes */ | 5558 | /* We distribute the parity blocks across stripes */ |
5556 | div_u64_rem(stripe_nr + stripe_index, map->num_stripes, | 5559 | div_u64_rem(stripe_nr + stripe_index, map->num_stripes, |
5557 | &stripe_index); | 5560 | &stripe_index); |
5558 | if (!(rw & (REQ_WRITE | REQ_DISCARD | | 5561 | if ((op != REQ_OP_WRITE && op != REQ_OP_DISCARD && |
5559 | REQ_GET_READ_MIRRORS)) && mirror_num <= 1) | 5562 | op != REQ_GET_READ_MIRRORS) && mirror_num <= 1) |
5560 | mirror_num = 1; | 5563 | mirror_num = 1; |
5561 | } | 5564 | } |
5562 | } else { | 5565 | } else { |
@@ -5579,9 +5582,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | |||
5579 | 5582 | ||
5580 | num_alloc_stripes = num_stripes; | 5583 | num_alloc_stripes = num_stripes; |
5581 | if (dev_replace_is_ongoing) { | 5584 | if (dev_replace_is_ongoing) { |
5582 | if (rw & (REQ_WRITE | REQ_DISCARD)) | 5585 | if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD) |
5583 | num_alloc_stripes <<= 1; | 5586 | num_alloc_stripes <<= 1; |
5584 | if (rw & REQ_GET_READ_MIRRORS) | 5587 | if (op == REQ_GET_READ_MIRRORS) |
5585 | num_alloc_stripes++; | 5588 | num_alloc_stripes++; |
5586 | tgtdev_indexes = num_stripes; | 5589 | tgtdev_indexes = num_stripes; |
5587 | } | 5590 | } |
@@ -5596,7 +5599,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | |||
5596 | 5599 | ||
5597 | /* build raid_map */ | 5600 | /* build raid_map */ |
5598 | if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && | 5601 | if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && |
5599 | need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) || | 5602 | need_raid_map && |
5603 | ((op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS) || | ||
5600 | mirror_num > 1)) { | 5604 | mirror_num > 1)) { |
5601 | u64 tmp; | 5605 | u64 tmp; |
5602 | unsigned rot; | 5606 | unsigned rot; |
@@ -5621,7 +5625,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | |||
5621 | RAID6_Q_STRIPE; | 5625 | RAID6_Q_STRIPE; |
5622 | } | 5626 | } |
5623 | 5627 | ||
5624 | if (rw & REQ_DISCARD) { | 5628 | if (op == REQ_OP_DISCARD) { |
5625 | u32 factor = 0; | 5629 | u32 factor = 0; |
5626 | u32 sub_stripes = 0; | 5630 | u32 sub_stripes = 0; |
5627 | u64 stripes_per_dev = 0; | 5631 | u64 stripes_per_dev = 0; |
@@ -5701,14 +5705,15 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | |||
5701 | } | 5705 | } |
5702 | } | 5706 | } |
5703 | 5707 | ||
5704 | if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) | 5708 | if (op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS) |
5705 | max_errors = btrfs_chunk_max_errors(map); | 5709 | max_errors = btrfs_chunk_max_errors(map); |
5706 | 5710 | ||
5707 | if (bbio->raid_map) | 5711 | if (bbio->raid_map) |
5708 | sort_parity_stripes(bbio, num_stripes); | 5712 | sort_parity_stripes(bbio, num_stripes); |
5709 | 5713 | ||
5710 | tgtdev_indexes = 0; | 5714 | tgtdev_indexes = 0; |
5711 | if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) && | 5715 | if (dev_replace_is_ongoing && |
5716 | (op == REQ_OP_WRITE || op == REQ_OP_DISCARD) && | ||
5712 | dev_replace->tgtdev != NULL) { | 5717 | dev_replace->tgtdev != NULL) { |
5713 | int index_where_to_add; | 5718 | int index_where_to_add; |
5714 | u64 srcdev_devid = dev_replace->srcdev->devid; | 5719 | u64 srcdev_devid = dev_replace->srcdev->devid; |
@@ -5743,7 +5748,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | |||
5743 | } | 5748 | } |
5744 | } | 5749 | } |
5745 | num_stripes = index_where_to_add; | 5750 | num_stripes = index_where_to_add; |
5746 | } else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) && | 5751 | } else if (dev_replace_is_ongoing && (op == REQ_GET_READ_MIRRORS) && |
5747 | dev_replace->tgtdev != NULL) { | 5752 | dev_replace->tgtdev != NULL) { |
5748 | u64 srcdev_devid = dev_replace->srcdev->devid; | 5753 | u64 srcdev_devid = dev_replace->srcdev->devid; |
5749 | int index_srcdev = 0; | 5754 | int index_srcdev = 0; |
@@ -5815,21 +5820,21 @@ out: | |||
5815 | return ret; | 5820 | return ret; |
5816 | } | 5821 | } |
5817 | 5822 | ||
5818 | int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | 5823 | int btrfs_map_block(struct btrfs_fs_info *fs_info, int op, |
5819 | u64 logical, u64 *length, | 5824 | u64 logical, u64 *length, |
5820 | struct btrfs_bio **bbio_ret, int mirror_num) | 5825 | struct btrfs_bio **bbio_ret, int mirror_num) |
5821 | { | 5826 | { |
5822 | return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret, | 5827 | return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, |
5823 | mirror_num, 0); | 5828 | mirror_num, 0); |
5824 | } | 5829 | } |
5825 | 5830 | ||
5826 | /* For Scrub/replace */ | 5831 | /* For Scrub/replace */ |
5827 | int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw, | 5832 | int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int op, |
5828 | u64 logical, u64 *length, | 5833 | u64 logical, u64 *length, |
5829 | struct btrfs_bio **bbio_ret, int mirror_num, | 5834 | struct btrfs_bio **bbio_ret, int mirror_num, |
5830 | int need_raid_map) | 5835 | int need_raid_map) |
5831 | { | 5836 | { |
5832 | return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret, | 5837 | return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, |
5833 | mirror_num, need_raid_map); | 5838 | mirror_num, need_raid_map); |
5834 | } | 5839 | } |
5835 | 5840 | ||
@@ -5943,7 +5948,7 @@ static void btrfs_end_bio(struct bio *bio) | |||
5943 | BUG_ON(stripe_index >= bbio->num_stripes); | 5948 | BUG_ON(stripe_index >= bbio->num_stripes); |
5944 | dev = bbio->stripes[stripe_index].dev; | 5949 | dev = bbio->stripes[stripe_index].dev; |
5945 | if (dev->bdev) { | 5950 | if (dev->bdev) { |
5946 | if (bio->bi_rw & WRITE) | 5951 | if (bio_op(bio) == REQ_OP_WRITE) |
5947 | btrfs_dev_stat_inc(dev, | 5952 | btrfs_dev_stat_inc(dev, |
5948 | BTRFS_DEV_STAT_WRITE_ERRS); | 5953 | BTRFS_DEV_STAT_WRITE_ERRS); |
5949 | else | 5954 | else |
@@ -5997,7 +6002,7 @@ static void btrfs_end_bio(struct bio *bio) | |||
5997 | */ | 6002 | */ |
5998 | static noinline void btrfs_schedule_bio(struct btrfs_root *root, | 6003 | static noinline void btrfs_schedule_bio(struct btrfs_root *root, |
5999 | struct btrfs_device *device, | 6004 | struct btrfs_device *device, |
6000 | int rw, struct bio *bio) | 6005 | struct bio *bio) |
6001 | { | 6006 | { |
6002 | int should_queue = 1; | 6007 | int should_queue = 1; |
6003 | struct btrfs_pending_bios *pending_bios; | 6008 | struct btrfs_pending_bios *pending_bios; |
@@ -6008,9 +6013,9 @@ static noinline void btrfs_schedule_bio(struct btrfs_root *root, | |||
6008 | } | 6013 | } |
6009 | 6014 | ||
6010 | /* don't bother with additional async steps for reads, right now */ | 6015 | /* don't bother with additional async steps for reads, right now */ |
6011 | if (!(rw & REQ_WRITE)) { | 6016 | if (bio_op(bio) == REQ_OP_READ) { |
6012 | bio_get(bio); | 6017 | bio_get(bio); |
6013 | btrfsic_submit_bio(rw, bio); | 6018 | btrfsic_submit_bio(bio); |
6014 | bio_put(bio); | 6019 | bio_put(bio); |
6015 | return; | 6020 | return; |
6016 | } | 6021 | } |
@@ -6024,7 +6029,6 @@ static noinline void btrfs_schedule_bio(struct btrfs_root *root, | |||
6024 | atomic_inc(&root->fs_info->nr_async_bios); | 6029 | atomic_inc(&root->fs_info->nr_async_bios); |
6025 | WARN_ON(bio->bi_next); | 6030 | WARN_ON(bio->bi_next); |
6026 | bio->bi_next = NULL; | 6031 | bio->bi_next = NULL; |
6027 | bio->bi_rw |= rw; | ||
6028 | 6032 | ||
6029 | spin_lock(&device->io_lock); | 6033 | spin_lock(&device->io_lock); |
6030 | if (bio->bi_rw & REQ_SYNC) | 6034 | if (bio->bi_rw & REQ_SYNC) |
@@ -6050,7 +6054,7 @@ static noinline void btrfs_schedule_bio(struct btrfs_root *root, | |||
6050 | 6054 | ||
6051 | static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio, | 6055 | static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio, |
6052 | struct bio *bio, u64 physical, int dev_nr, | 6056 | struct bio *bio, u64 physical, int dev_nr, |
6053 | int rw, int async) | 6057 | int async) |
6054 | { | 6058 | { |
6055 | struct btrfs_device *dev = bbio->stripes[dev_nr].dev; | 6059 | struct btrfs_device *dev = bbio->stripes[dev_nr].dev; |
6056 | 6060 | ||
@@ -6064,8 +6068,8 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio, | |||
6064 | 6068 | ||
6065 | rcu_read_lock(); | 6069 | rcu_read_lock(); |
6066 | name = rcu_dereference(dev->name); | 6070 | name = rcu_dereference(dev->name); |
6067 | pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu " | 6071 | pr_debug("btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu " |
6068 | "(%s id %llu), size=%u\n", rw, | 6072 | "(%s id %llu), size=%u\n", bio_op(bio), bio->bi_rw, |
6069 | (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev, | 6073 | (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev, |
6070 | name->str, dev->devid, bio->bi_iter.bi_size); | 6074 | name->str, dev->devid, bio->bi_iter.bi_size); |
6071 | rcu_read_unlock(); | 6075 | rcu_read_unlock(); |
@@ -6076,9 +6080,9 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio, | |||
6076 | btrfs_bio_counter_inc_noblocked(root->fs_info); | 6080 | btrfs_bio_counter_inc_noblocked(root->fs_info); |
6077 | 6081 | ||
6078 | if (async) | 6082 | if (async) |
6079 | btrfs_schedule_bio(root, dev, rw, bio); | 6083 | btrfs_schedule_bio(root, dev, bio); |
6080 | else | 6084 | else |
6081 | btrfsic_submit_bio(rw, bio); | 6085 | btrfsic_submit_bio(bio); |
6082 | } | 6086 | } |
6083 | 6087 | ||
6084 | static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical) | 6088 | static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical) |
@@ -6095,7 +6099,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical) | |||
6095 | } | 6099 | } |
6096 | } | 6100 | } |
6097 | 6101 | ||
6098 | int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, | 6102 | int btrfs_map_bio(struct btrfs_root *root, struct bio *bio, |
6099 | int mirror_num, int async_submit) | 6103 | int mirror_num, int async_submit) |
6100 | { | 6104 | { |
6101 | struct btrfs_device *dev; | 6105 | struct btrfs_device *dev; |
@@ -6112,8 +6116,8 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, | |||
6112 | map_length = length; | 6116 | map_length = length; |
6113 | 6117 | ||
6114 | btrfs_bio_counter_inc_blocked(root->fs_info); | 6118 | btrfs_bio_counter_inc_blocked(root->fs_info); |
6115 | ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio, | 6119 | ret = __btrfs_map_block(root->fs_info, bio_op(bio), logical, |
6116 | mirror_num, 1); | 6120 | &map_length, &bbio, mirror_num, 1); |
6117 | if (ret) { | 6121 | if (ret) { |
6118 | btrfs_bio_counter_dec(root->fs_info); | 6122 | btrfs_bio_counter_dec(root->fs_info); |
6119 | return ret; | 6123 | return ret; |
@@ -6127,10 +6131,10 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, | |||
6127 | atomic_set(&bbio->stripes_pending, bbio->num_stripes); | 6131 | atomic_set(&bbio->stripes_pending, bbio->num_stripes); |
6128 | 6132 | ||
6129 | if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) && | 6133 | if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) && |
6130 | ((rw & WRITE) || (mirror_num > 1))) { | 6134 | ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) { |
6131 | /* In this case, map_length has been set to the length of | 6135 | /* In this case, map_length has been set to the length of |
6132 | a single stripe; not the whole write */ | 6136 | a single stripe; not the whole write */ |
6133 | if (rw & WRITE) { | 6137 | if (bio_op(bio) == REQ_OP_WRITE) { |
6134 | ret = raid56_parity_write(root, bio, bbio, map_length); | 6138 | ret = raid56_parity_write(root, bio, bbio, map_length); |
6135 | } else { | 6139 | } else { |
6136 | ret = raid56_parity_recover(root, bio, bbio, map_length, | 6140 | ret = raid56_parity_recover(root, bio, bbio, map_length, |
@@ -6149,7 +6153,8 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, | |||
6149 | 6153 | ||
6150 | for (dev_nr = 0; dev_nr < total_devs; dev_nr++) { | 6154 | for (dev_nr = 0; dev_nr < total_devs; dev_nr++) { |
6151 | dev = bbio->stripes[dev_nr].dev; | 6155 | dev = bbio->stripes[dev_nr].dev; |
6152 | if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) { | 6156 | if (!dev || !dev->bdev || |
6157 | (bio_op(bio) == REQ_OP_WRITE && !dev->writeable)) { | ||
6153 | bbio_error(bbio, first_bio, logical); | 6158 | bbio_error(bbio, first_bio, logical); |
6154 | continue; | 6159 | continue; |
6155 | } | 6160 | } |
@@ -6161,7 +6166,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, | |||
6161 | bio = first_bio; | 6166 | bio = first_bio; |
6162 | 6167 | ||
6163 | submit_stripe_bio(root, bbio, bio, | 6168 | submit_stripe_bio(root, bbio, bio, |
6164 | bbio->stripes[dev_nr].physical, dev_nr, rw, | 6169 | bbio->stripes[dev_nr].physical, dev_nr, |
6165 | async_submit); | 6170 | async_submit); |
6166 | } | 6171 | } |
6167 | btrfs_bio_counter_dec(root->fs_info); | 6172 | btrfs_bio_counter_dec(root->fs_info); |
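The btrfs conversion above is almost entirely mechanical: the old rw bitmask mixed the operation with modifier flags, so tests were written as mask intersections; with a single REQ_OP_ value they become plain equality checks. A minimal before/after sketch of the recurring pattern (illustrative only, lifted from the hunks above):

	/* old: operation and flags shared one bitmask */
	if (rw & (REQ_WRITE | REQ_DISCARD))
		num_alloc_stripes <<= 1;

	/* new: the operation is exactly one value, compared with == */
	if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD)
		num_alloc_stripes <<= 1;

REQ_GET_READ_MIRRORS is a btrfs-internal request code that travels through the same op parameter, which is why it still appears alongside the block-layer REQ_OP_ values in these comparisons.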
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 0ac90f8d85bd..6613e6335ca2 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h | |||
@@ -375,10 +375,10 @@ int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start, | |||
375 | u64 end, u64 *length); | 375 | u64 end, u64 *length); |
376 | void btrfs_get_bbio(struct btrfs_bio *bbio); | 376 | void btrfs_get_bbio(struct btrfs_bio *bbio); |
377 | void btrfs_put_bbio(struct btrfs_bio *bbio); | 377 | void btrfs_put_bbio(struct btrfs_bio *bbio); |
378 | int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | 378 | int btrfs_map_block(struct btrfs_fs_info *fs_info, int op, |
379 | u64 logical, u64 *length, | 379 | u64 logical, u64 *length, |
380 | struct btrfs_bio **bbio_ret, int mirror_num); | 380 | struct btrfs_bio **bbio_ret, int mirror_num); |
381 | int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw, | 381 | int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int op, |
382 | u64 logical, u64 *length, | 382 | u64 logical, u64 *length, |
383 | struct btrfs_bio **bbio_ret, int mirror_num, | 383 | struct btrfs_bio **bbio_ret, int mirror_num, |
384 | int need_raid_map); | 384 | int need_raid_map); |
@@ -391,7 +391,7 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, | |||
391 | struct btrfs_root *extent_root, u64 type); | 391 | struct btrfs_root *extent_root, u64 type); |
392 | void btrfs_mapping_init(struct btrfs_mapping_tree *tree); | 392 | void btrfs_mapping_init(struct btrfs_mapping_tree *tree); |
393 | void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree); | 393 | void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree); |
394 | int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, | 394 | int btrfs_map_bio(struct btrfs_root *root, struct bio *bio, |
395 | int mirror_num, int async_submit); | 395 | int mirror_num, int async_submit); |
396 | int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, | 396 | int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, |
397 | fmode_t flags, void *holder); | 397 | fmode_t flags, void *holder); |
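The header side only renames the parameter: btrfs_map_block() and btrfs_map_sblock() now take an op instead of rw, while btrfs_map_bio() drops the argument entirely because the operation now rides on the bio itself. Its internal call, as seen in volumes.c above:

	ret = __btrfs_map_block(root->fs_info, bio_op(bio), logical,
				&map_length, &bbio, mirror_num, 1);

bio_op() extracts the REQ_OP_ value stored on the bio at setup time, so the mapping code no longer needs a separate rw argument threaded through every caller.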
diff --git a/fs/buffer.c b/fs/buffer.c index 6c15012a75d9..e156a36463a1 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -45,7 +45,7 @@ | |||
45 | #include <trace/events/block.h> | 45 | #include <trace/events/block.h> |
46 | 46 | ||
47 | static int fsync_buffers_list(spinlock_t *lock, struct list_head *list); | 47 | static int fsync_buffers_list(spinlock_t *lock, struct list_head *list); |
48 | static int submit_bh_wbc(int rw, struct buffer_head *bh, | 48 | static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, |
49 | unsigned long bio_flags, | 49 | unsigned long bio_flags, |
50 | struct writeback_control *wbc); | 50 | struct writeback_control *wbc); |
51 | 51 | ||
@@ -588,7 +588,7 @@ void write_boundary_block(struct block_device *bdev, | |||
588 | struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize); | 588 | struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize); |
589 | if (bh) { | 589 | if (bh) { |
590 | if (buffer_dirty(bh)) | 590 | if (buffer_dirty(bh)) |
591 | ll_rw_block(WRITE, 1, &bh); | 591 | ll_rw_block(REQ_OP_WRITE, 0, 1, &bh); |
592 | put_bh(bh); | 592 | put_bh(bh); |
593 | } | 593 | } |
594 | } | 594 | } |
@@ -1225,7 +1225,7 @@ static struct buffer_head *__bread_slow(struct buffer_head *bh) | |||
1225 | } else { | 1225 | } else { |
1226 | get_bh(bh); | 1226 | get_bh(bh); |
1227 | bh->b_end_io = end_buffer_read_sync; | 1227 | bh->b_end_io = end_buffer_read_sync; |
1228 | submit_bh(READ, bh); | 1228 | submit_bh(REQ_OP_READ, 0, bh); |
1229 | wait_on_buffer(bh); | 1229 | wait_on_buffer(bh); |
1230 | if (buffer_uptodate(bh)) | 1230 | if (buffer_uptodate(bh)) |
1231 | return bh; | 1231 | return bh; |
@@ -1395,7 +1395,7 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size) | |||
1395 | { | 1395 | { |
1396 | struct buffer_head *bh = __getblk(bdev, block, size); | 1396 | struct buffer_head *bh = __getblk(bdev, block, size); |
1397 | if (likely(bh)) { | 1397 | if (likely(bh)) { |
1398 | ll_rw_block(READA, 1, &bh); | 1398 | ll_rw_block(REQ_OP_READ, READA, 1, &bh); |
1399 | brelse(bh); | 1399 | brelse(bh); |
1400 | } | 1400 | } |
1401 | } | 1401 | } |
@@ -1697,7 +1697,7 @@ int __block_write_full_page(struct inode *inode, struct page *page, | |||
1697 | struct buffer_head *bh, *head; | 1697 | struct buffer_head *bh, *head; |
1698 | unsigned int blocksize, bbits; | 1698 | unsigned int blocksize, bbits; |
1699 | int nr_underway = 0; | 1699 | int nr_underway = 0; |
1700 | int write_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE); | 1700 | int write_flags = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0); |
1701 | 1701 | ||
1702 | head = create_page_buffers(page, inode, | 1702 | head = create_page_buffers(page, inode, |
1703 | (1 << BH_Dirty)|(1 << BH_Uptodate)); | 1703 | (1 << BH_Dirty)|(1 << BH_Uptodate)); |
@@ -1786,7 +1786,7 @@ int __block_write_full_page(struct inode *inode, struct page *page, | |||
1786 | do { | 1786 | do { |
1787 | struct buffer_head *next = bh->b_this_page; | 1787 | struct buffer_head *next = bh->b_this_page; |
1788 | if (buffer_async_write(bh)) { | 1788 | if (buffer_async_write(bh)) { |
1789 | submit_bh_wbc(write_op, bh, 0, wbc); | 1789 | submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, 0, wbc); |
1790 | nr_underway++; | 1790 | nr_underway++; |
1791 | } | 1791 | } |
1792 | bh = next; | 1792 | bh = next; |
@@ -1840,7 +1840,7 @@ recover: | |||
1840 | struct buffer_head *next = bh->b_this_page; | 1840 | struct buffer_head *next = bh->b_this_page; |
1841 | if (buffer_async_write(bh)) { | 1841 | if (buffer_async_write(bh)) { |
1842 | clear_buffer_dirty(bh); | 1842 | clear_buffer_dirty(bh); |
1843 | submit_bh_wbc(write_op, bh, 0, wbc); | 1843 | submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, 0, wbc); |
1844 | nr_underway++; | 1844 | nr_underway++; |
1845 | } | 1845 | } |
1846 | bh = next; | 1846 | bh = next; |
@@ -1956,7 +1956,7 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len, | |||
1956 | if (!buffer_uptodate(bh) && !buffer_delay(bh) && | 1956 | if (!buffer_uptodate(bh) && !buffer_delay(bh) && |
1957 | !buffer_unwritten(bh) && | 1957 | !buffer_unwritten(bh) && |
1958 | (block_start < from || block_end > to)) { | 1958 | (block_start < from || block_end > to)) { |
1959 | ll_rw_block(READ, 1, &bh); | 1959 | ll_rw_block(REQ_OP_READ, 0, 1, &bh); |
1960 | *wait_bh++=bh; | 1960 | *wait_bh++=bh; |
1961 | } | 1961 | } |
1962 | } | 1962 | } |
@@ -2249,7 +2249,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block) | |||
2249 | if (buffer_uptodate(bh)) | 2249 | if (buffer_uptodate(bh)) |
2250 | end_buffer_async_read(bh, 1); | 2250 | end_buffer_async_read(bh, 1); |
2251 | else | 2251 | else |
2252 | submit_bh(READ, bh); | 2252 | submit_bh(REQ_OP_READ, 0, bh); |
2253 | } | 2253 | } |
2254 | return 0; | 2254 | return 0; |
2255 | } | 2255 | } |
@@ -2583,7 +2583,7 @@ int nobh_write_begin(struct address_space *mapping, | |||
2583 | if (block_start < from || block_end > to) { | 2583 | if (block_start < from || block_end > to) { |
2584 | lock_buffer(bh); | 2584 | lock_buffer(bh); |
2585 | bh->b_end_io = end_buffer_read_nobh; | 2585 | bh->b_end_io = end_buffer_read_nobh; |
2586 | submit_bh(READ, bh); | 2586 | submit_bh(REQ_OP_READ, 0, bh); |
2587 | nr_reads++; | 2587 | nr_reads++; |
2588 | } | 2588 | } |
2589 | } | 2589 | } |
@@ -2853,7 +2853,7 @@ int block_truncate_page(struct address_space *mapping, | |||
2853 | 2853 | ||
2854 | if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { | 2854 | if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { |
2855 | err = -EIO; | 2855 | err = -EIO; |
2856 | ll_rw_block(READ, 1, &bh); | 2856 | ll_rw_block(REQ_OP_READ, 0, 1, &bh); |
2857 | wait_on_buffer(bh); | 2857 | wait_on_buffer(bh); |
2858 | /* Uhhuh. Read error. Complain and punt. */ | 2858 | /* Uhhuh. Read error. Complain and punt. */ |
2859 | if (!buffer_uptodate(bh)) | 2859 | if (!buffer_uptodate(bh)) |
@@ -2950,7 +2950,7 @@ static void end_bio_bh_io_sync(struct bio *bio) | |||
2950 | * errors, this only handles the "we need to be able to | 2950 | * errors, this only handles the "we need to be able to |
2951 | * do IO at the final sector" case. | 2951 | * do IO at the final sector" case. |
2952 | */ | 2952 | */ |
2953 | void guard_bio_eod(int rw, struct bio *bio) | 2953 | void guard_bio_eod(int op, struct bio *bio) |
2954 | { | 2954 | { |
2955 | sector_t maxsector; | 2955 | sector_t maxsector; |
2956 | struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1]; | 2956 | struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1]; |
@@ -2980,13 +2980,13 @@ void guard_bio_eod(int rw, struct bio *bio) | |||
2980 | bvec->bv_len -= truncated_bytes; | 2980 | bvec->bv_len -= truncated_bytes; |
2981 | 2981 | ||
2982 | /* ..and clear the end of the buffer for reads */ | 2982 | /* ..and clear the end of the buffer for reads */ |
2983 | if ((rw & RW_MASK) == READ) { | 2983 | if (op == REQ_OP_READ) { |
2984 | zero_user(bvec->bv_page, bvec->bv_offset + bvec->bv_len, | 2984 | zero_user(bvec->bv_page, bvec->bv_offset + bvec->bv_len, |
2985 | truncated_bytes); | 2985 | truncated_bytes); |
2986 | } | 2986 | } |
2987 | } | 2987 | } |
2988 | 2988 | ||
2989 | static int submit_bh_wbc(int rw, struct buffer_head *bh, | 2989 | static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, |
2990 | unsigned long bio_flags, struct writeback_control *wbc) | 2990 | unsigned long bio_flags, struct writeback_control *wbc) |
2991 | { | 2991 | { |
2992 | struct bio *bio; | 2992 | struct bio *bio; |
@@ -3000,7 +3000,7 @@ static int submit_bh_wbc(int rw, struct buffer_head *bh, | |||
3000 | /* | 3000 | /* |
3001 | * Only clear out a write error when rewriting | 3001 | * Only clear out a write error when rewriting |
3002 | */ | 3002 | */ |
3003 | if (test_set_buffer_req(bh) && (rw & WRITE)) | 3003 | if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE)) |
3004 | clear_buffer_write_io_error(bh); | 3004 | clear_buffer_write_io_error(bh); |
3005 | 3005 | ||
3006 | /* | 3006 | /* |
@@ -3025,32 +3025,35 @@ static int submit_bh_wbc(int rw, struct buffer_head *bh, | |||
3025 | bio->bi_flags |= bio_flags; | 3025 | bio->bi_flags |= bio_flags; |
3026 | 3026 | ||
3027 | /* Take care of bh's that straddle the end of the device */ | 3027 | /* Take care of bh's that straddle the end of the device */ |
3028 | guard_bio_eod(rw, bio); | 3028 | guard_bio_eod(op, bio); |
3029 | 3029 | ||
3030 | if (buffer_meta(bh)) | 3030 | if (buffer_meta(bh)) |
3031 | rw |= REQ_META; | 3031 | op_flags |= REQ_META; |
3032 | if (buffer_prio(bh)) | 3032 | if (buffer_prio(bh)) |
3033 | rw |= REQ_PRIO; | 3033 | op_flags |= REQ_PRIO; |
3034 | bio_set_op_attrs(bio, op, op_flags); | ||
3034 | 3035 | ||
3035 | submit_bio(rw, bio); | 3036 | submit_bio(bio); |
3036 | return 0; | 3037 | return 0; |
3037 | } | 3038 | } |
3038 | 3039 | ||
3039 | int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags) | 3040 | int _submit_bh(int op, int op_flags, struct buffer_head *bh, |
3041 | unsigned long bio_flags) | ||
3040 | { | 3042 | { |
3041 | return submit_bh_wbc(rw, bh, bio_flags, NULL); | 3043 | return submit_bh_wbc(op, op_flags, bh, bio_flags, NULL); |
3042 | } | 3044 | } |
3043 | EXPORT_SYMBOL_GPL(_submit_bh); | 3045 | EXPORT_SYMBOL_GPL(_submit_bh); |
3044 | 3046 | ||
3045 | int submit_bh(int rw, struct buffer_head *bh) | 3047 | int submit_bh(int op, int op_flags, struct buffer_head *bh) |
3046 | { | 3048 | { |
3047 | return submit_bh_wbc(rw, bh, 0, NULL); | 3049 | return submit_bh_wbc(op, op_flags, bh, 0, NULL); |
3048 | } | 3050 | } |
3049 | EXPORT_SYMBOL(submit_bh); | 3051 | EXPORT_SYMBOL(submit_bh); |
3050 | 3052 | ||
3051 | /** | 3053 | /** |
3052 | * ll_rw_block: low-level access to block devices (DEPRECATED) | 3054 | * ll_rw_block: low-level access to block devices (DEPRECATED) |
3053 | * @rw: whether to %READ or %WRITE or maybe %READA (readahead) | 3055 | * @op: whether to %READ or %WRITE |
3056 | * @op_flags: rq_flag_bits or %READA (readahead) | ||
3054 | * @nr: number of &struct buffer_heads in the array | 3057 | * @nr: number of &struct buffer_heads in the array |
3055 | * @bhs: array of pointers to &struct buffer_head | 3058 | * @bhs: array of pointers to &struct buffer_head |
3056 | * | 3059 | * |
@@ -3073,7 +3076,7 @@ EXPORT_SYMBOL(submit_bh); | |||
3073 | * All of the buffers must be for the same device, and must also be a | 3076 | * All of the buffers must be for the same device, and must also be a |
3074 | * multiple of the current approved size for the device. | 3077 | * multiple of the current approved size for the device. |
3075 | */ | 3078 | */ |
3076 | void ll_rw_block(int rw, int nr, struct buffer_head *bhs[]) | 3079 | void ll_rw_block(int op, int op_flags, int nr, struct buffer_head *bhs[]) |
3077 | { | 3080 | { |
3078 | int i; | 3081 | int i; |
3079 | 3082 | ||
@@ -3082,18 +3085,18 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[]) | |||
3082 | 3085 | ||
3083 | if (!trylock_buffer(bh)) | 3086 | if (!trylock_buffer(bh)) |
3084 | continue; | 3087 | continue; |
3085 | if (rw == WRITE) { | 3088 | if (op == WRITE) { |
3086 | if (test_clear_buffer_dirty(bh)) { | 3089 | if (test_clear_buffer_dirty(bh)) { |
3087 | bh->b_end_io = end_buffer_write_sync; | 3090 | bh->b_end_io = end_buffer_write_sync; |
3088 | get_bh(bh); | 3091 | get_bh(bh); |
3089 | submit_bh(WRITE, bh); | 3092 | submit_bh(op, op_flags, bh); |
3090 | continue; | 3093 | continue; |
3091 | } | 3094 | } |
3092 | } else { | 3095 | } else { |
3093 | if (!buffer_uptodate(bh)) { | 3096 | if (!buffer_uptodate(bh)) { |
3094 | bh->b_end_io = end_buffer_read_sync; | 3097 | bh->b_end_io = end_buffer_read_sync; |
3095 | get_bh(bh); | 3098 | get_bh(bh); |
3096 | submit_bh(rw, bh); | 3099 | submit_bh(op, op_flags, bh); |
3097 | continue; | 3100 | continue; |
3098 | } | 3101 | } |
3099 | } | 3102 | } |
@@ -3102,7 +3105,7 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[]) | |||
3102 | } | 3105 | } |
3103 | EXPORT_SYMBOL(ll_rw_block); | 3106 | EXPORT_SYMBOL(ll_rw_block); |
3104 | 3107 | ||
3105 | void write_dirty_buffer(struct buffer_head *bh, int rw) | 3108 | void write_dirty_buffer(struct buffer_head *bh, int op_flags) |
3106 | { | 3109 | { |
3107 | lock_buffer(bh); | 3110 | lock_buffer(bh); |
3108 | if (!test_clear_buffer_dirty(bh)) { | 3111 | if (!test_clear_buffer_dirty(bh)) { |
@@ -3111,7 +3114,7 @@ void write_dirty_buffer(struct buffer_head *bh, int rw) | |||
3111 | } | 3114 | } |
3112 | bh->b_end_io = end_buffer_write_sync; | 3115 | bh->b_end_io = end_buffer_write_sync; |
3113 | get_bh(bh); | 3116 | get_bh(bh); |
3114 | submit_bh(rw, bh); | 3117 | submit_bh(REQ_OP_WRITE, op_flags, bh); |
3115 | } | 3118 | } |
3116 | EXPORT_SYMBOL(write_dirty_buffer); | 3119 | EXPORT_SYMBOL(write_dirty_buffer); |
3117 | 3120 | ||
@@ -3120,7 +3123,7 @@ EXPORT_SYMBOL(write_dirty_buffer); | |||
3120 | * and then start new I/O and then wait upon it. The caller must have a ref on | 3123 | * and then start new I/O and then wait upon it. The caller must have a ref on |
3121 | * the buffer_head. | 3124 | * the buffer_head. |
3122 | */ | 3125 | */ |
3123 | int __sync_dirty_buffer(struct buffer_head *bh, int rw) | 3126 | int __sync_dirty_buffer(struct buffer_head *bh, int op_flags) |
3124 | { | 3127 | { |
3125 | int ret = 0; | 3128 | int ret = 0; |
3126 | 3129 | ||
@@ -3129,7 +3132,7 @@ int __sync_dirty_buffer(struct buffer_head *bh, int rw) | |||
3129 | if (test_clear_buffer_dirty(bh)) { | 3132 | if (test_clear_buffer_dirty(bh)) { |
3130 | get_bh(bh); | 3133 | get_bh(bh); |
3131 | bh->b_end_io = end_buffer_write_sync; | 3134 | bh->b_end_io = end_buffer_write_sync; |
3132 | ret = submit_bh(rw, bh); | 3135 | ret = submit_bh(REQ_OP_WRITE, op_flags, bh); |
3133 | wait_on_buffer(bh); | 3136 | wait_on_buffer(bh); |
3134 | if (!ret && !buffer_uptodate(bh)) | 3137 | if (!ret && !buffer_uptodate(bh)) |
3135 | ret = -EIO; | 3138 | ret = -EIO; |
@@ -3392,7 +3395,7 @@ int bh_submit_read(struct buffer_head *bh) | |||
3392 | 3395 | ||
3393 | get_bh(bh); | 3396 | get_bh(bh); |
3394 | bh->b_end_io = end_buffer_read_sync; | 3397 | bh->b_end_io = end_buffer_read_sync; |
3395 | submit_bh(READ, bh); | 3398 | submit_bh(REQ_OP_READ, 0, bh); |
3396 | wait_on_buffer(bh); | 3399 | wait_on_buffer(bh); |
3397 | if (buffer_uptodate(bh)) | 3400 | if (buffer_uptodate(bh)) |
3398 | return 0; | 3401 | return 0; |
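fs/buffer.c shows the caller-facing shape of the new convention: submit_bh(), _submit_bh() and ll_rw_block() take the operation and the modifier flags as two separate arguments, and submit_bh_wbc() folds them back onto the bio with bio_set_op_attrs() just before the now argument-less submit_bio(). Typical call sites after this change, collected from the hunks above:

	submit_bh(REQ_OP_READ, 0, bh);		/* plain synchronous read */
	ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);	/* plain write */
	ll_rw_block(REQ_OP_READ, READA, 1, &bh);	/* readahead */

Note that the buffer_meta()/buffer_prio() state now ORs into op_flags rather than into the combined rw value, so flag bits can no longer clobber the operation itself.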
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index 2fc8c43ce531..c502c116924c 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c | |||
@@ -318,6 +318,7 @@ int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk, | |||
318 | bio->bi_bdev = inode->i_sb->s_bdev; | 318 | bio->bi_bdev = inode->i_sb->s_bdev; |
319 | bio->bi_iter.bi_sector = | 319 | bio->bi_iter.bi_sector = |
320 | pblk << (inode->i_sb->s_blocksize_bits - 9); | 320 | pblk << (inode->i_sb->s_blocksize_bits - 9); |
321 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); | ||
321 | ret = bio_add_page(bio, ciphertext_page, | 322 | ret = bio_add_page(bio, ciphertext_page, |
322 | inode->i_sb->s_blocksize, 0); | 323 | inode->i_sb->s_blocksize, 0); |
323 | if (ret != inode->i_sb->s_blocksize) { | 324 | if (ret != inode->i_sb->s_blocksize) { |
@@ -327,7 +328,7 @@ int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk, | |||
327 | err = -EIO; | 328 | err = -EIO; |
328 | goto errout; | 329 | goto errout; |
329 | } | 330 | } |
330 | err = submit_bio_wait(WRITE, bio); | 331 | err = submit_bio_wait(bio); |
331 | if ((err == 0) && bio->bi_error) | 332 | if ((err == 0) && bio->bi_error) |
332 | err = -EIO; | 333 | err = -EIO; |
333 | bio_put(bio); | 334 | bio_put(bio); |
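The fscrypt zeroout path (and its ext4 twin further down) shows the synchronous-bio recipe under the new API: stamp the operation on the bio, then call the single-argument submit_bio_wait(). A condensed sketch, assuming a bio already aimed at the right device and sector:

	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);	/* op is carried by the bio */
	err = submit_bio_wait(bio);		/* rw argument is gone */
	if (err == 0 && bio->bi_error)
		err = -EIO;
	bio_put(bio);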
diff --git a/fs/direct-io.c b/fs/direct-io.c index f3b4408be590..7c3ce73cb617 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
@@ -108,7 +108,8 @@ struct dio_submit { | |||
108 | /* dio_state communicated between submission path and end_io */ | 108 | /* dio_state communicated between submission path and end_io */ |
109 | struct dio { | 109 | struct dio { |
110 | int flags; /* doesn't change */ | 110 | int flags; /* doesn't change */ |
111 | int rw; | 111 | int op; |
112 | int op_flags; | ||
112 | blk_qc_t bio_cookie; | 113 | blk_qc_t bio_cookie; |
113 | struct block_device *bio_bdev; | 114 | struct block_device *bio_bdev; |
114 | struct inode *inode; | 115 | struct inode *inode; |
@@ -163,7 +164,7 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio) | |||
163 | ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES, | 164 | ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES, |
164 | &sdio->from); | 165 | &sdio->from); |
165 | 166 | ||
166 | if (ret < 0 && sdio->blocks_available && (dio->rw & WRITE)) { | 167 | if (ret < 0 && sdio->blocks_available && (dio->op == REQ_OP_WRITE)) { |
167 | struct page *page = ZERO_PAGE(0); | 168 | struct page *page = ZERO_PAGE(0); |
168 | /* | 169 | /* |
169 | * A memory fault, but the filesystem has some outstanding | 170 | * A memory fault, but the filesystem has some outstanding |
@@ -242,7 +243,8 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async) | |||
242 | transferred = dio->result; | 243 | transferred = dio->result; |
243 | 244 | ||
244 | /* Check for short read case */ | 245 | /* Check for short read case */ |
245 | if ((dio->rw == READ) && ((offset + transferred) > dio->i_size)) | 246 | if ((dio->op == REQ_OP_READ) && |
247 | ((offset + transferred) > dio->i_size)) | ||
246 | transferred = dio->i_size - offset; | 248 | transferred = dio->i_size - offset; |
247 | } | 249 | } |
248 | 250 | ||
@@ -273,7 +275,7 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async) | |||
273 | */ | 275 | */ |
274 | dio->iocb->ki_pos += transferred; | 276 | dio->iocb->ki_pos += transferred; |
275 | 277 | ||
276 | if (dio->rw & WRITE) | 278 | if (dio->op == REQ_OP_WRITE) |
277 | ret = generic_write_sync(dio->iocb, transferred); | 279 | ret = generic_write_sync(dio->iocb, transferred); |
278 | dio->iocb->ki_complete(dio->iocb, ret, 0); | 280 | dio->iocb->ki_complete(dio->iocb, ret, 0); |
279 | } | 281 | } |
@@ -375,6 +377,7 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio, | |||
375 | 377 | ||
376 | bio->bi_bdev = bdev; | 378 | bio->bi_bdev = bdev; |
377 | bio->bi_iter.bi_sector = first_sector; | 379 | bio->bi_iter.bi_sector = first_sector; |
380 | bio_set_op_attrs(bio, dio->op, dio->op_flags); | ||
378 | if (dio->is_async) | 381 | if (dio->is_async) |
379 | bio->bi_end_io = dio_bio_end_aio; | 382 | bio->bi_end_io = dio_bio_end_aio; |
380 | else | 383 | else |
@@ -402,17 +405,16 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio) | |||
402 | dio->refcount++; | 405 | dio->refcount++; |
403 | spin_unlock_irqrestore(&dio->bio_lock, flags); | 406 | spin_unlock_irqrestore(&dio->bio_lock, flags); |
404 | 407 | ||
405 | if (dio->is_async && dio->rw == READ && dio->should_dirty) | 408 | if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty) |
406 | bio_set_pages_dirty(bio); | 409 | bio_set_pages_dirty(bio); |
407 | 410 | ||
408 | dio->bio_bdev = bio->bi_bdev; | 411 | dio->bio_bdev = bio->bi_bdev; |
409 | 412 | ||
410 | if (sdio->submit_io) { | 413 | if (sdio->submit_io) { |
411 | sdio->submit_io(dio->rw, bio, dio->inode, | 414 | sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio); |
412 | sdio->logical_offset_in_bio); | ||
413 | dio->bio_cookie = BLK_QC_T_NONE; | 415 | dio->bio_cookie = BLK_QC_T_NONE; |
414 | } else | 416 | } else |
415 | dio->bio_cookie = submit_bio(dio->rw, bio); | 417 | dio->bio_cookie = submit_bio(bio); |
416 | 418 | ||
417 | sdio->bio = NULL; | 419 | sdio->bio = NULL; |
418 | sdio->boundary = 0; | 420 | sdio->boundary = 0; |
@@ -478,14 +480,14 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio) | |||
478 | if (bio->bi_error) | 480 | if (bio->bi_error) |
479 | dio->io_error = -EIO; | 481 | dio->io_error = -EIO; |
480 | 482 | ||
481 | if (dio->is_async && dio->rw == READ && dio->should_dirty) { | 483 | if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty) { |
482 | err = bio->bi_error; | 484 | err = bio->bi_error; |
483 | bio_check_pages_dirty(bio); /* transfers ownership */ | 485 | bio_check_pages_dirty(bio); /* transfers ownership */ |
484 | } else { | 486 | } else { |
485 | bio_for_each_segment_all(bvec, bio, i) { | 487 | bio_for_each_segment_all(bvec, bio, i) { |
486 | struct page *page = bvec->bv_page; | 488 | struct page *page = bvec->bv_page; |
487 | 489 | ||
488 | if (dio->rw == READ && !PageCompound(page) && | 490 | if (dio->op == REQ_OP_READ && !PageCompound(page) && |
489 | dio->should_dirty) | 491 | dio->should_dirty) |
490 | set_page_dirty_lock(page); | 492 | set_page_dirty_lock(page); |
491 | put_page(page); | 493 | put_page(page); |
@@ -638,7 +640,7 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio, | |||
638 | * which may decide to handle it or also return an unmapped | 640 | * which may decide to handle it or also return an unmapped |
639 | * buffer head. | 641 | * buffer head. |
640 | */ | 642 | */ |
641 | create = dio->rw & WRITE; | 643 | create = dio->op == REQ_OP_WRITE; |
642 | if (dio->flags & DIO_SKIP_HOLES) { | 644 | if (dio->flags & DIO_SKIP_HOLES) { |
643 | if (fs_startblk <= ((i_size_read(dio->inode) - 1) >> | 645 | if (fs_startblk <= ((i_size_read(dio->inode) - 1) >> |
644 | i_blkbits)) | 646 | i_blkbits)) |
@@ -788,7 +790,7 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page, | |||
788 | { | 790 | { |
789 | int ret = 0; | 791 | int ret = 0; |
790 | 792 | ||
791 | if (dio->rw & WRITE) { | 793 | if (dio->op == REQ_OP_WRITE) { |
792 | /* | 794 | /* |
793 | * Read accounting is performed in submit_bio() | 795 | * Read accounting is performed in submit_bio() |
794 | */ | 796 | */ |
@@ -988,7 +990,7 @@ do_holes: | |||
988 | loff_t i_size_aligned; | 990 | loff_t i_size_aligned; |
989 | 991 | ||
990 | /* AKPM: eargh, -ENOTBLK is a hack */ | 992 | /* AKPM: eargh, -ENOTBLK is a hack */ |
991 | if (dio->rw & WRITE) { | 993 | if (dio->op == REQ_OP_WRITE) { |
992 | put_page(page); | 994 | put_page(page); |
993 | return -ENOTBLK; | 995 | return -ENOTBLK; |
994 | } | 996 | } |
@@ -1202,7 +1204,12 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, | |||
1202 | dio->is_async = true; | 1204 | dio->is_async = true; |
1203 | 1205 | ||
1204 | dio->inode = inode; | 1206 | dio->inode = inode; |
1205 | dio->rw = iov_iter_rw(iter) == WRITE ? WRITE_ODIRECT : READ; | 1207 | if (iov_iter_rw(iter) == WRITE) { |
1208 | dio->op = REQ_OP_WRITE; | ||
1209 | dio->op_flags = WRITE_ODIRECT; | ||
1210 | } else { | ||
1211 | dio->op = REQ_OP_READ; | ||
1212 | } | ||
1206 | 1213 | ||
1207 | /* | 1214 | /* |
1208 | * For AIO O_(D)SYNC writes we need to defer completions to a workqueue | 1215 | * For AIO O_(D)SYNC writes we need to defer completions to a workqueue |
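struct dio used to cache the combined rw value; it now carries op and op_flags separately, and every dio->rw & WRITE test above becomes dio->op == REQ_OP_WRITE. The setup in do_blockdev_direct_IO() reduces to choosing both fields once:

	if (iov_iter_rw(iter) == WRITE) {
		dio->op = REQ_OP_WRITE;
		dio->op_flags = WRITE_ODIRECT;
	} else {
		dio->op = REQ_OP_READ;
	}

Each bio built by dio_bio_alloc() then inherits both via bio_set_op_attrs(bio, dio->op, dio->op_flags), which is what lets dio_bio_submit() call the plain submit_bio(bio).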
diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c index 7bd8ac8dfb28..8bb72807e70d 100644 --- a/fs/exofs/ore.c +++ b/fs/exofs/ore.c | |||
@@ -878,7 +878,7 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp) | |||
878 | } else { | 878 | } else { |
879 | bio = master_dev->bio; | 879 | bio = master_dev->bio; |
880 | /* FIXME: bio_set_dir() */ | 880 | /* FIXME: bio_set_dir() */ |
881 | bio->bi_rw |= REQ_WRITE; | 881 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); |
882 | } | 882 | } |
883 | 883 | ||
884 | osd_req_write(or, _ios_obj(ios, cur_comp), | 884 | osd_req_write(or, _ios_obj(ios, cur_comp), |
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 3020fd70c392..a806b58e4646 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c | |||
@@ -470,7 +470,7 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group) | |||
470 | trace_ext4_read_block_bitmap_load(sb, block_group); | 470 | trace_ext4_read_block_bitmap_load(sb, block_group); |
471 | bh->b_end_io = ext4_end_bitmap_read; | 471 | bh->b_end_io = ext4_end_bitmap_read; |
472 | get_bh(bh); | 472 | get_bh(bh); |
473 | submit_bh(READ | REQ_META | REQ_PRIO, bh); | 473 | submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh); |
474 | return bh; | 474 | return bh; |
475 | verify: | 475 | verify: |
476 | err = ext4_validate_block_bitmap(sb, desc, block_group, bh); | 476 | err = ext4_validate_block_bitmap(sb, desc, block_group, bh); |
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c index 6a6c27373b54..d3fa47c2b8a3 100644 --- a/fs/ext4/crypto.c +++ b/fs/ext4/crypto.c | |||
@@ -428,6 +428,7 @@ int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk, | |||
428 | bio->bi_bdev = inode->i_sb->s_bdev; | 428 | bio->bi_bdev = inode->i_sb->s_bdev; |
429 | bio->bi_iter.bi_sector = | 429 | bio->bi_iter.bi_sector = |
430 | pblk << (inode->i_sb->s_blocksize_bits - 9); | 430 | pblk << (inode->i_sb->s_blocksize_bits - 9); |
431 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); | ||
431 | ret = bio_add_page(bio, ciphertext_page, | 432 | ret = bio_add_page(bio, ciphertext_page, |
432 | inode->i_sb->s_blocksize, 0); | 433 | inode->i_sb->s_blocksize, 0); |
433 | if (ret != inode->i_sb->s_blocksize) { | 434 | if (ret != inode->i_sb->s_blocksize) { |
@@ -439,7 +440,7 @@ int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk, | |||
439 | err = -EIO; | 440 | err = -EIO; |
440 | goto errout; | 441 | goto errout; |
441 | } | 442 | } |
442 | err = submit_bio_wait(WRITE, bio); | 443 | err = submit_bio_wait(bio); |
443 | if ((err == 0) && bio->bi_error) | 444 | if ((err == 0) && bio->bi_error) |
444 | err = -EIO; | 445 | err = -EIO; |
445 | bio_put(bio); | 446 | bio_put(bio); |
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 3da4cf8d18b6..1e4b0b7425e5 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c | |||
@@ -214,7 +214,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group) | |||
214 | trace_ext4_load_inode_bitmap(sb, block_group); | 214 | trace_ext4_load_inode_bitmap(sb, block_group); |
215 | bh->b_end_io = ext4_end_bitmap_read; | 215 | bh->b_end_io = ext4_end_bitmap_read; |
216 | get_bh(bh); | 216 | get_bh(bh); |
217 | submit_bh(READ | REQ_META | REQ_PRIO, bh); | 217 | submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh); |
218 | wait_on_buffer(bh); | 218 | wait_on_buffer(bh); |
219 | if (!buffer_uptodate(bh)) { | 219 | if (!buffer_uptodate(bh)) { |
220 | put_bh(bh); | 220 | put_bh(bh); |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index f7140ca66e3b..ae44916d40e2 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -981,7 +981,7 @@ struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode, | |||
981 | return bh; | 981 | return bh; |
982 | if (!bh || buffer_uptodate(bh)) | 982 | if (!bh || buffer_uptodate(bh)) |
983 | return bh; | 983 | return bh; |
984 | ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh); | 984 | ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh); |
985 | wait_on_buffer(bh); | 985 | wait_on_buffer(bh); |
986 | if (buffer_uptodate(bh)) | 986 | if (buffer_uptodate(bh)) |
987 | return bh; | 987 | return bh; |
@@ -1135,7 +1135,7 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len, | |||
1135 | if (!buffer_uptodate(bh) && !buffer_delay(bh) && | 1135 | if (!buffer_uptodate(bh) && !buffer_delay(bh) && |
1136 | !buffer_unwritten(bh) && | 1136 | !buffer_unwritten(bh) && |
1137 | (block_start < from || block_end > to)) { | 1137 | (block_start < from || block_end > to)) { |
1138 | ll_rw_block(READ, 1, &bh); | 1138 | ll_rw_block(REQ_OP_READ, 0, 1, &bh); |
1139 | *wait_bh++ = bh; | 1139 | *wait_bh++ = bh; |
1140 | decrypt = ext4_encrypted_inode(inode) && | 1140 | decrypt = ext4_encrypted_inode(inode) && |
1141 | S_ISREG(inode->i_mode); | 1141 | S_ISREG(inode->i_mode); |
@@ -3698,7 +3698,7 @@ static int __ext4_block_zero_page_range(handle_t *handle, | |||
3698 | 3698 | ||
3699 | if (!buffer_uptodate(bh)) { | 3699 | if (!buffer_uptodate(bh)) { |
3700 | err = -EIO; | 3700 | err = -EIO; |
3701 | ll_rw_block(READ, 1, &bh); | 3701 | ll_rw_block(REQ_OP_READ, 0, 1, &bh); |
3702 | wait_on_buffer(bh); | 3702 | wait_on_buffer(bh); |
3703 | /* Uhhuh. Read error. Complain and punt. */ | 3703 | /* Uhhuh. Read error. Complain and punt. */ |
3704 | if (!buffer_uptodate(bh)) | 3704 | if (!buffer_uptodate(bh)) |
@@ -4281,7 +4281,7 @@ make_io: | |||
4281 | trace_ext4_load_inode(inode); | 4281 | trace_ext4_load_inode(inode); |
4282 | get_bh(bh); | 4282 | get_bh(bh); |
4283 | bh->b_end_io = end_buffer_read_sync; | 4283 | bh->b_end_io = end_buffer_read_sync; |
4284 | submit_bh(READ | REQ_META | REQ_PRIO, bh); | 4284 | submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh); |
4285 | wait_on_buffer(bh); | 4285 | wait_on_buffer(bh); |
4286 | if (!buffer_uptodate(bh)) { | 4286 | if (!buffer_uptodate(bh)) { |
4287 | EXT4_ERROR_INODE_BLOCK(inode, block, | 4287 | EXT4_ERROR_INODE_BLOCK(inode, block, |
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c index 23d436d6f8b8..d89754ef1aab 100644 --- a/fs/ext4/mmp.c +++ b/fs/ext4/mmp.c | |||
@@ -52,7 +52,7 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh) | |||
52 | lock_buffer(bh); | 52 | lock_buffer(bh); |
53 | bh->b_end_io = end_buffer_write_sync; | 53 | bh->b_end_io = end_buffer_write_sync; |
54 | get_bh(bh); | 54 | get_bh(bh); |
55 | submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh); | 55 | submit_bh(REQ_OP_WRITE, WRITE_SYNC | REQ_META | REQ_PRIO, bh); |
56 | wait_on_buffer(bh); | 56 | wait_on_buffer(bh); |
57 | sb_end_write(sb); | 57 | sb_end_write(sb); |
58 | if (unlikely(!buffer_uptodate(bh))) | 58 | if (unlikely(!buffer_uptodate(bh))) |
@@ -88,7 +88,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh, | |||
88 | get_bh(*bh); | 88 | get_bh(*bh); |
89 | lock_buffer(*bh); | 89 | lock_buffer(*bh); |
90 | (*bh)->b_end_io = end_buffer_read_sync; | 90 | (*bh)->b_end_io = end_buffer_read_sync; |
91 | submit_bh(READ_SYNC | REQ_META | REQ_PRIO, *bh); | 91 | submit_bh(REQ_OP_READ, READ_SYNC | REQ_META | REQ_PRIO, *bh); |
92 | wait_on_buffer(*bh); | 92 | wait_on_buffer(*bh); |
93 | if (!buffer_uptodate(*bh)) { | 93 | if (!buffer_uptodate(*bh)) { |
94 | ret = -EIO; | 94 | ret = -EIO; |
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index ec4c39952e84..6569c6b47da4 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c | |||
@@ -1443,7 +1443,8 @@ restart: | |||
1443 | } | 1443 | } |
1444 | bh_use[ra_max] = bh; | 1444 | bh_use[ra_max] = bh; |
1445 | if (bh) | 1445 | if (bh) |
1446 | ll_rw_block(READ | REQ_META | REQ_PRIO, | 1446 | ll_rw_block(REQ_OP_READ, |
1447 | REQ_META | REQ_PRIO, | ||
1447 | 1, &bh); | 1448 | 1, &bh); |
1448 | } | 1449 | } |
1449 | } | 1450 | } |
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 2a01df9cc1c3..5185fed40fab 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c | |||
@@ -340,9 +340,10 @@ void ext4_io_submit(struct ext4_io_submit *io) | |||
340 | struct bio *bio = io->io_bio; | 340 | struct bio *bio = io->io_bio; |
341 | 341 | ||
342 | if (bio) { | 342 | if (bio) { |
343 | int io_op = io->io_wbc->sync_mode == WB_SYNC_ALL ? | 343 | int io_op_flags = io->io_wbc->sync_mode == WB_SYNC_ALL ? |
344 | WRITE_SYNC : WRITE; | 344 | WRITE_SYNC : 0; |
345 | submit_bio(io_op, io->io_bio); | 345 | bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags); |
346 | submit_bio(io->io_bio); | ||
346 | } | 347 | } |
347 | io->io_bio = NULL; | 348 | io->io_bio = NULL; |
348 | } | 349 | } |
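ext4_io_submit() is the one ext4 site that still computes flags at submission time: the writeback sync mode selects only the modifier, while the operation is unconditionally a write. The resulting idiom, condensed from the hunk above:

	int io_op_flags = io->io_wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0;

	bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags);
	submit_bio(io->io_bio);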
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c index dc54a4b60eba..2ced5a823354 100644 --- a/fs/ext4/readpage.c +++ b/fs/ext4/readpage.c | |||
@@ -271,7 +271,7 @@ int ext4_mpage_readpages(struct address_space *mapping, | |||
271 | */ | 271 | */ |
272 | if (bio && (last_block_in_bio != blocks[0] - 1)) { | 272 | if (bio && (last_block_in_bio != blocks[0] - 1)) { |
273 | submit_and_realloc: | 273 | submit_and_realloc: |
274 | submit_bio(READ, bio); | 274 | submit_bio(bio); |
275 | bio = NULL; | 275 | bio = NULL; |
276 | } | 276 | } |
277 | if (bio == NULL) { | 277 | if (bio == NULL) { |
@@ -294,6 +294,7 @@ int ext4_mpage_readpages(struct address_space *mapping, | |||
294 | bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9); | 294 | bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9); |
295 | bio->bi_end_io = mpage_end_io; | 295 | bio->bi_end_io = mpage_end_io; |
296 | bio->bi_private = ctx; | 296 | bio->bi_private = ctx; |
297 | bio_set_op_attrs(bio, REQ_OP_READ, 0); | ||
297 | } | 298 | } |
298 | 299 | ||
299 | length = first_hole << blkbits; | 300 | length = first_hole << blkbits; |
@@ -303,14 +304,14 @@ int ext4_mpage_readpages(struct address_space *mapping, | |||
303 | if (((map.m_flags & EXT4_MAP_BOUNDARY) && | 304 | if (((map.m_flags & EXT4_MAP_BOUNDARY) && |
304 | (relative_block == map.m_len)) || | 305 | (relative_block == map.m_len)) || |
305 | (first_hole != blocks_per_page)) { | 306 | (first_hole != blocks_per_page)) { |
306 | submit_bio(READ, bio); | 307 | submit_bio(bio); |
307 | bio = NULL; | 308 | bio = NULL; |
308 | } else | 309 | } else |
309 | last_block_in_bio = blocks[blocks_per_page - 1]; | 310 | last_block_in_bio = blocks[blocks_per_page - 1]; |
310 | goto next_page; | 311 | goto next_page; |
311 | confused: | 312 | confused: |
312 | if (bio) { | 313 | if (bio) { |
313 | submit_bio(READ, bio); | 314 | submit_bio(bio); |
314 | bio = NULL; | 315 | bio = NULL; |
315 | } | 316 | } |
316 | if (!PageUptodate(page)) | 317 | if (!PageUptodate(page)) |
@@ -323,6 +324,6 @@ int ext4_mpage_readpages(struct address_space *mapping, | |||
323 | } | 324 | } |
324 | BUG_ON(pages && !list_empty(pages)); | 325 | BUG_ON(pages && !list_empty(pages)); |
325 | if (bio) | 326 | if (bio) |
326 | submit_bio(READ, bio); | 327 | submit_bio(bio); |
327 | return 0; | 328 | return 0; |
328 | } | 329 | } |
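On the read side the change is even smaller: REQ_OP_READ is stamped once when the bio is assembled, and every subsequent submit loses its first argument. Sketch of the ext4_mpage_readpages() flow above:

	bio->bi_end_io = mpage_end_io;
	bio->bi_private = ctx;
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	/* ... pages are added to the bio ... */

	submit_bio(bio);	/* was submit_bio(READ, bio) */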
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 3822a5aedc61..b1a347100d54 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -4204,7 +4204,7 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb, | |||
4204 | goto out_bdev; | 4204 | goto out_bdev; |
4205 | } | 4205 | } |
4206 | journal->j_private = sb; | 4206 | journal->j_private = sb; |
4207 | ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer); | 4207 | ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer); |
4208 | wait_on_buffer(journal->j_sb_buffer); | 4208 | wait_on_buffer(journal->j_sb_buffer); |
4209 | if (!buffer_uptodate(journal->j_sb_buffer)) { | 4209 | if (!buffer_uptodate(journal->j_sb_buffer)) { |
4210 | ext4_msg(sb, KERN_ERR, "I/O error on journal device"); | 4210 | ext4_msg(sb, KERN_ERR, "I/O error on journal device"); |
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 389160049993..b6d600e91f39 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c | |||
@@ -63,14 +63,15 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index, | |||
63 | struct f2fs_io_info fio = { | 63 | struct f2fs_io_info fio = { |
64 | .sbi = sbi, | 64 | .sbi = sbi, |
65 | .type = META, | 65 | .type = META, |
66 | .rw = READ_SYNC | REQ_META | REQ_PRIO, | 66 | .op = REQ_OP_READ, |
67 | .op_flags = READ_SYNC | REQ_META | REQ_PRIO, | ||
67 | .old_blkaddr = index, | 68 | .old_blkaddr = index, |
68 | .new_blkaddr = index, | 69 | .new_blkaddr = index, |
69 | .encrypted_page = NULL, | 70 | .encrypted_page = NULL, |
70 | }; | 71 | }; |
71 | 72 | ||
72 | if (unlikely(!is_meta)) | 73 | if (unlikely(!is_meta)) |
73 | fio.rw &= ~REQ_META; | 74 | fio.op_flags &= ~REQ_META; |
74 | repeat: | 75 | repeat: |
75 | page = f2fs_grab_cache_page(mapping, index, false); | 76 | page = f2fs_grab_cache_page(mapping, index, false); |
76 | if (!page) { | 77 | if (!page) { |
@@ -157,13 +158,14 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, | |||
157 | struct f2fs_io_info fio = { | 158 | struct f2fs_io_info fio = { |
158 | .sbi = sbi, | 159 | .sbi = sbi, |
159 | .type = META, | 160 | .type = META, |
160 | .rw = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : READA, | 161 | .op = REQ_OP_READ, |
162 | .op_flags = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : READA, | ||
161 | .encrypted_page = NULL, | 163 | .encrypted_page = NULL, |
162 | }; | 164 | }; |
163 | struct blk_plug plug; | 165 | struct blk_plug plug; |
164 | 166 | ||
165 | if (unlikely(type == META_POR)) | 167 | if (unlikely(type == META_POR)) |
166 | fio.rw &= ~REQ_META; | 168 | fio.op_flags &= ~REQ_META; |
167 | 169 | ||
168 | blk_start_plug(&plug); | 170 | blk_start_plug(&plug); |
169 | for (; nrpages-- > 0; blkno++) { | 171 | for (; nrpages-- > 0; blkno++) { |
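f2fs funnels all I/O through struct f2fs_io_info, so its conversion is mostly a matter of splitting the cached field: the single .rw initializer becomes an explicit op plus flags, and the fio.rw &= ~REQ_META fixups move to fio.op_flags. New initializer shape, as in __get_meta_page() above:

	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.op = REQ_OP_READ,
		.op_flags = READ_SYNC | REQ_META | REQ_PRIO,
		/* remaining fields unchanged */
	};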
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 9a8bbc1fb1fa..8769e8349dff 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c | |||
@@ -97,12 +97,11 @@ static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr, | |||
97 | return bio; | 97 | return bio; |
98 | } | 98 | } |
99 | 99 | ||
100 | static inline void __submit_bio(struct f2fs_sb_info *sbi, int rw, | 100 | static inline void __submit_bio(struct f2fs_sb_info *sbi, struct bio *bio) |
101 | struct bio *bio) | ||
102 | { | 101 | { |
103 | if (!is_read_io(rw)) | 102 | if (!is_read_io(bio_op(bio))) |
104 | atomic_inc(&sbi->nr_wb_bios); | 103 | atomic_inc(&sbi->nr_wb_bios); |
105 | submit_bio(rw, bio); | 104 | submit_bio(bio); |
106 | } | 105 | } |
107 | 106 | ||
108 | static void __submit_merged_bio(struct f2fs_bio_info *io) | 107 | static void __submit_merged_bio(struct f2fs_bio_info *io) |
@@ -112,12 +111,14 @@ static void __submit_merged_bio(struct f2fs_bio_info *io) | |||
112 | if (!io->bio) | 111 | if (!io->bio) |
113 | return; | 112 | return; |
114 | 113 | ||
115 | if (is_read_io(fio->rw)) | 114 | if (is_read_io(fio->op)) |
116 | trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio); | 115 | trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio); |
117 | else | 116 | else |
118 | trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio); | 117 | trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio); |
119 | 118 | ||
120 | __submit_bio(io->sbi, fio->rw, io->bio); | 119 | bio_set_op_attrs(io->bio, fio->op, fio->op_flags); |
120 | |||
121 | __submit_bio(io->sbi, io->bio); | ||
121 | io->bio = NULL; | 122 | io->bio = NULL; |
122 | } | 123 | } |
123 | 124 | ||
@@ -183,10 +184,12 @@ static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, | |||
183 | /* change META to META_FLUSH in the checkpoint procedure */ | 184 | /* change META to META_FLUSH in the checkpoint procedure */ |
184 | if (type >= META_FLUSH) { | 185 | if (type >= META_FLUSH) { |
185 | io->fio.type = META_FLUSH; | 186 | io->fio.type = META_FLUSH; |
187 | io->fio.op = REQ_OP_WRITE; | ||
186 | if (test_opt(sbi, NOBARRIER)) | 188 | if (test_opt(sbi, NOBARRIER)) |
187 | io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO; | 189 | io->fio.op_flags = WRITE_FLUSH | REQ_META | REQ_PRIO; |
188 | else | 190 | else |
189 | io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO; | 191 | io->fio.op_flags = WRITE_FLUSH_FUA | REQ_META | |
192 | REQ_PRIO; | ||
190 | } | 193 | } |
191 | __submit_merged_bio(io); | 194 | __submit_merged_bio(io); |
192 | out: | 195 | out: |
@@ -228,14 +231,16 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio) | |||
228 | f2fs_trace_ios(fio, 0); | 231 | f2fs_trace_ios(fio, 0); |
229 | 232 | ||
230 | /* Allocate a new bio */ | 233 | /* Allocate a new bio */ |
231 | bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw)); | 234 | bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->op)); |
232 | 235 | ||
233 | if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { | 236 | if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { |
234 | bio_put(bio); | 237 | bio_put(bio); |
235 | return -EFAULT; | 238 | return -EFAULT; |
236 | } | 239 | } |
240 | bio->bi_rw = fio->op_flags; | ||
241 | bio_set_op_attrs(bio, fio->op, fio->op_flags); | ||
237 | 242 | ||
238 | __submit_bio(fio->sbi, fio->rw, bio); | 243 | __submit_bio(fio->sbi, bio); |
239 | return 0; | 244 | return 0; |
240 | } | 245 | } |
241 | 246 | ||
@@ -244,7 +249,7 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio) | |||
244 | struct f2fs_sb_info *sbi = fio->sbi; | 249 | struct f2fs_sb_info *sbi = fio->sbi; |
245 | enum page_type btype = PAGE_TYPE_OF_BIO(fio->type); | 250 | enum page_type btype = PAGE_TYPE_OF_BIO(fio->type); |
246 | struct f2fs_bio_info *io; | 251 | struct f2fs_bio_info *io; |
247 | bool is_read = is_read_io(fio->rw); | 252 | bool is_read = is_read_io(fio->op); |
248 | struct page *bio_page; | 253 | struct page *bio_page; |
249 | 254 | ||
250 | io = is_read ? &sbi->read_io : &sbi->write_io[btype]; | 255 | io = is_read ? &sbi->read_io : &sbi->write_io[btype]; |
@@ -256,7 +261,7 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio) | |||
256 | down_write(&io->io_rwsem); | 261 | down_write(&io->io_rwsem); |
257 | 262 | ||
258 | if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 || | 263 | if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 || |
259 | io->fio.rw != fio->rw)) | 264 | (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags))) |
260 | __submit_merged_bio(io); | 265 | __submit_merged_bio(io); |
261 | alloc_new: | 266 | alloc_new: |
262 | if (io->bio == NULL) { | 267 | if (io->bio == NULL) { |
@@ -390,7 +395,7 @@ int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index) | |||
390 | } | 395 | } |
391 | 396 | ||
392 | struct page *get_read_data_page(struct inode *inode, pgoff_t index, | 397 | struct page *get_read_data_page(struct inode *inode, pgoff_t index, |
393 | int rw, bool for_write) | 398 | int op_flags, bool for_write) |
394 | { | 399 | { |
395 | struct address_space *mapping = inode->i_mapping; | 400 | struct address_space *mapping = inode->i_mapping; |
396 | struct dnode_of_data dn; | 401 | struct dnode_of_data dn; |
@@ -400,7 +405,8 @@ struct page *get_read_data_page(struct inode *inode, pgoff_t index, | |||
400 | struct f2fs_io_info fio = { | 405 | struct f2fs_io_info fio = { |
401 | .sbi = F2FS_I_SB(inode), | 406 | .sbi = F2FS_I_SB(inode), |
402 | .type = DATA, | 407 | .type = DATA, |
403 | .rw = rw, | 408 | .op = REQ_OP_READ, |
409 | .op_flags = op_flags, | ||
404 | .encrypted_page = NULL, | 410 | .encrypted_page = NULL, |
405 | }; | 411 | }; |
406 | 412 | ||
@@ -1051,7 +1057,7 @@ got_it: | |||
1051 | */ | 1057 | */ |
1052 | if (bio && (last_block_in_bio != block_nr - 1)) { | 1058 | if (bio && (last_block_in_bio != block_nr - 1)) { |
1053 | submit_and_realloc: | 1059 | submit_and_realloc: |
1054 | __submit_bio(F2FS_I_SB(inode), READ, bio); | 1060 | __submit_bio(F2FS_I_SB(inode), bio); |
1055 | bio = NULL; | 1061 | bio = NULL; |
1056 | } | 1062 | } |
1057 | if (bio == NULL) { | 1063 | if (bio == NULL) { |
@@ -1080,6 +1086,7 @@ submit_and_realloc: | |||
1080 | bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr); | 1086 | bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr); |
1081 | bio->bi_end_io = f2fs_read_end_io; | 1087 | bio->bi_end_io = f2fs_read_end_io; |
1082 | bio->bi_private = ctx; | 1088 | bio->bi_private = ctx; |
1089 | bio_set_op_attrs(bio, REQ_OP_READ, 0); | ||
1083 | } | 1090 | } |
1084 | 1091 | ||
1085 | if (bio_add_page(bio, page, blocksize, 0) < blocksize) | 1092 | if (bio_add_page(bio, page, blocksize, 0) < blocksize) |
@@ -1094,7 +1101,7 @@ set_error_page: | |||
1094 | goto next_page; | 1101 | goto next_page; |
1095 | confused: | 1102 | confused: |
1096 | if (bio) { | 1103 | if (bio) { |
1097 | __submit_bio(F2FS_I_SB(inode), READ, bio); | 1104 | __submit_bio(F2FS_I_SB(inode), bio); |
1098 | bio = NULL; | 1105 | bio = NULL; |
1099 | } | 1106 | } |
1100 | unlock_page(page); | 1107 | unlock_page(page); |
@@ -1104,7 +1111,7 @@ next_page: | |||
1104 | } | 1111 | } |
1105 | BUG_ON(pages && !list_empty(pages)); | 1112 | BUG_ON(pages && !list_empty(pages)); |
1106 | if (bio) | 1113 | if (bio) |
1107 | __submit_bio(F2FS_I_SB(inode), READ, bio); | 1114 | __submit_bio(F2FS_I_SB(inode), bio); |
1108 | return 0; | 1115 | return 0; |
1109 | } | 1116 | } |
1110 | 1117 | ||
@@ -1221,7 +1228,8 @@ static int f2fs_write_data_page(struct page *page, | |||
1221 | struct f2fs_io_info fio = { | 1228 | struct f2fs_io_info fio = { |
1222 | .sbi = sbi, | 1229 | .sbi = sbi, |
1223 | .type = DATA, | 1230 | .type = DATA, |
1224 | .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE, | 1231 | .op = REQ_OP_WRITE, |
1232 | .op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0, | ||
1225 | .page = page, | 1233 | .page = page, |
1226 | .encrypted_page = NULL, | 1234 | .encrypted_page = NULL, |
1227 | }; | 1235 | }; |
@@ -1662,7 +1670,8 @@ repeat: | |||
1662 | struct f2fs_io_info fio = { | 1670 | struct f2fs_io_info fio = { |
1663 | .sbi = sbi, | 1671 | .sbi = sbi, |
1664 | .type = DATA, | 1672 | .type = DATA, |
1665 | .rw = READ_SYNC, | 1673 | .op = REQ_OP_READ, |
1674 | .op_flags = READ_SYNC, | ||
1666 | .old_blkaddr = blkaddr, | 1675 | .old_blkaddr = blkaddr, |
1667 | .new_blkaddr = blkaddr, | 1676 | .new_blkaddr = blkaddr, |
1668 | .page = page, | 1677 | .page = page, |
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 916e7c238e3d..23ae6a81ccd6 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h | |||
@@ -686,14 +686,15 @@ enum page_type { | |||
686 | struct f2fs_io_info { | 686 | struct f2fs_io_info { |
687 | struct f2fs_sb_info *sbi; /* f2fs_sb_info pointer */ | 687 | struct f2fs_sb_info *sbi; /* f2fs_sb_info pointer */ |
688 | enum page_type type; /* contains DATA/NODE/META/META_FLUSH */ | 688 | enum page_type type; /* contains DATA/NODE/META/META_FLUSH */ |
689 | int rw; /* contains R/RS/W/WS with REQ_META/REQ_PRIO */ | 689 | int op; /* contains REQ_OP_ */ |
690 | int op_flags; /* rq_flag_bits */ | ||
690 | block_t new_blkaddr; /* new block address to be written */ | 691 | block_t new_blkaddr; /* new block address to be written */ |
691 | block_t old_blkaddr; /* old block address before Cow */ | 692 | block_t old_blkaddr; /* old block address before Cow */ |
692 | struct page *page; /* page to be written */ | 693 | struct page *page; /* page to be written */ |
693 | struct page *encrypted_page; /* encrypted page */ | 694 | struct page *encrypted_page; /* encrypted page */ |
694 | }; | 695 | }; |
695 | 696 | ||
696 | #define is_read_io(rw) (((rw) & 1) == READ) | 697 | #define is_read_io(rw) (rw == READ) |
697 | struct f2fs_bio_info { | 698 | struct f2fs_bio_info { |
698 | struct f2fs_sb_info *sbi; /* f2fs superblock */ | 699 | struct f2fs_sb_info *sbi; /* f2fs superblock */ |
699 | struct bio *bio; /* bios to merge */ | 700 | struct bio *bio; /* bios to merge */ |
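Every f2fs hunk above follows the same conversion: the old combined rw field of struct f2fs_io_info is split into op (the REQ_OP_* direction) and op_flags (modifier bits such as REQ_META, REQ_PRIO, or WRITE_SYNC), and is_read_io() now tests the bare op. A minimal sketch of a converted caller, condensed from the initializers shown above (the wrapper function itself is hypothetical, not kernel source):

	/* Sketch only: the post-conversion shape of an f2fs io descriptor. */
	static void example_sync_write(struct f2fs_sb_info *sbi, struct page *page)
	{
		struct f2fs_io_info fio = {
			.sbi = sbi,
			.type = DATA,
			.op = REQ_OP_WRITE,                /* direction only */
			.op_flags = WRITE_SYNC | REQ_PRIO, /* modifiers only */
			.page = page,
			.encrypted_page = NULL,
		};

		f2fs_submit_page_mbio(&fio);
	}

The merge path in f2fs_submit_page_mbio() accordingly compares both halves (io->fio.op and io->fio.op_flags) before deciding whether the pending bio can keep growing.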
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 38d56f678912..3649d86bb431 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c | |||
@@ -538,7 +538,8 @@ static void move_encrypted_block(struct inode *inode, block_t bidx) | |||
538 | struct f2fs_io_info fio = { | 538 | struct f2fs_io_info fio = { |
539 | .sbi = F2FS_I_SB(inode), | 539 | .sbi = F2FS_I_SB(inode), |
540 | .type = DATA, | 540 | .type = DATA, |
541 | .rw = READ_SYNC, | 541 | .op = REQ_OP_READ, |
542 | .op_flags = READ_SYNC, | ||
542 | .encrypted_page = NULL, | 543 | .encrypted_page = NULL, |
543 | }; | 544 | }; |
544 | struct dnode_of_data dn; | 545 | struct dnode_of_data dn; |
@@ -612,7 +613,8 @@ static void move_encrypted_block(struct inode *inode, block_t bidx) | |||
612 | /* allocate block address */ | 613 | /* allocate block address */ |
613 | f2fs_wait_on_page_writeback(dn.node_page, NODE, true); | 614 | f2fs_wait_on_page_writeback(dn.node_page, NODE, true); |
614 | 615 | ||
615 | fio.rw = WRITE_SYNC; | 616 | fio.op = REQ_OP_WRITE; |
617 | fio.op_flags = WRITE_SYNC; | ||
616 | fio.new_blkaddr = newaddr; | 618 | fio.new_blkaddr = newaddr; |
617 | f2fs_submit_page_mbio(&fio); | 619 | f2fs_submit_page_mbio(&fio); |
618 | 620 | ||
@@ -649,7 +651,8 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type) | |||
649 | struct f2fs_io_info fio = { | 651 | struct f2fs_io_info fio = { |
650 | .sbi = F2FS_I_SB(inode), | 652 | .sbi = F2FS_I_SB(inode), |
651 | .type = DATA, | 653 | .type = DATA, |
652 | .rw = WRITE_SYNC, | 654 | .op = REQ_OP_WRITE, |
655 | .op_flags = WRITE_SYNC, | ||
653 | .page = page, | 656 | .page = page, |
654 | .encrypted_page = NULL, | 657 | .encrypted_page = NULL, |
655 | }; | 658 | }; |
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index a4bb155dd00a..c15e53c1d794 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c | |||
@@ -108,7 +108,8 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page) | |||
108 | struct f2fs_io_info fio = { | 108 | struct f2fs_io_info fio = { |
109 | .sbi = F2FS_I_SB(dn->inode), | 109 | .sbi = F2FS_I_SB(dn->inode), |
110 | .type = DATA, | 110 | .type = DATA, |
111 | .rw = WRITE_SYNC | REQ_PRIO, | 111 | .op = REQ_OP_WRITE, |
112 | .op_flags = WRITE_SYNC | REQ_PRIO, | ||
112 | .page = page, | 113 | .page = page, |
113 | .encrypted_page = NULL, | 114 | .encrypted_page = NULL, |
114 | }; | 115 | }; |
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 1f21aae80c40..e53403987f6d 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c | |||
@@ -1070,14 +1070,15 @@ fail: | |||
1070 | * 0: f2fs_put_page(page, 0) | 1070 | * 0: f2fs_put_page(page, 0) |
1071 | * LOCKED_PAGE or error: f2fs_put_page(page, 1) | 1071 | * LOCKED_PAGE or error: f2fs_put_page(page, 1) |
1072 | */ | 1072 | */ |
1073 | static int read_node_page(struct page *page, int rw) | 1073 | static int read_node_page(struct page *page, int op_flags) |
1074 | { | 1074 | { |
1075 | struct f2fs_sb_info *sbi = F2FS_P_SB(page); | 1075 | struct f2fs_sb_info *sbi = F2FS_P_SB(page); |
1076 | struct node_info ni; | 1076 | struct node_info ni; |
1077 | struct f2fs_io_info fio = { | 1077 | struct f2fs_io_info fio = { |
1078 | .sbi = sbi, | 1078 | .sbi = sbi, |
1079 | .type = NODE, | 1079 | .type = NODE, |
1080 | .rw = rw, | 1080 | .op = REQ_OP_READ, |
1081 | .op_flags = op_flags, | ||
1081 | .page = page, | 1082 | .page = page, |
1082 | .encrypted_page = NULL, | 1083 | .encrypted_page = NULL, |
1083 | }; | 1084 | }; |
@@ -1568,7 +1569,8 @@ static int f2fs_write_node_page(struct page *page, | |||
1568 | struct f2fs_io_info fio = { | 1569 | struct f2fs_io_info fio = { |
1569 | .sbi = sbi, | 1570 | .sbi = sbi, |
1570 | .type = NODE, | 1571 | .type = NODE, |
1571 | .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE, | 1572 | .op = REQ_OP_WRITE, |
1573 | .op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0, | ||
1572 | .page = page, | 1574 | .page = page, |
1573 | .encrypted_page = NULL, | 1575 | .encrypted_page = NULL, |
1574 | }; | 1576 | }; |
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 2e6f537a0e7d..4c2d1fa1e0e2 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c | |||
@@ -257,7 +257,8 @@ static int __commit_inmem_pages(struct inode *inode, | |||
257 | struct f2fs_io_info fio = { | 257 | struct f2fs_io_info fio = { |
258 | .sbi = sbi, | 258 | .sbi = sbi, |
259 | .type = DATA, | 259 | .type = DATA, |
260 | .rw = WRITE_SYNC | REQ_PRIO, | 260 | .op = REQ_OP_WRITE, |
261 | .op_flags = WRITE_SYNC | REQ_PRIO, | ||
261 | .encrypted_page = NULL, | 262 | .encrypted_page = NULL, |
262 | }; | 263 | }; |
263 | bool submit_bio = false; | 264 | bool submit_bio = false; |
@@ -406,7 +407,8 @@ repeat: | |||
406 | fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list); | 407 | fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list); |
407 | 408 | ||
408 | bio->bi_bdev = sbi->sb->s_bdev; | 409 | bio->bi_bdev = sbi->sb->s_bdev; |
409 | ret = submit_bio_wait(WRITE_FLUSH, bio); | 410 | bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH); |
411 | ret = submit_bio_wait(bio); | ||
410 | 412 | ||
411 | llist_for_each_entry_safe(cmd, next, | 413 | llist_for_each_entry_safe(cmd, next, |
412 | fcc->dispatch_list, llnode) { | 414 | fcc->dispatch_list, llnode) { |
@@ -438,7 +440,8 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi) | |||
438 | int ret; | 440 | int ret; |
439 | 441 | ||
440 | bio->bi_bdev = sbi->sb->s_bdev; | 442 | bio->bi_bdev = sbi->sb->s_bdev; |
441 | ret = submit_bio_wait(WRITE_FLUSH, bio); | 443 | bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH); |
444 | ret = submit_bio_wait(bio); | ||
442 | bio_put(bio); | 445 | bio_put(bio); |
443 | return ret; | 446 | return ret; |
444 | } | 447 | } |
@@ -1401,7 +1404,8 @@ void write_meta_page(struct f2fs_sb_info *sbi, struct page *page) | |||
1401 | struct f2fs_io_info fio = { | 1404 | struct f2fs_io_info fio = { |
1402 | .sbi = sbi, | 1405 | .sbi = sbi, |
1403 | .type = META, | 1406 | .type = META, |
1404 | .rw = WRITE_SYNC | REQ_META | REQ_PRIO, | 1407 | .op = REQ_OP_WRITE, |
1408 | .op_flags = WRITE_SYNC | REQ_META | REQ_PRIO, | ||
1405 | .old_blkaddr = page->index, | 1409 | .old_blkaddr = page->index, |
1406 | .new_blkaddr = page->index, | 1410 | .new_blkaddr = page->index, |
1407 | .page = page, | 1411 | .page = page, |
@@ -1409,7 +1413,7 @@ void write_meta_page(struct f2fs_sb_info *sbi, struct page *page) | |||
1409 | }; | 1413 | }; |
1410 | 1414 | ||
1411 | if (unlikely(page->index >= MAIN_BLKADDR(sbi))) | 1415 | if (unlikely(page->index >= MAIN_BLKADDR(sbi))) |
1412 | fio.rw &= ~REQ_META; | 1416 | fio.op_flags &= ~REQ_META; |
1413 | 1417 | ||
1414 | set_page_writeback(page); | 1418 | set_page_writeback(page); |
1415 | f2fs_submit_page_mbio(&fio); | 1419 | f2fs_submit_page_mbio(&fio); |
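segment.c also shows the flush path after submit_bio_wait() lost its flags argument: the bio must be stamped with bio_set_op_attrs() before it is handed over. A hedged sketch of the resulting idiom, with error handling trimmed and the f2fs_bio_alloc(0) helper assumed from the surrounding (unshown) code:

	/* Sketch: issue an empty flush bio and wait for completion. */
	struct bio *bio = f2fs_bio_alloc(0);
	int ret;

	bio->bi_bdev = sbi->sb->s_bdev;
	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH); /* op and flags ride in the bio */
	ret = submit_bio_wait(bio);                       /* no rw argument anymore */
	bio_put(bio);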
diff --git a/fs/f2fs/trace.c b/fs/f2fs/trace.c index 562ce0821559..73b4e1d1912a 100644 --- a/fs/f2fs/trace.c +++ b/fs/f2fs/trace.c | |||
@@ -25,11 +25,11 @@ static inline void __print_last_io(void) | |||
25 | if (!last_io.len) | 25 | if (!last_io.len) |
26 | return; | 26 | return; |
27 | 27 | ||
28 | trace_printk("%3x:%3x %4x %-16s %2x %5x %12x %4x\n", | 28 | trace_printk("%3x:%3x %4x %-16s %2x %5x %5x %12x %4x\n", |
29 | last_io.major, last_io.minor, | 29 | last_io.major, last_io.minor, |
30 | last_io.pid, "----------------", | 30 | last_io.pid, "----------------", |
31 | last_io.type, | 31 | last_io.type, |
32 | last_io.fio.rw, | 32 | last_io.fio.op, last_io.fio.op_flags, |
33 | last_io.fio.new_blkaddr, | 33 | last_io.fio.new_blkaddr, |
34 | last_io.len); | 34 | last_io.len); |
35 | memset(&last_io, 0, sizeof(last_io)); | 35 | memset(&last_io, 0, sizeof(last_io)); |
@@ -101,7 +101,8 @@ void f2fs_trace_ios(struct f2fs_io_info *fio, int flush) | |||
101 | if (last_io.major == major && last_io.minor == minor && | 101 | if (last_io.major == major && last_io.minor == minor && |
102 | last_io.pid == pid && | 102 | last_io.pid == pid && |
103 | last_io.type == __file_type(inode, pid) && | 103 | last_io.type == __file_type(inode, pid) && |
104 | last_io.fio.rw == fio->rw && | 104 | last_io.fio.op == fio->op && |
105 | last_io.fio.op_flags == fio->op_flags && | ||
105 | last_io.fio.new_blkaddr + last_io.len == | 106 | last_io.fio.new_blkaddr + last_io.len == |
106 | fio->new_blkaddr) { | 107 | fio->new_blkaddr) { |
107 | last_io.len++; | 108 | last_io.len++; |
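With the field split, any code that used to compare one rw value must now compare both halves; the tracing merge check above is the clearest example. In isolation:

	/* Two consecutive ios coalesce only if direction, modifiers,
	 * and block contiguity all match. */
	if (last_io.fio.op == fio->op &&
	    last_io.fio.op_flags == fio->op_flags &&
	    last_io.fio.new_blkaddr + last_io.len == fio->new_blkaddr)
		last_io.len++;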
diff --git a/fs/fat/misc.c b/fs/fat/misc.c index c4589e981760..8a8698119ff7 100644 --- a/fs/fat/misc.c +++ b/fs/fat/misc.c | |||
@@ -267,7 +267,7 @@ int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs) | |||
267 | int i, err = 0; | 267 | int i, err = 0; |
268 | 268 | ||
269 | for (i = 0; i < nr_bhs; i++) | 269 | for (i = 0; i < nr_bhs; i++) |
270 | write_dirty_buffer(bhs[i], WRITE); | 270 | write_dirty_buffer(bhs[i], 0); |
271 | 271 | ||
272 | for (i = 0; i < nr_bhs; i++) { | 272 | for (i = 0; i < nr_bhs; i++) { |
273 | wait_on_buffer(bhs[i]); | 273 | wait_on_buffer(bhs[i]); |
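The FAT one-liner is easy to misread: write_dirty_buffer() always writes, so its second argument is now modifier flags only, and the old WRITE constant collapses to 0 (no modifiers). For comparison, a synchronous variant would pass flags instead; the second line is a hypothetical illustration, not part of this diff:

	write_dirty_buffer(bhs[i], 0);          /* plain async write */
	write_dirty_buffer(bhs[i], WRITE_SYNC); /* hypothetical sync variant */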
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 24ce1cdd434a..fd6389cf0f14 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c | |||
@@ -285,7 +285,7 @@ static void gfs2_metapath_ra(struct gfs2_glock *gl, | |||
285 | if (trylock_buffer(rabh)) { | 285 | if (trylock_buffer(rabh)) { |
286 | if (!buffer_uptodate(rabh)) { | 286 | if (!buffer_uptodate(rabh)) { |
287 | rabh->b_end_io = end_buffer_read_sync; | 287 | rabh->b_end_io = end_buffer_read_sync; |
288 | submit_bh(READA | REQ_META, rabh); | 288 | submit_bh(REQ_OP_READ, READA | REQ_META, rabh); |
289 | continue; | 289 | continue; |
290 | } | 290 | } |
291 | unlock_buffer(rabh); | 291 | unlock_buffer(rabh); |
@@ -974,7 +974,7 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from) | |||
974 | 974 | ||
975 | if (!buffer_uptodate(bh)) { | 975 | if (!buffer_uptodate(bh)) { |
976 | err = -EIO; | 976 | err = -EIO; |
977 | ll_rw_block(READ, 1, &bh); | 977 | ll_rw_block(REQ_OP_READ, 0, 1, &bh); |
978 | wait_on_buffer(bh); | 978 | wait_on_buffer(bh); |
979 | /* Uhhuh. Read error. Complain and punt. */ | 979 | /* Uhhuh. Read error. Complain and punt. */ |
980 | if (!buffer_uptodate(bh)) | 980 | if (!buffer_uptodate(bh)) |
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c index e30cc9fb2bef..4d68530d6636 100644 --- a/fs/gfs2/dir.c +++ b/fs/gfs2/dir.c | |||
@@ -1513,7 +1513,7 @@ static void gfs2_dir_readahead(struct inode *inode, unsigned hsize, u32 index, | |||
1513 | continue; | 1513 | continue; |
1514 | } | 1514 | } |
1515 | bh->b_end_io = end_buffer_read_sync; | 1515 | bh->b_end_io = end_buffer_read_sync; |
1516 | submit_bh(READA | REQ_META, bh); | 1516 | submit_bh(REQ_OP_READ, READA | REQ_META, bh); |
1517 | continue; | 1517 | continue; |
1518 | } | 1518 | } |
1519 | brelse(bh); | 1519 | brelse(bh); |
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 0ff028c15199..e58ccef09c91 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c | |||
@@ -657,7 +657,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags) | |||
657 | struct gfs2_log_header *lh; | 657 | struct gfs2_log_header *lh; |
658 | unsigned int tail; | 658 | unsigned int tail; |
659 | u32 hash; | 659 | u32 hash; |
660 | int rw = WRITE_FLUSH_FUA | REQ_META; | 660 | int op_flags = WRITE_FLUSH_FUA | REQ_META; |
661 | struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO); | 661 | struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO); |
662 | enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state); | 662 | enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state); |
663 | lh = page_address(page); | 663 | lh = page_address(page); |
@@ -682,12 +682,12 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags) | |||
682 | if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) { | 682 | if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) { |
683 | gfs2_ordered_wait(sdp); | 683 | gfs2_ordered_wait(sdp); |
684 | log_flush_wait(sdp); | 684 | log_flush_wait(sdp); |
685 | rw = WRITE_SYNC | REQ_META | REQ_PRIO; | 685 | op_flags = WRITE_SYNC | REQ_META | REQ_PRIO; |
686 | } | 686 | } |
687 | 687 | ||
688 | sdp->sd_log_idle = (tail == sdp->sd_log_flush_head); | 688 | sdp->sd_log_idle = (tail == sdp->sd_log_flush_head); |
689 | gfs2_log_write_page(sdp, page); | 689 | gfs2_log_write_page(sdp, page); |
690 | gfs2_log_flush_bio(sdp, rw); | 690 | gfs2_log_flush_bio(sdp, REQ_OP_WRITE, op_flags); |
691 | log_flush_wait(sdp); | 691 | log_flush_wait(sdp); |
692 | 692 | ||
693 | if (sdp->sd_log_tail != tail) | 693 | if (sdp->sd_log_tail != tail) |
@@ -738,7 +738,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, | |||
738 | 738 | ||
739 | gfs2_ordered_write(sdp); | 739 | gfs2_ordered_write(sdp); |
740 | lops_before_commit(sdp, tr); | 740 | lops_before_commit(sdp, tr); |
741 | gfs2_log_flush_bio(sdp, WRITE); | 741 | gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0); |
742 | 742 | ||
743 | if (sdp->sd_log_head != sdp->sd_log_flush_head) { | 743 | if (sdp->sd_log_head != sdp->sd_log_flush_head) { |
744 | log_flush_wait(sdp); | 744 | log_flush_wait(sdp); |
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index 8e3ba20d5e9d..49d5a1b61b06 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c | |||
@@ -230,17 +230,19 @@ static void gfs2_end_log_write(struct bio *bio) | |||
230 | /** | 230 | /** |
231 | * gfs2_log_flush_bio - Submit any pending log bio | 231 | * gfs2_log_flush_bio - Submit any pending log bio |
232 | * @sdp: The superblock | 232 | * @sdp: The superblock |
233 | * @rw: The rw flags | 233 | * @op: REQ_OP |
234 | * @op_flags: rq_flag_bits | ||
234 | * | 235 | * |
235 | * Submit any pending part-built or full bio to the block device. If | 236 | * Submit any pending part-built or full bio to the block device. If |
236 | * there is no pending bio, then this is a no-op. | 237 | * there is no pending bio, then this is a no-op. |
237 | */ | 238 | */ |
238 | 239 | ||
239 | void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int rw) | 240 | void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int op, int op_flags) |
240 | { | 241 | { |
241 | if (sdp->sd_log_bio) { | 242 | if (sdp->sd_log_bio) { |
242 | atomic_inc(&sdp->sd_log_in_flight); | 243 | atomic_inc(&sdp->sd_log_in_flight); |
243 | submit_bio(rw, sdp->sd_log_bio); | 244 | bio_set_op_attrs(sdp->sd_log_bio, op, op_flags); |
245 | submit_bio(sdp->sd_log_bio); | ||
244 | sdp->sd_log_bio = NULL; | 246 | sdp->sd_log_bio = NULL; |
245 | } | 247 | } |
246 | } | 248 | } |
@@ -299,7 +301,7 @@ static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno) | |||
299 | nblk >>= sdp->sd_fsb2bb_shift; | 301 | nblk >>= sdp->sd_fsb2bb_shift; |
300 | if (blkno == nblk) | 302 | if (blkno == nblk) |
301 | return bio; | 303 | return bio; |
302 | gfs2_log_flush_bio(sdp, WRITE); | 304 | gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0); |
303 | } | 305 | } |
304 | 306 | ||
305 | return gfs2_log_alloc_bio(sdp, blkno); | 307 | return gfs2_log_alloc_bio(sdp, blkno); |
@@ -328,7 +330,7 @@ static void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page, | |||
328 | bio = gfs2_log_get_bio(sdp, blkno); | 330 | bio = gfs2_log_get_bio(sdp, blkno); |
329 | ret = bio_add_page(bio, page, size, offset); | 331 | ret = bio_add_page(bio, page, size, offset); |
330 | if (ret == 0) { | 332 | if (ret == 0) { |
331 | gfs2_log_flush_bio(sdp, WRITE); | 333 | gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0); |
332 | bio = gfs2_log_alloc_bio(sdp, blkno); | 334 | bio = gfs2_log_alloc_bio(sdp, blkno); |
333 | ret = bio_add_page(bio, page, size, offset); | 335 | ret = bio_add_page(bio, page, size, offset); |
334 | WARN_ON(ret == 0); | 336 | WARN_ON(ret == 0); |
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h index a65a7ba32ffd..e529f536c117 100644 --- a/fs/gfs2/lops.h +++ b/fs/gfs2/lops.h | |||
@@ -27,7 +27,7 @@ extern const struct gfs2_log_operations gfs2_databuf_lops; | |||
27 | 27 | ||
28 | extern const struct gfs2_log_operations *gfs2_log_ops[]; | 28 | extern const struct gfs2_log_operations *gfs2_log_ops[]; |
29 | extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page); | 29 | extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page); |
30 | extern void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int rw); | 30 | extern void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int op, int op_flags); |
31 | extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh); | 31 | extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh); |
32 | 32 | ||
33 | static inline unsigned int buf_limit(struct gfs2_sbd *sdp) | 33 | static inline unsigned int buf_limit(struct gfs2_sbd *sdp) |
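gfs2 defers the stamping to flush time: callers hand op and op_flags to gfs2_log_flush_bio(), which applies them with bio_set_op_attrs() immediately before submit_bio(). Condensed from the log_write_header() hunk above, the caller side reads:

	int op_flags = WRITE_FLUSH_FUA | REQ_META;       /* barrier path */

	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
		op_flags = WRITE_SYNC | REQ_META | REQ_PRIO; /* no-barrier fallback */

	gfs2_log_write_page(sdp, page);
	gfs2_log_flush_bio(sdp, REQ_OP_WRITE, op_flags);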
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index 8eaadabbc771..052c1132e5b6 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c | |||
@@ -37,8 +37,8 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb | |||
37 | { | 37 | { |
38 | struct buffer_head *bh, *head; | 38 | struct buffer_head *bh, *head; |
39 | int nr_underway = 0; | 39 | int nr_underway = 0; |
40 | int write_op = REQ_META | REQ_PRIO | | 40 | int write_flags = REQ_META | REQ_PRIO | |
41 | (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE); | 41 | (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0); |
42 | 42 | ||
43 | BUG_ON(!PageLocked(page)); | 43 | BUG_ON(!PageLocked(page)); |
44 | BUG_ON(!page_has_buffers(page)); | 44 | BUG_ON(!page_has_buffers(page)); |
@@ -79,7 +79,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb | |||
79 | do { | 79 | do { |
80 | struct buffer_head *next = bh->b_this_page; | 80 | struct buffer_head *next = bh->b_this_page; |
81 | if (buffer_async_write(bh)) { | 81 | if (buffer_async_write(bh)) { |
82 | submit_bh(write_op, bh); | 82 | submit_bh(REQ_OP_WRITE, write_flags, bh); |
83 | nr_underway++; | 83 | nr_underway++; |
84 | } | 84 | } |
85 | bh = next; | 85 | bh = next; |
@@ -213,7 +213,8 @@ static void gfs2_meta_read_endio(struct bio *bio) | |||
213 | * Submit several consecutive buffer head I/O requests as a single bio I/O | 213 | * Submit several consecutive buffer head I/O requests as a single bio I/O |
214 | * request. (See submit_bh_wbc.) | 214 | * request. (See submit_bh_wbc.) |
215 | */ | 215 | */ |
216 | static void gfs2_submit_bhs(int rw, struct buffer_head *bhs[], int num) | 216 | static void gfs2_submit_bhs(int op, int op_flags, struct buffer_head *bhs[], |
217 | int num) | ||
217 | { | 218 | { |
218 | struct buffer_head *bh = bhs[0]; | 219 | struct buffer_head *bh = bhs[0]; |
219 | struct bio *bio; | 220 | struct bio *bio; |
@@ -230,7 +231,8 @@ static void gfs2_submit_bhs(int rw, struct buffer_head *bhs[], int num) | |||
230 | bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); | 231 | bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); |
231 | } | 232 | } |
232 | bio->bi_end_io = gfs2_meta_read_endio; | 233 | bio->bi_end_io = gfs2_meta_read_endio; |
233 | submit_bio(rw, bio); | 234 | bio_set_op_attrs(bio, op, op_flags); |
235 | submit_bio(bio); | ||
234 | } | 236 | } |
235 | 237 | ||
236 | /** | 238 | /** |
@@ -280,7 +282,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags, | |||
280 | } | 282 | } |
281 | } | 283 | } |
282 | 284 | ||
283 | gfs2_submit_bhs(READ_SYNC | REQ_META | REQ_PRIO, bhs, num); | 285 | gfs2_submit_bhs(REQ_OP_READ, READ_SYNC | REQ_META | REQ_PRIO, bhs, num); |
284 | if (!(flags & DIO_WAIT)) | 286 | if (!(flags & DIO_WAIT)) |
285 | return 0; | 287 | return 0; |
286 | 288 | ||
@@ -448,7 +450,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen) | |||
448 | if (buffer_uptodate(first_bh)) | 450 | if (buffer_uptodate(first_bh)) |
449 | goto out; | 451 | goto out; |
450 | if (!buffer_locked(first_bh)) | 452 | if (!buffer_locked(first_bh)) |
451 | ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh); | 453 | ll_rw_block(REQ_OP_READ, READ_SYNC | REQ_META, 1, &first_bh); |
452 | 454 | ||
453 | dblock++; | 455 | dblock++; |
454 | extlen--; | 456 | extlen--; |
@@ -457,7 +459,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen) | |||
457 | bh = gfs2_getbuf(gl, dblock, CREATE); | 459 | bh = gfs2_getbuf(gl, dblock, CREATE); |
458 | 460 | ||
459 | if (!buffer_uptodate(bh) && !buffer_locked(bh)) | 461 | if (!buffer_uptodate(bh) && !buffer_locked(bh)) |
460 | ll_rw_block(READA | REQ_META, 1, &bh); | 462 | ll_rw_block(REQ_OP_READ, READA | REQ_META, 1, &bh); |
461 | brelse(bh); | 463 | brelse(bh); |
462 | dblock++; | 464 | dblock++; |
463 | extlen--; | 465 | extlen--; |
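meta_io.c covers the buffer-head entry points: submit_bh() and ll_rw_block() both gain an explicit op parameter ahead of the flags. A short sketch, assuming a buffer head obtained and checked as in the hunks above:

	/* Single buffer: op first, then modifier flags. */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(REQ_OP_READ, READA | REQ_META, bh);

	/* The batched form takes the same (op, op_flags) pair. */
	ll_rw_block(REQ_OP_READ, READ_SYNC | REQ_META, 1, &bh);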
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index b8f6fc9513ef..ef1e1822977f 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c | |||
@@ -246,7 +246,8 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent) | |||
246 | 246 | ||
247 | bio->bi_end_io = end_bio_io_page; | 247 | bio->bi_end_io = end_bio_io_page; |
248 | bio->bi_private = page; | 248 | bio->bi_private = page; |
249 | submit_bio(READ_SYNC | REQ_META, bio); | 249 | bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC | REQ_META); |
250 | submit_bio(bio); | ||
250 | wait_on_page_locked(page); | 251 | wait_on_page_locked(page); |
251 | bio_put(bio); | 252 | bio_put(bio); |
252 | if (!PageUptodate(page)) { | 253 | if (!PageUptodate(page)) { |
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index 6c657b202501..77930ca25303 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c | |||
@@ -730,7 +730,7 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index, | |||
730 | if (PageUptodate(page)) | 730 | if (PageUptodate(page)) |
731 | set_buffer_uptodate(bh); | 731 | set_buffer_uptodate(bh); |
732 | if (!buffer_uptodate(bh)) { | 732 | if (!buffer_uptodate(bh)) { |
733 | ll_rw_block(READ | REQ_META, 1, &bh); | 733 | ll_rw_block(REQ_OP_READ, REQ_META, 1, &bh); |
734 | wait_on_buffer(bh); | 734 | wait_on_buffer(bh); |
735 | if (!buffer_uptodate(bh)) | 735 | if (!buffer_uptodate(bh)) |
736 | goto unlock_out; | 736 | goto unlock_out; |
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h index fdc3446d934a..047245bd2cd6 100644 --- a/fs/hfsplus/hfsplus_fs.h +++ b/fs/hfsplus/hfsplus_fs.h | |||
@@ -526,7 +526,7 @@ int hfsplus_compare_dentry(const struct dentry *parent, | |||
526 | 526 | ||
527 | /* wrapper.c */ | 527 | /* wrapper.c */ |
528 | int hfsplus_submit_bio(struct super_block *sb, sector_t sector, void *buf, | 528 | int hfsplus_submit_bio(struct super_block *sb, sector_t sector, void *buf, |
529 | void **data, int rw); | 529 | void **data, int op, int op_flags); |
530 | int hfsplus_read_wrapper(struct super_block *sb); | 530 | int hfsplus_read_wrapper(struct super_block *sb); |
531 | 531 | ||
532 | /* time macros */ | 532 | /* time macros */ |
diff --git a/fs/hfsplus/part_tbl.c b/fs/hfsplus/part_tbl.c index eb355d81e279..63164ebc52fa 100644 --- a/fs/hfsplus/part_tbl.c +++ b/fs/hfsplus/part_tbl.c | |||
@@ -112,7 +112,8 @@ static int hfs_parse_new_pmap(struct super_block *sb, void *buf, | |||
112 | if ((u8 *)pm - (u8 *)buf >= buf_size) { | 112 | if ((u8 *)pm - (u8 *)buf >= buf_size) { |
113 | res = hfsplus_submit_bio(sb, | 113 | res = hfsplus_submit_bio(sb, |
114 | *part_start + HFS_PMAP_BLK + i, | 114 | *part_start + HFS_PMAP_BLK + i, |
115 | buf, (void **)&pm, READ); | 115 | buf, (void **)&pm, REQ_OP_READ, |
116 | 0); | ||
116 | if (res) | 117 | if (res) |
117 | return res; | 118 | return res; |
118 | } | 119 | } |
@@ -136,7 +137,7 @@ int hfs_part_find(struct super_block *sb, | |||
136 | return -ENOMEM; | 137 | return -ENOMEM; |
137 | 138 | ||
138 | res = hfsplus_submit_bio(sb, *part_start + HFS_PMAP_BLK, | 139 | res = hfsplus_submit_bio(sb, *part_start + HFS_PMAP_BLK, |
139 | buf, &data, READ); | 140 | buf, &data, REQ_OP_READ, 0); |
140 | if (res) | 141 | if (res) |
141 | goto out; | 142 | goto out; |
142 | 143 | ||
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index 755bf30ba1ce..11854dd84572 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c | |||
@@ -220,7 +220,8 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait) | |||
220 | 220 | ||
221 | error2 = hfsplus_submit_bio(sb, | 221 | error2 = hfsplus_submit_bio(sb, |
222 | sbi->part_start + HFSPLUS_VOLHEAD_SECTOR, | 222 | sbi->part_start + HFSPLUS_VOLHEAD_SECTOR, |
223 | sbi->s_vhdr_buf, NULL, WRITE_SYNC); | 223 | sbi->s_vhdr_buf, NULL, REQ_OP_WRITE, |
224 | WRITE_SYNC); | ||
224 | if (!error) | 225 | if (!error) |
225 | error = error2; | 226 | error = error2; |
226 | if (!write_backup) | 227 | if (!write_backup) |
@@ -228,7 +229,8 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait) | |||
228 | 229 | ||
229 | error2 = hfsplus_submit_bio(sb, | 230 | error2 = hfsplus_submit_bio(sb, |
230 | sbi->part_start + sbi->sect_count - 2, | 231 | sbi->part_start + sbi->sect_count - 2, |
231 | sbi->s_backup_vhdr_buf, NULL, WRITE_SYNC); | 232 | sbi->s_backup_vhdr_buf, NULL, REQ_OP_WRITE, |
233 | WRITE_SYNC); | ||
232 | if (!error) | 234 | if (!error) |
233 | error2 = error; | 235 | error2 = error; |
234 | out: | 236 | out: |
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c index cc6235671437..ebb85e5f6549 100644 --- a/fs/hfsplus/wrapper.c +++ b/fs/hfsplus/wrapper.c | |||
@@ -30,7 +30,8 @@ struct hfsplus_wd { | |||
30 | * @sector: block to read or write, for blocks of HFSPLUS_SECTOR_SIZE bytes | 30 | * @sector: block to read or write, for blocks of HFSPLUS_SECTOR_SIZE bytes |
31 | * @buf: buffer for I/O | 31 | * @buf: buffer for I/O |
32 | * @data: output pointer for location of requested data | 32 | * @data: output pointer for location of requested data |
33 | * @rw: direction of I/O | 33 | * @op: direction of I/O |
34 | * @op_flags: request op flags | ||
34 | * | 35 | * |
35 | * The unit of I/O is hfsplus_min_io_size(sb), which may be bigger than | 36 | * The unit of I/O is hfsplus_min_io_size(sb), which may be bigger than |
36 | * HFSPLUS_SECTOR_SIZE, and @buf must be sized accordingly. On reads | 37 | * HFSPLUS_SECTOR_SIZE, and @buf must be sized accordingly. On reads |
@@ -44,7 +45,7 @@ struct hfsplus_wd { | |||
44 | * will work correctly. | 45 | * will work correctly. |
45 | */ | 46 | */ |
46 | int hfsplus_submit_bio(struct super_block *sb, sector_t sector, | 47 | int hfsplus_submit_bio(struct super_block *sb, sector_t sector, |
47 | void *buf, void **data, int rw) | 48 | void *buf, void **data, int op, int op_flags) |
48 | { | 49 | { |
49 | struct bio *bio; | 50 | struct bio *bio; |
50 | int ret = 0; | 51 | int ret = 0; |
@@ -65,8 +66,9 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector, | |||
65 | bio = bio_alloc(GFP_NOIO, 1); | 66 | bio = bio_alloc(GFP_NOIO, 1); |
66 | bio->bi_iter.bi_sector = sector; | 67 | bio->bi_iter.bi_sector = sector; |
67 | bio->bi_bdev = sb->s_bdev; | 68 | bio->bi_bdev = sb->s_bdev; |
69 | bio_set_op_attrs(bio, op, op_flags); | ||
68 | 70 | ||
69 | if (!(rw & WRITE) && data) | 71 | if (op != WRITE && data) |
70 | *data = (u8 *)buf + offset; | 72 | *data = (u8 *)buf + offset; |
71 | 73 | ||
72 | while (io_size > 0) { | 74 | while (io_size > 0) { |
@@ -83,7 +85,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector, | |||
83 | buf = (u8 *)buf + len; | 85 | buf = (u8 *)buf + len; |
84 | } | 86 | } |
85 | 87 | ||
86 | ret = submit_bio_wait(rw, bio); | 88 | ret = submit_bio_wait(bio); |
87 | out: | 89 | out: |
88 | bio_put(bio); | 90 | bio_put(bio); |
89 | return ret < 0 ? ret : 0; | 91 | return ret < 0 ? ret : 0; |
@@ -181,7 +183,7 @@ int hfsplus_read_wrapper(struct super_block *sb) | |||
181 | reread: | 183 | reread: |
182 | error = hfsplus_submit_bio(sb, part_start + HFSPLUS_VOLHEAD_SECTOR, | 184 | error = hfsplus_submit_bio(sb, part_start + HFSPLUS_VOLHEAD_SECTOR, |
183 | sbi->s_vhdr_buf, (void **)&sbi->s_vhdr, | 185 | sbi->s_vhdr_buf, (void **)&sbi->s_vhdr, |
184 | READ); | 186 | REQ_OP_READ, 0); |
185 | if (error) | 187 | if (error) |
186 | goto out_free_backup_vhdr; | 188 | goto out_free_backup_vhdr; |
187 | 189 | ||
@@ -213,7 +215,8 @@ reread: | |||
213 | 215 | ||
214 | error = hfsplus_submit_bio(sb, part_start + part_size - 2, | 216 | error = hfsplus_submit_bio(sb, part_start + part_size - 2, |
215 | sbi->s_backup_vhdr_buf, | 217 | sbi->s_backup_vhdr_buf, |
216 | (void **)&sbi->s_backup_vhdr, READ); | 218 | (void **)&sbi->s_backup_vhdr, REQ_OP_READ, |
219 | 0); | ||
217 | if (error) | 220 | if (error) |
218 | goto out_free_backup_vhdr; | 221 | goto out_free_backup_vhdr; |
219 | 222 | ||
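hfsplus threads the pair through its own wrapper, so each call site spells direction and flags separately: the read paths pass 0 for op_flags while sync_fs() passes WRITE_SYNC. Usage, with sector and buffer names assumed:

	/* Read: no modifiers needed. */
	error = hfsplus_submit_bio(sb, sector, buf, &data, REQ_OP_READ, 0);

	/* Synchronous write-back of the volume header. */
	error = hfsplus_submit_bio(sb, sector, buf, NULL, REQ_OP_WRITE, WRITE_SYNC);

One quirk worth flagging: the direction test inside the wrapper becomes op != WRITE rather than a comparison against REQ_OP_READ, which relies on REQ_OP_WRITE sharing the numeric value of the old WRITE constant.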
diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c index 2e4e834d1a98..2ce5b75ee9a5 100644 --- a/fs/isofs/compress.c +++ b/fs/isofs/compress.c | |||
@@ -81,7 +81,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start, | |||
81 | blocknum = block_start >> bufshift; | 81 | blocknum = block_start >> bufshift; |
82 | memset(bhs, 0, (needblocks + 1) * sizeof(struct buffer_head *)); | 82 | memset(bhs, 0, (needblocks + 1) * sizeof(struct buffer_head *)); |
83 | haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks); | 83 | haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks); |
84 | ll_rw_block(READ, haveblocks, bhs); | 84 | ll_rw_block(REQ_OP_READ, 0, haveblocks, bhs); |
85 | 85 | ||
86 | curbh = 0; | 86 | curbh = 0; |
87 | curpage = 0; | 87 | curpage = 0; |
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 70078096117d..8f7d1339c973 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c | |||
@@ -155,9 +155,9 @@ static int journal_submit_commit_record(journal_t *journal, | |||
155 | 155 | ||
156 | if (journal->j_flags & JBD2_BARRIER && | 156 | if (journal->j_flags & JBD2_BARRIER && |
157 | !jbd2_has_feature_async_commit(journal)) | 157 | !jbd2_has_feature_async_commit(journal)) |
158 | ret = submit_bh(WRITE_SYNC | WRITE_FLUSH_FUA, bh); | 158 | ret = submit_bh(REQ_OP_WRITE, WRITE_SYNC | WRITE_FLUSH_FUA, bh); |
159 | else | 159 | else |
160 | ret = submit_bh(WRITE_SYNC, bh); | 160 | ret = submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh); |
161 | 161 | ||
162 | *cbh = bh; | 162 | *cbh = bh; |
163 | return ret; | 163 | return ret; |
@@ -718,7 +718,7 @@ start_journal_io: | |||
718 | clear_buffer_dirty(bh); | 718 | clear_buffer_dirty(bh); |
719 | set_buffer_uptodate(bh); | 719 | set_buffer_uptodate(bh); |
720 | bh->b_end_io = journal_end_buffer_io_sync; | 720 | bh->b_end_io = journal_end_buffer_io_sync; |
721 | submit_bh(WRITE_SYNC, bh); | 721 | submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh); |
722 | } | 722 | } |
723 | cond_resched(); | 723 | cond_resched(); |
724 | stats.run.rs_blocks_logged += bufs; | 724 | stats.run.rs_blocks_logged += bufs; |
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index e3ca4b4cac84..a7c4c101fe3e 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c | |||
@@ -1346,15 +1346,15 @@ static int journal_reset(journal_t *journal) | |||
1346 | return jbd2_journal_start_thread(journal); | 1346 | return jbd2_journal_start_thread(journal); |
1347 | } | 1347 | } |
1348 | 1348 | ||
1349 | static int jbd2_write_superblock(journal_t *journal, int write_op) | 1349 | static int jbd2_write_superblock(journal_t *journal, int write_flags) |
1350 | { | 1350 | { |
1351 | struct buffer_head *bh = journal->j_sb_buffer; | 1351 | struct buffer_head *bh = journal->j_sb_buffer; |
1352 | journal_superblock_t *sb = journal->j_superblock; | 1352 | journal_superblock_t *sb = journal->j_superblock; |
1353 | int ret; | 1353 | int ret; |
1354 | 1354 | ||
1355 | trace_jbd2_write_superblock(journal, write_op); | 1355 | trace_jbd2_write_superblock(journal, write_flags); |
1356 | if (!(journal->j_flags & JBD2_BARRIER)) | 1356 | if (!(journal->j_flags & JBD2_BARRIER)) |
1357 | write_op &= ~(REQ_FUA | REQ_FLUSH); | 1357 | write_flags &= ~(REQ_FUA | REQ_PREFLUSH); |
1358 | lock_buffer(bh); | 1358 | lock_buffer(bh); |
1359 | if (buffer_write_io_error(bh)) { | 1359 | if (buffer_write_io_error(bh)) { |
1360 | /* | 1360 | /* |
@@ -1374,7 +1374,7 @@ static int jbd2_write_superblock(journal_t *journal, int write_op) | |||
1374 | jbd2_superblock_csum_set(journal, sb); | 1374 | jbd2_superblock_csum_set(journal, sb); |
1375 | get_bh(bh); | 1375 | get_bh(bh); |
1376 | bh->b_end_io = end_buffer_write_sync; | 1376 | bh->b_end_io = end_buffer_write_sync; |
1377 | ret = submit_bh(write_op, bh); | 1377 | ret = submit_bh(REQ_OP_WRITE, write_flags, bh); |
1378 | wait_on_buffer(bh); | 1378 | wait_on_buffer(bh); |
1379 | if (buffer_write_io_error(bh)) { | 1379 | if (buffer_write_io_error(bh)) { |
1380 | clear_buffer_write_io_error(bh); | 1380 | clear_buffer_write_io_error(bh); |
@@ -1498,7 +1498,7 @@ static int journal_get_superblock(journal_t *journal) | |||
1498 | 1498 | ||
1499 | J_ASSERT(bh != NULL); | 1499 | J_ASSERT(bh != NULL); |
1500 | if (!buffer_uptodate(bh)) { | 1500 | if (!buffer_uptodate(bh)) { |
1501 | ll_rw_block(READ, 1, &bh); | 1501 | ll_rw_block(REQ_OP_READ, 0, 1, &bh); |
1502 | wait_on_buffer(bh); | 1502 | wait_on_buffer(bh); |
1503 | if (!buffer_uptodate(bh)) { | 1503 | if (!buffer_uptodate(bh)) { |
1504 | printk(KERN_ERR | 1504 | printk(KERN_ERR |
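jbd2 also picks up the flag rename that rode along with this series: the pre-write flush modifier is now REQ_PREFLUSH (replacing the old REQ_FLUSH name), and the write op moves out of the flags into the first submit_bh() argument. Condensed from jbd2_write_superblock() above:

	if (!(journal->j_flags & JBD2_BARRIER))
		write_flags &= ~(REQ_FUA | REQ_PREFLUSH); /* strip barrier semantics */

	bh->b_end_io = end_buffer_write_sync;
	ret = submit_bh(REQ_OP_WRITE, write_flags, bh);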
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c index 805bc6bcd8ab..02dd3360cb20 100644 --- a/fs/jbd2/recovery.c +++ b/fs/jbd2/recovery.c | |||
@@ -104,7 +104,7 @@ static int do_readahead(journal_t *journal, unsigned int start) | |||
104 | if (!buffer_uptodate(bh) && !buffer_locked(bh)) { | 104 | if (!buffer_uptodate(bh) && !buffer_locked(bh)) { |
105 | bufs[nbufs++] = bh; | 105 | bufs[nbufs++] = bh; |
106 | if (nbufs == MAXBUF) { | 106 | if (nbufs == MAXBUF) { |
107 | ll_rw_block(READ, nbufs, bufs); | 107 | ll_rw_block(REQ_OP_READ, 0, nbufs, bufs); |
108 | journal_brelse_array(bufs, nbufs); | 108 | journal_brelse_array(bufs, nbufs); |
109 | nbufs = 0; | 109 | nbufs = 0; |
110 | } | 110 | } |
@@ -113,7 +113,7 @@ static int do_readahead(journal_t *journal, unsigned int start) | |||
113 | } | 113 | } |
114 | 114 | ||
115 | if (nbufs) | 115 | if (nbufs) |
116 | ll_rw_block(READ, nbufs, bufs); | 116 | ll_rw_block(REQ_OP_READ, 0, nbufs, bufs); |
117 | err = 0; | 117 | err = 0; |
118 | 118 | ||
119 | failed: | 119 | failed: |
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c index 63759d723920..a74752146ec9 100644 --- a/fs/jfs/jfs_logmgr.c +++ b/fs/jfs/jfs_logmgr.c | |||
@@ -2002,12 +2002,13 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp) | |||
2002 | 2002 | ||
2003 | bio->bi_end_io = lbmIODone; | 2003 | bio->bi_end_io = lbmIODone; |
2004 | bio->bi_private = bp; | 2004 | bio->bi_private = bp; |
2005 | bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC); | ||
2005 | /*check if journaling to disk has been disabled*/ | 2006 | /*check if journaling to disk has been disabled*/ |
2006 | if (log->no_integrity) { | 2007 | if (log->no_integrity) { |
2007 | bio->bi_iter.bi_size = 0; | 2008 | bio->bi_iter.bi_size = 0; |
2008 | lbmIODone(bio); | 2009 | lbmIODone(bio); |
2009 | } else { | 2010 | } else { |
2010 | submit_bio(READ_SYNC, bio); | 2011 | submit_bio(bio); |
2011 | } | 2012 | } |
2012 | 2013 | ||
2013 | wait_event(bp->l_ioevent, (bp->l_flag != lbmREAD)); | 2014 | wait_event(bp->l_ioevent, (bp->l_flag != lbmREAD)); |
@@ -2145,13 +2146,14 @@ static void lbmStartIO(struct lbuf * bp) | |||
2145 | 2146 | ||
2146 | bio->bi_end_io = lbmIODone; | 2147 | bio->bi_end_io = lbmIODone; |
2147 | bio->bi_private = bp; | 2148 | bio->bi_private = bp; |
2149 | bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC); | ||
2148 | 2150 | ||
2149 | /* check if journaling to disk has been disabled */ | 2151 | /* check if journaling to disk has been disabled */ |
2150 | if (log->no_integrity) { | 2152 | if (log->no_integrity) { |
2151 | bio->bi_iter.bi_size = 0; | 2153 | bio->bi_iter.bi_size = 0; |
2152 | lbmIODone(bio); | 2154 | lbmIODone(bio); |
2153 | } else { | 2155 | } else { |
2154 | submit_bio(WRITE_SYNC, bio); | 2156 | submit_bio(bio); |
2155 | INCREMENT(lmStat.submitted); | 2157 | INCREMENT(lmStat.submitted); |
2156 | } | 2158 | } |
2157 | } | 2159 | } |
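Note where jfs places the new call: the bio is stamped before the no_integrity shortcut, so lbmIODone() sees a fully initialized bio whether or not it was really submitted. In outline:

	bio->bi_end_io = lbmIODone;
	bio->bi_private = bp;
	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC); /* stamp first */

	if (log->no_integrity) {
		bio->bi_iter.bi_size = 0;
		lbmIODone(bio);    /* simulated completion, bio already coherent */
	} else {
		submit_bio(bio);   /* op and flags travel inside the bio */
	}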
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c index b60e015cc757..e7fa9e513040 100644 --- a/fs/jfs/jfs_metapage.c +++ b/fs/jfs/jfs_metapage.c | |||
@@ -411,7 +411,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc) | |||
411 | inc_io(page); | 411 | inc_io(page); |
412 | if (!bio->bi_iter.bi_size) | 412 | if (!bio->bi_iter.bi_size) |
413 | goto dump_bio; | 413 | goto dump_bio; |
414 | submit_bio(WRITE, bio); | 414 | submit_bio(bio); |
415 | nr_underway++; | 415 | nr_underway++; |
416 | bio = NULL; | 416 | bio = NULL; |
417 | } else | 417 | } else |
@@ -434,6 +434,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc) | |||
434 | bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9); | 434 | bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9); |
435 | bio->bi_end_io = metapage_write_end_io; | 435 | bio->bi_end_io = metapage_write_end_io; |
436 | bio->bi_private = page; | 436 | bio->bi_private = page; |
437 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); | ||
437 | 438 | ||
438 | /* Don't call bio_add_page yet, we may add to this vec */ | 439 | /* Don't call bio_add_page yet, we may add to this vec */ |
439 | bio_offset = offset; | 440 | bio_offset = offset; |
@@ -448,7 +449,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc) | |||
448 | if (!bio->bi_iter.bi_size) | 449 | if (!bio->bi_iter.bi_size) |
449 | goto dump_bio; | 450 | goto dump_bio; |
450 | 451 | ||
451 | submit_bio(WRITE, bio); | 452 | submit_bio(bio); |
452 | nr_underway++; | 453 | nr_underway++; |
453 | } | 454 | } |
454 | if (redirty) | 455 | if (redirty) |
@@ -506,7 +507,7 @@ static int metapage_readpage(struct file *fp, struct page *page) | |||
506 | insert_metapage(page, NULL); | 507 | insert_metapage(page, NULL); |
507 | inc_io(page); | 508 | inc_io(page); |
508 | if (bio) | 509 | if (bio) |
509 | submit_bio(READ, bio); | 510 | submit_bio(bio); |
510 | 511 | ||
511 | bio = bio_alloc(GFP_NOFS, 1); | 512 | bio = bio_alloc(GFP_NOFS, 1); |
512 | bio->bi_bdev = inode->i_sb->s_bdev; | 513 | bio->bi_bdev = inode->i_sb->s_bdev; |
@@ -514,6 +515,7 @@ static int metapage_readpage(struct file *fp, struct page *page) | |||
514 | pblock << (inode->i_blkbits - 9); | 515 | pblock << (inode->i_blkbits - 9); |
515 | bio->bi_end_io = metapage_read_end_io; | 516 | bio->bi_end_io = metapage_read_end_io; |
516 | bio->bi_private = page; | 517 | bio->bi_private = page; |
518 | bio_set_op_attrs(bio, REQ_OP_READ, 0); | ||
517 | len = xlen << inode->i_blkbits; | 519 | len = xlen << inode->i_blkbits; |
518 | offset = block_offset << inode->i_blkbits; | 520 | offset = block_offset << inode->i_blkbits; |
519 | if (bio_add_page(bio, page, len, offset) < len) | 521 | if (bio_add_page(bio, page, len, offset) < len) |
@@ -523,7 +525,7 @@ static int metapage_readpage(struct file *fp, struct page *page) | |||
523 | block_offset++; | 525 | block_offset++; |
524 | } | 526 | } |
525 | if (bio) | 527 | if (bio) |
526 | submit_bio(READ, bio); | 528 | submit_bio(bio); |
527 | else | 529 | else |
528 | unlock_page(page); | 530 | unlock_page(page); |
529 | 531 | ||
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c index cc26f8f215f5..a8329cc47dec 100644 --- a/fs/logfs/dev_bdev.c +++ b/fs/logfs/dev_bdev.c | |||
@@ -14,7 +14,7 @@ | |||
14 | 14 | ||
15 | #define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1)) | 15 | #define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1)) |
16 | 16 | ||
17 | static int sync_request(struct page *page, struct block_device *bdev, int rw) | 17 | static int sync_request(struct page *page, struct block_device *bdev, int op) |
18 | { | 18 | { |
19 | struct bio bio; | 19 | struct bio bio; |
20 | struct bio_vec bio_vec; | 20 | struct bio_vec bio_vec; |
@@ -29,8 +29,9 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw) | |||
29 | bio.bi_bdev = bdev; | 29 | bio.bi_bdev = bdev; |
30 | bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9); | 30 | bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9); |
31 | bio.bi_iter.bi_size = PAGE_SIZE; | 31 | bio.bi_iter.bi_size = PAGE_SIZE; |
32 | bio_set_op_attrs(&bio, op, 0); | ||
32 | 33 | ||
33 | return submit_bio_wait(rw, &bio); | 34 | return submit_bio_wait(&bio); |
34 | } | 35 | } |
35 | 36 | ||
36 | static int bdev_readpage(void *_sb, struct page *page) | 37 | static int bdev_readpage(void *_sb, struct page *page) |
@@ -95,8 +96,9 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index, | |||
95 | bio->bi_iter.bi_sector = ofs >> 9; | 96 | bio->bi_iter.bi_sector = ofs >> 9; |
96 | bio->bi_private = sb; | 97 | bio->bi_private = sb; |
97 | bio->bi_end_io = writeseg_end_io; | 98 | bio->bi_end_io = writeseg_end_io; |
99 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); | ||
98 | atomic_inc(&super->s_pending_writes); | 100 | atomic_inc(&super->s_pending_writes); |
99 | submit_bio(WRITE, bio); | 101 | submit_bio(bio); |
100 | 102 | ||
101 | ofs += i * PAGE_SIZE; | 103 | ofs += i * PAGE_SIZE; |
102 | index += i; | 104 | index += i; |
@@ -122,8 +124,9 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index, | |||
122 | bio->bi_iter.bi_sector = ofs >> 9; | 124 | bio->bi_iter.bi_sector = ofs >> 9; |
123 | bio->bi_private = sb; | 125 | bio->bi_private = sb; |
124 | bio->bi_end_io = writeseg_end_io; | 126 | bio->bi_end_io = writeseg_end_io; |
127 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); | ||
125 | atomic_inc(&super->s_pending_writes); | 128 | atomic_inc(&super->s_pending_writes); |
126 | submit_bio(WRITE, bio); | 129 | submit_bio(bio); |
127 | return 0; | 130 | return 0; |
128 | } | 131 | } |
129 | 132 | ||
@@ -185,8 +188,9 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index, | |||
185 | bio->bi_iter.bi_sector = ofs >> 9; | 188 | bio->bi_iter.bi_sector = ofs >> 9; |
186 | bio->bi_private = sb; | 189 | bio->bi_private = sb; |
187 | bio->bi_end_io = erase_end_io; | 190 | bio->bi_end_io = erase_end_io; |
191 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); | ||
188 | atomic_inc(&super->s_pending_writes); | 192 | atomic_inc(&super->s_pending_writes); |
189 | submit_bio(WRITE, bio); | 193 | submit_bio(bio); |
190 | 194 | ||
191 | ofs += i * PAGE_SIZE; | 195 | ofs += i * PAGE_SIZE; |
192 | index += i; | 196 | index += i; |
@@ -206,8 +210,9 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index, | |||
206 | bio->bi_iter.bi_sector = ofs >> 9; | 210 | bio->bi_iter.bi_sector = ofs >> 9; |
207 | bio->bi_private = sb; | 211 | bio->bi_private = sb; |
208 | bio->bi_end_io = erase_end_io; | 212 | bio->bi_end_io = erase_end_io; |
213 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); | ||
209 | atomic_inc(&super->s_pending_writes); | 214 | atomic_inc(&super->s_pending_writes); |
210 | submit_bio(WRITE, bio); | 215 | submit_bio(bio); |
211 | return 0; | 216 | return 0; |
212 | } | 217 | } |
213 | 218 | ||
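logfs applies the same stamping to an on-stack bio used for synchronous single-page I/O; only the address-of changes. A sketch condensed from sync_request() above (the bio_init() setup is assumed from the unshown surrounding lines):

	struct bio bio;                /* lives on the caller's stack */
	struct bio_vec bio_vec;

	bio_init(&bio);
	/* ... attach bio_vec, bdev, sector, and size as above ... */
	bio_set_op_attrs(&bio, op, 0); /* op supplied by the caller, no flags */
	return submit_bio_wait(&bio);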
diff --git a/fs/mpage.c b/fs/mpage.c index eedc644b78d7..37b28280ad04 100644 --- a/fs/mpage.c +++ b/fs/mpage.c | |||
@@ -56,11 +56,12 @@ static void mpage_end_io(struct bio *bio) | |||
56 | bio_put(bio); | 56 | bio_put(bio); |
57 | } | 57 | } |
58 | 58 | ||
59 | static struct bio *mpage_bio_submit(int rw, struct bio *bio) | 59 | static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio) |
60 | { | 60 | { |
61 | bio->bi_end_io = mpage_end_io; | 61 | bio->bi_end_io = mpage_end_io; |
62 | guard_bio_eod(rw, bio); | 62 | bio_set_op_attrs(bio, op, op_flags); |
63 | submit_bio(rw, bio); | 63 | guard_bio_eod(op, bio); |
64 | submit_bio(bio); | ||
64 | return NULL; | 65 | return NULL; |
65 | } | 66 | } |
66 | 67 | ||
@@ -269,7 +270,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, | |||
269 | * This page will go to BIO. Do we need to send this BIO off first? | 270 | * This page will go to BIO. Do we need to send this BIO off first? |
270 | */ | 271 | */ |
271 | if (bio && (*last_block_in_bio != blocks[0] - 1)) | 272 | if (bio && (*last_block_in_bio != blocks[0] - 1)) |
272 | bio = mpage_bio_submit(READ, bio); | 273 | bio = mpage_bio_submit(REQ_OP_READ, 0, bio); |
273 | 274 | ||
274 | alloc_new: | 275 | alloc_new: |
275 | if (bio == NULL) { | 276 | if (bio == NULL) { |
@@ -286,7 +287,7 @@ alloc_new: | |||
286 | 287 | ||
287 | length = first_hole << blkbits; | 288 | length = first_hole << blkbits; |
288 | if (bio_add_page(bio, page, length, 0) < length) { | 289 | if (bio_add_page(bio, page, length, 0) < length) { |
289 | bio = mpage_bio_submit(READ, bio); | 290 | bio = mpage_bio_submit(REQ_OP_READ, 0, bio); |
290 | goto alloc_new; | 291 | goto alloc_new; |
291 | } | 292 | } |
292 | 293 | ||
@@ -294,7 +295,7 @@ alloc_new: | |||
294 | nblocks = map_bh->b_size >> blkbits; | 295 | nblocks = map_bh->b_size >> blkbits; |
295 | if ((buffer_boundary(map_bh) && relative_block == nblocks) || | 296 | if ((buffer_boundary(map_bh) && relative_block == nblocks) || |
296 | (first_hole != blocks_per_page)) | 297 | (first_hole != blocks_per_page)) |
297 | bio = mpage_bio_submit(READ, bio); | 298 | bio = mpage_bio_submit(REQ_OP_READ, 0, bio); |
298 | else | 299 | else |
299 | *last_block_in_bio = blocks[blocks_per_page - 1]; | 300 | *last_block_in_bio = blocks[blocks_per_page - 1]; |
300 | out: | 301 | out: |
@@ -302,7 +303,7 @@ out: | |||
302 | 303 | ||
303 | confused: | 304 | confused: |
304 | if (bio) | 305 | if (bio) |
305 | bio = mpage_bio_submit(READ, bio); | 306 | bio = mpage_bio_submit(REQ_OP_READ, 0, bio); |
306 | if (!PageUptodate(page)) | 307 | if (!PageUptodate(page)) |
307 | block_read_full_page(page, get_block); | 308 | block_read_full_page(page, get_block); |
308 | else | 309 | else |
@@ -384,7 +385,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages, | |||
384 | } | 385 | } |
385 | BUG_ON(!list_empty(pages)); | 386 | BUG_ON(!list_empty(pages)); |
386 | if (bio) | 387 | if (bio) |
387 | mpage_bio_submit(READ, bio); | 388 | mpage_bio_submit(REQ_OP_READ, 0, bio); |
388 | return 0; | 389 | return 0; |
389 | } | 390 | } |
390 | EXPORT_SYMBOL(mpage_readpages); | 391 | EXPORT_SYMBOL(mpage_readpages); |
@@ -405,7 +406,7 @@ int mpage_readpage(struct page *page, get_block_t get_block) | |||
405 | bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio, | 406 | bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio, |
406 | &map_bh, &first_logical_block, get_block, gfp); | 407 | &map_bh, &first_logical_block, get_block, gfp); |
407 | if (bio) | 408 | if (bio) |
408 | mpage_bio_submit(READ, bio); | 409 | mpage_bio_submit(REQ_OP_READ, 0, bio); |
409 | return 0; | 410 | return 0; |
410 | } | 411 | } |
411 | EXPORT_SYMBOL(mpage_readpage); | 412 | EXPORT_SYMBOL(mpage_readpage); |
@@ -486,7 +487,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc, | |||
486 | struct buffer_head map_bh; | 487 | struct buffer_head map_bh; |
487 | loff_t i_size = i_size_read(inode); | 488 | loff_t i_size = i_size_read(inode); |
488 | int ret = 0; | 489 | int ret = 0; |
489 | int wr = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE); | 490 | int op_flags = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0); |
490 | 491 | ||
491 | if (page_has_buffers(page)) { | 492 | if (page_has_buffers(page)) { |
492 | struct buffer_head *head = page_buffers(page); | 493 | struct buffer_head *head = page_buffers(page); |
@@ -595,7 +596,7 @@ page_is_mapped: | |||
595 | * This page will go to BIO. Do we need to send this BIO off first? | 596 | * This page will go to BIO. Do we need to send this BIO off first? |
596 | */ | 597 | */ |
597 | if (bio && mpd->last_block_in_bio != blocks[0] - 1) | 598 | if (bio && mpd->last_block_in_bio != blocks[0] - 1) |
598 | bio = mpage_bio_submit(wr, bio); | 599 | bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio); |
599 | 600 | ||
600 | alloc_new: | 601 | alloc_new: |
601 | if (bio == NULL) { | 602 | if (bio == NULL) { |
@@ -622,7 +623,7 @@ alloc_new: | |||
622 | wbc_account_io(wbc, page, PAGE_SIZE); | 623 | wbc_account_io(wbc, page, PAGE_SIZE); |
623 | length = first_unmapped << blkbits; | 624 | length = first_unmapped << blkbits; |
624 | if (bio_add_page(bio, page, length, 0) < length) { | 625 | if (bio_add_page(bio, page, length, 0) < length) { |
625 | bio = mpage_bio_submit(wr, bio); | 626 | bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio); |
626 | goto alloc_new; | 627 | goto alloc_new; |
627 | } | 628 | } |
628 | 629 | ||
@@ -632,7 +633,7 @@ alloc_new: | |||
632 | set_page_writeback(page); | 633 | set_page_writeback(page); |
633 | unlock_page(page); | 634 | unlock_page(page); |
634 | if (boundary || (first_unmapped != blocks_per_page)) { | 635 | if (boundary || (first_unmapped != blocks_per_page)) { |
635 | bio = mpage_bio_submit(wr, bio); | 636 | bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio); |
636 | if (boundary_block) { | 637 | if (boundary_block) { |
637 | write_boundary_block(boundary_bdev, | 638 | write_boundary_block(boundary_bdev, |
638 | boundary_block, 1 << blkbits); | 639 | boundary_block, 1 << blkbits); |
@@ -644,7 +645,7 @@ alloc_new: | |||
644 | 645 | ||
645 | confused: | 646 | confused: |
646 | if (bio) | 647 | if (bio) |
647 | bio = mpage_bio_submit(wr, bio); | 648 | bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio); |
648 | 649 | ||
649 | if (mpd->use_writepage) { | 650 | if (mpd->use_writepage) { |
650 | ret = mapping->a_ops->writepage(page, wbc); | 651 | ret = mapping->a_ops->writepage(page, wbc); |
@@ -701,9 +702,9 @@ mpage_writepages(struct address_space *mapping, | |||
701 | 702 | ||
702 | ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd); | 703 | ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd); |
703 | if (mpd.bio) { | 704 | if (mpd.bio) { |
704 | int wr = (wbc->sync_mode == WB_SYNC_ALL ? | 705 | int op_flags = (wbc->sync_mode == WB_SYNC_ALL ? |
705 | WRITE_SYNC : WRITE); | 706 | WRITE_SYNC : 0); |
706 | mpage_bio_submit(wr, mpd.bio); | 707 | mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio); |
707 | } | 708 | } |
708 | } | 709 | } |
709 | blk_finish_plug(&plug); | 710 | blk_finish_plug(&plug); |
@@ -722,9 +723,9 @@ int mpage_writepage(struct page *page, get_block_t get_block, | |||
722 | }; | 723 | }; |
723 | int ret = __mpage_writepage(page, wbc, &mpd); | 724 | int ret = __mpage_writepage(page, wbc, &mpd); |
724 | if (mpd.bio) { | 725 | if (mpd.bio) { |
725 | int wr = (wbc->sync_mode == WB_SYNC_ALL ? | 726 | int op_flags = (wbc->sync_mode == WB_SYNC_ALL ? |
726 | WRITE_SYNC : WRITE); | 727 | WRITE_SYNC : 0); |
727 | mpage_bio_submit(wr, mpd.bio); | 728 | mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio); |
728 | } | 729 | } |
729 | return ret; | 730 | return ret; |
730 | } | 731 | } |
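fs/mpage.c concentrates the change in one helper: mpage_bio_submit() receives (op, op_flags), stamps the bio, guards it against running past end-of-device via guard_bio_eod(), and submits. Callers then only choose flags, with the direction fixed per path; condensed from above:

	/* Write side: sync mode decides the modifier flags, op is constant. */
	int op_flags = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);

	bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);

	/* Read side never needs modifiers. */
	bio = mpage_bio_submit(REQ_OP_READ, 0, bio);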
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index 17a42e4eb872..f55a4e756047 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c | |||
@@ -102,14 +102,15 @@ static inline void put_parallel(struct parallel_io *p) | |||
102 | } | 102 | } |
103 | 103 | ||
104 | static struct bio * | 104 | static struct bio * |
105 | bl_submit_bio(int rw, struct bio *bio) | 105 | bl_submit_bio(struct bio *bio) |
106 | { | 106 | { |
107 | if (bio) { | 107 | if (bio) { |
108 | get_parallel(bio->bi_private); | 108 | get_parallel(bio->bi_private); |
109 | dprintk("%s submitting %s bio %u@%llu\n", __func__, | 109 | dprintk("%s submitting %s bio %u@%llu\n", __func__, |
110 | rw == READ ? "read" : "write", bio->bi_iter.bi_size, | 110 | bio_op(bio) == READ ? "read" : "write", |
111 | bio->bi_iter.bi_size, | ||
111 | (unsigned long long)bio->bi_iter.bi_sector); | 112 | (unsigned long long)bio->bi_iter.bi_sector); |
112 | submit_bio(rw, bio); | 113 | submit_bio(bio); |
113 | } | 114 | } |
114 | return NULL; | 115 | return NULL; |
115 | } | 116 | } |
@@ -158,7 +159,7 @@ do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect, | |||
158 | if (disk_addr < map->start || disk_addr >= map->start + map->len) { | 159 | if (disk_addr < map->start || disk_addr >= map->start + map->len) { |
159 | if (!dev->map(dev, disk_addr, map)) | 160 | if (!dev->map(dev, disk_addr, map)) |
160 | return ERR_PTR(-EIO); | 161 | return ERR_PTR(-EIO); |
161 | bio = bl_submit_bio(rw, bio); | 162 | bio = bl_submit_bio(bio); |
162 | } | 163 | } |
163 | disk_addr += map->disk_offset; | 164 | disk_addr += map->disk_offset; |
164 | disk_addr -= map->start; | 165 | disk_addr -= map->start; |
@@ -174,9 +175,10 @@ retry: | |||
174 | disk_addr >> SECTOR_SHIFT, end_io, par); | 175 | disk_addr >> SECTOR_SHIFT, end_io, par); |
175 | if (!bio) | 176 | if (!bio) |
176 | return ERR_PTR(-ENOMEM); | 177 | return ERR_PTR(-ENOMEM); |
178 | bio_set_op_attrs(bio, rw, 0); | ||
177 | } | 179 | } |
178 | if (bio_add_page(bio, page, *len, offset) < *len) { | 180 | if (bio_add_page(bio, page, *len, offset) < *len) { |
179 | bio = bl_submit_bio(rw, bio); | 181 | bio = bl_submit_bio(bio); |
180 | goto retry; | 182 | goto retry; |
181 | } | 183 | } |
182 | return bio; | 184 | return bio; |
@@ -252,7 +254,7 @@ bl_read_pagelist(struct nfs_pgio_header *header) | |||
252 | for (i = pg_index; i < header->page_array.npages; i++) { | 254 | for (i = pg_index; i < header->page_array.npages; i++) { |
253 | if (extent_length <= 0) { | 255 | if (extent_length <= 0) { |
254 | /* We've used up the previous extent */ | 256 | /* We've used up the previous extent */ |
255 | bio = bl_submit_bio(READ, bio); | 257 | bio = bl_submit_bio(bio); |
256 | 258 | ||
257 | /* Get the next one */ | 259 | /* Get the next one */ |
258 | if (!ext_tree_lookup(bl, isect, &be, false)) { | 260 | if (!ext_tree_lookup(bl, isect, &be, false)) { |
@@ -273,7 +275,7 @@ bl_read_pagelist(struct nfs_pgio_header *header) | |||
273 | } | 275 | } |
274 | 276 | ||
275 | if (is_hole(&be)) { | 277 | if (is_hole(&be)) { |
276 | bio = bl_submit_bio(READ, bio); | 278 | bio = bl_submit_bio(bio); |
277 | /* Fill hole w/ zeroes w/o accessing device */ | 279 | /* Fill hole w/ zeroes w/o accessing device */ |
278 | dprintk("%s Zeroing page for hole\n", __func__); | 280 | dprintk("%s Zeroing page for hole\n", __func__); |
279 | zero_user_segment(pages[i], pg_offset, pg_len); | 281 | zero_user_segment(pages[i], pg_offset, pg_len); |
@@ -306,7 +308,7 @@ bl_read_pagelist(struct nfs_pgio_header *header) | |||
306 | header->res.count = (isect << SECTOR_SHIFT) - header->args.offset; | 308 | header->res.count = (isect << SECTOR_SHIFT) - header->args.offset; |
307 | } | 309 | } |
308 | out: | 310 | out: |
309 | bl_submit_bio(READ, bio); | 311 | bl_submit_bio(bio); |
310 | blk_finish_plug(&plug); | 312 | blk_finish_plug(&plug); |
311 | put_parallel(par); | 313 | put_parallel(par); |
312 | return PNFS_ATTEMPTED; | 314 | return PNFS_ATTEMPTED; |
@@ -398,7 +400,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync) | |||
398 | for (i = pg_index; i < header->page_array.npages; i++) { | 400 | for (i = pg_index; i < header->page_array.npages; i++) { |
399 | if (extent_length <= 0) { | 401 | if (extent_length <= 0) { |
400 | /* We've used up the previous extent */ | 402 | /* We've used up the previous extent */ |
401 | bio = bl_submit_bio(WRITE, bio); | 403 | bio = bl_submit_bio(bio); |
402 | /* Get the next one */ | 404 | /* Get the next one */ |
403 | if (!ext_tree_lookup(bl, isect, &be, true)) { | 405 | if (!ext_tree_lookup(bl, isect, &be, true)) { |
404 | header->pnfs_error = -EINVAL; | 406 | header->pnfs_error = -EINVAL; |
@@ -427,7 +429,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync) | |||
427 | 429 | ||
428 | header->res.count = header->args.count; | 430 | header->res.count = header->args.count; |
429 | out: | 431 | out: |
430 | bl_submit_bio(WRITE, bio); | 432 | bl_submit_bio(bio); |
431 | blk_finish_plug(&plug); | 433 | blk_finish_plug(&plug); |
432 | put_parallel(par); | 434 | put_parallel(par); |
433 | return PNFS_ATTEMPTED; | 435 | return PNFS_ATTEMPTED; |
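
With submit_bio() losing its rw argument, the direction has to be stamped on the bio when it is built and read back with bio_op() wherever the old code consulted 'rw'; that is exactly what the blocklayout hunks above do. A condensed sketch of the allocation side (helper name illustrative, bio fields as used above):

/* Build a bio whose direction travels with it: bio_set_op_attrs()
 * records REQ_OP_READ or REQ_OP_WRITE once, so bl_submit_bio() can
 * recover it later via bio_op(bio). */
static struct bio *bl_alloc_bio_sketch(int npg, struct block_device *bdev,
                                       sector_t sector, bio_end_io_t end_io,
                                       void *private, int rw)
{
        struct bio *bio = bio_alloc(GFP_NOIO, npg);

        if (!bio)
                return NULL;
        bio->bi_bdev = bdev;
        bio->bi_iter.bi_sector = sector;
        bio->bi_end_io = end_io;
        bio->bi_private = private;
        bio_set_op_attrs(bio, rw, 0);   /* rw: REQ_OP_READ or REQ_OP_WRITE */
        return bio;
}
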
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c index 0576033699bc..4cca998ec7a0 100644 --- a/fs/nilfs2/btnode.c +++ b/fs/nilfs2/btnode.c | |||
@@ -62,7 +62,7 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr) | |||
62 | } | 62 | } |
63 | 63 | ||
64 | int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr, | 64 | int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr, |
65 | sector_t pblocknr, int mode, | 65 | sector_t pblocknr, int mode, int mode_flags, |
66 | struct buffer_head **pbh, sector_t *submit_ptr) | 66 | struct buffer_head **pbh, sector_t *submit_ptr) |
67 | { | 67 | { |
68 | struct buffer_head *bh; | 68 | struct buffer_head *bh; |
@@ -95,7 +95,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr, | |||
95 | } | 95 | } |
96 | } | 96 | } |
97 | 97 | ||
98 | if (mode == READA) { | 98 | if (mode_flags & REQ_RAHEAD) { |
99 | if (pblocknr != *submit_ptr + 1 || !trylock_buffer(bh)) { | 99 | if (pblocknr != *submit_ptr + 1 || !trylock_buffer(bh)) { |
100 | err = -EBUSY; /* internal code */ | 100 | err = -EBUSY; /* internal code */ |
101 | brelse(bh); | 101 | brelse(bh); |
@@ -114,7 +114,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr, | |||
114 | bh->b_blocknr = pblocknr; /* set block address for read */ | 114 | bh->b_blocknr = pblocknr; /* set block address for read */ |
115 | bh->b_end_io = end_buffer_read_sync; | 115 | bh->b_end_io = end_buffer_read_sync; |
116 | get_bh(bh); | 116 | get_bh(bh); |
117 | submit_bh(mode, bh); | 117 | submit_bh(mode, mode_flags, bh); |
118 | bh->b_blocknr = blocknr; /* set back to the given block address */ | 118 | bh->b_blocknr = blocknr; /* set back to the given block address */ |
119 | *submit_ptr = pblocknr; | 119 | *submit_ptr = pblocknr; |
120 | err = 0; | 120 | err = 0; |
diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h index 2cc1b80e18f7..4e8aaa1aeb65 100644 --- a/fs/nilfs2/btnode.h +++ b/fs/nilfs2/btnode.h | |||
@@ -43,7 +43,7 @@ void nilfs_btnode_cache_clear(struct address_space *); | |||
43 | struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc, | 43 | struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc, |
44 | __u64 blocknr); | 44 | __u64 blocknr); |
45 | int nilfs_btnode_submit_block(struct address_space *, __u64, sector_t, int, | 45 | int nilfs_btnode_submit_block(struct address_space *, __u64, sector_t, int, |
46 | struct buffer_head **, sector_t *); | 46 | int, struct buffer_head **, sector_t *); |
47 | void nilfs_btnode_delete(struct buffer_head *); | 47 | void nilfs_btnode_delete(struct buffer_head *); |
48 | int nilfs_btnode_prepare_change_key(struct address_space *, | 48 | int nilfs_btnode_prepare_change_key(struct address_space *, |
49 | struct nilfs_btnode_chkey_ctxt *); | 49 | struct nilfs_btnode_chkey_ctxt *); |
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index eccb1c89ccbb..982d1e3df3a5 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c | |||
@@ -476,7 +476,8 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr, | |||
476 | sector_t submit_ptr = 0; | 476 | sector_t submit_ptr = 0; |
477 | int ret; | 477 | int ret; |
478 | 478 | ||
479 | ret = nilfs_btnode_submit_block(btnc, ptr, 0, READ, &bh, &submit_ptr); | 479 | ret = nilfs_btnode_submit_block(btnc, ptr, 0, REQ_OP_READ, 0, &bh, |
480 | &submit_ptr); | ||
480 | if (ret) { | 481 | if (ret) { |
481 | if (ret != -EEXIST) | 482 | if (ret != -EEXIST) |
482 | return ret; | 483 | return ret; |
@@ -492,7 +493,8 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr, | |||
492 | n > 0 && i < ra->ncmax; n--, i++) { | 493 | n > 0 && i < ra->ncmax; n--, i++) { |
493 | ptr2 = nilfs_btree_node_get_ptr(ra->node, i, ra->ncmax); | 494 | ptr2 = nilfs_btree_node_get_ptr(ra->node, i, ra->ncmax); |
494 | 495 | ||
495 | ret = nilfs_btnode_submit_block(btnc, ptr2, 0, READA, | 496 | ret = nilfs_btnode_submit_block(btnc, ptr2, 0, |
497 | REQ_OP_READ, REQ_RAHEAD, | ||
496 | &ra_bh, &submit_ptr); | 498 | &ra_bh, &submit_ptr); |
497 | if (likely(!ret || ret == -EEXIST)) | 499 | if (likely(!ret || ret == -EEXIST)) |
498 | brelse(ra_bh); | 500 | brelse(ra_bh); |
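
READA used to be a third pseudo-direction alongside READ and WRITE; after this series readahead is an ordinary REQ_OP_READ carrying the REQ_RAHEAD hint, which is why the nilfs submit paths above test 'mode_flags & REQ_RAHEAD' rather than 'mode == READA'. The callee-side check, following the btnode.c hunk:

/* Readahead may fail silently, so it must never block on the buffer
 * lock; a plain read still waits. */
if (mode_flags & REQ_RAHEAD) {
        if (!trylock_buffer(bh)) {
                err = -EBUSY;           /* drop the readahead, not an error */
                brelse(bh);
                goto out;
        }
} else {
        lock_buffer(bh);
}
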
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c index 693aded72498..e9148f94d696 100644 --- a/fs/nilfs2/gcinode.c +++ b/fs/nilfs2/gcinode.c | |||
@@ -101,7 +101,7 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff, | |||
101 | bh->b_blocknr = pbn; | 101 | bh->b_blocknr = pbn; |
102 | bh->b_end_io = end_buffer_read_sync; | 102 | bh->b_end_io = end_buffer_read_sync; |
103 | get_bh(bh); | 103 | get_bh(bh); |
104 | submit_bh(READ, bh); | 104 | submit_bh(REQ_OP_READ, 0, bh); |
105 | if (vbn) | 105 | if (vbn) |
106 | bh->b_blocknr = vbn; | 106 | bh->b_blocknr = vbn; |
107 | out: | 107 | out: |
@@ -138,7 +138,8 @@ int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn, | |||
138 | int ret; | 138 | int ret; |
139 | 139 | ||
140 | ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache, | 140 | ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache, |
141 | vbn ? : pbn, pbn, READ, out_bh, &pbn); | 141 | vbn ? : pbn, pbn, REQ_OP_READ, 0, |
142 | out_bh, &pbn); | ||
142 | if (ret == -EEXIST) /* internal code (cache hit) */ | 143 | if (ret == -EEXIST) /* internal code (cache hit) */ |
143 | ret = 0; | 144 | ret = 0; |
144 | return ret; | 145 | return ret; |
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index 3417d859a03c..0d7b71fbeff8 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c | |||
@@ -121,7 +121,7 @@ static int nilfs_mdt_create_block(struct inode *inode, unsigned long block, | |||
121 | 121 | ||
122 | static int | 122 | static int |
123 | nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff, | 123 | nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff, |
124 | int mode, struct buffer_head **out_bh) | 124 | int mode, int mode_flags, struct buffer_head **out_bh) |
125 | { | 125 | { |
126 | struct buffer_head *bh; | 126 | struct buffer_head *bh; |
127 | __u64 blknum = 0; | 127 | __u64 blknum = 0; |
@@ -135,7 +135,7 @@ nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff, | |||
135 | if (buffer_uptodate(bh)) | 135 | if (buffer_uptodate(bh)) |
136 | goto out; | 136 | goto out; |
137 | 137 | ||
138 | if (mode == READA) { | 138 | if (mode_flags & REQ_RAHEAD) { |
139 | if (!trylock_buffer(bh)) { | 139 | if (!trylock_buffer(bh)) { |
140 | ret = -EBUSY; | 140 | ret = -EBUSY; |
141 | goto failed_bh; | 141 | goto failed_bh; |
@@ -157,7 +157,7 @@ nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff, | |||
157 | 157 | ||
158 | bh->b_end_io = end_buffer_read_sync; | 158 | bh->b_end_io = end_buffer_read_sync; |
159 | get_bh(bh); | 159 | get_bh(bh); |
160 | submit_bh(mode, bh); | 160 | submit_bh(mode, mode_flags, bh); |
161 | ret = 0; | 161 | ret = 0; |
162 | 162 | ||
163 | trace_nilfs2_mdt_submit_block(inode, inode->i_ino, blkoff, mode); | 163 | trace_nilfs2_mdt_submit_block(inode, inode->i_ino, blkoff, mode); |
@@ -181,7 +181,7 @@ static int nilfs_mdt_read_block(struct inode *inode, unsigned long block, | |||
181 | int i, nr_ra_blocks = NILFS_MDT_MAX_RA_BLOCKS; | 181 | int i, nr_ra_blocks = NILFS_MDT_MAX_RA_BLOCKS; |
182 | int err; | 182 | int err; |
183 | 183 | ||
184 | err = nilfs_mdt_submit_block(inode, block, READ, &first_bh); | 184 | err = nilfs_mdt_submit_block(inode, block, REQ_OP_READ, 0, &first_bh); |
185 | if (err == -EEXIST) /* internal code */ | 185 | if (err == -EEXIST) /* internal code */ |
186 | goto out; | 186 | goto out; |
187 | 187 | ||
@@ -191,7 +191,8 @@ static int nilfs_mdt_read_block(struct inode *inode, unsigned long block, | |||
191 | if (readahead) { | 191 | if (readahead) { |
192 | blkoff = block + 1; | 192 | blkoff = block + 1; |
193 | for (i = 0; i < nr_ra_blocks; i++, blkoff++) { | 193 | for (i = 0; i < nr_ra_blocks; i++, blkoff++) { |
194 | err = nilfs_mdt_submit_block(inode, blkoff, READA, &bh); | 194 | err = nilfs_mdt_submit_block(inode, blkoff, REQ_OP_READ, |
195 | REQ_RAHEAD, &bh); | ||
195 | if (likely(!err || err == -EEXIST)) | 196 | if (likely(!err || err == -EEXIST)) |
196 | brelse(bh); | 197 | brelse(bh); |
197 | else if (err != -EBUSY) | 198 | else if (err != -EBUSY) |
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c index bf36df10540b..a962d7d83447 100644 --- a/fs/nilfs2/segbuf.c +++ b/fs/nilfs2/segbuf.c | |||
@@ -346,7 +346,8 @@ static void nilfs_end_bio_write(struct bio *bio) | |||
346 | } | 346 | } |
347 | 347 | ||
348 | static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf, | 348 | static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf, |
349 | struct nilfs_write_info *wi, int mode) | 349 | struct nilfs_write_info *wi, int mode, |
350 | int mode_flags) | ||
350 | { | 351 | { |
351 | struct bio *bio = wi->bio; | 352 | struct bio *bio = wi->bio; |
352 | int err; | 353 | int err; |
@@ -364,7 +365,8 @@ static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf, | |||
364 | 365 | ||
365 | bio->bi_end_io = nilfs_end_bio_write; | 366 | bio->bi_end_io = nilfs_end_bio_write; |
366 | bio->bi_private = segbuf; | 367 | bio->bi_private = segbuf; |
367 | submit_bio(mode, bio); | 368 | bio_set_op_attrs(bio, mode, mode_flags); |
369 | submit_bio(bio); | ||
368 | segbuf->sb_nbio++; | 370 | segbuf->sb_nbio++; |
369 | 371 | ||
370 | wi->bio = NULL; | 372 | wi->bio = NULL; |
@@ -437,7 +439,7 @@ static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf, | |||
437 | return 0; | 439 | return 0; |
438 | } | 440 | } |
439 | /* bio is FULL */ | 441 | /* bio is FULL */ |
440 | err = nilfs_segbuf_submit_bio(segbuf, wi, mode); | 442 | err = nilfs_segbuf_submit_bio(segbuf, wi, mode, 0); |
441 | /* never submit current bh */ | 443 | /* never submit current bh */ |
442 | if (likely(!err)) | 444 | if (likely(!err)) |
443 | goto repeat; | 445 | goto repeat; |
@@ -461,19 +463,19 @@ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf, | |||
461 | { | 463 | { |
462 | struct nilfs_write_info wi; | 464 | struct nilfs_write_info wi; |
463 | struct buffer_head *bh; | 465 | struct buffer_head *bh; |
464 | int res = 0, rw = WRITE; | 466 | int res = 0; |
465 | 467 | ||
466 | wi.nilfs = nilfs; | 468 | wi.nilfs = nilfs; |
467 | nilfs_segbuf_prepare_write(segbuf, &wi); | 469 | nilfs_segbuf_prepare_write(segbuf, &wi); |
468 | 470 | ||
469 | list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) { | 471 | list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) { |
470 | res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw); | 472 | res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, REQ_OP_WRITE); |
471 | if (unlikely(res)) | 473 | if (unlikely(res)) |
472 | goto failed_bio; | 474 | goto failed_bio; |
473 | } | 475 | } |
474 | 476 | ||
475 | list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) { | 477 | list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) { |
476 | res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw); | 478 | res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, REQ_OP_WRITE); |
477 | if (unlikely(res)) | 479 | if (unlikely(res)) |
478 | goto failed_bio; | 480 | goto failed_bio; |
479 | } | 481 | } |
@@ -483,8 +485,8 @@ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf, | |||
483 | * Last BIO is always sent through the following | 485 | * Last BIO is always sent through the following |
484 | * submission. | 486 | * submission. |
485 | */ | 487 | */ |
486 | rw |= REQ_SYNC; | 488 | res = nilfs_segbuf_submit_bio(segbuf, &wi, REQ_OP_WRITE, |
487 | res = nilfs_segbuf_submit_bio(segbuf, &wi, rw); | 489 | REQ_SYNC); |
488 | } | 490 | } |
489 | 491 | ||
490 | failed_bio: | 492 | failed_bio: |
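
In the segbuf writer, the old 'rw = WRITE; ... rw |= REQ_SYNC' accumulation becomes an explicit (op, op_flags) pair: every bio is REQ_OP_WRITE, and only the final submission adds REQ_SYNC so the tail of the segment is pushed out promptly. The resulting caller shape, condensed from the hunk above:

/* Payload bios go out as plain writes; the last bio alone is sync. */
list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
        res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, REQ_OP_WRITE);
        if (unlikely(res))
                goto failed_bio;
}
res = nilfs_segbuf_submit_bio(segbuf, &wi, REQ_OP_WRITE, REQ_SYNC);
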
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c index 97768a1379f2..fe251f187ff8 100644 --- a/fs/ntfs/aops.c +++ b/fs/ntfs/aops.c | |||
@@ -362,7 +362,7 @@ handle_zblock: | |||
362 | for (i = 0; i < nr; i++) { | 362 | for (i = 0; i < nr; i++) { |
363 | tbh = arr[i]; | 363 | tbh = arr[i]; |
364 | if (likely(!buffer_uptodate(tbh))) | 364 | if (likely(!buffer_uptodate(tbh))) |
365 | submit_bh(READ, tbh); | 365 | submit_bh(REQ_OP_READ, 0, tbh); |
366 | else | 366 | else |
367 | ntfs_end_buffer_async_read(tbh, 1); | 367 | ntfs_end_buffer_async_read(tbh, 1); |
368 | } | 368 | } |
@@ -877,7 +877,7 @@ lock_retry_remap: | |||
877 | do { | 877 | do { |
878 | struct buffer_head *next = bh->b_this_page; | 878 | struct buffer_head *next = bh->b_this_page; |
879 | if (buffer_async_write(bh)) { | 879 | if (buffer_async_write(bh)) { |
880 | submit_bh(WRITE, bh); | 880 | submit_bh(REQ_OP_WRITE, 0, bh); |
881 | need_end_writeback = false; | 881 | need_end_writeback = false; |
882 | } | 882 | } |
883 | bh = next; | 883 | bh = next; |
@@ -1202,7 +1202,7 @@ lock_retry_remap: | |||
1202 | BUG_ON(!buffer_mapped(tbh)); | 1202 | BUG_ON(!buffer_mapped(tbh)); |
1203 | get_bh(tbh); | 1203 | get_bh(tbh); |
1204 | tbh->b_end_io = end_buffer_write_sync; | 1204 | tbh->b_end_io = end_buffer_write_sync; |
1205 | submit_bh(WRITE, tbh); | 1205 | submit_bh(REQ_OP_WRITE, 0, tbh); |
1206 | } | 1206 | } |
1207 | /* Synchronize the mft mirror now if not @sync. */ | 1207 | /* Synchronize the mft mirror now if not @sync. */ |
1208 | if (is_mft && !sync) | 1208 | if (is_mft && !sync) |
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c index f2b5e746f49b..f8eb04387ca4 100644 --- a/fs/ntfs/compress.c +++ b/fs/ntfs/compress.c | |||
@@ -670,7 +670,7 @@ lock_retry_remap: | |||
670 | } | 670 | } |
671 | get_bh(tbh); | 671 | get_bh(tbh); |
672 | tbh->b_end_io = end_buffer_read_sync; | 672 | tbh->b_end_io = end_buffer_read_sync; |
673 | submit_bh(READ, tbh); | 673 | submit_bh(REQ_OP_READ, 0, tbh); |
674 | } | 674 | } |
675 | 675 | ||
676 | /* Wait for io completion on all buffer heads. */ | 676 | /* Wait for io completion on all buffer heads. */ |
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index 5622ed5a201e..f548629dfaac 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c | |||
@@ -553,7 +553,7 @@ static inline int ntfs_submit_bh_for_read(struct buffer_head *bh) | |||
553 | lock_buffer(bh); | 553 | lock_buffer(bh); |
554 | get_bh(bh); | 554 | get_bh(bh); |
555 | bh->b_end_io = end_buffer_read_sync; | 555 | bh->b_end_io = end_buffer_read_sync; |
556 | return submit_bh(READ, bh); | 556 | return submit_bh(REQ_OP_READ, 0, bh); |
557 | } | 557 | } |
558 | 558 | ||
559 | /** | 559 | /** |
diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c index 9d71213ca81e..761f12f7f3ef 100644 --- a/fs/ntfs/logfile.c +++ b/fs/ntfs/logfile.c | |||
@@ -821,7 +821,7 @@ map_vcn: | |||
821 | * completed ignore errors afterwards as we can assume | 821 | * completed ignore errors afterwards as we can assume |
822 | * that if one buffer worked all of them will work. | 822 | * that if one buffer worked all of them will work. |
823 | */ | 823 | */ |
824 | submit_bh(WRITE, bh); | 824 | submit_bh(REQ_OP_WRITE, 0, bh); |
825 | if (should_wait) { | 825 | if (should_wait) { |
826 | should_wait = false; | 826 | should_wait = false; |
827 | wait_on_buffer(bh); | 827 | wait_on_buffer(bh); |
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c index 37b2501caaa4..d15d492ce47b 100644 --- a/fs/ntfs/mft.c +++ b/fs/ntfs/mft.c | |||
@@ -592,7 +592,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no, | |||
592 | clear_buffer_dirty(tbh); | 592 | clear_buffer_dirty(tbh); |
593 | get_bh(tbh); | 593 | get_bh(tbh); |
594 | tbh->b_end_io = end_buffer_write_sync; | 594 | tbh->b_end_io = end_buffer_write_sync; |
595 | submit_bh(WRITE, tbh); | 595 | submit_bh(REQ_OP_WRITE, 0, tbh); |
596 | } | 596 | } |
597 | /* Wait on i/o completion of buffers. */ | 597 | /* Wait on i/o completion of buffers. */ |
598 | for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) { | 598 | for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) { |
@@ -785,7 +785,7 @@ int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync) | |||
785 | clear_buffer_dirty(tbh); | 785 | clear_buffer_dirty(tbh); |
786 | get_bh(tbh); | 786 | get_bh(tbh); |
787 | tbh->b_end_io = end_buffer_write_sync; | 787 | tbh->b_end_io = end_buffer_write_sync; |
788 | submit_bh(WRITE, tbh); | 788 | submit_bh(REQ_OP_WRITE, 0, tbh); |
789 | } | 789 | } |
790 | /* Synchronize the mft mirror now if not @sync. */ | 790 | /* Synchronize the mft mirror now if not @sync. */ |
791 | if (!sync && ni->mft_no < vol->mftmirr_size) | 791 | if (!sync && ni->mft_no < vol->mftmirr_size) |
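
The buffer-head conversions in the ntfs hunks are mechanical but show the new submit_bh() calling convention clearly: the op and the flags are separate arguments, so a plain asynchronous write passes 0 for the flags. A self-contained sketch of the recurring synchronous-write pattern (helper name illustrative):

/* Write one dirty buffer and wait for it, new-style. */
static void write_bh_sync_sketch(struct buffer_head *bh)
{
        lock_buffer(bh);
        clear_buffer_dirty(bh);
        get_bh(bh);                     /* reference consumed by completion */
        bh->b_end_io = end_buffer_write_sync;
        submit_bh(REQ_OP_WRITE, 0, bh);
        wait_on_buffer(bh);
}
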
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index c034edf3ef38..e97a37179614 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c | |||
@@ -640,7 +640,7 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno, | |||
640 | !buffer_new(bh) && | 640 | !buffer_new(bh) && |
641 | ocfs2_should_read_blk(inode, page, block_start) && | 641 | ocfs2_should_read_blk(inode, page, block_start) && |
642 | (block_start < from || block_end > to)) { | 642 | (block_start < from || block_end > to)) { |
643 | ll_rw_block(READ, 1, &bh); | 643 | ll_rw_block(REQ_OP_READ, 0, 1, &bh); |
644 | *wait_bh++=bh; | 644 | *wait_bh++=bh; |
645 | } | 645 | } |
646 | 646 | ||
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c index 498641eed2db..8f040f88ade4 100644 --- a/fs/ocfs2/buffer_head_io.c +++ b/fs/ocfs2/buffer_head_io.c | |||
@@ -79,7 +79,7 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh, | |||
79 | 79 | ||
80 | get_bh(bh); /* for end_buffer_write_sync() */ | 80 | get_bh(bh); /* for end_buffer_write_sync() */ |
81 | bh->b_end_io = end_buffer_write_sync; | 81 | bh->b_end_io = end_buffer_write_sync; |
82 | submit_bh(WRITE, bh); | 82 | submit_bh(REQ_OP_WRITE, 0, bh); |
83 | 83 | ||
84 | wait_on_buffer(bh); | 84 | wait_on_buffer(bh); |
85 | 85 | ||
@@ -154,7 +154,7 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block, | |||
154 | clear_buffer_uptodate(bh); | 154 | clear_buffer_uptodate(bh); |
155 | get_bh(bh); /* for end_buffer_read_sync() */ | 155 | get_bh(bh); /* for end_buffer_read_sync() */ |
156 | bh->b_end_io = end_buffer_read_sync; | 156 | bh->b_end_io = end_buffer_read_sync; |
157 | submit_bh(READ, bh); | 157 | submit_bh(REQ_OP_READ, 0, bh); |
158 | } | 158 | } |
159 | 159 | ||
160 | for (i = nr; i > 0; i--) { | 160 | for (i = nr; i > 0; i--) { |
@@ -310,7 +310,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, | |||
310 | if (validate) | 310 | if (validate) |
311 | set_buffer_needs_validate(bh); | 311 | set_buffer_needs_validate(bh); |
312 | bh->b_end_io = end_buffer_read_sync; | 312 | bh->b_end_io = end_buffer_read_sync; |
313 | submit_bh(READ, bh); | 313 | submit_bh(REQ_OP_READ, 0, bh); |
314 | continue; | 314 | continue; |
315 | } | 315 | } |
316 | } | 316 | } |
@@ -424,7 +424,7 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb, | |||
424 | get_bh(bh); /* for end_buffer_write_sync() */ | 424 | get_bh(bh); /* for end_buffer_write_sync() */ |
425 | bh->b_end_io = end_buffer_write_sync; | 425 | bh->b_end_io = end_buffer_write_sync; |
426 | ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check); | 426 | ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check); |
427 | submit_bh(WRITE, bh); | 427 | submit_bh(REQ_OP_WRITE, 0, bh); |
428 | 428 | ||
429 | wait_on_buffer(bh); | 429 | wait_on_buffer(bh); |
430 | 430 | ||
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 6aaf3e351391..636abcbd4650 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c | |||
@@ -530,7 +530,8 @@ static void o2hb_bio_end_io(struct bio *bio) | |||
530 | static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg, | 530 | static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg, |
531 | struct o2hb_bio_wait_ctxt *wc, | 531 | struct o2hb_bio_wait_ctxt *wc, |
532 | unsigned int *current_slot, | 532 | unsigned int *current_slot, |
533 | unsigned int max_slots) | 533 | unsigned int max_slots, int op, |
534 | int op_flags) | ||
534 | { | 535 | { |
535 | int len, current_page; | 536 | int len, current_page; |
536 | unsigned int vec_len, vec_start; | 537 | unsigned int vec_len, vec_start; |
@@ -556,6 +557,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg, | |||
556 | bio->bi_bdev = reg->hr_bdev; | 557 | bio->bi_bdev = reg->hr_bdev; |
557 | bio->bi_private = wc; | 558 | bio->bi_private = wc; |
558 | bio->bi_end_io = o2hb_bio_end_io; | 559 | bio->bi_end_io = o2hb_bio_end_io; |
560 | bio_set_op_attrs(bio, op, op_flags); | ||
559 | 561 | ||
560 | vec_start = (cs << bits) % PAGE_SIZE; | 562 | vec_start = (cs << bits) % PAGE_SIZE; |
561 | while(cs < max_slots) { | 563 | while(cs < max_slots) { |
@@ -591,7 +593,8 @@ static int o2hb_read_slots(struct o2hb_region *reg, | |||
591 | o2hb_bio_wait_init(&wc); | 593 | o2hb_bio_wait_init(&wc); |
592 | 594 | ||
593 | while(current_slot < max_slots) { | 595 | while(current_slot < max_slots) { |
594 | bio = o2hb_setup_one_bio(reg, &wc, ¤t_slot, max_slots); | 596 | bio = o2hb_setup_one_bio(reg, &wc, ¤t_slot, max_slots, |
597 | REQ_OP_READ, 0); | ||
595 | if (IS_ERR(bio)) { | 598 | if (IS_ERR(bio)) { |
596 | status = PTR_ERR(bio); | 599 | status = PTR_ERR(bio); |
597 | mlog_errno(status); | 600 | mlog_errno(status); |
@@ -599,7 +602,7 @@ static int o2hb_read_slots(struct o2hb_region *reg, | |||
599 | } | 602 | } |
600 | 603 | ||
601 | atomic_inc(&wc.wc_num_reqs); | 604 | atomic_inc(&wc.wc_num_reqs); |
602 | submit_bio(READ, bio); | 605 | submit_bio(bio); |
603 | } | 606 | } |
604 | 607 | ||
605 | status = 0; | 608 | status = 0; |
@@ -623,7 +626,8 @@ static int o2hb_issue_node_write(struct o2hb_region *reg, | |||
623 | 626 | ||
624 | slot = o2nm_this_node(); | 627 | slot = o2nm_this_node(); |
625 | 628 | ||
626 | bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot+1); | 629 | bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot+1, REQ_OP_WRITE, |
630 | WRITE_SYNC); | ||
627 | if (IS_ERR(bio)) { | 631 | if (IS_ERR(bio)) { |
628 | status = PTR_ERR(bio); | 632 | status = PTR_ERR(bio); |
629 | mlog_errno(status); | 633 | mlog_errno(status); |
@@ -631,7 +635,7 @@ static int o2hb_issue_node_write(struct o2hb_region *reg, | |||
631 | } | 635 | } |
632 | 636 | ||
633 | atomic_inc(&write_wc->wc_num_reqs); | 637 | atomic_inc(&write_wc->wc_num_reqs); |
634 | submit_bio(WRITE_SYNC, bio); | 638 | submit_bio(bio); |
635 | 639 | ||
636 | status = 0; | 640 | status = 0; |
637 | bail: | 641 | bail: |
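
o2hb gains op/op_flags parameters on its bio constructor so that bio_set_op_attrs() runs where the bio is assembled; read and write slots then share a single direction-agnostic submit_bio() path. Caller shape for the heartbeat write, condensed from the hunks above (the wait-context naming is illustrative):

/* The direction and WRITE_SYNC hint are fixed at setup time; the
 * submission itself no longer carries a direction. */
bio = o2hb_setup_one_bio(reg, &wc, &slot, slot + 1,
                         REQ_OP_WRITE, WRITE_SYNC);
if (IS_ERR(bio))
        return PTR_ERR(bio);
atomic_inc(&wc.wc_num_reqs);
submit_bio(bio);
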
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index d7cae3327de5..3971146228d3 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c | |||
@@ -1819,7 +1819,7 @@ static int ocfs2_get_sector(struct super_block *sb, | |||
1819 | if (!buffer_dirty(*bh)) | 1819 | if (!buffer_dirty(*bh)) |
1820 | clear_buffer_uptodate(*bh); | 1820 | clear_buffer_uptodate(*bh); |
1821 | unlock_buffer(*bh); | 1821 | unlock_buffer(*bh); |
1822 | ll_rw_block(READ, 1, bh); | 1822 | ll_rw_block(REQ_OP_READ, 0, 1, bh); |
1823 | wait_on_buffer(*bh); | 1823 | wait_on_buffer(*bh); |
1824 | if (!buffer_uptodate(*bh)) { | 1824 | if (!buffer_uptodate(*bh)) { |
1825 | mlog_errno(-EIO); | 1825 | mlog_errno(-EIO); |
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 825455d3e4ba..c2c59f9ff04b 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c | |||
@@ -2668,7 +2668,7 @@ static int reiserfs_write_full_page(struct page *page, | |||
2668 | do { | 2668 | do { |
2669 | struct buffer_head *next = bh->b_this_page; | 2669 | struct buffer_head *next = bh->b_this_page; |
2670 | if (buffer_async_write(bh)) { | 2670 | if (buffer_async_write(bh)) { |
2671 | submit_bh(WRITE, bh); | 2671 | submit_bh(REQ_OP_WRITE, 0, bh); |
2672 | nr++; | 2672 | nr++; |
2673 | } | 2673 | } |
2674 | put_bh(bh); | 2674 | put_bh(bh); |
@@ -2728,7 +2728,7 @@ fail: | |||
2728 | struct buffer_head *next = bh->b_this_page; | 2728 | struct buffer_head *next = bh->b_this_page; |
2729 | if (buffer_async_write(bh)) { | 2729 | if (buffer_async_write(bh)) { |
2730 | clear_buffer_dirty(bh); | 2730 | clear_buffer_dirty(bh); |
2731 | submit_bh(WRITE, bh); | 2731 | submit_bh(REQ_OP_WRITE, 0, bh); |
2732 | nr++; | 2732 | nr++; |
2733 | } | 2733 | } |
2734 | put_bh(bh); | 2734 | put_bh(bh); |
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index 2ace90e981f0..bc2dde2423c2 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c | |||
@@ -652,7 +652,7 @@ static void submit_logged_buffer(struct buffer_head *bh) | |||
652 | BUG(); | 652 | BUG(); |
653 | if (!buffer_uptodate(bh)) | 653 | if (!buffer_uptodate(bh)) |
654 | BUG(); | 654 | BUG(); |
655 | submit_bh(WRITE, bh); | 655 | submit_bh(REQ_OP_WRITE, 0, bh); |
656 | } | 656 | } |
657 | 657 | ||
658 | static void submit_ordered_buffer(struct buffer_head *bh) | 658 | static void submit_ordered_buffer(struct buffer_head *bh) |
@@ -662,7 +662,7 @@ static void submit_ordered_buffer(struct buffer_head *bh) | |||
662 | clear_buffer_dirty(bh); | 662 | clear_buffer_dirty(bh); |
663 | if (!buffer_uptodate(bh)) | 663 | if (!buffer_uptodate(bh)) |
664 | BUG(); | 664 | BUG(); |
665 | submit_bh(WRITE, bh); | 665 | submit_bh(REQ_OP_WRITE, 0, bh); |
666 | } | 666 | } |
667 | 667 | ||
668 | #define CHUNK_SIZE 32 | 668 | #define CHUNK_SIZE 32 |
@@ -870,7 +870,7 @@ loop_next: | |||
870 | */ | 870 | */ |
871 | if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) { | 871 | if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) { |
872 | spin_unlock(lock); | 872 | spin_unlock(lock); |
873 | ll_rw_block(WRITE, 1, &bh); | 873 | ll_rw_block(REQ_OP_WRITE, 0, 1, &bh); |
874 | spin_lock(lock); | 874 | spin_lock(lock); |
875 | } | 875 | } |
876 | put_bh(bh); | 876 | put_bh(bh); |
@@ -1057,7 +1057,7 @@ static int flush_commit_list(struct super_block *s, | |||
1057 | if (tbh) { | 1057 | if (tbh) { |
1058 | if (buffer_dirty(tbh)) { | 1058 | if (buffer_dirty(tbh)) { |
1059 | depth = reiserfs_write_unlock_nested(s); | 1059 | depth = reiserfs_write_unlock_nested(s); |
1060 | ll_rw_block(WRITE, 1, &tbh); | 1060 | ll_rw_block(REQ_OP_WRITE, 0, 1, &tbh); |
1061 | reiserfs_write_lock_nested(s, depth); | 1061 | reiserfs_write_lock_nested(s, depth); |
1062 | } | 1062 | } |
1063 | put_bh(tbh) ; | 1063 | put_bh(tbh) ; |
@@ -2244,7 +2244,7 @@ abort_replay: | |||
2244 | } | 2244 | } |
2245 | } | 2245 | } |
2246 | /* read in the log blocks, memcpy to the corresponding real block */ | 2246 | /* read in the log blocks, memcpy to the corresponding real block */ |
2247 | ll_rw_block(READ, get_desc_trans_len(desc), log_blocks); | 2247 | ll_rw_block(REQ_OP_READ, 0, get_desc_trans_len(desc), log_blocks); |
2248 | for (i = 0; i < get_desc_trans_len(desc); i++) { | 2248 | for (i = 0; i < get_desc_trans_len(desc); i++) { |
2249 | 2249 | ||
2250 | wait_on_buffer(log_blocks[i]); | 2250 | wait_on_buffer(log_blocks[i]); |
@@ -2269,7 +2269,7 @@ abort_replay: | |||
2269 | /* flush out the real blocks */ | 2269 | /* flush out the real blocks */ |
2270 | for (i = 0; i < get_desc_trans_len(desc); i++) { | 2270 | for (i = 0; i < get_desc_trans_len(desc); i++) { |
2271 | set_buffer_dirty(real_blocks[i]); | 2271 | set_buffer_dirty(real_blocks[i]); |
2272 | write_dirty_buffer(real_blocks[i], WRITE); | 2272 | write_dirty_buffer(real_blocks[i], 0); |
2273 | } | 2273 | } |
2274 | for (i = 0; i < get_desc_trans_len(desc); i++) { | 2274 | for (i = 0; i < get_desc_trans_len(desc); i++) { |
2275 | wait_on_buffer(real_blocks[i]); | 2275 | wait_on_buffer(real_blocks[i]); |
@@ -2346,7 +2346,7 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev, | |||
2346 | } else | 2346 | } else |
2347 | bhlist[j++] = bh; | 2347 | bhlist[j++] = bh; |
2348 | } | 2348 | } |
2349 | ll_rw_block(READ, j, bhlist); | 2349 | ll_rw_block(REQ_OP_READ, 0, j, bhlist); |
2350 | for (i = 1; i < j; i++) | 2350 | for (i = 1; i < j; i++) |
2351 | brelse(bhlist[i]); | 2351 | brelse(bhlist[i]); |
2352 | bh = bhlist[0]; | 2352 | bh = bhlist[0]; |
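
ll_rw_block() and write_dirty_buffer() follow the same split: the first argument is now a REQ_OP_* value and the second the flags, so the many bare 'WRITE' literals above collapse to a 0 flags word. The resulting call shapes, as used in the reiserfs, squashfs, and udf hunks:

ll_rw_block(REQ_OP_READ, 0, 1, &bh);            /* ordinary read */
ll_rw_block(REQ_OP_READ, READA, nr, bhs);       /* opportunistic readahead */
ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);           /* asynchronous write */
write_dirty_buffer(bh, 0);                      /* op is implied: write */
wait_on_buffer(bh);                             /* callers still wait as before */
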
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c index 5feacd689241..64b29b592d86 100644 --- a/fs/reiserfs/stree.c +++ b/fs/reiserfs/stree.c | |||
@@ -551,7 +551,7 @@ static int search_by_key_reada(struct super_block *s, | |||
551 | if (!buffer_uptodate(bh[j])) { | 551 | if (!buffer_uptodate(bh[j])) { |
552 | if (depth == -1) | 552 | if (depth == -1) |
553 | depth = reiserfs_write_unlock_nested(s); | 553 | depth = reiserfs_write_unlock_nested(s); |
554 | ll_rw_block(READA, 1, bh + j); | 554 | ll_rw_block(REQ_OP_READ, READA, 1, bh + j); |
555 | } | 555 | } |
556 | brelse(bh[j]); | 556 | brelse(bh[j]); |
557 | } | 557 | } |
@@ -660,7 +660,7 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, | |||
660 | if (!buffer_uptodate(bh) && depth == -1) | 660 | if (!buffer_uptodate(bh) && depth == -1) |
661 | depth = reiserfs_write_unlock_nested(sb); | 661 | depth = reiserfs_write_unlock_nested(sb); |
662 | 662 | ||
663 | ll_rw_block(READ, 1, &bh); | 663 | ll_rw_block(REQ_OP_READ, 0, 1, &bh); |
664 | wait_on_buffer(bh); | 664 | wait_on_buffer(bh); |
665 | 665 | ||
666 | if (depth != -1) | 666 | if (depth != -1) |
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index c72c16c5a60f..7a4a85a6821e 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c | |||
@@ -1666,7 +1666,7 @@ static int read_super_block(struct super_block *s, int offset) | |||
1666 | /* after journal replay, reread all bitmap and super blocks */ | 1666 | /* after journal replay, reread all bitmap and super blocks */ |
1667 | static int reread_meta_blocks(struct super_block *s) | 1667 | static int reread_meta_blocks(struct super_block *s) |
1668 | { | 1668 | { |
1669 | ll_rw_block(READ, 1, &SB_BUFFER_WITH_SB(s)); | 1669 | ll_rw_block(REQ_OP_READ, 0, 1, &SB_BUFFER_WITH_SB(s)); |
1670 | wait_on_buffer(SB_BUFFER_WITH_SB(s)); | 1670 | wait_on_buffer(SB_BUFFER_WITH_SB(s)); |
1671 | if (!buffer_uptodate(SB_BUFFER_WITH_SB(s))) { | 1671 | if (!buffer_uptodate(SB_BUFFER_WITH_SB(s))) { |
1672 | reiserfs_warning(s, "reiserfs-2504", "error reading the super"); | 1672 | reiserfs_warning(s, "reiserfs-2504", "error reading the super"); |
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c index 2c2618410d51..ce62a380314f 100644 --- a/fs/squashfs/block.c +++ b/fs/squashfs/block.c | |||
@@ -124,7 +124,7 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length, | |||
124 | goto block_release; | 124 | goto block_release; |
125 | bytes += msblk->devblksize; | 125 | bytes += msblk->devblksize; |
126 | } | 126 | } |
127 | ll_rw_block(READ, b, bh); | 127 | ll_rw_block(REQ_OP_READ, 0, b, bh); |
128 | } else { | 128 | } else { |
129 | /* | 129 | /* |
130 | * Metadata block. | 130 | * Metadata block. |
@@ -156,7 +156,7 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length, | |||
156 | goto block_release; | 156 | goto block_release; |
157 | bytes += msblk->devblksize; | 157 | bytes += msblk->devblksize; |
158 | } | 158 | } |
159 | ll_rw_block(READ, b - 1, bh + 1); | 159 | ll_rw_block(REQ_OP_READ, 0, b - 1, bh + 1); |
160 | } | 160 | } |
161 | 161 | ||
162 | for (i = 0; i < b; i++) { | 162 | for (i = 0; i < b; i++) { |
diff --git a/fs/udf/dir.c b/fs/udf/dir.c index 4c5593abc553..80c8a21daed9 100644 --- a/fs/udf/dir.c +++ b/fs/udf/dir.c | |||
@@ -113,7 +113,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx) | |||
113 | brelse(tmp); | 113 | brelse(tmp); |
114 | } | 114 | } |
115 | if (num) { | 115 | if (num) { |
116 | ll_rw_block(READA, num, bha); | 116 | ll_rw_block(REQ_OP_READ, READA, num, bha); |
117 | for (i = 0; i < num; i++) | 117 | for (i = 0; i < num; i++) |
118 | brelse(bha[i]); | 118 | brelse(bha[i]); |
119 | } | 119 | } |
diff --git a/fs/udf/directory.c b/fs/udf/directory.c index c763fda257bf..71f3e0b5b8ab 100644 --- a/fs/udf/directory.c +++ b/fs/udf/directory.c | |||
@@ -87,7 +87,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos, | |||
87 | brelse(tmp); | 87 | brelse(tmp); |
88 | } | 88 | } |
89 | if (num) { | 89 | if (num) { |
90 | ll_rw_block(READA, num, bha); | 90 | ll_rw_block(REQ_OP_READ, READA, num, bha); |
91 | for (i = 0; i < num; i++) | 91 | for (i = 0; i < num; i++) |
92 | brelse(bha[i]); | 92 | brelse(bha[i]); |
93 | } | 93 | } |
diff --git a/fs/udf/inode.c b/fs/udf/inode.c index f323aff740ef..55aa587bbc38 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c | |||
@@ -1199,7 +1199,7 @@ struct buffer_head *udf_bread(struct inode *inode, int block, | |||
1199 | if (buffer_uptodate(bh)) | 1199 | if (buffer_uptodate(bh)) |
1200 | return bh; | 1200 | return bh; |
1201 | 1201 | ||
1202 | ll_rw_block(READ, 1, &bh); | 1202 | ll_rw_block(REQ_OP_READ, 0, 1, &bh); |
1203 | 1203 | ||
1204 | wait_on_buffer(bh); | 1204 | wait_on_buffer(bh); |
1205 | if (buffer_uptodate(bh)) | 1205 | if (buffer_uptodate(bh)) |
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c index 0447b949c7f5..67e085d591d8 100644 --- a/fs/ufs/balloc.c +++ b/fs/ufs/balloc.c | |||
@@ -292,7 +292,7 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg, | |||
292 | if (!buffer_mapped(bh)) | 292 | if (!buffer_mapped(bh)) |
293 | map_bh(bh, inode->i_sb, oldb + pos); | 293 | map_bh(bh, inode->i_sb, oldb + pos); |
294 | if (!buffer_uptodate(bh)) { | 294 | if (!buffer_uptodate(bh)) { |
295 | ll_rw_block(READ, 1, &bh); | 295 | ll_rw_block(REQ_OP_READ, 0, 1, &bh); |
296 | wait_on_buffer(bh); | 296 | wait_on_buffer(bh); |
297 | if (!buffer_uptodate(bh)) { | 297 | if (!buffer_uptodate(bh)) { |
298 | ufs_error(inode->i_sb, __func__, | 298 | ufs_error(inode->i_sb, __func__, |
diff --git a/fs/ufs/util.c b/fs/ufs/util.c index a409e3e7827a..f41ad0a6106f 100644 --- a/fs/ufs/util.c +++ b/fs/ufs/util.c | |||
@@ -118,7 +118,7 @@ void ubh_sync_block(struct ufs_buffer_head *ubh) | |||
118 | unsigned i; | 118 | unsigned i; |
119 | 119 | ||
120 | for (i = 0; i < ubh->count; i++) | 120 | for (i = 0; i < ubh->count; i++) |
121 | write_dirty_buffer(ubh->bh[i], WRITE); | 121 | write_dirty_buffer(ubh->bh[i], 0); |
122 | 122 | ||
123 | for (i = 0; i < ubh->count; i++) | 123 | for (i = 0; i < ubh->count; i++) |
124 | wait_on_buffer(ubh->bh[i]); | 124 | wait_on_buffer(ubh->bh[i]); |
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 4c463b99fe57..87d2b215cbbd 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c | |||
@@ -438,7 +438,8 @@ xfs_submit_ioend( | |||
438 | 438 | ||
439 | ioend->io_bio->bi_private = ioend; | 439 | ioend->io_bio->bi_private = ioend; |
440 | ioend->io_bio->bi_end_io = xfs_end_bio; | 440 | ioend->io_bio->bi_end_io = xfs_end_bio; |
441 | 441 | bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE, | |
442 | (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0); | ||
442 | /* | 443 | /* |
443 | * If we are failing the IO now, just mark the ioend with an | 444 | * If we are failing the IO now, just mark the ioend with an |
444 | * error and finish it. This will run IO completion immediately | 445 | * error and finish it. This will run IO completion immediately |
@@ -451,8 +452,7 @@ xfs_submit_ioend( | |||
451 | return status; | 452 | return status; |
452 | } | 453 | } |
453 | 454 | ||
454 | submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, | 455 | submit_bio(ioend->io_bio); |
455 | ioend->io_bio); | ||
456 | return 0; | 456 | return 0; |
457 | } | 457 | } |
458 | 458 | ||
@@ -510,8 +510,9 @@ xfs_chain_bio( | |||
510 | 510 | ||
511 | bio_chain(ioend->io_bio, new); | 511 | bio_chain(ioend->io_bio, new); |
512 | bio_get(ioend->io_bio); /* for xfs_destroy_ioend */ | 512 | bio_get(ioend->io_bio); /* for xfs_destroy_ioend */ |
513 | submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, | 513 | bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE, |
514 | ioend->io_bio); | 514 | (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0); |
515 | submit_bio(ioend->io_bio); | ||
515 | ioend->io_bio = new; | 516 | ioend->io_bio = new; |
516 | } | 517 | } |
517 | 518 | ||
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index e71cfbd5acb3..a87a0d5477bd 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c | |||
@@ -1127,7 +1127,8 @@ xfs_buf_ioapply_map( | |||
1127 | int map, | 1127 | int map, |
1128 | int *buf_offset, | 1128 | int *buf_offset, |
1129 | int *count, | 1129 | int *count, |
1130 | int rw) | 1130 | int op, |
1131 | int op_flags) | ||
1131 | { | 1132 | { |
1132 | int page_index; | 1133 | int page_index; |
1133 | int total_nr_pages = bp->b_page_count; | 1134 | int total_nr_pages = bp->b_page_count; |
@@ -1157,16 +1158,14 @@ xfs_buf_ioapply_map( | |||
1157 | 1158 | ||
1158 | next_chunk: | 1159 | next_chunk: |
1159 | atomic_inc(&bp->b_io_remaining); | 1160 | atomic_inc(&bp->b_io_remaining); |
1160 | nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT); | 1161 | nr_pages = min(total_nr_pages, BIO_MAX_PAGES); |
1161 | if (nr_pages > total_nr_pages) | ||
1162 | nr_pages = total_nr_pages; | ||
1163 | 1162 | ||
1164 | bio = bio_alloc(GFP_NOIO, nr_pages); | 1163 | bio = bio_alloc(GFP_NOIO, nr_pages); |
1165 | bio->bi_bdev = bp->b_target->bt_bdev; | 1164 | bio->bi_bdev = bp->b_target->bt_bdev; |
1166 | bio->bi_iter.bi_sector = sector; | 1165 | bio->bi_iter.bi_sector = sector; |
1167 | bio->bi_end_io = xfs_buf_bio_end_io; | 1166 | bio->bi_end_io = xfs_buf_bio_end_io; |
1168 | bio->bi_private = bp; | 1167 | bio->bi_private = bp; |
1169 | 1168 | bio_set_op_attrs(bio, op, op_flags); | |
1170 | 1169 | ||
1171 | for (; size && nr_pages; nr_pages--, page_index++) { | 1170 | for (; size && nr_pages; nr_pages--, page_index++) { |
1172 | int rbytes, nbytes = PAGE_SIZE - offset; | 1171 | int rbytes, nbytes = PAGE_SIZE - offset; |
@@ -1190,7 +1189,7 @@ next_chunk: | |||
1190 | flush_kernel_vmap_range(bp->b_addr, | 1189 | flush_kernel_vmap_range(bp->b_addr, |
1191 | xfs_buf_vmap_len(bp)); | 1190 | xfs_buf_vmap_len(bp)); |
1192 | } | 1191 | } |
1193 | submit_bio(rw, bio); | 1192 | submit_bio(bio); |
1194 | if (size) | 1193 | if (size) |
1195 | goto next_chunk; | 1194 | goto next_chunk; |
1196 | } else { | 1195 | } else { |
@@ -1210,7 +1209,8 @@ _xfs_buf_ioapply( | |||
1210 | struct xfs_buf *bp) | 1209 | struct xfs_buf *bp) |
1211 | { | 1210 | { |
1212 | struct blk_plug plug; | 1211 | struct blk_plug plug; |
1213 | int rw; | 1212 | int op; |
1213 | int op_flags = 0; | ||
1214 | int offset; | 1214 | int offset; |
1215 | int size; | 1215 | int size; |
1216 | int i; | 1216 | int i; |
@@ -1229,14 +1229,13 @@ _xfs_buf_ioapply( | |||
1229 | bp->b_ioend_wq = bp->b_target->bt_mount->m_buf_workqueue; | 1229 | bp->b_ioend_wq = bp->b_target->bt_mount->m_buf_workqueue; |
1230 | 1230 | ||
1231 | if (bp->b_flags & XBF_WRITE) { | 1231 | if (bp->b_flags & XBF_WRITE) { |
1232 | op = REQ_OP_WRITE; | ||
1232 | if (bp->b_flags & XBF_SYNCIO) | 1233 | if (bp->b_flags & XBF_SYNCIO) |
1233 | rw = WRITE_SYNC; | 1234 | op_flags = WRITE_SYNC; |
1234 | else | ||
1235 | rw = WRITE; | ||
1236 | if (bp->b_flags & XBF_FUA) | 1235 | if (bp->b_flags & XBF_FUA) |
1237 | rw |= REQ_FUA; | 1236 | op_flags |= REQ_FUA; |
1238 | if (bp->b_flags & XBF_FLUSH) | 1237 | if (bp->b_flags & XBF_FLUSH) |
1239 | rw |= REQ_FLUSH; | 1238 | op_flags |= REQ_PREFLUSH; |
1240 | 1239 | ||
1241 | /* | 1240 | /* |
1242 | * Run the write verifier callback function if it exists. If | 1241 | * Run the write verifier callback function if it exists. If |
@@ -1266,13 +1265,14 @@ _xfs_buf_ioapply( | |||
1266 | } | 1265 | } |
1267 | } | 1266 | } |
1268 | } else if (bp->b_flags & XBF_READ_AHEAD) { | 1267 | } else if (bp->b_flags & XBF_READ_AHEAD) { |
1269 | rw = READA; | 1268 | op = REQ_OP_READ; |
1269 | op_flags = REQ_RAHEAD; | ||
1270 | } else { | 1270 | } else { |
1271 | rw = READ; | 1271 | op = REQ_OP_READ; |
1272 | } | 1272 | } |
1273 | 1273 | ||
1274 | /* we only use the buffer cache for meta-data */ | 1274 | /* we only use the buffer cache for meta-data */ |
1275 | rw |= REQ_META; | 1275 | op_flags |= REQ_META; |
1276 | 1276 | ||
1277 | /* | 1277 | /* |
1278 | * Walk all the vectors issuing IO on them. Set up the initial offset | 1278 | * Walk all the vectors issuing IO on them. Set up the initial offset |
@@ -1284,7 +1284,7 @@ _xfs_buf_ioapply( | |||
1284 | size = BBTOB(bp->b_io_length); | 1284 | size = BBTOB(bp->b_io_length); |
1285 | blk_start_plug(&plug); | 1285 | blk_start_plug(&plug); |
1286 | for (i = 0; i < bp->b_map_count; i++) { | 1286 | for (i = 0; i < bp->b_map_count; i++) { |
1287 | xfs_buf_ioapply_map(bp, i, &offset, &size, rw); | 1287 | xfs_buf_ioapply_map(bp, i, &offset, &size, op, op_flags); |
1288 | if (bp->b_error) | 1288 | if (bp->b_error) |
1289 | break; | 1289 | break; |
1290 | if (size <= 0) | 1290 | if (size <= 0) |
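
_xfs_buf_ioapply() is the clearest before/after of the whole series: one merged 'rw' word becomes an op plus a flags word, and REQ_FLUSH is renamed REQ_PREFLUSH along the way. The mapping from XBF_* buffer flags, condensed from the hunk above:

/* XBF_* buffer-state flags map onto an (op, op_flags) pair instead of
 * one combined rw value. */
int op, op_flags = 0;

if (bp->b_flags & XBF_WRITE) {
        op = REQ_OP_WRITE;
        if (bp->b_flags & XBF_SYNCIO)
                op_flags = WRITE_SYNC;
        if (bp->b_flags & XBF_FUA)
                op_flags |= REQ_FUA;
        if (bp->b_flags & XBF_FLUSH)
                op_flags |= REQ_PREFLUSH;       /* was REQ_FLUSH */
} else {
        op = REQ_OP_READ;
        if (bp->b_flags & XBF_READ_AHEAD)
                op_flags = REQ_RAHEAD;          /* was op rw = READA */
}
op_flags |= REQ_META;   /* the buffer cache is metadata only */
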
diff --git a/include/linux/bio.h b/include/linux/bio.h index 9faebf7f9a33..b7e1a00810f2 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h | |||
@@ -41,44 +41,9 @@ | |||
41 | #endif | 41 | #endif |
42 | 42 | ||
43 | #define BIO_MAX_PAGES 256 | 43 | #define BIO_MAX_PAGES 256 |
44 | #define BIO_MAX_SIZE (BIO_MAX_PAGES << PAGE_SHIFT) | ||
45 | #define BIO_MAX_SECTORS (BIO_MAX_SIZE >> 9) | ||
46 | 44 | ||
47 | /* | 45 | #define bio_prio(bio) (bio)->bi_ioprio |
48 | * upper 16 bits of bi_rw define the io priority of this bio | 46 | #define bio_set_prio(bio, prio) ((bio)->bi_ioprio = prio) |
49 | */ | ||
50 | #define BIO_PRIO_SHIFT (8 * sizeof(unsigned long) - IOPRIO_BITS) | ||
51 | #define bio_prio(bio) ((bio)->bi_rw >> BIO_PRIO_SHIFT) | ||
52 | #define bio_prio_valid(bio) ioprio_valid(bio_prio(bio)) | ||
53 | |||
54 | #define bio_set_prio(bio, prio) do { \ | ||
55 | WARN_ON(prio >= (1 << IOPRIO_BITS)); \ | ||
56 | (bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1); \ | ||
57 | (bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT); \ | ||
58 | } while (0) | ||
59 | |||
60 | /* | ||
61 | * various member access, note that bio_data should of course not be used | ||
62 | * on highmem page vectors | ||
63 | */ | ||
64 | #define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx]) | ||
65 | |||
66 | #define bvec_iter_page(bvec, iter) \ | ||
67 | (__bvec_iter_bvec((bvec), (iter))->bv_page) | ||
68 | |||
69 | #define bvec_iter_len(bvec, iter) \ | ||
70 | min((iter).bi_size, \ | ||
71 | __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done) | ||
72 | |||
73 | #define bvec_iter_offset(bvec, iter) \ | ||
74 | (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done) | ||
75 | |||
76 | #define bvec_iter_bvec(bvec, iter) \ | ||
77 | ((struct bio_vec) { \ | ||
78 | .bv_page = bvec_iter_page((bvec), (iter)), \ | ||
79 | .bv_len = bvec_iter_len((bvec), (iter)), \ | ||
80 | .bv_offset = bvec_iter_offset((bvec), (iter)), \ | ||
81 | }) | ||
82 | 47 | ||
83 | #define bio_iter_iovec(bio, iter) \ | 48 | #define bio_iter_iovec(bio, iter) \ |
84 | bvec_iter_bvec((bio)->bi_io_vec, (iter)) | 49 | bvec_iter_bvec((bio)->bi_io_vec, (iter)) |
@@ -106,18 +71,23 @@ static inline bool bio_has_data(struct bio *bio) | |||
106 | { | 71 | { |
107 | if (bio && | 72 | if (bio && |
108 | bio->bi_iter.bi_size && | 73 | bio->bi_iter.bi_size && |
109 | !(bio->bi_rw & REQ_DISCARD)) | 74 | bio_op(bio) != REQ_OP_DISCARD) |
110 | return true; | 75 | return true; |
111 | 76 | ||
112 | return false; | 77 | return false; |
113 | } | 78 | } |
114 | 79 | ||
80 | static inline bool bio_no_advance_iter(struct bio *bio) | ||
81 | { | ||
82 | return bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_WRITE_SAME; | ||
83 | } | ||
84 | |||
115 | static inline bool bio_is_rw(struct bio *bio) | 85 | static inline bool bio_is_rw(struct bio *bio) |
116 | { | 86 | { |
117 | if (!bio_has_data(bio)) | 87 | if (!bio_has_data(bio)) |
118 | return false; | 88 | return false; |
119 | 89 | ||
120 | if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK) | 90 | if (bio_no_advance_iter(bio)) |
121 | return false; | 91 | return false; |
122 | 92 | ||
123 | return true; | 93 | return true; |
@@ -193,39 +163,12 @@ static inline void *bio_data(struct bio *bio) | |||
193 | #define bio_for_each_segment_all(bvl, bio, i) \ | 163 | #define bio_for_each_segment_all(bvl, bio, i) \ |
194 | for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++) | 164 | for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++) |
195 | 165 | ||
196 | static inline void bvec_iter_advance(struct bio_vec *bv, struct bvec_iter *iter, | ||
197 | unsigned bytes) | ||
198 | { | ||
199 | WARN_ONCE(bytes > iter->bi_size, | ||
200 | "Attempted to advance past end of bvec iter\n"); | ||
201 | |||
202 | while (bytes) { | ||
203 | unsigned len = min(bytes, bvec_iter_len(bv, *iter)); | ||
204 | |||
205 | bytes -= len; | ||
206 | iter->bi_size -= len; | ||
207 | iter->bi_bvec_done += len; | ||
208 | |||
209 | if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) { | ||
210 | iter->bi_bvec_done = 0; | ||
211 | iter->bi_idx++; | ||
212 | } | ||
213 | } | ||
214 | } | ||
215 | |||
216 | #define for_each_bvec(bvl, bio_vec, iter, start) \ | ||
217 | for (iter = (start); \ | ||
218 | (iter).bi_size && \ | ||
219 | ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \ | ||
220 | bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len)) | ||
221 | |||
222 | |||
223 | static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, | 166 | static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, |
224 | unsigned bytes) | 167 | unsigned bytes) |
225 | { | 168 | { |
226 | iter->bi_sector += bytes >> 9; | 169 | iter->bi_sector += bytes >> 9; |
227 | 170 | ||
228 | if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK) | 171 | if (bio_no_advance_iter(bio)) |
229 | iter->bi_size -= bytes; | 172 | iter->bi_size -= bytes; |
230 | else | 173 | else |
231 | bvec_iter_advance(bio->bi_io_vec, iter, bytes); | 174 | bvec_iter_advance(bio->bi_io_vec, iter, bytes); |
@@ -253,10 +196,10 @@ static inline unsigned bio_segments(struct bio *bio) | |||
253 | * differently: | 196 | * differently: |
254 | */ | 197 | */ |
255 | 198 | ||
256 | if (bio->bi_rw & REQ_DISCARD) | 199 | if (bio_op(bio) == REQ_OP_DISCARD) |
257 | return 1; | 200 | return 1; |
258 | 201 | ||
259 | if (bio->bi_rw & REQ_WRITE_SAME) | 202 | if (bio_op(bio) == REQ_OP_WRITE_SAME) |
260 | return 1; | 203 | return 1; |
261 | 204 | ||
262 | bio_for_each_segment(bv, bio, iter) | 205 | bio_for_each_segment(bv, bio, iter) |
@@ -473,7 +416,7 @@ static inline void bio_io_error(struct bio *bio) | |||
473 | struct request_queue; | 416 | struct request_queue; |
474 | extern int bio_phys_segments(struct request_queue *, struct bio *); | 417 | extern int bio_phys_segments(struct request_queue *, struct bio *); |
475 | 418 | ||
476 | extern int submit_bio_wait(int rw, struct bio *bio); | 419 | extern int submit_bio_wait(struct bio *bio); |
477 | extern void bio_advance(struct bio *, unsigned); | 420 | extern void bio_advance(struct bio *, unsigned); |
478 | 421 | ||
479 | extern void bio_init(struct bio *); | 422 | extern void bio_init(struct bio *); |
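
In bio.h the flag-bit tests become comparisons on bio_op(), and the open-coded BIO_NO_ADVANCE_ITER_MASK check turns into the bio_no_advance_iter() helper; the priority bits likewise leave bi_rw for the new bi_ioprio field. The shape of the converted tests:

/* Ops are values, not flag bits, so equality replaces masking: */
if (bio_op(bio) == REQ_OP_DISCARD)      /* was: bio->bi_rw & REQ_DISCARD */
        return 1;
if (bio_op(bio) == REQ_OP_WRITE_SAME)   /* was: bio->bi_rw & REQ_WRITE_SAME */
        return 1;

/* DISCARD and WRITE_SAME carry no data pages to iterate over: */
if (bio_no_advance_iter(bio))
        iter->bi_size -= bytes;
else
        bvec_iter_advance(bio->bi_io_vec, iter, bytes);
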
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index c02e669945e9..f77150a4a96a 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h | |||
@@ -590,25 +590,26 @@ static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat) | |||
590 | /** | 590 | /** |
591 | * blkg_rwstat_add - add a value to a blkg_rwstat | 591 | * blkg_rwstat_add - add a value to a blkg_rwstat |
592 | * @rwstat: target blkg_rwstat | 592 | * @rwstat: target blkg_rwstat |
593 | * @rw: mask of REQ_{WRITE|SYNC} | 593 | * @op: REQ_OP |
594 | * @op_flags: rq_flag_bits | ||
594 | * @val: value to add | 595 | * @val: value to add |
595 | * | 596 | * |
596 | * Add @val to @rwstat. The counters are chosen according to @rw. The | 597 | * Add @val to @rwstat. The counters are chosen according to @op and @op_flags. The |
597 | * caller is responsible for synchronizing calls to this function. | 598 | * caller is responsible for synchronizing calls to this function. |
598 | */ | 599 | */ |
599 | static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat, | 600 | static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat, |
600 | int rw, uint64_t val) | 601 | int op, int op_flags, uint64_t val) |
601 | { | 602 | { |
602 | struct percpu_counter *cnt; | 603 | struct percpu_counter *cnt; |
603 | 604 | ||
604 | if (rw & REQ_WRITE) | 605 | if (op_is_write(op)) |
605 | cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE]; | 606 | cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE]; |
606 | else | 607 | else |
607 | cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ]; | 608 | cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ]; |
608 | 609 | ||
609 | __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH); | 610 | __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH); |
610 | 611 | ||
611 | if (rw & REQ_SYNC) | 612 | if (op_flags & REQ_SYNC) |
612 | cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC]; | 613 | cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC]; |
613 | else | 614 | else |
614 | cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC]; | 615 | cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC]; |
@@ -713,9 +714,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q, | |||
713 | 714 | ||
714 | if (!throtl) { | 715 | if (!throtl) { |
715 | blkg = blkg ?: q->root_blkg; | 716 | blkg = blkg ?: q->root_blkg; |
716 | blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw, | 717 | blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_rw, |
717 | bio->bi_iter.bi_size); | 718 | bio->bi_iter.bi_size); |
718 | blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1); | 719 | blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_rw, 1); |
719 | } | 720 | } |
720 | 721 | ||
721 | rcu_read_unlock(); | 722 | rcu_read_unlock(); |
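
blkg_rwstat_add() now receives the op and the flags separately: op_is_write() classifies the direction where the old code masked REQ_WRITE, while REQ_SYNC is still taken from the flags word. Caller shape from blkcg_bio_issue_check() above:

/* Accounting after the split; bi_rw now holds only flags (plus the op
 * in its top bits), so it doubles as the op_flags argument here. */
blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_rw,
                bio->bi_iter.bi_size);
blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_rw, 1);
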
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 77e5d81f07aa..b588e968dc01 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h | |||
@@ -6,6 +6,7 @@ | |||
6 | #define __LINUX_BLK_TYPES_H | 6 | #define __LINUX_BLK_TYPES_H |
7 | 7 | ||
8 | #include <linux/types.h> | 8 | #include <linux/types.h> |
9 | #include <linux/bvec.h> | ||
9 | 10 | ||
10 | struct bio_set; | 11 | struct bio_set; |
11 | struct bio; | 12 | struct bio; |
@@ -17,28 +18,7 @@ struct cgroup_subsys_state; | |||
17 | typedef void (bio_end_io_t) (struct bio *); | 18 | typedef void (bio_end_io_t) (struct bio *); |
18 | typedef void (bio_destructor_t) (struct bio *); | 19 | typedef void (bio_destructor_t) (struct bio *); |
19 | 20 | ||
20 | /* | ||
21 | * was unsigned short, but we might as well be ready for > 64kB I/O pages | ||
22 | */ | ||
23 | struct bio_vec { | ||
24 | struct page *bv_page; | ||
25 | unsigned int bv_len; | ||
26 | unsigned int bv_offset; | ||
27 | }; | ||
28 | |||
29 | #ifdef CONFIG_BLOCK | 21 | #ifdef CONFIG_BLOCK |
30 | |||
31 | struct bvec_iter { | ||
32 | sector_t bi_sector; /* device address in 512 byte | ||
33 | sectors */ | ||
34 | unsigned int bi_size; /* residual I/O count */ | ||
35 | |||
36 | unsigned int bi_idx; /* current index into bvl_vec */ | ||
37 | |||
38 | unsigned int bi_bvec_done; /* number of bytes completed in | ||
39 | current bvec */ | ||
40 | }; | ||
41 | |||
42 | /* | 22 | /* |
43 | * main unit of I/O for the block layer and lower layers (ie drivers and | 23 | * main unit of I/O for the block layer and lower layers (ie drivers and |
44 | * stacking drivers) | 24 | * stacking drivers) |
@@ -48,9 +28,10 @@ struct bio { | |||
48 | struct block_device *bi_bdev; | 28 | struct block_device *bi_bdev; |
49 | unsigned int bi_flags; /* status, command, etc */ | 29 | unsigned int bi_flags; /* status, command, etc */ |
50 | int bi_error; | 30 | int bi_error; |
51 | unsigned long bi_rw; /* bottom bits READ/WRITE, | 31 | unsigned int bi_rw; /* bottom bits req flags, |
52 | * top bits priority | 32 | * top bits REQ_OP |
53 | */ | 33 | */ |
34 | unsigned short bi_ioprio; | ||
54 | 35 | ||
55 | struct bvec_iter bi_iter; | 36 | struct bvec_iter bi_iter; |
56 | 37 | ||
@@ -107,6 +88,16 @@ struct bio { | |||
107 | struct bio_vec bi_inline_vecs[0]; | 88 | struct bio_vec bi_inline_vecs[0]; |
108 | }; | 89 | }; |
109 | 90 | ||
91 | #define BIO_OP_SHIFT (8 * sizeof(unsigned int) - REQ_OP_BITS) | ||
92 | #define bio_op(bio) ((bio)->bi_rw >> BIO_OP_SHIFT) | ||
93 | |||
94 | #define bio_set_op_attrs(bio, op, op_flags) do { \ | ||
95 | WARN_ON(op >= (1 << REQ_OP_BITS)); \ | ||
96 | (bio)->bi_rw &= ((1 << BIO_OP_SHIFT) - 1); \ | ||
97 | (bio)->bi_rw |= ((unsigned int) (op) << BIO_OP_SHIFT); \ | ||
98 | (bio)->bi_rw |= op_flags; \ | ||
99 | } while (0) | ||
100 | |||
110 | #define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs) | 101 | #define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs) |
111 | 102 | ||
112 | /* | 103 | /* |
@@ -145,7 +136,6 @@ struct bio { | |||
145 | */ | 136 | */ |
146 | enum rq_flag_bits { | 137 | enum rq_flag_bits { |
147 | /* common flags */ | 138 | /* common flags */ |
148 | __REQ_WRITE, /* not set, read. set, write */ | ||
149 | __REQ_FAILFAST_DEV, /* no driver retries of device errors */ | 139 | __REQ_FAILFAST_DEV, /* no driver retries of device errors */ |
150 | __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ | 140 | __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ |
151 | __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ | 141 | __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ |
@@ -153,14 +143,12 @@ enum rq_flag_bits { | |||
153 | __REQ_SYNC, /* request is sync (sync write or read) */ | 143 | __REQ_SYNC, /* request is sync (sync write or read) */ |
154 | __REQ_META, /* metadata io request */ | 144 | __REQ_META, /* metadata io request */ |
155 | __REQ_PRIO, /* boost priority in cfq */ | 145 | __REQ_PRIO, /* boost priority in cfq */ |
156 | __REQ_DISCARD, /* request to discard sectors */ | 146 | __REQ_SECURE, /* secure discard (used with REQ_OP_DISCARD) */ |
157 | __REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */ | ||
158 | __REQ_WRITE_SAME, /* write same block many times */ | ||
159 | 147 | ||
160 | __REQ_NOIDLE, /* don't anticipate more IO after this one */ | 148 | __REQ_NOIDLE, /* don't anticipate more IO after this one */ |
161 | __REQ_INTEGRITY, /* I/O includes block integrity payload */ | 149 | __REQ_INTEGRITY, /* I/O includes block integrity payload */ |
162 | __REQ_FUA, /* forced unit access */ | 150 | __REQ_FUA, /* forced unit access */ |
163 | __REQ_FLUSH, /* request for cache flush */ | 151 | __REQ_PREFLUSH, /* request for cache flush */ |
164 | 152 | ||
165 | /* bio only flags */ | 153 | /* bio only flags */ |
166 | __REQ_RAHEAD, /* read ahead, can fail anytime */ | 154 | __REQ_RAHEAD, /* read ahead, can fail anytime */ |
@@ -191,31 +179,25 @@ enum rq_flag_bits { | |||
191 | __REQ_NR_BITS, /* stops here */ | 179 | __REQ_NR_BITS, /* stops here */ |
192 | }; | 180 | }; |
193 | 181 | ||
194 | #define REQ_WRITE (1ULL << __REQ_WRITE) | ||
195 | #define REQ_FAILFAST_DEV (1ULL << __REQ_FAILFAST_DEV) | 182 | #define REQ_FAILFAST_DEV (1ULL << __REQ_FAILFAST_DEV) |
196 | #define REQ_FAILFAST_TRANSPORT (1ULL << __REQ_FAILFAST_TRANSPORT) | 183 | #define REQ_FAILFAST_TRANSPORT (1ULL << __REQ_FAILFAST_TRANSPORT) |
197 | #define REQ_FAILFAST_DRIVER (1ULL << __REQ_FAILFAST_DRIVER) | 184 | #define REQ_FAILFAST_DRIVER (1ULL << __REQ_FAILFAST_DRIVER) |
198 | #define REQ_SYNC (1ULL << __REQ_SYNC) | 185 | #define REQ_SYNC (1ULL << __REQ_SYNC) |
199 | #define REQ_META (1ULL << __REQ_META) | 186 | #define REQ_META (1ULL << __REQ_META) |
200 | #define REQ_PRIO (1ULL << __REQ_PRIO) | 187 | #define REQ_PRIO (1ULL << __REQ_PRIO) |
201 | #define REQ_DISCARD (1ULL << __REQ_DISCARD) | ||
202 | #define REQ_WRITE_SAME (1ULL << __REQ_WRITE_SAME) | ||
203 | #define REQ_NOIDLE (1ULL << __REQ_NOIDLE) | 188 | #define REQ_NOIDLE (1ULL << __REQ_NOIDLE) |
204 | #define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY) | 189 | #define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY) |
205 | 190 | ||
206 | #define REQ_FAILFAST_MASK \ | 191 | #define REQ_FAILFAST_MASK \ |
207 | (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) | 192 | (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) |
208 | #define REQ_COMMON_MASK \ | 193 | #define REQ_COMMON_MASK \ |
209 | (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \ | 194 | (REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | REQ_NOIDLE | \ |
210 | REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \ | 195 | REQ_PREFLUSH | REQ_FUA | REQ_SECURE | REQ_INTEGRITY | REQ_NOMERGE) |
211 | REQ_SECURE | REQ_INTEGRITY | REQ_NOMERGE) | ||
212 | #define REQ_CLONE_MASK REQ_COMMON_MASK | 196 | #define REQ_CLONE_MASK REQ_COMMON_MASK |
213 | 197 | ||
214 | #define BIO_NO_ADVANCE_ITER_MASK (REQ_DISCARD|REQ_WRITE_SAME) | ||
215 | |||
216 | /* This mask is used for both bio and request merge checking */ | 198 | /* This mask is used for both bio and request merge checking */ |
217 | #define REQ_NOMERGE_FLAGS \ | 199 | #define REQ_NOMERGE_FLAGS \ |
218 | (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_FLUSH_SEQ) | 200 | (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_PREFLUSH | REQ_FUA | REQ_FLUSH_SEQ) |
219 | 201 | ||
220 | #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) | 202 | #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) |
221 | #define REQ_THROTTLED (1ULL << __REQ_THROTTLED) | 203 | #define REQ_THROTTLED (1ULL << __REQ_THROTTLED) |
@@ -233,7 +215,7 @@ enum rq_flag_bits { | |||
233 | #define REQ_PREEMPT (1ULL << __REQ_PREEMPT) | 215 | #define REQ_PREEMPT (1ULL << __REQ_PREEMPT) |
234 | #define REQ_ALLOCED (1ULL << __REQ_ALLOCED) | 216 | #define REQ_ALLOCED (1ULL << __REQ_ALLOCED) |
235 | #define REQ_COPY_USER (1ULL << __REQ_COPY_USER) | 217 | #define REQ_COPY_USER (1ULL << __REQ_COPY_USER) |
236 | #define REQ_FLUSH (1ULL << __REQ_FLUSH) | 218 | #define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH) |
237 | #define REQ_FLUSH_SEQ (1ULL << __REQ_FLUSH_SEQ) | 219 | #define REQ_FLUSH_SEQ (1ULL << __REQ_FLUSH_SEQ) |
238 | #define REQ_IO_STAT (1ULL << __REQ_IO_STAT) | 220 | #define REQ_IO_STAT (1ULL << __REQ_IO_STAT) |
239 | #define REQ_MIXED_MERGE (1ULL << __REQ_MIXED_MERGE) | 221 | #define REQ_MIXED_MERGE (1ULL << __REQ_MIXED_MERGE) |
@@ -242,6 +224,16 @@ enum rq_flag_bits { | |||
242 | #define REQ_HASHED (1ULL << __REQ_HASHED) | 224 | #define REQ_HASHED (1ULL << __REQ_HASHED) |
243 | #define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) | 225 | #define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) |
244 | 226 | ||
227 | enum req_op { | ||
228 | REQ_OP_READ, | ||
229 | REQ_OP_WRITE, | ||
230 | REQ_OP_DISCARD, /* request to discard sectors */ | ||
231 | REQ_OP_WRITE_SAME, /* write same block many times */ | ||
232 | REQ_OP_FLUSH, /* request for cache flush */ | ||
233 | }; | ||
234 | |||
235 | #define REQ_OP_BITS 3 | ||
236 | |||
245 | typedef unsigned int blk_qc_t; | 237 | typedef unsigned int blk_qc_t; |
246 | #define BLK_QC_T_NONE -1U | 238 | #define BLK_QC_T_NONE -1U |
247 | #define BLK_QC_T_SHIFT 16 | 239 | #define BLK_QC_T_SHIFT 16 |
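Taken together, the blk_types.h hunks above move the operation out of individual REQ_* flag bits and into a value packed into the top REQ_OP_BITS of bi_rw, written by bio_set_op_attrs() and read back by bio_op(). A minimal standalone sketch of that packing, with WARN_ON swapped for assert() and a made-up flag value, purely to illustrate the bit layout; this is not kernel code:

#include <assert.h>
#include <stdio.h>

#define REQ_OP_BITS     3
#define BIO_OP_SHIFT    (8 * sizeof(unsigned int) - REQ_OP_BITS)

enum req_op { REQ_OP_READ, REQ_OP_WRITE, REQ_OP_DISCARD,
              REQ_OP_WRITE_SAME, REQ_OP_FLUSH };

struct bio { unsigned int bi_rw; };

#define bio_op(bio)     ((bio)->bi_rw >> BIO_OP_SHIFT)

#define bio_set_op_attrs(bio, op, op_flags) do {                \
        assert((op) < (1 << REQ_OP_BITS));                      \
        (bio)->bi_rw &= ((1U << BIO_OP_SHIFT) - 1);             \
        (bio)->bi_rw |= ((unsigned int)(op) << BIO_OP_SHIFT);   \
        (bio)->bi_rw |= (op_flags);                             \
} while (0)

int main(void)
{
        struct bio bio = { 0 };

        bio_set_op_attrs(&bio, REQ_OP_DISCARD, 0x10 /* fake flag */);
        printf("op=%u low bits=%#x\n", (unsigned int)bio_op(&bio),
               bio.bi_rw & ((1U << BIO_OP_SHIFT) - 1));
        return 0;
}

With a 32-bit word and REQ_OP_BITS of 3, the op occupies bits 29..31 and the flags keep the low 29 bits, which is why the struct bio hunk can shrink bi_rw to unsigned int and split the priority out into bi_ioprio.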
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 3d9cf326574f..48f05d768a53 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -90,18 +90,17 @@ struct request { | |||
90 | struct list_head queuelist; | 90 | struct list_head queuelist; |
91 | union { | 91 | union { |
92 | struct call_single_data csd; | 92 | struct call_single_data csd; |
93 | unsigned long fifo_time; | 93 | u64 fifo_time; |
94 | }; | 94 | }; |
95 | 95 | ||
96 | struct request_queue *q; | 96 | struct request_queue *q; |
97 | struct blk_mq_ctx *mq_ctx; | 97 | struct blk_mq_ctx *mq_ctx; |
98 | 98 | ||
99 | u64 cmd_flags; | 99 | int cpu; |
100 | unsigned cmd_type; | 100 | unsigned cmd_type; |
101 | u64 cmd_flags; | ||
101 | unsigned long atomic_flags; | 102 | unsigned long atomic_flags; |
102 | 103 | ||
103 | int cpu; | ||
104 | |||
105 | /* the following two fields are internal, NEVER access directly */ | 104 | /* the following two fields are internal, NEVER access directly */ |
106 | unsigned int __data_len; /* total data len */ | 105 | unsigned int __data_len; /* total data len */ |
107 | sector_t __sector; /* sector cursor */ | 106 | sector_t __sector; /* sector cursor */ |
@@ -200,6 +199,20 @@ struct request { | |||
200 | struct request *next_rq; | 199 | struct request *next_rq; |
201 | }; | 200 | }; |
202 | 201 | ||
202 | #define REQ_OP_SHIFT (8 * sizeof(u64) - REQ_OP_BITS) | ||
203 | #define req_op(req) ((req)->cmd_flags >> REQ_OP_SHIFT) | ||
204 | |||
205 | #define req_set_op(req, op) do { \ | ||
206 | WARN_ON(op >= (1 << REQ_OP_BITS)); \ | ||
207 | (req)->cmd_flags &= ((1ULL << REQ_OP_SHIFT) - 1); \ | ||
208 | (req)->cmd_flags |= ((u64) (op) << REQ_OP_SHIFT); \ | ||
209 | } while (0) | ||
210 | |||
211 | #define req_set_op_attrs(req, op, flags) do { \ | ||
212 | req_set_op(req, op); \ | ||
213 | (req)->cmd_flags |= flags; \ | ||
214 | } while (0) | ||
215 | |||
203 | static inline unsigned short req_get_ioprio(struct request *req) | 216 | static inline unsigned short req_get_ioprio(struct request *req) |
204 | { | 217 | { |
205 | return req->ioprio; | 218 | return req->ioprio; |
@@ -492,6 +505,7 @@ struct request_queue { | |||
492 | #define QUEUE_FLAG_WC 23 /* Write back caching */ | 505 | #define QUEUE_FLAG_WC 23 /* Write back caching */ |
493 | #define QUEUE_FLAG_FUA 24 /* device supports FUA writes */ | 506 | #define QUEUE_FLAG_FUA 24 /* device supports FUA writes */ |
494 | #define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueable */ | 507 | #define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueable */ |
508 | #define QUEUE_FLAG_DAX 26 /* device supports DAX */ | ||
495 | 509 | ||
496 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ | 510 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ |
497 | (1 << QUEUE_FLAG_STACKABLE) | \ | 511 | (1 << QUEUE_FLAG_STACKABLE) | \ |
@@ -581,6 +595,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) | |||
581 | #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) | 595 | #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) |
582 | #define blk_queue_secdiscard(q) (blk_queue_discard(q) && \ | 596 | #define blk_queue_secdiscard(q) (blk_queue_discard(q) && \ |
583 | test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags)) | 597 | test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags)) |
598 | #define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags) | ||
584 | 599 | ||
585 | #define blk_noretry_request(rq) \ | 600 | #define blk_noretry_request(rq) \ |
586 | ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ | 601 | ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ |
@@ -597,7 +612,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) | |||
597 | 612 | ||
598 | #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) | 613 | #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) |
599 | 614 | ||
600 | #define rq_data_dir(rq) ((int)((rq)->cmd_flags & 1)) | 615 | #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ) |
601 | 616 | ||
602 | /* | 617 | /* |
603 | * Driver can handle struct request, if it either has an old style | 618 | * Driver can handle struct request, if it either has an old style |
@@ -616,14 +631,14 @@ static inline unsigned int blk_queue_cluster(struct request_queue *q) | |||
616 | /* | 631 | /* |
617 | * We regard a request as sync, if either a read or a sync write | 632 | * We regard a request as sync, if either a read or a sync write |
618 | */ | 633 | */ |
619 | static inline bool rw_is_sync(unsigned int rw_flags) | 634 | static inline bool rw_is_sync(int op, unsigned int rw_flags) |
620 | { | 635 | { |
621 | return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC); | 636 | return op == REQ_OP_READ || (rw_flags & REQ_SYNC); |
622 | } | 637 | } |
623 | 638 | ||
624 | static inline bool rq_is_sync(struct request *rq) | 639 | static inline bool rq_is_sync(struct request *rq) |
625 | { | 640 | { |
626 | return rw_is_sync(rq->cmd_flags); | 641 | return rw_is_sync(req_op(rq), rq->cmd_flags); |
627 | } | 642 | } |
628 | 643 | ||
629 | static inline bool blk_rl_full(struct request_list *rl, bool sync) | 644 | static inline bool blk_rl_full(struct request_list *rl, bool sync) |
@@ -652,22 +667,25 @@ static inline bool rq_mergeable(struct request *rq) | |||
652 | if (rq->cmd_type != REQ_TYPE_FS) | 667 | if (rq->cmd_type != REQ_TYPE_FS) |
653 | return false; | 668 | return false; |
654 | 669 | ||
670 | if (req_op(rq) == REQ_OP_FLUSH) | ||
671 | return false; | ||
672 | |||
655 | if (rq->cmd_flags & REQ_NOMERGE_FLAGS) | 673 | if (rq->cmd_flags & REQ_NOMERGE_FLAGS) |
656 | return false; | 674 | return false; |
657 | 675 | ||
658 | return true; | 676 | return true; |
659 | } | 677 | } |
660 | 678 | ||
661 | static inline bool blk_check_merge_flags(unsigned int flags1, | 679 | static inline bool blk_check_merge_flags(unsigned int flags1, unsigned int op1, |
662 | unsigned int flags2) | 680 | unsigned int flags2, unsigned int op2) |
663 | { | 681 | { |
664 | if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD)) | 682 | if ((op1 == REQ_OP_DISCARD) != (op2 == REQ_OP_DISCARD)) |
665 | return false; | 683 | return false; |
666 | 684 | ||
667 | if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE)) | 685 | if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE)) |
668 | return false; | 686 | return false; |
669 | 687 | ||
670 | if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME)) | 688 | if ((op1 == REQ_OP_WRITE_SAME) != (op2 == REQ_OP_WRITE_SAME)) |
671 | return false; | 689 | return false; |
672 | 690 | ||
673 | return true; | 691 | return true; |
@@ -879,12 +897,12 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq) | |||
879 | } | 897 | } |
880 | 898 | ||
881 | static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, | 899 | static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, |
882 | unsigned int cmd_flags) | 900 | int op) |
883 | { | 901 | { |
884 | if (unlikely(cmd_flags & REQ_DISCARD)) | 902 | if (unlikely(op == REQ_OP_DISCARD)) |
885 | return min(q->limits.max_discard_sectors, UINT_MAX >> 9); | 903 | return min(q->limits.max_discard_sectors, UINT_MAX >> 9); |
886 | 904 | ||
887 | if (unlikely(cmd_flags & REQ_WRITE_SAME)) | 905 | if (unlikely(op == REQ_OP_WRITE_SAME)) |
888 | return q->limits.max_write_same_sectors; | 906 | return q->limits.max_write_same_sectors; |
889 | 907 | ||
890 | return q->limits.max_sectors; | 908 | return q->limits.max_sectors; |
@@ -904,18 +922,19 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q, | |||
904 | (offset & (q->limits.chunk_sectors - 1)); | 922 | (offset & (q->limits.chunk_sectors - 1)); |
905 | } | 923 | } |
906 | 924 | ||
907 | static inline unsigned int blk_rq_get_max_sectors(struct request *rq) | 925 | static inline unsigned int blk_rq_get_max_sectors(struct request *rq, |
926 | sector_t offset) | ||
908 | { | 927 | { |
909 | struct request_queue *q = rq->q; | 928 | struct request_queue *q = rq->q; |
910 | 929 | ||
911 | if (unlikely(rq->cmd_type != REQ_TYPE_FS)) | 930 | if (unlikely(rq->cmd_type != REQ_TYPE_FS)) |
912 | return q->limits.max_hw_sectors; | 931 | return q->limits.max_hw_sectors; |
913 | 932 | ||
914 | if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD)) | 933 | if (!q->limits.chunk_sectors || (req_op(rq) == REQ_OP_DISCARD)) |
915 | return blk_queue_get_max_sectors(q, rq->cmd_flags); | 934 | return blk_queue_get_max_sectors(q, req_op(rq)); |
916 | 935 | ||
917 | return min(blk_max_size_offset(q, blk_rq_pos(rq)), | 936 | return min(blk_max_size_offset(q, offset), |
918 | blk_queue_get_max_sectors(q, rq->cmd_flags)); | 937 | blk_queue_get_max_sectors(q, req_op(rq))); |
919 | } | 938 | } |
920 | 939 | ||
921 | static inline unsigned int blk_rq_count_bios(struct request *rq) | 940 | static inline unsigned int blk_rq_count_bios(struct request *rq) |
@@ -1141,7 +1160,8 @@ extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *); | |||
1141 | extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, | 1160 | extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, |
1142 | sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); | 1161 | sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); |
1143 | extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, | 1162 | extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, |
1144 | sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop); | 1163 | sector_t nr_sects, gfp_t gfp_mask, int op_flags, |
1164 | struct bio **biop); | ||
1145 | extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, | 1165 | extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, |
1146 | sector_t nr_sects, gfp_t gfp_mask, struct page *page); | 1166 | sector_t nr_sects, gfp_t gfp_mask, struct page *page); |
1147 | extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, | 1167 | extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, |
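With req_op() in place, a driver branches on the operation instead of testing REQ_DISCARD or REQ_WRITE_SAME bits in cmd_flags, and rq_data_dir() follows from the op rather than from bit 0. A hedged sketch of what a converted dispatch path might look like; the mydrv_*() helpers are hypothetical:

static int mydrv_rw(struct request *rq, int data_dir);
static int mydrv_discard(struct request *rq);
static int mydrv_flush(struct request *rq);

static int mydrv_handle(struct request *rq)
{
        switch (req_op(rq)) {
        case REQ_OP_FLUSH:
                return mydrv_flush(rq);
        case REQ_OP_DISCARD:
                return mydrv_discard(rq);
        case REQ_OP_READ:
        case REQ_OP_WRITE:
                /* rq_data_dir() is now derived from the op, not from bit 0 */
                return mydrv_rw(rq, rq_data_dir(rq));
        default:
                return -EIO;
        }
}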
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 0f3172b8b225..cceb72f9e29f 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h | |||
@@ -118,7 +118,7 @@ static inline int blk_cmd_buf_len(struct request *rq) | |||
118 | } | 118 | } |
119 | 119 | ||
120 | extern void blk_dump_cmd(char *buf, struct request *rq); | 120 | extern void blk_dump_cmd(char *buf, struct request *rq); |
121 | extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes); | 121 | extern void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes); |
122 | 122 | ||
123 | #endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ | 123 | #endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ |
124 | 124 | ||
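blk_fill_rwbs() now takes the operation as its own argument ahead of the remaining flags, so call sites pass bio_op()/req_op() alongside bi_rw/cmd_flags, as the tracepoint hunks further down show. A representative call, assuming the usual 8-byte rwbs scratch buffer used by the tracepoints:

char rwbs[8];

/* was: blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_iter.bi_size); */
blk_fill_rwbs(rwbs, bio_op(bio), bio->bi_rw, bio->bi_iter.bi_size);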
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 7e14e545c4b6..ebbacd14d450 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h | |||
@@ -187,12 +187,13 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags); | |||
187 | void free_buffer_head(struct buffer_head * bh); | 187 | void free_buffer_head(struct buffer_head * bh); |
188 | void unlock_buffer(struct buffer_head *bh); | 188 | void unlock_buffer(struct buffer_head *bh); |
189 | void __lock_buffer(struct buffer_head *bh); | 189 | void __lock_buffer(struct buffer_head *bh); |
190 | void ll_rw_block(int, int, struct buffer_head * bh[]); | 190 | void ll_rw_block(int, int, int, struct buffer_head * bh[]); |
191 | int sync_dirty_buffer(struct buffer_head *bh); | 191 | int sync_dirty_buffer(struct buffer_head *bh); |
192 | int __sync_dirty_buffer(struct buffer_head *bh, int rw); | 192 | int __sync_dirty_buffer(struct buffer_head *bh, int op_flags); |
193 | void write_dirty_buffer(struct buffer_head *bh, int rw); | 193 | void write_dirty_buffer(struct buffer_head *bh, int op_flags); |
194 | int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags); | 194 | int _submit_bh(int op, int op_flags, struct buffer_head *bh, |
195 | int submit_bh(int, struct buffer_head *); | 195 | unsigned long bio_flags); |
196 | int submit_bh(int, int, struct buffer_head *); | ||
196 | void write_boundary_block(struct block_device *bdev, | 197 | void write_boundary_block(struct block_device *bdev, |
197 | sector_t bblock, unsigned blocksize); | 198 | sector_t bblock, unsigned blocksize); |
198 | int bh_uptodate_or_lock(struct buffer_head *bh); | 199 | int bh_uptodate_or_lock(struct buffer_head *bh); |
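The buffer_head entry points gain the same op/flags split. A few before-and-after call shapes, sketched under the assumption of a mapped buffer_head bh; note that WRITE_SYNC becomes pure flags after the fs.h change below, so the write direction is carried by the op argument:

/* was: submit_bh(WRITE_SYNC, bh), with WRITE folded into WRITE_SYNC */
submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);

/* was: ll_rw_block(READ, 1, &bh); reads gain an explicit op plus flags */
ll_rw_block(REQ_OP_READ, 0, 1, &bh);

/* was: write_dirty_buffer(bh, WRITE_SYNC); the op stays implicit here,
 * only the meaning of the argument changes from rw to op_flags */
write_dirty_buffer(bh, WRITE_SYNC);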
diff --git a/include/linux/bvec.h b/include/linux/bvec.h new file mode 100644 index 000000000000..701b64a3b7c5 --- /dev/null +++ b/include/linux/bvec.h | |||
@@ -0,0 +1,96 @@ | |||
1 | /* | ||
2 | * bvec iterator | ||
3 | * | ||
4 | * Copyright (C) 2001 Ming Lei <ming.lei@canonical.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | * | ||
19 | */ | ||
20 | #ifndef __LINUX_BVEC_ITER_H | ||
21 | #define __LINUX_BVEC_ITER_H | ||
22 | |||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/bug.h> | ||
25 | |||
26 | /* | ||
27 | * was unsigned short, but we might as well be ready for > 64kB I/O pages | ||
28 | */ | ||
29 | struct bio_vec { | ||
30 | struct page *bv_page; | ||
31 | unsigned int bv_len; | ||
32 | unsigned int bv_offset; | ||
33 | }; | ||
34 | |||
35 | struct bvec_iter { | ||
36 | sector_t bi_sector; /* device address in 512 byte | ||
37 | sectors */ | ||
38 | unsigned int bi_size; /* residual I/O count */ | ||
39 | |||
40 | unsigned int bi_idx; /* current index into bvl_vec */ | ||
41 | |||
42 | unsigned int bi_bvec_done; /* number of bytes completed in | ||
43 | current bvec */ | ||
44 | }; | ||
45 | |||
46 | /* | ||
47 | * various member access, note that bio_data should of course not be used | ||
48 | * on highmem page vectors | ||
49 | */ | ||
50 | #define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx]) | ||
51 | |||
52 | #define bvec_iter_page(bvec, iter) \ | ||
53 | (__bvec_iter_bvec((bvec), (iter))->bv_page) | ||
54 | |||
55 | #define bvec_iter_len(bvec, iter) \ | ||
56 | min((iter).bi_size, \ | ||
57 | __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done) | ||
58 | |||
59 | #define bvec_iter_offset(bvec, iter) \ | ||
60 | (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done) | ||
61 | |||
62 | #define bvec_iter_bvec(bvec, iter) \ | ||
63 | ((struct bio_vec) { \ | ||
64 | .bv_page = bvec_iter_page((bvec), (iter)), \ | ||
65 | .bv_len = bvec_iter_len((bvec), (iter)), \ | ||
66 | .bv_offset = bvec_iter_offset((bvec), (iter)), \ | ||
67 | }) | ||
68 | |||
69 | static inline void bvec_iter_advance(const struct bio_vec *bv, | ||
70 | struct bvec_iter *iter, | ||
71 | unsigned bytes) | ||
72 | { | ||
73 | WARN_ONCE(bytes > iter->bi_size, | ||
74 | "Attempted to advance past end of bvec iter\n"); | ||
75 | |||
76 | while (bytes) { | ||
77 | unsigned len = min(bytes, bvec_iter_len(bv, *iter)); | ||
78 | |||
79 | bytes -= len; | ||
80 | iter->bi_size -= len; | ||
81 | iter->bi_bvec_done += len; | ||
82 | |||
83 | if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) { | ||
84 | iter->bi_bvec_done = 0; | ||
85 | iter->bi_idx++; | ||
86 | } | ||
87 | } | ||
88 | } | ||
89 | |||
90 | #define for_each_bvec(bvl, bio_vec, iter, start) \ | ||
91 | for (iter = (start); \ | ||
92 | (iter).bi_size && \ | ||
93 | ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \ | ||
94 | bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len)) | ||
95 | |||
96 | #endif /* __LINUX_BVEC_ITER_H */ | ||
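Since bvec.h is quoted here in full, the relocated iterator can be exercised directly. A small kernel-context sketch that walks every segment of a bio, assuming the usual bi_io_vec array backing bi_iter:

static unsigned int total_len(struct bio *bio)
{
        struct bio_vec bvl;
        struct bvec_iter iter;
        unsigned int bytes = 0;

        /* bvl is yielded by value, its length clamped to what is left of
         * iter.bi_size, so the sum comes out equal to bio->bi_iter.bi_size */
        for_each_bvec(bvl, bio->bi_io_vec, iter, bio->bi_iter)
                bytes += bvl.bv_len;

        return bytes;
}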
diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h index a68cbe59e6ad..b91b023deffb 100644 --- a/include/linux/dm-io.h +++ b/include/linux/dm-io.h | |||
@@ -57,7 +57,8 @@ struct dm_io_notify { | |||
57 | */ | 57 | */ |
58 | struct dm_io_client; | 58 | struct dm_io_client; |
59 | struct dm_io_request { | 59 | struct dm_io_request { |
60 | int bi_rw; /* READ|WRITE - not READA */ | 60 | int bi_op; /* REQ_OP */ |
61 | int bi_op_flags; /* rq_flag_bits */ | ||
61 | struct dm_io_memory mem; /* Memory to use for io */ | 62 | struct dm_io_memory mem; /* Memory to use for io */ |
62 | struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */ | 63 | struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */ |
63 | struct dm_io_client *client; /* Client memory handler */ | 64 | struct dm_io_client *client; /* Client memory handler */ |
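A hedged sketch of a dm-io submission with the split fields; bdev, sector, nr_sectors, data and client are caller-supplied, and everything besides bi_op/bi_op_flags is unchanged from the old interface:

struct dm_io_region region = {
        .bdev   = bdev,                 /* caller-supplied */
        .sector = sector,
        .count  = nr_sectors,
};
struct dm_io_request io_req = {
        .bi_op          = REQ_OP_WRITE, /* what to do */
        .bi_op_flags    = REQ_SYNC,     /* how to do it */
        .mem.type       = DM_IO_KMEM,
        .mem.ptr.addr   = data,         /* caller-supplied buffer */
        .notify.fn      = NULL,         /* NULL notify.fn means synchronous */
        .client         = client,       /* from dm_io_client_create() */
};

error = dm_io(&io_req, 1, &region, NULL);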
diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 638b324f0291..e7f358d2e5fc 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h | |||
@@ -16,7 +16,11 @@ typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *, | |||
16 | 16 | ||
17 | typedef void (elevator_merged_fn) (struct request_queue *, struct request *, int); | 17 | typedef void (elevator_merged_fn) (struct request_queue *, struct request *, int); |
18 | 18 | ||
19 | typedef int (elevator_allow_merge_fn) (struct request_queue *, struct request *, struct bio *); | 19 | typedef int (elevator_allow_bio_merge_fn) (struct request_queue *, |
20 | struct request *, struct bio *); | ||
21 | |||
22 | typedef int (elevator_allow_rq_merge_fn) (struct request_queue *, | ||
23 | struct request *, struct request *); | ||
20 | 24 | ||
21 | typedef void (elevator_bio_merged_fn) (struct request_queue *, | 25 | typedef void (elevator_bio_merged_fn) (struct request_queue *, |
22 | struct request *, struct bio *); | 26 | struct request *, struct bio *); |
@@ -26,7 +30,7 @@ typedef int (elevator_dispatch_fn) (struct request_queue *, int); | |||
26 | typedef void (elevator_add_req_fn) (struct request_queue *, struct request *); | 30 | typedef void (elevator_add_req_fn) (struct request_queue *, struct request *); |
27 | typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *); | 31 | typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *); |
28 | typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *); | 32 | typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *); |
29 | typedef int (elevator_may_queue_fn) (struct request_queue *, int); | 33 | typedef int (elevator_may_queue_fn) (struct request_queue *, int, int); |
30 | 34 | ||
31 | typedef void (elevator_init_icq_fn) (struct io_cq *); | 35 | typedef void (elevator_init_icq_fn) (struct io_cq *); |
32 | typedef void (elevator_exit_icq_fn) (struct io_cq *); | 36 | typedef void (elevator_exit_icq_fn) (struct io_cq *); |
@@ -46,7 +50,8 @@ struct elevator_ops | |||
46 | elevator_merge_fn *elevator_merge_fn; | 50 | elevator_merge_fn *elevator_merge_fn; |
47 | elevator_merged_fn *elevator_merged_fn; | 51 | elevator_merged_fn *elevator_merged_fn; |
48 | elevator_merge_req_fn *elevator_merge_req_fn; | 52 | elevator_merge_req_fn *elevator_merge_req_fn; |
49 | elevator_allow_merge_fn *elevator_allow_merge_fn; | 53 | elevator_allow_bio_merge_fn *elevator_allow_bio_merge_fn; |
54 | elevator_allow_rq_merge_fn *elevator_allow_rq_merge_fn; | ||
50 | elevator_bio_merged_fn *elevator_bio_merged_fn; | 55 | elevator_bio_merged_fn *elevator_bio_merged_fn; |
51 | 56 | ||
52 | elevator_dispatch_fn *elevator_dispatch_fn; | 57 | elevator_dispatch_fn *elevator_dispatch_fn; |
@@ -134,7 +139,7 @@ extern struct request *elv_former_request(struct request_queue *, struct request | |||
134 | extern struct request *elv_latter_request(struct request_queue *, struct request *); | 139 | extern struct request *elv_latter_request(struct request_queue *, struct request *); |
135 | extern int elv_register_queue(struct request_queue *q); | 140 | extern int elv_register_queue(struct request_queue *q); |
136 | extern void elv_unregister_queue(struct request_queue *q); | 141 | extern void elv_unregister_queue(struct request_queue *q); |
137 | extern int elv_may_queue(struct request_queue *, int); | 142 | extern int elv_may_queue(struct request_queue *, int, int); |
138 | extern void elv_completed_request(struct request_queue *, struct request *); | 143 | extern void elv_completed_request(struct request_queue *, struct request *); |
139 | extern int elv_set_request(struct request_queue *q, struct request *rq, | 144 | extern int elv_set_request(struct request_queue *q, struct request *rq, |
140 | struct bio *bio, gfp_t gfp_mask); | 145 | struct bio *bio, gfp_t gfp_mask); |
@@ -157,7 +162,7 @@ extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t); | |||
157 | extern int elevator_init(struct request_queue *, char *); | 162 | extern int elevator_init(struct request_queue *, char *); |
158 | extern void elevator_exit(struct elevator_queue *); | 163 | extern void elevator_exit(struct elevator_queue *); |
159 | extern int elevator_change(struct request_queue *, const char *); | 164 | extern int elevator_change(struct request_queue *, const char *); |
160 | extern bool elv_rq_merge_ok(struct request *, struct bio *); | 165 | extern bool elv_bio_merge_ok(struct request *, struct bio *); |
161 | extern struct elevator_queue *elevator_alloc(struct request_queue *, | 166 | extern struct elevator_queue *elevator_alloc(struct request_queue *, |
162 | struct elevator_type *); | 167 | struct elevator_type *); |
163 | 168 | ||
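A hypothetical scheduler wiring up both of the split merge callbacks; the trivial bodies simply allow every merge, as a placeholder:

static int my_allow_bio_merge(struct request_queue *q, struct request *rq,
                              struct bio *bio)
{
        return 1;       /* always allow bio-into-request merges */
}

static int my_allow_rq_merge(struct request_queue *q, struct request *rq,
                             struct request *next)
{
        return 1;       /* always allow request-with-request merges */
}

static struct elevator_ops my_elv_ops = {
        .elevator_allow_bio_merge_fn = my_allow_bio_merge,
        .elevator_allow_rq_merge_fn  = my_allow_rq_merge,
        /* dispatch/add/completed hooks as before */
};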
diff --git a/include/linux/fs.h b/include/linux/fs.h index dd288148a6b1..183024525d40 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -152,9 +152,10 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, | |||
152 | #define CHECK_IOVEC_ONLY -1 | 152 | #define CHECK_IOVEC_ONLY -1 |
153 | 153 | ||
154 | /* | 154 | /* |
155 | * The below are the various read and write types that we support. Some of | 155 | * The below are the various read and write flags that we support. Some of |
156 | * them include behavioral modifiers that send information down to the | 156 | * them include behavioral modifiers that send information down to the |
157 | * block layer and IO scheduler. Terminology: | 157 | * block layer and IO scheduler. They should be used along with a req_op. |
158 | * Terminology: | ||
158 | * | 159 | * |
159 | * The block layer uses device plugging to defer IO a little bit, in | 160 | * The block layer uses device plugging to defer IO a little bit, in |
160 | * the hope that we will see more IO very shortly. This increases | 161 | * the hope that we will see more IO very shortly. This increases |
@@ -193,19 +194,19 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, | |||
193 | * non-volatile media on completion. | 194 | * non-volatile media on completion. |
194 | * | 195 | * |
195 | */ | 196 | */ |
196 | #define RW_MASK REQ_WRITE | 197 | #define RW_MASK REQ_OP_WRITE |
197 | #define RWA_MASK REQ_RAHEAD | 198 | #define RWA_MASK REQ_RAHEAD |
198 | 199 | ||
199 | #define READ 0 | 200 | #define READ REQ_OP_READ |
200 | #define WRITE RW_MASK | 201 | #define WRITE RW_MASK |
201 | #define READA RWA_MASK | 202 | #define READA RWA_MASK |
202 | 203 | ||
203 | #define READ_SYNC (READ | REQ_SYNC) | 204 | #define READ_SYNC REQ_SYNC |
204 | #define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE) | 205 | #define WRITE_SYNC (REQ_SYNC | REQ_NOIDLE) |
205 | #define WRITE_ODIRECT (WRITE | REQ_SYNC) | 206 | #define WRITE_ODIRECT REQ_SYNC |
206 | #define WRITE_FLUSH (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH) | 207 | #define WRITE_FLUSH (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH) |
207 | #define WRITE_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA) | 208 | #define WRITE_FUA (REQ_SYNC | REQ_NOIDLE | REQ_FUA) |
208 | #define WRITE_FLUSH_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA) | 209 | #define WRITE_FLUSH_FUA (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH | REQ_FUA) |
209 | 210 | ||
210 | /* | 211 | /* |
211 | * Attribute flags. These should be or-ed together to figure out what | 212 | * Attribute flags. These should be or-ed together to figure out what |
@@ -2464,15 +2465,29 @@ extern void make_bad_inode(struct inode *); | |||
2464 | extern bool is_bad_inode(struct inode *); | 2465 | extern bool is_bad_inode(struct inode *); |
2465 | 2466 | ||
2466 | #ifdef CONFIG_BLOCK | 2467 | #ifdef CONFIG_BLOCK |
2468 | static inline bool op_is_write(unsigned int op) | ||
2469 | { | ||
2470 | return op != REQ_OP_READ; | ||
2471 | } | ||
2472 | |||
2467 | /* | 2473 | /* |
2468 | * return READ, READA, or WRITE | 2474 | * return READ, READA, or WRITE |
2469 | */ | 2475 | */ |
2470 | #define bio_rw(bio) ((bio)->bi_rw & (RW_MASK | RWA_MASK)) | 2476 | static inline int bio_rw(struct bio *bio) |
2477 | { | ||
2478 | if (op_is_write(bio_op(bio))) | ||
2479 | return WRITE; | ||
2480 | |||
2481 | return bio->bi_rw & RWA_MASK; | ||
2482 | } | ||
2471 | 2483 | ||
2472 | /* | 2484 | /* |
2473 | * return data direction, READ or WRITE | 2485 | * return data direction, READ or WRITE |
2474 | */ | 2486 | */ |
2475 | #define bio_data_dir(bio) ((bio)->bi_rw & 1) | 2487 | static inline int bio_data_dir(struct bio *bio) |
2488 | { | ||
2489 | return op_is_write(bio_op(bio)) ? WRITE : READ; | ||
2490 | } | ||
2476 | 2491 | ||
2477 | extern void check_disk_size_change(struct gendisk *disk, | 2492 | extern void check_disk_size_change(struct gendisk *disk, |
2478 | struct block_device *bdev); | 2493 | struct block_device *bdev); |
@@ -2747,7 +2762,7 @@ static inline void remove_inode_hash(struct inode *inode) | |||
2747 | extern void inode_sb_list_add(struct inode *inode); | 2762 | extern void inode_sb_list_add(struct inode *inode); |
2748 | 2763 | ||
2749 | #ifdef CONFIG_BLOCK | 2764 | #ifdef CONFIG_BLOCK |
2750 | extern blk_qc_t submit_bio(int, struct bio *); | 2765 | extern blk_qc_t submit_bio(struct bio *); |
2751 | extern int bdev_read_only(struct block_device *); | 2766 | extern int bdev_read_only(struct block_device *); |
2752 | #endif | 2767 | #endif |
2753 | extern int set_blocksize(struct block_device *, int); | 2768 | extern int set_blocksize(struct block_device *, int); |
@@ -2802,7 +2817,7 @@ extern int generic_file_open(struct inode * inode, struct file * filp); | |||
2802 | extern int nonseekable_open(struct inode * inode, struct file * filp); | 2817 | extern int nonseekable_open(struct inode * inode, struct file * filp); |
2803 | 2818 | ||
2804 | #ifdef CONFIG_BLOCK | 2819 | #ifdef CONFIG_BLOCK |
2805 | typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode, | 2820 | typedef void (dio_submit_t)(struct bio *bio, struct inode *inode, |
2806 | loff_t file_offset); | 2821 | loff_t file_offset); |
2807 | 2822 | ||
2808 | enum { | 2823 | enum { |
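The fs.h changes are what let submit_bio() drop its rw argument: READ and WRITE become op values, and the WRITE_* helpers become pure flag sets. A minimal sketch of the resulting submission idiom, assuming a bio already built by the caller:

bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
submit_bio(bio);                /* was: submit_bio(WRITE_SYNC, bio) */

/* direction queries now go through the op */
WARN_ON(bio_data_dir(bio) != WRITE);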
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h index 981acf74b14f..65673d8b81ac 100644 --- a/include/trace/events/bcache.h +++ b/include/trace/events/bcache.h | |||
@@ -27,7 +27,8 @@ DECLARE_EVENT_CLASS(bcache_request, | |||
27 | __entry->sector = bio->bi_iter.bi_sector; | 27 | __entry->sector = bio->bi_iter.bi_sector; |
28 | __entry->orig_sector = bio->bi_iter.bi_sector - 16; | 28 | __entry->orig_sector = bio->bi_iter.bi_sector - 16; |
29 | __entry->nr_sector = bio->bi_iter.bi_size >> 9; | 29 | __entry->nr_sector = bio->bi_iter.bi_size >> 9; |
30 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); | 30 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, |
31 | bio->bi_iter.bi_size); | ||
31 | ), | 32 | ), |
32 | 33 | ||
33 | TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)", | 34 | TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)", |
@@ -101,7 +102,8 @@ DECLARE_EVENT_CLASS(bcache_bio, | |||
101 | __entry->dev = bio->bi_bdev->bd_dev; | 102 | __entry->dev = bio->bi_bdev->bd_dev; |
102 | __entry->sector = bio->bi_iter.bi_sector; | 103 | __entry->sector = bio->bi_iter.bi_sector; |
103 | __entry->nr_sector = bio->bi_iter.bi_size >> 9; | 104 | __entry->nr_sector = bio->bi_iter.bi_size >> 9; |
104 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); | 105 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, |
106 | bio->bi_iter.bi_size); | ||
105 | ), | 107 | ), |
106 | 108 | ||
107 | TP_printk("%d,%d %s %llu + %u", | 109 | TP_printk("%d,%d %s %llu + %u", |
@@ -136,7 +138,8 @@ TRACE_EVENT(bcache_read, | |||
136 | __entry->dev = bio->bi_bdev->bd_dev; | 138 | __entry->dev = bio->bi_bdev->bd_dev; |
137 | __entry->sector = bio->bi_iter.bi_sector; | 139 | __entry->sector = bio->bi_iter.bi_sector; |
138 | __entry->nr_sector = bio->bi_iter.bi_size >> 9; | 140 | __entry->nr_sector = bio->bi_iter.bi_size >> 9; |
139 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); | 141 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, |
142 | bio->bi_iter.bi_size); | ||
140 | __entry->cache_hit = hit; | 143 | __entry->cache_hit = hit; |
141 | __entry->bypass = bypass; | 144 | __entry->bypass = bypass; |
142 | ), | 145 | ), |
@@ -167,7 +170,8 @@ TRACE_EVENT(bcache_write, | |||
167 | __entry->inode = inode; | 170 | __entry->inode = inode; |
168 | __entry->sector = bio->bi_iter.bi_sector; | 171 | __entry->sector = bio->bi_iter.bi_sector; |
169 | __entry->nr_sector = bio->bi_iter.bi_size >> 9; | 172 | __entry->nr_sector = bio->bi_iter.bi_size >> 9; |
170 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); | 173 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, |
174 | bio->bi_iter.bi_size); | ||
171 | __entry->writeback = writeback; | 175 | __entry->writeback = writeback; |
172 | __entry->bypass = bypass; | 176 | __entry->bypass = bypass; |
173 | ), | 177 | ), |
diff --git a/include/trace/events/block.h b/include/trace/events/block.h index e8a5eca1dbe5..5a2a7592068f 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h | |||
@@ -84,7 +84,8 @@ DECLARE_EVENT_CLASS(block_rq_with_error, | |||
84 | 0 : blk_rq_sectors(rq); | 84 | 0 : blk_rq_sectors(rq); |
85 | __entry->errors = rq->errors; | 85 | __entry->errors = rq->errors; |
86 | 86 | ||
87 | blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); | 87 | blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags, |
88 | blk_rq_bytes(rq)); | ||
88 | blk_dump_cmd(__get_str(cmd), rq); | 89 | blk_dump_cmd(__get_str(cmd), rq); |
89 | ), | 90 | ), |
90 | 91 | ||
@@ -162,7 +163,7 @@ TRACE_EVENT(block_rq_complete, | |||
162 | __entry->nr_sector = nr_bytes >> 9; | 163 | __entry->nr_sector = nr_bytes >> 9; |
163 | __entry->errors = rq->errors; | 164 | __entry->errors = rq->errors; |
164 | 165 | ||
165 | blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes); | 166 | blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags, nr_bytes); |
166 | blk_dump_cmd(__get_str(cmd), rq); | 167 | blk_dump_cmd(__get_str(cmd), rq); |
167 | ), | 168 | ), |
168 | 169 | ||
@@ -198,7 +199,8 @@ DECLARE_EVENT_CLASS(block_rq, | |||
198 | __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? | 199 | __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? |
199 | blk_rq_bytes(rq) : 0; | 200 | blk_rq_bytes(rq) : 0; |
200 | 201 | ||
201 | blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); | 202 | blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags, |
203 | blk_rq_bytes(rq)); | ||
202 | blk_dump_cmd(__get_str(cmd), rq); | 204 | blk_dump_cmd(__get_str(cmd), rq); |
203 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | 205 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); |
204 | ), | 206 | ), |
@@ -272,7 +274,8 @@ TRACE_EVENT(block_bio_bounce, | |||
272 | bio->bi_bdev->bd_dev : 0; | 274 | bio->bi_bdev->bd_dev : 0; |
273 | __entry->sector = bio->bi_iter.bi_sector; | 275 | __entry->sector = bio->bi_iter.bi_sector; |
274 | __entry->nr_sector = bio_sectors(bio); | 276 | __entry->nr_sector = bio_sectors(bio); |
275 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); | 277 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, |
278 | bio->bi_iter.bi_size); | ||
276 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | 279 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); |
277 | ), | 280 | ), |
278 | 281 | ||
@@ -310,7 +313,8 @@ TRACE_EVENT(block_bio_complete, | |||
310 | __entry->sector = bio->bi_iter.bi_sector; | 313 | __entry->sector = bio->bi_iter.bi_sector; |
311 | __entry->nr_sector = bio_sectors(bio); | 314 | __entry->nr_sector = bio_sectors(bio); |
312 | __entry->error = error; | 315 | __entry->error = error; |
313 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); | 316 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, |
317 | bio->bi_iter.bi_size); | ||
314 | ), | 318 | ), |
315 | 319 | ||
316 | TP_printk("%d,%d %s %llu + %u [%d]", | 320 | TP_printk("%d,%d %s %llu + %u [%d]", |
@@ -337,7 +341,8 @@ DECLARE_EVENT_CLASS(block_bio_merge, | |||
337 | __entry->dev = bio->bi_bdev->bd_dev; | 341 | __entry->dev = bio->bi_bdev->bd_dev; |
338 | __entry->sector = bio->bi_iter.bi_sector; | 342 | __entry->sector = bio->bi_iter.bi_sector; |
339 | __entry->nr_sector = bio_sectors(bio); | 343 | __entry->nr_sector = bio_sectors(bio); |
340 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); | 344 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, |
345 | bio->bi_iter.bi_size); | ||
341 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | 346 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); |
342 | ), | 347 | ), |
343 | 348 | ||
@@ -404,7 +409,8 @@ TRACE_EVENT(block_bio_queue, | |||
404 | __entry->dev = bio->bi_bdev->bd_dev; | 409 | __entry->dev = bio->bi_bdev->bd_dev; |
405 | __entry->sector = bio->bi_iter.bi_sector; | 410 | __entry->sector = bio->bi_iter.bi_sector; |
406 | __entry->nr_sector = bio_sectors(bio); | 411 | __entry->nr_sector = bio_sectors(bio); |
407 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); | 412 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, |
413 | bio->bi_iter.bi_size); | ||
408 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | 414 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); |
409 | ), | 415 | ), |
410 | 416 | ||
@@ -432,7 +438,7 @@ DECLARE_EVENT_CLASS(block_get_rq, | |||
432 | __entry->dev = bio ? bio->bi_bdev->bd_dev : 0; | 438 | __entry->dev = bio ? bio->bi_bdev->bd_dev : 0; |
433 | __entry->sector = bio ? bio->bi_iter.bi_sector : 0; | 439 | __entry->sector = bio ? bio->bi_iter.bi_sector : 0; |
434 | __entry->nr_sector = bio ? bio_sectors(bio) : 0; | 440 | __entry->nr_sector = bio ? bio_sectors(bio) : 0; |
435 | blk_fill_rwbs(__entry->rwbs, | 441 | blk_fill_rwbs(__entry->rwbs, bio ? bio_op(bio) : 0, |
436 | bio ? bio->bi_rw : 0, __entry->nr_sector); | 442 | bio ? bio->bi_rw : 0, __entry->nr_sector); |
437 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | 443 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); |
438 | ), | 444 | ), |
@@ -567,7 +573,8 @@ TRACE_EVENT(block_split, | |||
567 | __entry->dev = bio->bi_bdev->bd_dev; | 573 | __entry->dev = bio->bi_bdev->bd_dev; |
568 | __entry->sector = bio->bi_iter.bi_sector; | 574 | __entry->sector = bio->bi_iter.bi_sector; |
569 | __entry->new_sector = new_sector; | 575 | __entry->new_sector = new_sector; |
570 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); | 576 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, |
577 | bio->bi_iter.bi_size); | ||
571 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | 578 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); |
572 | ), | 579 | ), |
573 | 580 | ||
@@ -610,7 +617,8 @@ TRACE_EVENT(block_bio_remap, | |||
610 | __entry->nr_sector = bio_sectors(bio); | 617 | __entry->nr_sector = bio_sectors(bio); |
611 | __entry->old_dev = dev; | 618 | __entry->old_dev = dev; |
612 | __entry->old_sector = from; | 619 | __entry->old_sector = from; |
613 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); | 620 | blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw, |
621 | bio->bi_iter.bi_size); | ||
614 | ), | 622 | ), |
615 | 623 | ||
616 | TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", | 624 | TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", |
@@ -656,7 +664,8 @@ TRACE_EVENT(block_rq_remap, | |||
656 | __entry->old_dev = dev; | 664 | __entry->old_dev = dev; |
657 | __entry->old_sector = from; | 665 | __entry->old_sector = from; |
658 | __entry->nr_bios = blk_rq_count_bios(rq); | 666 | __entry->nr_bios = blk_rq_count_bios(rq); |
659 | blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); | 667 | blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags, |
668 | blk_rq_bytes(rq)); | ||
660 | ), | 669 | ), |
661 | 670 | ||
662 | TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u", | 671 | TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u", |
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 3a09bb4dc3b2..878963a1f058 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h | |||
@@ -31,10 +31,9 @@ TRACE_DEFINE_ENUM(BG_GC); | |||
31 | TRACE_DEFINE_ENUM(LFS); | 31 | TRACE_DEFINE_ENUM(LFS); |
32 | TRACE_DEFINE_ENUM(SSR); | 32 | TRACE_DEFINE_ENUM(SSR); |
33 | TRACE_DEFINE_ENUM(__REQ_RAHEAD); | 33 | TRACE_DEFINE_ENUM(__REQ_RAHEAD); |
34 | TRACE_DEFINE_ENUM(__REQ_WRITE); | ||
35 | TRACE_DEFINE_ENUM(__REQ_SYNC); | 34 | TRACE_DEFINE_ENUM(__REQ_SYNC); |
36 | TRACE_DEFINE_ENUM(__REQ_NOIDLE); | 35 | TRACE_DEFINE_ENUM(__REQ_NOIDLE); |
37 | TRACE_DEFINE_ENUM(__REQ_FLUSH); | 36 | TRACE_DEFINE_ENUM(__REQ_PREFLUSH); |
38 | TRACE_DEFINE_ENUM(__REQ_FUA); | 37 | TRACE_DEFINE_ENUM(__REQ_FUA); |
39 | TRACE_DEFINE_ENUM(__REQ_PRIO); | 38 | TRACE_DEFINE_ENUM(__REQ_PRIO); |
40 | TRACE_DEFINE_ENUM(__REQ_META); | 39 | TRACE_DEFINE_ENUM(__REQ_META); |
@@ -56,17 +55,21 @@ TRACE_DEFINE_ENUM(CP_DISCARD); | |||
56 | { IPU, "IN-PLACE" }, \ | 55 | { IPU, "IN-PLACE" }, \ |
57 | { OPU, "OUT-OF-PLACE" }) | 56 | { OPU, "OUT-OF-PLACE" }) |
58 | 57 | ||
59 | #define F2FS_BIO_MASK(t) (t & (READA | WRITE_FLUSH_FUA)) | 58 | #define F2FS_BIO_FLAG_MASK(t) (t & (READA | WRITE_FLUSH_FUA)) |
60 | #define F2FS_BIO_EXTRA_MASK(t) (t & (REQ_META | REQ_PRIO)) | 59 | #define F2FS_BIO_EXTRA_MASK(t) (t & (REQ_META | REQ_PRIO)) |
61 | 60 | ||
62 | #define show_bio_type(type) show_bio_base(type), show_bio_extra(type) | 61 | #define show_bio_type(op, op_flags) show_bio_op(op), \ |
62 | show_bio_op_flags(op_flags), show_bio_extra(op_flags) | ||
63 | 63 | ||
64 | #define show_bio_base(type) \ | 64 | #define show_bio_op(op) \ |
65 | __print_symbolic(F2FS_BIO_MASK(type), \ | 65 | __print_symbolic(op, \ |
66 | { READ, "READ" }, \ | 66 | { READ, "READ" }, \ |
67 | { WRITE, "WRITE" }) | ||
68 | |||
69 | #define show_bio_op_flags(flags) \ | ||
70 | __print_symbolic(F2FS_BIO_FLAG_MASK(flags), \ | ||
67 | { READA, "READAHEAD" }, \ | 71 | { READA, "READAHEAD" }, \ |
68 | { READ_SYNC, "READ_SYNC" }, \ | 72 | { READ_SYNC, "READ_SYNC" }, \ |
69 | { WRITE, "WRITE" }, \ | ||
70 | { WRITE_SYNC, "WRITE_SYNC" }, \ | 73 | { WRITE_SYNC, "WRITE_SYNC" }, \ |
71 | { WRITE_FLUSH, "WRITE_FLUSH" }, \ | 74 | { WRITE_FLUSH, "WRITE_FLUSH" }, \ |
72 | { WRITE_FUA, "WRITE_FUA" }, \ | 75 | { WRITE_FUA, "WRITE_FUA" }, \ |
@@ -734,7 +737,8 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio, | |||
734 | __field(pgoff_t, index) | 737 | __field(pgoff_t, index) |
735 | __field(block_t, old_blkaddr) | 738 | __field(block_t, old_blkaddr) |
736 | __field(block_t, new_blkaddr) | 739 | __field(block_t, new_blkaddr) |
737 | __field(int, rw) | 740 | __field(int, op) |
741 | __field(int, op_flags) | ||
738 | __field(int, type) | 742 | __field(int, type) |
739 | ), | 743 | ), |
740 | 744 | ||
@@ -744,17 +748,18 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio, | |||
744 | __entry->index = page->index; | 748 | __entry->index = page->index; |
745 | __entry->old_blkaddr = fio->old_blkaddr; | 749 | __entry->old_blkaddr = fio->old_blkaddr; |
746 | __entry->new_blkaddr = fio->new_blkaddr; | 750 | __entry->new_blkaddr = fio->new_blkaddr; |
747 | __entry->rw = fio->rw; | 751 | __entry->op = fio->op; |
752 | __entry->op_flags = fio->op_flags; | ||
748 | __entry->type = fio->type; | 753 | __entry->type = fio->type; |
749 | ), | 754 | ), |
750 | 755 | ||
751 | TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, " | 756 | TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, " |
752 | "oldaddr = 0x%llx, newaddr = 0x%llx rw = %s%s, type = %s", | 757 | "oldaddr = 0x%llx, newaddr = 0x%llx rw = %s%si%s, type = %s", |
753 | show_dev_ino(__entry), | 758 | show_dev_ino(__entry), |
754 | (unsigned long)__entry->index, | 759 | (unsigned long)__entry->index, |
755 | (unsigned long long)__entry->old_blkaddr, | 760 | (unsigned long long)__entry->old_blkaddr, |
756 | (unsigned long long)__entry->new_blkaddr, | 761 | (unsigned long long)__entry->new_blkaddr, |
757 | show_bio_type(__entry->rw), | 762 | show_bio_type(__entry->op, __entry->op_flags), |
758 | show_block_type(__entry->type)) | 763 | show_block_type(__entry->type)) |
759 | ); | 764 | ); |
760 | 765 | ||
@@ -785,7 +790,8 @@ DECLARE_EVENT_CLASS(f2fs__submit_bio, | |||
785 | 790 | ||
786 | TP_STRUCT__entry( | 791 | TP_STRUCT__entry( |
787 | __field(dev_t, dev) | 792 | __field(dev_t, dev) |
788 | __field(int, rw) | 793 | __field(int, op) |
794 | __field(int, op_flags) | ||
789 | __field(int, type) | 795 | __field(int, type) |
790 | __field(sector_t, sector) | 796 | __field(sector_t, sector) |
791 | __field(unsigned int, size) | 797 | __field(unsigned int, size) |
@@ -793,15 +799,16 @@ DECLARE_EVENT_CLASS(f2fs__submit_bio, | |||
793 | 799 | ||
794 | TP_fast_assign( | 800 | TP_fast_assign( |
795 | __entry->dev = sb->s_dev; | 801 | __entry->dev = sb->s_dev; |
796 | __entry->rw = fio->rw; | 802 | __entry->op = fio->op; |
803 | __entry->op_flags = fio->op_flags; | ||
797 | __entry->type = fio->type; | 804 | __entry->type = fio->type; |
798 | __entry->sector = bio->bi_iter.bi_sector; | 805 | __entry->sector = bio->bi_iter.bi_sector; |
799 | __entry->size = bio->bi_iter.bi_size; | 806 | __entry->size = bio->bi_iter.bi_size; |
800 | ), | 807 | ), |
801 | 808 | ||
802 | TP_printk("dev = (%d,%d), %s%s, %s, sector = %lld, size = %u", | 809 | TP_printk("dev = (%d,%d), %s%s%s, %s, sector = %lld, size = %u", |
803 | show_dev(__entry), | 810 | show_dev(__entry), |
804 | show_bio_type(__entry->rw), | 811 | show_bio_type(__entry->op, __entry->op_flags), |
805 | show_block_type(__entry->type), | 812 | show_block_type(__entry->type), |
806 | (unsigned long long)__entry->sector, | 813 | (unsigned long long)__entry->sector, |
807 | __entry->size) | 814 | __entry->size) |
diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 160e1006640d..c1aaac431055 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c | |||
@@ -261,7 +261,7 @@ static void hib_end_io(struct bio *bio) | |||
261 | bio_put(bio); | 261 | bio_put(bio); |
262 | } | 262 | } |
263 | 263 | ||
264 | static int hib_submit_io(int rw, pgoff_t page_off, void *addr, | 264 | static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr, |
265 | struct hib_bio_batch *hb) | 265 | struct hib_bio_batch *hb) |
266 | { | 266 | { |
267 | struct page *page = virt_to_page(addr); | 267 | struct page *page = virt_to_page(addr); |
@@ -271,6 +271,7 @@ static int hib_submit_io(int rw, pgoff_t page_off, void *addr, | |||
271 | bio = bio_alloc(__GFP_RECLAIM | __GFP_HIGH, 1); | 271 | bio = bio_alloc(__GFP_RECLAIM | __GFP_HIGH, 1); |
272 | bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9); | 272 | bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9); |
273 | bio->bi_bdev = hib_resume_bdev; | 273 | bio->bi_bdev = hib_resume_bdev; |
274 | bio_set_op_attrs(bio, op, op_flags); | ||
274 | 275 | ||
275 | if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { | 276 | if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { |
276 | printk(KERN_ERR "PM: Adding page to bio failed at %llu\n", | 277 | printk(KERN_ERR "PM: Adding page to bio failed at %llu\n", |
@@ -283,9 +284,9 @@ static int hib_submit_io(int rw, pgoff_t page_off, void *addr, | |||
283 | bio->bi_end_io = hib_end_io; | 284 | bio->bi_end_io = hib_end_io; |
284 | bio->bi_private = hb; | 285 | bio->bi_private = hb; |
285 | atomic_inc(&hb->count); | 286 | atomic_inc(&hb->count); |
286 | submit_bio(rw, bio); | 287 | submit_bio(bio); |
287 | } else { | 288 | } else { |
288 | error = submit_bio_wait(rw, bio); | 289 | error = submit_bio_wait(bio); |
289 | bio_put(bio); | 290 | bio_put(bio); |
290 | } | 291 | } |
291 | 292 | ||
@@ -306,7 +307,8 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags) | |||
306 | { | 307 | { |
307 | int error; | 308 | int error; |
308 | 309 | ||
309 | hib_submit_io(READ_SYNC, swsusp_resume_block, swsusp_header, NULL); | 310 | hib_submit_io(REQ_OP_READ, READ_SYNC, swsusp_resume_block, |
311 | swsusp_header, NULL); | ||
310 | if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) || | 312 | if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) || |
311 | !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) { | 313 | !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) { |
312 | memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10); | 314 | memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10); |
@@ -315,8 +317,8 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags) | |||
315 | swsusp_header->flags = flags; | 317 | swsusp_header->flags = flags; |
316 | if (flags & SF_CRC32_MODE) | 318 | if (flags & SF_CRC32_MODE) |
317 | swsusp_header->crc32 = handle->crc32; | 319 | swsusp_header->crc32 = handle->crc32; |
318 | error = hib_submit_io(WRITE_SYNC, swsusp_resume_block, | 320 | error = hib_submit_io(REQ_OP_WRITE, WRITE_SYNC, |
319 | swsusp_header, NULL); | 321 | swsusp_resume_block, swsusp_header, NULL); |
320 | } else { | 322 | } else { |
321 | printk(KERN_ERR "PM: Swap header not found!\n"); | 323 | printk(KERN_ERR "PM: Swap header not found!\n"); |
322 | error = -ENODEV; | 324 | error = -ENODEV; |
@@ -389,7 +391,7 @@ static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb) | |||
389 | } else { | 391 | } else { |
390 | src = buf; | 392 | src = buf; |
391 | } | 393 | } |
392 | return hib_submit_io(WRITE_SYNC, offset, src, hb); | 394 | return hib_submit_io(REQ_OP_WRITE, WRITE_SYNC, offset, src, hb); |
393 | } | 395 | } |
394 | 396 | ||
395 | static void release_swap_writer(struct swap_map_handle *handle) | 397 | static void release_swap_writer(struct swap_map_handle *handle) |
@@ -992,7 +994,8 @@ static int get_swap_reader(struct swap_map_handle *handle, | |||
992 | return -ENOMEM; | 994 | return -ENOMEM; |
993 | } | 995 | } |
994 | 996 | ||
995 | error = hib_submit_io(READ_SYNC, offset, tmp->map, NULL); | 997 | error = hib_submit_io(REQ_OP_READ, READ_SYNC, offset, |
998 | tmp->map, NULL); | ||
996 | if (error) { | 999 | if (error) { |
997 | release_swap_reader(handle); | 1000 | release_swap_reader(handle); |
998 | return error; | 1001 | return error; |
@@ -1016,7 +1019,7 @@ static int swap_read_page(struct swap_map_handle *handle, void *buf, | |||
1016 | offset = handle->cur->entries[handle->k]; | 1019 | offset = handle->cur->entries[handle->k]; |
1017 | if (!offset) | 1020 | if (!offset) |
1018 | return -EFAULT; | 1021 | return -EFAULT; |
1019 | error = hib_submit_io(READ_SYNC, offset, buf, hb); | 1022 | error = hib_submit_io(REQ_OP_READ, READ_SYNC, offset, buf, hb); |
1020 | if (error) | 1023 | if (error) |
1021 | return error; | 1024 | return error; |
1022 | if (++handle->k >= MAP_PAGE_ENTRIES) { | 1025 | if (++handle->k >= MAP_PAGE_ENTRIES) { |
@@ -1525,7 +1528,8 @@ int swsusp_check(void) | |||
1525 | if (!IS_ERR(hib_resume_bdev)) { | 1528 | if (!IS_ERR(hib_resume_bdev)) { |
1526 | set_blocksize(hib_resume_bdev, PAGE_SIZE); | 1529 | set_blocksize(hib_resume_bdev, PAGE_SIZE); |
1527 | clear_page(swsusp_header); | 1530 | clear_page(swsusp_header); |
1528 | error = hib_submit_io(READ_SYNC, swsusp_resume_block, | 1531 | error = hib_submit_io(REQ_OP_READ, READ_SYNC, |
1532 | swsusp_resume_block, | ||
1529 | swsusp_header, NULL); | 1533 | swsusp_header, NULL); |
1530 | if (error) | 1534 | if (error) |
1531 | goto put; | 1535 | goto put; |
@@ -1533,7 +1537,8 @@ int swsusp_check(void) | |||
1533 | if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) { | 1537 | if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) { |
1534 | memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10); | 1538 | memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10); |
1535 | /* Reset swap signature now */ | 1539 | /* Reset swap signature now */ |
1536 | error = hib_submit_io(WRITE_SYNC, swsusp_resume_block, | 1540 | error = hib_submit_io(REQ_OP_WRITE, WRITE_SYNC, |
1541 | swsusp_resume_block, | ||
1537 | swsusp_header, NULL); | 1542 | swsusp_header, NULL); |
1538 | } else { | 1543 | } else { |
1539 | error = -EINVAL; | 1544 | error = -EINVAL; |
@@ -1577,10 +1582,12 @@ int swsusp_unmark(void) | |||
1577 | { | 1582 | { |
1578 | int error; | 1583 | int error; |
1579 | 1584 | ||
1580 | hib_submit_io(READ_SYNC, swsusp_resume_block, swsusp_header, NULL); | 1585 | hib_submit_io(REQ_OP_READ, READ_SYNC, swsusp_resume_block, |
1586 | swsusp_header, NULL); | ||
1581 | if (!memcmp(HIBERNATE_SIG,swsusp_header->sig, 10)) { | 1587 | if (!memcmp(HIBERNATE_SIG,swsusp_header->sig, 10)) { |
1582 | memcpy(swsusp_header->sig,swsusp_header->orig_sig, 10); | 1588 | memcpy(swsusp_header->sig,swsusp_header->orig_sig, 10); |
1583 | error = hib_submit_io(WRITE_SYNC, swsusp_resume_block, | 1589 | error = hib_submit_io(REQ_OP_WRITE, WRITE_SYNC, |
1590 | swsusp_resume_block, | ||
1584 | swsusp_header, NULL); | 1591 | swsusp_header, NULL); |
1585 | } else { | 1592 | } else { |
1586 | printk(KERN_ERR "PM: Cannot find swsusp signature!\n"); | 1593 | printk(KERN_ERR "PM: Cannot find swsusp signature!\n"); |
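The swap.c conversion above is representative of simple synchronous I/O users. A condensed sketch of the same pattern as a generic helper, assuming a caller-supplied block_device and page; the calls mirror hib_submit_io() and the submit_bio_wait() change shown earlier:

static int read_one_page(struct block_device *bdev, sector_t sector,
                         struct page *page)
{
        struct bio *bio;
        int err;

        bio = bio_alloc(GFP_KERNEL, 1);
        if (!bio)
                return -ENOMEM;

        bio->bi_bdev = bdev;
        bio->bi_iter.bi_sector = sector;
        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
                bio_put(bio);
                return -EFAULT;
        }
        bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC);

        err = submit_bio_wait(bio);     /* no rw argument anymore */
        bio_put(bio);
        return err;
}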
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 9aef8654e90d..bedb84d168d1 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
@@ -127,12 +127,13 @@ static void trace_note_tsk(struct task_struct *tsk) | |||
127 | 127 | ||
128 | static void trace_note_time(struct blk_trace *bt) | 128 | static void trace_note_time(struct blk_trace *bt) |
129 | { | 129 | { |
130 | struct timespec now; | 130 | struct timespec64 now; |
131 | unsigned long flags; | 131 | unsigned long flags; |
132 | u32 words[2]; | 132 | u32 words[2]; |
133 | 133 | ||
134 | getnstimeofday(&now); | 134 | /* need to check user space to see if this breaks in y2038 or y2106 */ |
135 | words[0] = now.tv_sec; | 135 | ktime_get_real_ts64(&now); |
136 | words[0] = (u32)now.tv_sec; | ||
136 | words[1] = now.tv_nsec; | 137 | words[1] = now.tv_nsec; |
137 | 138 | ||
138 | local_irq_save(flags); | 139 | local_irq_save(flags); |
@@ -189,6 +190,7 @@ static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ), | |||
189 | BLK_TC_ACT(BLK_TC_WRITE) }; | 190 | BLK_TC_ACT(BLK_TC_WRITE) }; |
190 | 191 | ||
191 | #define BLK_TC_RAHEAD BLK_TC_AHEAD | 192 | #define BLK_TC_RAHEAD BLK_TC_AHEAD |
193 | #define BLK_TC_PREFLUSH BLK_TC_FLUSH | ||
192 | 194 | ||
193 | /* The ilog2() calls fall out because they're constant */ | 195 | /* The ilog2() calls fall out because they're constant */ |
194 | #define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \ | 196 | #define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \ |
@@ -199,7 +201,8 @@ static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ), | |||
199 | * blk_io_trace structure and places it in a per-cpu subbuffer. | 201 | * blk_io_trace structure and places it in a per-cpu subbuffer. |
200 | */ | 202 | */ |
201 | static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, | 203 | static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, |
202 | int rw, u32 what, int error, int pdu_len, void *pdu_data) | 204 | int op, int op_flags, u32 what, int error, int pdu_len, |
205 | void *pdu_data) | ||
203 | { | 206 | { |
204 | struct task_struct *tsk = current; | 207 | struct task_struct *tsk = current; |
205 | struct ring_buffer_event *event = NULL; | 208 | struct ring_buffer_event *event = NULL; |
@@ -214,13 +217,16 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, | |||
214 | if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer)) | 217 | if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer)) |
215 | return; | 218 | return; |
216 | 219 | ||
217 | what |= ddir_act[rw & WRITE]; | 220 | what |= ddir_act[op_is_write(op) ? WRITE : READ]; |
218 | what |= MASK_TC_BIT(rw, SYNC); | 221 | what |= MASK_TC_BIT(op_flags, SYNC); |
219 | what |= MASK_TC_BIT(rw, RAHEAD); | 222 | what |= MASK_TC_BIT(op_flags, RAHEAD); |
220 | what |= MASK_TC_BIT(rw, META); | 223 | what |= MASK_TC_BIT(op_flags, META); |
221 | what |= MASK_TC_BIT(rw, DISCARD); | 224 | what |= MASK_TC_BIT(op_flags, PREFLUSH); |
222 | what |= MASK_TC_BIT(rw, FLUSH); | 225 | what |= MASK_TC_BIT(op_flags, FUA); |
223 | what |= MASK_TC_BIT(rw, FUA); | 226 | if (op == REQ_OP_DISCARD) |
227 | what |= BLK_TC_ACT(BLK_TC_DISCARD); | ||
228 | if (op == REQ_OP_FLUSH) | ||
229 | what |= BLK_TC_ACT(BLK_TC_FLUSH); | ||
224 | 230 | ||
225 | pid = tsk->pid; | 231 | pid = tsk->pid; |
226 | if (act_log_check(bt, what, sector, pid)) | 232 | if (act_log_check(bt, what, sector, pid)) |
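The rewritten mask logic reflects that DISCARD and FLUSH are no longer flag bits testable with MASK_TC_BIT(): they are now whole operations carried in op and must be compared by value, while SYNC, RAHEAD, META, PREFLUSH and FUA remain modifier bits in op_flags. Direction likewise comes from op_is_write() instead of masking WRITE out of a combined word. A condensed sketch of the resulting classification (helper name hypothetical):

    static u32 classify_trace_action(int op, int op_flags, u32 what)
    {
            /* direction: op_is_write() replaces the old (rw & WRITE) test */
            what |= ddir_act[op_is_write(op) ? WRITE : READ];

            /* modifiers still live in the flags word */
            what |= MASK_TC_BIT(op_flags, SYNC);
            what |= MASK_TC_BIT(op_flags, RAHEAD);
            what |= MASK_TC_BIT(op_flags, META);
            what |= MASK_TC_BIT(op_flags, PREFLUSH);
            what |= MASK_TC_BIT(op_flags, FUA);

            /* discard and flush are operations now, not flags */
            if (op == REQ_OP_DISCARD)
                    what |= BLK_TC_ACT(BLK_TC_DISCARD);
            if (op == REQ_OP_FLUSH)
                    what |= BLK_TC_ACT(BLK_TC_FLUSH);

            return what;
    }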
@@ -708,11 +714,11 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq, | |||
708 | 714 | ||
709 | if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { | 715 | if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { |
710 | what |= BLK_TC_ACT(BLK_TC_PC); | 716 | what |= BLK_TC_ACT(BLK_TC_PC); |
711 | __blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags, | 717 | __blk_add_trace(bt, 0, nr_bytes, req_op(rq), rq->cmd_flags, |
712 | what, rq->errors, rq->cmd_len, rq->cmd); | 718 | what, rq->errors, rq->cmd_len, rq->cmd); |
713 | } else { | 719 | } else { |
714 | what |= BLK_TC_ACT(BLK_TC_FS); | 720 | what |= BLK_TC_ACT(BLK_TC_FS); |
715 | __blk_add_trace(bt, blk_rq_pos(rq), nr_bytes, | 721 | __blk_add_trace(bt, blk_rq_pos(rq), nr_bytes, req_op(rq), |
716 | rq->cmd_flags, what, rq->errors, 0, NULL); | 722 | rq->cmd_flags, what, rq->errors, 0, NULL); |
717 | } | 723 | } |
718 | } | 724 | } |
@@ -770,7 +776,7 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio, | |||
770 | return; | 776 | return; |
771 | 777 | ||
772 | __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, | 778 | __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, |
773 | bio->bi_rw, what, error, 0, NULL); | 779 | bio_op(bio), bio->bi_rw, what, error, 0, NULL); |
774 | } | 780 | } |
775 | 781 | ||
776 | static void blk_add_trace_bio_bounce(void *ignore, | 782 | static void blk_add_trace_bio_bounce(void *ignore, |
@@ -818,7 +824,8 @@ static void blk_add_trace_getrq(void *ignore, | |||
818 | struct blk_trace *bt = q->blk_trace; | 824 | struct blk_trace *bt = q->blk_trace; |
819 | 825 | ||
820 | if (bt) | 826 | if (bt) |
821 | __blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL); | 827 | __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0, |
828 | NULL); | ||
822 | } | 829 | } |
823 | } | 830 | } |
824 | 831 | ||
@@ -833,7 +840,7 @@ static void blk_add_trace_sleeprq(void *ignore, | |||
833 | struct blk_trace *bt = q->blk_trace; | 840 | struct blk_trace *bt = q->blk_trace; |
834 | 841 | ||
835 | if (bt) | 842 | if (bt) |
836 | __blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ, | 843 | __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ, |
837 | 0, 0, NULL); | 844 | 0, 0, NULL); |
838 | } | 845 | } |
839 | } | 846 | } |
@@ -843,7 +850,7 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q) | |||
843 | struct blk_trace *bt = q->blk_trace; | 850 | struct blk_trace *bt = q->blk_trace; |
844 | 851 | ||
845 | if (bt) | 852 | if (bt) |
846 | __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL); | 853 | __blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL); |
847 | } | 854 | } |
848 | 855 | ||
849 | static void blk_add_trace_unplug(void *ignore, struct request_queue *q, | 856 | static void blk_add_trace_unplug(void *ignore, struct request_queue *q, |
@@ -860,7 +867,7 @@ static void blk_add_trace_unplug(void *ignore, struct request_queue *q, | |||
860 | else | 867 | else |
861 | what = BLK_TA_UNPLUG_TIMER; | 868 | what = BLK_TA_UNPLUG_TIMER; |
862 | 869 | ||
863 | __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu); | 870 | __blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu); |
864 | } | 871 | } |
865 | } | 872 | } |
866 | 873 | ||
@@ -874,8 +881,9 @@ static void blk_add_trace_split(void *ignore, | |||
874 | __be64 rpdu = cpu_to_be64(pdu); | 881 | __be64 rpdu = cpu_to_be64(pdu); |
875 | 882 | ||
876 | __blk_add_trace(bt, bio->bi_iter.bi_sector, | 883 | __blk_add_trace(bt, bio->bi_iter.bi_sector, |
877 | bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT, | 884 | bio->bi_iter.bi_size, bio_op(bio), bio->bi_rw, |
878 | bio->bi_error, sizeof(rpdu), &rpdu); | 885 | BLK_TA_SPLIT, bio->bi_error, sizeof(rpdu), |
886 | &rpdu); | ||
879 | } | 887 | } |
880 | } | 888 | } |
881 | 889 | ||
@@ -907,7 +915,7 @@ static void blk_add_trace_bio_remap(void *ignore, | |||
907 | r.sector_from = cpu_to_be64(from); | 915 | r.sector_from = cpu_to_be64(from); |
908 | 916 | ||
909 | __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, | 917 | __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, |
910 | bio->bi_rw, BLK_TA_REMAP, bio->bi_error, | 918 | bio_op(bio), bio->bi_rw, BLK_TA_REMAP, bio->bi_error, |
911 | sizeof(r), &r); | 919 | sizeof(r), &r); |
912 | } | 920 | } |
913 | 921 | ||
@@ -940,7 +948,7 @@ static void blk_add_trace_rq_remap(void *ignore, | |||
940 | r.sector_from = cpu_to_be64(from); | 948 | r.sector_from = cpu_to_be64(from); |
941 | 949 | ||
942 | __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), | 950 | __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), |
943 | rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors, | 951 | rq_data_dir(rq), 0, BLK_TA_REMAP, !!rq->errors, |
944 | sizeof(r), &r); | 952 | sizeof(r), &r); |
945 | } | 953 | } |
946 | 954 | ||
@@ -965,10 +973,10 @@ void blk_add_driver_data(struct request_queue *q, | |||
965 | return; | 973 | return; |
966 | 974 | ||
967 | if (rq->cmd_type == REQ_TYPE_BLOCK_PC) | 975 | if (rq->cmd_type == REQ_TYPE_BLOCK_PC) |
968 | __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0, | 976 | __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0, 0, |
969 | BLK_TA_DRV_DATA, rq->errors, len, data); | 977 | BLK_TA_DRV_DATA, rq->errors, len, data); |
970 | else | 978 | else |
971 | __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0, | 979 | __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0, 0, |
972 | BLK_TA_DRV_DATA, rq->errors, len, data); | 980 | BLK_TA_DRV_DATA, rq->errors, len, data); |
973 | } | 981 | } |
974 | EXPORT_SYMBOL_GPL(blk_add_driver_data); | 982 | EXPORT_SYMBOL_GPL(blk_add_driver_data); |
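Across the callers above the pattern is mechanical: request- and bio-based events pass req_op(rq) or bio_op(bio) in the new op slot and forward the raw flags word (rq->cmd_flags or bio->bi_rw) unchanged, while events with no associated I/O, such as plug and driver-data traces, pass 0 for both. The remap case can pass rq_data_dir(rq) as the op because in this era READ is 0 and WRITE is 1, matching REQ_OP_READ and REQ_OP_WRITE; op_is_write() is simply a parity test:

    /* 4.8-era helper (sketch): write-type ops are the odd-numbered ones,
     * so rq_data_dir()'s READ/WRITE values are valid op arguments too. */
    static inline bool op_is_write(unsigned int op)
    {
            return op & 1;
    }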
@@ -1769,21 +1777,30 @@ void blk_dump_cmd(char *buf, struct request *rq) | |||
1769 | } | 1777 | } |
1770 | } | 1778 | } |
1771 | 1779 | ||
1772 | void blk_fill_rwbs(char *rwbs, u32 rw, int bytes) | 1780 | void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes) |
1773 | { | 1781 | { |
1774 | int i = 0; | 1782 | int i = 0; |
1775 | 1783 | ||
1776 | if (rw & REQ_FLUSH) | 1784 | if (rw & REQ_PREFLUSH) |
1777 | rwbs[i++] = 'F'; | 1785 | rwbs[i++] = 'F'; |
1778 | 1786 | ||
1779 | if (rw & WRITE) | 1787 | switch (op) { |
1788 | case REQ_OP_WRITE: | ||
1789 | case REQ_OP_WRITE_SAME: | ||
1780 | rwbs[i++] = 'W'; | 1790 | rwbs[i++] = 'W'; |
1781 | else if (rw & REQ_DISCARD) | 1791 | break; |
1792 | case REQ_OP_DISCARD: | ||
1782 | rwbs[i++] = 'D'; | 1793 | rwbs[i++] = 'D'; |
1783 | else if (bytes) | 1794 | break; |
1795 | case REQ_OP_FLUSH: | ||
1796 | rwbs[i++] = 'F'; | ||
1797 | break; | ||
1798 | case REQ_OP_READ: | ||
1784 | rwbs[i++] = 'R'; | 1799 | rwbs[i++] = 'R'; |
1785 | else | 1800 | break; |
1801 | default: | ||
1786 | rwbs[i++] = 'N'; | 1802 | rwbs[i++] = 'N'; |
1803 | } | ||
1787 | 1804 | ||
1788 | if (rw & REQ_FUA) | 1805 | if (rw & REQ_FUA) |
1789 | rwbs[i++] = 'F'; | 1806 | rwbs[i++] = 'F'; |
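blk_fill_rwbs() builds the short action string that blkparse prints, and it now needs the operation passed explicitly: 'W', 'D', 'F' and 'R' can no longer be inferred from flag bits, REQ_OP_WRITE_SAME renders as 'W', and an unrecognized op falls back to 'N'. A usage sketch, assuming the remaining flag checks (FUA, then RAHEAD, SYNC, META) follow the switch as in the surrounding 4.8 code:

    char rwbs[8];

    /* a flush-preceded, forced-unit-access, synchronous write */
    blk_fill_rwbs(rwbs, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA | REQ_SYNC, 4096);
    /* rwbs now reads "FWFS": PREFLUSH, the op letter, then FUA and SYNC */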
diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 0cd522753ff5..d67c8288d95d 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c | |||
@@ -56,37 +56,24 @@ | |||
56 | n = wanted; \ | 56 | n = wanted; \ |
57 | } | 57 | } |
58 | 58 | ||
59 | #define iterate_bvec(i, n, __v, __p, skip, STEP) { \ | 59 | #define iterate_bvec(i, n, __v, __bi, skip, STEP) { \ |
60 | size_t wanted = n; \ | 60 | struct bvec_iter __start; \ |
61 | __p = i->bvec; \ | 61 | __start.bi_size = n; \ |
62 | __v.bv_len = min_t(size_t, n, __p->bv_len - skip); \ | 62 | __start.bi_bvec_done = skip; \ |
63 | if (likely(__v.bv_len)) { \ | 63 | __start.bi_idx = 0; \ |
64 | __v.bv_page = __p->bv_page; \ | 64 | for_each_bvec(__v, i->bvec, __bi, __start) { \ |
65 | __v.bv_offset = __p->bv_offset + skip; \ | 65 | if (!__v.bv_len) \ |
66 | (void)(STEP); \ | ||
67 | skip += __v.bv_len; \ | ||
68 | n -= __v.bv_len; \ | ||
69 | } \ | ||
70 | while (unlikely(n)) { \ | ||
71 | __p++; \ | ||
72 | __v.bv_len = min_t(size_t, n, __p->bv_len); \ | ||
73 | if (unlikely(!__v.bv_len)) \ | ||
74 | continue; \ | 66 | continue; \ |
75 | __v.bv_page = __p->bv_page; \ | ||
76 | __v.bv_offset = __p->bv_offset; \ | ||
77 | (void)(STEP); \ | 67 | (void)(STEP); \ |
78 | skip = __v.bv_len; \ | ||
79 | n -= __v.bv_len; \ | ||
80 | } \ | 68 | } \ |
81 | n = wanted; \ | ||
82 | } | 69 | } |
83 | 70 | ||
84 | #define iterate_all_kinds(i, n, v, I, B, K) { \ | 71 | #define iterate_all_kinds(i, n, v, I, B, K) { \ |
85 | size_t skip = i->iov_offset; \ | 72 | size_t skip = i->iov_offset; \ |
86 | if (unlikely(i->type & ITER_BVEC)) { \ | 73 | if (unlikely(i->type & ITER_BVEC)) { \ |
87 | const struct bio_vec *bvec; \ | ||
88 | struct bio_vec v; \ | 74 | struct bio_vec v; \ |
89 | iterate_bvec(i, n, v, bvec, skip, (B)) \ | 75 | struct bvec_iter __bi; \ |
76 | iterate_bvec(i, n, v, __bi, skip, (B)) \ | ||
90 | } else if (unlikely(i->type & ITER_KVEC)) { \ | 77 | } else if (unlikely(i->type & ITER_KVEC)) { \ |
91 | const struct kvec *kvec; \ | 78 | const struct kvec *kvec; \ |
92 | struct kvec v; \ | 79 | struct kvec v; \ |
@@ -104,15 +91,13 @@ | |||
104 | if (i->count) { \ | 91 | if (i->count) { \ |
105 | size_t skip = i->iov_offset; \ | 92 | size_t skip = i->iov_offset; \ |
106 | if (unlikely(i->type & ITER_BVEC)) { \ | 93 | if (unlikely(i->type & ITER_BVEC)) { \ |
107 | const struct bio_vec *bvec; \ | 94 | const struct bio_vec *bvec = i->bvec; \ |
108 | struct bio_vec v; \ | 95 | struct bio_vec v; \ |
109 | iterate_bvec(i, n, v, bvec, skip, (B)) \ | 96 | struct bvec_iter __bi; \ |
110 | if (skip == bvec->bv_len) { \ | 97 | iterate_bvec(i, n, v, __bi, skip, (B)) \ |
111 | bvec++; \ | 98 | i->bvec = __bvec_iter_bvec(i->bvec, __bi); \ |
112 | skip = 0; \ | 99 | i->nr_segs -= i->bvec - bvec; \ |
113 | } \ | 100 | skip = __bi.bi_bvec_done; \ |
114 | i->nr_segs -= bvec - i->bvec; \ | ||
115 | i->bvec = bvec; \ | ||
116 | } else if (unlikely(i->type & ITER_KVEC)) { \ | 101 | } else if (unlikely(i->type & ITER_KVEC)) { \ |
117 | const struct kvec *kvec; \ | 102 | const struct kvec *kvec; \ |
118 | struct kvec v; \ | 103 | struct kvec v; \ |
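The old iterate_bvec open-coded a two-phase walk (a partial first segment, then whole segments) with manual skip/n bookkeeping; the rewrite delegates all of that to the for_each_bvec() iterator, seeding a struct bvec_iter with bi_size = n and bi_bvec_done = skip. After the loop, advancing the iov_iter is just a matter of reading the iterator back out: __bvec_iter_bvec() names the segment the walk stopped in and bi_bvec_done gives the offset within it. A self-contained sketch of the same walk (hypothetical helper):

    #include <linux/bvec.h>  /* for_each_bvec(); in older trees this lived in bio.h */

    /* Count the bytes a bvec-backed iterator would visit, starting `skip`
     * bytes into bvec[0] and walking at most `n` bytes, the way the new
     * iterate_bvec macro does. */
    static size_t walk_bvecs(const struct bio_vec *bvec, size_t n, size_t skip)
    {
            struct bvec_iter start, bi;
            struct bio_vec v;
            size_t total = 0;

            start.bi_size = n;             /* bytes the walk may cover */
            start.bi_idx = 0;              /* begin at the first segment */
            start.bi_bvec_done = skip;     /* bytes already consumed there */

            for_each_bvec(v, bvec, bi, start) {
                    /* v is clamped to both the segment and the remaining size */
                    total += v.bv_len;
            }

            /* bi.bi_idx / bi.bi_bvec_done now say exactly where the walk
             * stopped, which is what iterate_and_advance feeds back into
             * i->bvec and i->iov_offset. */
            return total;
    }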
diff --git a/mm/page_io.c b/mm/page_io.c index 242dba07545b..dcc5d3769608 100644 --- a/mm/page_io.c +++ b/mm/page_io.c | |||
@@ -259,7 +259,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc, | |||
259 | bio_end_io_t end_write_func) | 259 | bio_end_io_t end_write_func) |
260 | { | 260 | { |
261 | struct bio *bio; | 261 | struct bio *bio; |
262 | int ret, rw = WRITE; | 262 | int ret; |
263 | struct swap_info_struct *sis = page_swap_info(page); | 263 | struct swap_info_struct *sis = page_swap_info(page); |
264 | 264 | ||
265 | if (sis->flags & SWP_FILE) { | 265 | if (sis->flags & SWP_FILE) { |
@@ -317,12 +317,13 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc, | |||
317 | ret = -ENOMEM; | 317 | ret = -ENOMEM; |
318 | goto out; | 318 | goto out; |
319 | } | 319 | } |
320 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); | ||
320 | if (wbc->sync_mode == WB_SYNC_ALL) | 321 | if (wbc->sync_mode == WB_SYNC_ALL) |
321 | rw |= REQ_SYNC; | 322 | bio->bi_rw |= REQ_SYNC; |
322 | count_vm_event(PSWPOUT); | 323 | count_vm_event(PSWPOUT); |
323 | set_page_writeback(page); | 324 | set_page_writeback(page); |
324 | unlock_page(page); | 325 | unlock_page(page); |
325 | submit_bio(rw, bio); | 326 | submit_bio(bio); |
326 | out: | 327 | out: |
327 | return ret; | 328 | return ret; |
328 | } | 329 | } |
@@ -369,8 +370,9 @@ int swap_readpage(struct page *page) | |||
369 | ret = -ENOMEM; | 370 | ret = -ENOMEM; |
370 | goto out; | 371 | goto out; |
371 | } | 372 | } |
373 | bio_set_op_attrs(bio, REQ_OP_READ, 0); | ||
372 | count_vm_event(PSWPIN); | 374 | count_vm_event(PSWPIN); |
373 | submit_bio(READ, bio); | 375 | submit_bio(bio); |
374 | out: | 376 | out: |
375 | return ret; | 377 | return ret; |
376 | } | 378 | } |
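Both swap I/O paths now carry the operation inside the bio itself: bio_set_op_attrs() records REQ_OP_WRITE or REQ_OP_READ plus any op flags, extra modifiers such as REQ_SYNC are ORed into bio->bi_rw directly, and submit_bio() takes only the bio, with no separate rw argument. The resulting idiom, sketched under 4.8-era field names (bi_rw was later renamed bi_opf):

    /* hypothetical condensation of the __swap_writepage() tail */
    static void submit_swap_write(struct bio *bio, struct writeback_control *wbc)
    {
            bio_set_op_attrs(bio, REQ_OP_WRITE, 0); /* op lives in the bio */
            if (wbc->sync_mode == WB_SYNC_ALL)
                    bio->bi_rw |= REQ_SYNC;         /* modifiers OR into bi_rw */
            submit_bio(bio);                        /* one-argument form */
    }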