about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorKent Overstreet <kmo@daterainc.com>2013-09-24 19:26:05 -0400
committerKent Overstreet <kmo@daterainc.com>2013-11-24 01:33:51 -0500
commit458b76ed2f9517becb74dcc8eedd70d3068ea6e4 (patch)
treeb01b2150b197e0759b3ba59c0f9367d4477ebb8c
parentd57a5f7c6605f15f3b5134837e68b448a7cea88e (diff)
block: Kill bio_segments()/bi_vcnt usage
When we start sharing biovecs, keeping bi_vcnt accurate for splits is going to be error prone - and unnecessary, if we refactor some code. So bio_segments() has to go - but most of the existing users just needed to know if the bio had multiple segments, which is easier - add a bio_multiple_segments() for them.

(Two of the current uses of bio_segments() are going to go away in a couple patches, but the current implementation of bio_segments() is unsafe as soon as we start doing driver conversions for immutable biovecs - so implement a dumb version for bisectability, it'll go away in a couple patches)

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Neil Brown <neilb@suse.de>
Cc: Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com>
Cc: Sreekanth Reddy <Sreekanth.Reddy@lsi.com>
Cc: "James E.J. Bottomley" <JBottomley@parallels.com>
-rw-r--r--drivers/block/ps3disk.c7
-rw-r--r--drivers/md/bcache/io.c53
-rw-r--r--drivers/md/raid0.c2
-rw-r--r--drivers/md/raid10.c2
-rw-r--r--drivers/message/fusion/mptsas.c8
-rw-r--r--drivers/scsi/libsas/sas_expander.c8
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c10
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c8
-rw-r--r--fs/bio.c2
-rw-r--r--include/linux/bio.h81
10 files changed, 94 insertions, 87 deletions
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 1c6edb9a9960..c120d70d3fb3 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -101,10 +101,9 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
101 101
102 rq_for_each_segment(bvec, req, iter) { 102 rq_for_each_segment(bvec, req, iter) {
103 unsigned long flags; 103 unsigned long flags;
104 dev_dbg(&dev->sbd.core, 104 dev_dbg(&dev->sbd.core, "%s:%u: bio %u: %u sectors from %lu\n",
105 "%s:%u: bio %u: %u segs %u sectors from %lu\n", 105 __func__, __LINE__, i, bio_sectors(iter.bio),
106 __func__, __LINE__, i, bio_segments(iter.bio), 106 iter.bio->bi_iter.bi_sector);
107 bio_sectors(iter.bio), iter.bio->bi_iter.bi_sector);
108 107
109 size = bvec.bv_len; 108 size = bvec.bv_len;
110 buf = bvec_kmap_irq(&bvec, &flags); 109 buf = bvec_kmap_irq(&bvec, &flags);
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 9b5b6a41a9b6..6e04f3bb0286 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -24,7 +24,8 @@ static void bch_generic_make_request_hack(struct bio *bio)
24 if (bio->bi_iter.bi_idx) { 24 if (bio->bi_iter.bi_idx) {
25 struct bio_vec bv; 25 struct bio_vec bv;
26 struct bvec_iter iter; 26 struct bvec_iter iter;
27 struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio)); 27 unsigned segs = bio_segments(bio);
28 struct bio *clone = bio_alloc(GFP_NOIO, segs);
28 29
29 bio_for_each_segment(bv, bio, iter) 30 bio_for_each_segment(bv, bio, iter)
30 clone->bi_io_vec[clone->bi_vcnt++] = bv; 31 clone->bi_io_vec[clone->bi_vcnt++] = bv;
@@ -32,7 +33,7 @@ static void bch_generic_make_request_hack(struct bio *bio)
32 clone->bi_iter.bi_sector = bio->bi_iter.bi_sector; 33 clone->bi_iter.bi_sector = bio->bi_iter.bi_sector;
33 clone->bi_bdev = bio->bi_bdev; 34 clone->bi_bdev = bio->bi_bdev;
34 clone->bi_rw = bio->bi_rw; 35 clone->bi_rw = bio->bi_rw;
35 clone->bi_vcnt = bio_segments(bio); 36 clone->bi_vcnt = segs;
36 clone->bi_iter.bi_size = bio->bi_iter.bi_size; 37 clone->bi_iter.bi_size = bio->bi_iter.bi_size;
37 38
38 clone->bi_private = bio; 39 clone->bi_private = bio;
@@ -133,40 +134,32 @@ out:
133 134
134static unsigned bch_bio_max_sectors(struct bio *bio) 135static unsigned bch_bio_max_sectors(struct bio *bio)
135{ 136{
136 unsigned ret = bio_sectors(bio);
137 struct request_queue *q = bdev_get_queue(bio->bi_bdev); 137 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
138 unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES, 138 struct bio_vec bv;
139 queue_max_segments(q)); 139 struct bvec_iter iter;
140 unsigned ret = 0, seg = 0;
140 141
141 if (bio->bi_rw & REQ_DISCARD) 142 if (bio->bi_rw & REQ_DISCARD)
142 return min(ret, q->limits.max_discard_sectors); 143 return min(bio_sectors(bio), q->limits.max_discard_sectors);
143
144 if (bio_segments(bio) > max_segments ||
145 q->merge_bvec_fn) {
146 struct bio_vec bv;
147 struct bvec_iter iter;
148 unsigned seg = 0;
149
150 ret = 0;
151 144
152 bio_for_each_segment(bv, bio, iter) { 145 bio_for_each_segment(bv, bio, iter) {
153 struct bvec_merge_data bvm = { 146 struct bvec_merge_data bvm = {
154 .bi_bdev = bio->bi_bdev, 147 .bi_bdev = bio->bi_bdev,
155 .bi_sector = bio->bi_iter.bi_sector, 148 .bi_sector = bio->bi_iter.bi_sector,
156 .bi_size = ret << 9, 149 .bi_size = ret << 9,
157 .bi_rw = bio->bi_rw, 150 .bi_rw = bio->bi_rw,
158 }; 151 };
159 152
160 if (seg == max_segments) 153 if (seg == min_t(unsigned, BIO_MAX_PAGES,
161 break; 154 queue_max_segments(q)))
155 break;
162 156
163 if (q->merge_bvec_fn && 157 if (q->merge_bvec_fn &&
164 q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len) 158 q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
165 break; 159 break;
166 160
167 seg++; 161 seg++;
168 ret += bv.bv_len >> 9; 162 ret += bv.bv_len >> 9;
169 }
170 } 163 }
171 164
172 ret = min(ret, queue_max_sectors(q)); 165 ret = min(ret, queue_max_sectors(q));
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index e38d1d3226f3..8ee1a6c658b4 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -528,7 +528,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
528 sector_t sector = bio->bi_iter.bi_sector; 528 sector_t sector = bio->bi_iter.bi_sector;
529 struct bio_pair *bp; 529 struct bio_pair *bp;
530 /* Sanity check -- queue functions should prevent this happening */ 530 /* Sanity check -- queue functions should prevent this happening */
531 if (bio_segments(bio) > 1) 531 if (bio_multiple_segments(bio))
532 goto bad_map; 532 goto bad_map;
533 /* This is a one page bio that upper layers 533 /* This is a one page bio that upper layers
534 * refuse to split for us, so we need to split it. 534 * refuse to split for us, so we need to split it.
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index dbf3b63c2754..ac4bfa438c57 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1188,7 +1188,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1188 || conf->prev.near_copies < conf->prev.raid_disks))) { 1188 || conf->prev.near_copies < conf->prev.raid_disks))) {
1189 struct bio_pair *bp; 1189 struct bio_pair *bp;
1190 /* Sanity check -- queue functions should prevent this happening */ 1190 /* Sanity check -- queue functions should prevent this happening */
1191 if (bio_segments(bio) > 1) 1191 if (bio_multiple_segments(bio))
1192 goto bad_map; 1192 goto bad_map;
1193 /* This is a one page bio that upper layers 1193 /* This is a one page bio that upper layers
1194 * refuse to split for us, so we need to split it. 1194 * refuse to split for us, so we need to split it.
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index dd239bdbfcb4..00d339c361fc 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -2235,10 +2235,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
2235 } 2235 }
2236 2236
2237 /* do we need to support multiple segments? */ 2237 /* do we need to support multiple segments? */
2238 if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) { 2238 if (bio_multiple_segments(req->bio) ||
2239 printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n", 2239 bio_multiple_segments(rsp->bio)) {
2240 ioc->name, __func__, bio_segments(req->bio), blk_rq_bytes(req), 2240 printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u, rsp %u\n",
2241 bio_segments(rsp->bio), blk_rq_bytes(rsp)); 2241 ioc->name, __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));
2242 return -EINVAL; 2242 return -EINVAL;
2243 } 2243 }
2244 2244
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 446b85110a1f..0cac7d8fd0f7 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -2163,10 +2163,10 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
2163 } 2163 }
2164 2164
2165 /* do we need to support multiple segments? */ 2165 /* do we need to support multiple segments? */
2166 if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) { 2166 if (bio_multiple_segments(req->bio) ||
2167 printk("%s: multiple segments req %u %u, rsp %u %u\n", 2167 bio_multiple_segments(rsp->bio)) {
2168 __func__, bio_segments(req->bio), blk_rq_bytes(req), 2168 printk("%s: multiple segments req %u, rsp %u\n",
2169 bio_segments(rsp->bio), blk_rq_bytes(rsp)); 2169 __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));
2170 return -EINVAL; 2170 return -EINVAL;
2171 } 2171 }
2172 2172
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 7143e86af326..410f4a3e8888 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -1943,7 +1943,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1943 ioc->transport_cmds.status = MPT2_CMD_PENDING; 1943 ioc->transport_cmds.status = MPT2_CMD_PENDING;
1944 1944
1945 /* Check if the request is split across multiple segments */ 1945 /* Check if the request is split across multiple segments */
1946 if (bio_segments(req->bio) > 1) { 1946 if (bio_multiple_segments(req->bio)) {
1947 u32 offset = 0; 1947 u32 offset = 0;
1948 1948
1949 /* Allocate memory and copy the request */ 1949 /* Allocate memory and copy the request */
@@ -1975,7 +1975,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1975 1975
1976 /* Check if the response needs to be populated across 1976 /* Check if the response needs to be populated across
1977 * multiple segments */ 1977 * multiple segments */
1978 if (bio_segments(rsp->bio) > 1) { 1978 if (bio_multiple_segments(rsp->bio)) {
1979 pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp), 1979 pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
1980 &pci_dma_in); 1980 &pci_dma_in);
1981 if (!pci_addr_in) { 1981 if (!pci_addr_in) {
@@ -2042,7 +2042,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
2042 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 2042 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2043 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC); 2043 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
2044 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 2044 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2045 if (bio_segments(req->bio) > 1) { 2045 if (bio_multiple_segments(req->bio)) {
2046 ioc->base_add_sg_single(psge, sgl_flags | 2046 ioc->base_add_sg_single(psge, sgl_flags |
2047 (blk_rq_bytes(req) - 4), pci_dma_out); 2047 (blk_rq_bytes(req) - 4), pci_dma_out);
2048 } else { 2048 } else {
@@ -2058,7 +2058,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
2058 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | 2058 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
2059 MPI2_SGE_FLAGS_END_OF_LIST); 2059 MPI2_SGE_FLAGS_END_OF_LIST);
2060 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 2060 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2061 if (bio_segments(rsp->bio) > 1) { 2061 if (bio_multiple_segments(rsp->bio)) {
2062 ioc->base_add_sg_single(psge, sgl_flags | 2062 ioc->base_add_sg_single(psge, sgl_flags |
2063 (blk_rq_bytes(rsp) + 4), pci_dma_in); 2063 (blk_rq_bytes(rsp) + 4), pci_dma_in);
2064 } else { 2064 } else {
@@ -2103,7 +2103,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
2103 le16_to_cpu(mpi_reply->ResponseDataLength); 2103 le16_to_cpu(mpi_reply->ResponseDataLength);
2104 /* check if the resp needs to be copied from the allocated 2104 /* check if the resp needs to be copied from the allocated
2105 * pci mem */ 2105 * pci mem */
2106 if (bio_segments(rsp->bio) > 1) { 2106 if (bio_multiple_segments(rsp->bio)) {
2107 u32 offset = 0; 2107 u32 offset = 0;
2108 u32 bytes_to_copy = 2108 u32 bytes_to_copy =
2109 le16_to_cpu(mpi_reply->ResponseDataLength); 2109 le16_to_cpu(mpi_reply->ResponseDataLength);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index 196a67f2e95f..65170cb1a00f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -1926,7 +1926,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1926 ioc->transport_cmds.status = MPT3_CMD_PENDING; 1926 ioc->transport_cmds.status = MPT3_CMD_PENDING;
1927 1927
1928 /* Check if the request is split across multiple segments */ 1928 /* Check if the request is split across multiple segments */
1929 if (req->bio->bi_vcnt > 1) { 1929 if (bio_multiple_segments(req->bio)) {
1930 u32 offset = 0; 1930 u32 offset = 0;
1931 1931
1932 /* Allocate memory and copy the request */ 1932 /* Allocate memory and copy the request */
@@ -1958,7 +1958,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1958 1958
1959 /* Check if the response needs to be populated across 1959 /* Check if the response needs to be populated across
1960 * multiple segments */ 1960 * multiple segments */
1961 if (rsp->bio->bi_vcnt > 1) { 1961 if (bio_multiple_segments(rsp->bio)) {
1962 pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp), 1962 pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
1963 &pci_dma_in); 1963 &pci_dma_in);
1964 if (!pci_addr_in) { 1964 if (!pci_addr_in) {
@@ -2019,7 +2019,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
2019 mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4); 2019 mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
2020 psge = &mpi_request->SGL; 2020 psge = &mpi_request->SGL;
2021 2021
2022 if (req->bio->bi_vcnt > 1) 2022 if (bio_multiple_segments(req->bio))
2023 ioc->build_sg(ioc, psge, pci_dma_out, (blk_rq_bytes(req) - 4), 2023 ioc->build_sg(ioc, psge, pci_dma_out, (blk_rq_bytes(req) - 4),
2024 pci_dma_in, (blk_rq_bytes(rsp) + 4)); 2024 pci_dma_in, (blk_rq_bytes(rsp) + 4));
2025 else 2025 else
@@ -2064,7 +2064,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
2064 2064
2065 /* check if the resp needs to be copied from the allocated 2065 /* check if the resp needs to be copied from the allocated
2066 * pci mem */ 2066 * pci mem */
2067 if (rsp->bio->bi_vcnt > 1) { 2067 if (bio_multiple_segments(rsp->bio)) {
2068 u32 offset = 0; 2068 u32 offset = 0;
2069 u32 bytes_to_copy = 2069 u32 bytes_to_copy =
2070 le16_to_cpu(mpi_reply->ResponseDataLength); 2070 le16_to_cpu(mpi_reply->ResponseDataLength);
diff --git a/fs/bio.c b/fs/bio.c
index f61e59b38815..e32f2ffc3f33 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1733,7 +1733,7 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors)
1733 trace_block_split(bdev_get_queue(bi->bi_bdev), bi, 1733 trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
1734 bi->bi_iter.bi_sector + first_sectors); 1734 bi->bi_iter.bi_sector + first_sectors);
1735 1735
1736 BUG_ON(bio_segments(bi) > 1); 1736 BUG_ON(bio_multiple_segments(bi));
1737 atomic_set(&bp->cnt, 3); 1737 atomic_set(&bp->cnt, 3);
1738 bp->error = 0; 1738 bp->error = 0;
1739 bp->bio1 = *bi; 1739 bp->bio1 = *bi;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 930cb73c894b..aea9896a6289 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -97,13 +97,46 @@
97#define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter) 97#define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter)
98#define bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter) 98#define bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter)
99 99
100#define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_iter.bi_idx) 100#define bio_multiple_segments(bio) \
101 ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
101#define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9) 102#define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9)
102#define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio))) 103#define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio)))
103 104
105/*
106 * Check whether this bio carries any data or not. A NULL bio is allowed.
107 */
108static inline bool bio_has_data(struct bio *bio)
109{
110 if (bio &&
111 bio->bi_iter.bi_size &&
112 !(bio->bi_rw & REQ_DISCARD))
113 return true;
114
115 return false;
116}
117
118static inline bool bio_is_rw(struct bio *bio)
119{
120 if (!bio_has_data(bio))
121 return false;
122
123 if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
124 return false;
125
126 return true;
127}
128
129static inline bool bio_mergeable(struct bio *bio)
130{
131 if (bio->bi_rw & REQ_NOMERGE_FLAGS)
132 return false;
133
134 return true;
135}
136
104static inline unsigned int bio_cur_bytes(struct bio *bio) 137static inline unsigned int bio_cur_bytes(struct bio *bio)
105{ 138{
106 if (bio->bi_vcnt) 139 if (bio_has_data(bio))
107 return bio_iovec(bio).bv_len; 140 return bio_iovec(bio).bv_len;
108 else /* dataless requests such as discard */ 141 else /* dataless requests such as discard */
109 return bio->bi_iter.bi_size; 142 return bio->bi_iter.bi_size;
@@ -111,7 +144,7 @@ static inline unsigned int bio_cur_bytes(struct bio *bio)
111 144
112static inline void *bio_data(struct bio *bio) 145static inline void *bio_data(struct bio *bio)
113{ 146{
114 if (bio->bi_vcnt) 147 if (bio_has_data(bio))
115 return page_address(bio_page(bio)) + bio_offset(bio); 148 return page_address(bio_page(bio)) + bio_offset(bio);
116 149
117 return NULL; 150 return NULL;
@@ -221,6 +254,18 @@ static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
221 254
222#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len) 255#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
223 256
257static inline unsigned bio_segments(struct bio *bio)
258{
259 unsigned segs = 0;
260 struct bio_vec bv;
261 struct bvec_iter iter;
262
263 bio_for_each_segment(bv, bio, iter)
264 segs++;
265
266 return segs;
267}
268
224/* 269/*
225 * get a reference to a bio, so it won't disappear. the intended use is 270 * get a reference to a bio, so it won't disappear. the intended use is
226 * something like: 271 * something like:
@@ -435,36 +480,6 @@ static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
435#define bio_kunmap_irq(buf,flags) __bio_kunmap_irq(buf, flags) 480#define bio_kunmap_irq(buf,flags) __bio_kunmap_irq(buf, flags)
436 481
437/* 482/*
438 * Check whether this bio carries any data or not. A NULL bio is allowed.
439 */
440static inline bool bio_has_data(struct bio *bio)
441{
442 if (bio && bio->bi_vcnt)
443 return true;
444
445 return false;
446}
447
448static inline bool bio_is_rw(struct bio *bio)
449{
450 if (!bio_has_data(bio))
451 return false;
452
453 if (bio->bi_rw & REQ_WRITE_SAME)
454 return false;
455
456 return true;
457}
458
459static inline bool bio_mergeable(struct bio *bio)
460{
461 if (bio->bi_rw & REQ_NOMERGE_FLAGS)
462 return false;
463
464 return true;
465}
466
467/*
468 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop. 483 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
469 * 484 *
470 * A bio_list anchors a singly-linked list of bios chained through the bi_next 485 * A bio_list anchors a singly-linked list of bios chained through the bi_next