Diffstat (limited to 'block/blk-lib.c')
-rw-r--r--	block/blk-lib.c	134
1 file changed, 39 insertions(+), 95 deletions(-)
diff --git a/block/blk-lib.c b/block/blk-lib.c
index c392029a104e..78e627e2581d 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -9,17 +9,20 @@
 
 #include "blk.h"
 
-static void blkdev_discard_end_io(struct bio *bio, int err)
-{
-	if (err) {
-		if (err == -EOPNOTSUPP)
-			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
-		clear_bit(BIO_UPTODATE, &bio->bi_flags);
-	}
+struct bio_batch {
+	atomic_t		done;
+	unsigned long		flags;
+	struct completion	*wait;
+};
 
-	if (bio->bi_private)
-		complete(bio->bi_private);
+static void bio_batch_end_io(struct bio *bio, int err)
+{
+	struct bio_batch *bb = bio->bi_private;
 
+	if (err && (err != -EOPNOTSUPP))
+		clear_bit(BIO_UPTODATE, &bb->flags);
+	if (atomic_dec_and_test(&bb->done))
+		complete(bb->wait);
 	bio_put(bio);
 }
 
@@ -39,9 +42,9 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 {
 	DECLARE_COMPLETION_ONSTACK(wait);
 	struct request_queue *q = bdev_get_queue(bdev);
-	int type = flags & BLKDEV_IFL_BARRIER ?
-		DISCARD_BARRIER : DISCARD_NOBARRIER;
+	int type = REQ_WRITE | REQ_DISCARD;
 	unsigned int max_discard_sectors;
+	struct bio_batch bb;
 	struct bio *bio;
 	int ret = 0;
 
@@ -62,13 +65,17 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		max_discard_sectors &= ~(disc_sects - 1);
 	}
 
-	if (flags & BLKDEV_IFL_SECURE) {
+	if (flags & BLKDEV_DISCARD_SECURE) {
 		if (!blk_queue_secdiscard(q))
 			return -EOPNOTSUPP;
-		type |= DISCARD_SECURE;
+		type |= REQ_SECURE;
 	}
 
-	while (nr_sects && !ret) {
+	atomic_set(&bb.done, 1);
+	bb.flags = 1 << BIO_UPTODATE;
+	bb.wait = &wait;
+
+	while (nr_sects) {
 		bio = bio_alloc(gfp_mask, 1);
 		if (!bio) {
 			ret = -ENOMEM;
@@ -76,10 +83,9 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		}
 
 		bio->bi_sector = sector;
-		bio->bi_end_io = blkdev_discard_end_io;
+		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_bdev = bdev;
-		if (flags & BLKDEV_IFL_WAIT)
-			bio->bi_private = &wait;
+		bio->bi_private = &bb;
 
 		if (nr_sects > max_discard_sectors) {
 			bio->bi_size = max_discard_sectors << 9;
@@ -90,85 +96,45 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 			nr_sects = 0;
 		}
 
-		bio_get(bio);
+		atomic_inc(&bb.done);
 		submit_bio(type, bio);
+	}
 
-		if (flags & BLKDEV_IFL_WAIT)
-			wait_for_completion(&wait);
+	/* Wait for bios in-flight */
+	if (!atomic_dec_and_test(&bb.done))
+		wait_for_completion(&wait);
 
-		if (bio_flagged(bio, BIO_EOPNOTSUPP))
-			ret = -EOPNOTSUPP;
-		else if (!bio_flagged(bio, BIO_UPTODATE))
-			ret = -EIO;
-		bio_put(bio);
-	}
+	if (!test_bit(BIO_UPTODATE, &bb.flags))
+		ret = -EIO;
 
 	return ret;
 }
 EXPORT_SYMBOL(blkdev_issue_discard);
 
-struct bio_batch
-{
-	atomic_t		done;
-	unsigned long		flags;
-	struct completion	*wait;
-	bio_end_io_t		*end_io;
-};
-
-static void bio_batch_end_io(struct bio *bio, int err)
-{
-	struct bio_batch *bb = bio->bi_private;
-
-	if (err) {
-		if (err == -EOPNOTSUPP)
-			set_bit(BIO_EOPNOTSUPP, &bb->flags);
-		else
-			clear_bit(BIO_UPTODATE, &bb->flags);
-	}
-	if (bb) {
-		if (bb->end_io)
-			bb->end_io(bio, err);
-		atomic_inc(&bb->done);
-		complete(bb->wait);
-	}
-	bio_put(bio);
-}
-
 /**
- * blkdev_issue_zeroout generate number of zero filed write bios
+ * blkdev_issue_zeroout - generate number of zero filed write bios
  * @bdev:	blockdev to issue
  * @sector:	start sector
  * @nr_sects:	number of sectors to write
  * @gfp_mask:	memory allocation flags (for bio_alloc)
- * @flags: BLKDEV_IFL_* flags to control behaviour
  *
  * Description:
  *  Generate and issue number of bios with zerofiled pages.
- *  Send barrier at the beginning and at the end if requested. This guarantie
- *  correct request ordering. Empty barrier allow us to avoid post queue flush.
  */
 
 int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
-			sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+			sector_t nr_sects, gfp_t gfp_mask)
 {
 	int ret;
 	struct bio *bio;
 	struct bio_batch bb;
-	unsigned int sz, issued = 0;
+	unsigned int sz;
 	DECLARE_COMPLETION_ONSTACK(wait);
 
-	atomic_set(&bb.done, 0);
+	atomic_set(&bb.done, 1);
 	bb.flags = 1 << BIO_UPTODATE;
 	bb.wait = &wait;
-	bb.end_io = NULL;
 
-	if (flags & BLKDEV_IFL_BARRIER) {
-		/* issue async barrier before the data */
-		ret = blkdev_issue_flush(bdev, gfp_mask, NULL, 0);
-		if (ret)
-			return ret;
-	}
-submit:
 	ret = 0;
 	while (nr_sects != 0) {
 		bio = bio_alloc(gfp_mask,
@@ -181,14 +147,10 @@ submit:
 		bio->bi_sector = sector;
 		bio->bi_bdev = bdev;
 		bio->bi_end_io = bio_batch_end_io;
-		if (flags & BLKDEV_IFL_WAIT)
-			bio->bi_private = &bb;
+		bio->bi_private = &bb;
 
 		while (nr_sects != 0) {
 			sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
-			if (sz == 0)
-				/* bio has maximum size possible */
-				break;
 			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
 			nr_sects -= ret >> 9;
 			sector += ret >> 9;
@@ -196,36 +158,18 @@
 			break;
 		}
 		ret = 0;
-		issued++;
+		atomic_inc(&bb.done);
 		submit_bio(WRITE, bio);
 	}
-	/*
-	 * When all data bios are in flight. Send final barrier if requeted.
-	 */
-	if (nr_sects == 0 && flags & BLKDEV_IFL_BARRIER)
-		ret = blkdev_issue_flush(bdev, gfp_mask, NULL,
-				flags & BLKDEV_IFL_WAIT);
-
 
-	if (flags & BLKDEV_IFL_WAIT)
-		/* Wait for bios in-flight */
-		while ( issued != atomic_read(&bb.done))
-			wait_for_completion(&wait);
+	/* Wait for bios in-flight */
+	if (!atomic_dec_and_test(&bb.done))
+		wait_for_completion(&wait);
 
 	if (!test_bit(BIO_UPTODATE, &bb.flags))
 		/* One of bios in the batch was completed with error.*/
 		ret = -EIO;
 
-	if (ret)
-		goto out;
-
-	if (test_bit(BIO_EOPNOTSUPP, &bb.flags)) {
-		ret = -EOPNOTSUPP;
-		goto out;
-	}
-	if (nr_sects != 0)
-		goto submit;
-out:
 	return ret;
 }
 EXPORT_SYMBOL(blkdev_issue_zeroout);
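The heart of this patch is a reference-counting idiom shared by both functions: bb.done starts at 1 (a bias held by the submitter), is incremented once per submitted bio, and is dropped once by each completion and once by the submitter after the loop; only the drop that takes the count to zero fires the completion. That replaces the old racy bookkeeping (`while (issued != atomic_read(&bb.done)) wait_for_completion(&wait);`), which could miss or double-count wakeups. Below is a stand-alone user-space sketch of the same counting pattern, assuming C11 atomics and pthreads in place of bios and struct completion. Every name in it (struct batch, fake_endio, and so on) is invented for illustration; none of this is kernel code.

/*
 * User-space sketch of the bio_batch pattern above.
 * Threads stand in for bios in flight; a mutex/condvar pair stands in
 * for struct completion. Build with: cc -std=c11 -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

struct batch {
	atomic_int done;		/* biased to 1 by the submitter */
	pthread_mutex_t lock;
	pthread_cond_t wait;
	int complete;
};

/* "end_io" context: drop one reference; the last drop signals. */
static void *fake_endio(void *arg)
{
	struct batch *bb = arg;

	usleep(1000);			/* simulate I/O latency */
	if (atomic_fetch_sub(&bb->done, 1) == 1) {	/* dec_and_test */
		pthread_mutex_lock(&bb->lock);
		bb->complete = 1;
		pthread_cond_signal(&bb->wait);
		pthread_mutex_unlock(&bb->lock);
	}
	return NULL;
}

int main(void)
{
	struct batch bb = { .lock = PTHREAD_MUTEX_INITIALIZER,
			    .wait = PTHREAD_COND_INITIALIZER };
	pthread_t thr[8];
	int i;

	atomic_store(&bb.done, 1);	/* submitter's bias reference */

	for (i = 0; i < 8; i++) {
		atomic_fetch_add(&bb.done, 1);	/* one per "bio" */
		pthread_create(&thr[i], NULL, fake_endio, &bb);
	}

	/* Drop the bias; sleep only if "bios" are still in flight. */
	if (atomic_fetch_sub(&bb.done, 1) != 1) {
		pthread_mutex_lock(&bb.lock);
		while (!bb.complete)
			pthread_cond_wait(&bb.wait, &bb.lock);
		pthread_mutex_unlock(&bb.lock);
	}

	for (i = 0; i < 8; i++)
		pthread_join(thr[i], NULL);
	printf("all fake bios completed\n");
	return 0;
}

The atomic_fetch_sub() return-value checks play the role of atomic_dec_and_test(): only whoever takes the counter from 1 to 0 signals, so the submitter neither sleeps when everything already finished nor misses a wakeup when it hasn't. The caller-visible effect of the patch is the same simplification: blkdev_issue_zeroout() loses its flags argument, and both helpers now always wait for their batch before returning.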