author		Adrian Bunk <bunk@kernel.org>	2008-02-18 07:48:32 -0500
committer	Jens Axboe <jens.axboe@oracle.com>	2008-02-19 04:04:00 -0500
commit		86b6c7a7f78feca58d2d8615e53aee4d59ab9dc6 (patch)
tree		032a54a3493041fa1c31857d23e9184b89545b78 /fs
parent		4c54ac62dceecedd82d4a865017bba0b738e2897 (diff)
fs/block_dev.c: remove #if 0'ed code
Commit b2e895dbd80c420bfc0937c3729b4afe073b3848 #if 0'ed this code stating:

<-- snip -->

    [PATCH] revert blockdev direct io back to 2.6.19 version

    Andrew Vasquez is reporting as-iosched oopses and a 65% throughput
    slowdown due to the recent special-casing of direct-io against
    blockdevs.  We don't know why either of these things are occurring.

    The patch minimally reverts us back to the 2.6.19 code for a 2.6.20
    release.

<-- snip -->

It has since been dead code, and unless someone wants to revive it now
it's time to remove it.

This patch also makes bio_release_pages() static again and removes the
ki_bio_count member from struct kiocb, reverting changes that had been
done for this dead code.

Signed-off-by: Adrian Bunk <bunk@kernel.org>
Signed-off-by: Jens Axboe <axboe@carl.home.kernel.dk>
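For orientation, a minimal sketch of the two interface changes the last paragraph describes. The declarations below are simplified and abridged, not the actual kernel headers; struct kiocb is shown only with the fields the removed code in this diff actually references.

/* fs/bio.c: bio_release_pages() goes back to being file-local. */
static void bio_release_pages(struct bio *bio);

/* struct kiocb, abridged: ki_bio_count existed solely for the #if 0'ed
 * blkdev_direct_IO() path and goes away with it. */
struct kiocb {
	struct file	*ki_filp;
	loff_t		ki_pos;
	size_t		ki_nbytes;	/* the dead code also stashed an error code here */
	size_t		ki_left;	/* bytes left to transfer */
	/* atomic_t	ki_bio_count;	-- removed by this patch */
};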
Diffstat (limited to 'fs')
-rw-r--r--	fs/bio.c	2
-rw-r--r--	fs/block_dev.c	197
2 files changed, 1 insertion(+), 198 deletions(-)
diff --git a/fs/bio.c b/fs/bio.c
index 242e409dab4b..3312fcc3c098 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -903,7 +903,7 @@ void bio_set_pages_dirty(struct bio *bio)
 	}
 }
 
-void bio_release_pages(struct bio *bio)
+static void bio_release_pages(struct bio *bio)
 {
 	struct bio_vec *bvec = bio->bi_io_vec;
 	int i;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 8335f0e1b0fb..7d822fae7765 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -173,203 +173,6 @@ blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 				iov, offset, nr_segs, blkdev_get_blocks, NULL);
 }
 
-#if 0
-static void blk_end_aio(struct bio *bio, int error)
-{
-	struct kiocb *iocb = bio->bi_private;
-	atomic_t *bio_count = &iocb->ki_bio_count;
-
-	if (bio_data_dir(bio) == READ)
-		bio_check_pages_dirty(bio);
-	else {
-		bio_release_pages(bio);
-		bio_put(bio);
-	}
-
-	/* iocb->ki_nbytes stores error code from LLDD */
-	if (error)
-		iocb->ki_nbytes = -EIO;
-
-	if (atomic_dec_and_test(bio_count)) {
-		if ((long)iocb->ki_nbytes < 0)
-			aio_complete(iocb, iocb->ki_nbytes, 0);
-		else
-			aio_complete(iocb, iocb->ki_left, 0);
-	}
-
-	return 0;
-}
-
-#define VEC_SIZE	16
-struct pvec {
-	unsigned short nr;
-	unsigned short idx;
-	struct page *page[VEC_SIZE];
-};
-
-#define PAGES_SPANNED(addr, len)	\
-	(DIV_ROUND_UP((addr) + (len), PAGE_SIZE) - (addr) / PAGE_SIZE);
-
-/*
- * get page pointer for user addr, we internally cache struct page array for
- * (addr, count) range in pvec to avoid frequent call to get_user_pages. If
- * internal page list is exhausted, a batch count of up to VEC_SIZE is used
- * to get next set of page struct.
- */
-static struct page *blk_get_page(unsigned long addr, size_t count, int rw,
-				 struct pvec *pvec)
-{
-	int ret, nr_pages;
-	if (pvec->idx == pvec->nr) {
-		nr_pages = PAGES_SPANNED(addr, count);
-		nr_pages = min(nr_pages, VEC_SIZE);
-		down_read(&current->mm->mmap_sem);
-		ret = get_user_pages(current, current->mm, addr, nr_pages,
-				     rw == READ, 0, pvec->page, NULL);
-		up_read(&current->mm->mmap_sem);
-		if (ret < 0)
-			return ERR_PTR(ret);
-		pvec->nr = ret;
-		pvec->idx = 0;
-	}
-	return pvec->page[pvec->idx++];
-}
-
-/* return a page back to pvec array */
-static void blk_unget_page(struct page *page, struct pvec *pvec)
-{
-	pvec->page[--pvec->idx] = page;
-}
-
-static ssize_t
-blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-		 loff_t pos, unsigned long nr_segs)
-{
-	struct inode *inode = iocb->ki_filp->f_mapping->host;
-	unsigned blkbits = blksize_bits(bdev_hardsect_size(I_BDEV(inode)));
-	unsigned blocksize_mask = (1 << blkbits) - 1;
-	unsigned long seg = 0;	/* iov segment iterator */
-	unsigned long nvec;	/* number of bio vec needed */
-	unsigned long cur_off;	/* offset into current page */
-	unsigned long cur_len;	/* I/O len of current page, up to PAGE_SIZE */
-
-	unsigned long addr;	/* user iovec address */
-	size_t count;		/* user iovec len */
-	size_t nbytes = iocb->ki_nbytes = iocb->ki_left;	/* total xfer size */
-	loff_t size;		/* size of block device */
-	struct bio *bio;
-	atomic_t *bio_count = &iocb->ki_bio_count;
-	struct page *page;
-	struct pvec pvec;
-
-	pvec.nr = 0;
-	pvec.idx = 0;
-
-	if (pos & blocksize_mask)
-		return -EINVAL;
-
-	size = i_size_read(inode);
-	if (pos + nbytes > size) {
-		nbytes = size - pos;
-		iocb->ki_left = nbytes;
-	}
-
-	/*
-	 * check first non-zero iov alignment, the remaining
-	 * iov alignment is checked inside bio loop below.
-	 */
-	do {
-		addr = (unsigned long) iov[seg].iov_base;
-		count = min(iov[seg].iov_len, nbytes);
-		if (addr & blocksize_mask || count & blocksize_mask)
-			return -EINVAL;
-	} while (!count && ++seg < nr_segs);
-	atomic_set(bio_count, 1);
-
-	while (nbytes) {
-		/* roughly estimate number of bio vec needed */
-		nvec = (nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
-		nvec = max(nvec, nr_segs - seg);
-		nvec = min(nvec, (unsigned long) BIO_MAX_PAGES);
-
-		/* bio_alloc should not fail with GFP_KERNEL flag */
-		bio = bio_alloc(GFP_KERNEL, nvec);
-		bio->bi_bdev = I_BDEV(inode);
-		bio->bi_end_io = blk_end_aio;
-		bio->bi_private = iocb;
-		bio->bi_sector = pos >> blkbits;
-same_bio:
-		cur_off = addr & ~PAGE_MASK;
-		cur_len = PAGE_SIZE - cur_off;
-		if (count < cur_len)
-			cur_len = count;
-
-		page = blk_get_page(addr, count, rw, &pvec);
-		if (unlikely(IS_ERR(page)))
-			goto backout;
-
-		if (bio_add_page(bio, page, cur_len, cur_off)) {
-			pos += cur_len;
-			addr += cur_len;
-			count -= cur_len;
-			nbytes -= cur_len;
-
-			if (count)
-				goto same_bio;
-			while (++seg < nr_segs) {
-				addr = (unsigned long) iov[seg].iov_base;
-				count = iov[seg].iov_len;
-				if (!count)
-					continue;
-				if (unlikely(addr & blocksize_mask ||
-					     count & blocksize_mask)) {
-					page = ERR_PTR(-EINVAL);
-					goto backout;
-				}
-				count = min(count, nbytes);
-				goto same_bio;
-			}
-		} else {
-			blk_unget_page(page, &pvec);
-		}
-
-		/* bio is ready, submit it */
-		if (rw == READ)
-			bio_set_pages_dirty(bio);
-		atomic_inc(bio_count);
-		submit_bio(rw, bio);
-	}
-
-completion:
-	iocb->ki_left -= nbytes;
-	nbytes = iocb->ki_left;
-	iocb->ki_pos += nbytes;
-
-	blk_run_address_space(inode->i_mapping);
-	if (atomic_dec_and_test(bio_count))
-		aio_complete(iocb, nbytes, 0);
-
-	return -EIOCBQUEUED;
-
-backout:
-	/*
-	 * back out nbytes count constructed so far for this bio,
-	 * we will throw away current bio.
-	 */
-	nbytes += bio->bi_size;
-	bio_release_pages(bio);
-	bio_put(bio);
-
-	/*
-	 * if no bio was submmitted, return the error code.
-	 * otherwise, proceed with pending I/O completion.
-	 */
-	if (atomic_read(bio_count) == 1)
-		return PTR_ERR(page);
-	goto completion;
-}
-#endif
-
 static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
 {
 	return block_write_full_page(page, blkdev_get_block, wbc);