path: root/block/blk-barrier.c
author     Christoph Hellwig <hch@infradead.org>    2009-09-30 07:52:12 -0400
committer  Jens Axboe <jens.axboe@oracle.com>       2009-10-01 15:15:46 -0400
commit     1122a26f2abe4245ccdaed95ec23f63fe086b332 (patch)
tree       ad06f119f283cf8a6313681055e8132ba2851ddb  /block/blk-barrier.c
parent     3bd0f0c763e497c8674b28e3df2732f48683dabd (diff)
block: use normal I/O path for discard requests
prepare_discard_fn() was being called in a place where memory allocation was effectively impossible. This makes it inappropriate for all but the most trivial translations of Linux's DISCARD operation to the block command set. Additionally, adding a payload there makes the ownership of the bio's backing memory unclear, as it is then allocated by the device driver rather than, as usual, by the submitter.

prepare_discard_fn() is replaced with QUEUE_FLAG_DISCARD, which indicates whether the queue supports discard operations. blkdev_issue_discard() now allocates a one-page, sector-length payload, which is the right thing for the common ATA and SCSI implementations. The mtd implementation of prepare_discard_fn() is replaced with a simple check for the request being a discard.

Largely based on a previous patch from Matthew Wilcox <matthew@wil.cx>, which removed prepare_discard_fn() but did not yet make the payload allocation change.

Signed-off-by: Christoph Hellwig <hch@lst.de>
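To make the shape of the new interface concrete, here is a hedged sketch (not part of this patch) of the two sides it affects: a driver advertising discard support on its queue, and a submitter issuing a discard. The helper names queue_flag_set_unlocked(), blk_queue_discard(), blkdev_issue_discard() and the DISCARD_FL_WAIT flag are from the 2.6.32-era block layer; my_driver_init() and my_discard_range() are hypothetical.

/*
 * Sketch only: assumes the 2.6.32-era block layer API described in the
 * commit message above.
 */
#include <linux/blkdev.h>

/* Driver side: advertise discard support on the request queue. */
static void my_driver_init(struct request_queue *q)
{
	/* Previously the driver would have set q->prepare_discard_fn. */
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}

/* Submitter side: discard a sector range and wait for completion. */
static int my_discard_range(struct block_device *bdev,
			    sector_t sector, sector_t nr_sects)
{
	/*
	 * blkdev_issue_discard() now builds the one-page, sector-sized
	 * payload itself; callers only pass the range, gfp mask and flags.
	 * It returns -EOPNOTSUPP if the queue did not set QUEUE_FLAG_DISCARD.
	 */
	return blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL,
				    DISCARD_FL_WAIT);
}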
Diffstat (limited to 'block/blk-barrier.c')
-rw-r--r--  block/blk-barrier.c  35
1 file changed, 30 insertions, 5 deletions
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 6593ab39cfe9..21f5025c3945 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -350,6 +350,7 @@ static void blkdev_discard_end_io(struct bio *bio, int err)
 
 	if (bio->bi_private)
 		complete(bio->bi_private);
+	__free_page(bio_page(bio));
 
 	bio_put(bio);
 }
@@ -372,26 +373,44 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	struct request_queue *q = bdev_get_queue(bdev);
 	int type = flags & DISCARD_FL_BARRIER ?
 		DISCARD_BARRIER : DISCARD_NOBARRIER;
+	struct bio *bio;
+	struct page *page;
 	int ret = 0;
 
 	if (!q)
 		return -ENXIO;
 
-	if (!q->prepare_discard_fn)
+	if (!blk_queue_discard(q))
 		return -EOPNOTSUPP;
 
 	while (nr_sects && !ret) {
-		struct bio *bio = bio_alloc(gfp_mask, 0);
-		if (!bio)
-			return -ENOMEM;
+		unsigned int sector_size = q->limits.logical_block_size;
 
+		bio = bio_alloc(gfp_mask, 1);
+		if (!bio)
+			goto out;
+		bio->bi_sector = sector;
 		bio->bi_end_io = blkdev_discard_end_io;
 		bio->bi_bdev = bdev;
 		if (flags & DISCARD_FL_WAIT)
 			bio->bi_private = &wait;
 
-		bio->bi_sector = sector;
+		/*
+		 * Add a zeroed one-sector payload as that's what
+		 * our current implementations need.  If we'll ever need
+		 * more the interface will need revisiting.
+		 */
+		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+		if (!page)
+			goto out_free_bio;
+		if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
+			goto out_free_page;
 
+		/*
+		 * And override the bio size - the way discard works we
+		 * touch many more blocks on disk than the actual payload
+		 * length.
+		 */
 		if (nr_sects > queue_max_hw_sectors(q)) {
 			bio->bi_size = queue_max_hw_sectors(q) << 9;
 			nr_sects -= queue_max_hw_sectors(q);
@@ -414,5 +433,11 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		bio_put(bio);
 	}
 	return ret;
+out_free_page:
+	__free_page(page);
+out_free_bio:
+	bio_put(bio);
+out:
+	return -ENOMEM;
 }
 EXPORT_SYMBOL(blkdev_issue_discard);
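For the other side mentioned in the commit message, a request-based driver such as the mtd translation layer now recognises discards by inspecting the request itself instead of implementing prepare_discard_fn(). Below is a hedged sketch under that assumption; my_handle_request() and my_do_discard() are hypothetical, while blk_discard_rq() is the 2.6.32-era helper that tests REQ_DISCARD in req->cmd_flags.

/*
 * Sketch only: not part of this diff, illustrating the driver-side
 * check the commit message describes for mtd-like drivers.
 */
#include <linux/blkdev.h>

static int my_do_discard(struct request *req)
{
	/* device-specific erase/trim of the request's sector range */
	return 0;
}

static int my_handle_request(struct request_queue *q, struct request *req)
{
	/*
	 * The payload attached by blkdev_issue_discard() exists for
	 * drivers that translate the discard into a command carrying a
	 * data buffer (ATA TRIM, SCSI UNMAP/WRITE SAME); drivers that do
	 * not need it can simply act on the request's sector range.
	 */
	if (blk_discard_rq(req))
		return my_do_discard(req);

	/* normal read/write handling would follow here */
	return 0;
}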