author     Dmitry Monakhov <dmonakhov@openvz.org>    2010-04-28 09:55:08 -0400
committer  Jens Axboe <jens.axboe@oracle.com>        2010-04-28 13:47:36 -0400
commit     f31e7e4022841c43c53b847b86b1bf97a08b2c94 (patch)
tree       738821236a4e84f790d597bb2cc04bd812668b6a /block/blk-barrier.c
parent     f17e232e9237c231daf9f0f4b177c61218bcb2e4 (diff)
blkdev: move blkdev_issue helper functions to separate file
Move blkdev_issue_discard from blk-barrier.c because it is not barrier related.
Later the file will be populated by other helpers.

Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
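For orientation only, a minimal sketch of the helper file that would receive the moved
function; the diff below shows only the removal side in blk-barrier.c, so the destination's
name and include list are assumptions, not taken from this commit:

/*
 * Sketch, not part of this commit: a plausible skeleton for the new helper
 * file that receives blkdev_issue_discard() and later blkdev_issue_* helpers.
 * The include list is an assumption; the function body moves over unchanged.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

#include "blk.h"

/* blkdev_issue_discard() definition lands here, still exported as before. */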
Diffstat (limited to 'block/blk-barrier.c')
-rw-r--r--   block/blk-barrier.c   104
1 file changed, 0 insertions(+), 104 deletions(-)
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index f11eec9669e4..0d710c9d403b 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -347,107 +347,3 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
         return ret;
 }
 EXPORT_SYMBOL(blkdev_issue_flush);
-
-static void blkdev_discard_end_io(struct bio *bio, int err)
-{
-        if (err) {
-                if (err == -EOPNOTSUPP)
-                        set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
-                clear_bit(BIO_UPTODATE, &bio->bi_flags);
-        }
-
-        if (bio->bi_private)
-                complete(bio->bi_private);
-        __free_page(bio_page(bio));
-
-        bio_put(bio);
-}
-
-/**
- * blkdev_issue_discard - queue a discard
- * @bdev:      blockdev to issue discard for
- * @sector:    start sector
- * @nr_sects:  number of sectors to discard
- * @gfp_mask:  memory allocation flags (for bio_alloc)
- * @flags:     BLKDEV_IFL_* flags to control behaviour
- *
- * Description:
- *    Issue a discard request for the sectors in question.
- */
-int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
-                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
-{
-        DECLARE_COMPLETION_ONSTACK(wait);
-        struct request_queue *q = bdev_get_queue(bdev);
-        int type = flags & BLKDEV_IFL_BARRIER ?
-                DISCARD_BARRIER : DISCARD_NOBARRIER;
-        struct bio *bio;
-        struct page *page;
-        int ret = 0;
-
-        if (!q)
-                return -ENXIO;
-
-        if (!blk_queue_discard(q))
-                return -EOPNOTSUPP;
-
-        while (nr_sects && !ret) {
-                unsigned int sector_size = q->limits.logical_block_size;
-                unsigned int max_discard_sectors =
-                        min(q->limits.max_discard_sectors, UINT_MAX >> 9);
-
-                bio = bio_alloc(gfp_mask, 1);
-                if (!bio)
-                        goto out;
-                bio->bi_sector = sector;
-                bio->bi_end_io = blkdev_discard_end_io;
-                bio->bi_bdev = bdev;
-                if (flags & BLKDEV_IFL_WAIT)
-                        bio->bi_private = &wait;
-
-                /*
-                 * Add a zeroed one-sector payload as that's what
-                 * our current implementations need.  If we'll ever need
-                 * more the interface will need revisiting.
-                 */
-                page = alloc_page(gfp_mask | __GFP_ZERO);
-                if (!page)
-                        goto out_free_bio;
-                if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
-                        goto out_free_page;
-
-                /*
-                 * And override the bio size - the way discard works we
-                 * touch many more blocks on disk than the actual payload
-                 * length.
-                 */
-                if (nr_sects > max_discard_sectors) {
-                        bio->bi_size = max_discard_sectors << 9;
-                        nr_sects -= max_discard_sectors;
-                        sector += max_discard_sectors;
-                } else {
-                        bio->bi_size = nr_sects << 9;
-                        nr_sects = 0;
-                }
-
-                bio_get(bio);
-                submit_bio(type, bio);
-
-                if (flags & BLKDEV_IFL_WAIT)
-                        wait_for_completion(&wait);
-
-                if (bio_flagged(bio, BIO_EOPNOTSUPP))
-                        ret = -EOPNOTSUPP;
-                else if (!bio_flagged(bio, BIO_UPTODATE))
-                        ret = -EIO;
-                bio_put(bio);
-        }
-        return ret;
-out_free_page:
-        __free_page(page);
-out_free_bio:
-        bio_put(bio);
-out:
-        return -ENOMEM;
-}
-EXPORT_SYMBOL(blkdev_issue_discard);
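Not part of the commit: a hedged usage sketch of the helper removed above, for reference.
The caller name example_trim_range and its fallback policy are hypothetical; the signature
and the BLKDEV_IFL_* flag names come from the code in the hunk:

/*
 * Hypothetical caller sketch (illustrative only): issue a synchronous
 * discard for a sector range and treat "device cannot discard" as a
 * no-op, mirroring the -EOPNOTSUPP handling in blkdev_issue_discard().
 */
static int example_trim_range(struct block_device *bdev,
                              sector_t start, sector_t nr_sects)
{
        int ret;

        /* BLKDEV_IFL_WAIT makes the helper block until the discard bios complete. */
        ret = blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL,
                                   BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
        if (ret == -EOPNOTSUPP)
                return 0;       /* no discard support; nothing to do */

        return ret;
}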