author    Dmitry Monakhov <dmonakhov@openvz.org>    2010-04-28 09:55:08 -0400
committer Jens Axboe <jens.axboe@oracle.com>        2010-04-28 13:47:36 -0400
commit    f31e7e4022841c43c53b847b86b1bf97a08b2c94 (patch)
tree      738821236a4e84f790d597bb2cc04bd812668b6a /block
parent    f17e232e9237c231daf9f0f4b177c61218bcb2e4 (diff)
blkdev: move blkdev_issue helper functions to separate file
Move blkdev_issue_discard from blk-barrier.c because it is not barrier
related. Later the file will be populated by other helpers.

Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
-rw-r--r--  block/Makefile      |   2
-rw-r--r--  block/blk-barrier.c | 104
-rw-r--r--  block/blk-lib.c     | 114
3 files changed, 115 insertions(+), 105 deletions(-)
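For context, the helper being moved is the one callers use to discard (TRIM) a sector range of a block device. A minimal caller sketch, assuming the 2.6.34-era prototype shown in the diff below; example_trim_range() is a hypothetical wrapper and is not part of this patch:

/*
 * Hypothetical caller sketch (not part of this patch): discard a sector
 * range and wait for completion, using the prototype from the diff below.
 */
#include <linux/blkdev.h>

static int example_trim_range(struct block_device *bdev,
			      sector_t start, sector_t nr_sects)
{
	int ret;

	/* Issue the discard and block until all submitted bios complete. */
	ret = blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL,
				   BLKDEV_IFL_WAIT);
	if (ret == -EOPNOTSUPP)
		return 0;	/* device has no discard support; not fatal here */
	return ret;
}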
diff --git a/block/Makefile b/block/Makefile
index cb2d515ebd6e..0bb499a739cd 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -5,7 +5,7 @@
 obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
			blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \
			blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
-			blk-iopoll.o ioctl.o genhd.o scsi_ioctl.o
+			blk-iopoll.o blk-lib.o ioctl.o genhd.o scsi_ioctl.o
 
 obj-$(CONFIG_BLK_DEV_BSG)	+= bsg.o
 obj-$(CONFIG_BLK_CGROUP)	+= blk-cgroup.o
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index f11eec9669e4..0d710c9d403b 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -347,107 +347,3 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 	return ret;
 }
 EXPORT_SYMBOL(blkdev_issue_flush);
-
-static void blkdev_discard_end_io(struct bio *bio, int err)
-{
-	if (err) {
-		if (err == -EOPNOTSUPP)
-			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
-		clear_bit(BIO_UPTODATE, &bio->bi_flags);
-	}
-
-	if (bio->bi_private)
-		complete(bio->bi_private);
-	__free_page(bio_page(bio));
-
-	bio_put(bio);
-}
-
-/**
- * blkdev_issue_discard - queue a discard
- * @bdev:	blockdev to issue discard for
- * @sector:	start sector
- * @nr_sects:	number of sectors to discard
- * @gfp_mask:	memory allocation flags (for bio_alloc)
- * @flags:	BLKDEV_IFL_* flags to control behaviour
- *
- * Description:
- *    Issue a discard request for the sectors in question.
- */
-int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
-		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
-{
-	DECLARE_COMPLETION_ONSTACK(wait);
-	struct request_queue *q = bdev_get_queue(bdev);
-	int type = flags & BLKDEV_IFL_BARRIER ?
-		DISCARD_BARRIER : DISCARD_NOBARRIER;
-	struct bio *bio;
-	struct page *page;
-	int ret = 0;
-
-	if (!q)
-		return -ENXIO;
-
-	if (!blk_queue_discard(q))
-		return -EOPNOTSUPP;
-
-	while (nr_sects && !ret) {
-		unsigned int sector_size = q->limits.logical_block_size;
-		unsigned int max_discard_sectors =
-			min(q->limits.max_discard_sectors, UINT_MAX >> 9);
-
-		bio = bio_alloc(gfp_mask, 1);
-		if (!bio)
-			goto out;
-		bio->bi_sector = sector;
-		bio->bi_end_io = blkdev_discard_end_io;
-		bio->bi_bdev = bdev;
-		if (flags & BLKDEV_IFL_WAIT)
-			bio->bi_private = &wait;
-
-		/*
-		 * Add a zeroed one-sector payload as that's what
-		 * our current implementations need.  If we'll ever need
-		 * more the interface will need revisiting.
-		 */
-		page = alloc_page(gfp_mask | __GFP_ZERO);
-		if (!page)
-			goto out_free_bio;
-		if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
-			goto out_free_page;
-
-		/*
-		 * And override the bio size - the way discard works we
-		 * touch many more blocks on disk than the actual payload
-		 * length.
-		 */
-		if (nr_sects > max_discard_sectors) {
-			bio->bi_size = max_discard_sectors << 9;
-			nr_sects -= max_discard_sectors;
-			sector += max_discard_sectors;
-		} else {
-			bio->bi_size = nr_sects << 9;
-			nr_sects = 0;
-		}
-
-		bio_get(bio);
-		submit_bio(type, bio);
-
-		if (flags & BLKDEV_IFL_WAIT)
-			wait_for_completion(&wait);
-
-		if (bio_flagged(bio, BIO_EOPNOTSUPP))
-			ret = -EOPNOTSUPP;
-		else if (!bio_flagged(bio, BIO_UPTODATE))
-			ret = -EIO;
-		bio_put(bio);
-	}
-	return ret;
-out_free_page:
-	__free_page(page);
-out_free_bio:
-	bio_put(bio);
-out:
-	return -ENOMEM;
-}
-EXPORT_SYMBOL(blkdev_issue_discard);
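The code removed above (and re-added verbatim in blk-lib.c below) follows the standard synchronous-bio idiom: the submitter parks on an on-stack completion that the bi_end_io callback fires. A generic sketch of just that idiom, under the bio API of this kernel version; my_end_io() and my_submit_and_wait() are hypothetical names, not part of the patch:

#include <linux/bio.h>
#include <linux/completion.h>

/* end_io callback: wake whoever is waiting on the completion. */
static void my_end_io(struct bio *bio, int err)
{
	if (bio->bi_private)
		complete(bio->bi_private);
	bio_put(bio);
}

/* Submit an already-prepared bio and wait for it to finish. */
static void my_submit_and_wait(int rw, struct bio *bio)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	bio->bi_end_io = my_end_io;
	bio->bi_private = &wait;

	bio_get(bio);			/* keep the bio alive across end_io */
	submit_bio(rw, bio);
	wait_for_completion(&wait);
	bio_put(bio);			/* drop the extra reference */
}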
diff --git a/block/blk-lib.c b/block/blk-lib.c
new file mode 100644
index 000000000000..0dc438812d81
--- /dev/null
+++ b/block/blk-lib.c
@@ -0,0 +1,114 @@
+/*
+ * Functions related to generic helpers functions
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+
+#include "blk.h"
+
+static void blkdev_discard_end_io(struct bio *bio, int err)
+{
+	if (err) {
+		if (err == -EOPNOTSUPP)
+			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
+		clear_bit(BIO_UPTODATE, &bio->bi_flags);
+	}
+
+	if (bio->bi_private)
+		complete(bio->bi_private);
+	__free_page(bio_page(bio));
+
+	bio_put(bio);
+}
+
+/**
+ * blkdev_issue_discard - queue a discard
+ * @bdev:	blockdev to issue discard for
+ * @sector:	start sector
+ * @nr_sects:	number of sectors to discard
+ * @gfp_mask:	memory allocation flags (for bio_alloc)
+ * @flags:	BLKDEV_IFL_* flags to control behaviour
+ *
+ * Description:
+ *    Issue a discard request for the sectors in question.
+ */
+int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+{
+	DECLARE_COMPLETION_ONSTACK(wait);
+	struct request_queue *q = bdev_get_queue(bdev);
+	int type = flags & BLKDEV_IFL_BARRIER ?
+		DISCARD_BARRIER : DISCARD_NOBARRIER;
+	struct bio *bio;
+	struct page *page;
+	int ret = 0;
+
+	if (!q)
+		return -ENXIO;
+
+	if (!blk_queue_discard(q))
+		return -EOPNOTSUPP;
+
+	while (nr_sects && !ret) {
+		unsigned int sector_size = q->limits.logical_block_size;
+		unsigned int max_discard_sectors =
+			min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+
+		bio = bio_alloc(gfp_mask, 1);
+		if (!bio)
+			goto out;
+		bio->bi_sector = sector;
+		bio->bi_end_io = blkdev_discard_end_io;
+		bio->bi_bdev = bdev;
+		if (flags & BLKDEV_IFL_WAIT)
+			bio->bi_private = &wait;
+
+		/*
+		 * Add a zeroed one-sector payload as that's what
+		 * our current implementations need.  If we'll ever need
+		 * more the interface will need revisiting.
+		 */
+		page = alloc_page(gfp_mask | __GFP_ZERO);
+		if (!page)
+			goto out_free_bio;
+		if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
+			goto out_free_page;
+
+		/*
+		 * And override the bio size - the way discard works we
+		 * touch many more blocks on disk than the actual payload
+		 * length.
+		 */
+		if (nr_sects > max_discard_sectors) {
+			bio->bi_size = max_discard_sectors << 9;
+			nr_sects -= max_discard_sectors;
+			sector += max_discard_sectors;
+		} else {
+			bio->bi_size = nr_sects << 9;
+			nr_sects = 0;
+		}
+
+		bio_get(bio);
+		submit_bio(type, bio);
+
+		if (flags & BLKDEV_IFL_WAIT)
+			wait_for_completion(&wait);
+
+		if (bio_flagged(bio, BIO_EOPNOTSUPP))
+			ret = -EOPNOTSUPP;
+		else if (!bio_flagged(bio, BIO_UPTODATE))
+			ret = -EIO;
+		bio_put(bio);
+	}
+	return ret;
+out_free_page:
+	__free_page(page);
+out_free_bio:
+	bio_put(bio);
+out:
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(blkdev_issue_discard);
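The loop in blkdev_issue_discard() splits a large request so each bio covers at most max_discard_sectors sectors (itself capped at UINT_MAX >> 9, since bi_size holds a byte count in an unsigned int). A standalone userspace model of that arithmetic, purely illustrative, with made-up example values:

/* Userspace model of the splitting arithmetic in blkdev_issue_discard()
 * (illustration only, not kernel code). */
#include <stdio.h>

int main(void)
{
	unsigned long long sector = 2048, nr_sects = 200000;	/* example range */
	unsigned int max_discard_sectors = 65535;		/* assumed queue limit */

	while (nr_sects) {
		unsigned long long chunk = nr_sects > max_discard_sectors ?
					   max_discard_sectors : nr_sects;

		/* Each pass corresponds to one discard bio in the kernel loop. */
		printf("bio: sector %llu, %llu sectors, bi_size %llu bytes\n",
		       sector, chunk, chunk << 9);
		sector += chunk;
		nr_sects -= chunk;
	}
	return 0;
}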