about summary refs log tree commit diff stats
path: root/block
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2010-06-18 10:59:42 -0400
committerJens Axboe <jaxboe@fusionio.com>2010-08-07 12:23:08 -0400
commit66ac0280197981f88774e74b60c8e5f9f07c1dba (patch)
treed093ce493146779926df88b5831805c6f9ee14e1 /block
parent082439004b31adc146e96e5f1c574dd2b57dcd93 (diff)
block: don't allocate a payload for discard request
Allocating a fixed payload for discard requests always was a horrible hack, and it's now coming to bite us when adding support for discard in DM/MD. So change the code to leave the allocation of a payload to the low-level driver. Unfortunately that means we'll need another hack, which allows us to update the various block layer length fields indicating that we have a payload. Instead of hiding this in sd.c, which we already partially do for UNMAP support, add a documented helper in the core block layer for it. Signed-off-by: Christoph Hellwig <hch@lst.de> Acked-by: Mike Snitzer <snitzer@redhat.com> Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'block')
-rw-r--r--block/blk-core.c32
-rw-r--r--block/blk-lib.c33
2 files changed, 38 insertions, 27 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 66c3cfe94d0..3531d8e1da0 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1135,6 +1135,38 @@ void blk_put_request(struct request *req)
1135} 1135}
1136EXPORT_SYMBOL(blk_put_request); 1136EXPORT_SYMBOL(blk_put_request);
1137 1137
1138/**
1139 * blk_add_request_payload - add a payload to a request
1140 * @rq: request to update
1141 * @page: page backing the payload
1142 * @len: length of the payload.
1143 *
1144 * This allows to later add a payload to an already submitted request by
1145 * a block driver. The driver needs to take care of freeing the payload
1146 * itself.
1147 *
1148 * Note that this is a quite horrible hack and nothing but handling of
1149 * discard requests should ever use it.
1150 */
1151void blk_add_request_payload(struct request *rq, struct page *page,
1152 unsigned int len)
1153{
1154 struct bio *bio = rq->bio;
1155
1156 bio->bi_io_vec->bv_page = page;
1157 bio->bi_io_vec->bv_offset = 0;
1158 bio->bi_io_vec->bv_len = len;
1159
1160 bio->bi_size = len;
1161 bio->bi_vcnt = 1;
1162 bio->bi_phys_segments = 1;
1163
1164 rq->__data_len = rq->resid_len = len;
1165 rq->nr_phys_segments = 1;
1166 rq->buffer = bio_data(bio);
1167}
1168EXPORT_SYMBOL_GPL(blk_add_request_payload);
1169
1138void init_request_from_bio(struct request *req, struct bio *bio) 1170void init_request_from_bio(struct request *req, struct bio *bio)
1139{ 1171{
1140 req->cpu = bio->bi_comp_cpu; 1172 req->cpu = bio->bi_comp_cpu;
diff --git a/block/blk-lib.c b/block/blk-lib.c
index d0216b9f22d..e16185b0d8e 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -19,7 +19,6 @@ static void blkdev_discard_end_io(struct bio *bio, int err)
19 19
20 if (bio->bi_private) 20 if (bio->bi_private)
21 complete(bio->bi_private); 21 complete(bio->bi_private);
22 __free_page(bio_page(bio));
23 22
24 bio_put(bio); 23 bio_put(bio);
25} 24}
@@ -43,7 +42,6 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
43 int type = flags & BLKDEV_IFL_BARRIER ? 42 int type = flags & BLKDEV_IFL_BARRIER ?
44 DISCARD_BARRIER : DISCARD_NOBARRIER; 43 DISCARD_BARRIER : DISCARD_NOBARRIER;
45 struct bio *bio; 44 struct bio *bio;
46 struct page *page;
47 int ret = 0; 45 int ret = 0;
48 46
49 if (!q) 47 if (!q)
@@ -53,35 +51,21 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
53 return -EOPNOTSUPP; 51 return -EOPNOTSUPP;
54 52
55 while (nr_sects && !ret) { 53 while (nr_sects && !ret) {
56 unsigned int sector_size = q->limits.logical_block_size;
57 unsigned int max_discard_sectors = 54 unsigned int max_discard_sectors =
58 min(q->limits.max_discard_sectors, UINT_MAX >> 9); 55 min(q->limits.max_discard_sectors, UINT_MAX >> 9);
59 56
60 bio = bio_alloc(gfp_mask, 1); 57 bio = bio_alloc(gfp_mask, 1);
61 if (!bio) 58 if (!bio) {
62 goto out; 59 ret = -ENOMEM;
60 break;
61 }
62
63 bio->bi_sector = sector; 63 bio->bi_sector = sector;
64 bio->bi_end_io = blkdev_discard_end_io; 64 bio->bi_end_io = blkdev_discard_end_io;
65 bio->bi_bdev = bdev; 65 bio->bi_bdev = bdev;
66 if (flags & BLKDEV_IFL_WAIT) 66 if (flags & BLKDEV_IFL_WAIT)
67 bio->bi_private = &wait; 67 bio->bi_private = &wait;
68 68
69 /*
70 * Add a zeroed one-sector payload as that's what
71 * our current implementations need. If we'll ever need
72 * more the interface will need revisiting.
73 */
74 page = alloc_page(gfp_mask | __GFP_ZERO);
75 if (!page)
76 goto out_free_bio;
77 if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
78 goto out_free_page;
79
80 /*
81 * And override the bio size - the way discard works we
82 * touch many more blocks on disk than the actual payload
83 * length.
84 */
85 if (nr_sects > max_discard_sectors) { 69 if (nr_sects > max_discard_sectors) {
86 bio->bi_size = max_discard_sectors << 9; 70 bio->bi_size = max_discard_sectors << 9;
87 nr_sects -= max_discard_sectors; 71 nr_sects -= max_discard_sectors;
@@ -103,13 +87,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
103 ret = -EIO; 87 ret = -EIO;
104 bio_put(bio); 88 bio_put(bio);
105 } 89 }
90
106 return ret; 91 return ret;
107out_free_page:
108 __free_page(page);
109out_free_bio:
110 bio_put(bio);
111out:
112 return -ENOMEM;
113} 92}
114EXPORT_SYMBOL(blkdev_issue_discard); 93EXPORT_SYMBOL(blkdev_issue_discard);
115 94