diff options
author | Jens Axboe <axboe@suse.de> | 2005-06-20 08:06:01 -0400 |
---|---|---|
committer | Jens Axboe <axboe@suse.de> | 2005-06-20 08:06:01 -0400 |
commit | dd1cab95f356f1395278633565f198463cf6bd24 (patch) | |
tree | ddf12e2fad7c0df0656a10ee6aac3f12a04dbed8 /drivers/block | |
parent | b823825e8e09aac6dc1ca362cd5639a87329d636 (diff) |
[PATCH] Cleanup blk_rq_map_* interfaces
Change the blk_rq_map_user() and blk_rq_map_kern() interfaces to require
a previously allocated request to be passed in. This is more efficient
for multiple iterations of mapping data to the same request, and it also
makes for a much nicer API.
Signed-off-by: Jens Axboe <axboe@suse.de>
Diffstat (limited to 'drivers/block')
-rw-r--r-- | drivers/block/ll_rw_blk.c | 68 | ||||
-rw-r--r-- | drivers/block/scsi_ioctl.c | 23 |
2 files changed, 41 insertions(+), 50 deletions(-)
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c index 1471aca6fa18..42c4f3651cf8 100644 --- a/drivers/block/ll_rw_blk.c +++ b/drivers/block/ll_rw_blk.c | |||
@@ -2107,21 +2107,19 @@ EXPORT_SYMBOL(blk_insert_request); | |||
2107 | * original bio must be passed back in to blk_rq_unmap_user() for proper | 2107 | * original bio must be passed back in to blk_rq_unmap_user() for proper |
2108 | * unmapping. | 2108 | * unmapping. |
2109 | */ | 2109 | */ |
2110 | struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf, | 2110 | int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf, |
2111 | unsigned int len) | 2111 | unsigned int len) |
2112 | { | 2112 | { |
2113 | unsigned long uaddr; | 2113 | unsigned long uaddr; |
2114 | struct request *rq; | ||
2115 | struct bio *bio; | 2114 | struct bio *bio; |
2115 | int reading; | ||
2116 | 2116 | ||
2117 | if (len > (q->max_sectors << 9)) | 2117 | if (len > (q->max_sectors << 9)) |
2118 | return ERR_PTR(-EINVAL); | 2118 | return -EINVAL; |
2119 | if ((!len && ubuf) || (len && !ubuf)) | 2119 | if (!len || !ubuf) |
2120 | return ERR_PTR(-EINVAL); | 2120 | return -EINVAL; |
2121 | 2121 | ||
2122 | rq = blk_get_request(q, rw, __GFP_WAIT); | 2122 | reading = rq_data_dir(rq) == READ; |
2123 | if (!rq) | ||
2124 | return ERR_PTR(-ENOMEM); | ||
2125 | 2123 | ||
2126 | /* | 2124 | /* |
2127 | * if alignment requirement is satisfied, map in user pages for | 2125 | * if alignment requirement is satisfied, map in user pages for |
@@ -2129,9 +2127,9 @@ struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf, | |||
2129 | */ | 2127 | */ |
2130 | uaddr = (unsigned long) ubuf; | 2128 | uaddr = (unsigned long) ubuf; |
2131 | if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q))) | 2129 | if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q))) |
2132 | bio = bio_map_user(q, NULL, uaddr, len, rw == READ); | 2130 | bio = bio_map_user(q, NULL, uaddr, len, reading); |
2133 | else | 2131 | else |
2134 | bio = bio_copy_user(q, uaddr, len, rw == READ); | 2132 | bio = bio_copy_user(q, uaddr, len, reading); |
2135 | 2133 | ||
2136 | if (!IS_ERR(bio)) { | 2134 | if (!IS_ERR(bio)) { |
2137 | rq->bio = rq->biotail = bio; | 2135 | rq->bio = rq->biotail = bio; |
@@ -2139,14 +2137,13 @@ struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf, | |||
2139 | 2137 | ||
2140 | rq->buffer = rq->data = NULL; | 2138 | rq->buffer = rq->data = NULL; |
2141 | rq->data_len = len; | 2139 | rq->data_len = len; |
2142 | return rq; | 2140 | return 0; |
2143 | } | 2141 | } |
2144 | 2142 | ||
2145 | /* | 2143 | /* |
2146 | * bio is the err-ptr | 2144 | * bio is the err-ptr |
2147 | */ | 2145 | */ |
2148 | blk_put_request(rq); | 2146 | return PTR_ERR(bio); |
2149 | return (struct request *) bio; | ||
2150 | } | 2147 | } |
2151 | 2148 | ||
2152 | EXPORT_SYMBOL(blk_rq_map_user); | 2149 | EXPORT_SYMBOL(blk_rq_map_user); |
@@ -2160,7 +2157,7 @@ EXPORT_SYMBOL(blk_rq_map_user); | |||
2160 | * Description: | 2157 | * Description: |
2161 | * Unmap a request previously mapped by blk_rq_map_user(). | 2158 | * Unmap a request previously mapped by blk_rq_map_user(). |
2162 | */ | 2159 | */ |
2163 | int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen) | 2160 | int blk_rq_unmap_user(struct bio *bio, unsigned int ulen) |
2164 | { | 2161 | { |
2165 | int ret = 0; | 2162 | int ret = 0; |
2166 | 2163 | ||
@@ -2171,8 +2168,7 @@ int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen) | |||
2171 | ret = bio_uncopy_user(bio); | 2168 | ret = bio_uncopy_user(bio); |
2172 | } | 2169 | } |
2173 | 2170 | ||
2174 | blk_put_request(rq); | 2171 | return 0; |
2175 | return ret; | ||
2176 | } | 2172 | } |
2177 | 2173 | ||
2178 | EXPORT_SYMBOL(blk_rq_unmap_user); | 2174 | EXPORT_SYMBOL(blk_rq_unmap_user); |
@@ -2184,39 +2180,29 @@ EXPORT_SYMBOL(blk_rq_unmap_user); | |||
2184 | * @kbuf: the kernel buffer | 2180 | * @kbuf: the kernel buffer |
2185 | * @len: length of user data | 2181 | * @len: length of user data |
2186 | */ | 2182 | */ |
2187 | struct request *blk_rq_map_kern(request_queue_t *q, int rw, void *kbuf, | 2183 | int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf, |
2188 | unsigned int len, unsigned int gfp_mask) | 2184 | unsigned int len, unsigned int gfp_mask) |
2189 | { | 2185 | { |
2190 | struct request *rq; | ||
2191 | struct bio *bio; | 2186 | struct bio *bio; |
2192 | 2187 | ||
2193 | if (len > (q->max_sectors << 9)) | 2188 | if (len > (q->max_sectors << 9)) |
2194 | return ERR_PTR(-EINVAL); | 2189 | return -EINVAL; |
2195 | if ((!len && kbuf) || (len && !kbuf)) | 2190 | if (!len || !kbuf) |
2196 | return ERR_PTR(-EINVAL); | 2191 | return -EINVAL; |
2197 | |||
2198 | rq = blk_get_request(q, rw, gfp_mask); | ||
2199 | if (!rq) | ||
2200 | return ERR_PTR(-ENOMEM); | ||
2201 | 2192 | ||
2202 | bio = bio_map_kern(q, kbuf, len, gfp_mask); | 2193 | bio = bio_map_kern(q, kbuf, len, gfp_mask); |
2203 | if (!IS_ERR(bio)) { | 2194 | if (IS_ERR(bio)) |
2204 | if (rw) | 2195 | return PTR_ERR(bio); |
2205 | bio->bi_rw |= (1 << BIO_RW); | ||
2206 | 2196 | ||
2207 | rq->bio = rq->biotail = bio; | 2197 | if (rq_data_dir(rq) == WRITE) |
2208 | blk_rq_bio_prep(q, rq, bio); | 2198 | bio->bi_rw |= (1 << BIO_RW); |
2209 | 2199 | ||
2210 | rq->buffer = rq->data = NULL; | 2200 | rq->bio = rq->biotail = bio; |
2211 | rq->data_len = len; | 2201 | blk_rq_bio_prep(q, rq, bio); |
2212 | return rq; | ||
2213 | } | ||
2214 | 2202 | ||
2215 | /* | 2203 | rq->buffer = rq->data = NULL; |
2216 | * bio is the err-ptr | 2204 | rq->data_len = len; |
2217 | */ | 2205 | return 0; |
2218 | blk_put_request(rq); | ||
2219 | return (struct request *) bio; | ||
2220 | } | 2206 | } |
2221 | 2207 | ||
2222 | EXPORT_SYMBOL(blk_rq_map_kern); | 2208 | EXPORT_SYMBOL(blk_rq_map_kern); |
diff --git a/drivers/block/scsi_ioctl.c b/drivers/block/scsi_ioctl.c index 681871ca5d60..93c4ca874be3 100644 --- a/drivers/block/scsi_ioctl.c +++ b/drivers/block/scsi_ioctl.c | |||
@@ -216,7 +216,7 @@ static int sg_io(struct file *file, request_queue_t *q, | |||
216 | struct gendisk *bd_disk, struct sg_io_hdr *hdr) | 216 | struct gendisk *bd_disk, struct sg_io_hdr *hdr) |
217 | { | 217 | { |
218 | unsigned long start_time; | 218 | unsigned long start_time; |
219 | int reading, writing; | 219 | int reading, writing, ret; |
220 | struct request *rq; | 220 | struct request *rq; |
221 | struct bio *bio; | 221 | struct bio *bio; |
222 | char sense[SCSI_SENSE_BUFFERSIZE]; | 222 | char sense[SCSI_SENSE_BUFFERSIZE]; |
@@ -255,14 +255,17 @@ static int sg_io(struct file *file, request_queue_t *q, | |||
255 | reading = 1; | 255 | reading = 1; |
256 | break; | 256 | break; |
257 | } | 257 | } |
258 | } | ||
258 | 259 | ||
259 | rq = blk_rq_map_user(q, writing ? WRITE : READ, hdr->dxferp, | 260 | rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL); |
260 | hdr->dxfer_len); | 261 | if (!rq) |
262 | return -ENOMEM; | ||
261 | 263 | ||
262 | if (IS_ERR(rq)) | 264 | if (reading || writing) { |
263 | return PTR_ERR(rq); | 265 | ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len); |
264 | } else | 266 | if (ret) |
265 | rq = blk_get_request(q, READ, __GFP_WAIT); | 267 | goto out; |
268 | } | ||
266 | 269 | ||
267 | /* | 270 | /* |
268 | * fill in request structure | 271 | * fill in request structure |
@@ -321,11 +324,13 @@ static int sg_io(struct file *file, request_queue_t *q, | |||
321 | } | 324 | } |
322 | 325 | ||
323 | if (blk_rq_unmap_user(rq, bio, hdr->dxfer_len)) | 326 | if (blk_rq_unmap_user(rq, bio, hdr->dxfer_len)) |
324 | return -EFAULT; | 327 | ret = -EFAULT; |
325 | 328 | ||
326 | /* may not have succeeded, but output values written to control | 329 | /* may not have succeeded, but output values written to control |
327 | * structure (struct sg_io_hdr). */ | 330 | * structure (struct sg_io_hdr). */ |
328 | return 0; | 331 | out: |
332 | blk_put_request(rq); | ||
333 | return ret; | ||
329 | } | 334 | } |
330 | 335 | ||
331 | #define OMAX_SB_LEN 16 /* For backward compatibility */ | 336 | #define OMAX_SB_LEN 16 /* For backward compatibility */ |