author     Jens Axboe <axboe@suse.de>    2005-06-20 08:06:01 -0400
committer  Jens Axboe <axboe@suse.de>    2005-06-20 08:06:01 -0400
commit     dd1cab95f356f1395278633565f198463cf6bd24 (patch)
tree       ddf12e2fad7c0df0656a10ee6aac3f12a04dbed8 /drivers/block/ll_rw_blk.c
parent     b823825e8e09aac6dc1ca362cd5639a87329d636 (diff)
[PATCH] Cleanup blk_rq_map_* interfaces
Change the blk_rq_map_user() and blk_rq_map_kern() interfaces to require a previously allocated request to be passed in. This is more efficient when mapping data into the same request over multiple iterations, and it also makes for a much nicer API.

Signed-off-by: Jens Axboe <axboe@suse.de>
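The practical effect on callers is easiest to see in code. The sketch below is illustrative only, not part of this patch: do_mapped_io() is an invented name, blk_execute_rq() is assumed as the dispatch helper of the era, and command setup is abbreviated. Note the one subtlety the new interface brings: the caller must save rq->bio before executing the request, because blk_rq_unmap_user() wants the original bio passed back in.

static int do_mapped_io(request_queue_t *q, struct gendisk *disk,
                        void __user *ubuf, unsigned int len, int writing)
{
        struct request *rq;
        struct bio *bio;
        int ret;

        /* the caller, not blk_rq_map_user(), now allocates the request */
        rq = blk_get_request(q, writing ? WRITE : READ, __GFP_WAIT);
        if (!rq)
                return -ENOMEM;

        /* map (or bounce-copy) the user buffer into the request */
        ret = blk_rq_map_user(q, rq, ubuf, len);
        if (ret)
                goto out;

        /* save the original bio for blk_rq_unmap_user() */
        bio = rq->bio;

        ret = blk_execute_rq(q, disk, rq);      /* assumed dispatch helper */

        blk_rq_unmap_user(bio, len);
out:
        blk_put_request(rq);
        return ret;
}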
Diffstat (limited to 'drivers/block/ll_rw_blk.c')
-rw-r--r--  drivers/block/ll_rw_blk.c | 68
1 file changed, 27 insertions(+), 41 deletions(-)
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 1471aca6fa18..42c4f3651cf8 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -2107,21 +2107,19 @@ EXPORT_SYMBOL(blk_insert_request);
  * original bio must be passed back in to blk_rq_unmap_user() for proper
  * unmapping.
  */
-struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
-                                unsigned int len)
+int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
+                    unsigned int len)
 {
         unsigned long uaddr;
-        struct request *rq;
         struct bio *bio;
+        int reading;
 
         if (len > (q->max_sectors << 9))
-                return ERR_PTR(-EINVAL);
-        if ((!len && ubuf) || (len && !ubuf))
-                return ERR_PTR(-EINVAL);
+                return -EINVAL;
+        if (!len || !ubuf)
+                return -EINVAL;
 
-        rq = blk_get_request(q, rw, __GFP_WAIT);
-        if (!rq)
-                return ERR_PTR(-ENOMEM);
+        reading = rq_data_dir(rq) == READ;
 
         /*
          * if alignment requirement is satisfied, map in user pages for
@@ -2129,9 +2127,9 @@ struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
          */
         uaddr = (unsigned long) ubuf;
         if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
-                bio = bio_map_user(q, NULL, uaddr, len, rw == READ);
+                bio = bio_map_user(q, NULL, uaddr, len, reading);
         else
-                bio = bio_copy_user(q, uaddr, len, rw == READ);
+                bio = bio_copy_user(q, uaddr, len, reading);
 
         if (!IS_ERR(bio)) {
                 rq->bio = rq->biotail = bio;
@@ -2139,14 +2137,13 @@ struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
 
                 rq->buffer = rq->data = NULL;
                 rq->data_len = len;
-                return rq;
+                return 0;
         }
 
         /*
          * bio is the err-ptr
          */
-        blk_put_request(rq);
-        return (struct request *) bio;
+        return PTR_ERR(bio);
 }
 
 EXPORT_SYMBOL(blk_rq_map_user);
@@ -2160,7 +2157,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
  * Description:
  *    Unmap a request previously mapped by blk_rq_map_user().
  */
-int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen)
+int blk_rq_unmap_user(struct bio *bio, unsigned int ulen)
 {
         int ret = 0;
 
@@ -2171,8 +2168,7 @@ int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen)
                 ret = bio_uncopy_user(bio);
         }
 
-        blk_put_request(rq);
-        return ret;
+        return 0;
 }
 
 EXPORT_SYMBOL(blk_rq_unmap_user);
@@ -2184,39 +2180,29 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
  * @kbuf:        the kernel buffer
  * @len:         length of user data
  */
-struct request *blk_rq_map_kern(request_queue_t *q, int rw, void *kbuf,
-                                unsigned int len, unsigned int gfp_mask)
+int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
+                    unsigned int len, unsigned int gfp_mask)
 {
-        struct request *rq;
         struct bio *bio;
 
         if (len > (q->max_sectors << 9))
-                return ERR_PTR(-EINVAL);
-        if ((!len && kbuf) || (len && !kbuf))
-                return ERR_PTR(-EINVAL);
-
-        rq = blk_get_request(q, rw, gfp_mask);
-        if (!rq)
-                return ERR_PTR(-ENOMEM);
+                return -EINVAL;
+        if (!len || !kbuf)
+                return -EINVAL;
 
         bio = bio_map_kern(q, kbuf, len, gfp_mask);
-        if (!IS_ERR(bio)) {
-                if (rw)
-                        bio->bi_rw |= (1 << BIO_RW);
+        if (IS_ERR(bio))
+                return PTR_ERR(bio);
 
-                rq->bio = rq->biotail = bio;
-                blk_rq_bio_prep(q, rq, bio);
+        if (rq_data_dir(rq) == WRITE)
+                bio->bi_rw |= (1 << BIO_RW);
 
-                rq->buffer = rq->data = NULL;
-                rq->data_len = len;
-                return rq;
-        }
+        rq->bio = rq->biotail = bio;
+        blk_rq_bio_prep(q, rq, bio);
 
-        /*
-         * bio is the err-ptr
-         */
-        blk_put_request(rq);
-        return (struct request *) bio;
+        rq->buffer = rq->data = NULL;
+        rq->data_len = len;
+        return 0;
 }
 
 EXPORT_SYMBOL(blk_rq_map_kern);
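Seen from a caller, the kernel-buffer variant follows the same pattern. Again a hedged sketch under the same assumptions (send_kern_buf() is an invented name, blk_execute_rq() an assumed helper), showing that the data direction now comes from the request itself rather than from a separate rw argument:

static int send_kern_buf(request_queue_t *q, struct gendisk *disk,
                         void *buf, unsigned int len, int writing)
{
        struct request *rq;
        int ret;

        /* direction is taken from the request, so fix it at allocation time */
        rq = blk_get_request(q, writing ? WRITE : READ, __GFP_WAIT);
        if (!rq)
                return -ENOMEM;

        /* blk_rq_map_kern() now returns 0 or a negative errno, not a request */
        ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
        if (!ret)
                ret = blk_execute_rq(q, disk, rq);      /* assumed dispatch helper */

        blk_put_request(rq);
        return ret;
}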