author		Linus Torvalds <torvalds@g5.osdl.org>	2005-09-07 20:31:27 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-09-07 20:31:27 -0400
commit		0481990b758628e12f4b0a9e15094e70cefc7cd1
tree		67a4b4b7acc6a688b87ef2a2d3ec0e296e6e480c	/drivers/block/ll_rw_blk.c
parent		db400b3c4ee89d384d9163836a55577abdae772d
parent		17fa53da1239b8712c5cebbd72a74c713b6c2db9
Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-for-linus-2.6
Diffstat (limited to 'drivers/block/ll_rw_blk.c')
-rw-r--r--	drivers/block/ll_rw_blk.c	192
1 file changed, 163 insertions(+), 29 deletions(-)
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index b4b17958d10..483d71b10cf 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -284,6 +284,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
 	rq->special = NULL;
 	rq->data_len = 0;
 	rq->data = NULL;
+	rq->nr_phys_segments = 0;
 	rq->sense = NULL;
 	rq->end_io = NULL;
 	rq->end_io_data = NULL;
@@ -2115,7 +2116,7 @@ EXPORT_SYMBOL(blk_insert_request);
 /**
  * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
  * @q:		request queue where request should be inserted
- * @rw:		READ or WRITE data
+ * @rq:		request structure to fill
  * @ubuf:	the user buffer
  * @len:	length of user data
  *
@@ -2132,21 +2133,19 @@ EXPORT_SYMBOL(blk_insert_request);
  *    original bio must be passed back in to blk_rq_unmap_user() for proper
  *    unmapping.
  */
-struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
-				unsigned int len)
+int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
+		    unsigned int len)
 {
 	unsigned long uaddr;
-	struct request *rq;
 	struct bio *bio;
+	int reading;
 
 	if (len > (q->max_sectors << 9))
-		return ERR_PTR(-EINVAL);
-	if ((!len && ubuf) || (len && !ubuf))
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
+	if (!len || !ubuf)
+		return -EINVAL;
 
-	rq = blk_get_request(q, rw, __GFP_WAIT);
-	if (!rq)
-		return ERR_PTR(-ENOMEM);
+	reading = rq_data_dir(rq) == READ;
 
 	/*
 	 * if alignment requirement is satisfied, map in user pages for
@@ -2154,9 +2153,9 @@ struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
 	 */
 	uaddr = (unsigned long) ubuf;
 	if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
-		bio = bio_map_user(q, NULL, uaddr, len, rw == READ);
+		bio = bio_map_user(q, NULL, uaddr, len, reading);
 	else
-		bio = bio_copy_user(q, uaddr, len, rw == READ);
+		bio = bio_copy_user(q, uaddr, len, reading);
 
 	if (!IS_ERR(bio)) {
 		rq->bio = rq->biotail = bio;
@@ -2164,28 +2163,70 @@ struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
 
 		rq->buffer = rq->data = NULL;
 		rq->data_len = len;
-		return rq;
+		return 0;
 	}
 
 	/*
 	 * bio is the err-ptr
 	 */
-	blk_put_request(rq);
-	return (struct request *) bio;
+	return PTR_ERR(bio);
 }
 
 EXPORT_SYMBOL(blk_rq_map_user);
 
 /**
+ * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
+ * @q:		request queue where request should be inserted
+ * @rq:		request to map data to
+ * @iov:	pointer to the iovec
+ * @iov_count:	number of elements in the iovec
+ *
+ * Description:
+ *    Data will be mapped directly for zero copy io, if possible. Otherwise
+ *    a kernel bounce buffer is used.
+ *
+ *    A matching blk_rq_unmap_user() must be issued at the end of io, while
+ *    still in process context.
+ *
+ *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
+ *    before being submitted to the device, as pages mapped may be out of
+ *    reach. It's the callers responsibility to make sure this happens. The
+ *    original bio must be passed back in to blk_rq_unmap_user() for proper
+ *    unmapping.
+ */
+int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
+			struct sg_iovec *iov, int iov_count)
+{
+	struct bio *bio;
+
+	if (!iov || iov_count <= 0)
+		return -EINVAL;
+
+	/* we don't allow misaligned data like bio_map_user() does. If the
+	 * user is using sg, they're expected to know the alignment constraints
+	 * and respect them accordingly */
+	bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ);
+	if (IS_ERR(bio))
+		return PTR_ERR(bio);
+
+	rq->bio = rq->biotail = bio;
+	blk_rq_bio_prep(q, rq, bio);
+	rq->buffer = rq->data = NULL;
+	rq->data_len = bio->bi_size;
+	return 0;
+}
+
+EXPORT_SYMBOL(blk_rq_map_user_iov);
+
+/**
  * blk_rq_unmap_user - unmap a request with user data
- * @rq:		request to be unmapped
- * @bio:	bio for the request
+ * @bio:	bio to be unmapped
  * @ulen:	length of user buffer
  *
  * Description:
- *    Unmap a request previously mapped by blk_rq_map_user().
+ *    Unmap a bio previously mapped by blk_rq_map_user().
  */
-int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen)
+int blk_rq_unmap_user(struct bio *bio, unsigned int ulen)
 {
 	int ret = 0;
 
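
With this rework the caller, not blk_rq_map_user(), owns the request: it allocates one with blk_get_request(), maps the user buffer into it, and after the command completes it unmaps the bio and puts the request itself. Below is a minimal sketch of the new calling convention, loosely modelled on the SG_IO-style users of this era; the function name, CDB handling and error paths are illustrative only, and it assumes the four-argument blk_execute_rq() introduced further down in this diff.

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Hypothetical caller of the reworked mapping interface (2.6.13-era API).
 * The scatter-gather variant, blk_rq_map_user_iov(), follows the same
 * pattern but requires the iovec elements to satisfy the queue's alignment. */
static int send_pc_command(request_queue_t *q, struct gendisk *disk,
			   unsigned char *cdb, unsigned int cdb_len,
			   void __user *ubuf, unsigned int len, int rw)
{
	struct request *rq;
	struct bio *bio;
	int err;

	/* the caller allocates the request now; rw (READ/WRITE) sets rq_data_dir() */
	rq = blk_get_request(q, rw, __GFP_WAIT);
	if (!rq)
		return -ENOMEM;

	rq->flags |= REQ_BLOCK_PC;
	memcpy(rq->cmd, cdb, cdb_len);		/* cdb_len must fit in rq->cmd */
	rq->cmd_len = cdb_len;
	rq->timeout = 60 * HZ;

	/* returns 0 or -errno instead of the old ERR_PTR-encoded request */
	err = blk_rq_map_user(q, rq, ubuf, len);
	if (err)
		goto out_put;

	bio = rq->bio;				/* keep the bio for unmapping */

	err = blk_execute_rq(q, disk, rq, 0);	/* wait; tail insertion */

	blk_rq_unmap_user(bio, len);		/* unmap takes the bio, not the request */
out_put:
	blk_put_request(rq);
	return err;
}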
@@ -2196,31 +2237,89 @@ int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen)
 		ret = bio_uncopy_user(bio);
 	}
 
-	blk_put_request(rq);
-	return ret;
+	return 0;
 }
 
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
 /**
+ * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
+ * @q:		request queue where request should be inserted
+ * @rq:		request to fill
+ * @kbuf:	the kernel buffer
+ * @len:	length of user data
+ * @gfp_mask:	memory allocation flags
+ */
+int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
+		    unsigned int len, unsigned int gfp_mask)
+{
+	struct bio *bio;
+
+	if (len > (q->max_sectors << 9))
+		return -EINVAL;
+	if (!len || !kbuf)
+		return -EINVAL;
+
+	bio = bio_map_kern(q, kbuf, len, gfp_mask);
+	if (IS_ERR(bio))
+		return PTR_ERR(bio);
+
+	if (rq_data_dir(rq) == WRITE)
+		bio->bi_rw |= (1 << BIO_RW);
+
+	rq->bio = rq->biotail = bio;
+	blk_rq_bio_prep(q, rq, bio);
+
+	rq->buffer = rq->data = NULL;
+	rq->data_len = len;
+	return 0;
+}
+
+EXPORT_SYMBOL(blk_rq_map_kern);
+
+/**
+ * blk_execute_rq_nowait - insert a request into queue for execution
+ * @q:		queue to insert the request in
+ * @bd_disk:	matching gendisk
+ * @rq:		request to insert
+ * @at_head:	insert request at head or tail of queue
+ * @done:	I/O completion handler
+ *
+ * Description:
+ *    Insert a fully prepared request at the back of the io scheduler queue
+ *    for execution. Don't wait for completion.
+ */
+void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
+			   struct request *rq, int at_head,
+			   void (*done)(struct request *))
+{
+	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+
+	rq->rq_disk = bd_disk;
+	rq->flags |= REQ_NOMERGE;
+	rq->end_io = done;
+	elv_add_request(q, rq, where, 1);
+	generic_unplug_device(q);
+}
+
+/**
  * blk_execute_rq - insert a request into queue for execution
  * @q:		queue to insert the request in
  * @bd_disk:	matching gendisk
  * @rq:		request to insert
+ * @at_head:	insert request at head or tail of queue
  *
  * Description:
  *    Insert a fully prepared request at the back of the io scheduler queue
- *    for execution.
+ *    for execution and wait for completion.
  */
 int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
-		   struct request *rq)
+		   struct request *rq, int at_head)
 {
 	DECLARE_COMPLETION(wait);
 	char sense[SCSI_SENSE_BUFFERSIZE];
 	int err = 0;
 
-	rq->rq_disk = bd_disk;
-
 	/*
 	 * we need an extra reference to the request, so we can look at
 	 * it after io completion
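
blk_rq_map_kern() and blk_execute_rq_nowait(), both introduced in the hunk above, let a driver fire a packet command at a kernel buffer without sleeping for the result. The following is a rough sketch of asynchronous use; everything except the block-layer calls is made up, and it assumes end_io handlers run with the queue lock held, which is why the completion frees the request with __blk_put_request() rather than blk_put_request().

#include <linux/blkdev.h>

/* Illustrative async caller; handler and helper names are hypothetical. */
static void my_cmd_done(struct request *rq)
{
	/* inspect rq->errors / rq->sense here if needed; queue lock is held */
	__blk_put_request(rq->q, rq);
}

static int my_start_inquiry(request_queue_t *q, struct gendisk *disk,
			    void *buf, unsigned int len)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, __GFP_WAIT);
	if (!rq)
		return -ENOMEM;

	rq->flags |= REQ_BLOCK_PC;
	memset(rq->cmd, 0, sizeof(rq->cmd));
	rq->cmd[0] = 0x12;			/* INQUIRY */
	rq->cmd[4] = len;			/* allocation length, assumes len <= 255 */
	rq->cmd_len = 6;
	rq->timeout = 10 * HZ;

	err = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (err) {
		blk_put_request(rq);
		return err;
	}

	/* tail insertion, return immediately; my_cmd_done() runs on completion */
	blk_execute_rq_nowait(q, disk, rq, 0, my_cmd_done);
	return 0;
}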
@@ -2233,11 +2332,8 @@ int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
 		rq->sense_len = 0;
 	}
 
-	rq->flags |= REQ_NOMERGE;
 	rq->waiting = &wait;
-	rq->end_io = blk_end_sync_rq;
-	elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
-	generic_unplug_device(q);
+	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
 	wait_for_completion(&wait);
 	rq->waiting = NULL;
 
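
blk_execute_rq() is now a thin wrapper around blk_execute_rq_nowait() plus a completion, and its new at_head argument selects front or tail insertion into the elevator. A hedged sketch of a caller that wants its command to jump ahead of queued I/O follows; the helper name and CDB are illustrative only.

#include <linux/blkdev.h>

/* Hypothetical helper: send TEST UNIT READY ahead of queued I/O and wait. */
static int my_test_unit_ready(request_queue_t *q, struct gendisk *disk)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, __GFP_WAIT);
	if (!rq)
		return -ENOMEM;

	rq->flags |= REQ_BLOCK_PC;
	memset(rq->cmd, 0, sizeof(rq->cmd));	/* TEST UNIT READY: all-zero 6-byte CDB */
	rq->cmd_len = 6;
	rq->data = NULL;
	rq->data_len = 0;
	rq->timeout = 30 * HZ;

	err = blk_execute_rq(q, disk, rq, 1);	/* at_head = 1: ELEVATOR_INSERT_FRONT */

	blk_put_request(rq);
	return err;
}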
@@ -2277,6 +2373,44 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 
 EXPORT_SYMBOL(blkdev_issue_flush);
 
+/**
+ * blkdev_scsi_issue_flush_fn - issue flush for SCSI devices
+ * @q:		device queue
+ * @disk:	gendisk
+ * @error_sector: error offset
+ *
+ * Description:
+ *    Devices understanding the SCSI command set, can use this function as
+ *    a helper for issuing a cache flush. Note: driver is required to store
+ *    the error offset (in case of error flushing) in ->sector of struct
+ *    request.
+ */
+int blkdev_scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
+			       sector_t *error_sector)
+{
+	struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
+	int ret;
+
+	rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
+	rq->sector = 0;
+	memset(rq->cmd, 0, sizeof(rq->cmd));
+	rq->cmd[0] = 0x35;
+	rq->cmd_len = 12;
+	rq->data = NULL;
+	rq->data_len = 0;
+	rq->timeout = 60 * HZ;
+
+	ret = blk_execute_rq(q, disk, rq, 0);
+
+	if (ret && error_sector)
+		*error_sector = rq->sector;
+
+	blk_put_request(rq);
+	return ret;
+}
+
+EXPORT_SYMBOL(blkdev_scsi_issue_flush_fn);
+
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
 {
 	int rw = rq_data_dir(rq);
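
The new helper builds a SYNCHRONIZE CACHE(10) command (opcode 0x35) and pushes it through the synchronous blk_execute_rq() path above. Below is a sketch of how a SCSI-speaking block driver might wire it up so that blkdev_issue_flush() reaches the device; the driver names are made up, and blk_queue_issue_flush_fn() is assumed to be the existing registration helper from this same file.

#include <linux/blkdev.h>

/* Hypothetical driver setup: register the helper as the queue's flush method. */
static void mydrv_init_queue(request_queue_t *q)
{
	blk_queue_issue_flush_fn(q, blkdev_scsi_issue_flush_fn);
}

/* Elsewhere, upper layers flush the write cache through the generic entry
 * point, which ends up calling the helper registered above. */
static int mydrv_flush_example(struct block_device *bdev)
{
	sector_t error_sector;

	return blkdev_issue_flush(bdev, &error_sector);
}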