author		James Bottomley <jejb@titanic.(none)>	2005-08-28 11:43:07 -0400
committer	James Bottomley <jejb@titanic.(none)>	2005-08-28 11:43:07 -0400
commit		31151ba2cef171344beac254e65bd7e00138bb0d (patch)
tree		a8f9cd3c0395656d974125c0ca7ed635aacddeee /drivers/block
parent		3d52acb34247816c453f94596e6c7fc4499b76dc (diff)
parent		73747aed04d3b3fb694961d025f81863b99c6898 (diff)
fix mismerge in ll_rw_blk.c
Diffstat (limited to 'drivers/block')
-rw-r--r--	drivers/block/ll_rw_blk.c	192
-rw-r--r--	drivers/block/scsi_ioctl.c	60
2 files changed, 199 insertions, 53 deletions
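
The substance of this merge fix is the reworked REQ_BLOCK_PC mapping API shown below: blk_rq_map_user() no longer allocates and returns a request; the caller obtains one from blk_get_request() and gets back 0 or a negative errno, and blk_rq_unmap_user() now takes the bio while the caller drops the request itself. A minimal sketch of the new calling convention, assembled only from the signatures in this diff (the helper name issue_pc_read and the READ direction are illustrative, not part of the patch):

static int issue_pc_read(request_queue_t *q, struct gendisk *bd_disk,
                         void __user *ubuf, unsigned int len)
{
        struct request *rq;
        struct bio *bio;
        int ret;

        rq = blk_get_request(q, READ, GFP_KERNEL);      /* caller allocates now */
        if (!rq)
                return -ENOMEM;

        ret = blk_rq_map_user(q, rq, ubuf, len);        /* returns 0 or -errno */
        if (ret)
                goto out;

        bio = rq->bio;                          /* keep for blk_rq_unmap_user() */
        /* ... fill in rq->cmd, rq->cmd_len, rq->flags |= REQ_BLOCK_PC ... */
        ret = blk_execute_rq(q, bd_disk, rq, 0);        /* 0 == insert at tail */

        if (blk_rq_unmap_user(bio, len))        /* takes the bio, not the rq */
                ret = -EFAULT;
out:
        blk_put_request(rq);                    /* caller drops the request */
        return ret;
}
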
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 3c818544475e..0c7599563b65 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -284,6 +284,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
         rq->special = NULL;
         rq->data_len = 0;
         rq->data = NULL;
+        rq->nr_phys_segments = 0;
         rq->sense = NULL;
         rq->end_io = NULL;
         rq->end_io_data = NULL;
@@ -2115,7 +2116,7 @@ EXPORT_SYMBOL(blk_insert_request);
 /**
  * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
  * @q: request queue where request should be inserted
- * @rw: READ or WRITE data
+ * @rq: request structure to fill
  * @ubuf: the user buffer
  * @len: length of user data
  *
@@ -2132,21 +2133,19 @@ EXPORT_SYMBOL(blk_insert_request);
  *    original bio must be passed back in to blk_rq_unmap_user() for proper
  *    unmapping.
  */
-struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
+int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
                     unsigned int len)
 {
         unsigned long uaddr;
-        struct request *rq;
         struct bio *bio;
+        int reading;
 
         if (len > (q->max_sectors << 9))
-                return ERR_PTR(-EINVAL);
-        if ((!len && ubuf) || (len && !ubuf))
-                return ERR_PTR(-EINVAL);
+                return -EINVAL;
+        if (!len || !ubuf)
+                return -EINVAL;
 
-        rq = blk_get_request(q, rw, __GFP_WAIT);
-        if (!rq)
-                return ERR_PTR(-ENOMEM);
+        reading = rq_data_dir(rq) == READ;
 
         /*
          * if alignment requirement is satisfied, map in user pages for
@@ -2154,9 +2153,9 @@ struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
          */
         uaddr = (unsigned long) ubuf;
         if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
-                bio = bio_map_user(q, NULL, uaddr, len, rw == READ);
+                bio = bio_map_user(q, NULL, uaddr, len, reading);
         else
-                bio = bio_copy_user(q, uaddr, len, rw == READ);
+                bio = bio_copy_user(q, uaddr, len, reading);
 
         if (!IS_ERR(bio)) {
                 rq->bio = rq->biotail = bio;
@@ -2164,28 +2163,70 @@ struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
 
                 rq->buffer = rq->data = NULL;
                 rq->data_len = len;
-                return rq;
+                return 0;
         }
 
         /*
          * bio is the err-ptr
          */
-        blk_put_request(rq);
-        return (struct request *) bio;
+        return PTR_ERR(bio);
 }
 
 EXPORT_SYMBOL(blk_rq_map_user);
 
 /**
+ * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
+ * @q: request queue where request should be inserted
+ * @rq: request to map data to
+ * @iov: pointer to the iovec
+ * @iov_count: number of elements in the iovec
+ *
+ * Description:
+ *    Data will be mapped directly for zero copy io, if possible. Otherwise
+ *    a kernel bounce buffer is used.
+ *
+ *    A matching blk_rq_unmap_user() must be issued at the end of io, while
+ *    still in process context.
+ *
+ *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
+ *    before being submitted to the device, as pages mapped may be out of
+ *    reach. It's the callers responsibility to make sure this happens. The
+ *    original bio must be passed back in to blk_rq_unmap_user() for proper
+ *    unmapping.
+ */
+int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
+                        struct sg_iovec *iov, int iov_count)
+{
+        struct bio *bio;
+
+        if (!iov || iov_count <= 0)
+                return -EINVAL;
+
+        /* we don't allow misaligned data like bio_map_user() does. If the
+         * user is using sg, they're expected to know the alignment constraints
+         * and respect them accordingly */
+        bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ);
+        if (IS_ERR(bio))
+                return PTR_ERR(bio);
+
+        rq->bio = rq->biotail = bio;
+        blk_rq_bio_prep(q, rq, bio);
+        rq->buffer = rq->data = NULL;
+        rq->data_len = bio->bi_size;
+        return 0;
+}
+
+EXPORT_SYMBOL(blk_rq_map_user_iov);
+
+/**
  * blk_rq_unmap_user - unmap a request with user data
- * @rq: request to be unmapped
- * @bio: bio for the request
+ * @bio: bio to be unmapped
  * @ulen: length of user buffer
  *
  * Description:
- *    Unmap a request previously mapped by blk_rq_map_user().
+ *    Unmap a bio previously mapped by blk_rq_map_user().
  */
-int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen)
+int blk_rq_unmap_user(struct bio *bio, unsigned int ulen)
 {
         int ret = 0;
 
@@ -2196,31 +2237,89 @@ int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen)
                 ret = bio_uncopy_user(bio);
         }
 
-        blk_put_request(rq);
-        return ret;
+        return 0;
 }
 
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
 /**
+ * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
+ * @q: request queue where request should be inserted
+ * @rq: request to fill
+ * @kbuf: the kernel buffer
+ * @len: length of user data
+ * @gfp_mask: memory allocation flags
+ */
+int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
+                    unsigned int len, unsigned int gfp_mask)
+{
+        struct bio *bio;
+
+        if (len > (q->max_sectors << 9))
+                return -EINVAL;
+        if (!len || !kbuf)
+                return -EINVAL;
+
+        bio = bio_map_kern(q, kbuf, len, gfp_mask);
+        if (IS_ERR(bio))
+                return PTR_ERR(bio);
+
+        if (rq_data_dir(rq) == WRITE)
+                bio->bi_rw |= (1 << BIO_RW);
+
+        rq->bio = rq->biotail = bio;
+        blk_rq_bio_prep(q, rq, bio);
+
+        rq->buffer = rq->data = NULL;
+        rq->data_len = len;
+        return 0;
+}
+
+EXPORT_SYMBOL(blk_rq_map_kern);
+
+/**
+ * blk_execute_rq_nowait - insert a request into queue for execution
+ * @q: queue to insert the request in
+ * @bd_disk: matching gendisk
+ * @rq: request to insert
+ * @at_head: insert request at head or tail of queue
+ * @done: I/O completion handler
+ *
+ * Description:
+ *    Insert a fully prepared request at the back of the io scheduler queue
+ *    for execution. Don't wait for completion.
+ */
+void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
+                           struct request *rq, int at_head,
+                           void (*done)(struct request *))
+{
+        int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+
+        rq->rq_disk = bd_disk;
+        rq->flags |= REQ_NOMERGE;
+        rq->end_io = done;
+        elv_add_request(q, rq, where, 1);
+        generic_unplug_device(q);
+}
+
+/**
  * blk_execute_rq - insert a request into queue for execution
  * @q: queue to insert the request in
  * @bd_disk: matching gendisk
  * @rq: request to insert
+ * @at_head: insert request at head or tail of queue
  *
  * Description:
  *    Insert a fully prepared request at the back of the io scheduler queue
- *    for execution.
+ *    for execution and wait for completion.
  */
 int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
-                   struct request *rq)
+                   struct request *rq, int at_head)
 {
         DECLARE_COMPLETION(wait);
         char sense[SCSI_SENSE_BUFFERSIZE];
         int err = 0;
 
-        rq->rq_disk = bd_disk;
-
         /*
          * we need an extra reference to the request, so we can look at
          * it after io completion
@@ -2233,11 +2332,8 @@ int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
                 rq->sense_len = 0;
         }
 
-        rq->flags |= REQ_NOMERGE;
         rq->waiting = &wait;
-        rq->end_io = blk_end_sync_rq;
-        elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
-        generic_unplug_device(q);
+        blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
         wait_for_completion(&wait);
         rq->waiting = NULL;
 
@@ -2277,6 +2373,44 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 
 EXPORT_SYMBOL(blkdev_issue_flush);
 
+/**
+ * blkdev_scsi_issue_flush_fn - issue flush for SCSI devices
+ * @q: device queue
+ * @disk: gendisk
+ * @error_sector: error offset
+ *
+ * Description:
+ *    Devices understanding the SCSI command set, can use this function as
+ *    a helper for issuing a cache flush. Note: driver is required to store
+ *    the error offset (in case of error flushing) in ->sector of struct
+ *    request.
+ */
+int blkdev_scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
+                               sector_t *error_sector)
+{
+        struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
+        int ret;
+
+        rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
+        rq->sector = 0;
+        memset(rq->cmd, 0, sizeof(rq->cmd));
+        rq->cmd[0] = 0x35;
+        rq->cmd_len = 12;
+        rq->data = NULL;
+        rq->data_len = 0;
+        rq->timeout = 60 * HZ;
+
+        ret = blk_execute_rq(q, disk, rq, 0);
+
+        if (ret && error_sector)
+                *error_sector = rq->sector;
+
+        blk_put_request(rq);
+        return ret;
+}
+
+EXPORT_SYMBOL(blkdev_scsi_issue_flush_fn);
+
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
 {
         int rw = rq_data_dir(rq);
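
With the hunks above, blk_execute_rq() is now a thin synchronous wrapper: it hands blk_end_sync_rq to the new blk_execute_rq_nowait() and waits on the completion. A fire-and-forget caller would pass its own handler instead; a sketch under the same API, where submit_async and my_end_io are hypothetical names and the cleanup policy in the callback is illustrative:

/* hypothetical completion handler: called when the request finishes */
static void my_end_io(struct request *rq)
{
        /* check rq->errors here, then drop the caller's reference */
        blk_put_request(rq);
}

static void submit_async(request_queue_t *q, struct gendisk *disk,
                         struct request *rq)
{
        /* at_head == 0: queue at the tail; the helper sets rq_disk,
         * REQ_NOMERGE and end_io before calling into the elevator */
        blk_execute_rq_nowait(q, disk, rq, 0, my_end_io);
}
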
diff --git a/drivers/block/scsi_ioctl.c b/drivers/block/scsi_ioctl.c
index 681871ca5d60..abb2df249fd3 100644
--- a/drivers/block/scsi_ioctl.c
+++ b/drivers/block/scsi_ioctl.c
@@ -216,7 +216,7 @@ static int sg_io(struct file *file, request_queue_t *q,
                  struct gendisk *bd_disk, struct sg_io_hdr *hdr)
 {
         unsigned long start_time;
-        int reading, writing;
+        int writing = 0, ret = 0;
         struct request *rq;
         struct bio *bio;
         char sense[SCSI_SENSE_BUFFERSIZE];
@@ -231,38 +231,48 @@ static int sg_io(struct file *file, request_queue_t *q,
         if (verify_command(file, cmd))
                 return -EPERM;
 
-        /*
-         * we'll do that later
-         */
-        if (hdr->iovec_count)
-                return -EOPNOTSUPP;
-
         if (hdr->dxfer_len > (q->max_sectors << 9))
                 return -EIO;
 
-        reading = writing = 0;
-        if (hdr->dxfer_len) {
+        if (hdr->dxfer_len)
                 switch (hdr->dxfer_direction) {
                 default:
                         return -EINVAL;
                 case SG_DXFER_TO_FROM_DEV:
-                        reading = 1;
-                        /* fall through */
                 case SG_DXFER_TO_DEV:
                         writing = 1;
                         break;
                 case SG_DXFER_FROM_DEV:
-                        reading = 1;
                         break;
                 }
 
-                rq = blk_rq_map_user(q, writing ? WRITE : READ, hdr->dxferp,
-                                     hdr->dxfer_len);
+        rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
+        if (!rq)
+                return -ENOMEM;
+
+        if (hdr->iovec_count) {
+                const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
+                struct sg_iovec *iov;
+
+                iov = kmalloc(size, GFP_KERNEL);
+                if (!iov) {
+                        ret = -ENOMEM;
+                        goto out;
+                }
+
+                if (copy_from_user(iov, hdr->dxferp, size)) {
+                        kfree(iov);
+                        ret = -EFAULT;
+                        goto out;
+                }
+
+                ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count);
+                kfree(iov);
+        } else if (hdr->dxfer_len)
+                ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
 
-                if (IS_ERR(rq))
-                        return PTR_ERR(rq);
-        } else
-                rq = blk_get_request(q, READ, __GFP_WAIT);
+        if (ret)
+                goto out;
 
         /*
          * fill in request structure
@@ -298,7 +308,7 @@ static int sg_io(struct file *file, request_queue_t *q,
          * (if he doesn't check that is his problem).
          * N.B. a non-zero SCSI status is _not_ necessarily an error.
          */
-        blk_execute_rq(q, bd_disk, rq);
+        blk_execute_rq(q, bd_disk, rq, 0);
 
         /* write to all output members */
         hdr->status = 0xff & rq->errors;
@@ -320,12 +330,14 @@ static int sg_io(struct file *file, request_queue_t *q,
                 hdr->sb_len_wr = len;
         }
 
-        if (blk_rq_unmap_user(rq, bio, hdr->dxfer_len))
-                return -EFAULT;
+        if (blk_rq_unmap_user(bio, hdr->dxfer_len))
+                ret = -EFAULT;
 
         /* may not have succeeded, but output values written to control
          * structure (struct sg_io_hdr). */
-        return 0;
+out:
+        blk_put_request(rq);
+        return ret;
 }
 
 #define OMAX_SB_LEN 16          /* For backward compatibility */
@@ -408,7 +420,7 @@ static int sg_scsi_ioctl(struct file *file, request_queue_t *q,
         rq->data_len = bytes;
         rq->flags |= REQ_BLOCK_PC;
 
-        blk_execute_rq(q, bd_disk, rq);
+        blk_execute_rq(q, bd_disk, rq, 0);
         err = rq->errors & 0xff;        /* only 8 bit SCSI status */
         if (err) {
                 if (rq->sense_len && rq->sense) {
@@ -561,7 +573,7 @@ int scsi_cmd_ioctl(struct file *file, struct gendisk *bd_disk, unsigned int cmd,
                 rq->cmd[0] = GPCMD_START_STOP_UNIT;
                 rq->cmd[4] = 0x02 + (close != 0);
                 rq->cmd_len = 6;
-                err = blk_execute_rq(q, bd_disk, rq);
+                err = blk_execute_rq(q, bd_disk, rq, 0);
                 blk_put_request(rq);
                 break;
         default:
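
Since sg_io() now routes hdr->iovec_count through blk_rq_map_user_iov() instead of returning -EOPNOTSUPP, a vectored SG_IO call from userspace becomes possible. A hedged userspace sketch, assuming fd is an open sg device node; the READ(10) command, buffer sizes and helper name are purely illustrative:

#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

/* read two blocks into two separate buffers in one SG_IO call */
static int sg_read_vectored(int fd, void *buf0, void *buf1)
{
        unsigned char cdb[10] = { 0x28 };       /* READ(10), LBA 0 */
        unsigned char sense[32];
        struct sg_iovec iov[2];
        struct sg_io_hdr hdr;

        cdb[8] = 2;                             /* transfer length: 2 blocks */
        iov[0].iov_base = buf0;
        iov[0].iov_len  = 512;
        iov[1].iov_base = buf1;
        iov[1].iov_len  = 512;

        memset(&hdr, 0, sizeof(hdr));
        hdr.interface_id    = 'S';
        hdr.dxfer_direction = SG_DXFER_FROM_DEV;
        hdr.cmdp            = cdb;
        hdr.cmd_len         = sizeof(cdb);
        hdr.dxferp          = iov;      /* sg_io() copies this sg_iovec array */
        hdr.iovec_count     = 2;
        hdr.dxfer_len       = 1024;     /* total length across the iovec */
        hdr.sbp             = sense;
        hdr.mx_sb_len       = sizeof(sense);

        return ioctl(fd, SG_IO, &hdr);
}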