author     Mike Christie <michaelc@cs.wisc.edu>      2005-11-11 06:30:27 -0500
committer  James Bottomley <jejb@mulgrave.(none)>    2005-12-14 22:03:35 -0500
commit     6e68af666f5336254b5715dca591026b7324499a
tree       5640209b6e2b75659149460f14531cfecffe2f5d
parent     6e39b69e7ea9205c5f80aeac3ef999ab8fb1a4cc
[SCSI] Convert SCSI mid-layer to scsi_execute_async
Add SCSI helpers to build really large requests and convert
the SCSI mid-layer (scsi-ml) to scsi_execute_async().

Per Jens's previous comments, I placed this function in scsi_lib.c.
I made it follow all of the queue's limits - I think I did, at least :) - so
I removed the warning from the function header.

I think the scsi_execute_* functions should eventually take a request_queue
and live somewhere the dm-multipath hw_handler can also use them, if that
failover code is going to stay in the kernel. That conversion patch will be
sent in a separate mail.
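
For reference, a minimal sketch of how a caller is expected to use the new
helper for a command with no data transfer. The function names, the
completion-based signalling and the retry/timeout values below are
illustrative only and are not part of this patch; only the
scsi_execute_async() signature and callback prototype come from it:

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

/*
 * Completion callback: invoked from scsi_end_async(); result comes from
 * req->errors and resid from req->data_len.
 */
static void example_done(void *data, char *sense, int result, int resid)
{
	complete((struct completion *)data);
}

static int example_test_unit_ready(struct scsi_device *sdev,
				   struct completion *wait)
{
	unsigned char cmd[MAX_COMMAND_SIZE] = { TEST_UNIT_READY, };

	/* no data phase: DMA_NONE, NULL buffer, zero length, no scatterlist */
	return scsi_execute_async(sdev, cmd, DMA_NONE, NULL, 0, 0,
				  10 * HZ, 3, wait, example_done, GFP_KERNEL);
}

The scsi_eh_lock_door() change in this patch is the in-tree version of the
same no-data pattern.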
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
 drivers/scsi/scsi_error.c  |  47
 drivers/scsi/scsi_lib.c    | 230
 drivers/scsi/scsi_priv.h   |   1
 fs/bio.c                   |  20
 include/linux/bio.h        |   2
 include/scsi/scsi_device.h |   6
 6 files changed, 233 insertions(+), 73 deletions(-)
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 18c5d2523014..53ea62d3b53d 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1315,23 +1315,6 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
 }
 
 /**
- * scsi_eh_lock_done - done function for eh door lock request
- * @scmd: SCSI command block for the door lock request
- *
- * Notes:
- *	We completed the asynchronous door lock request, and it has either
- *	locked the door or failed.  We must free the command structures
- *	associated with this request.
- **/
-static void scsi_eh_lock_done(struct scsi_cmnd *scmd)
-{
-	struct scsi_request *sreq = scmd->sc_request;
-
-	scsi_release_request(sreq);
-}
-
-
-/**
  * scsi_eh_lock_door - Prevent medium removal for the specified device
  * @sdev: SCSI device to prevent medium removal
  *
@@ -1353,29 +1336,17 @@ static void scsi_eh_lock_done(struct scsi_cmnd *scmd)
  **/
 static void scsi_eh_lock_door(struct scsi_device *sdev)
 {
-	struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);
+	unsigned char cmnd[MAX_COMMAND_SIZE];
 
-	if (unlikely(!sreq)) {
-		printk(KERN_ERR "%s: request allocate failed,"
-		       "prevent media removal cmd not sent\n", __FUNCTION__);
-		return;
-	}
+	cmnd[0] = ALLOW_MEDIUM_REMOVAL;
+	cmnd[1] = 0;
+	cmnd[2] = 0;
+	cmnd[3] = 0;
+	cmnd[4] = SCSI_REMOVAL_PREVENT;
+	cmnd[5] = 0;
 
-	sreq->sr_cmnd[0] = ALLOW_MEDIUM_REMOVAL;
-	sreq->sr_cmnd[1] = 0;
-	sreq->sr_cmnd[2] = 0;
-	sreq->sr_cmnd[3] = 0;
-	sreq->sr_cmnd[4] = SCSI_REMOVAL_PREVENT;
-	sreq->sr_cmnd[5] = 0;
-	sreq->sr_data_direction = DMA_NONE;
-	sreq->sr_bufflen = 0;
-	sreq->sr_buffer = NULL;
-	sreq->sr_allowed = 5;
-	sreq->sr_done = scsi_eh_lock_done;
-	sreq->sr_timeout_per_command = 10 * HZ;
-	sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);
-
-	scsi_insert_special_req(sreq, 1);
+	scsi_execute_async(sdev, cmnd, DMA_NONE, NULL, 0, 0, 10 * HZ,
+			   5, NULL, NULL, GFP_KERNEL);
 }
 
 
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 1f2782767ca9..eb0cfbfbcf8f 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -63,39 +63,6 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {
 };
 #undef SP
 
-
-/*
- * Function:    scsi_insert_special_req()
- *
- * Purpose:     Insert pre-formed request into request queue.
- *
- * Arguments:   sreq	- request that is ready to be queued.
- *              at_head	- boolean.  True if we should insert at head
- *                        of queue, false if we should insert at tail.
- *
- * Lock status: Assumed that lock is not held upon entry.
- *
- * Returns:     Nothing
- *
- * Notes:       This function is called from character device and from
- *              ioctl types of functions where the caller knows exactly
- *              what SCSI command needs to be issued.  The idea is that
- *              we merely inject the command into the queue (at the head
- *              for now), and then call the queue request function to actually
- *              process it.
- */
-int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
-{
-	/*
-	 * Because users of this function are apt to reuse requests with no
-	 * modification, we have to sanitise the request flags here
-	 */
-	sreq->sr_request->flags &= ~REQ_DONTPREP;
-	blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
-			   at_head, sreq);
-	return 0;
-}
-
 static void scsi_run_queue(struct request_queue *q);
 
 /*
@@ -249,8 +216,13 @@ void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
 
 	/*
 	 * head injection *required* here otherwise quiesce won't work
+	 *
+	 * Because users of this function are apt to reuse requests with no
+	 * modification, we have to sanitise the request flags here
 	 */
-	scsi_insert_special_req(sreq, 1);
+	sreq->sr_request->flags &= ~REQ_DONTPREP;
+	blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
+			   1, sreq);
 }
 EXPORT_SYMBOL(scsi_do_req);
 
@@ -327,6 +299,196 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
 }
 EXPORT_SYMBOL(scsi_execute_req);
 
+struct scsi_io_context {
+	void *data;
+	void (*done)(void *data, char *sense, int result, int resid);
+	char sense[SCSI_SENSE_BUFFERSIZE];
+};
+
+static void scsi_end_async(struct request *req)
+{
+	struct scsi_io_context *sioc = req->end_io_data;
+
+	if (sioc->done)
+		sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);
+
+	kfree(sioc);
+	__blk_put_request(req->q, req);
+}
+
+static int scsi_merge_bio(struct request *rq, struct bio *bio)
+{
+	struct request_queue *q = rq->q;
+
+	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
+	if (rq_data_dir(rq) == WRITE)
+		bio->bi_rw |= (1 << BIO_RW);
+	blk_queue_bounce(q, &bio);
+
+	if (!rq->bio)
+		blk_rq_bio_prep(q, rq, bio);
+	else if (!q->back_merge_fn(q, rq, bio))
+		return -EINVAL;
+	else {
+		rq->biotail->bi_next = bio;
+		rq->biotail = bio;
+		rq->hard_nr_sectors += bio_sectors(bio);
+		rq->nr_sectors = rq->hard_nr_sectors;
+	}
+
+	return 0;
+}
+
+static int scsi_bi_endio(struct bio *bio, unsigned int bytes_done, int error)
+{
+	if (bio->bi_size)
+		return 1;
+
+	bio_put(bio);
+	return 0;
+}
+
+/**
+ * scsi_req_map_sg - map a scatterlist into a request
+ * @rq:		request to fill
+ * @sgl:	scatterlist
+ * @nsegs:	number of elements
+ * @bufflen:	len of buffer
+ * @gfp:	memory allocation flags
+ *
+ * scsi_req_map_sg maps a scatterlist into a request so that the
+ * request can be sent to the block layer. We do not trust the scatterlist
+ * sent to us, as some ULDs use that struct to only organize the pages.
+ */
+static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
+			   int nsegs, unsigned bufflen, gfp_t gfp)
+{
+	struct request_queue *q = rq->q;
+	int nr_pages = (bufflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	unsigned int data_len = 0, len, bytes, off;
+	struct page *page;
+	struct bio *bio = NULL;
+	int i, err, nr_vecs = 0;
+
+	for (i = 0; i < nsegs; i++) {
+		page = sgl[i].page;
+		off = sgl[i].offset;
+		len = sgl[i].length;
+		data_len += len;
+
+		while (len > 0) {
+			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
+
+			if (!bio) {
+				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
+				nr_pages -= nr_vecs;
+
+				bio = bio_alloc(gfp, nr_vecs);
+				if (!bio) {
+					err = -ENOMEM;
+					goto free_bios;
+				}
+				bio->bi_end_io = scsi_bi_endio;
+			}
+
+			if (bio_add_pc_page(q, bio, page, bytes, off) !=
+			    bytes) {
+				bio_put(bio);
+				err = -EINVAL;
+				goto free_bios;
+			}
+
+			if (bio->bi_vcnt >= nr_vecs) {
+				err = scsi_merge_bio(rq, bio);
+				if (err) {
+					bio_endio(bio, bio->bi_size, 0);
+					goto free_bios;
+				}
+				bio = NULL;
+			}
+
+			page++;
+			len -= bytes;
+			off = 0;
+		}
+	}
+
+	rq->buffer = rq->data = NULL;
+	rq->data_len = data_len;
+	return 0;
+
+free_bios:
+	while ((bio = rq->bio) != NULL) {
+		rq->bio = bio->bi_next;
+		/*
+		 * call endio instead of bio_put in case it was bounced
+		 */
+		bio_endio(bio, bio->bi_size, 0);
+	}
+
+	return err;
+}
+
+/**
+ * scsi_execute_async - insert request
+ * @sdev:	scsi device
+ * @cmd:	scsi command
+ * @data_direction: data direction
+ * @buffer:	data buffer (this can be a kernel buffer or scatterlist)
+ * @bufflen:	len of buffer
+ * @use_sg:	if buffer is a scatterlist this is the number of elements
+ * @timeout:	request timeout in seconds
+ * @retries:	number of times to retry request
+ * @flags:	or into request flags
+ **/
+int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
+		       int data_direction, void *buffer, unsigned bufflen,
+		       int use_sg, int timeout, int retries, void *privdata,
+		       void (*done)(void *, char *, int, int), gfp_t gfp)
+{
+	struct request *req;
+	struct scsi_io_context *sioc;
+	int err = 0;
+	int write = (data_direction == DMA_TO_DEVICE);
+
+	sioc = kzalloc(sizeof(*sioc), gfp);
+	if (!sioc)
+		return DRIVER_ERROR << 24;
+
+	req = blk_get_request(sdev->request_queue, write, gfp);
+	if (!req)
+		goto free_sense;
+
+	if (use_sg)
+		err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
+	else if (bufflen)
+		err = blk_rq_map_kern(req->q, req, buffer, bufflen, gfp);
+
+	if (err)
+		goto free_req;
+
+	req->cmd_len = COMMAND_SIZE(cmd[0]);
+	memcpy(req->cmd, cmd, req->cmd_len);
+	req->sense = sioc->sense;
+	req->sense_len = 0;
+	req->timeout = timeout;
+	req->flags |= REQ_BLOCK_PC | REQ_QUIET;
+	req->end_io_data = sioc;
+
+	sioc->data = privdata;
+	sioc->done = done;
+
+	blk_execute_rq_nowait(req->q, NULL, req, 1, scsi_end_async);
+	return 0;
+
+free_req:
+	blk_put_request(req);
+free_sense:
+	kfree(sioc);
+	return DRIVER_ERROR << 24;
+}
+EXPORT_SYMBOL_GPL(scsi_execute_async);
+
 /*
  * Function:    scsi_init_cmd_errh()
  *
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index a8d121c8fbcd..f04e7e11f57a 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -40,7 +40,6 @@ extern void scsi_exit_hosts(void);
 extern int scsi_dispatch_cmd(struct scsi_cmnd *cmd);
 extern int scsi_setup_command_freelist(struct Scsi_Host *shost);
 extern void scsi_destroy_command_freelist(struct Scsi_Host *shost);
-extern int scsi_insert_special_req(struct scsi_request *sreq, int);
 extern void scsi_init_cmd_from_req(struct scsi_cmnd *cmd,
 				   struct scsi_request *sreq);
 extern void __scsi_release_request(struct scsi_request *sreq);
diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -386,6 +386,25 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
 }
 
 /**
+ * bio_add_pc_page - attempt to add page to bio
+ * @bio: destination bio
+ * @page: page to add
+ * @len: vec entry length
+ * @offset: vec entry offset
+ *
+ * Attempt to add a page to the bio_vec maplist. This can fail for a
+ * number of reasons, such as the bio being full or target block
+ * device limitations. The target block device must allow bio's
+ * smaller than PAGE_SIZE, so it is always possible to add a single
+ * page to an empty bio. This should only be used by REQ_PC bios.
+ */
+int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page,
+		    unsigned int len, unsigned int offset)
+{
+	return __bio_add_page(q, bio, page, len, offset);
+}
+
+/**
  * bio_add_page - attempt to add page to bio
  * @bio: destination bio
  * @page: page to add
@@ -1228,6 +1247,7 @@ EXPORT_SYMBOL(bio_clone);
 EXPORT_SYMBOL(bio_phys_segments);
 EXPORT_SYMBOL(bio_hw_segments);
 EXPORT_SYMBOL(bio_add_page);
+EXPORT_SYMBOL(bio_add_pc_page);
 EXPORT_SYMBOL(bio_get_nr_vecs);
 EXPORT_SYMBOL(bio_map_user);
 EXPORT_SYMBOL(bio_unmap_user);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 685fd3720df5..b60ffe32cd21 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -292,6 +292,8 @@ extern struct bio *bio_clone(struct bio *, gfp_t);
 extern void bio_init(struct bio *);
 
 extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
+extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
+			   unsigned int, unsigned int);
 extern int bio_get_nr_vecs(struct block_device *);
 extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
 				unsigned long, unsigned int, int);
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 063e32fe036c..e94ca4d36035 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -274,6 +274,12 @@ extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 extern int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
 			    int data_direction, void *buffer, unsigned bufflen,
 			    struct scsi_sense_hdr *, int timeout, int retries);
+extern int scsi_execute_async(struct scsi_device *sdev,
+			      const unsigned char *cmd, int data_direction,
+			      void *buffer, unsigned bufflen, int use_sg,
+			      int timeout, int retries, void *privdata,
+			      void (*done)(void *, char *, int, int),
+			      gfp_t gfp);
 
 static inline unsigned int sdev_channel(struct scsi_device *sdev)
 {