author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>  2008-12-13 11:23:45 -0500
committer  James Bottomley <James.Bottomley@HansenPartnership.com>  2009-03-12 13:58:10 -0400
commit     f078727b250c2653fc9a564f15547c17ebac3f99 (patch)
tree       60cbd84965cbebc6a2898aa7664c25977d128b46 /drivers
parent     26243043f207b3faa00594a33e10b2103205f27b (diff)
[SCSI] remove scsi_req_map_sg
No one uses scsi_execute_async with a data transfer any more, so scsi_req_map_sg can be removed.

The only remaining caller of scsi_execute_async is scsi_eh_lock_door, which uses neither the sense buffer nor the completion callback, so scsi_io_context can be removed as well.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
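For readers skimming the diff, the substitution the patch makes in scsi_eh_lock_door is the now-standard pattern of building a REQ_TYPE_BLOCK_PC request by hand and firing it with blk_execute_rq_nowait, instead of going through scsi_execute_async. The sketch below restates that pattern in caller-neutral form against the same 2.6.29-era block API the diff uses; the names send_prevent_allow and prevent_allow_done are hypothetical, not part of the patch.

#include <linux/blkdev.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

/* Completion callback: nothing to report, just drop the request. */
static void prevent_allow_done(struct request *req, int uptodate)
{
	__blk_put_request(req->q, req);
}

/*
 * Hypothetical helper mirroring what the patch does in
 * scsi_eh_lock_door: a no-data BLOCK_PC command submitted
 * asynchronously, with no sense buffer and no private callback
 * data -- which is exactly why scsi_io_context is no longer needed.
 */
static void send_prevent_allow(struct scsi_device *sdev, int prevent)
{
	struct request *req;

	req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
	if (!req)
		return;

	memset(req->cmd, 0, BLK_MAX_CDB);	/* zero the whole CDB */
	req->cmd[0] = ALLOW_MEDIUM_REMOVAL;
	req->cmd[4] = prevent ? SCSI_REMOVAL_PREVENT : SCSI_REMOVAL_ALLOW;
	req->cmd_len = COMMAND_SIZE(req->cmd[0]);

	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= REQ_QUIET;
	req->timeout = 10 * HZ;
	req->retries = 5;

	blk_execute_rq_nowait(req->q, NULL, req, 1, prevent_allow_done);
}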
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/scsi/scsi_error.c |  34
-rw-r--r--  drivers/scsi/scsi_lib.c   | 203
2 files changed, 25 insertions(+), 212 deletions(-)
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index ad6a1370761e..0c2c73be1974 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1441,6 +1441,11 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
 	}
 }
 
+static void eh_lock_door_done(struct request *req, int uptodate)
+{
+	__blk_put_request(req->q, req);
+}
+
 /**
  * scsi_eh_lock_door - Prevent medium removal for the specified device
  * @sdev:	SCSI device to prevent medium removal
@@ -1463,19 +1468,28 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
  */
 static void scsi_eh_lock_door(struct scsi_device *sdev)
 {
-	unsigned char cmnd[MAX_COMMAND_SIZE];
+	struct request *req;
 
-	cmnd[0] = ALLOW_MEDIUM_REMOVAL;
-	cmnd[1] = 0;
-	cmnd[2] = 0;
-	cmnd[3] = 0;
-	cmnd[4] = SCSI_REMOVAL_PREVENT;
-	cmnd[5] = 0;
+	req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
+	if (!req)
+		return;
 
-	scsi_execute_async(sdev, cmnd, 6, DMA_NONE, NULL, 0, 0, 10 * HZ,
-			   5, NULL, NULL, GFP_KERNEL);
-}
+	req->cmd[0] = ALLOW_MEDIUM_REMOVAL;
+	req->cmd[1] = 0;
+	req->cmd[2] = 0;
+	req->cmd[3] = 0;
+	req->cmd[4] = SCSI_REMOVAL_PREVENT;
+	req->cmd[5] = 0;
 
+	req->cmd_len = COMMAND_SIZE(req->cmd[0]);
+
+	req->cmd_type = REQ_TYPE_BLOCK_PC;
+	req->cmd_flags |= REQ_QUIET;
+	req->timeout = 10 * HZ;
+	req->retries = 5;
+
+	blk_execute_rq_nowait(req->q, NULL, req, 1, eh_lock_door_done);
+}
 
 /**
  * scsi_restart_operations - restart io operations to the specified host.
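The scsi_lib.c half of the patch, below, deletes the scatterlist-mapping machinery (scsi_req_map_sg, scsi_merge_bio, scsi_bi_endio, scsi_io_context) along with scsi_execute_async itself. A driver that still wanted an asynchronous command with a kernel-buffer data transfer would map the buffer with blk_rq_map_kern, the same call the removed code used for its non-scatterlist path. A hedged sketch under that assumption follows; the helper name issue_async_cmd is illustrative, not an API the patch adds.

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

/*
 * Hypothetical replacement for what
 * scsi_execute_async(..., buffer, bufflen, 0, ...) used to do:
 * map a kernel buffer into a BLOCK_PC request and submit it
 * asynchronously. Completion handling is left to 'done'.
 */
static int issue_async_cmd(struct scsi_device *sdev,
			   const unsigned char *cmd, int cmd_len,
			   void *buffer, unsigned int bufflen, int write,
			   rq_end_io_fn *done)
{
	struct request *req;
	int err;

	req = blk_get_request(sdev->request_queue,
			      write ? WRITE : READ, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	if (bufflen) {
		/* blk_rq_map_kern bounces the buffer if the queue needs it */
		err = blk_rq_map_kern(req->q, req, buffer, bufflen,
				      GFP_KERNEL);
		if (err) {
			blk_put_request(req);
			return err;
		}
	}

	memset(req->cmd, 0, BLK_MAX_CDB);	/* no garbage after the CDB */
	memcpy(req->cmd, cmd, cmd_len);
	req->cmd_len = cmd_len;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= REQ_QUIET;
	req->timeout = 10 * HZ;
	req->retries = 5;

	blk_execute_rq_nowait(req->q, NULL, req, 1, done);
	return 0;
}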
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b82ffd90632e..4b13e36d3aa0 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -277,196 +277,6 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
 }
 EXPORT_SYMBOL(scsi_execute_req);
 
-struct scsi_io_context {
-	void *data;
-	void (*done)(void *data, char *sense, int result, int resid);
-	char sense[SCSI_SENSE_BUFFERSIZE];
-};
-
-static struct kmem_cache *scsi_io_context_cache;
-
-static void scsi_end_async(struct request *req, int uptodate)
-{
-	struct scsi_io_context *sioc = req->end_io_data;
-
-	if (sioc->done)
-		sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);
-
-	kmem_cache_free(scsi_io_context_cache, sioc);
-	__blk_put_request(req->q, req);
-}
-
-static int scsi_merge_bio(struct request *rq, struct bio *bio)
-{
-	struct request_queue *q = rq->q;
-
-	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
-	if (rq_data_dir(rq) == WRITE)
-		bio->bi_rw |= (1 << BIO_RW);
-	blk_queue_bounce(q, &bio);
-
-	return blk_rq_append_bio(q, rq, bio);
-}
-
-static void scsi_bi_endio(struct bio *bio, int error)
-{
-	bio_put(bio);
-}
-
-/**
- * scsi_req_map_sg - map a scatterlist into a request
- * @rq:		request to fill
- * @sgl:	scatterlist
- * @nsegs:	number of elements
- * @bufflen:	len of buffer
- * @gfp:	memory allocation flags
- *
- * scsi_req_map_sg maps a scatterlist into a request so that the
- * request can be sent to the block layer. We do not trust the scatterlist
- * sent to us, as some ULDs use that struct to only organize the pages.
- */
-static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
-			   int nsegs, unsigned bufflen, gfp_t gfp)
-{
-	struct request_queue *q = rq->q;
-	int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	unsigned int data_len = bufflen, len, bytes, off;
-	struct scatterlist *sg;
-	struct page *page;
-	struct bio *bio = NULL;
-	int i, err, nr_vecs = 0;
-
-	for_each_sg(sgl, sg, nsegs, i) {
-		page = sg_page(sg);
-		off = sg->offset;
-		len = sg->length;
-
-		while (len > 0 && data_len > 0) {
-			/*
-			 * sg sends a scatterlist that is larger than
-			 * the data_len it wants transferred for certain
-			 * IO sizes
-			 */
-			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
-			bytes = min(bytes, data_len);
-
-			if (!bio) {
-				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
-				nr_pages -= nr_vecs;
-
-				bio = bio_alloc(gfp, nr_vecs);
-				if (!bio) {
-					err = -ENOMEM;
-					goto free_bios;
-				}
-				bio->bi_end_io = scsi_bi_endio;
-			}
-
-			if (bio_add_pc_page(q, bio, page, bytes, off) !=
-			    bytes) {
-				bio_put(bio);
-				err = -EINVAL;
-				goto free_bios;
-			}
-
-			if (bio->bi_vcnt >= nr_vecs) {
-				err = scsi_merge_bio(rq, bio);
-				if (err) {
-					bio_endio(bio, 0);
-					goto free_bios;
-				}
-				bio = NULL;
-			}
-
-			page++;
-			len -= bytes;
-			data_len -= bytes;
-			off = 0;
-		}
-	}
-
-	rq->buffer = rq->data = NULL;
-	rq->data_len = bufflen;
-	return 0;
-
-free_bios:
-	while ((bio = rq->bio) != NULL) {
-		rq->bio = bio->bi_next;
-		/*
-		 * call endio instead of bio_put in case it was bounced
-		 */
-		bio_endio(bio, 0);
-	}
-
-	return err;
-}
-
-/**
- * scsi_execute_async - insert request
- * @sdev:	scsi device
- * @cmd:	scsi command
- * @cmd_len:	length of scsi cdb
- * @data_direction: DMA_TO_DEVICE, DMA_FROM_DEVICE, or DMA_NONE
- * @buffer:	data buffer (this can be a kernel buffer or scatterlist)
- * @bufflen:	len of buffer
- * @use_sg:	if buffer is a scatterlist this is the number of elements
- * @timeout:	request timeout in seconds
- * @retries:	number of times to retry request
- * @privdata:	data passed to done()
- * @done:	callback function when done
- * @gfp:	memory allocation flags
- */
-int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
-		       int cmd_len, int data_direction, void *buffer, unsigned bufflen,
-		       int use_sg, int timeout, int retries, void *privdata,
-		       void (*done)(void *, char *, int, int), gfp_t gfp)
-{
-	struct request *req;
-	struct scsi_io_context *sioc;
-	int err = 0;
-	int write = (data_direction == DMA_TO_DEVICE);
-
-	sioc = kmem_cache_zalloc(scsi_io_context_cache, gfp);
-	if (!sioc)
-		return DRIVER_ERROR << 24;
-
-	req = blk_get_request(sdev->request_queue, write, gfp);
-	if (!req)
-		goto free_sense;
-	req->cmd_type = REQ_TYPE_BLOCK_PC;
-	req->cmd_flags |= REQ_QUIET;
-
-	if (use_sg)
-		err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
-	else if (bufflen)
-		err = blk_rq_map_kern(req->q, req, buffer, bufflen, gfp);
-
-	if (err)
-		goto free_req;
-
-	req->cmd_len = cmd_len;
-	memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
-	memcpy(req->cmd, cmd, req->cmd_len);
-	req->sense = sioc->sense;
-	req->sense_len = 0;
-	req->timeout = timeout;
-	req->retries = retries;
-	req->end_io_data = sioc;
-
-	sioc->data = privdata;
-	sioc->done = done;
-
-	blk_execute_rq_nowait(req->q, NULL, req, 1, scsi_end_async);
-	return 0;
-
-free_req:
-	blk_put_request(req);
-free_sense:
-	kmem_cache_free(scsi_io_context_cache, sioc);
-	return DRIVER_ERROR << 24;
-}
-EXPORT_SYMBOL_GPL(scsi_execute_async);
-
 /*
  * Function:	scsi_init_cmd_errh()
  *
@@ -1920,20 +1730,12 @@ int __init scsi_init_queue(void)
 {
 	int i;
 
-	scsi_io_context_cache = kmem_cache_create("scsi_io_context",
-					sizeof(struct scsi_io_context),
-					0, 0, NULL);
-	if (!scsi_io_context_cache) {
-		printk(KERN_ERR "SCSI: can't init scsi io context cache\n");
-		return -ENOMEM;
-	}
-
 	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
 					sizeof(struct scsi_data_buffer),
 					0, 0, NULL);
 	if (!scsi_sdb_cache) {
 		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
-		goto cleanup_io_context;
+		return -ENOMEM;
 	}
 
 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
@@ -1968,8 +1770,6 @@ cleanup_sdb:
 		kmem_cache_destroy(sgp->slab);
 	}
 	kmem_cache_destroy(scsi_sdb_cache);
-cleanup_io_context:
-	kmem_cache_destroy(scsi_io_context_cache);
 
 	return -ENOMEM;
 }
@@ -1978,7 +1778,6 @@ void scsi_exit_queue(void)
 {
 	int i;
 
-	kmem_cache_destroy(scsi_io_context_cache);
 	kmem_cache_destroy(scsi_sdb_cache);
 
 	for (i = 0; i < SG_MEMPOOL_NR; i++) {