Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r--  drivers/scsi/scsi_lib.c  274
1 file changed, 161 insertions(+), 113 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 7c4c889c5221..b12fb310e399 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -8,6 +8,7 @@
  */
 
 #include <linux/bio.h>
+#include <linux/bitops.h>
 #include <linux/blkdev.h>
 #include <linux/completion.h>
 #include <linux/kernel.h>
@@ -34,13 +35,6 @@
 #define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
 #define SG_MEMPOOL_SIZE		2
 
-/*
- * The maximum number of SG segments that we will put inside a scatterlist
- * (unless chaining is used). Should ideally fit inside a single page, to
- * avoid a higher order allocation.
- */
-#define SCSI_MAX_SG_SEGMENTS	128
-
 struct scsi_host_sg_pool {
 	size_t		size;
 	char		*name;
@@ -48,22 +42,31 @@ struct scsi_host_sg_pool {
 	mempool_t	*pool;
 };
 
-#define SP(x) { x, "sgpool-" #x }
+#define SP(x) { x, "sgpool-" __stringify(x) }
+#if (SCSI_MAX_SG_SEGMENTS < 32)
+#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
+#endif
 static struct scsi_host_sg_pool scsi_sg_pools[] = {
 	SP(8),
 	SP(16),
-#if (SCSI_MAX_SG_SEGMENTS > 16)
-	SP(32),
 #if (SCSI_MAX_SG_SEGMENTS > 32)
-	SP(64),
+	SP(32),
 #if (SCSI_MAX_SG_SEGMENTS > 64)
+	SP(64),
+#if (SCSI_MAX_SG_SEGMENTS > 128)
 	SP(128),
+#if (SCSI_MAX_SG_SEGMENTS > 256)
+#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
+#endif
 #endif
 #endif
 #endif
+	SP(SCSI_MAX_SG_SEGMENTS)
 };
 #undef SP
 
+static struct kmem_cache *scsi_bidi_sdb_cache;
+
 static void scsi_run_queue(struct request_queue *q);
 
 /*
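
Aside: SP() switches from plain "#x" to "__stringify(x)" because the new SP(SCSI_MAX_SG_SEGMENTS) entry passes a macro rather than a literal; "#x" stringifies the token unexpanded, while __stringify() expands it first. A minimal standalone illustration of the difference (userspace sketch; STR/XSTR are hypothetical names):

#include <stdio.h>

#define STR(x)	#x		/* stringify without expansion */
#define XSTR(x)	STR(x)		/* expand first, as __stringify() does */

#define SCSI_MAX_SG_SEGMENTS 128

int main(void)
{
	printf("%s\n", STR(SCSI_MAX_SG_SEGMENTS));	/* "SCSI_MAX_SG_SEGMENTS" */
	printf("%s\n", XSTR(SCSI_MAX_SG_SEGMENTS));	/* "128" */
	return 0;
}
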
@@ -440,7 +443,7 @@ EXPORT_SYMBOL_GPL(scsi_execute_async);
 static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
 {
 	cmd->serial_number = 0;
-	cmd->resid = 0;
+	scsi_set_resid(cmd, 0);
 	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
 	if (cmd->cmd_len == 0)
 		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
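
The scsi_set_resid() call above (and scsi_bufflen()/scsi_get_resid() further down) are the new accessors around the embedded data buffer; they come from the companion include/scsi/scsi_cmnd.h change, not from this file. A sketch of what that header presumably provides after this series:

/* Sketch: accessors replacing the old cmd->resid/request_bufflen fields */
static inline void scsi_set_resid(struct scsi_cmnd *cmd, int resid)
{
	cmd->sdb.resid = resid;
}

static inline int scsi_get_resid(struct scsi_cmnd *cmd)
{
	return cmd->sdb.resid;
}

static inline unsigned scsi_bufflen(struct scsi_cmnd *cmd)
{
	return cmd->sdb.length;
}

static inline struct scatterlist *scsi_sglist(struct scsi_cmnd *cmd)
{
	return cmd->sdb.table.sgl;
}
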
@@ -690,42 +693,16 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
 	return NULL;
 }
 
-/*
- * Like SCSI_MAX_SG_SEGMENTS, but for archs that have sg chaining. This limit
- * is totally arbitrary, a setting of 2048 will get you at least 8mb ios.
- */
-#define SCSI_MAX_SG_CHAIN_SEGMENTS	2048
-
 static inline unsigned int scsi_sgtable_index(unsigned short nents)
 {
 	unsigned int index;
 
-	switch (nents) {
-	case 1 ... 8:
+	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);
+
+	if (nents <= 8)
 		index = 0;
-		break;
-	case 9 ... 16:
-		index = 1;
-		break;
-#if (SCSI_MAX_SG_SEGMENTS > 16)
-	case 17 ... 32:
-		index = 2;
-		break;
-#if (SCSI_MAX_SG_SEGMENTS > 32)
-	case 33 ... 64:
-		index = 3;
-		break;
-#if (SCSI_MAX_SG_SEGMENTS > 64)
-	case 65 ... 128:
-		index = 4;
-		break;
-#endif
-#endif
-#endif
-	default:
-		printk(KERN_ERR "scsi: bad segment count=%d\n", nents);
-		BUG();
-	}
+	else
+		index = get_count_order(nents) - 3;
 
 	return index;
 }
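
For reference, get_count_order(n) (hence the new linux/bitops.h include) returns ceil(log2(n)), so get_count_order(nents) - 3 maps a segment count straight onto the pool ladder above: 9..16 -> sgpool-16, 17..32 -> sgpool-32, 33..64 -> sgpool-64, 65..128 -> sgpool-128. A runnable userspace sketch of the mapping (count_order() is a stand-in for the kernel helper):

#include <assert.h>

/* ceil(log2(n)): order of the smallest power of two >= n */
static int count_order(unsigned int n)
{
	int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

static unsigned int sgtable_index(unsigned short nents)
{
	return nents <= 8 ? 0 : count_order(nents) - 3;
}

int main(void)
{
	assert(sgtable_index(8)   == 0);	/* sgpool-8 */
	assert(sgtable_index(9)   == 1);	/* sgpool-16 */
	assert(sgtable_index(17)  == 2);	/* sgpool-32 */
	assert(sgtable_index(64)  == 3);	/* sgpool-64 */
	assert(sgtable_index(128) == 4);	/* sgpool-128 */
	return 0;
}
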
@@ -746,31 +723,27 @@ static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
 	return mempool_alloc(sgp->pool, gfp_mask);
 }
 
-int scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
+			      gfp_t gfp_mask)
 {
 	int ret;
 
-	BUG_ON(!cmd->use_sg);
+	BUG_ON(!nents);
 
-	ret = __sg_alloc_table(&cmd->sg_table, cmd->use_sg,
-			SCSI_MAX_SG_SEGMENTS, gfp_mask, scsi_sg_alloc);
+	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
+			       gfp_mask, scsi_sg_alloc);
 	if (unlikely(ret))
-		__sg_free_table(&cmd->sg_table, SCSI_MAX_SG_SEGMENTS,
+		__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
 				scsi_sg_free);
 
-	cmd->request_buffer = cmd->sg_table.sgl;
 	return ret;
 }
 
-EXPORT_SYMBOL(scsi_alloc_sgtable);
-
-void scsi_free_sgtable(struct scsi_cmnd *cmd)
+static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
 {
-	__sg_free_table(&cmd->sg_table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
+	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
 }
 
-EXPORT_SYMBOL(scsi_free_sgtable);
-
 /*
  * Function: scsi_release_buffers()
  *
@@ -788,17 +761,49 @@ EXPORT_SYMBOL(scsi_free_sgtable);
  *		the scatter-gather table, and potentially any bounce
  *		buffers.
  */
-static void scsi_release_buffers(struct scsi_cmnd *cmd)
+void scsi_release_buffers(struct scsi_cmnd *cmd)
+{
+	if (cmd->sdb.table.nents)
+		scsi_free_sgtable(&cmd->sdb);
+
+	memset(&cmd->sdb, 0, sizeof(cmd->sdb));
+
+	if (scsi_bidi_cmnd(cmd)) {
+		struct scsi_data_buffer *bidi_sdb =
+			cmd->request->next_rq->special;
+		scsi_free_sgtable(bidi_sdb);
+		kmem_cache_free(scsi_bidi_sdb_cache, bidi_sdb);
+		cmd->request->next_rq->special = NULL;
+	}
+}
+EXPORT_SYMBOL(scsi_release_buffers);
+
+/*
+ * Bidi commands Must be complete as a whole, both sides at once.
+ * If part of the bytes were written and lld returned
+ * scsi_in()->resid and/or scsi_out()->resid this information will be left
+ * in req->data_len and req->next_rq->data_len. The upper-layer driver can
+ * decide what to do with this information.
+ */
+void scsi_end_bidi_request(struct scsi_cmnd *cmd)
 {
-	if (cmd->use_sg)
-		scsi_free_sgtable(cmd);
+	struct request *req = cmd->request;
+	unsigned int dlen = req->data_len;
+	unsigned int next_dlen = req->next_rq->data_len;
+
+	req->data_len = scsi_out(cmd)->resid;
+	req->next_rq->data_len = scsi_in(cmd)->resid;
+
+	/* The req and req->next_rq have not been completed */
+	BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen));
+
+	scsi_release_buffers(cmd);
 
 	/*
-	 * Zero these out. They now point to freed memory, and it is
-	 * dangerous to hang onto the pointers.
+	 * This will goose the queue request function at the end, so we don't
+	 * need to worry about launching another command.
 	 */
-	cmd->request_buffer = NULL;
-	cmd->request_bufflen = 0;
+	scsi_next_command(cmd);
 }
 
 /*
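
The scsi_out()/scsi_in() and scsi_bidi_cmnd() helpers used here also live in the companion scsi_cmnd.h change: the "out" (write) side of a command is its own embedded sdb, while the "in" (read) side of a bidirectional pair hangs off the paired request. Roughly (a sketch, not part of this file's diff):

/* Sketch of the bidi helpers assumed by the code above */
static inline struct scsi_data_buffer *scsi_out(struct scsi_cmnd *cmd)
{
	return &cmd->sdb;
}

static inline struct scsi_data_buffer *scsi_in(struct scsi_cmnd *cmd)
{
	return cmd->request->next_rq->special;
}

static inline int scsi_bidi_cmnd(struct scsi_cmnd *cmd)
{
	return blk_bidi_rq(cmd->request) &&
		(cmd->request->next_rq->special != NULL);
}
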
@@ -832,7 +837,7 @@ static void scsi_release_buffers(struct scsi_cmnd *cmd)
 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 {
 	int result = cmd->result;
-	int this_count = cmd->request_bufflen;
+	int this_count = scsi_bufflen(cmd);
 	struct request_queue *q = cmd->device->request_queue;
 	struct request *req = cmd->request;
 	int clear_errors = 1;
@@ -840,8 +845,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 	int sense_valid = 0;
 	int sense_deferred = 0;
 
-	scsi_release_buffers(cmd);
-
 	if (result) {
 		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
 		if (sense_valid)
@@ -864,9 +867,17 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 				req->sense_len = len;
 			}
 		}
-		req->data_len = cmd->resid;
+		if (scsi_bidi_cmnd(cmd)) {
+			/* will also release_buffers */
+			scsi_end_bidi_request(cmd);
+			return;
+		}
+		req->data_len = scsi_get_resid(cmd);
 	}
 
+	BUG_ON(blk_bidi_rq(req));	/* bidi not support for !blk_pc_request yet */
+	scsi_release_buffers(cmd);
+
 	/*
 	 * Next deal with any sectors which we were able to correctly
 	 * handle.
@@ -874,7 +885,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 	SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, "
 				      "%d bytes done.\n",
 				      req->nr_sectors, good_bytes));
-	SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));
 
 	if (clear_errors)
 		req->errors = 0;
@@ -991,52 +1001,80 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 	scsi_end_request(cmd, -EIO, this_count, !result);
 }
 
-/*
- * Function:    scsi_init_io()
- *
- * Purpose:     SCSI I/O initialize function.
- *
- * Arguments:   cmd   - Command descriptor we wish to initialize
- *
- * Returns:     0 on success
- *		BLKPREP_DEFER if the failure is retryable
- */
-static int scsi_init_io(struct scsi_cmnd *cmd)
+static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
+			     gfp_t gfp_mask)
 {
-	struct request *req = cmd->request;
-	int count;
-
-	/*
-	 * We used to not use scatter-gather for single segment request,
-	 * but now we do (it makes highmem I/O easier to support without
-	 * kmapping pages)
-	 */
-	cmd->use_sg = req->nr_phys_segments;
+	int count;
 
 	/*
 	 * If sg table allocation fails, requeue request later.
 	 */
-	if (unlikely(scsi_alloc_sgtable(cmd, GFP_ATOMIC))) {
-		scsi_unprep_request(req);
+	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
+					gfp_mask))) {
 		return BLKPREP_DEFER;
 	}
 
 	req->buffer = NULL;
 	if (blk_pc_request(req))
-		cmd->request_bufflen = req->data_len;
+		sdb->length = req->data_len;
 	else
-		cmd->request_bufflen = req->nr_sectors << 9;
+		sdb->length = req->nr_sectors << 9;
 
 	/*
 	 * Next, walk the list, and fill in the addresses and sizes of
 	 * each segment.
 	 */
-	count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
-	BUG_ON(count > cmd->use_sg);
-	cmd->use_sg = count;
+	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
+	BUG_ON(count > sdb->table.nents);
+	sdb->table.nents = count;
 	return BLKPREP_OK;
 }
 
+/*
+ * Function:    scsi_init_io()
+ *
+ * Purpose:     SCSI I/O initialize function.
+ *
+ * Arguments:   cmd   - Command descriptor we wish to initialize
+ *
+ * Returns:     0 on success
+ *		BLKPREP_DEFER if the failure is retryable
+ *		BLKPREP_KILL if the failure is fatal
+ */
+int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+{
+	int error = scsi_init_sgtable(cmd->request, &cmd->sdb, gfp_mask);
+	if (error)
+		goto err_exit;
+
+	if (blk_bidi_rq(cmd->request)) {
+		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
+			scsi_bidi_sdb_cache, GFP_ATOMIC);
+		if (!bidi_sdb) {
+			error = BLKPREP_DEFER;
+			goto err_exit;
+		}
+
+		cmd->request->next_rq->special = bidi_sdb;
+		error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb,
+					  GFP_ATOMIC);
+		if (error)
+			goto err_exit;
+	}
+
+	return BLKPREP_OK ;
+
+err_exit:
+	scsi_release_buffers(cmd);
+	if (error == BLKPREP_KILL)
+		scsi_put_command(cmd);
+	else /* BLKPREP_DEFER */
+		scsi_unprep_request(cmd->request);
+
+	return error;
+}
+EXPORT_SYMBOL(scsi_init_io);
+
 static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
 					       struct request *req)
 {
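
The structure at the center of this conversion replaces the old cmd->request_buffer/request_bufflen/use_sg/resid fields. One instance is embedded in struct scsi_cmnd as cmd->sdb; for bidirectional requests a second one is allocated from scsi_bidi_sdb_cache and parked in req->next_rq->special, as scsi_init_io() above shows. It is presumably defined in the companion scsi_cmnd.h change along these lines (sketch):

struct scsi_data_buffer {
	struct sg_table table;	/* (possibly chained) scatterlist */
	unsigned length;	/* total transfer length in bytes */
	int resid;		/* residual count after completion */
};
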
@@ -1081,16 +1119,14 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
 
 		BUG_ON(!req->nr_phys_segments);
 
-		ret = scsi_init_io(cmd);
+		ret = scsi_init_io(cmd, GFP_ATOMIC);
 		if (unlikely(ret))
 			return ret;
 	} else {
 		BUG_ON(req->data_len);
 		BUG_ON(req->data);
 
-		cmd->request_bufflen = 0;
-		cmd->request_buffer = NULL;
-		cmd->use_sg = 0;
+		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
 		req->buffer = NULL;
 	}
 
@@ -1132,7 +1168,7 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
 	if (unlikely(!cmd))
 		return BLKPREP_DEFER;
 
-	return scsi_init_io(cmd);
+	return scsi_init_io(cmd, GFP_ATOMIC);
 }
 EXPORT_SYMBOL(scsi_setup_fs_cmnd);
 
@@ -1542,20 +1578,7 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
 	 * this limit is imposed by hardware restrictions
 	 */
 	blk_queue_max_hw_segments(q, shost->sg_tablesize);
-
-	/*
-	 * In the future, sg chaining support will be mandatory and this
-	 * ifdef can then go away. Right now we don't have all archs
-	 * converted, so better keep it safe.
-	 */
-#ifdef ARCH_HAS_SG_CHAIN
-	if (shost->use_sg_chaining)
-		blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
-	else
-		blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS);
-#else
-	blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS);
-#endif
+	blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
 
 	blk_queue_max_sectors(q, shost->max_sectors);
 	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
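
SCSI_MAX_SG_SEGMENTS and SCSI_MAX_SG_CHAIN_SEGMENTS are deleted from this file because they move to a shared header, which is also what keeps the now-unconditional blk_queue_max_phys_segments() call safe: on architectures without sg chaining the chain limit presumably falls back to the plain limit. A sketch of the relocated definitions (hedged; the exact header text is not part of this diff):

/* Sketch: limits assumed to be relocated to a shared SCSI header */
#define SCSI_MAX_SG_SEGMENTS	128

#ifdef ARCH_HAS_SG_CHAIN
#define SCSI_MAX_SG_CHAIN_SEGMENTS	2048
#else
#define SCSI_MAX_SG_CHAIN_SEGMENTS	SCSI_MAX_SG_SEGMENTS
#endif
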
@@ -1654,6 +1677,14 @@ int __init scsi_init_queue(void)
 		return -ENOMEM;
 	}
 
+	scsi_bidi_sdb_cache = kmem_cache_create("scsi_bidi_sdb",
+					sizeof(struct scsi_data_buffer),
+					0, 0, NULL);
+	if (!scsi_bidi_sdb_cache) {
+		printk(KERN_ERR "SCSI: can't init scsi bidi sdb cache\n");
+		goto cleanup_io_context;
+	}
+
 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
 		int size = sgp->size * sizeof(struct scatterlist);
@@ -1663,6 +1694,7 @@ int __init scsi_init_queue(void)
 		if (!sgp->slab) {
 			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
 					sgp->name);
+			goto cleanup_bidi_sdb;
 		}
 
 		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
@@ -1670,10 +1702,25 @@ int __init scsi_init_queue(void)
 		if (!sgp->pool) {
 			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
 					sgp->name);
+			goto cleanup_bidi_sdb;
 		}
 	}
 
 	return 0;
+
+cleanup_bidi_sdb:
+	for (i = 0; i < SG_MEMPOOL_NR; i++) {
+		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
+		if (sgp->pool)
+			mempool_destroy(sgp->pool);
+		if (sgp->slab)
+			kmem_cache_destroy(sgp->slab);
+	}
+	kmem_cache_destroy(scsi_bidi_sdb_cache);
+cleanup_io_context:
+	kmem_cache_destroy(scsi_io_context_cache);
+
+	return -ENOMEM;
 }
 
 void scsi_exit_queue(void)
@@ -1681,6 +1728,7 @@ void scsi_exit_queue(void)
 	int i;
 
 	kmem_cache_destroy(scsi_io_context_cache);
+	kmem_cache_destroy(scsi_bidi_sdb_cache);
 
 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;