author	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 13:09:16 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 13:09:16 -0400
commit	92d15c2ccbb3e31a3fc71ad28fdb55e1319383c0 (patch)
tree	8d83c0dc3c6b935d8367e331872f242b742f0a8a /drivers/scsi
parent	f20bf6125605acbbc7eb8c9420d7221c91aa83eb (diff)
parent	644bd2f048972d75eb1979b1fdca257d528ce687 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/data/git/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/data/git/linux-2.6-block: (63 commits)
  Fix memory leak in dm-crypt
  SPARC64: sg chaining support
  SPARC: sg chaining support
  PPC: sg chaining support
  PS3: sg chaining support
  IA64: sg chaining support
  x86-64: enable sg chaining
  x86-64: update pci-gart iommu to sg helpers
  x86-64: update nommu to sg helpers
  x86-64: update calgary iommu to sg helpers
  swiotlb: sg chaining support
  i386: enable sg chaining
  i386 dma_map_sg: convert to using sg helpers
  mmc: need to zero sglist on init
  Panic in blk_rq_map_sg() from CCISS driver
  remove sglist_len
  remove blk_queue_max_phys_segments in libata
  revert sg segment size ifdefs
  Fixup u14-34f ENABLE_SG_CHAINING
  qla1280: enable use_sg_chaining option
  ...
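For orientation only (not part of this commit), a minimal sketch of the pattern the driver-side hunks below follow in this 2.6.24-era tree: a host template opts in with .use_sg_chaining, and scatterlist walks use the sg helpers (scsi_for_each_sg()/sg_next()) instead of plain pointer arithmetic, so a table may span chained pages. The driver/function names here are made up for illustration.

```c
#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Opt in: tell the midlayer this driver copes with chained sg tables. */
static struct scsi_host_template example_template = {
	.name			= "example",
	.sg_tablesize		= SG_ALL,
	.use_clustering		= ENABLE_CLUSTERING,
	.use_sg_chaining	= ENABLE_SG_CHAINING,
};

/* Walk the command's scatterlist with the sg helpers rather than sg++ or
 * sg[i], so traversal follows chain links transparently. */
static void example_map(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int i;

	scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		/* program addr/len into the HBA's sg descriptor here */
		(void)addr;
		(void)len;
	}
}
```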
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/3w-9xxx.c                |   1
-rw-r--r--  drivers/scsi/3w-xxxx.c                |   1
-rw-r--r--  drivers/scsi/BusLogic.c               |   1
-rw-r--r--  drivers/scsi/NCR53c406a.c             |   3
-rw-r--r--  drivers/scsi/a100u2w.c                |   1
-rw-r--r--  drivers/scsi/aacraid/linit.c          |   1
-rw-r--r--  drivers/scsi/aha1542.c                |  32
-rw-r--r--  drivers/scsi/aha1740.c                |   1
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c    |   1
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.c    |   1
-rw-r--r--  drivers/scsi/aic7xxx_old.c            |   1
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_task.c   |   6
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c      |   1
-rw-r--r--  drivers/scsi/dc395x.c                 |   1
-rw-r--r--  drivers/scsi/dpt_i2o.c                |   1
-rw-r--r--  drivers/scsi/eata.c                   |   3
-rw-r--r--  drivers/scsi/hosts.c                  |   1
-rw-r--r--  drivers/scsi/hptiop.c                 |   1
-rw-r--r--  drivers/scsi/ibmmca.c                 |   1
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c      |   1
-rw-r--r--  drivers/scsi/ide-scsi.c               |  32
-rw-r--r--  drivers/scsi/initio.c                 |   1
-rw-r--r--  drivers/scsi/ips.c                    |  14
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c         |   2
-rw-r--r--  drivers/scsi/mac53c94.c               |   1
-rw-r--r--  drivers/scsi/megaraid.c               |   1
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c |   1
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.c  |   1
-rw-r--r--  drivers/scsi/mesh.c                   |   1
-rw-r--r--  drivers/scsi/nsp32.c                  |   1
-rw-r--r--  drivers/scsi/pcmcia/sym53c500_cs.c    |   1
-rw-r--r--  drivers/scsi/qla1280.c                |  70
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c         |   2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c         |   1
-rw-r--r--  drivers/scsi/qlogicfas.c              |   1
-rw-r--r--  drivers/scsi/qlogicpti.c              |  15
-rw-r--r--  drivers/scsi/scsi_debug.c             |  30
-rw-r--r--  drivers/scsi/scsi_lib.c               | 238
-rw-r--r--  drivers/scsi/scsi_tgt_lib.c           |   4
-rw-r--r--  drivers/scsi/sd.c                     |  22
-rw-r--r--  drivers/scsi/sg.c                     |  16
-rw-r--r--  drivers/scsi/stex.c                   |   1
-rw-r--r--  drivers/scsi/sym53c416.c              |   1
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.c   |   1
-rw-r--r--  drivers/scsi/u14-34f.c                |   3
-rw-r--r--  drivers/scsi/ultrastor.c              |   1
-rw-r--r--  drivers/scsi/wd7000.c                 |   1
47 files changed, 345 insertions, 178 deletions
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index efd9d8d3a890..fb14014ee16e 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1990,6 +1990,7 @@ static struct scsi_host_template driver_template = {
 	.max_sectors		= TW_MAX_SECTORS,
 	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.shost_attrs		= twa_host_attrs,
 	.emulated		= 1
 };
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index c7995fc216e8..a64153b96034 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -2261,6 +2261,7 @@ static struct scsi_host_template driver_template = {
 	.max_sectors		= TW_MAX_SECTORS,
 	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.shost_attrs		= tw_host_attrs,
 	.emulated		= 1
 };
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 9b206176f717..49e1ffa4b2ff 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -3575,6 +3575,7 @@ static struct scsi_host_template Bus_Logic_template = {
 	.unchecked_isa_dma	= 1,
 	.max_sectors		= 128,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };
 
 /*
diff --git a/drivers/scsi/NCR53c406a.c b/drivers/scsi/NCR53c406a.c
index eda8c48f6be7..3168a1794849 100644
--- a/drivers/scsi/NCR53c406a.c
+++ b/drivers/scsi/NCR53c406a.c
@@ -1066,7 +1066,8 @@ static struct scsi_host_template driver_template =
 	.sg_tablesize		= 32 /*SG_ALL*/ /*SG_NONE*/,
 	.cmd_per_lun		= 1 /* commands per lun */,
 	.unchecked_isa_dma	= 1 /* unchecked_isa_dma */,
-	.use_clustering		= ENABLE_CLUSTERING
+	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };
 
 #include "scsi_module.c"
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index f608d4a1d6da..d3a6d15fb77a 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -1071,6 +1071,7 @@ static struct scsi_host_template inia100_template = {
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= 1,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };
 
 static int __devinit inia100_probe_one(struct pci_dev *pdev,
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index a7f42a17b5c7..038980be763d 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -944,6 +944,7 @@ static struct scsi_host_template aac_driver_template = {
 	.cmd_per_lun		= AAC_NUM_IO_FIB,
 #endif
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.emulated		= 1,
 };
 
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index cbbfbc9f3e0f..961a1882cb7e 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -61,15 +61,15 @@ static void BAD_DMA(void *address, unsigned int length)
 }
 
 static void BAD_SG_DMA(Scsi_Cmnd * SCpnt,
-		       struct scatterlist *sgpnt,
+		       struct scatterlist *sgp,
 		       int nseg,
 		       int badseg)
 {
 	printk(KERN_CRIT "sgpnt[%d:%d] page %p/0x%llx length %u\n",
 	       badseg, nseg,
-	       page_address(sgpnt[badseg].page) + sgpnt[badseg].offset,
-	       (unsigned long long)SCSI_SG_PA(&sgpnt[badseg]),
-	       sgpnt[badseg].length);
+	       page_address(sgp->page) + sgp->offset,
+	       (unsigned long long)SCSI_SG_PA(sgp),
+	       sgp->length);
 
 	/*
 	 * Not safe to continue.
@@ -691,7 +691,7 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
 	memcpy(ccb[mbo].cdb, cmd, ccb[mbo].cdblen);
 
 	if (SCpnt->use_sg) {
-		struct scatterlist *sgpnt;
+		struct scatterlist *sg;
 		struct chain *cptr;
 #ifdef DEBUG
 		unsigned char *ptr;
@@ -699,23 +699,21 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
 		int i;
 		ccb[mbo].op = 2;	/* SCSI Initiator Command  w/scatter-gather */
 		SCpnt->host_scribble = kmalloc(512, GFP_KERNEL | GFP_DMA);
-		sgpnt = (struct scatterlist *) SCpnt->request_buffer;
 		cptr = (struct chain *) SCpnt->host_scribble;
 		if (cptr == NULL) {
 			/* free the claimed mailbox slot */
 			HOSTDATA(SCpnt->device->host)->SCint[mbo] = NULL;
 			return SCSI_MLQUEUE_HOST_BUSY;
 		}
-		for (i = 0; i < SCpnt->use_sg; i++) {
-			if (sgpnt[i].length == 0 || SCpnt->use_sg > 16 ||
-			    (((int) sgpnt[i].offset) & 1) || (sgpnt[i].length & 1)) {
+		scsi_for_each_sg(SCpnt, sg, SCpnt->use_sg, i) {
+			if (sg->length == 0 || SCpnt->use_sg > 16 ||
+			    (((int) sg->offset) & 1) || (sg->length & 1)) {
 				unsigned char *ptr;
 				printk(KERN_CRIT "Bad segment list supplied to aha1542.c (%d, %d)\n", SCpnt->use_sg, i);
-				for (i = 0; i < SCpnt->use_sg; i++) {
+				scsi_for_each_sg(SCpnt, sg, SCpnt->use_sg, i) {
 					printk(KERN_CRIT "%d: %p %d\n", i,
-					       (page_address(sgpnt[i].page) +
-					       sgpnt[i].offset),
-					       sgpnt[i].length);
+					       (page_address(sg->page) +
+					       sg->offset), sg->length);
 				};
 				printk(KERN_CRIT "cptr %x: ", (unsigned int) cptr);
 				ptr = (unsigned char *) &cptr[i];
@@ -723,10 +721,10 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
 					printk("%02x ", ptr[i]);
 				panic("Foooooooood fight!");
 			};
-			any2scsi(cptr[i].dataptr, SCSI_SG_PA(&sgpnt[i]));
-			if (SCSI_SG_PA(&sgpnt[i]) + sgpnt[i].length - 1 > ISA_DMA_THRESHOLD)
-				BAD_SG_DMA(SCpnt, sgpnt, SCpnt->use_sg, i);
-			any2scsi(cptr[i].datalen, sgpnt[i].length);
+			any2scsi(cptr[i].dataptr, SCSI_SG_PA(sg));
+			if (SCSI_SG_PA(sg) + sg->length - 1 > ISA_DMA_THRESHOLD)
+				BAD_SG_DMA(SCpnt, sg, SCpnt->use_sg, i);
+			any2scsi(cptr[i].datalen, sg->length);
 		};
 		any2scsi(ccb[mbo].datalen, SCpnt->use_sg * sizeof(struct chain));
 		any2scsi(ccb[mbo].dataptr, SCSI_BUF_PA(cptr));
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index e4a4f3a965d9..f6722fd46008 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -563,6 +563,7 @@ static struct scsi_host_template aha1740_template = {
 	.sg_tablesize		= AHA1740_SCATTER,
 	.cmd_per_lun		= AHA1740_CMDLUN,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.eh_abort_handler	= aha1740_eh_abort_handler,
 };
 
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index a055a96e3ad3..42c0f14a262c 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -766,6 +766,7 @@ struct scsi_host_template aic79xx_driver_template = {
 	.max_sectors		= 8192,
 	.cmd_per_lun		= 2,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.slave_alloc		= ahd_linux_slave_alloc,
 	.slave_configure	= ahd_linux_slave_configure,
 	.target_alloc		= ahd_linux_target_alloc,
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 2e9c38f2e8a6..7770befbf50c 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -747,6 +747,7 @@ struct scsi_host_template aic7xxx_driver_template = {
 	.max_sectors		= 8192,
 	.cmd_per_lun		= 2,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.slave_alloc		= ahc_linux_slave_alloc,
 	.slave_configure	= ahc_linux_slave_configure,
 	.target_alloc		= ahc_linux_target_alloc,
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c
index 1a71b0236c97..4025608d6964 100644
--- a/drivers/scsi/aic7xxx_old.c
+++ b/drivers/scsi/aic7xxx_old.c
@@ -11142,6 +11142,7 @@ static struct scsi_host_template driver_template = {
 	.max_sectors		= 2048,
 	.cmd_per_lun		= 3,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };
 
 #include "scsi_module.c"
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index f2b23e01401a..ee0a98bffcd4 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -94,7 +94,7 @@ static inline int asd_map_scatterlist(struct sas_task *task,
 			res = -ENOMEM;
 			goto err_unmap;
 		}
-		for (sc = task->scatter, i = 0; i < num_sg; i++, sc++) {
+		for_each_sg(task->scatter, sc, num_sg, i) {
 			struct sg_el *sg =
 				&((struct sg_el *)ascb->sg_arr->vaddr)[i];
 			sg->bus_addr = cpu_to_le64((u64)sg_dma_address(sc));
@@ -103,7 +103,7 @@ static inline int asd_map_scatterlist(struct sas_task *task,
 				sg->flags |= ASD_SG_EL_LIST_EOL;
 		}
 
-		for (sc = task->scatter, i = 0; i < 2; i++, sc++) {
+		for_each_sg(task->scatter, sc, 2, i) {
 			sg_arr[i].bus_addr =
 				cpu_to_le64((u64)sg_dma_address(sc));
 			sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
@@ -115,7 +115,7 @@ static inline int asd_map_scatterlist(struct sas_task *task,
 		sg_arr[2].bus_addr=cpu_to_le64((u64)ascb->sg_arr->dma_handle);
 	} else {
 		int i;
-		for (sc = task->scatter, i = 0; i < num_sg; i++, sc++) {
+		for_each_sg(task->scatter, sc, num_sg, i) {
 			sg_arr[i].bus_addr =
 				cpu_to_le64((u64)sg_dma_address(sc));
 			sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index cfcf40159eab..f81777586b8f 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -122,6 +122,7 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
 	.max_sectors		= ARCMSR_MAX_XFER_SECTORS,
 	.cmd_per_lun		= ARCMSR_MAX_CMD_PERLUN,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.shost_attrs		= arcmsr_host_attrs,
 };
 #ifdef CONFIG_SCSI_ARCMSR_AER
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 1591824cf4b3..fd42d4789202 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -4765,6 +4765,7 @@ static struct scsi_host_template dc395x_driver_template = {
 	.eh_bus_reset_handler	= dc395x_eh_bus_reset,
 	.unchecked_isa_dma	= 0,
 	.use_clustering		= DISABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };
 
 
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index bea9d659af15..8258506ba7d7 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -3295,6 +3295,7 @@ static struct scsi_host_template adpt_template = {
 	.this_id		= 7,
 	.cmd_per_lun		= 1,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };
 
 static s32 adpt_scsi_register(adpt_hba* pHba)
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index ec2233114bc9..7ead5210de96 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -523,7 +523,8 @@ static struct scsi_host_template driver_template = {
 	.slave_configure	= eata2x_slave_configure,
 	.this_id		= 7,
 	.unchecked_isa_dma	= 1,
-	.use_clustering		= ENABLE_CLUSTERING
+	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };
 
 #if !defined(__BIG_ENDIAN_BITFIELD) && !defined(__LITTLE_ENDIAN_BITFIELD)
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index adc9559cb6f4..112ab6abe62b 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -343,6 +343,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
 	shost->use_clustering = sht->use_clustering;
 	shost->ordered_tag = sht->ordered_tag;
 	shost->active_mode = sht->supported_mode;
+	shost->use_sg_chaining = sht->use_sg_chaining;
 
 	if (sht->max_host_blocked)
 		shost->max_host_blocked = sht->max_host_blocked;
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 8b384fa7f048..8515054cdf70 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -655,6 +655,7 @@ static struct scsi_host_template driver_template = {
 	.unchecked_isa_dma	= 0,
 	.emulated		= 0,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.proc_name		= driver_name,
 	.shost_attrs		= hptiop_attrs,
 	.this_id		= -1,
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 1a924e9b0271..714e6273a70d 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -1501,6 +1501,7 @@ static struct scsi_host_template ibmmca_driver_template = {
 	.sg_tablesize		= 16,
 	.cmd_per_lun		= 1,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };
 
 static int ibmmca_probe(struct device *dev)
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index cda0cc3d182f..22d91ee173c5 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1548,6 +1548,7 @@ static struct scsi_host_template driver_template = {
 	.this_id		= -1,
 	.sg_tablesize		= SG_ALL,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.shost_attrs		= ibmvscsi_attrs,
 };
 
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index d81bb076a15a..d297f64cd432 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -70,6 +70,7 @@ typedef struct idescsi_pc_s {
 	u8 *buffer;				/* Data buffer */
 	u8 *current_position;			/* Pointer into the above buffer */
 	struct scatterlist *sg;			/* Scatter gather table */
+	struct scatterlist *last_sg;		/* Last sg element */
 	int b_count;				/* Bytes transferred from current entry */
 	struct scsi_cmnd *scsi_cmd;		/* SCSI command */
 	void (*done)(struct scsi_cmnd *);	/* Scsi completion routine */
@@ -173,12 +174,6 @@ static void idescsi_input_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigne
 	char *buf;
 
 	while (bcount) {
-		if (pc->sg - scsi_sglist(pc->scsi_cmd) >
-		    scsi_sg_count(pc->scsi_cmd)) {
-			printk (KERN_ERR "ide-scsi: scatter gather table too small, discarding data\n");
-			idescsi_discard_data (drive, bcount);
-			return;
-		}
 		count = min(pc->sg->length - pc->b_count, bcount);
 		if (PageHighMem(pc->sg->page)) {
 			unsigned long flags;
@@ -197,10 +192,17 @@ static void idescsi_input_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigne
 		}
 		bcount -= count; pc->b_count += count;
 		if (pc->b_count == pc->sg->length) {
-			pc->sg++;
+			if (pc->sg == pc->last_sg)
+				break;
+			pc->sg = sg_next(pc->sg);
 			pc->b_count = 0;
 		}
 	}
+
+	if (bcount) {
+		printk (KERN_ERR "ide-scsi: scatter gather table too small, discarding data\n");
+		idescsi_discard_data (drive, bcount);
+	}
 }
 
 static void idescsi_output_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigned int bcount)
@@ -209,12 +211,6 @@ static void idescsi_output_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsign
 	char *buf;
 
 	while (bcount) {
-		if (pc->sg - scsi_sglist(pc->scsi_cmd) >
-		    scsi_sg_count(pc->scsi_cmd)) {
-			printk (KERN_ERR "ide-scsi: scatter gather table too small, padding with zeros\n");
-			idescsi_output_zeros (drive, bcount);
-			return;
-		}
 		count = min(pc->sg->length - pc->b_count, bcount);
 		if (PageHighMem(pc->sg->page)) {
 			unsigned long flags;
@@ -233,10 +229,17 @@ static void idescsi_output_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsign
 		}
 		bcount -= count; pc->b_count += count;
 		if (pc->b_count == pc->sg->length) {
-			pc->sg++;
+			if (pc->sg == pc->last_sg)
+				break;
+			pc->sg = sg_next(pc->sg);
 			pc->b_count = 0;
 		}
 	}
+
+	if (bcount) {
+		printk (KERN_ERR "ide-scsi: scatter gather table too small, padding with zeros\n");
+		idescsi_output_zeros (drive, bcount);
+	}
 }
 
 static void hexdump(u8 *x, int len)
@@ -804,6 +807,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
 	memcpy (pc->c, cmd->cmnd, cmd->cmd_len);
 	pc->buffer = NULL;
 	pc->sg = scsi_sglist(cmd);
+	pc->last_sg = sg_last(pc->sg, cmd->use_sg);
 	pc->b_count = 0;
 	pc->request_transfer = pc->buffer_size = scsi_bufflen(cmd);
 	pc->scsi_cmd = cmd;
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index d9dfb69ae031..22d40fd5845b 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2831,6 +2831,7 @@ static struct scsi_host_template initio_template = {
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= 1,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };
 
 static int initio_probe_one(struct pci_dev *pdev,
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 2ed099e2c20d..edaac2714c5a 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -3252,7 +3252,7 @@ ips_done(ips_ha_t * ha, ips_scb_t * scb)
 	 */
 	if ((scb->breakup) || (scb->sg_break)) {
 		struct scatterlist *sg;
-		int sg_dma_index, ips_sg_index = 0;
+		int i, sg_dma_index, ips_sg_index = 0;
 
 		/* we had a data breakup */
 		scb->data_len = 0;
@@ -3261,20 +3261,22 @@ ips_done(ips_ha_t * ha, ips_scb_t * scb)
 
 		/* Spin forward to last dma chunk */
 		sg_dma_index = scb->breakup;
+		for (i = 0; i < scb->breakup; i++)
+			sg = sg_next(sg);
 
 		/* Take care of possible partial on last chunk */
 		ips_fill_scb_sg_single(ha,
-				       sg_dma_address(&sg[sg_dma_index]),
+				       sg_dma_address(sg),
 				       scb, ips_sg_index++,
-				       sg_dma_len(&sg[sg_dma_index]));
+				       sg_dma_len(sg));
 
 		for (; sg_dma_index < scsi_sg_count(scb->scsi_cmd);
-		     sg_dma_index++) {
+		     sg_dma_index++, sg = sg_next(sg)) {
 			if (ips_fill_scb_sg_single
 			    (ha,
-			     sg_dma_address(&sg[sg_dma_index]),
+			     sg_dma_address(sg),
 			     scb, ips_sg_index++,
-			     sg_dma_len(&sg[sg_dma_index])) < 0)
+			     sg_dma_len(sg)) < 0)
 				break;
 		}
 
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index cd674938ccd5..c0755565fae9 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1438,6 +1438,7 @@ struct scsi_host_template lpfc_template = {
 	.scan_finished		= lpfc_scan_finished,
 	.this_id		= -1,
 	.sg_tablesize		= LPFC_SG_SEG_CNT,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.cmd_per_lun		= LPFC_CMD_PER_LUN,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= lpfc_hba_attrs,
@@ -1460,6 +1461,7 @@ struct scsi_host_template lpfc_vport_template = {
 	.sg_tablesize		= LPFC_SG_SEG_CNT,
 	.cmd_per_lun		= LPFC_CMD_PER_LUN,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.shost_attrs		= lpfc_vport_attrs,
 	.max_sectors		= 0xFFFF,
 };
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index b12ad7c7c673..a035001f4438 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -402,6 +402,7 @@ static struct scsi_host_template mac53c94_template = {
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= 1,
 	.use_clustering		= DISABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };
 
 static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *match)
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index e7e11f282c8f..10d1aff9938a 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4492,6 +4492,7 @@ static struct scsi_host_template megaraid_template = {
 	.sg_tablesize			= MAX_SGLIST,
 	.cmd_per_lun			= DEF_CMD_PER_LUN,
 	.use_clustering			= ENABLE_CLUSTERING,
+	.use_sg_chaining		= ENABLE_SG_CHAINING,
 	.eh_abort_handler		= megaraid_abort,
 	.eh_device_reset_handler	= megaraid_reset,
 	.eh_bus_reset_handler		= megaraid_reset,
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index c6a53dccc16a..e4e4c6a39ed6 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -361,6 +361,7 @@ static struct scsi_host_template megaraid_template_g = {
 	.eh_host_reset_handler		= megaraid_reset_handler,
 	.change_queue_depth		= megaraid_change_queue_depth,
 	.use_clustering			= ENABLE_CLUSTERING,
+	.use_sg_chaining		= ENABLE_SG_CHAINING,
 	.sdev_attrs			= megaraid_sdev_attrs,
 	.shost_attrs			= megaraid_shost_attrs,
 };
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index ebb948c016bb..e3c5c5282203 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -1110,6 +1110,7 @@ static struct scsi_host_template megasas_template = {
 	.eh_timed_out		= megasas_reset_timer,
 	.bios_param		= megasas_bios_param,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };
 
 /**
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 651d09b08f2a..7470ff39ab22 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1843,6 +1843,7 @@ static struct scsi_host_template mesh_template = {
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= 2,
 	.use_clustering		= DISABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };
 
 static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 7fed35372150..28161dc95e0d 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -281,6 +281,7 @@ static struct scsi_host_template nsp32_template = {
 	.cmd_per_lun			= 1,
 	.this_id			= NSP32_HOST_SCSIID,
 	.use_clustering			= DISABLE_CLUSTERING,
+	.use_sg_chaining		= ENABLE_SG_CHAINING,
 	.eh_abort_handler		= nsp32_eh_abort,
 	.eh_bus_reset_handler		= nsp32_eh_bus_reset,
 	.eh_host_reset_handler		= nsp32_eh_host_reset,
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index 961839ecfe86..190e2a7d7067 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -694,6 +694,7 @@ static struct scsi_host_template sym53c500_driver_template = {
 	.sg_tablesize		= 32,
 	.cmd_per_lun		= 1,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.shost_attrs		= SYM53C500_shost_attrs
 };
 
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index fba8aa8a81b5..76089cf55f4e 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -2775,7 +2775,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 	struct device_reg __iomem *reg = ha->iobase;
 	struct scsi_cmnd *cmd = sp->cmd;
 	cmd_a64_entry_t *pkt;
-	struct scatterlist *sg = NULL;
+	struct scatterlist *sg = NULL, *s;
 	__le32 *dword_ptr;
 	dma_addr_t dma_handle;
 	int status = 0;
@@ -2889,13 +2889,16 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 	 * Load data segments.
 	 */
 	if (seg_cnt) {	/* If data transfer. */
+		int remseg = seg_cnt;
 		/* Setup packet address segment pointer. */
 		dword_ptr = (u32 *)&pkt->dseg_0_address;
 
 		if (cmd->use_sg) {	/* If scatter gather */
 			/* Load command entry data segments. */
-			for (cnt = 0; cnt < 2 && seg_cnt; cnt++, seg_cnt--) {
-				dma_handle = sg_dma_address(sg);
+			for_each_sg(sg, s, seg_cnt, cnt) {
+				if (cnt == 2)
+					break;
+				dma_handle = sg_dma_address(s);
 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
 				if (ha->flags.use_pci_vchannel)
 					sn_pci_set_vchan(ha->pdev,
@@ -2906,12 +2909,12 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 					cpu_to_le32(pci_dma_lo32(dma_handle));
 				*dword_ptr++ =
 					cpu_to_le32(pci_dma_hi32(dma_handle));
-				*dword_ptr++ = cpu_to_le32(sg_dma_len(sg));
-				sg++;
+				*dword_ptr++ = cpu_to_le32(sg_dma_len(s));
 				dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
 					cpu_to_le32(pci_dma_hi32(dma_handle)),
 					cpu_to_le32(pci_dma_lo32(dma_handle)),
-					cpu_to_le32(sg_dma_len(sg)));
+					cpu_to_le32(sg_dma_len(sg_next(s))));
+				remseg--;
 			}
 			dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
 				"command packet data - b %i, t %i, l %i \n",
@@ -2926,7 +2929,9 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 			dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
 				"remains\n", seg_cnt);
 
-			while (seg_cnt > 0) {
+			while (remseg > 0) {
+				/* Update sg start */
+				sg = s;
 				/* Adjust ring index. */
 				ha->req_ring_index++;
 				if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
@@ -2952,9 +2957,10 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 					(u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
 
 				/* Load continuation entry data segments. */
-				for (cnt = 0; cnt < 5 && seg_cnt;
-				     cnt++, seg_cnt--) {
-					dma_handle = sg_dma_address(sg);
+				for_each_sg(sg, s, remseg, cnt) {
+					if (cnt == 5)
+						break;
+					dma_handle = sg_dma_address(s);
 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
 				if (ha->flags.use_pci_vchannel)
 					sn_pci_set_vchan(ha->pdev,
@@ -2966,13 +2972,13 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 					*dword_ptr++ =
 						cpu_to_le32(pci_dma_hi32(dma_handle));
 					*dword_ptr++ =
-						cpu_to_le32(sg_dma_len(sg));
+						cpu_to_le32(sg_dma_len(s));
 					dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
 						cpu_to_le32(pci_dma_hi32(dma_handle)),
 						cpu_to_le32(pci_dma_lo32(dma_handle)),
-						cpu_to_le32(sg_dma_len(sg)));
-					sg++;
+						cpu_to_le32(sg_dma_len(s)));
 				}
+				remseg -= cnt;
 				dprintk(5, "qla1280_64bit_start_scsi: "
 					"continuation packet data - b %i, t "
 					"%i, l %i \n", SCSI_BUS_32(cmd),
@@ -3062,7 +3068,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 	struct device_reg __iomem *reg = ha->iobase;
 	struct scsi_cmnd *cmd = sp->cmd;
 	struct cmd_entry *pkt;
-	struct scatterlist *sg = NULL;
+	struct scatterlist *sg = NULL, *s;
 	__le32 *dword_ptr;
 	int status = 0;
 	int cnt;
@@ -3188,6 +3194,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 	 * Load data segments.
 	 */
 	if (seg_cnt) {
+		int remseg = seg_cnt;
 		/* Setup packet address segment pointer. */
 		dword_ptr = &pkt->dseg_0_address;
 
@@ -3196,22 +3203,25 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 			qla1280_dump_buffer(1, (char *)sg, 4 * 16);
 
 			/* Load command entry data segments. */
-			for (cnt = 0; cnt < 4 && seg_cnt; cnt++, seg_cnt--) {
+			for_each_sg(sg, s, seg_cnt, cnt) {
+				if (cnt == 4)
+					break;
 				*dword_ptr++ =
-					cpu_to_le32(pci_dma_lo32(sg_dma_address(sg)));
-				*dword_ptr++ =
-					cpu_to_le32(sg_dma_len(sg));
+					cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
+				*dword_ptr++ = cpu_to_le32(sg_dma_len(s));
 				dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
-					(pci_dma_lo32(sg_dma_address(sg))),
-					(sg_dma_len(sg)));
-				sg++;
+					(pci_dma_lo32(sg_dma_address(s))),
+					(sg_dma_len(s)));
+				remseg--;
 			}
 			/*
 			 * Build continuation packets.
 			 */
 			dprintk(3, "S/G Building Continuation"
 				"...seg_cnt=0x%x remains\n", seg_cnt);
-			while (seg_cnt > 0) {
+			while (remseg > 0) {
+				/* Continue from end point */
+				sg = s;
 				/* Adjust ring index. */
 				ha->req_ring_index++;
 				if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
@@ -3239,19 +3249,20 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 					&((struct cont_entry *) pkt)->dseg_0_address;
 
 				/* Load continuation entry data segments. */
-				for (cnt = 0; cnt < 7 && seg_cnt;
-				     cnt++, seg_cnt--) {
+				for_each_sg(sg, s, remseg, cnt) {
+					if (cnt == 7)
+						break;
 					*dword_ptr++ =
-						cpu_to_le32(pci_dma_lo32(sg_dma_address(sg)));
+						cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
 					*dword_ptr++ =
-						cpu_to_le32(sg_dma_len(sg));
+						cpu_to_le32(sg_dma_len(s));
 					dprintk(1,
 						"S/G Segment Cont. phys_addr=0x%x, "
 						"len=0x%x\n",
-						cpu_to_le32(pci_dma_lo32(sg_dma_address(sg))),
-						cpu_to_le32(sg_dma_len(sg)));
-					sg++;
+						cpu_to_le32(pci_dma_lo32(sg_dma_address(s))),
+						cpu_to_le32(sg_dma_len(s)));
 				}
+				remseg -= cnt;
 				dprintk(5, "qla1280_32bit_start_scsi: "
 					"continuation packet data - "
 					"scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd),
@@ -4248,6 +4259,7 @@ static struct scsi_host_template qla1280_driver_template = {
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= 1,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };
 
 
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index a6bb8d0ecf13..0351d380c2d7 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -132,6 +132,7 @@ struct scsi_host_template qla2x00_driver_template = {
 	.this_id		= -1,
 	.cmd_per_lun		= 3,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.sg_tablesize		= SG_ALL,
 
 	/*
@@ -163,6 +164,7 @@ struct scsi_host_template qla24xx_driver_template = {
 	.this_id		= -1,
 	.cmd_per_lun		= 3,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.sg_tablesize		= SG_ALL,
 
 	.max_sectors		= 0xFFFF,
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index b1d565c12c5b..03b68d4f3bd0 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -94,6 +94,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
 	.this_id		= -1,
 	.cmd_per_lun		= 3,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.sg_tablesize		= SG_ALL,
 
 	.max_sectors		= 0xFFFF,
diff --git a/drivers/scsi/qlogicfas.c b/drivers/scsi/qlogicfas.c
index 1e874f1fb5c6..1769f965eedf 100644
--- a/drivers/scsi/qlogicfas.c
+++ b/drivers/scsi/qlogicfas.c
@@ -197,6 +197,7 @@ static struct scsi_host_template qlogicfas_driver_template = {
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= 1,
 	.use_clustering		= DISABLE_CLUSTERING,
+	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };
 
 static __init int qlogicfas_init(void)
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index e93f80316a19..7a2e7986b038 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -868,7 +868,7 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
 			   struct qlogicpti *qpti, u_int in_ptr, u_int out_ptr)
 {
 	struct dataseg *ds;
-	struct scatterlist *sg;
+	struct scatterlist *sg, *s;
 	int i, n;
 
 	if (Cmnd->use_sg) {
@@ -884,11 +884,12 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
 		n = sg_count;
 		if (n > 4)
 			n = 4;
-		for (i = 0; i < n; i++, sg++) {
-			ds[i].d_base = sg_dma_address(sg);
-			ds[i].d_count = sg_dma_len(sg);
+		for_each_sg(sg, s, n, i) {
+			ds[i].d_base = sg_dma_address(s);
+			ds[i].d_count = sg_dma_len(s);
 		}
 		sg_count -= 4;
+		sg = s;
 		while (sg_count > 0) {
 			struct Continuation_Entry *cont;
 
@@ -907,9 +908,9 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
 			n = sg_count;
 			if (n > 7)
 				n = 7;
-			for (i = 0; i < n; i++, sg++) {
-				ds[i].d_base = sg_dma_address(sg);
-				ds[i].d_count = sg_dma_len(sg);
+			for_each_sg(sg, s, n, i) {
+				ds[i].d_base = sg_dma_address(s);
+				ds[i].d_count = sg_dma_len(s);
 			}
 			sg_count -= n;
 		}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 4947dfe625a6..72ee4c9cfb1a 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -38,6 +38,7 @@
 #include <linux/proc_fs.h>
 #include <linux/vmalloc.h>
 #include <linux/moduleparam.h>
+#include <linux/scatterlist.h>
 
 #include <linux/blkdev.h>
 #include "scsi.h"
@@ -600,7 +601,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
 	int k, req_len, act_len, len, active;
 	void * kaddr;
 	void * kaddr_off;
-	struct scatterlist * sgpnt;
+	struct scatterlist * sg;
 
 	if (0 == scp->request_bufflen)
 		return 0;
@@ -619,16 +620,16 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
 		scp->resid = req_len - act_len;
 		return 0;
 	}
-	sgpnt = (struct scatterlist *)scp->request_buffer;
 	active = 1;
-	for (k = 0, req_len = 0, act_len = 0; k < scp->use_sg; ++k, ++sgpnt) {
+	req_len = act_len = 0;
+	scsi_for_each_sg(scp, sg, scp->use_sg, k) {
 		if (active) {
 			kaddr = (unsigned char *)
-				kmap_atomic(sgpnt->page, KM_USER0);
+				kmap_atomic(sg->page, KM_USER0);
 			if (NULL == kaddr)
 				return (DID_ERROR << 16);
-			kaddr_off = (unsigned char *)kaddr + sgpnt->offset;
-			len = sgpnt->length;
+			kaddr_off = (unsigned char *)kaddr + sg->offset;
+			len = sg->length;
 			if ((req_len + len) > arr_len) {
 				active = 0;
 				len = arr_len - req_len;
@@ -637,7 +638,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
 			kunmap_atomic(kaddr, KM_USER0);
 			act_len += len;
 		}
-		req_len += sgpnt->length;
+		req_len += sg->length;
 	}
 	if (scp->resid)
 		scp->resid -= act_len;
@@ -653,7 +654,7 @@ static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
 	int k, req_len, len, fin;
 	void * kaddr;
 	void * kaddr_off;
-	struct scatterlist * sgpnt;
+	struct scatterlist * sg;
 
 	if (0 == scp->request_bufflen)
 		return 0;
@@ -668,13 +669,14 @@ static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
 		memcpy(arr, scp->request_buffer, len);
 		return len;
 	}
-	sgpnt = (struct scatterlist *)scp->request_buffer;
-	for (k = 0, req_len = 0, fin = 0; k < scp->use_sg; ++k, ++sgpnt) {
-		kaddr = (unsigned char *)kmap_atomic(sgpnt->page, KM_USER0);
+	sg = scsi_sglist(scp);
+	req_len = fin = 0;
+	for (k = 0; k < scp->use_sg; ++k, sg = sg_next(sg)) {
+		kaddr = (unsigned char *)kmap_atomic(sg->page, KM_USER0);
 		if (NULL == kaddr)
 			return -1;
-		kaddr_off = (unsigned char *)kaddr + sgpnt->offset;
-		len = sgpnt->length;
+		kaddr_off = (unsigned char *)kaddr + sg->offset;
+		len = sg->length;
 		if ((req_len + len) > max_arr_len) {
 			len = max_arr_len - req_len;
 			fin = 1;
@@ -683,7 +685,7 @@ static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
 		kunmap_atomic(kaddr, KM_USER0);
 		if (fin)
 			return req_len + len;
-		req_len += sgpnt->length;
+		req_len += sg->length;
 	}
 	return req_len;
 }
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 207f1aa08869..aac8a02cbe80 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -17,6 +17,7 @@
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <linux/hardirq.h>
+#include <linux/scatterlist.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -33,35 +34,34 @@
 #define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
 #define SG_MEMPOOL_SIZE		2
 
+/*
+ * The maximum number of SG segments that we will put inside a scatterlist
+ * (unless chaining is used). Should ideally fit inside a single page, to
+ * avoid a higher order allocation.
+ */
+#define SCSI_MAX_SG_SEGMENTS	128
+
 struct scsi_host_sg_pool {
 	size_t		size;
 	char		*name;
 	struct kmem_cache	*slab;
 	mempool_t	*pool;
 };
 
-#if (SCSI_MAX_PHYS_SEGMENTS < 32)
-#error SCSI_MAX_PHYS_SEGMENTS is too small
-#endif
-
-#define SP(x) { x, "sgpool-" #x }
+#define SP(x) { x, "sgpool-" #x }
 static struct scsi_host_sg_pool scsi_sg_pools[] = {
 	SP(8),
 	SP(16),
+#if (SCSI_MAX_SG_SEGMENTS > 16)
 	SP(32),
-#if (SCSI_MAX_PHYS_SEGMENTS > 32)
+#if (SCSI_MAX_SG_SEGMENTS > 32)
 	SP(64),
-#if (SCSI_MAX_PHYS_SEGMENTS > 64)
+#if (SCSI_MAX_SG_SEGMENTS > 64)
 	SP(128),
-#if (SCSI_MAX_PHYS_SEGMENTS > 128)
-	SP(256),
-#if (SCSI_MAX_PHYS_SEGMENTS > 256)
-#error SCSI_MAX_PHYS_SEGMENTS is too large
-#endif
 #endif
 #endif
 #endif
 };
 #undef SP
 
 static void scsi_run_queue(struct request_queue *q);
@@ -289,14 +289,16 @@ static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
 	struct request_queue *q = rq->q;
 	int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	unsigned int data_len = bufflen, len, bytes, off;
+	struct scatterlist *sg;
 	struct page *page;
 	struct bio *bio = NULL;
 	int i, err, nr_vecs = 0;
 
-	for (i = 0; i < nsegs; i++) {
-		page = sgl[i].page;
-		off = sgl[i].offset;
-		len = sgl[i].length;
+	for_each_sg(sgl, sg, nsegs, i) {
+		page = sg->page;
+		off = sg->offset;
+		len = sg->length;
+		data_len += len;
 
 		while (len > 0 && data_len > 0) {
 			/*
@@ -695,56 +697,170 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
695 return NULL; 697 return NULL;
696} 698}
697 699
698struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask) 700/*
699{ 701 * Like SCSI_MAX_SG_SEGMENTS, but for archs that have sg chaining. This limit
700 struct scsi_host_sg_pool *sgp; 702 * is totally arbitrary, a setting of 2048 will get you at least 8mb ios.
701 struct scatterlist *sgl; 703 */
704#define SCSI_MAX_SG_CHAIN_SEGMENTS 2048
702 705
703 BUG_ON(!cmd->use_sg); 706static inline unsigned int scsi_sgtable_index(unsigned short nents)
707{
708 unsigned int index;
704 709
705 switch (cmd->use_sg) { 710 switch (nents) {
706 case 1 ... 8: 711 case 1 ... 8:
707 cmd->sglist_len = 0; 712 index = 0;
708 break; 713 break;
709 case 9 ... 16: 714 case 9 ... 16:
710 cmd->sglist_len = 1; 715 index = 1;
711 break; 716 break;
717#if (SCSI_MAX_SG_SEGMENTS > 16)
712 case 17 ... 32: 718 case 17 ... 32:
713 cmd->sglist_len = 2; 719 index = 2;
714 break; 720 break;
715#if (SCSI_MAX_PHYS_SEGMENTS > 32) 721#if (SCSI_MAX_SG_SEGMENTS > 32)
716 case 33 ... 64: 722 case 33 ... 64:
717 cmd->sglist_len = 3; 723 index = 3;
718 break; 724 break;
719#if (SCSI_MAX_PHYS_SEGMENTS > 64) 725#if (SCSI_MAX_SG_SEGMENTS > 64)
720 case 65 ... 128: 726 case 65 ... 128:
721 cmd->sglist_len = 4; 727 index = 4;
722 break;
723#if (SCSI_MAX_PHYS_SEGMENTS > 128)
724 case 129 ... 256:
725 cmd->sglist_len = 5;
726 break; 728 break;
727#endif 729#endif
728#endif 730#endif
729#endif 731#endif
730 default: 732 default:
731 return NULL; 733 printk(KERN_ERR "scsi: bad segment count=%d\n", nents);
734 BUG();
732 } 735 }
733 736
734 sgp = scsi_sg_pools + cmd->sglist_len; 737 return index;
735 sgl = mempool_alloc(sgp->pool, gfp_mask); 738}
736 return sgl; 739
740struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
741{
742 struct scsi_host_sg_pool *sgp;
743 struct scatterlist *sgl, *prev, *ret;
744 unsigned int index;
745 int this, left;
746
747 BUG_ON(!cmd->use_sg);
748
749 left = cmd->use_sg;
750 ret = prev = NULL;
751 do {
752 this = left;
753 if (this > SCSI_MAX_SG_SEGMENTS) {
754 this = SCSI_MAX_SG_SEGMENTS - 1;
755 index = SG_MEMPOOL_NR - 1;
756 } else
757 index = scsi_sgtable_index(this);
758
759 left -= this;
760
761 sgp = scsi_sg_pools + index;
762
763 sgl = mempool_alloc(sgp->pool, gfp_mask);
764 if (unlikely(!sgl))
765 goto enomem;
766
767 memset(sgl, 0, sizeof(*sgl) * sgp->size);
768
769 /*
770 * first loop through, set initial index and return value
771 */
772 if (!ret)
773 ret = sgl;
774
775 /*
776 * chain previous sglist, if any. we know the previous
777 * sglist must be the biggest one, or we would not have
778 * ended up doing another loop.
779 */
780 if (prev)
781 sg_chain(prev, SCSI_MAX_SG_SEGMENTS, sgl);
782
783 /*
784 * don't allow subsequent mempool allocs to sleep, it would
785 * violate the mempool principle.
786 */
787 gfp_mask &= ~__GFP_WAIT;
788 gfp_mask |= __GFP_HIGH;
789 prev = sgl;
790 } while (left);
791
792 /*
793 * ->use_sg may get modified after dma mapping has potentially
794 * shrunk the number of segments, so keep a copy of it for free.
795 */
796 cmd->__use_sg = cmd->use_sg;
797 return ret;
798enomem:
799 if (ret) {
800 /*
801 * Free entries chained off ret. Since we were trying to
802 * allocate another sglist, we know that all entries are of
803 * the max size.
804 */
805 sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
806 prev = ret;
807 ret = &ret[SCSI_MAX_SG_SEGMENTS - 1];
808
809 while ((sgl = sg_chain_ptr(ret)) != NULL) {
810 ret = &sgl[SCSI_MAX_SG_SEGMENTS - 1];
811 mempool_free(sgl, sgp->pool);
812 }
813
814 mempool_free(prev, sgp->pool);
815 }
816 return NULL;
737} 817}
738 818
739EXPORT_SYMBOL(scsi_alloc_sgtable); 819EXPORT_SYMBOL(scsi_alloc_sgtable);
740 820
741void scsi_free_sgtable(struct scatterlist *sgl, int index) 821void scsi_free_sgtable(struct scsi_cmnd *cmd)
742{ 822{
823 struct scatterlist *sgl = cmd->request_buffer;
743 struct scsi_host_sg_pool *sgp; 824 struct scsi_host_sg_pool *sgp;
744 825
745 BUG_ON(index >= SG_MEMPOOL_NR); 826 /*
827 * if this is the biggest size sglist, check if we have
828 * chained parts we need to free
829 */
830 if (cmd->__use_sg > SCSI_MAX_SG_SEGMENTS) {
831 unsigned short this, left;
832 struct scatterlist *next;
833 unsigned int index;
834
835 left = cmd->__use_sg - (SCSI_MAX_SG_SEGMENTS - 1);
836 next = sg_chain_ptr(&sgl[SCSI_MAX_SG_SEGMENTS - 1]);
837 while (left && next) {
838 sgl = next;
839 this = left;
840 if (this > SCSI_MAX_SG_SEGMENTS) {
841 this = SCSI_MAX_SG_SEGMENTS - 1;
842 index = SG_MEMPOOL_NR - 1;
843 } else
844 index = scsi_sgtable_index(this);
845
846 left -= this;
847
848 sgp = scsi_sg_pools + index;
849
850 if (left)
851 next = sg_chain_ptr(&sgl[sgp->size - 1]);
852
853 mempool_free(sgl, sgp->pool);
854 }
855
856 /*
857 * Restore original, will be freed below
858 */
859 sgl = cmd->request_buffer;
860 sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
861 } else
862 sgp = scsi_sg_pools + scsi_sgtable_index(cmd->__use_sg);
746 863
747 sgp = scsi_sg_pools + index;
748 mempool_free(sgl, sgp->pool); 864 mempool_free(sgl, sgp->pool);
749} 865}
750 866
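The scsi_alloc_sgtable()/scsi_free_sgtable() pair above replaces the single fixed-size table with a chain of mempool-backed blocks: counts up to SCSI_MAX_SG_SEGMENTS pick one of the existing pools by size, and anything larger is built from full-size blocks whose last slot links to the next block (the 2048-segment SCSI_MAX_SG_CHAIN_SEGMENTS cap matches the "at least 8mb ios" remark: 2048 x 4KB pages = 8MB). The user-space sketch below models only that bookkeeping; the seg struct, the explicit chain flag and the pool sizes are simplified stand-ins for struct scatterlist, sg_chain()/sg_chain_ptr() and scsi_sg_pools, not the kernel implementation.

/*
 * User-space model of the chained sg-table bookkeeping above.
 * "struct seg" stands in for struct scatterlist; the last slot of a
 * full block carries an explicit chain pointer instead of the tagged
 * page pointer the kernel uses. Pool sizes mirror the 8..128 buckets.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_SEGS 128			/* stand-in for SCSI_MAX_SG_SEGMENTS */

struct seg {
	int is_chain;			/* set in the last slot of a full block */
	struct seg *next_block;		/* valid when is_chain is set */
	unsigned int length;
};

static const unsigned int pool_size[] = { 8, 16, 32, 64, 128 };
#define NR_POOLS (sizeof(pool_size) / sizeof(pool_size[0]))

static unsigned int pool_index(unsigned int nents)
{
	unsigned int i;

	for (i = 0; i < NR_POOLS; i++)
		if (nents <= pool_size[i])
			return i;
	abort();			/* mirrors the BUG() on a bad count */
}

/* Build a table for 'nents' segments, MAX_SEGS - 1 payload slots per block. */
static struct seg *alloc_table(unsigned int nents)
{
	struct seg *head = NULL, *prev = NULL;

	while (nents) {
		unsigned int this = nents > MAX_SEGS ? MAX_SEGS - 1 : nents;
		unsigned int idx = nents > MAX_SEGS ? NR_POOLS - 1 : pool_index(this);
		struct seg *blk = calloc(pool_size[idx], sizeof(*blk));

		if (!head)
			head = blk;
		if (prev) {		/* chain the previous (full) block */
			prev[MAX_SEGS - 1].is_chain = 1;
			prev[MAX_SEGS - 1].next_block = blk;
		}
		prev = blk;
		nents -= this;
	}
	return head;
}

int main(void)
{
	unsigned int nents = 300;	/* 300 segments -> 127 + 127 + 46 */
	struct seg *blk = alloc_table(nents);
	unsigned int left = nents;
	int blocks = 0;

	/* walk and count blocks the way the free path does */
	while (left) {
		unsigned int this = left > MAX_SEGS ? MAX_SEGS - 1 : left;
		struct seg *next = left > MAX_SEGS ?
			blk[MAX_SEGS - 1].next_block : NULL;

		blocks++;
		left -= this;
		blk = next;		/* blocks are leaked; demo only */
	}
	printf("segments: %u, blocks used: %d\n", nents, blocks);	/* prints 3 */
	return 0;
}
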
@@ -770,7 +886,7 @@ EXPORT_SYMBOL(scsi_free_sgtable);
770static void scsi_release_buffers(struct scsi_cmnd *cmd) 886static void scsi_release_buffers(struct scsi_cmnd *cmd)
771{ 887{
772 if (cmd->use_sg) 888 if (cmd->use_sg)
773 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len); 889 scsi_free_sgtable(cmd);
774 890
775 /* 891 /*
776 * Zero these out. They now point to freed memory, and it is 892 * Zero these out. They now point to freed memory, and it is
@@ -984,7 +1100,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
984static int scsi_init_io(struct scsi_cmnd *cmd) 1100static int scsi_init_io(struct scsi_cmnd *cmd)
985{ 1101{
986 struct request *req = cmd->request; 1102 struct request *req = cmd->request;
987 struct scatterlist *sgpnt;
988 int count; 1103 int count;
989 1104
990 /* 1105 /*
@@ -997,14 +1112,13 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
997 /* 1112 /*
998 * If sg table allocation fails, requeue request later. 1113 * If sg table allocation fails, requeue request later.
999 */ 1114 */
1000 sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC); 1115 cmd->request_buffer = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
1001 if (unlikely(!sgpnt)) { 1116 if (unlikely(!cmd->request_buffer)) {
1002 scsi_unprep_request(req); 1117 scsi_unprep_request(req);
1003 return BLKPREP_DEFER; 1118 return BLKPREP_DEFER;
1004 } 1119 }
1005 1120
1006 req->buffer = NULL; 1121 req->buffer = NULL;
1007 cmd->request_buffer = (char *) sgpnt;
1008 if (blk_pc_request(req)) 1122 if (blk_pc_request(req))
1009 cmd->request_bufflen = req->data_len; 1123 cmd->request_bufflen = req->data_len;
1010 else 1124 else
@@ -1529,8 +1643,25 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
1529 if (!q) 1643 if (!q)
1530 return NULL; 1644 return NULL;
1531 1645
1646 /*
1647 * this limit is imposed by hardware restrictions
1648 */
1532 blk_queue_max_hw_segments(q, shost->sg_tablesize); 1649 blk_queue_max_hw_segments(q, shost->sg_tablesize);
1533 blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS); 1650
1651 /*
1652 * In the future, sg chaining support will be mandatory and this
1653 * ifdef can then go away. Right now we don't have all archs
1654 * converted, so better keep it safe.
1655 */
1656#ifdef ARCH_HAS_SG_CHAIN
1657 if (shost->use_sg_chaining)
1658 blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
1659 else
1660 blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS);
1661#else
1662 blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS);
1663#endif
1664
1534 blk_queue_max_sectors(q, shost->max_sectors); 1665 blk_queue_max_sectors(q, shost->max_sectors);
1535 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); 1666 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1536 blk_queue_segment_boundary(q, shost->dma_boundary); 1667 blk_queue_segment_boundary(q, shost->dma_boundary);
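The queue setup above is where the per-host opt-in takes effect: the hardware segment limit still comes from shost->sg_tablesize, but the physical-segment limit is only raised to SCSI_MAX_SG_CHAIN_SEGMENTS when the architecture defines ARCH_HAS_SG_CHAIN and the host template sets the new use_sg_chaining flag (the one-line template additions further down in this diff). A purely illustrative sketch of that decision follows, with a hypothetical stand-in struct rather than the real Scsi_Host/scsi_host_template types.

/*
 * Illustrative model of the limit selection in __scsi_alloc_queue().
 * host_model and max_phys_segments() are hypothetical stand-ins; only
 * the decision logic mirrors the patch.
 */
#include <stdio.h>

#define SCSI_MAX_SG_SEGMENTS		128
#define SCSI_MAX_SG_CHAIN_SEGMENTS	2048

struct host_model {
	const char *name;
	unsigned short sg_tablesize;	/* hardware (hw segment) limit */
	unsigned char use_sg_chaining;	/* driver opted in to chaining */
};

static unsigned int max_phys_segments(const struct host_model *h,
				      int arch_has_sg_chain)
{
	if (arch_has_sg_chain && h->use_sg_chaining)
		return SCSI_MAX_SG_CHAIN_SEGMENTS;
	return SCSI_MAX_SG_SEGMENTS;
}

int main(void)
{
	struct host_model legacy  = { "legacy-hba",  32, 0 };
	struct host_model chained = { "chained-hba", 128, 1 };

	printf("%s: hw %u, phys %u\n", legacy.name, legacy.sg_tablesize,
	       max_phys_segments(&legacy, 1));		/* 32 / 128 */
	printf("%s: hw %u, phys %u\n", chained.name, chained.sg_tablesize,
	       max_phys_segments(&chained, 1));		/* 128 / 2048 */
	return 0;
}
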
@@ -2193,18 +2324,19 @@ EXPORT_SYMBOL_GPL(scsi_target_unblock);
2193 * 2324 *
2194 * Returns virtual address of the start of the mapped page 2325 * Returns virtual address of the start of the mapped page
2195 */ 2326 */
2196void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count, 2327void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
2197 size_t *offset, size_t *len) 2328 size_t *offset, size_t *len)
2198{ 2329{
2199 int i; 2330 int i;
2200 size_t sg_len = 0, len_complete = 0; 2331 size_t sg_len = 0, len_complete = 0;
2332 struct scatterlist *sg;
2201 struct page *page; 2333 struct page *page;
2202 2334
2203 WARN_ON(!irqs_disabled()); 2335 WARN_ON(!irqs_disabled());
2204 2336
2205 for (i = 0; i < sg_count; i++) { 2337 for_each_sg(sgl, sg, sg_count, i) {
2206 len_complete = sg_len; /* Complete sg-entries */ 2338 len_complete = sg_len; /* Complete sg-entries */
2207 sg_len += sg[i].length; 2339 sg_len += sg->length;
2208 if (sg_len > *offset) 2340 if (sg_len > *offset)
2209 break; 2341 break;
2210 } 2342 }
@@ -2218,10 +2350,10 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
2218 } 2350 }
2219 2351
2220 /* Offset starting from the beginning of first page in this sg-entry */ 2352 /* Offset starting from the beginning of first page in this sg-entry */
2221 *offset = *offset - len_complete + sg[i].offset; 2353 *offset = *offset - len_complete + sg->offset;
2222 2354
2223 /* Assumption: contiguous pages can be accessed as "page + i" */ 2355 /* Assumption: contiguous pages can be accessed as "page + i" */
2224 page = nth_page(sg[i].page, (*offset >> PAGE_SHIFT)); 2356 page = nth_page(sg->page, (*offset >> PAGE_SHIFT));
2225 *offset &= ~PAGE_MASK; 2357 *offset &= ~PAGE_MASK;
2226 2358
2227 /* Bytes in this sg-entry from *offset to the end of the page */ 2359 /* Bytes in this sg-entry from *offset to the end of the page */
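scsi_kmap_atomic_sg() gets the same treatment as the first hunk of this file: the walk moves from array indexing to for_each_sg(), so chained tables are followed transparently, while the offset arithmetic is unchanged (accumulate entry lengths until the running total passes *offset, then convert the list offset into an offset within that entry). The sketch below checks that arithmetic in isolation with a plain array in user space; it is a model of the lookup, not the kernel API.

/*
 * Model of the offset lookup in scsi_kmap_atomic_sg(): given a byte
 * offset into the whole scatter list, find the entry containing it and
 * the offset within that entry. A plain array is used here; in the
 * kernel, for_each_sg() hides whether the list is contiguous or chained.
 */
#include <stdio.h>
#include <stddef.h>

struct ent {
	size_t offset;			/* offset into the entry's first page */
	size_t length;
};

static int find_entry(const struct ent *sg, int count, size_t *offset)
{
	size_t sg_len = 0, len_complete = 0;
	int i;

	for (i = 0; i < count; i++) {
		len_complete = sg_len;	/* bytes in fully skipped entries */
		sg_len += sg[i].length;
		if (sg_len > *offset)
			break;
	}
	if (i == count)
		return -1;		/* offset is past the end of the list */

	/* offset from the beginning of the first page in this entry */
	*offset = *offset - len_complete + sg[i].offset;
	return i;
}

int main(void)
{
	struct ent sg[] = { { 0, 4096 }, { 512, 2048 }, { 0, 4096 } };
	size_t off = 5000;		/* lands in the second entry */
	int idx = find_entry(sg, 3, &off);

	printf("entry %d, offset %zu\n", idx, off);	/* entry 1, offset 1416 */
	return 0;
}
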
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 66c692ffa305..a91761c3645f 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -332,7 +332,7 @@ static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd)
332 scsi_tgt_uspace_send_status(cmd, tcmd->itn_id, tcmd->tag); 332 scsi_tgt_uspace_send_status(cmd, tcmd->itn_id, tcmd->tag);
333 333
334 if (cmd->request_buffer) 334 if (cmd->request_buffer)
335 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len); 335 scsi_free_sgtable(cmd);
336 336
337 queue_work(scsi_tgtd, &tcmd->work); 337 queue_work(scsi_tgtd, &tcmd->work);
338} 338}
@@ -373,7 +373,7 @@ static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
373 } 373 }
374 374
375 eprintk("cmd %p cnt %d\n", cmd, cmd->use_sg); 375 eprintk("cmd %p cnt %d\n", cmd, cmd->use_sg);
376 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len); 376 scsi_free_sgtable(cmd);
377 return -EINVAL; 377 return -EINVAL;
378} 378}
379 379
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 0a3a528212c2..69f542c4923c 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -826,27 +826,6 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
826 return 0; 826 return 0;
827} 827}
828 828
829static int sd_issue_flush(struct request_queue *q, struct gendisk *disk,
830 sector_t *error_sector)
831{
832 int ret = 0;
833 struct scsi_device *sdp = q->queuedata;
834 struct scsi_disk *sdkp;
835
836 if (sdp->sdev_state != SDEV_RUNNING)
837 return -ENXIO;
838
839 sdkp = scsi_disk_get_from_dev(&sdp->sdev_gendev);
840
841 if (!sdkp)
842 return -ENODEV;
843
844 if (sdkp->WCE)
845 ret = sd_sync_cache(sdkp);
846 scsi_disk_put(sdkp);
847 return ret;
848}
849
850static void sd_prepare_flush(struct request_queue *q, struct request *rq) 829static void sd_prepare_flush(struct request_queue *q, struct request *rq)
851{ 830{
852 memset(rq->cmd, 0, sizeof(rq->cmd)); 831 memset(rq->cmd, 0, sizeof(rq->cmd));
@@ -1697,7 +1676,6 @@ static int sd_probe(struct device *dev)
1697 sd_revalidate_disk(gd); 1676 sd_revalidate_disk(gd);
1698 1677
1699 blk_queue_prep_rq(sdp->request_queue, sd_prep_fn); 1678 blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
1700 blk_queue_issue_flush_fn(sdp->request_queue, sd_issue_flush);
1701 1679
1702 gd->driverfs_dev = &sdp->sdev_gendev; 1680 gd->driverfs_dev = &sdp->sdev_gendev;
1703 gd->flags = GENHD_FL_DRIVERFS; 1681 gd->flags = GENHD_FL_DRIVERFS;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index f6f5fc7d0cee..7238b2dfc497 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1165,7 +1165,7 @@ sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
1165 sg = rsv_schp->buffer; 1165 sg = rsv_schp->buffer;
1166 sa = vma->vm_start; 1166 sa = vma->vm_start;
1167 for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end); 1167 for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
1168 ++k, ++sg) { 1168 ++k, sg = sg_next(sg)) {
1169 len = vma->vm_end - sa; 1169 len = vma->vm_end - sa;
1170 len = (len < sg->length) ? len : sg->length; 1170 len = (len < sg->length) ? len : sg->length;
1171 if (offset < len) { 1171 if (offset < len) {
@@ -1209,7 +1209,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
1209 sa = vma->vm_start; 1209 sa = vma->vm_start;
1210 sg = rsv_schp->buffer; 1210 sg = rsv_schp->buffer;
1211 for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end); 1211 for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
1212 ++k, ++sg) { 1212 ++k, sg = sg_next(sg)) {
1213 len = vma->vm_end - sa; 1213 len = vma->vm_end - sa;
1214 len = (len < sg->length) ? len : sg->length; 1214 len = (len < sg->length) ? len : sg->length;
1215 sa += len; 1215 sa += len;
@@ -1840,7 +1840,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1840 } 1840 }
1841 for (k = 0, sg = schp->buffer, rem_sz = blk_size; 1841 for (k = 0, sg = schp->buffer, rem_sz = blk_size;
1842 (rem_sz > 0) && (k < mx_sc_elems); 1842 (rem_sz > 0) && (k < mx_sc_elems);
1843 ++k, rem_sz -= ret_sz, ++sg) { 1843 ++k, rem_sz -= ret_sz, sg = sg_next(sg)) {
1844 1844
1845 num = (rem_sz > scatter_elem_sz_prev) ? 1845 num = (rem_sz > scatter_elem_sz_prev) ?
1846 scatter_elem_sz_prev : rem_sz; 1846 scatter_elem_sz_prev : rem_sz;
@@ -1913,7 +1913,7 @@ sg_write_xfer(Sg_request * srp)
1913 if (res) 1913 if (res)
1914 return res; 1914 return res;
1915 1915
1916 for (; p; ++sg, ksglen = sg->length, 1916 for (; p; sg = sg_next(sg), ksglen = sg->length,
1917 p = page_address(sg->page)) { 1917 p = page_address(sg->page)) {
1918 if (usglen <= 0) 1918 if (usglen <= 0)
1919 break; 1919 break;
@@ -1992,7 +1992,7 @@ sg_remove_scat(Sg_scatter_hold * schp)
1992 int k; 1992 int k;
1993 1993
1994 for (k = 0; (k < schp->k_use_sg) && sg->page; 1994 for (k = 0; (k < schp->k_use_sg) && sg->page;
1995 ++k, ++sg) { 1995 ++k, sg = sg_next(sg)) {
1996 SCSI_LOG_TIMEOUT(5, printk( 1996 SCSI_LOG_TIMEOUT(5, printk(
1997 "sg_remove_scat: k=%d, pg=0x%p, len=%d\n", 1997 "sg_remove_scat: k=%d, pg=0x%p, len=%d\n",
1998 k, sg->page, sg->length)); 1998 k, sg->page, sg->length));
@@ -2045,7 +2045,7 @@ sg_read_xfer(Sg_request * srp)
2045 if (res) 2045 if (res)
2046 return res; 2046 return res;
2047 2047
2048 for (; p; ++sg, ksglen = sg->length, 2048 for (; p; sg = sg_next(sg), ksglen = sg->length,
2049 p = page_address(sg->page)) { 2049 p = page_address(sg->page)) {
2050 if (usglen <= 0) 2050 if (usglen <= 0)
2051 break; 2051 break;
@@ -2092,7 +2092,7 @@ sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
2092 if ((!outp) || (num_read_xfer <= 0)) 2092 if ((!outp) || (num_read_xfer <= 0))
2093 return 0; 2093 return 0;
2094 2094
2095 for (k = 0; (k < schp->k_use_sg) && sg->page; ++k, ++sg) { 2095 for (k = 0; (k < schp->k_use_sg) && sg->page; ++k, sg = sg_next(sg)) {
2096 num = sg->length; 2096 num = sg->length;
2097 if (num > num_read_xfer) { 2097 if (num > num_read_xfer) {
2098 if (__copy_to_user(outp, page_address(sg->page), 2098 if (__copy_to_user(outp, page_address(sg->page),
@@ -2142,7 +2142,7 @@ sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
2142 SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size)); 2142 SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
2143 rem = size; 2143 rem = size;
2144 2144
2145 for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sg) { 2145 for (k = 0; k < rsv_schp->k_use_sg; ++k, sg = sg_next(sg)) {
2146 num = sg->length; 2146 num = sg->length;
2147 if (rem <= num) { 2147 if (rem <= num) {
2148 sfp->save_scat_len = num; 2148 sfp->save_scat_len = num;
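All of the sg.c hunks above make the same substitution: ++sg assumes the scatterlist is one contiguous array, which is no longer true once tables can be chained, so each walk now uses sg_next(), which steps within the current block and follows a chain entry into the next one. Below is a minimal user-space model of that accessor; the chain marker is an explicit flag rather than the tagged page pointer the real helpers use, so it illustrates the traversal, not the in-kernel layout.

/*
 * Simplified model of sg_next(): advance to the following slot and, if
 * that slot is a chain link, hop to the block it points at.
 */
#include <stdio.h>

struct seg {
	int is_chain;			/* marks a chain slot */
	union {
		struct seg *next_block;	/* valid when is_chain is set */
		unsigned int length;	/* valid for a payload entry */
	};
};

static struct seg *seg_next(struct seg *sg)
{
	sg++;				/* the old ++sg, within one block */
	if (sg->is_chain)		/* chain slot: jump to the next block */
		sg = sg->next_block;
	return sg;
}

int main(void)
{
	struct seg block2[2] = { { .length = 30 }, { .length = 40 } };
	struct seg block1[3] = { { .length = 10 }, { .length = 20 },
				 { .is_chain = 1, .next_block = block2 } };
	struct seg *sg = block1;
	int k;

	/* four payload entries spread over two chained blocks */
	for (k = 0; k < 4; k++) {
		printf("entry %d: length %u\n", k, sg->length);
		if (k + 1 < 4)		/* don't step past the last entry */
			sg = seg_next(sg);
	}
	return 0;
}
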
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 72f6d8015358..e3fab3a6aed7 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -1123,6 +1123,7 @@ static struct scsi_host_template driver_template = {
1123 .this_id = -1, 1123 .this_id = -1,
1124 .sg_tablesize = ST_MAX_SG, 1124 .sg_tablesize = ST_MAX_SG,
1125 .cmd_per_lun = ST_CMD_PER_LUN, 1125 .cmd_per_lun = ST_CMD_PER_LUN,
1126 .use_sg_chaining = ENABLE_SG_CHAINING,
1126}; 1127};
1127 1128
1128static int stex_set_dma_mask(struct pci_dev * pdev) 1129static int stex_set_dma_mask(struct pci_dev * pdev)
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c
index 92bfaeafe30d..8befab7e9839 100644
--- a/drivers/scsi/sym53c416.c
+++ b/drivers/scsi/sym53c416.c
@@ -854,5 +854,6 @@ static struct scsi_host_template driver_template = {
854 .cmd_per_lun = 1, 854 .cmd_per_lun = 1,
855 .unchecked_isa_dma = 1, 855 .unchecked_isa_dma = 1,
856 .use_clustering = ENABLE_CLUSTERING, 856 .use_clustering = ENABLE_CLUSTERING,
857 .use_sg_chaining = ENABLE_SG_CHAINING,
857}; 858};
858#include "scsi_module.c" 859#include "scsi_module.c"
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 3db22325ea2c..db03c4c8ec1e 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -1808,6 +1808,7 @@ static struct scsi_host_template sym2_template = {
1808 .eh_host_reset_handler = sym53c8xx_eh_host_reset_handler, 1808 .eh_host_reset_handler = sym53c8xx_eh_host_reset_handler,
1809 .this_id = 7, 1809 .this_id = 7,
1810 .use_clustering = ENABLE_CLUSTERING, 1810 .use_clustering = ENABLE_CLUSTERING,
1811 .use_sg_chaining = ENABLE_SG_CHAINING,
1811 .max_sectors = 0xFFFF, 1812 .max_sectors = 0xFFFF,
1812#ifdef SYM_LINUX_PROC_INFO_SUPPORT 1813#ifdef SYM_LINUX_PROC_INFO_SUPPORT
1813 .proc_info = sym53c8xx_proc_info, 1814 .proc_info = sym53c8xx_proc_info,
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index fc9f51818e8f..7edd6ceb13b2 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -450,7 +450,8 @@ static struct scsi_host_template driver_template = {
450 .slave_configure = u14_34f_slave_configure, 450 .slave_configure = u14_34f_slave_configure,
451 .this_id = 7, 451 .this_id = 7,
452 .unchecked_isa_dma = 1, 452 .unchecked_isa_dma = 1,
453 .use_clustering = ENABLE_CLUSTERING 453 .use_clustering = ENABLE_CLUSTERING,
454 .use_sg_chaining = ENABLE_SG_CHAINING,
454 }; 455 };
455 456
456#if !defined(__BIG_ENDIAN_BITFIELD) && !defined(__LITTLE_ENDIAN_BITFIELD) 457#if !defined(__BIG_ENDIAN_BITFIELD) && !defined(__LITTLE_ENDIAN_BITFIELD)
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
index c08235d5afc9..ea72bbeb8f9d 100644
--- a/drivers/scsi/ultrastor.c
+++ b/drivers/scsi/ultrastor.c
@@ -1197,5 +1197,6 @@ static struct scsi_host_template driver_template = {
1197 .cmd_per_lun = ULTRASTOR_MAX_CMDS_PER_LUN, 1197 .cmd_per_lun = ULTRASTOR_MAX_CMDS_PER_LUN,
1198 .unchecked_isa_dma = 1, 1198 .unchecked_isa_dma = 1,
1199 .use_clustering = ENABLE_CLUSTERING, 1199 .use_clustering = ENABLE_CLUSTERING,
1200 .use_sg_chaining = ENABLE_SG_CHAINING,
1200}; 1201};
1201#include "scsi_module.c" 1202#include "scsi_module.c"
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index d6fd4259c56b..255c611e78b8 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -1671,6 +1671,7 @@ static struct scsi_host_template driver_template = {
1671 .cmd_per_lun = 1, 1671 .cmd_per_lun = 1,
1672 .unchecked_isa_dma = 1, 1672 .unchecked_isa_dma = 1,
1673 .use_clustering = ENABLE_CLUSTERING, 1673 .use_clustering = ENABLE_CLUSTERING,
1674 .use_sg_chaining = ENABLE_SG_CHAINING,
1674}; 1675};
1675 1676
1676#include "scsi_module.c" 1677#include "scsi_module.c"