author	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 13:09:16 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 13:09:16 -0400
commit	92d15c2ccbb3e31a3fc71ad28fdb55e1319383c0 (patch)
tree	8d83c0dc3c6b935d8367e331872f242b742f0a8a
parent	f20bf6125605acbbc7eb8c9420d7221c91aa83eb (diff)
parent	644bd2f048972d75eb1979b1fdca257d528ce687 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/data/git/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/data/git/linux-2.6-block: (63 commits)
  Fix memory leak in dm-crypt
  SPARC64: sg chaining support
  SPARC: sg chaining support
  PPC: sg chaining support
  PS3: sg chaining support
  IA64: sg chaining support
  x86-64: enable sg chaining
  x86-64: update pci-gart iommu to sg helpers
  x86-64: update nommu to sg helpers
  x86-64: update calgary iommu to sg helpers
  swiotlb: sg chaining support
  i386: enable sg chaining
  i386 dma_map_sg: convert to using sg helpers
  mmc: need to zero sglist on init
  Panic in blk_rq_map_sg() from CCISS driver
  remove sglist_len
  remove blk_queue_max_phys_segments in libata
  revert sg segment size ifdefs
  Fixup u14-34f ENABLE_SG_CHAINING
  qla1280: enable use_sg_chaining option
  ...
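The recurring change in this merge is mechanical: with sg chaining, a scatterlist is no longer guaranteed to be one flat array, so open-coded pointer arithmetic (sg++, sg[i]) is replaced by the sg_next()/for_each_sg() helpers from include/linux/scatterlist.h. A minimal sketch of the two iteration styles; walk_flat() and walk_chained() are hypothetical helpers for illustration, not code from this merge:

        #include <linux/scatterlist.h>

        /* Old style: assumes all nents entries sit in one contiguous array. */
        static void walk_flat(struct scatterlist *sg, int nents)
        {
                int i;

                for (i = 0; i < nents; i++, sg++)       /* breaks on a chained list */
                        pr_debug("entry %d: length %u\n", i, sg->length);
        }

        /* New style: sg_next() steps through chain links transparently. */
        static void walk_chained(struct scatterlist *sgl, int nents)
        {
                struct scatterlist *sg;
                int i;

                for_each_sg(sgl, sg, nents, i)
                        pr_debug("entry %d: length %u\n", i, sg->length);
        }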
-rw-r--r--  Documentation/DMA-mapping.txt | 4
-rw-r--r--  Documentation/HOWTO | 2
-rw-r--r--  Documentation/block/00-INDEX | 20
-rw-r--r--  Documentation/block/as-iosched.txt | 21
-rw-r--r--  Documentation/block/biodoc.txt | 4
-rw-r--r--  Documentation/block/deadline-iosched.txt | 25
-rw-r--r--  Documentation/block/ioprio.txt | 2
-rw-r--r--  Documentation/block/request.txt | 2
-rw-r--r--  Documentation/block/switching-sched.txt | 21
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c | 14
-rw-r--r--  arch/ia64/hp/sim/simscsi.c | 1
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c | 11
-rw-r--r--  arch/powerpc/kernel/dma_64.c | 5
-rw-r--r--  arch/powerpc/kernel/ibmebus.c | 11
-rw-r--r--  arch/powerpc/kernel/iommu.c | 23
-rw-r--r--  arch/powerpc/platforms/ps3/system-bus.c | 7
-rw-r--r--  arch/sparc/kernel/ioport.c | 25
-rw-r--r--  arch/sparc/mm/io-unit.c | 12
-rw-r--r--  arch/sparc/mm/iommu.c | 10
-rw-r--r--  arch/sparc/mm/sun4c.c | 10
-rw-r--r--  arch/sparc64/kernel/iommu.c | 39
-rw-r--r--  arch/sparc64/kernel/pci_sun4v.c | 32
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c | 24
-rw-r--r--  arch/x86/kernel/pci-gart_64.c | 65
-rw-r--r--  arch/x86/kernel/pci-nommu_64.c | 5
-rw-r--r--  block/bsg.c | 2
-rw-r--r--  block/elevator.c | 17
-rw-r--r--  block/ll_rw_blk.c | 311
-rw-r--r--  crypto/digest.c | 2
-rw-r--r--  crypto/scatterwalk.c | 2
-rw-r--r--  crypto/scatterwalk.h | 2
-rw-r--r--  drivers/ata/libata-core.c | 35
-rw-r--r--  drivers/ata/libata-scsi.c | 2
-rw-r--r--  drivers/block/cciss.c | 2
-rw-r--r--  drivers/block/cpqarray.c | 3
-rw-r--r--  drivers/block/pktcdvd.c | 7
-rw-r--r--  drivers/block/ps3disk.c | 21
-rw-r--r--  drivers/ide/cris/ide-cris.c | 3
-rw-r--r--  drivers/ide/ide-disk.c | 29
-rw-r--r--  drivers/ide/ide-dma.c | 2
-rw-r--r--  drivers/ide/ide-io.c | 38
-rw-r--r--  drivers/ide/ide-probe.c | 2
-rw-r--r--  drivers/ide/ide-taskfile.c | 18
-rw-r--r--  drivers/ide/mips/au1xxx-ide.c | 2
-rw-r--r--  drivers/ide/pci/sgiioc4.c | 3
-rw-r--r--  drivers/ide/ppc/pmac.c | 2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_dma.c | 10
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c | 75
-rw-r--r--  drivers/md/dm-crypt.c | 31
-rw-r--r--  drivers/md/dm-table.c | 28
-rw-r--r--  drivers/md/dm.c | 16
-rw-r--r--  drivers/md/dm.h | 1
-rw-r--r--  drivers/md/linear.c | 20
-rw-r--r--  drivers/md/md.c | 1
-rw-r--r--  drivers/md/multipath.c | 30
-rw-r--r--  drivers/md/raid0.c | 21
-rw-r--r--  drivers/md/raid1.c | 31
-rw-r--r--  drivers/md/raid10.c | 31
-rw-r--r--  drivers/md/raid5.c | 31
-rw-r--r--  drivers/message/fusion/mptscsih.c | 6
-rw-r--r--  drivers/message/i2o/i2o_block.c | 24
-rw-r--r--  drivers/mmc/card/queue.c | 6
-rw-r--r--  drivers/s390/scsi/zfcp_def.h | 1
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 6
-rw-r--r--  drivers/scsi/3w-9xxx.c | 1
-rw-r--r--  drivers/scsi/3w-xxxx.c | 1
-rw-r--r--  drivers/scsi/BusLogic.c | 1
-rw-r--r--  drivers/scsi/NCR53c406a.c | 3
-rw-r--r--  drivers/scsi/a100u2w.c | 1
-rw-r--r--  drivers/scsi/aacraid/linit.c | 1
-rw-r--r--  drivers/scsi/aha1542.c | 32
-rw-r--r--  drivers/scsi/aha1740.c | 1
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c | 1
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.c | 1
-rw-r--r--  drivers/scsi/aic7xxx_old.c | 1
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_task.c | 6
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c | 1
-rw-r--r--  drivers/scsi/dc395x.c | 1
-rw-r--r--  drivers/scsi/dpt_i2o.c | 1
-rw-r--r--  drivers/scsi/eata.c | 3
-rw-r--r--  drivers/scsi/hosts.c | 1
-rw-r--r--  drivers/scsi/hptiop.c | 1
-rw-r--r--  drivers/scsi/ibmmca.c | 1
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 1
-rw-r--r--  drivers/scsi/ide-scsi.c | 32
-rw-r--r--  drivers/scsi/initio.c | 1
-rw-r--r--  drivers/scsi/ips.c | 14
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 2
-rw-r--r--  drivers/scsi/mac53c94.c | 1
-rw-r--r--  drivers/scsi/megaraid.c | 1
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 1
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.c | 1
-rw-r--r--  drivers/scsi/mesh.c | 1
-rw-r--r--  drivers/scsi/nsp32.c | 1
-rw-r--r--  drivers/scsi/pcmcia/sym53c500_cs.c | 1
-rw-r--r--  drivers/scsi/qla1280.c | 70
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 1
-rw-r--r--  drivers/scsi/qlogicfas.c | 1
-rw-r--r--  drivers/scsi/qlogicpti.c | 15
-rw-r--r--  drivers/scsi/scsi_debug.c | 30
-rw-r--r--  drivers/scsi/scsi_lib.c | 238
-rw-r--r--  drivers/scsi/scsi_tgt_lib.c | 4
-rw-r--r--  drivers/scsi/sd.c | 22
-rw-r--r--  drivers/scsi/sg.c | 16
-rw-r--r--  drivers/scsi/stex.c | 1
-rw-r--r--  drivers/scsi/sym53c416.c | 1
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.c | 1
-rw-r--r--  drivers/scsi/u14-34f.c | 3
-rw-r--r--  drivers/scsi/ultrastor.c | 1
-rw-r--r--  drivers/scsi/wd7000.c | 1
-rw-r--r--  drivers/usb/storage/alauda.c | 16
-rw-r--r--  drivers/usb/storage/datafab.c | 10
-rw-r--r--  drivers/usb/storage/jumpshot.c | 10
-rw-r--r--  drivers/usb/storage/protocol.c | 20
-rw-r--r--  drivers/usb/storage/protocol.h | 2
-rw-r--r--  drivers/usb/storage/sddr09.c | 16
-rw-r--r--  drivers/usb/storage/sddr55.c | 16
-rw-r--r--  drivers/usb/storage/shuttle_usbat.c | 17
-rw-r--r--  fs/bio.c | 23
-rw-r--r--  fs/splice.c | 2
-rw-r--r--  include/asm-ia64/dma-mapping.h | 2
-rw-r--r--  include/asm-ia64/scatterlist.h | 2
-rw-r--r--  include/asm-powerpc/dma-mapping.h | 158
-rw-r--r--  include/asm-powerpc/scatterlist.h | 2
-rw-r--r--  include/asm-sparc/scatterlist.h | 2
-rw-r--r--  include/asm-sparc64/scatterlist.h | 2
-rw-r--r--  include/asm-x86/dma-mapping_32.h | 13
-rw-r--r--  include/asm-x86/dma-mapping_64.h | 3
-rw-r--r--  include/asm-x86/scatterlist_32.h | 2
-rw-r--r--  include/asm-x86/scatterlist_64.h | 2
-rw-r--r--  include/linux/bio.h | 19
-rw-r--r--  include/linux/blkdev.h | 8
-rw-r--r--  include/linux/i2o.h | 3
-rw-r--r--  include/linux/ide.h | 7
-rw-r--r--  include/linux/libata.h | 16
-rw-r--r--  include/linux/scatterlist.h | 84
-rw-r--r--  include/scsi/scsi.h | 7
-rw-r--r--  include/scsi/scsi_cmnd.h | 7
-rw-r--r--  include/scsi/scsi_host.h | 13
-rw-r--r--  lib/swiotlb.c | 19
-rw-r--r--  mm/bounce.c | 6
142 files changed, 1186 insertions(+), 1115 deletions(-)
diff --git a/Documentation/DMA-mapping.txt b/Documentation/DMA-mapping.txt
index e07f2530326b..3c8ae020b6a7 100644
--- a/Documentation/DMA-mapping.txt
+++ b/Documentation/DMA-mapping.txt
@@ -514,7 +514,7 @@ With scatterlists, you map a region gathered from several regions by:
         int i, count = pci_map_sg(dev, sglist, nents, direction);
         struct scatterlist *sg;
 
-        for (i = 0, sg = sglist; i < count; i++, sg++) {
+        for_each_sg(sglist, sg, count, i) {
                 hw_address[i] = sg_dma_address(sg);
                 hw_len[i] = sg_dma_len(sg);
         }
@@ -782,5 +782,5 @@ following people:
         Jay Estabrook <Jay.Estabrook@compaq.com>
         Thomas Sailer <sailer@ife.ee.ethz.ch>
         Andrea Arcangeli <andrea@suse.de>
-        Jens Axboe <axboe@suse.de>
+        Jens Axboe <jens.axboe@oracle.com>
         David Mosberger-Tang <davidm@hpl.hp.com>
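Put together, the usage documented above comes out roughly as the sketch below; dev, sglist, nents, direction and the hw_address[]/hw_len[] arrays are placeholders carried over from the documentation, not a real driver:

        int i, count = pci_map_sg(dev, sglist, nents, direction);
        struct scatterlist *sg;

        for_each_sg(sglist, sg, count, i) {
                hw_address[i] = sg_dma_address(sg);
                hw_len[i] = sg_dma_len(sg);
        }

        /* ... program the device with hw_address[]/hw_len[], wait for I/O ... */

        pci_unmap_sg(dev, sglist, nents, direction);    /* nents, not count */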
diff --git a/Documentation/HOWTO b/Documentation/HOWTO
index c64e969dc33b..dceb30921498 100644
--- a/Documentation/HOWTO
+++ b/Documentation/HOWTO
@@ -330,7 +330,7 @@ Here is a list of some of the different kernel trees available:
   - ACPI development tree, Len Brown <len.brown@intel.com>
     git.kernel.org:/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git
 
-  - Block development tree, Jens Axboe <axboe@suse.de>
+  - Block development tree, Jens Axboe <jens.axboe@oracle.com>
     git.kernel.org:/pub/scm/linux/kernel/git/axboe/linux-2.6-block.git
 
   - DRM development tree, Dave Airlie <airlied@linux.ie>
diff --git a/Documentation/block/00-INDEX b/Documentation/block/00-INDEX
new file mode 100644
index 000000000000..961a0513f8c3
--- /dev/null
+++ b/Documentation/block/00-INDEX
@@ -0,0 +1,20 @@
+00-INDEX
+        - This file
+as-iosched.txt
+        - Anticipatory IO scheduler
+barrier.txt
+        - I/O Barriers
+biodoc.txt
+        - Notes on the Generic Block Layer Rewrite in Linux 2.5
+capability.txt
+        - Generic Block Device Capability (/sys/block/<disk>/capability)
+deadline-iosched.txt
+        - Deadline IO scheduler tunables
+ioprio.txt
+        - Block io priorities (in CFQ scheduler)
+request.txt
+        - The members of struct request (in include/linux/blkdev.h)
+stat.txt
+        - Block layer statistics in /sys/block/<dev>/stat
+switching-sched.txt
+        - Switching I/O schedulers at runtime
diff --git a/Documentation/block/as-iosched.txt b/Documentation/block/as-iosched.txt
index a598fe10a297..738b72be128e 100644
--- a/Documentation/block/as-iosched.txt
+++ b/Documentation/block/as-iosched.txt
@@ -20,15 +20,10 @@ actually has a head for each physical device in the logical RAID device.
 However, setting the antic_expire (see tunable parameters below) produces
 very similar behavior to the deadline IO scheduler.
 
-
 Selecting IO schedulers
 -----------------------
-To choose IO schedulers at boot time, use the argument 'elevator=deadline'.
-'noop', 'as' and 'cfq' (the default) are also available. IO schedulers are
-assigned globally at boot time only presently. It's also possible to change
-the IO scheduler for a determined device on the fly, as described in
-Documentation/block/switching-sched.txt.
-
+Refer to Documentation/block/switching-sched.txt for information on
+selecting an io scheduler on a per-device basis.
 
 Anticipatory IO scheduler Policies
 ----------------------------------
@@ -115,7 +110,7 @@ statistics (average think time, average seek distance) on the process
 that submitted the just completed request are examined. If it seems
 likely that that process will submit another request soon, and that
 request is likely to be near the just completed request, then the IO
-scheduler will stop dispatching more read requests for up time (antic_expire)
+scheduler will stop dispatching more read requests for up to (antic_expire)
 milliseconds, hoping that process will submit a new request near the one
 that just completed. If such a request is made, then it is dispatched
 immediately. If the antic_expire wait time expires, then the IO scheduler
@@ -165,3 +160,13 @@ The parameters are:
     for big seek time devices though not a linear correspondence - most
     processes have only a few ms thinktime.
 
+In addition to the tunables above there is a read-only file named est_time
+which, when read, will show:
+
+    - The probability of a task exiting without a cooperating task
+      submitting an anticipated IO.
+
+    - The current mean think time.
+
+    - The seek distance used to determine if an incoming IO is better.
+
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index dc3f49e3e539..93f223b9723f 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -2,7 +2,7 @@
 =====================================================
 
 Notes Written on Jan 15, 2002:
-        Jens Axboe <axboe@suse.de>
+        Jens Axboe <jens.axboe@oracle.com>
         Suparna Bhattacharya <suparna@in.ibm.com>
 
 Last Updated May 2, 2002
@@ -21,7 +21,7 @@ Credits:
 ---------
 
 2.5 bio rewrite:
-        Jens Axboe <axboe@suse.de>
+        Jens Axboe <jens.axboe@oracle.com>
 
 Many aspects of the generic block layer redesign were driven by and evolved
 over discussions, prior patches and the collective experience of several
diff --git a/Documentation/block/deadline-iosched.txt b/Documentation/block/deadline-iosched.txt
index be08ffd1e9b8..c23cab13c3d1 100644
--- a/Documentation/block/deadline-iosched.txt
+++ b/Documentation/block/deadline-iosched.txt
@@ -5,16 +5,10 @@ This little file attempts to document how the deadline io scheduler works.
 In particular, it will clarify the meaning of the exposed tunables that may be
 of interest to power users.
 
-Each io queue has a set of io scheduler tunables associated with it. These
-tunables control how the io scheduler works. You can find these entries
-in:
-
-/sys/block/<device>/queue/iosched
-
-assuming that you have sysfs mounted on /sys. If you don't have sysfs mounted,
-you can do so by typing:
-
-# mount none /sys -t sysfs
+Selecting IO schedulers
+-----------------------
+Refer to Documentation/block/switching-sched.txt for information on
+selecting an io scheduler on a per-device basis.
 
 
 ********************************************************************************
@@ -41,14 +35,11 @@ fifo_batch
 
 When a read request expires its deadline, we must move some requests from
 the sorted io scheduler list to the block device dispatch queue. fifo_batch
-controls how many requests we move, based on the cost of each request. A
-request is either qualified as a seek or a stream. The io scheduler knows
-the last request that was serviced by the drive (or will be serviced right
-before this one). See seek_cost and stream_unit.
+controls how many requests we move.
 
 
-write_starved (number of dispatches)
--------------
+writes_starved (number of dispatches)
+--------------
 
 When we have to move requests from the io scheduler queue to the block
 device dispatch queue, we always give a preference to reads. However, we
@@ -73,6 +64,6 @@ that comes at basically 0 cost we leave that on. We simply disable the
 rbtree front sector lookup when the io scheduler merge function is called.
 
 
-Nov 11 2002, Jens Axboe <axboe@suse.de>
+Nov 11 2002, Jens Axboe <jens.axboe@oracle.com>
 
 
diff --git a/Documentation/block/ioprio.txt b/Documentation/block/ioprio.txt
index 35e516b0b8a9..8ed8c59380b4 100644
--- a/Documentation/block/ioprio.txt
+++ b/Documentation/block/ioprio.txt
@@ -180,4 +180,4 @@ int main(int argc, char *argv[])
 ---> snip ionice.c tool <---
 
 
-March 11 2005, Jens Axboe <axboe@suse.de>
+March 11 2005, Jens Axboe <jens.axboe@oracle.com>
diff --git a/Documentation/block/request.txt b/Documentation/block/request.txt
index fff58acb40a3..754e104ed369 100644
--- a/Documentation/block/request.txt
+++ b/Documentation/block/request.txt
@@ -1,7 +1,7 @@
 
 struct request documentation
 
-Jens Axboe <axboe@suse.de> 27/05/02
+Jens Axboe <jens.axboe@oracle.com> 27/05/02
 
 1.0
 Index
diff --git a/Documentation/block/switching-sched.txt b/Documentation/block/switching-sched.txt
index 5fa130a67531..634c952e1964 100644
--- a/Documentation/block/switching-sched.txt
+++ b/Documentation/block/switching-sched.txt
@@ -1,3 +1,18 @@
+To choose IO schedulers at boot time, use the argument 'elevator=deadline'.
+'noop', 'as' and 'cfq' (the default) are also available. IO schedulers are
+assigned globally at boot time only presently.
+
+Each io queue has a set of io scheduler tunables associated with it. These
+tunables control how the io scheduler works. You can find these entries
+in:
+
+/sys/block/<device>/queue/iosched
+
+assuming that you have sysfs mounted on /sys. If you don't have sysfs mounted,
+you can do so by typing:
+
+# mount none /sys -t sysfs
+
 As of the Linux 2.6.10 kernel, it is now possible to change the
 IO scheduler for a given block device on the fly (thus making it possible,
 for instance, to set the CFQ scheduler for the system default, but
@@ -20,3 +35,9 @@ noop anticipatory deadline [cfq]
 # echo anticipatory > /sys/block/hda/queue/scheduler
 # cat /sys/block/hda/queue/scheduler
 noop [anticipatory] deadline cfq
+
+Each io queue has a set of io scheduler tunables associated with it. These
+tunables control how the io scheduler works. You can find these entries
+in:
+
+/sys/block/<device>/queue/iosched
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index e980e7aa2306..4338f4123f31 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -396,7 +396,7 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
                 printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
                        startsg->dma_address, startsg->dma_length,
                        sba_sg_address(startsg));
-                startsg++;
+                startsg = sg_next(startsg);
         }
 }
 
@@ -409,7 +409,7 @@ sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
         while (the_nents-- > 0) {
                 if (sba_sg_address(the_sg) == 0x0UL)
                         sba_dump_sg(NULL, startsg, nents);
-                the_sg++;
+                the_sg = sg_next(the_sg);
         }
 }
 
@@ -1201,7 +1201,7 @@ sba_fill_pdir(
                         u32 pide = startsg->dma_address & ~PIDE_FLAG;
                         dma_offset = (unsigned long) pide & ~iovp_mask;
                         startsg->dma_address = 0;
-                        dma_sg++;
+                        dma_sg = sg_next(dma_sg);
                         dma_sg->dma_address = pide | ioc->ibase;
                         pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
                         n_mappings++;
@@ -1228,7 +1228,7 @@ sba_fill_pdir(
                                 pdirp++;
                         } while (cnt > 0);
                 }
-                startsg++;
+                startsg = sg_next(startsg);
         }
         /* force pdir update */
         wmb();
@@ -1297,7 +1297,7 @@ sba_coalesce_chunks( struct ioc *ioc,
         while (--nents > 0) {
                 unsigned long vaddr;    /* tmp */
 
-                startsg++;
+                startsg = sg_next(startsg);
 
                 /* PARANOID */
                 startsg->dma_address = startsg->dma_length = 0;
@@ -1407,7 +1407,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 #ifdef ALLOW_IOV_BYPASS_SG
         ASSERT(to_pci_dev(dev)->dma_mask);
         if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
-                for (sg = sglist ; filled < nents ; filled++, sg++){
+                for_each_sg(sglist, sg, nents, filled) {
                         sg->dma_length = sg->length;
                         sg->dma_address = virt_to_phys(sba_sg_address(sg));
                 }
@@ -1501,7 +1501,7 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
         while (nents && sglist->dma_length) {
 
                 sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir);
-                sglist++;
+                sglist = sg_next(sglist);
                 nents--;
         }
 
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index d62fa76e5a7d..a3a558a06757 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -360,6 +360,7 @@ static struct scsi_host_template driver_template = {
         .max_sectors            = 1024,
         .cmd_per_lun            = SIMSCSI_REQ_QUEUE_LEN,
         .use_clustering         = DISABLE_CLUSTERING,
+        .use_sg_chaining        = ENABLE_SG_CHAINING,
 };
 
 static int __init
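use_sg_chaining is the per-host opt-in added by this series: the SCSI midlayer only builds chained scatterlists for hosts that set it, so drivers still doing sg[i]/sg++ arithmetic internally are left unconverted and safe. A hedged sketch of a host template carrying the flag; the names and values are placeholders, not a real driver:

        static struct scsi_host_template example_template = {
                .module                 = THIS_MODULE,
                .name                   = "example",            /* placeholder */
                .queuecommand           = example_queuecommand, /* placeholder */
                .this_id                = -1,
                .sg_tablesize           = 128,
                .use_clustering         = ENABLE_CLUSTERING,
                .use_sg_chaining        = ENABLE_SG_CHAINING,
        };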
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index d79ddacfba2d..ecd8a52b9b9e 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -218,16 +218,17 @@ EXPORT_SYMBOL(sn_dma_unmap_single);
  *
  * Unmap a set of streaming mode DMA translations.
  */
-void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
                      int nhwentries, int direction)
 {
         int i;
         struct pci_dev *pdev = to_pci_dev(dev);
         struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
+        struct scatterlist *sg;
 
         BUG_ON(dev->bus != &pci_bus_type);
 
-        for (i = 0; i < nhwentries; i++, sg++) {
+        for_each_sg(sgl, sg, nhwentries, i) {
                 provider->dma_unmap(pdev, sg->dma_address, direction);
                 sg->dma_address = (dma_addr_t) NULL;
                 sg->dma_length = 0;
@@ -244,11 +245,11 @@ EXPORT_SYMBOL(sn_dma_unmap_sg);
  *
  * Maps each entry of @sg for DMA.
  */
-int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
                   int direction)
 {
         unsigned long phys_addr;
-        struct scatterlist *saved_sg = sg;
+        struct scatterlist *saved_sg = sgl, *sg;
         struct pci_dev *pdev = to_pci_dev(dev);
         struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
         int i;
@@ -258,7 +259,7 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
         /*
          * Setup a DMA address for each entry in the scatterlist.
          */
-        for (i = 0; i < nhwentries; i++, sg++) {
+        for_each_sg(sgl, sg, nhwentries, i) {
                 phys_addr = SG_ENT_PHYS_ADDRESS(sg);
                 sg->dma_address = provider->dma_map(pdev,
                                                     phys_addr, sg->length,
diff --git a/arch/powerpc/kernel/dma_64.c b/arch/powerpc/kernel/dma_64.c
index 7b0e754383cf..9001104b56b0 100644
--- a/arch/powerpc/kernel/dma_64.c
+++ b/arch/powerpc/kernel/dma_64.c
@@ -154,12 +154,13 @@ static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
 {
 }
 
-static int dma_direct_map_sg(struct device *dev, struct scatterlist *sg,
+static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
                              int nents, enum dma_data_direction direction)
 {
+        struct scatterlist *sg;
         int i;
 
-        for (i = 0; i < nents; i++, sg++) {
+        for_each_sg(sgl, sg, nents, i) {
                 sg->dma_address = (page_to_phys(sg->page) + sg->offset) |
                         dma_direct_offset;
                 sg->dma_length = sg->length;
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 53bf64623bd8..2e16ca5778a3 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -87,15 +87,16 @@ static void ibmebus_unmap_single(struct device *dev,
 }
 
 static int ibmebus_map_sg(struct device *dev,
-                          struct scatterlist *sg,
+                          struct scatterlist *sgl,
                           int nents, enum dma_data_direction direction)
 {
+        struct scatterlist *sg;
         int i;
 
-        for (i = 0; i < nents; i++) {
-                sg[i].dma_address = (dma_addr_t)page_address(sg[i].page)
-                        + sg[i].offset;
-                sg[i].dma_length = sg[i].length;
+        for_each_sg(sgl, sg, nents, i) {
+                sg->dma_address = (dma_addr_t)page_address(sg->page)
+                        + sg->offset;
+                sg->dma_length = sg->length;
         }
 
         return nents;
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index e4ec6eee81a8..306a6f75b6c5 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -277,7 +277,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
         dma_addr_t dma_next = 0, dma_addr;
         unsigned long flags;
         struct scatterlist *s, *outs, *segstart;
-        int outcount, incount;
+        int outcount, incount, i;
         unsigned long handle;
 
         BUG_ON(direction == DMA_NONE);
@@ -297,7 +297,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 
         spin_lock_irqsave(&(tbl->it_lock), flags);
 
-        for (s = outs; nelems; nelems--, s++) {
+        for_each_sg(sglist, s, nelems, i) {
                 unsigned long vaddr, npages, entry, slen;
 
                 slen = s->length;
@@ -341,7 +341,8 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                 if (novmerge || (dma_addr != dma_next)) {
                         /* Can't merge: create a new segment */
                         segstart = s;
-                        outcount++; outs++;
+                        outcount++;
+                        outs = sg_next(outs);
                         DBG(" can't merge, new segment.\n");
                 } else {
                         outs->dma_length += s->length;
@@ -374,7 +375,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
          * next entry of the sglist if we didn't fill the list completely
          */
         if (outcount < incount) {
-                outs++;
+                outs = sg_next(outs);
                 outs->dma_address = DMA_ERROR_CODE;
                 outs->dma_length = 0;
         }
@@ -385,7 +386,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
         return outcount;
 
 failure:
-        for (s = &sglist[0]; s <= outs; s++) {
+        for_each_sg(sglist, s, nelems, i) {
                 if (s->dma_length != 0) {
                         unsigned long vaddr, npages;
 
@@ -395,6 +396,8 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                         s->dma_address = DMA_ERROR_CODE;
                         s->dma_length = 0;
                 }
+                if (s == outs)
+                        break;
         }
         spin_unlock_irqrestore(&(tbl->it_lock), flags);
         return 0;
@@ -404,6 +407,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                 int nelems, enum dma_data_direction direction)
 {
+        struct scatterlist *sg;
         unsigned long flags;
 
         BUG_ON(direction == DMA_NONE);
@@ -413,15 +417,16 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 
         spin_lock_irqsave(&(tbl->it_lock), flags);
 
+        sg = sglist;
         while (nelems--) {
                 unsigned int npages;
-                dma_addr_t dma_handle = sglist->dma_address;
+                dma_addr_t dma_handle = sg->dma_address;
 
-                if (sglist->dma_length == 0)
+                if (sg->dma_length == 0)
                         break;
-                npages = iommu_num_pages(dma_handle,sglist->dma_length);
+                npages = iommu_num_pages(dma_handle, sg->dma_length);
                 __iommu_free(tbl, dma_handle, npages);
-                sglist++;
+                sg = sg_next(sg);
         }
 
         /* Flush/invalidate TLBs if necessary. As for iommu_free(), we
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index 190ff4b59a55..07e64b48e7fc 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -616,17 +616,18 @@ static void ps3_unmap_single(struct device *_dev, dma_addr_t dma_addr,
         }
 }
 
-static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sg, int nents,
-        enum dma_data_direction direction)
+static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl,
+        int nents, enum dma_data_direction direction)
 {
 #if defined(CONFIG_PS3_DYNAMIC_DMA)
         BUG_ON("do");
         return -EPERM;
 #else
         struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
+        struct scatterlist *sg;
         int i;
 
-        for (i = 0; i < nents; i++, sg++) {
+        for_each_sg(sgl, sg, nents, i) {
                 int result = ps3_dma_map(dev->d_region,
                         page_to_phys(sg->page) + sg->offset, sg->length,
                         &sg->dma_address, 0);
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 62182d2d7b0d..9c3ed88853f3 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -35,6 +35,7 @@
 #include <linux/slab.h>
 #include <linux/pci.h>          /* struct pci_dev */
 #include <linux/proc_fs.h>
+#include <linux/scatterlist.h>
 
 #include <asm/io.h>
 #include <asm/vaddrs.h>
@@ -717,19 +718,19 @@ void pci_unmap_page(struct pci_dev *hwdev,
  * Device ownership issues as mentioned above for pci_map_single are
  * the same here.
  */
-int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
+int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
     int direction)
 {
+        struct scatterlist *sg;
         int n;
 
         BUG_ON(direction == PCI_DMA_NONE);
         /* IIep is write-through, not flushing. */
-        for (n = 0; n < nents; n++) {
+        for_each_sg(sgl, sg, nents, n) {
                 BUG_ON(page_address(sg->page) == NULL);
                 sg->dvma_address =
                         virt_to_phys(page_address(sg->page)) + sg->offset;
                 sg->dvma_length = sg->length;
-                sg++;
         }
         return nents;
 }
@@ -738,19 +739,19 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
  * Again, cpu read rules concerning calls here are the same as for
  * pci_unmap_single() above.
  */
-void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
+void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
     int direction)
 {
+        struct scatterlist *sg;
         int n;
 
         BUG_ON(direction == PCI_DMA_NONE);
         if (direction != PCI_DMA_TODEVICE) {
-                for (n = 0; n < nents; n++) {
+                for_each_sg(sgl, sg, nents, n) {
                         BUG_ON(page_address(sg->page) == NULL);
                         mmu_inval_dma_area(
                             (unsigned long) page_address(sg->page),
                             (sg->length + PAGE_SIZE-1) & PAGE_MASK);
-                        sg++;
                 }
         }
 }
@@ -789,34 +790,34 @@ void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t
  * The same as pci_dma_sync_single_* but for a scatter-gather list,
  * same rules and usage.
  */
-void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
+void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
 {
+        struct scatterlist *sg;
         int n;
 
         BUG_ON(direction == PCI_DMA_NONE);
         if (direction != PCI_DMA_TODEVICE) {
-                for (n = 0; n < nents; n++) {
+                for_each_sg(sgl, sg, nents, n) {
                         BUG_ON(page_address(sg->page) == NULL);
                         mmu_inval_dma_area(
                             (unsigned long) page_address(sg->page),
                             (sg->length + PAGE_SIZE-1) & PAGE_MASK);
-                        sg++;
                 }
         }
 }
 
-void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
+void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
 {
+        struct scatterlist *sg;
         int n;
 
         BUG_ON(direction == PCI_DMA_NONE);
         if (direction != PCI_DMA_TODEVICE) {
-                for (n = 0; n < nents; n++) {
+                for_each_sg(sgl, sg, nents, n) {
                         BUG_ON(page_address(sg->page) == NULL);
                         mmu_inval_dma_area(
                             (unsigned long) page_address(sg->page),
                             (sg->length + PAGE_SIZE-1) & PAGE_MASK);
-                        sg++;
                 }
         }
 }
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
index 7c89893b1fe8..375b4db63704 100644
--- a/arch/sparc/mm/io-unit.c
+++ b/arch/sparc/mm/io-unit.c
@@ -11,8 +11,8 @@
 #include <linux/mm.h>
 #include <linux/highmem.h>      /* pte_offset_map => kmap_atomic */
 #include <linux/bitops.h>
+#include <linux/scatterlist.h>
 
-#include <asm/scatterlist.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/sbus.h>
@@ -144,8 +144,9 @@ static void iounit_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus
         spin_lock_irqsave(&iounit->lock, flags);
         while (sz != 0) {
                 --sz;
-                sg[sz].dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg[sz].page) + sg[sz].offset, sg[sz].length);
-                sg[sz].dvma_length = sg[sz].length;
+                sg->dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg->page) + sg->offset, sg->length);
+                sg->dvma_length = sg->length;
+                sg = sg_next(sg);
         }
         spin_unlock_irqrestore(&iounit->lock, flags);
 }
@@ -173,11 +174,12 @@ static void iounit_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_
         spin_lock_irqsave(&iounit->lock, flags);
         while (sz != 0) {
                 --sz;
-                len = ((sg[sz].dvma_address & ~PAGE_MASK) + sg[sz].length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
-                vaddr = (sg[sz].dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
+                len = ((sg->dvma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
+                vaddr = (sg->dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
                 IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
                 for (len += vaddr; vaddr < len; vaddr++)
                         clear_bit(vaddr, iounit->bmap);
+                sg = sg_next(sg);
         }
         spin_unlock_irqrestore(&iounit->lock, flags);
 }
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 52e907af9d29..283656d9f6ea 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -12,8 +12,8 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/highmem.h>      /* pte_offset_map => kmap_atomic */
+#include <linux/scatterlist.h>
 
-#include <asm/scatterlist.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/sbus.h>
@@ -240,7 +240,7 @@ static void iommu_get_scsi_sgl_noflush(struct scatterlist *sg, int sz, struct sb
                 n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
                 sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
                 sg->dvma_length = (__u32) sg->length;
-                sg++;
+                sg = sg_next(sg);
         }
 }
 
@@ -254,7 +254,7 @@ static void iommu_get_scsi_sgl_gflush(struct scatterlist *sg, int sz, struct sbu
                 n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
                 sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
                 sg->dvma_length = (__u32) sg->length;
-                sg++;
+                sg = sg_next(sg);
         }
 }
 
@@ -285,7 +285,7 @@ static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbu
 
                 sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
                 sg->dvma_length = (__u32) sg->length;
-                sg++;
+                sg = sg_next(sg);
         }
 }
 
@@ -325,7 +325,7 @@ static void iommu_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_b
                 n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
                 iommu_release_one(sg->dvma_address & PAGE_MASK, n, sbus);
                 sg->dvma_address = 0x21212121;
-                sg++;
+                sg = sg_next(sg);
         }
 }
 
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index 005a3e72d4f2..ee6708fc4492 100644
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -17,8 +17,8 @@
 #include <linux/highmem.h>
 #include <linux/fs.h>
 #include <linux/seq_file.h>
+#include <linux/scatterlist.h>
 
-#include <asm/scatterlist.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
24#include <asm/pgtable.h> 24#include <asm/pgtable.h>
@@ -1228,8 +1228,9 @@ static void sun4c_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *
1228{ 1228{
1229 while (sz != 0) { 1229 while (sz != 0) {
1230 --sz; 1230 --sz;
1231 sg[sz].dvma_address = (__u32)sun4c_lockarea(page_address(sg[sz].page) + sg[sz].offset, sg[sz].length); 1231 sg->dvma_address = (__u32)sun4c_lockarea(page_address(sg->page) + sg->offset, sg->length);
1232 sg[sz].dvma_length = sg[sz].length; 1232 sg->dvma_length = sg->length;
1233 sg = sg_next(sg);
1233 } 1234 }
1234} 1235}
1235 1236
@@ -1244,7 +1245,8 @@ static void sun4c_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_b
 {
         while (sz != 0) {
                 --sz;
-                sun4c_unlockarea((char *)sg[sz].dvma_address, sg[sz].length);
+                sun4c_unlockarea((char *)sg->dvma_address, sg->length);
+                sg = sg_next(sg);
         }
 }
 
diff --git a/arch/sparc64/kernel/iommu.c b/arch/sparc64/kernel/iommu.c
index b35a62167e9c..db3ffcf7a120 100644
--- a/arch/sparc64/kernel/iommu.c
+++ b/arch/sparc64/kernel/iommu.c
@@ -10,6 +10,7 @@
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/errno.h>
+#include <linux/scatterlist.h>
 
 #ifdef CONFIG_PCI
 #include <linux/pci.h>
@@ -480,7 +481,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
                                unsigned long iopte_protection)
 {
         struct scatterlist *dma_sg = sg;
-        struct scatterlist *sg_end = sg + nelems;
+        struct scatterlist *sg_end = sg_last(sg, nelems);
         int i;
 
         for (i = 0; i < nused; i++) {
@@ -515,7 +516,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
                                         len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
                                         break;
                                 }
-                                sg++;
+                                sg = sg_next(sg);
                         }
 
                         pteval = iopte_protection | (pteval & IOPTE_PAGE);
@@ -528,24 +529,24 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
                         }
 
                         pteval = (pteval & IOPTE_PAGE) + len;
-                        sg++;
+                        sg = sg_next(sg);
 
                         /* Skip over any tail mappings we've fully mapped,
                          * adjusting pteval along the way. Stop when we
                          * detect a page crossing event.
                          */
-                        while (sg < sg_end &&
+                        while (sg != sg_end &&
                                (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
                                (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
                                ((pteval ^
                                  (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
                                 pteval += sg->length;
-                                sg++;
+                                sg = sg_next(sg);
                         }
                         if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
                                 pteval = ~0UL;
                 } while (dma_npages != 0);
-                dma_sg++;
+                dma_sg = sg_next(dma_sg);
         }
 }
 
@@ -606,7 +607,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
         sgtmp = sglist;
         while (used && sgtmp->dma_length) {
                 sgtmp->dma_address += dma_base;
-                sgtmp++;
+                sgtmp = sg_next(sgtmp);
                 used--;
         }
         used = nelems - used;
@@ -642,6 +643,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
         struct strbuf *strbuf;
         iopte_t *base;
         unsigned long flags, ctx, i, npages;
+        struct scatterlist *sg, *sgprv;
         u32 bus_addr;
 
         if (unlikely(direction == DMA_NONE)) {
@@ -654,11 +656,14 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
 
         bus_addr = sglist->dma_address & IO_PAGE_MASK;
 
-        for (i = 1; i < nelems; i++)
-                if (sglist[i].dma_length == 0)
+        sgprv = NULL;
+        for_each_sg(sglist, sg, nelems, i) {
+                if (sg->dma_length == 0)
                         break;
-        i--;
-        npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
+                sgprv = sg;
+        }
+
+        npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
                   bus_addr) >> IO_PAGE_SHIFT;
 
         base = iommu->page_table +
@@ -730,6 +735,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
         struct iommu *iommu;
         struct strbuf *strbuf;
         unsigned long flags, ctx, npages, i;
+        struct scatterlist *sg, *sgprv;
         u32 bus_addr;
 
         iommu = dev->archdata.iommu;
@@ -753,11 +759,14 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
 
         /* Step 2: Kick data out of streaming buffers. */
         bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
-        for(i = 1; i < nelems; i++)
-                if (!sglist[i].dma_length)
+        sgprv = NULL;
+        for_each_sg(sglist, sg, nelems, i) {
+                if (sg->dma_length == 0)
                         break;
-        i--;
-        npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
+                sgprv = sg;
+        }
+
+        npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
                   - bus_addr) >> IO_PAGE_SHIFT;
         strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
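The sparc64 pattern above is worth calling out: once a scatterlist can be chained, its entries no longer live in a single array, so relational pointer comparisons such as sg < sg_end stop meaning anything; only equality against sg_last(sgl, nelems) and stepping with sg_next() are well defined, which is why these loops now remember the previous entry (sgprv) instead of indexing backwards with i--. A small sketch of the idiom, assuming a hypothetical walk_to_last() helper:

        #include <linux/scatterlist.h>

        static void walk_to_last(struct scatterlist *sgl, int nelems)
        {
                struct scatterlist *sg = sgl;
                struct scatterlist *end = sg_last(sgl, nelems);

                for (;;) {
                        pr_debug("length %u\n", sg->length);
                        if (sg == end)          /* equality, never sg < end */
                                break;
                        sg = sg_next(sg);
                }
        }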
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 95de1444ee67..cacacfae5451 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -13,6 +13,7 @@
 #include <linux/irq.h>
 #include <linux/msi.h>
 #include <linux/log2.h>
+#include <linux/scatterlist.h>
 
 #include <asm/iommu.h>
 #include <asm/irq.h>
@@ -373,7 +374,7 @@ static inline long fill_sg(long entry, struct device *dev,
                           int nused, int nelems, unsigned long prot)
 {
         struct scatterlist *dma_sg = sg;
-        struct scatterlist *sg_end = sg + nelems;
+        struct scatterlist *sg_end = sg_last(sg, nelems);
         unsigned long flags;
         int i;
 
@@ -413,7 +414,7 @@ static inline long fill_sg(long entry, struct device *dev,
                                         len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
                                         break;
                                 }
-                                sg++;
+                                sg = sg_next(sg);
                         }
 
                         pteval = (pteval & IOPTE_PAGE);
@@ -431,24 +432,25 @@ static inline long fill_sg(long entry, struct device *dev,
                         }
 
                         pteval = (pteval & IOPTE_PAGE) + len;
-                        sg++;
+                        sg = sg_next(sg);
 
                         /* Skip over any tail mappings we've fully mapped,
                          * adjusting pteval along the way. Stop when we
                          * detect a page crossing event.
                          */
-                        while (sg < sg_end &&
-                               (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
+                        while ((pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
                                (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
                                ((pteval ^
                                  (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
                                 pteval += sg->length;
-                                sg++;
+                                if (sg == sg_end)
+                                        break;
+                                sg = sg_next(sg);
                         }
                         if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
                                 pteval = ~0UL;
                 } while (dma_npages != 0);
-                dma_sg++;
+                dma_sg = sg_next(dma_sg);
         }
 
         if (unlikely(iommu_batch_end() < 0L))
@@ -510,7 +512,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
         sgtmp = sglist;
         while (used && sgtmp->dma_length) {
                 sgtmp->dma_address += dma_base;
-                sgtmp++;
+                sgtmp = sg_next(sgtmp);
                 used--;
         }
         used = nelems - used;
@@ -545,6 +547,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
         struct pci_pbm_info *pbm;
         struct iommu *iommu;
         unsigned long flags, i, npages;
+        struct scatterlist *sg, *sgprv;
         long entry;
         u32 devhandle, bus_addr;
 
@@ -558,12 +561,15 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
         devhandle = pbm->devhandle;
 
         bus_addr = sglist->dma_address & IO_PAGE_MASK;
-
-        for (i = 1; i < nelems; i++)
-                if (sglist[i].dma_length == 0)
+        sgprv = NULL;
+        for_each_sg(sglist, sg, nelems, i) {
+                if (sg->dma_length == 0)
                         break;
-        i--;
-        npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
+
+                sgprv = sg;
+        }
+
+        npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
                   bus_addr) >> IO_PAGE_SHIFT;
 
         entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 71da01e73f03..a50b787b3bfa 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -35,6 +35,7 @@
 #include <linux/pci_ids.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
+#include <linux/scatterlist.h>
 #include <asm/iommu.h>
 #include <asm/calgary.h>
 #include <asm/tce.h>
@@ -384,31 +385,32 @@ static void calgary_unmap_sg(struct device *dev,
                 struct scatterlist *sglist, int nelems, int direction)
 {
         struct iommu_table *tbl = find_iommu_table(dev);
+        struct scatterlist *s;
+        int i;
 
         if (!translate_phb(to_pci_dev(dev)))
                 return;
 
-        while (nelems--) {
+        for_each_sg(sglist, s, nelems, i) {
                 unsigned int npages;
-                dma_addr_t dma = sglist->dma_address;
-                unsigned int dmalen = sglist->dma_length;
+                dma_addr_t dma = s->dma_address;
+                unsigned int dmalen = s->dma_length;
 
                 if (dmalen == 0)
                         break;
 
                 npages = num_dma_pages(dma, dmalen);
                 iommu_free(tbl, dma, npages);
-                sglist++;
         }
 }
 
 static int calgary_nontranslate_map_sg(struct device* dev,
         struct scatterlist *sg, int nelems, int direction)
 {
+        struct scatterlist *s;
         int i;
 
-        for (i = 0; i < nelems; i++ ) {
-                struct scatterlist *s = &sg[i];
+        for_each_sg(sg, s, nelems, i) {
                 BUG_ON(!s->page);
                 s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
                 s->dma_length = s->length;
@@ -420,6 +422,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
                 int nelems, int direction)
 {
         struct iommu_table *tbl = find_iommu_table(dev);
+        struct scatterlist *s;
         unsigned long vaddr;
         unsigned int npages;
         unsigned long entry;
@@ -428,8 +431,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
         if (!translate_phb(to_pci_dev(dev)))
                 return calgary_nontranslate_map_sg(dev, sg, nelems, direction);
 
-        for (i = 0; i < nelems; i++ ) {
-                struct scatterlist *s = &sg[i];
+        for_each_sg(sg, s, nelems, i) {
                 BUG_ON(!s->page);
 
                 vaddr = (unsigned long)page_address(s->page) + s->offset;
@@ -454,9 +456,9 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
454 return nelems; 456 return nelems;
455error: 457error:
456 calgary_unmap_sg(dev, sg, nelems, direction); 458 calgary_unmap_sg(dev, sg, nelems, direction);
457 for (i = 0; i < nelems; i++) { 459 for_each_sg(sg, s, nelems, i) {
458 sg[i].dma_address = bad_dma_address; 460 sg->dma_address = bad_dma_address;
459 sg[i].dma_length = 0; 461 sg->dma_length = 0;
460 } 462 }
461 return 0; 463 return 0;
462} 464}
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 4918c575d582..cfcc84e6c350 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -23,6 +23,7 @@
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/bitops.h> 24#include <linux/bitops.h>
25#include <linux/kdebug.h> 25#include <linux/kdebug.h>
26#include <linux/scatterlist.h>
26#include <asm/atomic.h> 27#include <asm/atomic.h>
27#include <asm/io.h> 28#include <asm/io.h>
28#include <asm/mtrr.h> 29#include <asm/mtrr.h>
@@ -278,10 +279,10 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
278 */ 279 */
279static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) 280static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
280{ 281{
282 struct scatterlist *s;
281 int i; 283 int i;
282 284
283 for (i = 0; i < nents; i++) { 285 for_each_sg(sg, s, nents, i) {
284 struct scatterlist *s = &sg[i];
285 if (!s->dma_length || !s->length) 286 if (!s->dma_length || !s->length)
286 break; 287 break;
287 gart_unmap_single(dev, s->dma_address, s->dma_length, dir); 288 gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
@@ -292,14 +293,14 @@ static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
292static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg, 293static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
293 int nents, int dir) 294 int nents, int dir)
294{ 295{
296 struct scatterlist *s;
295 int i; 297 int i;
296 298
297#ifdef CONFIG_IOMMU_DEBUG 299#ifdef CONFIG_IOMMU_DEBUG
298 printk(KERN_DEBUG "dma_map_sg overflow\n"); 300 printk(KERN_DEBUG "dma_map_sg overflow\n");
299#endif 301#endif
300 302
301 for (i = 0; i < nents; i++ ) { 303 for_each_sg(sg, s, nents, i) {
302 struct scatterlist *s = &sg[i];
303 unsigned long addr = page_to_phys(s->page) + s->offset; 304 unsigned long addr = page_to_phys(s->page) + s->offset;
304 if (nonforced_iommu(dev, addr, s->length)) { 305 if (nonforced_iommu(dev, addr, s->length)) {
305 addr = dma_map_area(dev, addr, s->length, dir); 306 addr = dma_map_area(dev, addr, s->length, dir);
@@ -319,23 +320,23 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
319} 320}
320 321
321/* Map multiple scatterlist entries continuous into the first. */ 322/* Map multiple scatterlist entries continuous into the first. */
322static int __dma_map_cont(struct scatterlist *sg, int start, int stopat, 323static int __dma_map_cont(struct scatterlist *start, int nelems,
323 struct scatterlist *sout, unsigned long pages) 324 struct scatterlist *sout, unsigned long pages)
324{ 325{
325 unsigned long iommu_start = alloc_iommu(pages); 326 unsigned long iommu_start = alloc_iommu(pages);
326 unsigned long iommu_page = iommu_start; 327 unsigned long iommu_page = iommu_start;
328 struct scatterlist *s;
327 int i; 329 int i;
328 330
329 if (iommu_start == -1) 331 if (iommu_start == -1)
330 return -1; 332 return -1;
331 333
332 for (i = start; i < stopat; i++) { 334 for_each_sg(start, s, nelems, i) {
333 struct scatterlist *s = &sg[i];
334 unsigned long pages, addr; 335 unsigned long pages, addr;
335 unsigned long phys_addr = s->dma_address; 336 unsigned long phys_addr = s->dma_address;
336 337
337 BUG_ON(i > start && s->offset); 338 BUG_ON(s != start && s->offset);
338 if (i == start) { 339 if (s == start) {
339 *sout = *s; 340 *sout = *s;
340 sout->dma_address = iommu_bus_base; 341 sout->dma_address = iommu_bus_base;
341 sout->dma_address += iommu_page*PAGE_SIZE + s->offset; 342 sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
@@ -357,17 +358,17 @@ static int __dma_map_cont(struct scatterlist *sg, int start, int stopat,
357 return 0; 358 return 0;
358} 359}
359 360
360static inline int dma_map_cont(struct scatterlist *sg, int start, int stopat, 361static inline int dma_map_cont(struct scatterlist *start, int nelems,
361 struct scatterlist *sout, 362 struct scatterlist *sout,
362 unsigned long pages, int need) 363 unsigned long pages, int need)
363{ 364{
364 if (!need) { 365 if (!need) {
365 BUG_ON(stopat - start != 1); 366 BUG_ON(nelems != 1);
366 *sout = sg[start]; 367 *sout = *start;
367 sout->dma_length = sg[start].length; 368 sout->dma_length = start->length;
368 return 0; 369 return 0;
369 } 370 }
370 return __dma_map_cont(sg, start, stopat, sout, pages); 371 return __dma_map_cont(start, nelems, sout, pages);
371} 372}
372 373
373/* 374/*
@@ -381,6 +382,7 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
381 int start; 382 int start;
382 unsigned long pages = 0; 383 unsigned long pages = 0;
383 int need = 0, nextneed; 384 int need = 0, nextneed;
385 struct scatterlist *s, *ps, *start_sg, *sgmap;
384 386
385 if (nents == 0) 387 if (nents == 0)
386 return 0; 388 return 0;
@@ -390,8 +392,9 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
390 392
391 out = 0; 393 out = 0;
392 start = 0; 394 start = 0;
393 for (i = 0; i < nents; i++) { 395 start_sg = sgmap = sg;
394 struct scatterlist *s = &sg[i]; 396 ps = NULL; /* shut up gcc */
397 for_each_sg(sg, s, nents, i) {
395 dma_addr_t addr = page_to_phys(s->page) + s->offset; 398 dma_addr_t addr = page_to_phys(s->page) + s->offset;
396 s->dma_address = addr; 399 s->dma_address = addr;
397 BUG_ON(s->length == 0); 400 BUG_ON(s->length == 0);
@@ -400,29 +403,33 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
400 403
401 /* Handle the previous not yet processed entries */ 404 /* Handle the previous not yet processed entries */
402 if (i > start) { 405 if (i > start) {
403 struct scatterlist *ps = &sg[i-1];
404 /* Can only merge when the last chunk ends on a page 406 /* Can only merge when the last chunk ends on a page
405 boundary and the new one doesn't have an offset. */ 407 boundary and the new one doesn't have an offset. */
406 if (!iommu_merge || !nextneed || !need || s->offset || 408 if (!iommu_merge || !nextneed || !need || s->offset ||
407 (ps->offset + ps->length) % PAGE_SIZE) { 409 (ps->offset + ps->length) % PAGE_SIZE) {
408 if (dma_map_cont(sg, start, i, sg+out, pages, 410 if (dma_map_cont(start_sg, i - start, sgmap,
409 need) < 0) 411 pages, need) < 0)
410 goto error; 412 goto error;
411 out++; 413 out++;
414 sgmap = sg_next(sgmap);
412 pages = 0; 415 pages = 0;
413 start = i; 416 start = i;
417 start_sg = s;
414 } 418 }
415 } 419 }
416 420
417 need = nextneed; 421 need = nextneed;
418 pages += to_pages(s->offset, s->length); 422 pages += to_pages(s->offset, s->length);
423 ps = s;
419 } 424 }
420 if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0) 425 if (dma_map_cont(start_sg, i - start, sgmap, pages, need) < 0)
421 goto error; 426 goto error;
422 out++; 427 out++;
423 flush_gart(); 428 flush_gart();
424 if (out < nents) 429 if (out < nents) {
425 sg[out].dma_length = 0; 430 sgmap = sg_next(sgmap);
431 sgmap->dma_length = 0;
432 }
426 return out; 433 return out;
427 434
428error: 435error:
@@ -437,8 +444,8 @@ error:
437 if (panic_on_overflow) 444 if (panic_on_overflow)
438 panic("dma_map_sg: overflow on %lu pages\n", pages); 445 panic("dma_map_sg: overflow on %lu pages\n", pages);
439 iommu_full(dev, pages << PAGE_SHIFT, dir); 446 iommu_full(dev, pages << PAGE_SHIFT, dir);
440 for (i = 0; i < nents; i++) 447 for_each_sg(sg, s, nents, i)
441 sg[i].dma_address = bad_dma_address; 448 s->dma_address = bad_dma_address;
442 return 0; 449 return 0;
443} 450}
444 451
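Beyond the loop conversions, the GART rework changes how a sub-range of the list is named: dma_map_cont() used to take the base array plus start/stop indices, which presumes one flat array, and it now takes a starting pointer plus an element count, with the caller tracking start_sg and advancing the output slot via sg_next(sgmap) instead of computing sg + out. A toy illustration of the pointer-plus-count interface; the seg type and run_bytes() are invented stand-ins for chained scatterlist entries:

#include <stdio.h>

struct seg {
    unsigned int len;
    struct seg *next;
};

/* A run is named by its first element and a count, never by indices. */
static unsigned int run_bytes(struct seg *start, int nelems)
{
    unsigned int total = 0;
    struct seg *s = start;
    int i;

    for (i = 0; i < nelems; i++, s = s->next)
        total += s->len;
    return total;
}

int main(void)
{
    struct seg c = { 512, NULL };
    struct seg b = { 4096, &c };
    struct seg a = { 4096, &b };

    /* "&b for 2 entries" replaces "indices 1..3 of one array". */
    printf("run covers %u bytes\n", run_bytes(&b, 2));
    return 0;
}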
diff --git a/arch/x86/kernel/pci-nommu_64.c b/arch/x86/kernel/pci-nommu_64.c
index 2a34c6c025a9..e85d4360360c 100644
--- a/arch/x86/kernel/pci-nommu_64.c
+++ b/arch/x86/kernel/pci-nommu_64.c
@@ -5,6 +5,7 @@
5#include <linux/pci.h> 5#include <linux/pci.h>
6#include <linux/string.h> 6#include <linux/string.h>
7#include <linux/dma-mapping.h> 7#include <linux/dma-mapping.h>
8#include <linux/scatterlist.h>
8 9
9#include <asm/iommu.h> 10#include <asm/iommu.h>
10#include <asm/processor.h> 11#include <asm/processor.h>
@@ -57,10 +58,10 @@ static void nommu_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
57static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg, 58static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
58 int nents, int direction) 59 int nents, int direction)
59{ 60{
61 struct scatterlist *s;
60 int i; 62 int i;
61 63
62 for (i = 0; i < nents; i++ ) { 64 for_each_sg(sg, s, nents, i) {
63 struct scatterlist *s = &sg[i];
64 BUG_ON(!s->page); 65 BUG_ON(!s->page);
65 s->dma_address = virt_to_bus(page_address(s->page) +s->offset); 66 s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
66 if (!check_addr("map_sg", hwdev, s->dma_address, s->length)) 67 if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
diff --git a/block/bsg.c b/block/bsg.c
index b8ddfc66f210..8e181ab3afb9 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -908,7 +908,7 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
908 } 908 }
909} 909}
910 910
911static struct file_operations bsg_fops = { 911static const struct file_operations bsg_fops = {
912 .read = bsg_read, 912 .read = bsg_read,
913 .write = bsg_write, 913 .write = bsg_write,
914 .poll = bsg_poll, 914 .poll = bsg_poll,
diff --git a/block/elevator.c b/block/elevator.c
index b9c518afe1f8..952aee04a68a 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -712,6 +712,14 @@ struct request *elv_next_request(struct request_queue *q)
712 int ret; 712 int ret;
713 713
714 while ((rq = __elv_next_request(q)) != NULL) { 714 while ((rq = __elv_next_request(q)) != NULL) {
715 /*
716 * Kill the empty barrier place holder, the driver must
717 * not ever see it.
718 */
719 if (blk_empty_barrier(rq)) {
720 end_queued_request(rq, 1);
721 continue;
722 }
715 if (!(rq->cmd_flags & REQ_STARTED)) { 723 if (!(rq->cmd_flags & REQ_STARTED)) {
716 /* 724 /*
717 * This is the first time the device driver 725 * This is the first time the device driver
@@ -751,15 +759,8 @@ struct request *elv_next_request(struct request_queue *q)
751 rq = NULL; 759 rq = NULL;
752 break; 760 break;
753 } else if (ret == BLKPREP_KILL) { 761 } else if (ret == BLKPREP_KILL) {
754 int nr_bytes = rq->hard_nr_sectors << 9;
755
756 if (!nr_bytes)
757 nr_bytes = rq->data_len;
758
759 blkdev_dequeue_request(rq);
760 rq->cmd_flags |= REQ_QUIET; 762 rq->cmd_flags |= REQ_QUIET;
761 end_that_request_chunk(rq, 0, nr_bytes); 763 end_queued_request(rq, 0);
762 end_that_request_last(rq, 0);
763 } else { 764 } else {
764 printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__, 765 printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
765 ret); 766 ret);
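Two behavioural notes on this elevator hunk: empty barrier placeholders (barrier requests carrying no data) are now terminated inside elv_next_request() itself, so drivers never see them; and the BLKPREP_KILL branch no longer open-codes the byte count and the two-step dequeue/complete dance, it defers to end_queued_request(), the helper introduced later in this same series in ll_rw_blk.c.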
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index a83823fcd74f..9eabac95fbe0 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -30,6 +30,7 @@
30#include <linux/cpu.h> 30#include <linux/cpu.h>
31#include <linux/blktrace_api.h> 31#include <linux/blktrace_api.h>
32#include <linux/fault-inject.h> 32#include <linux/fault-inject.h>
33#include <linux/scatterlist.h>
33 34
34/* 35/*
35 * for max sense size 36 * for max sense size
@@ -304,23 +305,6 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
304 305
305EXPORT_SYMBOL(blk_queue_ordered); 306EXPORT_SYMBOL(blk_queue_ordered);
306 307
307/**
308 * blk_queue_issue_flush_fn - set function for issuing a flush
309 * @q: the request queue
310 * @iff: the function to be called issuing the flush
311 *
312 * Description:
313 * If a driver supports issuing a flush command, the support is notified
314 * to the block layer by defining it through this call.
315 *
316 **/
317void blk_queue_issue_flush_fn(struct request_queue *q, issue_flush_fn *iff)
318{
319 q->issue_flush_fn = iff;
320}
321
322EXPORT_SYMBOL(blk_queue_issue_flush_fn);
323
324/* 308/*
325 * Cache flushing for ordered writes handling 309 * Cache flushing for ordered writes handling
326 */ 310 */
@@ -377,10 +361,12 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
377 /* 361 /*
378 * Okay, sequence complete. 362 * Okay, sequence complete.
379 */ 363 */
380 rq = q->orig_bar_rq; 364 uptodate = 1;
381 uptodate = q->orderr ? q->orderr : 1; 365 if (q->orderr)
366 uptodate = q->orderr;
382 367
383 q->ordseq = 0; 368 q->ordseq = 0;
369 rq = q->orig_bar_rq;
384 370
385 end_that_request_first(rq, uptodate, rq->hard_nr_sectors); 371 end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
386 end_that_request_last(rq, uptodate); 372 end_that_request_last(rq, uptodate);
@@ -445,7 +431,8 @@ static inline struct request *start_ordered(struct request_queue *q,
445 rq_init(q, rq); 431 rq_init(q, rq);
446 if (bio_data_dir(q->orig_bar_rq->bio) == WRITE) 432 if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
447 rq->cmd_flags |= REQ_RW; 433 rq->cmd_flags |= REQ_RW;
448 rq->cmd_flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0; 434 if (q->ordered & QUEUE_ORDERED_FUA)
435 rq->cmd_flags |= REQ_FUA;
449 rq->elevator_private = NULL; 436 rq->elevator_private = NULL;
450 rq->elevator_private2 = NULL; 437 rq->elevator_private2 = NULL;
451 init_request_from_bio(rq, q->orig_bar_rq->bio); 438 init_request_from_bio(rq, q->orig_bar_rq->bio);
@@ -455,9 +442,12 @@ static inline struct request *start_ordered(struct request_queue *q,
455 * Queue ordered sequence. As we stack them at the head, we 442 * Queue ordered sequence. As we stack them at the head, we
456 * need to queue in reverse order. Note that we rely on that 443 * need to queue in reverse order. Note that we rely on that
457 * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs 444 * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
458 * request gets inbetween ordered sequence. 445 * request gets inbetween ordered sequence. If this request is
446 * an empty barrier, we don't need to do a postflush ever since
447 * there will be no data written between the pre and post flush.
448 * Hence a single flush will suffice.
459 */ 449 */
460 if (q->ordered & QUEUE_ORDERED_POSTFLUSH) 450 if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
461 queue_flush(q, QUEUE_ORDERED_POSTFLUSH); 451 queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
462 else 452 else
463 q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH; 453 q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
@@ -481,7 +471,7 @@ static inline struct request *start_ordered(struct request_queue *q,
481int blk_do_ordered(struct request_queue *q, struct request **rqp) 471int blk_do_ordered(struct request_queue *q, struct request **rqp)
482{ 472{
483 struct request *rq = *rqp; 473 struct request *rq = *rqp;
484 int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq); 474 const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
485 475
486 if (!q->ordseq) { 476 if (!q->ordseq) {
487 if (!is_barrier) 477 if (!is_barrier)
@@ -1329,9 +1319,10 @@ static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
1329 * must make sure sg can hold rq->nr_phys_segments entries 1319 * must make sure sg can hold rq->nr_phys_segments entries
1330 */ 1320 */
1331int blk_rq_map_sg(struct request_queue *q, struct request *rq, 1321int blk_rq_map_sg(struct request_queue *q, struct request *rq,
1332 struct scatterlist *sg) 1322 struct scatterlist *sglist)
1333{ 1323{
1334 struct bio_vec *bvec, *bvprv; 1324 struct bio_vec *bvec, *bvprv;
1325 struct scatterlist *next_sg, *sg;
1335 struct req_iterator iter; 1326 struct req_iterator iter;
1336 int nsegs, cluster; 1327 int nsegs, cluster;
1337 1328
@@ -1342,11 +1333,12 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
1342 * for each bio in rq 1333 * for each bio in rq
1343 */ 1334 */
1344 bvprv = NULL; 1335 bvprv = NULL;
1336 sg = next_sg = &sglist[0];
1345 rq_for_each_segment(bvec, rq, iter) { 1337 rq_for_each_segment(bvec, rq, iter) {
1346 int nbytes = bvec->bv_len; 1338 int nbytes = bvec->bv_len;
1347 1339
1348 if (bvprv && cluster) { 1340 if (bvprv && cluster) {
1349 if (sg[nsegs - 1].length + nbytes > q->max_segment_size) 1341 if (sg->length + nbytes > q->max_segment_size)
1350 goto new_segment; 1342 goto new_segment;
1351 1343
1352 if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) 1344 if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
@@ -1354,14 +1346,15 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
1354 if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec)) 1346 if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
1355 goto new_segment; 1347 goto new_segment;
1356 1348
1357 sg[nsegs - 1].length += nbytes; 1349 sg->length += nbytes;
1358 } else { 1350 } else {
1359new_segment: 1351new_segment:
1360 memset(&sg[nsegs],0,sizeof(struct scatterlist)); 1352 sg = next_sg;
1361 sg[nsegs].page = bvec->bv_page; 1353 next_sg = sg_next(sg);
1362 sg[nsegs].length = nbytes;
1363 sg[nsegs].offset = bvec->bv_offset;
1364 1354
1355 sg->page = bvec->bv_page;
1356 sg->length = nbytes;
1357 sg->offset = bvec->bv_offset;
1365 nsegs++; 1358 nsegs++;
1366 } 1359 }
1367 bvprv = bvec; 1360 bvprv = bvec;
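blk_rq_map_sg() used to index the output table as sg[nsegs] and memset each new entry on the spot; it now writes through a moving cursor advanced with sg_next(), which is why the series also makes callers hand in pre-zeroed tables (see the memset added in cciss and the kmalloc-to-kzalloc switch in ide-probe further down). A reduced model of that output walk follows, assuming a made-up MAX_SEG limit and length-only segments; the real function additionally checks physical mergeability and segment boundaries:

#include <stdio.h>
#include <string.h>

struct sg { unsigned int length; };

#define MAX_SEG 8192

/* Coalesce byte chunks into segments no larger than MAX_SEG, writing
 * through a moving output cursor rather than indexing out[nsegs]. */
static int map_chunks(const unsigned int *chunk, int n, struct sg *out)
{
    struct sg *sg = NULL;
    int nsegs = 0;

    for (int i = 0; i < n; i++) {
        if (sg && sg->length + chunk[i] <= MAX_SEG) {
            sg->length += chunk[i];     /* cluster into current segment */
        } else {
            sg = nsegs ? sg + 1 : out;  /* advance the cursor */
            sg->length = chunk[i];
            nsegs++;
        }
    }
    return nsegs;
}

int main(void)
{
    unsigned int chunks[] = { 4096, 4096, 4096, 512 };
    struct sg table[4];

    /* Zeroed up front to mirror the new caller convention. */
    memset(table, 0, sizeof(table));
    int n = map_chunks(chunks, 4, table);
    printf("%d segments, first is %u bytes\n", n, table[0].length);
    return 0;
}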
@@ -2660,6 +2653,14 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
2660 2653
2661EXPORT_SYMBOL(blk_execute_rq); 2654EXPORT_SYMBOL(blk_execute_rq);
2662 2655
2656static void bio_end_empty_barrier(struct bio *bio, int err)
2657{
2658 if (err)
2659 clear_bit(BIO_UPTODATE, &bio->bi_flags);
2660
2661 complete(bio->bi_private);
2662}
2663
2663/** 2664/**
2664 * blkdev_issue_flush - queue a flush 2665 * blkdev_issue_flush - queue a flush
2665 * @bdev: blockdev to issue flush for 2666 * @bdev: blockdev to issue flush for
@@ -2672,7 +2673,10 @@ EXPORT_SYMBOL(blk_execute_rq);
2672 */ 2673 */
2673int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector) 2674int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
2674{ 2675{
2676 DECLARE_COMPLETION_ONSTACK(wait);
2675 struct request_queue *q; 2677 struct request_queue *q;
2678 struct bio *bio;
2679 int ret;
2676 2680
2677 if (bdev->bd_disk == NULL) 2681 if (bdev->bd_disk == NULL)
2678 return -ENXIO; 2682 return -ENXIO;
@@ -2680,10 +2684,32 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
2680 q = bdev_get_queue(bdev); 2684 q = bdev_get_queue(bdev);
2681 if (!q) 2685 if (!q)
2682 return -ENXIO; 2686 return -ENXIO;
2683 if (!q->issue_flush_fn)
2684 return -EOPNOTSUPP;
2685 2687
2686 return q->issue_flush_fn(q, bdev->bd_disk, error_sector); 2688 bio = bio_alloc(GFP_KERNEL, 0);
2689 if (!bio)
2690 return -ENOMEM;
2691
2692 bio->bi_end_io = bio_end_empty_barrier;
2693 bio->bi_private = &wait;
2694 bio->bi_bdev = bdev;
2695 submit_bio(1 << BIO_RW_BARRIER, bio);
2696
2697 wait_for_completion(&wait);
2698
2699 /*
2700 * The driver must store the error location in ->bi_sector, if
2701 * it supports it. For non-stacked drivers, this should be copied
2702 * from rq->sector.
2703 */
2704 if (error_sector)
2705 *error_sector = bio->bi_sector;
2706
2707 ret = 0;
2708 if (!bio_flagged(bio, BIO_UPTODATE))
2709 ret = -EIO;
2710
2711 bio_put(bio);
2712 return ret;
2687} 2713}
2688 2714
2689EXPORT_SYMBOL(blkdev_issue_flush); 2715EXPORT_SYMBOL(blkdev_issue_flush);
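The rewritten blkdev_issue_flush() no longer needs a driver hook at all: it allocates a zero-length barrier bio, submits it, and sleeps on an on-stack completion that the bio's end_io callback fires. Here is a userspace model of that submit-and-wait pattern, using pthreads as stand-ins for the kernel completion API (io_end() plays the role of bio_end_empty_barrier()):

#include <pthread.h>
#include <stdio.h>

struct completion {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    int             done;
};

static void complete(struct completion *c)
{
    pthread_mutex_lock(&c->lock);
    c->done = 1;
    pthread_cond_signal(&c->cond);
    pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
    pthread_mutex_lock(&c->lock);
    while (!c->done)
        pthread_cond_wait(&c->cond, &c->lock);
    pthread_mutex_unlock(&c->lock);
}

static void *io_end(void *arg)      /* the "end_io" callback */
{
    complete(arg);
    return NULL;
}

int main(void)
{
    struct completion wait = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
    };
    pthread_t t;

    pthread_create(&t, NULL, io_end, &wait);    /* plays submit_bio() */
    wait_for_completion(&wait);                 /* block until end_io fires */
    pthread_join(t, NULL);
    puts("flush complete");
    return 0;
}

Build with cc -o flush_model flush_model.c -lpthread. The error-sector convention in the comment above (drivers report the failed sector in ->bi_sector) is why the caller reads bi_sector back only after the wait returns.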
@@ -3051,7 +3077,7 @@ static inline void blk_partition_remap(struct bio *bio)
3051{ 3077{
3052 struct block_device *bdev = bio->bi_bdev; 3078 struct block_device *bdev = bio->bi_bdev;
3053 3079
3054 if (bdev != bdev->bd_contains) { 3080 if (bio_sectors(bio) && bdev != bdev->bd_contains) {
3055 struct hd_struct *p = bdev->bd_part; 3081 struct hd_struct *p = bdev->bd_part;
3056 const int rw = bio_data_dir(bio); 3082 const int rw = bio_data_dir(bio);
3057 3083
@@ -3117,6 +3143,35 @@ static inline int should_fail_request(struct bio *bio)
3117 3143
3118#endif /* CONFIG_FAIL_MAKE_REQUEST */ 3144#endif /* CONFIG_FAIL_MAKE_REQUEST */
3119 3145
3146/*
3147 * Check whether this bio extends beyond the end of the device.
3148 */
3149static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
3150{
3151 sector_t maxsector;
3152
3153 if (!nr_sectors)
3154 return 0;
3155
3156 /* Test device or partition size, when known. */
3157 maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
3158 if (maxsector) {
3159 sector_t sector = bio->bi_sector;
3160
3161 if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
3162 /*
3163 * This may well happen - the kernel calls bread()
3164 * without checking the size of the device, e.g., when
3165 * mounting a device.
3166 */
3167 handle_bad_sector(bio);
3168 return 1;
3169 }
3170 }
3171
3172 return 0;
3173}
3174
3120/** 3175/**
3121 * generic_make_request: hand a buffer to its device driver for I/O 3176 * generic_make_request: hand a buffer to its device driver for I/O
3122 * @bio: The bio describing the location in memory and on the device. 3177 * @bio: The bio describing the location in memory and on the device.
@@ -3144,27 +3199,14 @@ static inline int should_fail_request(struct bio *bio)
3144static inline void __generic_make_request(struct bio *bio) 3199static inline void __generic_make_request(struct bio *bio)
3145{ 3200{
3146 struct request_queue *q; 3201 struct request_queue *q;
3147 sector_t maxsector;
3148 sector_t old_sector; 3202 sector_t old_sector;
3149 int ret, nr_sectors = bio_sectors(bio); 3203 int ret, nr_sectors = bio_sectors(bio);
3150 dev_t old_dev; 3204 dev_t old_dev;
3151 3205
3152 might_sleep(); 3206 might_sleep();
3153 /* Test device or partition size, when known. */
3154 maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
3155 if (maxsector) {
3156 sector_t sector = bio->bi_sector;
3157 3207
3158 if (maxsector < nr_sectors || maxsector - nr_sectors < sector) { 3208 if (bio_check_eod(bio, nr_sectors))
3159 /* 3209 goto end_io;
3160 * This may well happen - the kernel calls bread()
3161 * without checking the size of the device, e.g., when
3162 * mounting a device.
3163 */
3164 handle_bad_sector(bio);
3165 goto end_io;
3166 }
3167 }
3168 3210
3169 /* 3211 /*
3170 * Resolve the mapping until finished. (drivers are 3212 * Resolve the mapping until finished. (drivers are
@@ -3191,7 +3233,7 @@ end_io:
3191 break; 3233 break;
3192 } 3234 }
3193 3235
3194 if (unlikely(bio_sectors(bio) > q->max_hw_sectors)) { 3236 if (unlikely(nr_sectors > q->max_hw_sectors)) {
3195 printk("bio too big device %s (%u > %u)\n", 3237 printk("bio too big device %s (%u > %u)\n",
3196 bdevname(bio->bi_bdev, b), 3238 bdevname(bio->bi_bdev, b),
3197 bio_sectors(bio), 3239 bio_sectors(bio),
@@ -3212,7 +3254,7 @@ end_io:
3212 blk_partition_remap(bio); 3254 blk_partition_remap(bio);
3213 3255
3214 if (old_sector != -1) 3256 if (old_sector != -1)
3215 blk_add_trace_remap(q, bio, old_dev, bio->bi_sector, 3257 blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
3216 old_sector); 3258 old_sector);
3217 3259
3218 blk_add_trace_bio(q, bio, BLK_TA_QUEUE); 3260 blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
@@ -3220,21 +3262,8 @@ end_io:
3220 old_sector = bio->bi_sector; 3262 old_sector = bio->bi_sector;
3221 old_dev = bio->bi_bdev->bd_dev; 3263 old_dev = bio->bi_bdev->bd_dev;
3222 3264
3223 maxsector = bio->bi_bdev->bd_inode->i_size >> 9; 3265 if (bio_check_eod(bio, nr_sectors))
3224 if (maxsector) { 3266 goto end_io;
3225 sector_t sector = bio->bi_sector;
3226
3227 if (maxsector < nr_sectors ||
3228 maxsector - nr_sectors < sector) {
3229 /*
3230 * This may well happen - partitions are not
3231 * checked to make sure they are within the size
3232 * of the whole device.
3233 */
3234 handle_bad_sector(bio);
3235 goto end_io;
3236 }
3237 }
3238 3267
3239 ret = q->make_request_fn(q, bio); 3268 ret = q->make_request_fn(q, bio);
3240 } while (ret); 3269 } while (ret);
@@ -3307,23 +3336,32 @@ void submit_bio(int rw, struct bio *bio)
3307{ 3336{
3308 int count = bio_sectors(bio); 3337 int count = bio_sectors(bio);
3309 3338
3310 BIO_BUG_ON(!bio->bi_size);
3311 BIO_BUG_ON(!bio->bi_io_vec);
3312 bio->bi_rw |= rw; 3339 bio->bi_rw |= rw;
3313 if (rw & WRITE) {
3314 count_vm_events(PGPGOUT, count);
3315 } else {
3316 task_io_account_read(bio->bi_size);
3317 count_vm_events(PGPGIN, count);
3318 }
3319 3340
3320 if (unlikely(block_dump)) { 3341 /*
3321 char b[BDEVNAME_SIZE]; 3342 * If it's a regular read/write or a barrier with data attached,
3322 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n", 3343 * go through the normal accounting stuff before submission.
3323 current->comm, current->pid, 3344 */
3324 (rw & WRITE) ? "WRITE" : "READ", 3345 if (!bio_empty_barrier(bio)) {
3325 (unsigned long long)bio->bi_sector, 3346
3326 bdevname(bio->bi_bdev,b)); 3347 BIO_BUG_ON(!bio->bi_size);
3348 BIO_BUG_ON(!bio->bi_io_vec);
3349
3350 if (rw & WRITE) {
3351 count_vm_events(PGPGOUT, count);
3352 } else {
3353 task_io_account_read(bio->bi_size);
3354 count_vm_events(PGPGIN, count);
3355 }
3356
3357 if (unlikely(block_dump)) {
3358 char b[BDEVNAME_SIZE];
3359 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
3360 current->comm, current->pid,
3361 (rw & WRITE) ? "WRITE" : "READ",
3362 (unsigned long long)bio->bi_sector,
3363 bdevname(bio->bi_bdev,b));
3364 }
3327 } 3365 }
3328 3366
3329 generic_make_request(bio); 3367 generic_make_request(bio);
@@ -3399,6 +3437,14 @@ static int __end_that_request_first(struct request *req, int uptodate,
3399 while ((bio = req->bio) != NULL) { 3437 while ((bio = req->bio) != NULL) {
3400 int nbytes; 3438 int nbytes;
3401 3439
3440 /*
3441 * For an empty barrier request, the low level driver must
3442 * store a potential error location in ->sector. We pass
3443 * that back up in ->bi_sector.
3444 */
3445 if (blk_empty_barrier(req))
3446 bio->bi_sector = req->sector;
3447
3402 if (nr_bytes >= bio->bi_size) { 3448 if (nr_bytes >= bio->bi_size) {
3403 req->bio = bio->bi_next; 3449 req->bio = bio->bi_next;
3404 nbytes = bio->bi_size; 3450 nbytes = bio->bi_size;
@@ -3564,7 +3610,7 @@ static struct notifier_block blk_cpu_notifier __cpuinitdata = {
3564 * Description: 3610 * Description:
3565 * Ends all I/O on a request. It does not handle partial completions, 3611 * Ends all I/O on a request. It does not handle partial completions,
3566 * unless the driver actually implements this in its completion callback 3612 * unless the driver actually implements this in its completion callback
3567 * through requeueing. Theh actual completion happens out-of-order, 3613 * through requeueing. The actual completion happens out-of-order,
3568 * through a softirq handler. The user must have registered a completion 3614 * through a softirq handler. The user must have registered a completion
3569 * callback through blk_queue_softirq_done(). 3615 * callback through blk_queue_softirq_done().
3570 **/ 3616 **/
@@ -3627,15 +3673,83 @@ void end_that_request_last(struct request *req, int uptodate)
3627 3673
3628EXPORT_SYMBOL(end_that_request_last); 3674EXPORT_SYMBOL(end_that_request_last);
3629 3675
3630void end_request(struct request *req, int uptodate) 3676static inline void __end_request(struct request *rq, int uptodate,
3677 unsigned int nr_bytes, int dequeue)
3631{ 3678{
3632 if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) { 3679 if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
3633 add_disk_randomness(req->rq_disk); 3680 if (dequeue)
3634 blkdev_dequeue_request(req); 3681 blkdev_dequeue_request(rq);
3635 end_that_request_last(req, uptodate); 3682 add_disk_randomness(rq->rq_disk);
3683 end_that_request_last(rq, uptodate);
3636 } 3684 }
3637} 3685}
3638 3686
3687static unsigned int rq_byte_size(struct request *rq)
3688{
3689 if (blk_fs_request(rq))
3690 return rq->hard_nr_sectors << 9;
3691
3692 return rq->data_len;
3693}
3694
3695/**
3696 * end_queued_request - end all I/O on a queued request
3697 * @rq: the request being processed
3698 * @uptodate: error value or 0/1 uptodate flag
3699 *
3700 * Description:
3701 * Ends all I/O on a request, and removes it from the block layer queues.
3702 * Not suitable for normal IO completion, unless the driver still has
3703 * the request attached to the block layer.
3704 *
3705 **/
3706void end_queued_request(struct request *rq, int uptodate)
3707{
3708 __end_request(rq, uptodate, rq_byte_size(rq), 1);
3709}
3710EXPORT_SYMBOL(end_queued_request);
3711
3712/**
3713 * end_dequeued_request - end all I/O on a dequeued request
3714 * @rq: the request being processed
3715 * @uptodate: error value or 0/1 uptodate flag
3716 *
3717 * Description:
3718 * Ends all I/O on a request. The request must already have been
3719 * dequeued using blkdev_dequeue_request(), as is normally the case
3720 * for most drivers.
3721 *
3722 **/
3723void end_dequeued_request(struct request *rq, int uptodate)
3724{
3725 __end_request(rq, uptodate, rq_byte_size(rq), 0);
3726}
3727EXPORT_SYMBOL(end_dequeued_request);
3728
3729
3730/**
3731 * end_request - end I/O on the current segment of the request
3732 * @rq: the request being processed
3733 * @uptodate: error value or 0/1 uptodate flag
3734 *
3735 * Description:
3736 * Ends I/O on the current segment of a request. If that is the only
3737 * remaining segment, the request is also completed and freed.
3738 *
3739 * This is a remnant of how older block drivers handled IO completions.
3740 * Modern drivers typically end IO on the full request in one go, unless
3741 * they have a residual value to account for. For that case this function
3742 * isn't really useful, unless the residual just happens to be the
3743 * full current segment. In other words, don't use this function in new
3744 * code. Either use end_request_completely(), or the
3745 * end_that_request_chunk() (along with end_that_request_last()) for
3746 * partial completions.
3747 *
3748 **/
3749void end_request(struct request *req, int uptodate)
3750{
3751 __end_request(req, uptodate, req->hard_cur_sectors << 9, 1);
3752}
3639EXPORT_SYMBOL(end_request); 3753EXPORT_SYMBOL(end_request);
3640 3754
3641static void blk_rq_bio_prep(struct request_queue *q, struct request *rq, 3755static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
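The three exported helpers above are thin wrappers around one worker: __end_request() varies only in how many bytes it completes and whether it still has to dequeue the request. A compile-and-run sketch of that factoring, with stand-in types and a printf in place of the real completion calls:

#include <stdio.h>

struct request { int queued; unsigned int bytes; };

static void __end_request(struct request *rq, int uptodate,
                          unsigned int nr_bytes, int dequeue)
{
    if (dequeue)
        rq->queued = 0;     /* plays blkdev_dequeue_request() */
    printf("completed %u bytes, uptodate=%d, still queued=%d\n",
           nr_bytes, uptodate, rq->queued);
}

static void end_queued_request(struct request *rq, int uptodate)
{
    __end_request(rq, uptodate, rq->bytes, 1);
}

static void end_dequeued_request(struct request *rq, int uptodate)
{
    __end_request(rq, uptodate, rq->bytes, 0);
}

int main(void)
{
    struct request a = { 1, 4096 }, b = { 0, 512 };

    end_queued_request(&a, 1);      /* elevator-style: dequeue for caller */
    end_dequeued_request(&b, 1);    /* normal driver path: already dequeued */
    return 0;
}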
@@ -3949,7 +4063,23 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
3949 return queue_var_show(max_hw_sectors_kb, (page)); 4063 return queue_var_show(max_hw_sectors_kb, (page));
3950} 4064}
3951 4065
4066static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
4067{
4068 return queue_var_show(q->max_phys_segments, page);
4069}
4070
4071static ssize_t queue_max_segments_store(struct request_queue *q,
4072 const char *page, size_t count)
4073{
4074 unsigned long segments;
4075 ssize_t ret = queue_var_store(&segments, page, count);
4076
4077 spin_lock_irq(q->queue_lock);
4078 q->max_phys_segments = segments;
4079 spin_unlock_irq(q->queue_lock);
3952 4080
4081 return ret;
4082}
3953static struct queue_sysfs_entry queue_requests_entry = { 4083static struct queue_sysfs_entry queue_requests_entry = {
3954 .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR }, 4084 .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
3955 .show = queue_requests_show, 4085 .show = queue_requests_show,
@@ -3973,6 +4103,12 @@ static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
3973 .show = queue_max_hw_sectors_show, 4103 .show = queue_max_hw_sectors_show,
3974}; 4104};
3975 4105
4106static struct queue_sysfs_entry queue_max_segments_entry = {
4107 .attr = {.name = "max_segments", .mode = S_IRUGO | S_IWUSR },
4108 .show = queue_max_segments_show,
4109 .store = queue_max_segments_store,
4110};
4111
3976static struct queue_sysfs_entry queue_iosched_entry = { 4112static struct queue_sysfs_entry queue_iosched_entry = {
3977 .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR }, 4113 .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
3978 .show = elv_iosched_show, 4114 .show = elv_iosched_show,
@@ -3984,6 +4120,7 @@ static struct attribute *default_attrs[] = {
3984 &queue_ra_entry.attr, 4120 &queue_ra_entry.attr,
3985 &queue_max_hw_sectors_entry.attr, 4121 &queue_max_hw_sectors_entry.attr,
3986 &queue_max_sectors_entry.attr, 4122 &queue_max_sectors_entry.attr,
4123 &queue_max_segments_entry.attr,
3987 &queue_iosched_entry.attr, 4124 &queue_iosched_entry.attr,
3988 NULL, 4125 NULL,
3989}; 4126};
diff --git a/crypto/digest.c b/crypto/digest.c
index 1bf7414aeb9e..e56de6748b15 100644
--- a/crypto/digest.c
+++ b/crypto/digest.c
@@ -77,7 +77,7 @@ static int update2(struct hash_desc *desc,
77 77
78 if (!nbytes) 78 if (!nbytes)
79 break; 79 break;
80 sg = sg_next(sg); 80 sg = scatterwalk_sg_next(sg);
81 } 81 }
82 82
83 return 0; 83 return 0;
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 3052f6507f53..d6852c33cfb7 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -62,7 +62,7 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
62 walk->offset += PAGE_SIZE - 1; 62 walk->offset += PAGE_SIZE - 1;
63 walk->offset &= PAGE_MASK; 63 walk->offset &= PAGE_MASK;
64 if (walk->offset >= walk->sg->offset + walk->sg->length) 64 if (walk->offset >= walk->sg->offset + walk->sg->length)
65 scatterwalk_start(walk, sg_next(walk->sg)); 65 scatterwalk_start(walk, scatterwalk_sg_next(walk->sg));
66 } 66 }
67} 67}
68 68
diff --git a/crypto/scatterwalk.h b/crypto/scatterwalk.h
index 500a220ad908..9c73e37a42ce 100644
--- a/crypto/scatterwalk.h
+++ b/crypto/scatterwalk.h
@@ -20,7 +20,7 @@
20 20
21#include "internal.h" 21#include "internal.h"
22 22
23static inline struct scatterlist *sg_next(struct scatterlist *sg) 23static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
24{ 24{
25 return (++sg)->length ? sg : (void *)sg->page; 25 return (++sg)->length ? sg : (void *)sg->page;
26} 26}
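The crypto layer keeps its own private chaining convention, predating the generic one: a zero-length entry whose page pointer names the next list. The rename from sg_next() to scatterwalk_sg_next() exists purely to keep that helper from colliding with the new global sg_next() in <linux/scatterlist.h>; the expression itself is unchanged, as this reduced, runnable model shows (simplified two-field struct, not the kernel's):

#include <stdio.h>

struct scatterlist {
    void *page;         /* data page, or the next array when length == 0 */
    unsigned int length;
};

/* Same expression as the renamed helper above. */
static struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
{
    return (++sg)->length ? sg : (struct scatterlist *)sg->page;
}

int main(void)
{
    struct scatterlist b[2] = { { NULL, 30 }, { NULL, 40 } };
    struct scatterlist a[3] = { { NULL, 10 }, { NULL, 20 }, { b, 0 } };
    struct scatterlist *sg = a;

    for (int i = 0; ; i++) {
        printf("len %u\n", sg->length);
        if (i == 3)
            break;
        sg = scatterwalk_sg_next(sg);
    }
    return 0;
}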
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 68699b3e7998..bbaa545ea999 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1410,7 +1410,7 @@ static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1410 */ 1410 */
1411unsigned ata_exec_internal_sg(struct ata_device *dev, 1411unsigned ata_exec_internal_sg(struct ata_device *dev,
1412 struct ata_taskfile *tf, const u8 *cdb, 1412 struct ata_taskfile *tf, const u8 *cdb,
1413 int dma_dir, struct scatterlist *sg, 1413 int dma_dir, struct scatterlist *sgl,
1414 unsigned int n_elem, unsigned long timeout) 1414 unsigned int n_elem, unsigned long timeout)
1415{ 1415{
1416 struct ata_link *link = dev->link; 1416 struct ata_link *link = dev->link;
@@ -1472,11 +1472,12 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
1472 qc->dma_dir = dma_dir; 1472 qc->dma_dir = dma_dir;
1473 if (dma_dir != DMA_NONE) { 1473 if (dma_dir != DMA_NONE) {
1474 unsigned int i, buflen = 0; 1474 unsigned int i, buflen = 0;
1475 struct scatterlist *sg;
1475 1476
1476 for (i = 0; i < n_elem; i++) 1477 for_each_sg(sgl, sg, n_elem, i)
1477 buflen += sg[i].length; 1478 buflen += sg->length;
1478 1479
1479 ata_sg_init(qc, sg, n_elem); 1480 ata_sg_init(qc, sgl, n_elem);
1480 qc->nbytes = buflen; 1481 qc->nbytes = buflen;
1481 } 1482 }
1482 1483
@@ -4292,7 +4293,7 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
4292 if (qc->n_elem) 4293 if (qc->n_elem)
4293 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir); 4294 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4294 /* restore last sg */ 4295 /* restore last sg */
4295 sg[qc->orig_n_elem - 1].length += qc->pad_len; 4296 sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
4296 if (pad_buf) { 4297 if (pad_buf) {
4297 struct scatterlist *psg = &qc->pad_sgent; 4298 struct scatterlist *psg = &qc->pad_sgent;
4298 void *addr = kmap_atomic(psg->page, KM_IRQ0); 4299 void *addr = kmap_atomic(psg->page, KM_IRQ0);
@@ -4547,6 +4548,7 @@ void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4547 qc->orig_n_elem = 1; 4548 qc->orig_n_elem = 1;
4548 qc->buf_virt = buf; 4549 qc->buf_virt = buf;
4549 qc->nbytes = buflen; 4550 qc->nbytes = buflen;
4551 qc->cursg = qc->__sg;
4550 4552
4551 sg_init_one(&qc->sgent, buf, buflen); 4553 sg_init_one(&qc->sgent, buf, buflen);
4552} 4554}
@@ -4572,6 +4574,7 @@ void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4572 qc->__sg = sg; 4574 qc->__sg = sg;
4573 qc->n_elem = n_elem; 4575 qc->n_elem = n_elem;
4574 qc->orig_n_elem = n_elem; 4576 qc->orig_n_elem = n_elem;
4577 qc->cursg = qc->__sg;
4575} 4578}
4576 4579
4577/** 4580/**
@@ -4661,7 +4664,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
4661{ 4664{
4662 struct ata_port *ap = qc->ap; 4665 struct ata_port *ap = qc->ap;
4663 struct scatterlist *sg = qc->__sg; 4666 struct scatterlist *sg = qc->__sg;
4664 struct scatterlist *lsg = &sg[qc->n_elem - 1]; 4667 struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
4665 int n_elem, pre_n_elem, dir, trim_sg = 0; 4668 int n_elem, pre_n_elem, dir, trim_sg = 0;
4666 4669
4667 VPRINTK("ENTER, ata%u\n", ap->print_id); 4670 VPRINTK("ENTER, ata%u\n", ap->print_id);
@@ -4825,7 +4828,6 @@ void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
4825static void ata_pio_sector(struct ata_queued_cmd *qc) 4828static void ata_pio_sector(struct ata_queued_cmd *qc)
4826{ 4829{
4827 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); 4830 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4828 struct scatterlist *sg = qc->__sg;
4829 struct ata_port *ap = qc->ap; 4831 struct ata_port *ap = qc->ap;
4830 struct page *page; 4832 struct page *page;
4831 unsigned int offset; 4833 unsigned int offset;
@@ -4834,8 +4836,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
4834 if (qc->curbytes == qc->nbytes - qc->sect_size) 4836 if (qc->curbytes == qc->nbytes - qc->sect_size)
4835 ap->hsm_task_state = HSM_ST_LAST; 4837 ap->hsm_task_state = HSM_ST_LAST;
4836 4838
4837 page = sg[qc->cursg].page; 4839 page = qc->cursg->page;
4838 offset = sg[qc->cursg].offset + qc->cursg_ofs; 4840 offset = qc->cursg->offset + qc->cursg_ofs;
4839 4841
4840 /* get the current page and offset */ 4842 /* get the current page and offset */
4841 page = nth_page(page, (offset >> PAGE_SHIFT)); 4843 page = nth_page(page, (offset >> PAGE_SHIFT));
@@ -4863,8 +4865,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
4863 qc->curbytes += qc->sect_size; 4865 qc->curbytes += qc->sect_size;
4864 qc->cursg_ofs += qc->sect_size; 4866 qc->cursg_ofs += qc->sect_size;
4865 4867
4866 if (qc->cursg_ofs == (&sg[qc->cursg])->length) { 4868 if (qc->cursg_ofs == qc->cursg->length) {
4867 qc->cursg++; 4869 qc->cursg = sg_next(qc->cursg);
4868 qc->cursg_ofs = 0; 4870 qc->cursg_ofs = 0;
4869 } 4871 }
4870} 4872}
@@ -4950,16 +4952,18 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4950{ 4952{
4951 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); 4953 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4952 struct scatterlist *sg = qc->__sg; 4954 struct scatterlist *sg = qc->__sg;
4955 struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
4953 struct ata_port *ap = qc->ap; 4956 struct ata_port *ap = qc->ap;
4954 struct page *page; 4957 struct page *page;
4955 unsigned char *buf; 4958 unsigned char *buf;
4956 unsigned int offset, count; 4959 unsigned int offset, count;
4960 int no_more_sg = 0;
4957 4961
4958 if (qc->curbytes + bytes >= qc->nbytes) 4962 if (qc->curbytes + bytes >= qc->nbytes)
4959 ap->hsm_task_state = HSM_ST_LAST; 4963 ap->hsm_task_state = HSM_ST_LAST;
4960 4964
4961next_sg: 4965next_sg:
4962 if (unlikely(qc->cursg >= qc->n_elem)) { 4966 if (unlikely(no_more_sg)) {
4963 /* 4967 /*
4964 * The end of qc->sg is reached and the device expects 4968 * The end of qc->sg is reached and the device expects
4965 * more data to transfer. In order not to overrun qc->sg 4969 * more data to transfer. In order not to overrun qc->sg
@@ -4982,7 +4986,7 @@ next_sg:
4982 return; 4986 return;
4983 } 4987 }
4984 4988
4985 sg = &qc->__sg[qc->cursg]; 4989 sg = qc->cursg;
4986 4990
4987 page = sg->page; 4991 page = sg->page;
4988 offset = sg->offset + qc->cursg_ofs; 4992 offset = sg->offset + qc->cursg_ofs;
@@ -5021,7 +5025,10 @@ next_sg:
5021 qc->cursg_ofs += count; 5025 qc->cursg_ofs += count;
5022 5026
5023 if (qc->cursg_ofs == sg->length) { 5027 if (qc->cursg_ofs == sg->length) {
5024 qc->cursg++; 5028 if (qc->cursg == lsg)
5029 no_more_sg = 1;
5030
5031 qc->cursg = sg_next(qc->cursg);
5025 qc->cursg_ofs = 0; 5032 qc->cursg_ofs = 0;
5026 } 5033 }
5027 5034
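libata's qc->cursg, like hwif->cursg in the IDE hunks below, turns from an index into the sg array into a pointer into the (possibly chained) list, advanced with sg_next() and checked against sg_last() to detect the end. The same cursor idea in miniature, with stand-in types; the NULL-means-start lazy initialisation mirrors what ide_pio_sector() does after this series:

#include <stdio.h>

struct scatterlist { unsigned int length; struct scatterlist *next; };

static struct scatterlist *sg_next(struct scatterlist *sg)
{
    return sg->next;
}

int main(void)
{
    struct scatterlist b = { 20, NULL };
    struct scatterlist a = { 10, &b };
    struct scatterlist *cursg = NULL;   /* was: an integer index of 0 */

    for (int step = 0; step < 2; step++) {
        if (!cursg)                     /* lazy start-of-table init */
            cursg = &a;
        printf("segment of %u sectors\n", cursg->length);
        cursg = sg_next(cursg);
    }
    return 0;
}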
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 5237a491622b..9fbb39cd0f58 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -801,8 +801,6 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
801 801
802 ata_scsi_sdev_config(sdev); 802 ata_scsi_sdev_config(sdev);
803 803
804 blk_queue_max_phys_segments(sdev->request_queue, LIBATA_MAX_PRD);
805
806 sdev->manage_start_stop = 1; 804 sdev->manage_start_stop = 1;
807 805
808 if (dev) 806 if (dev)
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 55c3237fb1bc..3fb7e8bc436d 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1191,7 +1191,6 @@ static inline void complete_buffers(struct bio *bio, int status)
1191{ 1191{
1192 while (bio) { 1192 while (bio) {
1193 struct bio *xbh = bio->bi_next; 1193 struct bio *xbh = bio->bi_next;
1194 int nr_sectors = bio_sectors(bio);
1195 1194
1196 bio->bi_next = NULL; 1195 bio->bi_next = NULL;
1197 bio_endio(bio, status ? 0 : -EIO); 1196 bio_endio(bio, status ? 0 : -EIO);
@@ -2570,6 +2569,7 @@ static void do_cciss_request(struct request_queue *q)
2570 (int)creq->nr_sectors); 2569 (int)creq->nr_sectors);
2571#endif /* CCISS_DEBUG */ 2570#endif /* CCISS_DEBUG */
2572 2571
2572 memset(tmp_sg, 0, sizeof(tmp_sg));
2573 seg = blk_rq_map_sg(q, creq, tmp_sg); 2573 seg = blk_rq_map_sg(q, creq, tmp_sg);
2574 2574
2575 /* get the DMA records for the setup */ 2575 /* get the DMA records for the setup */
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 3853c9a38d6a..568603d3043e 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -981,9 +981,8 @@ static void start_io(ctlr_info_t *h)
981static inline void complete_buffers(struct bio *bio, int ok) 981static inline void complete_buffers(struct bio *bio, int ok)
982{ 982{
983 struct bio *xbh; 983 struct bio *xbh;
984 while(bio) {
985 int nr_sectors = bio_sectors(bio);
986 984
985 while (bio) {
987 xbh = bio->bi_next; 986 xbh = bio->bi_next;
988 bio->bi_next = NULL; 987 bio->bi_next = NULL;
989 988
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 540bf3676985..a8130a4ad6d4 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1133,16 +1133,21 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
1133 * Schedule reads for missing parts of the packet. 1133 * Schedule reads for missing parts of the packet.
1134 */ 1134 */
1135 for (f = 0; f < pkt->frames; f++) { 1135 for (f = 0; f < pkt->frames; f++) {
1136 struct bio_vec *vec;
1137
1136 int p, offset; 1138 int p, offset;
1137 if (written[f]) 1139 if (written[f])
1138 continue; 1140 continue;
1139 bio = pkt->r_bios[f]; 1141 bio = pkt->r_bios[f];
1142 vec = bio->bi_io_vec;
1140 bio_init(bio); 1143 bio_init(bio);
1141 bio->bi_max_vecs = 1; 1144 bio->bi_max_vecs = 1;
1142 bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9); 1145 bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
1143 bio->bi_bdev = pd->bdev; 1146 bio->bi_bdev = pd->bdev;
1144 bio->bi_end_io = pkt_end_io_read; 1147 bio->bi_end_io = pkt_end_io_read;
1145 bio->bi_private = pkt; 1148 bio->bi_private = pkt;
1149 bio->bi_io_vec = vec;
1150 bio->bi_destructor = pkt_bio_destructor;
1146 1151
1147 p = (f * CD_FRAMESIZE) / PAGE_SIZE; 1152 p = (f * CD_FRAMESIZE) / PAGE_SIZE;
1148 offset = (f * CD_FRAMESIZE) % PAGE_SIZE; 1153 offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
@@ -1439,6 +1444,8 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
1439 pkt->w_bio->bi_bdev = pd->bdev; 1444 pkt->w_bio->bi_bdev = pd->bdev;
1440 pkt->w_bio->bi_end_io = pkt_end_io_packet_write; 1445 pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
1441 pkt->w_bio->bi_private = pkt; 1446 pkt->w_bio->bi_private = pkt;
1447 pkt->w_bio->bi_io_vec = bvec;
1448 pkt->w_bio->bi_destructor = pkt_bio_destructor;
1442 for (f = 0; f < pkt->frames; f++) 1449 for (f = 0; f < pkt->frames; f++)
1443 if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset)) 1450 if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
1444 BUG(); 1451 BUG();
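The pktcdvd change is a subtle consequence of reusing bios: bio_init() clears the entire structure, so the driver must save bi_io_vec across the call and restore it (and supply bi_destructor) afterwards. The hazard in isolation, as a small runnable model with reduced types:

#include <stdio.h>
#include <string.h>

struct vec { void *page; };
struct bio { struct vec *bi_io_vec; int bi_max_vecs; };

static void bio_init(struct bio *bio)
{
    memset(bio, 0, sizeof(*bio));   /* clobbers every field */
}

int main(void)
{
    struct vec v[1];
    struct bio bio = { v, 1 };
    struct vec *saved = bio.bi_io_vec;  /* save before reinit */

    bio_init(&bio);
    bio.bi_io_vec = saved;              /* restore, as pktcdvd now does */
    bio.bi_max_vecs = 1;
    printf("vec restored: %d\n", bio.bi_io_vec == v);
    return 0;
}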
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 06d0552cf49c..e354bfc070e1 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -414,26 +414,6 @@ static void ps3disk_prepare_flush(struct request_queue *q, struct request *req)
414 req->cmd_type = REQ_TYPE_FLUSH; 414 req->cmd_type = REQ_TYPE_FLUSH;
415} 415}
416 416
417static int ps3disk_issue_flush(struct request_queue *q, struct gendisk *gendisk,
418 sector_t *sector)
419{
420 struct ps3_storage_device *dev = q->queuedata;
421 struct request *req;
422 int res;
423
424 dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
425
426 req = blk_get_request(q, WRITE, __GFP_WAIT);
427 ps3disk_prepare_flush(q, req);
428 res = blk_execute_rq(q, gendisk, req, 0);
429 if (res)
430 dev_err(&dev->sbd.core, "%s:%u: flush request failed %d\n",
431 __func__, __LINE__, res);
432 blk_put_request(req);
433 return res;
434}
435
436
437static unsigned long ps3disk_mask; 417static unsigned long ps3disk_mask;
438 418
439static DEFINE_MUTEX(ps3disk_mask_mutex); 419static DEFINE_MUTEX(ps3disk_mask_mutex);
@@ -506,7 +486,6 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
506 blk_queue_dma_alignment(queue, dev->blk_size-1); 486 blk_queue_dma_alignment(queue, dev->blk_size-1);
507 blk_queue_hardsect_size(queue, dev->blk_size); 487 blk_queue_hardsect_size(queue, dev->blk_size);
508 488
509 blk_queue_issue_flush_fn(queue, ps3disk_issue_flush);
510 blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH, 489 blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
511 ps3disk_prepare_flush); 490 ps3disk_prepare_flush);
512 491
diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
index 2b4d2a0ae5c2..c306c9f534ab 100644
--- a/drivers/ide/cris/ide-cris.c
+++ b/drivers/ide/cris/ide-cris.c
@@ -939,7 +939,8 @@ static int cris_ide_build_dmatable (ide_drive_t *drive)
939 /* group sequential buffers into one large buffer */ 939 /* group sequential buffers into one large buffer */
940 addr = page_to_phys(sg->page) + sg->offset; 940 addr = page_to_phys(sg->page) + sg->offset;
941 size = sg_dma_len(sg); 941 size = sg_dma_len(sg);
942 while (sg++, --i) { 942 while (--i) {
943 sg = sg_next(sg);
943 if ((addr + size) != page_to_phys(sg->page) + sg->offset) 944 if ((addr + size) != page_to_phys(sg->page) + sg->offset)
944 break; 945 break;
945 size += sg_dma_len(sg); 946 size += sg_dma_len(sg);
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 4754769eda97..92177ca48b4d 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -716,32 +716,6 @@ static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
716 rq->buffer = rq->cmd; 716 rq->buffer = rq->cmd;
717} 717}
718 718
719static int idedisk_issue_flush(struct request_queue *q, struct gendisk *disk,
720 sector_t *error_sector)
721{
722 ide_drive_t *drive = q->queuedata;
723 struct request *rq;
724 int ret;
725
726 if (!drive->wcache)
727 return 0;
728
729 rq = blk_get_request(q, WRITE, __GFP_WAIT);
730
731 idedisk_prepare_flush(q, rq);
732
733 ret = blk_execute_rq(q, disk, rq, 0);
734
735 /*
736 * if we failed and caller wants error offset, get it
737 */
738 if (ret && error_sector)
739 *error_sector = ide_get_error_location(drive, rq->cmd);
740
741 blk_put_request(rq);
742 return ret;
743}
744
745/* 719/*
746 * This is tightly woven into the driver->do_special can not touch. 720 * This is tightly woven into the driver->do_special can not touch.
747 * DON'T do it again until a total personality rewrite is committed. 721 * DON'T do it again until a total personality rewrite is committed.
@@ -781,7 +755,6 @@ static void update_ordered(ide_drive_t *drive)
781 struct hd_driveid *id = drive->id; 755 struct hd_driveid *id = drive->id;
782 unsigned ordered = QUEUE_ORDERED_NONE; 756 unsigned ordered = QUEUE_ORDERED_NONE;
783 prepare_flush_fn *prep_fn = NULL; 757 prepare_flush_fn *prep_fn = NULL;
784 issue_flush_fn *issue_fn = NULL;
785 758
786 if (drive->wcache) { 759 if (drive->wcache) {
787 unsigned long long capacity; 760 unsigned long long capacity;
@@ -805,13 +778,11 @@ static void update_ordered(ide_drive_t *drive)
805 if (barrier) { 778 if (barrier) {
806 ordered = QUEUE_ORDERED_DRAIN_FLUSH; 779 ordered = QUEUE_ORDERED_DRAIN_FLUSH;
807 prep_fn = idedisk_prepare_flush; 780 prep_fn = idedisk_prepare_flush;
808 issue_fn = idedisk_issue_flush;
809 } 781 }
810 } else 782 } else
811 ordered = QUEUE_ORDERED_DRAIN; 783 ordered = QUEUE_ORDERED_DRAIN;
812 784
813 blk_queue_ordered(drive->queue, ordered, prep_fn); 785 blk_queue_ordered(drive->queue, ordered, prep_fn);
814 blk_queue_issue_flush_fn(drive->queue, issue_fn);
815} 786}
816 787
817static int write_cache(ide_drive_t *drive, int arg) 788static int write_cache(ide_drive_t *drive, int arg)
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index b453211ee0fc..a4cbbbaccde9 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -280,7 +280,7 @@ int ide_build_dmatable (ide_drive_t *drive, struct request *rq)
280 } 280 }
281 } 281 }
282 282
283 sg++; 283 sg = sg_next(sg);
284 i--; 284 i--;
285 } 285 }
286 286
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 4cece930114c..04273d3c147c 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -322,41 +322,6 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
322 spin_unlock_irqrestore(&ide_lock, flags); 322 spin_unlock_irqrestore(&ide_lock, flags);
323} 323}
324 324
325/*
326 * FIXME: probably move this somewhere else, name is bad too :)
327 */
328u64 ide_get_error_location(ide_drive_t *drive, char *args)
329{
330 u32 high, low;
331 u8 hcyl, lcyl, sect;
332 u64 sector;
333
334 high = 0;
335 hcyl = args[5];
336 lcyl = args[4];
337 sect = args[3];
338
339 if (ide_id_has_flush_cache_ext(drive->id)) {
340 low = (hcyl << 16) | (lcyl << 8) | sect;
341 HWIF(drive)->OUTB(drive->ctl|0x80, IDE_CONTROL_REG);
342 high = ide_read_24(drive);
343 } else {
344 u8 cur = HWIF(drive)->INB(IDE_SELECT_REG);
345 if (cur & 0x40) {
346 high = cur & 0xf;
347 low = (hcyl << 16) | (lcyl << 8) | sect;
348 } else {
349 low = hcyl * drive->head * drive->sect;
350 low += lcyl * drive->sect;
351 low += sect - 1;
352 }
353 }
354
355 sector = ((u64) high << 24) | low;
356 return sector;
357}
358EXPORT_SYMBOL(ide_get_error_location);
359
360/** 325/**
361 * ide_end_drive_cmd - end an explicit drive command 326 * ide_end_drive_cmd - end an explicit drive command
362 * @drive: command 327 * @drive: command
@@ -881,7 +846,8 @@ void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
881 ide_hwif_t *hwif = drive->hwif; 846 ide_hwif_t *hwif = drive->hwif;
882 847
883 hwif->nsect = hwif->nleft = rq->nr_sectors; 848 hwif->nsect = hwif->nleft = rq->nr_sectors;
884 hwif->cursg = hwif->cursg_ofs = 0; 849 hwif->cursg_ofs = 0;
850 hwif->cursg = NULL;
885} 851}
886 852
887EXPORT_SYMBOL_GPL(ide_init_sg_cmd); 853EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index d1011712601c..34b1fb65bc79 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1349,7 +1349,7 @@ static int hwif_init(ide_hwif_t *hwif)
1349 if (!hwif->sg_max_nents) 1349 if (!hwif->sg_max_nents)
1350 hwif->sg_max_nents = PRD_ENTRIES; 1350 hwif->sg_max_nents = PRD_ENTRIES;
1351 1351
1352 hwif->sg_table = kmalloc(sizeof(struct scatterlist)*hwif->sg_max_nents, 1352 hwif->sg_table = kzalloc(sizeof(struct scatterlist)*hwif->sg_max_nents,
1353 GFP_KERNEL); 1353 GFP_KERNEL);
1354 if (!hwif->sg_table) { 1354 if (!hwif->sg_table) {
1355 printk(KERN_ERR "%s: unable to allocate SG table.\n", hwif->name); 1355 printk(KERN_ERR "%s: unable to allocate SG table.\n", hwif->name);
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index aa06dafb74ac..2a3c8d498343 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -45,6 +45,7 @@
45#include <linux/hdreg.h> 45#include <linux/hdreg.h>
46#include <linux/ide.h> 46#include <linux/ide.h>
47#include <linux/bitops.h> 47#include <linux/bitops.h>
48#include <linux/scatterlist.h>
48 49
49#include <asm/byteorder.h> 50#include <asm/byteorder.h>
50#include <asm/irq.h> 51#include <asm/irq.h>
@@ -263,6 +264,7 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
263{ 264{
264 ide_hwif_t *hwif = drive->hwif; 265 ide_hwif_t *hwif = drive->hwif;
265 struct scatterlist *sg = hwif->sg_table; 266 struct scatterlist *sg = hwif->sg_table;
267 struct scatterlist *cursg = hwif->cursg;
266 struct page *page; 268 struct page *page;
267#ifdef CONFIG_HIGHMEM 269#ifdef CONFIG_HIGHMEM
268 unsigned long flags; 270 unsigned long flags;
@@ -270,8 +272,14 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
270 unsigned int offset; 272 unsigned int offset;
271 u8 *buf; 273 u8 *buf;
272 274
273 page = sg[hwif->cursg].page; 275 cursg = hwif->cursg;
274 offset = sg[hwif->cursg].offset + hwif->cursg_ofs * SECTOR_SIZE; 276 if (!cursg) {
277 cursg = sg;
278 hwif->cursg = sg;
279 }
280
281 page = cursg->page;
282 offset = cursg->offset + hwif->cursg_ofs * SECTOR_SIZE;
275 283
276 /* get the current page and offset */ 284 /* get the current page and offset */
277 page = nth_page(page, (offset >> PAGE_SHIFT)); 285 page = nth_page(page, (offset >> PAGE_SHIFT));
@@ -285,8 +293,8 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
285 hwif->nleft--; 293 hwif->nleft--;
286 hwif->cursg_ofs++; 294 hwif->cursg_ofs++;
287 295
288 if ((hwif->cursg_ofs * SECTOR_SIZE) == sg[hwif->cursg].length) { 296 if ((hwif->cursg_ofs * SECTOR_SIZE) == cursg->length) {
289 hwif->cursg++; 297 hwif->cursg = sg_next(hwif->cursg);
290 hwif->cursg_ofs = 0; 298 hwif->cursg_ofs = 0;
291 } 299 }
292 300
@@ -367,6 +375,8 @@ static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
367 375
368static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat) 376static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
369{ 377{
378 HWIF(drive)->cursg = NULL;
379
370 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { 380 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
371 ide_task_t *task = rq->special; 381 ide_task_t *task = rq->special;
372 382
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
index aebde49365d1..892d08f61dc0 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/mips/au1xxx-ide.c
@@ -296,7 +296,7 @@ static int auide_build_dmatable(ide_drive_t *drive)
296 cur_addr += tc; 296 cur_addr += tc;
297 cur_len -= tc; 297 cur_len -= tc;
298 } 298 }
299 sg++; 299 sg = sg_next(sg);
300 i--; 300 i--;
301 } 301 }
302 302
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index 85ffaaa39b1b..c74fef6bbc91 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -29,6 +29,7 @@
29#include <linux/mm.h> 29#include <linux/mm.h>
30#include <linux/ioport.h> 30#include <linux/ioport.h>
31#include <linux/blkdev.h> 31#include <linux/blkdev.h>
32#include <linux/scatterlist.h>
32#include <linux/ioc4.h> 33#include <linux/ioc4.h>
33#include <asm/io.h> 34#include <asm/io.h>
34 35
@@ -537,7 +538,7 @@ sgiioc4_build_dma_table(ide_drive_t * drive, struct request *rq, int ddir)
537 } 538 }
538 } 539 }
539 540
540 sg++; 541 sg = sg_next(sg);
541 i--; 542 i--;
542 } 543 }
543 544
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index 7d8873839e21..9e86406bf44b 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -1539,7 +1539,7 @@ pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq)
1539 cur_len -= tc; 1539 cur_len -= tc;
1540 ++table; 1540 ++table;
1541 } 1541 }
1542 sg++; 1542 sg = sg_next(sg);
1543 i--; 1543 i--;
1544 } 1544 }
1545 1545
diff --git a/drivers/infiniband/hw/ipath/ipath_dma.c b/drivers/infiniband/hw/ipath/ipath_dma.c
index f87f003e3ef8..22709a4f8fc8 100644
--- a/drivers/infiniband/hw/ipath/ipath_dma.c
+++ b/drivers/infiniband/hw/ipath/ipath_dma.c
@@ -30,6 +30,7 @@
30 * SOFTWARE. 30 * SOFTWARE.
31 */ 31 */
32 32
33#include <linux/scatterlist.h>
33#include <rdma/ib_verbs.h> 34#include <rdma/ib_verbs.h>
34 35
35#include "ipath_verbs.h" 36#include "ipath_verbs.h"
@@ -96,17 +97,18 @@ static void ipath_dma_unmap_page(struct ib_device *dev,
96 BUG_ON(!valid_dma_direction(direction)); 97 BUG_ON(!valid_dma_direction(direction));
97} 98}
98 99
99static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents, 100static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl,
100 enum dma_data_direction direction) 101 int nents, enum dma_data_direction direction)
101{ 102{
103 struct scatterlist *sg;
102 u64 addr; 104 u64 addr;
103 int i; 105 int i;
104 int ret = nents; 106 int ret = nents;
105 107
106 BUG_ON(!valid_dma_direction(direction)); 108 BUG_ON(!valid_dma_direction(direction));
107 109
108 for (i = 0; i < nents; i++) { 110 for_each_sg(sgl, sg, nents, i) {
109 addr = (u64) page_address(sg[i].page); 111 addr = (u64) page_address(sg->page);
110 /* TODO: handle highmem pages */ 112 /* TODO: handle highmem pages */
111 if (!addr) { 113 if (!addr) {
112 ret = 0; 114 ret = 0;
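The ipath conversion shows the idiom used throughout this series: rename the argument to sgl (the list head), declare a local struct scatterlist *sg as the cursor, and let for_each_sg() do the walking, including across chain entries. A minimal sketch of a map_sg-style loop under those assumptions:

#include <linux/mm.h>
#include <linux/scatterlist.h>

static int map_sg_sketch(struct scatterlist *sgl, int nents)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                void *va = page_address(sg->page);      /* lowmem only */

                if (!va)
                        return 0;       /* highmem would need kmap() */
                /* hand (va + sg->offset, sg->length) to the device */
        }
        return nents;
}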
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index e05690e3592f..f3529b6f0a33 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -124,17 +124,19 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
124 124
125 if (cmd_dir == ISER_DIR_OUT) { 125 if (cmd_dir == ISER_DIR_OUT) {
126 /* copy the unaligned sg to the buffer which is used for RDMA */ 126 /* copy the unaligned sg to the buffer which is used for RDMA */
127 struct scatterlist *sg = (struct scatterlist *)data->buf; 127 struct scatterlist *sgl = (struct scatterlist *)data->buf;
128 struct scatterlist *sg;
128 int i; 129 int i;
129 char *p, *from; 130 char *p, *from;
130 131
131 for (p = mem, i = 0; i < data->size; i++) { 132 p = mem;
132 from = kmap_atomic(sg[i].page, KM_USER0); 133 for_each_sg(sgl, sg, data->size, i) {
134 from = kmap_atomic(sg->page, KM_USER0);
133 memcpy(p, 135 memcpy(p,
134 from + sg[i].offset, 136 from + sg->offset,
135 sg[i].length); 137 sg->length);
136 kunmap_atomic(from, KM_USER0); 138 kunmap_atomic(from, KM_USER0);
137 p += sg[i].length; 139 p += sg->length;
138 } 140 }
139 } 141 }
140 142
@@ -176,7 +178,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
176 178
177 if (cmd_dir == ISER_DIR_IN) { 179 if (cmd_dir == ISER_DIR_IN) {
178 char *mem; 180 char *mem;
179 struct scatterlist *sg; 181 struct scatterlist *sgl, *sg;
180 unsigned char *p, *to; 182 unsigned char *p, *to;
181 unsigned int sg_size; 183 unsigned int sg_size;
182 int i; 184 int i;
@@ -184,16 +186,17 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
184 /* copy back read RDMA to unaligned sg */ 186 /* copy back read RDMA to unaligned sg */
185 mem = mem_copy->copy_buf; 187 mem = mem_copy->copy_buf;
186 188
187 sg = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf; 189 sgl = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
188 sg_size = iser_ctask->data[ISER_DIR_IN].size; 190 sg_size = iser_ctask->data[ISER_DIR_IN].size;
189 191
190 for (p = mem, i = 0; i < sg_size; i++){ 192 p = mem;
191 to = kmap_atomic(sg[i].page, KM_SOFTIRQ0); 193 for_each_sg(sgl, sg, sg_size, i) {
192 memcpy(to + sg[i].offset, 194 to = kmap_atomic(sg->page, KM_SOFTIRQ0);
195 memcpy(to + sg->offset,
193 p, 196 p,
194 sg[i].length); 197 sg->length);
195 kunmap_atomic(to, KM_SOFTIRQ0); 198 kunmap_atomic(to, KM_SOFTIRQ0);
196 p += sg[i].length; 199 p += sg->length;
197 } 200 }
198 } 201 }
199 202
@@ -224,7 +227,8 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
224 struct iser_page_vec *page_vec, 227 struct iser_page_vec *page_vec,
225 struct ib_device *ibdev) 228 struct ib_device *ibdev)
226{ 229{
227 struct scatterlist *sg = (struct scatterlist *)data->buf; 230 struct scatterlist *sgl = (struct scatterlist *)data->buf;
231 struct scatterlist *sg;
228 u64 first_addr, last_addr, page; 232 u64 first_addr, last_addr, page;
229 int end_aligned; 233 int end_aligned;
230 unsigned int cur_page = 0; 234 unsigned int cur_page = 0;
@@ -232,24 +236,25 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
232 int i; 236 int i;
233 237
234 /* compute the offset of first element */ 238 /* compute the offset of first element */
235 page_vec->offset = (u64) sg[0].offset & ~MASK_4K; 239 page_vec->offset = (u64) sgl[0].offset & ~MASK_4K;
236 240
237 for (i = 0; i < data->dma_nents; i++) { 241 for_each_sg(sgl, sg, data->dma_nents, i) {
238 unsigned int dma_len = ib_sg_dma_len(ibdev, &sg[i]); 242 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
239 243
240 total_sz += dma_len; 244 total_sz += dma_len;
241 245
242 first_addr = ib_sg_dma_address(ibdev, &sg[i]); 246 first_addr = ib_sg_dma_address(ibdev, sg);
243 last_addr = first_addr + dma_len; 247 last_addr = first_addr + dma_len;
244 248
245 end_aligned = !(last_addr & ~MASK_4K); 249 end_aligned = !(last_addr & ~MASK_4K);
246 250
247 /* continue to collect page fragments till aligned or SG ends */ 251 /* continue to collect page fragments till aligned or SG ends */
248 while (!end_aligned && (i + 1 < data->dma_nents)) { 252 while (!end_aligned && (i + 1 < data->dma_nents)) {
253 sg = sg_next(sg);
249 i++; 254 i++;
250 dma_len = ib_sg_dma_len(ibdev, &sg[i]); 255 dma_len = ib_sg_dma_len(ibdev, sg);
251 total_sz += dma_len; 256 total_sz += dma_len;
252 last_addr = ib_sg_dma_address(ibdev, &sg[i]) + dma_len; 257 last_addr = ib_sg_dma_address(ibdev, sg) + dma_len;
253 end_aligned = !(last_addr & ~MASK_4K); 258 end_aligned = !(last_addr & ~MASK_4K);
254 } 259 }
255 260
@@ -284,25 +289,26 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
284static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data, 289static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
285 struct ib_device *ibdev) 290 struct ib_device *ibdev)
286{ 291{
287 struct scatterlist *sg; 292 struct scatterlist *sgl, *sg;
288 u64 end_addr, next_addr; 293 u64 end_addr, next_addr;
289 int i, cnt; 294 int i, cnt;
290 unsigned int ret_len = 0; 295 unsigned int ret_len = 0;
291 296
292 sg = (struct scatterlist *)data->buf; 297 sgl = (struct scatterlist *)data->buf;
293 298
294 for (cnt = 0, i = 0; i < data->dma_nents; i++, cnt++) { 299 cnt = 0;
300 for_each_sg(sgl, sg, data->dma_nents, i) {
295 /* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX " 301 /* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
296 "offset: %ld sz: %ld\n", i, 302 "offset: %ld sz: %ld\n", i,
297 (unsigned long)page_to_phys(sg[i].page), 303 (unsigned long)page_to_phys(sg->page),
298 (unsigned long)sg[i].offset, 304 (unsigned long)sg->offset,
299 (unsigned long)sg[i].length); */ 305 (unsigned long)sg->length); */
300 end_addr = ib_sg_dma_address(ibdev, &sg[i]) + 306 end_addr = ib_sg_dma_address(ibdev, sg) +
301 ib_sg_dma_len(ibdev, &sg[i]); 307 ib_sg_dma_len(ibdev, sg);
302 /* iser_dbg("Checking sg iobuf end address " 308 /* iser_dbg("Checking sg iobuf end address "
303 "0x%08lX\n", end_addr); */ 309 "0x%08lX\n", end_addr); */
304 if (i + 1 < data->dma_nents) { 310 if (i + 1 < data->dma_nents) {
305 next_addr = ib_sg_dma_address(ibdev, &sg[i+1]); 311 next_addr = ib_sg_dma_address(ibdev, sg_next(sg));
306 /* are i, i+1 fragments of the same page? */ 312 /* are i, i+1 fragments of the same page? */
307 if (end_addr == next_addr) 313 if (end_addr == next_addr)
308 continue; 314 continue;
@@ -322,15 +328,16 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
322static void iser_data_buf_dump(struct iser_data_buf *data, 328static void iser_data_buf_dump(struct iser_data_buf *data,
323 struct ib_device *ibdev) 329 struct ib_device *ibdev)
324{ 330{
325 struct scatterlist *sg = (struct scatterlist *)data->buf; 331 struct scatterlist *sgl = (struct scatterlist *)data->buf;
332 struct scatterlist *sg;
326 int i; 333 int i;
327 334
328 for (i = 0; i < data->dma_nents; i++) 335 for_each_sg(sgl, sg, data->dma_nents, i)
329 iser_err("sg[%d] dma_addr:0x%lX page:0x%p " 336 iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
330 "off:0x%x sz:0x%x dma_len:0x%x\n", 337 "off:0x%x sz:0x%x dma_len:0x%x\n",
331 i, (unsigned long)ib_sg_dma_address(ibdev, &sg[i]), 338 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
332 sg[i].page, sg[i].offset, 339 sg->page, sg->offset,
333 sg[i].length, ib_sg_dma_len(ibdev, &sg[i])); 340 sg->length, ib_sg_dma_len(ibdev, sg));
334} 341}
335 342
336static void iser_dump_page_vec(struct iser_page_vec *page_vec) 343static void iser_dump_page_vec(struct iser_page_vec *page_vec)
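All of the iser hunks follow one recipe: keep the original pointer as sgl, iterate with for_each_sg(), replace every sg[i] with sg, and use sg_next(sg) where the old code peeked at &sg[i+1]. A sketch of the copy-back loop in that style, assuming a lowmem bounce buffer and this era's two-argument kmap_atomic():

#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Copy a flat buffer back into a possibly chained scatterlist. */
static void copy_buf_to_sg(void *buf, struct scatterlist *sgl, int nents)
{
        struct scatterlist *sg;
        char *p = buf;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                char *to = kmap_atomic(sg->page, KM_SOFTIRQ0);

                memcpy(to + sg->offset, p, sg->length);
                kunmap_atomic(to, KM_SOFTIRQ0);
                p += sg->length;
        }
}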
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 8216a6f75be5..64fee90bb68b 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -441,33 +441,12 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
441 return clone; 441 return clone;
442} 442}
443 443
444static void crypt_free_buffer_pages(struct crypt_config *cc, 444static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
445 struct bio *clone, unsigned int bytes)
446{ 445{
447 unsigned int i, start, end; 446 unsigned int i;
448 struct bio_vec *bv; 447 struct bio_vec *bv;
449 448
450 /* 449 for (i = 0; i < clone->bi_vcnt; i++) {
451 * This is ugly, but Jens Axboe thinks that using bi_idx in the
452 * endio function is too dangerous at the moment, so I calculate the
453 * correct position using bi_vcnt and bi_size.
454 * The bv_offset and bv_len fields might already be modified but we
455 * know that we always allocated whole pages.
456 * A fix to the bi_idx issue in the kernel is in the works, so
457 * we will hopefully be able to revert to the cleaner solution soon.
458 */
459 i = clone->bi_vcnt - 1;
460 bv = bio_iovec_idx(clone, i);
461 end = (i << PAGE_SHIFT) + (bv->bv_offset + bv->bv_len) - clone->bi_size;
462 start = end - bytes;
463
464 start >>= PAGE_SHIFT;
465 if (!clone->bi_size)
466 end = clone->bi_vcnt;
467 else
468 end >>= PAGE_SHIFT;
469
470 for (i = start; i < end; i++) {
471 bv = bio_iovec_idx(clone, i); 450 bv = bio_iovec_idx(clone, i);
472 BUG_ON(!bv->bv_page); 451 BUG_ON(!bv->bv_page);
473 mempool_free(bv->bv_page, cc->page_pool); 452 mempool_free(bv->bv_page, cc->page_pool);
@@ -519,7 +498,7 @@ static void crypt_endio(struct bio *clone, int error)
519 * free the processed pages 498 * free the processed pages
520 */ 499 */
521 if (!read_io) { 500 if (!read_io) {
522 crypt_free_buffer_pages(cc, clone, clone->bi_size); 501 crypt_free_buffer_pages(cc, clone);
523 goto out; 502 goto out;
524 } 503 }
525 504
@@ -608,7 +587,7 @@ static void process_write(struct dm_crypt_io *io)
608 ctx.idx_out = 0; 587 ctx.idx_out = 0;
609 588
610 if (unlikely(crypt_convert(cc, &ctx) < 0)) { 589 if (unlikely(crypt_convert(cc, &ctx) < 0)) {
611 crypt_free_buffer_pages(cc, clone, clone->bi_size); 590 crypt_free_buffer_pages(cc, clone);
612 bio_put(clone); 591 bio_put(clone);
613 dec_pending(io, -EIO); 592 dec_pending(io, -EIO);
614 return; 593 return;
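With the bi_idx arithmetic gone, crypt_free_buffer_pages() relies on a simpler invariant: both callers now free pages only once the whole clone bio is finished (completion or error), so every populated bvec can be returned to the pool unconditionally. An illustrative sketch of that pattern; the whole-page assumption comes from crypt_alloc_buffer():

#include <linux/bio.h>
#include <linux/mempool.h>

static void free_clone_pages(mempool_t *pool, struct bio *clone)
{
        struct bio_vec *bv;
        unsigned int i;

        for (i = 0; i < clone->bi_vcnt; i++) {
                bv = bio_iovec_idx(clone, i);
                mempool_free(bv->bv_page, pool);    /* element, then pool */
                bv->bv_page = NULL;
        }
}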
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 2bcde5798b5a..fbe477bb2c68 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -999,33 +999,6 @@ void dm_table_unplug_all(struct dm_table *t)
999 } 999 }
1000} 1000}
1001 1001
1002int dm_table_flush_all(struct dm_table *t)
1003{
1004 struct list_head *d, *devices = dm_table_get_devices(t);
1005 int ret = 0;
1006 unsigned i;
1007
1008 for (i = 0; i < t->num_targets; i++)
1009 if (t->targets[i].type->flush)
1010 t->targets[i].type->flush(&t->targets[i]);
1011
1012 for (d = devices->next; d != devices; d = d->next) {
1013 struct dm_dev *dd = list_entry(d, struct dm_dev, list);
1014 struct request_queue *q = bdev_get_queue(dd->bdev);
1015 int err;
1016
1017 if (!q->issue_flush_fn)
1018 err = -EOPNOTSUPP;
1019 else
1020 err = q->issue_flush_fn(q, dd->bdev->bd_disk, NULL);
1021
1022 if (!ret)
1023 ret = err;
1024 }
1025
1026 return ret;
1027}
1028
1029struct mapped_device *dm_table_get_md(struct dm_table *t) 1002struct mapped_device *dm_table_get_md(struct dm_table *t)
1030{ 1003{
1031 dm_get(t->md); 1004 dm_get(t->md);
@@ -1043,4 +1016,3 @@ EXPORT_SYMBOL(dm_table_get_md);
1043EXPORT_SYMBOL(dm_table_put); 1016EXPORT_SYMBOL(dm_table_put);
1044EXPORT_SYMBOL(dm_table_get); 1017EXPORT_SYMBOL(dm_table_get);
1045EXPORT_SYMBOL(dm_table_unplug_all); 1018EXPORT_SYMBOL(dm_table_unplug_all);
1046EXPORT_SYMBOL(dm_table_flush_all);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 167765c47747..d837d37f6209 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -840,21 +840,6 @@ static int dm_request(struct request_queue *q, struct bio *bio)
840 return 0; 840 return 0;
841} 841}
842 842
843static int dm_flush_all(struct request_queue *q, struct gendisk *disk,
844 sector_t *error_sector)
845{
846 struct mapped_device *md = q->queuedata;
847 struct dm_table *map = dm_get_table(md);
848 int ret = -ENXIO;
849
850 if (map) {
851 ret = dm_table_flush_all(map);
852 dm_table_put(map);
853 }
854
855 return ret;
856}
857
858static void dm_unplug_all(struct request_queue *q) 843static void dm_unplug_all(struct request_queue *q)
859{ 844{
860 struct mapped_device *md = q->queuedata; 845 struct mapped_device *md = q->queuedata;
@@ -1003,7 +988,6 @@ static struct mapped_device *alloc_dev(int minor)
1003 blk_queue_make_request(md->queue, dm_request); 988 blk_queue_make_request(md->queue, dm_request);
1004 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); 989 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
1005 md->queue->unplug_fn = dm_unplug_all; 990 md->queue->unplug_fn = dm_unplug_all;
1006 md->queue->issue_flush_fn = dm_flush_all;
1007 991
1008 md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache); 992 md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
1009 if (!md->io_pool) 993 if (!md->io_pool)
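The dm hunks above, like the md and i2o_block hunks below, drop a ->issue_flush_fn hook rather than replace it: in this series the block layer reimplements blkdev_issue_flush() on top of an empty barrier bio, so stacked drivers no longer need a per-queue flush callback. That reading is inferred from the removals, not stated in the hunks themselves; a hedged sketch of the caller side:

#include <linux/blkdev.h>
#include <linux/fs.h>

/* Flush a device's write cache without any issue_flush_fn hook. */
static int flush_dev_cache(struct block_device *bdev)
{
        sector_t error_sector;

        /* Internally submits an empty barrier bio and waits on it. */
        return blkdev_issue_flush(bdev, &error_sector);
}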
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 462ee652a890..4b3faa45277e 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -111,7 +111,6 @@ void dm_table_postsuspend_targets(struct dm_table *t);
111int dm_table_resume_targets(struct dm_table *t); 111int dm_table_resume_targets(struct dm_table *t);
112int dm_table_any_congested(struct dm_table *t, int bdi_bits); 112int dm_table_any_congested(struct dm_table *t, int bdi_bits);
113void dm_table_unplug_all(struct dm_table *t); 113void dm_table_unplug_all(struct dm_table *t);
114int dm_table_flush_all(struct dm_table *t);
115 114
116/*----------------------------------------------------------------- 115/*-----------------------------------------------------------------
117 * A registry of target types. 116 * A registry of target types.
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 550148770bb2..56a11f6c127b 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -92,25 +92,6 @@ static void linear_unplug(struct request_queue *q)
92 } 92 }
93} 93}
94 94
95static int linear_issue_flush(struct request_queue *q, struct gendisk *disk,
96 sector_t *error_sector)
97{
98 mddev_t *mddev = q->queuedata;
99 linear_conf_t *conf = mddev_to_conf(mddev);
100 int i, ret = 0;
101
102 for (i=0; i < mddev->raid_disks && ret == 0; i++) {
103 struct block_device *bdev = conf->disks[i].rdev->bdev;
104 struct request_queue *r_queue = bdev_get_queue(bdev);
105
106 if (!r_queue->issue_flush_fn)
107 ret = -EOPNOTSUPP;
108 else
109 ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, error_sector);
110 }
111 return ret;
112}
113
114static int linear_congested(void *data, int bits) 95static int linear_congested(void *data, int bits)
115{ 96{
116 mddev_t *mddev = data; 97 mddev_t *mddev = data;
@@ -279,7 +260,6 @@ static int linear_run (mddev_t *mddev)
279 260
280 blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec); 261 blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
281 mddev->queue->unplug_fn = linear_unplug; 262 mddev->queue->unplug_fn = linear_unplug;
282 mddev->queue->issue_flush_fn = linear_issue_flush;
283 mddev->queue->backing_dev_info.congested_fn = linear_congested; 263 mddev->queue->backing_dev_info.congested_fn = linear_congested;
284 mddev->queue->backing_dev_info.congested_data = mddev; 264 mddev->queue->backing_dev_info.congested_data = mddev;
285 return 0; 265 return 0;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index acf1b81b47cb..0dc563d76b39 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3463,7 +3463,6 @@ static int do_md_stop(mddev_t * mddev, int mode)
3463 mddev->pers->stop(mddev); 3463 mddev->pers->stop(mddev);
3464 mddev->queue->merge_bvec_fn = NULL; 3464 mddev->queue->merge_bvec_fn = NULL;
3465 mddev->queue->unplug_fn = NULL; 3465 mddev->queue->unplug_fn = NULL;
3466 mddev->queue->issue_flush_fn = NULL;
3467 mddev->queue->backing_dev_info.congested_fn = NULL; 3466 mddev->queue->backing_dev_info.congested_fn = NULL;
3468 if (mddev->pers->sync_request) 3467 if (mddev->pers->sync_request)
3469 sysfs_remove_group(&mddev->kobj, &md_redundancy_group); 3468 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index f2a63f394ad9..b35731cceac6 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -194,35 +194,6 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev)
194 seq_printf (seq, "]"); 194 seq_printf (seq, "]");
195} 195}
196 196
197static int multipath_issue_flush(struct request_queue *q, struct gendisk *disk,
198 sector_t *error_sector)
199{
200 mddev_t *mddev = q->queuedata;
201 multipath_conf_t *conf = mddev_to_conf(mddev);
202 int i, ret = 0;
203
204 rcu_read_lock();
205 for (i=0; i<mddev->raid_disks && ret == 0; i++) {
206 mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
207 if (rdev && !test_bit(Faulty, &rdev->flags)) {
208 struct block_device *bdev = rdev->bdev;
209 struct request_queue *r_queue = bdev_get_queue(bdev);
210
211 if (!r_queue->issue_flush_fn)
212 ret = -EOPNOTSUPP;
213 else {
214 atomic_inc(&rdev->nr_pending);
215 rcu_read_unlock();
216 ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
217 error_sector);
218 rdev_dec_pending(rdev, mddev);
219 rcu_read_lock();
220 }
221 }
222 }
223 rcu_read_unlock();
224 return ret;
225}
226static int multipath_congested(void *data, int bits) 197static int multipath_congested(void *data, int bits)
227{ 198{
228 mddev_t *mddev = data; 199 mddev_t *mddev = data;
@@ -527,7 +498,6 @@ static int multipath_run (mddev_t *mddev)
527 mddev->array_size = mddev->size; 498 mddev->array_size = mddev->size;
528 499
529 mddev->queue->unplug_fn = multipath_unplug; 500 mddev->queue->unplug_fn = multipath_unplug;
530 mddev->queue->issue_flush_fn = multipath_issue_flush;
531 mddev->queue->backing_dev_info.congested_fn = multipath_congested; 501 mddev->queue->backing_dev_info.congested_fn = multipath_congested;
532 mddev->queue->backing_dev_info.congested_data = mddev; 502 mddev->queue->backing_dev_info.congested_data = mddev;
533 503
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index ef0da2d84959..e79e1a538d44 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -40,26 +40,6 @@ static void raid0_unplug(struct request_queue *q)
40 } 40 }
41} 41}
42 42
43static int raid0_issue_flush(struct request_queue *q, struct gendisk *disk,
44 sector_t *error_sector)
45{
46 mddev_t *mddev = q->queuedata;
47 raid0_conf_t *conf = mddev_to_conf(mddev);
48 mdk_rdev_t **devlist = conf->strip_zone[0].dev;
49 int i, ret = 0;
50
51 for (i=0; i<mddev->raid_disks && ret == 0; i++) {
52 struct block_device *bdev = devlist[i]->bdev;
53 struct request_queue *r_queue = bdev_get_queue(bdev);
54
55 if (!r_queue->issue_flush_fn)
56 ret = -EOPNOTSUPP;
57 else
58 ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, error_sector);
59 }
60 return ret;
61}
62
63static int raid0_congested(void *data, int bits) 43static int raid0_congested(void *data, int bits)
64{ 44{
65 mddev_t *mddev = data; 45 mddev_t *mddev = data;
@@ -250,7 +230,6 @@ static int create_strip_zones (mddev_t *mddev)
250 230
251 mddev->queue->unplug_fn = raid0_unplug; 231 mddev->queue->unplug_fn = raid0_unplug;
252 232
253 mddev->queue->issue_flush_fn = raid0_issue_flush;
254 mddev->queue->backing_dev_info.congested_fn = raid0_congested; 233 mddev->queue->backing_dev_info.congested_fn = raid0_congested;
255 mddev->queue->backing_dev_info.congested_data = mddev; 234 mddev->queue->backing_dev_info.congested_data = mddev;
256 235
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 6d03bea6fa58..0bcefad82413 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -567,36 +567,6 @@ static void raid1_unplug(struct request_queue *q)
567 md_wakeup_thread(mddev->thread); 567 md_wakeup_thread(mddev->thread);
568} 568}
569 569
570static int raid1_issue_flush(struct request_queue *q, struct gendisk *disk,
571 sector_t *error_sector)
572{
573 mddev_t *mddev = q->queuedata;
574 conf_t *conf = mddev_to_conf(mddev);
575 int i, ret = 0;
576
577 rcu_read_lock();
578 for (i=0; i<mddev->raid_disks && ret == 0; i++) {
579 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
580 if (rdev && !test_bit(Faulty, &rdev->flags)) {
581 struct block_device *bdev = rdev->bdev;
582 struct request_queue *r_queue = bdev_get_queue(bdev);
583
584 if (!r_queue->issue_flush_fn)
585 ret = -EOPNOTSUPP;
586 else {
587 atomic_inc(&rdev->nr_pending);
588 rcu_read_unlock();
589 ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
590 error_sector);
591 rdev_dec_pending(rdev, mddev);
592 rcu_read_lock();
593 }
594 }
595 }
596 rcu_read_unlock();
597 return ret;
598}
599
600static int raid1_congested(void *data, int bits) 570static int raid1_congested(void *data, int bits)
601{ 571{
602 mddev_t *mddev = data; 572 mddev_t *mddev = data;
@@ -1997,7 +1967,6 @@ static int run(mddev_t *mddev)
1997 mddev->array_size = mddev->size; 1967 mddev->array_size = mddev->size;
1998 1968
1999 mddev->queue->unplug_fn = raid1_unplug; 1969 mddev->queue->unplug_fn = raid1_unplug;
2000 mddev->queue->issue_flush_fn = raid1_issue_flush;
2001 mddev->queue->backing_dev_info.congested_fn = raid1_congested; 1970 mddev->queue->backing_dev_info.congested_fn = raid1_congested;
2002 mddev->queue->backing_dev_info.congested_data = mddev; 1971 mddev->queue->backing_dev_info.congested_data = mddev;
2003 1972
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 25a96c42bdb0..fc6607acb6e4 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -611,36 +611,6 @@ static void raid10_unplug(struct request_queue *q)
611 md_wakeup_thread(mddev->thread); 611 md_wakeup_thread(mddev->thread);
612} 612}
613 613
614static int raid10_issue_flush(struct request_queue *q, struct gendisk *disk,
615 sector_t *error_sector)
616{
617 mddev_t *mddev = q->queuedata;
618 conf_t *conf = mddev_to_conf(mddev);
619 int i, ret = 0;
620
621 rcu_read_lock();
622 for (i=0; i<mddev->raid_disks && ret == 0; i++) {
623 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
624 if (rdev && !test_bit(Faulty, &rdev->flags)) {
625 struct block_device *bdev = rdev->bdev;
626 struct request_queue *r_queue = bdev_get_queue(bdev);
627
628 if (!r_queue->issue_flush_fn)
629 ret = -EOPNOTSUPP;
630 else {
631 atomic_inc(&rdev->nr_pending);
632 rcu_read_unlock();
633 ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
634 error_sector);
635 rdev_dec_pending(rdev, mddev);
636 rcu_read_lock();
637 }
638 }
639 }
640 rcu_read_unlock();
641 return ret;
642}
643
644static int raid10_congested(void *data, int bits) 614static int raid10_congested(void *data, int bits)
645{ 615{
646 mddev_t *mddev = data; 616 mddev_t *mddev = data;
@@ -2118,7 +2088,6 @@ static int run(mddev_t *mddev)
2118 mddev->resync_max_sectors = size << conf->chunk_shift; 2088 mddev->resync_max_sectors = size << conf->chunk_shift;
2119 2089
2120 mddev->queue->unplug_fn = raid10_unplug; 2090 mddev->queue->unplug_fn = raid10_unplug;
2121 mddev->queue->issue_flush_fn = raid10_issue_flush;
2122 mddev->queue->backing_dev_info.congested_fn = raid10_congested; 2091 mddev->queue->backing_dev_info.congested_fn = raid10_congested;
2123 mddev->queue->backing_dev_info.congested_data = mddev; 2092 mddev->queue->backing_dev_info.congested_data = mddev;
2124 2093
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index caaca9e178bc..8ee181a01f52 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3204,36 +3204,6 @@ static void raid5_unplug_device(struct request_queue *q)
3204 unplug_slaves(mddev); 3204 unplug_slaves(mddev);
3205} 3205}
3206 3206
3207static int raid5_issue_flush(struct request_queue *q, struct gendisk *disk,
3208 sector_t *error_sector)
3209{
3210 mddev_t *mddev = q->queuedata;
3211 raid5_conf_t *conf = mddev_to_conf(mddev);
3212 int i, ret = 0;
3213
3214 rcu_read_lock();
3215 for (i=0; i<mddev->raid_disks && ret == 0; i++) {
3216 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
3217 if (rdev && !test_bit(Faulty, &rdev->flags)) {
3218 struct block_device *bdev = rdev->bdev;
3219 struct request_queue *r_queue = bdev_get_queue(bdev);
3220
3221 if (!r_queue->issue_flush_fn)
3222 ret = -EOPNOTSUPP;
3223 else {
3224 atomic_inc(&rdev->nr_pending);
3225 rcu_read_unlock();
3226 ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
3227 error_sector);
3228 rdev_dec_pending(rdev, mddev);
3229 rcu_read_lock();
3230 }
3231 }
3232 }
3233 rcu_read_unlock();
3234 return ret;
3235}
3236
3237static int raid5_congested(void *data, int bits) 3207static int raid5_congested(void *data, int bits)
3238{ 3208{
3239 mddev_t *mddev = data; 3209 mddev_t *mddev = data;
@@ -4263,7 +4233,6 @@ static int run(mddev_t *mddev)
4263 mdname(mddev)); 4233 mdname(mddev));
4264 4234
4265 mddev->queue->unplug_fn = raid5_unplug_device; 4235 mddev->queue->unplug_fn = raid5_unplug_device;
4266 mddev->queue->issue_flush_fn = raid5_issue_flush;
4267 mddev->queue->backing_dev_info.congested_data = mddev; 4236 mddev->queue->backing_dev_info.congested_data = mddev;
4268 mddev->queue->backing_dev_info.congested_fn = raid5_congested; 4237 mddev->queue->backing_dev_info.congested_fn = raid5_congested;
4269 4238
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 822a3aa4fae5..626bb3c9af2b 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -293,7 +293,7 @@ nextSGEset:
293 for (ii=0; ii < (numSgeThisFrame-1); ii++) { 293 for (ii=0; ii < (numSgeThisFrame-1); ii++) {
294 thisxfer = sg_dma_len(sg); 294 thisxfer = sg_dma_len(sg);
295 if (thisxfer == 0) { 295 if (thisxfer == 0) {
296 sg ++; /* Get next SG element from the OS */ 296 sg = sg_next(sg); /* Get next SG element from the OS */
297 sg_done++; 297 sg_done++;
298 continue; 298 continue;
299 } 299 }
@@ -301,7 +301,7 @@ nextSGEset:
301 v2 = sg_dma_address(sg); 301 v2 = sg_dma_address(sg);
302 mptscsih_add_sge(psge, sgflags | thisxfer, v2); 302 mptscsih_add_sge(psge, sgflags | thisxfer, v2);
303 303
304 sg++; /* Get next SG element from the OS */ 304 sg = sg_next(sg); /* Get next SG element from the OS */
305 psge += (sizeof(u32) + sizeof(dma_addr_t)); 305 psge += (sizeof(u32) + sizeof(dma_addr_t));
306 sgeOffset += (sizeof(u32) + sizeof(dma_addr_t)); 306 sgeOffset += (sizeof(u32) + sizeof(dma_addr_t));
307 sg_done++; 307 sg_done++;
@@ -322,7 +322,7 @@ nextSGEset:
322 v2 = sg_dma_address(sg); 322 v2 = sg_dma_address(sg);
323 mptscsih_add_sge(psge, sgflags | thisxfer, v2); 323 mptscsih_add_sge(psge, sgflags | thisxfer, v2);
324 /* 324 /*
325 sg++; 325 sg = sg_next(sg);
326 psge += (sizeof(u32) + sizeof(dma_addr_t)); 326 psge += (sizeof(u32) + sizeof(dma_addr_t));
327 */ 327 */
328 sgeOffset += (sizeof(u32) + sizeof(dma_addr_t)); 328 sgeOffset += (sizeof(u32) + sizeof(dma_addr_t));
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 50b2c7334410..d602ba6d5417 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -149,29 +149,6 @@ static int i2o_block_device_flush(struct i2o_device *dev)
149}; 149};
150 150
151/** 151/**
152 * i2o_block_issue_flush - device-flush interface for block-layer
153 * @queue: the request queue of the device which should be flushed
154 * @disk: gendisk
155 * @error_sector: error offset
156 *
157 * Helper function to provide flush functionality to block-layer.
158 *
159 * Returns 0 on success or negative error code on failure.
160 */
161
162static int i2o_block_issue_flush(struct request_queue * queue, struct gendisk *disk,
163 sector_t * error_sector)
164{
165 struct i2o_block_device *i2o_blk_dev = queue->queuedata;
166 int rc = -ENODEV;
167
168 if (likely(i2o_blk_dev))
169 rc = i2o_block_device_flush(i2o_blk_dev->i2o_dev);
170
171 return rc;
172}
173
174/**
175 * i2o_block_device_mount - Mount (load) the media of device dev 152 * i2o_block_device_mount - Mount (load) the media of device dev
176 * @dev: I2O device which should receive the mount request 153 * @dev: I2O device which should receive the mount request
177 * @media_id: Media Identifier 154 * @media_id: Media Identifier
@@ -1009,7 +986,6 @@ static struct i2o_block_device *i2o_block_device_alloc(void)
1009 } 986 }
1010 987
1011 blk_queue_prep_rq(queue, i2o_block_prep_req_fn); 988 blk_queue_prep_rq(queue, i2o_block_prep_req_fn);
1012 blk_queue_issue_flush_fn(queue, i2o_block_issue_flush);
1013 989
1014 gd->major = I2O_MAJOR; 990 gd->major = I2O_MAJOR;
1015 gd->queue = queue; 991 gd->queue = queue;
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index b0abc7d92805..a5d0354bbbda 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -153,14 +153,14 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
153 blk_queue_max_hw_segments(mq->queue, bouncesz / 512); 153 blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
154 blk_queue_max_segment_size(mq->queue, bouncesz); 154 blk_queue_max_segment_size(mq->queue, bouncesz);
155 155
156 mq->sg = kmalloc(sizeof(struct scatterlist), 156 mq->sg = kzalloc(sizeof(struct scatterlist),
157 GFP_KERNEL); 157 GFP_KERNEL);
158 if (!mq->sg) { 158 if (!mq->sg) {
159 ret = -ENOMEM; 159 ret = -ENOMEM;
160 goto cleanup_queue; 160 goto cleanup_queue;
161 } 161 }
162 162
163 mq->bounce_sg = kmalloc(sizeof(struct scatterlist) * 163 mq->bounce_sg = kzalloc(sizeof(struct scatterlist) *
164 bouncesz / 512, GFP_KERNEL); 164 bouncesz / 512, GFP_KERNEL);
165 if (!mq->bounce_sg) { 165 if (!mq->bounce_sg) {
166 ret = -ENOMEM; 166 ret = -ENOMEM;
@@ -177,7 +177,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
177 blk_queue_max_hw_segments(mq->queue, host->max_hw_segs); 177 blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
178 blk_queue_max_segment_size(mq->queue, host->max_seg_size); 178 blk_queue_max_segment_size(mq->queue, host->max_seg_size);
179 179
180 mq->sg = kmalloc(sizeof(struct scatterlist) * 180 mq->sg = kzalloc(sizeof(struct scatterlist) *
181 host->max_phys_segs, GFP_KERNEL); 181 host->max_phys_segs, GFP_KERNEL);
182 if (!mq->sg) { 182 if (!mq->sg) {
183 ret = -ENOMEM; 183 ret = -ENOMEM;
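The mmc switch from kmalloc() to kzalloc() is not cosmetic: with chaining, the low bits of scatterlist.page double as chain/termination markers, so an sg array must start out zeroed or stale heap contents can masquerade as chain entries. That rationale is inferred from the "mmc: need to zero sglist on init" entry in this merge; a minimal sketch:

#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Allocate an sg table zeroed so no stale low-bit markers survive. */
static struct scatterlist *alloc_sg_table(unsigned int nents)
{
        return kzalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
}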
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 16e5563e0c65..57cac7008e0b 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -34,6 +34,7 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/mempool.h> 35#include <linux/mempool.h>
36#include <linux/syscalls.h> 36#include <linux/syscalls.h>
37#include <linux/scatterlist.h>
37#include <linux/ioctl.h> 38#include <linux/ioctl.h>
38#include <scsi/scsi.h> 39#include <scsi/scsi.h>
39#include <scsi/scsi_tcq.h> 40#include <scsi/scsi_tcq.h>
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 3f105fdcf239..51d92b196ee7 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -590,7 +590,7 @@ zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
590 */ 590 */
591int 591int
592zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, 592zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
593 struct scatterlist *sg, int sg_count, int max_sbals) 593 struct scatterlist *sgl, int sg_count, int max_sbals)
594{ 594{
595 int sg_index; 595 int sg_index;
596 struct scatterlist *sg_segment; 596 struct scatterlist *sg_segment;
@@ -606,9 +606,7 @@ zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
606 sbale->flags |= sbtype; 606 sbale->flags |= sbtype;
607 607
608 /* process all segments of scatter-gather list */ 608 /* process all segments of scatter-gather list */
609 for (sg_index = 0, sg_segment = sg, bytes = 0; 609 for_each_sg(sgl, sg_segment, sg_count, sg_index) {
610 sg_index < sg_count;
611 sg_index++, sg_segment++) {
612 retval = zfcp_qdio_sbals_from_segment( 610 retval = zfcp_qdio_sbals_from_segment(
613 fsf_req, 611 fsf_req,
614 sbtype, 612 sbtype,
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index efd9d8d3a890..fb14014ee16e 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1990,6 +1990,7 @@ static struct scsi_host_template driver_template = {
1990 .max_sectors = TW_MAX_SECTORS, 1990 .max_sectors = TW_MAX_SECTORS,
1991 .cmd_per_lun = TW_MAX_CMDS_PER_LUN, 1991 .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
1992 .use_clustering = ENABLE_CLUSTERING, 1992 .use_clustering = ENABLE_CLUSTERING,
1993 .use_sg_chaining = ENABLE_SG_CHAINING,
1993 .shost_attrs = twa_host_attrs, 1994 .shost_attrs = twa_host_attrs,
1994 .emulated = 1 1995 .emulated = 1
1995}; 1996};
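The long run of SCSI host-template hunks that follows is mechanical: each driver audited for chained-sg safety opts in with use_sg_chaining, and scsi_host_alloc() copies the flag into the Scsi_Host (see the hosts.c hunk further down). A minimal illustrative template; every field here is a placeholder except the new flag:

#include <linux/module.h>
#include <scsi/scsi_host.h>

static struct scsi_host_template example_template = {
        .module          = THIS_MODULE,
        .name            = "example",
        .sg_tablesize    = SG_ALL,
        .use_clustering  = ENABLE_CLUSTERING,
        .use_sg_chaining = ENABLE_SG_CHAINING,  /* opt in to chained sg */
};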
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index c7995fc216e8..a64153b96034 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -2261,6 +2261,7 @@ static struct scsi_host_template driver_template = {
2261 .max_sectors = TW_MAX_SECTORS, 2261 .max_sectors = TW_MAX_SECTORS,
2262 .cmd_per_lun = TW_MAX_CMDS_PER_LUN, 2262 .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
2263 .use_clustering = ENABLE_CLUSTERING, 2263 .use_clustering = ENABLE_CLUSTERING,
2264 .use_sg_chaining = ENABLE_SG_CHAINING,
2264 .shost_attrs = tw_host_attrs, 2265 .shost_attrs = tw_host_attrs,
2265 .emulated = 1 2266 .emulated = 1
2266}; 2267};
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 9b206176f717..49e1ffa4b2ff 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -3575,6 +3575,7 @@ static struct scsi_host_template Bus_Logic_template = {
3575 .unchecked_isa_dma = 1, 3575 .unchecked_isa_dma = 1,
3576 .max_sectors = 128, 3576 .max_sectors = 128,
3577 .use_clustering = ENABLE_CLUSTERING, 3577 .use_clustering = ENABLE_CLUSTERING,
3578 .use_sg_chaining = ENABLE_SG_CHAINING,
3578}; 3579};
3579 3580
3580/* 3581/*
diff --git a/drivers/scsi/NCR53c406a.c b/drivers/scsi/NCR53c406a.c
index eda8c48f6be7..3168a1794849 100644
--- a/drivers/scsi/NCR53c406a.c
+++ b/drivers/scsi/NCR53c406a.c
@@ -1066,7 +1066,8 @@ static struct scsi_host_template driver_template =
1066 .sg_tablesize = 32 /*SG_ALL*/ /*SG_NONE*/, 1066 .sg_tablesize = 32 /*SG_ALL*/ /*SG_NONE*/,
1067 .cmd_per_lun = 1 /* commands per lun */, 1067 .cmd_per_lun = 1 /* commands per lun */,
1068 .unchecked_isa_dma = 1 /* unchecked_isa_dma */, 1068 .unchecked_isa_dma = 1 /* unchecked_isa_dma */,
1069 .use_clustering = ENABLE_CLUSTERING 1069 .use_clustering = ENABLE_CLUSTERING,
1070 .use_sg_chaining = ENABLE_SG_CHAINING,
1070}; 1071};
1071 1072
1072#include "scsi_module.c" 1073#include "scsi_module.c"
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index f608d4a1d6da..d3a6d15fb77a 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -1071,6 +1071,7 @@ static struct scsi_host_template inia100_template = {
1071 .sg_tablesize = SG_ALL, 1071 .sg_tablesize = SG_ALL,
1072 .cmd_per_lun = 1, 1072 .cmd_per_lun = 1,
1073 .use_clustering = ENABLE_CLUSTERING, 1073 .use_clustering = ENABLE_CLUSTERING,
1074 .use_sg_chaining = ENABLE_SG_CHAINING,
1074}; 1075};
1075 1076
1076static int __devinit inia100_probe_one(struct pci_dev *pdev, 1077static int __devinit inia100_probe_one(struct pci_dev *pdev,
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index a7f42a17b5c7..038980be763d 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -944,6 +944,7 @@ static struct scsi_host_template aac_driver_template = {
944 .cmd_per_lun = AAC_NUM_IO_FIB, 944 .cmd_per_lun = AAC_NUM_IO_FIB,
945#endif 945#endif
946 .use_clustering = ENABLE_CLUSTERING, 946 .use_clustering = ENABLE_CLUSTERING,
947 .use_sg_chaining = ENABLE_SG_CHAINING,
947 .emulated = 1, 948 .emulated = 1,
948}; 949};
949 950
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index cbbfbc9f3e0f..961a1882cb7e 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -61,15 +61,15 @@ static void BAD_DMA(void *address, unsigned int length)
61} 61}
62 62
63static void BAD_SG_DMA(Scsi_Cmnd * SCpnt, 63static void BAD_SG_DMA(Scsi_Cmnd * SCpnt,
64 struct scatterlist *sgpnt, 64 struct scatterlist *sgp,
65 int nseg, 65 int nseg,
66 int badseg) 66 int badseg)
67{ 67{
68 printk(KERN_CRIT "sgpnt[%d:%d] page %p/0x%llx length %u\n", 68 printk(KERN_CRIT "sgpnt[%d:%d] page %p/0x%llx length %u\n",
69 badseg, nseg, 69 badseg, nseg,
70 page_address(sgpnt[badseg].page) + sgpnt[badseg].offset, 70 page_address(sgp->page) + sgp->offset,
71 (unsigned long long)SCSI_SG_PA(&sgpnt[badseg]), 71 (unsigned long long)SCSI_SG_PA(sgp),
72 sgpnt[badseg].length); 72 sgp->length);
73 73
74 /* 74 /*
75 * Not safe to continue. 75 * Not safe to continue.
@@ -691,7 +691,7 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
691 memcpy(ccb[mbo].cdb, cmd, ccb[mbo].cdblen); 691 memcpy(ccb[mbo].cdb, cmd, ccb[mbo].cdblen);
692 692
693 if (SCpnt->use_sg) { 693 if (SCpnt->use_sg) {
694 struct scatterlist *sgpnt; 694 struct scatterlist *sg;
695 struct chain *cptr; 695 struct chain *cptr;
696#ifdef DEBUG 696#ifdef DEBUG
697 unsigned char *ptr; 697 unsigned char *ptr;
@@ -699,23 +699,21 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
699 int i; 699 int i;
700 ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */ 700 ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */
701 SCpnt->host_scribble = kmalloc(512, GFP_KERNEL | GFP_DMA); 701 SCpnt->host_scribble = kmalloc(512, GFP_KERNEL | GFP_DMA);
702 sgpnt = (struct scatterlist *) SCpnt->request_buffer;
703 cptr = (struct chain *) SCpnt->host_scribble; 702 cptr = (struct chain *) SCpnt->host_scribble;
704 if (cptr == NULL) { 703 if (cptr == NULL) {
705 /* free the claimed mailbox slot */ 704 /* free the claimed mailbox slot */
706 HOSTDATA(SCpnt->device->host)->SCint[mbo] = NULL; 705 HOSTDATA(SCpnt->device->host)->SCint[mbo] = NULL;
707 return SCSI_MLQUEUE_HOST_BUSY; 706 return SCSI_MLQUEUE_HOST_BUSY;
708 } 707 }
709 for (i = 0; i < SCpnt->use_sg; i++) { 708 scsi_for_each_sg(SCpnt, sg, SCpnt->use_sg, i) {
710 if (sgpnt[i].length == 0 || SCpnt->use_sg > 16 || 709 if (sg->length == 0 || SCpnt->use_sg > 16 ||
711 (((int) sgpnt[i].offset) & 1) || (sgpnt[i].length & 1)) { 710 (((int) sg->offset) & 1) || (sg->length & 1)) {
712 unsigned char *ptr; 711 unsigned char *ptr;
713 printk(KERN_CRIT "Bad segment list supplied to aha1542.c (%d, %d)\n", SCpnt->use_sg, i); 712 printk(KERN_CRIT "Bad segment list supplied to aha1542.c (%d, %d)\n", SCpnt->use_sg, i);
714 for (i = 0; i < SCpnt->use_sg; i++) { 713 scsi_for_each_sg(SCpnt, sg, SCpnt->use_sg, i) {
715 printk(KERN_CRIT "%d: %p %d\n", i, 714 printk(KERN_CRIT "%d: %p %d\n", i,
716 (page_address(sgpnt[i].page) + 715 (page_address(sg->page) +
717 sgpnt[i].offset), 716 sg->offset), sg->length);
718 sgpnt[i].length);
719 }; 717 };
720 printk(KERN_CRIT "cptr %x: ", (unsigned int) cptr); 718 printk(KERN_CRIT "cptr %x: ", (unsigned int) cptr);
721 ptr = (unsigned char *) &cptr[i]; 719 ptr = (unsigned char *) &cptr[i];
@@ -723,10 +721,10 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
723 printk("%02x ", ptr[i]); 721 printk("%02x ", ptr[i]);
724 panic("Foooooooood fight!"); 722 panic("Foooooooood fight!");
725 }; 723 };
726 any2scsi(cptr[i].dataptr, SCSI_SG_PA(&sgpnt[i])); 724 any2scsi(cptr[i].dataptr, SCSI_SG_PA(sg));
727 if (SCSI_SG_PA(&sgpnt[i]) + sgpnt[i].length - 1 > ISA_DMA_THRESHOLD) 725 if (SCSI_SG_PA(sg) + sg->length - 1 > ISA_DMA_THRESHOLD)
728 BAD_SG_DMA(SCpnt, sgpnt, SCpnt->use_sg, i); 726 BAD_SG_DMA(SCpnt, sg, SCpnt->use_sg, i);
729 any2scsi(cptr[i].datalen, sgpnt[i].length); 727 any2scsi(cptr[i].datalen, sg->length);
730 }; 728 };
731 any2scsi(ccb[mbo].datalen, SCpnt->use_sg * sizeof(struct chain)); 729 any2scsi(ccb[mbo].datalen, SCpnt->use_sg * sizeof(struct chain));
732 any2scsi(ccb[mbo].dataptr, SCSI_BUF_PA(cptr)); 730 any2scsi(ccb[mbo].dataptr, SCSI_BUF_PA(cptr));
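aha1542 also picks up scsi_for_each_sg(), the SCSI-level wrapper that walks a command's scatterlist by element count instead of pointer arithmetic over the request buffer. A small sketch of its use:

#include <linux/kernel.h>
#include <scsi/scsi_cmnd.h>

static void dump_cmd_sg(struct scsi_cmnd *cmd)
{
        struct scatterlist *sg;
        int i;

        scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i)
                printk(KERN_DEBUG "seg %d: page %p off %u len %u\n",
                       i, sg->page, sg->offset, sg->length);
}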
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index e4a4f3a965d9..f6722fd46008 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -563,6 +563,7 @@ static struct scsi_host_template aha1740_template = {
563 .sg_tablesize = AHA1740_SCATTER, 563 .sg_tablesize = AHA1740_SCATTER,
564 .cmd_per_lun = AHA1740_CMDLUN, 564 .cmd_per_lun = AHA1740_CMDLUN,
565 .use_clustering = ENABLE_CLUSTERING, 565 .use_clustering = ENABLE_CLUSTERING,
566 .use_sg_chaining = ENABLE_SG_CHAINING,
566 .eh_abort_handler = aha1740_eh_abort_handler, 567 .eh_abort_handler = aha1740_eh_abort_handler,
567}; 568};
568 569
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index a055a96e3ad3..42c0f14a262c 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -766,6 +766,7 @@ struct scsi_host_template aic79xx_driver_template = {
766 .max_sectors = 8192, 766 .max_sectors = 8192,
767 .cmd_per_lun = 2, 767 .cmd_per_lun = 2,
768 .use_clustering = ENABLE_CLUSTERING, 768 .use_clustering = ENABLE_CLUSTERING,
769 .use_sg_chaining = ENABLE_SG_CHAINING,
769 .slave_alloc = ahd_linux_slave_alloc, 770 .slave_alloc = ahd_linux_slave_alloc,
770 .slave_configure = ahd_linux_slave_configure, 771 .slave_configure = ahd_linux_slave_configure,
771 .target_alloc = ahd_linux_target_alloc, 772 .target_alloc = ahd_linux_target_alloc,
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 2e9c38f2e8a6..7770befbf50c 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -747,6 +747,7 @@ struct scsi_host_template aic7xxx_driver_template = {
747 .max_sectors = 8192, 747 .max_sectors = 8192,
748 .cmd_per_lun = 2, 748 .cmd_per_lun = 2,
749 .use_clustering = ENABLE_CLUSTERING, 749 .use_clustering = ENABLE_CLUSTERING,
750 .use_sg_chaining = ENABLE_SG_CHAINING,
750 .slave_alloc = ahc_linux_slave_alloc, 751 .slave_alloc = ahc_linux_slave_alloc,
751 .slave_configure = ahc_linux_slave_configure, 752 .slave_configure = ahc_linux_slave_configure,
752 .target_alloc = ahc_linux_target_alloc, 753 .target_alloc = ahc_linux_target_alloc,
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c
index 1a71b0236c97..4025608d6964 100644
--- a/drivers/scsi/aic7xxx_old.c
+++ b/drivers/scsi/aic7xxx_old.c
@@ -11142,6 +11142,7 @@ static struct scsi_host_template driver_template = {
11142 .max_sectors = 2048, 11142 .max_sectors = 2048,
11143 .cmd_per_lun = 3, 11143 .cmd_per_lun = 3,
11144 .use_clustering = ENABLE_CLUSTERING, 11144 .use_clustering = ENABLE_CLUSTERING,
11145 .use_sg_chaining = ENABLE_SG_CHAINING,
11145}; 11146};
11146 11147
11147#include "scsi_module.c" 11148#include "scsi_module.c"
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index f2b23e01401a..ee0a98bffcd4 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -94,7 +94,7 @@ static inline int asd_map_scatterlist(struct sas_task *task,
94 res = -ENOMEM; 94 res = -ENOMEM;
95 goto err_unmap; 95 goto err_unmap;
96 } 96 }
97 for (sc = task->scatter, i = 0; i < num_sg; i++, sc++) { 97 for_each_sg(task->scatter, sc, num_sg, i) {
98 struct sg_el *sg = 98 struct sg_el *sg =
99 &((struct sg_el *)ascb->sg_arr->vaddr)[i]; 99 &((struct sg_el *)ascb->sg_arr->vaddr)[i];
100 sg->bus_addr = cpu_to_le64((u64)sg_dma_address(sc)); 100 sg->bus_addr = cpu_to_le64((u64)sg_dma_address(sc));
@@ -103,7 +103,7 @@ static inline int asd_map_scatterlist(struct sas_task *task,
103 sg->flags |= ASD_SG_EL_LIST_EOL; 103 sg->flags |= ASD_SG_EL_LIST_EOL;
104 } 104 }
105 105
106 for (sc = task->scatter, i = 0; i < 2; i++, sc++) { 106 for_each_sg(task->scatter, sc, 2, i) {
107 sg_arr[i].bus_addr = 107 sg_arr[i].bus_addr =
108 cpu_to_le64((u64)sg_dma_address(sc)); 108 cpu_to_le64((u64)sg_dma_address(sc));
109 sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc)); 109 sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
@@ -115,7 +115,7 @@ static inline int asd_map_scatterlist(struct sas_task *task,
115 sg_arr[2].bus_addr=cpu_to_le64((u64)ascb->sg_arr->dma_handle); 115 sg_arr[2].bus_addr=cpu_to_le64((u64)ascb->sg_arr->dma_handle);
116 } else { 116 } else {
117 int i; 117 int i;
118 for (sc = task->scatter, i = 0; i < num_sg; i++, sc++) { 118 for_each_sg(task->scatter, sc, num_sg, i) {
119 sg_arr[i].bus_addr = 119 sg_arr[i].bus_addr =
120 cpu_to_le64((u64)sg_dma_address(sc)); 120 cpu_to_le64((u64)sg_dma_address(sc));
121 sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc)); 121 sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index cfcf40159eab..f81777586b8f 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -122,6 +122,7 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
122 .max_sectors = ARCMSR_MAX_XFER_SECTORS, 122 .max_sectors = ARCMSR_MAX_XFER_SECTORS,
123 .cmd_per_lun = ARCMSR_MAX_CMD_PERLUN, 123 .cmd_per_lun = ARCMSR_MAX_CMD_PERLUN,
124 .use_clustering = ENABLE_CLUSTERING, 124 .use_clustering = ENABLE_CLUSTERING,
125 .use_sg_chaining = ENABLE_SG_CHAINING,
125 .shost_attrs = arcmsr_host_attrs, 126 .shost_attrs = arcmsr_host_attrs,
126}; 127};
127#ifdef CONFIG_SCSI_ARCMSR_AER 128#ifdef CONFIG_SCSI_ARCMSR_AER
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 1591824cf4b3..fd42d4789202 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -4765,6 +4765,7 @@ static struct scsi_host_template dc395x_driver_template = {
4765 .eh_bus_reset_handler = dc395x_eh_bus_reset, 4765 .eh_bus_reset_handler = dc395x_eh_bus_reset,
4766 .unchecked_isa_dma = 0, 4766 .unchecked_isa_dma = 0,
4767 .use_clustering = DISABLE_CLUSTERING, 4767 .use_clustering = DISABLE_CLUSTERING,
4768 .use_sg_chaining = ENABLE_SG_CHAINING,
4768}; 4769};
4769 4770
4770 4771
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index bea9d659af15..8258506ba7d7 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -3295,6 +3295,7 @@ static struct scsi_host_template adpt_template = {
3295 .this_id = 7, 3295 .this_id = 7,
3296 .cmd_per_lun = 1, 3296 .cmd_per_lun = 1,
3297 .use_clustering = ENABLE_CLUSTERING, 3297 .use_clustering = ENABLE_CLUSTERING,
3298 .use_sg_chaining = ENABLE_SG_CHAINING,
3298}; 3299};
3299 3300
3300static s32 adpt_scsi_register(adpt_hba* pHba) 3301static s32 adpt_scsi_register(adpt_hba* pHba)
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index ec2233114bc9..7ead5210de96 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -523,7 +523,8 @@ static struct scsi_host_template driver_template = {
523 .slave_configure = eata2x_slave_configure, 523 .slave_configure = eata2x_slave_configure,
524 .this_id = 7, 524 .this_id = 7,
525 .unchecked_isa_dma = 1, 525 .unchecked_isa_dma = 1,
526 .use_clustering = ENABLE_CLUSTERING 526 .use_clustering = ENABLE_CLUSTERING,
527 .use_sg_chaining = ENABLE_SG_CHAINING,
527}; 528};
528 529
529#if !defined(__BIG_ENDIAN_BITFIELD) && !defined(__LITTLE_ENDIAN_BITFIELD) 530#if !defined(__BIG_ENDIAN_BITFIELD) && !defined(__LITTLE_ENDIAN_BITFIELD)
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index adc9559cb6f4..112ab6abe62b 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -343,6 +343,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
343 shost->use_clustering = sht->use_clustering; 343 shost->use_clustering = sht->use_clustering;
344 shost->ordered_tag = sht->ordered_tag; 344 shost->ordered_tag = sht->ordered_tag;
345 shost->active_mode = sht->supported_mode; 345 shost->active_mode = sht->supported_mode;
346 shost->use_sg_chaining = sht->use_sg_chaining;
346 347
347 if (sht->max_host_blocked) 348 if (sht->max_host_blocked)
348 shost->max_host_blocked = sht->max_host_blocked; 349 shost->max_host_blocked = sht->max_host_blocked;
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 8b384fa7f048..8515054cdf70 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -655,6 +655,7 @@ static struct scsi_host_template driver_template = {
655 .unchecked_isa_dma = 0, 655 .unchecked_isa_dma = 0,
656 .emulated = 0, 656 .emulated = 0,
657 .use_clustering = ENABLE_CLUSTERING, 657 .use_clustering = ENABLE_CLUSTERING,
658 .use_sg_chaining = ENABLE_SG_CHAINING,
658 .proc_name = driver_name, 659 .proc_name = driver_name,
659 .shost_attrs = hptiop_attrs, 660 .shost_attrs = hptiop_attrs,
660 .this_id = -1, 661 .this_id = -1,
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 1a924e9b0271..714e6273a70d 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -1501,6 +1501,7 @@ static struct scsi_host_template ibmmca_driver_template = {
1501 .sg_tablesize = 16, 1501 .sg_tablesize = 16,
1502 .cmd_per_lun = 1, 1502 .cmd_per_lun = 1,
1503 .use_clustering = ENABLE_CLUSTERING, 1503 .use_clustering = ENABLE_CLUSTERING,
1504 .use_sg_chaining = ENABLE_SG_CHAINING,
1504}; 1505};
1505 1506
1506static int ibmmca_probe(struct device *dev) 1507static int ibmmca_probe(struct device *dev)
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index cda0cc3d182f..22d91ee173c5 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1548,6 +1548,7 @@ static struct scsi_host_template driver_template = {
1548 .this_id = -1, 1548 .this_id = -1,
1549 .sg_tablesize = SG_ALL, 1549 .sg_tablesize = SG_ALL,
1550 .use_clustering = ENABLE_CLUSTERING, 1550 .use_clustering = ENABLE_CLUSTERING,
1551 .use_sg_chaining = ENABLE_SG_CHAINING,
1551 .shost_attrs = ibmvscsi_attrs, 1552 .shost_attrs = ibmvscsi_attrs,
1552}; 1553};
1553 1554
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index d81bb076a15a..d297f64cd432 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -70,6 +70,7 @@ typedef struct idescsi_pc_s {
70 u8 *buffer; /* Data buffer */ 70 u8 *buffer; /* Data buffer */
71 u8 *current_position; /* Pointer into the above buffer */ 71 u8 *current_position; /* Pointer into the above buffer */
72 struct scatterlist *sg; /* Scatter gather table */ 72 struct scatterlist *sg; /* Scatter gather table */
73 struct scatterlist *last_sg; /* Last sg element */
73 int b_count; /* Bytes transferred from current entry */ 74 int b_count; /* Bytes transferred from current entry */
74 struct scsi_cmnd *scsi_cmd; /* SCSI command */ 75 struct scsi_cmnd *scsi_cmd; /* SCSI command */
75 void (*done)(struct scsi_cmnd *); /* Scsi completion routine */ 76 void (*done)(struct scsi_cmnd *); /* Scsi completion routine */
@@ -173,12 +174,6 @@ static void idescsi_input_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigne
173 char *buf; 174 char *buf;
174 175
175 while (bcount) { 176 while (bcount) {
176 if (pc->sg - scsi_sglist(pc->scsi_cmd) >
177 scsi_sg_count(pc->scsi_cmd)) {
178 printk (KERN_ERR "ide-scsi: scatter gather table too small, discarding data\n");
179 idescsi_discard_data (drive, bcount);
180 return;
181 }
182 count = min(pc->sg->length - pc->b_count, bcount); 177 count = min(pc->sg->length - pc->b_count, bcount);
183 if (PageHighMem(pc->sg->page)) { 178 if (PageHighMem(pc->sg->page)) {
184 unsigned long flags; 179 unsigned long flags;
@@ -197,10 +192,17 @@ static void idescsi_input_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigne
197 } 192 }
198 bcount -= count; pc->b_count += count; 193 bcount -= count; pc->b_count += count;
199 if (pc->b_count == pc->sg->length) { 194 if (pc->b_count == pc->sg->length) {
200 pc->sg++; 195 if (pc->sg == pc->last_sg)
196 break;
197 pc->sg = sg_next(pc->sg);
201 pc->b_count = 0; 198 pc->b_count = 0;
202 } 199 }
203 } 200 }
201
202 if (bcount) {
203 printk (KERN_ERR "ide-scsi: scatter gather table too small, discarding data\n");
204 idescsi_discard_data (drive, bcount);
205 }
204} 206}
205 207
206static void idescsi_output_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigned int bcount) 208static void idescsi_output_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigned int bcount)
@@ -209,12 +211,6 @@ static void idescsi_output_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsign
209 char *buf; 211 char *buf;
210 212
211 while (bcount) { 213 while (bcount) {
212 if (pc->sg - scsi_sglist(pc->scsi_cmd) >
213 scsi_sg_count(pc->scsi_cmd)) {
214 printk (KERN_ERR "ide-scsi: scatter gather table too small, padding with zeros\n");
215 idescsi_output_zeros (drive, bcount);
216 return;
217 }
218 count = min(pc->sg->length - pc->b_count, bcount); 214 count = min(pc->sg->length - pc->b_count, bcount);
219 if (PageHighMem(pc->sg->page)) { 215 if (PageHighMem(pc->sg->page)) {
220 unsigned long flags; 216 unsigned long flags;
@@ -233,10 +229,17 @@ static void idescsi_output_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsign
233 } 229 }
234 bcount -= count; pc->b_count += count; 230 bcount -= count; pc->b_count += count;
235 if (pc->b_count == pc->sg->length) { 231 if (pc->b_count == pc->sg->length) {
236 pc->sg++; 232 if (pc->sg == pc->last_sg)
233 break;
234 pc->sg = sg_next(pc->sg);
237 pc->b_count = 0; 235 pc->b_count = 0;
238 } 236 }
239 } 237 }
238
239 if (bcount) {
240 printk (KERN_ERR "ide-scsi: scatter gather table too small, padding with zeros\n");
241 idescsi_output_zeros (drive, bcount);
242 }
240} 243}
241 244
242static void hexdump(u8 *x, int len) 245static void hexdump(u8 *x, int len)
@@ -804,6 +807,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
804 memcpy (pc->c, cmd->cmnd, cmd->cmd_len); 807 memcpy (pc->c, cmd->cmnd, cmd->cmd_len);
805 pc->buffer = NULL; 808 pc->buffer = NULL;
806 pc->sg = scsi_sglist(cmd); 809 pc->sg = scsi_sglist(cmd);
810 pc->last_sg = sg_last(pc->sg, cmd->use_sg);
807 pc->b_count = 0; 811 pc->b_count = 0;
808 pc->request_transfer = pc->buffer_size = scsi_bufflen(cmd); 812 pc->request_transfer = pc->buffer_size = scsi_bufflen(cmd);
809 pc->scsi_cmd = cmd; 813 pc->scsi_cmd = cmd;
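ide-scsi can no longer bound its walk with pointer subtraction (pc->sg - scsi_sglist(...)) across a chained table, so it caches sg_last() when the command is queued and stops when the cursor reaches it; only then does it discard or zero-pad whatever bytes remain. A sketch of that bounded-walk shape, with hypothetical helper names:

#include <linux/kernel.h>
#include <linux/scatterlist.h>

/* Consume up to 'bcount' bytes without walking past 'last'.
 * Returns the bytes left over for the caller to pad or discard. */
static unsigned int consume_sg(struct scatterlist **sgp, unsigned int *b_count,
                               struct scatterlist *last, unsigned int bcount)
{
        struct scatterlist *sg = *sgp;

        while (bcount) {
                unsigned int count = min(sg->length - *b_count, bcount);

                /* ... move 'count' bytes within this segment ... */
                bcount -= count;
                *b_count += count;

                if (*b_count == sg->length) {
                        if (sg == last)
                                break;          /* table exhausted */
                        sg = sg_next(sg);
                        *b_count = 0;
                }
        }
        *sgp = sg;
        return bcount;
}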
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index d9dfb69ae031..22d40fd5845b 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2831,6 +2831,7 @@ static struct scsi_host_template initio_template = {
2831 .sg_tablesize = SG_ALL, 2831 .sg_tablesize = SG_ALL,
2832 .cmd_per_lun = 1, 2832 .cmd_per_lun = 1,
2833 .use_clustering = ENABLE_CLUSTERING, 2833 .use_clustering = ENABLE_CLUSTERING,
2834 .use_sg_chaining = ENABLE_SG_CHAINING,
2834}; 2835};
2835 2836
2836static int initio_probe_one(struct pci_dev *pdev, 2837static int initio_probe_one(struct pci_dev *pdev,
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 2ed099e2c20d..edaac2714c5a 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -3252,7 +3252,7 @@ ips_done(ips_ha_t * ha, ips_scb_t * scb)
3252 */ 3252 */
3253 if ((scb->breakup) || (scb->sg_break)) { 3253 if ((scb->breakup) || (scb->sg_break)) {
3254 struct scatterlist *sg; 3254 struct scatterlist *sg;
3255 int sg_dma_index, ips_sg_index = 0; 3255 int i, sg_dma_index, ips_sg_index = 0;
3256 3256
3257 /* we had a data breakup */ 3257 /* we had a data breakup */
3258 scb->data_len = 0; 3258 scb->data_len = 0;
@@ -3261,20 +3261,22 @@ ips_done(ips_ha_t * ha, ips_scb_t * scb)
3261 3261
3262 /* Spin forward to last dma chunk */ 3262 /* Spin forward to last dma chunk */
3263 sg_dma_index = scb->breakup; 3263 sg_dma_index = scb->breakup;
3264 for (i = 0; i < scb->breakup; i++)
3265 sg = sg_next(sg);
3264 3266
3265 /* Take care of possible partial on last chunk */ 3267 /* Take care of possible partial on last chunk */
3266 ips_fill_scb_sg_single(ha, 3268 ips_fill_scb_sg_single(ha,
3267 sg_dma_address(&sg[sg_dma_index]), 3269 sg_dma_address(sg),
3268 scb, ips_sg_index++, 3270 scb, ips_sg_index++,
3269 sg_dma_len(&sg[sg_dma_index])); 3271 sg_dma_len(sg));
3270 3272
3271 for (; sg_dma_index < scsi_sg_count(scb->scsi_cmd); 3273 for (; sg_dma_index < scsi_sg_count(scb->scsi_cmd);
3272 sg_dma_index++) { 3274 sg_dma_index++, sg = sg_next(sg)) {
3273 if (ips_fill_scb_sg_single 3275 if (ips_fill_scb_sg_single
3274 (ha, 3276 (ha,
3275 sg_dma_address(&sg[sg_dma_index]), 3277 sg_dma_address(sg),
3276 scb, ips_sg_index++, 3278 scb, ips_sg_index++,
3277 sg_dma_len(&sg[sg_dma_index])) < 0) 3279 sg_dma_len(sg)) < 0)
3278 break; 3280 break;
3279 } 3281 }
3280 3282
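
The ips.c hunk above drops array indexing (&sg[sg_dma_index]) in favour
of stepping with sg_next(): once a table may be chained, &sg[i] can run
past the end of one chunk and land on the chain-link entry instead of
the i-th segment. The safe equivalent of indexing, sketched:

	/* Hypothetical helper: advance idx entries on a possibly
	 * chained scatterlist, the safe replacement for &sg[idx]. */
	static struct scatterlist *sg_advance(struct scatterlist *sg, int idx)
	{
		while (idx--)
			sg = sg_next(sg);
		return sg;
	}
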
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index cd674938ccd5..c0755565fae9 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1438,6 +1438,7 @@ struct scsi_host_template lpfc_template = {
1438 .scan_finished = lpfc_scan_finished, 1438 .scan_finished = lpfc_scan_finished,
1439 .this_id = -1, 1439 .this_id = -1,
1440 .sg_tablesize = LPFC_SG_SEG_CNT, 1440 .sg_tablesize = LPFC_SG_SEG_CNT,
1441 .use_sg_chaining = ENABLE_SG_CHAINING,
1441 .cmd_per_lun = LPFC_CMD_PER_LUN, 1442 .cmd_per_lun = LPFC_CMD_PER_LUN,
1442 .use_clustering = ENABLE_CLUSTERING, 1443 .use_clustering = ENABLE_CLUSTERING,
1443 .shost_attrs = lpfc_hba_attrs, 1444 .shost_attrs = lpfc_hba_attrs,
@@ -1460,6 +1461,7 @@ struct scsi_host_template lpfc_vport_template = {
1460 .sg_tablesize = LPFC_SG_SEG_CNT, 1461 .sg_tablesize = LPFC_SG_SEG_CNT,
1461 .cmd_per_lun = LPFC_CMD_PER_LUN, 1462 .cmd_per_lun = LPFC_CMD_PER_LUN,
1462 .use_clustering = ENABLE_CLUSTERING, 1463 .use_clustering = ENABLE_CLUSTERING,
1464 .use_sg_chaining = ENABLE_SG_CHAINING,
1463 .shost_attrs = lpfc_vport_attrs, 1465 .shost_attrs = lpfc_vport_attrs,
1464 .max_sectors = 0xFFFF, 1466 .max_sectors = 0xFFFF,
1465}; 1467};
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index b12ad7c7c673..a035001f4438 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -402,6 +402,7 @@ static struct scsi_host_template mac53c94_template = {
402 .sg_tablesize = SG_ALL, 402 .sg_tablesize = SG_ALL,
403 .cmd_per_lun = 1, 403 .cmd_per_lun = 1,
404 .use_clustering = DISABLE_CLUSTERING, 404 .use_clustering = DISABLE_CLUSTERING,
405 .use_sg_chaining = ENABLE_SG_CHAINING,
405}; 406};
406 407
407static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *match) 408static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *match)
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index e7e11f282c8f..10d1aff9938a 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4492,6 +4492,7 @@ static struct scsi_host_template megaraid_template = {
4492 .sg_tablesize = MAX_SGLIST, 4492 .sg_tablesize = MAX_SGLIST,
4493 .cmd_per_lun = DEF_CMD_PER_LUN, 4493 .cmd_per_lun = DEF_CMD_PER_LUN,
4494 .use_clustering = ENABLE_CLUSTERING, 4494 .use_clustering = ENABLE_CLUSTERING,
4495 .use_sg_chaining = ENABLE_SG_CHAINING,
4495 .eh_abort_handler = megaraid_abort, 4496 .eh_abort_handler = megaraid_abort,
4496 .eh_device_reset_handler = megaraid_reset, 4497 .eh_device_reset_handler = megaraid_reset,
4497 .eh_bus_reset_handler = megaraid_reset, 4498 .eh_bus_reset_handler = megaraid_reset,
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index c6a53dccc16a..e4e4c6a39ed6 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -361,6 +361,7 @@ static struct scsi_host_template megaraid_template_g = {
361 .eh_host_reset_handler = megaraid_reset_handler, 361 .eh_host_reset_handler = megaraid_reset_handler,
362 .change_queue_depth = megaraid_change_queue_depth, 362 .change_queue_depth = megaraid_change_queue_depth,
363 .use_clustering = ENABLE_CLUSTERING, 363 .use_clustering = ENABLE_CLUSTERING,
364 .use_sg_chaining = ENABLE_SG_CHAINING,
364 .sdev_attrs = megaraid_sdev_attrs, 365 .sdev_attrs = megaraid_sdev_attrs,
365 .shost_attrs = megaraid_shost_attrs, 366 .shost_attrs = megaraid_shost_attrs,
366}; 367};
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index ebb948c016bb..e3c5c5282203 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -1110,6 +1110,7 @@ static struct scsi_host_template megasas_template = {
1110 .eh_timed_out = megasas_reset_timer, 1110 .eh_timed_out = megasas_reset_timer,
1111 .bios_param = megasas_bios_param, 1111 .bios_param = megasas_bios_param,
1112 .use_clustering = ENABLE_CLUSTERING, 1112 .use_clustering = ENABLE_CLUSTERING,
1113 .use_sg_chaining = ENABLE_SG_CHAINING,
1113}; 1114};
1114 1115
1115/** 1116/**
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 651d09b08f2a..7470ff39ab22 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1843,6 +1843,7 @@ static struct scsi_host_template mesh_template = {
1843 .sg_tablesize = SG_ALL, 1843 .sg_tablesize = SG_ALL,
1844 .cmd_per_lun = 2, 1844 .cmd_per_lun = 2,
1845 .use_clustering = DISABLE_CLUSTERING, 1845 .use_clustering = DISABLE_CLUSTERING,
1846 .use_sg_chaining = ENABLE_SG_CHAINING,
1846}; 1847};
1847 1848
1848static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match) 1849static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 7fed35372150..28161dc95e0d 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -281,6 +281,7 @@ static struct scsi_host_template nsp32_template = {
281 .cmd_per_lun = 1, 281 .cmd_per_lun = 1,
282 .this_id = NSP32_HOST_SCSIID, 282 .this_id = NSP32_HOST_SCSIID,
283 .use_clustering = DISABLE_CLUSTERING, 283 .use_clustering = DISABLE_CLUSTERING,
284 .use_sg_chaining = ENABLE_SG_CHAINING,
284 .eh_abort_handler = nsp32_eh_abort, 285 .eh_abort_handler = nsp32_eh_abort,
285 .eh_bus_reset_handler = nsp32_eh_bus_reset, 286 .eh_bus_reset_handler = nsp32_eh_bus_reset,
286 .eh_host_reset_handler = nsp32_eh_host_reset, 287 .eh_host_reset_handler = nsp32_eh_host_reset,
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index 961839ecfe86..190e2a7d7067 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -694,6 +694,7 @@ static struct scsi_host_template sym53c500_driver_template = {
694 .sg_tablesize = 32, 694 .sg_tablesize = 32,
695 .cmd_per_lun = 1, 695 .cmd_per_lun = 1,
696 .use_clustering = ENABLE_CLUSTERING, 696 .use_clustering = ENABLE_CLUSTERING,
697 .use_sg_chaining = ENABLE_SG_CHAINING,
697 .shost_attrs = SYM53C500_shost_attrs 698 .shost_attrs = SYM53C500_shost_attrs
698}; 699};
699 700
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index fba8aa8a81b5..76089cf55f4e 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -2775,7 +2775,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2775 struct device_reg __iomem *reg = ha->iobase; 2775 struct device_reg __iomem *reg = ha->iobase;
2776 struct scsi_cmnd *cmd = sp->cmd; 2776 struct scsi_cmnd *cmd = sp->cmd;
2777 cmd_a64_entry_t *pkt; 2777 cmd_a64_entry_t *pkt;
2778 struct scatterlist *sg = NULL; 2778 struct scatterlist *sg = NULL, *s;
2779 __le32 *dword_ptr; 2779 __le32 *dword_ptr;
2780 dma_addr_t dma_handle; 2780 dma_addr_t dma_handle;
2781 int status = 0; 2781 int status = 0;
@@ -2889,13 +2889,16 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2889 * Load data segments. 2889 * Load data segments.
2890 */ 2890 */
2891 if (seg_cnt) { /* If data transfer. */ 2891 if (seg_cnt) { /* If data transfer. */
2892 int remseg = seg_cnt;
2892 /* Setup packet address segment pointer. */ 2893 /* Setup packet address segment pointer. */
2893 dword_ptr = (u32 *)&pkt->dseg_0_address; 2894 dword_ptr = (u32 *)&pkt->dseg_0_address;
2894 2895
2895 if (cmd->use_sg) { /* If scatter gather */ 2896 if (cmd->use_sg) { /* If scatter gather */
2896 /* Load command entry data segments. */ 2897 /* Load command entry data segments. */
2897 for (cnt = 0; cnt < 2 && seg_cnt; cnt++, seg_cnt--) { 2898 for_each_sg(sg, s, seg_cnt, cnt) {
2898 dma_handle = sg_dma_address(sg); 2899 if (cnt == 2)
2900 break;
2901 dma_handle = sg_dma_address(s);
2899#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) 2902#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2900 if (ha->flags.use_pci_vchannel) 2903 if (ha->flags.use_pci_vchannel)
2901 sn_pci_set_vchan(ha->pdev, 2904 sn_pci_set_vchan(ha->pdev,
@@ -2906,12 +2909,12 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2906 cpu_to_le32(pci_dma_lo32(dma_handle)); 2909 cpu_to_le32(pci_dma_lo32(dma_handle));
2907 *dword_ptr++ = 2910 *dword_ptr++ =
2908 cpu_to_le32(pci_dma_hi32(dma_handle)); 2911 cpu_to_le32(pci_dma_hi32(dma_handle));
2909 *dword_ptr++ = cpu_to_le32(sg_dma_len(sg)); 2912 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
2910 sg++;
2911 dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n", 2913 dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
2912 cpu_to_le32(pci_dma_hi32(dma_handle)), 2914 cpu_to_le32(pci_dma_hi32(dma_handle)),
2913 cpu_to_le32(pci_dma_lo32(dma_handle)), 2915 cpu_to_le32(pci_dma_lo32(dma_handle)),
2914 cpu_to_le32(sg_dma_len(sg))); 2916 cpu_to_le32(sg_dma_len(sg_next(s))));
2917 remseg--;
2915 } 2918 }
2916 dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather " 2919 dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
2917 "command packet data - b %i, t %i, l %i \n", 2920 "command packet data - b %i, t %i, l %i \n",
@@ -2926,7 +2929,9 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2926 dprintk(3, "S/G Building Continuation...seg_cnt=0x%x " 2929 dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
2927 "remains\n", seg_cnt); 2930 "remains\n", seg_cnt);
2928 2931
2929 while (seg_cnt > 0) { 2932 while (remseg > 0) {
2933 /* Update sg start */
2934 sg = s;
2930 /* Adjust ring index. */ 2935 /* Adjust ring index. */
2931 ha->req_ring_index++; 2936 ha->req_ring_index++;
2932 if (ha->req_ring_index == REQUEST_ENTRY_CNT) { 2937 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
@@ -2952,9 +2957,10 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2952 (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address; 2957 (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
2953 2958
2954 /* Load continuation entry data segments. */ 2959 /* Load continuation entry data segments. */
2955 for (cnt = 0; cnt < 5 && seg_cnt; 2960 for_each_sg(sg, s, remseg, cnt) {
2956 cnt++, seg_cnt--) { 2961 if (cnt == 5)
2957 dma_handle = sg_dma_address(sg); 2962 break;
2963 dma_handle = sg_dma_address(s);
2958#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) 2964#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2959 if (ha->flags.use_pci_vchannel) 2965 if (ha->flags.use_pci_vchannel)
2960 sn_pci_set_vchan(ha->pdev, 2966 sn_pci_set_vchan(ha->pdev,
@@ -2966,13 +2972,13 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2966 *dword_ptr++ = 2972 *dword_ptr++ =
2967 cpu_to_le32(pci_dma_hi32(dma_handle)); 2973 cpu_to_le32(pci_dma_hi32(dma_handle));
2968 *dword_ptr++ = 2974 *dword_ptr++ =
2969 cpu_to_le32(sg_dma_len(sg)); 2975 cpu_to_le32(sg_dma_len(s));
2970 dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n", 2976 dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
2971 cpu_to_le32(pci_dma_hi32(dma_handle)), 2977 cpu_to_le32(pci_dma_hi32(dma_handle)),
2972 cpu_to_le32(pci_dma_lo32(dma_handle)), 2978 cpu_to_le32(pci_dma_lo32(dma_handle)),
2973 cpu_to_le32(sg_dma_len(sg))); 2979 cpu_to_le32(sg_dma_len(s)));
2974 sg++;
2975 } 2980 }
2981 remseg -= cnt;
2976 dprintk(5, "qla1280_64bit_start_scsi: " 2982 dprintk(5, "qla1280_64bit_start_scsi: "
2977 "continuation packet data - b %i, t " 2983 "continuation packet data - b %i, t "
2978 "%i, l %i \n", SCSI_BUS_32(cmd), 2984 "%i, l %i \n", SCSI_BUS_32(cmd),
@@ -3062,7 +3068,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3062 struct device_reg __iomem *reg = ha->iobase; 3068 struct device_reg __iomem *reg = ha->iobase;
3063 struct scsi_cmnd *cmd = sp->cmd; 3069 struct scsi_cmnd *cmd = sp->cmd;
3064 struct cmd_entry *pkt; 3070 struct cmd_entry *pkt;
3065 struct scatterlist *sg = NULL; 3071 struct scatterlist *sg = NULL, *s;
3066 __le32 *dword_ptr; 3072 __le32 *dword_ptr;
3067 int status = 0; 3073 int status = 0;
3068 int cnt; 3074 int cnt;
@@ -3188,6 +3194,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3188 * Load data segments. 3194 * Load data segments.
3189 */ 3195 */
3190 if (seg_cnt) { 3196 if (seg_cnt) {
3197 int remseg = seg_cnt;
3191 /* Setup packet address segment pointer. */ 3198 /* Setup packet address segment pointer. */
3192 dword_ptr = &pkt->dseg_0_address; 3199 dword_ptr = &pkt->dseg_0_address;
3193 3200
@@ -3196,22 +3203,25 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3196 qla1280_dump_buffer(1, (char *)sg, 4 * 16); 3203 qla1280_dump_buffer(1, (char *)sg, 4 * 16);
3197 3204
3198 /* Load command entry data segments. */ 3205 /* Load command entry data segments. */
3199 for (cnt = 0; cnt < 4 && seg_cnt; cnt++, seg_cnt--) { 3206 for_each_sg(sg, s, seg_cnt, cnt) {
3207 if (cnt == 4)
3208 break;
3200 *dword_ptr++ = 3209 *dword_ptr++ =
3201 cpu_to_le32(pci_dma_lo32(sg_dma_address(sg))); 3210 cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
3202 *dword_ptr++ = 3211 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
3203 cpu_to_le32(sg_dma_len(sg));
3204 dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n", 3212 dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
3205 (pci_dma_lo32(sg_dma_address(sg))), 3213 (pci_dma_lo32(sg_dma_address(s))),
3206 (sg_dma_len(sg))); 3214 (sg_dma_len(s)));
3207 sg++; 3215 remseg--;
3208 } 3216 }
3209 /* 3217 /*
3210 * Build continuation packets. 3218 * Build continuation packets.
3211 */ 3219 */
3212 dprintk(3, "S/G Building Continuation" 3220 dprintk(3, "S/G Building Continuation"
3213 "...seg_cnt=0x%x remains\n", seg_cnt); 3221 "...seg_cnt=0x%x remains\n", seg_cnt);
3214 while (seg_cnt > 0) { 3222 while (remseg > 0) {
3223 /* Continue from end point */
3224 sg = s;
3215 /* Adjust ring index. */ 3225 /* Adjust ring index. */
3216 ha->req_ring_index++; 3226 ha->req_ring_index++;
3217 if (ha->req_ring_index == REQUEST_ENTRY_CNT) { 3227 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
@@ -3239,19 +3249,20 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3239 &((struct cont_entry *) pkt)->dseg_0_address; 3249 &((struct cont_entry *) pkt)->dseg_0_address;
3240 3250
3241 /* Load continuation entry data segments. */ 3251 /* Load continuation entry data segments. */
3242 for (cnt = 0; cnt < 7 && seg_cnt; 3252 for_each_sg(sg, s, remseg, cnt) {
3243 cnt++, seg_cnt--) { 3253 if (cnt == 7)
3254 break;
3244 *dword_ptr++ = 3255 *dword_ptr++ =
3245 cpu_to_le32(pci_dma_lo32(sg_dma_address(sg))); 3256 cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
3246 *dword_ptr++ = 3257 *dword_ptr++ =
3247 cpu_to_le32(sg_dma_len(sg)); 3258 cpu_to_le32(sg_dma_len(s));
3248 dprintk(1, 3259 dprintk(1,
3249 "S/G Segment Cont. phys_addr=0x%x, " 3260 "S/G Segment Cont. phys_addr=0x%x, "
3250 "len=0x%x\n", 3261 "len=0x%x\n",
3251 cpu_to_le32(pci_dma_lo32(sg_dma_address(sg))), 3262 cpu_to_le32(pci_dma_lo32(sg_dma_address(s))),
3252 cpu_to_le32(sg_dma_len(sg))); 3263 cpu_to_le32(sg_dma_len(s)));
3253 sg++;
3254 } 3264 }
3265 remseg -= cnt;
3255 dprintk(5, "qla1280_32bit_start_scsi: " 3266 dprintk(5, "qla1280_32bit_start_scsi: "
3256 "continuation packet data - " 3267 "continuation packet data - "
3257 "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd), 3268 "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd),
@@ -4248,6 +4259,7 @@ static struct scsi_host_template qla1280_driver_template = {
4248 .sg_tablesize = SG_ALL, 4259 .sg_tablesize = SG_ALL,
4249 .cmd_per_lun = 1, 4260 .cmd_per_lun = 1,
4250 .use_clustering = ENABLE_CLUSTERING, 4261 .use_clustering = ENABLE_CLUSTERING,
4262 .use_sg_chaining = ENABLE_SG_CHAINING,
4251}; 4263};
4252 4264
4253 4265
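
The qla1280 hunks convert the segment-loading loops to for_each_sg(),
track the remaining count in remseg, and resume each continuation
packet from the cursor s. The shape of that loop, with load_segment()
and SEGS_PER_PKT standing in for the packet-specific details:

	/* Schematic only: fill one request/continuation packet. */
	static int load_packet(struct scatterlist *sg, int remseg)
	{
		struct scatterlist *s;
		int cnt;

		for_each_sg(sg, s, remseg, cnt) {
			if (cnt == SEGS_PER_PKT)	/* 2, 4, 5 or 7 above */
				break;
			load_segment(sg_dma_address(s), sg_dma_len(s));
		}
		/* caller: remseg -= cnt; sg = s; build next packet */
		return cnt;
	}
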
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index a6bb8d0ecf13..0351d380c2d7 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -132,6 +132,7 @@ struct scsi_host_template qla2x00_driver_template = {
132 .this_id = -1, 132 .this_id = -1,
133 .cmd_per_lun = 3, 133 .cmd_per_lun = 3,
134 .use_clustering = ENABLE_CLUSTERING, 134 .use_clustering = ENABLE_CLUSTERING,
135 .use_sg_chaining = ENABLE_SG_CHAINING,
135 .sg_tablesize = SG_ALL, 136 .sg_tablesize = SG_ALL,
136 137
137 /* 138 /*
@@ -163,6 +164,7 @@ struct scsi_host_template qla24xx_driver_template = {
163 .this_id = -1, 164 .this_id = -1,
164 .cmd_per_lun = 3, 165 .cmd_per_lun = 3,
165 .use_clustering = ENABLE_CLUSTERING, 166 .use_clustering = ENABLE_CLUSTERING,
167 .use_sg_chaining = ENABLE_SG_CHAINING,
166 .sg_tablesize = SG_ALL, 168 .sg_tablesize = SG_ALL,
167 169
168 .max_sectors = 0xFFFF, 170 .max_sectors = 0xFFFF,
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index b1d565c12c5b..03b68d4f3bd0 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -94,6 +94,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
94 .this_id = -1, 94 .this_id = -1,
95 .cmd_per_lun = 3, 95 .cmd_per_lun = 3,
96 .use_clustering = ENABLE_CLUSTERING, 96 .use_clustering = ENABLE_CLUSTERING,
97 .use_sg_chaining = ENABLE_SG_CHAINING,
97 .sg_tablesize = SG_ALL, 98 .sg_tablesize = SG_ALL,
98 99
99 .max_sectors = 0xFFFF, 100 .max_sectors = 0xFFFF,
diff --git a/drivers/scsi/qlogicfas.c b/drivers/scsi/qlogicfas.c
index 1e874f1fb5c6..1769f965eedf 100644
--- a/drivers/scsi/qlogicfas.c
+++ b/drivers/scsi/qlogicfas.c
@@ -197,6 +197,7 @@ static struct scsi_host_template qlogicfas_driver_template = {
197 .sg_tablesize = SG_ALL, 197 .sg_tablesize = SG_ALL,
198 .cmd_per_lun = 1, 198 .cmd_per_lun = 1,
199 .use_clustering = DISABLE_CLUSTERING, 199 .use_clustering = DISABLE_CLUSTERING,
200 .use_sg_chaining = ENABLE_SG_CHAINING,
200}; 201};
201 202
202static __init int qlogicfas_init(void) 203static __init int qlogicfas_init(void)
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index e93f80316a19..7a2e7986b038 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -868,7 +868,7 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
868 struct qlogicpti *qpti, u_int in_ptr, u_int out_ptr) 868 struct qlogicpti *qpti, u_int in_ptr, u_int out_ptr)
869{ 869{
870 struct dataseg *ds; 870 struct dataseg *ds;
871 struct scatterlist *sg; 871 struct scatterlist *sg, *s;
872 int i, n; 872 int i, n;
873 873
874 if (Cmnd->use_sg) { 874 if (Cmnd->use_sg) {
@@ -884,11 +884,12 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
884 n = sg_count; 884 n = sg_count;
885 if (n > 4) 885 if (n > 4)
886 n = 4; 886 n = 4;
887 for (i = 0; i < n; i++, sg++) { 887 for_each_sg(sg, s, n, i) {
888 ds[i].d_base = sg_dma_address(sg); 888 ds[i].d_base = sg_dma_address(s);
889 ds[i].d_count = sg_dma_len(sg); 889 ds[i].d_count = sg_dma_len(s);
890 } 890 }
891 sg_count -= 4; 891 sg_count -= 4;
892 sg = s;
892 while (sg_count > 0) { 893 while (sg_count > 0) {
893 struct Continuation_Entry *cont; 894 struct Continuation_Entry *cont;
894 895
@@ -907,9 +908,9 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
907 n = sg_count; 908 n = sg_count;
908 if (n > 7) 909 if (n > 7)
909 n = 7; 910 n = 7;
910 for (i = 0; i < n; i++, sg++) { 911 for_each_sg(sg, s, n, i) {
911 ds[i].d_base = sg_dma_address(sg); 912 ds[i].d_base = sg_dma_address(s);
912 ds[i].d_count = sg_dma_len(sg); 913 ds[i].d_count = sg_dma_len(s);
913 } 914 }
914 sg_count -= n; 915 sg_count -= n;
915 } 916 }
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 4947dfe625a6..72ee4c9cfb1a 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -38,6 +38,7 @@
38#include <linux/proc_fs.h> 38#include <linux/proc_fs.h>
39#include <linux/vmalloc.h> 39#include <linux/vmalloc.h>
40#include <linux/moduleparam.h> 40#include <linux/moduleparam.h>
41#include <linux/scatterlist.h>
41 42
42#include <linux/blkdev.h> 43#include <linux/blkdev.h>
43#include "scsi.h" 44#include "scsi.h"
@@ -600,7 +601,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
600 int k, req_len, act_len, len, active; 601 int k, req_len, act_len, len, active;
601 void * kaddr; 602 void * kaddr;
602 void * kaddr_off; 603 void * kaddr_off;
603 struct scatterlist * sgpnt; 604 struct scatterlist * sg;
604 605
605 if (0 == scp->request_bufflen) 606 if (0 == scp->request_bufflen)
606 return 0; 607 return 0;
@@ -619,16 +620,16 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
619 scp->resid = req_len - act_len; 620 scp->resid = req_len - act_len;
620 return 0; 621 return 0;
621 } 622 }
622 sgpnt = (struct scatterlist *)scp->request_buffer;
623 active = 1; 623 active = 1;
624 for (k = 0, req_len = 0, act_len = 0; k < scp->use_sg; ++k, ++sgpnt) { 624 req_len = act_len = 0;
625 scsi_for_each_sg(scp, sg, scp->use_sg, k) {
625 if (active) { 626 if (active) {
626 kaddr = (unsigned char *) 627 kaddr = (unsigned char *)
627 kmap_atomic(sgpnt->page, KM_USER0); 628 kmap_atomic(sg->page, KM_USER0);
628 if (NULL == kaddr) 629 if (NULL == kaddr)
629 return (DID_ERROR << 16); 630 return (DID_ERROR << 16);
630 kaddr_off = (unsigned char *)kaddr + sgpnt->offset; 631 kaddr_off = (unsigned char *)kaddr + sg->offset;
631 len = sgpnt->length; 632 len = sg->length;
632 if ((req_len + len) > arr_len) { 633 if ((req_len + len) > arr_len) {
633 active = 0; 634 active = 0;
634 len = arr_len - req_len; 635 len = arr_len - req_len;
@@ -637,7 +638,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
637 kunmap_atomic(kaddr, KM_USER0); 638 kunmap_atomic(kaddr, KM_USER0);
638 act_len += len; 639 act_len += len;
639 } 640 }
640 req_len += sgpnt->length; 641 req_len += sg->length;
641 } 642 }
642 if (scp->resid) 643 if (scp->resid)
643 scp->resid -= act_len; 644 scp->resid -= act_len;
@@ -653,7 +654,7 @@ static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
653 int k, req_len, len, fin; 654 int k, req_len, len, fin;
654 void * kaddr; 655 void * kaddr;
655 void * kaddr_off; 656 void * kaddr_off;
656 struct scatterlist * sgpnt; 657 struct scatterlist * sg;
657 658
658 if (0 == scp->request_bufflen) 659 if (0 == scp->request_bufflen)
659 return 0; 660 return 0;
@@ -668,13 +669,14 @@ static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
668 memcpy(arr, scp->request_buffer, len); 669 memcpy(arr, scp->request_buffer, len);
669 return len; 670 return len;
670 } 671 }
671 sgpnt = (struct scatterlist *)scp->request_buffer; 672 sg = scsi_sglist(scp);
672 for (k = 0, req_len = 0, fin = 0; k < scp->use_sg; ++k, ++sgpnt) { 673 req_len = fin = 0;
673 kaddr = (unsigned char *)kmap_atomic(sgpnt->page, KM_USER0); 674 for (k = 0; k < scp->use_sg; ++k, sg = sg_next(sg)) {
675 kaddr = (unsigned char *)kmap_atomic(sg->page, KM_USER0);
674 if (NULL == kaddr) 676 if (NULL == kaddr)
675 return -1; 677 return -1;
676 kaddr_off = (unsigned char *)kaddr + sgpnt->offset; 678 kaddr_off = (unsigned char *)kaddr + sg->offset;
677 len = sgpnt->length; 679 len = sg->length;
678 if ((req_len + len) > max_arr_len) { 680 if ((req_len + len) > max_arr_len) {
679 len = max_arr_len - req_len; 681 len = max_arr_len - req_len;
680 fin = 1; 682 fin = 1;
@@ -683,7 +685,7 @@ static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
683 kunmap_atomic(kaddr, KM_USER0); 685 kunmap_atomic(kaddr, KM_USER0);
684 if (fin) 686 if (fin)
685 return req_len + len; 687 return req_len + len;
686 req_len += sgpnt->length; 688 req_len += sg->length;
687 } 689 }
688 return req_len; 690 return req_len;
689} 691}
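
scsi_debug.c moves from manual sgpnt++ walking to scsi_for_each_sg(),
which iterates the command's scatterlist with sg_next() underneath. The
kmap-and-copy pattern it wraps, roughly (the length/bounds bookkeeping
of the real functions is elided):

	static void copy_from_sglist(struct scsi_cmnd *scp, unsigned char *arr)
	{
		struct scatterlist *sg;
		unsigned char *kaddr;
		int k, done = 0;

		scsi_for_each_sg(scp, sg, scp->use_sg, k) {
			kaddr = kmap_atomic(sg->page, KM_USER0);
			memcpy(arr + done, kaddr + sg->offset, sg->length);
			kunmap_atomic(kaddr, KM_USER0);
			done += sg->length;
		}
	}
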
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 207f1aa08869..aac8a02cbe80 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -17,6 +17,7 @@
17#include <linux/pci.h> 17#include <linux/pci.h>
18#include <linux/delay.h> 18#include <linux/delay.h>
19#include <linux/hardirq.h> 19#include <linux/hardirq.h>
20#include <linux/scatterlist.h>
20 21
21#include <scsi/scsi.h> 22#include <scsi/scsi.h>
22#include <scsi/scsi_cmnd.h> 23#include <scsi/scsi_cmnd.h>
@@ -33,35 +34,34 @@
33#define SG_MEMPOOL_NR ARRAY_SIZE(scsi_sg_pools) 34#define SG_MEMPOOL_NR ARRAY_SIZE(scsi_sg_pools)
34#define SG_MEMPOOL_SIZE 2 35#define SG_MEMPOOL_SIZE 2
35 36
37/*
38 * The maximum number of SG segments that we will put inside a scatterlist
39 * (unless chaining is used). Should ideally fit inside a single page, to
40 * avoid a higher order allocation.
41 */
42#define SCSI_MAX_SG_SEGMENTS 128
43
36struct scsi_host_sg_pool { 44struct scsi_host_sg_pool {
37 size_t size; 45 size_t size;
38 char *name; 46 char *name;
39 struct kmem_cache *slab; 47 struct kmem_cache *slab;
40 mempool_t *pool; 48 mempool_t *pool;
41}; 49};
42 50
43#if (SCSI_MAX_PHYS_SEGMENTS < 32) 51#define SP(x) { x, "sgpool-" #x }
44#error SCSI_MAX_PHYS_SEGMENTS is too small
45#endif
46
47#define SP(x) { x, "sgpool-" #x }
48static struct scsi_host_sg_pool scsi_sg_pools[] = { 52static struct scsi_host_sg_pool scsi_sg_pools[] = {
49 SP(8), 53 SP(8),
50 SP(16), 54 SP(16),
55#if (SCSI_MAX_SG_SEGMENTS > 16)
51 SP(32), 56 SP(32),
52#if (SCSI_MAX_PHYS_SEGMENTS > 32) 57#if (SCSI_MAX_SG_SEGMENTS > 32)
53 SP(64), 58 SP(64),
54#if (SCSI_MAX_PHYS_SEGMENTS > 64) 59#if (SCSI_MAX_SG_SEGMENTS > 64)
55 SP(128), 60 SP(128),
56#if (SCSI_MAX_PHYS_SEGMENTS > 128)
57 SP(256),
58#if (SCSI_MAX_PHYS_SEGMENTS > 256)
59#error SCSI_MAX_PHYS_SEGMENTS is too large
60#endif
61#endif 61#endif
62#endif 62#endif
63#endif 63#endif
64}; 64};
65#undef SP 65#undef SP
66 66
67static void scsi_run_queue(struct request_queue *q); 67static void scsi_run_queue(struct request_queue *q);
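
The pool table now tops out at SCSI_MAX_SG_SEGMENTS (128) rather than
SCSI_MAX_PHYS_SEGMENTS (up to 256): each chunk should fit in a single
page so the mempools never need a higher-order allocation. A rough
sanity check, assuming 4 KiB pages and the 32-byte scatterlist entry of
64-bit builds:

	BUILD_BUG_ON(SCSI_MAX_SG_SEGMENTS * sizeof(struct scatterlist)
			> PAGE_SIZE);	/* 128 * 32 == 4096 */
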
@@ -289,14 +289,16 @@ static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
289 struct request_queue *q = rq->q; 289 struct request_queue *q = rq->q;
290 int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT; 290 int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
291 unsigned int data_len = bufflen, len, bytes, off; 291 unsigned int data_len = bufflen, len, bytes, off;
292 struct scatterlist *sg;
292 struct page *page; 293 struct page *page;
293 struct bio *bio = NULL; 294 struct bio *bio = NULL;
294 int i, err, nr_vecs = 0; 295 int i, err, nr_vecs = 0;
295 296
296 for (i = 0; i < nsegs; i++) { 297 for_each_sg(sgl, sg, nsegs, i) {
297 page = sgl[i].page; 298 page = sg->page;
298 off = sgl[i].offset; 299 off = sg->offset;
299 len = sgl[i].length; 300 len = sg->length;
301 data_len += len;
300 302
301 while (len > 0 && data_len > 0) { 303 while (len > 0 && data_len > 0) {
302 /* 304 /*
@@ -695,56 +697,170 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
695 return NULL; 697 return NULL;
696} 698}
697 699
698struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask) 700/*
699{ 701 * Like SCSI_MAX_SG_SEGMENTS, but for archs that have sg chaining. This limit
700 struct scsi_host_sg_pool *sgp; 702 * is totally arbitrary, a setting of 2048 will get you at least 8mb ios.
701 struct scatterlist *sgl; 703 */
704#define SCSI_MAX_SG_CHAIN_SEGMENTS 2048
702 705
703 BUG_ON(!cmd->use_sg); 706static inline unsigned int scsi_sgtable_index(unsigned short nents)
707{
708 unsigned int index;
704 709
705 switch (cmd->use_sg) { 710 switch (nents) {
706 case 1 ... 8: 711 case 1 ... 8:
707 cmd->sglist_len = 0; 712 index = 0;
708 break; 713 break;
709 case 9 ... 16: 714 case 9 ... 16:
710 cmd->sglist_len = 1; 715 index = 1;
711 break; 716 break;
717#if (SCSI_MAX_SG_SEGMENTS > 16)
712 case 17 ... 32: 718 case 17 ... 32:
713 cmd->sglist_len = 2; 719 index = 2;
714 break; 720 break;
715#if (SCSI_MAX_PHYS_SEGMENTS > 32) 721#if (SCSI_MAX_SG_SEGMENTS > 32)
716 case 33 ... 64: 722 case 33 ... 64:
717 cmd->sglist_len = 3; 723 index = 3;
718 break; 724 break;
719#if (SCSI_MAX_PHYS_SEGMENTS > 64) 725#if (SCSI_MAX_SG_SEGMENTS > 64)
720 case 65 ... 128: 726 case 65 ... 128:
721 cmd->sglist_len = 4; 727 index = 4;
722 break;
723#if (SCSI_MAX_PHYS_SEGMENTS > 128)
724 case 129 ... 256:
725 cmd->sglist_len = 5;
726 break; 728 break;
727#endif 729#endif
728#endif 730#endif
729#endif 731#endif
730 default: 732 default:
731 return NULL; 733 printk(KERN_ERR "scsi: bad segment count=%d\n", nents);
734 BUG();
732 } 735 }
733 736
734 sgp = scsi_sg_pools + cmd->sglist_len; 737 return index;
735 sgl = mempool_alloc(sgp->pool, gfp_mask); 738}
736 return sgl; 739
740struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
741{
742 struct scsi_host_sg_pool *sgp;
743 struct scatterlist *sgl, *prev, *ret;
744 unsigned int index;
745 int this, left;
746
747 BUG_ON(!cmd->use_sg);
748
749 left = cmd->use_sg;
750 ret = prev = NULL;
751 do {
752 this = left;
753 if (this > SCSI_MAX_SG_SEGMENTS) {
754 this = SCSI_MAX_SG_SEGMENTS - 1;
755 index = SG_MEMPOOL_NR - 1;
756 } else
757 index = scsi_sgtable_index(this);
758
759 left -= this;
760
761 sgp = scsi_sg_pools + index;
762
763 sgl = mempool_alloc(sgp->pool, gfp_mask);
764 if (unlikely(!sgl))
765 goto enomem;
766
767 memset(sgl, 0, sizeof(*sgl) * sgp->size);
768
769 /*
770 * first loop through, set initial index and return value
771 */
772 if (!ret)
773 ret = sgl;
774
775 /*
776 * chain previous sglist, if any. we know the previous
777 * sglist must be the biggest one, or we would not have
778 * ended up doing another loop.
779 */
780 if (prev)
781 sg_chain(prev, SCSI_MAX_SG_SEGMENTS, sgl);
782
783 /*
784 * don't allow subsequent mempool allocs to sleep, it would
785 * violate the mempool principle.
786 */
787 gfp_mask &= ~__GFP_WAIT;
788 gfp_mask |= __GFP_HIGH;
789 prev = sgl;
790 } while (left);
791
792 /*
793 * ->use_sg may get modified after dma mapping has potentially
794 * shrunk the number of segments, so keep a copy of it for free.
795 */
796 cmd->__use_sg = cmd->use_sg;
797 return ret;
798enomem:
799 if (ret) {
800 /*
801 * Free entries chained off ret. Since we were trying to
802 * allocate another sglist, we know that all entries are of
803 * the max size.
804 */
805 sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
806 prev = ret;
807 ret = &ret[SCSI_MAX_SG_SEGMENTS - 1];
808
809 while ((sgl = sg_chain_ptr(ret)) != NULL) {
810 ret = &sgl[SCSI_MAX_SG_SEGMENTS - 1];
811 mempool_free(sgl, sgp->pool);
812 }
813
814 mempool_free(prev, sgp->pool);
815 }
816 return NULL;
737} 817}
738 818
739EXPORT_SYMBOL(scsi_alloc_sgtable); 819EXPORT_SYMBOL(scsi_alloc_sgtable);
740 820
741void scsi_free_sgtable(struct scatterlist *sgl, int index) 821void scsi_free_sgtable(struct scsi_cmnd *cmd)
742{ 822{
823 struct scatterlist *sgl = cmd->request_buffer;
743 struct scsi_host_sg_pool *sgp; 824 struct scsi_host_sg_pool *sgp;
744 825
745 BUG_ON(index >= SG_MEMPOOL_NR); 826 /*
827 * if this is the biggest size sglist, check if we have
828 * chained parts we need to free
829 */
830 if (cmd->__use_sg > SCSI_MAX_SG_SEGMENTS) {
831 unsigned short this, left;
832 struct scatterlist *next;
833 unsigned int index;
834
835 left = cmd->__use_sg - (SCSI_MAX_SG_SEGMENTS - 1);
836 next = sg_chain_ptr(&sgl[SCSI_MAX_SG_SEGMENTS - 1]);
837 while (left && next) {
838 sgl = next;
839 this = left;
840 if (this > SCSI_MAX_SG_SEGMENTS) {
841 this = SCSI_MAX_SG_SEGMENTS - 1;
842 index = SG_MEMPOOL_NR - 1;
843 } else
844 index = scsi_sgtable_index(this);
845
846 left -= this;
847
848 sgp = scsi_sg_pools + index;
849
850 if (left)
851 next = sg_chain_ptr(&sgl[sgp->size - 1]);
852
853 mempool_free(sgl, sgp->pool);
854 }
855
856 /*
857 * Restore original, will be freed below
858 */
859 sgl = cmd->request_buffer;
860 sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
861 } else
862 sgp = scsi_sg_pools + scsi_sgtable_index(cmd->__use_sg);
746 863
747 sgp = scsi_sg_pools + index;
748 mempool_free(sgl, sgp->pool); 864 mempool_free(sgl, sgp->pool);
749} 865}
750 866
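
scsi_alloc_sgtable() now assembles the table from fixed-size mempool
chunks: sg_chain() turns the last entry of each full chunk into a link,
which is why a full chunk contributes only SCSI_MAX_SG_SEGMENTS - 1
usable segments ("this = SCSI_MAX_SG_SEGMENTS - 1" above). The core
mechanism, as a standalone sketch with a hypothetical alloc_chunk():

	struct scatterlist *a = alloc_chunk();	/* N entries each */
	struct scatterlist *b = alloc_chunk();

	sg_chain(a, N, b);	/* a[N-1] becomes a link pointing at b */

	/* sg_next(&a[N-2]) now yields &b[0]; for_each_sg() crosses the
	 * chunk boundary without the caller noticing. Freeing must walk
	 * the links too, as scsi_free_sgtable() does above. */
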
@@ -770,7 +886,7 @@ EXPORT_SYMBOL(scsi_free_sgtable);
770static void scsi_release_buffers(struct scsi_cmnd *cmd) 886static void scsi_release_buffers(struct scsi_cmnd *cmd)
771{ 887{
772 if (cmd->use_sg) 888 if (cmd->use_sg)
773 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len); 889 scsi_free_sgtable(cmd);
774 890
775 /* 891 /*
776 * Zero these out. They now point to freed memory, and it is 892 * Zero these out. They now point to freed memory, and it is
@@ -984,7 +1100,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
984static int scsi_init_io(struct scsi_cmnd *cmd) 1100static int scsi_init_io(struct scsi_cmnd *cmd)
985{ 1101{
986 struct request *req = cmd->request; 1102 struct request *req = cmd->request;
987 struct scatterlist *sgpnt;
988 int count; 1103 int count;
989 1104
990 /* 1105 /*
@@ -997,14 +1112,13 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
997 /* 1112 /*
998 * If sg table allocation fails, requeue request later. 1113 * If sg table allocation fails, requeue request later.
999 */ 1114 */
1000 sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC); 1115 cmd->request_buffer = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
1001 if (unlikely(!sgpnt)) { 1116 if (unlikely(!cmd->request_buffer)) {
1002 scsi_unprep_request(req); 1117 scsi_unprep_request(req);
1003 return BLKPREP_DEFER; 1118 return BLKPREP_DEFER;
1004 } 1119 }
1005 1120
1006 req->buffer = NULL; 1121 req->buffer = NULL;
1007 cmd->request_buffer = (char *) sgpnt;
1008 if (blk_pc_request(req)) 1122 if (blk_pc_request(req))
1009 cmd->request_bufflen = req->data_len; 1123 cmd->request_bufflen = req->data_len;
1010 else 1124 else
@@ -1529,8 +1643,25 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
1529 if (!q) 1643 if (!q)
1530 return NULL; 1644 return NULL;
1531 1645
1646 /*
1647 * this limit is imposed by hardware restrictions
1648 */
1532 blk_queue_max_hw_segments(q, shost->sg_tablesize); 1649 blk_queue_max_hw_segments(q, shost->sg_tablesize);
1533 blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS); 1650
1651 /*
1652 * In the future, sg chaining support will be mandatory and this
1653 * ifdef can then go away. Right now we don't have all archs
1654 * converted, so better keep it safe.
1655 */
1656#ifdef ARCH_HAS_SG_CHAIN
1657 if (shost->use_sg_chaining)
1658 blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
1659 else
1660 blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS);
1661#else
1662 blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS);
1663#endif
1664
1534 blk_queue_max_sectors(q, shost->max_sectors); 1665 blk_queue_max_sectors(q, shost->max_sectors);
1535 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); 1666 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1536 blk_queue_segment_boundary(q, shost->dma_boundary); 1667 blk_queue_segment_boundary(q, shost->dma_boundary);
@@ -2193,18 +2324,19 @@ EXPORT_SYMBOL_GPL(scsi_target_unblock);
2193 * 2324 *
2194 * Returns virtual address of the start of the mapped page 2325 * Returns virtual address of the start of the mapped page
2195 */ 2326 */
2196void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count, 2327void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
2197 size_t *offset, size_t *len) 2328 size_t *offset, size_t *len)
2198{ 2329{
2199 int i; 2330 int i;
2200 size_t sg_len = 0, len_complete = 0; 2331 size_t sg_len = 0, len_complete = 0;
2332 struct scatterlist *sg;
2201 struct page *page; 2333 struct page *page;
2202 2334
2203 WARN_ON(!irqs_disabled()); 2335 WARN_ON(!irqs_disabled());
2204 2336
2205 for (i = 0; i < sg_count; i++) { 2337 for_each_sg(sgl, sg, sg_count, i) {
2206 len_complete = sg_len; /* Complete sg-entries */ 2338 len_complete = sg_len; /* Complete sg-entries */
2207 sg_len += sg[i].length; 2339 sg_len += sg->length;
2208 if (sg_len > *offset) 2340 if (sg_len > *offset)
2209 break; 2341 break;
2210 } 2342 }
@@ -2218,10 +2350,10 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
2218 } 2350 }
2219 2351
2220 /* Offset starting from the beginning of first page in this sg-entry */ 2352 /* Offset starting from the beginning of first page in this sg-entry */
2221 *offset = *offset - len_complete + sg[i].offset; 2353 *offset = *offset - len_complete + sg->offset;
2222 2354
2223 /* Assumption: contiguous pages can be accessed as "page + i" */ 2355 /* Assumption: contiguous pages can be accessed as "page + i" */
2224 page = nth_page(sg[i].page, (*offset >> PAGE_SHIFT)); 2356 page = nth_page(sg->page, (*offset >> PAGE_SHIFT));
2225 *offset &= ~PAGE_MASK; 2357 *offset &= ~PAGE_MASK;
2226 2358
2227 /* Bytes in this sg-entry from *offset to the end of the page */ 2359 /* Bytes in this sg-entry from *offset to the end of the page */
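
scsi_kmap_atomic_sg() keeps its logic but walks with for_each_sg() and
then dereferences the cursor sg instead of sg[i], since indexing cannot
cross chain links. The offset-location idiom it relies on:

	size_t sg_len = 0;
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, sg_count, i) {
		if (sg_len + sg->length > *offset)
			break;		/* *offset falls inside this entry */
		sg_len += sg->length;
	}
	/* sg now addresses the entry containing *offset */
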
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 66c692ffa305..a91761c3645f 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -332,7 +332,7 @@ static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd)
332 scsi_tgt_uspace_send_status(cmd, tcmd->itn_id, tcmd->tag); 332 scsi_tgt_uspace_send_status(cmd, tcmd->itn_id, tcmd->tag);
333 333
334 if (cmd->request_buffer) 334 if (cmd->request_buffer)
335 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len); 335 scsi_free_sgtable(cmd);
336 336
337 queue_work(scsi_tgtd, &tcmd->work); 337 queue_work(scsi_tgtd, &tcmd->work);
338} 338}
@@ -373,7 +373,7 @@ static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
373 } 373 }
374 374
375 eprintk("cmd %p cnt %d\n", cmd, cmd->use_sg); 375 eprintk("cmd %p cnt %d\n", cmd, cmd->use_sg);
376 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len); 376 scsi_free_sgtable(cmd);
377 return -EINVAL; 377 return -EINVAL;
378} 378}
379 379
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 0a3a528212c2..69f542c4923c 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -826,27 +826,6 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
826 return 0; 826 return 0;
827} 827}
828 828
829static int sd_issue_flush(struct request_queue *q, struct gendisk *disk,
830 sector_t *error_sector)
831{
832 int ret = 0;
833 struct scsi_device *sdp = q->queuedata;
834 struct scsi_disk *sdkp;
835
836 if (sdp->sdev_state != SDEV_RUNNING)
837 return -ENXIO;
838
839 sdkp = scsi_disk_get_from_dev(&sdp->sdev_gendev);
840
841 if (!sdkp)
842 return -ENODEV;
843
844 if (sdkp->WCE)
845 ret = sd_sync_cache(sdkp);
846 scsi_disk_put(sdkp);
847 return ret;
848}
849
850static void sd_prepare_flush(struct request_queue *q, struct request *rq) 829static void sd_prepare_flush(struct request_queue *q, struct request *rq)
851{ 830{
852 memset(rq->cmd, 0, sizeof(rq->cmd)); 831 memset(rq->cmd, 0, sizeof(rq->cmd));
@@ -1697,7 +1676,6 @@ static int sd_probe(struct device *dev)
1697 sd_revalidate_disk(gd); 1676 sd_revalidate_disk(gd);
1698 1677
1699 blk_queue_prep_rq(sdp->request_queue, sd_prep_fn); 1678 blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
1700 blk_queue_issue_flush_fn(sdp->request_queue, sd_issue_flush);
1701 1679
1702 gd->driverfs_dev = &sdp->sdev_gendev; 1680 gd->driverfs_dev = &sdp->sdev_gendev;
1703 gd->flags = GENHD_FL_DRIVERFS; 1681 gd->flags = GENHD_FL_DRIVERFS;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index f6f5fc7d0cee..7238b2dfc497 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1165,7 +1165,7 @@ sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
1165 sg = rsv_schp->buffer; 1165 sg = rsv_schp->buffer;
1166 sa = vma->vm_start; 1166 sa = vma->vm_start;
1167 for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end); 1167 for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
1168 ++k, ++sg) { 1168 ++k, sg = sg_next(sg)) {
1169 len = vma->vm_end - sa; 1169 len = vma->vm_end - sa;
1170 len = (len < sg->length) ? len : sg->length; 1170 len = (len < sg->length) ? len : sg->length;
1171 if (offset < len) { 1171 if (offset < len) {
@@ -1209,7 +1209,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
1209 sa = vma->vm_start; 1209 sa = vma->vm_start;
1210 sg = rsv_schp->buffer; 1210 sg = rsv_schp->buffer;
1211 for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end); 1211 for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
1212 ++k, ++sg) { 1212 ++k, sg = sg_next(sg)) {
1213 len = vma->vm_end - sa; 1213 len = vma->vm_end - sa;
1214 len = (len < sg->length) ? len : sg->length; 1214 len = (len < sg->length) ? len : sg->length;
1215 sa += len; 1215 sa += len;
@@ -1840,7 +1840,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1840 } 1840 }
1841 for (k = 0, sg = schp->buffer, rem_sz = blk_size; 1841 for (k = 0, sg = schp->buffer, rem_sz = blk_size;
1842 (rem_sz > 0) && (k < mx_sc_elems); 1842 (rem_sz > 0) && (k < mx_sc_elems);
1843 ++k, rem_sz -= ret_sz, ++sg) { 1843 ++k, rem_sz -= ret_sz, sg = sg_next(sg)) {
1844 1844
1845 num = (rem_sz > scatter_elem_sz_prev) ? 1845 num = (rem_sz > scatter_elem_sz_prev) ?
1846 scatter_elem_sz_prev : rem_sz; 1846 scatter_elem_sz_prev : rem_sz;
@@ -1913,7 +1913,7 @@ sg_write_xfer(Sg_request * srp)
1913 if (res) 1913 if (res)
1914 return res; 1914 return res;
1915 1915
1916 for (; p; ++sg, ksglen = sg->length, 1916 for (; p; sg = sg_next(sg), ksglen = sg->length,
1917 p = page_address(sg->page)) { 1917 p = page_address(sg->page)) {
1918 if (usglen <= 0) 1918 if (usglen <= 0)
1919 break; 1919 break;
@@ -1992,7 +1992,7 @@ sg_remove_scat(Sg_scatter_hold * schp)
1992 int k; 1992 int k;
1993 1993
1994 for (k = 0; (k < schp->k_use_sg) && sg->page; 1994 for (k = 0; (k < schp->k_use_sg) && sg->page;
1995 ++k, ++sg) { 1995 ++k, sg = sg_next(sg)) {
1996 SCSI_LOG_TIMEOUT(5, printk( 1996 SCSI_LOG_TIMEOUT(5, printk(
1997 "sg_remove_scat: k=%d, pg=0x%p, len=%d\n", 1997 "sg_remove_scat: k=%d, pg=0x%p, len=%d\n",
1998 k, sg->page, sg->length)); 1998 k, sg->page, sg->length));
@@ -2045,7 +2045,7 @@ sg_read_xfer(Sg_request * srp)
2045 if (res) 2045 if (res)
2046 return res; 2046 return res;
2047 2047
2048 for (; p; ++sg, ksglen = sg->length, 2048 for (; p; sg = sg_next(sg), ksglen = sg->length,
2049 p = page_address(sg->page)) { 2049 p = page_address(sg->page)) {
2050 if (usglen <= 0) 2050 if (usglen <= 0)
2051 break; 2051 break;
@@ -2092,7 +2092,7 @@ sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
2092 if ((!outp) || (num_read_xfer <= 0)) 2092 if ((!outp) || (num_read_xfer <= 0))
2093 return 0; 2093 return 0;
2094 2094
2095 for (k = 0; (k < schp->k_use_sg) && sg->page; ++k, ++sg) { 2095 for (k = 0; (k < schp->k_use_sg) && sg->page; ++k, sg = sg_next(sg)) {
2096 num = sg->length; 2096 num = sg->length;
2097 if (num > num_read_xfer) { 2097 if (num > num_read_xfer) {
2098 if (__copy_to_user(outp, page_address(sg->page), 2098 if (__copy_to_user(outp, page_address(sg->page),
@@ -2142,7 +2142,7 @@ sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
2142 SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size)); 2142 SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
2143 rem = size; 2143 rem = size;
2144 2144
2145 for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sg) { 2145 for (k = 0; k < rsv_schp->k_use_sg; ++k, sg = sg_next(sg)) {
2146 num = sg->length; 2146 num = sg->length;
2147 if (rem <= num) { 2147 if (rem <= num) {
2148 sfp->save_scat_len = num; 2148 sfp->save_scat_len = num;
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 72f6d8015358..e3fab3a6aed7 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -1123,6 +1123,7 @@ static struct scsi_host_template driver_template = {
1123 .this_id = -1, 1123 .this_id = -1,
1124 .sg_tablesize = ST_MAX_SG, 1124 .sg_tablesize = ST_MAX_SG,
1125 .cmd_per_lun = ST_CMD_PER_LUN, 1125 .cmd_per_lun = ST_CMD_PER_LUN,
1126 .use_sg_chaining = ENABLE_SG_CHAINING,
1126}; 1127};
1127 1128
1128static int stex_set_dma_mask(struct pci_dev * pdev) 1129static int stex_set_dma_mask(struct pci_dev * pdev)
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c
index 92bfaeafe30d..8befab7e9839 100644
--- a/drivers/scsi/sym53c416.c
+++ b/drivers/scsi/sym53c416.c
@@ -854,5 +854,6 @@ static struct scsi_host_template driver_template = {
854 .cmd_per_lun = 1, 854 .cmd_per_lun = 1,
855 .unchecked_isa_dma = 1, 855 .unchecked_isa_dma = 1,
856 .use_clustering = ENABLE_CLUSTERING, 856 .use_clustering = ENABLE_CLUSTERING,
857 .use_sg_chaining = ENABLE_SG_CHAINING,
857}; 858};
858#include "scsi_module.c" 859#include "scsi_module.c"
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 3db22325ea2c..db03c4c8ec1e 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -1808,6 +1808,7 @@ static struct scsi_host_template sym2_template = {
1808 .eh_host_reset_handler = sym53c8xx_eh_host_reset_handler, 1808 .eh_host_reset_handler = sym53c8xx_eh_host_reset_handler,
1809 .this_id = 7, 1809 .this_id = 7,
1810 .use_clustering = ENABLE_CLUSTERING, 1810 .use_clustering = ENABLE_CLUSTERING,
1811 .use_sg_chaining = ENABLE_SG_CHAINING,
1811 .max_sectors = 0xFFFF, 1812 .max_sectors = 0xFFFF,
1812#ifdef SYM_LINUX_PROC_INFO_SUPPORT 1813#ifdef SYM_LINUX_PROC_INFO_SUPPORT
1813 .proc_info = sym53c8xx_proc_info, 1814 .proc_info = sym53c8xx_proc_info,
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index fc9f51818e8f..7edd6ceb13b2 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -450,7 +450,8 @@ static struct scsi_host_template driver_template = {
450 .slave_configure = u14_34f_slave_configure, 450 .slave_configure = u14_34f_slave_configure,
451 .this_id = 7, 451 .this_id = 7,
452 .unchecked_isa_dma = 1, 452 .unchecked_isa_dma = 1,
453 .use_clustering = ENABLE_CLUSTERING 453 .use_clustering = ENABLE_CLUSTERING,
454 .use_sg_chaining = ENABLE_SG_CHAINING,
454 }; 455 };
455 456
456#if !defined(__BIG_ENDIAN_BITFIELD) && !defined(__LITTLE_ENDIAN_BITFIELD) 457#if !defined(__BIG_ENDIAN_BITFIELD) && !defined(__LITTLE_ENDIAN_BITFIELD)
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
index c08235d5afc9..ea72bbeb8f9d 100644
--- a/drivers/scsi/ultrastor.c
+++ b/drivers/scsi/ultrastor.c
@@ -1197,5 +1197,6 @@ static struct scsi_host_template driver_template = {
1197 .cmd_per_lun = ULTRASTOR_MAX_CMDS_PER_LUN, 1197 .cmd_per_lun = ULTRASTOR_MAX_CMDS_PER_LUN,
1198 .unchecked_isa_dma = 1, 1198 .unchecked_isa_dma = 1,
1199 .use_clustering = ENABLE_CLUSTERING, 1199 .use_clustering = ENABLE_CLUSTERING,
1200 .use_sg_chaining = ENABLE_SG_CHAINING,
1200}; 1201};
1201#include "scsi_module.c" 1202#include "scsi_module.c"
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index d6fd4259c56b..255c611e78b8 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -1671,6 +1671,7 @@ static struct scsi_host_template driver_template = {
1671 .cmd_per_lun = 1, 1671 .cmd_per_lun = 1,
1672 .unchecked_isa_dma = 1, 1672 .unchecked_isa_dma = 1,
1673 .use_clustering = ENABLE_CLUSTERING, 1673 .use_clustering = ENABLE_CLUSTERING,
1674 .use_sg_chaining = ENABLE_SG_CHAINING,
1674}; 1675};
1675 1676
1676#include "scsi_module.c" 1677#include "scsi_module.c"
diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
index 4d3cbb12b713..8d3711a7ff06 100644
--- a/drivers/usb/storage/alauda.c
+++ b/drivers/usb/storage/alauda.c
@@ -798,12 +798,13 @@ static int alauda_read_data(struct us_data *us, unsigned long address,
798{ 798{
799 unsigned char *buffer; 799 unsigned char *buffer;
800 u16 lba, max_lba; 800 u16 lba, max_lba;
801 unsigned int page, len, index, offset; 801 unsigned int page, len, offset;
802 unsigned int blockshift = MEDIA_INFO(us).blockshift; 802 unsigned int blockshift = MEDIA_INFO(us).blockshift;
803 unsigned int pageshift = MEDIA_INFO(us).pageshift; 803 unsigned int pageshift = MEDIA_INFO(us).pageshift;
804 unsigned int blocksize = MEDIA_INFO(us).blocksize; 804 unsigned int blocksize = MEDIA_INFO(us).blocksize;
805 unsigned int pagesize = MEDIA_INFO(us).pagesize; 805 unsigned int pagesize = MEDIA_INFO(us).pagesize;
806 unsigned int uzonesize = MEDIA_INFO(us).uzonesize; 806 unsigned int uzonesize = MEDIA_INFO(us).uzonesize;
807 struct scatterlist *sg;
807 int result; 808 int result;
808 809
809 /* 810 /*
@@ -827,7 +828,8 @@ static int alauda_read_data(struct us_data *us, unsigned long address,
827 max_lba = MEDIA_INFO(us).capacity >> (blockshift + pageshift); 828 max_lba = MEDIA_INFO(us).capacity >> (blockshift + pageshift);
828 829
829 result = USB_STOR_TRANSPORT_GOOD; 830 result = USB_STOR_TRANSPORT_GOOD;
830 index = offset = 0; 831 offset = 0;
832 sg = NULL;
831 833
832 while (sectors > 0) { 834 while (sectors > 0) {
833 unsigned int zone = lba / uzonesize; /* integer division */ 835 unsigned int zone = lba / uzonesize; /* integer division */
@@ -873,7 +875,7 @@ static int alauda_read_data(struct us_data *us, unsigned long address,
873 875
874 /* Store the data in the transfer buffer */ 876 /* Store the data in the transfer buffer */
875 usb_stor_access_xfer_buf(buffer, len, us->srb, 877 usb_stor_access_xfer_buf(buffer, len, us->srb,
876 &index, &offset, TO_XFER_BUF); 878 &sg, &offset, TO_XFER_BUF);
877 879
878 page = 0; 880 page = 0;
879 lba++; 881 lba++;
@@ -891,11 +893,12 @@ static int alauda_write_data(struct us_data *us, unsigned long address,
891 unsigned int sectors) 893 unsigned int sectors)
892{ 894{
893 unsigned char *buffer, *blockbuffer; 895 unsigned char *buffer, *blockbuffer;
894 unsigned int page, len, index, offset; 896 unsigned int page, len, offset;
895 unsigned int blockshift = MEDIA_INFO(us).blockshift; 897 unsigned int blockshift = MEDIA_INFO(us).blockshift;
896 unsigned int pageshift = MEDIA_INFO(us).pageshift; 898 unsigned int pageshift = MEDIA_INFO(us).pageshift;
897 unsigned int blocksize = MEDIA_INFO(us).blocksize; 899 unsigned int blocksize = MEDIA_INFO(us).blocksize;
898 unsigned int pagesize = MEDIA_INFO(us).pagesize; 900 unsigned int pagesize = MEDIA_INFO(us).pagesize;
901 struct scatterlist *sg;
899 u16 lba, max_lba; 902 u16 lba, max_lba;
900 int result; 903 int result;
901 904
@@ -929,7 +932,8 @@ static int alauda_write_data(struct us_data *us, unsigned long address,
929 max_lba = MEDIA_INFO(us).capacity >> (pageshift + blockshift); 932 max_lba = MEDIA_INFO(us).capacity >> (pageshift + blockshift);
930 933
931 result = USB_STOR_TRANSPORT_GOOD; 934 result = USB_STOR_TRANSPORT_GOOD;
932 index = offset = 0; 935 offset = 0;
936 sg = NULL;
933 937
934 while (sectors > 0) { 938 while (sectors > 0) {
935 /* Write as many sectors as possible in this block */ 939 /* Write as many sectors as possible in this block */
@@ -946,7 +950,7 @@ static int alauda_write_data(struct us_data *us, unsigned long address,
946 950
947 /* Get the data from the transfer buffer */ 951 /* Get the data from the transfer buffer */
948 usb_stor_access_xfer_buf(buffer, len, us->srb, 952 usb_stor_access_xfer_buf(buffer, len, us->srb,
949 &index, &offset, FROM_XFER_BUF); 953 &sg, &offset, FROM_XFER_BUF);
950 954
951 result = alauda_write_lba(us, lba, page, pages, buffer, 955 result = alauda_write_lba(us, lba, page, pages, buffer,
952 blockbuffer); 956 blockbuffer);
diff --git a/drivers/usb/storage/datafab.c b/drivers/usb/storage/datafab.c
index c87ad1bae1d6..579e9f52053a 100644
--- a/drivers/usb/storage/datafab.c
+++ b/drivers/usb/storage/datafab.c
@@ -98,7 +98,8 @@ static int datafab_read_data(struct us_data *us,
98 unsigned char thistime; 98 unsigned char thistime;
99 unsigned int totallen, alloclen; 99 unsigned int totallen, alloclen;
100 int len, result; 100 int len, result;
101 unsigned int sg_idx = 0, sg_offset = 0; 101 unsigned int sg_offset = 0;
102 struct scatterlist *sg = NULL;
102 103
103 // we're working in LBA mode. according to the ATA spec, 104 // we're working in LBA mode. according to the ATA spec,
104 // we can support up to 28-bit addressing. I don't know if Datafab 105 // we can support up to 28-bit addressing. I don't know if Datafab
@@ -155,7 +156,7 @@ static int datafab_read_data(struct us_data *us,
155 156
156 // Store the data in the transfer buffer 157 // Store the data in the transfer buffer
157 usb_stor_access_xfer_buf(buffer, len, us->srb, 158 usb_stor_access_xfer_buf(buffer, len, us->srb,
158 &sg_idx, &sg_offset, TO_XFER_BUF); 159 &sg, &sg_offset, TO_XFER_BUF);
159 160
160 sector += thistime; 161 sector += thistime;
161 totallen -= len; 162 totallen -= len;
@@ -181,7 +182,8 @@ static int datafab_write_data(struct us_data *us,
181 unsigned char thistime; 182 unsigned char thistime;
182 unsigned int totallen, alloclen; 183 unsigned int totallen, alloclen;
183 int len, result; 184 int len, result;
184 unsigned int sg_idx = 0, sg_offset = 0; 185 unsigned int sg_offset = 0;
186 struct scatterlist *sg = NULL;
185 187
186 // we're working in LBA mode. according to the ATA spec, 188 // we're working in LBA mode. according to the ATA spec,
187 // we can support up to 28-bit addressing. I don't know if Datafab 189 // we can support up to 28-bit addressing. I don't know if Datafab
@@ -217,7 +219,7 @@ static int datafab_write_data(struct us_data *us,
217 219
218 // Get the data from the transfer buffer 220 // Get the data from the transfer buffer
219 usb_stor_access_xfer_buf(buffer, len, us->srb, 221 usb_stor_access_xfer_buf(buffer, len, us->srb,
220 &sg_idx, &sg_offset, FROM_XFER_BUF); 222 &sg, &sg_offset, FROM_XFER_BUF);
221 223
222 command[0] = 0; 224 command[0] = 0;
223 command[1] = thistime; 225 command[1] = thistime;
diff --git a/drivers/usb/storage/jumpshot.c b/drivers/usb/storage/jumpshot.c
index 003fcf545888..61097cbb1585 100644
--- a/drivers/usb/storage/jumpshot.c
+++ b/drivers/usb/storage/jumpshot.c
@@ -119,7 +119,8 @@ static int jumpshot_read_data(struct us_data *us,
119 unsigned char thistime; 119 unsigned char thistime;
120 unsigned int totallen, alloclen; 120 unsigned int totallen, alloclen;
121 int len, result; 121 int len, result;
122 unsigned int sg_idx = 0, sg_offset = 0; 122 unsigned int sg_offset = 0;
123 struct scatterlist *sg = NULL;
123 124
124 // we're working in LBA mode. according to the ATA spec, 125 // we're working in LBA mode. according to the ATA spec,
125 // we can support up to 28-bit addressing. I don't know if Jumpshot 126 // we can support up to 28-bit addressing. I don't know if Jumpshot
@@ -170,7 +171,7 @@ static int jumpshot_read_data(struct us_data *us,
170 171
171 // Store the data in the transfer buffer 172 // Store the data in the transfer buffer
172 usb_stor_access_xfer_buf(buffer, len, us->srb, 173 usb_stor_access_xfer_buf(buffer, len, us->srb,
173 &sg_idx, &sg_offset, TO_XFER_BUF); 174 &sg, &sg_offset, TO_XFER_BUF);
174 175
175 sector += thistime; 176 sector += thistime;
176 totallen -= len; 177 totallen -= len;
@@ -195,7 +196,8 @@ static int jumpshot_write_data(struct us_data *us,
195 unsigned char thistime; 196 unsigned char thistime;
196 unsigned int totallen, alloclen; 197 unsigned int totallen, alloclen;
197 int len, result, waitcount; 198 int len, result, waitcount;
198 unsigned int sg_idx = 0, sg_offset = 0; 199 unsigned int sg_offset = 0;
200 struct scatterlist *sg = NULL;
199 201
200 // we're working in LBA mode. according to the ATA spec, 202 // we're working in LBA mode. according to the ATA spec,
201 // we can support up to 28-bit addressing. I don't know if Jumpshot 203 // we can support up to 28-bit addressing. I don't know if Jumpshot
@@ -225,7 +227,7 @@ static int jumpshot_write_data(struct us_data *us,
225 227
226 // Get the data from the transfer buffer 228 // Get the data from the transfer buffer
227 usb_stor_access_xfer_buf(buffer, len, us->srb, 229 usb_stor_access_xfer_buf(buffer, len, us->srb,
228 &sg_idx, &sg_offset, FROM_XFER_BUF); 230 &sg, &sg_offset, FROM_XFER_BUF);
229 231
230 command[0] = 0; 232 command[0] = 0;
231 command[1] = thistime; 233 command[1] = thistime;
diff --git a/drivers/usb/storage/protocol.c b/drivers/usb/storage/protocol.c
index 9ad30428d2dd..cc8f7c52c729 100644
--- a/drivers/usb/storage/protocol.c
+++ b/drivers/usb/storage/protocol.c
@@ -157,7 +157,7 @@ void usb_stor_transparent_scsi_command(struct scsi_cmnd *srb,
  * pick up from where this one left off. */
 
 unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
-        unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index,
+        unsigned int buflen, struct scsi_cmnd *srb, struct scatterlist **sgptr,
         unsigned int *offset, enum xfer_buf_dir dir)
 {
         unsigned int cnt;
@@ -184,16 +184,17 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
          * located in high memory -- then kmap() will map it to a temporary
          * position in the kernel's virtual address space. */
         } else {
-                struct scatterlist *sg =
-                                (struct scatterlist *) srb->request_buffer
-                                + *index;
+                struct scatterlist *sg = *sgptr;
+
+                if (!sg)
+                        sg = (struct scatterlist *) srb->request_buffer;
 
                 /* This loop handles a single s-g list entry, which may
                  * include multiple pages.  Find the initial page structure
                  * and the starting offset within the page, and update
                  * the *offset and *index values for the next loop. */
                 cnt = 0;
-                while (cnt < buflen && *index < srb->use_sg) {
+                while (cnt < buflen) {
                         struct page *page = sg->page +
                                         ((sg->offset + *offset) >> PAGE_SHIFT);
                         unsigned int poff =
@@ -209,8 +210,7 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
 
                                 /* Transfer continues to next s-g entry */
                                 *offset = 0;
-                                ++*index;
-                                ++sg;
+                                sg = sg_next(sg);
                         }
 
                         /* Transfer the data for all the pages in this
@@ -234,6 +234,7 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
                                 sglen -= plen;
                         }
                 }
+                *sgptr = sg;
         }
 
         /* Return the amount actually transferred */
@@ -245,9 +246,10 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
 void usb_stor_set_xfer_buf(unsigned char *buffer,
         unsigned int buflen, struct scsi_cmnd *srb)
 {
-        unsigned int index = 0, offset = 0;
+        unsigned int offset = 0;
+        struct scatterlist *sg = NULL;
 
-        usb_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
+        usb_stor_access_xfer_buf(buffer, buflen, srb, &sg, &offset,
                         TO_XFER_BUF);
         if (buflen < srb->request_bufflen)
                 srb->resid = srb->request_bufflen - buflen;
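Editor's note: the protocol.c hunks above change the helper's contract. Instead of an integer index into srb->request_buffer, callers now hand in a scatterlist cursor (started at NULL) that the helper advances with sg_next() and stores back through *sgptr. That is what keeps partial transfers resumable once lists can be chained, where "sg + index" pointer arithmetic would walk off the array. A rough userspace model of the cursor pattern follows, with a simplified scatterlist and memcpy standing in for the kmap-based page copy; every name in it is illustrative, not the kernel's:

#include <stdio.h>
#include <string.h>

struct scatterlist {
	unsigned char *buf;	/* stand-in for the page + offset pair */
	unsigned int length;
};

/* Copy buflen bytes into the s-g list, resuming at the (*sgp, *offset)
 * cursor and leaving the cursor ready for the next call. */
static unsigned int access_xfer_buf(const unsigned char *buffer,
				    unsigned int buflen,
				    struct scatterlist *sglist,
				    struct scatterlist **sgp,
				    unsigned int *offset)
{
	struct scatterlist *sg = *sgp;
	unsigned int cnt = 0;

	if (!sg)			/* first call: start of the list */
		sg = sglist;

	while (cnt < buflen) {
		unsigned int n = sg->length - *offset;

		if (n > buflen - cnt)
			n = buflen - cnt;
		memcpy(sg->buf + *offset, buffer + cnt, n);
		cnt += n;
		*offset += n;
		if (*offset == sg->length) {	/* entry exhausted */
			*offset = 0;
			sg++;	/* the kernel uses sg = sg_next(sg) here */
		}
	}
	*sgp = sg;			/* save the cursor for the next call */
	return cnt;
}

int main(void)
{
	unsigned char a[4], b[4];
	struct scatterlist sgl[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
	struct scatterlist *cursor = NULL;
	unsigned int offset = 0;

	/* two partial transfers, the second straddling the first entry */
	access_xfer_buf((const unsigned char *)"abc", 3, sgl, &cursor, &offset);
	access_xfer_buf((const unsigned char *)"defgh", 5, sgl, &cursor, &offset);
	printf("%.4s%.4s\n", a, b);	/* prints "abcdefgh" */
	return 0;
}

Each call picks up exactly where the previous one stopped, which is how the jumpshot/sddr09/sddr55/usbat loops in this patch stream one bounce buffer at a time into a single command's transfer buffer.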
diff --git a/drivers/usb/storage/protocol.h b/drivers/usb/storage/protocol.h
index 845bed4b8031..8737a36891ca 100644
--- a/drivers/usb/storage/protocol.h
+++ b/drivers/usb/storage/protocol.h
@@ -52,7 +52,7 @@ extern void usb_stor_transparent_scsi_command(struct scsi_cmnd*,
 enum xfer_buf_dir	{TO_XFER_BUF, FROM_XFER_BUF};
 
 extern unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
-        unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index,
+        unsigned int buflen, struct scsi_cmnd *srb, struct scatterlist **,
         unsigned int *offset, enum xfer_buf_dir dir);
 
 extern void usb_stor_set_xfer_buf(unsigned char *buffer,
diff --git a/drivers/usb/storage/sddr09.c b/drivers/usb/storage/sddr09.c
index b2ed2a3e6fca..b12202c5da2d 100644
--- a/drivers/usb/storage/sddr09.c
+++ b/drivers/usb/storage/sddr09.c
@@ -705,7 +705,8 @@ sddr09_read_data(struct us_data *us,
         unsigned char *buffer;
         unsigned int lba, maxlba, pba;
         unsigned int page, pages;
-        unsigned int len, index, offset;
+        unsigned int len, offset;
+        struct scatterlist *sg;
         int result;
 
         // Figure out the initial LBA and page
@@ -730,7 +731,8 @@ sddr09_read_data(struct us_data *us,
         // contiguous LBA's. Another exercise left to the student.
 
         result = 0;
-        index = offset = 0;
+        offset = 0;
+        sg = NULL;
 
         while (sectors > 0) {
 
@@ -777,7 +779,7 @@ sddr09_read_data(struct us_data *us,
 
                 // Store the data in the transfer buffer
                 usb_stor_access_xfer_buf(buffer, len, us->srb,
-                                &index, &offset, TO_XFER_BUF);
+                                &sg, &offset, TO_XFER_BUF);
 
                 page = 0;
                 lba++;
@@ -931,7 +933,8 @@ sddr09_write_data(struct us_data *us,
         unsigned int pagelen, blocklen;
         unsigned char *blockbuffer;
         unsigned char *buffer;
-        unsigned int len, index, offset;
+        unsigned int len, offset;
+        struct scatterlist *sg;
         int result;
 
         // Figure out the initial LBA and page
@@ -968,7 +971,8 @@ sddr09_write_data(struct us_data *us,
         }
 
         result = 0;
-        index = offset = 0;
+        offset = 0;
+        sg = NULL;
 
         while (sectors > 0) {
 
@@ -987,7 +991,7 @@ sddr09_write_data(struct us_data *us,
 
                 // Get the data from the transfer buffer
                 usb_stor_access_xfer_buf(buffer, len, us->srb,
-                                &index, &offset, FROM_XFER_BUF);
+                                &sg, &offset, FROM_XFER_BUF);
 
                 result = sddr09_write_lba(us, lba, page, pages,
                                 buffer, blockbuffer);
diff --git a/drivers/usb/storage/sddr55.c b/drivers/usb/storage/sddr55.c
index 0b1b5b59ca7b..d43a3415e12f 100644
--- a/drivers/usb/storage/sddr55.c
+++ b/drivers/usb/storage/sddr55.c
@@ -167,7 +167,8 @@ static int sddr55_read_data(struct us_data *us,
         unsigned long address;
 
         unsigned short pages;
-        unsigned int len, index, offset;
+        unsigned int len, offset;
+        struct scatterlist *sg;
 
         // Since we only read in one block at a time, we have to create
         // a bounce buffer and move the data a piece at a time between the
@@ -178,7 +179,8 @@ static int sddr55_read_data(struct us_data *us,
         buffer = kmalloc(len, GFP_NOIO);
         if (buffer == NULL)
                 return USB_STOR_TRANSPORT_ERROR; /* out of memory */
-        index = offset = 0;
+        offset = 0;
+        sg = NULL;
 
         while (sectors>0) {
 
@@ -255,7 +257,7 @@ static int sddr55_read_data(struct us_data *us,
 
                 // Store the data in the transfer buffer
                 usb_stor_access_xfer_buf(buffer, len, us->srb,
-                                &index, &offset, TO_XFER_BUF);
+                                &sg, &offset, TO_XFER_BUF);
 
                 page = 0;
                 lba++;
@@ -287,7 +289,8 @@ static int sddr55_write_data(struct us_data *us,
 
         unsigned short pages;
         int i;
-        unsigned int len, index, offset;
+        unsigned int len, offset;
+        struct scatterlist *sg;
 
         /* check if we are allowed to write */
         if (info->read_only || info->force_read_only) {
@@ -304,7 +307,8 @@ static int sddr55_write_data(struct us_data *us,
         buffer = kmalloc(len, GFP_NOIO);
         if (buffer == NULL)
                 return USB_STOR_TRANSPORT_ERROR;
-        index = offset = 0;
+        offset = 0;
+        sg = NULL;
 
         while (sectors > 0) {
 
@@ -322,7 +326,7 @@ static int sddr55_write_data(struct us_data *us,
 
                 // Get the data from the transfer buffer
                 usb_stor_access_xfer_buf(buffer, len, us->srb,
-                                &index, &offset, FROM_XFER_BUF);
+                                &sg, &offset, FROM_XFER_BUF);
 
                 US_DEBUGP("Write %02X pages, to PBA %04X"
                         " (LBA %04X) page %02X\n",
diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
index 17ca4d73577b..cb22a9ad1694 100644
--- a/drivers/usb/storage/shuttle_usbat.c
+++ b/drivers/usb/storage/shuttle_usbat.c
@@ -993,7 +993,8 @@ static int usbat_flash_read_data(struct us_data *us,
         unsigned char  thistime;
         unsigned int totallen, alloclen;
         int len, result;
-        unsigned int sg_idx = 0, sg_offset = 0;
+        unsigned int sg_offset = 0;
+        struct scatterlist *sg = NULL;
 
         result = usbat_flash_check_media(us, info);
         if (result != USB_STOR_TRANSPORT_GOOD)
@@ -1047,7 +1048,7 @@ static int usbat_flash_read_data(struct us_data *us,
 
                 /* Store the data in the transfer buffer */
                 usb_stor_access_xfer_buf(buffer, len, us->srb,
-                                         &sg_idx, &sg_offset, TO_XFER_BUF);
+                                         &sg, &sg_offset, TO_XFER_BUF);
 
                 sector += thistime;
                 totallen -= len;
@@ -1083,7 +1084,8 @@ static int usbat_flash_write_data(struct us_data *us,
         unsigned char  thistime;
         unsigned int totallen, alloclen;
         int len, result;
-        unsigned int sg_idx = 0, sg_offset = 0;
+        unsigned int sg_offset = 0;
+        struct scatterlist *sg = NULL;
 
         result = usbat_flash_check_media(us, info);
         if (result != USB_STOR_TRANSPORT_GOOD)
@@ -1122,7 +1124,7 @@ static int usbat_flash_write_data(struct us_data *us,
 
                 /* Get the data from the transfer buffer */
                 usb_stor_access_xfer_buf(buffer, len, us->srb,
-                                         &sg_idx, &sg_offset, FROM_XFER_BUF);
+                                         &sg, &sg_offset, FROM_XFER_BUF);
 
                 /* ATA command 0x30 (WRITE SECTORS) */
                 usbat_pack_ata_sector_cmd(command, thistime, sector, 0x30);
@@ -1162,8 +1164,8 @@ static int usbat_hp8200e_handle_read10(struct us_data *us,
         unsigned char *buffer;
         unsigned int len;
         unsigned int sector;
-        unsigned int sg_segment = 0;
         unsigned int sg_offset = 0;
+        struct scatterlist *sg = NULL;
 
         US_DEBUGP("handle_read10: transfersize %d\n",
                 srb->transfersize);
@@ -1220,9 +1222,6 @@ static int usbat_hp8200e_handle_read10(struct us_data *us,
         sector |= short_pack(data[7+5], data[7+4]);
         transferred = 0;
 
-        sg_segment = 0; /* for keeping track of where we are in */
-        sg_offset = 0;  /* the scatter/gather list */
-
         while (transferred != srb->request_bufflen) {
 
                 if (len > srb->request_bufflen - transferred)
@@ -1255,7 +1254,7 @@ static int usbat_hp8200e_handle_read10(struct us_data *us,
 
                 /* Store the data in the transfer buffer */
                 usb_stor_access_xfer_buf(buffer, len, srb,
-                                         &sg_segment, &sg_offset, TO_XFER_BUF);
+                                         &sg, &sg_offset, TO_XFER_BUF);
 
                 /* Update the amount transferred and the sector number */
 
diff --git a/fs/bio.c b/fs/bio.c
index 5f604f269dfa..d59ddbf79626 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -109,11 +109,14 @@ static inline struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned lon
 
 void bio_free(struct bio *bio, struct bio_set *bio_set)
 {
-        const int pool_idx = BIO_POOL_IDX(bio);
+        if (bio->bi_io_vec) {
+                const int pool_idx = BIO_POOL_IDX(bio);
 
         BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
+
+                mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
+        }
 
-        mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
         mempool_free(bio, bio_set->bio_pool);
 }
 
@@ -127,21 +130,9 @@ static void bio_fs_destructor(struct bio *bio)
 
 void bio_init(struct bio *bio)
 {
-        bio->bi_next = NULL;
-        bio->bi_bdev = NULL;
+        memset(bio, 0, sizeof(*bio));
         bio->bi_flags = 1 << BIO_UPTODATE;
-        bio->bi_rw = 0;
-        bio->bi_vcnt = 0;
-        bio->bi_idx = 0;
-        bio->bi_phys_segments = 0;
-        bio->bi_hw_segments = 0;
-        bio->bi_hw_front_size = 0;
-        bio->bi_hw_back_size = 0;
-        bio->bi_size = 0;
-        bio->bi_max_vecs = 0;
-        bio->bi_end_io = NULL;
         atomic_set(&bio->bi_cnt, 1);
-        bio->bi_private = NULL;
 }
 
 /**
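Editor's note: the bio_init() rewrite above trades the field-by-field initializer for memset() plus the two non-zero fields, so any member added to struct bio later starts out zeroed without the initializer needing maintenance. The idiom in miniature (stand-in struct, illustrative only, not the kernel's):

#include <string.h>

struct widget {
	unsigned long flags;
	int refcount;
	void *private_data;
	/* a field added here next year is still zeroed below */
};

static void widget_init(struct widget *w)
{
	memset(w, 0, sizeof(*w));	/* everything defaults to zero */
	w->flags = 1;			/* then set the non-zero defaults */
	w->refcount = 1;
}

int main(void)
{
	struct widget w;

	widget_init(&w);
	return w.private_data != 0;	/* 0: private_data was zeroed */
}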
diff --git a/fs/splice.c b/fs/splice.c
index a7568bcc0f99..59a941d404d9 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1335,10 +1335,10 @@ static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
         if (copy_to_user(sd->u.userptr, src + buf->offset, sd->len))
                 ret = -EFAULT;
 
+        buf->ops->unmap(pipe, buf, src);
 out:
         if (ret > 0)
                 sd->u.userptr += ret;
-        buf->ops->unmap(pipe, buf, src);
         return ret;
 }
 
diff --git a/include/asm-ia64/dma-mapping.h b/include/asm-ia64/dma-mapping.h
index 3ca6d5c14b2e..f1735a22d0ea 100644
--- a/include/asm-ia64/dma-mapping.h
+++ b/include/asm-ia64/dma-mapping.h
@@ -6,7 +6,7 @@
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  */
 #include <asm/machvec.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
 
 #define dma_alloc_coherent	platform_dma_alloc_coherent
 /* coherent mem. is cheap */
diff --git a/include/asm-ia64/scatterlist.h b/include/asm-ia64/scatterlist.h
index a452ea24205a..7d5234d50312 100644
--- a/include/asm-ia64/scatterlist.h
+++ b/include/asm-ia64/scatterlist.h
@@ -30,4 +30,6 @@ struct scatterlist {
 #define sg_dma_len(sg)		((sg)->dma_length)
 #define sg_dma_address(sg)	((sg)->dma_address)
 
+#define ARCH_HAS_SG_CHAIN
+
 #endif /* _ASM_IA64_SCATTERLIST_H */
diff --git a/include/asm-powerpc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h
index d05891608f74..2af321f36aba 100644
--- a/include/asm-powerpc/dma-mapping.h
+++ b/include/asm-powerpc/dma-mapping.h
@@ -6,149 +6,6 @@
  */
 #ifndef _ASM_DMA_MAPPING_H
 #define _ASM_DMA_MAPPING_H
-#ifdef __KERNEL__
-
-#include <linux/types.h>
-#include <linux/cache.h>
-/* need struct page definitions */
-#include <linux/mm.h>
-#include <asm/scatterlist.h>
-#include <asm/io.h>
-
-#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
-
-#ifdef CONFIG_NOT_COHERENT_CACHE
-/*
- * DMA-consistent mapping functions for PowerPCs that don't support
- * cache snooping. These allocate/free a region of uncached mapped
- * memory space for use with DMA devices. Alternatively, you could
- * allocate the space "normally" and use the cache management functions
- * to ensure it is consistent.
- */
-extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
-extern void __dma_free_coherent(size_t size, void *vaddr);
-extern void __dma_sync(void *vaddr, size_t size, int direction);
-extern void __dma_sync_page(struct page *page, unsigned long offset,
-                size_t size, int direction);
-
-#else /* ! CONFIG_NOT_COHERENT_CACHE */
-/*
- * Cache coherent cores.
- */
-
-#define __dma_alloc_coherent(gfp, size, handle)	NULL
-#define __dma_free_coherent(size, addr)		((void)0)
-#define __dma_sync(addr, size, rw)		((void)0)
-#define __dma_sync_page(pg, off, sz, rw)	((void)0)
-
-#endif /* ! CONFIG_NOT_COHERENT_CACHE */
-
-#ifdef CONFIG_PPC64
-/*
- * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
- */
-struct dma_mapping_ops {
-        void *		(*alloc_coherent)(struct device *dev, size_t size,
-                                dma_addr_t *dma_handle, gfp_t flag);
-        void		(*free_coherent)(struct device *dev, size_t size,
-                                void *vaddr, dma_addr_t dma_handle);
-        dma_addr_t	(*map_single)(struct device *dev, void *ptr,
-                                size_t size, enum dma_data_direction direction);
-        void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
-                                size_t size, enum dma_data_direction direction);
-        int		(*map_sg)(struct device *dev, struct scatterlist *sg,
-                                int nents, enum dma_data_direction direction);
-        void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
-                                int nents, enum dma_data_direction direction);
-        int		(*dma_supported)(struct device *dev, u64 mask);
-        int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
-};
-
-static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
-{
-        /* We don't handle the NULL dev case for ISA for now. We could
-         * do it via an out of line call but it is not needed for now. The
-         * only ISA DMA device we support is the floppy and we have a hack
-         * in the floppy driver directly to get a device for us.
-         */
-        if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
-                return NULL;
-        return dev->archdata.dma_ops;
-}
-
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        if (unlikely(dma_ops == NULL))
-                return 0;
-        if (dma_ops->dma_supported == NULL)
-                return 1;
-        return dma_ops->dma_supported(dev, mask);
-}
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        if (unlikely(dma_ops == NULL))
-                return -EIO;
-        if (dma_ops->set_dma_mask != NULL)
-                return dma_ops->set_dma_mask(dev, dma_mask);
-        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-                return -EIO;
-        *dev->dma_mask = dma_mask;
-        return 0;
-}
-
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-                dma_addr_t *dma_handle, gfp_t flag)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        BUG_ON(!dma_ops);
-        return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
-}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
-                void *cpu_addr, dma_addr_t dma_handle)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        BUG_ON(!dma_ops);
-        dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
-}
-
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-                size_t size,
-                enum dma_data_direction direction)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        BUG_ON(!dma_ops);
-        return dma_ops->map_single(dev, cpu_addr, size, direction);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-                size_t size,
-                enum dma_data_direction direction)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        BUG_ON(!dma_ops);
-        dma_ops->unmap_single(dev, dma_addr, size, direction);
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-                unsigned long offset, size_t size,
-                enum dma_data_direction direction)
-{
-        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-        BUG_ON(!dma_ops);
-        return dma_ops->map_single(dev, page_address(page) + offset, size,
-                        direction);
-}
 
 static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
                 size_t size,
@@ -276,14 +133,15 @@ static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
 }
 
 static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
            enum dma_data_direction direction)
 {
+        struct scatterlist *sg;
         int i;
 
         BUG_ON(direction == DMA_NONE);
 
-        for (i = 0; i < nents; i++, sg++) {
+        for_each_sg(sgl, sg, nents, i) {
                 BUG_ON(!sg->page);
                 __dma_sync_page(sg->page, sg->offset, sg->length, direction);
                 sg->dma_address = page_to_bus(sg->page) + sg->offset;
@@ -318,26 +176,28 @@ static inline void dma_sync_single_for_device(struct device *dev,
 }
 
 static inline void dma_sync_sg_for_cpu(struct device *dev,
-                struct scatterlist *sg, int nents,
+                struct scatterlist *sgl, int nents,
                 enum dma_data_direction direction)
 {
+        struct scatterlist *sg;
         int i;
 
         BUG_ON(direction == DMA_NONE);
 
-        for (i = 0; i < nents; i++, sg++)
+        for_each_sg(sgl, sg, nents, i)
                 __dma_sync_page(sg->page, sg->offset, sg->length, direction);
 }
 
 static inline void dma_sync_sg_for_device(struct device *dev,
-                struct scatterlist *sg, int nents,
+                struct scatterlist *sgl, int nents,
                 enum dma_data_direction direction)
 {
+        struct scatterlist *sg;
         int i;
 
         BUG_ON(direction == DMA_NONE);
 
-        for (i = 0; i < nents; i++, sg++)
+        for_each_sg(sgl, sg, nents, i)
                 __dma_sync_page(sg->page, sg->offset, sg->length, direction);
 }
 
diff --git a/include/asm-powerpc/scatterlist.h b/include/asm-powerpc/scatterlist.h
index 8c992d1491d4..b075f619c3b7 100644
--- a/include/asm-powerpc/scatterlist.h
+++ b/include/asm-powerpc/scatterlist.h
@@ -41,5 +41,7 @@ struct scatterlist {
 #define ISA_DMA_THRESHOLD	(~0UL)
 #endif
 
+#define ARCH_HAS_SG_CHAIN
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_SCATTERLIST_H */
diff --git a/include/asm-sparc/scatterlist.h b/include/asm-sparc/scatterlist.h
index a4fcf9ac9649..4055af90ad7e 100644
--- a/include/asm-sparc/scatterlist.h
+++ b/include/asm-sparc/scatterlist.h
@@ -19,4 +19,6 @@ struct scatterlist {
 
 #define ISA_DMA_THRESHOLD (~0UL)
 
+#define ARCH_HAS_SG_CHAIN
+
 #endif /* !(_SPARC_SCATTERLIST_H) */
diff --git a/include/asm-sparc64/scatterlist.h b/include/asm-sparc64/scatterlist.h
index 048fdb40e81d..703c5bbe6c8c 100644
--- a/include/asm-sparc64/scatterlist.h
+++ b/include/asm-sparc64/scatterlist.h
@@ -20,4 +20,6 @@ struct scatterlist {
 
 #define ISA_DMA_THRESHOLD	(~0UL)
 
+#define ARCH_HAS_SG_CHAIN
+
 #endif /* !(_SPARC64_SCATTERLIST_H) */
diff --git a/include/asm-x86/dma-mapping_32.h b/include/asm-x86/dma-mapping_32.h
index f1d72d177f68..6a2d26cb5da6 100644
--- a/include/asm-x86/dma-mapping_32.h
+++ b/include/asm-x86/dma-mapping_32.h
@@ -2,10 +2,10 @@
 #define _ASM_I386_DMA_MAPPING_H
 
 #include <linux/mm.h>
+#include <linux/scatterlist.h>
 
 #include <asm/cache.h>
 #include <asm/io.h>
-#include <asm/scatterlist.h>
 #include <asm/bug.h>
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
@@ -35,18 +35,19 @@ dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 }
 
 static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
            enum dma_data_direction direction)
 {
+        struct scatterlist *sg;
         int i;
 
         BUG_ON(!valid_dma_direction(direction));
-        WARN_ON(nents == 0 || sg[0].length == 0);
+        WARN_ON(nents == 0 || sglist[0].length == 0);
 
-        for (i = 0; i < nents; i++ ) {
-                BUG_ON(!sg[i].page);
+        for_each_sg(sglist, sg, nents, i) {
+                BUG_ON(!sg->page);
 
-                sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
+                sg->dma_address = page_to_phys(sg->page) + sg->offset;
         }
 
         flush_write_buffers();
diff --git a/include/asm-x86/dma-mapping_64.h b/include/asm-x86/dma-mapping_64.h
index 6897e2a436e5..ecd0f6125ba3 100644
--- a/include/asm-x86/dma-mapping_64.h
+++ b/include/asm-x86/dma-mapping_64.h
@@ -6,8 +6,7 @@
  * documentation.
  */
 
-
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
 #include <asm/io.h>
 #include <asm/swiotlb.h>
 
diff --git a/include/asm-x86/scatterlist_32.h b/include/asm-x86/scatterlist_32.h
index d7e45a8f1aae..bd5164aa8f63 100644
--- a/include/asm-x86/scatterlist_32.h
+++ b/include/asm-x86/scatterlist_32.h
@@ -10,6 +10,8 @@ struct scatterlist {
         unsigned int	length;
 };
 
+#define ARCH_HAS_SG_CHAIN
+
 /* These macros should be used after a pci_map_sg call has been done
  * to get bus addresses of each of the SG entries and their lengths.
  * You should only work with the number of sg entries pci_map_sg
diff --git a/include/asm-x86/scatterlist_64.h b/include/asm-x86/scatterlist_64.h
index eaf7ada27e14..ef3986ba4b79 100644
--- a/include/asm-x86/scatterlist_64.h
+++ b/include/asm-x86/scatterlist_64.h
@@ -11,6 +11,8 @@ struct scatterlist {
         unsigned int	dma_length;
 };
 
+#define ARCH_HAS_SG_CHAIN
+
 #define ISA_DMA_THRESHOLD (0x00ffffff)
 
 /* These macros should be used after a pci_map_sg call has been done
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 089a8bc55dd4..4da441337d6e 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -176,13 +176,28 @@ struct bio {
 #define bio_offset(bio)		bio_iovec((bio))->bv_offset
 #define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
 #define bio_sectors(bio)	((bio)->bi_size >> 9)
-#define bio_cur_sectors(bio)	(bio_iovec(bio)->bv_len >> 9)
-#define bio_data(bio)		(page_address(bio_page((bio))) + bio_offset((bio)))
 #define bio_barrier(bio)	((bio)->bi_rw & (1 << BIO_RW_BARRIER))
 #define bio_sync(bio)		((bio)->bi_rw & (1 << BIO_RW_SYNC))
 #define bio_failfast(bio)	((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
 #define bio_rw_ahead(bio)	((bio)->bi_rw & (1 << BIO_RW_AHEAD))
 #define bio_rw_meta(bio)	((bio)->bi_rw & (1 << BIO_RW_META))
+#define bio_empty_barrier(bio)	(bio_barrier(bio) && !(bio)->bi_size)
+
+static inline unsigned int bio_cur_sectors(struct bio *bio)
+{
+        if (bio->bi_vcnt)
+                return bio_iovec(bio)->bv_len >> 9;
+
+        return 0;
+}
+
+static inline void *bio_data(struct bio *bio)
+{
+        if (bio->bi_vcnt)
+                return page_address(bio_page(bio)) + bio_offset(bio);
+
+        return NULL;
+}
 
 /*
  * will die
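Editor's note: turning bio_cur_sectors() and bio_data() from macros into guarded inline functions is what makes the new data-less barriers (bio_empty_barrier() above) safe to inspect: such a bio has bi_vcnt == 0, and the old macros would have dereferenced an io_vec entry that was never allocated. A minimal userspace model of the guard, with the structs pared down to the relevant fields (illustrative only):

#include <stdio.h>

struct bio_vec {
	unsigned int bv_len;
};

struct bio {
	unsigned short bi_vcnt;		/* 0 for an empty barrier bio */
	struct bio_vec *bi_io_vec;
};

/* Old macro shape: (bio)->bi_io_vec[0].bv_len >> 9 -- dereferences the
 * vector unconditionally. The guarded form returns 0 instead: */
static unsigned int bio_cur_sectors(struct bio *bio)
{
	if (bio->bi_vcnt)
		return bio->bi_io_vec[0].bv_len >> 9;
	return 0;			/* no data segments to report */
}

int main(void)
{
	struct bio empty = { 0, NULL };

	printf("%u\n", bio_cur_sectors(&empty));	/* prints 0, no crash */
	return 0;
}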
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5ed888b04b29..bbf906a0b419 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -330,7 +330,6 @@ typedef void (unplug_fn) (struct request_queue *);
 
 struct bio_vec;
 typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
-typedef int (issue_flush_fn) (struct request_queue *, struct gendisk *, sector_t *);
 typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
 
@@ -368,7 +367,6 @@ struct request_queue
         prep_rq_fn		*prep_rq_fn;
         unplug_fn		*unplug_fn;
         merge_bvec_fn		*merge_bvec_fn;
-        issue_flush_fn		*issue_flush_fn;
         prepare_flush_fn	*prepare_flush_fn;
         softirq_done_fn		*softirq_done_fn;
 
@@ -540,6 +538,7 @@ enum {
 #define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
 #define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
 #define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
+#define blk_empty_barrier(rq)	(blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
 
 #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
 
@@ -729,7 +728,9 @@ static inline void blk_run_address_space(struct address_space *mapping)
 extern int end_that_request_first(struct request *, int, int);
 extern int end_that_request_chunk(struct request *, int, int);
 extern void end_that_request_last(struct request *, int);
-extern void end_request(struct request *req, int uptodate);
+extern void end_request(struct request *, int);
+extern void end_queued_request(struct request *, int);
+extern void end_dequeued_request(struct request *, int);
 extern void blk_complete_request(struct request *);
 
 /*
@@ -767,7 +768,6 @@ extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
-extern void blk_queue_issue_flush_fn(struct request_queue *, issue_flush_fn *);
 extern int blk_do_ordered(struct request_queue *, struct request **);
 extern unsigned blk_ordered_cur_seq(struct request_queue *);
 extern unsigned blk_ordered_req_seq(struct request *);
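Editor's note: end_queued_request() and end_dequeued_request() are new exports whose bodies land in the ll_rw_blk.c part of this merge, outside this excerpt. Judging from the names and from the drivers converted in this series, the split is whether the request is still on the request queue when the driver completes it. Hedged sketch only; example_softirq_done() is hypothetical, not a function from this patch:

/* Completion path for a driver that dequeues requests itself before
 * issuing them (semantics inferred from the names above): */
static void example_softirq_done(struct request *req)
{
	int uptodate = 1;			/* assume success here */

	end_dequeued_request(req, uptodate);	/* req already off the queue */
}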
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index 9752307d16ba..7da5b98d90e6 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -32,6 +32,7 @@
 #include <linux/workqueue.h>	/* work_struct */
 #include <linux/mempool.h>
 #include <linux/mutex.h>
+#include <linux/scatterlist.h>
 
 #include <asm/io.h>
 #include <asm/semaphore.h>	/* Needed for MUTEX init macros */
@@ -837,7 +838,7 @@ static inline int i2o_dma_map_sg(struct i2o_controller *c,
                 if ((sizeof(dma_addr_t) > 4) && c->pae_support)
                         *mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg)));
 #endif
-                sg++;
+                sg = sg_next(sg);
         }
         *sg_ptr = mptr;
 
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 02a27e8cbad2..30a1931466a6 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -772,7 +772,7 @@ typedef struct hwif_s {
 
         unsigned int nsect;
         unsigned int nleft;
-        unsigned int cursg;
+        struct scatterlist *cursg;
         unsigned int cursg_ofs;
 
         int		rqsize;		/* max sectors per request */
@@ -1093,11 +1093,6 @@ extern ide_startstop_t ide_do_reset (ide_drive_t *);
 extern void ide_init_drive_cmd (struct request *rq);
 
 /*
- * this function returns error location sector offset in case of a write error
- */
-extern u64 ide_get_error_location(ide_drive_t *, char *);
-
-/*
  * "action" parameter type for ide_do_drive_cmd() below.
  */
 typedef enum {
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 229a9ff9f924..377e6d4d9be3 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -29,7 +29,7 @@
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/dma-mapping.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
 #include <linux/io.h>
 #include <linux/ata.h>
 #include <linux/workqueue.h>
@@ -416,6 +416,7 @@ struct ata_queued_cmd {
         unsigned long		flags;		/* ATA_QCFLAG_xxx */
         unsigned int		tag;
         unsigned int		n_elem;
+        unsigned int		n_iter;
         unsigned int		orig_n_elem;
 
         int			dma_dir;
@@ -426,7 +427,7 @@ struct ata_queued_cmd {
         unsigned int		nbytes;
         unsigned int		curbytes;
 
-        unsigned int		cursg;
+        struct scatterlist	*cursg;
         unsigned int		cursg_ofs;
 
         struct scatterlist	sgent;
@@ -1043,7 +1044,7 @@ ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
                 return 1;
         if (qc->pad_len)
                 return 0;
-        if (((sg - qc->__sg) + 1) == qc->n_elem)
+        if (qc->n_iter == qc->n_elem)
                 return 1;
         return 0;
 }
@@ -1051,6 +1052,7 @@ ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
 static inline struct scatterlist *
 ata_qc_first_sg(struct ata_queued_cmd *qc)
 {
+        qc->n_iter = 0;
         if (qc->n_elem)
                 return qc->__sg;
         if (qc->pad_len)
@@ -1063,8 +1065,8 @@ ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc)
 {
         if (sg == &qc->pad_sgent)
                 return NULL;
-        if (++sg - qc->__sg < qc->n_elem)
-                return sg;
+        if (++qc->n_iter < qc->n_elem)
+                return sg_next(sg);
         if (qc->pad_len)
                 return &qc->pad_sgent;
         return NULL;
@@ -1309,9 +1311,11 @@ static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
         qc->dma_dir = DMA_NONE;
         qc->__sg = NULL;
         qc->flags = 0;
-        qc->cursg = qc->cursg_ofs = 0;
+        qc->cursg = NULL;
+        qc->cursg_ofs = 0;
         qc->nbytes = qc->curbytes = 0;
         qc->n_elem = 0;
+        qc->n_iter = 0;
         qc->err_mask = 0;
         qc->pad_len = 0;
         qc->sect_size = ATA_SECT_SIZE;
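Editor's note: with qc->cursg now a pointer and n_iter counting elements, the helpers above step through a command's scatterlist with sg_next() instead of pointer arithmetic, since "sg - qc->__sg" stops being meaningful once the list can jump across chained arrays. The resulting walk is NULL-terminated (ata_qc_next_sg() returns NULL after the last or pad entry). Sketch of the loop shape only; transfer_one() is a hypothetical per-entry handler, not a libata function:

/* Hypothetical walk over a queued command's scatter/gather entries: */
struct scatterlist *sg;

for (sg = ata_qc_first_sg(qc); sg; sg = ata_qc_next_sg(sg, qc))
	transfer_one(qc, sg);	/* ata_sg_is_last(sg, qc) flags the end */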
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 4efbd9c445f5..2dc7464cce52 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -20,4 +20,88 @@ static inline void sg_init_one(struct scatterlist *sg, const void *buf,
         sg_set_buf(sg, buf, buflen);
 }
 
+/*
+ * We overload the LSB of the page pointer to indicate whether it's
+ * a valid sg entry, or whether it points to the start of a new scatterlist.
+ * Those low bits are there for everyone! (thanks mason :-)
+ */
+#define sg_is_chain(sg)		((unsigned long) (sg)->page & 0x01)
+#define sg_chain_ptr(sg)	\
+        ((struct scatterlist *) ((unsigned long) (sg)->page & ~0x01))
+
+/**
+ * sg_next - return the next scatterlist entry in a list
+ * @sg:		The current sg entry
+ *
+ * Usually the next entry will be @sg@ + 1, but if this sg element is part
+ * of a chained scatterlist, it could jump to the start of a new
+ * scatterlist array.
+ *
+ * Note that the caller must ensure that there are further entries after
+ * the current entry, this function will NOT return NULL for an end-of-list.
+ *
+ */
+static inline struct scatterlist *sg_next(struct scatterlist *sg)
+{
+        sg++;
+
+        if (unlikely(sg_is_chain(sg)))
+                sg = sg_chain_ptr(sg);
+
+        return sg;
+}
+
+/*
+ * Loop over each sg element, following the pointer to a new list if necessary
+ */
+#define for_each_sg(sglist, sg, nr, __i)	\
+        for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
+
+/**
+ * sg_last - return the last scatterlist entry in a list
+ * @sgl:	First entry in the scatterlist
+ * @nents:	Number of entries in the scatterlist
+ *
+ * Should only be used casually, it (currently) scan the entire list
+ * to get the last entry.
+ *
+ * Note that the @sgl@ pointer passed in need not be the first one,
+ * the important bit is that @nents@ denotes the number of entries that
+ * exist from @sgl@.
+ *
+ */
+static inline struct scatterlist *sg_last(struct scatterlist *sgl,
+                                          unsigned int nents)
+{
+#ifndef ARCH_HAS_SG_CHAIN
+        struct scatterlist *ret = &sgl[nents - 1];
+#else
+        struct scatterlist *sg, *ret = NULL;
+        int i;
+
+        for_each_sg(sgl, sg, nents, i)
+                ret = sg;
+
+#endif
+        return ret;
+}
+
+/**
+ * sg_chain - Chain two sglists together
+ * @prv:	First scatterlist
+ * @prv_nents:	Number of entries in prv
+ * @sgl:	Second scatterlist
+ *
+ * Links @prv@ and @sgl@ together, to form a longer scatterlist.
+ *
+ */
+static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
+                            struct scatterlist *sgl)
+{
+#ifndef ARCH_HAS_SG_CHAIN
+        BUG();
+#endif
+        prv[prv_nents - 1].page = (struct page *) ((unsigned long) sgl | 0x01);
+}
+
 #endif /* _LINUX_SCATTERLIST_H */
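Editor's note: the hunk above is the heart of the whole series. The last entry of one scatterlist array is sacrificed as a link whose page pointer, with its least significant bit set, points at the next array, and sg_next()/for_each_sg() follow that link transparently; this is exactly why every "sg++" walk elsewhere in this merge had to be converted. A self-contained userspace model of the same bit-twiddling, with the page field reduced to an unsigned long so the LSB math is visible (entries are assumed at least 2-byte aligned, which any real struct is):

#include <stdio.h>

struct scatterlist {
	unsigned long page;	/* LSB set => chain link, not a data entry */
	unsigned int length;
};

#define sg_is_chain(sg)		((sg)->page & 0x01)
#define sg_chain_ptr(sg)	((struct scatterlist *)((sg)->page & ~0x01UL))

static struct scatterlist *sg_next(struct scatterlist *sg)
{
	sg++;
	if (sg_is_chain(sg))
		sg = sg_chain_ptr(sg);
	return sg;
}

#define for_each_sg(sglist, sg, nr, __i) \
	for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))

/* The last entry of prv stops carrying data and becomes the link. */
static void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
		     struct scatterlist *sgl)
{
	prv[prv_nents - 1].page = (unsigned long)sgl | 0x01;
}

int main(void)
{
	/* each array keeps a spare zeroed slot, see the note below */
	struct scatterlist a[3] = { { 0, 512 }, { 0, 1024 }, { 0, 0 } };
	struct scatterlist b[3] = { { 0, 2048 }, { 0, 4096 }, { 0, 0 } };
	struct scatterlist *sg;
	int i;

	sg_chain(a, 3, b);	/* a[2] now links to b */

	/* 4 usable entries: a[0], a[1], b[0], b[1] */
	for_each_sg(a, sg, 4, i)
		printf("entry %d: %u bytes\n", i, sg->length);
	return 0;
}

One caveat worth seeing in the model: the for_each_sg macro evaluates sg_next() once past the final element before the loop test fails, so the arrays above keep a spare slot to keep that extra step inside the allocation.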
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 9f8f80ab0c8b..702fcfeb37f1 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -11,13 +11,6 @@
 #include <linux/types.h>
 
 /*
- * The maximum sg list length SCSI can cope with
- * (currently must be a power of 2 between 32 and 256)
- */
-#define SCSI_MAX_PHYS_SEGMENTS	MAX_PHYS_SEGMENTS
-
-
-/*
  *	SCSI command lengths
  */
 
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 65ab5145a09b..3f47e522a1ec 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -5,6 +5,7 @@
 #include <linux/list.h>
 #include <linux/types.h>
 #include <linux/timer.h>
+#include <linux/scatterlist.h>
 
 struct request;
 struct scatterlist;
@@ -68,7 +69,7 @@ struct scsi_cmnd {
 
         /* These elements define the operation we ultimately want to perform */
         unsigned short use_sg;	/* Number of pieces of scatter-gather */
-        unsigned short sglist_len;	/* size of malloc'd scatter-gather list */
+        unsigned short __use_sg;
 
         unsigned underflow;	/* Return error if less than
                                    this amount is transferred */
@@ -128,7 +129,7 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
 extern void scsi_kunmap_atomic_sg(void *virt);
 
 extern struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *, gfp_t);
-extern void scsi_free_sgtable(struct scatterlist *, int);
+extern void scsi_free_sgtable(struct scsi_cmnd *);
 
 extern int scsi_dma_map(struct scsi_cmnd *cmd);
 extern void scsi_dma_unmap(struct scsi_cmnd *cmd);
@@ -148,6 +149,6 @@ static inline int scsi_get_resid(struct scsi_cmnd *cmd)
 }
 
 #define scsi_for_each_sg(cmd, sg, nseg, __i)			\
-        for (__i = 0, sg = scsi_sglist(cmd); __i < (nseg); __i++, (sg)++)
+        for_each_sg(scsi_sglist(cmd), sg, nseg, __i)
 
 #endif /* _SCSI_SCSI_CMND_H */
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 7d210cd6c38d..0fd4746ee39d 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -39,6 +39,9 @@ struct blk_queue_tags;
 #define DISABLE_CLUSTERING 0
 #define ENABLE_CLUSTERING 1
 
+#define DISABLE_SG_CHAINING 0
+#define ENABLE_SG_CHAINING 1
+
 enum scsi_eh_timer_return {
         EH_NOT_HANDLED,
         EH_HANDLED,
@@ -443,6 +446,15 @@ struct scsi_host_template {
         unsigned ordered_tag:1;
 
         /*
+         * true if the low-level driver can support sg chaining. this
+         * will be removed eventually when all the drivers are
+         * converted to support sg chaining.
+         *
+         * Status: OBSOLETE
+         */
+        unsigned use_sg_chaining:1;
+
+        /*
          * Countdown for host blocking with no commands outstanding
          */
         unsigned int max_host_blocked;
@@ -586,6 +598,7 @@ struct Scsi_Host {
         unsigned unchecked_isa_dma:1;
         unsigned use_clustering:1;
         unsigned use_blk_tcq:1;
+        unsigned use_sg_chaining:1;
 
         /*
          * Host has requested that no further requests come through for the
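Editor's note: per the comment in the hunk above, use_sg_chaining is an opt-in transition flag. Unconverted drivers leave it off and keep receiving linear lists, while the drivers converted in this merge (qla1280, u14-34f and others, per the shortlog) set it in their host template, roughly like the hypothetical one below. Only the field name and the ENABLE_SG_CHAINING value come from this hunk; everything else is a placeholder:

static struct scsi_host_template example_template = {
	.name			= "example",		/* hypothetical driver */
	.queuecommand		= example_queuecommand,	/* hypothetical */
	.sg_tablesize		= 128,
	.use_sg_chaining	= ENABLE_SG_CHAINING,	/* opt in to chained sglists */
};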
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 30c1400e749e..c419ecf334c3 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -677,16 +677,17 @@ swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
  * same here.
  */
 int
-swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
+swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
                int dir)
 {
+        struct scatterlist *sg;
         void *addr;
         dma_addr_t dev_addr;
         int i;
 
         BUG_ON(dir == DMA_NONE);
 
-        for (i = 0; i < nelems; i++, sg++) {
+        for_each_sg(sgl, sg, nelems, i) {
                 addr = SG_ENT_VIRT_ADDRESS(sg);
                 dev_addr = virt_to_bus(addr);
                 if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
@@ -696,7 +697,7 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
                                    to do proper error handling. */
                                 swiotlb_full(hwdev, sg->length, dir, 0);
                                 swiotlb_unmap_sg(hwdev, sg - i, i, dir);
-                                sg[0].dma_length = 0;
+                                sgl[0].dma_length = 0;
                                 return 0;
                         }
                         sg->dma_address = virt_to_bus(map);
@@ -712,19 +713,21 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
  * concerning calls here are the same as for swiotlb_unmap_single() above.
  */
 void
-swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
+swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
                  int dir)
 {
+        struct scatterlist *sg;
         int i;
 
         BUG_ON(dir == DMA_NONE);
 
-        for (i = 0; i < nelems; i++, sg++)
+        for_each_sg(sgl, sg, nelems, i) {
                 if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
                         unmap_single(hwdev, bus_to_virt(sg->dma_address),
                                      sg->dma_length, dir);
                 else if (dir == DMA_FROM_DEVICE)
                         dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
+        }
 }
 
 /*
@@ -735,19 +738,21 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
  * and usage.
  */
 static void
-swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
+swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
                 int nelems, int dir, int target)
 {
+        struct scatterlist *sg;
         int i;
 
         BUG_ON(dir == DMA_NONE);
 
-        for (i = 0; i < nelems; i++, sg++)
+        for_each_sg(sgl, sg, nelems, i) {
                 if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
                         sync_single(hwdev, bus_to_virt(sg->dma_address),
                                     sg->dma_length, dir, target);
                 else if (dir == DMA_FROM_DEVICE)
                         dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
+        }
 }
 
 void
diff --git a/mm/bounce.c b/mm/bounce.c
index 3b549bf31f7d..b6d2d0f1019b 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -265,6 +265,12 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
         mempool_t *pool;
 
         /*
+         * Data-less bio, nothing to bounce
+         */
+        if (bio_empty_barrier(*bio_orig))
+                return;
+
+        /*
          * for non-isa bounce case, just check if the bounce pfn is equal
          * to or bigger than the highest pfn in the system -- in that case,
          * don't waste time iterating over bio segments