author	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 13:09:16 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 13:09:16 -0400
commit	92d15c2ccbb3e31a3fc71ad28fdb55e1319383c0 (patch)
tree	8d83c0dc3c6b935d8367e331872f242b742f0a8a /include
parent	f20bf6125605acbbc7eb8c9420d7221c91aa83eb (diff)
parent	644bd2f048972d75eb1979b1fdca257d528ce687 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/data/git/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/data/git/linux-2.6-block: (63 commits)
  Fix memory leak in dm-crypt
  SPARC64: sg chaining support
  SPARC: sg chaining support
  PPC: sg chaining support
  PS3: sg chaining support
  IA64: sg chaining support
  x86-64: enable sg chaining
  x86-64: update pci-gart iommu to sg helpers
  x86-64: update nommu to sg helpers
  x86-64: update calgary iommu to sg helpers
  swiotlb: sg chaining support
  i386: enable sg chaining
  i386 dma_map_sg: convert to using sg helpers
  mmc: need to zero sglist on init
  Panic in blk_rq_map_sg() from CCISS driver
  remove sglist_len
  remove blk_queue_max_phys_segments in libata
  revert sg segment size ifdefs
  Fixup u14-34f ENABLE_SG_CHAINING
  qla1280: enable use_sg_chaining option
  ...
Diffstat (limited to 'include')
-rw-r--r--	include/asm-ia64/dma-mapping.h	  2
-rw-r--r--	include/asm-ia64/scatterlist.h	  2
-rw-r--r--	include/asm-powerpc/dma-mapping.h	158
-rw-r--r--	include/asm-powerpc/scatterlist.h	  2
-rw-r--r--	include/asm-sparc/scatterlist.h	  2
-rw-r--r--	include/asm-sparc64/scatterlist.h	  2
-rw-r--r--	include/asm-x86/dma-mapping_32.h	 13
-rw-r--r--	include/asm-x86/dma-mapping_64.h	  3
-rw-r--r--	include/asm-x86/scatterlist_32.h	  2
-rw-r--r--	include/asm-x86/scatterlist_64.h	  2
-rw-r--r--	include/linux/bio.h	 19
-rw-r--r--	include/linux/blkdev.h	  8
-rw-r--r--	include/linux/i2o.h	  3
-rw-r--r--	include/linux/ide.h	  7
-rw-r--r--	include/linux/libata.h	 16
-rw-r--r--	include/linux/scatterlist.h	 84
-rw-r--r--	include/scsi/scsi.h	  7
-rw-r--r--	include/scsi/scsi_cmnd.h	  7
-rw-r--r--	include/scsi/scsi_host.h	 13
19 files changed, 165 insertions, 187 deletions
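
The recurring conversion in this merge: once scatterlists may be chained, the entries of a mapping are no longer guaranteed to be one flat array, so code must not advance through them with plain pointer or index arithmetic. A minimal before/after sketch of the pattern applied throughout (dma_sync_entry() is a hypothetical stand-in for the per-entry work):

	/* before: assumes nents contiguous entries */
	for (i = 0; i < nents; i++, sg++)
		dma_sync_entry(sg);

	/* after: sg_next()/for_each_sg() follow chain links transparently */
	for_each_sg(sglist, sg, nents, i)
		dma_sync_entry(sg);

The powerpc, i386 and i2o hunks below are instances of exactly this rewrite.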
diff --git a/include/asm-ia64/dma-mapping.h b/include/asm-ia64/dma-mapping.h
index 3ca6d5c14b2e..f1735a22d0ea 100644
--- a/include/asm-ia64/dma-mapping.h
+++ b/include/asm-ia64/dma-mapping.h
@@ -6,7 +6,7 @@
  * David Mosberger-Tang <davidm@hpl.hp.com>
  */
 #include <asm/machvec.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
 
 #define dma_alloc_coherent	platform_dma_alloc_coherent
 /* coherent mem. is cheap */
diff --git a/include/asm-ia64/scatterlist.h b/include/asm-ia64/scatterlist.h
index a452ea24205a..7d5234d50312 100644
--- a/include/asm-ia64/scatterlist.h
+++ b/include/asm-ia64/scatterlist.h
@@ -30,4 +30,6 @@ struct scatterlist {
 #define sg_dma_len(sg)		((sg)->dma_length)
 #define sg_dma_address(sg)	((sg)->dma_address)
 
+#define ARCH_HAS_SG_CHAIN
+
 #endif /* _ASM_IA64_SCATTERLIST_H */
diff --git a/include/asm-powerpc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h
index d05891608f74..2af321f36aba 100644
--- a/include/asm-powerpc/dma-mapping.h
+++ b/include/asm-powerpc/dma-mapping.h
@@ -6,149 +6,6 @@
  */
 #ifndef _ASM_DMA_MAPPING_H
 #define _ASM_DMA_MAPPING_H
-#ifdef __KERNEL__
-
-#include <linux/types.h>
-#include <linux/cache.h>
-/* need struct page definitions */
-#include <linux/mm.h>
-#include <asm/scatterlist.h>
-#include <asm/io.h>
-
-#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
-
-#ifdef CONFIG_NOT_COHERENT_CACHE
-/*
- * DMA-consistent mapping functions for PowerPCs that don't support
- * cache snooping. These allocate/free a region of uncached mapped
- * memory space for use with DMA devices. Alternatively, you could
- * allocate the space "normally" and use the cache management functions
- * to ensure it is consistent.
- */
-extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
-extern void __dma_free_coherent(size_t size, void *vaddr);
-extern void __dma_sync(void *vaddr, size_t size, int direction);
-extern void __dma_sync_page(struct page *page, unsigned long offset,
-			    size_t size, int direction);
-
-#else /* ! CONFIG_NOT_COHERENT_CACHE */
-/*
- * Cache coherent cores.
- */
-
-#define __dma_alloc_coherent(gfp, size, handle)	NULL
-#define __dma_free_coherent(size, addr)		((void)0)
-#define __dma_sync(addr, size, rw)		((void)0)
-#define __dma_sync_page(pg, off, sz, rw)	((void)0)
-
-#endif /* ! CONFIG_NOT_COHERENT_CACHE */
-
-#ifdef CONFIG_PPC64
-/*
- * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
- */
-struct dma_mapping_ops {
-	void *		(*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t flag);
-	void		(*free_coherent)(struct device *dev, size_t size,
-				void *vaddr, dma_addr_t dma_handle);
-	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
-				size_t size, enum dma_data_direction direction);
-	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
-				size_t size, enum dma_data_direction direction);
-	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction direction);
-	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction direction);
-	int		(*dma_supported)(struct device *dev, u64 mask);
-	int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
-};
-
-static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
-{
-	/* We don't handle the NULL dev case for ISA for now. We could
-	 * do it via an out of line call but it is not needed for now. The
-	 * only ISA DMA device we support is the floppy and we have a hack
-	 * in the floppy driver directly to get a device for us.
-	 */
-	if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
-		return NULL;
-	return dev->archdata.dma_ops;
-}
-
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	if (unlikely(dma_ops == NULL))
-		return 0;
-	if (dma_ops->dma_supported == NULL)
-		return 1;
-	return dma_ops->dma_supported(dev, mask);
-}
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	if (unlikely(dma_ops == NULL))
-		return -EIO;
-	if (dma_ops->set_dma_mask != NULL)
-		return dma_ops->set_dma_mask(dev, dma_mask);
-	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-		return -EIO;
-	*dev->dma_mask = dma_mask;
-	return 0;
-}
-
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flag)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
-}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *cpu_addr, dma_addr_t dma_handle)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
-}
-
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-					size_t size,
-					enum dma_data_direction direction)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-	return dma_ops->map_single(dev, cpu_addr, size, direction);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-				    size_t size,
-				    enum dma_data_direction direction)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-	dma_ops->unmap_single(dev, dma_addr, size, direction);
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-				      unsigned long offset, size_t size,
-				      enum dma_data_direction direction)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-	return dma_ops->map_single(dev, page_address(page) + offset, size,
-			direction);
-}
-
 
 static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
 		size_t size,
@@ -276,14 +133,15 @@ static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
 }
 
 static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 	   enum dma_data_direction direction)
 {
+	struct scatterlist *sg;
 	int i;
 
 	BUG_ON(direction == DMA_NONE);
 
-	for (i = 0; i < nents; i++, sg++) {
+	for_each_sg(sgl, sg, nents, i) {
 		BUG_ON(!sg->page);
 		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
 		sg->dma_address = page_to_bus(sg->page) + sg->offset;
@@ -318,26 +176,28 @@ static inline void dma_sync_single_for_device(struct device *dev,
 }
 
 static inline void dma_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sg, int nents,
+		struct scatterlist *sgl, int nents,
 		enum dma_data_direction direction)
 {
+	struct scatterlist *sg;
 	int i;
 
 	BUG_ON(direction == DMA_NONE);
 
-	for (i = 0; i < nents; i++, sg++)
+	for_each_sg(sgl, sg, nents, i)
 		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
 }
 
 static inline void dma_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sg, int nents,
+		struct scatterlist *sgl, int nents,
 		enum dma_data_direction direction)
 {
+	struct scatterlist *sg;
 	int i;
 
 	BUG_ON(direction == DMA_NONE);
 
-	for (i = 0; i < nents; i++, sg++)
+	for_each_sg(sgl, sg, nents, i)
 		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
 }
 
diff --git a/include/asm-powerpc/scatterlist.h b/include/asm-powerpc/scatterlist.h
index 8c992d1491d4..b075f619c3b7 100644
--- a/include/asm-powerpc/scatterlist.h
+++ b/include/asm-powerpc/scatterlist.h
@@ -41,5 +41,7 @@ struct scatterlist {
 #define ISA_DMA_THRESHOLD	(~0UL)
 #endif
 
+#define ARCH_HAS_SG_CHAIN
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_SCATTERLIST_H */
diff --git a/include/asm-sparc/scatterlist.h b/include/asm-sparc/scatterlist.h
index a4fcf9ac9649..4055af90ad7e 100644
--- a/include/asm-sparc/scatterlist.h
+++ b/include/asm-sparc/scatterlist.h
@@ -19,4 +19,6 @@ struct scatterlist {
 
 #define ISA_DMA_THRESHOLD (~0UL)
 
+#define ARCH_HAS_SG_CHAIN
+
 #endif /* !(_SPARC_SCATTERLIST_H) */
diff --git a/include/asm-sparc64/scatterlist.h b/include/asm-sparc64/scatterlist.h
index 048fdb40e81d..703c5bbe6c8c 100644
--- a/include/asm-sparc64/scatterlist.h
+++ b/include/asm-sparc64/scatterlist.h
@@ -20,4 +20,6 @@ struct scatterlist {
 
 #define ISA_DMA_THRESHOLD	(~0UL)
 
+#define ARCH_HAS_SG_CHAIN
+
 #endif /* !(_SPARC64_SCATTERLIST_H) */
diff --git a/include/asm-x86/dma-mapping_32.h b/include/asm-x86/dma-mapping_32.h
index f1d72d177f68..6a2d26cb5da6 100644
--- a/include/asm-x86/dma-mapping_32.h
+++ b/include/asm-x86/dma-mapping_32.h
@@ -2,10 +2,10 @@
 #define _ASM_I386_DMA_MAPPING_H
 
 #include <linux/mm.h>
+#include <linux/scatterlist.h>
 
 #include <asm/cache.h>
 #include <asm/io.h>
-#include <asm/scatterlist.h>
 #include <asm/bug.h>
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
@@ -35,18 +35,19 @@ dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 }
 
 static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	   enum dma_data_direction direction)
 {
+	struct scatterlist *sg;
 	int i;
 
 	BUG_ON(!valid_dma_direction(direction));
-	WARN_ON(nents == 0 || sg[0].length == 0);
+	WARN_ON(nents == 0 || sglist[0].length == 0);
 
-	for (i = 0; i < nents; i++ ) {
-		BUG_ON(!sg[i].page);
+	for_each_sg(sglist, sg, nents, i) {
+		BUG_ON(!sg->page);
 
-		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
+		sg->dma_address = page_to_phys(sg->page) + sg->offset;
 	}
 
 	flush_write_buffers();
diff --git a/include/asm-x86/dma-mapping_64.h b/include/asm-x86/dma-mapping_64.h
index 6897e2a436e5..ecd0f6125ba3 100644
--- a/include/asm-x86/dma-mapping_64.h
+++ b/include/asm-x86/dma-mapping_64.h
@@ -6,8 +6,7 @@
  * documentation.
  */
 
-
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
 #include <asm/io.h>
 #include <asm/swiotlb.h>
 
diff --git a/include/asm-x86/scatterlist_32.h b/include/asm-x86/scatterlist_32.h
index d7e45a8f1aae..bd5164aa8f63 100644
--- a/include/asm-x86/scatterlist_32.h
+++ b/include/asm-x86/scatterlist_32.h
@@ -10,6 +10,8 @@ struct scatterlist {
 	unsigned int	length;
 };
 
+#define ARCH_HAS_SG_CHAIN
+
 /* These macros should be used after a pci_map_sg call has been done
  * to get bus addresses of each of the SG entries and their lengths.
  * You should only work with the number of sg entries pci_map_sg
diff --git a/include/asm-x86/scatterlist_64.h b/include/asm-x86/scatterlist_64.h
index eaf7ada27e14..ef3986ba4b79 100644
--- a/include/asm-x86/scatterlist_64.h
+++ b/include/asm-x86/scatterlist_64.h
@@ -11,6 +11,8 @@ struct scatterlist {
 	unsigned int	dma_length;
 };
 
+#define ARCH_HAS_SG_CHAIN
+
 #define ISA_DMA_THRESHOLD (0x00ffffff)
 
 /* These macros should be used after a pci_map_sg call has been done
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 089a8bc55dd4..4da441337d6e 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -176,13 +176,28 @@ struct bio {
 #define bio_offset(bio)		bio_iovec((bio))->bv_offset
 #define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
 #define bio_sectors(bio)	((bio)->bi_size >> 9)
-#define bio_cur_sectors(bio)	(bio_iovec(bio)->bv_len >> 9)
-#define bio_data(bio)		(page_address(bio_page((bio))) + bio_offset((bio)))
 #define bio_barrier(bio)	((bio)->bi_rw & (1 << BIO_RW_BARRIER))
 #define bio_sync(bio)		((bio)->bi_rw & (1 << BIO_RW_SYNC))
 #define bio_failfast(bio)	((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
 #define bio_rw_ahead(bio)	((bio)->bi_rw & (1 << BIO_RW_AHEAD))
 #define bio_rw_meta(bio)	((bio)->bi_rw & (1 << BIO_RW_META))
+#define bio_empty_barrier(bio)	(bio_barrier(bio) && !(bio)->bi_size)
+
+static inline unsigned int bio_cur_sectors(struct bio *bio)
+{
+	if (bio->bi_vcnt)
+		return bio_iovec(bio)->bv_len >> 9;
+
+	return 0;
+}
+
+static inline void *bio_data(struct bio *bio)
+{
+	if (bio->bi_vcnt)
+		return page_address(bio_page(bio)) + bio_offset(bio);
+
+	return NULL;
+}
 
 /*
  * will die
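
bio_cur_sectors() and bio_data() become functions so they are well defined on a bio that carries no data segments (bi_vcnt == 0), which is exactly the case the new bio_empty_barrier() test describes. A hedged sketch of how a block driver might use it (issue_cache_flush() is a hypothetical hook, not part of this patch):

	if (bio_empty_barrier(bio)) {
		/* no payload: bio_data(bio) is NULL, bio_cur_sectors(bio) is 0 */
		issue_cache_flush(dev);
		/* complete the bio without transferring any data */
		return;
	}
	/* normal data path follows */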
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5ed888b04b29..bbf906a0b419 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -330,7 +330,6 @@ typedef void (unplug_fn) (struct request_queue *);
 
 struct bio_vec;
 typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
-typedef int (issue_flush_fn) (struct request_queue *, struct gendisk *, sector_t *);
 typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
 
@@ -368,7 +367,6 @@ struct request_queue
 	prep_rq_fn		*prep_rq_fn;
 	unplug_fn		*unplug_fn;
 	merge_bvec_fn		*merge_bvec_fn;
-	issue_flush_fn		*issue_flush_fn;
 	prepare_flush_fn	*prepare_flush_fn;
 	softirq_done_fn		*softirq_done_fn;
 
@@ -540,6 +538,7 @@ enum {
 #define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
 #define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
 #define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
+#define blk_empty_barrier(rq)	(blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
 
 #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
 
@@ -729,7 +728,9 @@ static inline void blk_run_address_space(struct address_space *mapping)
 extern int end_that_request_first(struct request *, int, int);
 extern int end_that_request_chunk(struct request *, int, int);
 extern void end_that_request_last(struct request *, int);
-extern void end_request(struct request *req, int uptodate);
+extern void end_request(struct request *, int);
+extern void end_queued_request(struct request *, int);
+extern void end_dequeued_request(struct request *, int);
 extern void blk_complete_request(struct request *);
 
 /*
@@ -767,7 +768,6 @@ extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
-extern void blk_queue_issue_flush_fn(struct request_queue *, issue_flush_fn *);
 extern int blk_do_ordered(struct request_queue *, struct request **);
 extern unsigned blk_ordered_cur_seq(struct request_queue *);
 extern unsigned blk_ordered_req_seq(struct request *);
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index 9752307d16ba..7da5b98d90e6 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -32,6 +32,7 @@
 #include <linux/workqueue.h>	/* work_struct */
 #include <linux/mempool.h>
 #include <linux/mutex.h>
+#include <linux/scatterlist.h>
 
 #include <asm/io.h>
 #include <asm/semaphore.h>	/* Needed for MUTEX init macros */
@@ -837,7 +838,7 @@ static inline int i2o_dma_map_sg(struct i2o_controller *c,
 		if ((sizeof(dma_addr_t) > 4) && c->pae_support)
 			*mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg)));
 #endif
-		sg++;
+		sg = sg_next(sg);
 	}
 	*sg_ptr = mptr;
 
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 02a27e8cbad2..30a1931466a6 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -772,7 +772,7 @@ typedef struct hwif_s {
 
 	unsigned int nsect;
 	unsigned int nleft;
-	unsigned int cursg;
+	struct scatterlist *cursg;
 	unsigned int cursg_ofs;
 
 	int		rqsize;		/* max sectors per request */
@@ -1093,11 +1093,6 @@ extern ide_startstop_t ide_do_reset (ide_drive_t *);
 extern void ide_init_drive_cmd (struct request *rq);
 
 /*
- * this function returns error location sector offset in case of a write error
- */
-extern u64 ide_get_error_location(ide_drive_t *, char *);
-
-/*
  * "action" parameter type for ide_do_drive_cmd() below.
  */
 typedef enum {
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 229a9ff9f924..377e6d4d9be3 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -29,7 +29,7 @@
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/dma-mapping.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
 #include <linux/io.h>
 #include <linux/ata.h>
 #include <linux/workqueue.h>
@@ -416,6 +416,7 @@ struct ata_queued_cmd {
 	unsigned long		flags;		/* ATA_QCFLAG_xxx */
 	unsigned int		tag;
 	unsigned int		n_elem;
+	unsigned int		n_iter;
 	unsigned int		orig_n_elem;
 
 	int			dma_dir;
@@ -426,7 +427,7 @@ struct ata_queued_cmd {
 	unsigned int		nbytes;
 	unsigned int		curbytes;
 
-	unsigned int		cursg;
+	struct scatterlist	*cursg;
 	unsigned int		cursg_ofs;
 
 	struct scatterlist	sgent;
@@ -1043,7 +1044,7 @@ ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
 		return 1;
 	if (qc->pad_len)
 		return 0;
-	if (((sg - qc->__sg) + 1) == qc->n_elem)
+	if (qc->n_iter == qc->n_elem)
 		return 1;
 	return 0;
 }
@@ -1051,6 +1052,7 @@ ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
 static inline struct scatterlist *
 ata_qc_first_sg(struct ata_queued_cmd *qc)
 {
+	qc->n_iter = 0;
 	if (qc->n_elem)
 		return qc->__sg;
 	if (qc->pad_len)
@@ -1063,8 +1065,8 @@ ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc)
 {
 	if (sg == &qc->pad_sgent)
 		return NULL;
-	if (++sg - qc->__sg < qc->n_elem)
-		return sg;
+	if (++qc->n_iter < qc->n_elem)
+		return sg_next(sg);
 	if (qc->pad_len)
 		return &qc->pad_sgent;
 	return NULL;
@@ -1309,9 +1311,11 @@ static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
 	qc->dma_dir = DMA_NONE;
 	qc->__sg = NULL;
 	qc->flags = 0;
-	qc->cursg = qc->cursg_ofs = 0;
+	qc->cursg = NULL;
+	qc->cursg_ofs = 0;
 	qc->nbytes = qc->curbytes = 0;
 	qc->n_elem = 0;
+	qc->n_iter = 0;
 	qc->err_mask = 0;
 	qc->pad_len = 0;
 	qc->sect_size = ATA_SECT_SIZE;
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 4efbd9c445f5..2dc7464cce52 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -20,4 +20,88 @@ static inline void sg_init_one(struct scatterlist *sg, const void *buf,
 	sg_set_buf(sg, buf, buflen);
 }
 
+/*
+ * We overload the LSB of the page pointer to indicate whether it's
+ * a valid sg entry, or whether it points to the start of a new scatterlist.
+ * Those low bits are there for everyone! (thanks mason :-)
+ */
+#define sg_is_chain(sg)		((unsigned long) (sg)->page & 0x01)
+#define sg_chain_ptr(sg)	\
+	((struct scatterlist *) ((unsigned long) (sg)->page & ~0x01))
+
+/**
+ * sg_next - return the next scatterlist entry in a list
+ * @sg:		The current sg entry
+ *
+ * Usually the next entry will be @sg@ + 1, but if this sg element is part
+ * of a chained scatterlist, it could jump to the start of a new
+ * scatterlist array.
+ *
+ * Note that the caller must ensure that there are further entries after
+ * the current entry, this function will NOT return NULL for an end-of-list.
+ *
+ */
+static inline struct scatterlist *sg_next(struct scatterlist *sg)
+{
+	sg++;
+
+	if (unlikely(sg_is_chain(sg)))
+		sg = sg_chain_ptr(sg);
+
+	return sg;
+}
+
+/*
+ * Loop over each sg element, following the pointer to a new list if necessary
+ */
+#define for_each_sg(sglist, sg, nr, __i)	\
+	for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
+
+/**
+ * sg_last - return the last scatterlist entry in a list
+ * @sgl:	First entry in the scatterlist
+ * @nents:	Number of entries in the scatterlist
+ *
+ * Should only be used casually, it (currently) scans the entire list
+ * to get the last entry.
+ *
+ * Note that the @sgl@ pointer passed in need not be the first one,
+ * the important bit is that @nents@ denotes the number of entries that
+ * exist from @sgl@.
+ *
+ */
+static inline struct scatterlist *sg_last(struct scatterlist *sgl,
+					  unsigned int nents)
+{
+#ifndef ARCH_HAS_SG_CHAIN
+	struct scatterlist *ret = &sgl[nents - 1];
+#else
+	struct scatterlist *sg, *ret = NULL;
+	int i;
+
+	for_each_sg(sgl, sg, nents, i)
+		ret = sg;
+
+#endif
+	return ret;
+}
+
+/**
+ * sg_chain - Chain two sglists together
+ * @prv:	First scatterlist
+ * @prv_nents:	Number of entries in prv
+ * @sgl:	Second scatterlist
+ *
+ * Links @prv@ and @sgl@ together, to form a longer scatterlist.
+ *
+ */
+static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
+			    struct scatterlist *sgl)
+{
+#ifndef ARCH_HAS_SG_CHAIN
+	BUG();
+#endif
+	prv[prv_nents - 1].page = (struct page *) ((unsigned long) sgl | 0x01);
+}
+
 #endif /* _LINUX_SCATTERLIST_H */
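
A minimal usage sketch of the chaining API above, assuming an architecture that defines ARCH_HAS_SG_CHAIN and the 2.6.23-era struct scatterlist with a ->page member (array sizes and the total variable are illustrative):

	struct scatterlist first[8];	/* first[7] is consumed as the chain link */
	struct scatterlist second[16];
	struct scatterlist *sg;
	unsigned int total = 0;
	int i;

	/* fill first[0..6] and second[0..15], e.g. with sg_set_buf() */
	sg_chain(first, 8, second);	/* first[7].page now points at second[0], LSB set */

	/* 7 + 16 = 23 usable entries; the walk crosses the link transparently */
	for_each_sg(first, sg, 23, i)
		total += sg->length;

sg_next() hides the jump: incrementing past first[6] lands on the chain entry first[7], whose low page-pointer bit redirects the walk to second[0].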
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 9f8f80ab0c8b..702fcfeb37f1 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -11,13 +11,6 @@
 #include <linux/types.h>
 
 /*
- * The maximum sg list length SCSI can cope with
- * (currently must be a power of 2 between 32 and 256)
- */
-#define SCSI_MAX_PHYS_SEGMENTS	MAX_PHYS_SEGMENTS
-
-
-/*
  *	SCSI command lengths
  */
 
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 65ab5145a09b..3f47e522a1ec 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -5,6 +5,7 @@
 #include <linux/list.h>
 #include <linux/types.h>
 #include <linux/timer.h>
+#include <linux/scatterlist.h>
 
 struct request;
 struct scatterlist;
@@ -68,7 +69,7 @@ struct scsi_cmnd {
 
 	/* These elements define the operation we ultimately want to perform */
 	unsigned short use_sg;	/* Number of pieces of scatter-gather */
-	unsigned short sglist_len;	/* size of malloc'd scatter-gather list */
+	unsigned short __use_sg;
 
 	unsigned underflow;	/* Return error if less than
 				   this amount is transferred */
@@ -128,7 +129,7 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
 extern void scsi_kunmap_atomic_sg(void *virt);
 
 extern struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *, gfp_t);
-extern void scsi_free_sgtable(struct scatterlist *, int);
+extern void scsi_free_sgtable(struct scsi_cmnd *);
 
 extern int scsi_dma_map(struct scsi_cmnd *cmd);
 extern void scsi_dma_unmap(struct scsi_cmnd *cmd);
@@ -148,6 +149,6 @@ static inline int scsi_get_resid(struct scsi_cmnd *cmd)
 }
 
 #define scsi_for_each_sg(cmd, sg, nseg, __i)			\
-	for (__i = 0, sg = scsi_sglist(cmd); __i < (nseg); __i++, (sg)++)
+	for_each_sg(scsi_sglist(cmd), sg, nseg, __i)
 
 #endif /* _SCSI_SCSI_CMND_H */
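
With scsi_for_each_sg() now built on for_each_sg(), a low-level driver's segment loop works unchanged whether or not the midlayer hands it a chained table. A hypothetical driver fragment (build_hw_segment() and hw are made up for illustration):

	struct scatterlist *sg;
	int i;

	scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i)
		build_hw_segment(hw, sg_dma_address(sg), sg_dma_len(sg));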
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 7d210cd6c38d..0fd4746ee39d 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -39,6 +39,9 @@ struct blk_queue_tags;
 #define DISABLE_CLUSTERING 0
 #define ENABLE_CLUSTERING 1
 
+#define DISABLE_SG_CHAINING 0
+#define ENABLE_SG_CHAINING 1
+
 enum scsi_eh_timer_return {
 	EH_NOT_HANDLED,
 	EH_HANDLED,
@@ -443,6 +446,15 @@ struct scsi_host_template {
 	unsigned ordered_tag:1;
 
 	/*
+	 * true if the low-level driver can support sg chaining. this
+	 * will be removed eventually when all the drivers are
+	 * converted to support sg chaining.
+	 *
+	 * Status: OBSOLETE
+	 */
+	unsigned use_sg_chaining:1;
+
+	/*
 	 * Countdown for host blocking with no commands outstanding
 	 */
 	unsigned int max_host_blocked;
@@ -586,6 +598,7 @@ struct Scsi_Host {
 	unsigned unchecked_isa_dma:1;
 	unsigned use_clustering:1;
 	unsigned use_blk_tcq:1;
+	unsigned use_sg_chaining:1;
 
 	/*
 	 * Host has requested that no further requests come through for the
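
Per the OBSOLETE comment above, use_sg_chaining is a transitional opt-in: a driver audited for chained tables sets it in its host template, e.g. (hypothetical driver, other fields elided):

	static struct scsi_host_template example_sht = {
		.module			= THIS_MODULE,
		.name			= "example",
		.use_sg_chaining	= ENABLE_SG_CHAINING,
		/* ... queuecommand, error handling, limits ... */
	};

The qla1280 and u14-34f entries in the shortlog above flip exactly this flag.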