author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-16 13:09:16 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-16 13:09:16 -0400
commit     92d15c2ccbb3e31a3fc71ad28fdb55e1319383c0 (patch)
tree       8d83c0dc3c6b935d8367e331872f242b742f0a8a  /arch/powerpc/kernel
parent     f20bf6125605acbbc7eb8c9420d7221c91aa83eb (diff)
parent     644bd2f048972d75eb1979b1fdca257d528ce687 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/data/git/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/data/git/linux-2.6-block: (63 commits)
  Fix memory leak in dm-crypt
  SPARC64: sg chaining support
  SPARC: sg chaining support
  PPC: sg chaining support
  PS3: sg chaining support
  IA64: sg chaining support
  x86-64: enable sg chaining
  x86-64: update pci-gart iommu to sg helpers
  x86-64: update nommu to sg helpers
  x86-64: update calgary iommu to sg helpers
  swiotlb: sg chaining support
  i386: enable sg chaining
  i386 dma_map_sg: convert to using sg helpers
  mmc: need to zero sglist on init
  Panic in blk_rq_map_sg() from CCISS driver
  remove sglist_len
  remove blk_queue_max_phys_segments in libata
  revert sg segment size ifdefs
  Fixup u14-34f ENABLE_SG_CHAINING
  qla1280: enable use_sg_chaining option
  ...
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/dma_64.c    5
-rw-r--r--  arch/powerpc/kernel/ibmebus.c  11
-rw-r--r--  arch/powerpc/kernel/iommu.c    23
3 files changed, 23 insertions, 16 deletions
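
The powerpc hunks below all make the same change: with sg chaining, a scatterlist is no longer guaranteed to be one flat array, so entries must be walked with the sg helpers (for_each_sg(), sg_next()) rather than sg++ or sg[i] indexing. A minimal sketch of the two iteration styles, assuming kernel context (<linux/scatterlist.h>); map_one() is a hypothetical stand-in for whatever per-entry work a dma_map_sg() implementation does, not a real kernel function:

#include <linux/scatterlist.h>

/* Hypothetical placeholder for the real per-entry mapping work. */
static void map_one(struct scatterlist *sg) { }

/* Old style: assumes all nents entries sit in one flat array. */
static int map_sg_flat(struct scatterlist *sg, int nents)
{
	int i;

	for (i = 0; i < nents; i++, sg++)	/* breaks once lists are chained */
		map_one(sg);
	return nents;
}

/* New style: for_each_sg() follows chain links transparently. */
static int map_sg_chained(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		map_one(sg);
	return nents;
}
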
diff --git a/arch/powerpc/kernel/dma_64.c b/arch/powerpc/kernel/dma_64.c
index 7b0e754383cf..9001104b56b0 100644
--- a/arch/powerpc/kernel/dma_64.c
+++ b/arch/powerpc/kernel/dma_64.c
@@ -154,12 +154,13 @@ static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
 {
 }
 
-static int dma_direct_map_sg(struct device *dev, struct scatterlist *sg,
+static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
 		int nents, enum dma_data_direction direction)
 {
+	struct scatterlist *sg;
 	int i;
 
-	for (i = 0; i < nents; i++, sg++) {
+	for_each_sg(sgl, sg, nents, i) {
 		sg->dma_address = (page_to_phys(sg->page) + sg->offset) |
 			dma_direct_offset;
 		sg->dma_length = sg->length;
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 53bf64623bd8..2e16ca5778a3 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -87,15 +87,16 @@ static void ibmebus_unmap_single(struct device *dev,
 }
 
 static int ibmebus_map_sg(struct device *dev,
-			  struct scatterlist *sg,
+			  struct scatterlist *sgl,
 			  int nents, enum dma_data_direction direction)
 {
+	struct scatterlist *sg;
 	int i;
 
-	for (i = 0; i < nents; i++) {
-		sg[i].dma_address = (dma_addr_t)page_address(sg[i].page)
-			+ sg[i].offset;
-		sg[i].dma_length = sg[i].length;
+	for_each_sg(sgl, sg, nents, i) {
+		sg->dma_address = (dma_addr_t)page_address(sg->page)
+			+ sg->offset;
+		sg->dma_length = sg->length;
 	}
 
 	return nents;
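
In the iommu.c hunks that follow, the output cursor (outs) in iommu_map_sg() and the walk in iommu_unmap_sg() advance independently of an indexed loop, so they are stepped with sg_next() instead of pointer arithmetic. A minimal sketch of that manual-advance pattern under the same kernel assumptions; unmap_one() is a hypothetical stand-in for the real per-entry teardown:

#include <linux/scatterlist.h>

/* Hypothetical placeholder for the real per-entry teardown work. */
static void unmap_one(struct scatterlist *sg) { }

static void unmap_sg_chained(struct scatterlist *sgl, int nelems)
{
	struct scatterlist *sg = sgl;		/* cursor into a possibly chained list */

	while (nelems--) {
		if (sg->dma_length == 0)	/* end of mapped segments: stop early */
			break;
		unmap_one(sg);
		sg = sg_next(sg);		/* was sg++, wrong across chain boundaries */
	}
}
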
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index e4ec6eee81a8..306a6f75b6c5 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -277,7 +277,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 	dma_addr_t dma_next = 0, dma_addr;
 	unsigned long flags;
 	struct scatterlist *s, *outs, *segstart;
-	int outcount, incount;
+	int outcount, incount, i;
 	unsigned long handle;
 
 	BUG_ON(direction == DMA_NONE);
@@ -297,7 +297,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 
 	spin_lock_irqsave(&(tbl->it_lock), flags);
 
-	for (s = outs; nelems; nelems--, s++) {
+	for_each_sg(sglist, s, nelems, i) {
 		unsigned long vaddr, npages, entry, slen;
 
 		slen = s->length;
@@ -341,7 +341,8 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 		if (novmerge || (dma_addr != dma_next)) {
 			/* Can't merge: create a new segment */
 			segstart = s;
-			outcount++; outs++;
+			outcount++;
+			outs = sg_next(outs);
 			DBG(" can't merge, new segment.\n");
 		} else {
 			outs->dma_length += s->length;
@@ -374,7 +375,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 	 * next entry of the sglist if we didn't fill the list completely
 	 */
 	if (outcount < incount) {
-		outs++;
+		outs = sg_next(outs);
 		outs->dma_address = DMA_ERROR_CODE;
 		outs->dma_length = 0;
 	}
@@ -385,7 +386,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 	return outcount;
 
  failure:
-	for (s = &sglist[0]; s <= outs; s++) {
+	for_each_sg(sglist, s, nelems, i) {
 		if (s->dma_length != 0) {
 			unsigned long vaddr, npages;
 
@@ -395,6 +396,8 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 			s->dma_address = DMA_ERROR_CODE;
 			s->dma_length = 0;
 		}
+		if (s == outs)
+			break;
 	}
 	spin_unlock_irqrestore(&(tbl->it_lock), flags);
 	return 0;
@@ -404,6 +407,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 		int nelems, enum dma_data_direction direction)
 {
+	struct scatterlist *sg;
 	unsigned long flags;
 
 	BUG_ON(direction == DMA_NONE);
@@ -413,15 +417,16 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 
 	spin_lock_irqsave(&(tbl->it_lock), flags);
 
+	sg = sglist;
 	while (nelems--) {
 		unsigned int npages;
-		dma_addr_t dma_handle = sglist->dma_address;
+		dma_addr_t dma_handle = sg->dma_address;
 
-		if (sglist->dma_length == 0)
+		if (sg->dma_length == 0)
 			break;
-		npages = iommu_num_pages(dma_handle,sglist->dma_length);
+		npages = iommu_num_pages(dma_handle, sg->dma_length);
 		__iommu_free(tbl, dma_handle, npages);
-		sglist++;
+		sg = sg_next(sg);
 	}
 
 	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we