author     FUJITA Tomonori <tomof@acm.org>                         2008-02-05 01:27:58 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2008-02-05 12:44:10 -0500
commit     7c53664dcd5df7349edb56f04c743bf66510a6f1
tree       377cd9cced6757f986806d51c6e428f5b1874e49
parent     a031bbcb8d7559d61f383880f23dd0e047247410
iommu sg merging: alpha: make pci_iommu respect the segment size limits
This patch makes pci_iommu respect segment size limits when merging sg lists.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Acked-by: Jens Axboe <jens.axboe@oracle.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/kernel/pci_iommu.c  24
1 file changed, 18 insertions(+), 6 deletions(-)
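
The limit that the patched sg_classify() consults comes from the generic DMA layer: dma_get_max_seg_size() reads dev->dma_parms, which the owning driver has to set up. A minimal sketch of how a hypothetical driver might advertise that limit is shown below; the example_* names and the 64 KB value are illustrative assumptions, not part of this patch.

/*
 * Illustrative sketch only: how a hypothetical driver could advertise
 * the segment size limit that the patched sg_classify() picks up via
 * dma_get_max_seg_size().  The example_* names and the 64 KB value
 * are made up for illustration.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>

static struct device_dma_parameters example_dma_parms;

static int example_set_dma_limits(struct device *dev)
{
        /* dma_set_max_seg_size() stores the value in dev->dma_parms,
           so the driver must point dma_parms somewhere first. */
        dev->dma_parms = &example_dma_parms;

        /* Never let the IOMMU merge sg entries beyond 64 KB. */
        return dma_set_max_seg_size(dev, 65536);
}
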
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index 2d00a08d3f08..26d3789dfdd0 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -9,6 +9,7 @@
 #include <linux/bootmem.h>
 #include <linux/scatterlist.h>
 #include <linux/log2.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/io.h>
 #include <asm/hwrpb.h>
@@ -470,22 +471,29 @@ EXPORT_SYMBOL(pci_free_consistent);
 #define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
 
 static void
-sg_classify(struct scatterlist *sg, struct scatterlist *end, int virt_ok)
+sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
+            int virt_ok)
 {
         unsigned long next_paddr;
         struct scatterlist *leader;
         long leader_flag, leader_length;
+        unsigned int max_seg_size;
 
         leader = sg;
         leader_flag = 0;
         leader_length = leader->length;
         next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;
 
+        /* we will not marge sg without device. */
+        max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
         for (++sg; sg < end; ++sg) {
                 unsigned long addr, len;
                 addr = SG_ENT_PHYS_ADDRESS(sg);
                 len = sg->length;
 
+                if (leader_length + len > max_seg_size)
+                        goto new_segment;
+
                 if (next_paddr == addr) {
                         sg->dma_address = -1;
                         leader_length += len;
@@ -494,6 +502,7 @@ sg_classify(struct scatterlist *sg, struct scatterlist *end, int virt_ok)
                         leader_flag = 1;
                         leader_length += len;
                 } else {
+new_segment:
                         leader->dma_address = leader_flag;
                         leader->dma_length = leader_length;
                         leader = sg;
@@ -512,7 +521,7 @@ sg_classify(struct scatterlist *sg, struct scatterlist *end, int virt_ok)
    in the blanks.  */
 
 static int
-sg_fill(struct scatterlist *leader, struct scatterlist *end,
+sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
         struct scatterlist *out, struct pci_iommu_arena *arena,
         dma_addr_t max_dma, int dac_allowed)
 {
@@ -562,8 +571,8 @@ sg_fill(struct scatterlist *leader, struct scatterlist *end,
 
                 /* Otherwise, break up the remaining virtually contiguous
                    hunks into individual direct maps and retry.  */
-                sg_classify(leader, end, 0);
-                return sg_fill(leader, end, out, arena, max_dma, dac_allowed);
+                sg_classify(dev, leader, end, 0);
+                return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
         }
 
         out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
@@ -619,12 +628,15 @@ pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
         struct pci_iommu_arena *arena;
         dma_addr_t max_dma;
         int dac_allowed;
+        struct device *dev;
 
         if (direction == PCI_DMA_NONE)
                 BUG();
 
         dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
 
+        dev = pdev ? &pdev->dev : NULL;
+
         /* Fast path single entry scatterlists.  */
         if (nents == 1) {
                 sg->dma_length = sg->length;
@@ -638,7 +650,7 @@ pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
         end = sg + nents;
 
         /* First, prepare information about the entries.  */
-        sg_classify(sg, end, alpha_mv.mv_pci_tbi != 0);
+        sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);
 
         /* Second, figure out where we're going to map things.  */
         if (alpha_mv.mv_pci_tbi) {
@@ -658,7 +670,7 @@ pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
         for (out = sg; sg < end; ++sg) {
                 if ((int) sg->dma_address < 0)
                         continue;
-                if (sg_fill(sg, end, out, arena, max_dma, dac_allowed) < 0)
+                if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
                         goto error;
                 out++;
         }
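
Condensed for illustration (no such helper exists in the patch), the new test in sg_classify() amounts to the check below. When pci_map_sg() is called without a pci_dev, dev is NULL and max_seg_size is 0, so the test always fails and no two sg entries are coalesced.

/*
 * Illustration only: the merge test added above, distilled into a
 * standalone helper.  A zero max_seg_size (no device) means every
 * entry starts its own segment.
 */
static int sg_can_merge(long leader_length, unsigned long len,
                        unsigned int max_seg_size)
{
        return leader_length + len <= max_seg_size;
}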