author    Joerg Roedel <joerg.roedel@amd.com>    2008-09-04 13:18:02 -0400
committer Ingo Molnar <mingo@elte.hu>            2008-09-19 06:59:10 -0400
commit    6d4f343f84993eb0d5864c0823dc9babd171a33a (patch)
tree      f9fc776688b0b0b9b495f10f6fda707661b38792 /arch
parent    5507eef835c9c941e69d6d96e4b43af23eeb4ac9 (diff)
AMD IOMMU: align alloc_coherent addresses properly
The API definition for dma_alloc_coherent states that the returned bus
address has to be aligned to the smallest power-of-2 boundary that is at
least as large as the allocation size. The AMD IOMMU driver has violated
this requirement so far; this patch fixes it.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
 arch/x86/kernel/amd_iommu.c | 22 ++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)
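The whole fix boils down to deriving an alignment mask from the allocation
size and handing it to the address allocator. Below is a minimal standalone
sketch of that derivation (illustration only, not part of the patch;
order_of() is a simplified stand-in for the kernel's get_order() helper):

#include <stdio.h>

/* simplified stand-in for the kernel's get_order(): smallest order
 * such that (page_size << order) covers the requested size */
static unsigned long order_of(unsigned long size, unsigned long page_size)
{
	unsigned long order = 0;

	while ((page_size << order) < size)
		order++;
	return order;
}

int main(void)
{
	const unsigned long page_size = 4096;
	const unsigned long sizes[] = { 4096, 8192, 65536 };

	for (int i = 0; i < 3; i++) {
		unsigned long order = order_of(sizes[i], page_size);
		unsigned long align_mask = (1UL << order) - 1;

		/* e.g. size 8192 -> order 1 -> mask 0x1: the allocated
		 * page index must be even, so the bus address is
		 * aligned to 8192 bytes */
		printf("size %6lu -> order %lu -> align_mask 0x%lx\n",
		       sizes[i], order, align_mask);
	}
	return 0;
}

With that mask, the allocator only has to skip candidate page indices whose
low bits are set; the diff below wires the mask through
dma_ops_alloc_addresses() into iommu_area_alloc().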
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index d743aa0adccc..15792ed082e0 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -383,7 +383,8 @@ static unsigned long dma_mask_to_pages(unsigned long mask)
  */
 static unsigned long dma_ops_alloc_addresses(struct device *dev,
 					     struct dma_ops_domain *dom,
-					     unsigned int pages)
+					     unsigned int pages,
+					     unsigned long align_mask)
 {
 	unsigned long limit = dma_mask_to_pages(*dev->dma_mask);
 	unsigned long address;
@@ -400,10 +401,10 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev,
 	}
 
 	address = iommu_area_alloc(dom->bitmap, limit, dom->next_bit, pages,
-				   0 , boundary_size, 0);
+				   0 , boundary_size, align_mask);
 	if (address == -1) {
 		address = iommu_area_alloc(dom->bitmap, limit, 0, pages,
-					   0, boundary_size, 0);
+					   0, boundary_size, align_mask);
 		dom->need_flush = true;
 	}
 
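The only functional change in the two hunks above is the last argument to
iommu_area_alloc(). As a rough standalone model of what that argument does
(the names and the byte-per-page map are invented for this example; the real
helper works on a bitmap in lib/iommu-helper.c):

#include <stdio.h>

/* toy model: one byte per page, nonzero means the page is in use */
static int page_used(const unsigned char *map, unsigned long idx)
{
	return map[idx];
}

/* first-fit search for nr free pages starting at an aligned index */
static unsigned long find_aligned_area(const unsigned char *map,
				       unsigned long map_size,
				       unsigned long start, unsigned long nr,
				       unsigned long align_mask)
{
	/* round the candidate start up to the required alignment */
	unsigned long index = (start + align_mask) & ~align_mask;

	while (index + nr <= map_size) {
		unsigned long i;

		for (i = 0; i < nr; i++)
			if (page_used(map, index + i))
				break;
		if (i == nr)
			return index;	/* nr free pages, aligned start */
		/* skip past the conflict, then re-align and retry */
		index = (index + i + 1 + align_mask) & ~align_mask;
	}
	return (unsigned long)-1;	/* mirrors the -1 checked above */
}

int main(void)
{
	unsigned char map[16] = { 1, 1, 1, 0 };	/* pages 0-2 in use */

	/* two pages with align_mask 0x1: page 3 is free but odd,
	 * so the first acceptable run starts at page 4 */
	printf("%lu\n", find_aligned_area(map, 16, 0, 2, 0x1));
	return 0;
}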
@@ -787,17 +788,22 @@ static dma_addr_t __map_single(struct device *dev,
 			       struct dma_ops_domain *dma_dom,
 			       phys_addr_t paddr,
 			       size_t size,
-			       int dir)
+			       int dir,
+			       bool align)
 {
 	dma_addr_t offset = paddr & ~PAGE_MASK;
 	dma_addr_t address, start;
 	unsigned int pages;
+	unsigned long align_mask = 0;
 	int i;
 
 	pages = iommu_num_pages(paddr, size);
 	paddr &= PAGE_MASK;
 
-	address = dma_ops_alloc_addresses(dev, dma_dom, pages);
+	if (align)
+		align_mask = (1UL << get_order(size)) - 1;
+
+	address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask);
 	if (unlikely(address == bad_dma_address))
 		goto out;
 
@@ -872,7 +878,7 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
 		return (dma_addr_t)paddr;
 
 	spin_lock_irqsave(&domain->lock, flags);
-	addr = __map_single(dev, iommu, domain->priv, paddr, size, dir);
+	addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false);
 	if (addr == bad_dma_address)
 		goto out;
 
@@ -959,7 +965,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 		paddr = sg_phys(s);
 
 		s->dma_address = __map_single(dev, iommu, domain->priv,
-					      paddr, s->length, dir);
+					      paddr, s->length, dir, false);
 
 		if (s->dma_address) {
 			s->dma_length = s->length;
@@ -1053,7 +1059,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
 	spin_lock_irqsave(&domain->lock, flags);
 
 	*dma_addr = __map_single(dev, iommu, domain->priv, paddr,
-				 size, DMA_BIDIRECTIONAL);
+				 size, DMA_BIDIRECTIONAL, true);
 
 	if (*dma_addr == bad_dma_address) {
 		free_pages((unsigned long)virt_addr, get_order(size));
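The net effect is visible to alloc_coherent callers: the returned bus address
now has no bits set below the power-of-2 block covering the allocation. A
hypothetical caller-side sanity check, sketched against the standard DMA API
(dev and size assumed in scope; not code from this patch):

	void *cpu_addr;
	dma_addr_t dma_handle;
	size_t block;	/* power-of-2 block covering the allocation */

	cpu_addr = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
	if (cpu_addr) {
		block = PAGE_SIZE << get_order(size);
		WARN_ON(dma_handle & (block - 1));	/* must hold after this patch */
		dma_free_coherent(dev, size, cpu_addr, dma_handle);
	}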