path: root/arch/x86/kernel/amd_iommu.c
author     Joerg Roedel <joerg.roedel@amd.com>    2008-09-18 09:54:23 -0400
committer  Ingo Molnar <mingo@elte.hu>            2008-09-19 06:59:34 -0400
commit     832a90c30485117d65180cc9a8d9869c1b158570 (patch)
tree       e8a183e647252766d178bb297854d1e1862553ac /arch/x86/kernel/amd_iommu.c
parent     bbd001c73cb92aa8f779ae44bb89d8a5dee74ad5 (diff)
AMD IOMMU: use coherent_dma_mask in alloc_coherent

The alloc_coherent implementation for AMD IOMMU currently uses *dev->dma_mask
by default. This patch changes it to prefer dev->coherent_dma_mask if it is
set.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--  arch/x86/kernel/amd_iommu.c  |  29
1 file changed, 22 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index db64482b1796..6f7b97445738 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -483,9 +483,10 @@ static unsigned long dma_mask_to_pages(unsigned long mask)
 static unsigned long dma_ops_alloc_addresses(struct device *dev,
                                              struct dma_ops_domain *dom,
                                              unsigned int pages,
-                                             unsigned long align_mask)
+                                             unsigned long align_mask,
+                                             u64 dma_mask)
 {
-        unsigned long limit = dma_mask_to_pages(*dev->dma_mask);
+        unsigned long limit = dma_mask_to_pages(dma_mask);
         unsigned long address;
         unsigned long size = dom->aperture_size >> PAGE_SHIFT;
         unsigned long boundary_size;
@@ -919,7 +920,8 @@ static dma_addr_t __map_single(struct device *dev,
                                phys_addr_t paddr,
                                size_t size,
                                int dir,
-                               bool align)
+                               bool align,
+                               u64 dma_mask)
 {
         dma_addr_t offset = paddr & ~PAGE_MASK;
         dma_addr_t address, start;
@@ -933,7 +935,8 @@ static dma_addr_t __map_single(struct device *dev,
         if (align)
                 align_mask = (1UL << get_order(size)) - 1;
 
-        address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask);
+        address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
+                                          dma_mask);
         if (unlikely(address == bad_dma_address))
                 goto out;
 
@@ -997,10 +1000,13 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
         struct protection_domain *domain;
         u16 devid;
         dma_addr_t addr;
+        u64 dma_mask;
 
         if (!check_device(dev))
                 return bad_dma_address;
 
+        dma_mask = *dev->dma_mask;
+
         get_device_resources(dev, &iommu, &domain, &devid);
 
         if (iommu == NULL || domain == NULL)
@@ -1008,7 +1014,8 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
                 return (dma_addr_t)paddr;
 
         spin_lock_irqsave(&domain->lock, flags);
-        addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false);
+        addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
+                            dma_mask);
         if (addr == bad_dma_address)
                 goto out;
 
@@ -1080,10 +1087,13 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
         struct scatterlist *s;
         phys_addr_t paddr;
         int mapped_elems = 0;
+        u64 dma_mask;
 
         if (!check_device(dev))
                 return 0;
 
+        dma_mask = *dev->dma_mask;
+
         get_device_resources(dev, &iommu, &domain, &devid);
 
         if (!iommu || !domain)
@@ -1095,7 +1105,8 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
                 paddr = sg_phys(s);
 
                 s->dma_address = __map_single(dev, iommu, domain->priv,
-                                              paddr, s->length, dir, false);
+                                              paddr, s->length, dir, false,
+                                              dma_mask);
 
                 if (s->dma_address) {
                         s->dma_length = s->length;
@@ -1168,6 +1179,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
         struct protection_domain *domain;
         u16 devid;
         phys_addr_t paddr;
+        u64 dma_mask = dev->coherent_dma_mask;
 
         if (!check_device(dev))
                 return NULL;
@@ -1187,10 +1199,13 @@ static void *alloc_coherent(struct device *dev, size_t size,
                 return virt_addr;
         }
 
+        if (!dma_mask)
+                dma_mask = *dev->dma_mask;
+
         spin_lock_irqsave(&domain->lock, flags);
 
         *dma_addr = __map_single(dev, iommu, domain->priv, paddr,
-                                 size, DMA_BIDIRECTIONAL, true);
+                                 size, DMA_BIDIRECTIONAL, true, dma_mask);
 
         if (*dma_addr == bad_dma_address) {
                 free_pages((unsigned long)virt_addr, get_order(size));
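
In effect, after this patch alloc_coherent selects its allocation mask as in
the condensed sketch below. This is not the literal kernel code, only a
summary of the logic added by the hunks above; all identifiers come from the
diff itself:

        /* Prefer the coherent mask for coherent allocations ... */
        u64 dma_mask = dev->coherent_dma_mask;

        /* ... but fall back to the streaming mask if it is unset. */
        if (!dma_mask)
                dma_mask = *dev->dma_mask;

        /*
         * The chosen mask is passed down through __map_single() to
         * dma_ops_alloc_addresses(), which turns it into the address
         * allocator's upper page limit:
         */
        unsigned long limit = dma_mask_to_pages(dma_mask);

The streaming paths (map_single, map_sg) keep using *dev->dma_mask, which they
now capture explicitly and pass through the same parameter.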