author		Arthur Kepner <akepner@sgi.com>		2008-04-29 04:00:32 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-29 11:06:12 -0400
commit		309df0c503c35fbb5a09537fcbb1f4967b9ca489 (patch)
tree		56a9df627a229bd64b934608b5f84d20bdaabf3a
parent		a75b0a2f68d3937f96ed39525e4750601483e3b4 (diff)
dma/ia64: update ia64 machvecs, swiotlb.c
Change all ia64 machvecs to use the new dma_*map*_attrs() interfaces.
Implement the old dma_*map_*() interfaces in terms of the corresponding new
interfaces. For ia64/sn, make use of one dma attribute,
DMA_ATTR_WRITE_BARRIER. Introduce swiotlb_*map*_attrs() functions.

Signed-off-by: Arthur Kepner <akepner@sgi.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: Jes Sorensen <jes@sgi.com>
Cc: Randy Dunlap <randy.dunlap@oracle.com>
Cc: Roland Dreier <rdreier@cisco.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Grant Grundler <grundler@parisc-linux.org>
Cc: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	arch/ia64/hp/common/hwsw_iommu.c		61
-rw-r--r--	arch/ia64/hp/common/sba_iommu.c			64
-rw-r--r--	arch/ia64/sn/pci/pci_dma.c			81
-rw-r--r--	include/asm-ia64/dma-mapping.h			28
-rw-r--r--	include/asm-ia64/machvec.h			50
-rw-r--r--	include/asm-ia64/machvec_hpzx1.h		16
-rw-r--r--	include/asm-ia64/machvec_hpzx1_swiotlb.h	16
-rw-r--r--	include/asm-ia64/machvec_sn2.h			16
-rw-r--r--	lib/swiotlb.c					50
9 files changed, 249 insertions(+), 133 deletions(-)
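A minimal usage sketch (not part of the commit) of the interface this patch introduces, assuming the dma-attrs helpers from the same patch series (init_dma_attrs(), dma_set_attr()); the helper name map_completion_flag() and its dev/buf/len arguments are hypothetical:

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

/* Hypothetical driver helper: map a completion flag that the device
 * writes last, asking that the write flush all prior DMA. ia64/sn
 * honors this via DMA_ATTR_WRITE_BARRIER; other platforms may ignore it. */
static dma_addr_t map_completion_flag(struct device *dev, void *buf, size_t len)
{
	struct dma_attrs attrs;

	init_dma_attrs(&attrs);				/* start with no attributes */
	dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);

	/* Passing NULL instead of &attrs gives plain dma_map_single()
	 * behavior; on ia64 this call dispatches through the machvec. */
	return dma_map_single_attrs(dev, buf, len, DMA_FROM_DEVICE, &attrs);
}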
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 8f6bcfe1dada..1c44ec2a1d58 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -20,10 +20,10 @@
 extern int swiotlb_late_init_with_default_size (size_t size);
 extern ia64_mv_dma_alloc_coherent	swiotlb_alloc_coherent;
 extern ia64_mv_dma_free_coherent	swiotlb_free_coherent;
-extern ia64_mv_dma_map_single		swiotlb_map_single;
-extern ia64_mv_dma_unmap_single		swiotlb_unmap_single;
-extern ia64_mv_dma_map_sg		swiotlb_map_sg;
-extern ia64_mv_dma_unmap_sg		swiotlb_unmap_sg;
+extern ia64_mv_dma_map_single_attrs	swiotlb_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs	swiotlb_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs		swiotlb_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs	swiotlb_unmap_sg_attrs;
 extern ia64_mv_dma_supported		swiotlb_dma_supported;
 extern ia64_mv_dma_mapping_error	swiotlb_dma_mapping_error;
 
@@ -31,19 +31,19 @@ extern ia64_mv_dma_mapping_error swiotlb_dma_mapping_error;
 
 extern ia64_mv_dma_alloc_coherent	sba_alloc_coherent;
 extern ia64_mv_dma_free_coherent	sba_free_coherent;
-extern ia64_mv_dma_map_single		sba_map_single;
-extern ia64_mv_dma_unmap_single		sba_unmap_single;
-extern ia64_mv_dma_map_sg		sba_map_sg;
-extern ia64_mv_dma_unmap_sg		sba_unmap_sg;
+extern ia64_mv_dma_map_single_attrs	sba_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs	sba_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs		sba_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs	sba_unmap_sg_attrs;
 extern ia64_mv_dma_supported		sba_dma_supported;
 extern ia64_mv_dma_mapping_error	sba_dma_mapping_error;
 
 #define hwiommu_alloc_coherent		sba_alloc_coherent
 #define hwiommu_free_coherent		sba_free_coherent
-#define hwiommu_map_single		sba_map_single
-#define hwiommu_unmap_single		sba_unmap_single
-#define hwiommu_map_sg			sba_map_sg
-#define hwiommu_unmap_sg		sba_unmap_sg
+#define hwiommu_map_single_attrs	sba_map_single_attrs
+#define hwiommu_unmap_single_attrs	sba_unmap_single_attrs
+#define hwiommu_map_sg_attrs		sba_map_sg_attrs
+#define hwiommu_unmap_sg_attrs		sba_unmap_sg_attrs
 #define hwiommu_dma_supported		sba_dma_supported
 #define hwiommu_dma_mapping_error	sba_dma_mapping_error
 #define hwiommu_sync_single_for_cpu	machvec_dma_sync_single
@@ -98,41 +98,48 @@ hwsw_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma
 }
 
 dma_addr_t
-hwsw_map_single (struct device *dev, void *addr, size_t size, int dir)
+hwsw_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
+		      struct dma_attrs *attrs)
 {
 	if (use_swiotlb(dev))
-		return swiotlb_map_single(dev, addr, size, dir);
+		return swiotlb_map_single_attrs(dev, addr, size, dir, attrs);
 	else
-		return hwiommu_map_single(dev, addr, size, dir);
+		return hwiommu_map_single_attrs(dev, addr, size, dir, attrs);
 }
+EXPORT_SYMBOL(hwsw_map_single_attrs);
 
 void
-hwsw_unmap_single (struct device *dev, dma_addr_t iova, size_t size, int dir)
+hwsw_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
+			int dir, struct dma_attrs *attrs)
 {
 	if (use_swiotlb(dev))
-		return swiotlb_unmap_single(dev, iova, size, dir);
+		return swiotlb_unmap_single_attrs(dev, iova, size, dir, attrs);
 	else
-		return hwiommu_unmap_single(dev, iova, size, dir);
+		return hwiommu_unmap_single_attrs(dev, iova, size, dir, attrs);
 }
-
+EXPORT_SYMBOL(hwsw_unmap_single_attrs);
 
 int
-hwsw_map_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
+hwsw_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
+		  int dir, struct dma_attrs *attrs)
 {
 	if (use_swiotlb(dev))
-		return swiotlb_map_sg(dev, sglist, nents, dir);
+		return swiotlb_map_sg_attrs(dev, sglist, nents, dir, attrs);
 	else
-		return hwiommu_map_sg(dev, sglist, nents, dir);
+		return hwiommu_map_sg_attrs(dev, sglist, nents, dir, attrs);
 }
+EXPORT_SYMBOL(hwsw_map_sg_attrs);
 
 void
-hwsw_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
+hwsw_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
+		    int dir, struct dma_attrs *attrs)
 {
 	if (use_swiotlb(dev))
-		return swiotlb_unmap_sg(dev, sglist, nents, dir);
+		return swiotlb_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
 	else
-		return hwiommu_unmap_sg(dev, sglist, nents, dir);
+		return hwiommu_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
 }
+EXPORT_SYMBOL(hwsw_unmap_sg_attrs);
 
 void
 hwsw_sync_single_for_cpu (struct device *dev, dma_addr_t addr, size_t size, int dir)
@@ -185,10 +192,6 @@ hwsw_dma_mapping_error (dma_addr_t dma_addr)
 }
 
 EXPORT_SYMBOL(hwsw_dma_mapping_error);
-EXPORT_SYMBOL(hwsw_map_single);
-EXPORT_SYMBOL(hwsw_unmap_single);
-EXPORT_SYMBOL(hwsw_map_sg);
-EXPORT_SYMBOL(hwsw_unmap_sg);
 EXPORT_SYMBOL(hwsw_dma_supported);
 EXPORT_SYMBOL(hwsw_alloc_coherent);
 EXPORT_SYMBOL(hwsw_free_coherent);
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 9409de5c9441..6ce729f46de8 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -899,16 +899,18 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
 }
 
 /**
- * sba_map_single - map one buffer and return IOVA for DMA
+ * sba_map_single_attrs - map one buffer and return IOVA for DMA
  * @dev: instance of PCI owned by the driver that's asking.
  * @addr:  driver buffer to map.
  * @size:  number of bytes to map in driver buffer.
  * @dir:  R/W or both.
+ * @attrs: optional dma attributes
  *
  * See Documentation/DMA-mapping.txt
  */
 dma_addr_t
-sba_map_single(struct device *dev, void *addr, size_t size, int dir)
+sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
+		     struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 	dma_addr_t iovp;
@@ -932,7 +934,8 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 		** Device is bit capable of DMA'ing to the buffer...
 		** just return the PCI address of ptr
 		*/
-		DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n",
+		DBG_BYPASS("sba_map_single_attrs() bypass mask/addr: "
+			   "0x%lx/0x%lx\n",
 		           to_pci_dev(dev)->dma_mask, pci_addr);
 		return pci_addr;
 	}
@@ -953,7 +956,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	if (sba_check_pdir(ioc,"Check before sba_map_single()"))
+	if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()"))
 		panic("Sanity check failed");
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
@@ -982,11 +985,12 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 	/* form complete address */
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	sba_check_pdir(ioc,"Check after sba_map_single()");
+	sba_check_pdir(ioc,"Check after sba_map_single_attrs()");
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 	return SBA_IOVA(ioc, iovp, offset);
 }
+EXPORT_SYMBOL(sba_map_single_attrs);
 
 #ifdef ENABLE_MARK_CLEAN
 static SBA_INLINE void
@@ -1013,15 +1017,17 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
 #endif
 
 /**
- * sba_unmap_single - unmap one IOVA and free resources
+ * sba_unmap_single_attrs - unmap one IOVA and free resources
  * @dev: instance of PCI owned by the driver that's asking.
  * @iova:  IOVA of driver buffer previously mapped.
  * @size:  number of bytes mapped in driver buffer.
  * @dir:  R/W or both.
+ * @attrs: optional dma attributes
  *
  * See Documentation/DMA-mapping.txt
  */
-void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
+void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
+			    int dir, struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 #if DELAYED_RESOURCE_CNT > 0
@@ -1038,7 +1044,8 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
 		/*
 		** Address does not fall w/in IOVA, must be bypassing
 		*/
-		DBG_BYPASS("sba_unmap_single() bypass addr: 0x%lx\n", iova);
+		DBG_BYPASS("sba_unmap_single_atttrs() bypass addr: 0x%lx\n",
+			   iova);
 
 #ifdef ENABLE_MARK_CLEAN
 		if (dir == DMA_FROM_DEVICE) {
@@ -1087,7 +1094,7 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif /* DELAYED_RESOURCE_CNT == 0 */
 }
-
+EXPORT_SYMBOL(sba_unmap_single_attrs);
 
 /**
  * sba_alloc_coherent - allocate/map shared mem for DMA
@@ -1144,7 +1151,8 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
 	 * If device can't bypass or bypass is disabled, pass the 32bit fake
 	 * device to map single to get an iova mapping.
 	 */
-	*dma_handle = sba_map_single(&ioc->sac_only_dev->dev, addr, size, 0);
+	*dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,
+					   size, 0, NULL);
 
 	return addr;
 }
@@ -1161,7 +1169,7 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
  */
 void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
 {
-	sba_unmap_single(dev, dma_handle, size, 0);
+	sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
 	free_pages((unsigned long) vaddr, get_order(size));
 }
 
@@ -1410,10 +1418,12 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
  * @sglist:  array of buffer/length pairs
  * @nents:  number of entries in list
  * @dir:  R/W or both.
+ * @attrs: optional dma attributes
  *
  * See Documentation/DMA-mapping.txt
  */
-int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int dir)
+int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
+		     int dir, struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 	int coalesced, filled = 0;
@@ -1441,16 +1451,16 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 	/* Fast path single entry scatterlists. */
 	if (nents == 1) {
 		sglist->dma_length = sglist->length;
-		sglist->dma_address = sba_map_single(dev, sba_sg_address(sglist), sglist->length, dir);
+		sglist->dma_address = sba_map_single_attrs(dev, sba_sg_address(sglist), sglist->length, dir, attrs);
 		return 1;
 	}
 
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
+	if (sba_check_pdir(ioc,"Check before sba_map_sg_attrs()"))
 	{
 		sba_dump_sg(ioc, sglist, nents);
-		panic("Check before sba_map_sg()");
+		panic("Check before sba_map_sg_attrs()");
 	}
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
@@ -1479,10 +1489,10 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
+	if (sba_check_pdir(ioc,"Check after sba_map_sg_attrs()"))
 	{
 		sba_dump_sg(ioc, sglist, nents);
-		panic("Check after sba_map_sg()\n");
+		panic("Check after sba_map_sg_attrs()\n");
 	}
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
@@ -1492,18 +1502,20 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 
 	return filled;
 }
-
+EXPORT_SYMBOL(sba_map_sg_attrs);
 
 /**
- * sba_unmap_sg - unmap Scatter/Gather list
+ * sba_unmap_sg_attrs - unmap Scatter/Gather list
  * @dev: instance of PCI owned by the driver that's asking.
  * @sglist:  array of buffer/length pairs
  * @nents:  number of entries in list
  * @dir:  R/W or both.
+ * @attrs: optional dma attributes
  *
  * See Documentation/DMA-mapping.txt
  */
-void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
+void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
+			int nents, int dir, struct dma_attrs *attrs)
 {
 #ifdef ASSERT_PDIR_SANITY
 	struct ioc *ioc;
@@ -1518,13 +1530,14 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
 	ASSERT(ioc);
 
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	sba_check_pdir(ioc,"Check before sba_unmap_sg()");
+	sba_check_pdir(ioc,"Check before sba_unmap_sg_attrs()");
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 
 	while (nents && sglist->dma_length) {
 
-		sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir);
+		sba_unmap_single_attrs(dev, sglist->dma_address,
+				       sglist->dma_length, dir, attrs);
 		sglist = sg_next(sglist);
 		nents--;
 	}
@@ -1533,11 +1546,12 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
 
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	sba_check_pdir(ioc,"Check after sba_unmap_sg()");
+	sba_check_pdir(ioc,"Check after sba_unmap_sg_attrs()");
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 
 }
+EXPORT_SYMBOL(sba_unmap_sg_attrs);
 
 /**************************************************************
 *
@@ -2166,10 +2180,6 @@ sba_page_override(char *str)
 __setup("sbapagesize=",sba_page_override);
 
 EXPORT_SYMBOL(sba_dma_mapping_error);
-EXPORT_SYMBOL(sba_map_single);
-EXPORT_SYMBOL(sba_unmap_single);
-EXPORT_SYMBOL(sba_map_sg);
-EXPORT_SYMBOL(sba_unmap_sg);
 EXPORT_SYMBOL(sba_dma_supported);
 EXPORT_SYMBOL(sba_alloc_coherent);
 EXPORT_SYMBOL(sba_free_coherent);
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 18b94b792d54..52175af299a0 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -10,6 +10,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/dma-attrs.h>
 #include <asm/dma.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/pcibus_provider_defs.h>
@@ -149,11 +150,12 @@ void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
 EXPORT_SYMBOL(sn_dma_free_coherent);
 
 /**
- * sn_dma_map_single - map a single page for DMA
+ * sn_dma_map_single_attrs - map a single page for DMA
  * @dev: device to map for
  * @cpu_addr: kernel virtual address of the region to map
  * @size: size of the region
  * @direction: DMA direction
+ * @attrs: optional dma attributes
  *
  * Map the region pointed to by @cpu_addr for DMA and return the
  * DMA address.
@@ -163,42 +165,59 @@ EXPORT_SYMBOL(sn_dma_free_coherent);
  * no way of saving the dmamap handle from the alloc to later free
  * (which is pretty much unacceptable).
  *
+ * mappings with the DMA_ATTR_WRITE_BARRIER get mapped with
+ * dma_map_consistent() so that writes force a flush of pending DMA.
+ * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
+ * Document Number: 007-4763-001)
+ *
  * TODO: simplify our interface;
  *       figure out how to save dmamap handle so can use two step.
  */
-dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-			     int direction)
+dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr,
+				   size_t size, int direction,
+				   struct dma_attrs *attrs)
 {
 	dma_addr_t dma_addr;
 	unsigned long phys_addr;
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
+	int dmabarr;
+
+	dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
 
 	BUG_ON(dev->bus != &pci_bus_type);
 
 	phys_addr = __pa(cpu_addr);
-	dma_addr = provider->dma_map(pdev, phys_addr, size, SN_DMA_ADDR_PHYS);
+	if (dmabarr)
+		dma_addr = provider->dma_map_consistent(pdev, phys_addr,
+							size, SN_DMA_ADDR_PHYS);
+	else
+		dma_addr = provider->dma_map(pdev, phys_addr, size,
+					     SN_DMA_ADDR_PHYS);
+
 	if (!dma_addr) {
 		printk(KERN_ERR "%s: out of ATEs\n", __func__);
 		return 0;
 	}
 	return dma_addr;
 }
-EXPORT_SYMBOL(sn_dma_map_single);
+EXPORT_SYMBOL(sn_dma_map_single_attrs);
 
 /**
- * sn_dma_unmap_single - unamp a DMA mapped page
+ * sn_dma_unmap_single_attrs - unamp a DMA mapped page
  * @dev: device to sync
  * @dma_addr: DMA address to sync
  * @size: size of region
  * @direction: DMA direction
+ * @attrs: optional dma attributes
  *
  * This routine is supposed to sync the DMA region specified
  * by @dma_handle into the coherence domain.  On SN, we're always cache
  * coherent, so we just need to free any ATEs associated with this mapping.
  */
-void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-			 int direction)
+void sn_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr,
+			       size_t size, int direction,
+			       struct dma_attrs *attrs)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
@@ -207,19 +226,21 @@ void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 
 	provider->dma_unmap(pdev, dma_addr, direction);
 }
-EXPORT_SYMBOL(sn_dma_unmap_single);
+EXPORT_SYMBOL(sn_dma_unmap_single_attrs);
 
 /**
- * sn_dma_unmap_sg - unmap a DMA scatterlist
+ * sn_dma_unmap_sg_attrs - unmap a DMA scatterlist
  * @dev: device to unmap
  * @sg: scatterlist to unmap
  * @nhwentries: number of scatterlist entries
  * @direction: DMA direction
+ * @attrs: optional dma attributes
  *
  * Unmap a set of streaming mode DMA translations.
  */
-void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
-		     int nhwentries, int direction)
+void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
+			   int nhwentries, int direction,
+			   struct dma_attrs *attrs)
 {
 	int i;
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -234,25 +255,34 @@ void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
 		sg->dma_length = 0;
 	}
 }
-EXPORT_SYMBOL(sn_dma_unmap_sg);
+EXPORT_SYMBOL(sn_dma_unmap_sg_attrs);
 
 /**
- * sn_dma_map_sg - map a scatterlist for DMA
+ * sn_dma_map_sg_attrs - map a scatterlist for DMA
  * @dev: device to map for
  * @sg: scatterlist to map
  * @nhwentries: number of entries
  * @direction: direction of the DMA transaction
+ * @attrs: optional dma attributes
+ *
+ * mappings with the DMA_ATTR_WRITE_BARRIER get mapped with
+ * dma_map_consistent() so that writes force a flush of pending DMA.
+ * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
+ * Document Number: 007-4763-001)
  *
  * Maps each entry of @sg for DMA.
  */
-int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
-		  int direction)
+int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+			int nhwentries, int direction, struct dma_attrs *attrs)
 {
 	unsigned long phys_addr;
 	struct scatterlist *saved_sg = sgl, *sg;
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
 	int i;
+	int dmabarr;
+
+	dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
 
 	BUG_ON(dev->bus != &pci_bus_type);
 
@@ -260,11 +290,19 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
 	 * Setup a DMA address for each entry in the scatterlist.
 	 */
 	for_each_sg(sgl, sg, nhwentries, i) {
+		dma_addr_t dma_addr;
 		phys_addr = SG_ENT_PHYS_ADDRESS(sg);
-		sg->dma_address = provider->dma_map(pdev,
-						    phys_addr, sg->length,
-						    SN_DMA_ADDR_PHYS);
+		if (dmabarr)
+			dma_addr = provider->dma_map_consistent(pdev,
+								phys_addr,
+								sg->length,
+								SN_DMA_ADDR_PHYS);
+		else
+			dma_addr = provider->dma_map(pdev, phys_addr,
+						     sg->length,
+						     SN_DMA_ADDR_PHYS);
 
+		sg->dma_address = dma_addr;
 		if (!sg->dma_address) {
 			printk(KERN_ERR "%s: out of ATEs\n", __func__);
 
@@ -272,7 +310,8 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
 			 * Free any successfully allocated entries.
 			 */
 			if (i > 0)
-				sn_dma_unmap_sg(dev, saved_sg, i, direction);
+				sn_dma_unmap_sg_attrs(dev, saved_sg, i,
+						      direction, attrs);
 			return 0;
 		}
 
@@ -281,7 +320,7 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
 
 	return nhwentries;
 }
-EXPORT_SYMBOL(sn_dma_map_sg);
+EXPORT_SYMBOL(sn_dma_map_sg_attrs);
 
 void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 				size_t size, int direction)
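The sn_dma_*_attrs() routines above branch on dma_get_attr(). A condensed sketch of that dispatch pattern; the helper name sn_map_one() is hypothetical, while the provider ops and SN_DMA_ADDR_PHYS come from the diff:

/* Hypothetical condensation of the pattern above: route the mapping
 * through dma_map_consistent() only when the barrier attribute is set.
 * dma_get_attr() treats a NULL attrs pointer as "no attributes". */
static dma_addr_t sn_map_one(struct sn_pcibus_provider *provider,
			     struct pci_dev *pdev, unsigned long phys_addr,
			     size_t size, struct dma_attrs *attrs)
{
	if (dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs))
		return provider->dma_map_consistent(pdev, phys_addr, size,
						    SN_DMA_ADDR_PHYS);

	return provider->dma_map(pdev, phys_addr, size, SN_DMA_ADDR_PHYS);
}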
diff --git a/include/asm-ia64/dma-mapping.h b/include/asm-ia64/dma-mapping.h
index f1735a22d0ea..9f0df9bd46b7 100644
--- a/include/asm-ia64/dma-mapping.h
+++ b/include/asm-ia64/dma-mapping.h
@@ -23,10 +23,30 @@ dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
 {
 	dma_free_coherent(dev, size, cpu_addr, dma_handle);
 }
-#define dma_map_single		platform_dma_map_single
-#define dma_map_sg		platform_dma_map_sg
-#define dma_unmap_single	platform_dma_unmap_single
-#define dma_unmap_sg		platform_dma_unmap_sg
+#define dma_map_single_attrs	platform_dma_map_single_attrs
+static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+					size_t size, int dir)
+{
+	return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL);
+}
+#define dma_map_sg_attrs	platform_dma_map_sg_attrs
+static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl,
+			     int nents, int dir)
+{
+	return dma_map_sg_attrs(dev, sgl, nents, dir, NULL);
+}
+#define dma_unmap_single_attrs	platform_dma_unmap_single_attrs
+static inline void dma_unmap_single(struct device *dev, dma_addr_t cpu_addr,
+				    size_t size, int dir)
+{
+	return dma_unmap_single_attrs(dev, cpu_addr, size, dir, NULL);
+}
+#define dma_unmap_sg_attrs	platform_dma_unmap_sg_attrs
+static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
+				int nents, int dir)
+{
+	return dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL);
+}
 #define dma_sync_single_for_cpu	platform_dma_sync_single_for_cpu
 #define dma_sync_sg_for_cpu	platform_dma_sync_sg_for_cpu
 #define dma_sync_single_for_device platform_dma_sync_single_for_device
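Because the legacy entry points are now inline wrappers forwarding NULL attributes, existing callers compile and behave as before. A sketch under that assumption; send_buffer() and its arguments are hypothetical:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int send_buffer(struct device *dev, void *buf, size_t len)
{
	/* Unchanged legacy call: expands to the inline wrapper above,
	 * which forwards NULL attrs to platform_dma_map_single_attrs. */
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(handle))
		return -ENOMEM;

	/* ... point the hardware at "handle", wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}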
diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h
index c201a2020aa4..9f020eb825c5 100644
--- a/include/asm-ia64/machvec.h
+++ b/include/asm-ia64/machvec.h
@@ -22,6 +22,7 @@ struct pci_bus;
 struct task_struct;
 struct pci_dev;
 struct msi_desc;
+struct dma_attrs;
 
 typedef void ia64_mv_setup_t (char **);
 typedef void ia64_mv_cpu_init_t (void);
@@ -56,6 +57,11 @@ typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist
 typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr);
 typedef int ia64_mv_dma_supported (struct device *, u64);
 
+typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *);
+typedef void ia64_mv_dma_unmap_single_attrs (struct device *, dma_addr_t, size_t, int, struct dma_attrs *);
+typedef int ia64_mv_dma_map_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
+typedef void ia64_mv_dma_unmap_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
+
 /*
  * WARNING: The legacy I/O space is _architected_.  Platforms are
  * expected to follow this architected model (see Section 10.7 in the
@@ -136,10 +142,10 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
 #  define platform_dma_init		ia64_mv.dma_init
 #  define platform_dma_alloc_coherent	ia64_mv.dma_alloc_coherent
 #  define platform_dma_free_coherent	ia64_mv.dma_free_coherent
-#  define platform_dma_map_single	ia64_mv.dma_map_single
-#  define platform_dma_unmap_single	ia64_mv.dma_unmap_single
-#  define platform_dma_map_sg		ia64_mv.dma_map_sg
-#  define platform_dma_unmap_sg		ia64_mv.dma_unmap_sg
+#  define platform_dma_map_single_attrs	ia64_mv.dma_map_single_attrs
+#  define platform_dma_unmap_single_attrs	ia64_mv.dma_unmap_single_attrs
+#  define platform_dma_map_sg_attrs	ia64_mv.dma_map_sg_attrs
+#  define platform_dma_unmap_sg_attrs	ia64_mv.dma_unmap_sg_attrs
 #  define platform_dma_sync_single_for_cpu ia64_mv.dma_sync_single_for_cpu
 #  define platform_dma_sync_sg_for_cpu	ia64_mv.dma_sync_sg_for_cpu
 #  define platform_dma_sync_single_for_device ia64_mv.dma_sync_single_for_device
@@ -190,10 +196,10 @@ struct ia64_machine_vector {
 	ia64_mv_dma_init *dma_init;
 	ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
 	ia64_mv_dma_free_coherent *dma_free_coherent;
-	ia64_mv_dma_map_single *dma_map_single;
-	ia64_mv_dma_unmap_single *dma_unmap_single;
-	ia64_mv_dma_map_sg *dma_map_sg;
-	ia64_mv_dma_unmap_sg *dma_unmap_sg;
+	ia64_mv_dma_map_single_attrs *dma_map_single_attrs;
+	ia64_mv_dma_unmap_single_attrs *dma_unmap_single_attrs;
+	ia64_mv_dma_map_sg_attrs *dma_map_sg_attrs;
+	ia64_mv_dma_unmap_sg_attrs *dma_unmap_sg_attrs;
 	ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
 	ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
 	ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
@@ -240,10 +246,10 @@ struct ia64_machine_vector {
 	platform_dma_init,			\
 	platform_dma_alloc_coherent,		\
 	platform_dma_free_coherent,		\
-	platform_dma_map_single,		\
-	platform_dma_unmap_single,		\
-	platform_dma_map_sg,			\
-	platform_dma_unmap_sg,			\
+	platform_dma_map_single_attrs,		\
+	platform_dma_unmap_single_attrs,	\
+	platform_dma_map_sg_attrs,		\
+	platform_dma_unmap_sg_attrs,		\
 	platform_dma_sync_single_for_cpu,	\
 	platform_dma_sync_sg_for_cpu,		\
 	platform_dma_sync_single_for_device,	\
@@ -292,9 +298,13 @@ extern ia64_mv_dma_init swiotlb_init;
 extern ia64_mv_dma_alloc_coherent	swiotlb_alloc_coherent;
 extern ia64_mv_dma_free_coherent	swiotlb_free_coherent;
 extern ia64_mv_dma_map_single		swiotlb_map_single;
+extern ia64_mv_dma_map_single_attrs	swiotlb_map_single_attrs;
 extern ia64_mv_dma_unmap_single		swiotlb_unmap_single;
+extern ia64_mv_dma_unmap_single_attrs	swiotlb_unmap_single_attrs;
 extern ia64_mv_dma_map_sg		swiotlb_map_sg;
+extern ia64_mv_dma_map_sg_attrs		swiotlb_map_sg_attrs;
 extern ia64_mv_dma_unmap_sg		swiotlb_unmap_sg;
+extern ia64_mv_dma_unmap_sg_attrs	swiotlb_unmap_sg_attrs;
 extern ia64_mv_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu;
 extern ia64_mv_dma_sync_sg_for_cpu	swiotlb_sync_sg_for_cpu;
 extern ia64_mv_dma_sync_single_for_device swiotlb_sync_single_for_device;
@@ -340,17 +350,17 @@ extern ia64_mv_dma_supported swiotlb_dma_supported;
 #ifndef platform_dma_free_coherent
 # define platform_dma_free_coherent	swiotlb_free_coherent
 #endif
-#ifndef platform_dma_map_single
-# define platform_dma_map_single	swiotlb_map_single
+#ifndef platform_dma_map_single_attrs
+# define platform_dma_map_single_attrs	swiotlb_map_single_attrs
 #endif
-#ifndef platform_dma_unmap_single
-# define platform_dma_unmap_single	swiotlb_unmap_single
+#ifndef platform_dma_unmap_single_attrs
+# define platform_dma_unmap_single_attrs	swiotlb_unmap_single_attrs
 #endif
-#ifndef platform_dma_map_sg
-# define platform_dma_map_sg		swiotlb_map_sg
+#ifndef platform_dma_map_sg_attrs
+# define platform_dma_map_sg_attrs	swiotlb_map_sg_attrs
 #endif
-#ifndef platform_dma_unmap_sg
-# define platform_dma_unmap_sg		swiotlb_unmap_sg
+#ifndef platform_dma_unmap_sg_attrs
+# define platform_dma_unmap_sg_attrs	swiotlb_unmap_sg_attrs
 #endif
 #ifndef platform_dma_sync_single_for_cpu
 # define platform_dma_sync_single_for_cpu	swiotlb_sync_single_for_cpu
diff --git a/include/asm-ia64/machvec_hpzx1.h b/include/asm-ia64/machvec_hpzx1.h
index e90daf9ce340..2f57f5144b9f 100644
--- a/include/asm-ia64/machvec_hpzx1.h
+++ b/include/asm-ia64/machvec_hpzx1.h
@@ -4,10 +4,10 @@
 extern ia64_mv_setup_t			dig_setup;
 extern ia64_mv_dma_alloc_coherent	sba_alloc_coherent;
 extern ia64_mv_dma_free_coherent	sba_free_coherent;
-extern ia64_mv_dma_map_single		sba_map_single;
-extern ia64_mv_dma_unmap_single		sba_unmap_single;
-extern ia64_mv_dma_map_sg		sba_map_sg;
-extern ia64_mv_dma_unmap_sg		sba_unmap_sg;
+extern ia64_mv_dma_map_single_attrs	sba_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs	sba_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs		sba_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs	sba_unmap_sg_attrs;
 extern ia64_mv_dma_supported		sba_dma_supported;
 extern ia64_mv_dma_mapping_error	sba_dma_mapping_error;
 
@@ -23,10 +23,10 @@ extern ia64_mv_dma_mapping_error sba_dma_mapping_error;
 #define platform_dma_init		machvec_noop
 #define platform_dma_alloc_coherent	sba_alloc_coherent
 #define platform_dma_free_coherent	sba_free_coherent
-#define platform_dma_map_single		sba_map_single
-#define platform_dma_unmap_single	sba_unmap_single
-#define platform_dma_map_sg		sba_map_sg
-#define platform_dma_unmap_sg		sba_unmap_sg
+#define platform_dma_map_single_attrs	sba_map_single_attrs
+#define platform_dma_unmap_single_attrs	sba_unmap_single_attrs
+#define platform_dma_map_sg_attrs	sba_map_sg_attrs
+#define platform_dma_unmap_sg_attrs	sba_unmap_sg_attrs
 #define platform_dma_sync_single_for_cpu machvec_dma_sync_single
 #define platform_dma_sync_sg_for_cpu	machvec_dma_sync_sg
 #define platform_dma_sync_single_for_device machvec_dma_sync_single
diff --git a/include/asm-ia64/machvec_hpzx1_swiotlb.h b/include/asm-ia64/machvec_hpzx1_swiotlb.h
index f00a34a148ff..a842cdda827b 100644
--- a/include/asm-ia64/machvec_hpzx1_swiotlb.h
+++ b/include/asm-ia64/machvec_hpzx1_swiotlb.h
@@ -4,10 +4,10 @@
 extern ia64_mv_setup_t			dig_setup;
 extern ia64_mv_dma_alloc_coherent	hwsw_alloc_coherent;
 extern ia64_mv_dma_free_coherent	hwsw_free_coherent;
-extern ia64_mv_dma_map_single		hwsw_map_single;
-extern ia64_mv_dma_unmap_single		hwsw_unmap_single;
-extern ia64_mv_dma_map_sg		hwsw_map_sg;
-extern ia64_mv_dma_unmap_sg		hwsw_unmap_sg;
+extern ia64_mv_dma_map_single_attrs	hwsw_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs	hwsw_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs		hwsw_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs	hwsw_unmap_sg_attrs;
 extern ia64_mv_dma_supported		hwsw_dma_supported;
 extern ia64_mv_dma_mapping_error	hwsw_dma_mapping_error;
 extern ia64_mv_dma_sync_single_for_cpu	hwsw_sync_single_for_cpu;
@@ -28,10 +28,10 @@ extern ia64_mv_dma_sync_sg_for_device hwsw_sync_sg_for_device;
 #define platform_dma_init		machvec_noop
 #define platform_dma_alloc_coherent	hwsw_alloc_coherent
 #define platform_dma_free_coherent	hwsw_free_coherent
-#define platform_dma_map_single		hwsw_map_single
-#define platform_dma_unmap_single	hwsw_unmap_single
-#define platform_dma_map_sg		hwsw_map_sg
-#define platform_dma_unmap_sg		hwsw_unmap_sg
+#define platform_dma_map_single_attrs	hwsw_map_single_attrs
+#define platform_dma_unmap_single_attrs	hwsw_unmap_single_attrs
+#define platform_dma_map_sg_attrs	hwsw_map_sg_attrs
+#define platform_dma_unmap_sg_attrs	hwsw_unmap_sg_attrs
 #define platform_dma_supported		hwsw_dma_supported
 #define platform_dma_mapping_error	hwsw_dma_mapping_error
 #define platform_dma_sync_single_for_cpu hwsw_sync_single_for_cpu
diff --git a/include/asm-ia64/machvec_sn2.h b/include/asm-ia64/machvec_sn2.h
index 61439a7f5b08..781308ea7b88 100644
--- a/include/asm-ia64/machvec_sn2.h
+++ b/include/asm-ia64/machvec_sn2.h
@@ -57,10 +57,10 @@ extern ia64_mv_readl_t __sn_readl_relaxed;
 extern ia64_mv_readq_t __sn_readq_relaxed;
 extern ia64_mv_dma_alloc_coherent	sn_dma_alloc_coherent;
 extern ia64_mv_dma_free_coherent	sn_dma_free_coherent;
-extern ia64_mv_dma_map_single		sn_dma_map_single;
-extern ia64_mv_dma_unmap_single		sn_dma_unmap_single;
-extern ia64_mv_dma_map_sg		sn_dma_map_sg;
-extern ia64_mv_dma_unmap_sg		sn_dma_unmap_sg;
+extern ia64_mv_dma_map_single_attrs	sn_dma_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs	sn_dma_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs		sn_dma_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs	sn_dma_unmap_sg_attrs;
 extern ia64_mv_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu;
 extern ia64_mv_dma_sync_sg_for_cpu	sn_dma_sync_sg_for_cpu;
 extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device;
@@ -113,10 +113,10 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus;
 #define platform_dma_init		machvec_noop
 #define platform_dma_alloc_coherent	sn_dma_alloc_coherent
 #define platform_dma_free_coherent	sn_dma_free_coherent
-#define platform_dma_map_single		sn_dma_map_single
-#define platform_dma_unmap_single	sn_dma_unmap_single
-#define platform_dma_map_sg		sn_dma_map_sg
-#define platform_dma_unmap_sg		sn_dma_unmap_sg
+#define platform_dma_map_single_attrs	sn_dma_map_single_attrs
+#define platform_dma_unmap_single_attrs	sn_dma_unmap_single_attrs
+#define platform_dma_map_sg_attrs	sn_dma_map_sg_attrs
+#define platform_dma_unmap_sg_attrs	sn_dma_unmap_sg_attrs
 #define platform_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu
 #define platform_dma_sync_sg_for_cpu	sn_dma_sync_sg_for_cpu
 #define platform_dma_sync_single_for_device sn_dma_sync_single_for_device
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 3c95922e51e7..d568894df8cc 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -555,7 +555,8 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
  * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
  */
 dma_addr_t
-swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
+swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
+			 int dir, struct dma_attrs *attrs)
 {
 	dma_addr_t dev_addr = virt_to_bus(ptr);
 	void *map;
@@ -588,6 +589,13 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 
 	return dev_addr;
 }
+EXPORT_SYMBOL(swiotlb_map_single_attrs);
+
+dma_addr_t
+swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
+{
+	return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL);
+}
 
 /*
  * Unmap a single streaming mode DMA translation.  The dma_addr and size must
@@ -598,8 +606,8 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
  * whatever the device wrote there.
  */
 void
-swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
-		     int dir)
+swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
+			   size_t size, int dir, struct dma_attrs *attrs)
 {
 	char *dma_addr = bus_to_virt(dev_addr);
 
@@ -609,7 +617,14 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
 	else if (dir == DMA_FROM_DEVICE)
 		dma_mark_clean(dma_addr, size);
 }
+EXPORT_SYMBOL(swiotlb_unmap_single_attrs);
 
+void
+swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
+		     int dir)
+{
+	return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL);
+}
 /*
  * Make physical memory consistent for a single streaming mode DMA translation
  * after a transfer.
@@ -680,6 +695,8 @@ swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
 			       SYNC_FOR_DEVICE);
 }
 
+void swiotlb_unmap_sg_attrs(struct device *, struct scatterlist *, int, int,
+			    struct dma_attrs *);
 /*
  * Map a set of buffers described by scatterlist in streaming mode for DMA.
  * This is the scatter-gather version of the above swiotlb_map_single
@@ -697,8 +714,8 @@ swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
  * same here.
  */
 int
-swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-	       int dir)
+swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
+		     int dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	void *addr;
@@ -716,7 +733,8 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
 			/* Don't panic here, we expect map_sg users
 			   to do proper error handling. */
 			swiotlb_full(hwdev, sg->length, dir, 0);
-			swiotlb_unmap_sg(hwdev, sgl, i, dir);
+			swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
+					       attrs);
 			sgl[0].dma_length = 0;
 			return 0;
 		}
@@ -727,14 +745,22 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
 	}
 	return nelems;
 }
+EXPORT_SYMBOL(swiotlb_map_sg_attrs);
+
+int
+swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
+	       int dir)
+{
+	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
+}
 
 /*
  * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
  * concerning calls here are the same as for swiotlb_unmap_single() above.
  */
 void
-swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-		 int dir)
+swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
+		       int nelems, int dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	int i;
@@ -749,6 +775,14 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
 			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
 	}
 }
+EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
+
+void
+swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
+		 int dir)
+{
+	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
+}
 
 /*
  * Make physical memory consistent for a set of streaming mode DMA translations