path: root/arch/ia64
author    Arthur Kepner <akepner@sgi.com>    2008-04-29 04:00:32 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2008-04-29 11:06:12 -0400
commit    309df0c503c35fbb5a09537fcbb1f4967b9ca489 (patch)
tree      56a9df627a229bd64b934608b5f84d20bdaabf3a /arch/ia64
parent    a75b0a2f68d3937f96ed39525e4750601483e3b4 (diff)
dma/ia64: update ia64 machvecs, swiotlb.c
Change all ia64 machvecs to use the new dma_*map*_attrs() interfaces.
Implement the old dma_*map_*() interfaces in terms of the corresponding new
interfaces.  For ia64/sn, make use of one dma attribute,
DMA_ATTR_WRITE_BARRIER.  Introduce swiotlb_*map*_attrs() functions.

Signed-off-by: Arthur Kepner <akepner@sgi.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: Jes Sorensen <jes@sgi.com>
Cc: Randy Dunlap <randy.dunlap@oracle.com>
Cc: Roland Dreier <rdreier@cisco.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Grant Grundler <grundler@parisc-linux.org>
Cc: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
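The message above says the old dma_*map_*() entry points are kept and implemented on top of the new *_attrs() variants. The header change that does this is outside the arch/ia64 diffstat shown below; a minimal sketch of the pattern (inline form and exact names assumed here, with a NULL attrs pointer meaning "no attributes"):

/*
 * Sketch only: old-style wrappers forwarding to the _attrs variants with
 * attrs == NULL, i.e. default behaviour. The real change is a set of
 * macros/inlines in the dma-mapping headers, not this exact code.
 */
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size, int dir)
{
	return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, int dir)
{
	dma_unmap_single_attrs(dev, dma_addr, size, dir, NULL);
}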
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/hp/common/hwsw_iommu.c   61
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c    64
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c         81
3 files changed, 129 insertions, 77 deletions
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 8f6bcfe1dada..1c44ec2a1d58 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -20,10 +20,10 @@
 extern int swiotlb_late_init_with_default_size (size_t size);
 extern ia64_mv_dma_alloc_coherent swiotlb_alloc_coherent;
 extern ia64_mv_dma_free_coherent swiotlb_free_coherent;
-extern ia64_mv_dma_map_single swiotlb_map_single;
-extern ia64_mv_dma_unmap_single swiotlb_unmap_single;
-extern ia64_mv_dma_map_sg swiotlb_map_sg;
-extern ia64_mv_dma_unmap_sg swiotlb_unmap_sg;
+extern ia64_mv_dma_map_single_attrs swiotlb_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs swiotlb_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs swiotlb_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs swiotlb_unmap_sg_attrs;
 extern ia64_mv_dma_supported swiotlb_dma_supported;
 extern ia64_mv_dma_mapping_error swiotlb_dma_mapping_error;
 
@@ -31,19 +31,19 @@ extern ia64_mv_dma_mapping_error swiotlb_dma_mapping_error;
 
 extern ia64_mv_dma_alloc_coherent sba_alloc_coherent;
 extern ia64_mv_dma_free_coherent sba_free_coherent;
-extern ia64_mv_dma_map_single sba_map_single;
-extern ia64_mv_dma_unmap_single sba_unmap_single;
-extern ia64_mv_dma_map_sg sba_map_sg;
-extern ia64_mv_dma_unmap_sg sba_unmap_sg;
+extern ia64_mv_dma_map_single_attrs sba_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs sba_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs sba_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs sba_unmap_sg_attrs;
 extern ia64_mv_dma_supported sba_dma_supported;
 extern ia64_mv_dma_mapping_error sba_dma_mapping_error;
 
 #define hwiommu_alloc_coherent		sba_alloc_coherent
 #define hwiommu_free_coherent		sba_free_coherent
-#define hwiommu_map_single		sba_map_single
-#define hwiommu_unmap_single		sba_unmap_single
-#define hwiommu_map_sg			sba_map_sg
-#define hwiommu_unmap_sg		sba_unmap_sg
+#define hwiommu_map_single_attrs	sba_map_single_attrs
+#define hwiommu_unmap_single_attrs	sba_unmap_single_attrs
+#define hwiommu_map_sg_attrs		sba_map_sg_attrs
+#define hwiommu_unmap_sg_attrs		sba_unmap_sg_attrs
 #define hwiommu_dma_supported		sba_dma_supported
 #define hwiommu_dma_mapping_error	sba_dma_mapping_error
 #define hwiommu_sync_single_for_cpu	machvec_dma_sync_single
@@ -98,41 +98,48 @@ hwsw_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma
 }
 
 dma_addr_t
-hwsw_map_single (struct device *dev, void *addr, size_t size, int dir)
+hwsw_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
+		      struct dma_attrs *attrs)
 {
 	if (use_swiotlb(dev))
-		return swiotlb_map_single(dev, addr, size, dir);
+		return swiotlb_map_single_attrs(dev, addr, size, dir, attrs);
 	else
-		return hwiommu_map_single(dev, addr, size, dir);
+		return hwiommu_map_single_attrs(dev, addr, size, dir, attrs);
 }
+EXPORT_SYMBOL(hwsw_map_single_attrs);
 
 void
-hwsw_unmap_single (struct device *dev, dma_addr_t iova, size_t size, int dir)
+hwsw_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
+			int dir, struct dma_attrs *attrs)
 {
 	if (use_swiotlb(dev))
-		return swiotlb_unmap_single(dev, iova, size, dir);
+		return swiotlb_unmap_single_attrs(dev, iova, size, dir, attrs);
 	else
-		return hwiommu_unmap_single(dev, iova, size, dir);
+		return hwiommu_unmap_single_attrs(dev, iova, size, dir, attrs);
 }
-
+EXPORT_SYMBOL(hwsw_unmap_single_attrs);
 
 int
-hwsw_map_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
+hwsw_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
+		  int dir, struct dma_attrs *attrs)
 {
 	if (use_swiotlb(dev))
-		return swiotlb_map_sg(dev, sglist, nents, dir);
+		return swiotlb_map_sg_attrs(dev, sglist, nents, dir, attrs);
 	else
-		return hwiommu_map_sg(dev, sglist, nents, dir);
+		return hwiommu_map_sg_attrs(dev, sglist, nents, dir, attrs);
 }
+EXPORT_SYMBOL(hwsw_map_sg_attrs);
 
 void
-hwsw_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
+hwsw_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
+		    int dir, struct dma_attrs *attrs)
 {
 	if (use_swiotlb(dev))
-		return swiotlb_unmap_sg(dev, sglist, nents, dir);
+		return swiotlb_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
 	else
-		return hwiommu_unmap_sg(dev, sglist, nents, dir);
+		return hwiommu_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
 }
+EXPORT_SYMBOL(hwsw_unmap_sg_attrs);
 
 void
 hwsw_sync_single_for_cpu (struct device *dev, dma_addr_t addr, size_t size, int dir)
@@ -185,10 +192,6 @@ hwsw_dma_mapping_error (dma_addr_t dma_addr)
 }
 
 EXPORT_SYMBOL(hwsw_dma_mapping_error);
-EXPORT_SYMBOL(hwsw_map_single);
-EXPORT_SYMBOL(hwsw_unmap_single);
-EXPORT_SYMBOL(hwsw_map_sg);
-EXPORT_SYMBOL(hwsw_unmap_sg);
 EXPORT_SYMBOL(hwsw_dma_supported);
 EXPORT_SYMBOL(hwsw_alloc_coherent);
 EXPORT_SYMBOL(hwsw_free_coherent);
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 9409de5c9441..6ce729f46de8 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -899,16 +899,18 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
 }
 
 /**
- * sba_map_single - map one buffer and return IOVA for DMA
+ * sba_map_single_attrs - map one buffer and return IOVA for DMA
  * @dev: instance of PCI owned by the driver that's asking.
  * @addr: driver buffer to map.
  * @size: number of bytes to map in driver buffer.
  * @dir: R/W or both.
+ * @attrs: optional dma attributes
  *
  * See Documentation/DMA-mapping.txt
  */
 dma_addr_t
-sba_map_single(struct device *dev, void *addr, size_t size, int dir)
+sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
+		     struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 	dma_addr_t iovp;
@@ -932,7 +934,8 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 		** Device is bit capable of DMA'ing to the buffer...
 		** just return the PCI address of ptr
 		*/
-		DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n",
+		DBG_BYPASS("sba_map_single_attrs() bypass mask/addr: "
+			   "0x%lx/0x%lx\n",
 			   to_pci_dev(dev)->dma_mask, pci_addr);
 		return pci_addr;
 	}
@@ -953,7 +956,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	if (sba_check_pdir(ioc,"Check before sba_map_single()"))
+	if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()"))
 		panic("Sanity check failed");
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
@@ -982,11 +985,12 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 	/* form complete address */
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	sba_check_pdir(ioc,"Check after sba_map_single()");
+	sba_check_pdir(ioc,"Check after sba_map_single_attrs()");
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 	return SBA_IOVA(ioc, iovp, offset);
 }
+EXPORT_SYMBOL(sba_map_single_attrs);
 
 #ifdef ENABLE_MARK_CLEAN
 static SBA_INLINE void
@@ -1013,15 +1017,17 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
 #endif
 
 /**
- * sba_unmap_single - unmap one IOVA and free resources
+ * sba_unmap_single_attrs - unmap one IOVA and free resources
  * @dev: instance of PCI owned by the driver that's asking.
  * @iova: IOVA of driver buffer previously mapped.
  * @size: number of bytes mapped in driver buffer.
  * @dir: R/W or both.
+ * @attrs: optional dma attributes
  *
  * See Documentation/DMA-mapping.txt
  */
-void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
+void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
+			    int dir, struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 #if DELAYED_RESOURCE_CNT > 0
@@ -1038,7 +1044,8 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
 		/*
 		** Address does not fall w/in IOVA, must be bypassing
 		*/
-		DBG_BYPASS("sba_unmap_single() bypass addr: 0x%lx\n", iova);
+		DBG_BYPASS("sba_unmap_single_atttrs() bypass addr: 0x%lx\n",
+			   iova);
 
 #ifdef ENABLE_MARK_CLEAN
 		if (dir == DMA_FROM_DEVICE) {
@@ -1087,7 +1094,7 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif /* DELAYED_RESOURCE_CNT == 0 */
 }
-
+EXPORT_SYMBOL(sba_unmap_single_attrs);
 
 /**
  * sba_alloc_coherent - allocate/map shared mem for DMA
@@ -1144,7 +1151,8 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
 	 * If device can't bypass or bypass is disabled, pass the 32bit fake
 	 * device to map single to get an iova mapping.
 	 */
-	*dma_handle = sba_map_single(&ioc->sac_only_dev->dev, addr, size, 0);
+	*dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,
+					   size, 0, NULL);
 
 	return addr;
 }
@@ -1161,7 +1169,7 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
  */
 void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
 {
-	sba_unmap_single(dev, dma_handle, size, 0);
+	sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
 	free_pages((unsigned long) vaddr, get_order(size));
 }
 
@@ -1410,10 +1418,12 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
  * @sglist: array of buffer/length pairs
  * @nents: number of entries in list
  * @dir: R/W or both.
+ * @attrs: optional dma attributes
  *
  * See Documentation/DMA-mapping.txt
  */
-int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int dir)
+int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
+		     int dir, struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 	int coalesced, filled = 0;
@@ -1441,16 +1451,16 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 	/* Fast path single entry scatterlists. */
 	if (nents == 1) {
 		sglist->dma_length = sglist->length;
-		sglist->dma_address = sba_map_single(dev, sba_sg_address(sglist), sglist->length, dir);
+		sglist->dma_address = sba_map_single_attrs(dev, sba_sg_address(sglist), sglist->length, dir, attrs);
 		return 1;
 	}
 
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
+	if (sba_check_pdir(ioc,"Check before sba_map_sg_attrs()"))
 	{
 		sba_dump_sg(ioc, sglist, nents);
-		panic("Check before sba_map_sg()");
+		panic("Check before sba_map_sg_attrs()");
 	}
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
@@ -1479,10 +1489,10 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
+	if (sba_check_pdir(ioc,"Check after sba_map_sg_attrs()"))
 	{
 		sba_dump_sg(ioc, sglist, nents);
-		panic("Check after sba_map_sg()\n");
+		panic("Check after sba_map_sg_attrs()\n");
 	}
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
@@ -1492,18 +1502,20 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 
 	return filled;
 }
-
+EXPORT_SYMBOL(sba_map_sg_attrs);
 
 /**
- * sba_unmap_sg - unmap Scatter/Gather list
+ * sba_unmap_sg_attrs - unmap Scatter/Gather list
  * @dev: instance of PCI owned by the driver that's asking.
  * @sglist: array of buffer/length pairs
  * @nents: number of entries in list
  * @dir: R/W or both.
+ * @attrs: optional dma attributes
  *
  * See Documentation/DMA-mapping.txt
  */
-void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
+void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
+			int nents, int dir, struct dma_attrs *attrs)
 {
 #ifdef ASSERT_PDIR_SANITY
 	struct ioc *ioc;
@@ -1518,13 +1530,14 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
 	ASSERT(ioc);
 
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	sba_check_pdir(ioc,"Check before sba_unmap_sg()");
+	sba_check_pdir(ioc,"Check before sba_unmap_sg_attrs()");
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 
 	while (nents && sglist->dma_length) {
 
-		sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir);
+		sba_unmap_single_attrs(dev, sglist->dma_address,
+				       sglist->dma_length, dir, attrs);
 		sglist = sg_next(sglist);
 		nents--;
 	}
@@ -1533,11 +1546,12 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
 
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	sba_check_pdir(ioc,"Check after sba_unmap_sg()");
+	sba_check_pdir(ioc,"Check after sba_unmap_sg_attrs()");
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 
 }
+EXPORT_SYMBOL(sba_unmap_sg_attrs);
 
 /**************************************************************
 *
@@ -2166,10 +2180,6 @@ sba_page_override(char *str)
 __setup("sbapagesize=",sba_page_override);
 
 EXPORT_SYMBOL(sba_dma_mapping_error);
-EXPORT_SYMBOL(sba_map_single);
-EXPORT_SYMBOL(sba_unmap_single);
-EXPORT_SYMBOL(sba_map_sg);
-EXPORT_SYMBOL(sba_unmap_sg);
 EXPORT_SYMBOL(sba_dma_supported);
 EXPORT_SYMBOL(sba_alloc_coherent);
 EXPORT_SYMBOL(sba_free_coherent);
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 18b94b792d54..52175af299a0 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -10,6 +10,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/dma-attrs.h>
 #include <asm/dma.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/pcibus_provider_defs.h>
@@ -149,11 +150,12 @@ void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
 EXPORT_SYMBOL(sn_dma_free_coherent);
 
 /**
- * sn_dma_map_single - map a single page for DMA
+ * sn_dma_map_single_attrs - map a single page for DMA
  * @dev: device to map for
  * @cpu_addr: kernel virtual address of the region to map
  * @size: size of the region
  * @direction: DMA direction
+ * @attrs: optional dma attributes
  *
  * Map the region pointed to by @cpu_addr for DMA and return the
  * DMA address.
@@ -163,42 +165,59 @@ EXPORT_SYMBOL(sn_dma_free_coherent);
  * no way of saving the dmamap handle from the alloc to later free
  * (which is pretty much unacceptable).
  *
+ * mappings with the DMA_ATTR_WRITE_BARRIER get mapped with
+ * dma_map_consistent() so that writes force a flush of pending DMA.
+ * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
+ * Document Number: 007-4763-001)
+ *
  * TODO: simplify our interface;
  *       figure out how to save dmamap handle so can use two step.
  */
-dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-			     int direction)
+dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr,
+				   size_t size, int direction,
+				   struct dma_attrs *attrs)
 {
 	dma_addr_t dma_addr;
 	unsigned long phys_addr;
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
+	int dmabarr;
+
+	dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
 
 	BUG_ON(dev->bus != &pci_bus_type);
 
 	phys_addr = __pa(cpu_addr);
-	dma_addr = provider->dma_map(pdev, phys_addr, size, SN_DMA_ADDR_PHYS);
+	if (dmabarr)
+		dma_addr = provider->dma_map_consistent(pdev, phys_addr,
+							size, SN_DMA_ADDR_PHYS);
+	else
+		dma_addr = provider->dma_map(pdev, phys_addr, size,
+					     SN_DMA_ADDR_PHYS);
+
 	if (!dma_addr) {
 		printk(KERN_ERR "%s: out of ATEs\n", __func__);
 		return 0;
 	}
 	return dma_addr;
 }
-EXPORT_SYMBOL(sn_dma_map_single);
+EXPORT_SYMBOL(sn_dma_map_single_attrs);
 
 /**
- * sn_dma_unmap_single - unamp a DMA mapped page
+ * sn_dma_unmap_single_attrs - unamp a DMA mapped page
  * @dev: device to sync
  * @dma_addr: DMA address to sync
  * @size: size of region
  * @direction: DMA direction
+ * @attrs: optional dma attributes
  *
  * This routine is supposed to sync the DMA region specified
  * by @dma_handle into the coherence domain. On SN, we're always cache
 * coherent, so we just need to free any ATEs associated with this mapping.
  */
-void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-			 int direction)
+void sn_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr,
+			       size_t size, int direction,
+			       struct dma_attrs *attrs)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
@@ -207,19 +226,21 @@ void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 
 	provider->dma_unmap(pdev, dma_addr, direction);
 }
-EXPORT_SYMBOL(sn_dma_unmap_single);
+EXPORT_SYMBOL(sn_dma_unmap_single_attrs);
 
 /**
- * sn_dma_unmap_sg - unmap a DMA scatterlist
+ * sn_dma_unmap_sg_attrs - unmap a DMA scatterlist
  * @dev: device to unmap
  * @sg: scatterlist to unmap
  * @nhwentries: number of scatterlist entries
  * @direction: DMA direction
+ * @attrs: optional dma attributes
  *
  * Unmap a set of streaming mode DMA translations.
  */
-void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
-		     int nhwentries, int direction)
+void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
+			   int nhwentries, int direction,
+			   struct dma_attrs *attrs)
 {
 	int i;
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -234,25 +255,34 @@ void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
 		sg->dma_length = 0;
 	}
 }
-EXPORT_SYMBOL(sn_dma_unmap_sg);
+EXPORT_SYMBOL(sn_dma_unmap_sg_attrs);
 
 /**
- * sn_dma_map_sg - map a scatterlist for DMA
+ * sn_dma_map_sg_attrs - map a scatterlist for DMA
  * @dev: device to map for
  * @sg: scatterlist to map
  * @nhwentries: number of entries
  * @direction: direction of the DMA transaction
+ * @attrs: optional dma attributes
+ *
+ * mappings with the DMA_ATTR_WRITE_BARRIER get mapped with
+ * dma_map_consistent() so that writes force a flush of pending DMA.
+ * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
+ * Document Number: 007-4763-001)
  *
  * Maps each entry of @sg for DMA.
  */
-int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
-		  int direction)
+int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+			int nhwentries, int direction, struct dma_attrs *attrs)
 {
 	unsigned long phys_addr;
 	struct scatterlist *saved_sg = sgl, *sg;
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
 	int i;
+	int dmabarr;
+
+	dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
 
 	BUG_ON(dev->bus != &pci_bus_type);
 
@@ -260,11 +290,19 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
 	 * Setup a DMA address for each entry in the scatterlist.
 	 */
 	for_each_sg(sgl, sg, nhwentries, i) {
+		dma_addr_t dma_addr;
 		phys_addr = SG_ENT_PHYS_ADDRESS(sg);
-		sg->dma_address = provider->dma_map(pdev,
-						    phys_addr, sg->length,
-						    SN_DMA_ADDR_PHYS);
+		if (dmabarr)
+			dma_addr = provider->dma_map_consistent(pdev,
+								phys_addr,
+								sg->length,
+								SN_DMA_ADDR_PHYS);
+		else
+			dma_addr = provider->dma_map(pdev, phys_addr,
+						     sg->length,
+						     SN_DMA_ADDR_PHYS);
 
+		sg->dma_address = dma_addr;
 		if (!sg->dma_address) {
 			printk(KERN_ERR "%s: out of ATEs\n", __func__);
 
@@ -272,7 +310,8 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
 			 * Free any successfully allocated entries.
 			 */
 			if (i > 0)
-				sn_dma_unmap_sg(dev, saved_sg, i, direction);
+				sn_dma_unmap_sg_attrs(dev, saved_sg, i,
+						      direction, attrs);
 			return 0;
 		}
 
@@ -281,7 +320,7 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
 
 	return nhwentries;
 }
-EXPORT_SYMBOL(sn_dma_map_sg);
+EXPORT_SYMBOL(sn_dma_map_sg_attrs);
 
 void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 				size_t size, int direction)
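For the sn2 path above, DMA_ATTR_WRITE_BARRIER is the one attribute actually consumed: when a caller sets it, sn_dma_map_single_attrs()/sn_dma_map_sg_attrs() use provider->dma_map_consistent() instead of provider->dma_map(). A hedged caller-side sketch, assuming the init_dma_attrs()/dma_set_attr() helpers from <linux/dma-attrs.h> introduced by this series (the helper function and buffer names here are hypothetical, not part of this commit):

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

/* Hypothetical driver helper: map a buffer so that a device write to it
 * flushes DMA pending ahead of it (Altix write-barrier semantics). */
static dma_addr_t map_with_write_barrier(struct device *dev, void *buf,
					  size_t len)
{
	struct dma_attrs attrs;

	init_dma_attrs(&attrs);
	dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
	/* On sn2 this reaches sn_dma_map_single_attrs(), where
	 * dma_get_attr(DMA_ATTR_WRITE_BARRIER, &attrs) selects
	 * provider->dma_map_consistent() instead of provider->dma_map(). */
	return dma_map_single_attrs(dev, buf, len, DMA_FROM_DEVICE, &attrs);
}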