path: root/arch/ia64/hp
author	Arthur Kepner <akepner@sgi.com>	2008-04-29 04:00:32 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-29 11:06:12 -0400
commit	309df0c503c35fbb5a09537fcbb1f4967b9ca489 (patch)
tree	56a9df627a229bd64b934608b5f84d20bdaabf3a /arch/ia64/hp
parent	a75b0a2f68d3937f96ed39525e4750601483e3b4 (diff)
dma/ia64: update ia64 machvecs, swiotlb.c
Change all ia64 machvecs to use the new dma_*map*_attrs() interfaces.
Implement the old dma_*map_*() interfaces in terms of the corresponding new
interfaces. For ia64/sn, make use of one dma attribute,
DMA_ATTR_WRITE_BARRIER. Introduce swiotlb_*map*_attrs() functions.

Signed-off-by: Arthur Kepner <akepner@sgi.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: Jes Sorensen <jes@sgi.com>
Cc: Randy Dunlap <randy.dunlap@oracle.com>
Cc: Roland Dreier <rdreier@cisco.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Grant Grundler <grundler@parisc-linux.org>
Cc: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
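For context, the sketch below is not part of this commit; it only illustrates the two ideas in the changelog: the legacy entry points can be expressed as thin wrappers that pass a NULL attrs argument to the new *_attrs variants, and a driver that needs stricter ordering (the ia64/sn case) passes DMA_ATTR_WRITE_BARRIER through that argument. The example_* names, device, and buffer are made up for illustration; dma_map_single_attrs(), DEFINE_DMA_ATTRS(), dma_set_attr() and DMA_ATTR_WRITE_BARRIER are the interfaces introduced alongside this patch series.

#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>

/*
 * Legacy-style entry point expressed in terms of the new *_attrs
 * interface: a NULL attrs argument means "no special attributes",
 * so existing callers see no behavioural change.
 */
static inline dma_addr_t example_map_single(struct device *dev, void *ptr,
					    size_t size, int dir)
{
	return dma_map_single_attrs(dev, ptr, size, dir, NULL);
}

/*
 * Hypothetical driver-side use of a DMA attribute: ask the platform
 * (ia64/sn here) to order this mapping's data ahead of any later DMA
 * to the device.
 */
static dma_addr_t example_map_with_barrier(struct device *dev, void *buf,
					   size_t len)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
	return dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, &attrs);
}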
Diffstat (limited to 'arch/ia64/hp')
-rw-r--r--  arch/ia64/hp/common/hwsw_iommu.c  |  61
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c   |  64
2 files changed, 69 insertions(+), 56 deletions(-)
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 8f6bcfe1dada..1c44ec2a1d58 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -20,10 +20,10 @@
 extern int swiotlb_late_init_with_default_size (size_t size);
 extern ia64_mv_dma_alloc_coherent swiotlb_alloc_coherent;
 extern ia64_mv_dma_free_coherent swiotlb_free_coherent;
-extern ia64_mv_dma_map_single swiotlb_map_single;
-extern ia64_mv_dma_unmap_single swiotlb_unmap_single;
-extern ia64_mv_dma_map_sg swiotlb_map_sg;
-extern ia64_mv_dma_unmap_sg swiotlb_unmap_sg;
+extern ia64_mv_dma_map_single_attrs swiotlb_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs swiotlb_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs swiotlb_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs swiotlb_unmap_sg_attrs;
 extern ia64_mv_dma_supported swiotlb_dma_supported;
 extern ia64_mv_dma_mapping_error swiotlb_dma_mapping_error;
 
@@ -31,19 +31,19 @@ extern ia64_mv_dma_mapping_error swiotlb_dma_mapping_error;
 
 extern ia64_mv_dma_alloc_coherent sba_alloc_coherent;
 extern ia64_mv_dma_free_coherent sba_free_coherent;
-extern ia64_mv_dma_map_single sba_map_single;
-extern ia64_mv_dma_unmap_single sba_unmap_single;
-extern ia64_mv_dma_map_sg sba_map_sg;
-extern ia64_mv_dma_unmap_sg sba_unmap_sg;
+extern ia64_mv_dma_map_single_attrs sba_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs sba_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs sba_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs sba_unmap_sg_attrs;
 extern ia64_mv_dma_supported sba_dma_supported;
 extern ia64_mv_dma_mapping_error sba_dma_mapping_error;
 
 #define hwiommu_alloc_coherent		sba_alloc_coherent
 #define hwiommu_free_coherent		sba_free_coherent
-#define hwiommu_map_single		sba_map_single
-#define hwiommu_unmap_single		sba_unmap_single
-#define hwiommu_map_sg			sba_map_sg
-#define hwiommu_unmap_sg		sba_unmap_sg
+#define hwiommu_map_single_attrs	sba_map_single_attrs
+#define hwiommu_unmap_single_attrs	sba_unmap_single_attrs
+#define hwiommu_map_sg_attrs		sba_map_sg_attrs
+#define hwiommu_unmap_sg_attrs		sba_unmap_sg_attrs
 #define hwiommu_dma_supported		sba_dma_supported
 #define hwiommu_dma_mapping_error	sba_dma_mapping_error
 #define hwiommu_sync_single_for_cpu	machvec_dma_sync_single
@@ -98,41 +98,48 @@ hwsw_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma
 }
 
 dma_addr_t
-hwsw_map_single (struct device *dev, void *addr, size_t size, int dir)
+hwsw_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
+		      struct dma_attrs *attrs)
 {
 	if (use_swiotlb(dev))
-		return swiotlb_map_single(dev, addr, size, dir);
+		return swiotlb_map_single_attrs(dev, addr, size, dir, attrs);
 	else
-		return hwiommu_map_single(dev, addr, size, dir);
+		return hwiommu_map_single_attrs(dev, addr, size, dir, attrs);
 }
+EXPORT_SYMBOL(hwsw_map_single_attrs);
 
 void
-hwsw_unmap_single (struct device *dev, dma_addr_t iova, size_t size, int dir)
+hwsw_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
+			int dir, struct dma_attrs *attrs)
 {
 	if (use_swiotlb(dev))
-		return swiotlb_unmap_single(dev, iova, size, dir);
+		return swiotlb_unmap_single_attrs(dev, iova, size, dir, attrs);
 	else
-		return hwiommu_unmap_single(dev, iova, size, dir);
+		return hwiommu_unmap_single_attrs(dev, iova, size, dir, attrs);
 }
-
+EXPORT_SYMBOL(hwsw_unmap_single_attrs);
 
 int
-hwsw_map_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
+hwsw_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
+		  int dir, struct dma_attrs *attrs)
 {
 	if (use_swiotlb(dev))
-		return swiotlb_map_sg(dev, sglist, nents, dir);
+		return swiotlb_map_sg_attrs(dev, sglist, nents, dir, attrs);
 	else
-		return hwiommu_map_sg(dev, sglist, nents, dir);
+		return hwiommu_map_sg_attrs(dev, sglist, nents, dir, attrs);
 }
+EXPORT_SYMBOL(hwsw_map_sg_attrs);
 
 void
-hwsw_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
+hwsw_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
+		    int dir, struct dma_attrs *attrs)
 {
 	if (use_swiotlb(dev))
-		return swiotlb_unmap_sg(dev, sglist, nents, dir);
+		return swiotlb_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
 	else
-		return hwiommu_unmap_sg(dev, sglist, nents, dir);
+		return hwiommu_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
 }
+EXPORT_SYMBOL(hwsw_unmap_sg_attrs);
 
 void
 hwsw_sync_single_for_cpu (struct device *dev, dma_addr_t addr, size_t size, int dir)
@@ -185,10 +192,6 @@ hwsw_dma_mapping_error (dma_addr_t dma_addr)
 }
 
 EXPORT_SYMBOL(hwsw_dma_mapping_error);
-EXPORT_SYMBOL(hwsw_map_single);
-EXPORT_SYMBOL(hwsw_unmap_single);
-EXPORT_SYMBOL(hwsw_map_sg);
-EXPORT_SYMBOL(hwsw_unmap_sg);
 EXPORT_SYMBOL(hwsw_dma_supported);
 EXPORT_SYMBOL(hwsw_alloc_coherent);
 EXPORT_SYMBOL(hwsw_free_coherent);
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 9409de5c9441..6ce729f46de8 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -899,16 +899,18 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
 }
 
 /**
- * sba_map_single - map one buffer and return IOVA for DMA
+ * sba_map_single_attrs - map one buffer and return IOVA for DMA
  * @dev: instance of PCI owned by the driver that's asking.
  * @addr: driver buffer to map.
  * @size: number of bytes to map in driver buffer.
  * @dir: R/W or both.
+ * @attrs: optional dma attributes
  *
  * See Documentation/DMA-mapping.txt
  */
 dma_addr_t
-sba_map_single(struct device *dev, void *addr, size_t size, int dir)
+sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
+		     struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 	dma_addr_t iovp;
@@ -932,7 +934,8 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 		** Device is bit capable of DMA'ing to the buffer...
 		** just return the PCI address of ptr
 		*/
-		DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n",
+		DBG_BYPASS("sba_map_single_attrs() bypass mask/addr: "
+			   "0x%lx/0x%lx\n",
 			   to_pci_dev(dev)->dma_mask, pci_addr);
 		return pci_addr;
 	}
@@ -953,7 +956,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	if (sba_check_pdir(ioc,"Check before sba_map_single()"))
+	if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()"))
 		panic("Sanity check failed");
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
@@ -982,11 +985,12 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 	/* form complete address */
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	sba_check_pdir(ioc,"Check after sba_map_single()");
+	sba_check_pdir(ioc,"Check after sba_map_single_attrs()");
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 	return SBA_IOVA(ioc, iovp, offset);
 }
+EXPORT_SYMBOL(sba_map_single_attrs);
 
 #ifdef ENABLE_MARK_CLEAN
 static SBA_INLINE void
@@ -1013,15 +1017,17 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
 #endif
 
 /**
- * sba_unmap_single - unmap one IOVA and free resources
+ * sba_unmap_single_attrs - unmap one IOVA and free resources
  * @dev: instance of PCI owned by the driver that's asking.
  * @iova: IOVA of driver buffer previously mapped.
  * @size: number of bytes mapped in driver buffer.
  * @dir: R/W or both.
+ * @attrs: optional dma attributes
  *
  * See Documentation/DMA-mapping.txt
  */
-void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
+void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
+			    int dir, struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 #if DELAYED_RESOURCE_CNT > 0
@@ -1038,7 +1044,8 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
 		/*
 		** Address does not fall w/in IOVA, must be bypassing
 		*/
-		DBG_BYPASS("sba_unmap_single() bypass addr: 0x%lx\n", iova);
+		DBG_BYPASS("sba_unmap_single_atttrs() bypass addr: 0x%lx\n",
+			   iova);
 
 #ifdef ENABLE_MARK_CLEAN
 		if (dir == DMA_FROM_DEVICE) {
@@ -1087,7 +1094,7 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif /* DELAYED_RESOURCE_CNT == 0 */
 }
-
+EXPORT_SYMBOL(sba_unmap_single_attrs);
 
 /**
  * sba_alloc_coherent - allocate/map shared mem for DMA
@@ -1144,7 +1151,8 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
 	 * If device can't bypass or bypass is disabled, pass the 32bit fake
 	 * device to map single to get an iova mapping.
 	 */
-	*dma_handle = sba_map_single(&ioc->sac_only_dev->dev, addr, size, 0);
+	*dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,
+					   size, 0, NULL);
 
 	return addr;
 }
@@ -1161,7 +1169,7 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
  */
 void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
 {
-	sba_unmap_single(dev, dma_handle, size, 0);
+	sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
 	free_pages((unsigned long) vaddr, get_order(size));
 }
 
@@ -1410,10 +1418,12 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
  * @sglist: array of buffer/length pairs
  * @nents: number of entries in list
  * @dir: R/W or both.
+ * @attrs: optional dma attributes
  *
  * See Documentation/DMA-mapping.txt
  */
-int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int dir)
+int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
+		     int dir, struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 	int coalesced, filled = 0;
@@ -1441,16 +1451,16 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 	/* Fast path single entry scatterlists. */
 	if (nents == 1) {
 		sglist->dma_length = sglist->length;
-		sglist->dma_address = sba_map_single(dev, sba_sg_address(sglist), sglist->length, dir);
+		sglist->dma_address = sba_map_single_attrs(dev, sba_sg_address(sglist), sglist->length, dir, attrs);
 		return 1;
 	}
 
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
+	if (sba_check_pdir(ioc,"Check before sba_map_sg_attrs()"))
 	{
 		sba_dump_sg(ioc, sglist, nents);
-		panic("Check before sba_map_sg()");
+		panic("Check before sba_map_sg_attrs()");
 	}
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
@@ -1479,10 +1489,10 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
+	if (sba_check_pdir(ioc,"Check after sba_map_sg_attrs()"))
 	{
 		sba_dump_sg(ioc, sglist, nents);
-		panic("Check after sba_map_sg()\n");
+		panic("Check after sba_map_sg_attrs()\n");
 	}
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
@@ -1492,18 +1502,20 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 
 	return filled;
 }
-
+EXPORT_SYMBOL(sba_map_sg_attrs);
 
 /**
- * sba_unmap_sg - unmap Scatter/Gather list
+ * sba_unmap_sg_attrs - unmap Scatter/Gather list
  * @dev: instance of PCI owned by the driver that's asking.
  * @sglist: array of buffer/length pairs
  * @nents: number of entries in list
  * @dir: R/W or both.
+ * @attrs: optional dma attributes
  *
  * See Documentation/DMA-mapping.txt
  */
-void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
+void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
+			int nents, int dir, struct dma_attrs *attrs)
 {
 #ifdef ASSERT_PDIR_SANITY
 	struct ioc *ioc;
@@ -1518,13 +1530,14 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
 	ASSERT(ioc);
 
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	sba_check_pdir(ioc,"Check before sba_unmap_sg()");
+	sba_check_pdir(ioc,"Check before sba_unmap_sg_attrs()");
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 
 	while (nents && sglist->dma_length) {
 
-		sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir);
+		sba_unmap_single_attrs(dev, sglist->dma_address,
+				       sglist->dma_length, dir, attrs);
 		sglist = sg_next(sglist);
 		nents--;
 	}
@@ -1533,11 +1546,12 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
 
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	sba_check_pdir(ioc,"Check after sba_unmap_sg()");
+	sba_check_pdir(ioc,"Check after sba_unmap_sg_attrs()");
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 
 }
+EXPORT_SYMBOL(sba_unmap_sg_attrs);
 
 /**************************************************************
 *
@@ -2166,10 +2180,6 @@ sba_page_override(char *str)
 __setup("sbapagesize=",sba_page_override);
 
 EXPORT_SYMBOL(sba_dma_mapping_error);
-EXPORT_SYMBOL(sba_map_single);
-EXPORT_SYMBOL(sba_unmap_single);
-EXPORT_SYMBOL(sba_map_sg);
-EXPORT_SYMBOL(sba_unmap_sg);
 EXPORT_SYMBOL(sba_dma_supported);
 EXPORT_SYMBOL(sba_alloc_coherent);
 EXPORT_SYMBOL(sba_free_coherent);