author		FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2009-01-05 09:59:02 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-06 08:06:57 -0500
commit		160c1d8e40866edfeae7d68816b7005d70acf391 (patch)
tree		37dd78b2ea28a3953a46d401bd9657005eb444d7 /arch/ia64/hp/common
parent		f0402a262e1a4c03fc66b83659823bdcaac3c41a (diff)
x86, ia64: convert to use generic dma_map_ops struct
This converts X86 and IA64 to use include/linux/dma-mapping.h. It's a
bit large but pretty boring.

The major change for X86 is converting 'int dir' to
'enum dma_data_direction dir' in the DMA mapping operations. The major
change for IA64 is using map_page and unmap_page instead of map_single
and unmap_single.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
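For orientation, here is the shape of the generic structure being adopted, condensed to the callbacks this patch actually wires up. This is a sketch reconstructed from the assignments visible in the diff below, not a verbatim copy of include/linux/dma-mapping.h:

	struct dma_map_ops {
		void *(*alloc_coherent)(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t gfp);
		void (*free_coherent)(struct device *dev, size_t size,
				      void *vaddr, dma_addr_t dma_handle);
		dma_addr_t (*map_page)(struct device *dev, struct page *page,
				       unsigned long offset, size_t size,
				       enum dma_data_direction dir,
				       struct dma_attrs *attrs);
		void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir,
				   struct dma_attrs *attrs);
		int (*map_sg)(struct device *dev, struct scatterlist *sg,
			      int nents, enum dma_data_direction dir,
			      struct dma_attrs *attrs);
		void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
				 int nents, enum dma_data_direction dir,
				 struct dma_attrs *attrs);
		/* plus sync_{single,sg}_for_{cpu,device}, dma_supported,
		 * mapping_error, ... as assigned at the end of sba_iommu.c */
	};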
Diffstat (limited to 'arch/ia64/hp/common')
-rw-r--r--	arch/ia64/hp/common/hwsw_iommu.c	 6
-rw-r--r--	arch/ia64/hp/common/sba_iommu.c		46
2 files changed, 35 insertions(+), 17 deletions(-)
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index e5bbeba77810..e4a80d82e3d8 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -17,7 +17,7 @@
 #include <linux/swiotlb.h>
 #include <asm/machvec.h>
 
-extern struct dma_mapping_ops sba_dma_ops, swiotlb_dma_ops;
+extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
 
 /* swiotlb declarations & definitions: */
 extern int swiotlb_late_init_with_default_size (size_t size);
@@ -30,10 +30,10 @@ extern int swiotlb_late_init_with_default_size (size_t size);
 static inline int use_swiotlb(struct device *dev)
 {
 	return dev && dev->dma_mask &&
-		!sba_dma_ops.dma_supported_op(dev, *dev->dma_mask);
+		!sba_dma_ops.dma_supported(dev, *dev->dma_mask);
 }
 
-struct dma_mapping_ops *hwsw_dma_get_ops(struct device *dev)
+struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
 {
 	if (use_swiotlb(dev))
 		return &swiotlb_dma_ops;
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 29e7206f3dc6..129b62eb39e5 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -909,11 +909,13 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
  *
  * See Documentation/DMA-mapping.txt
  */
-static dma_addr_t
-sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
-		     struct dma_attrs *attrs)
+static dma_addr_t sba_map_page(struct device *dev, struct page *page,
+			       unsigned long poff, size_t size,
+			       enum dma_data_direction dir,
+			       struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
+	void *addr = page_address(page) + poff;
 	dma_addr_t iovp;
 	dma_addr_t offset;
 	u64 *pdir_start;
@@ -992,6 +994,14 @@ sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
 	return SBA_IOVA(ioc, iovp, offset);
 }
 
+static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
+				       size_t size, enum dma_data_direction dir,
+				       struct dma_attrs *attrs)
+{
+	return sba_map_page(dev, virt_to_page(addr),
+			    (unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
+}
+
 #ifdef ENABLE_MARK_CLEAN
 static SBA_INLINE void
 sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
995#ifdef ENABLE_MARK_CLEAN 1005#ifdef ENABLE_MARK_CLEAN
996static SBA_INLINE void 1006static SBA_INLINE void
997sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size) 1007sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
@@ -1026,8 +1036,8 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
  *
  * See Documentation/DMA-mapping.txt
  */
-static void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
-				   int dir, struct dma_attrs *attrs)
+static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
+			   enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 #if DELAYED_RESOURCE_CNT > 0
@@ -1095,6 +1105,12 @@ static void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t s
 #endif /* DELAYED_RESOURCE_CNT == 0 */
 }
 
+void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
+			    enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	sba_unmap_page(dev, iova, size, dir, attrs);
+}
+
 /**
  * sba_alloc_coherent - allocate/map shared mem for DMA
  * @dev: instance of PCI owned by the driver that's asking.
@@ -1423,7 +1439,8 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
  * See Documentation/DMA-mapping.txt
  */
 static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
-			    int nents, int dir, struct dma_attrs *attrs)
+			    int nents, enum dma_data_direction dir,
+			    struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 	int coalesced, filled = 0;
@@ -1514,7 +1531,8 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
  * See Documentation/DMA-mapping.txt
  */
 static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
-			       int nents, int dir, struct dma_attrs *attrs)
+			       int nents, enum dma_data_direction dir,
+			       struct dma_attrs *attrs)
 {
 #ifdef ASSERT_PDIR_SANITY
 	struct ioc *ioc;
@@ -2062,7 +2080,7 @@ static struct acpi_driver acpi_sba_ioc_driver = {
 	},
 };
 
-extern struct dma_mapping_ops swiotlb_dma_ops;
+extern struct dma_map_ops swiotlb_dma_ops;
 
 static int __init
 sba_init(void)
@@ -2176,18 +2194,18 @@ sba_page_override(char *str)
 
 __setup("sbapagesize=",sba_page_override);
 
-struct dma_mapping_ops sba_dma_ops = {
+struct dma_map_ops sba_dma_ops = {
 	.alloc_coherent = sba_alloc_coherent,
 	.free_coherent = sba_free_coherent,
-	.map_single_attrs = sba_map_single_attrs,
-	.unmap_single_attrs = sba_unmap_single_attrs,
-	.map_sg_attrs = sba_map_sg_attrs,
-	.unmap_sg_attrs = sba_unmap_sg_attrs,
+	.map_page = sba_map_page,
+	.unmap_page = sba_unmap_page,
+	.map_sg = sba_map_sg_attrs,
+	.unmap_sg = sba_unmap_sg_attrs,
 	.sync_single_for_cpu = machvec_dma_sync_single,
 	.sync_sg_for_cpu = machvec_dma_sync_sg,
 	.sync_single_for_device = machvec_dma_sync_single,
 	.sync_sg_for_device = machvec_dma_sync_sg,
-	.dma_supported_op = sba_dma_supported,
+	.dma_supported = sba_dma_supported,
 	.mapping_error = sba_dma_mapping_error,
 };
 
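A footnote on the conversion pattern: the new sba_map_single_attrs() wrapper re-expresses a virtual-address mapping in terms of ->map_page by splitting the address into its backing page and intra-page offset. A minimal standalone sketch of that split follows; the wrapper name and the explicit ops parameter are hypothetical, and only the two address expressions are taken from the patch:

	/*
	 * Sketch, not part of the patch: a map_single-style request
	 * expressed through the page-based callback, exactly as the
	 * new sba_map_single_attrs() wrapper above does.
	 */
	static dma_addr_t example_map_single(struct device *dev, void *addr,
					     size_t size,
					     enum dma_data_direction dir,
					     struct dma_attrs *attrs,
					     struct dma_map_ops *ops)
	{
		struct page *page = virt_to_page(addr);	/* backing page */
		unsigned long poff = (unsigned long)addr & ~PAGE_MASK;

		return ops->map_page(dev, page, poff, size, dir, attrs);
	}

Note the design choice visible in the diff: the ops table carries only the page-based entry points, while sba_map_single_attrs() and sba_unmap_single_attrs() survive as thin wrappers, presumably so existing callers of the old interface keep working.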