author    | FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp> | 2009-06-17 19:28:13 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org>  | 2009-06-18 16:03:58 -0400
commit    | d6d0a6aee252f004b06f27f74e401198f9c9ffb8 (patch)
tree      | 42349c0047da974a5b4f4f62d6192108ec70bf76 /arch/ia64
parent    | 7c095e4603dd6ce78ff5b9b70896fe3e05c13f5c (diff)
dma-mapping: ia64: use asm-generic/dma-mapping-common.h
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Joerg Roedel <joerg.roedel@amd.com>
Cc: Ingo Molnar <mingo@elte.hu>
Acked-by: "Luck, Tony" <tony.luck@intel.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc; "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
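
For context: asm-generic/dma-mapping-common.h supplies the dma_map_single()/dma_map_sg()/dma_sync_*() wrapper family for any architecture that defines get_dma_ops(), so ia64 only needs the two #defines added below and can drop its hand-rolled copies. A minimal sketch of the wrapper pattern that header provides (simplified from the 2.6.31-era API; this is not the verbatim header, which also carries debug hooks):

```c
/*
 * Sketch of the wrapper shape provided by asm-generic/dma-mapping-common.h
 * (simplified). Each architecture supplies get_dma_ops(dev); ia64 maps it
 * to platform_dma_get_ops(dev) in this patch.
 */
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	/* Same dispatch the deleted ia64 inline performed by hand. */
	return ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size, dir, attrs);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
```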
Diffstat (limited to 'arch/ia64')
-rw-r--r-- | arch/ia64/include/asm/dma-mapping.h | 102
1 file changed, 3 insertions(+), 99 deletions(-)
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 36c0009dbece..2475c91adc98 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -37,82 +37,10 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
-static inline dma_addr_t dma_map_single_attrs(struct device *dev,
-					      void *caddr, size_t size,
-					      enum dma_data_direction dir,
-					      struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = platform_dma_get_ops(dev);
-	return ops->map_page(dev, virt_to_page(caddr),
-			     (unsigned long)caddr & ~PAGE_MASK, size,
-			     dir, attrs);
-}
-
-static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
-					  size_t size,
-					  enum dma_data_direction dir,
-					  struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = platform_dma_get_ops(dev);
-	ops->unmap_page(dev, daddr, size, dir, attrs);
-}
-
-#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
-#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
-
-static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
-				   int nents, enum dma_data_direction dir,
-				   struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = platform_dma_get_ops(dev);
-	return ops->map_sg(dev, sgl, nents, dir, attrs);
-}
-
-static inline void dma_unmap_sg_attrs(struct device *dev,
-				      struct scatterlist *sgl, int nents,
-				      enum dma_data_direction dir,
-				      struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = platform_dma_get_ops(dev);
-	ops->unmap_sg(dev, sgl, nents, dir, attrs);
-}
+#define get_dma_ops(dev) platform_dma_get_ops(dev)
+#define flush_write_buffers()
 
-#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
-#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
-
-static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr,
-					   size_t size,
-					   enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = platform_dma_get_ops(dev);
-	ops->sync_single_for_cpu(dev, daddr, size, dir);
-}
-
-static inline void dma_sync_sg_for_cpu(struct device *dev,
-				       struct scatterlist *sgl,
-				       int nents, enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = platform_dma_get_ops(dev);
-	ops->sync_sg_for_cpu(dev, sgl, nents, dir);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-					      dma_addr_t daddr,
-					      size_t size,
-					      enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = platform_dma_get_ops(dev);
-	ops->sync_single_for_device(dev, daddr, size, dir);
-}
-
-static inline void dma_sync_sg_for_device(struct device *dev,
-					  struct scatterlist *sgl,
-					  int nents,
-					  enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = platform_dma_get_ops(dev);
-	ops->sync_sg_for_device(dev, sgl, nents, dir);
-}
+#include <asm-generic/dma-mapping-common.h>
 
 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
 {
@@ -120,30 +48,6 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
 	return ops->mapping_error(dev, daddr);
 }
 
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-				      size_t offset, size_t size,
-				      enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = platform_dma_get_ops(dev);
-	return ops->map_page(dev, page, offset, size, dir, NULL);
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
-				  size_t size, enum dma_data_direction dir)
-{
-	dma_unmap_single(dev, addr, size, dir);
-}
-
-/*
- * Rest of this file is part of the "Advanced DMA API". Use at your own risk.
- * See Documentation/DMA-API.txt for details.
- */
-
-#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir)	\
-	dma_sync_single_for_cpu(dev, dma_handle, size, dir)
-#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
-	dma_sync_single_for_device(dev, dma_handle, size, dir)
-
 static inline int dma_supported(struct device *dev, u64 mask)
 {
 	struct dma_map_ops *ops = platform_dma_get_ops(dev);
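
Driver-visible behaviour should be unchanged: a caller still reaches the same platform ops, only now through the generic wrappers instead of the deleted ia64 inlines. Illustrative sketch only (example_map_buffer is a hypothetical name made up for this page):

```c
/*
 * Hypothetical driver path: dma_map_single() is now the generic wrapper,
 * which still dispatches through get_dma_ops(dev), i.e.
 * platform_dma_get_ops(dev) on ia64, exactly like the removed inline.
 */
static int example_map_buffer(struct device *dev, void *buf, size_t len,
			      dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;
	return 0;
}
```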