author	FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2009-06-17 19:28:10 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-18 16:03:58 -0400
commit	c147d8ea3e2f6f953647f2347ae732fd99b32e73 (patch)
tree	235159755038adfb0309f71ffe835fbe550da001
parent	7bf99fb673f18408be1ebc958321ef4c3f6da9e2 (diff)
dma-mapping: add asm-generic/dma-mapping-common.h
We unified x86 and IA64's handling of multiple dma mapping operations
(struct dma_map_ops in linux/dma-mapping.h) so we can remove duplication
in their arch/include/asm/dma-mapping.h.

This patchset adds include/asm-generic/dma-mapping-common.h that provides
some generic dma mapping function definitions for the users of struct
dma_map_ops.  This enables us to remove about 100 lines.

This also enables us to easily add CONFIG_DMA_API_DEBUG support, which
only x86 supports for now.  The 4th patch adds CONFIG_DMA_API_DEBUG
support to IA64 by adding only 8 lines.

This patch:

This header file provides some mapping function definitions that the
users of struct dma_map_ops can use.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Joerg Roedel <joerg.roedel@amd.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	include/asm-generic/dma-mapping-common.h	190
1 files changed, 190 insertions, 0 deletions
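
For context, a minimal sketch of how an architecture's asm/dma-mapping.h can hook into the header added below: the arch supplies get_dma_ops(), which the generic wrappers call to look up the struct dma_map_ops for a device (struct dma_map_ops itself lives in linux/dma-mapping.h, which pulls in the arch header), and then includes asm-generic/dma-mapping-common.h. The dma_ops variable, the dev->archdata.dma_ops field and the _ASM_EXAMPLE_ guard are illustrative assumptions, not code from this patch.

#ifndef _ASM_EXAMPLE_DMA_MAPPING_H
#define _ASM_EXAMPLE_DMA_MAPPING_H

/* Arch-wide default ops; the name is illustrative. */
extern struct dma_map_ops *dma_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	/* Assumes the arch keeps per-device ops in dev->archdata (illustrative). */
	if (dev && dev->archdata.dma_ops)
		return dev->archdata.dma_ops;
	return dma_ops;
}

/* Pull in the generic dma_map_single()/dma_map_sg()/... wrappers. */
#include <asm-generic/dma-mapping-common.h>

#endif /* _ASM_EXAMPLE_DMA_MAPPING_H */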
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
new file mode 100644
index 000000000000..5406a601185c
--- /dev/null
+++ b/include/asm-generic/dma-mapping-common.h
@@ -0,0 +1,190 @@
#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H

#include <linux/kmemcheck.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}

static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
	flush_write_buffers();
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
	flush_write_buffers();
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_range_for_cpu) {
		ops->sync_single_range_for_cpu(dev, addr, offset, size, dir);
		debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);

		flush_write_buffers();
	} else
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_range_for_device) {
		ops->sync_single_range_for_device(dev, addr, offset, size, dir);
		debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);

		flush_write_buffers();
	} else
		dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);

	flush_write_buffers();
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)

#endif
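
For reference, a minimal sketch of the driver-side view of the dma_map_single()/dma_unmap_single() wrappers defined above. The example_tx() function, its buf/len parameters and the error path are illustrative assumptions; dma_mapping_error() is provided by the architecture, not by this header.

static int example_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Map a kernel buffer for CPU-to-device DMA. */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... hand 'handle' to the device and wait for the transfer ... */

	/* Release the mapping once the device is done with the buffer. */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}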