author		Ingo Molnar <mingo@elte.hu>	2008-07-28 18:07:55 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-28 18:07:55 -0400
commit		cb28a1bbdb4790378e7366d6c9ee1d2340b84f92 (patch)
tree		316436f77dac75335fd2c3ef5f109e71606c50d3 /include/asm-x86/dma-mapping.h
parent		b6d4f7e3ef25beb8c658c97867d98883e69dc544 (diff)
parent		f934fb19ef34730263e6afc01e8ec27a8a71470f (diff)
Merge branch 'linus' into core/generic-dma-coherent

Conflicts:

	arch/x86/Kconfig

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/asm-x86/dma-mapping.h')
-rw-r--r--	include/asm-x86/dma-mapping.h	100
1 file changed, 68 insertions(+), 32 deletions(-)
diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h
index c68f360ef564..ad9cd6d49bfc 100644
--- a/include/asm-x86/dma-mapping.h
+++ b/include/asm-x86/dma-mapping.h
@@ -14,11 +14,11 @@ extern dma_addr_t bad_dma_address;
 extern int iommu_merge;
 extern struct device fallback_dev;
 extern int panic_on_overflow;
-extern int forbid_dac;
 extern int force_iommu;
 
 struct dma_mapping_ops {
-	int (*mapping_error)(dma_addr_t dma_addr);
+	int (*mapping_error)(struct device *dev,
+			     dma_addr_t dma_addr);
 	void* (*alloc_coherent)(struct device *dev, size_t size,
 				dma_addr_t *dma_handle, gfp_t gfp);
 	void (*free_coherent)(struct device *dev, size_t size,
@@ -57,14 +57,32 @@ struct dma_mapping_ops {
 	int is_phys;
 };
 
-extern const struct dma_mapping_ops *dma_ops;
+extern struct dma_mapping_ops *dma_ops;
 
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 {
-	if (dma_ops->mapping_error)
-		return dma_ops->mapping_error(dma_addr);
+#ifdef CONFIG_X86_32
+	return dma_ops;
+#else
+	if (unlikely(!dev) || !dev->archdata.dma_ops)
+		return dma_ops;
+	else
+		return dev->archdata.dma_ops;
+#endif
+}
+
+/* Make sure we keep the same behaviour */
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+#ifdef CONFIG_X86_32
+	return 0;
+#else
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	if (ops->mapping_error)
+		return ops->mapping_error(dev, dma_addr);
 
 	return (dma_addr == bad_dma_address);
+#endif
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
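The core of this hunk is the switch from the single global dma_ops table to a per-device lookup: get_dma_ops() prefers dev->archdata.dma_ops (64-bit only) and falls back to the global table, and mapping_error now receives the device. A minimal sketch of how an IOMMU backend might plug into this, assuming CONFIG_X86_64; all my_iommu_* names are hypothetical, and only struct dma_mapping_ops, bad_dma_address and dev->archdata.dma_ops come from this tree:

	#include <linux/device.h>	/* struct device, dev->archdata */
	#include <asm/dma-mapping.h>	/* struct dma_mapping_ops, bad_dma_address */

	/* Hypothetical per-device error check: the new device argument lets
	 * a backend consult per-device state instead of one global marker. */
	static int my_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
	{
		return dma_addr == bad_dma_address;
	}

	static struct dma_mapping_ops my_iommu_dma_ops = {
		.mapping_error	= my_iommu_mapping_error,
		/* .map_single, .map_sg, ... would be filled in the same way. */
	};

	/* Called at device-discovery time (64-bit only: archdata.dma_ops does
	 * not exist on 32-bit in this tree).  Every inline wrapper in this
	 * header then reaches these ops through get_dma_ops(dev). */
	static void my_iommu_attach(struct device *dev)
	{
		dev->archdata.dma_ops = &my_iommu_dma_ops;
	}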
@@ -84,44 +102,53 @@ static inline dma_addr_t
 dma_map_single(struct device *hwdev, void *ptr, size_t size,
 	       int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
+	return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
 }
 
 static inline void
 dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
 		 int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->unmap_single)
-		dma_ops->unmap_single(dev, addr, size, direction);
+	if (ops->unmap_single)
+		ops->unmap_single(dev, addr, size, direction);
 }
 
 static inline int
 dma_map_sg(struct device *hwdev, struct scatterlist *sg,
 	   int nents, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	return dma_ops->map_sg(hwdev, sg, nents, direction);
+	return ops->map_sg(hwdev, sg, nents, direction);
 }
 
 static inline void
 dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
 	     int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->unmap_sg)
-		dma_ops->unmap_sg(hwdev, sg, nents, direction);
+	if (ops->unmap_sg)
+		ops->unmap_sg(hwdev, sg, nents, direction);
 }
 
 static inline void
 dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
 			size_t size, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_for_cpu)
-		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
-					     direction);
+	if (ops->sync_single_for_cpu)
+		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
 	flush_write_buffers();
 }
 
@@ -129,10 +156,11 @@ static inline void
 dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
 			   size_t size, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_for_device)
-		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
-						direction);
+	if (ops->sync_single_for_device)
+		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
 	flush_write_buffers();
 }
 
@@ -140,11 +168,12 @@ static inline void
 dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
 			      unsigned long offset, size_t size, int direction)
 {
-	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_range_for_cpu)
-		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
-						   size, direction);
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 
+	BUG_ON(!valid_dma_direction(direction));
+	if (ops->sync_single_range_for_cpu)
+		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
+					       size, direction);
 	flush_write_buffers();
 }
 
@@ -153,11 +182,12 @@ dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
 				 unsigned long offset, size_t size,
 				 int direction)
 {
-	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_range_for_device)
-		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
-						      offset, size, direction);
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 
+	BUG_ON(!valid_dma_direction(direction));
+	if (ops->sync_single_range_for_device)
+		ops->sync_single_range_for_device(hwdev, dma_handle,
+						  offset, size, direction);
 	flush_write_buffers();
 }
 
@@ -165,9 +195,11 @@ static inline void
 dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
 		    int nelems, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_sg_for_cpu)
-		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
+	if (ops->sync_sg_for_cpu)
+		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
 	flush_write_buffers();
 }
 
@@ -175,9 +207,11 @@ static inline void
 dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 		       int nelems, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_sg_for_device)
-		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
+	if (ops->sync_sg_for_device)
+		ops->sync_sg_for_device(hwdev, sg, nelems, direction);
 
 	flush_write_buffers();
 }
@@ -186,9 +220,11 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 				      size_t offset, size_t size,
 				      int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	return dma_ops->map_single(dev, page_to_phys(page)+offset,
-				   size, direction);
+	return ops->map_single(dev, page_to_phys(page) + offset,
+			       size, direction);
 }
 
 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
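With every wrapper routed through get_dma_ops(), the visible API change for callers is the extra device argument to dma_mapping_error(). A sketch of a caller adapting to it; my_driver_send, buf and len are hypothetical, and only the dma_* calls come from this header:

	#include <linux/dma-mapping.h>	/* DMA_TO_DEVICE, wrappers above */

	static int my_driver_send(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, handle))	/* was dma_mapping_error(handle) */
			return -ENOMEM;

		/* ... program the hardware with "handle" ... */

		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
		return 0;
	}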