Diffstat (limited to 'include/asm-x86/dma-mapping.h')
 -rw-r--r--  include/asm-x86/dma-mapping.h | 99
 1 files changed, 68 insertions, 31 deletions
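What follows, in brief: every DMA helper in this header now looks up its operations table through a new get_dma_ops(dev) accessor, so that on x86_64 a device can carry its own dma_mapping_ops in dev->archdata.dma_ops instead of always dispatching through the global dma_ops (which loses its const qualifier to allow that), and dma_mapping_error() plus the mapping_error hook gain a struct device argument. A hedged sketch of the caller-visible change (variable names are illustrative, not from this patch):

	/* before this patch */
	if (dma_mapping_error(handle))
		goto unmap;

	/* after this patch: the device must be passed along */
	if (dma_mapping_error(dev, handle))
		goto unmap;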
diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h
index c2ddd3d1b883..0eaa9bf6011f 100644
--- a/include/asm-x86/dma-mapping.h
+++ b/include/asm-x86/dma-mapping.h
@@ -17,7 +17,8 @@ extern int panic_on_overflow;
 extern int force_iommu;
 
 struct dma_mapping_ops {
-	int (*mapping_error)(dma_addr_t dma_addr);
+	int (*mapping_error)(struct device *dev,
+			     dma_addr_t dma_addr);
 	void* (*alloc_coherent)(struct device *dev, size_t size,
 				dma_addr_t *dma_handle, gfp_t gfp);
 	void (*free_coherent)(struct device *dev, size_t size,
@@ -56,14 +57,32 @@ struct dma_mapping_ops {
 	int is_phys;
 };
 
-extern const struct dma_mapping_ops *dma_ops;
+extern struct dma_mapping_ops *dma_ops;
 
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 {
-	if (dma_ops->mapping_error)
-		return dma_ops->mapping_error(dma_addr);
+#ifdef CONFIG_X86_32
+	return dma_ops;
+#else
+	if (unlikely(!dev) || !dev->archdata.dma_ops)
+		return dma_ops;
+	else
+		return dev->archdata.dma_ops;
+#endif
+}
+
+/* Make sure we keep the same behaviour */
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+#ifdef CONFIG_X86_32
+	return 0;
+#else
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	if (ops->mapping_error)
+		return ops->mapping_error(dev, dma_addr);
 
 	return (dma_addr == bad_dma_address);
+#endif
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
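Two details in the hunk above deserve a note. On CONFIG_X86_32, get_dma_ops() ignores the device and returns the global table, and dma_mapping_error() returns 0 unconditionally; the in-code comment ("Make sure we keep the same behaviour") marks that as deliberate. On x86_64, a bus or IOMMU layer can now install per-device ops. A minimal sketch of such a registration, assuming a hypothetical my_iommu_dma_ops table (not part of this patch):

	/* hypothetical x86_64 setup code in an IOMMU layer */
	static struct dma_mapping_ops my_iommu_dma_ops = {
		.mapping_error	= my_iommu_mapping_error, /* now takes dev */
		.map_single	= my_iommu_map_single,
		/* ... remaining hooks ... */
	};

	dev->archdata.dma_ops = &my_iommu_dma_ops;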
@@ -83,44 +102,53 @@ static inline dma_addr_t
 dma_map_single(struct device *hwdev, void *ptr, size_t size,
 	       int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
+	return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
 }
 
 static inline void
 dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
 		 int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->unmap_single)
-		dma_ops->unmap_single(dev, addr, size, direction);
+	if (ops->unmap_single)
+		ops->unmap_single(dev, addr, size, direction);
 }
 
 static inline int
 dma_map_sg(struct device *hwdev, struct scatterlist *sg,
 	   int nents, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	return dma_ops->map_sg(hwdev, sg, nents, direction);
+	return ops->map_sg(hwdev, sg, nents, direction);
 }
 
 static inline void
 dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
 	     int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->unmap_sg)
-		dma_ops->unmap_sg(hwdev, sg, nents, direction);
+	if (ops->unmap_sg)
+		ops->unmap_sg(hwdev, sg, nents, direction);
 }
 
 static inline void
 dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
 			size_t size, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_for_cpu)
-		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
-					     direction);
+	if (ops->sync_single_for_cpu)
+		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
 	flush_write_buffers();
 }
 
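The hunk above sets the pattern that every remaining hunk repeats mechanically: resolve the ops table once at the top of the inline wrapper, then dispatch through the local ops pointer instead of the global dma_ops. Reduced to a sketch (some_op stands in for whichever hook a given wrapper calls; optional hooks are NULL-checked before use):

	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->some_op)
		ops->some_op(hwdev, /* op-specific arguments */ ...);
	flush_write_buffers();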
@@ -128,10 +156,11 @@ static inline void
 dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
 			   size_t size, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_for_device)
-		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
-						direction);
+	if (ops->sync_single_for_device)
+		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
 	flush_write_buffers();
 }
 
@@ -139,11 +168,12 @@ static inline void
 dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
 			      unsigned long offset, size_t size, int direction)
 {
-	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_range_for_cpu)
-		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
-						   size, direction);
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 
+	BUG_ON(!valid_dma_direction(direction));
+	if (ops->sync_single_range_for_cpu)
+		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
+					       size, direction);
 	flush_write_buffers();
 }
 
@@ -152,11 +182,12 @@ dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
 				 unsigned long offset, size_t size,
 				 int direction)
 {
-	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_range_for_device)
-		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
-						      offset, size, direction);
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 
+	BUG_ON(!valid_dma_direction(direction));
+	if (ops->sync_single_range_for_device)
+		ops->sync_single_range_for_device(hwdev, dma_handle,
+						  offset, size, direction);
 	flush_write_buffers();
 }
 
@@ -164,9 +195,11 @@ static inline void
 dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
 		    int nelems, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_sg_for_cpu)
-		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
+	if (ops->sync_sg_for_cpu)
+		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
 	flush_write_buffers();
 }
 
@@ -174,9 +207,11 @@ static inline void
 dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 		       int nelems, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_sg_for_device)
-		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
+	if (ops->sync_sg_for_device)
+		ops->sync_sg_for_device(hwdev, sg, nelems, direction);
 
 	flush_write_buffers();
 }
@@ -185,9 +220,11 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 				      size_t offset, size_t size,
 				      int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	return dma_ops->map_single(dev, page_to_phys(page)+offset,
-				   size, direction);
+	return ops->map_single(dev, page_to_phys(page) + offset,
+			       size, direction);
 }
 
 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
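Taken together, a driver-side usage example under the new API; this is a hedged sketch (buf, len and the error path are invented for illustration), not code from this patch:

	void *buf = kmalloc(len, GFP_KERNEL);
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle)) {	/* dev is now required */
		kfree(buf);
		return -ENOMEM;
	}
	/* ... device performs DMA ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);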
