diff options
author | FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp> | 2009-01-05 09:59:02 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-01-06 08:06:57 -0500 |
commit | 160c1d8e40866edfeae7d68816b7005d70acf391 (patch) | |
tree | 37dd78b2ea28a3953a46d401bd9657005eb444d7 /arch/ia64/include/asm/dma-mapping.h | |
parent | f0402a262e1a4c03fc66b83659823bdcaac3c41a (diff) |
x86, ia64: convert to use generic dma_map_ops struct
This converts X86 and IA64 to use include/linux/dma-mapping.h.
It's a bit large but pretty boring. The major change for X86 is
converting 'int dir' to 'enum dma_data_direction dir' in DMA mapping
operations. The major change for IA64 is using map_page and
unmap_page instead of map_single and unmap_single.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/ia64/include/asm/dma-mapping.h')
-rw-r--r-- | arch/ia64/include/asm/dma-mapping.h | 107 |
1 files changed, 33 insertions, 74 deletions
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h index bac3159379f7..d6230f514536 100644 --- a/arch/ia64/include/asm/dma-mapping.h +++ b/arch/ia64/include/asm/dma-mapping.h | |||
@@ -9,73 +9,21 @@ | |||
9 | #include <linux/scatterlist.h> | 9 | #include <linux/scatterlist.h> |
10 | #include <asm/swiotlb.h> | 10 | #include <asm/swiotlb.h> |
11 | 11 | ||
12 | struct dma_mapping_ops { | 12 | extern struct dma_map_ops *dma_ops; |
13 | int (*mapping_error)(struct device *dev, | ||
14 | dma_addr_t dma_addr); | ||
15 | void* (*alloc_coherent)(struct device *dev, size_t size, | ||
16 | dma_addr_t *dma_handle, gfp_t gfp); | ||
17 | void (*free_coherent)(struct device *dev, size_t size, | ||
18 | void *vaddr, dma_addr_t dma_handle); | ||
19 | dma_addr_t (*map_single)(struct device *hwdev, unsigned long ptr, | ||
20 | size_t size, int direction); | ||
21 | void (*unmap_single)(struct device *dev, dma_addr_t addr, | ||
22 | size_t size, int direction); | ||
23 | dma_addr_t (*map_single_attrs)(struct device *dev, void *cpu_addr, | ||
24 | size_t size, int direction, | ||
25 | struct dma_attrs *attrs); | ||
26 | void (*unmap_single_attrs)(struct device *dev, | ||
27 | dma_addr_t dma_addr, | ||
28 | size_t size, int direction, | ||
29 | struct dma_attrs *attrs); | ||
30 | void (*sync_single_for_cpu)(struct device *hwdev, | ||
31 | dma_addr_t dma_handle, size_t size, | ||
32 | int direction); | ||
33 | void (*sync_single_for_device)(struct device *hwdev, | ||
34 | dma_addr_t dma_handle, size_t size, | ||
35 | int direction); | ||
36 | void (*sync_single_range_for_cpu)(struct device *hwdev, | ||
37 | dma_addr_t dma_handle, unsigned long offset, | ||
38 | size_t size, int direction); | ||
39 | void (*sync_single_range_for_device)(struct device *hwdev, | ||
40 | dma_addr_t dma_handle, unsigned long offset, | ||
41 | size_t size, int direction); | ||
42 | void (*sync_sg_for_cpu)(struct device *hwdev, | ||
43 | struct scatterlist *sg, int nelems, | ||
44 | int direction); | ||
45 | void (*sync_sg_for_device)(struct device *hwdev, | ||
46 | struct scatterlist *sg, int nelems, | ||
47 | int direction); | ||
48 | int (*map_sg)(struct device *hwdev, struct scatterlist *sg, | ||
49 | int nents, int direction); | ||
50 | void (*unmap_sg)(struct device *hwdev, | ||
51 | struct scatterlist *sg, int nents, | ||
52 | int direction); | ||
53 | int (*map_sg_attrs)(struct device *dev, | ||
54 | struct scatterlist *sg, int nents, | ||
55 | int direction, struct dma_attrs *attrs); | ||
56 | void (*unmap_sg_attrs)(struct device *dev, | ||
57 | struct scatterlist *sg, int nents, | ||
58 | int direction, | ||
59 | struct dma_attrs *attrs); | ||
60 | int (*dma_supported_op)(struct device *hwdev, u64 mask); | ||
61 | int is_phys; | ||
62 | }; | ||
63 | |||
64 | extern struct dma_mapping_ops *dma_ops; | ||
65 | extern struct ia64_machine_vector ia64_mv; | 13 | extern struct ia64_machine_vector ia64_mv; |
66 | extern void set_iommu_machvec(void); | 14 | extern void set_iommu_machvec(void); |
67 | 15 | ||
68 | static inline void *dma_alloc_coherent(struct device *dev, size_t size, | 16 | static inline void *dma_alloc_coherent(struct device *dev, size_t size, |
69 | dma_addr_t *daddr, gfp_t gfp) | 17 | dma_addr_t *daddr, gfp_t gfp) |
70 | { | 18 | { |
71 | struct dma_mapping_ops *ops = platform_dma_get_ops(dev); | 19 | struct dma_map_ops *ops = platform_dma_get_ops(dev); |
72 | return ops->alloc_coherent(dev, size, daddr, gfp | GFP_DMA); | 20 | return ops->alloc_coherent(dev, size, daddr, gfp | GFP_DMA); |
73 | } | 21 | } |
74 | 22 | ||
75 | static inline void dma_free_coherent(struct device *dev, size_t size, | 23 | static inline void dma_free_coherent(struct device *dev, size_t size, |
76 | void *caddr, dma_addr_t daddr) | 24 | void *caddr, dma_addr_t daddr) |
77 | { | 25 | { |
78 | struct dma_mapping_ops *ops = platform_dma_get_ops(dev); | 26 | struct dma_map_ops *ops = platform_dma_get_ops(dev); |
79 | ops->free_coherent(dev, size, caddr, daddr); | 27 | ops->free_coherent(dev, size, caddr, daddr); |
80 | } | 28 | } |
81 | 29 | ||
@@ -87,8 +35,10 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, | |||
87 | enum dma_data_direction dir, | 35 | enum dma_data_direction dir, |
88 | struct dma_attrs *attrs) | 36 | struct dma_attrs *attrs) |
89 | { | 37 | { |
90 | struct dma_mapping_ops *ops = platform_dma_get_ops(dev); | 38 | struct dma_map_ops *ops = platform_dma_get_ops(dev); |
91 | return ops->map_single_attrs(dev, caddr, size, dir, attrs); | 39 | return ops->map_page(dev, virt_to_page(caddr), |
40 | (unsigned long)caddr & ~PAGE_MASK, size, | ||
41 | dir, attrs); | ||
92 | } | 42 | } |
93 | 43 | ||
94 | static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr, | 44 | static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr, |
@@ -96,8 +46,8 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr, | |||
96 | enum dma_data_direction dir, | 46 | enum dma_data_direction dir, |
97 | struct dma_attrs *attrs) | 47 | struct dma_attrs *attrs) |
98 | { | 48 | { |
99 | struct dma_mapping_ops *ops = platform_dma_get_ops(dev); | 49 | struct dma_map_ops *ops = platform_dma_get_ops(dev); |
100 | ops->unmap_single_attrs(dev, daddr, size, dir, attrs); | 50 | ops->unmap_page(dev, daddr, size, dir, attrs); |
101 | } | 51 | } |
102 | 52 | ||
103 | #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL) | 53 | #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL) |
@@ -107,8 +57,8 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, | |||
107 | int nents, enum dma_data_direction dir, | 57 | int nents, enum dma_data_direction dir, |
108 | struct dma_attrs *attrs) | 58 | struct dma_attrs *attrs) |
109 | { | 59 | { |
110 | struct dma_mapping_ops *ops = platform_dma_get_ops(dev); | 60 | struct dma_map_ops *ops = platform_dma_get_ops(dev); |
111 | return ops->map_sg_attrs(dev, sgl, nents, dir, attrs); | 61 | return ops->map_sg(dev, sgl, nents, dir, attrs); |
112 | } | 62 | } |
113 | 63 | ||
114 | static inline void dma_unmap_sg_attrs(struct device *dev, | 64 | static inline void dma_unmap_sg_attrs(struct device *dev, |
@@ -116,8 +66,8 @@ static inline void dma_unmap_sg_attrs(struct device *dev, | |||
116 | enum dma_data_direction dir, | 66 | enum dma_data_direction dir, |
117 | struct dma_attrs *attrs) | 67 | struct dma_attrs *attrs) |
118 | { | 68 | { |
119 | struct dma_mapping_ops *ops = platform_dma_get_ops(dev); | 69 | struct dma_map_ops *ops = platform_dma_get_ops(dev); |
120 | ops->unmap_sg_attrs(dev, sgl, nents, dir, attrs); | 70 | ops->unmap_sg(dev, sgl, nents, dir, attrs); |
121 | } | 71 | } |
122 | 72 | ||
123 | #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL) | 73 | #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL) |
@@ -127,7 +77,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr, | |||
127 | size_t size, | 77 | size_t size, |
128 | enum dma_data_direction dir) | 78 | enum dma_data_direction dir) |
129 | { | 79 | { |
130 | struct dma_mapping_ops *ops = platform_dma_get_ops(dev); | 80 | struct dma_map_ops *ops = platform_dma_get_ops(dev); |
131 | ops->sync_single_for_cpu(dev, daddr, size, dir); | 81 | ops->sync_single_for_cpu(dev, daddr, size, dir); |
132 | } | 82 | } |
133 | 83 | ||
@@ -135,7 +85,7 @@ static inline void dma_sync_sg_for_cpu(struct device *dev, | |||
135 | struct scatterlist *sgl, | 85 | struct scatterlist *sgl, |
136 | int nents, enum dma_data_direction dir) | 86 | int nents, enum dma_data_direction dir) |
137 | { | 87 | { |
138 | struct dma_mapping_ops *ops = platform_dma_get_ops(dev); | 88 | struct dma_map_ops *ops = platform_dma_get_ops(dev); |
139 | ops->sync_sg_for_cpu(dev, sgl, nents, dir); | 89 | ops->sync_sg_for_cpu(dev, sgl, nents, dir); |
140 | } | 90 | } |
141 | 91 | ||
@@ -144,7 +94,7 @@ static inline void dma_sync_single_for_device(struct device *dev, | |||
144 | size_t size, | 94 | size_t size, |
145 | enum dma_data_direction dir) | 95 | enum dma_data_direction dir) |
146 | { | 96 | { |
147 | struct dma_mapping_ops *ops = platform_dma_get_ops(dev); | 97 | struct dma_map_ops *ops = platform_dma_get_ops(dev); |
148 | ops->sync_single_for_device(dev, daddr, size, dir); | 98 | ops->sync_single_for_device(dev, daddr, size, dir); |
149 | } | 99 | } |
150 | 100 | ||
@@ -153,20 +103,29 @@ static inline void dma_sync_sg_for_device(struct device *dev, | |||
153 | int nents, | 103 | int nents, |
154 | enum dma_data_direction dir) | 104 | enum dma_data_direction dir) |
155 | { | 105 | { |
156 | struct dma_mapping_ops *ops = platform_dma_get_ops(dev); | 106 | struct dma_map_ops *ops = platform_dma_get_ops(dev); |
157 | ops->sync_sg_for_device(dev, sgl, nents, dir); | 107 | ops->sync_sg_for_device(dev, sgl, nents, dir); |
158 | } | 108 | } |
159 | 109 | ||
160 | static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr) | 110 | static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr) |
161 | { | 111 | { |
162 | struct dma_mapping_ops *ops = platform_dma_get_ops(dev); | 112 | struct dma_map_ops *ops = platform_dma_get_ops(dev); |
163 | return ops->mapping_error(dev, daddr); | 113 | return ops->mapping_error(dev, daddr); |
164 | } | 114 | } |
165 | 115 | ||
166 | #define dma_map_page(dev, pg, off, size, dir) \ | 116 | static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, |
167 | dma_map_single(dev, page_address(pg) + (off), (size), (dir)) | 117 | size_t offset, size_t size, |
168 | #define dma_unmap_page(dev, dma_addr, size, dir) \ | 118 | enum dma_data_direction dir) |
169 | dma_unmap_single(dev, dma_addr, size, dir) | 119 | { |
120 | struct dma_map_ops *ops = platform_dma_get_ops(dev); | ||
121 | return ops->map_page(dev, page, offset, size, dir, NULL); | ||
122 | } | ||
123 | |||
124 | static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, | ||
125 | size_t size, enum dma_data_direction dir) | ||
126 | { | ||
127 | dma_unmap_single(dev, addr, size, dir); | ||
128 | } | ||
170 | 129 | ||
171 | /* | 130 | /* |
172 | * Rest of this file is part of the "Advanced DMA API". Use at your own risk. | 131 | * Rest of this file is part of the "Advanced DMA API". Use at your own risk. |
@@ -180,8 +139,8 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr) | |||
180 | 139 | ||
181 | static inline int dma_supported(struct device *dev, u64 mask) | 140 | static inline int dma_supported(struct device *dev, u64 mask) |
182 | { | 141 | { |
183 | struct dma_mapping_ops *ops = platform_dma_get_ops(dev); | 142 | struct dma_map_ops *ops = platform_dma_get_ops(dev); |
184 | return ops->dma_supported_op(dev, mask); | 143 | return ops->dma_supported(dev, mask); |
185 | } | 144 | } |
186 | 145 | ||
187 | static inline int | 146 | static inline int |