author		Jonas Bonn <jonas@southpole.se>		2012-04-15 15:09:25 -0400
committer	Jonas Bonn <jonas@southpole.se>		2012-05-08 05:43:51 -0400
commit		7b903e6c021a5462e26ea7a8f014fa60b6782bdb (patch)
tree		47bf29fa57bff2cd3940beeeace93e1da1d5cee6 /arch/openrisc
parent		b0e026f4dc118752382fa926431b4512a8042e09 (diff)
openrisc: provide dma_map_ops
This switches OpenRISC over to fully using the generic dma-mapping
framework. This was almost already the case as the architecture's
implementation was essentially a copy of the generic header.
This also brings this architecture in line with the recent changes
to dma_map_ops (adding attributes to ops->alloc).
Signed-off-by: Jonas Bonn <jonas@southpole.se>
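
[Editor's note] The practical upshot of the dma_map_ops switch is that driver code does not change: it keeps calling the generic DMA API, and the framework dispatches through the architecture's ops table. A minimal, hypothetical driver fragment as a sketch (the mydev_* name is illustrative, not part of this commit):

```c
#include <linux/dma-mapping.h>

/* After this commit, dma_alloc_coherent() expands to dma_alloc_attrs(),
 * which fetches the ops table via get_dma_ops(dev) and ends up in
 * or1k_dma_map_ops.alloc() on OpenRISC. */
static int mydev_setup_dma(struct device *dev, size_t size)
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	cpu_addr = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... hand dma_handle to the device ... */

	dma_free_coherent(dev, size, cpu_addr, dma_handle);
	return 0;
}
```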
Diffstat (limited to 'arch/openrisc')
-rw-r--r--	arch/openrisc/include/asm/dma-mapping.h	147
-rw-r--r--	arch/openrisc/kernel/dma.c			109
2 files changed, 107 insertions(+), 149 deletions(-)
diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h
index b206ba4608b2..fab8628e1b6e 100644
--- a/arch/openrisc/include/asm/dma-mapping.h
+++ b/arch/openrisc/include/asm/dma-mapping.h
@@ -20,150 +20,71 @@
 /*
  * See Documentation/DMA-API-HOWTO.txt and
  * Documentation/DMA-API.txt for documentation.
- *
- * This file is written with the intention of eventually moving over
- * to largely using asm-generic/dma-mapping-common.h in its place.
  */
 
 #include <linux/dma-debug.h>
 #include <asm-generic/dma-coherent.h>
 #include <linux/kmemcheck.h>
+#include <linux/dma-mapping.h>
 
 #define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
 
+extern struct dma_map_ops or1k_dma_map_ops;
 
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-void *or1k_dma_alloc_coherent(struct device *dev, size_t size,
-			      dma_addr_t *dma_handle, gfp_t flag);
-void or1k_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-			    dma_addr_t dma_handle);
-dma_addr_t or1k_map_page(struct device *dev, struct page *page,
-			 unsigned long offset, size_t size,
-			 enum dma_data_direction dir,
-			 struct dma_attrs *attrs);
-void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
-		     size_t size, enum dma_data_direction dir,
-		     struct dma_attrs *attrs);
-int or1k_map_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir,
-		struct dma_attrs *attrs);
-void or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
-		   int nents, enum dma_data_direction dir,
-		   struct dma_attrs *attrs);
-void or1k_sync_single_for_cpu(struct device *dev,
-			      dma_addr_t dma_handle, size_t size,
-			      enum dma_data_direction dir);
-void or1k_sync_single_for_device(struct device *dev,
-				 dma_addr_t dma_handle, size_t size,
-				 enum dma_data_direction dir);
-
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-					dma_addr_t *dma_handle, gfp_t flag)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-	void *memory;
-
-	memory = or1k_dma_alloc_coherent(dev, size, dma_handle, flag);
-
-	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
-	return memory;
+	return &or1k_dma_map_ops;
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *cpu_addr, dma_addr_t dma_handle)
-{
-	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
-	or1k_dma_free_coherent(dev, size, cpu_addr, dma_handle);
-}
+#include <asm-generic/dma-mapping-common.h>
 
-static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
-					size_t size,
-					enum dma_data_direction dir)
-{
-	dma_addr_t addr;
-
-	kmemcheck_mark_initialized(ptr, size);
-	BUG_ON(!valid_dma_direction(dir));
-	addr = or1k_map_page(dev, virt_to_page(ptr),
-			     (unsigned long)ptr & ~PAGE_MASK, size,
-			     dir, NULL);
-	debug_dma_map_page(dev, virt_to_page(ptr),
-			   (unsigned long)ptr & ~PAGE_MASK, size,
-			   dir, addr, true);
-	return addr;
-}
+#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
 
-static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
-				    size_t size,
-				    enum dma_data_direction dir)
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t gfp,
+				    struct dma_attrs *attrs)
 {
-	BUG_ON(!valid_dma_direction(dir));
-	or1k_unmap_page(dev, addr, size, dir, NULL);
-	debug_dma_unmap_page(dev, addr, size, dir, true);
-}
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	void *memory;
 
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
-			     int nents, enum dma_data_direction dir)
-{
-	int i, ents;
-	struct scatterlist *s;
+	memory = ops->alloc(dev, size, dma_handle, gfp, attrs);
 
-	for_each_sg(sg, s, nents, i)
-		kmemcheck_mark_initialized(sg_virt(s), s->length);
-	BUG_ON(!valid_dma_direction(dir));
-	ents = or1k_map_sg(dev, sg, nents, dir, NULL);
-	debug_dma_map_sg(dev, sg, nents, ents, dir);
+	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
 
-	return ents;
+	return memory;
 }
 
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction dir)
-{
-	BUG_ON(!valid_dma_direction(dir));
-	debug_dma_unmap_sg(dev, sg, nents, dir);
-	or1k_unmap_sg(dev, sg, nents, dir, NULL);
-}
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
 
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-				      size_t offset, size_t size,
-				      enum dma_data_direction dir)
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *cpu_addr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
-	dma_addr_t addr;
+	struct dma_map_ops *ops = get_dma_ops(dev);
 
-	kmemcheck_mark_initialized(page_address(page) + offset, size);
-	BUG_ON(!valid_dma_direction(dir));
-	addr = or1k_map_page(dev, page, offset, size, dir, NULL);
-	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
+	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
 
-	return addr;
+	ops->free(dev, size, cpu_addr, dma_handle, attrs);
 }
 
-static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
-				  size_t size, enum dma_data_direction dir)
+static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
+					  dma_addr_t *dma_handle, gfp_t gfp)
 {
-	BUG_ON(!valid_dma_direction(dir));
-	or1k_unmap_page(dev, addr, size, dir, NULL);
-	debug_dma_unmap_page(dev, addr, size, dir, true);
-}
+	struct dma_attrs attrs;
 
-static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
-					   size_t size,
-					   enum dma_data_direction dir)
-{
-	BUG_ON(!valid_dma_direction(dir));
-	or1k_sync_single_for_cpu(dev, addr, size, dir);
-	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
+	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+
+	return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
 }
 
-static inline void dma_sync_single_for_device(struct device *dev,
-					      dma_addr_t addr, size_t size,
-					      enum dma_data_direction dir)
+static inline void dma_free_noncoherent(struct device *dev, size_t size,
+					void *cpu_addr, dma_addr_t dma_handle)
 {
-	BUG_ON(!valid_dma_direction(dir));
-	or1k_sync_single_for_device(dev, addr, size, dir);
-	debug_dma_sync_single_for_device(dev, addr, size, dir);
+	struct dma_attrs attrs;
+
+	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+
+	dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
 }
 
 static inline int dma_supported(struct device *dev, u64 dma_mask)
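
[Editor's note] The header above also reroutes the noncoherent helpers through DMA_ATTR_NON_CONSISTENT. A sketch of how a caller would use them, assuming the usual contract that the caller performs all synchronisation (the example_* name is hypothetical):

```c
#include <linux/dma-mapping.h>
#include <linux/string.h>

static void example_noncoherent(struct device *dev)
{
	dma_addr_t handle;
	void *buf;

	/* DMA_ATTR_NON_CONSISTENT is set internally, so or1k_dma_alloc()
	 * skips the cache-inhibit page walk and returns cachable memory. */
	buf = dma_alloc_noncoherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
	if (!buf)
		return;

	memset(buf, 0, PAGE_SIZE);	/* CPU writes land in the cache... */

	/* ...so flush them out before the device reads the buffer. */
	dma_sync_single_for_device(dev, handle, PAGE_SIZE, DMA_TO_DEVICE);

	dma_free_noncoherent(dev, PAGE_SIZE, buf, handle);
}
```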
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
index f1c8ee2895d0..0b77ddb1ee07 100644
--- a/arch/openrisc/kernel/dma.c
+++ b/arch/openrisc/kernel/dma.c
@@ -21,13 +21,16 @@
 
 #include <linux/dma-mapping.h>
 #include <linux/dma-debug.h>
+#include <linux/export.h>
+#include <linux/dma-attrs.h>
 
 #include <asm/cpuinfo.h>
 #include <asm/spr_defs.h>
 #include <asm/tlbflush.h>
 
-static int page_set_nocache(pte_t *pte, unsigned long addr,
-			    unsigned long next, struct mm_walk *walk)
+static int
+page_set_nocache(pte_t *pte, unsigned long addr,
+		 unsigned long next, struct mm_walk *walk)
 {
 	unsigned long cl;
 
@@ -46,8 +49,9 @@ static int page_set_nocache(pte_t *pte, unsigned long addr,
 	return 0;
 }
 
-static int page_clear_nocache(pte_t *pte, unsigned long addr,
-			      unsigned long next, struct mm_walk *walk)
+static int
+page_clear_nocache(pte_t *pte, unsigned long addr,
+		   unsigned long next, struct mm_walk *walk)
 {
 	pte_val(*pte) &= ~_PAGE_CI;
 
@@ -67,9 +71,19 @@ static int page_clear_nocache(pte_t *pte, unsigned long addr,
  * cache-inhibit bit on those pages, and makes sure that the pages are
  * flushed out of the cache before they are used.
  *
+ * If the NON_CONSISTENT attribute is set, then this function just
+ * returns "normal", cachable memory.
+ *
+ * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
+ * into consideration here, too. All current known implementations of
+ * the OR1K support only strongly ordered memory accesses, so that flag
+ * is being ignored for now; uncached but write-combined memory is a
+ * missing feature of the OR1K.
  */
-void *or1k_dma_alloc_coherent(struct device *dev, size_t size,
-			      dma_addr_t *dma_handle, gfp_t gfp)
+static void *
+or1k_dma_alloc(struct device *dev, size_t size,
+	       dma_addr_t *dma_handle, gfp_t gfp,
+	       struct dma_attrs *attrs)
 {
 	unsigned long va;
 	void *page;
@@ -87,20 +101,23 @@ void *or1k_dma_alloc_coherent(struct device *dev, size_t size,
 
 	va = (unsigned long)page;
 
-	/*
-	 * We need to iterate through the pages, clearing the dcache for
-	 * them and setting the cache-inhibit bit.
-	 */
-	if (walk_page_range(va, va + size, &walk)) {
-		free_pages_exact(page, size);
-		return NULL;
+	if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
+		/*
+		 * We need to iterate through the pages, clearing the dcache for
+		 * them and setting the cache-inhibit bit.
+		 */
+		if (walk_page_range(va, va + size, &walk)) {
+			free_pages_exact(page, size);
+			return NULL;
+		}
 	}
 
 	return (void *)va;
 }
 
-void or1k_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-			    dma_addr_t dma_handle)
+static void
+or1k_dma_free(struct device *dev, size_t size, void *vaddr,
+	      dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	unsigned long va = (unsigned long)vaddr;
 	struct mm_walk walk = {
@@ -108,16 +125,19 @@ void or1k_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 		.mm = &init_mm
 	};
 
-	/* walk_page_range shouldn't be able to fail here */
-	WARN_ON(walk_page_range(va, va + size, &walk));
+	if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
+		/* walk_page_range shouldn't be able to fail here */
+		WARN_ON(walk_page_range(va, va + size, &walk));
+	}
 
 	free_pages_exact(vaddr, size);
 }
 
-dma_addr_t or1k_map_page(struct device *dev, struct page *page,
-			 unsigned long offset, size_t size,
-			 enum dma_data_direction dir,
-			 struct dma_attrs *attrs)
+static dma_addr_t
+or1k_map_page(struct device *dev, struct page *page,
+	      unsigned long offset, size_t size,
+	      enum dma_data_direction dir,
+	      struct dma_attrs *attrs)
 {
 	unsigned long cl;
 	dma_addr_t addr = page_to_phys(page) + offset;
@@ -147,16 +167,18 @@ dma_addr_t or1k_map_page(struct device *dev, struct page *page,
 	return addr;
 }
 
-void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
-		     size_t size, enum dma_data_direction dir,
-		     struct dma_attrs *attrs)
+static void
+or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
 {
 	/* Nothing special to do here... */
 }
 
-int or1k_map_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir,
-		struct dma_attrs *attrs)
+static int
+or1k_map_sg(struct device *dev, struct scatterlist *sg,
+	    int nents, enum dma_data_direction dir,
+	    struct dma_attrs *attrs)
 {
 	struct scatterlist *s;
 	int i;
@@ -169,9 +191,10 @@ int or1k_map_sg(struct device *dev, struct scatterlist *sg,
 	return nents;
 }
 
-void or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
-		   int nents, enum dma_data_direction dir,
-		   struct dma_attrs *attrs)
+static void
+or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
+	      int nents, enum dma_data_direction dir,
+	      struct dma_attrs *attrs)
 {
 	struct scatterlist *s;
 	int i;
@@ -181,9 +204,10 @@ void or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
 	}
 }
 
-void or1k_sync_single_for_cpu(struct device *dev,
-			      dma_addr_t dma_handle, size_t size,
-			      enum dma_data_direction dir)
+static void
+or1k_sync_single_for_cpu(struct device *dev,
+			 dma_addr_t dma_handle, size_t size,
+			 enum dma_data_direction dir)
 {
 	unsigned long cl;
 	dma_addr_t addr = dma_handle;
@@ -193,9 +217,10 @@ void or1k_sync_single_for_cpu(struct device *dev,
 	mtspr(SPR_DCBIR, cl);
 }
 
-void or1k_sync_single_for_device(struct device *dev,
-				 dma_addr_t dma_handle, size_t size,
-				 enum dma_data_direction dir)
+static void
+or1k_sync_single_for_device(struct device *dev,
+			    dma_addr_t dma_handle, size_t size,
+			    enum dma_data_direction dir)
 {
 	unsigned long cl;
 	dma_addr_t addr = dma_handle;
@@ -205,6 +230,18 @@ void or1k_sync_single_for_device(struct device *dev,
 	mtspr(SPR_DCBFR, cl);
 }
 
+struct dma_map_ops or1k_dma_map_ops = {
+	.alloc = or1k_dma_alloc,
+	.free = or1k_dma_free,
+	.map_page = or1k_map_page,
+	.unmap_page = or1k_unmap_page,
+	.map_sg = or1k_map_sg,
+	.unmap_sg = or1k_unmap_sg,
+	.sync_single_for_cpu = or1k_sync_single_for_cpu,
+	.sync_single_for_device = or1k_sync_single_for_device,
+};
+EXPORT_SYMBOL(or1k_dma_map_ops);
+
 /* Number of entries preallocated for DMA-API debugging */
 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
 
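
[Editor's note] For context, the sketch below is modelled on the asm-generic/dma-mapping-common.h wrappers of this era (not a verbatim copy of that header): each generic call looks up the ops table and dispatches through it, which is exactly what lets this commit delete the hand-rolled inlines from dma-mapping.h.

```c
/* Sketch of the generic dispatch this commit opts into; sketch_* is an
 * illustrative name, not a kernel symbol. */
static inline dma_addr_t sketch_dma_map_single(struct device *dev, void *ptr,
					       size_t size,
					       enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev); /* &or1k_dma_map_ops here */
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, NULL);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);
	return addr;
}
```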