author     Jonas Bonn <jonas@southpole.se>    2012-04-15 15:09:25 -0400
committer  Jonas Bonn <jonas@southpole.se>    2012-05-08 05:43:51 -0400
commit     7b903e6c021a5462e26ea7a8f014fa60b6782bdb (patch)
tree       47bf29fa57bff2cd3940beeeace93e1da1d5cee6 /arch/openrisc/kernel/dma.c
parent     b0e026f4dc118752382fa926431b4512a8042e09 (diff)
openrisc: provide dma_map_ops
This switches OpenRISC over to fully using the generic dma-mapping
framework. This was almost already the case as the architecture's
implementation was essentially a copy of the generic header.
This also brings this architecture in line with the recent changes
to dma_map_ops (adding attributes to ops->alloc).
Signed-off-by: Jonas Bonn <jonas@southpole.se>
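
For context, the dma_map_ops table exported at the end of this patch is what
the generic dma-mapping framework dispatches through. Below is a minimal
sketch of the header-side glue such a switch relies on; it is an illustrative
reconstruction of the pattern of this era, not the verbatim
arch/openrisc/include/asm/dma-mapping.h:

    /* Sketch only: how a dma_map_ops-based architecture exposes its ops. */
    extern struct dma_map_ops or1k_dma_map_ops;

    static inline struct dma_map_ops *get_dma_ops(struct device *dev)
    {
            /* One set of ops serves every device on OR1K. */
            return &or1k_dma_map_ops;
    }

    static inline void *dma_alloc_attrs(struct device *dev, size_t size,
                                        dma_addr_t *dma_handle, gfp_t gfp,
                                        struct dma_attrs *attrs)
    {
            struct dma_map_ops *ops = get_dma_ops(dev);

            /* attrs is the parameter the recent ops->alloc change added. */
            return ops->alloc(dev, size, dma_handle, gfp, attrs);
    }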
Diffstat (limited to 'arch/openrisc/kernel/dma.c')
-rw-r--r--  arch/openrisc/kernel/dma.c  109
1 file changed, 73 insertions(+), 36 deletions(-)
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
index f1c8ee2895d0..0b77ddb1ee07 100644
--- a/arch/openrisc/kernel/dma.c
+++ b/arch/openrisc/kernel/dma.c
@@ -21,13 +21,16 @@
 
 #include <linux/dma-mapping.h>
 #include <linux/dma-debug.h>
+#include <linux/export.h>
+#include <linux/dma-attrs.h>
 
 #include <asm/cpuinfo.h>
 #include <asm/spr_defs.h>
 #include <asm/tlbflush.h>
 
-static int page_set_nocache(pte_t *pte, unsigned long addr,
-                            unsigned long next, struct mm_walk *walk)
+static int
+page_set_nocache(pte_t *pte, unsigned long addr,
+                 unsigned long next, struct mm_walk *walk)
 {
         unsigned long cl;
 
@@ -46,8 +49,9 @@ static int page_set_nocache(pte_t *pte, unsigned long addr,
         return 0;
 }
 
-static int page_clear_nocache(pte_t *pte, unsigned long addr,
-                              unsigned long next, struct mm_walk *walk)
+static int
+page_clear_nocache(pte_t *pte, unsigned long addr,
+                   unsigned long next, struct mm_walk *walk)
 {
         pte_val(*pte) &= ~_PAGE_CI;
 
@@ -67,9 +71,19 @@ static int page_clear_nocache(pte_t *pte, unsigned long addr,
  * cache-inhibit bit on those pages, and makes sure that the pages are
  * flushed out of the cache before they are used.
  *
+ * If the NON_CONSISTENT attribute is set, then this function just
+ * returns "normal", cachable memory.
+ *
+ * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
+ * into consideration here, too.  All current known implementations of
+ * the OR1K support only strongly ordered memory accesses, so that flag
+ * is being ignored for now; uncached but write-combined memory is a
+ * missing feature of the OR1K.
  */
-void *or1k_dma_alloc_coherent(struct device *dev, size_t size,
-                              dma_addr_t *dma_handle, gfp_t gfp)
+static void *
+or1k_dma_alloc(struct device *dev, size_t size,
+               dma_addr_t *dma_handle, gfp_t gfp,
+               struct dma_attrs *attrs)
 {
         unsigned long va;
         void *page;
@@ -87,20 +101,23 @@ void *or1k_dma_alloc_coherent(struct device *dev, size_t size,
 
         va = (unsigned long)page;
 
-        /*
-         * We need to iterate through the pages, clearing the dcache for
-         * them and setting the cache-inhibit bit.
-         */
-        if (walk_page_range(va, va + size, &walk)) {
-                free_pages_exact(page, size);
-                return NULL;
+        if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
+                /*
+                 * We need to iterate through the pages, clearing the dcache for
+                 * them and setting the cache-inhibit bit.
+                 */
+                if (walk_page_range(va, va + size, &walk)) {
+                        free_pages_exact(page, size);
+                        return NULL;
+                }
         }
 
         return (void *)va;
 }
 
-void or1k_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-                            dma_addr_t dma_handle)
+static void
+or1k_dma_free(struct device *dev, size_t size, void *vaddr,
+              dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
         unsigned long va = (unsigned long)vaddr;
         struct mm_walk walk = {
@@ -108,16 +125,19 @@ void or1k_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
                 .mm = &init_mm
         };
 
-        /* walk_page_range shouldn't be able to fail here */
-        WARN_ON(walk_page_range(va, va + size, &walk));
+        if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
+                /* walk_page_range shouldn't be able to fail here */
+                WARN_ON(walk_page_range(va, va + size, &walk));
+        }
 
         free_pages_exact(vaddr, size);
 }
 
-dma_addr_t or1k_map_page(struct device *dev, struct page *page,
-                         unsigned long offset, size_t size,
-                         enum dma_data_direction dir,
-                         struct dma_attrs *attrs)
+static dma_addr_t
+or1k_map_page(struct device *dev, struct page *page,
+              unsigned long offset, size_t size,
+              enum dma_data_direction dir,
+              struct dma_attrs *attrs)
 {
         unsigned long cl;
         dma_addr_t addr = page_to_phys(page) + offset;
@@ -147,16 +167,18 @@ dma_addr_t or1k_map_page(struct device *dev, struct page *page,
         return addr;
 }
 
-void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
-                     size_t size, enum dma_data_direction dir,
-                     struct dma_attrs *attrs)
+static void
+or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
+                size_t size, enum dma_data_direction dir,
+                struct dma_attrs *attrs)
 {
         /* Nothing special to do here... */
 }
 
-int or1k_map_sg(struct device *dev, struct scatterlist *sg,
-                int nents, enum dma_data_direction dir,
-                struct dma_attrs *attrs)
+static int
+or1k_map_sg(struct device *dev, struct scatterlist *sg,
+            int nents, enum dma_data_direction dir,
+            struct dma_attrs *attrs)
 {
         struct scatterlist *s;
         int i;
@@ -169,9 +191,10 @@ int or1k_map_sg(struct device *dev, struct scatterlist *sg,
         return nents;
 }
 
-void or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
-                   int nents, enum dma_data_direction dir,
-                   struct dma_attrs *attrs)
+static void
+or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
+              int nents, enum dma_data_direction dir,
+              struct dma_attrs *attrs)
 {
         struct scatterlist *s;
         int i;
@@ -181,9 +204,10 @@ void or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
         }
 }
 
-void or1k_sync_single_for_cpu(struct device *dev,
-                              dma_addr_t dma_handle, size_t size,
-                              enum dma_data_direction dir)
+static void
+or1k_sync_single_for_cpu(struct device *dev,
+                         dma_addr_t dma_handle, size_t size,
+                         enum dma_data_direction dir)
 {
         unsigned long cl;
         dma_addr_t addr = dma_handle;
@@ -193,9 +217,10 @@ void or1k_sync_single_for_cpu(struct device *dev,
         mtspr(SPR_DCBIR, cl);
 }
 
-void or1k_sync_single_for_device(struct device *dev,
-                                 dma_addr_t dma_handle, size_t size,
-                                 enum dma_data_direction dir)
+static void
+or1k_sync_single_for_device(struct device *dev,
+                            dma_addr_t dma_handle, size_t size,
+                            enum dma_data_direction dir)
 {
         unsigned long cl;
         dma_addr_t addr = dma_handle;
@@ -205,6 +230,18 @@ void or1k_sync_single_for_device(struct device *dev,
         mtspr(SPR_DCBFR, cl);
 }
 
+struct dma_map_ops or1k_dma_map_ops = {
+        .alloc = or1k_dma_alloc,
+        .free = or1k_dma_free,
+        .map_page = or1k_map_page,
+        .unmap_page = or1k_unmap_page,
+        .map_sg = or1k_map_sg,
+        .unmap_sg = or1k_unmap_sg,
+        .sync_single_for_cpu = or1k_sync_single_for_cpu,
+        .sync_single_for_device = or1k_sync_single_for_device,
+};
+EXPORT_SYMBOL(or1k_dma_map_ops);
+
 /* Number of entries preallocated for DMA-API debugging */
 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
 
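Usage note: with these ops in place, a driver that wants the cachable variant
described in the or1k_dma_alloc comment above would request it through the
dma-attrs interface of this era. A hypothetical caller, where the device
pointer, function name, and buffer size are placeholders:

    #include <linux/dma-mapping.h>
    #include <linux/dma-attrs.h>

    static void *alloc_noncoherent(struct device *dev, size_t size,
                                   dma_addr_t *handle)
    {
            DEFINE_DMA_ATTRS(attrs);

            /* Ask or1k_dma_alloc to skip the cache-inhibit page walk
             * and return normal, cachable memory. */
            dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);

            return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
    }

The driver then owns cache maintenance itself, bracketing device accesses with
dma_sync_single_for_device()/dma_sync_single_for_cpu(), which on OR1K flush or
invalidate cache lines via SPR_DCBFR/SPR_DCBIR as seen above.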