author     Jonathan Corbet <corbet@lwn.net>    2010-04-23 12:04:12 -0400
committer  Jonathan Corbet <corbet@lwn.net>    2010-05-07 19:17:38 -0400
commit     3d28eb42c52a799c806082e6d856f634ed1db902
tree       ca39bb9655c48a59f7a0ea5af3023d3bf8153dab
parent     94dd1a856b23bd51dfebf68e6dd63cfd4d4fd5ae
viafb: Add a simple VX855 DMA engine driver
This code provides a minimal amount of access to the DMA engine as
needed by the camera driver. VX855 only; it's guaranteed not to work
on other chipsets, so it won't try.
Cc: ScottFang@viatech.com.cn
Cc: JosephChan@via.com.tw
Cc: Harald Welte <laforge@gnumonks.org>
Acked-by: Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
Signed-off-by: Jonathan Corbet <corbet@lwn.net>
 drivers/video/via/via-core.c | 232
 drivers/video/via/via-core.h |  29
 2 files changed, 261 insertions, 0 deletions
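The exported interface gives a client a small lifecycle: take a reference with viafb_request_dma() (the first user registers the shared interrupt handler), hand a DMA-mapped scatterlist to viafb_dma_copy_out_sg(), and drop the reference with viafb_release_dma(). Below is a minimal sketch of how a caller such as the camera driver might use it; example_grab_frame() and its surrounding context are hypothetical, the scatterlist is assumed to have been mapped with dma_map_sg() beforehand (viafb_dma_copy_out_sg() reads sg_dma_address()/sg_dma_len() directly), and a real driver would normally hold the DMA reference for the whole streaming session rather than per frame.

#include <linux/errno.h>
#include <linux/scatterlist.h>

#include "via-core.h"

/*
 * Hypothetical helper: copy one frame out of framebuffer memory,
 * starting at fb_offset, into the buffer described by an
 * already-mapped scatterlist.
 */
static int example_grab_frame(unsigned int fb_offset,
			      struct scatterlist *sg, int nsg)
{
	int ret;

	ret = viafb_request_dma();	/* -ENODEV on anything but VX855 */
	if (ret)
		return ret;

	ret = viafb_dma_copy_out_sg(fb_offset, sg, nsg);

	viafb_release_dma();		/* last user frees the IRQ */
	return ret;
}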
diff --git a/drivers/video/via/via-core.c b/drivers/video/via/via-core.c
index 701b95575747..9929bb1549b6 100644
--- a/drivers/video/via/via-core.c
+++ b/drivers/video/via/via-core.c
@@ -13,6 +13,7 @@
 #include "global.h"
 
 #include <linux/module.h>
+#include <linux/interrupt.h>
 #include <linux/platform_device.h>
 
 /*
@@ -92,7 +93,238 @@ void viafb_irq_disable(u32 mask)
 }
 EXPORT_SYMBOL_GPL(viafb_irq_disable);
 
+/* ---------------------------------------------------------------------- */
+/*
+ * Access to the DMA engine.  This currently provides what the camera
+ * driver needs (i.e. outgoing only) but is easily expandable if need
+ * be.
+ */
+
+/*
+ * There are four DMA channels in the vx855.  For now, we only
+ * use one of them, though.  Most of the time, the DMA channel
+ * will be idle, so we keep the IRQ handler unregistered except
+ * when some subsystem has indicated an interest.
+ */
+static int viafb_dma_users;
+static DECLARE_COMPLETION(viafb_dma_completion);
+/*
+ * This mutex protects viafb_dma_users and our global interrupt
+ * registration state; it also serializes access to the DMA
+ * engine.
+ */
+static DEFINE_MUTEX(viafb_dma_lock);
+
+/*
+ * The VX855 DMA descriptor (used for s/g transfers) looks
+ * like this.
+ */
+struct viafb_vx855_dma_descr {
+	u32	addr_low;	/* Low part of phys addr */
+	u32	addr_high;	/* High 12 bits of addr */
+	u32	fb_offset;	/* Offset into FB memory */
+	u32	seg_size;	/* Size, 16-byte units */
+	u32	tile_mode;	/* "tile mode" setting */
+	u32	next_desc_low;	/* Next descriptor addr */
+	u32	next_desc_high;
+	u32	pad;		/* Fill out to 64 bytes */
+};
+
+/*
+ * Flags added to the "next descriptor low" pointers
+ */
+#define VIAFB_DMA_MAGIC		0x01	/* ??? Just has to be there */
+#define VIAFB_DMA_FINAL_SEGMENT	0x02	/* Final segment */
+
+/*
+ * The completion IRQ handler.
+ */
+static irqreturn_t viafb_dma_irq(int irq, void *data)
+{
+	int csr;
+	irqreturn_t ret = IRQ_NONE;
+
+	spin_lock(&global_dev.reg_lock);
+	csr = viafb_mmio_read(VDMA_CSR0);
+	if (csr & VDMA_C_DONE) {
+		viafb_mmio_write(VDMA_CSR0, VDMA_C_DONE);
+		complete(&viafb_dma_completion);
+		ret = IRQ_HANDLED;
+	}
+	spin_unlock(&global_dev.reg_lock);
+	return ret;
+}
+
+/*
+ * Indicate a need for DMA functionality.
+ */
+int viafb_request_dma(void)
+{
+	int ret = 0;
+
+	/*
+	 * Only VX855 is supported currently.
+	 */
+	if (global_dev.chip_type != UNICHROME_VX855)
+		return -ENODEV;
+	/*
+	 * Note the new user and set up our interrupt handler
+	 * if need be.
+	 */
+	mutex_lock(&viafb_dma_lock);
+	viafb_dma_users++;
+	if (viafb_dma_users == 1) {
+		ret = request_irq(global_dev.pdev->irq, viafb_dma_irq,
+				IRQF_SHARED, "via-dma", &viafb_dma_users);
+		if (ret)
+			viafb_dma_users--;
+		else
+			viafb_irq_enable(VDE_I_DMA0TDEN);
+	}
+	mutex_unlock(&viafb_dma_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(viafb_request_dma);
+
+void viafb_release_dma(void)
+{
+	mutex_lock(&viafb_dma_lock);
+	viafb_dma_users--;
+	if (viafb_dma_users == 0) {
+		viafb_irq_disable(VDE_I_DMA0TDEN);
+		free_irq(global_dev.pdev->irq, &viafb_dma_users);
+	}
+	mutex_unlock(&viafb_dma_lock);
+}
+EXPORT_SYMBOL_GPL(viafb_release_dma);
+
+
+#if 0
+/*
+ * Copy a single buffer from FB memory, synchronously.  This code works
+ * but is not currently used.
+ */
+void viafb_dma_copy_out(unsigned int offset, dma_addr_t paddr, int len)
+{
+	unsigned long flags;
+	int csr;
+
+	mutex_lock(&viafb_dma_lock);
+	init_completion(&viafb_dma_completion);
+	/*
+	 * Program the controller.
+	 */
+	spin_lock_irqsave(&global_dev.reg_lock, flags);
+	viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_DONE);
+	/* Enable ints; must happen after CSR0 write! */
+	viafb_mmio_write(VDMA_MR0, VDMA_MR_TDIE);
+	viafb_mmio_write(VDMA_MARL0, (int) (paddr & 0xfffffff0));
+	viafb_mmio_write(VDMA_MARH0, (int) ((paddr >> 28) & 0xfff));
+	/* Data sheet suggests DAR0 should be <<4, but it lies */
+	viafb_mmio_write(VDMA_DAR0, offset);
+	viafb_mmio_write(VDMA_DQWCR0, len >> 4);
+	viafb_mmio_write(VDMA_TMR0, 0);
+	viafb_mmio_write(VDMA_DPRL0, 0);
+	viafb_mmio_write(VDMA_DPRH0, 0);
+	viafb_mmio_write(VDMA_PMR0, 0);
+	csr = viafb_mmio_read(VDMA_CSR0);
+	viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_START);
+	spin_unlock_irqrestore(&global_dev.reg_lock, flags);
+	/*
+	 * Now we just wait until the interrupt handler says
+	 * we're done.
+	 */
+	wait_for_completion_interruptible(&viafb_dma_completion);
+	viafb_mmio_write(VDMA_MR0, 0); /* Reset int enable */
+	mutex_unlock(&viafb_dma_lock);
+}
+EXPORT_SYMBOL_GPL(viafb_dma_copy_out);
+#endif
+
+/*
+ * Do a scatter/gather DMA copy from FB memory.  You must have done
+ * a successful call to viafb_request_dma() first.
+ */
+int viafb_dma_copy_out_sg(unsigned int offset, struct scatterlist *sg, int nsg)
+{
+	struct viafb_vx855_dma_descr *descr;
+	void *descrpages;
+	dma_addr_t descr_handle;
+	unsigned long flags;
+	int i;
+	struct scatterlist *sgentry;
+	dma_addr_t nextdesc;
 
+	/*
+	 * Get a place to put the descriptors.
+	 */
+	descrpages = dma_alloc_coherent(&global_dev.pdev->dev,
+			nsg*sizeof(struct viafb_vx855_dma_descr),
+			&descr_handle, GFP_KERNEL);
+	if (descrpages == NULL) {
+		dev_err(&global_dev.pdev->dev, "Unable to get descr page.\n");
+		return -ENOMEM;
+	}
+	mutex_lock(&viafb_dma_lock);
+	/*
+	 * Fill them in.
+	 */
+	descr = descrpages;
+	nextdesc = descr_handle + sizeof(struct viafb_vx855_dma_descr);
+	for_each_sg(sg, sgentry, nsg, i) {
+		dma_addr_t paddr = sg_dma_address(sgentry);
+		descr->addr_low = paddr & 0xfffffff0;
+		descr->addr_high = ((u64) paddr >> 32) & 0x0fff;
+		descr->fb_offset = offset;
+		descr->seg_size = sg_dma_len(sgentry) >> 4;
+		descr->tile_mode = 0;
+		descr->next_desc_low = (nextdesc&0xfffffff0) | VIAFB_DMA_MAGIC;
+		descr->next_desc_high = ((u64) nextdesc >> 32) & 0x0fff;
+		descr->pad = 0xffffffff;  /* VIA driver does this */
+		offset += sg_dma_len(sgentry);
+		nextdesc += sizeof(struct viafb_vx855_dma_descr);
+		descr++;
+	}
+	descr[-1].next_desc_low = VIAFB_DMA_FINAL_SEGMENT|VIAFB_DMA_MAGIC;
+	/*
+	 * Program the engine.
+	 */
+	spin_lock_irqsave(&global_dev.reg_lock, flags);
+	init_completion(&viafb_dma_completion);
+	viafb_mmio_write(VDMA_DQWCR0, 0);
+	viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_DONE);
+	viafb_mmio_write(VDMA_MR0, VDMA_MR_TDIE | VDMA_MR_CHAIN);
+	viafb_mmio_write(VDMA_DPRL0, descr_handle | VIAFB_DMA_MAGIC);
+	viafb_mmio_write(VDMA_DPRH0,
+			(((u64)descr_handle >> 32) & 0x0fff) | 0xf0000);
+	(void) viafb_mmio_read(VDMA_CSR0);
+	viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_START);
+	spin_unlock_irqrestore(&global_dev.reg_lock, flags);
+	/*
+	 * Now we just wait until the interrupt handler says
+	 * we're done.  Except that, actually, we need to wait a little
+	 * longer: the interrupts seem to jump the gun a little and we
+	 * get corrupted frames sometimes.
+	 */
+	wait_for_completion_timeout(&viafb_dma_completion, 1);
+	msleep(1);
+	if ((viafb_mmio_read(VDMA_CSR0)&VDMA_C_DONE) == 0)
+		printk(KERN_ERR "VIA DMA timeout!\n");
+	/*
+	 * Clean up and we're done.
+	 */
+	viafb_mmio_write(VDMA_CSR0, VDMA_C_DONE);
+	viafb_mmio_write(VDMA_MR0, 0);  /* Reset int enable */
+	mutex_unlock(&viafb_dma_lock);
+	dma_free_coherent(&global_dev.pdev->dev,
+			nsg*sizeof(struct viafb_vx855_dma_descr), descrpages,
+			descr_handle);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(viafb_dma_copy_out_sg);
+
+
+/* ---------------------------------------------------------------------- */
 /*
  * Figure out how big our framebuffer memory is.  Kind of ugly,
  * but evidently we can't trust the information found in the
diff --git a/drivers/video/via/via-core.h b/drivers/video/via/via-core.h
index ba64b36d58e3..3d03141d6074 100644
--- a/drivers/video/via/via-core.h
+++ b/drivers/video/via/via-core.h
@@ -131,4 +131,33 @@ void viafb_irq_disable(u32 mask);
 #define VDE_I_LVDSSIEN	0x40000000	/* LVDS Sense enable */
 #define VDE_I_ENABLE	0x80000000	/* Global interrupt enable */
 
+/*
+ * DMA management.
+ */
+int viafb_request_dma(void);
+void viafb_release_dma(void);
+/* void viafb_dma_copy_out(unsigned int offset, dma_addr_t paddr, int len); */
+int viafb_dma_copy_out_sg(unsigned int offset, struct scatterlist *sg, int nsg);
+
+/*
+ * DMA Controller registers.
+ */
+#define VDMA_MR0	0xe00		/* Mod reg 0 */
+#define VDMA_MR_CHAIN	0x01		/* Chaining mode */
+#define VDMA_MR_TDIE	0x02		/* Transfer done int enable */
+#define VDMA_CSR0	0xe04		/* Control/status */
+#define VDMA_C_ENABLE	0x01		/* DMA Enable */
+#define VDMA_C_START	0x02		/* Start a transfer */
+#define VDMA_C_ABORT	0x04		/* Abort a transfer */
+#define VDMA_C_DONE	0x08		/* Transfer is done */
+#define VDMA_MARL0	0xe20		/* Mem addr low */
+#define VDMA_MARH0	0xe24		/* Mem addr high */
+#define VDMA_DAR0	0xe28		/* Device address */
+#define VDMA_DQWCR0	0xe2c		/* Count (16-byte) */
+#define VDMA_TMR0	0xe30		/* Tile mode reg */
+#define VDMA_DPRL0	0xe34		/* Not sure */
+#define VDMA_DPR_IN	0x08		/* Inbound transfer to FB */
+#define VDMA_DPRH0	0xe38
+#define VDMA_PMR0	(0xe00 + 0x134)	/* Pitch mode */
+
 #endif /* __VIA_CORE_H__ */