Diffstat (limited to 'drivers')

 drivers/dma/omap-dma.c           | 659
 drivers/mmc/card/queue.c         |   2
 drivers/mmc/host/mmci.h          |   9
 drivers/scsi/arm/acornscsi.c     |   2
 drivers/scsi/arm/cumana_1.c      |   2
 drivers/scsi/arm/cumana_2.c      |   2
 drivers/scsi/arm/powertec.c      |   2
 drivers/scsi/scsi_lib.c          |   2
 drivers/usb/gadget/lpc32xx_udc.c |   1

 9 files changed, 603 insertions(+), 78 deletions(-)
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 362e7c49f2e1..64ceca2920b8 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -5,6 +5,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/delay.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
 #include <linux/err.h>
@@ -26,11 +27,21 @@ struct omap_dmadev {
     spinlock_t lock;
     struct tasklet_struct task;
     struct list_head pending;
+    void __iomem *base;
+    const struct omap_dma_reg *reg_map;
+    struct omap_system_dma_plat_info *plat;
+    bool legacy;
+    spinlock_t irq_lock;
+    uint32_t irq_enable_mask;
+    struct omap_chan *lch_map[32];
 };

 struct omap_chan {
     struct virt_dma_chan vc;
     struct list_head node;
+    void __iomem *channel_base;
+    const struct omap_dma_reg *reg_map;
+    uint32_t ccr;

     struct dma_slave_config cfg;
     unsigned dma_sig;
@@ -54,19 +65,93 @@ struct omap_desc {
     dma_addr_t dev_addr;

     int16_t fi;      /* for OMAP_DMA_SYNC_PACKET */
-    uint8_t es;      /* OMAP_DMA_DATA_TYPE_xxx */
-    uint8_t sync_mode;   /* OMAP_DMA_SYNC_xxx */
-    uint8_t sync_type;   /* OMAP_DMA_xxx_SYNC* */
-    uint8_t periph_port; /* Peripheral port */
+    uint8_t es;      /* CSDP_DATA_TYPE_xxx */
+    uint32_t ccr;    /* CCR value */
+    uint16_t clnk_ctrl;  /* CLNK_CTRL value */
+    uint16_t cicr;   /* CICR value */
+    uint32_t csdp;   /* CSDP value */

     unsigned sglen;
     struct omap_sg sg[0];
 };

+enum {
+    CCR_FS = BIT(5),
+    CCR_READ_PRIORITY = BIT(6),
+    CCR_ENABLE = BIT(7),
+    CCR_AUTO_INIT = BIT(8),          /* OMAP1 only */
+    CCR_REPEAT = BIT(9),             /* OMAP1 only */
+    CCR_OMAP31_DISABLE = BIT(10),    /* OMAP1 only */
+    CCR_SUSPEND_SENSITIVE = BIT(8),  /* OMAP2+ only */
+    CCR_RD_ACTIVE = BIT(9),          /* OMAP2+ only */
+    CCR_WR_ACTIVE = BIT(10),         /* OMAP2+ only */
+    CCR_SRC_AMODE_CONSTANT = 0 << 12,
+    CCR_SRC_AMODE_POSTINC = 1 << 12,
+    CCR_SRC_AMODE_SGLIDX = 2 << 12,
+    CCR_SRC_AMODE_DBLIDX = 3 << 12,
+    CCR_DST_AMODE_CONSTANT = 0 << 14,
+    CCR_DST_AMODE_POSTINC = 1 << 14,
+    CCR_DST_AMODE_SGLIDX = 2 << 14,
+    CCR_DST_AMODE_DBLIDX = 3 << 14,
+    CCR_CONSTANT_FILL = BIT(16),
+    CCR_TRANSPARENT_COPY = BIT(17),
+    CCR_BS = BIT(18),
+    CCR_SUPERVISOR = BIT(22),
+    CCR_PREFETCH = BIT(23),
+    CCR_TRIGGER_SRC = BIT(24),
+    CCR_BUFFERING_DISABLE = BIT(25),
+    CCR_WRITE_PRIORITY = BIT(26),
+    CCR_SYNC_ELEMENT = 0,
+    CCR_SYNC_FRAME = CCR_FS,
+    CCR_SYNC_BLOCK = CCR_BS,
+    CCR_SYNC_PACKET = CCR_BS | CCR_FS,
+
+    CSDP_DATA_TYPE_8 = 0,
+    CSDP_DATA_TYPE_16 = 1,
+    CSDP_DATA_TYPE_32 = 2,
+    CSDP_SRC_PORT_EMIFF = 0 << 2,    /* OMAP1 only */
+    CSDP_SRC_PORT_EMIFS = 1 << 2,    /* OMAP1 only */
+    CSDP_SRC_PORT_OCP_T1 = 2 << 2,   /* OMAP1 only */
+    CSDP_SRC_PORT_TIPB = 3 << 2,     /* OMAP1 only */
+    CSDP_SRC_PORT_OCP_T2 = 4 << 2,   /* OMAP1 only */
+    CSDP_SRC_PORT_MPUI = 5 << 2,     /* OMAP1 only */
+    CSDP_SRC_PACKED = BIT(6),
+    CSDP_SRC_BURST_1 = 0 << 7,
+    CSDP_SRC_BURST_16 = 1 << 7,
+    CSDP_SRC_BURST_32 = 2 << 7,
+    CSDP_SRC_BURST_64 = 3 << 7,
+    CSDP_DST_PORT_EMIFF = 0 << 9,    /* OMAP1 only */
+    CSDP_DST_PORT_EMIFS = 1 << 9,    /* OMAP1 only */
+    CSDP_DST_PORT_OCP_T1 = 2 << 9,   /* OMAP1 only */
+    CSDP_DST_PORT_TIPB = 3 << 9,     /* OMAP1 only */
+    CSDP_DST_PORT_OCP_T2 = 4 << 9,   /* OMAP1 only */
+    CSDP_DST_PORT_MPUI = 5 << 9,     /* OMAP1 only */
+    CSDP_DST_PACKED = BIT(13),
+    CSDP_DST_BURST_1 = 0 << 14,
+    CSDP_DST_BURST_16 = 1 << 14,
+    CSDP_DST_BURST_32 = 2 << 14,
+    CSDP_DST_BURST_64 = 3 << 14,
+
+    CICR_TOUT_IE = BIT(0),           /* OMAP1 only */
+    CICR_DROP_IE = BIT(1),
+    CICR_HALF_IE = BIT(2),
+    CICR_FRAME_IE = BIT(3),
+    CICR_LAST_IE = BIT(4),
+    CICR_BLOCK_IE = BIT(5),
+    CICR_PKT_IE = BIT(7),            /* OMAP2+ only */
+    CICR_TRANS_ERR_IE = BIT(8),      /* OMAP2+ only */
+    CICR_SUPERVISOR_ERR_IE = BIT(10), /* OMAP2+ only */
+    CICR_MISALIGNED_ERR_IE = BIT(11), /* OMAP2+ only */
+    CICR_DRAIN_IE = BIT(12),         /* OMAP2+ only */
+    CICR_SUPER_BLOCK_IE = BIT(14),   /* OMAP2+ only */
+
+    CLNK_CTRL_ENABLE_LNK = BIT(15),
+};
+
 static const unsigned es_bytes[] = {
-    [OMAP_DMA_DATA_TYPE_S8] = 1,
-    [OMAP_DMA_DATA_TYPE_S16] = 2,
-    [OMAP_DMA_DATA_TYPE_S32] = 4,
+    [CSDP_DATA_TYPE_8] = 1,
+    [CSDP_DATA_TYPE_16] = 2,
+    [CSDP_DATA_TYPE_32] = 4,
 };

 static struct of_dma_filter_info omap_dma_info = {
@@ -93,28 +178,214 @@ static void omap_dma_desc_free(struct virt_dma_desc *vd)
     kfree(container_of(vd, struct omap_desc, vd));
 }

+static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr)
+{
+    switch (type) {
+    case OMAP_DMA_REG_16BIT:
+        writew_relaxed(val, addr);
+        break;
+    case OMAP_DMA_REG_2X16BIT:
+        writew_relaxed(val, addr);
+        writew_relaxed(val >> 16, addr + 2);
+        break;
+    case OMAP_DMA_REG_32BIT:
+        writel_relaxed(val, addr);
+        break;
+    default:
+        WARN_ON(1);
+    }
+}
+
+static unsigned omap_dma_read(unsigned type, void __iomem *addr)
+{
+    unsigned val;
+
+    switch (type) {
+    case OMAP_DMA_REG_16BIT:
+        val = readw_relaxed(addr);
+        break;
+    case OMAP_DMA_REG_2X16BIT:
+        val = readw_relaxed(addr);
+        val |= readw_relaxed(addr + 2) << 16;
+        break;
+    case OMAP_DMA_REG_32BIT:
+        val = readl_relaxed(addr);
+        break;
+    default:
+        WARN_ON(1);
+        val = 0;
+    }
+
+    return val;
+}
+
+static void omap_dma_glbl_write(struct omap_dmadev *od, unsigned reg, unsigned val)
+{
+    const struct omap_dma_reg *r = od->reg_map + reg;
+
+    WARN_ON(r->stride);
+
+    omap_dma_write(val, r->type, od->base + r->offset);
+}
+
+static unsigned omap_dma_glbl_read(struct omap_dmadev *od, unsigned reg)
+{
+    const struct omap_dma_reg *r = od->reg_map + reg;
+
+    WARN_ON(r->stride);
+
+    return omap_dma_read(r->type, od->base + r->offset);
+}
+
+static void omap_dma_chan_write(struct omap_chan *c, unsigned reg, unsigned val)
+{
+    const struct omap_dma_reg *r = c->reg_map + reg;
+
+    omap_dma_write(val, r->type, c->channel_base + r->offset);
+}
+
+static unsigned omap_dma_chan_read(struct omap_chan *c, unsigned reg)
+{
+    const struct omap_dma_reg *r = c->reg_map + reg;
+
+    return omap_dma_read(r->type, c->channel_base + r->offset);
+}
+
+static void omap_dma_clear_csr(struct omap_chan *c)
+{
+    if (dma_omap1())
+        omap_dma_chan_read(c, CSR);
+    else
+        omap_dma_chan_write(c, CSR, ~0);
+}
+
+static unsigned omap_dma_get_csr(struct omap_chan *c)
+{
+    unsigned val = omap_dma_chan_read(c, CSR);
+
+    if (!dma_omap1())
+        omap_dma_chan_write(c, CSR, val);
+
+    return val;
+}
+
+static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
+    unsigned lch)
+{
+    c->channel_base = od->base + od->plat->channel_stride * lch;
+
+    od->lch_map[lch] = c;
+}
+
+static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
+{
+    struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
+
+    if (__dma_omap15xx(od->plat->dma_attr))
+        omap_dma_chan_write(c, CPC, 0);
+    else
+        omap_dma_chan_write(c, CDAC, 0);
+
+    omap_dma_clear_csr(c);
+
+    /* Enable interrupts */
+    omap_dma_chan_write(c, CICR, d->cicr);
+
+    /* Enable channel */
+    omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
+}
+
+static void omap_dma_stop(struct omap_chan *c)
+{
+    struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
+    uint32_t val;
+
+    /* disable irq */
+    omap_dma_chan_write(c, CICR, 0);
+
+    omap_dma_clear_csr(c);
+
+    val = omap_dma_chan_read(c, CCR);
+    if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
+        uint32_t sysconfig;
+        unsigned i;
+
+        sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
+        val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
+        val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
+        omap_dma_glbl_write(od, OCP_SYSCONFIG, val);
+
+        val = omap_dma_chan_read(c, CCR);
+        val &= ~CCR_ENABLE;
+        omap_dma_chan_write(c, CCR, val);
+
+        /* Wait for sDMA FIFO to drain */
+        for (i = 0; ; i++) {
+            val = omap_dma_chan_read(c, CCR);
+            if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
+                break;
+
+            if (i > 100)
+                break;
+
+            udelay(5);
+        }
+
+        if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
+            dev_err(c->vc.chan.device->dev,
+                "DMA drain did not complete on lch %d\n",
+                c->dma_ch);
+
+        omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
+    } else {
+        val &= ~CCR_ENABLE;
+        omap_dma_chan_write(c, CCR, val);
+    }
+
+    mb();
+
+    if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
+        val = omap_dma_chan_read(c, CLNK_CTRL);
+
+        if (dma_omap1())
+            val |= 1 << 14; /* set the STOP_LNK bit */
+        else
+            val &= ~CLNK_CTRL_ENABLE_LNK;
+
+        omap_dma_chan_write(c, CLNK_CTRL, val);
+    }
+}
+
 static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
     unsigned idx)
 {
     struct omap_sg *sg = d->sg + idx;
+    unsigned cxsa, cxei, cxfi;

-    if (d->dir == DMA_DEV_TO_MEM)
-        omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
-            OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
-    else
-        omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
-            OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
+    if (d->dir == DMA_DEV_TO_MEM) {
+        cxsa = CDSA;
+        cxei = CDEI;
+        cxfi = CDFI;
+    } else {
+        cxsa = CSSA;
+        cxei = CSEI;
+        cxfi = CSFI;
+    }

-    omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn,
-        d->sync_mode, c->dma_sig, d->sync_type);
+    omap_dma_chan_write(c, cxsa, sg->addr);
+    omap_dma_chan_write(c, cxei, 0);
+    omap_dma_chan_write(c, cxfi, 0);
+    omap_dma_chan_write(c, CEN, sg->en);
+    omap_dma_chan_write(c, CFN, sg->fn);

-    omap_start_dma(c->dma_ch);
+    omap_dma_start(c, d);
 }

 static void omap_dma_start_desc(struct omap_chan *c)
 {
     struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
     struct omap_desc *d;
+    unsigned cxsa, cxei, cxfi;

     if (!vd) {
         c->desc = NULL;
@@ -126,12 +397,32 @@ static void omap_dma_start_desc(struct omap_chan *c)
     c->desc = d = to_omap_dma_desc(&vd->tx);
     c->sgidx = 0;

-    if (d->dir == DMA_DEV_TO_MEM)
-        omap_set_dma_src_params(c->dma_ch, d->periph_port,
-            OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
-    else
-        omap_set_dma_dest_params(c->dma_ch, d->periph_port,
-            OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
+    /*
+     * This provides the necessary barrier to ensure data held in
+     * DMA coherent memory is visible to the DMA engine prior to
+     * the transfer starting.
+     */
+    mb();
+
+    omap_dma_chan_write(c, CCR, d->ccr);
+    if (dma_omap1())
+        omap_dma_chan_write(c, CCR2, d->ccr >> 16);
+
+    if (d->dir == DMA_DEV_TO_MEM) {
+        cxsa = CSSA;
+        cxei = CSEI;
+        cxfi = CSFI;
+    } else {
+        cxsa = CDSA;
+        cxei = CDEI;
+        cxfi = CDFI;
+    }
+
+    omap_dma_chan_write(c, cxsa, d->dev_addr);
+    omap_dma_chan_write(c, cxei, 0);
+    omap_dma_chan_write(c, cxfi, d->fi);
+    omap_dma_chan_write(c, CSDP, d->csdp);
+    omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);

     omap_dma_start_sg(c, d, 0);
 }
@@ -186,24 +477,118 @@ static void omap_dma_sched(unsigned long data)
     }
 }

+static irqreturn_t omap_dma_irq(int irq, void *devid)
+{
+    struct omap_dmadev *od = devid;
+    unsigned status, channel;
+
+    spin_lock(&od->irq_lock);
+
+    status = omap_dma_glbl_read(od, IRQSTATUS_L1);
+    status &= od->irq_enable_mask;
+    if (status == 0) {
+        spin_unlock(&od->irq_lock);
+        return IRQ_NONE;
+    }
+
+    while ((channel = ffs(status)) != 0) {
+        unsigned mask, csr;
+        struct omap_chan *c;
+
+        channel -= 1;
+        mask = BIT(channel);
+        status &= ~mask;
+
+        c = od->lch_map[channel];
+        if (c == NULL) {
+            /* This should never happen */
+            dev_err(od->ddev.dev, "invalid channel %u\n", channel);
+            continue;
+        }
+
+        csr = omap_dma_get_csr(c);
+        omap_dma_glbl_write(od, IRQSTATUS_L1, mask);
+
+        omap_dma_callback(channel, csr, c);
+    }
+
+    spin_unlock(&od->irq_lock);
+
+    return IRQ_HANDLED;
+}
+
 static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
 {
+    struct omap_dmadev *od = to_omap_dma_dev(chan->device);
     struct omap_chan *c = to_omap_dma_chan(chan);
+    int ret;
+
+    if (od->legacy) {
+        ret = omap_request_dma(c->dma_sig, "DMA engine",
+            omap_dma_callback, c, &c->dma_ch);
+    } else {
+        ret = omap_request_dma(c->dma_sig, "DMA engine", NULL, NULL,
+            &c->dma_ch);
+    }
+
+    dev_dbg(od->ddev.dev, "allocating channel %u for %u\n",
+        c->dma_ch, c->dma_sig);

-    dev_dbg(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);
+    if (ret >= 0) {
+        omap_dma_assign(od, c, c->dma_ch);

-    return omap_request_dma(c->dma_sig, "DMA engine",
-        omap_dma_callback, c, &c->dma_ch);
+        if (!od->legacy) {
+            unsigned val;
+
+            spin_lock_irq(&od->irq_lock);
+            val = BIT(c->dma_ch);
+            omap_dma_glbl_write(od, IRQSTATUS_L1, val);
+            od->irq_enable_mask |= val;
+            omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
+
+            val = omap_dma_glbl_read(od, IRQENABLE_L0);
+            val &= ~BIT(c->dma_ch);
+            omap_dma_glbl_write(od, IRQENABLE_L0, val);
+            spin_unlock_irq(&od->irq_lock);
+        }
+    }
+
+    if (dma_omap1()) {
+        if (__dma_omap16xx(od->plat->dma_attr)) {
+            c->ccr = CCR_OMAP31_DISABLE;
+            /* Duplicate what plat-omap/dma.c does */
+            c->ccr |= c->dma_ch + 1;
+        } else {
+            c->ccr = c->dma_sig & 0x1f;
+        }
+    } else {
+        c->ccr = c->dma_sig & 0x1f;
+        c->ccr |= (c->dma_sig & ~0x1f) << 14;
+    }
+    if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
+        c->ccr |= CCR_BUFFERING_DISABLE;
+
+    return ret;
 }

 static void omap_dma_free_chan_resources(struct dma_chan *chan)
 {
+    struct omap_dmadev *od = to_omap_dma_dev(chan->device);
     struct omap_chan *c = to_omap_dma_chan(chan);

+    if (!od->legacy) {
+        spin_lock_irq(&od->irq_lock);
+        od->irq_enable_mask &= ~BIT(c->dma_ch);
+        omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
+        spin_unlock_irq(&od->irq_lock);
+    }
+
+    c->channel_base = NULL;
+    od->lch_map[c->dma_ch] = NULL;
     vchan_free_chan_resources(&c->vc);
     omap_free_dma(c->dma_ch);

-    dev_dbg(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
+    dev_dbg(od->ddev.dev, "freeing channel for %u\n", c->dma_sig);
 }

 static size_t omap_dma_sg_size(struct omap_sg *sg)
@@ -239,6 +624,74 @@ static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
     return size;
 }

+/*
+ * OMAP 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
+ * read before the DMA controller finished disabling the channel.
+ */
+static uint32_t omap_dma_chan_read_3_3(struct omap_chan *c, unsigned reg)
+{
+    struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
+    uint32_t val;
+
+    val = omap_dma_chan_read(c, reg);
+    if (val == 0 && od->plat->errata & DMA_ERRATA_3_3)
+        val = omap_dma_chan_read(c, reg);
+
+    return val;
+}
+
+static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
+{
+    struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
+    dma_addr_t addr, cdac;
+
+    if (__dma_omap15xx(od->plat->dma_attr)) {
+        addr = omap_dma_chan_read(c, CPC);
+    } else {
+        addr = omap_dma_chan_read_3_3(c, CSAC);
+        cdac = omap_dma_chan_read_3_3(c, CDAC);
+
+        /*
+         * CDAC == 0 indicates that the DMA transfer on the channel has
+         * not been started (no data has been transferred so far).
+         * Return the programmed source start address in this case.
+         */
+        if (cdac == 0)
+            addr = omap_dma_chan_read(c, CSSA);
+    }
+
+    if (dma_omap1())
+        addr |= omap_dma_chan_read(c, CSSA) & 0xffff0000;
+
+    return addr;
+}
+
+static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
+{
+    struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
+    dma_addr_t addr;
+
+    if (__dma_omap15xx(od->plat->dma_attr)) {
+        addr = omap_dma_chan_read(c, CPC);
+    } else {
+        addr = omap_dma_chan_read_3_3(c, CDAC);
+
+        /*
+         * CDAC == 0 indicates that the DMA transfer on the channel
+         * has not been started (no data has been transferred so
+         * far). Return the programmed destination start address in
+         * this case.
+         */
+        if (addr == 0)
+            addr = omap_dma_chan_read(c, CDSA);
+    }
+
+    if (dma_omap1())
+        addr |= omap_dma_chan_read(c, CDSA) & 0xffff0000;
+
+    return addr;
+}
+
 static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
     dma_cookie_t cookie, struct dma_tx_state *txstate)
 {
@@ -260,9 +713,9 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
         dma_addr_t pos;

         if (d->dir == DMA_MEM_TO_DEV)
-            pos = omap_get_dma_src_pos(c->dma_ch);
+            pos = omap_dma_get_src_pos(c);
         else if (d->dir == DMA_DEV_TO_MEM)
-            pos = omap_get_dma_dst_pos(c->dma_ch);
+            pos = omap_dma_get_dst_pos(c);
         else
             pos = 0;

@@ -304,24 +757,23 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
     struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
     enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
 {
+    struct omap_dmadev *od = to_omap_dma_dev(chan->device);
     struct omap_chan *c = to_omap_dma_chan(chan);
     enum dma_slave_buswidth dev_width;
     struct scatterlist *sgent;
     struct omap_desc *d;
     dma_addr_t dev_addr;
-    unsigned i, j = 0, es, en, frame_bytes, sync_type;
+    unsigned i, j = 0, es, en, frame_bytes;
     u32 burst;

     if (dir == DMA_DEV_TO_MEM) {
         dev_addr = c->cfg.src_addr;
         dev_width = c->cfg.src_addr_width;
         burst = c->cfg.src_maxburst;
-        sync_type = OMAP_DMA_SRC_SYNC;
     } else if (dir == DMA_MEM_TO_DEV) {
         dev_addr = c->cfg.dst_addr;
         dev_width = c->cfg.dst_addr_width;
         burst = c->cfg.dst_maxburst;
-        sync_type = OMAP_DMA_DST_SYNC;
     } else {
         dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
         return NULL;
@@ -330,13 +782,13 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
     /* Bus width translates to the element size (ES) */
     switch (dev_width) {
     case DMA_SLAVE_BUSWIDTH_1_BYTE:
-        es = OMAP_DMA_DATA_TYPE_S8;
+        es = CSDP_DATA_TYPE_8;
         break;
     case DMA_SLAVE_BUSWIDTH_2_BYTES:
-        es = OMAP_DMA_DATA_TYPE_S16;
+        es = CSDP_DATA_TYPE_16;
         break;
     case DMA_SLAVE_BUSWIDTH_4_BYTES:
-        es = OMAP_DMA_DATA_TYPE_S32;
+        es = CSDP_DATA_TYPE_32;
         break;
     default: /* not reached */
         return NULL;
@@ -350,9 +802,31 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
     d->dir = dir;
     d->dev_addr = dev_addr;
     d->es = es;
-    d->sync_mode = OMAP_DMA_SYNC_FRAME;
-    d->sync_type = sync_type;
-    d->periph_port = OMAP_DMA_PORT_TIPB;
+
+    d->ccr = c->ccr | CCR_SYNC_FRAME;
+    if (dir == DMA_DEV_TO_MEM)
+        d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
+    else
+        d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;
+
+    d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
+    d->csdp = es;
+
+    if (dma_omap1()) {
+        d->cicr |= CICR_TOUT_IE;
+
+        if (dir == DMA_DEV_TO_MEM)
+            d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB;
+        else
+            d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF;
+    } else {
+        if (dir == DMA_DEV_TO_MEM)
+            d->ccr |= CCR_TRIGGER_SRC;
+
+        d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
+    }
+    if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
+        d->clnk_ctrl = c->dma_ch;

     /*
      * Build our scatterlist entries: each contains the address,
@@ -382,23 +856,22 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
     size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
     void *context)
 {
+    struct omap_dmadev *od = to_omap_dma_dev(chan->device);
     struct omap_chan *c = to_omap_dma_chan(chan);
     enum dma_slave_buswidth dev_width;
     struct omap_desc *d;
     dma_addr_t dev_addr;
-    unsigned es, sync_type;
+    unsigned es;
     u32 burst;

     if (dir == DMA_DEV_TO_MEM) {
         dev_addr = c->cfg.src_addr;
         dev_width = c->cfg.src_addr_width;
         burst = c->cfg.src_maxburst;
-        sync_type = OMAP_DMA_SRC_SYNC;
     } else if (dir == DMA_MEM_TO_DEV) {
         dev_addr = c->cfg.dst_addr;
         dev_width = c->cfg.dst_addr_width;
         burst = c->cfg.dst_maxburst;
-        sync_type = OMAP_DMA_DST_SYNC;
     } else {
         dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
         return NULL;
@@ -407,13 +880,13 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
     /* Bus width translates to the element size (ES) */
     switch (dev_width) {
     case DMA_SLAVE_BUSWIDTH_1_BYTE:
-        es = OMAP_DMA_DATA_TYPE_S8;
+        es = CSDP_DATA_TYPE_8;
         break;
     case DMA_SLAVE_BUSWIDTH_2_BYTES:
-        es = OMAP_DMA_DATA_TYPE_S16;
+        es = CSDP_DATA_TYPE_16;
         break;
     case DMA_SLAVE_BUSWIDTH_4_BYTES:
-        es = OMAP_DMA_DATA_TYPE_S32;
+        es = CSDP_DATA_TYPE_32;
         break;
     default: /* not reached */
         return NULL;
@@ -428,32 +901,51 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
     d->dev_addr = dev_addr;
     d->fi = burst;
     d->es = es;
-    if (burst)
-        d->sync_mode = OMAP_DMA_SYNC_PACKET;
-    else
-        d->sync_mode = OMAP_DMA_SYNC_ELEMENT;
-    d->sync_type = sync_type;
-    d->periph_port = OMAP_DMA_PORT_MPUI;
     d->sg[0].addr = buf_addr;
     d->sg[0].en = period_len / es_bytes[es];
     d->sg[0].fn = buf_len / period_len;
     d->sglen = 1;

-    if (!c->cyclic) {
-        c->cyclic = true;
-        omap_dma_link_lch(c->dma_ch, c->dma_ch);
+    d->ccr = c->ccr;
+    if (dir == DMA_DEV_TO_MEM)
+        d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
+    else
+        d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

-        if (flags & DMA_PREP_INTERRUPT)
-            omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);
+    d->cicr = CICR_DROP_IE;
+    if (flags & DMA_PREP_INTERRUPT)
+        d->cicr |= CICR_FRAME_IE;

-        omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
-    }
+    d->csdp = es;
+
+    if (dma_omap1()) {
+        d->cicr |= CICR_TOUT_IE;
+
+        if (dir == DMA_DEV_TO_MEM)
+            d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI;
+        else
+            d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF;
+    } else {
+        if (burst)
+            d->ccr |= CCR_SYNC_PACKET;
+        else
+            d->ccr |= CCR_SYNC_ELEMENT;
+
+        if (dir == DMA_DEV_TO_MEM)
+            d->ccr |= CCR_TRIGGER_SRC;
+
+        d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;

-    if (dma_omap2plus()) {
-        omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
-        omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
+        d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
     }

+    if (__dma_omap15xx(od->plat->dma_attr))
+        d->ccr |= CCR_AUTO_INIT | CCR_REPEAT;
+    else
+        d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK;
+
+    c->cyclic = true;
+
     return vchan_tx_prep(&c->vc, &d->vd, flags);
 }

@@ -483,20 +975,19 @@ static int omap_dma_terminate_all(struct omap_chan *c)

     /*
      * Stop DMA activity: we assume the callback will not be called
-     * after omap_stop_dma() returns (even if it does, it will see
+     * after omap_dma_stop() returns (even if it does, it will see
      * c->desc is NULL and exit.)
      */
     if (c->desc) {
         c->desc = NULL;
         /* Avoid stopping the dma twice */
         if (!c->paused)
-            omap_stop_dma(c->dma_ch);
+            omap_dma_stop(c);
     }

     if (c->cyclic) {
         c->cyclic = false;
         c->paused = false;
-        omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
     }

     vchan_get_all_descriptors(&c->vc, &head);
@@ -513,7 +1004,7 @@ static int omap_dma_pause(struct omap_chan *c)
         return -EINVAL;

     if (!c->paused) {
-        omap_stop_dma(c->dma_ch);
+        omap_dma_stop(c);
         c->paused = true;
     }

@@ -527,7 +1018,7 @@ static int omap_dma_resume(struct omap_chan *c)
         return -EINVAL;

     if (c->paused) {
-        omap_start_dma(c->dma_ch);
+        omap_dma_start(c, c->desc);
         c->paused = false;
     }

@@ -573,6 +1064,7 @@ static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
     if (!c)
         return -ENOMEM;

+    c->reg_map = od->reg_map;
     c->dma_sig = dma_sig;
     c->vc.desc_free = omap_dma_desc_free;
     vchan_init(&c->vc, &od->ddev);
@@ -594,18 +1086,29 @@ static void omap_dma_free(struct omap_dmadev *od)
         tasklet_kill(&c->vc.task);
         kfree(c);
     }
-    kfree(od);
 }

 static int omap_dma_probe(struct platform_device *pdev)
 {
     struct omap_dmadev *od;
-    int rc, i;
+    struct resource *res;
+    int rc, i, irq;

-    od = kzalloc(sizeof(*od), GFP_KERNEL);
+    od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
     if (!od)
         return -ENOMEM;

+    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+    od->base = devm_ioremap_resource(&pdev->dev, res);
+    if (IS_ERR(od->base))
+        return PTR_ERR(od->base);
+
+    od->plat = omap_get_plat_info();
+    if (!od->plat)
+        return -EPROBE_DEFER;
+
+    od->reg_map = od->plat->reg_map;
+
     dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
     dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
     od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
@@ -619,6 +1122,7 @@ static int omap_dma_probe(struct platform_device *pdev)
     INIT_LIST_HEAD(&od->ddev.channels);
     INIT_LIST_HEAD(&od->pending);
     spin_lock_init(&od->lock);
+    spin_lock_init(&od->irq_lock);

     tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);

@@ -630,6 +1134,21 @@ static int omap_dma_probe(struct platform_device *pdev)
         }
     }

+    irq = platform_get_irq(pdev, 1);
+    if (irq <= 0) {
+        dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n", irq);
+        od->legacy = true;
+    } else {
+        /* Disable all interrupts */
+        od->irq_enable_mask = 0;
+        omap_dma_glbl_write(od, IRQENABLE_L1, 0);
+
+        rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
+            IRQF_SHARED, "omap-dma-engine", od);
+        if (rc)
+            return rc;
+    }
+
     rc = dma_async_device_register(&od->ddev);
     if (rc) {
         pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
@@ -666,6 +1185,12 @@ static int omap_dma_remove(struct platform_device *pdev)
     of_dma_controller_free(pdev->dev.of_node);

     dma_async_device_unregister(&od->ddev);
+
+    if (!od->legacy) {
+        /* Disable all interrupts */
+        omap_dma_glbl_write(od, IRQENABLE_L0, 0);
+    }
+
     omap_dma_free(od);

     return 0;
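
Note: the converted driver above still sits behind the standard dmaengine slave API, so client drivers are unaffected by the switch to direct register access. For orientation, a minimal client-side sketch follows; it is illustrative only, not part of this patch, and the device pointer, the "rx" channel name, the FIFO address and the buffer parameters are assumptions:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Hypothetical client: start a cyclic DEV_TO_MEM transfer on channel "rx". */
static int example_start_rx(struct device *dev, dma_addr_t fifo_addr,
                            dma_addr_t buf, size_t buf_len, size_t period_len)
{
        struct dma_slave_config cfg = {
                .direction      = DMA_DEV_TO_MEM,
                .src_addr       = fifo_addr,
                .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .src_maxburst   = 16,
        };
        struct dma_async_tx_descriptor *desc;
        struct dma_chan *chan;

        chan = dma_request_slave_channel(dev, "rx");
        if (!chan)
                return -ENODEV;

        dmaengine_slave_config(chan, &cfg);

        /* Ends up in omap_dma_prep_dma_cyclic() in the driver above. */
        desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
                                         DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
        if (!desc) {
                dma_release_channel(chan);
                return -EIO;
        }

        dmaengine_submit(desc);
        dma_async_issue_pending(chan);
        return 0;
}
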
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 357bbc54fe4b..3e049c13429c 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -197,7 +197,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
     struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

     if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
-        limit = dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
+        limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

     mq->card = card;
     mq->queue = blk_init_queue(mmc_request_fn, lock);
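
The (u64) cast here, and the identical one in the scsi_lib.c hunk below, matters because dma_max_pfn() returns an unsigned long, which is 32 bits wide on ARM: the left shift by PAGE_SHIFT is then evaluated in 32-bit arithmetic and can wrap before the result is widened to the 64-bit limit, for example when the DMA-reachable physical range extends above 4 GiB on an LPAE system. A small standalone illustration (not kernel code; the PFN value is made up for the example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const unsigned page_shift = 12;
        uint32_t max_pfn = 0x17ffffu;   /* hypothetical top-of-RAM PFN, ~6 GiB */

        /* What 32-bit "unsigned long" arithmetic yields: shift, then wrap. */
        uint64_t truncated = (uint32_t)((uint64_t)max_pfn << page_shift);
        /* What the (u64) cast in the patch yields: a full 64-bit shift. */
        uint64_t correct = (uint64_t)max_pfn << page_shift;

        printf("without cast: %#llx\n", (unsigned long long)truncated);
        printf("with cast:    %#llx\n", (unsigned long long)correct);
        return 0;
}
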
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 168bc72f7a94..84c0e59b792a 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -38,10 +38,11 @@
 #define MCI_CPSM_INTERRUPT (1 << 8)
 #define MCI_CPSM_PENDING (1 << 9)
 #define MCI_CPSM_ENABLE (1 << 10)
-#define MCI_SDIO_SUSP (1 << 11)
-#define MCI_ENCMD_COMPL (1 << 12)
-#define MCI_NIEN (1 << 13)
-#define MCI_CE_ATACMD (1 << 14)
+/* Argument flag extenstions in the ST Micro versions */
+#define MCI_ST_SDIO_SUSP (1 << 11)
+#define MCI_ST_ENCMD_COMPL (1 << 12)
+#define MCI_ST_NIEN (1 << 13)
+#define MCI_ST_CE_ATACMD (1 << 14)

 #define MMCIRESPCMD 0x010
 #define MMCIRESPONSE0 0x014
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 09ba1869d366..059ff477a398 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -2971,7 +2971,7 @@ static int acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
     ec->irqaddr = ashost->fast + INT_REG;
     ec->irqmask = 0x0a;

-    ret = request_irq(host->irq, acornscsi_intr, IRQF_DISABLED, "acornscsi", ashost);
+    ret = request_irq(host->irq, acornscsi_intr, 0, "acornscsi", ashost);
     if (ret) {
         printk(KERN_CRIT "scsi%d: IRQ%d not free: %d\n",
             host->host_no, ashost->scsi.irq, ret);
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index b679778376c5..f8e060900052 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -262,7 +262,7 @@ static int cumanascsi1_probe(struct expansion_card *ec,
         goto out_unmap;
     }

-    ret = request_irq(host->irq, cumanascsi_intr, IRQF_DISABLED,
+    ret = request_irq(host->irq, cumanascsi_intr, 0,
             "CumanaSCSI-1", host);
     if (ret) {
         printk("scsi%d: IRQ%d not free: %d\n",
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index 58915f29055b..abc66f5263ec 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -431,7 +431,7 @@ static int cumanascsi2_probe(struct expansion_card *ec,
         goto out_free;

     ret = request_irq(ec->irq, cumanascsi_2_intr,
-            IRQF_DISABLED, "cumanascsi2", info);
+            0, "cumanascsi2", info);
     if (ret) {
         printk("scsi%d: IRQ%d not free: %d\n",
             host->host_no, ec->irq, ret);
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
index abc9593615e9..5e1b73e1b743 100644
--- a/drivers/scsi/arm/powertec.c
+++ b/drivers/scsi/arm/powertec.c
@@ -358,7 +358,7 @@ static int powertecscsi_probe(struct expansion_card *ec,
         goto out_free;

     ret = request_irq(ec->irq, powertecscsi_intr,
-            IRQF_DISABLED, "powertec", info);
+            0, "powertec", info);
     if (ret) {
         printk("scsi%d: IRQ%d not free: %d\n",
             host->host_no, ec->irq, ret);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 7bd7f0d5f050..62ec84b42e31 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1684,7 +1684,7 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)

     host_dev = scsi_get_device(shost);
     if (host_dev && host_dev->dma_mask)
-        bounce_limit = dma_max_pfn(host_dev) << PAGE_SHIFT;
+        bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;

     return bounce_limit;
 }
diff --git a/drivers/usb/gadget/lpc32xx_udc.c b/drivers/usb/gadget/lpc32xx_udc.c
index 049ebab0d360..a94bb10eeb03 100644
--- a/drivers/usb/gadget/lpc32xx_udc.c
+++ b/drivers/usb/gadget/lpc32xx_udc.c
@@ -55,7 +55,6 @@
 #include <mach/hardware.h>
 #include <linux/io.h>
 #include <asm/irq.h>
-#include <asm/system.h>

 #include <mach/platform.h>
 #include <mach/irqs.h>